From a68cace63b920b09404841fe459e4da8ff6860bc Mon Sep 17 00:00:00 2001 From: Sourav Gupta <98318303+souravgupta-msft@users.noreply.github.com> Date: Thu, 16 May 2024 17:21:20 +0530 Subject: [PATCH 01/73] Updating changelog (#1415) --- CHANGELOG.md | 9 ++++++++- common/types.go | 2 +- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9cb27d7ca..e778f5a91 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,11 @@ -## 2.3.0 (Unreleased) +## 2.3.1 (Unreleased) +**Bug Fixes** + +**Features** + +**Other Changes** + +## 2.3.0 (2024-05-16) **Bug Fixes** - For fuse minor version check rely on the fusermount3 command output rather than one exposed from fuse_common. - Fixed large number of threads from TLRU causing crash during disk eviction in block-cache. diff --git a/common/types.go b/common/types.go index f3520bf75..baefaf59f 100644 --- a/common/types.go +++ b/common/types.go @@ -47,7 +47,7 @@ import ( // Standard config default values const ( - blobfuse2Version_ = "2.3.0" + blobfuse2Version_ = "2.3.1" DefaultMaxLogFileSize = 512 DefaultLogFileCount = 10 From 28bd925a9d32e75524ca1d89e5a2881b12bea677 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 12 Jun 2024 21:21:31 +0530 Subject: [PATCH 02/73] Bump github.com/Azure/azure-sdk-for-go/sdk/azidentity (#1433) Bumps [github.com/Azure/azure-sdk-for-go/sdk/azidentity](https://github.com/Azure/azure-sdk-for-go) from 1.5.2 to 1.6.0. --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index 34d4e5f0b..70dc54783 100755 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.20 require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake v1.1.1 github.com/JeffreyRichter/enum v0.0.0-20180725232043-2567042f9cda @@ -49,11 +49,11 @@ require ( github.com/spf13/cast v1.6.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.23.0 // indirect + golang.org/x/crypto v0.24.0 // indirect golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect - golang.org/x/net v0.25.0 // indirect - golang.org/x/sys v0.20.0 // indirect - golang.org/x/text v0.15.0 // indirect + golang.org/x/net v0.26.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect ) replace github.com/spf13/cobra => github.com/gapra-msft/cobra v1.4.1-0.20220411185530-5b83e8ba06dd diff --git a/go.sum b/go.sum index fa66c75c8..024714b64 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 
h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= @@ -99,16 +99,16 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -117,13 +117,13 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= From 0790773436cdae951a35a43bd3b7b6a9746f7c3e Mon Sep 17 00:00:00 2001 From: Vikas Bhansali <64532198+vibhansa-msft@users.noreply.github.com> Date: Thu, 13 Jun 2024 08:23:16 +0530 Subject: [PATCH 03/73] Correct error code from EIO to EACCES when creation of file fails due to insufficient permissions (#1428) --- component/libfuse/libfuse2_handler.go | 2 ++ component/libfuse/libfuse_handler.go | 2 ++ 2 files changed, 4 insertions(+) diff --git a/component/libfuse/libfuse2_handler.go b/component/libfuse/libfuse2_handler.go index 034ff70c2..24e7f3268 100644 --- a/component/libfuse/libfuse2_handler.go +++ b/component/libfuse/libfuse2_handler.go @@ -601,6 +601,8 @@ func libfuse_create(path *C.char, mode C.mode_t, fi *C.fuse_file_info_t) C.int { log.Err("Libfuse::libfuse2_create : Failed to create %s [%s]", name, err.Error()) if os.IsExist(err) { return -C.EEXIST + } else if os.IsPermission(err) { + return -C.EACCES } else { return -C.EIO } diff --git a/component/libfuse/libfuse_handler.go b/component/libfuse/libfuse_handler.go index 7985b878e..a2b087868 100644 --- a/component/libfuse/libfuse_handler.go +++ b/component/libfuse/libfuse_handler.go @@ -645,6 +645,8 @@ func libfuse_create(path *C.char, mode C.mode_t, fi *C.fuse_file_info_t) C.int { log.Err("Libfuse::libfuse_create : Failed to create %s [%s]", name, err.Error()) if os.IsExist(err) { return -C.EEXIST + } else if os.IsPermission(err) { + return -C.EACCES } else { return -C.EIO } From cefb0226f9a4f75c277dc816ffa0b52618e25126 Mon Sep 17 00:00:00 2001 From: Vikas Bhansali <64532198+vibhansa-msft@users.noreply.github.com> Date: Mon, 17 Jun 2024 16:19:22 +0530 Subject: [PATCH 04/73] Removing usage of SPN in pipelines (#1431) * Removing usage of SPN in pipelines --- .github/workflows/codeql-analysis.yml | 9 +- .../blobfuse2-ci-template.yml | 5 +- azure-pipeline-templates/build-release.yml | 3 - azure-pipeline-templates/build.yml | 5 +- azure-pipeline-templates/distro-tests.yml | 7 +- .../e2e-tests-block-cache.yml | 3 - azure-pipeline-templates/e2e-tests-spcl.yml | 4 - azure-pipeline-templates/e2e-tests.yml | 5 +- azure-pipeline-templates/setup.yml | 4 +- azure-pipeline-templates/verbose-tests.yml | 152 +- blobfuse2-ci.yaml | 15 +- blobfuse2-code-coverage.yaml | 89 +- blobfuse2-nightly.yaml | 1235 ++++++----------- component/azstorage/azauth_test.go | 109 -- component/file_cache/file_cache.go | 11 +- component/file_cache/file_cache_test.go | 44 +- component/file_cache/lfu_policy.go | 483 ------- component/file_cache/lfu_policy_test.go | 286 ---- component/file_cache/lru_policy_test.go | 3 + go.mod | 14 +- go.sum | 24 +- go_installer.sh | 3 +- test/e2e_tests/dir_test.go | 2 +- testdata/config/azure_key_lfu.yaml | 36 - 24 files changed, 528 insertions(+), 2023 deletions(-) delete mode 100644 component/file_cache/lfu_policy.go delete mode 100644 component/file_cache/lfu_policy_test.go delete mode 100644 testdata/config/azure_key_lfu.yaml diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 69997a924..4f5022baf 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -35,13 +35,14 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v2 + uses: actions/checkout@v4 # 
Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v1 + uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. # By default, queries listed here will override any specified in a config file. # Prefix the list here with "+" to use these queries and those in the config file. @@ -50,7 +51,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@v1 + uses: github/codeql-action/autobuild@v3 # ℹī¸ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -64,4 +65,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v1 + uses: github/codeql-action/analyze@v3 diff --git a/azure-pipeline-templates/blobfuse2-ci-template.yml b/azure-pipeline-templates/blobfuse2-ci-template.yml index f3f05f12a..bbfbbcd4b 100644 --- a/azure-pipeline-templates/blobfuse2-ci-template.yml +++ b/azure-pipeline-templates/blobfuse2-ci-template.yml @@ -46,7 +46,7 @@ steps: # azcli login - script: | - az login --service-principal -u $(AZTEST_CLIENT) -p $(AZTEST_SECRET) --tenant $(AZTEST_TENANT) + az login --identity --username $(ACLI_BLOBFUSE_MSI_APP_ID) displayName: 'Azure CLI login' - task: Go@0 @@ -74,9 +74,6 @@ steps: echo "\"msi-appid\"": "\"$(AZTEST_APP_ID)\"", >> $cnfFile echo "\"msi-resid\"": "\"$(AZTEST_RES_ID)\"", >> $cnfFile echo "\"msi-objid\"": "\"$(AZTEST_OBJ_ID)\"", >> $cnfFile - echo "\"spn-client\"": "\"$(AZTEST_CLIENT)\"", >> $cnfFile - echo "\"spn-tenant\"": "\"$(AZTEST_TENANT)\"", >> $cnfFile - echo "\"spn-secret\"": "\"$(AZTEST_SECRET)\"", >> $cnfFile echo "\"skip-msi\"": "${{ parameters.skip_msi }}", >> $cnfFile echo "\"skip-azcli\"": "false", >> $cnfFile echo "\"proxy-address\"": "\"${{ parameters.proxy_address }}\"" >> $cnfFile diff --git a/azure-pipeline-templates/build-release.yml b/azure-pipeline-templates/build-release.yml index d99cafc4f..8982b9f46 100644 --- a/azure-pipeline-templates/build-release.yml +++ b/azure-pipeline-templates/build-release.yml @@ -80,9 +80,6 @@ steps: echo "\"msi-appid\"": "\"$(AZTEST_APP_ID)\"", >> $cnfFile echo "\"msi-resid\"": "\"$(AZTEST_RES_ID)\"", >> $cnfFile echo "\"msi-objid\"": "\"$(AZTEST_OBJ_ID)\"", >> $cnfFile - echo "\"spn-client\"": "\"$(AZTEST_CLIENT)\"", >> $cnfFile - echo "\"spn-tenant\"": "\"$(AZTEST_TENANT)\"", >> $cnfFile - echo "\"spn-secret\"": "\"$(AZTEST_SECRET)\"", >> $cnfFile echo "\"skip-msi\"": "true", >> $cnfFile echo "\"skip-azcli\"": "true", >> $cnfFile echo "\"proxy-address\"": "\"\"" >> $cnfFile diff --git a/azure-pipeline-templates/build.yml b/azure-pipeline-templates/build.yml index f212f2f82..7762dca8f 100755 --- a/azure-pipeline-templates/build.yml +++ b/azure-pipeline-templates/build.yml @@ -124,9 +124,6 @@ steps: echo "\"msi-appid\"": "\"$(AZTEST_APP_ID)\"", >> $cnfFile echo "\"msi-resid\"": "\"$(AZTEST_RES_ID)\"", >> $cnfFile echo "\"msi-objid\"": "\"$(AZTEST_OBJ_ID)\"", >> $cnfFile - echo "\"spn-client\"": "\"$(AZTEST_CLIENT)\"", >> $cnfFile - echo "\"spn-tenant\"": "\"$(AZTEST_TENANT)\"", >> $cnfFile - echo "\"spn-secret\"": "\"$(AZTEST_SECRET)\"", >> $cnfFile echo "\"skip-msi\"": "${{ parameters.skip_msi }}", >> $cnfFile echo "\"skip-azcli\"": "${{ parameters.skip_azcli }}", >> $cnfFile echo "\"proxy-address\"": "\"${{ parameters.proxy_address }}\"" 
>> $cnfFile @@ -146,7 +143,7 @@ steps: # azcli login - script: | - az login --service-principal -u $(AZTEST_CLIENT) -p $(AZTEST_SECRET) --tenant $(AZTEST_TENANT) + az login --identity --username $(ACLI_BLOBFUSE_MSI_APP_ID) displayName: 'Azure CLI login' condition: eq('${{ parameters.skip_azcli }}', 'false') diff --git a/azure-pipeline-templates/distro-tests.yml b/azure-pipeline-templates/distro-tests.yml index 2b8f7e567..279fa5ba4 100755 --- a/azure-pipeline-templates/distro-tests.yml +++ b/azure-pipeline-templates/distro-tests.yml @@ -42,8 +42,6 @@ parameters: default: false steps: - - checkout: none - # Get the host details on which these test are running - script: | echo $(Description) @@ -52,6 +50,9 @@ steps: # Create directory structure and prepare to mount - ${{ parameters.installStep }} + + - checkout: none + - script: | sudo rm -rf $(ROOT_DIR) sudo mkdir -p $(ROOT_DIR) @@ -101,7 +102,6 @@ steps: quick_test: ${{ parameters.quick_test }} verbose_log: ${{ parameters.verbose_log }} clone: ${{ parameters.clone }} - stream_direct_test: false # TODO: These can be removed one day and replace all instances of ${{ parameters.temp_dir }} with $(TEMP_DIR) since it is a global variable temp_dir: $(TEMP_DIR) mount_dir: $(MOUNT_DIR) @@ -122,7 +122,6 @@ steps: quick_test: ${{ parameters.quick_test }} verbose_log: ${{ parameters.verbose_log }} clone: ${{ parameters.clone }} - stream_direct_test: false # TODO: These can be removed one day and replace all instances of ${{ parameters.temp_dir }} with $(TEMP_DIR) since it is a global variable temp_dir: $(TEMP_DIR) mount_dir: $(MOUNT_DIR) diff --git a/azure-pipeline-templates/e2e-tests-block-cache.yml b/azure-pipeline-templates/e2e-tests-block-cache.yml index 778755152..a2bd19581 100644 --- a/azure-pipeline-templates/e2e-tests-block-cache.yml +++ b/azure-pipeline-templates/e2e-tests-block-cache.yml @@ -31,9 +31,6 @@ parameters: - name: clone type: boolean default: false - - name: stream_direct_test - type: boolean - default: false steps: - script: | diff --git a/azure-pipeline-templates/e2e-tests-spcl.yml b/azure-pipeline-templates/e2e-tests-spcl.yml index e48e72eda..470ea8bc9 100644 --- a/azure-pipeline-templates/e2e-tests-spcl.yml +++ b/azure-pipeline-templates/e2e-tests-spcl.yml @@ -31,9 +31,6 @@ parameters: - name: clone type: boolean default: false - - name: stream_direct_test - type: boolean - default: false - name: enable_symlink_adls type: boolean default: false @@ -67,7 +64,6 @@ steps: artifact_name: '${{ parameters.distro_name }}_${{ parameters.idstring }}.txt' verbose_log: ${{ parameters.verbose_log }} clone: ${{ parameters.clone }} - stream_direct_test: ${{ parameters.stream_direct_test }} enable_symlink_adls: ${{ parameters.enable_symlink_adls }} mountStep: script: | diff --git a/azure-pipeline-templates/e2e-tests.yml b/azure-pipeline-templates/e2e-tests.yml index 2dd90c4e9..bbf6e6f37 100755 --- a/azure-pipeline-templates/e2e-tests.yml +++ b/azure-pipeline-templates/e2e-tests.yml @@ -19,9 +19,6 @@ parameters: - name: quick_test type: boolean default: true - - name: stream_direct_test - type: boolean - default: false - name: enable_symlink_adls type: boolean default: false @@ -69,7 +66,7 @@ steps: - task: Go@0 inputs: command: 'test' - arguments: '-v -timeout=2h ./... 
-args -mnt-path=${{ parameters.mount_dir }} -adls=${{parameters.adls}} -clone=${{parameters.clone}} -tmp-path=${{parameters.temp_dir}} -quick-test=${{parameters.quick_test}} -stream-direct-test=${{parameters.stream_direct_test}} -enable-symlink-adls=${{parameters.enable_symlink_adls}} -distro-name="${{parameters.distro_name}}"' + arguments: '-v -timeout=2h ./... -args -mnt-path=${{ parameters.mount_dir }} -adls=${{parameters.adls}} -clone=${{parameters.clone}} -tmp-path=${{parameters.temp_dir}} -quick-test=${{parameters.quick_test}} -enable-symlink-adls=${{parameters.enable_symlink_adls}} -distro-name="${{parameters.distro_name}}"' workingDirectory: ${{ parameters.working_dir }}/test/e2e_tests displayName: 'E2E Test: ${{ parameters.idstring }}' timeoutInMinutes: 120 diff --git a/azure-pipeline-templates/setup.yml b/azure-pipeline-templates/setup.yml index 3720b1b2c..5cdd9d984 100644 --- a/azure-pipeline-templates/setup.yml +++ b/azure-pipeline-templates/setup.yml @@ -6,6 +6,8 @@ parameters: default: "null" steps: + # Create directory structure and prepare to mount + - ${{ parameters.installStep }} - checkout: none # Get the host details on which these test are running @@ -14,8 +16,6 @@ steps: hostnamectl displayName: 'Print Agent Info' - # Create directory structure and prepare to mount - - ${{ parameters.installStep }} - script: | sudo rm -rf $(ROOT_DIR) sudo mkdir -p $(ROOT_DIR) diff --git a/azure-pipeline-templates/verbose-tests.yml b/azure-pipeline-templates/verbose-tests.yml index aea9bafa5..a171af0c3 100644 --- a/azure-pipeline-templates/verbose-tests.yml +++ b/azure-pipeline-templates/verbose-tests.yml @@ -5,14 +5,10 @@ parameters: type: string - name: account_endpoint type: string - - name: spn_account_endpoint - type: string - name: adls type: boolean - name: account_name type: string - - name: spn_account_name - type: string - name: account_key type: string - name: account_sas @@ -43,23 +39,13 @@ parameters: type: boolean - name: test_sas_credential type: boolean - - name: test_spn_credential - type: boolean - name: test_azcli_credential type: boolean - - name: test_stream - type: boolean - - name: stream_config - type: string - - name: stream_filename_config - type: string - name: test_azurite type: boolean default: false - name: sas_credential_config type: string - - name: spn_credential_config - type: string - name: azcli_credential_config type: string - name: azurite_config @@ -96,42 +82,6 @@ steps: displayName: Print config file condition: ${{ parameters.test_key_credential }} -# Stream e2e - - script: | - cd ${{ parameters.working_dir }} - ${{ parameters.working_dir }}/blobfuse2 gen-test-config --config-file=azure_stream.yaml --container-name=${{ parameters.container }} --output-file=${{ parameters.stream_config }} - displayName: Create Stream Config File - env: - ACCOUNT_TYPE: ${{ parameters.account_type }} - NIGHTLY_STO_ACC_NAME: ${{ parameters.account_name }} - NIGHTLY_STO_ACC_KEY: ${{ parameters.account_key }} - ACCOUNT_ENDPOINT: ${{ parameters.account_endpoint }} - VERBOSE_LOG: ${{ parameters.verbose_log }} - condition: ${{ parameters.test_stream }} - continueOnError: false - - - script: cat ${{ parameters.stream_config }} - displayName: Print Stream config file with Handle Level Caching - condition: ${{ parameters.test_stream }} - -# Stream e2e filename level caching - - script: | - cd ${{ parameters.working_dir }} - ${{ parameters.working_dir }}/blobfuse2 gen-test-config --config-file=azure_stream_filename.yaml --container-name=${{ parameters.container }} 
--output-file=${{ parameters.stream_filename_config }} - displayName: Create Stream Config File - env: - ACCOUNT_TYPE: ${{ parameters.account_type }} - NIGHTLY_STO_ACC_NAME: ${{ parameters.account_name }} - NIGHTLY_STO_ACC_KEY: ${{ parameters.account_key }} - ACCOUNT_ENDPOINT: ${{ parameters.account_endpoint }} - VERBOSE_LOG: ${{ parameters.verbose_log }} - condition: ${{ parameters.test_stream }} - continueOnError: false - - - script: cat ${{ parameters.stream_filename_config }} - displayName: Print Stream config file with Filename Caching - condition: ${{ parameters.test_stream }} - # Create sas credential config file if we need to test it - script: | cd ${{ parameters.working_dir }} @@ -150,33 +100,13 @@ steps: displayName: Print SAS config file condition: ${{ parameters.test_sas_credential }} -# Create spn credential config file if we need to test it - - script: | - cd ${{ parameters.working_dir }} - ${{ parameters.working_dir }}/blobfuse2 gen-test-config --config-file=azure_spn.yaml --container-name=${{ parameters.container }} --temp-path=${{ parameters.temp_dir }} --output-file=${{ parameters.spn_credential_config }} - displayName: Create SPN Config File - env: - NIGHTLY_SPN_ACC_NAME: ${{ parameters.spn_account_name }} - NIGHTLY_SPN_CLIENT_ID: ${{ parameters.client_id }} - NIGHTLY_SPN_TENANT_ID: ${{ parameters.tenant_id }} - NIGHTLY_SPN_CLIENT_SECRET: ${{ parameters.client_secret }} - ACCOUNT_TYPE: ${{ parameters.account_type }} - ACCOUNT_ENDPOINT: ${{ parameters.spn_account_endpoint }} - VERBOSE_LOG: ${{ parameters.verbose_log }} - condition: ${{ parameters.test_spn_credential }} - continueOnError: false - - - script: cat ${{ parameters.spn_credential_config }} - displayName: Print SPN config file - condition: ${{ parameters.test_spn_credential }} - # Create azcli credential config file if we need to test it - script: | cd ${{ parameters.working_dir }} ${{ parameters.working_dir }}/blobfuse2 gen-test-config --config-file=azure_cli.yaml --container-name=${{ parameters.container }} --temp-path=${{ parameters.temp_dir }} --output-file=${{ parameters.azcli_credential_config }} displayName: Create Azure CLI Config File env: - NIGHTLY_STO_BLOB_ACC_NAME: ${{ parameters.spn_account_name }} + NIGHTLY_STO_BLOB_ACC_NAME: ${{ parameters.account_name }} ACCOUNT_TYPE: ${{ parameters.account_type }} ACCOUNT_ENDPOINT: ${{ parameters.account_endpoint }} VERBOSE_LOG: ${{ parameters.verbose_log }} @@ -246,47 +176,6 @@ steps: quick_test: ${{ parameters.quick_test }} verbose_log: ${{ parameters.verbose_log }} clone: false - stream_direct_test: false - - - ${{ if eq(parameters.test_stream, true) }}: - - template: e2e-tests.yml - parameters: - working_dir: ${{ parameters.working_dir }} - temp_dir: ${{ parameters.temp_dir }} - mount_dir: ${{ parameters.mount_dir }} - adls: ${{ parameters.adls }} - idstring: ${{ parameters.service }} with Streaming - distro_name: ${{ parameters.distro_name }} - quick_test: ${{ parameters.quick_test }} - artifact_name: '${{ parameters.distro_name }}_${{ parameters.service }}_stream.txt' - verbose_log: ${{ parameters.verbose_log }} - mountStep: - script: > - ${{ parameters.working_dir }}/blobfuse2 mount ${{ parameters.mount_dir }} --config-file=${{ parameters.stream_config }} - --default-working-dir=${{ parameters.working_dir }} - displayName: 'E2E Test: Mount with Stream Configuration' - timeoutInMinutes: 3 - continueOnError: false - - - ${{ if eq(parameters.test_stream, true) }}: - - template: e2e-tests.yml - parameters: - working_dir: ${{ parameters.working_dir }} - 
temp_dir: ${{ parameters.temp_dir }} - mount_dir: ${{ parameters.mount_dir }} - adls: ${{ parameters.adls }} - idstring: ${{ parameters.service }} with Streaming with filename - distro_name: ${{ parameters.distro_name }} - quick_test: ${{ parameters.quick_test }} - artifact_name: '${{ parameters.distro_name }}_${{ parameters.service }}_stream_with_filename.txt' - verbose_log: ${{ parameters.verbose_log }} - mountStep: - script: > - ${{ parameters.working_dir }}/blobfuse2 mount ${{ parameters.mount_dir }} --config-file=${{ parameters.stream_filename_config }} - --default-working-dir=${{ parameters.working_dir }} - displayName: 'E2E Test: Mount with Filename Stream Configuration' - timeoutInMinutes: 3 - continueOnError: false - ${{ if eq(parameters.test_sas_credential, true) }}: - template: e2e-tests.yml @@ -307,25 +196,6 @@ steps: timeoutInMinutes: 3 continueOnError: false - - ${{ if eq(parameters.test_spn_credential, true) }}: - - template: e2e-tests.yml - parameters: - working_dir: ${{ parameters.working_dir }} - mount_dir: ${{ parameters.mount_dir }} - temp_dir: ${{ parameters.temp_dir }} - adls: ${{ parameters.adls }} - idstring: ${{ parameters.service }} with SPN Credentials - distro_name: ${{ parameters.distro_name }} - artifact_name: '${{ parameters.distro_name }}_${{ parameters.service }}_spn.txt' - verbose_log: ${{ parameters.verbose_log }} - mountStep: - script: > - ${{ parameters.working_dir }}/blobfuse2 mount ${{ parameters.mount_dir }} --config-file=${{ parameters.spn_credential_config }} - --default-working-dir=${{ parameters.working_dir }} - displayName: 'E2E Test: Mount with SPN Credential Configuration' - timeoutInMinutes: 3 - continueOnError: false - - ${{ if eq(parameters.test_azcli_credential, true) }}: - template: e2e-tests.yml parameters: @@ -347,8 +217,8 @@ steps: - ${{ if eq(parameters.test_azurite, true) }}: - bash: | - sudo apt-get install azure-cli -y - sudo npm install -g azurite + sudo apt-get install azure-cli npm -y + sudo npm install -g azurite@3.29.0 sudo mkdir azurite sudo azurite --silent --location azurite --debug azurite\debug.log & az storage container create -n ${{ parameters.container }} --connection-string "DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;QueueEndpoint=http://127.0.0.1:10001/devstoreaccount1;" @@ -375,22 +245,6 @@ steps: #--------------------------------------- Tests: End to end tests with different File Cache configurations ------------------------------------------ - - template: e2e-tests-spcl.yml - parameters: - conf_template: azure_key_lfu.yaml - config_file: ${{ parameters.config }} - container: ${{ parameters.container }} - temp_dir: ${{ parameters.temp_dir }} - mount_dir: ${{ parameters.mount_dir }} - adls: ${{ parameters.adls }} - account_name: ${{ parameters.account_name }} - account_key: ${{ parameters.account_key }} - account_type: ${{ parameters.account_type }} - account_endpoint: ${{ parameters.account_endpoint }} - idstring: "${{ parameters.service }} LFU policy" - distro_name: ${{ parameters.distro_name }} - verbose_log: ${{ parameters.verbose_log }} - - template: e2e-tests-spcl.yml parameters: conf_template: azure_key_lru_purge.yaml diff --git a/blobfuse2-ci.yaml b/blobfuse2-ci.yaml index 44f0f17b2..2298d0df1 100644 --- a/blobfuse2-ci.yaml +++ b/blobfuse2-ci.yaml @@ -13,23 +13,26 @@ jobs: strategy: matrix: Ubuntu-20: - imageName: 'ubuntu-20.04' + AgentName: 
'blobfuse-ubuntu20' containerName: 'test-cnt-ubn-20' fuselib: 'libfuse-dev' tags: 'fuse2' adlsSas: $(AZTEST_ADLS_CONT_SAS_UBN_20) Ubuntu-22: - imageName: 'ubuntu-22.04' + AgentName: 'blobfuse-ubuntu22' containerName: 'test-cnt-ubn-22' fuselib: 'libfuse3-dev' tags: 'fuse3' adlsSas: $(AZTEST_ADLS_CONT_SAS_UBN_22) pool: - vmImage: $(imageName) + name: "blobfuse-ubuntu-pool" + demands: + - ImageOverride -equals $(AgentName) + variables: - group: NightlyBlobFuse - + steps: # ---------------------------------------------------------------- - template: 'azure-pipeline-templates/blobfuse2-ci-template.yml' @@ -44,7 +47,7 @@ jobs: strategy: matrix: Ubuntu-22-ARM64: - imageName: 'blobfuse-ubn22-arm64' + AgentName: 'blobfuse-ubn22-arm64' containerName: 'test-cnt-ubn-22' fuselib: 'libfuse3-dev' tags: 'fuse3' @@ -53,7 +56,7 @@ jobs: pool: name: "blobfuse-ubn-arm64-pool" demands: - - ImageOverride -equals $(imageName) + - ImageOverride -equals $(AgentName) variables: - group: NightlyBlobFuse diff --git a/blobfuse2-code-coverage.yaml b/blobfuse2-code-coverage.yaml index 79cea5b2f..9c9a02996 100644 --- a/blobfuse2-code-coverage.yaml +++ b/blobfuse2-code-coverage.yaml @@ -28,14 +28,12 @@ stages: matrix: Ubuntu-20: AgentName: 'blobfuse-ubuntu20' - imageName: 'ubuntu-20.04' containerName: 'test-cnt-ubn-20' fuselib: 'libfuse-dev' fuselib2: 'fuse' tags: 'fuse2' Ubuntu-22: AgentName: 'blobfuse-ubuntu22' - imageName: 'ubuntu-22.04' containerName: 'test-cnt-ubn-22' fuselib: 'libfuse3-dev' fuselib2: 'fuse3' @@ -54,8 +52,6 @@ stages: value: '$(Pipeline.Workspace)/blobfuse2_tmp' - name: BLOBFUSE2_CFG value: '$(Pipeline.Workspace)/blobfuse2.yaml' - - name: BLOBFUSE2_STREAM_CFG - value: '$(Pipeline.Workspace)/blobfuse2_stream.yaml' - name: BLOBFUSE2_ADLS_CFG value: '$(Pipeline.Workspace)/blobfuse2.adls.yaml' - name: GOPATH @@ -236,84 +232,6 @@ stages: workingDirectory: $(WORK_DIR) displayName: "ADLS Coverage with profilers" - - # ------------------------------------------------------- - # Config Generation (Block Blob - LFU policy) - - script: | - cd $(WORK_DIR) - ./blobfuse2.test -test.v -test.coverprofile=$(WORK_DIR)/blobfuse2_gentest2.cov gen-test-config --config-file=azure_key_lfu.yaml --container-name=$(containerName) --temp-path=$(TEMP_DIR) --output-file=$(BLOBFUSE2_CFG) - env: - NIGHTLY_STO_ACC_NAME: $(NIGHTLY_STO_BLOB_ACC_NAME) - NIGHTLY_STO_ACC_KEY: $(NIGHTLY_STO_BLOB_ACC_KEY) - ACCOUNT_TYPE: 'block' - ACCOUNT_ENDPOINT: 'https://$(NIGHTLY_STO_BLOB_ACC_NAME).blob.core.windows.net' - VERBOSE_LOG: false - displayName: 'Create Config File - LFU' - continueOnError: false - workingDirectory: $(WORK_DIR) - - # Code Coverage with e2e-tests for block blob with lfu policy - - script: | - rm -rf $(MOUNT_DIR)/* - rm -rf $(TEMP_DIR)/* - ./blobfuse2.test -test.v -test.coverprofile=$(WORK_DIR)/blobfuse2_block_lfu.cov mount $(MOUNT_DIR) --config-file=$(BLOBFUSE2_CFG) --foreground=true & - sleep 10 - ps -aux | grep blobfuse2 - rm -rf $(MOUNT_DIR)/* - cd test/e2e_tests - go test -v -timeout=7200s ./... 
-args -mnt-path=$(MOUNT_DIR) -tmp-path=$(TEMP_DIR) - cd - - ./blobfuse2 unmount $(MOUNT_DIR) - sleep 5 - workingDirectory: $(WORK_DIR) - displayName: "Block Blob LFU Coverage" - - - # ------------------------------------------------------- - # Config Generation (Block Blob - Stream) - - script: | - cd $(WORK_DIR) - ./blobfuse2.test -test.v -test.coverprofile=$(WORK_DIR)/blobfuse2_gentest3.cov gen-test-config --config-file=azure_stream.yaml --container-name=$(containerName) --temp-path=$(TEMP_DIR) --output-file=$(BLOBFUSE2_STREAM_CFG) - displayName: 'Create Config File - Stream' - env: - NIGHTLY_STO_ACC_NAME: $(NIGHTLY_STO_BLOB_ACC_NAME) - NIGHTLY_STO_ACC_KEY: $(NIGHTLY_STO_BLOB_ACC_KEY) - ACCOUNT_TYPE: 'block' - ACCOUNT_ENDPOINT: 'https://$(NIGHTLY_STO_BLOB_ACC_NAME).blob.core.windows.net' - VERBOSE_LOG: false - continueOnError: false - workingDirectory: $(WORK_DIR) - - # Streaming test preparation - - script: | - rm -rf $(MOUNT_DIR)/* - rm -rf $(TEMP_DIR)/* - ./blobfuse2.test -test.v -test.coverprofile=$(WORK_DIR)/blobfuse2_stream_prep.cov mount $(MOUNT_DIR) --config-file=$(BLOBFUSE2_CFG) --foreground=true & - sleep 10 - ps -aux | grep blobfuse2 - for i in {10,50,100,200,500,1024}; do echo $i; done | parallel --will-cite -j 5 'head -c {}M < /dev/urandom > $(WORK_DIR)/myfile_{}' - for i in {10,50,100,200,500,1024}; do echo $i; done | parallel --will-cite -j 5 'cp $(WORK_DIR)/myfile_{} $(MOUNT_DIR)/' - ./blobfuse2 unmount "$(MOUNT_DIR)*" - sudo fusermount -u $(MOUNT_DIR) - sleep 5 - workingDirectory: $(WORK_DIR) - displayName: "Block Blob Stream Preparation" - - # Code Coverage with e2e-tests for block blob with streaming on - - script: | - rm -rf $(MOUNT_DIR)/* - rm -rf $(TEMP_DIR)/* - ./blobfuse2.test -test.v -test.coverprofile=$(WORK_DIR)/blobfuse2_stream.cov mount $(MOUNT_DIR) --config-file=$(BLOBFUSE2_STREAM_CFG) --foreground=true & - sleep 10 - ps -aux | grep blobfuse2 - ./blobfuse2 mount list - for i in {10,50,100,200,500,1024}; do echo $i; done | parallel --will-cite -j 5 'diff $(WORK_DIR)/myfile_{} $(MOUNT_DIR)/myfile_{}' - sudo fusermount -u $(MOUNT_DIR) - sleep 5 - workingDirectory: $(WORK_DIR) - displayName: "Block Blob Stream Coverage" - - # ------------------------------------------------------- # Config Generation (Block Blob) for cli options - script: | @@ -596,7 +514,7 @@ stages: sudo apt-get install python3-setuptools -y sudo apt install python3-pip -y sudo pip3 install mitmproxy - mitmdump -w proxy_logs.txt & + mitmdump -q -w proxy_logs.txt & displayName: 'Install & Start Proxy' # Configure Proxy cert & env @@ -674,7 +592,7 @@ stages: - script: | echo 'mode: count' > ./blobfuse2_coverage_raw.rpt tail -q -n +2 ./*.cov >> ./blobfuse2_coverage_raw.rpt - cat ./blobfuse2_coverage_raw.rpt | grep -v mock_component | grep -v base_component | grep -v loopback | grep -v tools | grep -v "common/log" | grep -v "common/exectime" | grep -v "internal/stats_manager" | grep -v "main.go" | grep -v "component/azstorage/azauthmsi.go" | grep -v "component/azstorage/azauthcli.go" > ./blobfuse2_coverage.rpt + cat ./blobfuse2_coverage_raw.rpt | grep -v mock_component | grep -v base_component | grep -v loopback | grep -v tools | grep -v "common/log" | grep -v "common/exectime" | grep -v "common/types.go" | grep -v "internal/stats_manager" | grep -v "main.go" | grep -v "component/azstorage/azauthmsi.go" | grep -v "component/azstorage/azauthspn.go" | grep -v "component/stream" | grep -v "component/azstorage/azauthcli.go" > ./blobfuse2_coverage.rpt go tool cover -func blobfuse2_coverage.rpt > 
./blobfuse2_func_cover.rpt go tool cover -html=./blobfuse2_coverage.rpt -o ./blobfuse2_coverage.html go tool cover -html=./blobfuse2_ut.cov -o ./blobfuse2_ut.html @@ -717,7 +635,6 @@ stages: matrix: Ubuntu-20: AgentName: 'blobfuse-ubuntu20' - imageName: 'ubuntu-20.04' containerName: 'test-cnt-ubn-20' fuselib: 'libfuse3-dev' fuselib2: 'fuse3' @@ -736,8 +653,6 @@ stages: value: '$(Pipeline.Workspace)/blobfuse2_tmp' - name: BLOBFUSE2_CFG value: '$(Pipeline.Workspace)/blobfuse2.yaml' - - name: BLOBFUSE2_STREAM_CFG - value: '$(Pipeline.Workspace)/blobfuse2_stream.yaml' - name: BLOBFUSE2_ADLS_CFG value: '$(Pipeline.Workspace)/blobfuse2.adls.yaml' - name: GOPATH diff --git a/blobfuse2-nightly.yaml b/blobfuse2-nightly.yaml index 406e43f4f..d475ebd5b 100755 --- a/blobfuse2-nightly.yaml +++ b/blobfuse2-nightly.yaml @@ -46,16 +46,6 @@ parameters: displayName: 'Quick Stress' type: boolean default: true - - - name: data_validation - displayName: 'Data Validation Test' - type: boolean - default: true - - - name: block_cache_validation - displayName: 'Block Cache Validation Test' - type: boolean - default: true - name: verbose_log displayName: 'Verbose Log' @@ -74,19 +64,22 @@ stages: strategy: matrix: Ubuntu-20-BlockBlob: - imageName: 'ubuntu-20.04' + AgentName: 'blobfuse-ubuntu20' containerName: 'test-cnt-ubn-20' adlsSas: $(AZTEST_ADLS_CONT_SAS_UBN_20) fuselib: 'libfuse-dev' tags: 'fuse2' Ubuntu-22-BlockBlob: - imageName: 'ubuntu-22.04' + AgentName: 'blobfuse-ubuntu22' containerName: 'test-cnt-ubn-22' adlsSas: $(AZTEST_ADLS_CONT_SAS_UBN_22) fuselib: 'libfuse3-dev' tags: 'fuse3' + pool: - vmImage: $(imageName) + name: "blobfuse-ubuntu-pool" + demands: + - ImageOverride -equals $(AgentName) variables: - group: NightlyBlobFuse @@ -98,14 +91,8 @@ stages: value: '$(Pipeline.Workspace)/blobfuse2.yaml' - name: BLOBFUSE2_SAS_CFG value: '$(Pipeline.Workspace)/blobfuse2_sas_config.yaml' - - name: BLOBFUSE2_SPN_CFG - value: '$(Pipeline.Workspace)/blobfuse2_spn_config.yaml' - name: BLOBFUSE2_AZCLI_CFG value: '$(Pipeline.Workspace)/blobfuse2_azcli_config.yaml' - - name: BLOBFUSE2_STREAM_CFG - value: '$(Pipeline.Workspace)/blobfuse2_stream.yaml' - - name: BLOBFUSE2_STREAM_FILENAME_CFG - value: '$(Pipeline.Workspace)/blobfuse2_stream_filename.yaml' - name: BLOBFUSE2_ADLS_CFG value: '$(Pipeline.Workspace)/blobfuse2.adls.yaml' - name: BLOBFUSE2_GTEST_CFG @@ -124,6 +111,15 @@ stages: value: '$(System.DefaultWorkingDirectory)/azure-storage-fuse' steps: + - script: | + sudo apt-get update --fix-missing + sudo apt update + sudo apt-get install cmake gcc $(fuselib) git parallel -y + if [ $(tags) == "fuse2" ]; then + sudo apt-get install fuse -y + fi + displayName: 'Install libfuse' + - checkout: none # Clone the repo @@ -138,13 +134,7 @@ stages: displayName: 'Checkout Branch' workingDirectory: $(WORK_DIR) - - script: | - sudo apt-get update --fix-missing - sudo apt-get install $(fuselib) -y - if [ $(tags) == "fuse2" ]; then - sudo apt-get install fuse -y - fi - displayName: 'Install libfuse' + # ------------------------------------------------------- # Pull and build the code @@ -176,8 +166,6 @@ stages: account_name: $(NIGHTLY_STO_BLOB_ACC_NAME) account_key: $(NIGHTLY_STO_BLOB_ACC_KEY) account_sas: $(NIGHTLY_STO_ACC_SAS) - spn_account_name: $(AZTEST_BLOCK_ACC_NAME) - spn_account_endpoint: 'https://$(AZTEST_BLOCK_ACC_NAME).blob.core.windows.net' client_id: $(AZTEST_CLIENT) tenant_id: $(AZTEST_TENANT) client_secret: $(AZTEST_SECRET) @@ -191,17 +179,12 @@ stages: quick_stress: ${{ parameters.quick_stress }} 
test_key_credential: true test_sas_credential: true - test_spn_credential: true test_azcli_credential: true - test_stream: true test_azurite: true - stream_config: $(BLOBFUSE2_STREAM_CFG) - stream_filename_config: $(BLOBFUSE2_STREAM_FILENAME_CFG) sas_credential_config: $(BLOBFUSE2_SAS_CFG) - spn_credential_config: $(BLOBFUSE2_SPN_CFG) azcli_credential_config: $(BLOBFUSE2_AZCLI_CFG) azurite_config: $(BLOBFUSE2_AZURITE_CFG) - distro_name: $(imageName) + distro_name: $(AgentName) verbose_log: ${{ parameters.verbose_log }} tags: $(tags) @@ -217,7 +200,7 @@ stages: strategy: matrix: Ubuntu-22-ARM64-BlockBlob: - imageName: 'blobfuse-ubn22-arm64' + AgentName: 'blobfuse-ubn22-arm64' containerName: 'test-cnt-ubn-22-arm64' adlsSas: $(AZTEST_ADLS_CONT_SAS_UBN_22_ARM) fuselib: 'libfuse3-dev' @@ -226,7 +209,7 @@ stages: pool: name: "blobfuse-ubn-arm64-pool" demands: - - ImageOverride -equals $(imageName) + - ImageOverride -equals $(AgentName) variables: - group: NightlyBlobFuse @@ -238,14 +221,8 @@ stages: value: '$(Pipeline.Workspace)/blobfuse2.yaml' - name: BLOBFUSE2_SAS_CFG value: '$(Pipeline.Workspace)/blobfuse2_sas_config.yaml' - - name: BLOBFUSE2_SPN_CFG - value: '$(Pipeline.Workspace)/blobfuse2_spn_config.yaml' - name: BLOBFUSE2_AZCLI_CFG value: '$(Pipeline.Workspace)/blobfuse2_azcli_config.yaml' - - name: BLOBFUSE2_STREAM_CFG - value: '$(Pipeline.Workspace)/blobfuse2_stream.yaml' - - name: BLOBFUSE2_STREAM_FILENAME_CFG - value: '$(Pipeline.Workspace)/blobfuse2_stream_filename.yaml' - name: BLOBFUSE2_ADLS_CFG value: '$(Pipeline.Workspace)/blobfuse2.adls.yaml' - name: BLOBFUSE2_GTEST_CFG @@ -264,6 +241,15 @@ stages: value: '$(System.DefaultWorkingDirectory)/azure-storage-fuse' steps: + - script: | + sudo apt-get update --fix-missing + sudo apt update + sudo apt-get install cmake gcc $(fuselib) git parallel -y + if [ $(tags) == "fuse2" ]; then + sudo apt-get install fuse -y + fi + displayName: 'Install libfuse' + - checkout: none - script: | @@ -283,11 +269,6 @@ stages: displayName: 'Checkout Branch' workingDirectory: $(WORK_DIR) - - script: | - sudo apt-get update --fix-missing - sudo apt-get install $(tags) $(fuselib) gcc -y - displayName: 'Install fuse' - # ------------------------------------------------------- # Pull and build the code - template: 'azure-pipeline-templates/build.yml' @@ -318,8 +299,6 @@ stages: account_name: $(NIGHTLY_STO_BLOB_ACC_NAME) account_key: $(NIGHTLY_STO_BLOB_ACC_KEY) account_sas: $(NIGHTLY_STO_ACC_SAS) - spn_account_name: $(AZTEST_BLOCK_ACC_NAME) - spn_account_endpoint: 'https://$(AZTEST_BLOCK_ACC_NAME).blob.core.windows.net' client_id: $(AZTEST_CLIENT) tenant_id: $(AZTEST_TENANT) client_secret: $(AZTEST_SECRET) @@ -333,17 +312,12 @@ stages: quick_stress: ${{ parameters.quick_stress }} test_key_credential: true test_sas_credential: true - test_spn_credential: true test_azcli_credential: true - test_stream: true test_azurite: false - stream_config: $(BLOBFUSE2_STREAM_CFG) - stream_filename_config: $(BLOBFUSE2_STREAM_FILENAME_CFG) sas_credential_config: $(BLOBFUSE2_SAS_CFG) - spn_credential_config: $(BLOBFUSE2_SPN_CFG) azcli_credential_config: $(BLOBFUSE2_AZCLI_CFG) azurite_config: $(BLOBFUSE2_AZURITE_CFG) - distro_name: $(imageName) + distro_name: $(AgentName) verbose_log: ${{ parameters.verbose_log }} tags: $(tags) @@ -360,19 +334,22 @@ stages: strategy: matrix: Ubuntu-20-ADLS: - imageName: 'ubuntu-20.04' + AgentName: 'blobfuse-ubuntu20' containerName: 'test-cnt-ubn-20' adlsSas: $(AZTEST_ADLS_CONT_SAS_UBN_20) fuselib: 'libfuse-dev' tags: 'fuse2' Ubuntu-22-ADLS: - 
imageName: 'ubuntu-22.04' + AgentName: 'blobfuse-ubuntu22' containerName: 'test-cnt-ubn-22' adlsSas: $(AZTEST_ADLS_CONT_SAS_UBN_22) fuselib: 'libfuse3-dev' tags: 'fuse3' + pool: - vmImage: $(imageName) + name: "blobfuse-ubuntu-pool" + demands: + - ImageOverride -equals $(AgentName) variables: - group: NightlyBlobFuse @@ -384,14 +361,8 @@ stages: value: '$(Pipeline.Workspace)/blobfuse2.yaml' - name: BLOBFUSE2_SAS_CFG value: '$(Pipeline.Workspace)/blobfuse2_sas_config.yaml' - - name: BLOBFUSE2_SPN_CFG - value: '$(Pipeline.Workspace)/blobfuse2_spn_config.yaml' - name: BLOBFUSE2_AZCLI_CFG value: '$(Pipeline.Workspace)/blobfuse2_azcli_config.yaml' - - name: BLOBFUSE2_STREAM_CFG - value: '$(Pipeline.Workspace)/blobfuse2_stream.yaml' - - name: BLOBFUSE2_STREAM_FILENAME_CFG - value: '$(Pipeline.Workspace)/blobfuse2_stream_filename.yaml' - name: BLOBFUSE2_ADLS_CFG value: '$(Pipeline.Workspace)/blobfuse2.adls.yaml' - name: BLOBFUSE2_GTEST_CFG @@ -410,6 +381,15 @@ stages: value: '$(System.DefaultWorkingDirectory)/azure-storage-fuse' steps: + - script: | + sudo apt-get update --fix-missing + sudo apt update + sudo apt-get install cmake gcc $(fuselib) git -y + if [ $(tags) == "fuse2" ]; then + sudo apt-get install fuse -y + fi + displayName: 'Install libfuse' + - checkout: none # Clone the repo @@ -424,14 +404,6 @@ stages: displayName: 'Checkout Branch' workingDirectory: $(WORK_DIR) - - script: | - sudo apt-get update --fix-missing - sudo apt-get install $(fuselib) -y - if [ $(tags) == "fuse2" ]; then - sudo apt-get install fuse -y - fi - displayName: 'Install libfuse' - # ------------------------------------------------------- # Pull and build the code - template: 'azure-pipeline-templates/build.yml' @@ -458,8 +430,6 @@ stages: account_name: $(AZTEST_ADLS_ACC_NAME) account_key: $(AZTEST_ADLS_KEY) account_sas: $(adlsSas) - spn_account_name: $(AZTEST_ADLS_ACC_NAME) - spn_account_endpoint: 'https://$(AZTEST_ADLS_ACC_NAME).dfs.core.windows.net' client_id: $(AZTEST_CLIENT) tenant_id: $(AZTEST_TENANT) client_secret: $(AZTEST_SECRET) @@ -473,17 +443,12 @@ stages: quick_stress: ${{ parameters.quick_stress }} test_key_credential: true test_sas_credential: true - test_spn_credential: true test_azcli_credential: true - test_stream: true test_azurite: false - stream_config: $(BLOBFUSE2_STREAM_CFG) - stream_filename_config: $(BLOBFUSE2_STREAM_FILENAME_CFG) sas_credential_config: $(BLOBFUSE2_SAS_CFG) - spn_credential_config: $(BLOBFUSE2_SPN_CFG) azcli_credential_config: $(BLOBFUSE2_AZCLI_CFG) azurite_config: $(BLOBFUSE2_AZURITE_CFG) - distro_name: $(imageName) + distro_name: $(AgentName) verbose_log: ${{ parameters.verbose_log }} tags: $(tags) @@ -499,7 +464,7 @@ stages: strategy: matrix: Ubuntu-22-ARM64-ADLS: - imageName: 'blobfuse-ubn22-arm64' + AgentName: 'blobfuse-ubn22-arm64' containerName: 'test-cnt-ubn-22-arm64' adlsSas: $(AZTEST_ADLS_CONT_SAS_UBN_22_ARM) fuselib: 'libfuse3-dev' @@ -507,7 +472,7 @@ stages: pool: name: "blobfuse-ubn-arm64-pool" demands: - - ImageOverride -equals $(imageName) + - ImageOverride -equals $(AgentName) variables: - group: NightlyBlobFuse @@ -519,14 +484,8 @@ stages: value: '$(Pipeline.Workspace)/blobfuse2.yaml' - name: BLOBFUSE2_SAS_CFG value: '$(Pipeline.Workspace)/blobfuse2_sas_config.yaml' - - name: BLOBFUSE2_SPN_CFG - value: '$(Pipeline.Workspace)/blobfuse2_spn_config.yaml' - name: BLOBFUSE2_AZCLI_CFG value: '$(Pipeline.Workspace)/blobfuse2_azcli_config.yaml' - - name: BLOBFUSE2_STREAM_CFG - value: '$(Pipeline.Workspace)/blobfuse2_stream.yaml' - - name: 
BLOBFUSE2_STREAM_FILENAME_CFG - value: '$(Pipeline.Workspace)/blobfuse2_stream_filename.yaml' - name: BLOBFUSE2_ADLS_CFG value: '$(Pipeline.Workspace)/blobfuse2.adls.yaml' - name: BLOBFUSE2_GTEST_CFG @@ -545,6 +504,15 @@ stages: value: '$(System.DefaultWorkingDirectory)/azure-storage-fuse' steps: + - script: | + sudo apt-get update --fix-missing + sudo apt update + sudo apt-get install cmake gcc $(fuselib) git -y + if [ $(tags) == "fuse2" ]; then + sudo apt-get install fuse -y + fi + displayName: 'Install libfuse' + - checkout: none # Clone the repo @@ -559,11 +527,6 @@ stages: displayName: 'Checkout Branch' workingDirectory: $(WORK_DIR) - - script: | - sudo apt-get update --fix-missing - sudo apt-get install $(fuselib) gcc -y - displayName: 'Install fuse' - # ------------------------------------------------------- # Pull and build the code - template: 'azure-pipeline-templates/build.yml' @@ -590,8 +553,6 @@ stages: account_name: $(AZTEST_ADLS_ACC_NAME) account_key: $(AZTEST_ADLS_KEY) account_sas: $(adlsSas) - spn_account_name: $(AZTEST_ADLS_ACC_NAME) - spn_account_endpoint: 'https://$(AZTEST_ADLS_ACC_NAME).dfs.core.windows.net' client_id: $(AZTEST_CLIENT) tenant_id: $(AZTEST_TENANT) client_secret: $(AZTEST_SECRET) @@ -605,17 +566,12 @@ stages: quick_stress: ${{ parameters.quick_stress }} test_key_credential: true test_sas_credential: false - test_spn_credential: true test_azcli_credential: true - test_stream: true test_azurite: false - stream_config: $(BLOBFUSE2_STREAM_CFG) - stream_filename_config: $(BLOBFUSE2_STREAM_FILENAME_CFG) sas_credential_config: $(BLOBFUSE2_SAS_CFG) - spn_credential_config: $(BLOBFUSE2_SPN_CFG) azcli_credential_config: $(BLOBFUSE2_AZCLI_CFG) azurite_config: $(BLOBFUSE2_AZURITE_CFG) - distro_name: $(imageName) + distro_name: $(AgentName) verbose_log: ${{ parameters.verbose_log }} tags: $(tags) @@ -634,10 +590,13 @@ stages: strategy: matrix: Ubuntu-20-Proxy: - imageName: 'ubuntu-20.04' + AgentName: 'blobfuse-ubuntu20' containerName: 'test-cnt-ubn-18-proxy' + pool: - vmImage: $(imageName) + name: "blobfuse-ubuntu-pool" + demands: + - ImageOverride -equals $(AgentName) variables: - group: NightlyBlobFuse @@ -659,17 +618,23 @@ stages: value: '$(System.DefaultWorkingDirectory)/azure-storage-fuse' steps: + - script: | + sudo apt-get update --fix-missing + sudo apt update + sudo apt-get install cmake gcc libfuse3-dev git -y + displayName: 'Install libfuse' + - checkout: none # Start Proxy - script: | sudo apt-get update --fix-missing - sudo apt remove mitmproxy - sudo apt-get install python3-setuptools - sudo apt install python3-pip + sudo apt remove mitmproxy -y + sudo apt-get install python3-setuptools -y + sudo apt install python3-pip -y sudo pip3 install werkzeug==2.3.7 sudo pip3 install mitmproxy - mitmdump -w proxy_logs.txt & + mitmdump -q -w proxy_logs.txt & displayName: 'Install & Start Proxy' # Configure Proxy cert & env @@ -693,11 +658,6 @@ stages: displayName: 'Checkout Branch' workingDirectory: $(WORK_DIR) - - script: | - sudo apt-get update --fix-missing - sudo apt-get install libfuse3-dev - displayName: 'Install fuse3' - # ------------------------------------------------------- # Pull, build and unit test the code - template: 'azure-pipeline-templates/build.yml' @@ -736,7 +696,7 @@ stages: mount_dir: $(MOUNT_DIR) temp_dir: $(TEMP_DIR) idstring: 'BlockBlob with Proxy and Key Credentials' - distro_name: $(imageName) + distro_name: $(AgentName) adls: false artifact_name: 'blockblob_proxy_key.txt' verbose_log: ${{ parameters.verbose_log }} @@ -770,7 +730,7 @@ 
stages: mount_dir: $(MOUNT_DIR) temp_dir: $(TEMP_DIR) idstring: 'ADLS with Proxy and Key Credentials' - distro_name: $(imageName) + distro_name: $(AgentName) adls: true artifact_name: 'adls_proxy_key.txt' verbose_log: ${{ parameters.verbose_log }} @@ -805,44 +765,13 @@ stages: mount_dir: $(MOUNT_DIR) temp_dir: $(TEMP_DIR) idstring: 'Block SAS' - distro_name: $(imageName) + distro_name: $(AgentName) mountStep: script: | $(WORK_DIR)/blobfuse2 mount $(MOUNT_DIR) --config-file=$(BLOBFUSE2_CFG) --default-working-dir=$(WORK_DIR) displayName: 'AuthVerify-SAS: Mount' continueOnError: false - # Block SPN and proxy test - - script: | - cd $(WORK_DIR) - $(WORK_DIR)/blobfuse2 gen-test-config --config-file=azure_spn_proxy.yaml --container-name=$(containerName) --temp-path=$(TEMP_DIR) --output-file=$(BLOBFUSE2_CFG) - displayName: "Create SPN Config File" - env: - NIGHTLY_SPN_ACC_NAME: $(AZTEST_BLOCK_ACC_NAME) - NIGHTLY_SPN_CLIENT_ID: $(AZTEST_CLIENT) - NIGHTLY_SPN_TENANT_ID: $(AZTEST_TENANT) - NIGHTLY_SPN_CLIENT_SECRET: $(AZTEST_SECRET) - ACCOUNT_ENDPOINT: 'https://$(AZTEST_BLOCK_ACC_NAME).blob.core.windows.net' - VERBOSE_LOG: ${{ parameters.verbose_log }} - continueOnError: false - - - script: - cat $(BLOBFUSE2_CFG) - displayName: "Print SPN Config File" - - - template: 'azure-pipeline-templates/verify-auth.yml' - parameters: - working_dir: $(WORK_DIR) - mount_dir: $(MOUNT_DIR) - temp_dir: $(TEMP_DIR) - idstring: 'Block SPN' - distro_name: $(imageName) - mountStep: - script: | - $(WORK_DIR)/blobfuse2 mount $(MOUNT_DIR) --config-file=$(BLOBFUSE2_CFG) --default-working-dir=$(WORK_DIR) - displayName: 'AuthVerify-SPN: Mount' - continueOnError: false - # ------------------------------------------------------------ - template: 'azure-pipeline-templates/cleanup.yml' parameters: @@ -1428,645 +1357,387 @@ stages: temp_dir: $(TEMP_DIR) - - ${{ if eq(parameters.block_cache_validation, true) }}: - - stage: DataValidationBlockCache - jobs: - # Ubuntu Tests - - job: Set_1 - timeoutInMinutes: 300 - strategy: - matrix: - Ubuntu-22: - imageName: 'ubuntu-22.04' - containerName: 'test-cnt-ubn-22' - fuselib: 'libfuse3-dev' - tags: 'fuse3' - pool: - vmImage: $(imageName) - - variables: - - group: NightlyBlobFuse - - name: ROOT_DIR - value: "/usr/pipeline/workv2" - - name: WORK_DIR - value: "/usr/pipeline/workv2/go/src/azure-storage-fuse" - - name: skipComponentGovernanceDetection - value: true - - name: MOUNT_DIR - value: "/usr/pipeline/workv2/blob_mnt" - - name: TEMP_DIR - value: "/usr/pipeline/workv2/temp" - - name: BLOBFUSE2_CFG - value: "/usr/pipeline/workv2/blobfuse2.yaml" - - name: GOPATH - value: "/usr/pipeline/workv2/go" - - steps: - - checkout: none - - - template: 'azure-pipeline-templates/setup.yml' - parameters: - tags: $(tags) - installStep: - script: | - sudo apt-get update --fix-missing - sudo apt-get install $(fuselib) -y - displayName: 'Install fuse' - - - template: 'azure-pipeline-templates/e2e-tests-block-cache.yml' - parameters: - conf_template: azure_key.yaml - config_file: $(BLOBFUSE2_CFG) - container: $(containerName) - idstring: Block_Blob - adls: false - account_name: $(NIGHTLY_STO_BLOB_ACC_NAME) - account_key: $(NIGHTLY_STO_BLOB_ACC_KEY) - account_type: block - account_endpoint: https://$(NIGHTLY_STO_BLOB_ACC_NAME).blob.core.windows.net - distro_name: $(imageName) - quick_test: false - verbose_log: ${{ parameters.verbose_log }} - clone: true - stream_direct_test: false - # TODO: These can be removed one day and replace all instances of ${{ parameters.temp_dir }} with $(TEMP_DIR) since it is a 
global variable - temp_dir: $(TEMP_DIR) - mount_dir: $(MOUNT_DIR) - - - ${{ if eq(parameters.data_validation, true) }}: - - stage: DataValidationBlob - jobs: - # Ubuntu Tests - - job: Set_1 - timeoutInMinutes: 300 - strategy: - matrix: - Ubuntu-20: - imageName: 'ubuntu-20.04' - containerName: 'test-cnt-ubn-20' - fuselib: 'libfuse-dev' - tags: 'fuse2' - Ubuntu-22: - imageName: 'ubuntu-22.04' - containerName: 'test-cnt-ubn-22' - fuselib: 'libfuse3-dev' - tags: 'fuse3' - pool: - vmImage: $(imageName) - - variables: - - group: NightlyBlobFuse - - name: ROOT_DIR - value: "/usr/pipeline/workv2" - - name: WORK_DIR - value: "/usr/pipeline/workv2/go/src/azure-storage-fuse" - - name: skipComponentGovernanceDetection - value: true - - name: MOUNT_DIR - value: "/usr/pipeline/workv2/blob_mnt" - - name: TEMP_DIR - value: "/usr/pipeline/workv2/temp" - - name: BLOBFUSE2_CFG - value: "/usr/pipeline/workv2/blobfuse2.yaml" - - name: GOPATH - value: "/usr/pipeline/workv2/go" - - steps: - - checkout: none - - - template: 'azure-pipeline-templates/setup.yml' - parameters: - tags: $(tags) - installStep: - script: | - sudo apt-get update --fix-missing - sudo apt-get install $(fuselib) -y - displayName: 'Install fuse' - - - template: 'azure-pipeline-templates/e2e-tests-spcl.yml' - parameters: - conf_template: azure_key.yaml - config_file: $(BLOBFUSE2_CFG) - container: $(containerName) - idstring: Block_Blob - adls: false - account_name: $(NIGHTLY_STO_BLOB_ACC_NAME) - account_key: $(NIGHTLY_STO_BLOB_ACC_KEY) - account_type: block - account_endpoint: https://$(NIGHTLY_STO_BLOB_ACC_NAME).blob.core.windows.net - distro_name: $(imageName) - quick_test: false - verbose_log: ${{ parameters.verbose_log }} - clone: true - stream_direct_test: false - # TODO: These can be removed one day and replace all instances of ${{ parameters.temp_dir }} with $(TEMP_DIR) since it is a global variable - temp_dir: $(TEMP_DIR) - mount_dir: $(MOUNT_DIR) - - - ${{ if eq(parameters.data_validation, true) }}: - - stage: DataValidationADLS - jobs: - # Ubuntu Tests - - job: Set_1 - timeoutInMinutes: 300 - strategy: - matrix: - Ubuntu-20: - imageName: 'ubuntu-20.04' - containerName: 'test-cnt-ubn-20' - fuselib: 'libfuse-dev' - tags: 'fuse2' - Ubuntu-22: - imageName: 'ubuntu-22.04' - containerName: 'test-cnt-ubn-22' - fuselib: 'libfuse3-dev' - tags: 'fuse3' - pool: - vmImage: $(imageName) - - variables: - - group: NightlyBlobFuse - - name: ROOT_DIR - value: "/usr/pipeline/workv2" - - name: WORK_DIR - value: "/usr/pipeline/workv2/go/src/azure-storage-fuse" - - name: skipComponentGovernanceDetection - value: true - - name: MOUNT_DIR - value: "/usr/pipeline/workv2/blob_mnt" - - name: TEMP_DIR - value: "/usr/pipeline/workv2/temp" - - name: BLOBFUSE2_CFG - value: "/usr/pipeline/workv2/blobfuse2.yaml" - - name: GOPATH - value: "/usr/pipeline/workv2/go" - - steps: - - checkout: none - - - template: 'azure-pipeline-templates/setup.yml' - parameters: - tags: $(tags) - installStep: - script: | - sudo apt-get update --fix-missing - sudo apt-get install $(fuselib) -y - displayName: 'Install fuse' - - - template: 'azure-pipeline-templates/e2e-tests-spcl.yml' - parameters: - conf_template: azure_key.yaml - config_file: $(BLOBFUSE2_CFG) - container: $(containerName) - idstring: ADLS - adls: true - account_name: $(AZTEST_ADLS_ACC_NAME) - account_key: $(AZTEST_ADLS_KEY) - account_type: adls - account_endpoint: https://$(AZTEST_ADLS_ACC_NAME).dfs.core.windows.net - distro_name: $(imageName) - quick_test: false - verbose_log: ${{ parameters.verbose_log }} - clone: 
true - stream_direct_test: false - # TODO: These can be removed one day and replace all instances of ${{ parameters.temp_dir }} with $(TEMP_DIR) since it is a global variable - temp_dir: $(TEMP_DIR) - mount_dir: $(MOUNT_DIR) - - - ${{ if eq(parameters.data_validation, true) }}: - - stage: DataValidationStreamFileHandle - jobs: - # Ubuntu Tests - - job: Set_1 - timeoutInMinutes: 300 - strategy: - matrix: - Ubuntu-20: - imageName: 'ubuntu-20.04' - containerName: 'test-cnt-ubn-20' - fuselib: 'libfuse-dev' - tags: 'fuse2' - Ubuntu-22: - imageName: 'ubuntu-22.04' - containerName: 'test-cnt-ubn-22' - fuselib: 'libfuse3-dev' - tags: 'fuse3' - pool: - vmImage: $(imageName) - - variables: - - group: NightlyBlobFuse - - name: ROOT_DIR - value: "/usr/pipeline/workv2" - - name: WORK_DIR - value: "/usr/pipeline/workv2/go/src/azure-storage-fuse" - - name: skipComponentGovernanceDetection - value: true - - name: MOUNT_DIR - value: "/usr/pipeline/workv2/blob_mnt" - - name: TEMP_DIR - value: "/usr/pipeline/workv2/temp" - - name: BLOBFUSE2_CFG - value: "/usr/pipeline/workv2/blobfuse2.yaml" - - name: GOPATH - value: "/usr/pipeline/workv2/go" - - steps: - - checkout: none - - - template: 'azure-pipeline-templates/setup.yml' - parameters: - tags: $(tags) - installStep: - script: | - sudo apt-get update --fix-missing - sudo apt-get install $(fuselib) -y - displayName: 'Install fuse' - - - template: 'azure-pipeline-templates/e2e-tests-spcl.yml' - parameters: - conf_template: azure_stream.yaml - config_file: $(BLOBFUSE2_CFG) - container: $(containerName) - idstring: Stream_File_Handle - adls: false - account_name: $(NIGHTLY_STO_BLOB_ACC_NAME) - account_key: $(NIGHTLY_STO_BLOB_ACC_KEY) - account_type: block - account_endpoint: https://$(NIGHTLY_STO_BLOB_ACC_NAME).blob.core.windows.net - distro_name: $(imageName) - quick_test: false - verbose_log: ${{ parameters.verbose_log }} - clone: false - stream_direct_test: false - # TODO: These can be removed one day and replace all instances of ${{ parameters.temp_dir }} with $(TEMP_DIR) since it is a global variable - temp_dir: $(TEMP_DIR) - mount_dir: $(MOUNT_DIR) - - - ${{ if eq(parameters.data_validation, true) }}: - - stage: DataValidationStreamFileHandleDirect - jobs: - # Ubuntu Tests - - job: Set_1 - timeoutInMinutes: 300 - strategy: - matrix: - Ubuntu-20: - imageName: 'ubuntu-20.04' - containerName: 'test-cnt-ubn-20' - fuselib: 'libfuse-dev' - tags: 'fuse2' - Ubuntu-22: - imageName: 'ubuntu-22.04' - containerName: 'test-cnt-ubn-22' - fuselib: 'libfuse3-dev' - tags: 'fuse3' - pool: - vmImage: $(imageName) - - variables: - - group: NightlyBlobFuse - - name: ROOT_DIR - value: "/usr/pipeline/workv2" - - name: WORK_DIR - value: "/usr/pipeline/workv2/go/src/azure-storage-fuse" - - name: skipComponentGovernanceDetection - value: true - - name: MOUNT_DIR - value: "/usr/pipeline/workv2/blob_mnt" - - name: TEMP_DIR - value: "/usr/pipeline/workv2/temp" - - name: BLOBFUSE2_CFG - value: "/usr/pipeline/workv2/blobfuse2.yaml" - - name: GOPATH - value: "/usr/pipeline/workv2/go" - - steps: - - checkout: none - - - template: 'azure-pipeline-templates/setup.yml' - parameters: - tags: $(tags) - installStep: - script: | - sudo apt-get update --fix-missing - sudo apt-get install $(fuselib) -y - displayName: 'Install fuse' - - - template: 'azure-pipeline-templates/e2e-tests-spcl.yml' - parameters: - conf_template: azure_stream_direct.yaml - config_file: $(BLOBFUSE2_CFG) - container: $(containerName) - idstring: Stream_File_Handle_Direct - adls: false - account_name: 
$(NIGHTLY_STO_BLOB_ACC_NAME) - account_key: $(NIGHTLY_STO_BLOB_ACC_KEY) - account_type: block - account_endpoint: https://$(NIGHTLY_STO_BLOB_ACC_NAME).blob.core.windows.net - distro_name: $(imageName) - quick_test: false - verbose_log: ${{ parameters.verbose_log }} - clone: false - stream_direct_test: true - # TODO: These can be removed one day and replace all instances of ${{ parameters.temp_dir }} with $(TEMP_DIR) since it is a global variable - temp_dir: $(TEMP_DIR) - mount_dir: $(MOUNT_DIR) - - - ${{ if eq(parameters.data_validation, true) }}: - - stage: DataValidationStreamFileName - jobs: - # Ubuntu Tests - - job: Set_1 - timeoutInMinutes: 300 - strategy: - matrix: - Ubuntu-20: - imageName: 'ubuntu-20.04' - containerName: 'test-cnt-ubn-20' - fuselib: 'libfuse-dev' - tags: 'fuse2' - Ubuntu-22: - imageName: 'ubuntu-22.04' - containerName: 'test-cnt-ubn-22' - fuselib: 'libfuse3-dev' - tags: 'fuse3' - pool: - vmImage: $(imageName) - - variables: - - group: NightlyBlobFuse - - name: ROOT_DIR - value: "/usr/pipeline/workv2" - - name: WORK_DIR - value: "/usr/pipeline/workv2/go/src/azure-storage-fuse" - - name: skipComponentGovernanceDetection - value: true - - name: MOUNT_DIR - value: "/usr/pipeline/workv2/blob_mnt" - - name: TEMP_DIR - value: "/usr/pipeline/workv2/temp" - - name: BLOBFUSE2_CFG - value: "/usr/pipeline/workv2/blobfuse2.yaml" - - name: GOPATH - value: "/usr/pipeline/workv2/go" - - steps: - - checkout: none - - - template: 'azure-pipeline-templates/setup.yml' - parameters: - tags: $(tags) - installStep: - script: | - sudo apt-get update --fix-missing - sudo apt-get install $(fuselib) -y - displayName: 'Install fuse' - - - template: 'azure-pipeline-templates/e2e-tests-spcl.yml' - parameters: - conf_template: azure_stream_filename.yaml - config_file: $(BLOBFUSE2_CFG) - container: $(containerName) - idstring: Stream_File_Name - adls: false - account_name: $(NIGHTLY_STO_BLOB_ACC_NAME) - account_key: $(NIGHTLY_STO_BLOB_ACC_KEY) - account_type: block - account_endpoint: https://$(NIGHTLY_STO_BLOB_ACC_NAME).blob.core.windows.net - distro_name: $(imageName) - quick_test: false - verbose_log: ${{ parameters.verbose_log }} - clone: false - stream_direct_test: false - # TODO: These can be removed one day and replace all instances of ${{ parameters.temp_dir }} with $(TEMP_DIR) since it is a global variable - temp_dir: $(TEMP_DIR) - mount_dir: $(MOUNT_DIR) - - - ${{ if eq(parameters.data_validation, true) }}: - - stage: DataValidationStreamFileNameDirect - jobs: - # Ubuntu Tests - - job: Set_1 - timeoutInMinutes: 300 - strategy: - matrix: - Ubuntu-20: - imageName: 'ubuntu-20.04' - containerName: 'test-cnt-ubn-20' - fuselib: 'libfuse-dev' - tags: 'fuse2' - Ubuntu-22: - imageName: 'ubuntu-22.04' - containerName: 'test-cnt-ubn-22' - fuselib: 'libfuse3-dev' - tags: 'fuse3' - pool: - vmImage: $(imageName) - - variables: - - group: NightlyBlobFuse - - name: ROOT_DIR - value: "/usr/pipeline/workv2" - - name: WORK_DIR - value: "/usr/pipeline/workv2/go/src/azure-storage-fuse" - - name: skipComponentGovernanceDetection - value: true - - name: MOUNT_DIR - value: "/usr/pipeline/workv2/blob_mnt" - - name: TEMP_DIR - value: "/usr/pipeline/workv2/temp" - - name: BLOBFUSE2_CFG - value: "/usr/pipeline/workv2/blobfuse2.yaml" - - name: GOPATH - value: "/usr/pipeline/workv2/go" - - steps: - - checkout: none - - - template: 'azure-pipeline-templates/setup.yml' - parameters: - tags: $(tags) - installStep: - script: | - sudo apt-get update --fix-missing - sudo apt-get install $(fuselib) -y - displayName: 
'Install fuse' - - - template: 'azure-pipeline-templates/e2e-tests-spcl.yml' - parameters: - conf_template: azure_stream_filename_direct.yaml - config_file: $(BLOBFUSE2_CFG) - container: $(containerName) - idstring: Stream_File_Name_Direct - adls: false - account_name: $(NIGHTLY_STO_BLOB_ACC_NAME) - account_key: $(NIGHTLY_STO_BLOB_ACC_KEY) - account_type: block - account_endpoint: https://$(NIGHTLY_STO_BLOB_ACC_NAME).blob.core.windows.net - distro_name: $(imageName) - quick_test: false - verbose_log: ${{ parameters.verbose_log }} - clone: false - stream_direct_test: true - # TODO: These can be removed one day and replace all instances of ${{ parameters.temp_dir }} with $(TEMP_DIR) since it is a global variable - temp_dir: $(TEMP_DIR) - mount_dir: $(MOUNT_DIR) - - - ${{ if eq(parameters.healthmon_test, true) }}: - - stage: Healthmon - jobs: - - job: Set_1 - timeoutInMinutes: 300 - strategy: - matrix: - Ubuntu-22: - imageName: 'ubuntu-22.04' - containerName: 'test-cnt-ubn-22' - adlsSas: $(AZTEST_ADLS_CONT_SAS_UBN_22) - fuselib: 'libfuse3-dev' - tags: 'fuse3' - pool: - vmImage: $(imageName) - - variables: - - group: NightlyBlobFuse - - name: MOUNT_DIR - value: '$(Pipeline.Workspace)/blob_mnt' - - name: TEMP_DIR - value: '$(Pipeline.Workspace)/blobfuse2_tmp' - - name: BLOBFUSE2_CFG - value: '$(Pipeline.Workspace)/blobfuse2.yaml' - - name: GOPATH - value: '$(Pipeline.Workspace)/go' - - name: ROOT_DIR - value: '$(System.DefaultWorkingDirectory)' - - name: WORK_DIR - value: '$(System.DefaultWorkingDirectory)/azure-storage-fuse' - - steps: - - checkout: none - - - script: | - sudo apt-get update --fix-missing -o Dpkg::Options::="--force-confnew" - sudo apt-get install make cmake gcc g++ parallel $(fuselib) -y -o Dpkg::Options::="--force-confnew" - displayName: 'Install libfuse' - - # Clone the repo - - script: | - git clone https://github.com/Azure/azure-storage-fuse - displayName: 'Checkout Code' - workingDirectory: $(ROOT_DIR) - - # Checkout the branch - - script: | - git checkout `echo $(Build.SourceBranch) | cut -d "/" -f 1,2 --complement` - displayName: 'Checkout Branch' + - stage: BlockCacheDataValidation + jobs: + # Ubuntu Tests + - job: Set_1 + timeoutInMinutes: 300 + strategy: + matrix: + Ubuntu-22: + AgentName: 'blobfuse-ubuntu22' + containerName: 'test-cnt-ubn-22' + adlsSas: $(AZTEST_ADLS_CONT_SAS_UBN_22) + fuselib: 'libfuse3-dev' + tags: 'fuse3' + + pool: + name: "blobfuse-ubuntu-pool" + demands: + - ImageOverride -equals $(AgentName) + + variables: + - group: NightlyBlobFuse + - name: ROOT_DIR + value: "/usr/pipeline/workv2" + - name: WORK_DIR + value: "/usr/pipeline/workv2/go/src/azure-storage-fuse" + - name: skipComponentGovernanceDetection + value: true + - name: MOUNT_DIR + value: "/usr/pipeline/workv2/blob_mnt" + - name: TEMP_DIR + value: "/usr/pipeline/workv2/temp" + - name: BLOBFUSE2_CFG + value: "/usr/pipeline/workv2/blobfuse2.yaml" + - name: GOPATH + value: "/usr/pipeline/workv2/go" + + steps: + - template: 'azure-pipeline-templates/setup.yml' + parameters: + tags: $(tags) + installStep: + script: | + sudo apt-get update --fix-missing + sudo apt update + sudo apt-get install cmake gcc $(fuselib) git parallel -y + if [ $(tags) == "fuse2" ]; then + sudo apt-get install fuse -y + else + sudo apt-get install fuse3 -y + fi + displayName: 'Install fuse' + + - template: 'azure-pipeline-templates/e2e-tests-block-cache.yml' + parameters: + conf_template: azure_key.yaml + config_file: $(BLOBFUSE2_CFG) + container: $(containerName) + idstring: Block_Blob + adls: false + account_name: 
$(NIGHTLY_STO_BLOB_ACC_NAME) + account_key: $(NIGHTLY_STO_BLOB_ACC_KEY) + account_type: block + account_endpoint: https://$(NIGHTLY_STO_BLOB_ACC_NAME).blob.core.windows.net + distro_name: $(AgentName) + quick_test: false + verbose_log: ${{ parameters.verbose_log }} + clone: true + # TODO: These can be removed one day and replace all instances of ${{ parameters.temp_dir }} with $(TEMP_DIR) since it is a global variable + temp_dir: $(TEMP_DIR) + mount_dir: $(MOUNT_DIR) + + - stage: FNSDataValidation + jobs: + # Ubuntu Tests + - job: Set_1 + timeoutInMinutes: 300 + strategy: + matrix: + Ubuntu-20: + AgentName: 'blobfuse-ubuntu20' + containerName: 'test-cnt-ubn-20' + adlsSas: $(AZTEST_ADLS_CONT_SAS_UBN_20) + fuselib: 'libfuse-dev' + tags: 'fuse2' + Ubuntu-22: + AgentName: 'blobfuse-ubuntu22' + containerName: 'test-cnt-ubn-22' + adlsSas: $(AZTEST_ADLS_CONT_SAS_UBN_22) + fuselib: 'libfuse3-dev' + tags: 'fuse3' + + pool: + name: "blobfuse-ubuntu-pool" + demands: + - ImageOverride -equals $(AgentName) + + variables: + - group: NightlyBlobFuse + - name: ROOT_DIR + value: "/usr/pipeline/workv2" + - name: WORK_DIR + value: "/usr/pipeline/workv2/go/src/azure-storage-fuse" + - name: skipComponentGovernanceDetection + value: true + - name: MOUNT_DIR + value: "/usr/pipeline/workv2/blob_mnt" + - name: TEMP_DIR + value: "/usr/pipeline/workv2/temp" + - name: BLOBFUSE2_CFG + value: "/usr/pipeline/workv2/blobfuse2.yaml" + - name: GOPATH + value: "/usr/pipeline/workv2/go" + + steps: + - template: 'azure-pipeline-templates/setup.yml' + parameters: + tags: $(tags) + installStep: + script: | + sudo apt-get update --fix-missing + sudo apt update + sudo apt-get install cmake gcc $(fuselib) git parallel -y + if [ $(tags) == "fuse2" ]; then + sudo apt-get install fuse -y + else + sudo apt-get install fuse3 -y + fi + displayName: 'Install fuse' + + - template: 'azure-pipeline-templates/e2e-tests-spcl.yml' + parameters: + conf_template: azure_key.yaml + config_file: $(BLOBFUSE2_CFG) + container: $(containerName) + idstring: Block_Blob + adls: false + account_name: $(NIGHTLY_STO_BLOB_ACC_NAME) + account_key: $(NIGHTLY_STO_BLOB_ACC_KEY) + account_type: block + account_endpoint: https://$(NIGHTLY_STO_BLOB_ACC_NAME).blob.core.windows.net + distro_name: $(AgentName) + quick_test: false + verbose_log: ${{ parameters.verbose_log }} + clone: true + # TODO: These can be removed one day and replace all instances of ${{ parameters.temp_dir }} with $(TEMP_DIR) since it is a global variable + temp_dir: $(TEMP_DIR) + mount_dir: $(MOUNT_DIR) + + - stage: HNSDataValidation + jobs: + # Ubuntu Tests + - job: Set_1 + timeoutInMinutes: 300 + strategy: + matrix: + Ubuntu-20: + AgentName: 'blobfuse-ubuntu20' + containerName: 'test-cnt-ubn-20' + adlsSas: $(AZTEST_ADLS_CONT_SAS_UBN_20) + fuselib: 'libfuse-dev' + tags: 'fuse2' + Ubuntu-22: + AgentName: 'blobfuse-ubuntu22' + containerName: 'test-cnt-ubn-22' + adlsSas: $(AZTEST_ADLS_CONT_SAS_UBN_22) + fuselib: 'libfuse3-dev' + tags: 'fuse3' + + pool: + name: "blobfuse-ubuntu-pool" + demands: + - ImageOverride -equals $(AgentName) + + variables: + - group: NightlyBlobFuse + - name: ROOT_DIR + value: "/usr/pipeline/workv2" + - name: WORK_DIR + value: "/usr/pipeline/workv2/go/src/azure-storage-fuse" + - name: skipComponentGovernanceDetection + value: true + - name: MOUNT_DIR + value: "/usr/pipeline/workv2/blob_mnt" + - name: TEMP_DIR + value: "/usr/pipeline/workv2/temp" + - name: BLOBFUSE2_CFG + value: "/usr/pipeline/workv2/blobfuse2.yaml" + - name: GOPATH + value: "/usr/pipeline/workv2/go" + + 
steps: + - template: 'azure-pipeline-templates/setup.yml' + parameters: + tags: $(tags) + installStep: + script: | + sudo apt-get update --fix-missing + sudo apt update + sudo apt-get install cmake gcc $(fuselib) git parallel -y + if [ $(tags) == "fuse2" ]; then + sudo apt-get install fuse -y + else + sudo apt-get install fuse3 -y + fi + displayName: 'Install fuse' + + - template: 'azure-pipeline-templates/e2e-tests-spcl.yml' + parameters: + conf_template: azure_key.yaml + config_file: $(BLOBFUSE2_CFG) + container: $(containerName) + idstring: ADLS + adls: true + account_name: $(AZTEST_ADLS_ACC_NAME) + account_key: $(AZTEST_ADLS_KEY) + account_type: adls + account_endpoint: https://$(AZTEST_ADLS_ACC_NAME).dfs.core.windows.net + distro_name: $(AgentName) + quick_test: false + verbose_log: ${{ parameters.verbose_log }} + clone: true + # TODO: These can be removed one day and replace all instances of ${{ parameters.temp_dir }} with $(TEMP_DIR) since it is a global variable + temp_dir: $(TEMP_DIR) + mount_dir: $(MOUNT_DIR) + + - stage: Healthmon + jobs: + - job: Set_1 + timeoutInMinutes: 300 + strategy: + matrix: + Ubuntu-22: + AgentName: 'blobfuse-ubuntu22' + containerName: 'test-cnt-ubn-22' + adlsSas: $(AZTEST_ADLS_CONT_SAS_UBN_22) + fuselib: 'libfuse3-dev' + tags: 'fuse3' + + pool: + name: "blobfuse-ubuntu-pool" + demands: + - ImageOverride -equals $(AgentName) + + variables: + - group: NightlyBlobFuse + - name: MOUNT_DIR + value: '$(Pipeline.Workspace)/blob_mnt' + - name: TEMP_DIR + value: '$(Pipeline.Workspace)/blobfuse2_tmp' + - name: BLOBFUSE2_CFG + value: '$(Pipeline.Workspace)/blobfuse2.yaml' + - name: GOPATH + value: '$(Pipeline.Workspace)/go' + - name: ROOT_DIR + value: '$(System.DefaultWorkingDirectory)' + - name: WORK_DIR + value: '$(System.DefaultWorkingDirectory)/azure-storage-fuse' + + steps: + - script: | + sudo apt-get update --fix-missing -o Dpkg::Options::="--force-confnew" + sudo apt-get install make cmake gcc g++ parallel $(fuselib) -y -o Dpkg::Options::="--force-confnew" + displayName: 'Install libfuse' + + - checkout: none + + # Clone the repo + - script: | + git clone https://github.com/Azure/azure-storage-fuse + displayName: 'Checkout Code' + workingDirectory: $(ROOT_DIR) + + # Checkout the branch + - script: | + git checkout `echo $(Build.SourceBranch) | cut -d "/" -f 1,2 --complement` + displayName: 'Checkout Branch' + workingDirectory: $(WORK_DIR) + + # Create directory structure + - script: | + sudo mkdir -p $(ROOT_DIR) + sudo chown -R `whoami` $(ROOT_DIR) + chmod 777 $(ROOT_DIR) + displayName: 'Create Directory Structure' + + # ------------------------------------------------------- + # Alternative custom script for Self-Hosted agents to install Go-lang + - task: ShellScript@2 + inputs: + scriptPath: "$(WORK_DIR)/go_installer.sh" + args: "$(ROOT_DIR)/" + displayName: "GoTool Custom Setup" + + # Downloading Go dependency packages + - task: Go@0 + inputs: + command: 'get' + arguments: '-d' workingDirectory: $(WORK_DIR) + displayName: "Go Get" - # Create directory structure - - script: | - sudo mkdir -p $(ROOT_DIR) - sudo chown -R `whoami` $(ROOT_DIR) - chmod 777 $(ROOT_DIR) - displayName: 'Create Directory Structure' - - # ------------------------------------------------------- - # Alternative custom script for Self-Hosted agents to install Go-lang - - task: ShellScript@2 - inputs: - scriptPath: "$(WORK_DIR)/go_installer.sh" - args: "$(ROOT_DIR)/" - displayName: "GoTool Custom Setup" - - # Downloading Go dependency packages - - task: Go@0 - inputs: - command: 'get' 
- arguments: '-d' - workingDirectory: $(WORK_DIR) - displayName: "Go Get" - - # Building our package - - task: Go@0 - inputs: - command: 'build' - arguments: "-o blobfuse2" - workingDirectory: $(WORK_DIR) - displayName: "Go Build Blobfuse2" - - # Building our package - - task: Go@0 - inputs: - command: 'build' - arguments: "-o bfusemon ./tools/health-monitor/" - workingDirectory: $(WORK_DIR) - displayName: "Go Build bfusemon" - - - script: | - cd $(WORK_DIR) - $(WORK_DIR)/blobfuse2 gen-test-config --config-file=azure_key_hmon.yaml --container-name=$(containerName) --temp-path=$(TEMP_DIR) --output-file=$(BLOBFUSE2_CFG) - displayName: Create Key Config File - env: - ACCOUNT_TYPE: 'block' - NIGHTLY_STO_ACC_NAME: $(NIGHTLY_STO_BLOB_ACC_NAME) - NIGHTLY_STO_ACC_KEY: $(NIGHTLY_STO_BLOB_ACC_KEY) - ACCOUNT_ENDPOINT: 'https://$(NIGHTLY_STO_BLOB_ACC_NAME).blob.core.windows.net' - VERBOSE_LOG: false - USE_HTTP: false - HMON_OUTPUT: $(WORK_DIR) - continueOnError: false - - - script: cat $(BLOBFUSE2_CFG) - displayName: Print config file - - - script: - mkdir -p $(MOUNT_DIR) - mkdir -p $(TEMP_DIR) - displayName: "Create directory structure" - - - template: 'azure-pipeline-templates/mount.yml' - parameters: - working_dir: $(WORK_DIR) - mount_dir: $(MOUNT_DIR) - temp_dir: $(TEMP_DIR) - prefix: "Mount" - mountStep: - script: | - sudo ln -s `pwd`/bfusemon /usr/local/bin/bfusemon - ls -l /usr/local/bin/bfusemon* - $(WORK_DIR)/blobfuse2 mount $(MOUNT_DIR) --config-file=$(BLOBFUSE2_CFG) - displayName: 'Mount Blobfuse2' - workingDirectory: $(WORK_DIR) - timeoutInMinutes: 3 - continueOnError: false - - - script: | - sudo ps -aux | grep bfusemon - displayName: 'Print bfusemon Process info' - env: - mount_dir: $(MOUNT_DIR) - - - script: | - touch $(MOUNT_DIR)/h1.txt - echo "123456" > $(MOUNT_DIR)/h2.txt - cp *.md $(MOUNT_DIR)/ - mkdir $(MOUNT_DIR)/A - mv $(MOUNT_DIR)/h1.txt $(MOUNT_DIR)/h5.txt - mv $(MOUNT_DIR)/h*.* $(MOUNT_DIR)/A - rm -rf $(MOUNT_DIR)/*.md - rm -rf $(MOUNT_DIR)/A - displayName: "Test for health monitor" + # Building our package + - task: Go@0 + inputs: + command: 'build' + arguments: "-o blobfuse2" workingDirectory: $(WORK_DIR) + displayName: "Go Build Blobfuse2" - - script: | - sleep 20s - cat $(WORK_DIR)/monitor_*.json - displayName: "Print bfusemon output" + # Building our package + - task: Go@0 + inputs: + command: 'build' + arguments: "-o bfusemon ./tools/health-monitor/" workingDirectory: $(WORK_DIR) + displayName: "Go Build bfusemon" + + - script: | + cd $(WORK_DIR) + $(WORK_DIR)/blobfuse2 gen-test-config --config-file=azure_key_hmon.yaml --container-name=$(containerName) --temp-path=$(TEMP_DIR) --output-file=$(BLOBFUSE2_CFG) + displayName: Create Key Config File + env: + ACCOUNT_TYPE: 'block' + NIGHTLY_STO_ACC_NAME: $(NIGHTLY_STO_BLOB_ACC_NAME) + NIGHTLY_STO_ACC_KEY: $(NIGHTLY_STO_BLOB_ACC_KEY) + ACCOUNT_ENDPOINT: 'https://$(NIGHTLY_STO_BLOB_ACC_NAME).blob.core.windows.net' + VERBOSE_LOG: false + USE_HTTP: false + HMON_OUTPUT: $(WORK_DIR) + continueOnError: false + + - script: cat $(BLOBFUSE2_CFG) + displayName: Print config file + + - script: + mkdir -p $(MOUNT_DIR) + mkdir -p $(TEMP_DIR) + displayName: "Create directory structure" + + - template: 'azure-pipeline-templates/mount.yml' + parameters: + working_dir: $(WORK_DIR) + mount_dir: $(MOUNT_DIR) + temp_dir: $(TEMP_DIR) + prefix: "Mount" + mountStep: + script: | + sudo ln -s `pwd`/bfusemon /usr/local/bin/bfusemon + ls -l /usr/local/bin/bfusemon* + $(WORK_DIR)/blobfuse2 mount $(MOUNT_DIR) --config-file=$(BLOBFUSE2_CFG) + displayName: 
'Mount Blobfuse2' + workingDirectory: $(WORK_DIR) + timeoutInMinutes: 3 + continueOnError: false - - template: 'azure-pipeline-templates/cleanup.yml' - parameters: - working_dir: $(WORK_DIR) - mount_dir: $(MOUNT_DIR) - temp_dir: $(TEMP_DIR) \ No newline at end of file + - script: | + sudo ps -aux | grep bfusemon + displayName: 'Print bfusemon Process info' + env: + mount_dir: $(MOUNT_DIR) + + - script: | + touch $(MOUNT_DIR)/h1.txt + echo "123456" > $(MOUNT_DIR)/h2.txt + cp *.md $(MOUNT_DIR)/ + mkdir $(MOUNT_DIR)/A + mv $(MOUNT_DIR)/h1.txt $(MOUNT_DIR)/h5.txt + mv $(MOUNT_DIR)/h*.* $(MOUNT_DIR)/A + rm -rf $(MOUNT_DIR)/*.md + rm -rf $(MOUNT_DIR)/A + displayName: "Test for health monitor" + workingDirectory: $(WORK_DIR) + + - script: | + sleep 20s + cat $(WORK_DIR)/monitor_*.json + displayName: "Print bfusemon output" + workingDirectory: $(WORK_DIR) + + - template: 'azure-pipeline-templates/cleanup.yml' + parameters: + working_dir: $(WORK_DIR) + mount_dir: $(MOUNT_DIR) + temp_dir: $(TEMP_DIR) \ No newline at end of file diff --git a/component/azstorage/azauth_test.go b/component/azstorage/azauth_test.go index 47f259550..bbc2dae19 100644 --- a/component/azstorage/azauth_test.go +++ b/component/azstorage/azauth_test.go @@ -687,115 +687,6 @@ func (suite *authTestSuite) TestAdlskMsiResId() { } } -func (suite *authTestSuite) TestBlockInvalidSpn() { - defer suite.cleanupTest() - stgConfig := AzStorageConfig{ - container: storageTestConfigurationParameters.BlockContainer, - authConfig: azAuthConfig{ - AuthMode: EAuthType.SPN(), - AccountType: EAccountType.BLOCK(), - AccountName: storageTestConfigurationParameters.BlockAccount, - ClientID: storageTestConfigurationParameters.SpnClientId, - TenantID: storageTestConfigurationParameters.SpnTenantId, - ClientSecret: "", - Endpoint: generateEndpoint(false, storageTestConfigurationParameters.BlockAccount, EAccountType.BLOCK()), - }, - } - assert := assert.New(suite.T()) - stg := NewAzStorageConnection(stgConfig) - if stg == nil { - assert.Fail("TestBlockInvalidSpn : Failed to create Storage object") - } - if err := stg.SetupPipeline(); err == nil { - assert.Fail("TestBlockInvalidSpn : Setup pipeline even though spn is invalid") - } -} - -func (suite *authTestSuite) TestBlockInvalidTokenPathSpn() { - defer suite.cleanupTest() - - _ = os.WriteFile("newtoken.txt", []byte("abcdef"), 0777) - defer os.Remove("newtoken.txt") - - stgConfig := AzStorageConfig{ - container: storageTestConfigurationParameters.BlockContainer, - authConfig: azAuthConfig{ - AuthMode: EAuthType.SPN(), - AccountType: EAccountType.BLOCK(), - AccountName: storageTestConfigurationParameters.BlockAccount, - ClientID: storageTestConfigurationParameters.SpnClientId, - TenantID: storageTestConfigurationParameters.SpnTenantId, - ClientSecret: "", - Endpoint: generateEndpoint(false, storageTestConfigurationParameters.BlockAccount, EAccountType.BLOCK()), - OAuthTokenFilePath: "newtoken.txt", - }, - } - assert := assert.New(suite.T()) - stg := NewAzStorageConnection(stgConfig) - if stg == nil { - assert.Fail("TestBlockInvalidSpn : Failed to create Storage object") - } - _ = stg.SetupPipeline() -} - -func (suite *authTestSuite) TestBlockSpn() { - defer suite.cleanupTest() - stgConfig := AzStorageConfig{ - container: storageTestConfigurationParameters.BlockContainer, - authConfig: azAuthConfig{ - AuthMode: EAuthType.SPN(), - AccountType: EAccountType.BLOCK(), - AccountName: storageTestConfigurationParameters.BlockAccount, - ClientID: storageTestConfigurationParameters.SpnClientId, - TenantID: 
storageTestConfigurationParameters.SpnTenantId,
-			ClientSecret: storageTestConfigurationParameters.SpnClientSecret,
-			Endpoint:     generateEndpoint(false, storageTestConfigurationParameters.BlockAccount, EAccountType.BLOCK()),
-		},
-	}
-	suite.validateStorageTest("TestBlockSpn", stgConfig)
-}
-
-func (suite *authTestSuite) TestAdlsInvalidSpn() {
-	defer suite.cleanupTest()
-	stgConfig := AzStorageConfig{
-		container: storageTestConfigurationParameters.AdlsContainer,
-		authConfig: azAuthConfig{
-			AuthMode:     EAuthType.SPN(),
-			AccountType:  EAccountType.ADLS(),
-			AccountName:  storageTestConfigurationParameters.AdlsAccount,
-			ClientID:     storageTestConfigurationParameters.SpnClientId,
-			TenantID:     storageTestConfigurationParameters.SpnTenantId,
-			ClientSecret: "",
-			Endpoint:     generateEndpoint(false, storageTestConfigurationParameters.AdlsAccount, EAccountType.ADLS()),
-		},
-	}
-	assert := assert.New(suite.T())
-	stg := NewAzStorageConnection(stgConfig)
-	if stg == nil {
-		assert.Fail("TestAdlsInvalidSpn : Failed to create Storage object")
-	}
-	if err := stg.SetupPipeline(); err == nil {
-		assert.Fail("TestAdlsInvalidSpn : Setup pipeline even though spn is invalid")
-	}
-}
-
-func (suite *authTestSuite) TestAdlsSpn() {
-	defer suite.cleanupTest()
-	stgConfig := AzStorageConfig{
-		container: storageTestConfigurationParameters.AdlsContainer,
-		authConfig: azAuthConfig{
-			AuthMode:     EAuthType.SPN(),
-			AccountType:  EAccountType.ADLS(),
-			AccountName:  storageTestConfigurationParameters.AdlsAccount,
-			ClientID:     storageTestConfigurationParameters.SpnClientId,
-			TenantID:     storageTestConfigurationParameters.SpnTenantId,
-			ClientSecret: storageTestConfigurationParameters.SpnClientSecret,
-			Endpoint:     generateEndpoint(false, storageTestConfigurationParameters.AdlsAccount, EAccountType.ADLS()),
-		},
-	}
-	suite.validateStorageTest("TestAdlsSpn", stgConfig)
-}
-
 func (suite *authTestSuite) TestBlockAzCLI() {
 	defer suite.cleanupTest()
 	stgConfig := AzStorageConfig{
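The removed cases covered service-principal (SPN) authentication end to end. For context, a minimal sketch of the same credential flow against the azidentity v1.6.0 API this series pins in go.mod; the account URL, tenant, client, and secret values below are placeholders, not test configuration:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
)

func main() {
	// Build a service-principal credential (tenant/client/secret are placeholders).
	cred, err := azidentity.NewClientSecretCredential("tenant-id", "client-id", "client-secret", nil)
	if err != nil {
		panic(err)
	}
	// Attach it to a blob service client, as the SPN auth path does.
	client, err := service.NewClient("https://myaccount.blob.core.windows.net/", cred, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(client.URL())
}
```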
diff --git a/component/file_cache/file_cache.go b/component/file_cache/file_cache.go
index 30f676e61..e7ebedbab 100644
--- a/component/file_cache/file_cache.go
+++ b/component/file_cache/file_cache.go
@@ -294,16 +294,7 @@ func (c *FileCache) Configure(_ bool) error {
 	}
 
 	cacheConfig := c.GetPolicyConfig(conf)
-
-	switch strings.ToLower(conf.Policy) {
-	case "lru":
-		c.policy = NewLRUPolicy(cacheConfig)
-	case "lfu":
-		c.policy = NewLFUPolicy(cacheConfig)
-	default:
-		log.Info("FileCache::Configure : Using default eviction policy")
-		c.policy = NewLRUPolicy(cacheConfig)
-	}
+	c.policy = NewLRUPolicy(cacheConfig)
 
 	if c.policy == nil {
 		log.Err("FileCache::Configure : failed to create cache eviction policy")
diff --git a/component/file_cache/file_cache_test.go b/component/file_cache/file_cache_test.go
index e147068fd..c3f0d4e39 100644
--- a/component/file_cache/file_cache_test.go
+++ b/component/file_cache/file_cache_test.go
@@ -161,7 +161,7 @@ func (suite *fileCacheTestSuite) TestEmpty() {
 func (suite *fileCacheTestSuite) TestConfig() {
 	defer suite.cleanupTest()
 	suite.cleanupTest() // teardown the default file cache generated
-	policy := "lfu"
+	policy := "lru"
 	maxSizeMb := 1024
 	cacheTimeout := 60
 	maxDeletion := 10
@@ -178,10 +178,10 @@ func (suite *fileCacheTestSuite) TestConfig() {
 
 	suite.assert.Equal(suite.fileCache.tmpPath, suite.cache_path)
 	suite.assert.Equal(suite.fileCache.policy.Name(), policy)
-	suite.assert.EqualValues(suite.fileCache.policy.(*lfuPolicy).maxSizeMB, maxSizeMb)
-	suite.assert.EqualValues(suite.fileCache.policy.(*lfuPolicy).maxEviction, maxDeletion)
-	suite.assert.EqualValues(suite.fileCache.policy.(*lfuPolicy).highThreshold, highThreshold)
-	suite.assert.EqualValues(suite.fileCache.policy.(*lfuPolicy).lowThreshold, lowThreshold)
+	suite.assert.EqualValues(suite.fileCache.policy.(*lruPolicy).maxSizeMB, maxSizeMb)
+	suite.assert.EqualValues(suite.fileCache.policy.(*lruPolicy).maxEviction, maxDeletion)
+	suite.assert.EqualValues(suite.fileCache.policy.(*lruPolicy).highThreshold, highThreshold)
+	suite.assert.EqualValues(suite.fileCache.policy.(*lruPolicy).lowThreshold, lowThreshold)
 
 	suite.assert.Equal(suite.fileCache.createEmptyFile, createEmptyFile)
 	suite.assert.Equal(suite.fileCache.allowNonEmpty, allowNonEmptyTemp)
@@ -192,7 +192,7 @@ func (suite *fileCacheTestSuite) TestConfigPolicyTimeout() {
 	defer suite.cleanupTest()
 	suite.cleanupTest() // teardown the default file cache generated
-	policy := "lfu"
+	policy := "lru"
 	maxSizeMb := 1024
 	cacheTimeout := 60
 	maxDeletion := 10
@@ -209,11 +209,11 @@
 
 	suite.assert.Equal(suite.fileCache.tmpPath, suite.cache_path)
 	suite.assert.Equal(suite.fileCache.policy.Name(), policy)
-	suite.assert.EqualValues(suite.fileCache.policy.(*lfuPolicy).maxSizeMB, maxSizeMb)
-	suite.assert.EqualValues(suite.fileCache.policy.(*lfuPolicy).maxEviction, maxDeletion)
-	suite.assert.EqualValues(suite.fileCache.policy.(*lfuPolicy).highThreshold, highThreshold)
-	suite.assert.EqualValues(suite.fileCache.policy.(*lfuPolicy).lowThreshold, lowThreshold)
-	suite.assert.EqualValues(suite.fileCache.policy.(*lfuPolicy).cacheTimeout, cacheTimeout)
+	suite.assert.EqualValues(suite.fileCache.policy.(*lruPolicy).maxSizeMB, maxSizeMb)
+	suite.assert.EqualValues(suite.fileCache.policy.(*lruPolicy).maxEviction, maxDeletion)
+	suite.assert.EqualValues(suite.fileCache.policy.(*lruPolicy).highThreshold, highThreshold)
+	suite.assert.EqualValues(suite.fileCache.policy.(*lruPolicy).lowThreshold, lowThreshold)
+	suite.assert.EqualValues(suite.fileCache.policy.(*lruPolicy).cacheTimeout, cacheTimeout)
 
 	suite.assert.Equal(suite.fileCache.createEmptyFile, createEmptyFile)
 	suite.assert.Equal(suite.fileCache.allowNonEmpty, allowNonEmptyTemp)
@@ -224,7 +224,7 @@ func (suite *fileCacheTestSuite) TestConfigPolicyDefaultTimeout() {
 	defer suite.cleanupTest()
 	suite.cleanupTest() // teardown the default file cache generated
-	policy := "lfu"
+	policy := "lru"
 	maxSizeMb := 1024
 	cacheTimeout := defaultFileCacheTimeout
 	maxDeletion := 10
@@ -241,11 +241,11 @@
 
 	suite.assert.Equal(suite.fileCache.tmpPath, suite.cache_path)
 	suite.assert.Equal(suite.fileCache.policy.Name(), policy)
-	suite.assert.EqualValues(suite.fileCache.policy.(*lfuPolicy).maxSizeMB, maxSizeMb)
-	suite.assert.EqualValues(suite.fileCache.policy.(*lfuPolicy).maxEviction, maxDeletion)
-	suite.assert.EqualValues(suite.fileCache.policy.(*lfuPolicy).highThreshold, highThreshold)
-	suite.assert.EqualValues(suite.fileCache.policy.(*lfuPolicy).lowThreshold, lowThreshold)
-	suite.assert.EqualValues(suite.fileCache.policy.(*lfuPolicy).cacheTimeout, cacheTimeout)
+	suite.assert.EqualValues(suite.fileCache.policy.(*lruPolicy).maxSizeMB, maxSizeMb)
+	suite.assert.EqualValues(suite.fileCache.policy.(*lruPolicy).maxEviction, maxDeletion)
+	suite.assert.EqualValues(suite.fileCache.policy.(*lruPolicy).highThreshold, highThreshold)
+	suite.assert.EqualValues(suite.fileCache.policy.(*lruPolicy).lowThreshold, lowThreshold)
+	suite.assert.EqualValues(suite.fileCache.policy.(*lruPolicy).cacheTimeout, cacheTimeout)
 
 	suite.assert.Equal(suite.fileCache.createEmptyFile, createEmptyFile)
 	suite.assert.Equal(suite.fileCache.allowNonEmpty, allowNonEmptyTemp)
@@ -256,7 +256,7 @@ func (suite *fileCacheTestSuite) TestConfigZero() {
 	defer suite.cleanupTest()
 	suite.cleanupTest() // teardown the default file cache generated
-	policy := "lfu"
+	policy := "lru"
 	maxSizeMb := 1024
 	cacheTimeout := 0
 	maxDeletion := 10
@@ -273,10 +273,10 @@
 
 	suite.assert.Equal(suite.fileCache.tmpPath, suite.cache_path)
 	suite.assert.Equal(suite.fileCache.policy.Name(), policy)
-	suite.assert.EqualValues(suite.fileCache.policy.(*lfuPolicy).maxSizeMB, maxSizeMb)
-	suite.assert.EqualValues(suite.fileCache.policy.(*lfuPolicy).maxEviction, maxDeletion)
-	suite.assert.EqualValues(suite.fileCache.policy.(*lfuPolicy).highThreshold, highThreshold)
-	suite.assert.EqualValues(suite.fileCache.policy.(*lfuPolicy).lowThreshold, lowThreshold)
+	suite.assert.EqualValues(suite.fileCache.policy.(*lruPolicy).maxSizeMB, maxSizeMb)
+	suite.assert.EqualValues(suite.fileCache.policy.(*lruPolicy).maxEviction, maxDeletion)
+	suite.assert.EqualValues(suite.fileCache.policy.(*lruPolicy).highThreshold, highThreshold)
+	suite.assert.EqualValues(suite.fileCache.policy.(*lruPolicy).lowThreshold, lowThreshold)
 
 	suite.assert.Equal(suite.fileCache.createEmptyFile, createEmptyFile)
 	suite.assert.Equal(suite.fileCache.allowNonEmpty, allowNonEmptyTemp)
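With Configure hard-wired to NewLRUPolicy, a config that still says `policy: lfu` silently gets LRU. A hypothetical shim, not part of this patch, that would make the fallback explicit (function and message are illustrative):

```go
package main

import (
	"log"
	"strings"
)

// normalizePolicy maps deprecated policy names onto the only remaining
// implementation. Hypothetical helper; the patch itself simply ignores
// conf.Policy and always constructs the LRU policy.
func normalizePolicy(name string) string {
	if strings.ToLower(name) == "lfu" {
		log.Println("file_cache: LFU policy is deprecated, falling back to LRU")
	}
	return "lru"
}

func main() {
	log.Println(normalizePolicy("lfu")) // prints the deprecation note, then "lru"
}
```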
diff --git a/component/file_cache/lfu_policy.go b/component/file_cache/lfu_policy.go
deleted file mode 100644
index 5bd7b1da5..000000000
--- a/component/file_cache/lfu_policy.go
+++ /dev/null
@@ -1,483 +0,0 @@
-/*
-    _____           _____   _____   ____          ______   _____   ------
-   |     |  |      |     | |     | |     |     | |       |       |      |
-   |     |  |      |     | |     | |     |     | |       |       |      |
-   | --- |  |      |     | |-----| |----  |     | |-----| |-----  ------
-   |     |  |      |     | |     | |     |     | |       |       |      |
-   | ____|  |_____ | ____| | ____| |     |_____|  _____|  |_____  |_____
-
-
-   Licensed under the MIT License <http://opensource.org/licenses/MIT>.
-
-   Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
-   Author : <blobfuse-dev@microsoft.com>
-
-   Permission is hereby granted, free of charge, to any person obtaining a copy
-   of this software and associated documentation files (the "Software"), to deal
-   in the Software without restriction, including without limitation the rights
-   to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-   copies of the Software, and to permit persons to whom the Software is
-   furnished to do so, subject to the following conditions:
-
-   The above copyright notice and this permission notice shall be included in all
-   copies or substantial portions of the Software.
-
-   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-   IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-   AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-   LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-   OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-   SOFTWARE
-*/
-
-package file_cache
-
-import (
-	"os"
-	"strings"
-	"sync"
-	"time"
-
-	"github.com/Azure/azure-storage-fuse/v2/common/log"
-)
-
-type lfuPolicy struct {
-	sync.Mutex
-	cachePolicyConfig
-	list        *lfuList
-	removeFiles chan string
-	closeChan   chan int
-}
-
-var _ cachePolicy = &lfuPolicy{}
-
-func (l *lfuPolicy) StartPolicy() error {
-	log.Trace("lfuPolicy::StartPolicy")
-
-	go l.clearCache()
-	return nil
-}
-
-func (l *lfuPolicy) ShutdownPolicy() error {
-	log.Trace("lfuPolicy::ShutdownPolicy")
-
-	l.closeChan <- 1
-	return nil
-}
-
-func (l *lfuPolicy) UpdateConfig(config cachePolicyConfig) error {
-	log.Trace("lfuPolicy::UpdateConfig")
-
-	l.maxSizeMB = config.maxSizeMB
-	l.highThreshold = config.highThreshold
-	l.lowThreshold = config.lowThreshold
-	l.maxEviction = config.maxEviction
-
-	l.list.maxSizeMB = config.maxSizeMB
-	l.list.upperThresh = config.highThreshold
-	l.list.lowerThresh = config.lowThreshold
-	l.list.cacheTimeout = config.cacheTimeout
-
-	l.policyTrace = config.policyTrace
-	return nil
-}
-
-func (l *lfuPolicy) CacheValid(name string) {
-	log.Trace("lfuPolicy::CacheValid : %s", name)
-
-	l.list.Lock()
-	defer l.list.Unlock()
-
-	l.list.put(name)
-}
-
-func (l *lfuPolicy) CacheInvalidate(name string) {
-	log.Trace("lfuPolicy::CacheInvalidate : %s", name)
-
-	if l.cacheTimeout == 0 {
-		l.CachePurge(name)
-	}
-}
-
-func (l *lfuPolicy) CachePurge(name string) {
-	log.Trace("lfuPolicy::CachePurge : %s", name)
-
-	l.list.Lock()
-	defer l.list.Unlock()
-
-	l.list.delete(name)
-	l.removeFiles <- name
-}
-
-func (l *lfuPolicy) IsCached(name string) bool {
-	log.Trace("lfuPolicy::IsCached : %s", name)
-
-	l.list.Lock()
-	defer l.list.Unlock()
-
-	val := l.list.get(name)
-	if val != nil {
-		log.Debug("lfuPolicy::IsCached : %s found", name)
-		return true
-	} else {
-		log.Debug("lfuPolicy::IsCached : %s not found", name)
-		return false
-	}
-}
-
-func (l *lfuPolicy) Name() string {
-	return "lfu"
-}
-
-func (l *lfuPolicy) clearItemFromCache(path string) {
-	azPath := strings.TrimPrefix(path, l.tmpPath)
-	if azPath[0] == '/' {
-		azPath = azPath[1:]
-	}
-
-	flock := l.fileLocks.Get(azPath)
-	if l.fileLocks.Locked(azPath) {
-		log.Warn("lfuPolicy::DeleteItem : File in under download %s", azPath)
-		l.CacheValid(path)
-		return
-	}
-
-	flock.Lock()
-	defer flock.Unlock()
-
-	// Check if there are any open handles to this file or not
-	if flock.Count() > 0 {
-		log.Err("lfuPolicy::clearItemFromCache : File in use %s", path)
-		l.CacheValid(path)
-		return
-	}
-
-	// There are no open handles for this file so its safe to remove this
-	err := deleteFile(path)
-	if err != nil && !os.IsNotExist(err) {
-		log.Err("lfuPolicy::DeleteItem : failed to delete local file %s [%s]", path, err.Error())
-	}
-
-	// File was deleted so try clearing its parent directory
-	// TODO: Delete directories up the path recursively that are "safe to delete". Ensure there is no race between this code and code that creates directories (like OpenFile)
-	// This might require something like hierarchical locking.
-}
-
-func (l *lfuPolicy) clearCache() {
-	log.Trace("lfuPolicy::clearCache")
-
-	for {
-		select {
-
-		case path := <-l.removeFiles:
-			l.clearItemFromCache(path)
-
-		case <-l.closeChan:
-			return
-		}
-	}
-
-}
-
-func NewLFUPolicy(cfg cachePolicyConfig) cachePolicy {
-	pol := &lfuPolicy{
-		cachePolicyConfig: cfg,
-		removeFiles:       make(chan string, 10),
-		closeChan:         make(chan int, 10),
-	}
-	pol.list = newLFUList(cfg.maxSizeMB, cfg.lowThreshold, cfg.highThreshold, pol.removeFiles, cfg.tmpPath, cfg.cacheTimeout)
-	return pol
-}
-
-//Double DoublyLinkedList Implementation for O(1) lfu
-
-type dataNode struct {
-	key       string
-	frequency uint64
-	next      *dataNode
-	prev      *dataNode
-	timer     *time.Timer
-}
-
-func newDataNode(key string) *dataNode {
-	return &dataNode{
-		key:       key,
-		frequency: 1,
-	}
-}
-
-type dataNodeLinkedList struct {
-	size  uint64
-	first *dataNode
-	last  *dataNode
-}
-
-func (dl *dataNodeLinkedList) pop() *dataNode {
-	if dl.size == 0 {
-		return nil
-	}
-	return dl.remove(dl.first)
-}
-
-func (dl *dataNodeLinkedList) remove(node *dataNode) *dataNode {
-	if dl.size == 0 {
-		return nil
-	}
-	if dl.first == dl.last {
-		dl.first = nil
-		dl.last = nil
-	} else if dl.first == node {
-		temp := dl.first
-		dl.first = temp.next
-		temp.next = nil
-		dl.first.prev = nil
-	} else if dl.last == node {
-		temp := dl.last
-		dl.last = temp.prev
-		temp.prev = nil
-		dl.last.next = nil
-	} else {
-		nextNode := node.next
-		prevNode := node.prev
-		prevNode.next = nextNode
-		nextNode.prev = prevNode
-		node.next = nil
-		node.prev = nil
-	}
-	dl.size--
-	return node
-}
-
-func (dl *dataNodeLinkedList) push(node *dataNode) {
-	if dl.first == nil {
-		dl.first = node
-		dl.last = node
-	} else {
-		temp := dl.last
-		temp.next = node
-		node.prev = temp
-		dl.last = node
-	}
-	dl.size++
-}
-
-func newDataNodeLinkedList() *dataNodeLinkedList {
-	return &dataNodeLinkedList{}
-}
-
-type frequencyNode struct {
-	list      *dataNodeLinkedList
-	next      *frequencyNode
-	prev      *frequencyNode
-	frequency uint64
-}
-
-func (fn *frequencyNode) pop() *dataNode {
-	return fn.list.pop()
-}
-
-func (fn *frequencyNode) remove(dn *dataNode) *dataNode {
-	return fn.list.remove(dn)
-}
-
-func (fn *frequencyNode) push(dn *dataNode) {
-	fn.list.push(dn)
-}
-
-func newFrequencyNode(freq uint64) *frequencyNode {
-	return &frequencyNode{
-		list:      newDataNodeLinkedList(),
-		frequency: freq,
-	}
-}
-
-type lfuList struct {
-	sync.Mutex
-	first        *frequencyNode
-	last         *frequencyNode
-	dataNodeMap  map[string]*dataNode
-	freqNodeMap  map[uint64]*frequencyNode
-	size         uint64
-	maxSizeMB    float64
-	lowerThresh  float64
-	upperThresh  float64
-	deleteFiles  chan string
-	cachePath    string
-	cacheAge     uint64
-	cacheTimeout uint32
-}
-
-func (list *lfuList) deleteFrequency(freq uint64) {
-	freqNode := list.freqNodeMap[freq]
-	if list.first == list.last {
-		list.first = nil
-		list.last = nil
-	} else if list.first == freqNode {
-		list.first = list.first.next
-		list.first.prev = nil
-		freqNode.next = nil
-	} else if list.last == freqNode {
-		list.last = list.last.prev
-		list.last.next = nil
-		freqNode.prev = nil
-	} else {
-		nextNode := freqNode.next
-		prevNode := freqNode.prev
-		nextNode.prev = prevNode
-		prevNode.next = nextNode
-		freqNode.next = nil
-		freqNode.prev = nil
-	}
-	list.size--
-	delete(list.freqNodeMap, freq)
-}
-
-func (list *lfuList) addFrequency(freq uint64, freqNode *frequencyNode, prevFreqNode *frequencyNode) {
-
-	if list.first == nil && list.last == nil {
-		list.first = freqNode
-		list.last = freqNode
-
-		list.freqNodeMap[freq] = freqNode
-		list.size++
-		return
-	}
-
-	if prevFreqNode == nil {
-		prevFreqNode = list.first
-	}
-
-	for prevFreqNode.next != nil && freq > prevFreqNode.next.frequency {
-		prevFreqNode = prevFreqNode.next
-	}
-
-	if prevFreqNode == nil {
-		freqNode.next = list.first
-		list.first.prev = freqNode
-		list.first = freqNode
-	} else if prevFreqNode == list.last {
-		prevFreqNode.next = freqNode
-		freqNode.prev = prevFreqNode
-		list.last = freqNode
-	} else {
-		nextNode := prevFreqNode.next
-		freqNode.next = nextNode
-		nextNode.prev = freqNode
-		prevFreqNode.next = freqNode
-		freqNode.prev = prevFreqNode
-	}
-	list.freqNodeMap[freq] = freqNode
-	list.size++
-}
-
-func (list *lfuList) promote(dataNode *dataNode) {
-	prevFreqNode := list.freqNodeMap[dataNode.frequency]
-	prevFreqNode.remove(dataNode)
-	dataNode.frequency += 1 + list.cacheAge
-	if newFreqNode, ok := list.freqNodeMap[dataNode.frequency]; ok {
-		newFreqNode.push(dataNode)
-	} else {
-		newFreqNode := newFrequencyNode(dataNode.frequency)
-		list.addFrequency(dataNode.frequency, newFreqNode, prevFreqNode)
-		list.freqNodeMap[dataNode.frequency] = newFreqNode
-		newFreqNode.push(dataNode)
-	}
-
-	if prevFreqNode.list.size == 0 {
-		list.deleteFrequency(prevFreqNode.frequency)
-		list.size--
-	}
-}
-
-func (list *lfuList) get(key string) *dataNode {
-	if node, ok := list.dataNodeMap[key]; ok {
-		if list.cacheTimeout > 0 {
-			node.timer.Stop()
-		}
-		list.promote(node)
-		list.setTimerIfValid(node)
-		return node
-	} else {
-		return nil
-	}
-}
-
-// Requires Lock()
-func (list *lfuList) put(key string) {
-	if node, ok := list.dataNodeMap[key]; ok {
-		if list.cacheTimeout > 0 {
-			node.timer.Stop()
-		}
-		list.promote(node)
-		list.setTimerIfValid(node)
-	} else {
-		if usage := getUsagePercentage(list.cachePath, list.maxSizeMB); usage > list.upperThresh {
-			for usage > list.lowerThresh && list.first != nil {
-				toDeletePath := list.first.list.first.key
-				list.first.pop()
-				delete(list.dataNodeMap, toDeletePath)
-				if list.first.list.size == 0 {
-					list.deleteFrequency(list.first.frequency)
-					list.size--
-					usage = getUsagePercentage(list.cachePath, list.maxSizeMB)
-				}
-				list.deleteFiles <- toDeletePath
-			}
-		}
-		newNode := newDataNode(key)
-		list.dataNodeMap[key] = newNode
-		if freqNode, ok := list.freqNodeMap[newNode.frequency]; ok {
-			freqNode.push(newNode)
-		} else {
-			freqNode := newFrequencyNode(newNode.frequency)
-			list.freqNodeMap[newNode.frequency] = freqNode
-			freqNode.push(newNode)
-			list.addFrequency(newNode.frequency, freqNode, nil)
-		}
-		list.setTimerIfValid(newNode)
-	}
-}
-
-// Requires Lock()
-func (list *lfuList) delete(key string) {
-	if node, ok := list.dataNodeMap[key]; ok {
-		if list.cacheTimeout > 0 {
-			node.timer.Stop()
-		}
-		freqNode := list.freqNodeMap[node.frequency]
-		freqNode.remove(node)
-		delete(list.dataNodeMap, key)
-		if freqNode.list.size == 0 {
-			list.deleteFrequency(node.frequency)
-			list.size--
-		}
-		list.deleteFiles <- node.key
-		list.cacheAge = node.frequency
-	}
-}
-
-func (list *lfuList) setTimerIfValid(node *dataNode) {
-	if list.cacheTimeout > 0 {
-		timer := time.AfterFunc(time.Duration(list.cacheTimeout)*time.Second, func() {
-			list.Lock()
-			list.delete(node.key)
-			list.Unlock()
-		})
-		node.timer = timer
-	}
-}
-
-func newLFUList(maxSizMB float64, lowerThresh float64, upperThresh float64, deleteChan chan string, cachePath string, cacheTimeout uint32) *lfuList {
-	return &lfuList{
-		dataNodeMap: make(map[string]*dataNode),
-		freqNodeMap: make(map[uint64]*frequencyNode),
-		size:        0,
-		maxSizeMB:   maxSizMB,
-		lowerThresh:  lowerThresh,
-		upperThresh:  upperThresh,
-		deleteFiles:  deleteChan,
-		cachePath:    cachePath,
-		cacheTimeout: cacheTimeout,
-	}
-}
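The deleted file implemented LFU in O(1) by chaining frequency buckets, each holding a doubly linked list of entries; a hit moves the entry to the next bucket, and eviction pops from the lowest-frequency bucket. A compact sketch of that idea, for context only — the removed code kept per-entry nodes so bucket removal was O(1), while this sketch scans the bucket for brevity:

```go
package main

import (
	"container/list"
	"fmt"
)

// lfu sketches the frequency-bucket scheme: freq maps a key to its count,
// bucket maps a count to the keys currently at that count, and min tracks
// the lowest populated count so eviction is a front-pop.
type lfu struct {
	freq   map[string]int
	bucket map[int]*list.List
	min    int
}

func newLFU() *lfu {
	return &lfu{freq: map[string]int{}, bucket: map[int]*list.List{}}
}

func (c *lfu) touch(key string) {
	f := c.freq[key]
	if f > 0 {
		removeKey(c.bucket[f], key)
		if f == c.min && c.bucket[f].Len() == 0 {
			c.min++ // old bucket drained; lowest frequency moves up
		}
	} else {
		c.min = 1 // brand-new key is always the least frequent
	}
	c.freq[key] = f + 1
	if c.bucket[f+1] == nil {
		c.bucket[f+1] = list.New()
	}
	c.bucket[f+1].PushBack(key)
}

func (c *lfu) evict() (string, bool) {
	b := c.bucket[c.min]
	if b == nil || b.Len() == 0 {
		return "", false
	}
	key := b.Remove(b.Front()).(string)
	delete(c.freq, key)
	return key, true
}

func removeKey(l *list.List, key string) {
	for e := l.Front(); e != nil; e = e.Next() {
		if e.Value.(string) == key {
			l.Remove(e)
			return
		}
	}
}

func main() {
	c := newLFU()
	c.touch("a")
	c.touch("a")
	c.touch("b")
	k, _ := c.evict() // "b" has the lowest frequency
	fmt.Println(k)
}
```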
diff --git a/component/file_cache/lfu_policy_test.go b/component/file_cache/lfu_policy_test.go
deleted file mode 100644
index 0f26f9e09..000000000
--- a/component/file_cache/lfu_policy_test.go
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
-    _____           _____   _____   ____          ______   _____   ------
-   |     |  |      |     | |     | |     |     | |       |       |      |
-   |     |  |      |     | |     | |     |     | |       |       |      |
-   | --- |  |      |     | |-----| |----  |     | |-----| |-----  ------
-   |     |  |      |     | |     | |     |     | |       |       |      |
-   | ____|  |_____ | ____| | ____| |     |_____|  _____|  |_____  |_____
-
-
-   Licensed under the MIT License <http://opensource.org/licenses/MIT>.
-
-   Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
-   Author : <blobfuse-dev@microsoft.com>
-
-   Permission is hereby granted, free of charge, to any person obtaining a copy
-   of this software and associated documentation files (the "Software"), to deal
-   in the Software without restriction, including without limitation the rights
-   to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-   copies of the Software, and to permit persons to whom the Software is
-   furnished to do so, subject to the following conditions:
-
-   The above copyright notice and this permission notice shall be included in all
-   copies or substantial portions of the Software.
-
-   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-   IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-   AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-   LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-   OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-   SOFTWARE
-*/
-
-package file_cache
-
-import (
-	"fmt"
-	"io/fs"
-	"os"
-	"path/filepath"
-	"testing"
-	"time"
-
-	"github.com/Azure/azure-storage-fuse/v2/common"
-	"github.com/Azure/azure-storage-fuse/v2/common/log"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/suite"
-)
-
-type lfuPolicyTestSuite struct {
-	suite.Suite
-	assert *assert.Assertions
-	policy *lfuPolicy
-}
-
-var cache_path = filepath.Join(home_dir, "file_cache")
-
-func (suite *lfuPolicyTestSuite) SetupTest() {
-	err := log.SetDefaultLogger("silent", common.LogConfig{Level: common.ELogLevel.LOG_DEBUG()})
-	if err != nil {
-		panic("Unable to set silent logger as default.")
-	}
-	suite.assert = assert.New(suite.T())
-
-	os.Mkdir(cache_path, fs.FileMode(0777))
-
-	config := cachePolicyConfig{
-		tmpPath:       cache_path,
-		cacheTimeout:  0,
-		maxEviction:   defaultMaxEviction,
-		maxSizeMB:     0,
-		highThreshold: defaultMaxThreshold,
-		lowThreshold:  defaultMinThreshold,
-		fileLocks:     &common.LockMap{},
-	}
-
-	suite.setupTestHelper(config)
-}
-
-func (suite *lfuPolicyTestSuite) setupTestHelper(config cachePolicyConfig) {
-	suite.policy = NewLFUPolicy(config).(*lfuPolicy)
-	suite.policy.StartPolicy()
-}
-
-func (suite *lfuPolicyTestSuite) cleanupTest() {
-	suite.policy.ShutdownPolicy()
-
-	os.RemoveAll(cache_path)
-}
-
-func (suite *lfuPolicyTestSuite) TestDefault() {
-	defer suite.cleanupTest()
-	suite.assert.EqualValues("lfu", suite.policy.Name())
-	suite.assert.EqualValues(0, suite.policy.cacheTimeout) // cacheTimeout does not change
-	suite.assert.EqualValues(defaultMaxEviction, suite.policy.maxEviction)
-	suite.assert.EqualValues(0, suite.policy.maxSizeMB)
-	suite.assert.EqualValues(defaultMaxThreshold, suite.policy.highThreshold)
-	suite.assert.EqualValues(defaultMinThreshold, suite.policy.lowThreshold)
-}
-
-func (suite *lfuPolicyTestSuite) TestUpdateConfig() {
-	defer suite.cleanupTest()
-	config := cachePolicyConfig{
-		tmpPath:       cache_path,
-		cacheTimeout:  120,
-		maxEviction:   100,
-		maxSizeMB:     10,
-		highThreshold: 70,
-		lowThreshold:  20,
-		fileLocks:     &common.LockMap{},
-	}
-	suite.policy.UpdateConfig(config)
-
-	suite.assert.NotEqualValues(120, suite.policy.cacheTimeout) // cacheTimeout does not change
-	suite.assert.EqualValues(0, suite.policy.cacheTimeout) // cacheTimeout does not change
-	suite.assert.EqualValues(100, suite.policy.maxEviction)
-	suite.assert.EqualValues(10, suite.policy.maxSizeMB)
-	suite.assert.EqualValues(70, suite.policy.highThreshold)
-	suite.assert.EqualValues(20, suite.policy.lowThreshold)
-}
-
-func (suite *lfuPolicyTestSuite) TestCacheValidNew() {
-	defer suite.cleanupTest()
-	suite.policy.CacheValid("temp")
-
-	node := suite.policy.list.get("temp")
-	suite.assert.NotNil(node)
-	suite.assert.EqualValues("temp", node.key)
-	suite.assert.EqualValues(2, node.frequency) // the get will promote the node
-}
-
-func (suite *lfuPolicyTestSuite) TestClearItemFromCache() {
-	defer suite.cleanupTest()
-	f, _ := os.Create(cache_path + "/test")
-	suite.policy.clearItemFromCache(f.Name())
-	_, attr := os.Stat(f.Name())
-	suite.assert.NotEqual(nil, attr.Error())
-}
-
-func (suite *lfuPolicyTestSuite) TestCacheValidExisting() {
-	defer suite.cleanupTest()
-	suite.policy.CacheValid("temp")
-
-	suite.policy.CacheValid("temp")
-	node := suite.policy.list.get("temp")
-	suite.assert.NotNil(node)
-	suite.assert.EqualValues("temp", node.key)
-	suite.assert.EqualValues(3, node.frequency) // the get will promote the node
-}
-
-func (suite *lfuPolicyTestSuite) TestCacheInvalidate() {
-	defer suite.cleanupTest()
-	suite.policy.CacheValid("temp")
-	suite.policy.CacheInvalidate("temp") // this is equivalent to purge since timeout=0
-
-	node := suite.policy.list.get("temp")
-	suite.assert.Nil(node)
-}
-
-func (suite *lfuPolicyTestSuite) TestCacheInvalidateTimeout() {
-	defer suite.cleanupTest()
-	suite.cleanupTest()
-
-	config := cachePolicyConfig{
-		tmpPath:       cache_path,
-		cacheTimeout:  1,
-		maxEviction:   defaultMaxEviction,
-		maxSizeMB:     0,
-		highThreshold: defaultMaxThreshold,
-		lowThreshold:  defaultMinThreshold,
-		fileLocks:     &common.LockMap{},
-	}
-
-	suite.setupTestHelper(config)
-
-	suite.policy.CacheValid("temp")
-	suite.policy.CacheInvalidate("temp")
-
-	node := suite.policy.list.get("temp")
-	suite.assert.NotNil(node)
-	suite.assert.EqualValues("temp", node.key)
-	suite.assert.EqualValues(2, node.frequency) // the get will promote the node
-}
-
-func (suite *lfuPolicyTestSuite) TestCachePurge() {
-	defer suite.cleanupTest()
-	suite.policy.CacheValid("temp")
-	suite.policy.CachePurge("temp")
-
-	node := suite.policy.list.get("temp")
-	suite.assert.Nil(node)
-}
-
-func (suite *lfuPolicyTestSuite) TestIsCached() {
-	defer suite.cleanupTest()
-	suite.policy.CacheValid("temp")
-
-	suite.assert.True(suite.policy.IsCached("temp"))
-}
-
-func (suite *lfuPolicyTestSuite) TestIsCachedFalse() {
-	defer suite.cleanupTest()
-	suite.assert.False(suite.policy.IsCached("temp"))
-}
-
-func (suite *lfuPolicyTestSuite) TestTimeout() {
-	defer suite.cleanupTest()
-	suite.cleanupTest()
-
-	config := cachePolicyConfig{
-		tmpPath:       cache_path,
-		cacheTimeout:  1,
-		maxEviction:   defaultMaxEviction,
-		maxSizeMB:     0,
-		highThreshold: defaultMaxThreshold,
-		lowThreshold:  defaultMinThreshold,
-		fileLocks:     &common.LockMap{},
-	}
-
-	suite.setupTestHelper(config)
-
-	suite.policy.CacheValid("temp")
-
-	time.Sleep(5 * time.Second) // Wait for time > cacheTimeout, the file should no longer be cached
-
-	suite.assert.False(suite.policy.IsCached("temp"))
-}
-
-func (suite *lfuPolicyTestSuite) TestMaxEvictionDefault() {
-	defer suite.cleanupTest()
-	suite.cleanupTest()
-
-	config := cachePolicyConfig{
-		tmpPath:       cache_path,
-		cacheTimeout:  1,
-		maxEviction:   defaultMaxEviction,
-		maxSizeMB:     0,
-		highThreshold: defaultMaxThreshold,
-		lowThreshold:  defaultMinThreshold,
-		fileLocks:     &common.LockMap{},
-	}
-
-	suite.setupTestHelper(config)
-
-	for i := 1; i < 5000; i++ {
-		suite.policy.CacheValid("temp" + fmt.Sprint(i))
-	}
-
-	time.Sleep(5 * time.Second) // Wait for time > cacheTimeout, the file should no longer be cached
-
-	for i := 1; i < 5000; i++ {
-		suite.assert.False(suite.policy.IsCached("temp" + fmt.Sprint(i)))
-	}
-}
-
-func (suite *lfuPolicyTestSuite) TestMaxEviction() {
-	defer suite.cleanupTest()
-	suite.cleanupTest()
-
-	config := cachePolicyConfig{
-		tmpPath:       cache_path,
-		cacheTimeout:  1,
-		maxEviction:   5,
-		maxSizeMB:     0,
-		highThreshold: defaultMaxThreshold,
-		lowThreshold:  defaultMinThreshold,
-		fileLocks:     &common.LockMap{},
-	}
-
-	suite.setupTestHelper(config)
-
-	for i := 1; i < 5; i++ {
-		suite.policy.CacheValid("temp" + fmt.Sprint(i))
-	}
-
-	time.Sleep(5 * time.Second) // Wait for time > cacheTimeout, the file should no longer be cached
-
-	for i := 1; i < 5; i++ {
-		suite.assert.False(suite.policy.IsCached("temp" + fmt.Sprint(i)))
-	}
-}
-
-func TestLFUPolicyTestSuite(t *testing.T) {
-	suite.Run(t, new(lfuPolicyTestSuite))
-}
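Several of the deleted tests exercised timeout-driven eviction: setTimerIfValid armed a per-entry timer that purged the key once cacheTimeout elapsed without another access. A minimal standalone sketch of that mechanism (type and names are illustrative, not the removed code):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// expiringSet mirrors the per-entry expiry idea: every add re-arms a
// time.AfterFunc that deletes the key when the timeout lapses.
type expiringSet struct {
	mu      sync.Mutex
	timeout time.Duration
	items   map[string]*time.Timer
}

func (s *expiringSet) add(key string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if t, ok := s.items[key]; ok {
		t.Stop() // re-access: cancel the old timer before re-arming
	}
	s.items[key] = time.AfterFunc(s.timeout, func() {
		s.mu.Lock()
		delete(s.items, key)
		s.mu.Unlock()
	})
}

func (s *expiringSet) has(key string) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	_, ok := s.items[key]
	return ok
}

func main() {
	s := &expiringSet{timeout: 100 * time.Millisecond, items: map[string]*time.Timer{}}
	s.add("temp")
	fmt.Println(s.has("temp")) // true
	time.Sleep(200 * time.Millisecond)
	fmt.Println(s.has("temp")) // false: the timer evicted it
}
```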
diff --git a/component/file_cache/lru_policy_test.go b/component/file_cache/lru_policy_test.go
index 61bb28dd4..959122779 100644
--- a/component/file_cache/lru_policy_test.go
+++ b/component/file_cache/lru_policy_test.go
@@ -37,6 +37,7 @@ import (
 	"fmt"
 	"io/fs"
 	"os"
+	"path/filepath"
 	"testing"
 	"time"
 
@@ -52,6 +53,8 @@ type lruPolicyTestSuite struct {
 	policy *lruPolicy
 }
 
+var cache_path = filepath.Join(home_dir, "file_cache")
+
 func (suite *lruPolicyTestSuite) SetupTest() {
 	// err := log.SetDefaultLogger("silent", common.LogConfig{Level: common.ELogLevel.LOG_DEBUG()})
 	// if err != nil {
diff --git a/go.mod b/go.mod
index 70dc54783..d82600c90 100755
--- a/go.mod
+++ b/go.mod
@@ -3,10 +3,10 @@ module github.com/Azure/azure-storage-fuse/v2
 
 go 1.20
 
 require (
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0
 	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0
 	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2
-	github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake v1.1.1
+	github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake v1.1.3
 	github.com/JeffreyRichter/enum v0.0.0-20180725232043-2567042f9cda
 	github.com/fsnotify/fsnotify v1.7.0
 	github.com/golang/mock v1.6.0
@@ -15,9 +15,9 @@
 	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58
 	github.com/radovskyb/watcher v1.0.7
 	github.com/sevlyar/go-daemon v0.1.6
-	github.com/spf13/cobra v1.8.0
+	github.com/spf13/cobra v1.8.1
 	github.com/spf13/pflag v1.0.5
-	github.com/spf13/viper v1.18.2
+	github.com/spf13/viper v1.19.0
 	github.com/stretchr/testify v1.9.0
 	github.com/vibhansa-msft/tlru v0.0.0-20240410102558-9e708419e21f
 	go.uber.org/atomic v1.11.0
@@ -27,7 +27,7 @@
 )
 
 require (
-	github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0 // indirect
 	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -42,7 +42,7 @@ require (
 	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
-	github.com/sagikazarmark/locafero v0.4.0 // indirect
+	github.com/sagikazarmark/locafero v0.6.0 // indirect
 	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
 	github.com/sourcegraph/conc v0.3.0 // indirect
 	github.com/spf13/afero v1.11.0 // indirect
@@ -50,7 +50,7 @@ require (
 	github.com/subosito/gotenv v1.6.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	golang.org/x/crypto v0.24.0 // indirect
-	golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
+	golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect
 	golang.org/x/net v0.26.0 // indirect
 	golang.org/x/sys v0.21.0 // indirect
 	golang.org/x/text v0.16.0 // indirect
diff --git a/go.sum b/go.sum
index 024714b64..8809737aa 100644
--- a/go.sum
+++ b/go.sum
@@ -1,14 +1,14 @@
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0 h1:1nGuui+4POelzDwI7RG56yfQJHCnKvwfMoU7VsEp+Zg=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0/go.mod h1:99EvauvlcJ1U06amZiksfYz/3aFGyIhWGHVyiZXtBAI=
 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg=
 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0 h1:H+U3Gk9zY56G3u872L82bk4thcsy2Gghb9ExT4Zvm1o=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0/go.mod h1:mgrmMSgaLp9hmax62XQTd0N4aAqSE5E0DulSpVYK7vc=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70=
 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2 h1:YUUxeiOWgdAQE3pXt2H7QXzZs0q8UBjgRbl56qo8GYM=
 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2/go.mod h1:dmXQgZuiSubAecswZE+Sm8jkvEa7kQgTPVRvwL/nd0E=
-github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake v1.1.1 h1:mkaGMgFkpDJVs7QUQrHvqEEpJFvoDrqGaHqMkywhGN0=
-github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake v1.1.1/go.mod h1:3S0vo7Y+O3Fjnnon5JXVrlG2IrfQkXasvKWB4OwX1lk=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake v1.1.3 h1:RxiW5e1f3kgm6WGsnaRcnF1BOI+RnbwGKLSz+KPbxGY=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake v1.1.3/go.mod h1:X6kh3l8pYjkOCz+PNLSrlyAhQSU/Cmkdnx72LwRwWZI=
 github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
 github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
 github.com/JeffreyRichter/enum v0.0.0-20180725232043-2567042f9cda h1:NOo6+gM9NNPJ3W56nxOKb4164LEw094U0C8zYQM8mQU=
@@ -63,8 +63,8 @@ github.com/radovskyb/watcher v1.0.7/go.mod h1:78okwvY5wPdzcb1UYnip1pvrZNIVEIh/Cm
 github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
-github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
+github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk=
+github.com/sagikazarmark/locafero v0.6.0/go.mod h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0=
 github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
 github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
 github.com/sevlyar/go-daemon v0.1.6 h1:EUh1MDjEM4BI109Jign0EaknA2izkOyi0LV3ro3QQGs=
@@ -77,8 +77,8 @@ github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
 github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
-github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
+github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
+github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
@@ -101,8 +101,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
 golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
-golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
-golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
+golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY=
+golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
diff --git a/go_installer.sh b/go_installer.sh
index 5ca9e98e7..fdc566717 100755
--- a/go_installer.sh
+++ b/go_installer.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 work_dir=$(echo $1 | sed 's:/*$::')
-version="1.22.1"
+version="1.22.4"
 arch=`hostnamectl | grep "Arch" | rev | cut -d " " -f 1 | rev`
 
 if [ $arch != "arm64" ]
@@ -13,3 +13,4 @@ wget "https://golang.org/dl/go$version.linux-$arch.tar.gz" -P "$work_dir"
 sudo rm -rf /usr/local/go
 sudo tar -C /usr/local -xzf "$work_dir"/go"$version".linux-$arch.tar.gz
 sudo ln -sf /usr/local/go/bin/go /usr/bin/go
+sudo ln -sf /usr/local/go/bin/gofmt /usr/bin/gofmt
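go_installer.sh now also symlinks gofmt alongside go. One thing that enables on the self-hosted agents is a formatting gate; a hypothetical usage sketch (not part of this patch):

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

// Run `gofmt -l .` and fail if any file would be rewritten. This relies only
// on gofmt being on PATH, which the new symlink guarantees on the agents.
func main() {
	out, err := exec.Command("gofmt", "-l", ".").CombinedOutput()
	if err != nil {
		panic(err)
	}
	if len(out) > 0 {
		fmt.Printf("gofmt needed on:\n%s", out)
		os.Exit(1)
	}
}
```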
a/test/e2e_tests/dir_test.go b/test/e2e_tests/dir_test.go index b2f333837..172b11c45 100644 --- a/test/e2e_tests/dir_test.go +++ b/test/e2e_tests/dir_test.go @@ -281,7 +281,7 @@ func (suite *dirTestSuite) TestDirGetStats() { // # Change mod of directory func (suite *dirTestSuite) TestDirChmod() { if suite.adlsTest == true { - dirName := suite.testPath + "/test3" + dirName := suite.testPath + "/testchmod" err := os.Mkdir(dirName, 0777) suite.Equal(nil, err) diff --git a/testdata/config/azure_key_lfu.yaml b/testdata/config/azure_key_lfu.yaml deleted file mode 100644 index 8c794f2c9..000000000 --- a/testdata/config/azure_key_lfu.yaml +++ /dev/null @@ -1,36 +0,0 @@ -logging: - level: log_debug - file-path: "blobfuse2-logs.txt" - type: base - -components: - - libfuse - - file_cache - - attr_cache - - azstorage - -libfuse: - attribute-expiration-sec: 0 - entry-expiration-sec: 0 - negative-entry-expiration-sec: 0 - ignore-open-flags: true - -file_cache: - path: { 1 } - policy: lfu - max-size-mb: 2048 - allow-non-empty-temp: true - cleanup-on-start: true - -attr_cache: - timeout-sec: 3600 - -azstorage: - type: { ACCOUNT_TYPE } - endpoint: { ACCOUNT_ENDPOINT } - use-http: false - account-name: { NIGHTLY_STO_ACC_NAME } - account-key: { NIGHTLY_STO_ACC_KEY } - mode: key - container: { 0 } - tier: hot From 0e56714623585c69914de9e171433f32dc31da17 Mon Sep 17 00:00:00 2001 From: Sourav Gupta <98318303+souravgupta-msft@users.noreply.github.com> Date: Tue, 18 Jun 2024 12:21:18 +0530 Subject: [PATCH 05/73] Fixing proxy URL parsing (#1429) --- CHANGELOG.md | 3 +++ component/azstorage/azauthcli.go | 16 +++++++++-- component/azstorage/azauthkey.go | 16 +++++++++-- component/azstorage/azauthmsi.go | 16 +++++++++-- component/azstorage/azauthsas.go | 16 +++++++++-- component/azstorage/azauthspn.go | 16 +++++++++-- component/azstorage/config.go | 1 + component/azstorage/config_test.go | 37 ++++++++++++++++++++----- component/azstorage/utils.go | 43 +++++++++++++++++------------- component/azstorage/utils_test.go | 32 +++++++++++++++++++--- 10 files changed, 157 insertions(+), 39 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e778f5a91..60337bde7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,12 @@ ## 2.3.1 (Unreleased) **Bug Fixes** +- Fixed the case where file creation using SAS on HNS accounts was returning the wrong error code. +- [#1402](https://github.com/Azure/azure-storage-fuse/issues/1402) Fixed proxy URL parsing. **Features** **Other Changes** +- LFU policy in file cache has been deprecated.
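The proxy fix this patch delivers (see the utils.go hunk further below) replaces a hand-built url.URL with proper parsing. A minimal sketch of the difference, not part of the patch itself:

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	proxyAddress := "http://127.0.0.1:8080"

	// Old behaviour (pre-#1429): the whole address, scheme included, was
	// stuffed into url.URL.Host, leaving Scheme empty and the host invalid.
	broken := url.URL{Host: proxyAddress}
	fmt.Printf("old: scheme=%q host=%q\n", broken.Scheme, broken.Host)

	// New behaviour: parse the address, then let http.ProxyURL build the
	// proxy callback that http.Transport expects.
	u, err := url.Parse(proxyAddress)
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	transport := &http.Transport{Proxy: http.ProxyURL(u)}
	fmt.Printf("new: scheme=%q host=%q transport=%T\n", u.Scheme, u.Host, transport)
}

Parsing also surfaces malformed addresses as errors, which is why the option builders in the hunks below now return an (options, error) pair instead of bare options.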
## 2.3.0 (2024-05-16) **Bug Fixes** diff --git a/component/azstorage/azauthcli.go b/component/azstorage/azauthcli.go index 66972426a..86d6b14f1 100644 --- a/component/azstorage/azauthcli.go +++ b/component/azstorage/azauthcli.go @@ -66,7 +66,13 @@ func (azcli *azAuthBlobCLI) getServiceClient(stConfig *AzStorageConfig) (interfa return nil, err } - svcClient, err := service.NewClient(azcli.config.Endpoint, cred, getAzBlobServiceClientOptions(stConfig)) + opts, err := getAzBlobServiceClientOptions(stConfig) + if err != nil { + log.Err("azAuthBlobCLI::getServiceClient : Failed to create client options [%s]", err.Error()) + return nil, err + } + + svcClient, err := service.NewClient(azcli.config.Endpoint, cred, opts) if err != nil { log.Err("azAuthBlobCLI::getServiceClient : Failed to create service client [%s]", err.Error()) } @@ -86,7 +92,13 @@ func (azcli *azAuthDatalakeCLI) getServiceClient(stConfig *AzStorageConfig) (int return nil, err } - svcClient, err := serviceBfs.NewClient(azcli.config.Endpoint, cred, getAzDatalakeServiceClientOptions(stConfig)) + opts, err := getAzDatalakeServiceClientOptions(stConfig) + if err != nil { + log.Err("azAuthDatalakeCLI::getServiceClient : Failed to create client options [%s]", err.Error()) + return nil, err + } + + svcClient, err := serviceBfs.NewClient(azcli.config.Endpoint, cred, opts) if err != nil { log.Err("azAuthDatalakeCLI::getServiceClient : Failed to create service client [%s]", err.Error()) } diff --git a/component/azstorage/azauthkey.go b/component/azstorage/azauthkey.go index b78b61780..17263453e 100644 --- a/component/azstorage/azauthkey.go +++ b/component/azstorage/azauthkey.go @@ -68,7 +68,13 @@ func (azkey *azAuthBlobKey) getServiceClient(stConfig *AzStorageConfig) (interfa return nil, err } - svcClient, err := service.NewClientWithSharedKeyCredential(azkey.config.Endpoint, cred, getAzBlobServiceClientOptions(stConfig)) + opts, err := getAzBlobServiceClientOptions(stConfig) + if err != nil { + log.Err("azAuthBlobKey::getServiceClient : Failed to create client options [%s]", err.Error()) + return nil, err + } + + svcClient, err := service.NewClientWithSharedKeyCredential(azkey.config.Endpoint, cred, opts) if err != nil { log.Err("azAuthBlobKey::getServiceClient : Failed to create service client [%s]", err.Error()) } @@ -93,7 +99,13 @@ func (azkey *azAuthDatalakeKey) getServiceClient(stConfig *AzStorageConfig) (int return nil, err } - svcClient, err := serviceBfs.NewClientWithSharedKeyCredential(azkey.config.Endpoint, cred, getAzDatalakeServiceClientOptions(stConfig)) + opts, err := getAzDatalakeServiceClientOptions(stConfig) + if err != nil { + log.Err("azAuthDatalakeKey::getServiceClient : Failed to create client options [%s]", err.Error()) + return nil, err + } + + svcClient, err := serviceBfs.NewClientWithSharedKeyCredential(azkey.config.Endpoint, cred, opts) if err != nil { log.Err("azAuthDatalakeKey::getServiceClient : Failed to create service client [%s]", err.Error()) } diff --git a/component/azstorage/azauthmsi.go b/component/azstorage/azauthmsi.go index 26c99276f..64b403043 100644 --- a/component/azstorage/azauthmsi.go +++ b/component/azstorage/azauthmsi.go @@ -119,7 +119,13 @@ func (azmsi *azAuthBlobMSI) getServiceClient(stConfig *AzStorageConfig) (interfa return nil, err } - svcClient, err := service.NewClient(azmsi.config.Endpoint, cred, getAzBlobServiceClientOptions(stConfig)) + opts, err := getAzBlobServiceClientOptions(stConfig) + if err != nil { + log.Err("azAuthBlobMSI::getServiceClient : Failed to create client options 
[%s]", err.Error()) + return nil, err + } + + svcClient, err := service.NewClient(azmsi.config.Endpoint, cred, opts) if err != nil { log.Err("azAuthBlobMSI::getServiceClient : Failed to create service client [%s]", err.Error()) } @@ -139,7 +145,13 @@ func (azmsi *azAuthDatalakeMSI) getServiceClient(stConfig *AzStorageConfig) (int return nil, err } - svcClient, err := serviceBfs.NewClient(azmsi.config.Endpoint, cred, getAzDatalakeServiceClientOptions(stConfig)) + opts, err := getAzDatalakeServiceClientOptions(stConfig) + if err != nil { + log.Err("azAuthDatalakeMSI::getServiceClient : Failed to create client options [%s]", err.Error()) + return nil, err + } + + svcClient, err := serviceBfs.NewClient(azmsi.config.Endpoint, cred, opts) if err != nil { log.Err("azAuthDatalakeMSI::getServiceClient : Failed to create service client [%s]", err.Error()) } diff --git a/component/azstorage/azauthsas.go b/component/azstorage/azauthsas.go index 6e36d507a..79ff8dc03 100644 --- a/component/azstorage/azauthsas.go +++ b/component/azstorage/azauthsas.go @@ -73,7 +73,13 @@ func (azsas *azAuthBlobSAS) getServiceClient(stConfig *AzStorageConfig) (interfa return nil, errors.New("sas key for account is empty, cannot authenticate user") } - svcClient, err := service.NewClientWithNoCredential(azsas.getEndpoint(), getAzBlobServiceClientOptions(stConfig)) + opts, err := getAzBlobServiceClientOptions(stConfig) + if err != nil { + log.Err("azAuthBlobSAS::getServiceClient : Failed to create client options [%s]", err.Error()) + return nil, err + } + + svcClient, err := service.NewClientWithNoCredential(azsas.getEndpoint(), opts) if err != nil { log.Err("azAuthBlobSAS::getServiceClient : Failed to create service client [%s]", err.Error()) } @@ -92,7 +98,13 @@ func (azsas *azAuthDatalakeSAS) getServiceClient(stConfig *AzStorageConfig) (int return nil, errors.New("sas key for account is empty, cannot authenticate user") } - svcClient, err := serviceBfs.NewClientWithNoCredential(azsas.getEndpoint(), getAzDatalakeServiceClientOptions(stConfig)) + opts, err := getAzDatalakeServiceClientOptions(stConfig) + if err != nil { + log.Err("azAuthDatalakeSAS::getServiceClient : Failed to create client options [%s]", err.Error()) + return nil, err + } + + svcClient, err := serviceBfs.NewClientWithNoCredential(azsas.getEndpoint(), opts) if err != nil { log.Err("azAuthDatalakeSAS::getServiceClient : Failed to create service client [%s]", err.Error()) } diff --git a/component/azstorage/azauthspn.go b/component/azstorage/azauthspn.go index 4e357fa88..62ac4f04c 100644 --- a/component/azstorage/azauthspn.go +++ b/component/azstorage/azauthspn.go @@ -96,7 +96,13 @@ func (azspn *azAuthBlobSPN) getServiceClient(stConfig *AzStorageConfig) (interfa return nil, err } - svcClient, err := service.NewClient(azspn.config.Endpoint, cred, getAzBlobServiceClientOptions(stConfig)) + opts, err := getAzBlobServiceClientOptions(stConfig) + if err != nil { + log.Err("azAuthBlobSPN::getServiceClient : Failed to create client options [%s]", err.Error()) + return nil, err + } + + svcClient, err := service.NewClient(azspn.config.Endpoint, cred, opts) if err != nil { log.Err("azAuthBlobSPN::getServiceClient : Failed to create service client [%s]", err.Error()) } @@ -116,7 +122,13 @@ func (azspn *azAuthDatalakeSPN) getServiceClient(stConfig *AzStorageConfig) (int return nil, err } - svcClient, err := serviceBfs.NewClient(azspn.config.Endpoint, cred, getAzDatalakeServiceClientOptions(stConfig)) + opts, err := getAzDatalakeServiceClientOptions(stConfig) + if err != 
nil { + log.Err("azAuthDatalakeSPN::getServiceClient : Failed to create client options [%s]", err.Error()) + return nil, err + } + + svcClient, err := serviceBfs.NewClient(azspn.config.Endpoint, cred, opts) if err != nil { log.Err("azAuthDatalakeSPN::getServiceClient : Failed to create service client [%s]", err.Error()) } diff --git a/component/azstorage/config.go b/component/azstorage/config.go index a01937ee6..095a6650f 100644 --- a/component/azstorage/config.go +++ b/component/azstorage/config.go @@ -407,6 +407,7 @@ func ParseAndValidateConfig(az *AzStorage, opt AzStorageOptions) error { } } } + az.stConfig.proxyAddress = formatEndpointProtocol(az.stConfig.proxyAddress, opt.UseHTTP) log.Info("ParseAndValidateConfig : using the following proxy address from the config file: %s", az.stConfig.proxyAddress) err = ParseAndReadDynamicConfig(az, opt, false) diff --git a/component/azstorage/config_test.go b/component/azstorage/config_test.go index 56ac46736..92c41419c 100644 --- a/component/azstorage/config_test.go +++ b/component/azstorage/config_test.go @@ -172,26 +172,51 @@ func (s *configTestSuite) TestProxyConfig() { opt.HttpsProxyAddress = "127.0.0.1" err := ParseAndValidateConfig(az, opt) assert.Nil(err) - assert.Equal(az.stConfig.proxyAddress, opt.HttpsProxyAddress) + assert.Equal(az.stConfig.proxyAddress, formatEndpointProtocol(opt.HttpsProxyAddress, !opt.UseHTTPS)) - opt.HttpProxyAddress = "128.0.0.1" + opt.HttpsProxyAddress = "https://128.0.0.1:8080/" err = ParseAndValidateConfig(az, opt) assert.Nil(err) - assert.Equal(az.stConfig.proxyAddress, opt.HttpProxyAddress) + assert.Equal(az.stConfig.proxyAddress, formatEndpointProtocol(opt.HttpsProxyAddress, !opt.UseHTTPS)) + + opt.HttpsProxyAddress = "http://129.0.0.1:8080/" + err = ParseAndValidateConfig(az, opt) + assert.Nil(err) + assert.Equal(az.stConfig.proxyAddress, formatEndpointProtocol(opt.HttpsProxyAddress, !opt.UseHTTPS)) + + opt.HttpProxyAddress = "130.0.0.1" + err = ParseAndValidateConfig(az, opt) + assert.Nil(err) + assert.Equal(az.stConfig.proxyAddress, formatEndpointProtocol(opt.HttpProxyAddress, !opt.UseHTTPS)) + + opt.HttpProxyAddress = "http://131.0.0.1:8080/" + err = ParseAndValidateConfig(az, opt) + assert.Nil(err) + assert.Equal(az.stConfig.proxyAddress, formatEndpointProtocol(opt.HttpProxyAddress, !opt.UseHTTPS)) config.SetBool(compName+".use-https", true) opt.UseHTTPS = true opt.HttpsProxyAddress = "" - opt.HttpProxyAddress = "127.0.0.1" + opt.HttpProxyAddress = "132.0.0.1" err = ParseAndValidateConfig(az, opt) assert.NotNil(err) assert.Contains(err.Error(), "`http-proxy` Invalid : must set `use-http: true`") - opt.HttpsProxyAddress = "128.0.0.1" + opt.HttpsProxyAddress = "133.0.0.1" + err = ParseAndValidateConfig(az, opt) + assert.Nil(err) + assert.Equal(az.stConfig.proxyAddress, formatEndpointProtocol(opt.HttpsProxyAddress, !opt.UseHTTPS)) + + opt.HttpsProxyAddress = "http://134.0.0.1:8080/" + err = ParseAndValidateConfig(az, opt) + assert.Nil(err) + assert.Equal(az.stConfig.proxyAddress, formatEndpointProtocol(opt.HttpsProxyAddress, !opt.UseHTTPS)) + + opt.HttpsProxyAddress = "https://135.0.0.1:8080/" err = ParseAndValidateConfig(az, opt) assert.Nil(err) - assert.Equal(az.stConfig.proxyAddress, opt.HttpsProxyAddress) + assert.Equal(az.stConfig.proxyAddress, formatEndpointProtocol(opt.HttpsProxyAddress, !opt.UseHTTPS)) } func (s *configTestSuite) TestMaxResultsForList() { diff --git a/component/azstorage/utils.go b/component/azstorage/utils.go index 2c3139be3..b0aea0e56 100644 --- a/component/azstorage/utils.go +++ 
b/component/azstorage/utils.go @@ -84,7 +84,7 @@ const ( ) // getAzStorageClientOptions : Create client options based on the config -func getAzStorageClientOptions(conf *AzStorageConfig) azcore.ClientOptions { +func getAzStorageClientOptions(conf *AzStorageConfig) (azcore.ClientOptions, error) { retryOptions := policy.RetryOptions{ MaxRetries: conf.maxRetries, // Try at most 3 times to perform the operation (set to 1 to disable retries) TryTimeout: time.Second * time.Duration(conf.maxTimeout), // Maximum time allowed for any single try @@ -101,28 +101,33 @@ func getAzStorageClientOptions(conf *AzStorageConfig) azcore.ClientOptions { logOptions := getSDKLogOptions() - transportOptions := newBlobfuse2HttpClient(conf) + transportOptions, err := newBlobfuse2HttpClient(conf) + if err != nil { + log.Err("utils::getAzStorageClientOptions : Failed to create transport client [%s]", err.Error()) + } return azcore.ClientOptions{ Retry: retryOptions, Logging: logOptions, PerCallPolicies: []policy.Policy{telemetryPolicy}, Transport: transportOptions, - } + }, err } // getAzBlobServiceClientOptions : Create azblob service client options based on the config -func getAzBlobServiceClientOptions(conf *AzStorageConfig) *service.ClientOptions { +func getAzBlobServiceClientOptions(conf *AzStorageConfig) (*service.ClientOptions, error) { + opts, err := getAzStorageClientOptions(conf) return &service.ClientOptions{ - ClientOptions: getAzStorageClientOptions(conf), - } + ClientOptions: opts, + }, err } // getAzDatalakeServiceClientOptions : Create azdatalake service client options based on the config -func getAzDatalakeServiceClientOptions(conf *AzStorageConfig) *serviceBfs.ClientOptions { +func getAzDatalakeServiceClientOptions(conf *AzStorageConfig) (*serviceBfs.ClientOptions, error) { + opts, err := getAzStorageClientOptions(conf) return &serviceBfs.ClientOptions{ - ClientOptions: getAzStorageClientOptions(conf), - } + ClientOptions: opts, + }, err } // getLogOptions : to configure the SDK logging policy @@ -154,17 +159,17 @@ func setSDKLogListener() { } // Create an HTTP Client with configured proxy -func newBlobfuse2HttpClient(conf *AzStorageConfig) *http.Client { - var ProxyURL func(req *http.Request) (*url.URL, error) = func(req *http.Request) (*url.URL, error) { - // If a proxy address is passed return - var proxyURL url.URL = url.URL{ - Host: conf.proxyAddress, - } - return &proxyURL, nil - } - +func newBlobfuse2HttpClient(conf *AzStorageConfig) (*http.Client, error) { + var ProxyURL func(req *http.Request) (*url.URL, error) if conf.proxyAddress == "" { ProxyURL = nil + } else { + u, err := url.Parse(conf.proxyAddress) + if err != nil { + log.Err("utils::newBlobfuse2HttpClient : Failed to parse proxy : %s [%s]", conf.proxyAddress, err.Error()) + return nil, err + } + ProxyURL = http.ProxyURL(u) } return &http.Client{ @@ -188,7 +193,7 @@ func newBlobfuse2HttpClient(conf *AzStorageConfig) *http.Client { DisableCompression: conf.disableCompression, MaxResponseHeaderBytes: MaxResponseHeaderBytes, }, - } + }, nil } // getCloudConfiguration : returns cloud configuration type on the basis of endpoint diff --git a/component/azstorage/utils_test.go b/component/azstorage/utils_test.go index 5818c7737..5cedf5e99 100644 --- a/component/azstorage/utils_test.go +++ b/component/azstorage/utils_test.go @@ -322,28 +322,52 @@ func (s *utilsTestSuite) TestSanitizeSASKey() { func (s *utilsTestSuite) TestBlockNonProxyOptions() { assert := assert.New(s.T()) - opt := getAzBlobServiceClientOptions(&AzStorageConfig{}) + opt, err 
:= getAzBlobServiceClientOptions(&AzStorageConfig{}) + assert.Nil(err) assert.EqualValues(opt.Retry.MaxRetries, 0) assert.GreaterOrEqual(len(opt.Logging.AllowedHeaders), 1) } func (s *utilsTestSuite) TestBlockProxyOptions() { assert := assert.New(s.T()) - opt := getAzBlobServiceClientOptions(&AzStorageConfig{proxyAddress: "127.0.0.1", maxRetries: 3}) + opt, err := getAzBlobServiceClientOptions(&AzStorageConfig{proxyAddress: "127.0.0.1", maxRetries: 3}) + assert.Nil(err) + assert.EqualValues(opt.Retry.MaxRetries, 3) + assert.GreaterOrEqual(len(opt.Logging.AllowedHeaders), 1) + + opt, err = getAzBlobServiceClientOptions(&AzStorageConfig{proxyAddress: "http://127.0.0.1:8080", maxRetries: 3}) + assert.Nil(err) + assert.EqualValues(opt.Retry.MaxRetries, 3) + assert.GreaterOrEqual(len(opt.Logging.AllowedHeaders), 1) + + opt, err = getAzBlobServiceClientOptions(&AzStorageConfig{proxyAddress: "https://128.0.0.1:8080", maxRetries: 3}) + assert.Nil(err) assert.EqualValues(opt.Retry.MaxRetries, 3) assert.GreaterOrEqual(len(opt.Logging.AllowedHeaders), 1) } func (s *utilsTestSuite) TestBfsNonProxyOptions() { assert := assert.New(s.T()) - opt := getAzDatalakeServiceClientOptions(&AzStorageConfig{}) + opt, err := getAzDatalakeServiceClientOptions(&AzStorageConfig{}) + assert.Nil(err) assert.EqualValues(opt.Retry.MaxRetries, 0) assert.GreaterOrEqual(len(opt.Logging.AllowedHeaders), 1) } func (s *utilsTestSuite) TestBfsProxyOptions() { assert := assert.New(s.T()) - opt := getAzBlobServiceClientOptions(&AzStorageConfig{proxyAddress: "127.0.0.1", maxRetries: 3}) + opt, err := getAzDatalakeServiceClientOptions(&AzStorageConfig{proxyAddress: "127.0.0.1", maxRetries: 3}) + assert.Nil(err) + assert.EqualValues(opt.Retry.MaxRetries, 3) + assert.GreaterOrEqual(len(opt.Logging.AllowedHeaders), 1) + + opt, err = getAzDatalakeServiceClientOptions(&AzStorageConfig{proxyAddress: "http://127.0.0.1:8080", maxRetries: 3}) + assert.Nil(err) + assert.EqualValues(opt.Retry.MaxRetries, 3) + assert.GreaterOrEqual(len(opt.Logging.AllowedHeaders), 1) + + opt, err = getAzDatalakeServiceClientOptions(&AzStorageConfig{proxyAddress: "https://128.0.0.1:8080", maxRetries: 3}) + assert.Nil(err) assert.EqualValues(opt.Retry.MaxRetries, 3) assert.GreaterOrEqual(len(opt.Logging.AllowedHeaders), 1) } From 6be841d32d14f5716edd096ebfbb7c3682f292df Mon Sep 17 00:00:00 2001 From: ashruti-msft <137055338+ashruti-msft@users.noreply.github.com> Date: Fri, 21 Jun 2024 11:14:51 +0530 Subject: [PATCH 06/73] Autoconfig (#1416) --- .github/CODEOWNERS | 2 +- CHANGELOG.md | 5 ++ README.md | 8 +-- component/block_cache/block_cache.go | 38 +++++++++---- component/block_cache/block_cache_test.go | 66 ++++++++++++++++++++--- setup/baseConfig.yaml | 8 +-- setup/devConfig.yaml | 8 +-- 7 files changed, 106 insertions(+), 29 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 5d5380059..965cbc731 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1 +1 @@ -* @vibhansa-msft @souravgupta-msft @ashruti-msft +* @vibhansa-msft @souravgupta-msft @ashruti-msft @syeleti-msft diff --git a/CHANGELOG.md b/CHANGELOG.md index 60337bde7..12383a513 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,11 @@ **Other Changes** - LFU policy in file cache has been deprecated. 
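Patch 06 below starts deriving block-cache defaults from the machine instead of hard-coding them; the changelog entries that follow spell out the rules. A standalone, Linux-only sketch of the same arithmetic (mirroring the syscall.Sysinfo and syscall.Statfs calls added in the diff; the /tmp path is only an example stand-in for the cache path):

package main

import (
	"fmt"
	"runtime"
	"syscall"
)

func main() {
	// mem-size-mb default: 80% of free RAM (Sysinfo reports Freeram in Units).
	var si syscall.Sysinfo_t
	if err := syscall.Sysinfo(&si); err == nil {
		memBytes := uint64(0.8 * float64(si.Freeram) * float64(si.Unit))
		fmt.Printf("mem-size default  : %d MB\n", memBytes/(1024*1024))
	}

	// disk-size-mb default: 80% of the free space on the cache path.
	var st syscall.Statfs_t
	if err := syscall.Statfs("/tmp", &st); err == nil {
		diskBytes := uint64(0.8 * float64(st.Bavail) * float64(st.Bsize))
		fmt.Printf("disk-size default : %d MB\n", diskBytes/(1024*1024))
	}

	// Thread counts scale with the core count.
	fmt.Printf("prefetch=%d parallelism=%d\n", 2*runtime.NumCPU(), 3*runtime.NumCPU())
}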
+- Default values, if not assigned in config, for the following parameters in block-cache are calculated as follows: + - Memory preallocated for Block-Cache is 80% of free memory + - Disk Cache size is 80% of free disk space + - Prefetch is 2 times number of CPU cores + - Parallelism is 3 times the number of CPU cores ## 2.3.0 (2024-05-16) **Bug Fixes** diff --git a/README.md b/README.md index d9b97d5d4..d9fa1c1ae 100755 --- a/README.md +++ b/README.md @@ -138,12 +138,12 @@ To learn about a specific command, just include the name of the command (For exa * `--block-size-mb=`: Size of a block to be downloaded during streaming. - Block-Cache options * `--block-cache-block-size=`: Size of a block to be downloaded as a unit. - * `--block-cache-pool-size=`: Size of pool to be used for caching. This limits total memory used by block-cache. + * `--block-cache-pool-size=`: Size of pool to be used for caching. This limits total memory used by block-cache. Default - 80% of free memory available. * `--block-cache-path=`: Path where downloaded blocks will be persisted. Not providing this parameter will disable the disk caching. - * `--block-cache-disk-size=`: Disk space to be used for caching. + * `--block-cache-disk-size=`: Disk space to be used for caching. Default - 80% of free disk space. * `--block-cache-disk-timeout=`: Timeout for which disk cache is valid. - * `--block-cache-prefetch=`: Number of blocks to prefetch at max when sequential reads are in progress. - * `--block-cache-parallelism=`: Number of parallel threads doing upload/download operation. + * `--block-cache-prefetch=`: Number of blocks to prefetch at max when sequential reads are in progress. Default - 2 times number of CPU cores. + * `--block-cache-parallelism=`: Number of parallel threads doing upload/download operation. Default - 3 times number of CPU cores. * `--block-cache-prefetch-on-open=true`: Start prefetching on open system call instead of waiting for first read. Enhances perf if file is read sequentially from offset 0. - Fuse options * `--attr-timeout=`: Time the kernel can cache inode attributes. diff --git a/component/block_cache/block_cache.go b/component/block_cache/block_cache.go index 72290d442..fffdcf787 100644 --- a/component/block_cache/block_cache.go +++ b/component/block_cache/block_cache.go @@ -41,9 +41,11 @@ import ( "io" "os" "path/filepath" + "runtime" "sort" "strings" "sync" + "syscall" "time" "github.com/Azure/azure-storage-fuse/v2/common" @@ -206,22 +208,26 @@ func (bc *BlockCache) Configure(_ bool) error { bc.blockSize = uint64(conf.BlockSize * float64(_1MB)) } - bc.memSize = uint64(4192) * _1MB if config.IsSet(compName + ".mem-size-mb") { bc.memSize = conf.MemSize * _1MB + } else { + var sysinfo syscall.Sysinfo_t + err = syscall.Sysinfo(&sysinfo) + if err != nil { + log.Err("BlockCache::Configure : config error %s [%s]. 
Assigning a pre-defined value of 4GB.", bc.Name(), err.Error()) + bc.memSize = uint64(4192) * _1MB + } else { + bc.memSize = uint64(0.8 * (float64)(sysinfo.Freeram) * float64(sysinfo.Unit)) + } } - bc.diskSize = uint64(4192) - if config.IsSet(compName + ".disk-size-mb") { - bc.diskSize = conf.DiskSize - } bc.diskTimeout = defaultTimeout if config.IsSet(compName + ".disk-timeout-sec") { bc.diskTimeout = conf.DiskTimeout } bc.prefetchOnOpen = conf.PrefetchOnOpen - bc.prefetch = MIN_PREFETCH + bc.prefetch = uint32(2 * runtime.NumCPU()) bc.noPrefetch = false err = config.UnmarshalKey("lazy-write", &bc.lazyWrite) @@ -242,7 +248,7 @@ func (bc *BlockCache) Configure(_ bool) error { bc.maxDiskUsageHit = false - bc.workers = 128 + bc.workers = uint32(3 * runtime.NumCPU()) if config.IsSet(compName + ".parallelism") { bc.workers = conf.Workers } @@ -261,6 +267,18 @@ func (bc *BlockCache) Configure(_ bool) error { return fmt.Errorf("config error in %s [%s]", bc.Name(), err.Error()) } } + var stat syscall.Statfs_t + err = syscall.Statfs(bc.tmpPath, &stat) + if err != nil { + log.Err("BlockCache::Configure : config error %s [%s]. Assigning a default value of 4GB or if any value is assigned to .disk-size-mb in config.", bc.Name(), err.Error()) + bc.diskSize = uint64(4192) * _1MB + } else { + bc.diskSize = uint64(0.8 * float64(stat.Bavail) * float64(stat.Bsize)) + } + } + + if config.IsSet(compName + ".disk-size-mb") { + bc.diskSize = conf.DiskSize * _1MB } if (uint64(bc.prefetch) * uint64(bc.blockSize)) > bc.memSize { @@ -268,7 +286,7 @@ func (bc *BlockCache) Configure(_ bool) error { return fmt.Errorf("config error in %s [memory limit too low for configured prefetch]", bc.Name()) } - log.Info("BlockCache::Configure : block size %v, mem size %v, worker %v, prefetch %v, disk path %v, max size %vMB, disk timeout %v, prefetch-on-open %t, maxDiskUsageHit %v, noPrefetch %v", + log.Info("BlockCache::Configure : block size %v, mem size %v, worker %v, prefetch %v, disk path %v, max size %v, disk timeout %v, prefetch-on-open %t, maxDiskUsageHit %v, noPrefetch %v", bc.blockSize, bc.memSize, bc.workers, bc.prefetch, bc.tmpPath, bc.diskSize, bc.diskTimeout, bc.prefetchOnOpen, bc.maxDiskUsageHit, bc.noPrefetch) bc.blockPool = NewBlockPool(bc.blockSize, bc.memSize) @@ -284,7 +302,7 @@ func (bc *BlockCache) Configure(_ bool) error { } if bc.tmpPath != "" { - bc.diskPolicy, err = tlru.New(uint32((bc.diskSize*_1MB)/bc.blockSize), bc.diskTimeout, bc.diskEvict, 60, bc.checkDiskUsage) + bc.diskPolicy, err = tlru.New(uint32((bc.diskSize)/bc.blockSize), bc.diskTimeout, bc.diskEvict, 60, bc.checkDiskUsage) if err != nil { log.Err("BlockCache::Configure : fail to create LRU for memory nodes [%s]", err.Error()) return fmt.Errorf("config error in %s [%s]", bc.Name(), err.Error()) @@ -1258,7 +1276,7 @@ func (bc *BlockCache) diskEvict(node *list.Element) { // checkDiskUsage : Callback to check usage of disk and decide whether eviction is needed func (bc *BlockCache) checkDiskUsage() bool { data, _ := common.GetUsage(bc.tmpPath) - usage := uint32((data * 100) / float64(bc.diskSize)) + usage := uint32((data * 100) / float64(bc.diskSize/_1MB)) if bc.maxDiskUsageHit { if usage >= MIN_POOL_USAGE { diff --git a/component/block_cache/block_cache_test.go b/component/block_cache/block_cache_test.go index 02667c7e4..e748b7610 100644 --- a/component/block_cache/block_cache_test.go +++ b/component/block_cache/block_cache_test.go @@ -34,12 +34,16 @@ package block_cache import ( + "bytes" "context" "fmt" "io/ioutil" + "math" "math/rand" "os" + 
"os/exec" "path/filepath" + "strconv" "strings" "testing" "time" @@ -159,16 +163,66 @@ func (suite *blockCacheTestSuite) TestEmpty() { suite.assert.Nil(err) suite.assert.Equal(tobj.blockCache.Name(), "block_cache") suite.assert.EqualValues(tobj.blockCache.blockSize, 16*_1MB) - suite.assert.EqualValues(tobj.blockCache.memSize, 4192*_1MB) - suite.assert.EqualValues(tobj.blockCache.diskSize, 4192) + suite.assert.EqualValues(tobj.blockCache.diskSize, 0) suite.assert.EqualValues(tobj.blockCache.diskTimeout, defaultTimeout) - suite.assert.EqualValues(tobj.blockCache.workers, 128) - suite.assert.EqualValues(tobj.blockCache.prefetch, MIN_PREFETCH) + + cmd := exec.Command("nproc") + output, err := cmd.Output() + suite.assert.Nil(err) + coresStr := strings.TrimSpace(string(output)) + cores, err := strconv.Atoi(coresStr) + suite.assert.Nil(err) + suite.assert.EqualValues(tobj.blockCache.workers, uint32(3*cores)) + suite.assert.EqualValues(tobj.blockCache.prefetch, uint32(2*cores)) suite.assert.EqualValues(tobj.blockCache.noPrefetch, false) suite.assert.NotNil(tobj.blockCache.blockPool) suite.assert.NotNil(tobj.blockCache.threadPool) } +func (suite *blockCacheTestSuite) TestMemory() { + emptyConfig := "read-only: true\n\nblock_cache:\n block-size-mb: 16\n" + tobj, err := setupPipeline(emptyConfig) + defer tobj.cleanupPipeline() + + suite.assert.Nil(err) + suite.assert.Equal(tobj.blockCache.Name(), "block_cache") + cmd := exec.Command("bash", "-c", "free -b | grep Mem | awk '{print $4}'") + var out bytes.Buffer + cmd.Stdout = &out + err = cmd.Run() + suite.assert.Nil(err) + free, err := strconv.Atoi(strings.TrimSpace(out.String())) + suite.assert.Nil(err) + expected := uint64(0.8 * float64(free)) + actual := tobj.blockCache.memSize + difference := math.Abs(float64(actual) - float64(expected)) + tolerance := 0.10 * float64(math.Max(float64(actual), float64(expected))) + suite.assert.LessOrEqual(difference, tolerance) +} + +func (suite *blockCacheTestSuite) TestFreeDiskSpace() { + disk_cache_path := getFakeStoragePath("fake_storage") + config := fmt.Sprintf("read-only: true\n\nblock_cache:\n block-size-mb: 1\n path: %s", disk_cache_path) + tobj, err := setupPipeline(config) + defer tobj.cleanupPipeline() + + suite.assert.Nil(err) + suite.assert.Equal(tobj.blockCache.Name(), "block_cache") + + cmd := exec.Command("bash", "-c", fmt.Sprintf("df -B1 %s | awk 'NR==2{print $4}'", disk_cache_path)) + var out bytes.Buffer + cmd.Stdout = &out + err = cmd.Run() + suite.assert.Nil(err) + freeDisk, err := strconv.Atoi(strings.TrimSpace(out.String())) + suite.assert.Nil(err) + expected := uint64(0.8 * float64(freeDisk)) + actual := tobj.blockCache.diskSize + difference := math.Abs(float64(actual) - float64(expected)) + tolerance := 0.10 * float64(math.Max(float64(actual), float64(expected))) + suite.assert.LessOrEqual(difference, tolerance, "mssg:", actual, expected) +} + func (suite *blockCacheTestSuite) TestInvalidPrefetchCount() { cfg := "read-only: true\n\nblock_cache:\n block-size-mb: 16\n mem-size-mb: 500\n prefetch: 8\n parallelism: 10\n path: abcd\n disk-size-mb: 100\n disk-timeout-sec: 5" tobj, err := setupPipeline(cfg) @@ -233,7 +287,7 @@ func (suite *blockCacheTestSuite) TestManualConfig() { suite.assert.EqualValues(tobj.blockCache.blockSize, 16*_1MB) suite.assert.EqualValues(tobj.blockCache.memSize, 500*_1MB) suite.assert.EqualValues(tobj.blockCache.workers, 10) - suite.assert.EqualValues(tobj.blockCache.diskSize, 100) + suite.assert.EqualValues(tobj.blockCache.diskSize, 100*_1MB) 
suite.assert.EqualValues(tobj.blockCache.diskTimeout, 5) suite.assert.EqualValues(tobj.blockCache.prefetch, 12) suite.assert.EqualValues(tobj.blockCache.workers, 10) @@ -256,7 +310,7 @@ func (suite *blockCacheTestSuite) TestOpenFileFail() { suite.assert.Contains(err.Error(), "no such file or directory") } -func (suite *blockCacheTestSuite) TestFileOpneClose() { +func (suite *blockCacheTestSuite) TestFileOpenClose() { tobj, err := setupPipeline("") defer tobj.cleanupPipeline() diff --git a/setup/baseConfig.yaml b/setup/baseConfig.yaml index a02a6139e..6e74cbf75 100644 --- a/setup/baseConfig.yaml +++ b/setup/baseConfig.yaml @@ -56,12 +56,12 @@ libfuse: # Block cache related configuration block_cache: block-size-mb: - mem-size-mb: + mem-size-mb: path: - disk-size-mb: + disk-size-mb: disk-timeout-sec: - prefetch: - parallelism: + prefetch: + parallelism: # Disk cache related configuration file_cache: diff --git a/setup/devConfig.yaml b/setup/devConfig.yaml index bfe40dbaf..d4140c6af 100644 --- a/setup/devConfig.yaml +++ b/setup/devConfig.yaml @@ -78,12 +78,12 @@ stream: # Block cache related configuration block_cache: block-size-mb: - mem-size-mb: + mem-size-mb: path: - disk-size-mb: + disk-size-mb: disk-timeout-sec: - prefetch: - parallelism: + prefetch: + parallelism: # Disk cache related configuration file_cache: From 3d06a5cfec84b95df3fe1190f899511f94cb0c9b Mon Sep 17 00:00:00 2001 From: ashruti-msft <137055338+ashruti-msft@users.noreply.github.com> Date: Mon, 24 Jun 2024 10:39:36 +0530 Subject: [PATCH 07/73] fc autoconfig (#1439) --- CHANGELOG.md | 3 ++- README.md | 2 +- component/block_cache/block_cache_test.go | 2 +- component/file_cache/file_cache.go | 14 ++++++++++++- component/file_cache/file_cache_test.go | 24 +++++++++++++++++++++++ setup/baseConfig.yaml | 2 +- setup/devConfig.yaml | 2 +- 7 files changed, 43 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 12383a513..61d0e07e3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,9 +9,10 @@ - LFU policy in file cache has been deprecated. - Default values, if not assigned in config, for the following parameters in block-cache are calculated as follows: - Memory preallocated for Block-Cache is 80% of free memory - - Disk Cache size is 80% of free disk space + - Disk Cache Size is 80% of free disk space - Prefetch is 2 times number of CPU cores - Parallelism is 3 times the number of CPU cores +- Default value of Disk Cache Size in File Cache is 80% of free disk space ## 2.3.0 (2024-05-16) **Bug Fixes** diff --git a/README.md b/README.md index d9fa1c1ae..5942154f3 100755 --- a/README.md +++ b/README.md @@ -130,7 +130,7 @@ To learn about a specific command, just include the name of the command (For exa - File cache options * `--file-cache-timeout=`: Timeout for which file is cached on local system. * `--tmp-path=`: The path to the file cache. - * `--cache-size-mb=`: Amount of disk cache that can be used by blobfuse. + * `--cache-size-mb=`: Amount of disk cache that can be used by blobfuse. Default - 80% of free disk space. * `--high-disk-threshold=`: If local cache usage exceeds this, start early eviction of files from cache. * `--low-disk-threshold=`: If local cache usage comes below this threshold then stop early eviction. * `--sync-to-flush=false` : Sync call will force upload a file to storage container if this is set to true, otherwise it just evicts file from local cache. 
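The high and low disk thresholds in the README snippet above form a classic watermark pair: eviction starts once usage crosses the high mark and keeps running until usage falls below the low mark. A toy model of that hysteresis, assuming nothing about the actual file-cache implementation beyond the README's description:

package main

import "fmt"

// evictor models the --high-disk-threshold / --low-disk-threshold
// behaviour: eviction turns on above high and stays on until usage
// drops below low, avoiding flapping near a single cutoff.
type evictor struct {
	high, low float64 // percent of cache capacity
	active    bool
}

func (e *evictor) shouldEvict(usagePct float64) bool {
	if e.active {
		if usagePct < e.low {
			e.active = false
		}
	} else if usagePct > e.high {
		e.active = true
	}
	return e.active
}

func main() {
	e := evictor{high: 90, low: 60}
	for _, u := range []float64{50, 92, 85, 70, 55, 58} {
		fmt.Printf("usage %.0f%% -> evict=%v\n", u, e.shouldEvict(u))
	}
}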
diff --git a/component/block_cache/block_cache_test.go b/component/block_cache/block_cache_test.go index e748b7610..29a534515 100644 --- a/component/block_cache/block_cache_test.go +++ b/component/block_cache/block_cache_test.go @@ -220,7 +220,7 @@ func (suite *blockCacheTestSuite) TestFreeDiskSpace() { actual := tobj.blockCache.diskSize difference := math.Abs(float64(actual) - float64(expected)) tolerance := 0.10 * float64(math.Max(float64(actual), float64(expected))) - suite.assert.LessOrEqual(difference, tolerance, "mssg:", actual, expected) + suite.assert.LessOrEqual(difference, tolerance) } func (suite *blockCacheTestSuite) TestInvalidPrefetchCount() { diff --git a/component/file_cache/file_cache.go b/component/file_cache/file_cache.go index e7ebedbab..8e38770d2 100644 --- a/component/file_cache/file_cache.go +++ b/component/file_cache/file_cache.go @@ -241,7 +241,6 @@ func (c *FileCache) Configure(_ bool) error { c.cleanupOnStart = conf.CleanupOnStart c.policyTrace = conf.EnablePolicyTrace c.offloadIO = conf.OffloadIO - c.maxCacheSize = conf.MaxSizeMB c.syncToFlush = conf.SyncToFlush c.syncToDelete = !conf.SyncNoOp c.refreshSec = conf.RefreshSec @@ -276,6 +275,19 @@ func (c *FileCache) Configure(_ bool) error { } } + var stat syscall.Statfs_t + err = syscall.Statfs(c.tmpPath, &stat) + if err != nil { + log.Err("FileCache::Configure : config error %s [%s]. Assigning a default value of 4GB or if any value is assigned to .disk-size-mb in config.", c.Name(), err.Error()) + c.maxCacheSize = 4192 * MB + } else { + c.maxCacheSize = 0.8 * float64(stat.Bavail) * float64(stat.Bsize) + } + + if config.IsSet(compName+".max-size-mb") && conf.MaxSizeMB != 0 { + c.maxCacheSize = conf.MaxSizeMB + } + if !isLocalDirEmpty(c.tmpPath) && !c.allowNonEmpty { log.Err("FileCache: config error %s directory is not empty", c.tmpPath) return fmt.Errorf("config error in %s [%s]", c.Name(), "temp directory not empty") diff --git a/component/file_cache/file_cache_test.go b/component/file_cache/file_cache_test.go index c3f0d4e39..4f51cf3f4 100644 --- a/component/file_cache/file_cache_test.go +++ b/component/file_cache/file_cache_test.go @@ -34,11 +34,15 @@ package file_cache import ( + "bytes" "context" "fmt" + "math" "math/rand" "os" + "os/exec" "path/filepath" + "strconv" "strings" "syscall" "testing" @@ -189,6 +193,26 @@ func (suite *fileCacheTestSuite) TestConfig() { suite.assert.Equal(suite.fileCache.cleanupOnStart, cleanupOnStart) } +func (suite *fileCacheTestSuite) TestDefaultCacheSize() { + defer suite.cleanupTest() + // Setup + config := fmt.Sprintf("file_cache:\n path: %s\n", suite.cache_path) + suite.setupTestHelper(config) // setup a new file cache with a custom config (teardown will occur after the test as usual) + + cmd := exec.Command("bash", "-c", fmt.Sprintf("df -B1 %s | awk 'NR==2{print $4}'", suite.cache_path)) + var out bytes.Buffer + cmd.Stdout = &out + err := cmd.Run() + suite.assert.Nil(err) + freeDisk, err := strconv.Atoi(strings.TrimSpace(out.String())) + suite.assert.Nil(err) + expected := uint64(0.8 * float64(freeDisk)) + actual := suite.fileCache.maxCacheSize + difference := math.Abs(float64(actual) - float64(expected)) + tolerance := 0.10 * float64(math.Max(float64(actual), float64(expected))) + suite.assert.LessOrEqual(difference, tolerance, "mssg:", actual, expected) +} + func (suite *fileCacheTestSuite) TestConfigPolicyTimeout() { defer suite.cleanupTest() suite.cleanupTest() // teardown the default file cache generated diff --git a/setup/baseConfig.yaml b/setup/baseConfig.yaml index 
6e74cbf75..54bb54f04 100644 --- a/setup/baseConfig.yaml +++ b/setup/baseConfig.yaml @@ -70,7 +70,7 @@ file_cache: # Optional timeout-sec: - max-size-mb: + max-size-mb: sync-to-flush: true|false refresh-sec: ignore-sync: true|false diff --git a/setup/devConfig.yaml b/setup/devConfig.yaml index d4140c6af..04c1fd876 100644 --- a/setup/devConfig.yaml +++ b/setup/devConfig.yaml @@ -92,7 +92,7 @@ file_cache: # Optional timeout-sec: - max-size-mb: + max-size-mb: allow-non-empty-temp: true|false cleanup-on-start: true|false sync-to-flush: true|false From 5eabe8a1f5f8325cf880d539c15a503be4d38cb2 Mon Sep 17 00:00:00 2001 From: Vikas Bhansali <64532198+vibhansa-msft@users.noreply.github.com> Date: Tue, 9 Jul 2024 15:36:23 +0530 Subject: [PATCH 08/73] Create PerfTest.yml (#1349) * Create perf test runner to regularly benchmark performance --- .github/template/generate_page/action.yml | 63 +++ .github/workflows/benchmark.yml | 251 ++++++++++++ build.sh | 2 +- component/azstorage/block_blob.go | 2 +- .../create/1_1000_files_in_10_threads.fio | 14 + .../create/2_1000_files_in_100_threads.fio | 14 + .../create/3_1l_files_in_20_threads.fio | 14 + .../high_threads/1_seq_write_112_threads.fio | 22 ++ .../high_threads/2_seq_read_128_threads.fio | 22 ++ .../high_threads/3_rand_read_128_threads.fio | 23 ++ perf_testing/config/read/1_seq_read.fio | 13 + perf_testing/config/read/2_rand_read.fio | 13 + perf_testing/config/read/3_seq_read_small.fio | 13 + .../config/read/4_rand_read_small.fio | 13 + .../config/read/5_seq_read_directio.fio | 14 + .../config/read/6_rand_read_directio.fio | 14 + .../config/read/7_seq_read_4thread.fio | 14 + .../config/read/8_seq_read_16thread.fio | 14 + .../config/read/9_rand_read_4thread.fio | 14 + perf_testing/config/write/1_seq_write.fio | 15 + .../config/write/2_seq_write_directio.fio | 16 + .../config/write/3_seq_write_4thread.fio | 15 + .../config/write/4_seq_write_16thread.fio | 15 + perf_testing/scripts/fio_bench.sh | 370 ++++++++++++++++++ perf_testing/scripts/highspeed_create.py | 40 ++ perf_testing/scripts/highspeed_read.py | 51 +++ perf_testing/scripts/read.py | 33 ++ perf_testing/scripts/rename.py | 61 +++ perf_testing/scripts/write.py | 35 ++ sampleDataSetFuseConfig.json | 17 - testdata/config/azure_block_bench.yaml | 35 ++ 31 files changed, 1233 insertions(+), 19 deletions(-) create mode 100644 .github/template/generate_page/action.yml create mode 100644 .github/workflows/benchmark.yml create mode 100755 perf_testing/config/create/1_1000_files_in_10_threads.fio create mode 100755 perf_testing/config/create/2_1000_files_in_100_threads.fio create mode 100755 perf_testing/config/create/3_1l_files_in_20_threads.fio create mode 100755 perf_testing/config/high_threads/1_seq_write_112_threads.fio create mode 100755 perf_testing/config/high_threads/2_seq_read_128_threads.fio create mode 100755 perf_testing/config/high_threads/3_rand_read_128_threads.fio create mode 100755 perf_testing/config/read/1_seq_read.fio create mode 100755 perf_testing/config/read/2_rand_read.fio create mode 100755 perf_testing/config/read/3_seq_read_small.fio create mode 100755 perf_testing/config/read/4_rand_read_small.fio create mode 100755 perf_testing/config/read/5_seq_read_directio.fio create mode 100755 perf_testing/config/read/6_rand_read_directio.fio create mode 100755 perf_testing/config/read/7_seq_read_4thread.fio create mode 100755 perf_testing/config/read/8_seq_read_16thread.fio create mode 100755 perf_testing/config/read/9_rand_read_4thread.fio create mode 100755 
perf_testing/config/write/1_seq_write.fio create mode 100755 perf_testing/config/write/2_seq_write_directio.fio create mode 100644 perf_testing/config/write/3_seq_write_4thread.fio create mode 100644 perf_testing/config/write/4_seq_write_16thread.fio create mode 100755 perf_testing/scripts/fio_bench.sh create mode 100755 perf_testing/scripts/highspeed_create.py create mode 100755 perf_testing/scripts/highspeed_read.py create mode 100755 perf_testing/scripts/read.py create mode 100644 perf_testing/scripts/rename.py create mode 100755 perf_testing/scripts/write.py delete mode 100644 sampleDataSetFuseConfig.json create mode 100644 testdata/config/azure_block_bench.yaml diff --git a/.github/template/generate_page/action.yml b/.github/template/generate_page/action.yml new file mode 100644 index 000000000..df24959fb --- /dev/null +++ b/.github/template/generate_page/action.yml @@ -0,0 +1,63 @@ +name: generate_page +description: "Generate github page for performance benchmark" + +inputs: + TEST: + required: true + description: "Test to run" + TYPE: + required: true + description: "Type of storage account" + TOKEN: + required: true + description: "Token for checkin" + +runs: + using: "composite" + + steps: + # Pre-run cleanup + - name: "Cleanup before test" + shell: bash + run: | + rm -rf /mnt/blob_mnt/* + rm -rf /mnt/tempcache/* + + # Run the benchmark script + - name: "Run Benchmark Script : ${{ inputs.TEST }}" + shell: bash + run: | + ./perf_testing/scripts/fio_bench.sh /mnt/blob_mnt ${{ inputs.TEST }} + + # Push the bandwidth results and publish the graphs + - name: "Update Bandwidth Results : ${{ inputs.TEST }}" + uses: benchmark-action/github-action-benchmark@v1 + with: + output-file-path: ${{ inputs.TEST }}/bandwidth_results.json + tool: 'customBiggerIsBetter' + alert-threshold: "160%" + max-items-in-chart: 100 + github-token: ${{ inputs.TOKEN }} + fail-on-alert: true + auto-push: true + comment-on-alert: true + gh-pages-branch: benchmarks + benchmark-data-dir-path: ${{ inputs.TYPE }}/bandwidth/${{ inputs.TEST }} + + # Push the latency results and publish the graphs + - name: "Update Latency Results : ${{ inputs.TEST }}" + uses: benchmark-action/github-action-benchmark@v1 + with: + output-file-path: ${{ inputs.TEST }}/latency_results.json + tool: 'customSmallerIsBetter' + alert-threshold: "160%" + max-items-in-chart: 100 + github-token: ${{ inputs.TOKEN }} + fail-on-alert: true + auto-push: true + comment-on-alert: true + gh-pages-branch: benchmarks + benchmark-data-dir-path: ${{ inputs.TYPE }}/latency/${{ inputs.TEST }} + + + \ No newline at end of file diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml new file mode 100644 index 000000000..6a9e84eb7 --- /dev/null +++ b/.github/workflows/benchmark.yml @@ -0,0 +1,251 @@ +name: Benchmark +on: + schedule: + - cron: '0 0,3,6,9,12,15,18,21 * * *' + push: + branches: + - main + +jobs: + PerfTesting: + strategy: + matrix: + TestType: ["premium", "standard"] + # TestType: ["premium", "standard", "premium_hns", "standard_hns"] + + runs-on: [self-hosted, 1ES.Pool=blobfuse2-benchmark] + timeout-minutes: 360 + + permissions: + id-token: write + contents: write + pages: write + + steps: + # Print the host info + - name: 'Host info' + run: hostnamectl + + # Install Fuse3 + - name: "Install Fuse3" + run: | + sudo apt-get update + sudo apt-get install fuse3 libfuse3-dev gcc -y + + # Install Tools + - name: "Install Tools" + run: | + sudo apt-get install fio jq python3 -y + + # Checkout main branch + - name: 'Checkout Blobfuse2' 
+ uses: actions/checkout@v4.1.1 + # with: + # ref: vibhansa/perftestrunner + + # Install GoLang + - name: "Install Go" + run: | + ./go_installer.sh ../ + go version + + # Build Blobfuse2 + - name: "Build Blobfuse2" + run: | + ./build.sh + + # Run binary and validate the version + - name: "Validate Version" + run: | + sudo cp ./blobfuse2 /usr/bin/ + which blobfuse2 + blobfuse2 --version + + - name: "Create Env variables for account name and key" + run: | + if [ "${{ matrix.TestType }}" == "standard" ]; then + echo "Create standard account env" + echo "AZURE_STORAGE_ACCOUNT=${{ secrets.STANDARD_ACCOUNT }}" >> $GITHUB_ENV + echo "AZURE_STORAGE_ACCESS_KEY=${{ secrets.STANDARD_KEY }}" >> $GITHUB_ENV + elif [ "${{ matrix.TestType }}" == "premium" ]; then + echo "Create premium account env" + echo "AZURE_STORAGE_ACCOUNT=${{ secrets.PREMIUM_ACCOUNT }}" >> $GITHUB_ENV + echo "AZURE_STORAGE_ACCESS_KEY=${{ secrets.PREMIUM_KEY }}" >> $GITHUB_ENV + elif [ "${{ matrix.TestType }}" == "standard_hns" ]; then + echo "Create standard hns account env" + echo "AZURE_STORAGE_ACCOUNT=${{ secrets.STANDARD_HNS_ACCOUNT }}" >> $GITHUB_ENV + echo "AZURE_STORAGE_ACCESS_KEY=${{ secrets.STANDARD_HNS_KEY }}" >> $GITHUB_ENV + elif [ "${{ matrix.TestType }}" == "premium_hns" ]; then + echo "Create premium hns account env" + echo "AZURE_STORAGE_ACCOUNT=${{ secrets.PREMIUM_HNS_ACCOUNT }}" >> $GITHUB_ENV + echo "AZURE_STORAGE_ACCESS_KEY=${{ secrets.PREMIUM_HNS_KEY }}" >> $GITHUB_ENV + fi + + # Create the config file for testing + - name: "Create config file for account type: ${{ matrix.TestType }}" + run: | + blobfuse2 gen-test-config --config-file=azure_block_bench.yaml --container-name=${{ secrets.BENCH_CONTAINER }} --output-file=./config.yaml + cat ./config.yaml + + # Create the mount directories + - name: "Create mount path" + run: | + sudo mkdir -p /mnt/blob_mnt + sudo mkdir -p /mnt/tempcache + sudo chmod 777 /mnt/blob_mnt + sudo chmod 777 /mnt/tempcache + + # --------------------------------------------------------------------------------------------------------------------------------------------------- + # Run the basic tests using FIO + + # Run the Read tests + - name: "Read Test" + uses: "./.github/template/generate_page" + with: + TEST: "read" + TYPE: ${{ matrix.TestType }} + TOKEN: ${{ secrets.GITHUB_TOKEN }} + + # Run the tests with a high number of threads + - name: "High threads Test" + uses: "./.github/template/generate_page" + with: + TEST: "highlyparallel" + TYPE: ${{ matrix.TestType }} + TOKEN: ${{ secrets.GITHUB_TOKEN }} + + # Run the Write tests + - name: "Write Test" + uses: "./.github/template/generate_page" + with: + TEST: "write" + TYPE: ${{ matrix.TestType }} + TOKEN: ${{ secrets.GITHUB_TOKEN }} + + # Run the Create tests + - name: "Create File Test" + uses: "./.github/template/generate_page" + with: + TEST: "create" + TYPE: ${{ matrix.TestType }} + TOKEN: ${{ secrets.GITHUB_TOKEN }} + # --------------------------------------------------------------------------------------- + + + # The tests below need to run separately as their output format is different + # --------------------------------------------------------------------------------------------------- + # Run the List tests + # this must always run after the create tests + - name: "List File Test" + shell: bash + run: | + rm -rf /mnt/blob_mnt/* + rm -rf /mnt/tempcache/* + ./perf_testing/scripts/fio_bench.sh /mnt/blob_mnt list + + - name: "Update Benchmark Results : List" + uses: benchmark-action/github-action-benchmark@v1 + with: + output-file-path:
list/list_results.json + tool: 'customSmallerIsBetter' + alert-threshold: "160%" + max-items-in-chart: 100 + github-token: ${{ secrets.GITHUB_TOKEN }} + fail-on-alert: true + auto-push: true + comment-on-alert: true + gh-pages-branch: benchmarks + benchmark-data-dir-path: ${{ matrix.TestType }}/time/list + + # --------------------------------------------------------------------------------------- + # Run app-based tests + # This needs to run separately as its output format is different + - name: "App based Test" + shell: bash + run: | + rm -rf /mnt/blob_mnt/* + rm -rf /mnt/tempcache/* + ./perf_testing/scripts/fio_bench.sh /mnt/blob_mnt app + + + - name: "Update Bandwidth Results : App" + uses: benchmark-action/github-action-benchmark@v1 + with: + output-file-path: app/app_bandwidth.json + tool: 'customBiggerIsBetter' + alert-threshold: "160%" + max-items-in-chart: 100 + github-token: ${{ secrets.GITHUB_TOKEN }} + fail-on-alert: true + auto-push: true + comment-on-alert: true + gh-pages-branch: benchmarks + benchmark-data-dir-path: ${{ matrix.TestType }}/bandwidth/app + + - name: "Update Latency Results : App" + uses: benchmark-action/github-action-benchmark@v1 + with: + output-file-path: app/app_time.json + tool: 'customSmallerIsBetter' + alert-threshold: "160%" + max-items-in-chart: 100 + github-token: ${{ secrets.GITHUB_TOKEN }} + fail-on-alert: true + auto-push: true + comment-on-alert: true + gh-pages-branch: benchmarks + benchmark-data-dir-path: ${{ matrix.TestType }}/time/app + + - name: "Update Bandwidth Results : High Speed App" + uses: benchmark-action/github-action-benchmark@v1 + with: + output-file-path: app/highapp_bandwidth.json + tool: 'customBiggerIsBetter' + alert-threshold: "160%" + max-items-in-chart: 100 + github-token: ${{ secrets.GITHUB_TOKEN }} + fail-on-alert: true + auto-push: true + comment-on-alert: true + gh-pages-branch: benchmarks + benchmark-data-dir-path: ${{ matrix.TestType }}/bandwidth/highapp + + - name: "Update Latency Results : High Speed App" + uses: benchmark-action/github-action-benchmark@v1 + with: + output-file-path: app/highapp_time.json + tool: 'customSmallerIsBetter' + alert-threshold: "160%" + max-items-in-chart: 100 + github-token: ${{ secrets.GITHUB_TOKEN }} + fail-on-alert: true + auto-push: true + comment-on-alert: true + gh-pages-branch: benchmarks + benchmark-data-dir-path: ${{ matrix.TestType }}/time/highapp + + # --------------------------------------------------------------------------------------- + # Run Rename tests + # This needs to run separately as its output format is different + - name: "Rename Test" + shell: bash + run: | + rm -rf /mnt/blob_mnt/* + rm -rf /mnt/tempcache/* + ./perf_testing/scripts/fio_bench.sh /mnt/blob_mnt rename + + - name: "Update Latency Results : Rename" + uses: benchmark-action/github-action-benchmark@v1 + with: + output-file-path: rename/rename_time.json + tool: 'customSmallerIsBetter' + alert-threshold: "160%" + max-items-in-chart: 100 + github-token: ${{ secrets.GITHUB_TOKEN }} + fail-on-alert: true + auto-push: true + comment-on-alert: true + gh-pages-branch: benchmarks + benchmark-data-dir-path: ${{ matrix.TestType }}/time/rename + # --------------------------------------------------------------------------------------- + \ No newline at end of file diff --git a/build.sh index 362adb298..8e0e5cb87 100755 --- a/build.sh +++ b/build.sh @@ -1,5 +1,5 @@ #!/bin/bash - +echo "Using Go - $(go version)" if [ "$1" == "fuse2" ] then # Build blobfuse2 with fuse2 diff --git a/component/azstorage/block_blob.go
b/component/azstorage/block_blob.go index 8a7314b43..8745b6c44 100644 --- a/component/azstorage/block_blob.go +++ b/component/azstorage/block_blob.go @@ -496,7 +496,7 @@ func (bb *BlockBlob) getAttrUsingList(name string) (attr *internal.ObjAttr, err } if err == nil { - log.Err("BlockBlob::getAttrUsingList : blob %s does not exist", name) + log.Warn("BlockBlob::getAttrUsingList : blob %s does not exist", name) return nil, syscall.ENOENT } diff --git a/perf_testing/config/create/1_1000_files_in_10_threads.fio b/perf_testing/config/create/1_1000_files_in_10_threads.fio new file mode 100755 index 000000000..b904f9e22 --- /dev/null +++ b/perf_testing/config/create/1_1000_files_in_10_threads.fio @@ -0,0 +1,14 @@ +[global] +create_on_open=1 +ioengine=filecreate +fallocate=none +filesize=1M +bs=256k +filename_format=$jobname.$jobnum.$filenum +nrfiles=100 +group_reporting + +[create_1000_files_in_10_threads] +numjobs=10 +rw=write + diff --git a/perf_testing/config/create/2_1000_files_in_100_threads.fio b/perf_testing/config/create/2_1000_files_in_100_threads.fio new file mode 100755 index 000000000..294f83886 --- /dev/null +++ b/perf_testing/config/create/2_1000_files_in_100_threads.fio @@ -0,0 +1,14 @@ +[global] +create_on_open=1 +ioengine=filecreate +fallocate=none +filesize=1M +bs=256k +filename_format=$jobname.$jobnum.$filenum +nrfiles=10 +group_reporting + +[create_1000_files_in_100_threads] +numjobs=100 +rw=write + diff --git a/perf_testing/config/create/3_1l_files_in_20_threads.fio b/perf_testing/config/create/3_1l_files_in_20_threads.fio new file mode 100755 index 000000000..2e470e572 --- /dev/null +++ b/perf_testing/config/create/3_1l_files_in_20_threads.fio @@ -0,0 +1,14 @@ +[global] +create_on_open=1 +ioengine=filecreate +fallocate=none +filesize=1K +bs=1k +filename_format=$jobname.$jobnum.$filenum +openfiles=500 +nrfiles=5000 +group_reporting + +[create_1l_files_in_20_threads] +numjobs=20 +rw=write \ No newline at end of file diff --git a/perf_testing/config/high_threads/1_seq_write_112_threads.fio b/perf_testing/config/high_threads/1_seq_write_112_threads.fio new file mode 100755 index 000000000..a4165b830 --- /dev/null +++ b/perf_testing/config/high_threads/1_seq_write_112_threads.fio @@ -0,0 +1,22 @@ +[global] +ioengine=libaio +direct=1 +fadvise_hint=0 +verify=0 +iodepth=64 +invalidate=1 +ramp_time=10s +runtime=60s +time_based=1 +nrfiles=1 +thread=1 +group_reporting +allrandrepeat=1 +filename_format=$jobname.$jobnum.$filenum +bs=1M +size=1G + +[seq_write_112_thread] +stonewall +numjobs=112 +rw=write \ No newline at end of file diff --git a/perf_testing/config/high_threads/2_seq_read_128_threads.fio b/perf_testing/config/high_threads/2_seq_read_128_threads.fio new file mode 100755 index 000000000..56b1f7fbc --- /dev/null +++ b/perf_testing/config/high_threads/2_seq_read_128_threads.fio @@ -0,0 +1,22 @@ +[global] +ioengine=libaio +direct=1 +fadvise_hint=0 +verify=0 +iodepth=64 +invalidate=1 +ramp_time=10s +runtime=60s +time_based=1 +nrfiles=1 +thread=1 +group_reporting +allrandrepeat=1 +filename_format=$jobname.$jobnum.$filenum +bs=1M +size=1G + +[seq_read_128_thread] +stonewall +numjobs=128 +rw=read \ No newline at end of file diff --git a/perf_testing/config/high_threads/3_rand_read_128_threads.fio b/perf_testing/config/high_threads/3_rand_read_128_threads.fio new file mode 100755 index 000000000..cc959c3f3 --- /dev/null +++ b/perf_testing/config/high_threads/3_rand_read_128_threads.fio @@ -0,0 +1,23 @@ +[global] +ioengine=libaio +direct=1 +fadvise_hint=0 +verify=0 +rw=read 
+iodepth=64 +invalidate=1 +ramp_time=10s +runtime=60s +time_based=1 +nrfiles=1 +thread=1 +group_reporting +allrandrepeat=1 +filename_format=$jobname.$jobnum.$filenum +bs=1M +size=1G + +[rand_read_128_thread] +stonewall +numjobs=128 +rw=randread \ No newline at end of file diff --git a/perf_testing/config/read/1_seq_read.fio b/perf_testing/config/read/1_seq_read.fio new file mode 100755 index 000000000..be37043b9 --- /dev/null +++ b/perf_testing/config/read/1_seq_read.fio @@ -0,0 +1,13 @@ +[global] +name=blobfuse_benchmark +bs=256k +runtime=30s +time_based +filename=read_fio.data +group_reporting + +[sequential_read] +size=100G +rw=read +ioengine=sync +fallocate=none \ No newline at end of file diff --git a/perf_testing/config/read/2_rand_read.fio b/perf_testing/config/read/2_rand_read.fio new file mode 100755 index 000000000..c45507f2a --- /dev/null +++ b/perf_testing/config/read/2_rand_read.fio @@ -0,0 +1,13 @@ +[global] +name=blobfuse_benchmark +bs=256k +runtime=30s +time_based +filename=read_fio.data +group_reporting + +[random_read] +size=100G +rw=randread +ioengine=sync +fallocate=none \ No newline at end of file diff --git a/perf_testing/config/read/3_seq_read_small.fio b/perf_testing/config/read/3_seq_read_small.fio new file mode 100755 index 000000000..f3c3c428b --- /dev/null +++ b/perf_testing/config/read/3_seq_read_small.fio @@ -0,0 +1,13 @@ +[global] +name=blobfuse_benchmark +bs=256k +runtime=30s +time_based +filename=read_fio.data +group_reporting + +[sequential_read_small_file] +size=5m +rw=read +ioengine=sync +fallocate=none \ No newline at end of file diff --git a/perf_testing/config/read/4_rand_read_small.fio b/perf_testing/config/read/4_rand_read_small.fio new file mode 100755 index 000000000..45396a777 --- /dev/null +++ b/perf_testing/config/read/4_rand_read_small.fio @@ -0,0 +1,13 @@ +[global] +name=blobfuse_benchmark +bs=256k +runtime=30s +time_based +filename=read_small_fio.data +group_reporting + +[random_read_small_file] +size=5m +rw=randread +ioengine=sync +fallocate=none diff --git a/perf_testing/config/read/5_seq_read_directio.fio b/perf_testing/config/read/5_seq_read_directio.fio new file mode 100755 index 000000000..0d8b2ee2d --- /dev/null +++ b/perf_testing/config/read/5_seq_read_directio.fio @@ -0,0 +1,14 @@ +[global] +name=blobfuse_benchmark +bs=256k +runtime=30s +time_based +filename=read_fio.data +group_reporting + +[sequential_read_direct_io] +size=100G +rw=read +ioengine=sync +fallocate=none +direct=1 \ No newline at end of file diff --git a/perf_testing/config/read/6_rand_read_directio.fio b/perf_testing/config/read/6_rand_read_directio.fio new file mode 100755 index 000000000..f46fc4aaa --- /dev/null +++ b/perf_testing/config/read/6_rand_read_directio.fio @@ -0,0 +1,14 @@ +[global] +name=blobfuse_benchmark +bs=256k +runtime=30s +time_based +filename=read_fio.data +group_reporting + +[random_read_direct_io] +size=100G +rw=randread +ioengine=sync +fallocate=none +direct=1 \ No newline at end of file diff --git a/perf_testing/config/read/7_seq_read_4thread.fio b/perf_testing/config/read/7_seq_read_4thread.fio new file mode 100755 index 000000000..e009ac9c1 --- /dev/null +++ b/perf_testing/config/read/7_seq_read_4thread.fio @@ -0,0 +1,14 @@ +[global] +name=blobfuse_benchmark +bs=256k +runtime=30s +time_based +filename=read_fio.data +group_reporting + +[sequential_read_4_threads] +size=100G +rw=read +ioengine=sync +fallocate=none +numjobs=4 \ No newline at end of file diff --git a/perf_testing/config/read/8_seq_read_16thread.fio 
b/perf_testing/config/read/8_seq_read_16thread.fio new file mode 100755 index 000000000..f1ff7cf6e --- /dev/null +++ b/perf_testing/config/read/8_seq_read_16thread.fio @@ -0,0 +1,14 @@ +[global] +name=blobfuse_benchmark +bs=256k +runtime=30s +time_based +filename=read_fio.data +group_reporting + +[sequential_read_16_threads] +size=100G +rw=read +ioengine=sync +fallocate=none +numjobs=16 \ No newline at end of file diff --git a/perf_testing/config/read/9_rand_read_4thread.fio b/perf_testing/config/read/9_rand_read_4thread.fio new file mode 100755 index 000000000..f11e8ab02 --- /dev/null +++ b/perf_testing/config/read/9_rand_read_4thread.fio @@ -0,0 +1,14 @@ +[global] +name=blobfuse_benchmark +bs=256k +runtime=30s +time_based +filename=read_fio.data +group_reporting + +[random_read_4_threads] +size=100G +rw=randread +ioengine=sync +fallocate=none +numjobs=4 \ No newline at end of file diff --git a/perf_testing/config/write/1_seq_write.fio b/perf_testing/config/write/1_seq_write.fio new file mode 100755 index 000000000..3d5062a40 --- /dev/null +++ b/perf_testing/config/write/1_seq_write.fio @@ -0,0 +1,15 @@ +[global] +name=blobfuse_benchmark +bs=256k +runtime=30s +time_based +filename=write_fio.data +group_reporting + +[sequential_write] +size=100G +rw=write +ioengine=sync +fallocate=none +create_on_open=1 +unlink=1 \ No newline at end of file diff --git a/perf_testing/config/write/2_seq_write_directio.fio b/perf_testing/config/write/2_seq_write_directio.fio new file mode 100755 index 000000000..0d7d454bc --- /dev/null +++ b/perf_testing/config/write/2_seq_write_directio.fio @@ -0,0 +1,16 @@ +[global] +name=blobfuse_benchmark +bs=256k +runtime=30s +time_based +filename=write_fio.data +group_reporting + +[sequential_write_directio] +size=100G +rw=write +ioengine=sync +fallocate=none +create_on_open=1 +unlink=1 +direct=1 diff --git a/perf_testing/config/write/3_seq_write_4thread.fio b/perf_testing/config/write/3_seq_write_4thread.fio new file mode 100644 index 000000000..f8a0c5f34 --- /dev/null +++ b/perf_testing/config/write/3_seq_write_4thread.fio @@ -0,0 +1,15 @@ +[global] +name=blobfuse_benchmark +bs=256k +runtime=30s +time_based +filename_format=$jobname.$jobnum.$filenum +group_reporting + +[sequential_write_4_threads] +size=100G +rw=write +ioengine=sync +fallocate=none +create_on_open=1 +unlink=1 \ No newline at end of file diff --git a/perf_testing/config/write/4_seq_write_16thread.fio b/perf_testing/config/write/4_seq_write_16thread.fio new file mode 100644 index 000000000..42af157a4 --- /dev/null +++ b/perf_testing/config/write/4_seq_write_16thread.fio @@ -0,0 +1,15 @@ +[global] +name=blobfuse_benchmark +bs=256k +runtime=30s +time_based +filename_format=$jobname.$jobnum.$filenum +group_reporting + +[sequential_write_16_threads] +size=100G +rw=write +ioengine=sync +fallocate=none +create_on_open=1 +unlink=1 \ No newline at end of file diff --git a/perf_testing/scripts/fio_bench.sh b/perf_testing/scripts/fio_bench.sh new file mode 100755 index 000000000..43cf85148 --- /dev/null +++ b/perf_testing/scripts/fio_bench.sh @@ -0,0 +1,370 @@ +#!/bin/bash +set -e + +# Each test will be performed 3 times +iterations=3 + +# Mount path for blobfuse is supplied on command line while executing this script +mount_dir=$1 + +# Name of tests we are going to perform +test_name=$2 + +# Directory where output logs will be generated by fio +output="./${test_name}" + +# Additional mount parameters +log_type="syslog" +log_level="log_err" +cache_path="" + +# 
-------------------------------------------------------------------------------------------------- +# Method to mount blobfuse and wait for system to stabilize +mount_blobfuse() { + set +e + + blobfuse2 mount ${mount_dir} --config-file=./config.yaml --log-type=${log_type} --log-level=${log_level} ${cache_path} + mount_status=$? + set -e + if [ $mount_status -ne 0 ]; then + echo "Failed to mount file system" + exit 1 + else + echo "File system mounted successfully on ${mount_dir}" + fi + + # Wait for the daemon to come up and stabilize + sleep 5 + + # grep returns non-zero when the mount is missing; guard it so 'set -e' does not abort the script before the check below + df_status=0 + df -h | grep blobfuse || df_status=$? + if [ $df_status -ne 0 ]; then + echo "Failed to find blobfuse mount" + exit 1 + else + echo "File system stable now on ${mount_dir}" + fi +} + +# -------------------------------------------------------------------------------------------------- +# Method to execute fio command for a given config file and generate summary result +execute_test() { + job_file=$1 + + job_name=$(basename "${job_file}") + job_name="${job_name%.*}" + + echo -n "Running job ${job_name} for ${iterations} iterations... " + + for i in $(seq 1 $iterations); + do + echo -n "${i};" + set +e + + timeout 300m fio --thread \ + --output=${output}/${job_name}trial${i}.json \ + --output-format=json \ + --directory=${mount_dir} \ + --eta=never \ + ${job_file} + + job_status=$? + set -e + if [ $job_status -ne 0 ]; then + echo "Job ${job_name} failed : ${job_status}" + exit 1 + fi + done + + # From the fio output get the bandwidth details and put it in a summary file + jq -n 'reduce inputs.jobs[] as $job (null; .name = $job.jobname | .len += 1 | .value += (if ($job."job options".rw == "read") + then $job.read.bw / 1024 + elif ($job."job options".rw == "randread") then $job.read.bw / 1024 + elif ($job."job options".rw == "randwrite") then $job.write.bw / 1024 + else $job.write.bw / 1024 end)) | {name: .name, value: (.value / .len), unit: "MiB/s"}' ${output}/${job_name}trial*.json | tee ${output}/${job_name}_bandwidth_summary.json + + # From the fio output get the latency details and put it in a summary file + jq -n 'reduce inputs.jobs[] as $job (null; .name = $job.jobname | .len += 1 | .value += (if ($job."job options".rw == "read") + then $job.read.lat_ns.mean / 1000000 + elif ($job."job options".rw == "randread") then $job.read.lat_ns.mean / 1000000 + elif ($job."job options".rw == "randwrite") then $job.write.lat_ns.mean / 1000000 + else $job.write.lat_ns.mean / 1000000 end)) | {name: .name, value: (.value / .len), unit: "milliseconds"}' ${output}/${job_name}trial*.json | tee ${output}/${job_name}_latency_summary.json +} + +# -------------------------------------------------------------------------------------------------- +# Method to iterate over fio files in given directory and execute each test +iterate_fio_files() { + jobs_dir=$1 + job_type=$(basename "${jobs_dir}") + + for job_file in "${jobs_dir}"/*.fio; do + job_name=$(basename "${job_file}") + job_name="${job_name%.*}" + + mount_blobfuse + + execute_test $job_file + + blobfuse2 unmount all + sleep 5 + + rm -rf ~/.blobfuse2/* + done +} + +# -------------------------------------------------------------------------------------------------- +# Method to list files on the mount path and generate report +list_files() { + # Mount blobfuse before listing the files + mount_blobfuse + total_seconds=0 + + # List files and capture the time related details + work_dir=`pwd` + cd ${mount_dir} + /usr/bin/time -o ${work_dir}/lst.txt -v ls -U --color=never > ${work_dir}/lst.out + cd ${work_dir} + cat ${work_dir}/lst.txt
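+ # NOTE: GNU time -v reports 'Elapsed (wall clock) time' as [h:]mm:ss.ss; the + # parsing below assumes the mm:ss.ss form, i.e. runs that finish within an hour.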
+ + # Extract Elapsed time for listing files + list_time=`cat ${work_dir}/lst.txt | grep "Elapsed" | rev | cut -d " " -f 1 | rev` + echo $list_time + + IFS=':'; time_fragments=($list_time); unset IFS; + list_min=`printf '%5.5f' ${time_fragments[0]}` + list_sec=`printf '%5.5f' ${time_fragments[1]}` + + avg_list_time=`printf %5.5f $(echo "scale = 10; ($list_min * 60) + $list_sec" | bc)` + + # ------------------------------ + # Measure time taken to delete these files + cat ${work_dir}/lst.out | wc -l + cat ${work_dir}/lst.out | rev | cut -d " " -f 1 | rev | tail +2 > ${work_dir}/lst.out1 + + cd ${mount_dir} + /usr/bin/time -o ${work_dir}/del.txt -v xargs rm -rf < ${work_dir}/lst.out1 + cd - + cat ${work_dir}/del.txt + + # Extract Deletion time + del_time=`cat ${work_dir}/del.txt | grep "Elapsed" | rev | cut -d " " -f 1 | rev` + echo $del_time + + IFS=':'; time_fragments=($del_time); unset IFS; + del_min=`printf '%5.5f' ${time_fragments[0]}` + del_sec=`printf '%5.5f' ${time_fragments[1]}` + + avg_del_time=`printf %5.5f $(echo "scale = 10; ($del_min * 60) + $del_sec" | bc)` + + # Unmount and cleanup now + blobfuse2 unmount all + + echo $avg_list_time " : " $avg_del_time + + jq -n --arg list_time $avg_list_time --arg del_time $avg_del_time '[{name: "list_100k_files", value: $list_time, unit: "seconds"}, + {name: "delete_100k_files", value: $del_time, unit: "seconds"}] ' | tee ${output}/list_results.json +} + +# -------------------------------------------------------------------------------------------------- +# Method to run read/write tests using a python script +read_write_using_app() { + + # Clean up the results + rm -rf ${output}/app_write_*.json + rm -rf ${output}/app_read_*.json + + # ----- Write tests ----------- + # Mount blobfuse before creating the test files + mount_blobfuse + + # Run the python script to write files + echo `date` ' : Starting write tests' + for i in {1,10,40,100} + do + echo `date` " : Write test for ${i} GB file" + python3 ./perf_testing/scripts/write.py ${mount_dir} ${i} > ${output}/app_write_${i}.json + done + + # Unmount and cleanup now + blobfuse2 unmount all + + cat ${output}/app_write_*.json + + # ----- Read tests ----------- + # Mount blobfuse before reading the test files back + mount_blobfuse + + # Run the python script to read files + echo `date` ' : Starting read tests' + for i in {1,10,40,100} + do + echo `date` " : Read test for ${i} GB file" + python3 ./perf_testing/scripts/read.py ${mount_dir} ${i} > ${output}/app_read_${i}.json + done + + rm -rf ${mount_dir}/application_* + + # Unmount and cleanup now + blobfuse2 unmount all + + cat ${output}/app_read_*.json + + # Local SSD Writing just for comparison + # echo `date` ' : Starting Local write tests' + # for i in {1,10,40,100} + # do + # echo `date` ' : Write test for ${i} GB file' + # python3 ./perf_testing/scripts/write.py ${mount_dir} ${i} > ${output}/app_local_write_${i}.json + # done + # rm -rf ${mount_dir}/* + + + # ----- HighSpeed tests ----------- + # Mount blobfuse + mount_blobfuse + rm -rf ${mount_dir}/20GFile* + + # Run the python script to create large files at high speed + echo `date` ' : Starting highspeed tests' + python3 ./perf_testing/scripts/highspeed_create.py ${mount_dir} 10 > ${output}/highspeed_app_write.json + + blobfuse2 unmount all + sleep 3 + mount_blobfuse + python3 ./perf_testing/scripts/highspeed_read.py ${mount_dir}/20GFile* > ${output}/highspeed_app_read.json + rm -rf ${mount_dir}/20GFile* + + # Unmount and cleanup now + blobfuse2 unmount all + + cat ${output}/highspeed_app_*.json + + # Generate output
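+ # The write/read scripts emit one {name, total_time, speed, unit} record per run; + # the jq filters below reshape those into the {name, value, unit} entries used by + # the reporting step, e.g. (illustrative values): {"name": "write_10GB", "value": 1523.4, "unit": "MiB/s"}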
+ jq '{"name": .name, "value": .speed, "unit": .unit}' ${output}/app_write_*.json ${output}/app_read_*.json | jq -s '.' | tee ./${output}/app_bandwidth.json + jq '{"name": .name, "value": .total_time, "unit": "seconds"}' ${output}/app_write_*.json ${output}/app_read_*.json | jq -s '.' | tee ./${output}/app_time.json + + jq '{"name": .name, "value": .speed, "unit": .unit}' ${output}/highspeed_app*.json | jq -s '.' | tee ./${output}/highapp_bandwidth.json + jq '{"name": .name, "value": .total_time, "unit": "seconds"}' ${output}/highspeed_app*.json | jq -s '.' | tee ./${output}/highapp_time.json + + # jq '{"name": .name, "value": .speed, "unit": .unit}' ${output}/app_local_write_*.json | jq -s '.' | tee ./${output}/app_local_bandwidth.json +} + +# -------------------------------------------------------------------------------------------------- +# Method to create and then rename files +rename_files() { + # ----- Rename tests ----------- + # Mount blobfuse + mount_blobfuse + + total_seconds=0 + + # Run the rename benchmark and capture the time related details + work_dir=`pwd` + cd ${mount_dir} + python3 ${work_dir}/perf_testing/scripts/rename.py > ${work_dir}/rename.json + cd ${work_dir} + cat rename.json + + jq '{"name": .name, "value": .rename_time, "unit": .unit}' ${work_dir}/rename.json | jq -s '.' | tee ./${output}/rename_time.json +} + +# -------------------------------------------------------------------------------------------------- +# Method to prepare the system for test +prepare_system() { + blobfuse2 unmount all + # Clean up logs and create output directory + mkdir -p ${output} + chmod 777 ${output} +} + + +# -------------------------------------------------------------------------------------------------- +# Prepare the system for test +prepare_system + +# --------------------------------------------------------------------------------------------------
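+# Example invocation (illustrative paths): ./perf_testing/scripts/fio_bench.sh /mnt/blobfuse_mnt write +# where the first argument is the blobfuse2 mount point and the second is the test suite to run.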
+executed=1 +if [[ ${test_name} == "write" ]] +then + # Execute write benchmark using fio + echo "Running Write test cases" + cache_path="--block-cache-path=/mnt/tempcache" + iterate_fio_files "./perf_testing/config/write" + +elif [[ ${test_name} == "read" ]] +then + # Execute read benchmark using fio + echo "Running Read test cases" + iterate_fio_files "./perf_testing/config/read" +elif [[ ${test_name} == "highlyparallel" ]] +then + # Execute multi-threaded benchmark using fio + echo "Running Highly Parallel test cases" + cache_path="--block-cache-path=/mnt/tempcache" + iterate_fio_files "./perf_testing/config/high_threads" +elif [[ ${test_name} == "create" ]] +then + # Set log type to silent as this is going to generate a lot of logs + log_type="silent" + iterations=1 + + # Pre-creation cleanup + mount_blobfuse + echo "Deleting old data" + cd ${mount_dir} + find . -name "create_1000_files_in_10_threads*" -delete + find . -name "create_1000_files_in_100_threads*" -delete + find . -name "create_1l_files_in_20_threads*" -delete + cd - + blobfuse2 unmount all + + # Execute file create tests + echo "Running Create test cases" + iterate_fio_files "./perf_testing/config/create" +elif [[ ${test_name} == "list" ]] +then + # Set log type to silent as this is going to generate a lot of logs + log_type="silent" + + # Execute file listing tests + echo "Running File listing test cases" + list_files + + # No need to generate bandwidth or latency related reports in this case + executed=0 +elif [[ ${test_name} == "app" ]] +then + # App based read/write tests being executed + # This is done using python scripts which read/write files in sequential order + echo "Running App based tests" + read_write_using_app + + # No need to generate bandwidth or latency related reports in this case + executed=0 +elif [[ ${test_name} == "rename" ]] +then + # Set log type to silent as this is going to generate a lot of logs + log_type="silent" + + # Execute rename tests + echo "Running File rename test cases" + rename_files + + # No need to generate bandwidth or latency related reports in this case + executed=0 +else + executed=0 + echo "Invalid argument. Please provide one of 'read', 'write', 'highlyparallel', 'create', 'list', 'app' or 'rename' as argument" +fi + +# -------------------------------------------------------------------------------------------------- +if [[ $executed -eq 1 ]] +then + # Merge all results and generate a json summary for bandwidth + jq -n '[inputs]' ${output}/*_bandwidth_summary.json | tee ./${output}/bandwidth_results.json + + # Merge all results and generate a json summary for latency + jq -n '[inputs]' ${output}/*_latency_summary.json | tee ./${output}/latency_results.json +fi + +# --------------------------------------------------------------------------------------------------
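+# The merged *_results.json files follow the [{name, value, unit}] shape consumed by the +# benchmark charting step, e.g. (illustrative values): +# [ { "name": "sequential_read", "value": 1850.2, "unit": "MiB/s" }, ... ]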
diff --git a/perf_testing/scripts/highspeed_create.py b/perf_testing/scripts/highspeed_create.py new file mode 100755 index 000000000..7e7b410f0 --- /dev/null +++ b/perf_testing/scripts/highspeed_create.py @@ -0,0 +1,40 @@ +import os +import subprocess +import concurrent.futures +import time +import multiprocessing +import argparse +import json + +def create_file(file_index, folder): + timestamp = int(time.time()) # Get current timestamp + filename = os.path.join(folder, f'20GFile_{timestamp}_{file_index}') + command = f"dd if=/dev/zero of={filename} bs=16M count=1280 oflag=direct" + start_time = time.time() + subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + end_time = time.time() + return (filename, end_time - start_time) + +def main(folder, num_files): + if not os.path.exists(folder): + os.makedirs(folder) + + start_time = time.time() + with concurrent.futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count()) as executor: + futures = [executor.submit(create_file, i, folder) for i in range(num_files)] + results = [f.result() for f in concurrent.futures.as_completed(futures)] + end_time = time.time() + + total_time = end_time - start_time + total_data_written = num_files * 20 # each file is 20 GB + speed_gbps = (total_data_written * 8) / total_time # converting GB to Gb and then calculating Gbps + + print(json.dumps({"name": "create_10_20GB_file", "total_time": total_time, "speed": speed_gbps, "unit": "Gbps"})) + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='Create multiple 20GB files in parallel.') + parser.add_argument('folder', type=str, help='The folder where the files will be written.') + parser.add_argument('num_files', type=int, help='The number of 20GB files to create.') + + args = parser.parse_args() + main(args.folder, args.num_files) \ No newline at end of file diff --git a/perf_testing/scripts/highspeed_read.py b/perf_testing/scripts/highspeed_read.py new file mode 100755 index 000000000..454204846 --- /dev/null +++ b/perf_testing/scripts/highspeed_read.py @@ -0,0 +1,51 @@ +import os +import sys +import subprocess +import time +from multiprocessing import Pool, cpu_count +import json + +def copy_file(src): + try: + process = subprocess.Popen(['dd', f'if={src}', 'of=/dev/null', 'bs=4M', 'status=none'], stdout=subprocess.PIPE) + bytes_transferred = 0 + start_time = time.time() + for line in process.stdout: + bytes_transferred += len(line) + # Calculate speed in Gbps and update live + elapsed_time = time.time() - start_time + speed_gbps = (bytes_transferred * 8) / (elapsed_time * 10**9) + # print(f"\rBytes transferred: {bytes_transferred} bytes | Speed: {speed_gbps:.2f} Gbps", end="") + sys.stdout.flush() + process.wait() + file_size = os.path.getsize(src) + return file_size + except subprocess.CalledProcessError as e: + return 0 + +def main(file_paths): + cpu_cores = cpu_count() + total_size = 0 + + start_time = time.time() + + with Pool(cpu_cores) as pool: + sizes = pool.map(copy_file, file_paths) + total_size = sum(sizes) + + end_time = time.time() + time_taken = end_time - start_time + + total_size_gb = total_size / (1024 ** 3) # Convert bytes to GB + speed_gbps = (total_size * 8) / (time_taken * 10**9) # Convert bytes to bits and calculate speed in Gbps + + print(json.dumps({"name": "read_10_20GB_file", "total_time": time_taken, "speed": speed_gbps, "unit": "Gbps"})) + +if __name__ == "__main__": + if len(sys.argv) < 2: + print("Usage: python highspeed_read.py <file1> [<file2> ...]") + sys.exit(1) + + file_paths = sys.argv[1:] + + main(file_paths) \ No newline at end of file diff --git a/perf_testing/scripts/read.py b/perf_testing/scripts/read.py new file mode 100755 index 000000000..294bb4f02 --- /dev/null +++ b/perf_testing/scripts/read.py @@ -0,0 +1,33 @@ +import time +import os +import sys +import json + +mountpath = sys.argv[1] +size = sys.argv[2] + +blockSize = 8 * 1024 * 1024 +fileSize = int(size) * (1024 * 1024 * 1024) +bytes_read = 0 + +t1 = time.time() +fd = open(os.path.join(mountpath, "application_"+size+".data"), "rb") +t2 = time.time() + +while bytes_read <= fileSize: + data_byte = fd.read(blockSize) + bytes_read += len(data_byte) + +t3 = time.time() +fd.close() +t4 = time.time() + +open_time = t2 - t1 +close_time = t4 - t3 +read_time = t3 - t2 +total_time = t4 - t1 + +read_mbps = (bytes_read/read_time)/(1024 * 1024) +total_mbps = (bytes_read/total_time)/(1024 * 1024) + +print(json.dumps({"name": "read_" + size + "GB", "open_time": open_time, "read_time": read_time, "close_time": close_time, "total_time": total_time, "read_mbps": read_mbps, "speed": total_mbps, "unit": "MiB/s"})) \ No newline at end of file diff --git a/perf_testing/scripts/rename.py b/perf_testing/scripts/rename.py new file mode 100644 index 000000000..d8c8b0817 --- /dev/null +++ b/perf_testing/scripts/rename.py @@ -0,0 +1,61 @@ +import datetime +import os +import time +import shutil +import json + +# Function to create unique folder for each run +def create_folder(folder_path): + if os.path.exists(folder_path): + shutil.rmtree(folder_path) + os.makedirs(folder_path) + +# Function to create files of a specified size +def create_files(folder_path, num_files, file_size): + start_time = time.time() + for i in range(num_files): + file_path =
os.path.join(folder_path, f"file_{i}.txt") + with open(file_path, 'wb') as f: + f.write(b'\0' * file_size) + end_time = time.time() + return end_time - start_time + +# Function to rename files +def rename_files(folder_path): + start_time = time.time() + for i, filename in enumerate(os.listdir(folder_path)): + old_file_path = os.path.join(folder_path, filename) + new_file_path = os.path.join(folder_path, f"new_file_{i}.txt") + os.rename(old_file_path, new_file_path) + end_time = time.time() + return end_time - start_time + +# Specify the folder path +base_folder = "./" +timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S") +folder_path = os.path.join(base_folder, f"test_folder_{timestamp}") + +# Specify the number of files +num_files = 5000 + +# Specify the file size in bytes (1 MB) +file_size = 1024 * 1024 + +# Output file +output_file = "output.txt" + +# Create unique folder for each run +create_folder(folder_path) + +# Measure the time taken to create files +create_time = create_files(folder_path, num_files, file_size) +# print(f"Time taken to create {num_files} files: {create_time:.4f} seconds") + +# Measure the time taken to rename files +rename_time = rename_files(folder_path) +# print(f"Time taken to rename {num_files} files: {rename_time:.4f} seconds") + +# Clear the test data +shutil.rmtree(folder_path) + +print(json.dumps({"name": "rename_5000_1MB_files", "rename_time": rename_time, "create_time": create_time, "unit": "seconds"})) diff --git a/perf_testing/scripts/write.py b/perf_testing/scripts/write.py new file mode 100755 index 000000000..c9f74ed3b --- /dev/null +++ b/perf_testing/scripts/write.py @@ -0,0 +1,35 @@ +import time +import os +import sys +import json + +mountpath = sys.argv[1] +size = sys.argv[2] + +blockSize = 8 * 1024 * 1024 +fileSize = int(size) * (1024 * 1024 * 1024) +bytes_written = 0 + +data = os.urandom(blockSize) + +t1 = time.time() +fd = open(os.path.join(mountpath, "application_"+size+".data"), "wb") +t2 = time.time() + +while bytes_written <= fileSize: + data_byte = fd.write(data) + bytes_written += data_byte + +t3 = time.time() +fd.close() +t4 = time.time() + +open_time = t2 - t1 +close_time = t4 - t3 +write_time = t3 - t2 +total_time = t4 - t1 + +write_mbps = (bytes_written/write_time)/(1024 * 1024) +total_mbps = (bytes_written/total_time)/(1024 * 1024) + +print(json.dumps({"name": "write_" + size + "GB", "open_time": open_time, "write_time": write_time, "close_time": close_time, "total_time": total_time, "write_mbps": write_mbps, "speed": total_mbps, "unit": "MiB/s"})) \ No newline at end of file diff --git a/sampleDataSetFuseConfig.json b/sampleDataSetFuseConfig.json deleted file mode 100644 index 632610924..000000000 --- a/sampleDataSetFuseConfig.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "clientid": "myClientId", - "tenantid": "myTenantId", - "authorityurl": "myActiveDirectoryEndpoint", - "credentialtype": "servicePrincipal", - "resourceurl": "https://datalake.azure.net/", - "fuseattrtimeout": 120, - "fuseentrytimeout": 120, - "fuseallowother": false, - "loglevel": "LOG_DEBUG", - "retrycount": 2, - "maxcachesizeinmb": 500, - "requiredfreespaceinmb":2000, - "cachedir":"/home/myuser/tempcache", - "resourceid" : "adl://myAccountName.azuredatalakestore.net/myContainer", - "mountdir":"/home/myuser/mntdir" -} \ No newline at end of file diff --git a/testdata/config/azure_block_bench.yaml b/testdata/config/azure_block_bench.yaml new file mode 100644 index 000000000..580397246 --- /dev/null +++ b/testdata/config/azure_block_bench.yaml @@
-0,0 +1,35 @@ +logging: + level: log_err + file-path: "./blobfuse2.log" + type: base + +components: + - libfuse + - block_cache + - attr_cache + - azstorage + +libfuse: + attribute-expiration-sec: 120 + entry-expiration-sec: 120 + negative-entry-expiration-sec: 240 + ignore-open-flags: true + +block_cache: + block-size-mb: 16 + mem-size-mb: 204800 + prefetch: 200 + parallelism: 600 + disk-size-mb: 512000 + disk-timeout-sec: 120 + #prefetch-on-open: true + +attr_cache: + timeout-sec: 7200 + +azstorage: + mode: key + container: { 0 } + account-name: { AZURE_STORAGE_ACCOUNT } + account-key: { AZURE_STORAGE_ACCESS_KEY } + From 98acac361ff7a594d3e2bc18f2eef0d611e055c2 Mon Sep 17 00:00:00 2001 From: ashruti-msft <137055338+ashruti-msft@users.noreply.github.com> Date: Tue, 9 Jul 2024 15:53:58 +0530 Subject: [PATCH 09/73] Added min prefetch check (#1446) * Added check for memsize and prefetch if set by default --- component/block_cache/block_cache.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/component/block_cache/block_cache.go b/component/block_cache/block_cache.go index fffdcf787..af994c2cc 100644 --- a/component/block_cache/block_cache.go +++ b/component/block_cache/block_cache.go @@ -39,6 +39,7 @@ import ( "encoding/base64" "fmt" "io" + "math" "os" "path/filepath" "runtime" @@ -196,6 +197,7 @@ func (bc *BlockCache) TempCacheCleanup() error { func (bc *BlockCache) Configure(_ bool) error { log.Trace("BlockCache::Configure : %s", bc.Name()) + defaultMemSize := false conf := BlockCacheOptions{} err := config.UnmarshalKey(bc.Name(), &conf) if err != nil { @@ -218,6 +220,7 @@ func (bc *BlockCache) Configure(_ bool) error { bc.memSize = uint64(4192) * _1MB } else { bc.memSize = uint64(0.8 * (float64)(sysinfo.Freeram) * float64(sysinfo.Unit)) + defaultMemSize = true } } @@ -227,9 +230,13 @@ func (bc *BlockCache) Configure(_ bool) error { } bc.prefetchOnOpen = conf.PrefetchOnOpen - bc.prefetch = uint32(2 * runtime.NumCPU()) + bc.prefetch = uint32(math.Max((MIN_PREFETCH*2)+1, (float64)(2*runtime.NumCPU()))) bc.noPrefetch = false + if defaultMemSize && (uint64(bc.prefetch)*uint64(bc.blockSize)) > bc.memSize { + bc.prefetch = (MIN_PREFETCH * 2) + 1 + } + err = config.UnmarshalKey("lazy-write", &bc.lazyWrite) if err != nil { log.Err("BlockCache: config error [unable to obtain lazy-write]") From 5d7a9e4a7bb4ae16ed9b50d878fa817c40b1d284 Mon Sep 17 00:00:00 2001 From: Vikas Bhansali <64532198+vibhansa-msft@users.noreply.github.com> Date: Wed, 10 Jul 2024 07:41:14 +0530 Subject: [PATCH 10/73] Update benchmark.yml --- .github/workflows/benchmark.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 6a9e84eb7..90cd64830 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -148,7 +148,7 @@ jobs: with: output-file-path: list/list_results.json tool: 'customSmallerIsBetter' - alert-threshold: "160%" + alert-threshold: "190%" max-items-in-chart: 100 github-token: ${{ secrets.GITHUB_TOKEN }} fail-on-alert: true @@ -248,4 +248,4 @@ jobs: gh-pages-branch: benchmarks benchmark-data-dir-path: ${{ matrix.TestType }}/time/rename # --------------------------------------------------------------------------------------- - \ No newline at end of file + From 8f767d0251fc23bddb7dc73f3a2a8e792f39412d Mon Sep 17 00:00:00 2001 From: Vikas Bhansali <64532198+vibhansa-msft@users.noreply.github.com> Date: Wed, 10 Jul 2024 12:41:56 +0530 Subject: [PATCH 11/73] Remove RHEL 7.5 from 
nightly and artifacts. (#1448) * Remove RHEL 7.5 from nightly and artifact tests. --- blobfuse2-nightly.yaml | 23 +--------- blobfuse2-release.yaml | 96 ++++++++++++++++++++++++++++++------------ 2 files changed, 69 insertions(+), 50 deletions(-) diff --git a/blobfuse2-nightly.yaml b/blobfuse2-nightly.yaml index d475ebd5b..a235a5e58 100755 --- a/blobfuse2-nightly.yaml +++ b/blobfuse2-nightly.yaml @@ -788,12 +788,6 @@ stages: timeoutInMinutes: 60 strategy: matrix: - RHEL-7.5: - DistroVer: "RHEL-7.5" - Description: "Red Hat Enterprise Linux 7.5" - AgentName: "blobfuse-rhel7_5" - ContainerName: "test-cnt-rhel-75" - tags: 'fuse3' RHEL-8.6: DistroVer: "RHEL-8.6" Description: "Red Hat Enterprise Linux 8.6" AgentName: "blobfuse-rhel8_6" ContainerName: "test-cnt-rhel-86" tags: 'fuse3' @@ -831,12 +825,6 @@ stages: value: "/usr/pipeline/workv2/go" steps: - - script: | - sudo touch /etc/yum.repos.d/centos.repo - sudo sh -c 'echo -e "[centos-extras]\nname=Centos extras - $basearch\nbaseurl=http://mirror.centos.org/centos/7/extras/x86_64\nenabled=1\ngpgcheck=1\ngpgkey=http://centos.org/keys/RPM-GPG-KEY-CentOS-7" > /etc/yum.repos.d/centos.repo' - condition: eq(variables['AgentName'], 'blobfuse-rhel7_5') - displayName: "Update OS mirrors" - - template: 'azure-pipeline-templates/distro-tests.yml' parameters: working_dir: $(WORK_DIR) @@ -857,10 +845,7 @@ stages: sudo sed -i '/^failovermethod=/d' /etc/yum.repos.d/*.repo sudo yum update -y sudo yum groupinstall "Development Tools" -y - if [ $(AgentName) == "blobfuse-rhel7_5" ]; then - sudo yum-config-manager --save --setopt=rhui-rhel-7-server-dotnet-rhui-rpms.skip_if_unavailable=true - sudo yum install git fuse fuse3-libs fuse3-devel fuse3 rh-python36 -y - elif [ $(AgentName) == "blobfuse-rhel9" ]; then + if [ $(AgentName) == "blobfuse-rhel9" ]; then sudo yum install git fuse fuse3-libs fuse3-devel fuse3 python3 -y --nobest --allowerasing else sudo yum install git fuse fuse3-libs fuse3-devel fuse3 python36 -y --nobest --allowerasing @@ -1039,12 +1024,6 @@ stages: value: "/usr/pipeline/workv2/go" steps: - - script: | - sudo touch /etc/yum.repos.d/centos.repo - sudo sh -c 'echo -e "[centos-extras]\nname=Centos extras - $basearch\nbaseurl=http://mirror.centos.org/centos/7/extras/x86_64\nenabled=1\ngpgcheck=1\ngpgkey=http://centos.org/keys/RPM-GPG-KEY-CentOS-7" > /etc/yum.repos.d/centos.repo' - condition: eq(variables['AgentName'], 'blobfuse-rhel7_5') - displayName: "Update OS mirrors" - - template: 'azure-pipeline-templates/distro-tests.yml' parameters: working_dir: $(WORK_DIR) diff --git a/blobfuse2-release.yaml b/blobfuse2-release.yaml index 6e3a9fece..8fe9083bd 100644 --- a/blobfuse2-release.yaml +++ b/blobfuse2-release.yaml @@ -807,7 +807,7 @@ stages: artifactName: 'blobfuse2' displayName: 'Publish Artifacts' - - job: Set_4 + - job: Set_4_1 timeoutInMinutes: 120 strategy: matrix: RHEL-7.5: agentName: "blobfuse-rhel7_5" vmImage: 'RHEL-7.5' fuse-version: 'fuse3' tags: 'fuse3' container: 'test-cnt-rhel-75' + pool: + name: "blobfuse-rhel-pool" + demands: + - ImageOverride -equals $(agentName) + + variables: + - group: NightlyBlobFuse + - name: root_dir + value: '$(System.DefaultWorkingDirectory)' + - name: work_dir + value: '$(System.DefaultWorkingDirectory)/azure-storage-fuse' + - name: mount_dir + value: '$(System.DefaultWorkingDirectory)/fusetmp' + - name: temp_dir + value: '$(System.DefaultWorkingDirectory)/fusetmpcache' + + steps: + - checkout: none + + - script: | + sudo yum update -y + sudo yum install git -y + sudo yum groupinstall "Development Tools" -y + displayName: 'Install Git' + + - task: DownloadBuildArtifacts@0 + displayName: 'Download Build
Artifacts' + inputs: + artifactName: 'blobfuse2-signed' + downloadPath: $(root_dir) + itemPattern: blobfuse2-signed/blobfuse2*$(tags)*x86_64.rpm + + - script: | + ls -l + result=$(ls -1 | wc -l) + if [ $result -ne 1 ]; then + exit 1 + fi + displayName: 'List Downloaded Package' + workingDirectory: $(root_dir)/blobfuse2-signed + + - script: | + for f in ./blobfuse2*$(tags)*.rpm; do mv -v "$f" "${f/-$(tags)./-$(vmImage).}"; done; + cp ./blobfuse2*$(vmImage)*.rpm $(Build.ArtifactStagingDirectory) + f=`ls ./blobfuse2*$(vmImage)*.rpm` + cp "$f" $(sed 's:RHEL-7.5:RHEL-7.8:' <<< "$f") + cp "$f" $(sed 's:RHEL-7.5:RHEL-8.1:' <<< "$f") + cp "$f" $(sed 's:RHEL-7.5:RHEL-8.2:' <<< "$f") + cp ./blobfuse2*RHEL-7.8*.rpm $(Build.ArtifactStagingDirectory) + cp ./blobfuse2*RHEL-8*.rpm $(Build.ArtifactStagingDirectory) + rm -rf ./blobfuse2*RHEL-7.8*.rpm + rm -rf ./blobfuse2*RHEL-8*.rpm + displayName: 'Rename Package' + workingDirectory: $(root_dir)/blobfuse2-signed + + # publishing the artifacts generated + - task: PublishBuildArtifacts@1 + inputs: + artifactName: 'blobfuse2' + displayName: 'Publish Artifacts' + + - job: Set_4_2 + timeoutInMinutes: 120 + strategy: + matrix: RHEL-8.6: agentName: "blobfuse-rhel8_6" vmImage: 'RHEL-8.6' @@ -852,12 +918,6 @@ stages: steps: - checkout: none - - script: | - sudo touch /etc/yum.repos.d/centos.repo - sudo sh -c 'echo -e "[centos-extras]\nname=Centos extras - $basearch\nbaseurl=http://mirror.centos.org/centos/7/extras/x86_64\nenabled=1\ngpgcheck=1\ngpgkey=http://centos.org/keys/RPM-GPG-KEY-CentOS-7" > /etc/yum.repos.d/centos.repo' - condition: or(eq(variables['AgentName'], 'blobfuse-rhel7_5'),eq(variables['AgentName'], 'blobfuse-rhel7_8')) - displayName: "Update OS mirrors" - - script: | sudo yum update -y sudo yum install git -y @@ -904,16 +964,6 @@ stages: - script: | for f in ./blobfuse2*$(tags)*.rpm; do mv -v "$f" "${f/-$(tags)./-$(vmImage).}"; done; cp ./blobfuse2*$(vmImage)*.rpm $(Build.ArtifactStagingDirectory) - if [ $(agentName) == "blobfuse-rhel7_5" ]; then - f=`ls ./blobfuse2*$(vmImage)*.rpm` - cp "$f" $(sed 's:RHEL-7.5:RHEL-7.8:' <<< "$f") - cp "$f" $(sed 's:RHEL-7.5:RHEL-8.1:' <<< "$f") - cp "$f" $(sed 's:RHEL-7.5:RHEL-8.2:' <<< "$f") - cp ./blobfuse2*RHEL-7.8*.rpm $(Build.ArtifactStagingDirectory) - cp ./blobfuse2*RHEL-8*.rpm $(Build.ArtifactStagingDirectory) - rm -rf ./blobfuse2*RHEL-7.8*.rpm - rm -rf ./blobfuse2*RHEL-8*.rpm - fi displayName: 'Rename Package' workingDirectory: $(root_dir)/blobfuse2-signed @@ -921,11 +971,7 @@ stages: sudo sed -i '/^failovermethod=/d' /etc/yum.repos.d/*.repo sudo rpm -qip blobfuse2*$(vmImage)*.rpm sudo yum groupinstall "Development Tools" -y - if [[ $(agentName) == "blobfuse-rhel7_5" || $(agentName) == "blobfuse-rhel7_8" ]]; then - sudo yum install fuse fuse3-libs fuse3-devel fuse3 -y - else - sudo yum install fuse fuse3-libs fuse3-devel fuse3 -y --nobest --allowerasing - fi + sudo yum install fuse fuse3-libs fuse3-devel fuse3 -y --nobest --allowerasing sudo rpm -i blobfuse2*$(vmImage)*.rpm displayName: 'Install Package' workingDirectory: $(Build.ArtifactStagingDirectory) @@ -1394,12 +1440,6 @@ stages: steps: - checkout: none - - script: | - sudo touch /etc/yum.repos.d/centos.repo - sudo sh -c 'echo -e "[centos-extras]\nname=Centos extras - $basearch\nbaseurl=http://mirror.centos.org/centos/7/extras/x86_64\nenabled=1\ngpgcheck=1\ngpgkey=http://centos.org/keys/RPM-GPG-KEY-CentOS-7" > /etc/yum.repos.d/centos.repo' - condition: or(eq(variables['AgentName'], 'blobfuse-rhel7_5'),eq(variables['AgentName'], 'blobfuse-rhel7_8')) 
- displayName: "Update OS mirrors" - - script: | sudo yum update -y sudo yum install wget git -y From f088b803fe387bbb1f5f76caedbe75cf2439b003 Mon Sep 17 00:00:00 2001 From: ashruti-msft <137055338+ashruti-msft@users.noreply.github.com> Date: Wed, 10 Jul 2024 16:47:29 +0530 Subject: [PATCH 12/73] Fixed block-cache test (#1454) * Fix UT for prefetch count --- component/block_cache/block_cache_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/component/block_cache/block_cache_test.go b/component/block_cache/block_cache_test.go index 29a534515..bbcd48971 100644 --- a/component/block_cache/block_cache_test.go +++ b/component/block_cache/block_cache_test.go @@ -173,7 +173,7 @@ func (suite *blockCacheTestSuite) TestEmpty() { cores, err := strconv.Atoi(coresStr) suite.assert.Nil(err) suite.assert.EqualValues(tobj.blockCache.workers, uint32(3*cores)) - suite.assert.EqualValues(tobj.blockCache.prefetch, uint32(2*cores)) + suite.assert.EqualValues(tobj.blockCache.prefetch, math.Max((MIN_PREFETCH*2)+1, float64(2*cores))) suite.assert.EqualValues(tobj.blockCache.noPrefetch, false) suite.assert.NotNil(tobj.blockCache.blockPool) suite.assert.NotNil(tobj.blockCache.threadPool) From 30b4cfff983f65c1d9a2e438386e0ae5120d4eda Mon Sep 17 00:00:00 2001 From: Vikas Bhansali <64532198+vibhansa-msft@users.noreply.github.com> Date: Fri, 12 Jul 2024 12:43:58 +0530 Subject: [PATCH 13/73] Update benchmark.yml --- .github/workflows/benchmark.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 90cd64830..2815dd32d 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -148,7 +148,7 @@ jobs: with: output-file-path: list/list_results.json tool: 'customSmallerIsBetter' - alert-threshold: "190%" + alert-threshold: "500%" max-items-in-chart: 100 github-token: ${{ secrets.GITHUB_TOKEN }} fail-on-alert: true From 7f395591dbea6264df3160b99c37fbaf4baea1dd Mon Sep 17 00:00:00 2001 From: ashruti-msft <137055338+ashruti-msft@users.noreply.github.com> Date: Fri, 12 Jul 2024 16:11:38 +0530 Subject: [PATCH 14/73] ObjectID info updated and simplified base config (#1452) --- README.md | 1 + cmd/mount.go | 9 ++- setup/{devConfig.yaml => advancedConfig.yaml} | 2 +- setup/baseConfig.yaml | 69 +------------------ 4 files changed, 10 insertions(+), 71 deletions(-) rename setup/{devConfig.yaml => advancedConfig.yaml} (99%) diff --git a/README.md b/README.md index 5942154f3..a1a95537a 100755 --- a/README.md +++ b/README.md @@ -10,6 +10,7 @@ Blobfuse2 is stable, and is ***supported by Microsoft*** provided that it is use ## NOTICE - We have seen some customer issues around files getting corrupted when `streaming` is used in write mode. Kindly avoid using this feature for write while we investigate and resolve it. - You can now use block-cache instead of streaming for both read and write workflows, which offers much better performance compared to streaming. To enable `block-cache` instead of `streaming`, use `--block-cache` in CLI param or `block-cache` as component in config file instead of `streaming`. +- As of version 2.3.0, blobfuse has updated its authentication methods. For Managed Identity, Object-ID based OAuth is solely accessible via CLI-based login, requiring Azure CLI on the system. For a dependency-free option, users may utilize Application/Client-ID or Resource ID based authentication. 
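For example, a config-file based block-cache mount might look like the sketch below (illustrative values only; the keys mirror the block_cache section used in this repo's benchmark config, and sizes should be tuned to the VM):

    components:
      - libfuse
      - block_cache
      - attr_cache
      - azstorage

    block_cache:
      block-size-mb: 16
      mem-size-mb: 4096
      prefetch: 12
      parallelism: 128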
## Supported Platforms Visit [this](https://github.com/Azure/azure-storage-fuse/wiki/Blobfuse2-Supported-Platforms) page to see list of supported linux distros. diff --git a/cmd/mount.go b/cmd/mount.go index b4c02ed78..94afb7f0b 100644 --- a/cmd/mount.go +++ b/cmd/mount.go @@ -410,8 +410,13 @@ var mountCmd = &cobra.Command{ log.Debug("Mount allowed on nonempty path : %v", options.NonEmpty) pipeline, err = internal.NewPipeline(options.Components, !daemon.WasReborn()) if err != nil { - log.Err("mount : failed to initialize new pipeline [%v]", err) - return Destroy(fmt.Sprintf("failed to initialize new pipeline [%s]", err.Error())) + if err.Error() == "Azure CLI not found on path" { + log.Err("mount : failed to initialize new pipeline :: To authenticate using MSI with object-ID, ensure Azure CLI is installed. Alternatively, use app/client ID or resource ID for authentication. [%v]", err) + return Destroy(fmt.Sprintf("failed to initialize new pipeline :: To authenticate using MSI with object-ID, ensure Azure CLI is installed. Alternatively, use app/client ID or resource ID for authentication. [%s]", err.Error())) + } else { + log.Err("mount : failed to initialize new pipeline [%v]", err) + return Destroy(fmt.Sprintf("failed to initialize new pipeline [%s]", err.Error())) + } } common.ForegroundMount = options.Foreground diff --git a/setup/devConfig.yaml b/setup/advancedConfig.yaml similarity index 99% rename from setup/devConfig.yaml rename to setup/advancedConfig.yaml index 04c1fd876..8aabd69ec 100644 --- a/setup/devConfig.yaml +++ b/setup/advancedConfig.yaml @@ -123,7 +123,7 @@ azstorage: # OR appid: resid: - objid: + objid: # OR tenantid: clientid: diff --git a/setup/baseConfig.yaml b/setup/baseConfig.yaml index 54bb54f04..a6ccef65d 100644 --- a/setup/baseConfig.yaml +++ b/setup/baseConfig.yaml @@ -1,36 +1,3 @@ -# MUST READ : -# If you are creating a blobfuse2 config file using this kindly take care of below points -# 1. All boolean configs (true|false config) (except ignore-open-flags, virtual-directory) are set to 'false' by default. -# No need to mention them in your config file unless you are setting them to true. -# 2. 'loopbackfs' is purely for testing and shall not be used in production configuration. -# 3. 'stream' and 'file_cache' can not co-exist and config file shall have only one of them based on your use case. -# 4. By default log level is set to 'log_warning' level and are redirected to syslog. -# Either use 'base' logging or syslog filters to redirect logs to separate file. -# To install syslog filter follow below steps: -# sudo cp setup/11-blobfuse2.conf /etc/rsyslog.d/ -# sudo cp setup/blobfuse2-logrotate /etc/logrotate.d/ -# sudo service rsyslog restart -# 5. For non-HNS (flat namespace) accounts blobfuse expects special directory marker files to -# exists in container to identify a directory. -# If these files do not exist in container, then 'virtual-directory: true' in 'azstorage' section is required -# 6. By default 'writeback-cache' is enabled for libfuse3 and this may result in append/write operations to fail. -# Either you can disable 'writeback-cache', which might hurt the performance -# or you can configure blobfuse2 to ignore open flags given by user and make it work with ''writeback-cache'. -# 'libfuse' sectoin below has both the configurations. -# 7. If are you using 'allow-other: true' config then make sure user_allow_other is enabled in /etc/fuse.conf file as -# well otherwise mount will fail. 
By default /etc/fuse.conf will have this option disabled we just need to -# enable it and save the file. -# 8. If data in your storage account (non-HNS) is created using Blobfuse or AzCopy then there are marker files present -# in your container to mark a directory. In such cases you can optimize your listing by setting 'virtual-directory' -# flag to false in mount command. -# 9. If you are using 'file_cache' component then make sure you have enough disk space available for cache. -# 10. 'sdk-trace' has been removed with v2.3.0 release and setting log level to log_debug will auto enable these logs. -# ----------------------------------------------------------------------------------------------------------------------- - -# Common configurations -allow-other: true|false -nonempty: true|false - # Logger configuration logging: type: syslog|silent|base @@ -44,14 +11,6 @@ components: - file_cache - attr_cache - azstorage - -# Libfuse configuration -libfuse: - default-permission: 0777|0666|0644|0444 - attribute-expiration-sec: