diff --git a/component/file_cache/file_cache.go b/component/file_cache/file_cache.go
index e0dcd0f9e..7cf26d1a8 100644
--- a/component/file_cache/file_cache.go
+++ b/component/file_cache/file_cache.go
@@ -386,12 +386,15 @@ func (fc *FileCache) StatFs() (*common.Statfs_t, bool, error) {
 		return fc.NextComponent().StatFs()
 	}
 
+	log.Trace("FileCache::StatFs")
+
 	// cache_size = f_blocks * f_frsize/1024
 	// cache_size - used = f_frsize * f_bavail/1024
 	// cache_size - used = vfs.f_bfree * vfs.f_frsize / 1024
 	// if cache size is set to 0 then we have the root mount usage
 	maxCacheSize := fc.maxCacheSize * MB
 	if maxCacheSize == 0 {
+		log.Err("FileCache::StatFs : Not responding to StatFs because max cache size is zero")
 		return nil, false, nil
 	}
 	usage, _ := common.GetUsage(fc.tmpPath)
@@ -400,6 +403,7 @@
 	// how much space is available on the underlying file system?
 	availableOnCacheFS, err := fc.getAvailableSize()
 	if err != nil {
+		log.Err("FileCache::StatFs : Not responding to StatFs because getAvailableSize failed. Here's why: %v", err)
 		return nil, false, err
 	}
 
@@ -416,6 +420,7 @@
 		Namemax: 255,
 	}
 
+	log.Debug("FileCache::StatFs : responding with free=%d avail=%d blocks=%d (bsize=%d)", stat.Bfree, stat.Bavail, stat.Blocks, stat.Bsize)
 	return &stat, true, nil
 }
 
diff --git a/component/s3storage/client.go b/component/s3storage/client.go
index ae84e14c8..e28bca13f 100644
--- a/component/s3storage/client.go
+++ b/component/s3storage/client.go
@@ -1131,15 +1131,15 @@ func (cl *Client) combineSmallBlocks(name string, blockList []*common.Block) ([]
 	return newBlockList, nil
 }
 
-func (cl *Client) GetUsedSize() uint64 {
+func (cl *Client) GetUsedSize() (uint64, error) {
 	headBucketOutput, err := cl.headBucket()
 	if err != nil {
-		return 0
+		return 0, err
 	}
 
 	response, ok := middleware.GetRawResponse(headBucketOutput.ResultMetadata).(*smithyHttp.Response)
 	if !ok || response == nil {
-		return 0
+		return 0, fmt.Errorf("Failed GetRawResponse from HeadBucketOutput")
 	}
 
 	headerValue, ok := response.Header["X-Rstor-Size"]
@@ -1147,13 +1147,13 @@
 		headerValue, ok = response.Header["X-Lyve-Size"]
 	}
 	if !ok || len(headerValue) == 0 {
-		return 0
+		return 0, fmt.Errorf("HeadBucket response has no size header (is the endpoint not Lyve Cloud?)")
 	}
 
 	bucketSizeBytes, err := strconv.ParseUint(headerValue[0], 10, 64)
 	if err != nil {
-		return 0
+		return 0, err
 	}
 
-	return bucketSizeBytes
+	return bucketSizeBytes, nil
 }
diff --git a/component/s3storage/connection.go b/component/s3storage/connection.go
index 7e4499d0a..e579f7ec3 100644
--- a/component/s3storage/connection.go
+++ b/component/s3storage/connection.go
@@ -112,5 +112,5 @@ type S3Connection interface {
 	StageAndCommit(name string, bol *common.BlockOffsetList) error
 	NewCredentialKey(_, _ string) error
 
-	GetUsedSize() uint64
+	GetUsedSize() (uint64, error)
 }
diff --git a/component/s3storage/s3storage.go b/component/s3storage/s3storage.go
index e3a3e2c9b..8eb91fdc9 100644
--- a/component/s3storage/s3storage.go
+++ b/component/s3storage/s3storage.go
@@ -479,16 +479,24 @@ func (s3 *S3Storage) FlushFile(options internal.FlushFileOptions) error {
 
 const blockSize = 4096
 
 func (s3 *S3Storage) StatFs() (*common.Statfs_t, bool, error) {
+	log.Trace("S3Storage::StatFs")
 	// cache_size = f_blocks * f_frsize/1024
 	// cache_size - used = f_frsize * f_bavail/1024
 	// cache_size - used = vfs.f_bfree * vfs.f_frsize / 1024
 	// if cache size is set to 0 then we have the root mount usage
-	sizeUsed := s3.storage.GetUsedSize()
+	sizeUsed, err := s3.storage.GetUsedSize()
+	if err != nil {
+		// TODO: will returning EIO break any applications that depend on StatFs?
+		return nil, false, err
+	}
 	stat := common.Statfs_t{
-		Blocks: sizeUsed / blockSize,
-		Bavail: sizeUsed / blockSize,
-		Bfree: sizeUsed / blockSize,
+		Blocks: sizeUsed / blockSize,
+		// there is no set capacity limit in cloud storage
+		// so we use zero for free and avail
+		// this zero value is used in the libfuse component to recognize that cloud storage responded
+		Bavail: 0,
+		Bfree: 0,
 		Bsize: blockSize,
 		Ffree: 1e9,
 		Files: 1e9,
@@ -496,6 +504,8 @@
 		Namemax: 255,
 	}
 
+	log.Debug("S3Storage::StatFs : responding with free=%d avail=%d blocks=%d (bsize=%d)", stat.Bfree, stat.Bavail, stat.Blocks, stat.Bsize)
+
 	return &stat, true, nil
 }