Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
21 commits
Select commit Hold shift + click to select a range
e7104c9
First pass at getting size of bucket
jfantinhardesty Aug 27, 2024
68ac53f
Get HeadBucketOutput from HeadBucket call.
foodprocessor Oct 21, 2024
2d2d6ba
Merge remote-tracking branch 'origin/report-size-bucket' into headBuc…
foodprocessor Oct 21, 2024
e07333b
Replace import lost on merge
foodprocessor Oct 21, 2024
3dfd985
Print the fields in the HeadBucket result
foodprocessor Oct 21, 2024
e2c41f9
Pull custom header to get bucket size
foodprocessor Oct 21, 2024
25d04e5
s3storage: Statfs: Use total size to signal used size
foodprocessor Oct 22, 2024
a9d82ab
Don't use fc for statfs
foodprocessor Oct 24, 2024
e865cd0
Fix underflow
foodprocessor Oct 24, 2024
e0fb7cd
Merge branch 'main' into headBucketUsage
foodprocessor Nov 1, 2024
082f56a
Let Libfuse StatFs math work with both file cache and cloud data
foodprocessor Nov 1, 2024
8a70e2e
Add new flag "usage-from-cache"
foodprocessor Nov 2, 2024
9263e4a
Use the same logic for StatFs for both platforms.
foodprocessor Nov 2, 2024
c173e53
Update tests
foodprocessor Nov 2, 2024
b7374b3
Scrub and update Cobra documentation
foodprocessor Nov 2, 2024
951fd02
Add more error handling and logging.
foodprocessor Nov 2, 2024
46a3614
Use extra math to pass test and allow avail and free to be different
foodprocessor Nov 5, 2024
63abf80
Merge remote-tracking branch 'origin/main' into headBucketUsage
foodprocessor Nov 5, 2024
db2c361
Remove flag from file cache
foodprocessor Nov 5, 2024
9faa81e
Add flag to s3storage
foodprocessor Nov 5, 2024
9e795e9
Add explicit note that only Lyve Cloud provides bucket size for StatFs
foodprocessor Nov 5, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions cmd/mount.go
Original file line number Diff line number Diff line change
Expand Up @@ -264,8 +264,8 @@ func parseConfig() error {
// Look at https://cobra.dev/ for more information
var mountCmd = &cobra.Command{
Use: "mount <mount path>",
Short: "Mounts the container as a filesystem",
Long: "Mounts the container as a filesystem",
Short: "Mount the container as a filesystem",
Long: "Mount the container as a filesystem",
SuggestFor: []string{"mnt", "mout"},
Args: cobra.ExactArgs(1),
FlagErrorHandling: cobra.ExitOnError,
Expand Down
4 changes: 2 additions & 2 deletions cmd/root.go
Original file line number Diff line number Diff line change
Expand Up @@ -56,8 +56,8 @@ var disableVersionCheck bool

var rootCmd = &cobra.Command{
Use: "cloudfuse",
Short: "Cloudfuse is an open source project developed to provide a virtual filesystem backed by the Azure Storage.",
Long: "Cloudfuse is an open source project developed to provide a virtual filesystem backed by the Azure Storage. It uses the fuse protocol to communicate with the Linux FUSE kernel module, and implements the filesystem operations using the Azure Storage REST APIs.",
Short: "Cloudfuse is an open source project developed to provide a virtual filesystem backed by cloud storage.",
Long: "Cloudfuse is an open source project developed to provide a virtual filesystem backed by cloud storage. It uses the FUSE protocol to communicate with the operating system, and implements filesystem operations using Azure or S3 cloud storage REST APIs.",
Version: common.CloudfuseVersion,
FlagErrorHandling: cobra.ExitOnError,
SilenceUsage: true,
Expand Down
2 changes: 1 addition & 1 deletion cmd/version.go
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ var check bool

var versionCmd = &cobra.Command{
Use: "version",
Short: "Command to print the current version along with optional check for latest version",
Short: "Print the current version and optionally check for latest version",
FlagErrorHandling: cobra.ExitOnError,
RunE: func(cmd *cobra.Command, args []string) error {
fmt.Println("cloudfuse version:", common.CloudfuseVersion)
Expand Down
45 changes: 45 additions & 0 deletions component/file_cache/file_cache.go
Original file line number Diff line number Diff line change
Expand Up @@ -376,6 +376,51 @@ func (c *FileCache) GetPolicyConfig(conf FileCacheOptions) cachePolicyConfig {
return cacheConfig
}

// StatFs reports filesystem statistics for the mount.
// It first defers to the next component (cloud storage); if that component
// populated the stats, its answer (and error) is passed through unchanged.
// Otherwise the file cache answers from its configured max size and the
// current local usage of the cache directory.
// Returns (stats, populated, error); populated=false means "no opinion".
func (fc *FileCache) StatFs() (*common.Statfs_t, bool, error) {

	// prefer usage reported by cloud storage when available
	statfs, populated, err := fc.NextComponent().StatFs()
	if populated {
		return statfs, populated, err
	}

	log.Trace("FileCache::StatFs")

	// cache_size = f_blocks * f_frsize/1024
	// cache_size - used = f_frsize * f_bavail/1024
	// cache_size - used = vfs.f_bfree * vfs.f_frsize / 1024
	// if cache size is set to 0 then we have the root mount usage
	maxCacheSize := fc.maxCacheSize * MB
	if maxCacheSize == 0 {
		log.Err("FileCache::StatFs : Not responding to StatFs because max cache size is zero")
		return nil, false, nil
	}
	// best-effort: on failure usage stays zero, but record why instead of
	// silently discarding the error
	usage, err := common.GetUsage(fc.tmpPath)
	if err != nil {
		log.Err("FileCache::StatFs : GetUsage failed (treating usage as zero). Here's why: %v", err)
	}
	available := maxCacheSize - usage*MB

	// how much space is available on the underlying file system?
	availableOnCacheFS, err := fc.getAvailableSize()
	if err != nil {
		log.Err("FileCache::StatFs : Not responding to StatFs because getAvailableSize failed. Here's why: %v", err)
		return nil, false, err
	}

	const blockSize = 4096

	stat := common.Statfs_t{
		Blocks: uint64(maxCacheSize) / uint64(blockSize),
		// clamp at zero so an over-full cache never underflows Bavail
		Bavail:  uint64(max(0, available)) / uint64(blockSize),
		Bfree:   availableOnCacheFS / uint64(blockSize),
		Bsize:   blockSize,
		Ffree:   1e9,
		Files:   1e9,
		Frsize:  blockSize,
		Namemax: 255,
	}

	log.Debug("FileCache::StatFs : responding with free=%d avail=%d blocks=%d (bsize=%d)", stat.Bfree, stat.Bavail, stat.Blocks, stat.Bsize)
	return &stat, true, nil
}

// isLocalDirEmpty: Whether or not the local directory is empty.
func isLocalDirEmpty(path string) bool {
f, _ := common.Open(path)
Expand Down
38 changes: 1 addition & 37 deletions component/file_cache/file_cache_linux.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,16 +29,14 @@ package file_cache

import (
"io/fs"
"math"
"os"
"syscall"
"time"

"golang.org/x/sys/unix"

"github.com/Seagate/cloudfuse/common"
"github.com/Seagate/cloudfuse/common/log"
"github.com/Seagate/cloudfuse/internal"
"golang.org/x/sys/unix"
)

// Creates a new object attribute
Expand Down Expand Up @@ -144,40 +142,6 @@ func (fc *FileCache) isDownloadRequired(localPath string, blobPath string, flock
return downloadRequired, fileExists, attr, err
}

// StatFs reports cache statistics to FUSE, using the configured max cache
// size as the total capacity and local disk usage of the cache directory as
// the consumed amount.
// Returns (stats, populated, error); populated=false means "no opinion".
// NOTE(review): this calls unix.Statfs("/") — the root filesystem — rather
// than c.tmpPath, so Bfree/Bsize/Ffree/Files/Frsize describe the root mount,
// not the cache's backing filesystem. TODO confirm that is intended.
func (c *FileCache) StatFs() (*common.Statfs_t, bool, error) {
	// cache_size = f_blocks * f_frsize/1024
	// cache_size - used = f_frsize * f_bavail/1024
	// cache_size - used = vfs.f_bfree * vfs.f_frsize / 1024
	// if cache size is set to 0 then we have the root mount usage
	maxCacheSize := c.maxCacheSize * MB
	if maxCacheSize == 0 {
		// no explicit cache limit configured; decline so another layer answers
		return nil, false, nil
	}
	usage, _ := common.GetUsage(c.tmpPath) // best-effort; error deliberately ignored, usage stays zero
	usage *= MB

	available := maxCacheSize - usage
	statfs := &unix.Statfs_t{}
	err := unix.Statfs("/", statfs)
	if err != nil {
		log.Debug("FileCache::StatFs : statfs err [%s].", err.Error())
		return nil, false, err
	}

	stat := common.Statfs_t{
		Blocks: uint64(maxCacheSize) / uint64(statfs.Frsize),
		// clamp at zero so an over-full cache never underflows Bavail
		Bavail:  uint64(math.Max(0, available)) / uint64(statfs.Frsize),
		Bfree:   statfs.Bavail,
		Bsize:   statfs.Bsize,
		Ffree:   statfs.Ffree,
		Files:   statfs.Files,
		Frsize:  statfs.Frsize,
		Namemax: 255,
	}

	return &stat, true, nil
}

func (fc *FileCache) getAvailableSize() (uint64, error) {
statfs := &unix.Statfs_t{}
err := unix.Statfs(fc.tmpPath, statfs)
Expand Down
42 changes: 0 additions & 42 deletions component/file_cache/file_cache_windows.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,6 @@ package file_cache

import (
"io/fs"
"math"
"os"
"syscall"
"time"
Expand Down Expand Up @@ -144,47 +143,6 @@ func (fc *FileCache) isDownloadRequired(localPath string, blobPath string, flock
return downloadRequired, fileExists, attr, err
}

// StatFs reports cache statistics to FUSE on Windows, using the configured
// max cache size as total capacity and GetDiskFreeSpaceEx for free space on
// the volume that backs the cache directory.
// Returns (stats, populated, error); populated=false means "no opinion".
// NOTE(review): unlike the Linux variant, usage is NOT scaled by MB before
// subtraction here (`available := maxCacheSize - usage`) — confirm whether
// GetUsage already returns bytes on this path or this is a unit mismatch.
func (fc *FileCache) StatFs() (*common.Statfs_t, bool, error) {
	// cache_size = f_blocks * f_frsize/1024
	// cache_size - used = f_frsize * f_bavail/1024
	// cache_size - used = vfs.f_bfree * vfs.f_frsize / 1024
	// if cache size is set to 0 then we have the root mount usage
	maxCacheSize := fc.maxCacheSize * MB
	if maxCacheSize == 0 {
		// no explicit cache limit configured; decline so another layer answers
		return nil, false, nil
	}
	usage, _ := common.GetUsage(fc.tmpPath) // best-effort; error deliberately ignored, usage stays zero
	available := maxCacheSize - usage

	var free, total, avail uint64

	// Get path to the cache
	pathPtr, err := windows.UTF16PtrFromString(fc.tmpPath)
	if err != nil {
		return nil, false, err
	}
	// NOTE(review): per the Win32 API the out-parameters are, in order,
	// "free bytes available to caller", "total bytes", "total free bytes" —
	// so the local names `free` and `avail` look swapped relative to the
	// API's meaning; verify which value Bfree should really carry.
	err = windows.GetDiskFreeSpaceEx(pathPtr, &free, &total, &avail)
	if err != nil {
		log.Debug("FileCache::StatFs : statfs err [%s].", err.Error())
		return nil, false, err
	}

	// fixed block size used to convert byte counts into statfs block units
	const blockSize = 4096

	stat := common.Statfs_t{
		Blocks: uint64(maxCacheSize) / uint64(blockSize),
		// clamp at zero so an over-full cache never underflows Bavail
		Bavail:  uint64(math.Max(0, available)) / uint64(blockSize),
		Bfree:   free / uint64(blockSize),
		Bsize:   blockSize,
		Ffree:   1e9,
		Files:   1e9,
		Frsize:  blockSize,
		Namemax: 255,
	}

	return &stat, true, nil
}

func (fc *FileCache) getAvailableSize() (uint64, error) {
var free, total, avail uint64

Expand Down
32 changes: 20 additions & 12 deletions component/libfuse/libfuse2_handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -333,24 +333,32 @@ func (cf *CgofuseFS) Statfs(path string, stat *fuse.Statfs_t) int {
if populated {
stat.Bsize = uint64(attr.Bsize)
stat.Frsize = uint64(attr.Frsize)
stat.Blocks = attr.Blocks
stat.Bavail = attr.Bavail
stat.Bfree = attr.Bfree
// cloud storage always sets free and avail to zero
statsFromCloudStorage := attr.Bfree == 0 && attr.Bavail == 0
// calculate blocks used from attr
blocksUnavailable := attr.Blocks - attr.Bavail
blocksUsed := attr.Blocks - attr.Bfree
// we only use displayCapacity to complement used size from cloud storage
if statsFromCloudStorage {
displayCapacityBlocks := fuseFS.displayCapacityMb * common.MbToBytes / uint64(attr.Bsize)
// if used > displayCapacity, then report used and show that we are out of space
stat.Blocks = max(displayCapacityBlocks, blocksUnavailable)
} else {
stat.Blocks = attr.Blocks
}
// adjust avail and free to make sure we display used space correctly
stat.Bavail = stat.Blocks - blocksUnavailable
stat.Bfree = stat.Blocks - blocksUsed
stat.Files = attr.Files
stat.Ffree = attr.Ffree
stat.Namemax = attr.Namemax
} else {
var free, total, avail uint64
// TODO: if display capacity is specified, should it overwrite populated Bavail?
total = fuseFS.displayCapacityMb * common.MbToBytes
avail = total
free = total

stat.Bsize = blockSize
stat.Frsize = blockSize
stat.Blocks = total / blockSize
stat.Bavail = avail / blockSize
stat.Bfree = free / blockSize
displayCapacityBlocks := fuseFS.displayCapacityMb * common.MbToBytes / blockSize
stat.Blocks = displayCapacityBlocks
stat.Bavail = displayCapacityBlocks
stat.Bfree = displayCapacityBlocks
stat.Files = 1e9
stat.Ffree = 1e9
stat.Namemax = maxNameSize
Expand Down
37 changes: 36 additions & 1 deletion component/s3storage/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ import (
"net/http"
"net/url"
"os"
"strconv"
"strings"
"syscall"
"time"
Expand All @@ -47,12 +48,14 @@ import (
"github.com/Seagate/cloudfuse/internal/stats_manager"

"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/aws/middleware"
awsHttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go"
smithyHttp "github.com/aws/smithy-go/transport/http"
)

const (
Expand Down Expand Up @@ -203,7 +206,7 @@ func (cl *Client) Configure(cfg Config) error {
}

// Check that the provided bucket exists and that user has access to bucket
exists, err := cl.headBucket()
exists, err := cl.bucketExists()
if err != nil || !exists {
// From the aws-sdk-go-v2 documentation
// If the bucket does not exist or you do not have permission to access it,
Expand Down Expand Up @@ -277,6 +280,11 @@ func (cl *Client) SetPrefixPath(path string) error {
return nil
}

// bucketExists reports whether the configured bucket is reachable via
// HeadBucket. A result of syscall.ENOENT means the bucket does not exist
// (or the caller lacks access to it); whatever error HeadBucket produced
// is passed through to the caller alongside the existence flag.
func (cl *Client) bucketExists() (bool, error) {
	_, err := cl.headBucket()
	if err == syscall.ENOENT {
		return false, err
	}
	return true, err
}

// CreateFile : Create a new file in the bucket/virtual directory
func (cl *Client) CreateFile(name string, mode os.FileMode) error {
log.Trace("Client::CreateFile : name %s", name)
Expand Down Expand Up @@ -1122,3 +1130,30 @@ func (cl *Client) combineSmallBlocks(name string, blockList []*common.Block) ([]
}
return newBlockList, nil
}

// GetUsedSize returns the number of bytes currently stored in the bucket.
// Lyve Cloud reports bucket size via a custom response header
// (X-Rstor-Size, or X-Lyve-Size on newer endpoints) on HeadBucket; other
// S3 providers do not set it, in which case an error is returned.
func (cl *Client) GetUsedSize() (uint64, error) {
	headBucketOutput, err := cl.headBucket()
	if err != nil {
		return 0, err
	}

	// dig the raw HTTP response out of the SDK result metadata so the
	// non-standard size header can be read
	response, ok := middleware.GetRawResponse(headBucketOutput.ResultMetadata).(*smithyHttp.Response)
	if !ok || response == nil {
		return 0, fmt.Errorf("Failed GetRawResponse from HeadBucketOutput")
	}

	// Header.Get canonicalizes the key before lookup, so this is robust to
	// whatever casing the server used (direct map indexing is not), and it
	// also treats a present-but-empty header slice as missing
	sizeValue := response.Header.Get("X-Rstor-Size")
	if sizeValue == "" {
		sizeValue = response.Header.Get("X-Lyve-Size")
	}
	if sizeValue == "" {
		return 0, fmt.Errorf("HeadBucket response has no size header (is the endpoint not Lyve Cloud?)")
	}

	bucketSizeBytes, err := strconv.ParseUint(sizeValue, 10, 64)
	if err != nil {
		return 0, err
	}

	return bucketSizeBytes, nil
}
1 change: 1 addition & 0 deletions component/s3storage/client_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,7 @@ func newTestClient(configuration string) (*Client, error) {
partSize: conf.PartSizeMb * common.MbToBytes,
uploadCutoff: conf.UploadCutoffMb * common.MbToBytes,
usePathStyle: conf.UsePathStyle,
disableUsage: conf.DisableUsage,
}
// create a Client
client, err := NewConnection(configForS3Client)
Expand Down
2 changes: 2 additions & 0 deletions component/s3storage/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,7 @@ type Options struct {
EnableChecksum bool `config:"enable-checksum" yaml:"enable-checksum,omitempty"`
ChecksumAlgorithm types.ChecksumAlgorithm `config:"checksum-algorithm" yaml:"checksum-algorithm,omitempty"`
UsePathStyle bool `config:"use-path-style" yaml:"use-path-style,omitempty"`
DisableUsage bool `config:"disable-usage" yaml:"disable-usage,omitempty"`
}

// ParseAndValidateConfig : Parse and validate config
Expand All @@ -77,6 +78,7 @@ func ParseAndValidateConfig(s3 *S3Storage, opt Options) error {
s3.stConfig.restrictedCharsWin = opt.RestrictedCharsWin
s3.stConfig.disableConcurrentDownload = opt.DisableConcurrentDownload
s3.stConfig.usePathStyle = opt.UsePathStyle
s3.stConfig.disableUsage = opt.DisableUsage

// Part size must be at least 5 MB and smaller than 5GB. Otherwise, set to default.
if opt.PartSizeMb < 5 || opt.PartSizeMb > MaxPartSizeMb {
Expand Down
2 changes: 2 additions & 0 deletions component/s3storage/connection.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,7 @@ type Config struct {
checksumAlgorithm types.ChecksumAlgorithm
usePathStyle bool
disableSymlink bool
disableUsage bool
}

// TODO: move s3AuthConfig to s3auth.go
Expand Down Expand Up @@ -112,4 +113,5 @@ type S3Connection interface {
StageAndCommit(name string, bol *common.BlockOffsetList) error

NewCredentialKey(_, _ string) error
GetUsedSize() (uint64, error)
}
37 changes: 37 additions & 0 deletions component/s3storage/s3storage.go
Original file line number Diff line number Diff line change
Expand Up @@ -476,6 +476,43 @@ func (s3 *S3Storage) FlushFile(options internal.FlushFileOptions) error {
return s3.storage.StageAndCommit(options.Handle.Path, options.Handle.CacheObj.BlockOffsetList)
}

// blockSize is the fixed block size (in bytes) used to convert byte counts
// into the block units that statfs reports.
const blockSize = 4096

// StatFs reports bucket usage to the caller (ultimately FUSE).
// Usage comes from the provider's custom HeadBucket size header (only Lyve
// Cloud provides this); when the disable-usage config flag is set, the
// component declines so the libfuse layer falls back to display capacity.
// Returns (stats, populated, error); populated=false means "no opinion".
func (s3 *S3Storage) StatFs() (*common.Statfs_t, bool, error) {
	// honor the disable-usage flag: decline to answer
	if s3.stConfig.disableUsage {
		return nil, false, nil
	}

	log.Trace("S3Storage::StatFs")
	// cache_size = f_blocks * f_frsize/1024
	// cache_size - used = f_frsize * f_bavail/1024
	// cache_size - used = vfs.f_bfree * vfs.f_frsize / 1024
	// if cache size is set to 0 then we have the root mount usage
	sizeUsed, err := s3.storage.GetUsedSize()
	if err != nil {
		// TODO: will returning EIO break any applications that depend on StatFs?
		// NOTE(review): this returns populated=true with a nil stat pointer —
		// callers must check err before dereferencing; confirm they all do.
		return nil, true, err
	}

	stat := common.Statfs_t{
		Blocks: sizeUsed / blockSize,
		// there is no set capacity limit in cloud storage
		// so we use zero for free and avail
		// this zero value is used in the libfuse component to recognize that cloud storage responded
		Bavail:  0,
		Bfree:   0,
		Bsize:   blockSize,
		Ffree:   1e9,
		Files:   1e9,
		Frsize:  blockSize,
		Namemax: 255,
	}

	log.Debug("S3Storage::StatFs : responding with free=%d avail=%d blocks=%d (bsize=%d)", stat.Bfree, stat.Bavail, stat.Blocks, stat.Bsize)

	return &stat, true, nil
}

// TODO: decide if the TODO below is relevant and delete if not
// TODO : Below methods are pending to be implemented
// FlushFile(*handlemap.Handle) error
Expand Down
Loading