diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md new file mode 100644 index 000000000..6b192d64d --- /dev/null +++ b/.github/copilot-instructions.md @@ -0,0 +1,192 @@ +# Azure Storage Fuse (cloudfuse) + +Cloudfuse is a FUSE filesystem driver that provides a virtual filesystem backed by S3 or Azure Blob Storage. It uses libfuse (fuse3) to communicate with the Linux FUSE kernel module and implements filesystem operations using the AWS S3 or Azure Storage REST APIs. + +Always reference these instructions first and fall back to search or bash commands only when you encounter unexpected information that does not match the info here. + +## Working Effectively + +### Bootstrap, Build, and Test the Repository + +**CRITICAL**: All build and test commands include specific timeout warnings. NEVER CANCEL long-running operations. + +- Install required dependencies: + ```bash + sudo apt update + sudo apt install -y libfuse3-dev fuse3 gcc + ``` + +- Install Go 1.25.4+ (already available in most environments): + ```bash + go version # Should show 1.25.4 or higher + ``` + +- Build the cloudfuse binary: + ```bash + ./build.sh + ``` + **Timing**: ~30 seconds. NEVER CANCEL. Use a timeout of 120+ seconds. + +- Build the health monitor binary: + ```bash + ./build.sh health + ``` + **Timing**: ~5 seconds. Use a timeout of 60+ seconds. + +- Verify binary functionality: + ```bash + ./cloudfuse --version + ./cloudfuse -h + ``` + +### Testing + +- Run unit tests (core components only): + ```bash + go test -v -timeout=10m ./internal/... ./common/... --tags=unittest,fuse3 + ``` + **Timing**: ~2 minutes. NEVER CANCEL. Use a timeout of 15+ minutes. + +- Run the full unit tests (some may fail without Azure credentials): + ```bash + go test -v -timeout=45m ./... --tags=unittest,fuse3 + ``` + **WARNING**: Expect network/credential test failures. **Timing**: ~5-10 minutes. NEVER CANCEL. Use a timeout of 60+ minutes. + +- Run linting: + ```bash + # Install golangci-lint if not available + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin + + # Run linting + $(go env GOPATH)/bin/golangci-lint run --tests=false --build-tags fuse3 --max-issues-per-linter=0 + ``` + **Timing**: ~10 seconds. Use a timeout of 60+ seconds. + +- Check code formatting: + ```bash + gofmt -s -l -d . + ``` + **Timing**: ~2 seconds. Should return no output if properly formatted. + +### Validation Scenarios + +**ALWAYS test these scenarios after making changes**: + +1. **Binary Creation and Basic Commands**: + ```bash + ./build.sh + ./cloudfuse --version + ./cloudfuse -h + ./cloudfuse mount --help + ``` + +2. **Config Generation**: + ```bash + mkdir -p /tmp/cloudfuse-test + ./cloudfuse gen-config --tmp-path=/tmp/cloudfuse-test --o /tmp/cloudfuse-test/config.yaml + cat /tmp/cloudfuse-test/config.yaml + ``` + +3. **Health Monitor**: + ```bash + ./build.sh health + ./cfusemon --help + ``` + +4. **Format and Lint Validation**: + ```bash + gofmt -s -l -d .
# Should return no output + $(go env GOPATH)/bin/golangci-lint run --tests=false --build-tags fuse3 --max-issues-per-linter=0 + ``` + +## Build System Details + +- **Primary Build Script**: `./build.sh` - builds cloudfuse with fuse3 by default +- **Build Variants**: + - `./build.sh` - standard fuse3 build + - `./build.sh fuse2` - legacy fuse2 build + - `./build.sh health` - health monitor binary +- **Output**: `cloudfuse` binary (~30MB) and optionally `cfusemon` binary (~6MB) +- **Go Version**: Requires Go 1.25.4+ (specified in go.mod) +- **Tags**: Use `fuse3` tag for testing/building (default), `fuse2` for legacy systems + +## Testing Infrastructure + +- **Unit Tests**: Use `--tags=unittest,fuse3` to run unit tests +- **E2E Tests**: Located in `test/e2e_tests/` - require Azure Storage credentials +- **Mount Tests**: `test/mount_test/` - comprehensive filesystem testing +- **Performance Tests**: `test/scripts/` - benchmarking and stress testing +- **Test Timeout**: Mount tests can take up to 120 minutes - NEVER CANCEL + +## Key Components and Architecture + +- **cmd/**: CLI commands and main entry points +- **component/**: Core components (libfuse, azstorage, caching) +- **common/**: Shared utilities, configuration, logging +- **internal/**: Internal APIs and pipeline management +- **test/**: All testing code and scripts +- **tools/health-monitor/**: cloudfuse monitoring tool + +## Configuration + +- **Sample Configs**: + - `sampleFileCacheConfig.yaml` - file-based caching + - `sampleBlockCacheConfig.yaml` - block-based caching + - `setup/baseConfig.yaml` - complete configuration options +- **Config Generation**: Use `cloudfuse gen-config` to auto-generate configs +- **Authentication**: Supports account keys, SAS tokens, MSI, SPN, Azure CLI + +## Important Notes + +- **Mount Operations**: Require Azure Storage credentials - will fail in testing without them +- **Permissions**: May require sudo for actual mount operations +- **FUSE Configuration**: `/etc/fuse.conf` may need `user_allow_other` enabled for multi-user access +- **Dependencies**: Requires libfuse3-dev for building, fuse3 for runtime +- **Platform**: Linux only (Ubuntu 20+, other distros listed in wiki) + +## Common Pre-commit Validation + +Always run these before committing changes: + +```bash +# Format check +gofmt -s -l -d . + +# Build verification +./build.sh + +# Core unit tests +go test -v -timeout=10m ./internal/... ./common/... 
--tags=unittest,fuse3 + +# Linting +$(go env GOPATH)/bin/golangci-lint run --tests=false --build-tags fuse3 --max-issues-per-linter=0 + +# Binary functionality +./cloudfuse --version +./cloudfuse gen-config --tmp-path=/tmp/test --o /tmp/test-config.yaml +``` + +## CI/CD Integration + +- **Build Pipeline**: GitHub Actions +- **Testing**: Automated on Ubuntu 24, both x86 and ARM64 +- **Performance**: Dedicated benchmark workflows +- **Security**: CodeQL analysis and dependency scanning +- **Release**: Automated package building and distribution + +## Troubleshooting + +- **Build Failures**: Check the Go version, ensure libfuse3-dev is installed +- **Test Failures**: Network/credential tests are expected to fail without Azure setup +- **Mount Failures**: Verify FUSE permissions and Azure credentials +- **Performance**: Use the health monitor (`cfusemon`) for runtime diagnostics + +## Key Files to Monitor + +When making changes, always check these files for consistency: +- `go.mod` - dependency versions +- `main.go` - entry point +- `build.sh` - build configuration +- `cmd/mount.go` - core mount functionality +- Configuration templates in `setup/` and root directory \ No newline at end of file diff --git a/.golangci.yml b/.golangci.yml index 6ade0370d..c542498c8 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -34,6 +34,8 @@ linters: - third_party$ - builtin$ - examples$ + - component/azstorage/config.go + - common/version.go rules: - path: component/libfuse/libfuse2_handler_test_wrapper.go text: "(\\w) (\\w+|\\(\\*\\w+\\)\\.\\w+) is unused" diff --git a/cmd/mount.go b/cmd/mount.go index ea145afd3..2b66fec3c 100644 --- a/cmd/mount.go +++ b/cmd/mount.go @@ -460,7 +460,6 @@ var mountCmd = &cobra.Command{ Level: logLevel, TimeTracker: options.Logging.TimeTracker, }) - if err != nil { return fmt.Errorf("failed to initialize logger [%s]", err.Error()) } @@ -528,18 +527,14 @@ var mountCmd = &cobra.Command{ "mount : failed to initialize new pipeline :: To authenticate using MSI with object-ID, ensure Azure CLI is installed. Alternatively, use app/client ID or resource ID for authentication. [%v]", err, ) - return Destroy( - fmt.Sprintf( - "failed to initialize new pipeline :: To authenticate using MSI with object-ID, ensure Azure CLI is installed. Alternatively, use app/client ID or resource ID for authentication. [%s]", - err.Error(), - ), + return fmt.Errorf( + "failed to initialize new pipeline :: To authenticate using MSI with object-ID, ensure Azure CLI is installed. Alternatively, use app/client ID or resource ID for authentication. 
[%s]", + err.Error(), ) } log.Err("mount : failed to initialize new pipeline [%v]", err) - return Destroy( - fmt.Sprintf("mount : failed to initialize new pipeline [%s]", err.Error()), - ) + return fmt.Errorf("mount : failed to initialize new pipeline [%s]", err.Error()) } // Dry run ends here @@ -668,16 +663,15 @@ func runPipeline(pipeline *internal.Pipeline, ctx context.Context) error { err := pipeline.Start(ctx) if err != nil { log.Err("mount: error unable to start pipeline [%s]", err.Error()) - return Destroy(fmt.Sprintf("unable to start pipeline [%s]", err.Error())) + return fmt.Errorf("unable to start pipeline [%s]", err.Error()) } err = pipeline.Stop() if err != nil { log.Err("mount: error unable to stop pipeline [%s]", err.Error()) - return Destroy(fmt.Sprintf("unable to stop pipeline [%s]", err.Error())) + return fmt.Errorf("unable to stop pipeline [%s]", err.Error()) } - _ = log.Destroy() return nil } @@ -928,12 +922,3 @@ func init() { config.AttachFlagCompletions(mountCmd) config.AddConfigChangeEventListener(config.ConfigChangeEventHandlerFunc(OnConfigChange)) } - -func Destroy(message string) error { - _ = log.Destroy() - if message != "" { - return fmt.Errorf("%s", message) - } - - return nil -} diff --git a/cmd/mount_linux.go b/cmd/mount_linux.go index 502051055..5ce75940d 100644 --- a/cmd/mount_linux.go +++ b/cmd/mount_linux.go @@ -41,6 +41,7 @@ import ( "strings" "time" + "github.com/Seagate/cloudfuse/common" "github.com/Seagate/cloudfuse/common/config" "github.com/Seagate/cloudfuse/common/log" "github.com/Seagate/cloudfuse/internal" @@ -63,6 +64,11 @@ func createDaemon( umask int, fname string, ) error { + pid := os.Getpid() + traceFile := fmt.Sprintf("%s.%d.trace", strings.ReplaceAll(options.MountPath, "/", "_"), pid) + // we link this file to stderr of child process in daemon mode + traceFilePath := filepath.Join(os.ExpandEnv(common.DefaultWorkDir), traceFile) + dmnCtx := &daemon.Context{ PidFileName: pidFileName, PidFilePerm: pidFilePerm, @@ -92,7 +98,7 @@ retry: rmErr := os.Remove(pidFileName) if rmErr != nil { log.Err("mount : auto cleanup failed [%v]", rmErr.Error()) - return Destroy(fmt.Sprintf("failed to daemonize application [%s]", err.Error())) + return fmt.Errorf("failed to daemonize application [%s]", err.Error()) } goto retry } @@ -151,9 +157,15 @@ retry: buff, err := os.ReadFile(dmnCtx.LogFileName) if err != nil { log.Err("mount: failed to read child [%v] failure logs [%s]", child.Pid, err.Error()) - return Destroy(fmt.Sprintf("failed to mount, please check logs [%s]", err.Error())) + err = fmt.Errorf("failed to mount, please check logs [%s]", err.Error()) } else { - return Destroy(string(buff)) + err = fmt.Errorf("%s", string(buff)) + } + + // Safe to delete the temp file. + rmErr := os.Remove(traceFilePath) + if rmErr != nil { + log.Err("mount : Failed to delete temp file: %s[%v]", traceFilePath, err) } case <-time.After(options.WaitForMount): diff --git a/common/log/logger.go b/common/log/logger.go index 1f4806645..e99e33859 100644 --- a/common/log/logger.go +++ b/common/log/logger.go @@ -26,6 +26,7 @@ package log import ( + "fmt" "log" "time" @@ -136,8 +137,12 @@ func SetLogLevel(lvl common.LogLevel) { } // Destroy : DeInitialize the logging library +// This should only be called from the main function. 
func Destroy() error { - return logObj.Destroy() + if logObj != nil { + return logObj.Destroy() + } + return fmt.Errorf("Logger is not initialized") } // ------------------ Public methods for logging events ------------------ diff --git a/common/types.go b/common/types.go index 467d61556..76ec422d5 100644 --- a/common/types.go +++ b/common/types.go @@ -182,12 +182,11 @@ type LogConfig struct { Tag string // logging tag which can be either cloudfuse or cfusemon } -// Flags for blocks +// Flags for block const ( BlockFlagUnknown uint16 = iota DirtyBlock TruncatedBlock - RemovedBlocks ) type Block struct { @@ -223,14 +222,11 @@ func (block *Block) Truncated() bool { return block.Flags.IsSet(TruncatedBlock) } -func (block *Block) Removed() bool { - return block.Flags.IsSet(RemovedBlocks) -} - // Flags for block offset list const ( - BolFlagUnknown uint16 = iota - SmallFile + BlobFlagUnknown uint16 = iota + BlobFlagHasNoBlocks // set if the blob does not have any blocks + BlobFlagBlockListModified ) // list that holds blocks containing ids and corresponding offsets @@ -242,13 +238,39 @@ type BlockOffsetList struct { Mtime time.Time } -// Dirty : Handle is dirty or not -func (bol *BlockOffsetList) SmallFile() bool { - return bol.Flags.IsSet(SmallFile) +func (bol *BlockOffsetList) HasNoBlocks() bool { + return len(bol.BlockList) == 0 +} + +func (bol *BlockOffsetList) IsBlockListModified() bool { + return bol.Flags.IsSet(BlobFlagBlockListModified) +} + +func (bol *BlockOffsetList) ValidateBlockListAgainstFileSize(fileSize int64) bool { + if bol.HasNoBlocks() { + return fileSize == 0 + } + if bol.BlockList[len(bol.BlockList)-1].EndIndex != fileSize { + return false + } + return true +} + +func (bol *BlockOffsetList) HasAllBlocksWithSameBlockSize() (blockSize int64, ok bool) { + if bol.HasNoBlocks() { + return 0, true + } + blockSize = bol.BlockList[0].EndIndex - bol.BlockList[0].StartIndex + for _, blk := range bol.BlockList { + if blk.EndIndex-blk.StartIndex != blockSize { + return 0, false + } + } + return blockSize, true } // return true if item found and index of the item -func (bol BlockOffsetList) BinarySearch(offset int64) (bool, int) { +func (bol *BlockOffsetList) BinarySearch(offset int64) (bool, int) { lowerBound := 0 size := len(bol.BlockList) higherBound := size - 1 diff --git a/component/attr_cache/attr_cache.go b/component/attr_cache/attr_cache.go index c3e0bf071..f9e6ddb46 100644 --- a/component/attr_cache/attr_cache.go +++ b/component/attr_cache/attr_cache.go @@ -926,7 +926,7 @@ func (ac *AttrCache) RenameFile(options internal.RenameFileOptions) error { } // WriteFile : Mark the file invalid -func (ac *AttrCache) WriteFile(options internal.WriteFileOptions) (int, error) { +func (ac *AttrCache) WriteFile(options *internal.WriteFileOptions) (int, error) { // GetAttr on cache hit will serve from cache, on cache miss will serve from next component. 
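Aside: the new `ValidateBlockListAgainstFileSize` helper in `common/types.go` encodes the invariant that the last block must end exactly at the file size. A minimal sketch with simplified stand-in types (not the real `common` package):

```go
package main

import "fmt"

// Simplified stand-ins for common.Block / common.BlockOffsetList.
type Block struct{ StartIndex, EndIndex int64 }

type BlockOffsetList struct{ BlockList []*Block }

func (bol *BlockOffsetList) HasNoBlocks() bool { return len(bol.BlockList) == 0 }

// Mirrors ValidateBlockListAgainstFileSize: the last block's EndIndex must
// line up with the file size, otherwise the cached list is inconsistent.
func (bol *BlockOffsetList) ValidateBlockListAgainstFileSize(fileSize int64) bool {
	if bol.HasNoBlocks() {
		return fileSize == 0
	}
	return bol.BlockList[len(bol.BlockList)-1].EndIndex == fileSize
}

func main() {
	bol := &BlockOffsetList{BlockList: []*Block{
		{StartIndex: 0, EndIndex: 8},
		{StartIndex: 8, EndIndex: 10},
	}}
	fmt.Println(bol.ValidateBlockListAgainstFileSize(10)) // true
	fmt.Println(bol.ValidateBlockListAgainstFileSize(12)) // false: stale list
}
```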
attr, err := ac.GetAttr( @@ -983,14 +983,14 @@ func (ac *AttrCache) TruncateFile(options internal.TruncateFileOptions) error { if !found || !truncatedItem.exists() { log.Warn("AttrCache::TruncateFile : %s replacing missing cache entry", options.Name) // replace the missing entry - truncatedAttr := internal.CreateObjAttr(options.Name, options.Size, modifyTime) + truncatedAttr := internal.CreateObjAttr(options.Name, options.NewSize, modifyTime) truncatedItem = ac.cache.insert(insertOptions{ attr: truncatedAttr, exists: true, cachedAt: modifyTime, }) } - truncatedItem.setSize(options.Size, modifyTime) + truncatedItem.setSize(options.NewSize, modifyTime) } return err } diff --git a/component/attr_cache/attr_cache_test.go b/component/attr_cache/attr_cache_test.go index 5629f3bab..960cc0bd4 100644 --- a/component/attr_cache/attr_cache_test.go +++ b/component/attr_cache/attr_cache_test.go @@ -1390,9 +1390,9 @@ func (suite *attrCacheTestSuite) TestWriteFileError() { suite.mock.EXPECT(). GetAttr(internal.GetAttrOptions{Name: path, RetrieveMetadata: true}). Return(&internal.ObjAttr{Path: path}, nil) - suite.mock.EXPECT().WriteFile(options).Return(0, errors.New("Failed to write a file")) + suite.mock.EXPECT().WriteFile(&options).Return(0, errors.New("Failed to write a file")) - _, err := suite.attrCache.WriteFile(options) + _, err := suite.attrCache.WriteFile(&options) suite.assert.Error(err) _, found := suite.attrCache.cache.get(path) suite.assert.True(found) @@ -1412,9 +1412,9 @@ func (suite *attrCacheTestSuite) TestWriteFileDoesNotExist() { suite.mock.EXPECT(). GetAttr(internal.GetAttrOptions{Name: path, RetrieveMetadata: true}). Return(&internal.ObjAttr{Path: path}, nil) - suite.mock.EXPECT().WriteFile(options).Return(0, nil) + suite.mock.EXPECT().WriteFile(&options).Return(0, nil) - _, err := suite.attrCache.WriteFile(options) + _, err := suite.attrCache.WriteFile(&options) suite.assert.NoError(err) _, found := suite.attrCache.cache.get(path) suite.assert.True(found) @@ -1432,9 +1432,9 @@ func (suite *attrCacheTestSuite) TestWriteFileExists() { options := internal.WriteFileOptions{Handle: &handle, Metadata: nil} // Entry Already Exists suite.addPathToCache(path, true) - suite.mock.EXPECT().WriteFile(options).Return(0, nil) + suite.mock.EXPECT().WriteFile(&options).Return(0, nil) - _, err := suite.attrCache.WriteFile(options) + _, err := suite.attrCache.WriteFile(&options) suite.assert.NoError(err) suite.assertExists(path) } @@ -1445,7 +1445,7 @@ func (suite *attrCacheTestSuite) TestTruncateFile() { path := "a" size := 1024 - options := internal.TruncateFileOptions{Name: path, Size: int64(size)} + options := internal.TruncateFileOptions{Name: path, NewSize: int64(size)} // Error suite.mock.EXPECT().TruncateFile(options).Return(errors.New("Failed to truncate a file")) diff --git a/component/azstorage/azstorage.go b/component/azstorage/azstorage.go index 0da081ec6..2cdefa7c0 100644 --- a/component/azstorage/azstorage.go +++ b/component/azstorage/azstorage.go @@ -104,7 +104,7 @@ reconfigure: } // If user has not specified the account type then detect it's HNS or FNS - if conf.AccountType == "" && az.storage.IsAccountADLS() { + if conf.AccountType == "" && !config.IsSet(compName+".use-adls") && az.storage.IsAccountADLS() { log.Crit( "AzStorage::Configure : Auto detected account type as adls, reconfiguring storage connection.", ) @@ -445,7 +445,7 @@ func (az *AzStorage) RenameFile(options internal.RenameFileOptions) error { return err } -func (az *AzStorage) ReadInBuffer(options 
internal.ReadInBufferOptions) (length int, err error) { +func (az *AzStorage) ReadInBuffer(options *internal.ReadInBufferOptions) (length int, err error) { //log.Trace("AzStorage::ReadInBuffer : Read %s from %d offset", h.Path, offset) var size int64 @@ -485,7 +485,7 @@ func (az *AzStorage) ReadInBuffer(options internal.ReadInBufferOptions) (length return } -func (az *AzStorage) WriteFile(options internal.WriteFileOptions) (int, error) { +func (az *AzStorage) WriteFile(options *internal.WriteFileOptions) (int, error) { err := az.storage.Write(options) return len(options.Data), err } @@ -498,11 +498,15 @@ func (az *AzStorage) GetFileBlockOffsets( } func (az *AzStorage) TruncateFile(options internal.TruncateFileOptions) error { - log.Trace("AzStorage::TruncateFile : %s to %d bytes", options.Name, options.Size) - err := az.storage.TruncateFile(options.Name, options.Size) + log.Trace("AzStorage::TruncateFile : %s to %d bytes", options.Name, options.NewSize) + err := az.storage.TruncateFile(options) if err == nil { - azStatsCollector.PushEvents(truncateFile, options.Name, map[string]any{size: options.Size}) + azStatsCollector.PushEvents( + truncateFile, + options.Name, + map[string]any{size: options.NewSize}, + ) azStatsCollector.UpdateStats(stats_manager.Increment, truncateFile, (int64)(1)) } return err diff --git a/component/azstorage/block_blob.go b/component/azstorage/block_blob.go index a83e353a0..eddff9d82 100644 --- a/component/azstorage/block_blob.go +++ b/component/azstorage/block_blob.go @@ -36,6 +36,7 @@ import ( "path/filepath" "reflect" "runtime" + "sort" "strings" "syscall" "time" @@ -189,11 +190,20 @@ func (bb *BlockBlob) TestPipeline() error { return nil } + includeFields := bb.listDetails + if bb.listDetails.Permissions { + // This flag is set to true if the user has explicitly asked to mount an HNS account + // Validate that the account is indeed HNS by checking the permissions field + // If the account is FNS, the call will fail with InvalidQueryParameterValue and the mount will fail + includeFields.Permissions = true + } + listBlobPager := bb.Container.NewListBlobsHierarchyPager( "/", &container.ListBlobsHierarchyOptions{ MaxResults: to.Ptr((int32)(2)), Prefix: &bb.Config.prefixPath, + Include: includeFields, }, ) @@ -207,6 +217,12 @@ func (bb *BlockBlob) TestPipeline() error { var respErr *azcore.ResponseError errors.As(err, &respErr) if respErr != nil { + if respErr.ErrorCode == "InvalidQueryParameterValue" { + // User is explicitly mounting an FNS account as HNS, which is not supported + return fmt.Errorf( + "BlockBlob::TestPipeline : Detected FNS account being mounted as HNS", + ) + } return fmt.Errorf("BlockBlob::TestPipeline : [%s]", respErr.ErrorCode) } return err @@ -1318,10 +1334,12 @@ func (bb *BlockBlob) GetFileBlockOffsets(name string) (*common.BlockOffsetList, // if the block list is empty, it's a small file if len(storageBlockList.CommittedBlocks) == 0 { - blockList.Flags.Set(common.SmallFile) + blockList.BlockIdLength = common.BlockIDLength return &blockList, nil } + blockList.BlockList = make([]*common.Block, 0, len(storageBlockList.CommittedBlocks)) + for _, block := range storageBlockList.CommittedBlocks { blk := &common.Block{ Id: *block.Name, @@ -1349,6 +1367,22 @@ func (bb *BlockBlob) createBlock(blockIdLength, startIndex, size int64) *common. 
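Aside: the HNS probe added to `TestPipeline` above relies on the service rejecting the `Permissions` list-include on FNS accounts. A sketch of the error classification, with `responseError` as an assumed stand-in for `azcore.ResponseError` (only the field used here is modeled):

```go
package main

import (
	"errors"
	"fmt"
)

// responseError is a stand-in for azcore.ResponseError (assumed shape).
type responseError struct{ ErrorCode string }

func (e *responseError) Error() string { return e.ErrorCode }

// classify mirrors the new TestPipeline check: listing with the Permissions
// include is only valid on HNS accounts, so an FNS account answers with
// InvalidQueryParameterValue and the mount is rejected with a clear message.
func classify(err error) error {
	var respErr *responseError
	if errors.As(err, &respErr) && respErr.ErrorCode == "InvalidQueryParameterValue" {
		return fmt.Errorf("detected FNS account being mounted as HNS")
	}
	return err
}

func main() {
	fmt.Println(classify(&responseError{ErrorCode: "InvalidQueryParameterValue"}))
}
```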
return newBlock } +func (bb *BlockBlob) createBlockFromBuffer( + blockIdLength, startIndex int64, + data []byte, +) *common.Block { + newBlockId := common.GetBlockID(blockIdLength) + newBlock := &common.Block{ + Id: newBlockId, + StartIndex: startIndex, + EndIndex: startIndex + int64(len(data)), + Data: data, + } + // mark dirty since it is a new block with data + newBlock.Flags.Set(common.DirtyBlock) + return newBlock +} + // create new blocks based on the offset and total length we're adding to the file func (bb *BlockBlob) createNewBlocks( blockList *common.BlockOffsetList, @@ -1385,178 +1419,362 @@ return bufferSize, nil } -func (bb *BlockBlob) removeBlocks( +// This function is called when the file is expanded using a truncate operation and the new size is greater than the old +// size of the file. +func (bb *BlockBlob) createNewBlocksTruncate( blockList *common.BlockOffsetList, - size int64, - name string, -) *common.BlockOffsetList { - _, index := blockList.BinarySearch(size) - // if the start index is equal to new size - block should be removed - move one index back - if blockList.BlockList[index].StartIndex == size { - index = index - 1 + options *internal.TruncateFileOptions, +) error { + log.Trace( + "BlockBlob::createNewBlocksTruncate : name: %s, old size: %d, new size: %d", + options.Name, + options.OldSize, + options.NewSize, + ) + + numOfBlocks := int64(len(blockList.BlockList)) + length := options.NewSize - options.OldSize + + blockSize := options.BlockSize + if blockSize == 0 { + blockSize = bb.Config.blockSize + if blockSize == 0 { + // TODO: This should be set in init of blobfuse + blockSize = 16 * 1024 * 1024 + } + + blocksNeeded := (length + blockSize - 1) / blockSize + + if numOfBlocks+blocksNeeded > blockblob.MaxBlocks { + // if we cannot accommodate the data with the current block size, recalculate the block size + availableBlocks := blockblob.MaxBlocks - numOfBlocks + blockSize = (length + availableBlocks - 1) / availableBlocks + if blockSize > blockblob.MaxStageBlockBytes { + return errors.New("cannot accommodate data within the block limit") + } + } + } else { + // Case when the user has specified the block size from a previous component. + // For example, if a 10MB file is created using block_cache with an 8MB blockSize, its block list would be 1(8M), 2(2M). + // Now say the user truncates it to 20MB. We should first download the second block and modify + // it (i.e., the new block list would be 1(8M), 2(8M), 3(4M)). + + // Calculate how many blocks we need with this block size + blocksNeeded := (length + blockSize - 1) / blockSize + // For a user-specified block size, validate that we can accommodate the data within the block limit + if numOfBlocks+blocksNeeded > blockblob.MaxBlocks { + return fmt.Errorf("cannot accommodate data within the block limit with configured block-size: %d", blockSize) + } + + // if the last block is not aligned to the block size, adjust it + if len(blockList.BlockList) > 0 { + lastBlock := blockList.BlockList[len(blockList.BlockList)-1] + + if lastBlock.EndIndex-lastBlock.StartIndex < blockSize { + // the last block is smaller than the block size, so extend it + lastBlock.EndIndex = min(lastBlock.StartIndex+blockSize, options.NewSize) + + lastBlock.Data = make([]byte, lastBlock.EndIndex-lastBlock.StartIndex) + lastBlock.Flags.Set(common.DirtyBlock) + + // create a new block id for this block, otherwise it would corrupt the state of the blob. 
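Aside: a worked example of the block-size recalculation above (the `blocksNeeded`/`availableBlocks` arithmetic). `maxBlocks` mirrors `blockblob.MaxBlocks`, assumed here to be 50000:

```go
package main

import "fmt"

const maxBlocks = 50_000 // mirrors blockblob.MaxBlocks (assumed value)

func main() {
	length := int64(900) << 30   // 900GiB still to cover
	blockSize := int64(16) << 20 // default 16MiB
	numOfBlocks := int64(10)     // blocks already committed

	blocksNeeded := (length + blockSize - 1) / blockSize // ceiling division
	if numOfBlocks+blocksNeeded > maxBlocks {
		// same recalculation as the code above: spread the remaining bytes
		// evenly over the block slots that are still available
		availableBlocks := int64(maxBlocks) - numOfBlocks
		blockSize = (length + availableBlocks - 1) / availableBlocks
	}
	fmt.Println(blockSize) // ≈ 18.4MiB, so 900GiB fits under the 50000-block limit
}
```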
+ lastBlock.Id = common.GetBlockID(blockList.BlockIdLength) + + err := bb.ReadInBuffer(options.Name, lastBlock.StartIndex, lastBlock.EndIndex-lastBlock.StartIndex, lastBlock.Data, nil) + if err != nil { + log.Err("BlockBlob::createNewBlocksTruncate : Failed to adjust last block %s [%v]", options.Name, err) + return err + } + } + + } + } + + var startIdx int64 = 0 + if len(blockList.BlockList) > 0 { + startIdx = blockList.BlockList[len(blockList.BlockList)-1].EndIndex + } + + for i := startIdx; i < options.NewSize; i += blockSize { + curBlkSize := min(blockSize, options.NewSize-i) + + newBlock := bb.createBlock(blockList.BlockIdLength, i, curBlkSize) + blockList.BlockList = append(blockList.BlockList, newBlock) } + + blockList.Flags.Set(common.BlobFlagBlockListModified) + + return nil +} + +func (bb *BlockBlob) removeBlocksTruncate( + blockList *common.BlockOffsetList, + options *internal.TruncateFileOptions, +) error { + log.Trace( + "BlockBlob::removeBlocksTruncate : name: %s, old size: %d, new size: %d", + options.Name, + options.OldSize, + options.NewSize, + ) + + if len(blockList.BlockList) == 0 { + return errors.New("removeBlocksTruncate: block list is empty, cannot remove blocks") + } + + size := options.NewSize + + idx := sort.Search(len(blockList.BlockList), func(i int) bool { + return blockList.BlockList[i].StartIndex >= size + }) + + idx-- // move one index back to get the last block which is less than or equal to new size + + log.Debug( + "BlockBlob::removeBlocksTruncate : idx: %d, blockList length: %d, idx.start: %d, idx.end: %d, new size: %d", + idx, + len(blockList.BlockList), + blockList.BlockList[idx].StartIndex, + blockList.BlockList[idx].EndIndex, + size, + ) + // if the file we're shrinking is in the middle of a block then shrink that block - if blockList.BlockList[index].EndIndex > size { - blk := blockList.BlockList[index] + if blockList.BlockList[idx].EndIndex > size { + blk := blockList.BlockList[idx] blk.EndIndex = size blk.Data = make([]byte, blk.EndIndex-blk.StartIndex) blk.Flags.Set(common.DirtyBlock) - err := bb.ReadInBuffer(name, blk.StartIndex, blk.EndIndex-blk.StartIndex, blk.Data, nil) + // create the new block id for this block otherwise it would corrupt the state of the blob. + blk.Id = common.GetBlockID(blockList.BlockIdLength) + + err := bb.ReadInBuffer( + options.Name, + blk.StartIndex, + blk.EndIndex-blk.StartIndex, + blk.Data, + nil, + ) if err != nil { - log.Err("BlockBlob::removeBlocks : Failed to remove blocks %s [%s]", name, err.Error()) + log.Err( + "BlockBlob::removeBlocksTruncate : Failed to remove blocks %s [%v]", + options.Name, + err, + ) + return err } - } - blk := blockList.BlockList[index] - blk.Flags.Set(common.RemovedBlocks) - blockList.BlockList = blockList.BlockList[:index+1] - return blockList + blockList.BlockList = blockList.BlockList[:idx+1] + blockList.Flags.Set(common.BlobFlagBlockListModified) + + return nil } -func (bb *BlockBlob) TruncateFile(name string, size int64) error { - // log.Trace("BlockBlob::TruncateFile : name=%s, size=%d", name, size) - attr, err := bb.GetAttr(name) - if err != nil { - log.Err( - "BlockBlob::TruncateFile : Failed to get attributes of file %s [%s]", - name, - err.Error(), +func (bb *BlockBlob) TruncateFile(options internal.TruncateFileOptions) error { + log.Trace("BlockBlob::TruncateFile : name: %s, old size: %d, new size: %d", + options.Name, options.OldSize, options.NewSize) + + // If old size is not specified, get it from the storage. 
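Aside: the index math in `removeBlocksTruncate` above, worked on a small example with simplified types:

```go
package main

import (
	"fmt"
	"sort"
)

type blk struct{ start, end int64 }

func main() {
	blocks := []blk{{0, 8}, {8, 16}, {16, 20}}
	newSize := int64(10)

	// Same search as removeBlocksTruncate: find the first block starting at
	// or after the new size...
	idx := sort.Search(len(blocks), func(i int) bool { return blocks[i].start >= newSize })
	idx-- // ...then step back to the last block that still holds live data

	if blocks[idx].end > newSize {
		// the new size lands inside this block: shrink it (the real code also
		// re-reads its data and assigns it a fresh block id)
		blocks[idx].end = newSize
	}
	blocks = blocks[:idx+1]

	fmt.Println(blocks) // [{0 8} {8 10}]
}
```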
+ if options.OldSize == -1 { + attr, err := bb.GetAttr(options.Name) + if err != nil { + log.Err( + "BlockBlob::TruncateFile : Failed to get attributes of file %s [%v]", + options.Name, + err, + ) + return err + } + + options.OldSize = attr.Size + + log.Trace( + "BlockBlob::TruncateFile : name: %s, old size: %d [from storage], new size: %d", + options.Name, + options.OldSize, + options.NewSize, ) - if err == syscall.ENOENT { + } + + if options.OldSize == options.NewSize { + return nil + } + + if options.NewSize == 0 { + var buf []byte + if err := bb.WriteFromBuffer(options.Name, nil, buf); err != nil { + log.Err( + "BlockBlob::TruncateFile : Failed to truncate file %s to zero size [%v]", + options.Name, + err, + ) return err } + return nil } - if size == 0 || attr.Size == 0 { - // If we are resizing to a value > 1GB then we need to upload multiple blocks to resize - if size > 1*common.GbToBytes { - blkSize := int64(16 * common.MbToBytes) - blobName := common.JoinUnixFilepath(bb.Config.prefixPath, name) - blobClient := bb.Container.NewBlockBlobClient(blobName) - - blkList := make([]string, 0) - id := common.GetBlockID(common.BlockIDLength) - - for i := 0; size > 0; i++ { - if i == 0 || size < blkSize { - // Only first and last block we upload and rest all we replicate with the first block itself - if size < blkSize { - blkSize = size - id = common.GetBlockID(common.BlockIDLength) - } - data := make([]byte, blkSize) - - _, err = blobClient.StageBlock(context.Background(), - id, - streaming.NopCloser(bytes.NewReader(data)), - &blockblob.StageBlockOptions{ - CPKInfo: bb.blobCPKOpt, - }) - if err != nil { - log.Err( - "BlockBlob::TruncateFile : Failed to stage block for %s [%s]", - name, - err.Error(), - ) - return err - } - } - blkList = append(blkList, id) - size -= blkSize - } - err = bb.CommitBlocks(blobName, blkList, nil) - if err != nil { - log.Err( - "BlockBlob::TruncateFile : Failed to commit blocks for %s [%s]", - name, - err.Error(), - ) - return err - } - } else { - err := bb.WriteFromBuffer(name, nil, make([]byte, size)) - if err != nil { - log.Err("BlockBlob::TruncateFile : Failed to set the %s to 0 bytes [%s]", name, err.Error()) - } + // Handle files whose new size is <= 256MiB when the block size is not specified. 
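Aside: a compact summary of the dispatch order the rewritten `TruncateFile` above follows. `maxUploadBlobBytes` mirrors `blockblob.MaxUploadBlobBytes` (256MiB, assumed here):

```go
package main

import "fmt"

const maxUploadBlobBytes = 256 << 20 // mirrors blockblob.MaxUploadBlobBytes (assumed)

// truncatePath summarizes the dispatch order of the rewritten TruncateFile.
func truncatePath(oldSize, newSize, blockSize int64) string {
	switch {
	case oldSize == newSize:
		return "no-op"
	case newSize == 0:
		return "overwrite with an empty blob"
	case newSize <= maxUploadBlobBytes && blockSize == 0:
		return "TruncateFileWithoutBlocks: read/write the whole blob"
	default:
		return "TruncateFileUsingBlocks: edit the block list"
	}
}

func main() {
	fmt.Println(truncatePath(10, 10, 0))    // no-op
	fmt.Println(truncatePath(10, 0, 0))     // overwrite with an empty blob
	fmt.Println(truncatePath(10, 1024, 0))  // whole-blob path
	fmt.Println(truncatePath(10, 1<<30, 0)) // block-list path
}
```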
+ if options.NewSize <= blockblob.MaxUploadBlobBytes && options.BlockSize == 0 { + if err := bb.TruncateFileWithoutBlocks(&options); err != nil { + log.Err("BlockBlob::TruncateFile : Failed to truncate file %s[%v]", options.Name, err) + return err } + return nil + } + + // Truncate file by managing blocks + if err := bb.TruncateFileUsingBlocks(&options); err != nil { + log.Err("BlockBlob::TruncateFile : Failed to truncate file %s[%v]", options.Name, err) return err } - //If new size is less than 256MB - if size < blockblob.MaxUploadBlobBytes { - data, err := bb.HandleSmallFile(name, size, attr.Size) + return nil +} + +func (bb *BlockBlob) TruncateFileWithoutBlocks(options *internal.TruncateFileOptions) error { + log.Trace( + "BlockBlob::TruncateFileWithoutBlocks : name: %s, old size: %d, new size: %d", + options.Name, + options.OldSize, + options.NewSize, + ) + var err error + + buf := make([]byte, options.NewSize) + + if options.OldSize > 0 { + // Read the file + err = bb.ReadInBuffer(options.Name, 0, min(options.NewSize, options.OldSize), buf, nil) if err != nil { - log.Err("BlockBlob::TruncateFile : Failed to read small file %s", name, err.Error()) + log.Err( + "BlockBlob::TruncateFileWithoutBlocks : Failed to read small file %s[%v]", + options.Name, + err, + ) return err } - err = bb.WriteFromBuffer(name, nil, data) - if err != nil { - log.Err( - "BlockBlob::TruncateFile : Failed to write from buffer file %s", - name, - err.Error(), + } + + // Write the file + err = bb.WriteFromBuffer(options.Name, nil, buf) + if err != nil { + log.Err( + "BlockBlob::TruncateFileWithoutBlocks : Failed to write from buffer file %s[%v]", + options.Name, + err, + ) + return err + } + + return nil +} + +func (bb *BlockBlob) TruncateFileUsingBlocks(options *internal.TruncateFileOptions) error { + log.Trace( + "BlockBlob::TruncateFileUsingBlocks : name: %s, old size: %d, new size: %d", + options.Name, + options.OldSize, + options.NewSize, + ) + var err error + + blob, err := bb.GetFileBlockOffsets(options.Name) + if err != nil { + log.Err( + "BlockBlob::TruncateFileUsingBlocks : Failed to get block offsets for file %s[%v]", + options.Name, + err, + ) + return err + } + + // If blob has no blocks, we should convert the blob into blocks first. 
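Aside: the buffer handling in `TruncateFileWithoutBlocks` above, reduced to a standalone sketch (uses Go 1.21's built-in `min`; the repo requires a newer Go anyway):

```go
package main

import "fmt"

// truncateBuf mirrors the buffer handling in TruncateFileWithoutBlocks: copy
// min(old, new) bytes into a buffer of the target size, so growth is
// zero-filled and shrinking drops the tail.
func truncateBuf(old []byte, newSize int64) []byte {
	buf := make([]byte, newSize)
	copy(buf, old[:min(int64(len(old)), newSize)])
	return buf
}

func main() {
	data := []byte("test data")
	fmt.Printf("%q\n", truncateBuf(data, 5))  // "test "
	fmt.Printf("%q\n", truncateBuf(data, 12)) // "test data\x00\x00\x00"
}
```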
+ if blob.HasNoBlocks() && options.OldSize > 0 { + if options.OldSize > blockblob.MaxUploadBlobBytes { + err = fmt.Errorf( + "Blob %v has size %d bytes greater than 256MB but has no blocks, inconsistent state", + options.Name, + options.OldSize, ) + log.Err("BlockBlob::TruncateFileUsingBlocks : %v", err) return err } - } else { - bol, err := bb.GetFileBlockOffsets(name) + + buf := make([]byte, options.OldSize) + + // Read the file + err = bb.ReadInBuffer(options.Name, 0, options.OldSize, buf, nil) if err != nil { - log.Err("BlockBlob::TruncateFile : Failed to get block list of file %s [%s]", name, err.Error()) + log.Err( + "BlockBlob::TruncateFileUsingBlocks : Failed to read small file %s[%v]", + options.Name, + err, + ) return err } - if bol.SmallFile() { - data, err := bb.HandleSmallFile(name, size, attr.Size) - if err != nil { - log.Err("BlockBlob::TruncateFile : Failed to read small file %s", name, err.Error()) - return err - } - err = bb.WriteFromBuffer(name, nil, data) - if err != nil { - log.Err("BlockBlob::TruncateFile : Failed to write from buffer file %s", name, err.Error()) - return err - } - } else { - if size < attr.Size { - bol = bb.removeBlocks(bol, size, name) - } else if size > attr.Size { - _, err = bb.createNewBlocks(bol, bol.BlockList[len(bol.BlockList)-1].EndIndex, size-attr.Size) - if err != nil { - log.Err("BlockBlob::TruncateFile : Failed to create new blocks for file %s", name, err.Error()) - return err - } + + // Create blocks for the blob. + blockSize := options.BlockSize + if blockSize == 0 { + blockSize = bb.Config.blockSize + if blockSize == 0 { + // TODO: This should be set in init of blobfuse + blockSize = 16 * 1024 * 1024 } - err = bb.StageAndCommit(name, bol) - if err != nil { - log.Err("BlockBlob::TruncateFile : Failed to stage and commit file %s", name, err.Error()) - return err + + for i := int64(0); i < options.OldSize; i += blockSize { + blkSize := min(blockSize, options.OldSize-i) + newBlock := bb.createBlockFromBuffer(blob.BlockIdLength, i, buf[i:i+blkSize]) + blob.BlockList = append(blob.BlockList, newBlock) } } + blob.Flags.Set(common.BlobFlagBlockListModified) + } else { + // If blob has blocks, we should validate the block list against the old size. + if ok := blob.ValidateBlockListAgainstFileSize(options.OldSize); !ok { + err = fmt.Errorf("Blob %v has blocks that do not match the old size %d bytes, inconsistent state", + options.Name, options.OldSize) + log.Err("BlockBlob::TruncateFileUsingBlocks : %v", err) + return err + } } - return nil -} - -func (bb *BlockBlob) HandleSmallFile(name string, size int64, originalSize int64) ([]byte, error) { - var data = make([]byte, size) - var err error - if size > originalSize { - err = bb.ReadInBuffer(name, 0, 0, data, nil) + if options.NewSize < options.OldSize { + err = bb.removeBlocksTruncate(blob, options) if err != nil { - log.Err("BlockBlob::TruncateFile : Failed to read small file %s", name, err.Error()) + log.Err( + "BlockBlob::TruncateFileUsingBlocks : Failed to Remove Blocks from Blocklist for file %s[%v]", + options.Name, + err, + ) + return err } } else { - err = bb.ReadInBuffer(name, 0, size, data, nil) + err = bb.createNewBlocksTruncate(blob, options) if err != nil { - log.Err("BlockBlob::TruncateFile : Failed to read small file %s", name, err.Error()) + log.Err("BlockBlob::TruncateFileUsingBlocks : Failed to Create New Blocks for file %s[%v]", options.Name, err) + return err } } - return data, err + + // Stage and commit the blocks. 
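Aside: the no-blocks conversion loop above, sketched standalone with simplified types. A blob uploaded without blocks is re-chunked into blockSize pieces before the block list can be extended or shrunk:

```go
package main

import "fmt"

type block struct{ start, end int64 }

func main() {
	oldSize := int64(20) << 20  // 20MiB of existing data
	blockSize := int64(8) << 20 // 8MiB blocks

	// Mirrors the conversion loop in TruncateFileUsingBlocks: each iteration
	// carves off at most blockSize bytes, so the final block may be short.
	var list []block
	for i := int64(0); i < oldSize; i += blockSize {
		list = append(list, block{start: i, end: i + min(blockSize, oldSize-i)})
	}
	for _, b := range list {
		fmt.Printf("[%d, %d)\n", b.start, b.end) // 0-8MiB, 8-16MiB, 16-20MiB
	}
}
```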
+ err = bb.StageAndCommit(options.Name, blob) + if err != nil { + log.Err( + "BlockBlob::TruncateFileUsingBlocks : Failed to Stage and Commit blocks for file %s[%v]", + options.Name, + err, + ) + return err + } + + return nil } // Write : write data at given offset to a blob -func (bb *BlockBlob) Write(options internal.WriteFileOptions) error { +func (bb *BlockBlob) Write(options *internal.WriteFileOptions) error { name := options.Handle.Path offset := options.Offset defer log.TimeTrack(time.Now(), "BlockBlob::Write", options.Handle.Path) @@ -1571,7 +1789,7 @@ func (bb *BlockBlob) Write(options internal.WriteFileOptions) error { length := int64(len(options.Data)) data := options.Data // case 1: file consists of no blocks (small file) - if fileOffsets.SmallFile() { + if fileOffsets.HasNoBlocks() { // get all the data oldData, _ := bb.ReadBuffer(name, 0, 0) // update the data with the new data @@ -1699,16 +1917,40 @@ func (bb *BlockBlob) StageAndCommit(name string, bol *common.BlockOffsetList) er defer blobMtx.Unlock() blobClient := bb.getBlockBlobClient(name) blockIDList := make([]string, 0, len(bol.BlockList)) - var data []byte - staged := false - for _, blk := range bol.BlockList { + var truncBuf, data []byte + var truncateBufAllocated = false + staged := bol.IsBlockListModified() + + for i, blk := range bol.BlockList { blockIDList = append(blockIDList, blk.Id) + if blk.Truncated() { - data = make([]byte, blk.EndIndex-blk.StartIndex) + if !truncateBufAllocated { + truncBuf = make([]byte, blk.EndIndex-blk.StartIndex) + truncateBufAllocated = true + } + + // Match the size of the buffer to the size of the block + if len(truncBuf) < int(blk.EndIndex-blk.StartIndex) { + // This should not happen since we allocate the buffer once to the max size of block + return fmt.Errorf( + "BlockBlob::StageAndCommit : Truncate buffer size %d is smaller than block size %d for idx: %d, id: %s for blob %s", + len(truncBuf), + blk.EndIndex-blk.StartIndex, + i, + blk.Id, + name, + ) + } + + // Reslice the buffer to the size of the block, as the last block could be smaller than the rest + data = truncBuf[:blk.EndIndex-blk.StartIndex] + blk.Flags.Clear(common.TruncatedBlock) } else { data = blk.Data } + if blk.Dirty() { _, err := blobClient.StageBlock(context.Background(), blk.Id, @@ -1728,10 +1970,9 @@ func (bb *BlockBlob) StageAndCommit(name string, bol *common.BlockOffsetList) er } staged = true blk.Flags.Clear(common.DirtyBlock) - } else if blk.Removed() { - staged = true } } + if staged { _, err := blobClient.CommitBlockList(context.Background(), blockIDList, diff --git a/component/azstorage/block_blob_test.go b/component/azstorage/block_blob_test.go index 6d8495dee..e711a6bc1 100644 --- a/component/azstorage/block_blob_test.go +++ b/component/azstorage/block_blob_test.go @@ -98,6 +98,7 @@ var storageTestConfigurationParameters storageTestConfiguration var ctx = context.Background() const MB = 1024 * 1024 +const GB = 1024 * MB // A UUID representation compliant with specification in RFC 4122 document. type uuid [16]byte @@ -807,7 +808,7 @@ func (s *blockBlobTestSuite) TestStreamDirError() { s.assert.NoError( err, - ) + ) // Note: See comment in BlockBlob.List. 
BlockBlob behaves differently from Datalake s.assert.Empty(entries) // Directory should not be in the account dir := s.containerClient.NewBlobClient(name) @@ -1134,7 +1135,7 @@ func (s *blockBlobTestSuite) TestOpenFileSize() { name := generateFileName() size := 10 s.az.CreateFile(internal.CreateFileOptions{Name: name}) - s.az.TruncateFile(internal.TruncateFileOptions{Name: name, Size: int64(size)}) + s.az.TruncateFile(internal.TruncateFileOptions{Name: name, OldSize: -1, NewSize: int64(size)}) h, err := s.az.OpenFile(internal.OpenFileOptions{Name: name}) s.assert.NoError(err) @@ -1333,11 +1334,11 @@ func (s *blockBlobTestSuite) TestReadInBuffer() { h, _ := s.az.CreateFile(internal.CreateFileOptions{Name: name}) testData := "test data" data := []byte(testData) - s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) h, _ = s.az.OpenFile(internal.OpenFileOptions{Name: name}) output := make([]byte, 5) - len, err := s.az.ReadInBuffer(internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) + len, err := s.az.ReadInBuffer(&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) s.assert.NoError(err) s.assert.Equal(5, len) s.assert.EqualValues(testData[:5], output) @@ -1353,13 +1354,18 @@ func (s *blockBlobTestSuite) TestReadInBufferWithoutHandle() { testData := "test data" data := []byte(testData) - n, err := s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + n, err := s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) s.assert.NoError(err) s.assert.Len(data, n) output := make([]byte, 5) len, err := s.az.ReadInBuffer( - internal.ReadInBufferOptions{Offset: 0, Data: output, Path: name, Size: (int64)(len(data))}, + &internal.ReadInBufferOptions{ + Offset: 0, + Data: output, + Path: name, + Size: (int64)(len(data)), + }, ) s.assert.NoError(err) s.assert.Equal(5, len) @@ -1370,7 +1376,7 @@ func (s *blockBlobTestSuite) TestReadInBufferEmptyPath() { defer s.cleanupTest() output := make([]byte, 5) - len, err := s.az.ReadInBuffer(internal.ReadInBufferOptions{Offset: 0, Data: output, Size: 5}) + len, err := s.az.ReadInBuffer(&internal.ReadInBufferOptions{Offset: 0, Data: output, Size: 5}) s.assert.Error(err) s.assert.Equal(0, len) s.assert.Equal("path not given for download", err.Error()) @@ -1383,13 +1389,13 @@ func (bbTestSuite *blockBlobTestSuite) TestReadInBufferWithETAG() { handle, _ := bbTestSuite.az.CreateFile(internal.CreateFileOptions{Name: name}) testData := "test data" data := []byte(testData) - bbTestSuite.az.WriteFile(internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}) + bbTestSuite.az.WriteFile(&internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}) handle, _ = bbTestSuite.az.OpenFile(internal.OpenFileOptions{Name: name}) output := make([]byte, 5) var etag string len, err := bbTestSuite.az.ReadInBuffer( - internal.ReadInBufferOptions{Handle: handle, Offset: 0, Data: output, Etag: &etag}, + &internal.ReadInBufferOptions{Handle: handle, Offset: 0, Data: output, Etag: &etag}, ) bbTestSuite.assert.NoError(err) bbTestSuite.assert.NotEmpty(etag) @@ -1405,7 +1411,7 @@ func (bbTestSuite *blockBlobTestSuite) TestReadInBufferWithETAGMismatch() { handle, _ := bbTestSuite.az.CreateFile(internal.CreateFileOptions{Name: name}) testData := "test data 12345678910" data := []byte(testData) - bbTestSuite.az.WriteFile(internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}) + 
bbTestSuite.az.WriteFile(&internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}) _ = bbTestSuite.az.CloseFile(internal.CloseFileOptions{Handle: handle}) attr, err := bbTestSuite.az.GetAttr(internal.GetAttrOptions{Name: name}) @@ -1419,7 +1425,7 @@ func (bbTestSuite *blockBlobTestSuite) TestReadInBufferWithETAGMismatch() { handle, _ = bbTestSuite.az.OpenFile(internal.OpenFileOptions{Name: name}) _, err = bbTestSuite.az.ReadInBuffer( - internal.ReadInBufferOptions{Handle: handle, Offset: 0, Data: output, Etag: &etag}, + &internal.ReadInBufferOptions{Handle: handle, Offset: 0, Data: output, Etag: &etag}, ) bbTestSuite.assert.NoError(err) bbTestSuite.assert.NotEmpty(etag) @@ -1431,12 +1437,12 @@ func (bbTestSuite *blockBlobTestSuite) TestReadInBufferWithETAGMismatch() { bbTestSuite.assert.NoError(err) testData = "test data 12345678910 123123123123123123123" data = []byte(testData) - bbTestSuite.az.WriteFile(internal.WriteFileOptions{Handle: handle1, Offset: 0, Data: data}) + bbTestSuite.az.WriteFile(&internal.WriteFileOptions{Handle: handle1, Offset: 0, Data: data}) _ = bbTestSuite.az.CloseFile(internal.CloseFileOptions{Handle: handle1}) // Read data back using older handle _, err = bbTestSuite.az.ReadInBuffer( - internal.ReadInBufferOptions{Handle: handle, Offset: 5, Data: output, Etag: &etag}, + &internal.ReadInBufferOptions{Handle: handle, Offset: 5, Data: output, Etag: &etag}, ) bbTestSuite.assert.NoError(err) bbTestSuite.assert.NotEmpty(etag) @@ -1453,11 +1459,11 @@ func (s *blockBlobTestSuite) TestReadInBufferLargeBuffer() { h, _ := s.az.CreateFile(internal.CreateFileOptions{Name: name}) testData := "test data" data := []byte(testData) - s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) h, _ = s.az.OpenFile(internal.OpenFileOptions{Name: name}) output := make([]byte, 1000) // Testing that passing in a super large buffer will still work - len, err := s.az.ReadInBuffer(internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) + len, err := s.az.ReadInBuffer(&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) s.assert.NoError(err) s.assert.EqualValues(h.Size, len) s.assert.EqualValues(testData, output[:h.Size]) @@ -1470,7 +1476,7 @@ func (s *blockBlobTestSuite) TestReadInBufferEmpty() { h, _ := s.az.CreateFile(internal.CreateFileOptions{Name: name}) output := make([]byte, 10) - len, err := s.az.ReadInBuffer(internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) + len, err := s.az.ReadInBuffer(&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) s.assert.NoError(err) s.assert.Equal(0, len) } @@ -1483,7 +1489,7 @@ func (s *blockBlobTestSuite) TestReadInBufferBadRange() { h.Size = 10 _, err := s.az.ReadInBuffer( - internal.ReadInBufferOptions{Handle: h, Offset: 20, Data: make([]byte, 2)}, + &internal.ReadInBufferOptions{Handle: h, Offset: 20, Data: make([]byte, 2)}, ) s.assert.Error(err) s.assert.EqualValues(syscall.ERANGE, err) @@ -1497,7 +1503,7 @@ func (s *blockBlobTestSuite) TestReadInBufferError() { h.Size = 10 _, err := s.az.ReadInBuffer( - internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: make([]byte, 2)}, + &internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: make([]byte, 2)}, ) s.assert.Error(err) s.assert.EqualValues(syscall.ENOENT, err) @@ -1511,7 +1517,7 @@ func (s *blockBlobTestSuite) TestWriteFile() { testData := "test data" data := []byte(testData) - count, err := 
s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + count, err := s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) s.assert.NoError(err) s.assert.Equal(len(data), count) @@ -1547,7 +1553,7 @@ func (s *blockBlobTestSuite) TestWriteFileWindowsNameConvert() { testData := "test data" data := []byte(testData) - count, err := s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + count, err := s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) s.assert.NoError(err) s.assert.Equal(len(data), count) @@ -1569,9 +1575,11 @@ func (s *blockBlobTestSuite) TestTruncateSmallFileSmaller() { testData := "test data" data := []byte(testData) truncatedLength := 5 - s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) - err := s.az.TruncateFile(internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)}) + err := s.az.TruncateFile( + internal.TruncateFileOptions{Name: name, OldSize: -1, NewSize: int64(truncatedLength)}, + ) s.assert.NoError(err) // Blob should have updated data @@ -1608,10 +1616,10 @@ func (s *blockBlobTestSuite) TestTruncateSmallFileSmallerWindowsNameConvert() { testData := "test data" data := []byte(testData) truncatedLength := 5 - s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) err := s.az.TruncateFile( - internal.TruncateFileOptions{Name: windowsName, Size: int64(truncatedLength)}, + internal.TruncateFileOptions{Name: windowsName, NewSize: int64(truncatedLength)}, ) s.assert.NoError(err) @@ -1633,7 +1641,9 @@ func (s *blockBlobTestSuite) TestTruncateEmptyFileToLargeSize() { s.assert.NotNil(h) blobSize := int64((1 * common.GbToBytes) + 13) - err := s.az.TruncateFile(internal.TruncateFileOptions{Name: name, Size: blobSize}) + err := s.az.TruncateFile( + internal.TruncateFileOptions{Name: name, OldSize: -1, NewSize: blobSize}, + ) s.assert.NoError(err) props, err := s.az.GetAttr(internal.GetAttrOptions{Name: name}) @@ -1666,7 +1676,9 @@ func (s *blockBlobTestSuite) TestTruncateChunkedFileSmaller() { ) s.assert.NoError(err) - err = s.az.TruncateFile(internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)}) + err = s.az.TruncateFile( + internal.TruncateFileOptions{Name: name, OldSize: -1, NewSize: int64(truncatedLength)}, + ) s.assert.NoError(err) // Blob should have updated data @@ -1689,9 +1701,11 @@ func (s *blockBlobTestSuite) TestTruncateSmallFileEqual() { testData := "test data" data := []byte(testData) truncatedLength := 9 - s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) - err := s.az.TruncateFile(internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)}) + err := s.az.TruncateFile( + internal.TruncateFileOptions{Name: name, OldSize: -1, NewSize: int64(truncatedLength)}, + ) s.assert.NoError(err) // Blob should have updated data @@ -1727,7 +1741,9 @@ func (s *blockBlobTestSuite) TestTruncateChunkedFileEqual() { ) s.assert.NoError(err) - err = s.az.TruncateFile(internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)}) + err = s.az.TruncateFile( + internal.TruncateFileOptions{Name: name, OldSize: -1, NewSize: int64(truncatedLength)}, + ) s.assert.NoError(err) // Blob should have updated data @@ -1750,9 +1766,11 @@ func (s 
*blockBlobTestSuite) TestTruncateSmallFileBigger() { testData := "test data" data := []byte(testData) truncatedLength := 15 - s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) - err := s.az.TruncateFile(internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)}) + err := s.az.TruncateFile( + internal.TruncateFileOptions{Name: name, OldSize: -1, NewSize: int64(truncatedLength)}, + ) s.assert.NoError(err) // Blob should have updated data @@ -1788,7 +1806,9 @@ func (s *blockBlobTestSuite) TestTruncateChunkedFileBigger() { ) s.assert.NoError(err) - err = s.az.TruncateFile(internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)}) + err = s.az.TruncateFile( + internal.TruncateFileOptions{Name: name, OldSize: -1, NewSize: int64(truncatedLength)}, + ) s.assert.NoError(err) // Blob should have updated data @@ -1808,7 +1828,7 @@ func (s *blockBlobTestSuite) TestTruncateFileError() { // Setup name := generateFileName() - err := s.az.TruncateFile(internal.TruncateFileOptions{Name: name}) + err := s.az.TruncateFile(internal.TruncateFileOptions{Name: name, OldSize: -1}) s.assert.Error(err) s.assert.EqualValues(syscall.ENOENT, err) } @@ -1821,7 +1841,7 @@ func (s *blockBlobTestSuite) TestWriteSmallFile() { testData := "test data" data := []byte(testData) dataLen := len(data) - _, err := s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + _, err := s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) s.assert.NoError(err) f, _ := os.CreateTemp("", name+".tmp") defer os.Remove(f.Name()) @@ -1846,12 +1866,12 @@ func (s *blockBlobTestSuite) TestOverwriteSmallFile() { testData := "test-replace-data" data := []byte(testData) dataLen := len(data) - _, err := s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + _, err := s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) s.assert.NoError(err) f, _ := os.CreateTemp("", name+".tmp") defer os.Remove(f.Name()) newTestData := []byte("newdata") - _, err = s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 5, Data: newTestData}) + _, err = s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 5, Data: newTestData}) s.assert.NoError(err) currentData := []byte("test-newdata-data") @@ -1876,12 +1896,12 @@ func (s *blockBlobTestSuite) TestOverwriteAndAppendToSmallFile() { testData := "test-data" data := []byte(testData) - _, err := s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + _, err := s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) s.assert.NoError(err) f, _ := os.CreateTemp("", name+".tmp") defer os.Remove(f.Name()) newTestData := []byte("newdata") - _, err = s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 5, Data: newTestData}) + _, err = s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 5, Data: newTestData}) s.assert.NoError(err) currentData := []byte("test-newdata") @@ -1907,12 +1927,12 @@ func (s *blockBlobTestSuite) TestAppendToSmallFile() { testData := "test-data" data := []byte(testData) - _, err := s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + _, err := s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) s.assert.NoError(err) f, _ := os.CreateTemp("", name+".tmp") defer os.Remove(f.Name()) newTestData := []byte("-newdata") - _, err = 
s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 9, Data: newTestData}) + _, err = s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 9, Data: newTestData}) s.assert.NoError(err) currentData := []byte("test-data-newdata") @@ -1938,12 +1958,12 @@ func (s *blockBlobTestSuite) TestAppendOffsetLargerThanSmallFile() { testData := "test-data" data := []byte(testData) - _, err := s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + _, err := s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) s.assert.NoError(err) f, _ := os.CreateTemp("", name+".tmp") defer os.Remove(f.Name()) newTestData := []byte("newdata") - _, err = s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 12, Data: newTestData}) + _, err = s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 12, Data: newTestData}) s.assert.NoError(err) currentData := []byte("test-data\x00\x00\x00newdata") @@ -1985,7 +2005,7 @@ func (s *blockBlobTestSuite) TestAppendBlocksToSmallFile() { f, _ := os.CreateTemp("", name+".tmp") defer os.Remove(f.Name()) newTestData := []byte("-newdata-newdata-newdata") - _, err = s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 9, Data: newTestData}) + _, err = s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 9, Data: newTestData}) s.assert.NoError(err) currentData := []byte("test-data-newdata-newdata-newdata") @@ -2026,7 +2046,7 @@ func (s *blockBlobTestSuite) TestOverwriteBlocks() { f, _ := os.CreateTemp("", name+".tmp") defer os.Remove(f.Name()) newTestData := []byte("cake") - _, err = s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 16, Data: newTestData}) + _, err = s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 16, Data: newTestData}) s.assert.NoError(err) currentData := []byte("testdatates1dat1cakedat2tes3dat3tes4dat4") @@ -2067,7 +2087,7 @@ func (s *blockBlobTestSuite) TestOverwriteAndAppendBlocks() { f, _ := os.CreateTemp("", name+".tmp") defer os.Remove(f.Name()) newTestData := []byte("43211234cake") - _, err = s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 32, Data: newTestData}) + _, err = s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 32, Data: newTestData}) s.assert.NoError(err) currentData := []byte("testdatates1dat1tes2dat2tes3dat343211234cake") @@ -2107,7 +2127,7 @@ func (s *blockBlobTestSuite) TestAppendBlocks() { f, _ := os.CreateTemp("", name+".tmp") defer os.Remove(f.Name()) newTestData := []byte("43211234cake") - _, err = s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: newTestData}) + _, err = s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: newTestData}) s.assert.NoError(err) currentData := []byte("43211234cakedat1tes2dat2tes3dat3tes4dat4") @@ -2147,7 +2167,7 @@ func (s *blockBlobTestSuite) TestAppendOffsetLargerThanSize() { f, _ := os.CreateTemp("", name+".tmp") defer os.Remove(f.Name()) newTestData := []byte("43211234cake") - _, err = s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 45, Data: newTestData}) + _, err = s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 45, Data: newTestData}) s.assert.NoError(err) currentData := []byte( @@ -2558,7 +2578,7 @@ func (s *blockBlobTestSuite) TestGetAttrFileSize() { h, _ := s.az.CreateFile(internal.CreateFileOptions{Name: name}) testData := "test data" data := []byte(testData) - s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: 
data}) props, err := s.az.GetAttr(internal.GetAttrOptions{Name: name}) s.assert.NoError(err) @@ -2594,7 +2614,7 @@ func (s *blockBlobTestSuite) TestGetAttrFileTime() { h, _ := s.az.CreateFile(internal.CreateFileOptions{Name: name}) testData := "test data" data := []byte(testData) - s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) before, err := s.az.GetAttr(internal.GetAttrOptions{Name: name}) s.assert.NoError(err) @@ -2602,7 +2622,7 @@ func (s *blockBlobTestSuite) TestGetAttrFileTime() { time.Sleep(1 * time.Second) // Ensure that the modification time will change - s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) after, err := s.az.GetAttr(internal.GetAttrOptions{Name: name}) s.assert.NoError(err) @@ -2804,14 +2824,13 @@ func (s *blockBlobTestSuite) TestGetFileBlockOffsetsSmallFile() { testData := "testdatates1dat1tes2dat2tes3dat3tes4dat4" data := []byte(testData) - s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) // GetFileBlockOffsets offsetList, err := s.az.GetFileBlockOffsets(internal.GetFileBlockOffsetsOptions{Name: name}) s.assert.NoError(err) s.assert.Empty(offsetList.BlockList) - s.assert.True(offsetList.SmallFile()) - s.assert.EqualValues(0, offsetList.BlockIdLength) + s.assert.True(offsetList.HasNoBlocks()) } func (s *blockBlobTestSuite) TestGetFileBlockOffsetsChunkedFile() { @@ -2869,7 +2888,7 @@ func (s *blockBlobTestSuite) TestFlushFileEmptyFile() { output := make([]byte, 1) length, err := s.az.ReadInBuffer( - internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}, + &internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}, ) s.assert.NoError(err) s.assert.Equal(0, length) @@ -2907,7 +2926,7 @@ func (s *blockBlobTestSuite) TestFlushFileChunkedFile() { output := make([]byte, 16*MB) length, err := s.az.ReadInBuffer( - internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}, + &internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}, ) s.assert.NoError(err) s.assert.Equal(16*MB, length) @@ -2958,7 +2977,7 @@ func (s *blockBlobTestSuite) TestFlushFileUpdateChunkedFile() { s.assert.NoError(err) output := make([]byte, 16*MB) - len, err := s.az.ReadInBuffer(internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) + len, err := s.az.ReadInBuffer(&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) s.assert.NoError(err) s.assert.Equal(16*MB, len) s.assert.NotEqual(data, output) @@ -3013,7 +3032,7 @@ func (s *blockBlobTestSuite) TestFlushFileTruncateUpdateChunkedFile() { s.assert.NoError(err) output := make([]byte, 16*MB) - len, err := s.az.ReadInBuffer(internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) + len, err := s.az.ReadInBuffer(&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) s.assert.NoError(err) s.assert.Equal(16*MB, len) s.assert.NotEqual(data, output) @@ -3070,13 +3089,13 @@ func (s *blockBlobTestSuite) TestFlushFileAppendBlocksEmptyFile() { } blk3.Flags.Set(common.DirtyBlock) h.CacheObj.BlockList = append(h.CacheObj.BlockList, blk1, blk2, blk3) - bol.Flags.Clear(common.SmallFile) + bol.Flags.Clear(common.BlobFlagHasNoBlocks) err := s.az.FlushFile(internal.FlushFileOptions{Handle: h}) s.assert.NoError(err) output := make([]byte, 6*MB) - len, err := 
s.az.ReadInBuffer(internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) + len, err := s.az.ReadInBuffer(&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) s.assert.NoError(err) s.assert.Equal(6*MB, len) s.assert.Equal(blk1.Data, output[0:blockSize]) @@ -3149,13 +3168,13 @@ func (s *blockBlobTestSuite) TestFlushFileAppendBlocksChunkedFile() { } blk3.Flags.Set(common.DirtyBlock) h.CacheObj.BlockList = append(h.CacheObj.BlockList, blk1, blk2, blk3) - bol.Flags.Clear(common.SmallFile) + bol.Flags.Clear(common.BlobFlagHasNoBlocks) err = s.az.FlushFile(internal.FlushFileOptions{Handle: h}) s.assert.NoError(err) output := make([]byte, fileSize+3*blockSize) - len, err := s.az.ReadInBuffer(internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) + len, err := s.az.ReadInBuffer(&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) s.assert.NoError(err) s.assert.Equal(fileSize+3*blockSize, len) s.assert.Equal(data, output[0:fileSize]) @@ -3208,13 +3227,13 @@ func (s *blockBlobTestSuite) TestFlushFileTruncateBlocksEmptyFile() { blk3.Flags.Set(common.TruncatedBlock) blk3.Flags.Set(common.DirtyBlock) h.CacheObj.BlockList = append(h.CacheObj.BlockList, blk1, blk2, blk3) - bol.Flags.Clear(common.SmallFile) + bol.Flags.Clear(common.BlobFlagHasNoBlocks) err := s.az.FlushFile(internal.FlushFileOptions{Handle: h}) s.assert.NoError(err) output := make([]byte, 3*int64(blockSize)) - len, err := s.az.ReadInBuffer(internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) + len, err := s.az.ReadInBuffer(&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) s.assert.NoError(err) s.assert.EqualValues(3*int64(blockSize), len) data := make([]byte, 3*blockSize) @@ -3280,13 +3299,13 @@ func (s *blockBlobTestSuite) TestFlushFileTruncateBlocksChunkedFile() { blk3.Flags.Set(common.TruncatedBlock) blk3.Flags.Set(common.DirtyBlock) h.CacheObj.BlockList = append(h.CacheObj.BlockList, blk1, blk2, blk3) - bol.Flags.Clear(common.SmallFile) + bol.Flags.Clear(common.BlobFlagHasNoBlocks) err = s.az.FlushFile(internal.FlushFileOptions{Handle: h}) s.assert.NoError(err) output := make([]byte, fileSize+3*blockSize) - len, err := s.az.ReadInBuffer(internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) + len, err := s.az.ReadInBuffer(&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) s.assert.NoError(err) s.assert.Equal(fileSize+3*blockSize, len) s.assert.Equal(data, output[:fileSize]) @@ -3340,13 +3359,13 @@ func (s *blockBlobTestSuite) TestFlushFileAppendAndTruncateBlocksEmptyFile() { blk3.Flags.Set(common.DirtyBlock) blk3.Flags.Set(common.TruncatedBlock) h.CacheObj.BlockList = append(h.CacheObj.BlockList, blk1, blk2, blk3) - bol.Flags.Clear(common.SmallFile) + bol.Flags.Clear(common.BlobFlagHasNoBlocks) err := s.az.FlushFile(internal.FlushFileOptions{Handle: h}) s.assert.NoError(err) output := make([]byte, 3*blockSize) - len, err := s.az.ReadInBuffer(internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) + len, err := s.az.ReadInBuffer(&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) s.assert.NoError(err) s.assert.Equal(3*blockSize, len) data := make([]byte, blockSize) @@ -3416,14 +3435,14 @@ func (s *blockBlobTestSuite) TestFlushFileAppendAndTruncateBlocksChunkedFile() { blk3.Flags.Set(common.DirtyBlock) blk3.Flags.Set(common.TruncatedBlock) h.CacheObj.BlockList = append(h.CacheObj.BlockList, blk1, blk2, blk3) - bol.Flags.Clear(common.SmallFile) + bol.Flags.Clear(common.BlobFlagHasNoBlocks) err = 
s.az.FlushFile(internal.FlushFileOptions{Handle: h}) s.assert.NoError(err) // file should be empty output := make([]byte, fileSize+3*blockSize) - len, err := s.az.ReadInBuffer(internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) + len, err := s.az.ReadInBuffer(&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) s.assert.NoError(err) s.assert.Equal(fileSize+3*blockSize, len) s.assert.Equal(data, output[:fileSize]) @@ -4107,7 +4126,7 @@ func (s *blockBlobTestSuite) TestInvalidMD5OnReadNoVaildate() { // h, _ := s.az.CreateFile(internal.CreateFileOptions{Name: name}) // testData := "test data" // data := []byte(testData) -// s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) +// s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) // h, _ = s.az.OpenFile(internal.OpenFileOptions{Name: name}) // s.az.CloseFile(internal.CloseFileOptions{Handle: h}) @@ -4271,10 +4290,10 @@ func (s *blockBlobTestSuite) UtilityFunctionTestTruncateFileToSmaller( s.assert.NoError(err) data := make([]byte, size) - s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) err = s.az.TruncateFile( - internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)}, + internal.TruncateFileOptions{Name: name, NewSize: int64(truncatedLength)}, ) s.assert.NoError(err) @@ -4315,10 +4334,10 @@ func (s *blockBlobTestSuite) UtilityFunctionTruncateFileToLarger( s.assert.NoError(err) data := make([]byte, size) - s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) err = s.az.TruncateFile( - internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)}, + internal.TruncateFileOptions{Name: name, NewSize: int64(truncatedLength)}, ) s.assert.NoError(err) diff --git a/component/azstorage/connection.go b/component/azstorage/connection.go index 294c1531e..323e43c13 100644 --- a/component/azstorage/connection.go +++ b/component/azstorage/connection.go @@ -120,12 +120,12 @@ type AzConnection interface { WriteFromFile(name string, metadata map[string]*string, fi *os.File) error WriteFromBuffer(name string, metadata map[string]*string, data []byte) error - Write(options internal.WriteFileOptions) error + Write(options *internal.WriteFileOptions) error GetFileBlockOffsets(name string) (*common.BlockOffsetList, error) ChangeMod(string, os.FileMode) error ChangeOwner(string, int, int) error - TruncateFile(string, int64) error + TruncateFile(options internal.TruncateFileOptions) error StageAndCommit(name string, bol *common.BlockOffsetList) error GetCommittedBlockList(string) (*internal.CommittedBlockList, error) diff --git a/component/azstorage/datalake.go b/component/azstorage/datalake.go index 4e0434b7e..c0ed1a466 100644 --- a/component/azstorage/datalake.go +++ b/component/azstorage/datalake.go @@ -195,7 +195,7 @@ func (dl *Datalake) TestPipeline() error { }) // we are just validating the auth mode used. 
So, no need to iterate over the pages - _, err := listPathPager.NextPage(context.Background()) + resp, err := listPathPager.NextPage(context.Background()) if err != nil { log.Err( "Datalake::TestPipeline : Failed to validate account with given auth %s", @@ -207,6 +207,16 @@ func (dl *Datalake) TestPipeline() error { return fmt.Errorf("Datalake::TestPipeline : [%s]", respErr.ErrorCode) } return err + } else { + // If the account is not HNS, the returned permissions will be nil. + // For empty containers, block_blob's TestPipeline performs a further check, + // so there is no need to error out when no paths are returned. + if len(resp.Paths) > 0 { + if resp.Paths[0].Permissions == nil { + // Block an FNS account from being mounted as an HNS account. + return fmt.Errorf("Datalake::TestPipeline : Account is not HNS, please set the correct account type") + } + } } return dl.BlockBlob.TestPipeline() @@ -565,7 +575,7 @@ func (dl *Datalake) WriteFromBuffer(name string, metadata map[string]*string, da } // Write : Write to a file at given offset -func (dl *Datalake) Write(options internal.WriteFileOptions) error { +func (dl *Datalake) Write(options *internal.WriteFileOptions) error { return dl.BlockBlob.Write(options) } @@ -577,8 +587,8 @@ func (dl *Datalake) GetFileBlockOffsets(name string) (*common.BlockOffsetList, e return dl.BlockBlob.GetFileBlockOffsets(name) } -func (dl *Datalake) TruncateFile(name string, size int64) error { - return dl.BlockBlob.TruncateFile(name, size) +func (dl *Datalake) TruncateFile(options internal.TruncateFileOptions) error { + return dl.BlockBlob.TruncateFile(options) } // ChangeMod : Change mode of a path diff --git a/component/azstorage/datalake_test.go b/component/azstorage/datalake_test.go index c9d764722..8325b391c 100644 --- a/component/azstorage/datalake_test.go +++ b/component/azstorage/datalake_test.go @@ -279,6 +279,37 @@ func (s *datalakeTestSuite) TestListContainers() { // TODO : ListContainersHuge: Maybe this is overkill? +func (s *datalakeTestSuite) TestFNSOverHNS() { + defer s.cleanupTest() + // Testing dir and dir/ + s.tearDownTestHelper(false) // Don't delete the generated container. + config := fmt.Sprintf( + "azstorage:\n account-name: %s\n type: adls\n account-key: %s\n mode: key\n container: %s\n ", + storageTestConfigurationParameters.BlockAccount, + storageTestConfigurationParameters.BlockKey, + s.container, + ) + s.setupTestHelper(config, s.container, true) + + var paths = []string{generateDirectoryName(), generateDirectoryName() + "/"} + for _, path := range paths { + log.Debug(path) + s.Run(path, func() { + err := s.az.CreateDir(internal.CreateDirOptions{Name: path}) + + s.assert.NoError(err) + // Directory should be in the account + dir := s.containerClient.NewDirectoryClient(internal.TruncateDirName(path)) + _, err = dir.GetProperties(ctx, nil) + s.assert.NoError(err) + }) + } + + err := s.az.storage.TestPipeline() + s.assert.Error(err) + s.assert.Contains(err.Error(), "Account is not HNS") +} + func (s *datalakeTestSuite) TestCreateDir() { defer s.cleanupTest() // Testing dir and dir/ @@ -681,7 +712,7 @@ func (s *datalakeTestSuite) TestStreamDirError() { s.assert.NoError( err, - ) + ) // Note: See comment in BlockBlob.List.
BlockBlob behaves differently from Datalake s.assert.Empty(entries) // Directory should not be in the account dir := s.containerClient.NewDirectoryClient(name) @@ -957,7 +988,7 @@ func (s *datalakeTestSuite) TestWriteSmallFile() { testData := "test data" data := []byte(testData) dataLen := len(data) - _, err := s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + _, err := s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) s.assert.NoError(err) f, _ := os.CreateTemp("", name+".tmp") defer os.Remove(f.Name()) @@ -982,12 +1013,12 @@ func (s *datalakeTestSuite) TestOverwriteSmallFile() { testData := "test-replace-data" data := []byte(testData) dataLen := len(data) - _, err := s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + _, err := s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) s.assert.NoError(err) f, _ := os.CreateTemp("", name+".tmp") defer os.Remove(f.Name()) newTestData := []byte("newdata") - _, err = s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 5, Data: newTestData}) + _, err = s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 5, Data: newTestData}) s.assert.NoError(err) currentData := []byte("test-newdata-data") @@ -1012,12 +1043,12 @@ func (s *datalakeTestSuite) TestOverwriteAndAppendToSmallFile() { testData := "test-data" data := []byte(testData) - _, err := s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + _, err := s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) s.assert.NoError(err) f, _ := os.CreateTemp("", name+".tmp") defer os.Remove(f.Name()) newTestData := []byte("newdata") - _, err = s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 5, Data: newTestData}) + _, err = s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 5, Data: newTestData}) s.assert.NoError(err) currentData := []byte("test-newdata") @@ -1043,12 +1074,12 @@ func (s *datalakeTestSuite) TestAppendOffsetLargerThanSmallFile() { testData := "test-data" data := []byte(testData) - _, err := s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + _, err := s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) s.assert.NoError(err) f, _ := os.CreateTemp("", name+".tmp") defer os.Remove(f.Name()) newTestData := []byte("newdata") - _, err = s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 12, Data: newTestData}) + _, err = s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 12, Data: newTestData}) s.assert.NoError(err) currentData := []byte("test-data\x00\x00\x00newdata") @@ -1074,12 +1105,12 @@ func (s *datalakeTestSuite) TestAppendToSmallFile() { testData := "test-data" data := []byte(testData) - _, err := s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + _, err := s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) s.assert.NoError(err) f, _ := os.CreateTemp("", name+".tmp") defer os.Remove(f.Name()) newTestData := []byte("-newdata") - _, err = s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 9, Data: newTestData}) + _, err = s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 9, Data: newTestData}) s.assert.NoError(err) currentData := []byte("test-data-newdata") @@ -1119,7 +1150,7 @@ func (s *datalakeTestSuite) TestAppendBlocksToSmallFile() { f, _ := os.CreateTemp("", name+".tmp") defer os.Remove(f.Name()) newTestData := []byte("-newdata-newdata-newdata") - _, err = 
s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 9, Data: newTestData}) + _, err = s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 9, Data: newTestData}) s.assert.NoError(err) currentData := []byte("test-data-newdata-newdata-newdata") @@ -1159,7 +1190,7 @@ func (s *datalakeTestSuite) TestOverwriteBlocks() { f, _ := os.CreateTemp("", name+".tmp") defer os.Remove(f.Name()) newTestData := []byte("cake") - _, err = s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 16, Data: newTestData}) + _, err = s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 16, Data: newTestData}) s.assert.NoError(err) currentData := []byte("testdatates1dat1cakedat2tes3dat3tes4dat4") @@ -1199,7 +1230,7 @@ func (s *datalakeTestSuite) TestOverwriteAndAppendBlocks() { f, _ := os.CreateTemp("", name+".tmp") defer os.Remove(f.Name()) newTestData := []byte("43211234cake") - _, err = s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 32, Data: newTestData}) + _, err = s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 32, Data: newTestData}) s.assert.NoError(err) currentData := []byte("testdatates1dat1tes2dat2tes3dat343211234cake") @@ -1237,7 +1268,7 @@ func (s *datalakeTestSuite) TestAppendBlocks() { f, _ := os.CreateTemp("", name+".tmp") defer os.Remove(f.Name()) newTestData := []byte("43211234cake") - _, err = s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: newTestData}) + _, err = s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: newTestData}) s.assert.NoError(err) currentData := []byte("43211234cakedat1tes2dat2tes3dat3tes4dat4") @@ -1275,7 +1306,7 @@ func (s *datalakeTestSuite) TestAppendOffsetLargerThanSize() { f, _ := os.CreateTemp("", name+".tmp") defer os.Remove(f.Name()) newTestData := []byte("43211234cake") - _, err = s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 45, Data: newTestData}) + _, err = s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 45, Data: newTestData}) s.assert.NoError(err) currentData := []byte( @@ -1324,7 +1355,7 @@ func (s *datalakeTestSuite) TestOpenFileSize() { name := generateFileName() size := 10 s.az.CreateFile(internal.CreateFileOptions{Name: name}) - s.az.TruncateFile(internal.TruncateFileOptions{Name: name, Size: int64(size)}) + s.az.TruncateFile(internal.TruncateFileOptions{Name: name, OldSize: -1, NewSize: int64(size)}) h, err := s.az.OpenFile(internal.OpenFileOptions{Name: name}) s.assert.NoError(err) @@ -1582,11 +1613,11 @@ func (s *datalakeTestSuite) TestReadInBuffer() { h, _ := s.az.CreateFile(internal.CreateFileOptions{Name: name}) testData := "test data" data := []byte(testData) - s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) h, _ = s.az.OpenFile(internal.OpenFileOptions{Name: name}) output := make([]byte, 5) - len, err := s.az.ReadInBuffer(internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) + len, err := s.az.ReadInBuffer(&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) s.assert.NoError(err) s.assert.Equal(5, len) s.assert.EqualValues(testData[:5], output) @@ -1602,13 +1633,18 @@ func (s *datalakeTestSuite) TestReadInBufferWithoutHandle() { testData := "test data" data := []byte(testData) - n, err := s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + n, err := s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) s.assert.NoError(err) s.assert.Len(data, n) output 
:= make([]byte, 5) len, err := s.az.ReadInBuffer( - internal.ReadInBufferOptions{Offset: 0, Data: output, Path: name, Size: (int64)(len(data))}, + &internal.ReadInBufferOptions{ + Offset: 0, + Data: output, + Path: name, + Size: (int64)(len(data)), + }, ) s.assert.NoError(err) s.assert.Equal(5, len) @@ -1619,7 +1655,7 @@ func (s *datalakeTestSuite) TestReadInBufferEmptyPath() { defer s.cleanupTest() output := make([]byte, 5) - len, err := s.az.ReadInBuffer(internal.ReadInBufferOptions{Offset: 0, Data: output, Size: 5}) + len, err := s.az.ReadInBuffer(&internal.ReadInBufferOptions{Offset: 0, Data: output, Size: 5}) s.assert.Error(err) s.assert.Equal(0, len) s.assert.Equal("path not given for download", err.Error()) @@ -1632,13 +1668,13 @@ func (suite *datalakeTestSuite) TestReadInBufferWithETAG() { fileHandle, _ := suite.az.CreateFile(internal.CreateFileOptions{Name: name}) testData := "test data" data := []byte(testData) - suite.az.WriteFile(internal.WriteFileOptions{Handle: fileHandle, Offset: 0, Data: data}) + suite.az.WriteFile(&internal.WriteFileOptions{Handle: fileHandle, Offset: 0, Data: data}) fileHandle, _ = suite.az.OpenFile(internal.OpenFileOptions{Name: name}) output := make([]byte, 5) var etag string len, err := suite.az.ReadInBuffer( - internal.ReadInBufferOptions{Handle: fileHandle, Offset: 0, Data: output, Etag: &etag}, + &internal.ReadInBufferOptions{Handle: fileHandle, Offset: 0, Data: output, Etag: &etag}, ) suite.assert.NoError(err) suite.assert.NotEmpty(etag) @@ -1654,11 +1690,11 @@ func (s *datalakeTestSuite) TestReadInBufferLargeBuffer() { h, _ := s.az.CreateFile(internal.CreateFileOptions{Name: name}) testData := "test data" data := []byte(testData) - s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) h, _ = s.az.OpenFile(internal.OpenFileOptions{Name: name}) output := make([]byte, 1000) // Testing that passing in a super large buffer will still work - len, err := s.az.ReadInBuffer(internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) + len, err := s.az.ReadInBuffer(&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) s.assert.NoError(err) s.assert.EqualValues(h.Size, len) s.assert.EqualValues(testData, output[:h.Size]) @@ -1671,7 +1707,7 @@ func (s *datalakeTestSuite) TestReadInBufferEmpty() { h, _ := s.az.CreateFile(internal.CreateFileOptions{Name: name}) output := make([]byte, 10) - len, err := s.az.ReadInBuffer(internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) + len, err := s.az.ReadInBuffer(&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) s.assert.NoError(err) s.assert.Equal(0, len) } @@ -1684,7 +1720,7 @@ func (s *datalakeTestSuite) TestReadInBufferBadRange() { h.Size = 10 _, err := s.az.ReadInBuffer( - internal.ReadInBufferOptions{Handle: h, Offset: 20, Data: make([]byte, 2)}, + &internal.ReadInBufferOptions{Handle: h, Offset: 20, Data: make([]byte, 2)}, ) s.assert.Error(err) s.assert.EqualValues(syscall.ERANGE, err) @@ -1698,7 +1734,7 @@ func (s *datalakeTestSuite) TestReadInBufferError() { h.Size = 10 _, err := s.az.ReadInBuffer( - internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: make([]byte, 2)}, + &internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: make([]byte, 2)}, ) s.assert.Error(err) s.assert.EqualValues(syscall.ENOENT, err) @@ -1712,7 +1748,7 @@ func (s *datalakeTestSuite) TestWriteFile() { testData := "test data" data := []byte(testData) - count, err := 
s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + count, err := s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) s.assert.NoError(err) s.assert.Equal(len(data), count) @@ -1748,7 +1784,7 @@ func (s *datalakeTestSuite) TestWriteFileWindowsNameConvert() { testData := "test data" data := []byte(testData) - count, err := s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + count, err := s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) s.assert.NoError(err) s.assert.Equal(len(data), count) @@ -1770,54 +1806,15 @@ func (s *datalakeTestSuite) TestTruncateSmallFileSmaller() { testData := "test data" data := []byte(testData) truncatedLength := 5 - s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) - - err := s.az.TruncateFile(internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)}) - s.assert.NoError(err) - - // Blob should have updated data - fileClient := s.containerClient.NewFileClient(name) - resp, err := fileClient.DownloadStream(ctx, &file.DownloadStreamOptions{ - Range: &file.HTTPRange{Offset: 0, Count: int64(truncatedLength)}, - }) - // 0, int64(truncatedLength)) - s.assert.NoError(err) - s.assert.EqualValues(truncatedLength, *resp.ContentLength) - output, _ := io.ReadAll(resp.Body) - s.assert.EqualValues(testData[:truncatedLength], output) -} - -func (s *datalakeTestSuite) TestTruncateSmallFileSmallerWindowsNameConvert() { - // Skip test if not running on Windows - if runtime.GOOS != "windows" { - return - } - config := fmt.Sprintf( - "restricted-characters-windows: true\nazstorage:\n account-name: %s\n endpoint: https://%s.blob.core.windows.net/\n type: adls\n account-key: %s\n mode: key\n container: %s\n fail-unsupported-op: true", - storageTestConfigurationParameters.AdlsAccount, - storageTestConfigurationParameters.AdlsAccount, - storageTestConfigurationParameters.AdlsKey, - s.container, - ) - s.setupTestHelper(config, s.container, true) - defer s.cleanupTest() - // Setup - name := generateFileName() - windowsName := ""*:<>?|" + "/" + name + ""*:<>?|" - blobName := "\"*:<>?|" + "/" + name + "\"*:<>?|" - h, _ := s.az.CreateFile(internal.CreateFileOptions{Name: windowsName}) - testData := "test data" - data := []byte(testData) - truncatedLength := 5 - s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) err := s.az.TruncateFile( - internal.TruncateFileOptions{Name: windowsName, Size: int64(truncatedLength)}, + internal.TruncateFileOptions{Name: name, OldSize: -1, NewSize: int64(truncatedLength)}, ) s.assert.NoError(err) // Blob should have updated data - fileClient := s.containerClient.NewFileClient(blobName) + fileClient := s.containerClient.NewFileClient(name) resp, err := fileClient.DownloadStream(ctx, &file.DownloadStreamOptions{ Range: &file.HTTPRange{Offset: 0, Count: int64(truncatedLength)}, }) @@ -1851,7 +1848,9 @@ func (s *datalakeTestSuite) TestTruncateChunkedFileSmaller() { ) s.assert.NoError(err) - err = s.az.TruncateFile(internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)}) + err = s.az.TruncateFile( + internal.TruncateFileOptions{Name: name, OldSize: -1, NewSize: int64(truncatedLength)}, + ) s.assert.NoError(err) // Blob should have updated data @@ -1873,9 +1872,11 @@ func (s *datalakeTestSuite) TestTruncateSmallFileEqual() { testData := "test data" data := []byte(testData) truncatedLength := 9 - 
s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) - err := s.az.TruncateFile(internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)}) + err := s.az.TruncateFile( + internal.TruncateFileOptions{Name: name, OldSize: -1, NewSize: int64(truncatedLength)}, + ) s.assert.NoError(err) // Blob should have updated data @@ -1912,7 +1913,9 @@ func (s *datalakeTestSuite) TestTruncateChunkedFileEqual() { ) s.assert.NoError(err) - err = s.az.TruncateFile(internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)}) + err = s.az.TruncateFile( + internal.TruncateFileOptions{Name: name, OldSize: -1, NewSize: int64(truncatedLength)}, + ) s.assert.NoError(err) // Blob should have updated data @@ -1934,9 +1937,11 @@ func (s *datalakeTestSuite) TestTruncateSmallFileBigger() { testData := "test data" data := []byte(testData) truncatedLength := 15 - s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) - err := s.az.TruncateFile(internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)}) + err := s.az.TruncateFile( + internal.TruncateFileOptions{Name: name, OldSize: -1, NewSize: int64(truncatedLength)}, + ) s.assert.NoError(err) // Blob should have updated data @@ -1973,7 +1978,9 @@ func (s *datalakeTestSuite) TestTruncateChunkedFileBigger() { ) s.assert.NoError(err) - s.az.TruncateFile(internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)}) + err = s.az.TruncateFile( + internal.TruncateFileOptions{Name: name, OldSize: -1, NewSize: int64(truncatedLength)}, + ) s.assert.NoError(err) // Blob should have updated data @@ -1992,7 +1999,7 @@ func (s *datalakeTestSuite) TestTruncateFileError() { // Setup name := generateFileName() - err := s.az.TruncateFile(internal.TruncateFileOptions{Name: name}) + err := s.az.TruncateFile(internal.TruncateFileOptions{Name: name, OldSize: -1}) s.assert.Error(err) s.assert.EqualValues(syscall.ENOENT, err) } @@ -2005,7 +2012,7 @@ func (s *datalakeTestSuite) TestCopyToFile() { testData := "test data" data := []byte(testData) dataLen := len(data) - s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) f, _ := os.CreateTemp("", name+".tmp") defer os.Remove(f.Name()) @@ -2237,7 +2244,7 @@ func (s *datalakeTestSuite) TestGetAttrFileSize() { h, _ := s.az.CreateFile(internal.CreateFileOptions{Name: name}) testData := "test data" data := []byte(testData) - s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) props, err := s.az.GetAttr(internal.GetAttrOptions{Name: name}) s.assert.NoError(err) @@ -2257,7 +2264,7 @@ func (s *datalakeTestSuite) TestGetAttrFileTime() { // h, _ := s.az.CreateFile(internal.CreateFileOptions{Name: name}) // testData := "test data" // data := []byte(testData) - // s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + // s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) // before, err := s.az.GetAttr(internal.GetAttrOptions{Name: name}) // s.assert.NoError(err) @@ -2265,7 +2272,7 @@ func (s *datalakeTestSuite) TestGetAttrFileTime() { // time.Sleep(time.Second * 3) // Wait 3 seconds and then modify the file again - // s.az.WriteFile(internal.WriteFileOptions{Handle: 
h, Offset: 0, Data: data}) + // s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) // time.Sleep(time.Second * 1) // after, err := s.az.GetAttr(internal.GetAttrOptions{Name: name}) @@ -2352,14 +2359,13 @@ func (s *datalakeTestSuite) TestGetFileBlockOffsetsSmallFile() { testData := "testdatates1dat1tes2dat2tes3dat3tes4dat4" data := []byte(testData) - s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) // GetFileBlockOffsets offsetList, err := s.az.GetFileBlockOffsets(internal.GetFileBlockOffsetsOptions{Name: name}) s.assert.NoError(err) s.assert.Empty(offsetList.BlockList) - s.assert.True(offsetList.SmallFile()) - s.assert.EqualValues(0, offsetList.BlockIdLength) + s.assert.True(offsetList.HasNoBlocks()) } func (s *datalakeTestSuite) TestGetFileBlockOffsetsChunkedFile() { @@ -2423,7 +2429,7 @@ func (s *datalakeTestSuite) TestFlushFileEmptyFile() { output := make([]byte, 1) length, err := s.az.ReadInBuffer( - internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}, + &internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}, ) s.assert.NoError(err) s.assert.Equal(0, length) @@ -2463,7 +2469,7 @@ func (s *datalakeTestSuite) TestFlushFileChunkedFile() { output := make([]byte, 16*MB) length, err := s.az.ReadInBuffer( - internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}, + &internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}, ) s.assert.NoError(err) s.assert.Equal(16*MB, length) @@ -2516,7 +2522,7 @@ func (s *datalakeTestSuite) TestFlushFileUpdateChunkedFile() { s.assert.NoError(err) output := make([]byte, 16*MB) - len, err := s.az.ReadInBuffer(internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) + len, err := s.az.ReadInBuffer(&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) s.assert.NoError(err) s.assert.Equal(16*MB, len) s.assert.NotEqual(data, output) @@ -2573,7 +2579,7 @@ func (s *datalakeTestSuite) TestFlushFileTruncateUpdateChunkedFile() { s.assert.NoError(err) output := make([]byte, 16*MB) - len, err := s.az.ReadInBuffer(internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) + len, err := s.az.ReadInBuffer(&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) s.assert.NoError(err) s.assert.Equal(16*MB, len) s.assert.NotEqual(data, output) @@ -2630,13 +2636,13 @@ func (s *datalakeTestSuite) TestFlushFileAppendBlocksEmptyFile() { } blk3.Flags.Set(common.DirtyBlock) h.CacheObj.BlockList = append(h.CacheObj.BlockList, blk1, blk2, blk3) - bol.Flags.Clear(common.SmallFile) + bol.Flags.Clear(common.BlobFlagHasNoBlocks) err := s.az.FlushFile(internal.FlushFileOptions{Handle: h}) s.assert.NoError(err) output := make([]byte, 6*MB) - len, err := s.az.ReadInBuffer(internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) + len, err := s.az.ReadInBuffer(&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) s.assert.NoError(err) s.assert.Equal(6*MB, len) s.assert.Equal(blk1.Data, output[0:blockSize]) @@ -2711,13 +2717,13 @@ func (s *datalakeTestSuite) TestFlushFileAppendBlocksChunkedFile() { } blk3.Flags.Set(common.DirtyBlock) h.CacheObj.BlockList = append(h.CacheObj.BlockList, blk1, blk2, blk3) - bol.Flags.Clear(common.SmallFile) + bol.Flags.Clear(common.BlobFlagHasNoBlocks) err = s.az.FlushFile(internal.FlushFileOptions{Handle: h}) s.assert.NoError(err) output := make([]byte, fileSize+3*blockSize) - len, err := 
s.az.ReadInBuffer(internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) + len, err := s.az.ReadInBuffer(&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) s.assert.NoError(err) s.assert.Equal(fileSize+3*blockSize, len) s.assert.Equal(data, output[0:fileSize]) @@ -2770,13 +2776,13 @@ func (s *datalakeTestSuite) TestFlushFileTruncateBlocksEmptyFile() { blk3.Flags.Set(common.TruncatedBlock) blk3.Flags.Set(common.DirtyBlock) h.CacheObj.BlockList = append(h.CacheObj.BlockList, blk1, blk2, blk3) - bol.Flags.Clear(common.SmallFile) + bol.Flags.Clear(common.BlobFlagHasNoBlocks) err := s.az.FlushFile(internal.FlushFileOptions{Handle: h}) s.assert.NoError(err) output := make([]byte, 3*int64(blockSize)) - len, err := s.az.ReadInBuffer(internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) + len, err := s.az.ReadInBuffer(&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) s.assert.NoError(err) s.assert.EqualValues(3*int64(blockSize), len) data := make([]byte, 3*blockSize) @@ -2844,13 +2850,13 @@ func (s *datalakeTestSuite) TestFlushFileTruncateBlocksChunkedFile() { blk3.Flags.Set(common.TruncatedBlock) blk3.Flags.Set(common.DirtyBlock) h.CacheObj.BlockList = append(h.CacheObj.BlockList, blk1, blk2, blk3) - bol.Flags.Clear(common.SmallFile) + bol.Flags.Clear(common.BlobFlagHasNoBlocks) err = s.az.FlushFile(internal.FlushFileOptions{Handle: h}) s.assert.NoError(err) output := make([]byte, fileSize+3*blockSize) - len, err := s.az.ReadInBuffer(internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) + len, err := s.az.ReadInBuffer(&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) s.assert.NoError(err) s.assert.Equal(fileSize+3*blockSize, len) s.assert.Equal(data, output[:fileSize]) @@ -2904,13 +2910,13 @@ func (s *datalakeTestSuite) TestFlushFileAppendAndTruncateBlocksEmptyFile() { blk3.Flags.Set(common.DirtyBlock) blk3.Flags.Set(common.TruncatedBlock) h.CacheObj.BlockList = append(h.CacheObj.BlockList, blk1, blk2, blk3) - bol.Flags.Clear(common.SmallFile) + bol.Flags.Clear(common.BlobFlagHasNoBlocks) err := s.az.FlushFile(internal.FlushFileOptions{Handle: h}) s.assert.NoError(err) output := make([]byte, 3*blockSize) - len, err := s.az.ReadInBuffer(internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) + len, err := s.az.ReadInBuffer(&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) s.assert.NoError(err) s.assert.Equal(3*blockSize, len) data := make([]byte, blockSize) @@ -2982,14 +2988,14 @@ func (s *datalakeTestSuite) TestFlushFileAppendAndTruncateBlocksChunkedFile() { blk3.Flags.Set(common.DirtyBlock) blk3.Flags.Set(common.TruncatedBlock) h.CacheObj.BlockList = append(h.CacheObj.BlockList, blk1, blk2, blk3) - bol.Flags.Clear(common.SmallFile) + bol.Flags.Clear(common.BlobFlagHasNoBlocks) err = s.az.FlushFile(internal.FlushFileOptions{Handle: h}) s.assert.NoError(err) // file should be empty output := make([]byte, fileSize+3*blockSize) - len, err := s.az.ReadInBuffer(internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) + len, err := s.az.ReadInBuffer(&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output}) s.assert.NoError(err) s.assert.Equal(fileSize+3*blockSize, len) s.assert.Equal(data, output[:fileSize]) @@ -3169,7 +3175,7 @@ func getACL(dl *Datalake, name string) (string, error) { func (s *datalakeTestSuite) createFileWithData(name string, data []byte, mode os.FileMode) { h, _ := s.az.CreateFile(internal.CreateFileOptions{Name: name}) - _, err := 
s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) + _, err := s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) s.assert.NoError(err) err = s.az.Chmod(internal.ChmodOptions{Name: name, Mode: mode}) @@ -3438,7 +3444,7 @@ func (s *datalakeTestSuite) TestList() { // h, _ := s.az.CreateFile(internal.CreateFileOptions{Name: name}) // testData := "test data" // data := []byte(testData) -// s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) +// s.az.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}) // h, _ = s.az.OpenFile(internal.OpenFileOptions{Name: name}) // s.az.CloseFile(internal.CloseFileOptions{Handle: h}) diff --git a/component/block_cache/block_cache.go b/component/block_cache/block_cache.go index 7a60ea2a6..5e61833ff 100755 --- a/component/block_cache/block_cache.go +++ b/component/block_cache/block_cache.go @@ -654,7 +654,7 @@ func (bc *BlockCache) getBlockSize(fileSize uint64, block *Block) uint64 { } // ReadInBuffer: Read the file into a buffer -func (bc *BlockCache) ReadInBuffer(options internal.ReadInBufferOptions) (int, error) { +func (bc *BlockCache) ReadInBuffer(options *internal.ReadInBufferOptions) (int, error) { if options.Offset >= options.Handle.Size { // EOF reached so early exit return 0, io.EOF @@ -1234,7 +1234,7 @@ func (bc *BlockCache) download(item *workItem) { var etag string // If file does not exists then download the block from the container - n, err := bc.NextComponent().ReadInBuffer(internal.ReadInBufferOptions{ + n, err := bc.NextComponent().ReadInBuffer(&internal.ReadInBufferOptions{ Handle: item.handle, Offset: int64(item.block.offset), Data: item.block.data, @@ -1361,7 +1361,7 @@ func (bc *BlockCache) download(item *workItem) { } // WriteFile: Write to the local file -func (bc *BlockCache) WriteFile(options internal.WriteFileOptions) (int, error) { +func (bc *BlockCache) WriteFile(options *internal.WriteFileOptions) (int, error) { // log.Debug("BlockCache::WriteFile : Writing %v bytes from %s", len(options.Data), options.Handle.Path) options.Handle.Lock() @@ -2331,6 +2331,21 @@ func (bc *BlockCache) SyncFile(options internal.SyncFileOptions) error { return nil } +func (bc *BlockCache) TruncateFile(options internal.TruncateFileOptions) error { + log.Trace("BlockCache::TruncateFile : path=%s, size=%d", options.Name, options.NewSize) + + // Set the block size to be used by the next component + options.BlockSize = int64(bc.blockSize) + + err := bc.NextComponent().TruncateFile(options) + if err != nil { + log.Err("BlockCache::TruncateFile : Failed to truncate file %s: %v", options.Name, err) + return err + } + + return nil +} + func (bc *BlockCache) StatFs() (*common.Statfs_t, bool, error) { var maxCacheSize uint64 if bc.diskSize > 0 { diff --git a/component/block_cache/block_cache_linux_test.go b/component/block_cache/block_cache_linux_test.go index 413684712..06cbc8a65 100644 --- a/component/block_cache/block_cache_linux_test.go +++ b/component/block_cache/block_cache_linux_test.go @@ -80,7 +80,7 @@ func (suite *blockCacheLinuxTestSuite) TestStrongConsistency() { data := make([]byte, size) n, err := tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}, ) // Write data to file suite.assert.NoError(err) suite.assert.EqualValues(n, size) @@ -101,7 +101,7 @@ func (suite *blockCacheLinuxTestSuite) TestStrongConsistency() { suite.assert.NoError(err)
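The new `BlockCache.TruncateFile` above is a thin pass-through: it stamps the cache's configured block size onto the options and delegates to the next component, matching this diff's wider migration from a `(name, size)` signature to `internal.TruncateFileOptions{Name, OldSize, NewSize, BlockSize}` and from value to pointer `WriteFileOptions`/`ReadInBufferOptions`. A minimal sketch of that delegation pattern, using local stand-in types rather than the real cloudfuse API (field names are taken from this diff; everything else is hypothetical):

```go
package main

import "fmt"

// TruncateFileOptions is a trimmed stand-in for internal.TruncateFileOptions:
// OldSize/NewSize replace the old Size field, and BlockSize carries the block
// size down the pipeline.
type TruncateFileOptions struct {
	Name      string
	OldSize   int64 // -1 means the current size is unknown (the value the tests pass)
	NewSize   int64
	BlockSize int64
}

// component is a stand-in for the pipeline component interface.
type component interface {
	TruncateFile(options TruncateFileOptions) error
}

// blockCache mimics the pass-through added above: stamp the configured block
// size onto the options, then delegate to the next component.
type blockCache struct {
	blockSize uint64
	next      component
}

func (bc *blockCache) TruncateFile(options TruncateFileOptions) error {
	options.BlockSize = int64(bc.blockSize) // options is a copy; the caller's struct is untouched
	return bc.next.TruncateFile(options)
}

// storageStub stands in for the storage component at the end of the pipeline.
type storageStub struct{}

func (storageStub) TruncateFile(o TruncateFileOptions) error {
	fmt.Printf("truncate %s: old=%d new=%d block=%d\n", o.Name, o.OldSize, o.NewSize, o.BlockSize)
	return nil
}

func main() {
	bc := &blockCache{blockSize: 16 * 1024 * 1024, next: storageStub{}}
	_ = bc.TruncateFile(TruncateFileOptions{Name: "a/b.txt", OldSize: -1, NewSize: 15})
}
```

Passing `TruncateFileOptions` by value keeps the stamped `BlockSize` visible downstream without mutating the caller's copy; the switch to pointer-based write/read options, by contrast, presumably avoids copying the larger structs on every call.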
suite.assert.NotNil(h) _, _ = tobj.blockCache.ReadInBuffer( - internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: data}, + &internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: data}, ) err = tobj.blockCache.CloseFile(internal.CloseFileOptions{Handle: h}) suite.assert.NoError(err) @@ -124,7 +124,7 @@ func (suite *blockCacheLinuxTestSuite) TestStrongConsistency() { suite.assert.NoError(err) suite.assert.NotNil(h) _, _ = tobj.blockCache.ReadInBuffer( - internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: data}, + &internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: data}, ) err = tobj.blockCache.CloseFile(internal.CloseFileOptions{Handle: h}) suite.assert.NoError(err) diff --git a/component/block_cache/block_cache_test.go b/component/block_cache/block_cache_test.go index be3a33b02..b669b0403 100644 --- a/component/block_cache/block_cache_test.go +++ b/component/block_cache/block_cache_test.go @@ -172,6 +172,12 @@ func (suite *blockCacheTestSuite) TestEmpty() { tobj, err := setupPipeline(emptyConfig) defer tobj.cleanupPipeline() + if err != nil { + // On some distros due to low memory, block cache init fails. + suite.assert.Contains(err.Error(), "memory limit too low for configured prefetch") + return + } + suite.assert.NoError(err) suite.assert.Equal("block_cache", tobj.blockCache.Name()) suite.assert.Equal(16*_1MB, tobj.blockCache.blockSize) @@ -202,6 +208,12 @@ func (suite *blockCacheTestSuite) TestMemory() { tobj, err := setupPipeline(emptyConfig) defer tobj.cleanupPipeline() + if err != nil { + // On some distros due to low memory, block cache init fails. + suite.assert.Contains(err.Error(), "memory limit too low for configured prefetch") + return + } + suite.assert.NoError(err) suite.assert.Equal("block_cache", tobj.blockCache.Name()) cmd := exec.Command("bash", "-c", "free -b | grep Mem | awk '{print $4}'") @@ -569,7 +581,7 @@ func (suite *blockCacheTestSuite) TestFileReadTotalBytes() { data := make([]byte, size) n, err := tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}, ) // Write data to file suite.assert.NoError(err) suite.assert.Equal(int64(n), size) @@ -580,7 +592,7 @@ func (suite *blockCacheTestSuite) TestFileReadTotalBytes() { totaldata := uint64(0) for { n, err := tobj.blockCache.ReadInBuffer( - internal.ReadInBufferOptions{Handle: h, Offset: int64(totaldata), Data: data}, + &internal.ReadInBufferOptions{Handle: h, Offset: int64(totaldata), Data: data}, ) totaldata += uint64(n) if err != nil { @@ -621,7 +633,7 @@ func (suite *blockCacheTestSuite) TestFileReadBlockCacheTmpPath() { data := make([]byte, size) n, err := tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}, ) // Write data to file suite.assert.NoError(err) suite.assert.Equal(n, size) @@ -643,7 +655,7 @@ func (suite *blockCacheTestSuite) TestFileReadBlockCacheTmpPath() { totaldata := uint64(0) for { n, err := tobj.blockCache.ReadInBuffer( - internal.ReadInBufferOptions{Handle: h, Offset: int64(totaldata), Data: data}, + &internal.ReadInBufferOptions{Handle: h, Offset: int64(totaldata), Data: data}, ) totaldata += uint64(n) if err != nil { @@ -659,7 +671,7 @@ func (suite *blockCacheTestSuite) TestFileReadBlockCacheTmpPath() { totaldata = uint64(0) for { n, err := tobj.blockCache.ReadInBuffer( - internal.ReadInBufferOptions{Handle: h, Offset: int64(totaldata), Data: data}, + 
&internal.ReadInBufferOptions{Handle: h, Offset: int64(totaldata), Data: data}, ) totaldata += uint64(n) if err != nil { @@ -722,7 +734,7 @@ func (suite *blockCacheTestSuite) TestFileReadSerial() { totaldata := uint64(0) for { n, err := tobj.blockCache.ReadInBuffer( - internal.ReadInBufferOptions{Handle: h, Offset: int64(totaldata), Data: data}, + &internal.ReadInBufferOptions{Handle: h, Offset: int64(totaldata), Data: data}, ) totaldata += uint64(n) if err != nil { @@ -766,7 +778,7 @@ func (suite *blockCacheTestSuite) TestFileReadRandom() { for range 50 { offset := mrand.Int64N(max) n, _ := tobj.blockCache.ReadInBuffer( - internal.ReadInBufferOptions{Handle: h, Offset: offset, Data: data}, + &internal.ReadInBufferOptions{Handle: h, Offset: offset, Data: data}, ) suite.assert.LessOrEqual(n, 100) } @@ -809,7 +821,7 @@ func (suite *blockCacheTestSuite) TestFileReadRandomNoPrefetch() { for range 50 { offset := mrand.Int64N(max) n, _ := tobj.blockCache.ReadInBuffer( - internal.ReadInBufferOptions{Handle: h, Offset: offset, Data: data}, + &internal.ReadInBufferOptions{Handle: h, Offset: offset, Data: data}, ) suite.assert.Equal(1, h.Buffers.Cooked.Len()) suite.assert.Equal(0, h.Buffers.Cooking.Len()) @@ -965,7 +977,7 @@ func (suite *blockCacheTestSuite) TestWriteFileDiskCachePresence() { // Write some data data := []byte("testdata") n, err := tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}, ) suite.assert.NoError(err) suite.assert.Len(data, n) @@ -1002,7 +1014,7 @@ func (suite *blockCacheTestSuite) TestWriteFileDiskCachePresenceInDir() { // Write some data data := []byte("testdata") n, err := tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}, ) suite.assert.NoError(err) suite.assert.Len(data, n) @@ -1037,7 +1049,7 @@ func (suite *blockCacheTestSuite) TestWriteFileSimple() { suite.assert.Equal(int64(0), fs.Size()) n, err := tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: []byte("Hello")}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: []byte("Hello")}, ) // 5 bytes suite.assert.NoError(err) suite.assert.Equal(5, n) @@ -1063,7 +1075,7 @@ func (suite *blockCacheTestSuite) TestWriteFileSimple() { suite.assert.Equal(int64(5), fs.Size()) n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 5, Data: []byte("Gello")}, + &internal.WriteFileOptions{Handle: h, Offset: 5, Data: []byte("Gello")}, ) // 5 bytes suite.assert.NoError(err) suite.assert.Equal(5, n) @@ -1103,7 +1115,7 @@ func (suite *blockCacheTestSuite) TestWriteFileMultiBlock() { suite.assert.False(h.Dirty()) n, err := tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}, ) // 5 bytes suite.assert.NoError(err) suite.assert.Len(data, n) @@ -1143,7 +1155,7 @@ func (suite *blockCacheTestSuite) TestWriteFileMultiBlockWithOverwrite() { suite.assert.False(h.Dirty()) n, err := tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}, ) // 5 bytes suite.assert.NoError(err) suite.assert.Len(data, n) @@ -1157,13 +1169,13 @@ func (suite *blockCacheTestSuite) TestWriteFileMultiBlockWithOverwrite() { suite.assert.False(h.Dirty()) n, err = tobj.blockCache.WriteFile( - 
internal.WriteFileOptions{Handle: h, Offset: 0, Data: data[:100]}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: data[:100]}, ) // 5 bytes suite.assert.NoError(err) suite.assert.Equal(100, n) n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: data[:100]}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: data[:100]}, ) // 5 bytes suite.assert.NoError(err) suite.assert.Equal(100, n) @@ -1200,7 +1212,7 @@ func (suite *blockCacheTestSuite) TestWritefileWithAppend() { suite.assert.False(h.Dirty()) n, err := tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}, ) // 5 bytes suite.assert.NoError(err) suite.assert.Len(data, n) @@ -1215,7 +1227,7 @@ func (suite *blockCacheTestSuite) TestWritefileWithAppend() { suite.assert.False(h.Dirty()) n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: data}, ) // 5 bytes suite.assert.NoError(err) suite.assert.Len(data, n) @@ -1233,7 +1245,7 @@ func (suite *blockCacheTestSuite) TestWritefileWithAppend() { _, _ = rand.Read(data) n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: h.Size, Data: dataNew}, + &internal.WriteFileOptions{Handle: h, Offset: h.Size, Data: dataNew}, ) // 5 bytes suite.assert.NoError(err) suite.assert.Len(dataNew, n) @@ -1276,7 +1288,7 @@ func (suite *blockCacheTestSuite) TestWriteBlockOutOfRange() { _, _ = rand.Read(data) n, err := tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 10 * 50001, Data: dataNew}, + &internal.WriteFileOptions{Handle: h, Offset: 10 * 50001, Data: dataNew}, ) // 5 bytes suite.assert.Error(err) suite.assert.Contains(err.Error(), "block index out of range") @@ -1284,7 +1296,7 @@ func (suite *blockCacheTestSuite) TestWriteBlockOutOfRange() { tobj.blockCache.blockSize = 1048576 n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 10 * 50001, Data: dataNew}, + &internal.WriteFileOptions{Handle: h, Offset: 10 * 50001, Data: dataNew}, ) // 5 bytes suite.assert.NoError(err) suite.assert.Len(dataNew, n) @@ -1311,7 +1323,7 @@ func (suite *blockCacheTestSuite) TestDeleteAndRenameDirAndFile() { suite.assert.False(h.Dirty()) n, err := tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: []byte("Hello")}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: []byte("Hello")}, ) // 5 bytes suite.assert.NoError(err) suite.assert.Equal(5, n) @@ -1410,7 +1422,7 @@ func (suite *blockCacheTestSuite) TestZZZZLazyWrite() { handle, _ := tobj.blockCache.CreateFile(internal.CreateFileOptions{Name: file, Mode: 0777}) data := make([]byte, 10*1024*1024) _, _ = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}, + &internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}, ) _ = tobj.blockCache.FlushFile(internal.FlushFileOptions{Handle: handle}) @@ -1485,7 +1497,7 @@ func (suite *blockCacheTestSuite) TestRandomWriteSparseFile() { // write 1MB data at offset 0 n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: dataBuff[:_1MB]}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: dataBuff[:_1MB]}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB)) @@ -1493,14 +1505,14 @@ func (suite *blockCacheTestSuite) TestRandomWriteSparseFile() { // write 1MB data at offset 
9*_1MB n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: int64(9 * _1MB), Data: dataBuff[4*_1MB:]}, + &internal.WriteFileOptions{Handle: h, Offset: int64(9 * _1MB), Data: dataBuff[4*_1MB:]}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB)) // write 1MB data at offset 5*_1MB n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{ + &internal.WriteFileOptions{ Handle: h, Offset: int64(5 * _1MB), Data: dataBuff[2*_1MB : 3*_1MB], @@ -1583,7 +1595,7 @@ func (suite *blockCacheTestSuite) TestRandomWriteSparseFileWithPartialBlock() { // write 1MB data at offset 0 // partial block where it has data only from 0 to 1MB n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: dataBuff[:_1MB]}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: dataBuff[:_1MB]}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB)) @@ -1591,7 +1603,7 @@ func (suite *blockCacheTestSuite) TestRandomWriteSparseFileWithPartialBlock() { // write 1MB data at offset 9*_1MB n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{ + &internal.WriteFileOptions{ Handle: h, Offset: int64(9 * _1MB), Data: dataBuff[2*_1MB : 3*_1MB], @@ -1602,7 +1614,7 @@ func (suite *blockCacheTestSuite) TestRandomWriteSparseFileWithPartialBlock() { // write 1MB data at offset 18*_1MB n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: int64(18 * _1MB), Data: dataBuff[4*_1MB:]}, + &internal.WriteFileOptions{Handle: h, Offset: int64(18 * _1MB), Data: dataBuff[4*_1MB:]}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB)) @@ -1680,7 +1692,7 @@ func (suite *blockCacheTestSuite) TestRandomWriteSparseFileWithBlockOverlap() { // write 1MB data at offset 0 n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: dataBuff[:_1MB]}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: dataBuff[:_1MB]}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB)) @@ -1688,7 +1700,7 @@ func (suite *blockCacheTestSuite) TestRandomWriteSparseFileWithBlockOverlap() { // write 1MB data at offset 9*_1MB n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: int64(9 * _1MB), Data: dataBuff[4*_1MB:]}, + &internal.WriteFileOptions{Handle: h, Offset: int64(9 * _1MB), Data: dataBuff[4*_1MB:]}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB)) @@ -1696,7 +1708,7 @@ func (suite *blockCacheTestSuite) TestRandomWriteSparseFileWithBlockOverlap() { // write 1MB data at offset 5*_1MB // data is written to last 0.5MB of block 5 and first 0.5MB of block 6 n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{ + &internal.WriteFileOptions{ Handle: h, Offset: int64(5*_1MB + 1024*512), Data: dataBuff[2*_1MB : 3*_1MB], @@ -1773,7 +1785,7 @@ func (suite *blockCacheTestSuite) TestRandomWriteFileOneBlock() { // write 2MB data at offset 4*1_MB n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: int64(4 * _1MB), Data: dataBuff[3*_1MB:]}, + &internal.WriteFileOptions{Handle: h, Offset: int64(4 * _1MB), Data: dataBuff[3*_1MB:]}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(2*_1MB)) @@ -1781,7 +1793,7 @@ func (suite *blockCacheTestSuite) TestRandomWriteFileOneBlock() { // write 1MB data at offset 2*_1MB n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{ + &internal.WriteFileOptions{ Handle: h, Offset: int64(2 * _1MB), Data: dataBuff[2*_1MB : 3*_1MB], @@ -1868,7 +1880,7 @@ func (suite *blockCacheTestSuite) 
TestRandomWriteFlushAndOverwrite() { // write 1MB data at offset 0 n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: dataBuff[:_1MB]}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: dataBuff[:_1MB]}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB)) @@ -1876,7 +1888,7 @@ func (suite *blockCacheTestSuite) TestRandomWriteFlushAndOverwrite() { // write 1MB data at offset 9*_1MB n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: int64(9 * _1MB), Data: dataBuff[4*_1MB:]}, + &internal.WriteFileOptions{Handle: h, Offset: int64(9 * _1MB), Data: dataBuff[4*_1MB:]}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB)) @@ -1888,7 +1900,7 @@ func (suite *blockCacheTestSuite) TestRandomWriteFlushAndOverwrite() { // write 1MB data at offset 5.5*_1MB // overwriting last 0.5MB of block 5 and first 0.5MB of block 6 after flush n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{ + &internal.WriteFileOptions{ Handle: h, Offset: int64(5*_1MB + 1024*512), Data: dataBuff[2*_1MB : 3*_1MB], @@ -1899,7 +1911,7 @@ func (suite *blockCacheTestSuite) TestRandomWriteFlushAndOverwrite() { // write 1MB data at offset 18*_1MB n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: int64(18 * _1MB), Data: dataBuff[4*_1MB:]}, + &internal.WriteFileOptions{Handle: h, Offset: int64(18 * _1MB), Data: dataBuff[4*_1MB:]}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB)) @@ -1983,7 +1995,7 @@ func (suite *blockCacheTestSuite) TestRandomWriteUncommittedBlockValidation() { for i := 0; i < prefetch+50; i++ { n, err := tobj.blockCache.WriteFile( - internal.WriteFileOptions{ + &internal.WriteFileOptions{ Handle: h, Offset: int64(i * int(_1MB)), Data: dataBuff[:_1MB], @@ -1998,7 +2010,7 @@ func (suite *blockCacheTestSuite) TestRandomWriteUncommittedBlockValidation() { // update 10 bytes at 0 offset n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: dataBuff[_1MB : _1MB+10]}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: dataBuff[_1MB : _1MB+10]}, ) suite.assert.NoError(err) suite.assert.Equal(10, n) @@ -2006,7 +2018,7 @@ func (suite *blockCacheTestSuite) TestRandomWriteUncommittedBlockValidation() { // update 10 bytes at 5MB offset n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{ + &internal.WriteFileOptions{ Handle: h, Offset: int64(5 * _1MB), Data: dataBuff[2*_1MB : 2*_1MB+10], @@ -2059,7 +2071,7 @@ func (suite *blockCacheTestSuite) TestRandomWriteExistingFile() { // write 5MB data n, err := tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: dataBuff[:]}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: dataBuff[:]}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(5*_1MB)) @@ -2081,7 +2093,7 @@ func (suite *blockCacheTestSuite) TestRandomWriteExistingFile() { // write randomly in new handle at offset 2MB n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: nh, Offset: int64(2 * _1MB), Data: dataBuff[:10]}, + &internal.WriteFileOptions{Handle: nh, Offset: int64(2 * _1MB), Data: dataBuff[:10]}, ) suite.assert.NoError(err) suite.assert.Equal(10, n) @@ -2119,7 +2131,7 @@ func (suite *blockCacheTestSuite) TestPreventRaceCondition() { // writing at offset 0 in block 0 n, err := tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: data[:10]}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: data[:10]}, ) 
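The sparse-write suites above (`TestRandomWriteSparseFile*`, `TestRandomWriteFlushAndOverwrite`) all rely on the same POSIX gap semantics that the earlier small-file hunks spell out literally as `"test-data\x00\x00\x00newdata"`: a write past EOF leaves a zero-filled hole. A standalone illustration of those semantics against a plain local file (stdlib `os` only, no cloudfuse types involved):

```go
package main

import (
	"fmt"
	"os"
)

// Writing past EOF leaves a zero-filled gap: after these two writes the file
// reads back as "test-data\x00\x00\x00newdata", the exact byte sequence the
// tests above assert.
func main() {
	f, err := os.CreateTemp("", "sparse-*.dat")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	_, _ = f.WriteAt([]byte("test-data"), 0) // bytes 0-8
	_, _ = f.WriteAt([]byte("newdata"), 12)  // past EOF; bytes 9-11 become NUL

	buf := make([]byte, 19)
	_, _ = f.ReadAt(buf, 0)
	fmt.Printf("%q\n", buf) // "test-data\x00\x00\x00newdata"
}
```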
suite.assert.NoError(err) suite.assert.Equal(10, n) @@ -2129,7 +2141,7 @@ func (suite *blockCacheTestSuite) TestPreventRaceCondition() { // writing at offset 1MB in block 1 n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: int64(_1MB), Data: data[:]}, + &internal.WriteFileOptions{Handle: h, Offset: int64(_1MB), Data: data[:]}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB)) @@ -2139,7 +2151,7 @@ func (suite *blockCacheTestSuite) TestPreventRaceCondition() { // writing at offset 2MB in block 2 n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: int64(2 * _1MB), Data: data[:]}, + &internal.WriteFileOptions{Handle: h, Offset: int64(2 * _1MB), Data: data[:]}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB)) @@ -2149,7 +2161,7 @@ func (suite *blockCacheTestSuite) TestPreventRaceCondition() { // writing at offset 3MB in block 3 n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: int64(3 * _1MB), Data: data[:1]}, + &internal.WriteFileOptions{Handle: h, Offset: int64(3 * _1MB), Data: data[:1]}, ) suite.assert.NoError(err) suite.assert.Equal(1, n) @@ -2159,7 +2171,7 @@ func (suite *blockCacheTestSuite) TestPreventRaceCondition() { // writing at offset 10 in block 0 n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 10, Data: data[10:]}, + &internal.WriteFileOptions{Handle: h, Offset: 10, Data: data[10:]}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB-10)) @@ -2200,7 +2212,7 @@ func (suite *blockCacheTestSuite) TestBlockParallelUploadAndWrite() { // writing at offset 0 in block 0 n, err := tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: data[:10]}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: data[:10]}, ) suite.assert.NoError(err) suite.assert.Equal(10, n) @@ -2210,7 +2222,7 @@ func (suite *blockCacheTestSuite) TestBlockParallelUploadAndWrite() { // writing at offset 1MB in block 1 n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: int64(_1MB), Data: data[:100]}, + &internal.WriteFileOptions{Handle: h, Offset: int64(_1MB), Data: data[:100]}, ) suite.assert.NoError(err) suite.assert.Equal(100, n) @@ -2226,7 +2238,7 @@ func (suite *blockCacheTestSuite) TestBlockParallelUploadAndWrite() { // writing at offset 10 in block 0 n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 10, Data: data[10:]}, + &internal.WriteFileOptions{Handle: h, Offset: 10, Data: data[10:]}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB-10)) @@ -2308,7 +2320,7 @@ func (suite *blockCacheTestSuite) TestBlockParallelUploadAndWriteValidation() { // writing at offset 0 in block 0 n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: data[:10]}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: data[:10]}, ) suite.assert.NoError(err) suite.assert.Equal(10, n) @@ -2318,7 +2330,7 @@ func (suite *blockCacheTestSuite) TestBlockParallelUploadAndWriteValidation() { // writing at offset 1MB in block 1 n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: int64(_1MB), Data: data[:]}, + &internal.WriteFileOptions{Handle: h, Offset: int64(_1MB), Data: data[:]}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB)) @@ -2328,7 +2340,7 @@ func (suite *blockCacheTestSuite) TestBlockParallelUploadAndWriteValidation() { // writing at offset 2MB in block 2 n, err = 
tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: int64(2 * _1MB), Data: data[:]}, + &internal.WriteFileOptions{Handle: h, Offset: int64(2 * _1MB), Data: data[:]}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB)) @@ -2338,7 +2350,7 @@ func (suite *blockCacheTestSuite) TestBlockParallelUploadAndWriteValidation() { // writing at offset 3MB in block 3 n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: int64(3 * _1MB), Data: data[:100]}, + &internal.WriteFileOptions{Handle: h, Offset: int64(3 * _1MB), Data: data[:100]}, ) suite.assert.NoError(err) suite.assert.Equal(100, n) @@ -2348,7 +2360,7 @@ func (suite *blockCacheTestSuite) TestBlockParallelUploadAndWriteValidation() { // writing at offset 10 in block 0 n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 10, Data: data[10:]}, + &internal.WriteFileOptions{Handle: h, Offset: 10, Data: data[10:]}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB-10)) @@ -2426,7 +2438,7 @@ func (suite *blockCacheTestSuite) TestBlockParallelReadAndWriteValidation() { // write 3MB at offset 0 n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: dataBuff[:3*_1MB]}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: dataBuff[:3*_1MB]}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(3*_1MB)) @@ -2448,14 +2460,14 @@ func (suite *blockCacheTestSuite) TestBlockParallelReadAndWriteValidation() { // read 1MB data at offset 0 data := make([]byte, _1MB) n, err = tobj.blockCache.ReadInBuffer( - internal.ReadInBufferOptions{Handle: nh, Offset: 0, Data: data}, + &internal.ReadInBufferOptions{Handle: nh, Offset: 0, Data: data}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB)) // update 1MB data at offset 0 n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: nh, Offset: 0, Data: dataBuff[4*_1MB : 5*_1MB]}, + &internal.WriteFileOptions{Handle: nh, Offset: 0, Data: dataBuff[4*_1MB : 5*_1MB]}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB)) @@ -2530,7 +2542,7 @@ func (suite *blockCacheTestSuite) TestBlockOverwriteValidation() { // write 3MB at offset 0 n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: dataBuff[:3*_1MB]}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: dataBuff[:3*_1MB]}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(3*_1MB)) @@ -2551,14 +2563,14 @@ func (suite *blockCacheTestSuite) TestBlockOverwriteValidation() { // update 5 bytes data at offset 0 n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: nh, Offset: 0, Data: dataBuff[4*_1MB : 4*_1MB+5]}, + &internal.WriteFileOptions{Handle: nh, Offset: 0, Data: dataBuff[4*_1MB : 4*_1MB+5]}, ) suite.assert.NoError(err) suite.assert.Equal(5, n) // update 5 bytes data at offset 5 n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: nh, Offset: 5, Data: dataBuff[4*_1MB+5 : 4*_1MB+10]}, + &internal.WriteFileOptions{Handle: nh, Offset: 5, Data: dataBuff[4*_1MB+5 : 4*_1MB+10]}, ) suite.assert.NoError(err) suite.assert.Equal(5, n) @@ -2624,7 +2636,7 @@ func (suite *blockCacheTestSuite) TestBlockFailOverwrite() { // write at offset 0 where block 0 download will fail n, err := tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: dataBuff[:1*_1MB]}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: dataBuff[:1*_1MB]}, ) suite.assert.Error(err) suite.assert.Contains(err.Error(), "failed to 
download block") @@ -2669,14 +2681,14 @@ func (suite *blockCacheTestSuite) TestBlockDownloadOffsetGreaterThanFileSize() { data := make([]byte, _1MB) n, err := tobj.blockCache.ReadInBuffer( - internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: data}, + &internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: data}, ) suite.assert.NoError(err) suite.assert.Equal(int(_1MB), n) // write at offset 1MB where block 1 download will fail n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: int64(_1MB), Data: dataBuff[:1*_1MB]}, + &internal.WriteFileOptions{Handle: h, Offset: int64(_1MB), Data: dataBuff[:1*_1MB]}, ) suite.assert.NoError(err) suite.assert.Equal(int(_1MB), n) @@ -2711,7 +2723,7 @@ func (suite *blockCacheTestSuite) TestReadStagedBlock() { // write 4MB at offset 0 n, err := tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: dataBuff[:4*_1MB]}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: dataBuff[:4*_1MB]}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(4*_1MB)) @@ -2721,7 +2733,7 @@ func (suite *blockCacheTestSuite) TestReadStagedBlock() { data := make([]byte, _1MB) n, err = tobj.blockCache.ReadInBuffer( - internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: data}, + &internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: data}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB)) @@ -2786,7 +2798,7 @@ func (suite *blockCacheTestSuite) TestReadUncommittedBlockValidation() { ind = 0 for i := 0; i < prefetch+50; i++ { n, err := tobj.blockCache.WriteFile( - internal.WriteFileOptions{ + &internal.WriteFileOptions{ Handle: h, Offset: int64(i * int(_1MB)), Data: dataBuff[ind*_1MB : (ind+1)*_1MB], @@ -2803,7 +2815,7 @@ func (suite *blockCacheTestSuite) TestReadUncommittedBlockValidation() { // read blocks 0, 1 and 2 which are uncommitted data := make([]byte, 2*_1MB) n, err := tobj.blockCache.ReadInBuffer( - internal.ReadInBufferOptions{Handle: h, Offset: 512, Data: data}, + &internal.ReadInBufferOptions{Handle: h, Offset: 512, Data: data}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(2*_1MB)) @@ -2813,7 +2825,7 @@ func (suite *blockCacheTestSuite) TestReadUncommittedBlockValidation() { // read block 4 which has been committed by the previous read data = make([]byte, _1MB) n, err = tobj.blockCache.ReadInBuffer( - internal.ReadInBufferOptions{Handle: h, Offset: int64(4 * _1MB), Data: data}, + &internal.ReadInBufferOptions{Handle: h, Offset: int64(4 * _1MB), Data: data}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB)) @@ -2866,7 +2878,7 @@ func (suite *blockCacheTestSuite) TestReadUncommittedPrefetchedBlock() { suite.assert.False(h.Dirty()) n, err := tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: dataBuff[:_1MB]}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: dataBuff[:_1MB]}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB)) @@ -2885,7 +2897,7 @@ func (suite *blockCacheTestSuite) TestReadUncommittedPrefetchedBlock() { ind := uint64(1) for i := 1; i < prefetch+50; i++ { n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{ + &internal.WriteFileOptions{ Handle: h, Offset: int64(i * int(_1MB)), Data: dataBuff[ind*_1MB : (ind+1)*_1MB], @@ -2902,7 +2914,7 @@ func (suite *blockCacheTestSuite) TestReadUncommittedPrefetchedBlock() { // read blocks 0, 1 and 2 where prefetched blocks 1 and 2 are uncommitted data := make([]byte, 2*_1MB) n, err = tobj.blockCache.ReadInBuffer( - internal.ReadInBufferOptions{Handle: h, 
Offset: 512, Data: data}, + &internal.ReadInBufferOptions{Handle: h, Offset: 512, Data: data}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(2*_1MB)) @@ -2912,7 +2924,7 @@ func (suite *blockCacheTestSuite) TestReadUncommittedPrefetchedBlock() { // read block 4 which has been committed by the previous read data = make([]byte, _1MB) n, err = tobj.blockCache.ReadInBuffer( - internal.ReadInBufferOptions{Handle: h, Offset: int64(4 * _1MB), Data: data}, + &internal.ReadInBufferOptions{Handle: h, Offset: int64(4 * _1MB), Data: data}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB)) @@ -2951,7 +2963,7 @@ func (suite *blockCacheTestSuite) TestReadWriteBlockInParallel() { suite.assert.False(h.Dirty()) n, err := tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: dataBuff[:]}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: dataBuff[:]}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(5*_1MB)) @@ -2970,7 +2982,7 @@ func (suite *blockCacheTestSuite) TestReadWriteBlockInParallel() { ind := uint64(0) for i := 5; i < prefetch+50; i++ { n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{ + &internal.WriteFileOptions{ Handle: h, Offset: int64(i * int(_1MB)), Data: dataBuff[ind*_1MB : (ind+1)*_1MB], @@ -2987,7 +2999,7 @@ func (suite *blockCacheTestSuite) TestReadWriteBlockInParallel() { // read blocks 0, 1 and 2 data := make([]byte, 2*_1MB) n, err = tobj.blockCache.ReadInBuffer( - internal.ReadInBufferOptions{Handle: h, Offset: 512, Data: data}, + &internal.ReadInBufferOptions{Handle: h, Offset: 512, Data: data}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(2*_1MB)) @@ -2996,7 +3008,7 @@ func (suite *blockCacheTestSuite) TestReadWriteBlockInParallel() { // read blocks 4 and 5 n, err = tobj.blockCache.ReadInBuffer( - internal.ReadInBufferOptions{Handle: h, Offset: int64(4 * _1MB), Data: data}, + &internal.ReadInBufferOptions{Handle: h, Offset: int64(4 * _1MB), Data: data}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(2*_1MB)) @@ -3022,11 +3034,11 @@ func (suite *blockCacheTestSuite) TestReadWriteBlockInParallel() { // tobj, err := setupPipeline(config) // defer tobj.cleanupPipeline() -// suite.assert.Nil(err) +// suite.assert.NoError(err) // if err == nil { -// suite.assert.Equal(tobj.blockCache.Name(), "block_cache") -// suite.assert.EqualValues(tobj.blockCache.blockSize, 2*_1MB) -// suite.assert.EqualValues(tobj.blockCache.memSize, 1*_1MB*maxbuffers) +// suite.assert.Equal("block_cache", tobj.blockCache.Name()) +// suite.assert.EqualValues(2*_1MB, tobj.blockCache.blockSize) +// suite.assert.Equal(tobj.blockCache.memSize, 1*_1MB*maxbuffers) // } // } @@ -3067,7 +3079,7 @@ func (suite *blockCacheTestSuite) TestSizeOfFileInOpen() { // write 1MB data at offset 0 n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: 0, Data: dataBuff[:_1MB]}, + &internal.WriteFileOptions{Handle: h, Offset: 0, Data: dataBuff[:_1MB]}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB)) @@ -3133,7 +3145,11 @@ func (suite *blockCacheTestSuite) TestReadCommittedLastBlockAfterAppends() { // Jump to 13thMB offset and write 500kb of data n, err := tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: int64(13 * _1MB), Data: dataBuff[:(_1MB / 2)]}, + &internal.WriteFileOptions{ + Handle: h, + Offset: int64(13 * _1MB), + Data: dataBuff[:(_1MB / 2)], + }, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB/2)) @@ -3143,7 +3159,7 @@ func (suite *blockCacheTestSuite) 
TestReadCommittedLastBlockAfterAppends() { for i := range 12 { n, err := tobj.blockCache.WriteFile( - internal.WriteFileOptions{ + &internal.WriteFileOptions{ Handle: h, Offset: int64(uint64(12-i) * _1MB), Data: dataBuff[:_1MB], @@ -3156,7 +3172,11 @@ func (suite *blockCacheTestSuite) TestReadCommittedLastBlockAfterAppends() { // Now Jump to 20thMB offset and write 500kb of data n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: int64(20 * _1MB), Data: dataBuff[:(_1MB / 2)]}, + &internal.WriteFileOptions{ + Handle: h, + Offset: int64(20 * _1MB), + Data: dataBuff[:(_1MB / 2)], + }, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB/2)) @@ -3199,7 +3219,7 @@ func (suite *blockCacheTestSuite) TestReadCommittedLastBlocksOverwrite() { // At 3MB offset write half mb data, assuming this is the last block n, err := tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: int64(3 * _1MB), Data: dataBuff[:(_1MB / 2)]}, + &internal.WriteFileOptions{Handle: h, Offset: int64(3 * _1MB), Data: dataBuff[:(_1MB / 2)]}, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB/2)) @@ -3208,7 +3228,7 @@ func (suite *blockCacheTestSuite) TestReadCommittedLastBlocksOverwrite() { // Fill some data before that so that last block gets committed for i := int64(2); i >= 0; i-- { n, err := tobj.blockCache.WriteFile( - internal.WriteFileOptions{ + &internal.WriteFileOptions{ Handle: h, Offset: int64(uint64(i) * _1MB), Data: dataBuff[:_1MB], @@ -3221,7 +3241,11 @@ func (suite *blockCacheTestSuite) TestReadCommittedLastBlocksOverwrite() { // At 10MB offset write half mb data, assuming this is the last block n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: int64(10 * _1MB), Data: dataBuff[:(_1MB / 2)]}, + &internal.WriteFileOptions{ + Handle: h, + Offset: int64(10 * _1MB), + Data: dataBuff[:(_1MB / 2)], + }, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB/2)) @@ -3230,7 +3254,7 @@ func (suite *blockCacheTestSuite) TestReadCommittedLastBlocksOverwrite() { // Fill some data before that so that last block gets committed for i := int64(9); i >= 7; i-- { n, err := tobj.blockCache.WriteFile( - internal.WriteFileOptions{ + &internal.WriteFileOptions{ Handle: h, Offset: int64(uint64(i) * _1MB), Data: dataBuff[:_1MB], @@ -3243,7 +3267,11 @@ func (suite *blockCacheTestSuite) TestReadCommittedLastBlocksOverwrite() { // At 15MB offset write half mb data, assuming this is the last block n, err = tobj.blockCache.WriteFile( - internal.WriteFileOptions{Handle: h, Offset: int64(15 * _1MB), Data: dataBuff[:(_1MB / 2)]}, + &internal.WriteFileOptions{ + Handle: h, + Offset: int64(15 * _1MB), + Data: dataBuff[:(_1MB / 2)], + }, ) suite.assert.NoError(err) suite.assert.Equal(n, int(_1MB/2)) @@ -3252,7 +3280,7 @@ func (suite *blockCacheTestSuite) TestReadCommittedLastBlocksOverwrite() { // Fill some data before that so that last block gets committed for i := int64(14); i >= 12; i-- { n, err := tobj.blockCache.WriteFile( - internal.WriteFileOptions{ + &internal.WriteFileOptions{ Handle: h, Offset: int64(uint64(i) * _1MB), Data: dataBuff[:_1MB], diff --git a/component/file_cache/file_cache.go b/component/file_cache/file_cache.go index 01325aded..e36d9f8cf 100644 --- a/component/file_cache/file_cache.go +++ b/component/file_cache/file_cache.go @@ -1499,7 +1499,7 @@ func (fc *FileCache) closeFileInternal( } // ReadInBuffer: Read the local file into a buffer -func (fc *FileCache) ReadInBuffer(options internal.ReadInBufferOptions) (int, error) {
+func (fc *FileCache) ReadInBuffer(options *internal.ReadInBufferOptions) (int, error) { //defer exectime.StatTimeCurrentBlock("FileCache::ReadInBuffer")() // The file should already be in the cache since CreateFile/OpenFile was called before and a shared lock was acquired. // log.Debug("FileCache::ReadInBuffer : Reading %v bytes from %s", len(options.Data), options.Handle.Path) @@ -1545,7 +1545,7 @@ func (fc *FileCache) ReadInBuffer(options internal.ReadInBufferOptions) (int, er } // WriteFile: Write to the local file -func (fc *FileCache) WriteFile(options internal.WriteFileOptions) (int, error) { +func (fc *FileCache) WriteFile(options *internal.WriteFileOptions) (int, error) { //defer exectime.StatTimeCurrentBlock("FileCache::WriteFile")() // The file should already be in the cache since CreateFile/OpenFile was called before and a shared lock was acquired. //log.Debug("FileCache::WriteFile : Writing %v bytes from %s", len(options.Data), options.Handle.Path) @@ -1998,7 +1998,7 @@ func (fc *FileCache) renameOpenHandles( // TruncateFile: Update the file with its new size. func (fc *FileCache) TruncateFile(options internal.TruncateFileOptions) error { - log.Trace("FileCache::TruncateFile : name=%s, size=%d", options.Name, options.Size) + log.Trace("FileCache::TruncateFile : name=%s, size=%d", options.Name, options.NewSize) if fc.diskHighWaterMark != 0 { currSize, err := common.GetUsage(fc.tmpPath) @@ -2008,13 +2008,40 @@ func (fc *FileCache) TruncateFile(options internal.TruncateFileOptions) error { err.Error(), ) } else { - if (currSize + float64(options.Size)) > fc.diskHighWaterMark { + if (currSize + float64(options.NewSize)) > fc.diskHighWaterMark { log.Err("FileCache::TruncateFile : cache size limit reached [%f] failed to open %s", fc.maxCacheSize, options.Name) return syscall.ENOSPC } } } + if options.Handle != nil { + // The call is coming from an open handle, so we can just truncate the local file, and the change will be + // flushed to storage on close. 
+ f := options.Handle.GetFileObject() + if f == nil { + log.Err( + "FileCache::TruncateFile : error [couldn't find fd in handle] %s", + options.Handle.Path, + ) + return syscall.EBADF + } + + err := f.Truncate(options.NewSize) + if err != nil { + log.Err( + "FileCache::TruncateFile : error truncating file %s [%s]", + options.Handle.Path, + err.Error(), + ) + return err + } + + options.Handle.Flags.Set(handlemap.HandleFlagDirty) + + return nil + } + flock := fc.fileLocks.Get(options.Name) flock.Lock() defer flock.Unlock() @@ -2032,8 +2059,8 @@ func (fc *FileCache) TruncateFile(options internal.TruncateFileOptions) error { if err == nil || os.IsExist(err) { fc.policy.CacheValid(localPath) - if info.Size() != options.Size { - err = os.Truncate(localPath, options.Size) + if info.Size() != options.NewSize { + err = os.Truncate(localPath, options.NewSize) if err != nil { log.Err( "FileCache::TruncateFile : error truncating cached file %s [%s]", diff --git a/component/file_cache/file_cache_test.go b/component/file_cache/file_cache_test.go index 7719bd979..4bff64ba4 100644 --- a/component/file_cache/file_cache_test.go +++ b/component/file_cache/file_cache_test.go @@ -533,11 +533,11 @@ func (suite *fileCacheTestSuite) TestStreamDirCase3() { suite.fileCache.CreateDir(internal.CreateDirOptions{Name: subdir, Mode: 0777}) // Truncate causes these files to be written to fake storage suite.fileCache.CreateFile(internal.CreateFileOptions{Name: file1, Mode: 0777}) - suite.fileCache.TruncateFile(internal.TruncateFileOptions{Name: file1, Size: 1024}) + suite.fileCache.TruncateFile(internal.TruncateFileOptions{Name: file1, NewSize: 1024}) suite.fileCache.CreateFile(internal.CreateFileOptions{Name: file2, Mode: 0777}) - suite.fileCache.TruncateFile(internal.TruncateFileOptions{Name: file2, Size: 1024}) + suite.fileCache.TruncateFile(internal.TruncateFileOptions{Name: file2, NewSize: 1024}) suite.fileCache.CreateFile(internal.CreateFileOptions{Name: file3, Mode: 0777}) - suite.fileCache.TruncateFile(internal.TruncateFileOptions{Name: file3, Size: 1024}) + suite.fileCache.TruncateFile(internal.TruncateFileOptions{Name: file3, NewSize: 1024}) // Change the sizes directly in fake storage suite.loopback.TruncateFile(internal.TruncateFileOptions{Name: file1}) // Length is default 0 suite.loopback.TruncateFile(internal.TruncateFileOptions{Name: file2}) @@ -582,9 +582,9 @@ func (suite *fileCacheTestSuite) TestStreamDirMixed() { // By default createEmptyFile is false, so we will not create these files in storage until they are closed. 
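The `file_cache.go` hunk above adds a handle-aware fast path to `TruncateFile`: with an open handle, the cached file is truncated through its file descriptor and the handle is marked dirty so the new size is flushed to storage on close; only the handle-less path takes the file lock and truncates by name. A condensed sketch of that control flow, with invented stand-ins for `handlemap.Handle` and the cache path helper:

```go
package sketch

import (
	"os"
	"path/filepath"
	"syscall"
)

// Invented stand-in for handlemap.Handle.
type Handle struct {
	f     *os.File
	dirty bool
}

func (h *Handle) GetFileObject() *os.File { return h.f }
func (h *Handle) SetDirty()               { h.dirty = true }

var cacheRoot = "/tmp/cloudfuse-cache" // placeholder for fc.tmpPath

func truncateFile(h *Handle, name string, newSize int64) error {
	if h != nil {
		// Fast path: truncate through the open descriptor; the change is
		// uploaded when the handle is closed.
		f := h.GetFileObject()
		if f == nil {
			return syscall.EBADF
		}
		if err := f.Truncate(newSize); err != nil {
			return err
		}
		h.SetDirty()
		return nil
	}
	// Slow path: lock the name (elided here) and truncate the cached copy
	// on disk before pushing the change to cloud storage.
	return os.Truncate(filepath.Join(cacheRoot, name), newSize)
}
```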
suite.fileCache.CreateFile(internal.CreateFileOptions{Name: file2, Mode: 0777}) - suite.fileCache.TruncateFile(internal.TruncateFileOptions{Name: file2, Size: 1024}) + suite.fileCache.TruncateFile(internal.TruncateFileOptions{Name: file2, NewSize: 1024}) suite.fileCache.CreateFile(internal.CreateFileOptions{Name: file3, Mode: 0777}) - suite.fileCache.TruncateFile(internal.TruncateFileOptions{Name: file3, Size: 1024}) + suite.fileCache.TruncateFile(internal.TruncateFileOptions{Name: file3, NewSize: 1024}) // Create the files in fake_storage and simulate different sizes handle, _ := suite.loopback.CreateFile( @@ -596,8 +596,8 @@ func (suite *fileCacheTestSuite) TestStreamDirMixed() { internal.CreateFileOptions{Name: file4, Mode: 0777}, ) // Length is default 0 suite.loopback.CloseFile(internal.CloseFileOptions{Handle: handle}) - suite.fileCache.TruncateFile(internal.TruncateFileOptions{Name: file4, Size: 1024}) - suite.fileCache.TruncateFile(internal.TruncateFileOptions{Name: file4, Size: 0}) + suite.fileCache.TruncateFile(internal.TruncateFileOptions{Name: file4, NewSize: 1024}) + suite.fileCache.TruncateFile(internal.TruncateFileOptions{Name: file4, NewSize: 0}) // Read the Directory dir, _, err := suite.fileCache.StreamDir(internal.StreamDirOptions{Name: name}) @@ -742,7 +742,7 @@ func (suite *fileCacheTestSuite) TestRenameDirOpenFile() { suite.assert.FileExists(suite.cache_path + "/" + case3src) // write and flush to cloud initialData := []byte("initialData") - n, err := suite.fileCache.WriteFile(internal.WriteFileOptions{ + n, err := suite.fileCache.WriteFile(&internal.WriteFileOptions{ Handle: handle3, Data: initialData, }) @@ -787,7 +787,7 @@ func (suite *fileCacheTestSuite) TestRenameDirOpenFile() { // // Case 1 // write to file handle - n, err = suite.fileCache.WriteFile(internal.WriteFileOptions{ + n, err = suite.fileCache.WriteFile(&internal.WriteFileOptions{ Handle: handle1, Data: data, }) @@ -798,7 +798,7 @@ func (suite *fileCacheTestSuite) TestRenameDirOpenFile() { suite.assert.FileExists(filepath.Join(suite.cache_path, case1dst)) // // Case 2 - n, err = suite.fileCache.WriteFile(internal.WriteFileOptions{ + n, err = suite.fileCache.WriteFile(&internal.WriteFileOptions{ Handle: handle2, Data: data, }) @@ -806,7 +806,7 @@ func (suite *fileCacheTestSuite) TestRenameDirOpenFile() { suite.assert.Equal(len(data), n) // // Case 3 - n, err = suite.fileCache.WriteFile(internal.WriteFileOptions{ + n, err = suite.fileCache.WriteFile(&internal.WriteFileOptions{ Handle: handle3, Data: data, Offset: int64(len(initialData)), @@ -1055,7 +1055,7 @@ func (suite *fileCacheTestSuite) TestSyncFile() { testData := "test data" data := []byte(testData) - suite.fileCache.WriteFile(internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}) + suite.fileCache.WriteFile(&internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}) handle, loaded := handlemap.Load(handle.ID) suite.assert.True(loaded) suite.fileCache.FlushFile(internal.FlushFileOptions{Handle: handle}) @@ -1069,7 +1069,7 @@ func (suite *fileCacheTestSuite) TestSyncFile() { handle, err = suite.fileCache.CreateFile(internal.CreateFileOptions{Name: path, Mode: 0777}) suite.assert.NoError(err) _, err = suite.fileCache.WriteFile( - internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}, + &internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}, ) suite.assert.NoError(err) suite.assert.True(handle.Dirty()) @@ -1150,7 +1150,7 @@ func (suite *fileCacheTestSuite) TestOpenFileNotInCache() { handle, _ := 
suite.loopback.CreateFile(internal.CreateFileOptions{Name: path, Mode: 0777}) testData := "test data" data := []byte(testData) - suite.loopback.WriteFile(internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}) + suite.loopback.WriteFile(&internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}) suite.loopback.CloseFile(internal.CloseFileOptions{Handle: handle}) handle, err := suite.fileCache.OpenFile( @@ -1174,7 +1174,7 @@ func (suite *fileCacheTestSuite) TestOpenFileInCache() { handle, _ := suite.fileCache.CreateFile(internal.CreateFileOptions{Name: path, Mode: 0777}) testData := "test data" data := []byte(testData) - suite.fileCache.WriteFile(internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}) + suite.fileCache.WriteFile(&internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}) suite.fileCache.FlushFile(internal.FlushFileOptions{Handle: handle}) // Download is required @@ -1315,7 +1315,7 @@ func (suite *fileCacheTestSuite) TestReadInBufferEmpty() { data := make([]byte, 0) length, err := suite.fileCache.ReadInBuffer( - internal.ReadInBufferOptions{Handle: handle, Offset: 0, Data: data}, + &internal.ReadInBufferOptions{Handle: handle, Offset: 0, Data: data}, ) suite.assert.NoError(err) suite.assert.Equal(0, length) @@ -1329,13 +1329,13 @@ func (suite *fileCacheTestSuite) TestReadInBufferNoFlush() { handle, _ := suite.fileCache.CreateFile(internal.CreateFileOptions{Name: file, Mode: 0777}) testData := "test data" data := []byte(testData) - suite.fileCache.WriteFile(internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}) + suite.fileCache.WriteFile(&internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}) handle, _ = suite.fileCache.OpenFile(internal.OpenFileOptions{Name: file, Mode: 0777}) output := make([]byte, 9) length, err := suite.fileCache.ReadInBuffer( - internal.ReadInBufferOptions{Handle: handle, Offset: 0, Data: output}, + &internal.ReadInBufferOptions{Handle: handle, Offset: 0, Data: output}, ) suite.assert.NoError(err) suite.assert.Equal(data, output) @@ -1349,14 +1349,14 @@ func (suite *fileCacheTestSuite) TestReadInBuffer() { handle, _ := suite.fileCache.CreateFile(internal.CreateFileOptions{Name: file, Mode: 0777}) testData := "test data" data := []byte(testData) - suite.fileCache.WriteFile(internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}) + suite.fileCache.WriteFile(&internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}) suite.fileCache.FlushFile(internal.FlushFileOptions{Handle: handle}) handle, _ = suite.fileCache.OpenFile(internal.OpenFileOptions{Name: file, Mode: 0777}) output := make([]byte, 9) length, err := suite.fileCache.ReadInBuffer( - internal.ReadInBufferOptions{Handle: handle, Offset: 0, Data: output}, + &internal.ReadInBufferOptions{Handle: handle, Offset: 0, Data: output}, ) suite.assert.NoError(err) suite.assert.Equal(data, output) @@ -1368,7 +1368,7 @@ func (suite *fileCacheTestSuite) TestReadInBufferErrorBadFd() { // Setup file := "file18" handle := handlemap.NewHandle(file) - length, err := suite.fileCache.ReadInBuffer(internal.ReadInBufferOptions{Handle: handle}) + length, err := suite.fileCache.ReadInBuffer(&internal.ReadInBufferOptions{Handle: handle}) suite.assert.Error(err) suite.assert.EqualValues(syscall.EBADF, err) suite.assert.Equal(0, length) @@ -1386,7 +1386,7 @@ func (suite *fileCacheTestSuite) TestWriteFile() { testData := "test data" data := []byte(testData) length, err := suite.fileCache.WriteFile( - internal.WriteFileOptions{Handle: handle, Offset: 0, 
Data: data}, + &internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}, ) suite.assert.NoError(err) @@ -1402,7 +1402,7 @@ func (suite *fileCacheTestSuite) TestWriteFileErrorBadFd() { // Setup file := "file20" handle := handlemap.NewHandle(file) - len, err := suite.fileCache.WriteFile(internal.WriteFileOptions{Handle: handle}) + len, err := suite.fileCache.WriteFile(&internal.WriteFileOptions{Handle: handle}) suite.assert.Error(err) suite.assert.EqualValues(syscall.EBADF, err) suite.assert.Equal(0, len) @@ -1433,7 +1433,7 @@ func (suite *fileCacheTestSuite) TestFlushFile() { handle, _ := suite.fileCache.CreateFile(internal.CreateFileOptions{Name: file, Mode: 0777}) testData := "test data" data := []byte(testData) - suite.fileCache.WriteFile(internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}) + suite.fileCache.WriteFile(&internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}) // Path should not be in fake storage suite.assert.NoFileExists(filepath.Join(suite.fake_storage_path, file)) @@ -1492,7 +1492,7 @@ loopbackfs: suite.assert.NoError(err) data := []byte("simple scheduled upload test data") - _, err = suite.fileCache.WriteFile(internal.WriteFileOptions{Handle: handle, Data: data}) + _, err = suite.fileCache.WriteFile(&internal.WriteFileOptions{Handle: handle, Data: data}) suite.assert.NoError(err) err = suite.fileCache.CloseFile(internal.CloseFileOptions{Handle: handle}) @@ -1552,7 +1552,7 @@ loopbackfs: handle1, err := suite.fileCache.CreateFile(internal.CreateFileOptions{Name: file1, Mode: 0777}) suite.assert.NoError(err) data1 := []byte("file created during scheduler ON window") - _, err = suite.fileCache.WriteFile(internal.WriteFileOptions{Handle: handle1, Data: data1}) + _, err = suite.fileCache.WriteFile(&internal.WriteFileOptions{Handle: handle1, Data: data1}) suite.assert.NoError(err) err = suite.fileCache.CloseFile(internal.CloseFileOptions{Handle: handle1}) suite.assert.NoError(err) @@ -1575,7 +1575,7 @@ loopbackfs: handle2, err := suite.fileCache.CreateFile(internal.CreateFileOptions{Name: file2, Mode: 0777}) suite.assert.NoError(err) data2 := []byte("file created during scheduler OFF window") - _, err = suite.fileCache.WriteFile(internal.WriteFileOptions{Handle: handle2, Data: data2}) + _, err = suite.fileCache.WriteFile(&internal.WriteFileOptions{Handle: handle2, Data: data2}) suite.assert.NoError(err) err = suite.fileCache.CloseFile(internal.CloseFileOptions{Handle: handle2}) suite.assert.NoError(err) @@ -1608,7 +1608,7 @@ loopbackfs: handle, err := suite.fileCache.CreateFile(internal.CreateFileOptions{Name: file, Mode: 0777}) suite.assert.NoError(err) data := []byte("testing default scheduler behavior") - _, err = suite.fileCache.WriteFile(internal.WriteFileOptions{Handle: handle, Data: data}) + _, err = suite.fileCache.WriteFile(&internal.WriteFileOptions{Handle: handle, Data: data}) suite.assert.NoError(err) err = suite.fileCache.CloseFile(internal.CloseFileOptions{Handle: handle}) suite.assert.NoError(err) @@ -1673,7 +1673,7 @@ loopbackfs: suite.assert.NoError(err) // Write new content to the file modifiedContent := []byte("modified cloud file content") - _, err = suite.fileCache.WriteFile(internal.WriteFileOptions{ + _, err = suite.fileCache.WriteFile(&internal.WriteFileOptions{ Handle: handle, Data: modifiedContent, Offset: 0, @@ -1717,7 +1717,7 @@ loopbackfs: suite.assert.NoError(err) data := []byte("file to be renamed while in scheduler OFF state") - _, err = suite.fileCache.WriteFile(internal.WriteFileOptions{Handle: handle, Data: 
data}) + _, err = suite.fileCache.WriteFile(&internal.WriteFileOptions{Handle: handle, Data: data}) suite.assert.NoError(err) err = suite.fileCache.CloseFile(internal.CloseFileOptions{Handle: handle}) @@ -1793,7 +1793,7 @@ loopbackfs: suite.assert.NoError(err) data := []byte("file to be deleted while in scheduler OFF state") - _, err = suite.fileCache.WriteFile(internal.WriteFileOptions{Handle: handle, Data: data}) + _, err = suite.fileCache.WriteFile(&internal.WriteFileOptions{Handle: handle, Data: data}) suite.assert.NoError(err) err = suite.fileCache.CloseFile(internal.CloseFileOptions{Handle: handle}) @@ -1905,7 +1905,7 @@ loopbackfs: suite.assert.NoError(err) _, err = suite.fileCache.WriteFile( - internal.WriteFileOptions{Handle: handle, Data: initialContent}, + &internal.WriteFileOptions{Handle: handle, Data: initialContent}, ) suite.assert.NoError(err) @@ -1930,7 +1930,7 @@ loopbackfs: ) suite.assert.NoError(err) - _, err = suite.fileCache.WriteFile(internal.WriteFileOptions{Handle: handle, Data: newContent}) + _, err = suite.fileCache.WriteFile(&internal.WriteFileOptions{Handle: handle, Data: newContent}) suite.assert.NoError(err) err = suite.fileCache.CloseFile(internal.CloseFileOptions{Handle: handle}) @@ -2030,7 +2030,7 @@ loopbackfs: handle1, err := suite.fileCache.CreateFile(internal.CreateFileOptions{Name: file1, Mode: 0777}) suite.assert.NoError(err) data1 := []byte("file created during first window") - _, err = suite.fileCache.WriteFile(internal.WriteFileOptions{Handle: handle1, Data: data1}) + _, err = suite.fileCache.WriteFile(&internal.WriteFileOptions{Handle: handle1, Data: data1}) suite.assert.NoError(err) err = suite.fileCache.CloseFile(internal.CloseFileOptions{Handle: handle1}) suite.assert.NoError(err) @@ -2045,7 +2045,7 @@ loopbackfs: handle2, err := suite.fileCache.CreateFile(internal.CreateFileOptions{Name: file2, Mode: 0777}) suite.assert.NoError(err) data2 := []byte("file created during second window") - _, err = suite.fileCache.WriteFile(internal.WriteFileOptions{Handle: handle2, Data: data2}) + _, err = suite.fileCache.WriteFile(&internal.WriteFileOptions{Handle: handle2, Data: data2}) suite.assert.NoError(err) err = suite.fileCache.CloseFile(internal.CloseFileOptions{Handle: handle2}) suite.assert.NoError(err) @@ -2062,7 +2062,7 @@ loopbackfs: }) suite.assert.NoError(err) updatedData := []byte(" - updated in second window") - _, err = suite.fileCache.WriteFile(internal.WriteFileOptions{ + _, err = suite.fileCache.WriteFile(&internal.WriteFileOptions{ Handle: handle1, Data: updatedData, Offset: int64(len(data1)), @@ -2114,7 +2114,7 @@ func (suite *fileCacheTestSuite) TestGetAttrCase3() { file := "file26" // By default createEmptyFile is false, so we will not create these files in cloud storage until they are closed. 
suite.fileCache.CreateFile(internal.CreateFileOptions{Name: file, Mode: 0777}) - suite.fileCache.TruncateFile(internal.TruncateFileOptions{Name: file, Size: 1024}) + suite.fileCache.TruncateFile(internal.TruncateFileOptions{Name: file, NewSize: 1024}) // Create the files in fake_storage and simulate different sizes //suite.loopback.CreateFile(internal.CreateFileOptions{Name: file, Mode: 0777}) // Length is default 0 @@ -2149,7 +2149,7 @@ func (suite *fileCacheTestSuite) TestGetAttrCase4() { data := make([]byte, size) written, err := suite.fileCache.WriteFile( - internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}, + &internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}, ) suite.assert.NoError(err) suite.assert.Equal(size, written) @@ -2315,7 +2315,7 @@ func (suite *fileCacheTestSuite) TestRenameOpenFileCase1() { // write to file handle data := []byte("newdata") - n, err := suite.fileCache.WriteFile(internal.WriteFileOptions{ + n, err := suite.fileCache.WriteFile(&internal.WriteFileOptions{ Handle: handle, Data: data, }) @@ -2361,7 +2361,7 @@ func (suite *fileCacheTestSuite) TestRenameOpenFileCase2() { // write to file handle data := []byte("newdata") - n, err := suite.fileCache.WriteFile(internal.WriteFileOptions{ + n, err := suite.fileCache.WriteFile(&internal.WriteFileOptions{ Handle: handle, Data: data, }) @@ -2396,7 +2396,7 @@ func (suite *fileCacheTestSuite) TestRenameOpenFileCase3() { suite.assert.FileExists(suite.cache_path + "/" + src) // write to file handle initialData := []byte("initialData") - n, err := suite.fileCache.WriteFile(internal.WriteFileOptions{ + n, err := suite.fileCache.WriteFile(&internal.WriteFileOptions{ Handle: handle, Data: initialData, }) @@ -2417,7 +2417,7 @@ func (suite *fileCacheTestSuite) TestRenameOpenFileCase3() { suite.assert.NoError(err) // write to file handle newData := []byte("newData") - n, err = suite.fileCache.WriteFile(internal.WriteFileOptions{ + n, err = suite.fileCache.WriteFile(&internal.WriteFileOptions{ Handle: handle, Data: newData, Offset: int64(len(initialData)), @@ -2450,7 +2450,9 @@ func (suite *fileCacheTestSuite) TestTruncateFileNotInCache() { // Chmod size := 1024 - err := suite.fileCache.TruncateFile(internal.TruncateFileOptions{Name: path, Size: int64(size)}) + err := suite.fileCache.TruncateFile( + internal.TruncateFileOptions{Name: path, NewSize: int64(size)}, + ) suite.assert.NoError(err) // Path in fake storage should be updated @@ -2472,7 +2474,9 @@ func (suite *fileCacheTestSuite) TestTruncateFileCase3() { // Chmod size := 1024 - err := suite.fileCache.TruncateFile(internal.TruncateFileOptions{Name: path, Size: int64(size)}) + err := suite.fileCache.TruncateFile( + internal.TruncateFileOptions{Name: path, NewSize: int64(size)}, + ) suite.assert.NoError(err) // Path in fake storage and file cache should be updated info, _ := os.Stat(filepath.Join(suite.cache_path, path)) @@ -2488,7 +2492,9 @@ func (suite *fileCacheTestSuite) TestTruncateFileCase2() { suite.fileCache.CreateFile(internal.CreateFileOptions{Name: path, Mode: 0666}) size := 1024 - err := suite.fileCache.TruncateFile(internal.TruncateFileOptions{Name: path, Size: int64(size)}) + err := suite.fileCache.TruncateFile( + internal.TruncateFileOptions{Name: path, NewSize: int64(size)}, + ) suite.assert.NoError(err) // Path should be in the file cache and size should be updated @@ -2547,14 +2553,14 @@ func (suite *fileCacheTestSuite) TestCachePathSymlink() { handle, _ := suite.fileCache.CreateFile(internal.CreateFileOptions{Name: file, Mode: 
0777}) testData := "test data" data := []byte(testData) - suite.fileCache.WriteFile(internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}) + suite.fileCache.WriteFile(&internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}) suite.fileCache.FlushFile(internal.FlushFileOptions{Handle: handle}) handle, _ = suite.fileCache.OpenFile(internal.OpenFileOptions{Name: file, Mode: 0777}) output := make([]byte, 9) n, err := suite.fileCache.ReadInBuffer( - internal.ReadInBufferOptions{Handle: handle, Offset: 0, Data: output}, + &internal.ReadInBufferOptions{Handle: handle, Offset: 0, Data: output}, ) suite.assert.NoError(err) suite.assert.Equal(9, n) @@ -2589,7 +2595,7 @@ func (suite *fileCacheTestSuite) TestZZZZLazyWrite() { handle, _ := suite.fileCache.CreateFile(internal.CreateFileOptions{Name: file, Mode: 0777}) data := make([]byte, 10*1024*1024) _, _ = suite.fileCache.WriteFile( - internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}, + &internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}, ) _ = suite.fileCache.FlushFile(internal.FlushFileOptions{Handle: handle}) @@ -2628,7 +2634,7 @@ func (suite *fileCacheTestSuite) TestStatFS() { file := "file41" handle, _ := suite.fileCache.CreateFile(internal.CreateFileOptions{Name: file, Mode: 0777}) data := make([]byte, 1024*1024) - suite.fileCache.WriteFile(internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}) + suite.fileCache.WriteFile(&internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}) suite.fileCache.FlushFile(internal.FlushFileOptions{Handle: handle}) stat, ret, err := suite.fileCache.StatFs() suite.assert.True(ret) @@ -2665,7 +2671,7 @@ func (suite *fileCacheTestSuite) TestReadFileWithRefresh() { suite.assert.NoError(err) suite.assert.False(handle.Dirty()) n, err := suite.fileCache.ReadInBuffer( - internal.ReadInBufferOptions{Handle: handle, Offset: 0, Data: data}, + &internal.ReadInBufferOptions{Handle: handle, Offset: 0, Data: data}, ) suite.assert.NoError(err) suite.assert.Equal(len(byteArr), n) @@ -2680,7 +2686,7 @@ func (suite *fileCacheTestSuite) TestReadFileWithRefresh() { suite.assert.NoError(err) suite.assert.False(handle.Dirty()) n, err = suite.fileCache.ReadInBuffer( - internal.ReadInBufferOptions{Handle: handle, Offset: 0, Data: data}, + &internal.ReadInBufferOptions{Handle: handle, Offset: 0, Data: data}, ) suite.assert.NoError(err) suite.assert.Equal(9, n) @@ -2696,7 +2702,7 @@ func (suite *fileCacheTestSuite) TestReadFileWithRefresh() { suite.assert.NoError(err) suite.assert.False(handle.Dirty()) n, err = suite.fileCache.ReadInBuffer( - internal.ReadInBufferOptions{Handle: handle, Offset: 0, Data: data}, + &internal.ReadInBufferOptions{Handle: handle, Offset: 0, Data: data}, ) suite.assert.NoError(err) suite.assert.Equal(15, n) @@ -2756,7 +2762,9 @@ func (suite *fileCacheTestSuite) TestHardLimitOnSize() { f, err := suite.fileCache.CreateFile(options1) suite.assert.NoError(err) data = make([]byte, 1*MB) - n, err := suite.fileCache.WriteFile(internal.WriteFileOptions{Handle: f, Offset: 0, Data: data}) + n, err := suite.fileCache.WriteFile( + &internal.WriteFileOptions{Handle: f, Offset: 0, Data: data}, + ) suite.assert.NoError(err) suite.assert.Equal(1*MB, n) err = suite.fileCache.CloseFile(internal.CloseFileOptions{Handle: f}) @@ -2767,18 +2775,22 @@ func (suite *fileCacheTestSuite) TestHardLimitOnSize() { f, err = suite.fileCache.CreateFile(options1) suite.assert.NoError(err) data = make([]byte, 3*MB) - n, err = 
suite.fileCache.WriteFile(internal.WriteFileOptions{Handle: f, Offset: 0, Data: data}) + n, err = suite.fileCache.WriteFile(&internal.WriteFileOptions{Handle: f, Offset: 0, Data: data}) suite.assert.Error(err) suite.assert.Equal(0, n) err = suite.fileCache.CloseFile(internal.CloseFileOptions{Handle: f}) suite.assert.NoError(err) // try opening small file - err = suite.fileCache.TruncateFile(internal.TruncateFileOptions{Name: pathsmall, Size: 1 * MB}) + err = suite.fileCache.TruncateFile( + internal.TruncateFileOptions{Name: pathsmall, NewSize: 1 * MB}, + ) suite.assert.NoError(err) // try opening small file - err = suite.fileCache.TruncateFile(internal.TruncateFileOptions{Name: pathsmall, Size: 3 * MB}) + err = suite.fileCache.TruncateFile( + internal.TruncateFileOptions{Name: pathsmall, NewSize: 3 * MB}, + ) suite.assert.Error(err) } @@ -2798,7 +2810,7 @@ func (suite *fileCacheTestSuite) TestHandleDataChange() { suite.assert.NoError(err) suite.assert.False(handle.Dirty()) n, err := suite.fileCache.ReadInBuffer( - internal.ReadInBufferOptions{Handle: handle, Offset: 0, Data: data}, + &internal.ReadInBufferOptions{Handle: handle, Offset: 0, Data: data}, ) handle, loaded := handlemap.Load(handle.ID) suite.assert.True(loaded) @@ -2980,7 +2992,7 @@ func (suite *fileCacheTestSuite) TestHardLimit() { data := make([]byte, 1024*1024) for i := range int64(5) { suite.fileCache.WriteFile( - internal.WriteFileOptions{Handle: handle, Offset: i * 1024 * 1024, Data: data}, + &internal.WriteFileOptions{Handle: handle, Offset: i * 1024 * 1024, Data: data}, ) } suite.fileCache.CloseFile(internal.CloseFileOptions{Handle: handle}) diff --git a/component/file_cache/lru_policy.go b/component/file_cache/lru_policy.go index 5e026d865..5ef5b2d27 100644 --- a/component/file_cache/lru_policy.go +++ b/component/file_cache/lru_policy.go @@ -50,6 +50,10 @@ type lruNode struct { type lruPolicy struct { sync.Mutex + + // wait group for stopping the go-routines gracefully. + wg sync.WaitGroup + cachePolicyConfig nodeMap sync.Map // uses os.Separator (filepath.Join) @@ -146,6 +150,7 @@ func (p *lruPolicy) StartPolicy() error { // start the timeout monitor p.cacheTimeoutMonitor = time.Tick(time.Duration(p.cacheTimeout) * time.Second) + p.wg.Add(2) go p.clearCache() go p.asyncCacheValid() @@ -157,6 +162,8 @@ func (p *lruPolicy) ShutdownPolicy() error { log.Trace("lruPolicy::ShutdownPolicy") p.closeSignal <- 1 p.closeSignalValidate <- 1 + // wait for all go-routines to stop. 
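An aside on the `lru_policy.go` change in progress here: it wires up the standard `sync.WaitGroup` shutdown handshake. `StartPolicy` calls `wg.Add(2)` before spawning the two background goroutines, each loop runs `defer wg.Done()`, and `ShutdownPolicy` calls `wg.Wait()` after sending the close signals, so the snapshot is written only once both loops have actually exited. A self-contained sketch of the same pattern (simplified to a single closed channel, where the diff signals each loop on its own channel):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type policy struct {
	wg          sync.WaitGroup
	closeSignal chan struct{}
}

func (p *policy) start() {
	p.wg.Add(2) // one per background loop, before the goroutines spawn
	go p.evictLoop()
	go p.validateLoop()
}

func (p *policy) evictLoop() {
	defer p.wg.Done() // releases Wait() even on an early return
	for {
		select {
		case <-time.After(10 * time.Millisecond):
			// periodic work, e.g. cache eviction
		case <-p.closeSignal:
			return
		}
	}
}

func (p *policy) validateLoop() {
	defer p.wg.Done()
	<-p.closeSignal // block until shutdown
}

func (p *policy) shutdown() {
	close(p.closeSignal) // the diff instead sends one value per channel
	p.wg.Wait()          // both loops have exited; now safe to persist state
	fmt.Println("snapshot written after all loops stopped")
}

func main() {
	p := &policy{closeSignal: make(chan struct{})}
	p.start()
	p.shutdown()
}
```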
+ p.wg.Wait() return p.createSnapshot().writeToFile(p.tmpPath) } @@ -338,6 +345,7 @@ func (p *lruPolicy) Name() string { // On validate name of the file was pushed on this channel so now update the LRU list func (p *lruPolicy) asyncCacheValid() { + defer p.wg.Done() for { select { case name := <-p.validateChan: @@ -385,6 +393,7 @@ func (p *lruPolicy) cacheValidate(name string) { // For all other timer based activities we check the stuff here func (p *lruPolicy) clearCache() { log.Trace("lruPolicy::ClearCache") + defer p.wg.Done() for { select { diff --git a/component/libfuse/libfuse2_handler.go b/component/libfuse/libfuse2_handler.go index bd6565d86..a412bc476 100644 --- a/component/libfuse/libfuse2_handler.go +++ b/component/libfuse/libfuse2_handler.go @@ -285,7 +285,7 @@ func (cf *CgofuseFS) Getattr(path string, stat *fuse.Stat_t, fh uint64) int { // Get attributes attr, err := fuseFS.NextComponent().GetAttr(internal.GetAttrOptions{Name: name}) if err != nil { - log.Err("Libfuse::Getattr : Failed to get attributes of %s [%s]", name, err.Error()) + //log.Err("Libfuse::Getattr : Failed to get attributes of %s [%s]", name, err.Error()) switch err { case syscall.ENOENT: return -fuse.ENOENT @@ -714,7 +714,7 @@ func (cf *CgofuseFS) Read(path string, buff []byte, ofst int64, fh uint64) int { bytesRead, err = handle.FObj.ReadAt(buff, int64(offset)) } else { bytesRead, err = fuseFS.NextComponent().ReadInBuffer( - internal.ReadInBufferOptions{ + &internal.ReadInBufferOptions{ Handle: handle, Offset: int64(offset), Data: buff, @@ -749,7 +749,7 @@ func (cf *CgofuseFS) Write(path string, buff []byte, ofst int64, fh uint64) int } bytesWritten, err := fuseFS.NextComponent().WriteFile( - internal.WriteFileOptions{ + &internal.WriteFileOptions{ Handle: handle, Offset: ofst, Data: buff, @@ -814,8 +814,13 @@ func (cf *CgofuseFS) Truncate(path string, size int64, fh uint64) int { log.Trace("Libfuse::Truncate : %s size %d", name, size) handle, _ := handlemap.Load(handlemap.HandleID(fh)) - err := fuseFS.NextComponent(). 
- TruncateFile(internal.TruncateFileOptions{Name: name, Size: size, Handle: handle}) + err := fuseFS.NextComponent().TruncateFile( + internal.TruncateFileOptions{ + Name: name, + OldSize: -1, + NewSize: int64(size), + Handle: handle, + }) if err != nil { log.Err("Libfuse::Truncate : error truncating file %s [%s]", name, err.Error()) if os.IsNotExist(err) { diff --git a/component/libfuse/libfuse2_handler_test_wrapper.go b/component/libfuse/libfuse2_handler_test_wrapper.go index 5b5f52647..796ae6c56 100644 --- a/component/libfuse/libfuse2_handler_test_wrapper.go +++ b/component/libfuse/libfuse2_handler_test_wrapper.go @@ -403,7 +403,7 @@ func testTruncate(suite *libfuseTestSuite) { name := "path" path := "/" + name size := int64(1024) - options := internal.TruncateFileOptions{Name: name, Size: size} + options := internal.TruncateFileOptions{Name: name, OldSize: -1, NewSize: size} suite.mock.EXPECT().TruncateFile(options).Return(nil) err := cfuseFS.Truncate(path, size, 0) @@ -415,7 +415,7 @@ func testTruncateError(suite *libfuseTestSuite) { name := "path" path := "/" + name size := int64(1024) - options := internal.TruncateFileOptions{Name: name, Size: size} + options := internal.TruncateFileOptions{Name: name, OldSize: -1, NewSize: size} suite.mock.EXPECT().TruncateFile(options).Return(errors.New("failed to truncate file")) err := cfuseFS.Truncate(path, size, 0) @@ -431,7 +431,7 @@ func testFTruncate(suite *libfuseTestSuite) { handle := handlemap.NewHandle(name) fh := handlemap.Add(handle) - options := internal.TruncateFileOptions{Handle: handle, Name: name, Size: size} + options := internal.TruncateFileOptions{Handle: handle, Name: name, OldSize: -1, NewSize: size} suite.mock.EXPECT().TruncateFile(options).Return(nil) err := cfuseFS.Truncate(path, size, uint64(fh)) @@ -447,7 +447,7 @@ func testFTruncateError(suite *libfuseTestSuite) { handle := handlemap.NewHandle(name) fh := handlemap.Add(handle) - options := internal.TruncateFileOptions{Handle: handle, Name: name, Size: size} + options := internal.TruncateFileOptions{Handle: handle, Name: name, OldSize: -1, NewSize: size} suite.mock.EXPECT().TruncateFile(options).Return(errors.New("failed to truncate file")) err := cfuseFS.Truncate(path, size, uint64(fh)) diff --git a/component/loopback/loopback_fs.go b/component/loopback/loopback_fs.go index 1433e26b5..d672bff7c 100644 --- a/component/loopback/loopback_fs.go +++ b/component/loopback/loopback_fs.go @@ -284,7 +284,7 @@ func (lfs *LoopbackFS) ReadLink(options internal.ReadLinkOptions) (string, error return strings.TrimPrefix(targetPath, prefix), nil } -func (lfs *LoopbackFS) ReadInBuffer(options internal.ReadInBufferOptions) (int, error) { +func (lfs *LoopbackFS) ReadInBuffer(options *internal.ReadInBufferOptions) (int, error) { // if handle is nil, create a new handle // added because after changes in xload, path and size can be passed in ReadInBufferOptions, where handle can be nil if options.Handle == nil { @@ -318,7 +318,7 @@ func (lfs *LoopbackFS) ReadInBuffer(options internal.ReadInBufferOptions) (int, return n, err } -func (lfs *LoopbackFS) WriteFile(options internal.WriteFileOptions) (int, error) { +func (lfs *LoopbackFS) WriteFile(options *internal.WriteFileOptions) (int, error) { log.Trace("LoopbackFS::WriteFile : name=%s", options.Handle.Path) f := options.Handle.GetFileObject() @@ -336,8 +336,7 @@ func (lfs *LoopbackFS) WriteFile(options internal.WriteFileOptions) (int, error) func (lfs *LoopbackFS) TruncateFile(options internal.TruncateFileOptions) error { 
log.Trace("LoopbackFS::TruncateFile : name=%s", options.Name) fsPath := filepath.Join(lfs.path, options.Name) - - return os.Truncate(fsPath, options.Size) + return os.Truncate(fsPath, options.NewSize) } func (lfs *LoopbackFS) FlushFile(options internal.FlushFileOptions) error { diff --git a/component/loopback/loopback_fs_test.go b/component/loopback/loopback_fs_test.go index 6e9adedfa..fd219be9b 100644 --- a/component/loopback/loopback_fs_test.go +++ b/component/loopback/loopback_fs_test.go @@ -262,7 +262,7 @@ func (suite *LoopbackFSTestSuite) TestRenameWriteFile() { assert.NoFileExists(filepath.Join(testPath, fileEmpty)) n, err := suite.lfs.WriteFile( - internal.WriteFileOptions{Handle: handle, Offset: 0, Data: []byte(quotesText)[:5]}, + &internal.WriteFileOptions{Handle: handle, Offset: 0, Data: []byte(quotesText)[:5]}, ) assert.NoError(err) assert.Equal(5, n, "TestRenameWriteFile: failed to write the specified number of bytes") @@ -291,7 +291,7 @@ func (suite *LoopbackFSTestSuite) TestRenameWriteFileGetAttr() { assert.FileExists(filepath.Join(testPath, fileEmpty)) n, err := suite.lfs.WriteFile( - internal.WriteFileOptions{Handle: handle, Offset: 0, Data: []byte(quotesText)[:5]}, + &internal.WriteFileOptions{Handle: handle, Offset: 0, Data: []byte(quotesText)[:5]}, ) assert.NoError(err) assert.Equal(5, n, "TestRenameWriteFile: failed to write the specified number of bytes") @@ -346,7 +346,7 @@ func (suite *LoopbackFSTestSuite) TestReadInBuffer() { for _, testCase := range testCases { n, err := suite.lfs.ReadInBuffer( - internal.ReadInBufferOptions{ + &internal.ReadInBufferOptions{ Handle: handle, Offset: testCase.offset, Data: testCase.data, @@ -380,7 +380,7 @@ func (suite *LoopbackFSTestSuite) TestWriteFile() { assert.NotNil(handle) n, err := suite.lfs.WriteFile( - internal.WriteFileOptions{Handle: handle, Offset: 0, Data: []byte(quotesText)[:5]}, + &internal.WriteFileOptions{Handle: handle, Offset: 0, Data: []byte(quotesText)[:5]}, ) assert.NoError(err) assert.Equal(5, n, "WriteFile: failed to write the specified number of bytes") @@ -390,7 +390,7 @@ func (suite *LoopbackFSTestSuite) TestWriteFile() { assert.EqualValues(5, attr.Size) n, err = suite.lfs.WriteFile( - internal.WriteFileOptions{Handle: handle, Offset: 5, Data: []byte(quotesText)[5:]}, + &internal.WriteFileOptions{Handle: handle, Offset: 5, Data: []byte(quotesText)[5:]}, ) assert.NoError(err) assert.Len([]byte(quotesText)[5:], n, "WriteFile: failed to write specified number of bytes") @@ -413,7 +413,7 @@ func (suite *LoopbackFSTestSuite) TestTruncateFile() { assert.NoError(err, "TruncateFile: failed to open file") assert.NotNil(handle) - err = suite.lfs.TruncateFile(internal.TruncateFileOptions{Name: fileLorem, Size: 10}) + err = suite.lfs.TruncateFile(internal.TruncateFileOptions{Name: fileLorem, NewSize: 10}) assert.NoError(err) info, err := os.Stat(filepath.Join(testPath, fileLorem)) assert.NoError(err, "TruncateFile: cannot stat file") @@ -436,7 +436,7 @@ func (suite *LoopbackFSTestSuite) TestTruncateClosedFile() { err = suite.lfs.CloseFile(internal.CloseFileOptions{Handle: handle}) assert.NoError(err, "TruncateFile: Failed to close file") - err = suite.lfs.TruncateFile(internal.TruncateFileOptions{Name: fileLorem, Size: 10}) + err = suite.lfs.TruncateFile(internal.TruncateFileOptions{Name: fileLorem, NewSize: 10}) assert.NoError(err) info, err := os.Stat(filepath.Join(testPath, fileLorem)) assert.NoError(err, "TruncateFile: cannot stat file") diff --git a/component/s3storage/client.go b/component/s3storage/client.go 
index 31ad7e9a6..c8b1bb490 100644 --- a/component/s3storage/client.go +++ b/component/s3storage/client.go @@ -891,7 +891,7 @@ func (cl *Client) GetFileBlockOffsets(name string) (*common.BlockOffsetList, err // if file is smaller than the uploadCutoff it is small, otherwise it is a multipart // upload if result.Size < cutoff { - blockList.Flags.Set(common.SmallFile) + blockList.Flags.Set(common.BlobFlagHasNoBlocks) return &blockList, nil } @@ -970,7 +970,7 @@ func (cl *Client) TruncateFile(name string, size int64) error { } // Write : write data at given offset to an object -func (cl *Client) Write(options internal.WriteFileOptions) error { +func (cl *Client) Write(options *internal.WriteFileOptions) error { name := options.Handle.Path offset := options.Offset data := options.Data @@ -986,7 +986,7 @@ func (cl *Client) Write(options internal.WriteFileOptions) error { return err } - if fileOffsets.SmallFile() { + if fileOffsets.HasNoBlocks() { // case 1: file consists of no parts (small file) // get the existing object data diff --git a/component/s3storage/client_test.go b/component/s3storage/client_test.go index 34e5fa80f..5e62c82d8 100644 --- a/component/s3storage/client_test.go +++ b/component/s3storage/client_test.go @@ -1344,7 +1344,9 @@ func (s *clientTestSuite) TestWrite() { offset := rand.IntN(bodyLen-1) + 1 // minimum offset of 1 newData := []byte(randomString(bodyLen - offset)) h := handlemap.NewHandle(name) - err = s.client.Write(internal.WriteFileOptions{Handle: h, Offset: int64(offset), Data: newData}) + err = s.client.Write( + &internal.WriteFileOptions{Handle: h, Offset: int64(offset), Data: newData}, + ) s.assert.NoError(err) result, err := s.awsS3Client.GetObject(context.Background(), &s3.GetObjectInput{ diff --git a/component/s3storage/connection.go b/component/s3storage/connection.go index da4f4212d..86661ddfa 100644 --- a/component/s3storage/connection.go +++ b/component/s3storage/connection.go @@ -109,7 +109,7 @@ type S3Connection interface { WriteFromFile(name string, metadata map[string]*string, fi *os.File) error WriteFromBuffer(name string, metadata map[string]*string, data []byte) error - Write(options internal.WriteFileOptions) error + Write(options *internal.WriteFileOptions) error GetFileBlockOffsets(name string) (*common.BlockOffsetList, error) TruncateFile(string, int64) error diff --git a/component/s3storage/s3storage.go b/component/s3storage/s3storage.go index 6d3aad8a7..b88b58057 100644 --- a/component/s3storage/s3storage.go +++ b/component/s3storage/s3storage.go @@ -418,7 +418,7 @@ func (s3 *S3Storage) RenameFile(options internal.RenameFileOptions) error { } // Read file data into the buffer given in options.Data. 
diff --git a/component/s3storage/s3storage.go b/component/s3storage/s3storage.go
index 6d3aad8a7..b88b58057 100644
--- a/component/s3storage/s3storage.go
+++ b/component/s3storage/s3storage.go
@@ -418,7 +418,7 @@ func (s3 *S3Storage) RenameFile(options internal.RenameFileOptions) error {
 }
 
 // Read file data into the buffer given in options.Data.
-func (s3 *S3Storage) ReadInBuffer(options internal.ReadInBufferOptions) (int, error) {
+func (s3 *S3Storage) ReadInBuffer(options *internal.ReadInBufferOptions) (int, error) {
 	//log.Trace("S3Storage::ReadInBuffer : Read %s from %d offset", h.Path, offset)
 
 	if options.Offset > atomic.LoadInt64(&options.Handle.Size) {
@@ -447,7 +447,7 @@ func (s3 *S3Storage) ReadInBuffer(options internal.ReadInBufferOptions) (int, er
 	return length, err
 }
 
-func (s3 *S3Storage) WriteFile(options internal.WriteFileOptions) (int, error) {
+func (s3 *S3Storage) WriteFile(options *internal.WriteFileOptions) (int, error) {
 	err := s3.Storage.Write(options)
 	return len(options.Data), err
 }
@@ -460,14 +460,14 @@ func (s3 *S3Storage) GetFileBlockOffsets(
 }
 
 func (s3 *S3Storage) TruncateFile(options internal.TruncateFileOptions) error {
-	log.Trace("S3Storage::TruncateFile : %s to %d bytes", options.Name, options.Size)
-	err := s3.Storage.TruncateFile(options.Name, options.Size)
+	log.Trace("S3Storage::TruncateFile : %s to %d bytes", options.Name, options.NewSize)
+	err := s3.Storage.TruncateFile(options.Name, options.NewSize)
 
 	if err == nil {
 		s3StatsCollector.PushEvents(
 			truncateFile,
 			options.Name,
-			map[string]any{size: options.Size},
+			map[string]any{size: options.NewSize},
 		)
 		s3StatsCollector.UpdateStats(stats_manager.Increment, truncateFile, (int64)(1))
 	}
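
Reviewer note: `ReadInBuffer` and `WriteFile` now receive `*internal.ReadInBufferOptions` / `*internal.WriteFileOptions`. Passing a pointer avoids copying the options struct at every component in the pipeline and lets a component observe mutations made further down. A sketch of a read-then-write round trip through the new signatures (the helper name, buffer size, and handle are illustrative):

```go
// Sketch: rewrite the head of an object through the pointer-based options API.
// comp is any internal.Component; h is an open *handlemap.Handle.
func rewriteHead(comp internal.Component, h *handlemap.Handle) error {
    buf := make([]byte, 4096)
    n, err := comp.ReadInBuffer(&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: buf})
    if err != nil {
        return err
    }
    // write back the n bytes actually read
    _, err = comp.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: buf[:n]})
    return err
}
```
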
diff --git a/component/s3storage/s3storage_test.go b/component/s3storage/s3storage_test.go
index fe492d797..ffc733125 100644
--- a/component/s3storage/s3storage_test.go
+++ b/component/s3storage/s3storage_test.go
@@ -1142,7 +1142,7 @@ func (s *s3StorageTestSuite) TestOpenFileSize() {
 	size := 10
 	_, err := s.s3Storage.CreateFile(internal.CreateFileOptions{Name: name})
 	s.assert.NoError(err)
-	err = s.s3Storage.TruncateFile(internal.TruncateFileOptions{Name: name, Size: int64(size)})
+	err = s.s3Storage.TruncateFile(internal.TruncateFileOptions{Name: name, NewSize: int64(size)})
 	s.assert.NoError(err)
 
 	// TODO: There is a sort of bug in S3 where writing zeros to the object causes it to be unreadable.
@@ -1331,14 +1331,14 @@ func (s *s3StorageTestSuite) TestReadInBuffer() {
 	s.assert.NoError(err)
 	testData := "test data"
 	data := []byte(testData)
-	_, err = s.s3Storage.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
+	_, err = s.s3Storage.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
 	s.assert.NoError(err)
 	h, err = s.s3Storage.OpenFile(internal.OpenFileOptions{Name: name})
 	s.assert.NoError(err)
 
 	output := make([]byte, 5)
 	len, err := s.s3Storage.ReadInBuffer(
-		internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output},
+		&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output},
 	)
 	s.assert.NoError(err)
 	s.assert.Equal(5, len)
@@ -1353,14 +1353,14 @@ func (s *s3StorageTestSuite) TestReadInBufferRange() {
 	s.assert.NoError(err)
 	testData := "test data test data "
 	data := []byte(testData)
-	_, err = s.s3Storage.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
+	_, err = s.s3Storage.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
 	s.assert.NoError(err)
 	h, err = s.s3Storage.OpenFile(internal.OpenFileOptions{Name: name})
 	s.assert.NoError(err)
 
 	output := make([]byte, 15)
 	len, err := s.s3Storage.ReadInBuffer(
-		internal.ReadInBufferOptions{Handle: h, Offset: 5, Data: output},
+		&internal.ReadInBufferOptions{Handle: h, Offset: 5, Data: output},
 	)
 	s.assert.NoError(err)
 	s.assert.Equal(15, len)
@@ -1375,14 +1375,14 @@ func (s *s3StorageTestSuite) TestReadInBufferLargeBuffer() {
 	s.assert.NoError(err)
 	testData := "test data"
 	data := []byte(testData)
-	_, err = s.s3Storage.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
+	_, err = s.s3Storage.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
 	s.assert.NoError(err)
 	h, err = s.s3Storage.OpenFile(internal.OpenFileOptions{Name: name})
 	s.assert.NoError(err)
 
 	output := make([]byte, 1000) // Testing that passing in a super large buffer will still work
 	len, err := s.s3Storage.ReadInBuffer(
-		internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output},
+		&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output},
 	)
 	s.assert.NoError(err)
 	s.assert.EqualValues(h.Size, len)
@@ -1398,7 +1398,7 @@ func (s *s3StorageTestSuite) TestReadInBufferEmpty() {
 
 	output := make([]byte, 10)
 	len, err := s.s3Storage.ReadInBuffer(
-		internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output},
+		&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output},
 	)
 	s.assert.NoError(err)
 	s.assert.Equal(0, len)
@@ -1412,7 +1412,7 @@ func (s *s3StorageTestSuite) TestReadInBufferBadRange() {
 	h.Size = 10
 
 	_, err := s.s3Storage.ReadInBuffer(
-		internal.ReadInBufferOptions{Handle: h, Offset: 20, Data: make([]byte, 2)},
+		&internal.ReadInBufferOptions{Handle: h, Offset: 20, Data: make([]byte, 2)},
 	)
 	s.assert.Error(err)
 	s.assert.EqualValues(syscall.ERANGE, err)
@@ -1426,7 +1426,7 @@ func (s *s3StorageTestSuite) TestReadInBufferError() {
 	h.Size = 10
 
 	_, err := s.s3Storage.ReadInBuffer(
-		internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: make([]byte, 2)},
+		&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: make([]byte, 2)},
 	)
 	s.assert.Error(err)
 	s.assert.EqualValues(syscall.ENOENT, err)
@@ -1441,7 +1441,9 @@ func (s *s3StorageTestSuite) TestWriteFile() {
 	testData := "test data"
 	data := []byte(testData)
 
-	count, err := s.s3Storage.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
+	count, err := s.s3Storage.WriteFile(
+		&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data},
+	)
 	s.assert.NoError(err)
 	s.assert.Equal(len(data), count)
@@ -1474,7 +1476,9 @@ func (s *s3StorageTestSuite) TestWriteFileMultipartUpload() {
 	data := make([]byte, fileSize)
 	rand.Read(data)
 
-	count, err := s.s3Storage.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
+	count, err := s.s3Storage.WriteFile(
+		&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data},
+	)
 	s.assert.NoError(err)
 	s.assert.Equal(len(data), count)
@@ -1523,7 +1527,9 @@ func (s *s3StorageTestSuite) TestWriteFileWindowsNameConvert() {
 	testData := "test data"
 	data := []byte(testData)
 
-	count, err := s.s3Storage.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
+	count, err := s.s3Storage.WriteFile(
+		&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data},
+	)
 	s.assert.NoError(err)
 	s.assert.Equal(len(data), count)
@@ -1550,11 +1556,11 @@ func (s *s3StorageTestSuite) TestTruncateSmallFileSmaller() {
 	testData := "test data"
 	data := []byte(testData)
 	truncatedLength := 5
-	_, err = s.s3Storage.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
+	_, err = s.s3Storage.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
 	s.assert.NoError(err)
 
 	err = s.s3Storage.TruncateFile(
-		internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)},
+		internal.TruncateFileOptions{Name: name, NewSize: int64(truncatedLength)},
 	)
 	s.assert.NoError(err)
@@ -1591,11 +1597,11 @@ func (s *s3StorageTestSuite) TestTruncateSmallFileSmallerWindowsNameConvert() {
 	testData := "test data"
 	data := []byte(testData)
 	truncatedLength := 5
-	_, err = s.s3Storage.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
+	_, err = s.s3Storage.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
 	s.assert.NoError(err)
 
 	err = s.s3Storage.TruncateFile(
-		internal.TruncateFileOptions{Name: windowsName, Size: int64(truncatedLength)},
+		internal.TruncateFileOptions{Name: windowsName, NewSize: int64(truncatedLength)},
 	)
 	s.assert.NoError(err)
@@ -1623,11 +1629,11 @@ func (s *s3StorageTestSuite) TestTruncateChunkedFileSmaller() {
 	testData := "test data"
 	data := []byte(testData)
 	truncatedLength := 5
-	_, err = s.s3Storage.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
+	_, err = s.s3Storage.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
 	s.assert.NoError(err)
 
 	err = s.s3Storage.TruncateFile(
-		internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)},
+		internal.TruncateFileOptions{Name: name, NewSize: int64(truncatedLength)},
 	)
 	s.assert.NoError(err)
@@ -1655,11 +1661,11 @@ func (s *s3StorageTestSuite) TestTruncateSmallFileEqual() {
 	testData := "test data"
 	data := []byte(testData)
 	truncatedLength := 9
-	_, err = s.s3Storage.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
+	_, err = s.s3Storage.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
 	s.assert.NoError(err)
 
 	err = s.s3Storage.TruncateFile(
-		internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)},
+		internal.TruncateFileOptions{Name: name, NewSize: int64(truncatedLength)},
 	)
 	s.assert.NoError(err)
@@ -1687,11 +1693,11 @@ func (s *s3StorageTestSuite) TestTruncateChunkedFileEqual() {
 	testData := "test data"
 	data := []byte(testData)
 	truncatedLength := 9
-	_, err = s.s3Storage.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
+	_, err = s.s3Storage.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
 	s.assert.NoError(err)
 
 	err = s.s3Storage.TruncateFile(
-		internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)},
+		internal.TruncateFileOptions{Name: name, NewSize: int64(truncatedLength)},
 	)
 	s.assert.NoError(err)
@@ -1719,11 +1725,11 @@ func (s *s3StorageTestSuite) TestTruncateSmallFileBigger() {
 	testData := "test data"
 	data := []byte(testData)
 	truncatedLength := 15
-	_, err = s.s3Storage.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
+	_, err = s.s3Storage.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
 	s.assert.NoError(err)
 
 	err = s.s3Storage.TruncateFile(
-		internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)},
+		internal.TruncateFileOptions{Name: name, NewSize: int64(truncatedLength)},
 	)
 	s.assert.NoError(err)
@@ -1751,7 +1757,7 @@ func (s *s3StorageTestSuite) TestTruncateEmptyFileBigger() {
 	truncatedLength := 15
 
 	err = s.s3Storage.TruncateFile(
-		internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)},
+		internal.TruncateFileOptions{Name: name, NewSize: int64(truncatedLength)},
 	)
 	s.assert.NoError(err)
@@ -1779,11 +1785,11 @@ func (s *s3StorageTestSuite) TestTruncateChunkedFileBigger() {
 	testData := "test data"
 	data := []byte(testData)
 	truncatedLength := 15
-	_, err = s.s3Storage.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
+	_, err = s.s3Storage.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
 	s.assert.NoError(err)
 
 	err = s.s3Storage.TruncateFile(
-		internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)},
+		internal.TruncateFileOptions{Name: name, NewSize: int64(truncatedLength)},
 	)
 	s.assert.NoError(err)
@@ -1821,7 +1827,7 @@ func (s *s3StorageTestSuite) TestWriteSmallFile() {
 	testData := "test data"
 	data := []byte(testData)
 	dataLen := len(data)
-	_, err = s.s3Storage.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
+	_, err = s.s3Storage.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
 	s.assert.NoError(err)
 	f, err := os.CreateTemp("", name+".tmp")
 	s.assert.NoError(err)
@@ -1849,14 +1855,14 @@ func (s *s3StorageTestSuite) TestOverwriteSmallFile() {
 	testData := "test-replace-data"
 	data := []byte(testData)
 	dataLen := len(data)
-	_, err = s.s3Storage.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
+	_, err = s.s3Storage.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
 	s.assert.NoError(err)
 	f, err := os.CreateTemp("", name+".tmp")
 	s.assert.NoError(err)
 	defer os.Remove(f.Name())
 	newTestData := []byte("newdata")
 	_, err = s.s3Storage.WriteFile(
-		internal.WriteFileOptions{Handle: h, Offset: 5, Data: newTestData},
+		&internal.WriteFileOptions{Handle: h, Offset: 5, Data: newTestData},
 	)
 	s.assert.NoError(err)
@@ -1884,14 +1890,14 @@ func (s *s3StorageTestSuite) TestOverwriteAndAppendToSmallFile() {
 	testData := "test-data"
 	data := []byte(testData)
 
-	_, err = s.s3Storage.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
+	_, err = s.s3Storage.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
 	s.assert.NoError(err)
 	f, err := os.CreateTemp("", name+".tmp")
 	s.assert.NoError(err)
 	defer os.Remove(f.Name())
 	newTestData := []byte("newdata")
 	_, err = s.s3Storage.WriteFile(
-		internal.WriteFileOptions{Handle: h, Offset: 5, Data: newTestData},
+		&internal.WriteFileOptions{Handle: h, Offset: 5, Data: newTestData},
 	)
 	s.assert.NoError(err)
@@ -1920,14 +1926,14 @@ func (s *s3StorageTestSuite) TestAppendToSmallFile() {
 	testData := "test-data"
 	data := []byte(testData)
 
-	_, err = s.s3Storage.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
+	_, err = s.s3Storage.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
 	s.assert.NoError(err)
 	f, err := os.CreateTemp("", name+".tmp")
 	s.assert.NoError(err)
 	defer os.Remove(f.Name())
 	newTestData := []byte("-newdata")
 	_, err = s.s3Storage.WriteFile(
-		internal.WriteFileOptions{Handle: h, Offset: 9, Data: newTestData},
+		&internal.WriteFileOptions{Handle: h, Offset: 9, Data: newTestData},
 	)
 	s.assert.NoError(err)
@@ -1956,14 +1962,14 @@ func (s *s3StorageTestSuite) TestAppendOffsetLargerThanSmallFile() {
 	testData := "test-data"
 	data := []byte(testData)
 
-	_, err = s.s3Storage.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
+	_, err = s.s3Storage.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
 	s.assert.NoError(err)
 	f, err := os.CreateTemp("", name+".tmp")
 	s.assert.NoError(err)
 	defer os.Remove(f.Name())
 	newTestData := []byte("newdata")
 	_, err = s.s3Storage.WriteFile(
-		internal.WriteFileOptions{Handle: h, Offset: 12, Data: newTestData},
+		&internal.WriteFileOptions{Handle: h, Offset: 12, Data: newTestData},
 	)
 	s.assert.NoError(err)
@@ -2012,7 +2018,7 @@ func (s *s3StorageTestSuite) TestOverwriteBlocks() {
 	defer os.Remove(f.Name())
 	newTestData := []byte("cake")
 	_, err = s.s3Storage.WriteFile(
-		internal.WriteFileOptions{Handle: h, Offset: 5, Data: newTestData},
+		&internal.WriteFileOptions{Handle: h, Offset: 5, Data: newTestData},
 	)
 	s.assert.NoError(err)
@@ -2061,7 +2067,7 @@ func (s *s3StorageTestSuite) TestOverwriteAndAppendBlocks() {
 	defer os.Remove(f.Name())
 	newTestData := []byte("43211234cake")
 	_, err = s.s3Storage.WriteFile(
-		internal.WriteFileOptions{Handle: h, Offset: 5*MB - 4, Data: newTestData},
+		&internal.WriteFileOptions{Handle: h, Offset: 5*MB - 4, Data: newTestData},
 	)
 	s.assert.NoError(err)
@@ -2109,7 +2115,7 @@ func (s *s3StorageTestSuite) TestAppendBlocks() {
 	defer os.Remove(f.Name())
 	newTestData := []byte("43211234cake")
 	_, err = s.s3Storage.WriteFile(
-		internal.WriteFileOptions{Handle: h, Offset: 5 * MB, Data: newTestData},
+		&internal.WriteFileOptions{Handle: h, Offset: 5 * MB, Data: newTestData},
 	)
 	s.assert.NoError(err)
@@ -2157,7 +2163,7 @@ func (s *s3StorageTestSuite) TestOverwriteAndAppendBlocksLargeFile() {
 	defer os.Remove(f.Name())
 	newTestData := []byte("43211234cake")
 	_, err = s.s3Storage.WriteFile(
-		internal.WriteFileOptions{Handle: h, Offset: 15*MB - 4, Data: newTestData},
+		&internal.WriteFileOptions{Handle: h, Offset: 15*MB - 4, Data: newTestData},
 	)
 	s.assert.NoError(err)
@@ -2205,7 +2211,7 @@ func (s *s3StorageTestSuite) TestOverwriteAndAppendBlocksMiddleLargeFile() {
 	defer os.Remove(f.Name())
 	newTestData := []byte("43211234cake")
 	_, err = s.s3Storage.WriteFile(
-		internal.WriteFileOptions{Handle: h, Offset: 5*MB - 4, Data: newTestData},
+		&internal.WriteFileOptions{Handle: h, Offset: 5*MB - 4, Data: newTestData},
 	)
 	s.assert.NoError(err)
@@ -2236,14 +2242,14 @@ func (s *s3StorageTestSuite) TestAppendOffsetLargerThanSize() {
 	testData := "testdatates1dat1tes2dat2tes3dat3tes4dat4"
 	data := []byte(testData)
 
-	_, err = s.s3Storage.WriteFile(internal.WriteFileOptions{Handle: h, Data: data})
+	_, err = s.s3Storage.WriteFile(&internal.WriteFileOptions{Handle: h, Data: data})
 	s.assert.NoError(err)
 	f, err := os.CreateTemp("", name+".tmp")
 	s.assert.NoError(err)
 	defer os.Remove(f.Name())
 	newTestData := []byte("43211234cake")
 	_, err = s.s3Storage.WriteFile(
-		internal.WriteFileOptions{Handle: h, Offset: 45, Data: newTestData},
+		&internal.WriteFileOptions{Handle: h, Offset: 45, Data: newTestData},
 	)
 	s.assert.NoError(err)
@@ -2674,7 +2680,7 @@ func (s *s3StorageTestSuite) TestGetAttrFileSize() {
 	s.assert.NoError(err)
 	testData := "test data"
 	data := []byte(testData)
-	_, err = s.s3Storage.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
+	_, err = s.s3Storage.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
 	s.assert.NoError(err)
 
 	props, err := s.s3Storage.GetAttr(internal.GetAttrOptions{Name: name})
@@ -2693,7 +2699,7 @@ func (s *s3StorageTestSuite) TestGetAttrFileTime() {
 	s.assert.NoError(err)
 	testData := "test data"
 	data := []byte(testData)
-	_, err = s.s3Storage.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
+	_, err = s.s3Storage.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
 	s.assert.NoError(err)
 
 	before, err := s.s3Storage.GetAttr(internal.GetAttrOptions{Name: name})
@@ -2702,7 +2708,7 @@ func (s *s3StorageTestSuite) TestGetAttrFileTime() {
 	time.Sleep(1 * time.Second) // Wait and then modify the file again
 
-	_, err = s.s3Storage.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
+	_, err = s.s3Storage.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
 	s.assert.NoError(err)
 
 	after, err := s.s3Storage.GetAttr(internal.GetAttrOptions{Name: name})
@@ -2878,7 +2884,7 @@ func (s *s3StorageTestSuite) TestGetFileBlockOffsetsSmallFile() {
 	testData := "testdatates1dat1tes2dat2tes3dat3tes4dat4"
 	data := []byte(testData)
 
-	s.s3Storage.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
+	s.s3Storage.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
 
 	// GetFileBlockOffsets
 	offsetList, err := s.s3Storage.GetFileBlockOffsets(
@@ -2886,7 +2892,7 @@ func (s *s3StorageTestSuite) TestGetFileBlockOffsetsSmallFile() {
 	)
 	s.assert.NoError(err)
 	s.assert.Empty(offsetList.BlockList)
-	s.assert.True(offsetList.SmallFile())
+	s.assert.True(offsetList.HasNoBlocks())
 	s.assert.EqualValues(0, offsetList.BlockIdLength)
 }
@@ -2960,7 +2966,7 @@ func (s *s3StorageTestSuite) TestFlushFileEmptyFile() {
 
 	output := make([]byte, 1)
 	length, err := s.s3Storage.ReadInBuffer(
-		internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output},
+		&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output},
 	)
 	s.assert.NoError(err)
 	s.assert.Equal(0, length)
@@ -3001,7 +3007,7 @@ func (s *s3StorageTestSuite) TestFlushFileChunkedFile() {
 
 	output := make([]byte, 15*MB)
 	length, err := s.s3Storage.ReadInBuffer(
-		internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output},
+		&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output},
 	)
 	s.assert.NoError(err)
 	s.assert.Equal(15*MB, length)
@@ -3054,7 +3060,7 @@ func (s *s3StorageTestSuite) TestFlushFileUpdateChunkedFile() {
 
 	output := make([]byte, 15*MB)
 	length, err := s.s3Storage.ReadInBuffer(
-		internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output},
+		&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output},
 	)
 	s.assert.NoError(err)
 	s.assert.Equal(15*MB, length)
@@ -3112,7 +3118,7 @@ func (s *s3StorageTestSuite) TestFlushFileTruncateUpdateChunkedFile() {
 
 	output := make([]byte, 16*MB)
 	length, err := s.s3Storage.ReadInBuffer(
-		internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output},
+		&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output},
 	)
 	s.assert.NoError(err)
 	s.assert.Equal(16*MB, length)
@@ -3175,14 +3181,14 @@ func (s *s3StorageTestSuite) TestFlushFileAppendBlocksEmptyFile() {
 	}
 	blk3.Flags.Set(common.DirtyBlock)
 	h.CacheObj.BlockList = append(h.CacheObj.BlockList, blk1, blk2, blk3)
-	bol.Flags.Clear(common.SmallFile)
+	bol.Flags.Clear(common.BlobFlagHasNoBlocks)
 
 	err := s.s3Storage.FlushFile(internal.FlushFileOptions{Handle: h})
 	s.assert.NoError(err)
 
 	output := make([]byte, 3*blockSizeBytes)
 	length, err := s.s3Storage.ReadInBuffer(
-		internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output},
+		&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output},
 	)
 	s.assert.NoError(err)
 	s.assert.Equal(3*blockSizeBytes, length)
@@ -3258,14 +3264,14 @@ func (s *s3StorageTestSuite) TestFlushFileAppendBlocksChunkedFile() {
 	}
 	blk3.Flags.Set(common.DirtyBlock)
 	h.CacheObj.BlockList = append(h.CacheObj.BlockList, blk1, blk2, blk3)
-	bol.Flags.Clear(common.SmallFile)
+	bol.Flags.Clear(common.BlobFlagHasNoBlocks)
 
 	err = s.s3Storage.FlushFile(internal.FlushFileOptions{Handle: h})
 	s.assert.NoError(err)
 
 	output := make([]byte, fileSize+3*blockSizeBytes)
 	length, err := s.s3Storage.ReadInBuffer(
-		internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output},
+		&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output},
 	)
 	s.assert.NoError(err)
 	s.assert.Equal(fileSize+3*blockSizeBytes, length)
@@ -3324,14 +3330,14 @@ func (s *s3StorageTestSuite) TestFlushFileTruncateBlocksEmptyFile() {
 	blk3.Flags.Set(common.TruncatedBlock)
 	blk3.Flags.Set(common.DirtyBlock)
 	h.CacheObj.BlockList = append(h.CacheObj.BlockList, blk1, blk2, blk3)
-	bol.Flags.Clear(common.SmallFile)
+	bol.Flags.Clear(common.BlobFlagHasNoBlocks)
 
 	err := s.s3Storage.FlushFile(internal.FlushFileOptions{Handle: h})
 	s.assert.NoError(err)
 
 	output := make([]byte, 3*int64(blockSizeBytes))
 	length, err := s.s3Storage.ReadInBuffer(
-		internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output},
+		&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output},
 	)
 	s.assert.NoError(err)
 	s.assert.EqualValues(3*int64(blockSizeBytes), length)
@@ -3400,14 +3406,14 @@ func (s *s3StorageTestSuite) TestFlushFileTruncateBlocksChunkedFile() {
 	blk3.Flags.Set(common.TruncatedBlock)
 	blk3.Flags.Set(common.DirtyBlock)
 	h.CacheObj.BlockList = append(h.CacheObj.BlockList, blk1, blk2, blk3)
-	bol.Flags.Clear(common.SmallFile)
+	bol.Flags.Clear(common.BlobFlagHasNoBlocks)
 
 	err = s.s3Storage.FlushFile(internal.FlushFileOptions{Handle: h})
 	s.assert.NoError(err)
 
 	output := make([]byte, fileSize+3*blockSizeBytes)
 	length, err := s.s3Storage.ReadInBuffer(
-		internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output},
+		&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output},
 	)
 	s.assert.NoError(err)
 	s.assert.Equal(fileSize+3*blockSizeBytes, length)
@@ -3467,14 +3473,14 @@ func (s *s3StorageTestSuite) TestFlushFileAppendAndTruncateBlocksEmptyFile() {
 	blk3.Flags.Set(common.DirtyBlock)
 	blk3.Flags.Set(common.TruncatedBlock)
 	h.CacheObj.BlockList = append(h.CacheObj.BlockList, blk1, blk2, blk3)
-	bol.Flags.Clear(common.SmallFile)
+	bol.Flags.Clear(common.BlobFlagHasNoBlocks)
 
 	err := s.s3Storage.FlushFile(internal.FlushFileOptions{Handle: h})
 	s.assert.NoError(err)
 
 	output := make([]byte, 3*blockSizeBytes)
 	length, err := s.s3Storage.ReadInBuffer(
-		internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output},
+		&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output},
 	)
 	s.assert.NoError(err)
 	s.assert.Equal(3*blockSizeBytes, length)
@@ -3547,7 +3553,7 @@ func (s *s3StorageTestSuite) TestFlushFileAppendAndTruncateBlocksChunkedFile() {
 	blk3.Flags.Set(common.DirtyBlock)
 	blk3.Flags.Set(common.TruncatedBlock)
 	h.CacheObj.BlockList = append(h.CacheObj.BlockList, blk1, blk2, blk3)
-	bol.Flags.Clear(common.SmallFile)
+	bol.Flags.Clear(common.BlobFlagHasNoBlocks)
 
 	err = s.s3Storage.FlushFile(internal.FlushFileOptions{Handle: h})
 	s.assert.NoError(err)
@@ -3555,7 +3561,7 @@ func (s *s3StorageTestSuite) TestFlushFileAppendAndTruncateBlocksChunkedFile() {
 	// file should be empty
 	output := make([]byte, fileSize+3*blockSizeBytes)
 	length, err := s.s3Storage.ReadInBuffer(
-		internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output},
+		&internal.ReadInBufferOptions{Handle: h, Offset: 0, Data: output},
 	)
 	s.assert.NoError(err)
 	s.assert.Equal(fileSize+3*blockSizeBytes, length)
@@ -3617,10 +3623,10 @@ func (s *s3StorageTestSuite) UtilityFunctionTestTruncateFileToSmaller(
 	s.assert.NoError(err)
 
 	data := make([]byte, size)
-	s.s3Storage.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
+	s.s3Storage.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
 
 	err = s.s3Storage.TruncateFile(
-		internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)},
+		internal.TruncateFileOptions{Name: name, NewSize: int64(truncatedLength)},
 	)
 	s.assert.NoError(err)
@@ -3655,10 +3661,10 @@ func (s *s3StorageTestSuite) UtilityFunctionTruncateFileToLarger(size int, trunc
 	s.assert.NoError(err)
 
 	data := make([]byte, size)
-	s.s3Storage.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
+	s.s3Storage.WriteFile(&internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})
 
 	err = s.s3Storage.TruncateFile(
-		internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)},
+		internal.TruncateFileOptions{Name: name, NewSize: int64(truncatedLength)},
 	)
 	s.assert.NoError(err)
diff --git a/component/size_tracker/size_tracker.go b/component/size_tracker/size_tracker.go
index aaed84acd..54b5b8b60 100644
--- a/component/size_tracker/size_tracker.go
+++ b/component/size_tracker/size_tracker.go
@@ -182,8 +182,7 @@ func (st *SizeTracker) RenameFile(options internal.RenameFileOptions) error {
 	return err
 }
 
-func (st *SizeTracker) WriteFile(options internal.WriteFileOptions) (int, error) {
-	// log.Trace("SizeTracker::WriteFile : %s", options.Handle.Path)
+func (st *SizeTracker) WriteFile(options *internal.WriteFileOptions) (int, error) {
 	var oldSize int64
 	attr, getAttrErr1 := st.NextComponent().
 		GetAttr(internal.GetAttrOptions{Name: options.Handle.Path})
@@ -208,7 +207,7 @@ func (st *SizeTracker) WriteFile(options internal.WriteFileOptions) (int, error)
 }
 
 func (st *SizeTracker) TruncateFile(options internal.TruncateFileOptions) error {
-	log.Trace("SizeTracker::TruncateFile : %s to %dB", options.Name, options.Size)
+	log.Trace("SizeTracker::TruncateFile : %s to %dB", options.Name, options.NewSize)
 	var origSize int64
 	attr, getAttrErr := st.NextComponent().GetAttr(internal.GetAttrOptions{Name: options.Name})
 	if getAttrErr == nil {
@@ -227,7 +226,7 @@ func (st *SizeTracker) TruncateFile(options internal.TruncateFileOptions) error
 	}
 
 	// subtract difference in file size
-	st.mountSize.Add(options.Size - origSize)
+	st.mountSize.Add(options.NewSize - origSize)
 	return nil
 }
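
Reviewer note: `SizeTracker.TruncateFile` keeps a running byte count for the mount by applying the signed difference between the new and old lengths, which is why the option naming matters here. A worked sketch of the bookkeeping under the same logic (as the hunk's own comment implies, `Add` takes a negative delta on shrinks):

```go
// Sketch: mount-size accounting across a truncate.
// origSize comes from GetAttr on the next component; NewSize is the target.
// Growing a 9-byte file to 15 adds +6; shrinking it to 5 adds -4.
delta := options.NewSize - origSize
st.mountSize.Add(delta)
```
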
diff --git a/component/size_tracker/size_tracker_mock_test.go b/component/size_tracker/size_tracker_mock_test.go
index a31b60035..6999c123d 100644
--- a/component/size_tracker/size_tracker_mock_test.go
+++ b/component/size_tracker/size_tracker_mock_test.go
@@ -118,10 +118,10 @@ func (suite *sizeTrackerMockTestSuite) TestStatFSFallBackEnabledUnderThreshold()
 		GetAttr(internal.GetAttrOptions{Name: handle.Path}).
 		Return(&internal.ObjAttr{Path: file}, nil)
 	suite.mock.EXPECT().
-		WriteFile(internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}).
+		WriteFile(&internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}).
 		Return(len(data), nil)
 	_, err = suite.sizeTracker.WriteFile(
-		internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data},
+		&internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data},
 	)
 	suite.assert.NoError(err)
@@ -175,10 +175,10 @@ func (suite *sizeTrackerMockTestSuite) TestStatFSFallBackEnabledOverThreshold()
 		GetAttr(internal.GetAttrOptions{Name: handle.Path}).
 		Return(&internal.ObjAttr{Path: file}, nil)
 	suite.mock.EXPECT().
-		WriteFile(internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}).
+		WriteFile(&internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}).
 		Return(len(data), nil)
 	_, err = suite.sizeTracker.WriteFile(
-		internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data},
+		&internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data},
 	)
 	suite.assert.NoError(err)
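
Reviewer note: the mock expectations switch to pointers along with the interface. gomock's default matcher wraps plain values in `gomock.Eq`, which compares with `reflect.DeepEqual`; `DeepEqual` follows pointers and compares the pointed-to structs field by field, so an expectation built from one `&internal.WriteFileOptions{...}` literal still matches the distinct pointer the component constructs at call time. A minimal sketch of the pattern (suite wiring elided):

```go
// Sketch: expectation and call hold different pointers to equal structs;
// DeepEqual-based matching makes the expectation fire anyway.
suite.mock.EXPECT().
    WriteFile(&internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data}).
    Return(len(data), nil)
_, err := suite.sizeTracker.WriteFile(
    &internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data},
)
```
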
diff --git a/component/size_tracker/size_tracker_test.go b/component/size_tracker/size_tracker_test.go
index d8a2ed98b..c6cefa46a 100644
--- a/component/size_tracker/size_tracker_test.go
+++ b/component/size_tracker/size_tracker_test.go
@@ -148,7 +148,7 @@ func (suite *sizeTrackerTestSuite) TestDeleteDir() {
 	testData := "test data"
 	data := []byte(testData)
 	_, err = suite.sizeTracker.WriteFile(
-		internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data},
+		&internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data},
 	)
 	suite.assert.NoError(err)
 	suite.assert.EqualValues(len(testData), suite.sizeTracker.mountSize.GetSize())
@@ -184,7 +184,7 @@ func (suite *sizeTrackerTestSuite) TestRenameDir() {
 	)
 	suite.assert.NoError(err)
 	_, err = suite.sizeTracker.WriteFile(
-		internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data},
+		&internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data},
 	)
 	suite.assert.NoError(err)
 	err = suite.sizeTracker.CloseFile(internal.CloseFileOptions{Handle: handle})
@@ -226,7 +226,7 @@ func (suite *sizeTrackerTestSuite) TestDeleteFile() {
 	testData := "test data"
 	data := []byte(testData)
 	_, err = suite.sizeTracker.WriteFile(
-		internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data},
+		&internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data},
 	)
 	suite.assert.NoError(err)
 	suite.assert.EqualValues(len(testData), suite.sizeTracker.mountSize.GetSize())
@@ -251,7 +251,7 @@ func (suite *sizeTrackerTestSuite) TestDeleteFileNegative() {
 	testData := "test data"
 	data := []byte(testData)
 	_, err = suite.loopback.WriteFile(
-		internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data},
+		&internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data},
 	)
 	suite.assert.NoError(err)
@@ -285,7 +285,7 @@ func (suite *sizeTrackerTestSuite) TestWriteFile() {
 	testData := "test data"
 	data := []byte(testData)
 	_, err = suite.sizeTracker.WriteFile(
-		internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data},
+		&internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data},
 	)
 	suite.assert.NoError(err)
 	suite.assert.EqualValues(len(data), suite.sizeTracker.mountSize.GetSize())
@@ -308,31 +308,31 @@ func (suite *sizeTrackerTestSuite) TestWriteFileMultiple() {
 	data := make([]byte, 1024*1024)
 	_, _ = rand.Read(data)
 	_, err = suite.sizeTracker.WriteFile(
-		internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data},
+		&internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data},
 	)
 	suite.assert.NoError(err)
 	suite.assert.EqualValues(len(data), suite.sizeTracker.mountSize.GetSize())
 
 	_, err = suite.sizeTracker.WriteFile(
-		internal.WriteFileOptions{Handle: handle, Offset: int64(len(data)), Data: data},
+		&internal.WriteFileOptions{Handle: handle, Offset: int64(len(data)), Data: data},
 	)
 	suite.assert.NoError(err)
 	suite.assert.EqualValues(2*len(data), suite.sizeTracker.mountSize.GetSize())
 
 	_, err = suite.sizeTracker.WriteFile(
-		internal.WriteFileOptions{Handle: handle, Offset: 512, Data: data},
+		&internal.WriteFileOptions{Handle: handle, Offset: 512, Data: data},
 	)
 	suite.assert.NoError(err)
 	suite.assert.EqualValues(2*len(data), suite.sizeTracker.mountSize.GetSize())
 
 	_, err = suite.sizeTracker.WriteFile(
-		internal.WriteFileOptions{Handle: handle, Offset: 2*int64(len(data)) + 512, Data: data},
+		&internal.WriteFileOptions{Handle: handle, Offset: 2*int64(len(data)) + 512, Data: data},
 	)
 	suite.assert.NoError(err)
 	suite.assert.EqualValues(3*len(data)+512, suite.sizeTracker.mountSize.GetSize())
 
 	_, err = suite.sizeTracker.WriteFile(
-		internal.WriteFileOptions{Handle: handle, Offset: 3 * int64(len(data)), Data: data},
+		&internal.WriteFileOptions{Handle: handle, Offset: 3 * int64(len(data)), Data: data},
 	)
 	suite.assert.NoError(err)
 	suite.assert.Equal(4*len(data), int(suite.sizeTracker.mountSize.GetSize()))
@@ -350,7 +350,7 @@ func (suite *sizeTrackerTestSuite) TestWriteFileErrorBadFd() {
 	// Setup
 	file := generateFileName()
 	handle := handlemap.NewHandle(file)
-	length, err := suite.sizeTracker.WriteFile(internal.WriteFileOptions{Handle: handle})
+	length, err := suite.sizeTracker.WriteFile(&internal.WriteFileOptions{Handle: handle})
 	suite.assert.Error(err)
 	suite.assert.Equal(0, length)
 	suite.assert.EqualValues(0, suite.sizeTracker.mountSize.GetSize())
@@ -383,7 +383,7 @@ func (suite *sizeTrackerTestSuite) TestFlushFile() {
 	testData := "test data"
 	data := []byte(testData)
 	_, err = suite.sizeTracker.WriteFile(
-		internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data},
+		&internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data},
 	)
 	suite.assert.NoError(err)
 	suite.assert.EqualValues(len(testData), suite.sizeTracker.mountSize.GetSize())
@@ -419,7 +419,7 @@ func (suite *sizeTrackerTestSuite) TestRenameFile() {
 	testData := "test data"
 	data := []byte(testData)
 	_, err = suite.sizeTracker.WriteFile(
-		internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data},
+		&internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data},
 	)
 	suite.assert.NoError(err)
 	suite.assert.EqualValues(len(testData), suite.sizeTracker.mountSize.GetSize())
@@ -450,7 +450,7 @@ func (suite *sizeTrackerTestSuite) TestRenameOpenFile() {
 
 	// write to file handle
 	data := []byte("newdata")
-	n, err := suite.sizeTracker.WriteFile(internal.WriteFileOptions{Handle: handle, Data: data})
+	n, err := suite.sizeTracker.WriteFile(&internal.WriteFileOptions{Handle: handle, Data: data})
 	suite.assert.NoError(err)
 	suite.assert.Equal(len(data), n)
@@ -486,7 +486,7 @@ func (suite *sizeTrackerTestSuite) TestRenameWriteFile() {
 	// write to file handle
 	data := []byte("newdata")
 	n, err := suite.sizeTracker.WriteFile(
-		internal.WriteFileOptions{Handle: handle, Data: data, Offset: 0},
+		&internal.WriteFileOptions{Handle: handle, Data: data, Offset: 0},
 	)
 	suite.assert.NoError(err)
 	suite.assert.Equal(len(data), n)
@@ -499,7 +499,7 @@ func (suite *sizeTrackerTestSuite) TestRenameWriteFile() {
 
 	// write to file handle
 	n, err = suite.sizeTracker.WriteFile(
-		internal.WriteFileOptions{Handle: handle, Data: data, Offset: int64(len(data))},
+		&internal.WriteFileOptions{Handle: handle, Data: data, Offset: int64(len(data))},
 	)
 	suite.assert.NoError(err)
 	suite.assert.Equal(len(data), n)
@@ -529,7 +529,7 @@ func (suite *sizeTrackerTestSuite) TestTruncateFile() {
 
 	size := 1024
 	err = suite.sizeTracker.TruncateFile(
-		internal.TruncateFileOptions{Name: path, Size: int64(size)},
+		internal.TruncateFileOptions{Name: path, NewSize: int64(size)},
 	)
 	suite.assert.NoError(err)
@@ -549,7 +549,7 @@ func (suite *sizeTrackerTestSuite) TestTruncateFileOpen() {
 
 	size := 1024
 	err = suite.sizeTracker.TruncateFile(
-		internal.TruncateFileOptions{Name: path, Size: int64(size)},
+		internal.TruncateFileOptions{Name: path, NewSize: int64(size)},
 	)
 	suite.assert.NoError(err)
@@ -578,7 +578,7 @@ func (suite *sizeTrackerTestSuite) TestSymlink() {
 	testData := "test data"
 	data := []byte(testData)
 	_, err = suite.sizeTracker.WriteFile(
-		internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data},
+		&internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data},
 	)
 	suite.assert.NoError(err)
 	suite.assert.EqualValues(len(data), suite.sizeTracker.mountSize.GetSize())
@@ -608,7 +608,7 @@ func (suite *sizeTrackerTestSuite) TestStatFS() {
 	data := make([]byte, 1024*1024)
 	_, _ = rand.Read(data)
 	_, err = suite.sizeTracker.WriteFile(
-		internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data},
+		&internal.WriteFileOptions{Handle: handle, Offset: 0, Data: data},
 	)
 	suite.assert.NoError(err)
 	err = suite.sizeTracker.FlushFile(internal.FlushFileOptions{Handle: handle})
diff --git a/component/stream/connection.go b/component/stream/connection.go
index 61a325077..c6ea11f9a 100644
--- a/component/stream/connection.go
+++ b/component/stream/connection.go
@@ -37,9 +37,9 @@ type StreamConnection interface {
 	DeleteFile(options internal.DeleteFileOptions) error
 	CreateFile(options internal.CreateFileOptions) (*handlemap.Handle, error) //TODO TEST THIS
 	Configure(cfg StreamOptions) error
-	ReadInBuffer(internal.ReadInBufferOptions) (int, error)
+	ReadInBuffer(*internal.ReadInBufferOptions) (int, error)
 	OpenFile(internal.OpenFileOptions) (*handlemap.Handle, error)
-	WriteFile(options internal.WriteFileOptions) (int, error)
+	WriteFile(options *internal.WriteFileOptions) (int, error)
 	TruncateFile(internal.TruncateFileOptions) error
 	FlushFile(internal.FlushFileOptions) error
 	GetAttr(internal.GetAttrOptions) (*internal.ObjAttr, error)
diff --git a/component/stream/read.go b/component/stream/read.go
index 5b2626a8e..5ae457585 100644
--- a/component/stream/read.go
+++ b/component/stream/read.go
@@ -137,7 +137,7 @@ func (r *ReadCache) getBlock(handle *handlemap.Handle, offset int64) (*common.Bl
 		handle.CacheObj.Put(blockKeyObj, block)
 		handle.CacheObj.Unlock()
 		// if the block does not exist fetch it from the next component
-		options := internal.ReadInBufferOptions{
+		options := &internal.ReadInBufferOptions{
 			Handle: handle,
 			Offset: block.StartIndex,
 			Data:   block.Data,
@@ -187,7 +187,7 @@ func (r *ReadCache) copyCachedBlock(
 	return dataRead, nil
 }
 
-func (r *ReadCache) ReadInBuffer(options internal.ReadInBufferOptions) (int, error) {
+func (r *ReadCache) ReadInBuffer(options *internal.ReadInBufferOptions) (int, error) {
 	// if we're only streaming then avoid using the cache
 	if r.StreamOnly || options.Handle.CacheObj.StreamOnly {
 		data, err := r.NextComponent().ReadInBuffer(options)
@@ -224,7 +224,7 @@ func (r *ReadCache) GetAttr(options internal.GetAttrOptions) (*internal.ObjAttr,
 	return r.NextComponent().GetAttr(options)
 }
 
-func (r *ReadCache) WriteFile(options internal.WriteFileOptions) (int, error) {
+func (r *ReadCache) WriteFile(options *internal.WriteFileOptions) (int, error) {
 	return 0, syscall.ENOTSUP
 }
diff --git a/component/stream/read_test.go b/component/stream/read_test.go
index 73b0babb7..02b58052e 100644
--- a/component/stream/read_test.go
+++ b/component/stream/read_test.go
@@ -104,7 +104,7 @@ func (suite *streamTestSuite) getRequestOptions(
 	handle *handlemap.Handle,
 	overwriteEndIndex bool,
 	fileSize, offset, endIndex int64,
-) (internal.OpenFileOptions, internal.ReadInBufferOptions, *[]byte) {
+) (internal.OpenFileOptions, *internal.ReadInBufferOptions, *[]byte) {
 	var data []byte
 	openFileOptions := internal.OpenFileOptions{
 		Name: fileNames[fileIndex],
@@ -116,7 +116,7 @@ func (suite *streamTestSuite) getRequestOptions(
 	} else {
 		data = make([]byte, endIndex-offset)
 	}
-	readInBufferOptions := internal.ReadInBufferOptions{Handle: handle, Offset: offset, Data: data}
+	readInBufferOptions := &internal.ReadInBufferOptions{Handle: handle, Offset: offset, Data: data}
 
 	return openFileOptions, readInBufferOptions, &data
 }
@@ -136,7 +136,7 @@ func getCachedBlock(suite *streamTestSuite, offset int64, handle *handlemap.Hand
 }
 
 // Concurrency helpers with wait group terminations ========================================
-func asyncReadInBuffer(suite *streamTestSuite, readInBufferOptions internal.ReadInBufferOptions) {
+func asyncReadInBuffer(suite *streamTestSuite, readInBufferOptions *internal.ReadInBufferOptions) {
 	_, _ = suite.stream.ReadInBuffer(readInBufferOptions)
 	wg.Done()
 }
@@ -218,7 +218,7 @@ func (suite *streamTestSuite) TestReadWriteFile() {
 	suite.cleanupTest()
 	config = "stream:\n  block-size-mb: 0\n  buffer-size-mb: 16\n  max-buffers: 4\n"
 	suite.setupTestHelper(config, true)
-	_, err := suite.stream.WriteFile(internal.WriteFileOptions{})
+	_, err := suite.stream.WriteFile(&internal.WriteFileOptions{})
 	suite.assert.Equal(syscall.ENOTSUP, err)
 }
@@ -659,7 +659,7 @@ func (suite *streamTestSuite) TestCachedData() {
 	config := "stream:\n  block-size-mb: 16\n  buffer-size-mb: 32\n  max-buffers: 4\n"
 	suite.setupTestHelper(config, true)
 	var dataBuffer *[]byte
-	var readInBufferOptions internal.ReadInBufferOptions
+	var readInBufferOptions *internal.ReadInBufferOptions
 	handle_1 := &handlemap.Handle{Size: int64(32 * MB), Path: fileNames[0]}
 
 	data := *getBlockData(suite, 32*MB)
@@ -725,7 +725,7 @@ func (suite *streamTestSuite) TestAsyncReadAndEviction() {
 	var blockOneDataBuffer *[]byte
 	var blockTwoDataBuffer *[]byte
-	var readInBufferOptions internal.ReadInBufferOptions
+	var readInBufferOptions *internal.ReadInBufferOptions
 	handle_1 := &handlemap.Handle{Size: int64(16 * MB), Path: fileNames[0]}
 
 	// Even though our file size is 16MB below we only check against 8MB of the data (we check against two blocks)
diff --git a/component/stream/read_write.go b/component/stream/read_write.go
index 28bd31a08..14e05897c 100644
--- a/component/stream/read_write.go
+++ b/component/stream/read_write.go
@@ -114,7 +114,7 @@ func (rw *ReadWriteCache) OpenFile(options internal.OpenFileOptions) (*handlemap
 	return handle, err
 }
 
-func (rw *ReadWriteCache) ReadInBuffer(options internal.ReadInBufferOptions) (int, error) {
+func (rw *ReadWriteCache) ReadInBuffer(options *internal.ReadInBufferOptions) (int, error) {
 	// log.Trace("Stream::ReadInBuffer : name=%s, handle=%d, offset=%d", options.Handle.Path, options.Handle.ID, options.Offset)
 	if !rw.StreamOnly && options.Handle.CacheObj.StreamOnly {
 		err := rw.createHandleCache(options.Handle)
@@ -154,7 +154,7 @@ func (rw *ReadWriteCache) ReadInBuffer(options internal.ReadInBufferOptions) (in
 	return read, err
 }
 
-func (rw *ReadWriteCache) WriteFile(options internal.WriteFileOptions) (int, error) {
+func (rw *ReadWriteCache) WriteFile(options *internal.WriteFileOptions) (int, error) {
 	// log.Trace("Stream::WriteFile : name=%s, handle=%d, offset=%d", options.Handle.Path, options.Handle.ID, options.Offset)
 	if !rw.StreamOnly && options.Handle.CacheObj.StreamOnly {
 		err := rw.createHandleCache(options.Handle)
@@ -193,7 +193,7 @@ func (rw *ReadWriteCache) WriteFile(options internal.WriteFileOptions) (int, err
 }
 
 func (rw *ReadWriteCache) TruncateFile(options internal.TruncateFileOptions) error {
-	log.Trace("Stream::TruncateFile : name=%s, size=%d", options.Name, options.Size)
+	log.Trace("Stream::TruncateFile : name=%s, size=%d", options.Name, options.NewSize)
 	// if !rw.StreamOnly {
 	// 	handleMap := handlemap.GetHandles()
 	// 	handleMap.Range(func(key, value interface{}) bool {
@@ -437,7 +437,7 @@ func (rw *ReadWriteCache) createHandleCache(handle *handlemap.Handle) error {
 	var err error
 	if handle.Size == 0 {
 		offsets = &common.BlockOffsetList{}
-		offsets.Flags.Set(common.SmallFile)
+		offsets.Flags.Set(common.BlobFlagHasNoBlocks)
 	} else {
 		offsets, err = rw.NextComponent().GetFileBlockOffsets(opts)
 		if err != nil {
@@ -446,7 +446,7 @@ func (rw *ReadWriteCache) createHandleCache(handle *handlemap.Handle) error {
 	}
 	handle.CacheObj.BlockOffsetList = offsets
 	// if its a small file then download the file in its entirety if there is memory available, otherwise stream only
-	if handle.CacheObj.SmallFile() {
+	if handle.CacheObj.HasNoBlocks() {
 		if uint64(atomic.LoadInt64(&handle.Size)) > memory.FreeMemory() {
 			handle.CacheObj.StreamOnly = true
 			return nil
@@ -460,7 +460,7 @@ func (rw *ReadWriteCache) createHandleCache(handle *handlemap.Handle) error {
 		handle.CacheObj.BlockList = append(handle.CacheObj.BlockList, block)
 		handle.CacheObj.BlockIdLength = common.GetIdLength(block.Id)
 		// now consists of a block - clear the flag
-		handle.CacheObj.Flags.Clear(common.SmallFile)
+		handle.CacheObj.Flags.Clear(common.BlobFlagHasNoBlocks)
 	}
 	atomic.AddInt32(&rw.CachedObjects, 1)
 	return nil
@@ -493,7 +493,7 @@ func (rw *ReadWriteCache) getBlock(
 	if err != nil {
 		return block, false, err
 	}
-	options := internal.ReadInBufferOptions{
+	options := &internal.ReadInBufferOptions{
 		Handle: handle,
 		Offset: block.StartIndex,
 		Data:   block.Data,
diff --git a/component/stream/read_write_filename.go b/component/stream/read_write_filename.go
index d66f59546..118e5b073 100644
--- a/component/stream/read_write_filename.go
+++ b/component/stream/read_write_filename.go
@@ -113,7 +113,7 @@ func (rw *ReadWriteFilenameCache) OpenFile(
 	return handle, err
 }
 
-func (rw *ReadWriteFilenameCache) ReadInBuffer(options internal.ReadInBufferOptions) (int, error) {
+func (rw *ReadWriteFilenameCache) ReadInBuffer(options *internal.ReadInBufferOptions) (int, error) {
 	// log.Trace("Stream::ReadInBuffer : name=%s, handle=%d, offset=%d", options.Handle.Path, options.Handle.ID, options.Offset)
 	if !rw.StreamOnly && options.Handle.CacheObj.StreamOnly {
 		err := rw.createFileCache(options.Handle)
@@ -151,7 +151,7 @@ func (rw *ReadWriteFilenameCache) ReadInBuffer(options internal.ReadInBufferOpti
 	return read, err
 }
 
-func (rw *ReadWriteFilenameCache) WriteFile(options internal.WriteFileOptions) (int, error) {
+func (rw *ReadWriteFilenameCache) WriteFile(options *internal.WriteFileOptions) (int, error) {
 	// log.Trace("Stream::WriteFile : name=%s, handle=%d, offset=%d", options.Handle.Path, options.Handle.ID, options.Offset)
 	if !rw.StreamOnly && options.Handle.CacheObj.StreamOnly {
 		err := rw.createFileCache(options.Handle)
@@ -189,7 +189,7 @@ func (rw *ReadWriteFilenameCache) WriteFile(options internal.WriteFileOptions) (
 
 // TODO: truncate in cache
 func (rw *ReadWriteFilenameCache) TruncateFile(options internal.TruncateFileOptions) error {
-	log.Trace("Stream::TruncateFile : name=%s, size=%d", options.Name, options.Size)
+	log.Trace("Stream::TruncateFile : name=%s, size=%d", options.Name, options.NewSize)
 	err := rw.NextComponent().TruncateFile(options)
 	if err != nil {
 		log.Err("Stream::TruncateFile : error truncating file %s [%s]", options.Name, err.Error())
@@ -388,7 +388,7 @@ func (rw *ReadWriteFilenameCache) createFileCache(handle *handlemap.Handle) erro
 	handle.CacheObj.BlockOffsetList = offsets
 	atomic.StoreInt64(&handle.CacheObj.Size, handle.Size)
 	handle.CacheObj.Mtime = handle.Mtime
-	if handle.CacheObj.SmallFile() {
+	if handle.CacheObj.HasNoBlocks() {
 		if uint64(atomic.LoadInt64(&handle.Size)) > memory.FreeMemory() {
 			handle.CacheObj.StreamOnly = true
 			return nil
@@ -402,7 +402,7 @@ func (rw *ReadWriteFilenameCache) createFileCache(handle *handlemap.Handle) erro
 		handle.CacheObj.BlockList = append(handle.CacheObj.BlockList, block)
 		handle.CacheObj.BlockIdLength = common.GetIdLength(block.Id)
 		// now consists of a block - clear the flag
-		handle.CacheObj.Flags.Clear(common.SmallFile)
+		handle.CacheObj.Flags.Clear(common.BlobFlagHasNoBlocks)
 	}
 	rw.fileCache[handle.Path] = handle.CacheObj
 	atomic.AddInt32(&rw.CachedObjects, 1)
@@ -445,7 +445,7 @@ func (rw *ReadWriteFilenameCache) getBlock(
 	if err != nil {
 		return block, false, err
 	}
-	options := internal.ReadInBufferOptions{
+	options := &internal.ReadInBufferOptions{
 		Handle: handle,
 		Offset: block.StartIndex,
 		Data:   block.Data,
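
Reviewer note: both stream caches gate whole-object caching on the renamed predicate. Condensing the `createHandleCache`/`createFileCache` hunks above into one decision (all identifiers as they appear in the diff):

```go
// Sketch: cache a block-less (single-part) object whole, or fall back to
// stream-only when it will not fit in free memory.
if handle.CacheObj.HasNoBlocks() {
    if uint64(atomic.LoadInt64(&handle.Size)) > memory.FreeMemory() {
        handle.CacheObj.StreamOnly = true // too large to hold in memory
        return nil
    }
    // otherwise: download the object into a single block, then clear the
    // flag, since the cache object now consists of a block
    handle.CacheObj.Flags.Clear(common.BlobFlagHasNoBlocks)
}
```
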
diff --git a/component/stream/read_write_filename_test.go b/component/stream/read_write_filename_test.go
index a66f683c9..4284e7256 100644
--- a/component/stream/read_write_filename_test.go
+++ b/component/stream/read_write_filename_test.go
@@ -241,7 +241,7 @@ func (suite *streamTestSuite) TestCacheSmallFileFilenameOnOpen() {
 	bol := &common.BlockOffsetList{
 		BlockList: []*common.Block{},
 	}
-	bol.Flags.Set(common.SmallFile)
+	bol.Flags.Set(common.BlobFlagHasNoBlocks)
 
 	suite.mock.EXPECT().OpenFile(openFileOptions).Return(handle, nil)
 	suite.mock.EXPECT().GetFileBlockOffsets(getFileBlockOffsetsOptions).Return(bol, nil)
@@ -259,7 +259,7 @@ func (suite *streamTestSuite) TestCacheSmallFileFilenameOnOpen() {
 		Mode: os.FileMode(0777),
 	}
 	getFileBlockOffsetsOptions = internal.GetFileBlockOffsetsOptions{Name: fileNames[1]}
-	readInBufferOptions := internal.ReadInBufferOptions{
+	readInBufferOptions := &internal.ReadInBufferOptions{
 		Handle: handle,
 		Offset: 0,
 		Data:   make([]byte, 1),
@@ -301,7 +301,7 @@ func (suite *streamTestSuite) TestFilenameReadInBuffer() {
 	_, _ = suite.stream.OpenFile(openFileOptions)
 
 	// get second block
-	readInBufferOptions := internal.ReadInBufferOptions{
+	readInBufferOptions := &internal.ReadInBufferOptions{
 		Handle: handle,
 		Offset: 0,
 		Data:   make([]byte, 2*MB),
@@ -392,7 +392,7 @@ func (suite *streamTestSuite) TestFilenameStreamOnly() {
 	_, err := suite.stream.OpenFile(openFileOptions)
 	suite.assert.Error(err)
 
-	writeFileOptions := internal.WriteFileOptions{
+	writeFileOptions := &internal.WriteFileOptions{
 		Handle: handle,
 		Offset: 1 * MB,
 		Data:   make([]byte, 1*MB),
@@ -432,18 +432,18 @@ func (suite *streamTestSuite) TestFilenameReadLargeFileBlocks() {
 	assertHandleNotStreamOnly(suite, handle1)
 
 	// data spans two blocks
-	readInBufferOptions := internal.ReadInBufferOptions{
+	readInBufferOptions := &internal.ReadInBufferOptions{
 		Handle: handle1,
 		Offset: 1*MB - 2,
 		Data:   make([]byte, 7),
 	}
-	suite.mock.EXPECT().ReadInBuffer(internal.ReadInBufferOptions{
+	suite.mock.EXPECT().ReadInBuffer(&internal.ReadInBufferOptions{
 		Handle: handle1,
 		Offset: 0,
 		Data:   make([]byte, 1*MB)}).Return(len(readInBufferOptions.Data), nil)
-	suite.mock.EXPECT().ReadInBuffer(internal.ReadInBufferOptions{
+	suite.mock.EXPECT().ReadInBuffer(&internal.ReadInBufferOptions{
 		Handle: handle1,
 		Offset: 1 * MB,
 		Data:   make([]byte, 1*MB)}).Return(len(readInBufferOptions.Data), nil)
@@ -471,8 +471,8 @@ func (suite *streamTestSuite) TestFilenamePurgeOnClose() {
 	bol := &common.BlockOffsetList{
 		BlockList: []*common.Block{},
 	}
-	bol.Flags.Set(common.SmallFile)
-	readInBufferOptions := internal.ReadInBufferOptions{
+	bol.Flags.Set(common.BlobFlagHasNoBlocks)
+	readInBufferOptions := &internal.ReadInBufferOptions{
 		Handle: handle,
 		Offset: 0,
 		Data:   make([]byte, 1),
@@ -480,7 +480,9 @@ func (suite *streamTestSuite) TestFilenamePurgeOnClose() {
 	suite.mock.EXPECT().OpenFile(openFileOptions).Return(handle, nil)
 	suite.mock.EXPECT().GetFileBlockOffsets(getFileBlockOffsetsOptions).Return(bol, nil)
-	suite.mock.EXPECT().ReadInBuffer(readInBufferOptions).Return(len(readInBufferOptions.Data), nil)
+	suite.mock.EXPECT().
+		ReadInBuffer(readInBufferOptions).
+		Return(len(readInBufferOptions.Data), nil)
 	_, _ = suite.stream.OpenFile(openFileOptions)
 
 	assertBlockCached(suite, 0, handle)
@@ -512,8 +514,8 @@ func (suite *streamTestSuite) TestFilenameWriteToSmallFileEviction() {
 	bol := &common.BlockOffsetList{
 		BlockList: []*common.Block{},
 	}
-	bol.Flags.Set(common.SmallFile)
-	readInBufferOptions := internal.ReadInBufferOptions{
+	bol.Flags.Set(common.BlobFlagHasNoBlocks)
+	readInBufferOptions := &internal.ReadInBufferOptions{
 		Handle: handle,
 		Offset: 0,
 		Data:   make([]byte, 1*MB),
@@ -527,7 +529,7 @@ func (suite *streamTestSuite) TestFilenameWriteToSmallFileEviction() {
 	assertNumberOfCachedFileBlocks(suite, 1, handle)
 
 	// append new block and confirm old gets evicted
-	writeFileOptions := internal.WriteFileOptions{
+	writeFileOptions := &internal.WriteFileOptions{
 		Handle: handle,
 		Offset: 1 * MB,
 		Data:   make([]byte, 1*MB),
@@ -562,7 +564,7 @@ func (suite *streamTestSuite) TestFilenameLargeFileEviction() {
 		BlockList:     []*common.Block{block1, block2},
 		BlockIdLength: 10,
 	}
-	readInBufferOptions := internal.ReadInBufferOptions{
+	readInBufferOptions := &internal.ReadInBufferOptions{
 		Handle: handle,
 		Offset: 0,
 		Data:   make([]byte, 1*MB),
@@ -579,7 +581,7 @@ func (suite *streamTestSuite) TestFilenameLargeFileEviction() {
 	assertNumberOfCachedFileBlocks(suite, 1, handle)
 
 	// get second block
-	readInBufferOptions = internal.ReadInBufferOptions{
+	readInBufferOptions = &internal.ReadInBufferOptions{
 		Handle: handle,
 		Offset: 1 * MB,
 		Data:   make([]byte, 1*MB),
@@ -592,7 +594,7 @@ func (suite *streamTestSuite) TestFilenameLargeFileEviction() {
 	assertNumberOfCachedFileBlocks(suite, 2, handle)
 
 	// write to second block
-	writeFileOptions := internal.WriteFileOptions{
+	writeFileOptions := &internal.WriteFileOptions{
 		Handle: handle,
 		Offset: 1*MB + 2,
 		Data:   make([]byte, 2),
@@ -677,14 +679,14 @@ func (suite *streamTestSuite) TestFilenameStreamOnly2() {
 	_ = suite.stream.CloseFile(closeFileOptions)
 
 	// get block for second handle and confirm it gets cached
-	readInBufferOptions := internal.ReadInBufferOptions{
+	readInBufferOptions := &internal.ReadInBufferOptions{
 		Handle: handle2,
 		Offset: 0,
 		Data:   make([]byte, 4),
 	}
 	suite.mock.EXPECT().GetFileBlockOffsets(getFileBlockOffsetsOptions2).Return(bol, nil)
-	suite.mock.EXPECT().ReadInBuffer(internal.ReadInBufferOptions{
+	suite.mock.EXPECT().ReadInBuffer(&internal.ReadInBufferOptions{
 		Handle: handle2,
 		Offset: 0,
 		Data:   make([]byte, 1*MB)}).Return(len(readInBufferOptions.Data), nil)
@@ -708,7 +710,7 @@ func (suite *streamTestSuite) TestFilenameCreateFile() {
 	bol := &common.BlockOffsetList{
 		BlockList: []*common.Block{},
 	}
-	bol.Flags.Set(common.SmallFile)
+	bol.Flags.Set(common.BlobFlagHasNoBlocks)
 
 	suite.mock.EXPECT().CreateFile(createFileoptions).Return(handle1, nil)
 	suite.mock.EXPECT().GetFileBlockOffsets(getFileBlockOffsetsOptions).Return(bol, nil)
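
Reviewer note: the stream tests fabricate a single-part object by building an empty `BlockOffsetList` and setting the renamed flag directly, then expect components to clear it once real blocks exist. The round-trip, using only helpers that appear in these hunks:

```go
// Sketch: flag round-trip on a BlockOffsetList.
bol := &common.BlockOffsetList{BlockList: []*common.Block{}}
bol.Flags.Set(common.BlobFlagHasNoBlocks)   // no multipart block list yet
_ = bol.HasNoBlocks()                       // true
bol.Flags.Clear(common.BlobFlagHasNoBlocks) // now consists of real blocks
_ = bol.HasNoBlocks()                       // false
```
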
diff --git a/component/stream/read_write_test.go b/component/stream/read_write_test.go
index f84a1108e..ac23603b2 100644
--- a/component/stream/read_write_test.go
+++ b/component/stream/read_write_test.go
@@ -241,7 +241,7 @@ func (suite *streamTestSuite) TestCacheSmallFileOnOpen() {
 	bol := &common.BlockOffsetList{
 		BlockList: []*common.Block{},
 	}
-	bol.Flags.Set(common.SmallFile)
+	bol.Flags.Set(common.BlobFlagHasNoBlocks)
 
 	suite.mock.EXPECT().OpenFile(openFileOptions).Return(handle, nil)
 	suite.mock.EXPECT().GetFileBlockOffsets(getFileBlockOffsetsOptions).Return(bol, nil)
@@ -259,7 +259,7 @@ func (suite *streamTestSuite) TestCacheSmallFileOnOpen() {
 		Mode: os.FileMode(0777),
 	}
 	getFileBlockOffsetsOptions = internal.GetFileBlockOffsetsOptions{Name: fileNames[1]}
-	readInBufferOptions := internal.ReadInBufferOptions{
+	readInBufferOptions := &internal.ReadInBufferOptions{
 		Handle: handle,
 		Offset: 0,
 		Data:   make([]byte, 1),
@@ -301,7 +301,7 @@ func (suite *streamTestSuite) TestReadInBuffer() {
 	_, _ = suite.stream.OpenFile(openFileOptions)
 
 	// get second block
-	readInBufferOptions := internal.ReadInBufferOptions{
+	readInBufferOptions := &internal.ReadInBufferOptions{
 		Handle: handle,
 		Offset: 0,
 		Data:   make([]byte, 2*MB),
@@ -392,7 +392,7 @@ func (suite *streamTestSuite) TestStreamOnly() {
 	_, err := suite.stream.OpenFile(openFileOptions)
 	suite.assert.Error(err)
 
-	writeFileOptions := internal.WriteFileOptions{
+	writeFileOptions := &internal.WriteFileOptions{
 		Handle: handle,
 		Offset: 1 * MB,
 		Data:   make([]byte, 1*MB),
@@ -432,18 +432,18 @@ func (suite *streamTestSuite) TestReadLargeFileBlocks() {
 	assertHandleNotStreamOnly(suite, handle1)
 
 	// data spans two blocks
-	readInBufferOptions := internal.ReadInBufferOptions{
+	readInBufferOptions := &internal.ReadInBufferOptions{
 		Handle: handle1,
 		Offset: 1*MB - 2,
 		Data:   make([]byte, 7),
 	}
-	suite.mock.EXPECT().ReadInBuffer(internal.ReadInBufferOptions{
+	suite.mock.EXPECT().ReadInBuffer(&internal.ReadInBufferOptions{
 		Handle: handle1,
 		Offset: 0,
 		Data:   make([]byte, 1*MB)}).Return(len(readInBufferOptions.Data), nil)
-	suite.mock.EXPECT().ReadInBuffer(internal.ReadInBufferOptions{
+	suite.mock.EXPECT().ReadInBuffer(&internal.ReadInBufferOptions{
 		Handle: handle1,
 		Offset: 1 * MB,
 		Data:   make([]byte, 1*MB)}).Return(len(readInBufferOptions.Data), nil)
@@ -471,8 +471,8 @@ func (suite *streamTestSuite) TestPurgeOnClose() {
 	bol := &common.BlockOffsetList{
 		BlockList: []*common.Block{},
 	}
-	bol.Flags.Set(common.SmallFile)
-	readInBufferOptions := internal.ReadInBufferOptions{
+	bol.Flags.Set(common.BlobFlagHasNoBlocks)
+	readInBufferOptions := &internal.ReadInBufferOptions{
 		Handle: handle,
 		Offset: 0,
 		Data:   make([]byte, 1),
@@ -512,8 +512,8 @@ func (suite *streamTestSuite) TestWriteToSmallFileEviction() {
 	bol := &common.BlockOffsetList{
 		BlockList: []*common.Block{},
 	}
-	bol.Flags.Set(common.SmallFile)
-	readInBufferOptions := internal.ReadInBufferOptions{
+	bol.Flags.Set(common.BlobFlagHasNoBlocks)
+	readInBufferOptions := &internal.ReadInBufferOptions{
 		Handle: handle,
 		Offset: 0,
 		Data:   make([]byte, 1*MB),
@@ -527,7 +527,7 @@ func (suite *streamTestSuite) TestWriteToSmallFileEviction() {
 	assertNumberOfCachedFileBlocks(suite, 1, handle)
 
 	// append new block and confirm old gets evicted
-	writeFileOptions := internal.WriteFileOptions{
+	writeFileOptions := &internal.WriteFileOptions{
 		Handle: handle,
 		Offset: 1 * MB,
 		Data:   make([]byte, 1*MB),
@@ -562,7 +562,7 @@ func (suite *streamTestSuite) TestLargeFileEviction() {
 		BlockList:     []*common.Block{block1, block2},
 		BlockIdLength: 10,
 	}
-	readInBufferOptions := internal.ReadInBufferOptions{
+	readInBufferOptions := &internal.ReadInBufferOptions{
 		Handle: handle,
 		Offset: 0,
 		Data:   make([]byte, 1*MB),
@@ -579,7 +579,7 @@ func (suite *streamTestSuite) TestLargeFileEviction() {
 	assertNumberOfCachedFileBlocks(suite, 1, handle)
 
 	// get second block
-	readInBufferOptions = internal.ReadInBufferOptions{
+	readInBufferOptions = &internal.ReadInBufferOptions{
 		Handle: handle,
 		Offset: 1 * MB,
 		Data:   make([]byte, 1*MB),
@@ -592,7 +592,7 @@ func (suite *streamTestSuite) TestLargeFileEviction() {
 	assertNumberOfCachedFileBlocks(suite, 2, handle)
 
 	// write to second block
-	writeFileOptions := internal.WriteFileOptions{
+	writeFileOptions := &internal.WriteFileOptions{
 		Handle: handle,
 		Offset: 1*MB + 2,
 		Data:   make([]byte, 2),
@@ -676,14 +676,14 @@ func (suite *streamTestSuite) TestStreamOnlyHandle() {
 	_ = suite.stream.CloseFile(closeFileOptions)
 
 	// get block for second handle and confirm it gets cached
-	readInBufferOptions := internal.ReadInBufferOptions{
+	readInBufferOptions := &internal.ReadInBufferOptions{
 		Handle: handle2,
 		Offset: 0,
 		Data:   make([]byte, 4),
 	}
 	suite.mock.EXPECT().GetFileBlockOffsets(getFileBlockOffsetsOptions).Return(bol, nil)
-	suite.mock.EXPECT().ReadInBuffer(internal.ReadInBufferOptions{
+	suite.mock.EXPECT().ReadInBuffer(&internal.ReadInBufferOptions{
 		Handle: handle2,
 		Offset: 0,
 		Data:   make([]byte, 1*MB)}).Return(len(readInBufferOptions.Data), nil)
@@ -706,7 +706,7 @@ func (suite *streamTestSuite) TestCreateFile() {
 	bol := &common.BlockOffsetList{
 		BlockList: []*common.Block{},
 	}
-	bol.Flags.Set(common.SmallFile)
+	bol.Flags.Set(common.BlobFlagHasNoBlocks)
 
 	suite.mock.EXPECT().CreateFile(createFileoptions).Return(handle1, nil)
 	_, _ = suite.stream.CreateFile(createFileoptions)
diff --git a/component/stream/stream.go b/component/stream/stream.go
index d0b1a6f2e..9849d24fe 100644
--- a/component/stream/stream.go
+++ b/component/stream/stream.go
@@ -134,11 +134,11 @@ func (st *Stream) OpenFile(options internal.OpenFileOptions) (*handlemap.Handle,
 	return st.cache.OpenFile(options)
 }
 
-func (st *Stream) ReadInBuffer(options internal.ReadInBufferOptions) (int, error) {
+func (st *Stream) ReadInBuffer(options *internal.ReadInBufferOptions) (int, error) {
 	return st.cache.ReadInBuffer(options)
 }
 
-func (st *Stream) WriteFile(options internal.WriteFileOptions) (int, error) {
+func (st *Stream) WriteFile(options *internal.WriteFileOptions) (int, error) {
 	return st.cache.WriteFile(options)
 }
diff --git a/component/xload/data_manager.go b/component/xload/data_manager.go
index 7379fe9ba..d0a1b1e8f 100644
--- a/component/xload/data_manager.go
+++ b/component/xload/data_manager.go
@@ -125,7 +125,7 @@ func (rdm *remoteDataManager) Process(item *WorkItem) (int, error) {
 func (rdm *remoteDataManager) ReadData(item *WorkItem) (int, error) {
 	// log.Debug("remoteDataManager::ReadData : Scheduling download for %s offset %v", item.Path, item.Block.Offset)
 
-	bytesTransferred, err := rdm.GetRemote().ReadInBuffer(internal.ReadInBufferOptions{
+	bytesTransferred, err := rdm.GetRemote().ReadInBuffer(&internal.ReadInBufferOptions{
 		Offset: item.Block.Offset,
 		Data:   item.Block.Data,
 		Path:   item.Path,
diff --git a/component/xload/stats_manager.go b/component/xload/stats_manager.go
index 72628d441..6ff1b9571 100644
--- a/component/xload/stats_manager.go
+++ b/component/xload/stats_manager.go
@@ -231,12 +231,12 @@ func (sm *StatsManager) calculateBandwidth() {
 	bandwidthMbps := float64(bytesTransferred*8) / (timeLapsed * float64(MB))
 	diskSpeedMbps := float64(sm.diskIOBytes*8) / (timeLapsed * float64(MB))
 
-	var maxBlocks, pr, reg uint32
+	var maximum, pr, reg uint32
 	var waiting int32
 	var poolusage uint32
 
 	if sm.pool != nil {
-		maxBlocks, pr, reg, waiting = sm.pool.GetUsageDetails()
+		maximum, pr, reg, waiting = sm.pool.GetUsageDetails()
 		sm.pool.Usage()
 	}
 
@@ -244,7 +244,7 @@ func (sm *StatsManager) calculateBandwidth() {
 		"%v Pending, %v Total, Bytes transferred %v, Throughput (Mbps): %.2f, Disk Speed (Mbps): %.2f, Blockpool usage: %v%%, (%v / %v / %v : %v), Time: %.2f",
 		currTime.Format(time.RFC1123), percentCompleted, sm.success, sm.failed,
 		filesPending, sm.totalFiles, bytesTransferred, bandwidthMbps, diskSpeedMbps, poolusage,
-		maxBlocks, pr, reg, waiting, timeLapsed)
+		maximum, pr, reg, waiting, timeLapsed)
 
 	if sm.fileHandle != nil {
 		err := sm.marshalStatsData(&statsJSONData{
diff --git a/go.mod b/go.mod
index 33d53162f..57e1b320d 100644
--- a/go.mod
+++ b/go.mod
@@ -26,7 +26,7 @@ require (
 	github.com/robfig/cron/v3 v3.0.1
 	github.com/sevlyar/go-daemon v0.1.7-0.20240723083326-c2a11b2b57fc
 	github.com/shirou/gopsutil/v4 v4.25.11
-	github.com/spf13/cobra v1.9.1
+	github.com/spf13/cobra v1.10.1
 	github.com/spf13/pflag v1.0.10
 	github.com/spf13/viper v1.21.0
 	github.com/stretchr/testify v1.11.1
@@ -76,8 +76,7 @@ require (
 	github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
 	github.com/rivo/uniseg v0.4.7 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
-	github.com/sagikazarmark/locafero v0.11.0 // indirect
-	github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
+	github.com/sagikazarmark/locafero v0.12.0 // indirect
 	github.com/spf13/afero v1.15.0 // indirect
 	github.com/spf13/cast v1.10.0 // indirect
 	github.com/subosito/gotenv v1.6.0 // indirect
diff --git a/go.sum b/go.sum
index a76d29375..ba109300a 100644
--- a/go.sum
+++ b/go.sum
@@ -134,14 +134,12 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU
 github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc=
-github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik=
+github.com/sagikazarmark/locafero v0.12.0 h1:/NQhBAkUb4+fH1jivKHWusDYFjMOOKU88eegjfxfHb4=
+github.com/sagikazarmark/locafero v0.12.0/go.mod h1:sZh36u/YSZ918v0Io+U9ogLYQJ9tLLBmM4eneO6WwsI=
 github.com/sevlyar/go-daemon v0.1.7-0.20240723083326-c2a11b2b57fc h1:/StamxKd71C89UI6Vo7NXg8zu6eHxj/jgxdwAVRqP9Q=
 github.com/sevlyar/go-daemon v0.1.7-0.20240723083326-c2a11b2b57fc/go.mod h1:XFAAg6dLmyBIYW7Gss91IQoNmbvZXAVdrXRP9u9AQu8=
 github.com/shirou/gopsutil/v4 v4.25.11 h1:X53gB7muL9Gnwwo2evPSE+SfOrltMoR6V3xJAXZILTY=
 github.com/shirou/gopsutil/v4 v4.25.11/go.mod h1:EivAfP5x2EhLp2ovdpKSozecVXn1TmuG7SMzs/Wh4PU=
-github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw=
-github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U=
 github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I=
 github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg=
 github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
diff --git a/internal/base_component.go b/internal/base_component.go
index 5cca3585f..c2159dc5b 100644
--- a/internal/base_component.go
+++ b/internal/base_component.go
@@ -170,14 +170,14 @@ func (base *BaseComponent) RenameFile(options RenameFileOptions) error {
 	return nil
 }
 
-func (base *BaseComponent) ReadInBuffer(options ReadInBufferOptions) (int, error) {
+func (base *BaseComponent) ReadInBuffer(options *ReadInBufferOptions) (int, error) {
 	if base.next != nil {
 		return base.next.ReadInBuffer(options)
 	}
 	return 0, nil
 }
 
-func (base *BaseComponent) WriteFile(options WriteFileOptions) (int, error) {
+func (base *BaseComponent) WriteFile(options *WriteFileOptions) (int, error) {
 	if base.next != nil {
 		return base.next.WriteFile(options)
 	}
diff --git a/internal/component.go b/internal/component.go
index a81a2619c..d5f286158 100644
--- a/internal/component.go
+++ b/internal/component.go
@@ -94,9 +94,9 @@ type Component interface {
 	RenameFile(RenameFileOptions) error
 
-	ReadInBuffer(ReadInBufferOptions) (int, error)
+	ReadInBuffer(*ReadInBufferOptions) (int, error)
 
-	WriteFile(WriteFileOptions) (int, error)
+	WriteFile(*WriteFileOptions) (int, error)
 
 	TruncateFile(TruncateFileOptions) error
 	CopyToFile(CopyToFileOptions) error
diff --git a/internal/component_options.go b/internal/component_options.go
index 0efcd03b4..325728a28 100644
--- a/internal/component_options.go
+++ b/internal/component_options.go
@@ -115,9 +115,12 @@ type GetFileBlockOffsetsOptions struct {
 }
 
 type TruncateFileOptions struct {
-	Handle *handlemap.Handle
-	Name   string
-	Size   int64
+	Handle  *handlemap.Handle
+	Name    string
+	OldSize int64
+	NewSize int64
+	// This is equivalent to the storage block Size.
+	BlockSize int64
 }
 
 type CopyToFileOptions struct {
diff --git a/internal/mock_component.go b/internal/mock_component.go
index 6b21c9c54..18d8aabbe 100644
--- a/internal/mock_component.go
+++ b/internal/mock_component.go
@@ -443,7 +443,7 @@ func (mr *MockComponentMockRecorder) StreamDir(arg0 interface{}) *gomock.Call {
 }
 
 // ReadInBuffer mocks base method.
-func (m *MockComponent) ReadInBuffer(arg0 ReadInBufferOptions) (int, error) {
+func (m *MockComponent) ReadInBuffer(arg0 *ReadInBufferOptions) (int, error) {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "ReadInBuffer", arg0)
 	ret0, _ := ret[0].(int)
@@ -572,7 +572,7 @@ func (mr *MockComponentMockRecorder) TruncateFile(arg0 interface{}) *gomock.Call
 }
 
 // WriteFile mocks base method.
-func (m *MockComponent) WriteFile(arg0 WriteFileOptions) (int, error) {
+func (m *MockComponent) WriteFile(arg0 *WriteFileOptions) (int, error) {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "WriteFile", arg0)
 	ret0, _ := ret[0].(int)
diff --git a/internal/pipeline.go b/internal/pipeline.go
index 3bb50119b..3ab026aea 100644
--- a/internal/pipeline.go
+++ b/internal/pipeline.go
@@ -27,6 +27,7 @@ package internal
 import (
 	"context"
+	"errors"
 	"fmt"
 
 	"github.com/Seagate/cloudfuse/common/log"
@@ -119,23 +120,42 @@ func (p *Pipeline) Create() {
 func (p *Pipeline) Start(ctx context.Context) (err error) {
 	p.Create()
+
+	var errs []error
+
 	for i := len(p.components) - 1; i >= 0; i-- {
 		if err = p.components[i].Start(ctx); err != nil {
-			return err
+			errs = append(errs, err)
+			// Stop all the upstream components before returning; e.g., this prevents the
+			// upstream components from using the logger after it is destroyed.
+			for j := i + 1; j < len(p.components); j++ {
+				if err = p.components[j].Stop(); err != nil {
+					errs = append(errs, err)
+				}
+			}
+			break
 		}
 	}
 
+	if len(errs) > 0 {
+		return errors.Join(errs...)
+	}
+
 	return nil
 }
 
 // Stop : Stop the pipeline by calling 'Stop' method of each component
 func (p *Pipeline) Stop() (err error) {
-	for i := 0; i < len(p.components); i++ {
+	var errs []error
+	for i := range p.components {
 		if err = p.components[i].Stop(); err != nil {
-			return err
+			errs = append(errs, err)
 		}
 	}
+
+	if len(errs) > 0 {
+		return errors.Join(errs...)
+	}
+
 	return nil
 }
diff --git a/main.go b/main.go
index dc8d5666e..7b7aed3a2 100644
--- a/main.go
+++ b/main.go
@@ -34,11 +34,16 @@ import (
 // To use go:generate run command "NAME="component" go generate"
 func main() {
-	_ = cmd.Execute()
+	defer log.Destroy() // nolint:errcheck
+	// This recovers panics only for functions that run within this context; all the goroutines
+	// spawned by this function must handle their panics separately if required. Also, the FUSE
+	// callbacks don't run in this context, so panics originating from them can't be recovered here.
defer func() { if panicErr := recover(); panicErr != nil { - log.Err("PANIC: %v", panicErr) + log.Crit("PANIC: %v", panicErr) panic(panicErr) } }() + + _ = cmd.Execute() } diff --git a/scripts/go_installer.sh b/scripts/go_installer.sh index 6f778342d..198528d69 100755 --- a/scripts/go_installer.sh +++ b/scripts/go_installer.sh @@ -1,6 +1,6 @@ #!/bin/bash work_dir="${1%/}" -version="1.24.4" +version="1.25.4" arch=$(hostnamectl | grep "Arch" | rev | cut -d " " -f 1 | rev) if [ "$arch" != "arm64" ] diff --git a/setup/vmSetupAzSecPack.sh b/setup/vmSetupAzSecPack.sh index 7fa7313a2..f278a06d4 100755 --- a/setup/vmSetupAzSecPack.sh +++ b/setup/vmSetupAzSecPack.sh @@ -4,38 +4,6 @@ # Install Azure CLI curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash -# Update package lists -sudo apt-get update -y - -# Install required packages -sudo apt-get install apt-transport-https ca-certificates curl gnupg lsb-release -y - -# Create directory for Microsoft GPG key -sudo mkdir -p /etc/apt/keyrings - -# Download and install Microsoft GPG key -curl -sLS https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor | sudo tee /etc/apt/keyrings/microsoft.gpg > /dev/null - -# Set permissions for the GPG key -sudo chmod go+r /etc/apt/keyrings/microsoft.gpg - -# Get the distribution codename -AZ_DIST=$(lsb_release -cs) - -# Add Azure CLI repository to sources list -echo "Types: deb -URIs: https://packages.microsoft.com/repos/azure-cli/ -Suites: ${AZ_DIST} -Components: main -Architectures: $(dpkg --print-architecture) -Signed-by: /etc/apt/keyrings/microsoft.gpg" | sudo tee /etc/apt/sources.list.d/azure-cli.sources - -# Update package lists again -sudo apt-get update - -# Install Azure CLI again to ensure it's up to date -sudo apt-get install azure-cli -y - # Remove unnecessary packages sudo apt autoremove -y diff --git a/test/e2e_tests/data_validation_test.go b/test/e2e_tests/data_validation_test.go index 24f2479ae..500afa4c1 100644 --- a/test/e2e_tests/data_validation_test.go +++ b/test/e2e_tests/data_validation_test.go @@ -138,14 +138,106 @@ func (suite *dataValidationTestSuite) computeMD5(filePath string) []byte { return hash.Sum(nil) } -func (suite *dataValidationTestSuite) validateData(localFilePath string, remoteFilePath string) { - suite.T().Helper() +func (suite *dataValidationTestSuite) helperValidateFileContent( + localFilePath string, + remoteFilePath string, +) { + // check if file sizes are same + localFileInfo, err := os.Stat(localFilePath) + suite.NoError(err) + remoteFileInfo, err := os.Stat(remoteFilePath) + suite.NoError(err) + suite.Equal(localFileInfo.Size(), remoteFileInfo.Size()) localMD5sum := suite.computeMD5(localFilePath) remoteMD5sum := suite.computeMD5(remoteFilePath) suite.Equal(localMD5sum, remoteMD5sum) } +func (suite *dataValidationTestSuite) helperCreateFile( + localFilePath string, + remoteFilePath string, + size int64, +) { + buffer := make([]byte, 1*1024*1024) + rand.Read(buffer) + + writeFile := func(file *os.File) { + originalSize := size + for originalSize > 0 { + bytesToWrite := min(int64(len(buffer)), originalSize) + n, err := file.Write(buffer[0:bytesToWrite]) + suite.Equal(int(bytesToWrite), n) + ok := suite.NoError(err) + if !ok { + break + } + originalSize -= int64(n) + } + } + + localFile, err := os.Create(localFilePath) + suite.NoError(err) + writeFile(localFile) + err = localFile.Close() + suite.NoError(err) + + remoteFile, err := os.Create(remoteFilePath) + suite.NoError(err) + writeFile(remoteFile) + err = remoteFile.Close() + suite.NoError(err) +} + +func 
(suite *dataValidationTestSuite) helperTruncateFile( + localFilePath string, + remoteFilePath string, + size int64, +) { + srcFile, err := os.OpenFile(localFilePath, os.O_RDWR, 0666) + suite.NoError(err) + err = srcFile.Truncate(size) + suite.NoError(err) + + dstFile, err := os.OpenFile(remoteFilePath, os.O_RDWR, 0666) + suite.NoError(err) + err = dstFile.Truncate(size) + suite.NoError(err) + + err = srcFile.Close() + suite.NoError(err) + err = dstFile.Close() + suite.NoError(err) +} + +func (suite *dataValidationTestSuite) helperWriteToFile( + localFilePath string, + remoteFilePath string, + offset int64, + size int, +) { + buffer := make([]byte, 1*1024*1024) + + localFile, err := os.OpenFile(localFilePath, os.O_RDWR, 0666) + suite.NoError(err) + + remoteFile, err := os.OpenFile(remoteFilePath, os.O_RDWR, 0666) + suite.NoError(err) + + n, err := localFile.WriteAt(buffer[0:size], offset) + suite.Equal(size, n) + suite.NoError(err) + + n, err = remoteFile.WriteAt(buffer[0:size], offset) + suite.Equal(size, n) + suite.NoError(err) + + err = localFile.Close() + suite.NoError(err) + err = remoteFile.Close() + suite.NoError(err) +} + //----------------Utility Functions----------------------- // pass the file name and the function returns the LocalFilePath and MountedFilePath @@ -299,7 +391,7 @@ func (suite *dataValidationTestSuite) TestSmallFileData() { // delete the cache directory suite.dataValidationTestCleanup([]string{tObj.testCachePath}) - suite.validateData(localFilePath, remoteFilePath) + suite.helperValidateFileContent(localFilePath, remoteFilePath) suite.dataValidationTestCleanup([]string{localFilePath, remoteFilePath, tObj.testCachePath}) } @@ -328,7 +420,7 @@ func (suite *dataValidationTestSuite) TestMediumFileData() { // delete the cache directory suite.dataValidationTestCleanup([]string{tObj.testCachePath}) - suite.validateData(localFilePath, remoteFilePath) + suite.helperValidateFileContent(localFilePath, remoteFilePath) suite.dataValidationTestCleanup([]string{localFilePath, remoteFilePath, tObj.testCachePath}) } @@ -357,7 +449,7 @@ func (suite *dataValidationTestSuite) TestLargeFileData() { // delete the cache directory suite.dataValidationTestCleanup([]string{tObj.testCachePath}) - suite.validateData(localFilePath, remoteFilePath) + suite.helperValidateFileContent(localFilePath, remoteFilePath) suite.dataValidationTestCleanup([]string{localFilePath, remoteFilePath, tObj.testCachePath}) } @@ -437,7 +529,7 @@ func validateMultipleFilesData( suite.copyToMountDir(localFilePath, remoteFilePath) suite.dataValidationTestCleanup([]string{filepath.Join(tObj.testCachePath, fileName)}) - suite.validateData(localFilePath, remoteFilePath) + suite.helperValidateFileContent(localFilePath, remoteFilePath) suite.dataValidationTestCleanup( []string{localFilePath, filepath.Join(tObj.testCachePath, fileName)}, @@ -542,7 +634,7 @@ func (suite *dataValidationTestSuite) TestSparseFileRandomWrite() { suite.NoError(err) suite.Equal(165*int64(_1MB), fi.Size()) - suite.validateData(localFilePath, remoteFilePath) + suite.helperValidateFileContent(localFilePath, remoteFilePath) suite.dataValidationTestCleanup([]string{localFilePath, remoteFilePath, tObj.testCachePath}) } @@ -585,7 +677,7 @@ func (suite *dataValidationTestSuite) TestSparseFileRandomWriteBlockOverlap() { suite.NoError(err) suite.Equal(171*int64(_1MB), fi.Size()) - suite.validateData(localFilePath, remoteFilePath) + suite.helperValidateFileContent(localFilePath, remoteFilePath) suite.dataValidationTestCleanup([]string{localFilePath, 
remoteFilePath, tObj.testCachePath}) } @@ -640,7 +732,7 @@ func (suite *dataValidationTestSuite) TestFileReadBytesMultipleBlocks() { closeFileHandles(suite, fh) - suite.validateData(localFilePath, remoteFilePath) + suite.helperValidateFileContent(localFilePath, remoteFilePath) suite.dataValidationTestCleanup([]string{localFilePath, remoteFilePath, tObj.testCachePath}) } @@ -685,7 +777,7 @@ func (suite *dataValidationTestSuite) TestFileReadBytesOneBlock() { closeFileHandles(suite, fh) - suite.validateData(localFilePath, remoteFilePath) + suite.helperValidateFileContent(localFilePath, remoteFilePath) suite.dataValidationTestCleanup([]string{localFilePath, remoteFilePath, tObj.testCachePath}) } @@ -715,7 +807,7 @@ func (suite *dataValidationTestSuite) TestRandomWriteRaceCondition() { suite.NoError(err) suite.Equal(145*int64(_1MB), fi.Size()) - suite.validateData(localFilePath, remoteFilePath) + suite.helperValidateFileContent(localFilePath, remoteFilePath) suite.dataValidationTestCleanup([]string{localFilePath, remoteFilePath, tObj.testCachePath}) } diff --git a/test/e2e_tests/dir_test.go b/test/e2e_tests/dir_test.go index be5edcf76..76e6c51e1 100644 --- a/test/e2e_tests/dir_test.go +++ b/test/e2e_tests/dir_test.go @@ -602,7 +602,8 @@ func (suite *dirTestSuite) TestGitStash() { suite.NoError(err) suite.Equal(10, n) suite.Equal("TestString", string(data)) - _ = f.Close() + err = f.Close() + suite.NoError(err) cmd = exec.Command("git", "status") cliOut, err = cmd.Output() diff --git a/test/e2e_tests/file_test.go b/test/e2e_tests/file_test.go index f12c5b3d1..41fc3bf35 100644 --- a/test/e2e_tests/file_test.go +++ b/test/e2e_tests/file_test.go @@ -95,6 +95,7 @@ func (suite *fileTestSuite) fileTestCleanup(toRemove []string) { if err != nil { fmt.Printf("FileTestSuite::fileTestCleanup : Cleanup failed with error %v\n", err) } + suite.NoError(err) } } @@ -173,6 +174,8 @@ func (suite *fileTestSuite) TestOpenFlag_O_TRUNC() { suite.NoError(err) read, _ = srcFile.Read(tempbuf) suite.Equal(0, read) + err = srcFile.Close() + suite.NoError(err) } func (suite *fileTestSuite) TestFileCreateUtf8Char() { @@ -447,7 +450,7 @@ func (suite *fileTestSuite) TestFileNameConflict() { suite.NoError(err) } -// # Copy file from once directory to another +// # Copy file from one directory to another func (suite *fileTestSuite) TestFileCopy() { dirName := filepath.Join(suite.testPath, "test123") fileName := filepath.Join(suite.testPath, "test") @@ -458,15 +461,17 @@ func (suite *fileTestSuite) TestFileCopy() { srcFile, err := os.OpenFile(fileName, os.O_CREATE|os.O_RDWR, 0777) suite.NoError(err) - defer srcFile.Close() dstFile, err := os.Create(dstFileName) suite.NoError(err) - defer dstFile.Close() _, err = io.Copy(srcFile, dstFile) suite.NoError(err) - dstFile.Close() + + err = srcFile.Close() + suite.NoError(err) + err = dstFile.Close() + suite.NoError(err) suite.fileTestCleanup([]string{dirName}) } @@ -757,7 +762,7 @@ func (suite *fileTestSuite) TestListDirReadLink() { } /* -func (suite *fileTestSuite) TestReadOnlyFile() { + func (suite *fileTestSuite) TestReadOnlyFile() { if suite.adlsTest == true { fileName := filepath.Join(suite.testPath, "readOnlyFile.txt") srcFile, err := os.Create(fileName) @@ -766,13 +771,19 @@ func (suite *fileTestSuite) TestReadOnlyFile() { // make it read only permissions err = os.Chmod(fileName, 0444) suite.Equal(nil, err) - _, err = os.OpenFile(fileName, os.O_RDONLY, 0444) + f, err := os.OpenFile(fileName, os.O_RDONLY, 0444) suite.Equal(nil, err) - _, err = os.OpenFile(fileName, os.O_RDWR, 
0444) + err = f.Close() + suite.Equal(nil, err) + f, err = os.OpenFile(fileName, os.O_RDWR, 0444) suite.NotNil(err) + if f != nil { + closeErr := f.Close() + suite.Equal(nil, closeErr) + } suite.fileTestCleanup([]string{fileName}) } -} */ +}*/ func (suite *fileTestSuite) TestCreateReadOnlyFile() { // File permissions not working on Windows. @@ -783,10 +794,15 @@ func (suite *fileTestSuite) TestCreateReadOnlyFile() { if suite.adlsTest == true { fileName := filepath.Join(suite.testPath, "createReadOnlyFile.txt") srcFile, err := os.OpenFile(fileName, os.O_CREATE, 0444) - srcFile.Close() suite.NoError(err) - _, err = os.OpenFile(fileName, os.O_RDONLY, 0444) + err = srcFile.Close() suite.NoError(err) + + file, err := os.OpenFile(fileName, os.O_RDONLY, 0444) + suite.NoError(err) + err = file.Close() + suite.NoError(err) + suite.fileTestCleanup([]string{fileName}) } } diff --git a/test/e2e_tests/truncate_test.go b/test/e2e_tests/truncate_test.go new file mode 100644 index 000000000..fa8137609 --- /dev/null +++ b/test/e2e_tests/truncate_test.go @@ -0,0 +1,124 @@ +//go:build !unittest +// +build !unittest + +/* + _____ _____ _____ ____ ______ _____ ------ + | | | | | | | | | | | | | + | | | | | | | | | | | | | + | --- | | | | |-----| |---- | | |-----| |----- ------ + | | | | | | | | | | | | | + | ____| |_____ | ____| | ____| | |_____| _____| |_____ |_____ + + + Licensed under the MIT License . + + Copyright © 2020-2025 Microsoft Corporation. All rights reserved. + Author : + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE +*/ + +package e2e_tests + +import "os" + +func (suite *dataValidationTestSuite) TestShrinkExistingFile() { + fileName := "shrink_existing_file" + localFilePath, remoteFilePath := convertFileNameToFilePath(fileName) + + t := func(initSize int64, shrinkSize int64) { + suite.helperCreateFile(localFilePath, remoteFilePath, initSize) + suite.helperValidateFileContent(localFilePath, remoteFilePath) + suite.helperTruncateFile(localFilePath, remoteFilePath, shrinkSize) + suite.helperValidateFileContent(localFilePath, remoteFilePath) + suite.dataValidationTestCleanup([]string{localFilePath, remoteFilePath, tObj.testCachePath}) + } + + t(1024*1024, 512*1024) + t(8*1024*1024, 4*1024*1024) + t(32*1024*1024, 16*1024*1024) + t(32*1024*1024+16, 16*1024*1024-9) + t(1*1024*1024*1024, 512*1024*1024-18) + t(10*1024*1024*1024, 1*1024*1024-8) +} + +func (suite *dataValidationTestSuite) TestExpandExistingFile() { + fileName := "expand_existing_file" + localFilePath, remoteFilePath := convertFileNameToFilePath(fileName) + + t := func(initSize int64, expandSize int64) { + suite.helperCreateFile(localFilePath, remoteFilePath, initSize) + suite.helperValidateFileContent(localFilePath, remoteFilePath) + suite.helperTruncateFile(localFilePath, remoteFilePath, expandSize) + suite.helperValidateFileContent(localFilePath, remoteFilePath) + suite.dataValidationTestCleanup([]string{localFilePath, remoteFilePath, tObj.testCachePath}) + } + + t(1024*1024, 2*1024*1024) + t(8*1024*1024, 16*1024*1024) + t(8*1024*1024-1, 16*1024*1024+18) + t(16*1024*1024-1, 256*1024*1024+18) + t(1*1024*1024*1024, 2*1024*1024*1024+16) + t(1*1024*1024, 10*1024*1024*1024+8) +} + +func (suite *dataValidationTestSuite) TestTruncateNonExistingFile() { + fileName := "truncate_non_existing_file" + localFilePath, remoteFilePath := convertFileNameToFilePath(fileName) + + lErr := os.Truncate(localFilePath, 1024*1024) + suite.Error(lErr) + + rErr := os.Truncate(remoteFilePath, 1024*1024) + suite.Error(rErr) + + suite.dataValidationTestCleanup([]string{localFilePath, remoteFilePath, tObj.testCachePath}) +} + +func (suite *dataValidationTestSuite) TestWriteBeforeTruncate() { + fileName := "write_before_truncate" + localFilePath, remoteFilePath := convertFileNameToFilePath(fileName) + + suite.helperCreateFile(localFilePath, remoteFilePath, 1024*1024) + suite.helperWriteToFile(localFilePath, remoteFilePath, 512*1024, 512*1024) + suite.helperTruncateFile(localFilePath, remoteFilePath, 512*1024) + suite.helperValidateFileContent(localFilePath, remoteFilePath) + suite.dataValidationTestCleanup([]string{localFilePath, remoteFilePath, tObj.testCachePath}) +} + +func (suite *dataValidationTestSuite) TestWriteAfterTruncate() { + fileName := "write_after_truncate" + localFilePath, remoteFilePath := convertFileNameToFilePath(fileName) + + suite.helperCreateFile(localFilePath, remoteFilePath, 0) + suite.helperTruncateFile(localFilePath, remoteFilePath, 512*1024) + suite.helperWriteToFile(localFilePath, remoteFilePath, 512*1024, 512*1024) + suite.helperValidateFileContent(localFilePath, remoteFilePath) + suite.dataValidationTestCleanup([]string{localFilePath, remoteFilePath, tObj.testCachePath}) +} + +func (suite *dataValidationTestSuite) TestTruncateToZero() { + fileName := "truncate_to_zero" + 
localFilePath, remoteFilePath := convertFileNameToFilePath(fileName) + + suite.helperCreateFile(localFilePath, remoteFilePath, 1024*1024) + suite.helperTruncateFile(localFilePath, remoteFilePath, 0) + suite.helperValidateFileContent(localFilePath, remoteFilePath) + suite.dataValidationTestCleanup([]string{localFilePath, remoteFilePath, tObj.testCachePath}) +} diff --git a/test/fio/hole_inside_blocks.fio b/test/fio/hole_inside_blocks.fio new file mode 100644 index 000000000..e24abc26f --- /dev/null +++ b/test/fio/hole_inside_blocks.fio @@ -0,0 +1,18 @@ +[global] +name=blobfuse_file_integrity +bs=1M +filename=hole_inside_blocks_fio.data +verify=crc32c +verify_fatal=1 +verify_dump=1 +group_reporting + +[hole] +size=10G +# Create 3M Hole after each write call. +# This will create the holes inside the blocks(assuming block size of 8M) +rw=write:3M +number_ios=2560 +ioengine=sync +fallocate=none +unlink=1 diff --git a/test/fio/hole_over_blocks.fio b/test/fio/hole_over_blocks.fio new file mode 100644 index 000000000..e9ff9c0f3 --- /dev/null +++ b/test/fio/hole_over_blocks.fio @@ -0,0 +1,18 @@ +[global] +name=blobfuse_file_integrity +bs=1M +filename=hole_over_blocks_fio.data +verify=crc32c +verify_fatal=1 +group_reporting + +[hole] +size=10G +# Create 16M Hole/2 blocks after each write call. +# This will create the holes over the blocks(assuming block size of 8M), where some blocks will be +# skipped and other blocks to have some null data at the end of the block. +rw=write:15M +number_ios=640 +ioengine=sync +fallocate=none +unlink=1 diff --git a/test/fio/rw.fio b/test/fio/rw.fio new file mode 100644 index 000000000..b6d70691d --- /dev/null +++ b/test/fio/rw.fio @@ -0,0 +1,7 @@ +[writers] +bs=1M +ioengine=sync +verify=crc32c +verify_fatal=1 +group_reporting +unlink=1 \ No newline at end of file diff --git a/test/fio/seq-write-1f-10th.fio b/test/fio/seq-write-1f-10th.fio new file mode 100644 index 000000000..fc98a918f --- /dev/null +++ b/test/fio/seq-write-1f-10th.fio @@ -0,0 +1,78 @@ +[global] +name=blobfuse_file_integrity +bs=1M +size=10G +filename=stripe_write_fio.data +verify=crc32c +verify_fatal=1 +ioengine=sync +group_reporting + +[job1] +offset=0 +size=1G +rw=write +fallocate=none + + +[job2] +offset=1G +size=1G +rw=write +fallocate=none + + +[job3] +offset=2G +size=1G +rw=write +fallocate=none + + +[job4] +offset=3G +size=1G +rw=write +fallocate=none + + +[job5] +offset=4G +size=1G +rw=write +fallocate=none + + +[job6] +offset=5G +size=1G +rw=write +fallocate=none + + +[job7] +offset=6G +size=1G +rw=write +fallocate=none + + +[job8] +offset=7G +size=1G +rw=write +fallocate=none + + +[job9] +offset=8G +size=1G +rw=write +fallocate=none + + +[job10] +offset=9G +size=1G +rw=write +fallocate=none diff --git a/test/scenarios/blk_cache_integrity_linux_test.go b/test/scenarios/blk_cache_integrity_linux_test.go new file mode 100644 index 000000000..02c3f0c57 --- /dev/null +++ b/test/scenarios/blk_cache_integrity_linux_test.go @@ -0,0 +1,186 @@ +//go:build linux + +/* + Licensed under the MIT License . + + Copyright © 2020-2025 Microsoft Corporation. All rights reserved. 
+   Author :
+
+   Permission is hereby granted, free of charge, to any person obtaining a copy
+   of this software and associated documentation files (the "Software"), to deal
+   in the Software without restriction, including without limitation the rights
+   to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+   copies of the Software, and to permit persons to whom the Software is
+   furnished to do so, subject to the following conditions:
+
+   The above copyright notice and this permission notice shall be included in all
+   copies or substantial portions of the Software.
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+   IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+   AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+   LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+   OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+   SOFTWARE
+*/
+
+package scenarios
+
+import (
+	"crypto/rand"
+	"io"
+	"os"
+	"path/filepath"
+	"sync"
+	"testing"
+
+	"golang.org/x/sys/unix"
+
+	"github.com/stretchr/testify/assert"
+)
+
+// Test stripe reading with dup.
+func TestStripeReadingWithDup(t *testing.T) {
+	t.Parallel()
+	filename := "testfile_stripe_reading_dup.txt"
+	content := []byte("Stripe Reading With Dup Test data")
+	tempbuf := make([]byte, len(content))
+	offsets := []int64{69, 8*1024*1024 + 69, 16*1024*1024 + 69}
+	for _, mnt := range mountpoints {
+		filePath := filepath.Join(mnt, filename)
+		file, err := os.Create(filePath)
+		assert.NoError(t, err)
+		// Write to the file.
+		for _, off := range offsets {
+			written, err := file.WriteAt(content, int64(off))
+			assert.NoError(t, err)
+			assert.Equal(t, len(content), written)
+		}
+		err = file.Close()
+		assert.NoError(t, err)
+		// Read from the different offsets using different file descriptors.
+		file0, err := os.OpenFile(filePath, os.O_RDWR, 0644)
+		assert.NoError(t, err)
+		fd1, err := unix.Dup(int(file0.Fd()))
+		assert.NoError(t, err)
+		assert.NotEqual(t, int(file0.Fd()), fd1)
+		fd2, err := unix.Dup(int(file0.Fd()))
+		assert.NoError(t, err)
+		assert.NotEqual(t, int(file0.Fd()), fd2)
+
+		bytesread, err := file0.ReadAt(tempbuf, offsets[0]) //read at 0MB
+		assert.NoError(t, err)
+		assert.Equal(t, len(tempbuf), bytesread)
+		assert.Equal(t, content, tempbuf)
+		bytesread, err = unix.Pread(fd1, tempbuf, offsets[1]) //read at 8MB
+		assert.NoError(t, err)
+		assert.Equal(t, len(tempbuf), bytesread)
+		assert.Equal(t, content, tempbuf)
+		bytesread, err = unix.Pread(fd2, tempbuf, offsets[2]) //read at 16MB
+		assert.NoError(t, err)
+		assert.Equal(t, len(tempbuf), bytesread)
+		assert.Equal(t, content, tempbuf)
+
+		err = file0.Close()
+		assert.NoError(t, err)
+		err = unix.Close(fd1)
+		assert.NoError(t, err)
+		err = unix.Close(fd2)
+		assert.NoError(t, err)
+	}
+
+	checkFileIntegrity(t, filename)
+	removeFiles(t, filename)
+}
+
+// Dup the FD and do parallel flush calls while writing.
+func TestParallelFlushCallsByDuping(t *testing.T) {
+	filename := "testfile_parallel_flush_calls_using_dup.txt"
+	databuffer := make([]byte, 4*1024) // 4KB buffer
+	_, err := io.ReadFull(rand.Reader, databuffer)
+	assert.NoError(t, err)
+
+	for _, mnt := range mountpoints {
+		filePath := filepath.Join(mnt, filename)
+		file, err := os.Create(filePath)
+		assert.NoError(t, err)
+
+		fd1, err := unix.Dup(int(file.Fd()))
+		assert.NoError(t, err)
+		assert.NotEqual(t, int(file.Fd()), fd1)
+
+		// For each 1MB written, trigger a flush call from another goroutine.
+		trigger_flush := make(chan struct{}, 1)
+		var wg sync.WaitGroup
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for {
+				_, ok := <-trigger_flush
+				if !ok {
+					break
+				}
+				err := unix.Fdatasync(fd1)
+				assert.NoError(t, err)
+			}
+		}()
+		// Write 40M data
+		for i := 0; i < 40*1024*1024; i += 4 * 1024 {
+			if i%(1*1024*1024) == 0 {
+				trigger_flush <- struct{}{}
+			}
+			byteswritten, err := file.Write(databuffer)
+			assert.Equal(t, 4*1024, byteswritten)
+			assert.NoError(t, err)
+		}
+		close(trigger_flush)
+		wg.Wait()
+		err = file.Close()
+		assert.NoError(t, err)
+		err = unix.Close(fd1)
+		assert.NoError(t, err)
+	}
+
+	checkFileIntegrity(t, filename)
+	removeFiles(t, filename)
+}
+
+// Test stripe writing with dup: the same as stripe writing, but rather than opening the file many
+// times, duplicate the file descriptor.
+func TestStripeWritingWithDup(t *testing.T) {
+	t.Parallel()
+	filename := "testfile_stripe_writing_dup.txt"
+	content := []byte("Stripe writing with dup test data")
+	for _, mnt := range mountpoints {
+		filePath := filepath.Join(mnt, filename)
+		file, err := os.Create(filePath)
+		assert.NoError(t, err)
+		fd1, err := unix.Dup(int(file.Fd()))
+		assert.NoError(t, err)
+		assert.NotEqual(t, int(file.Fd()), fd1)
+
+		fd2, err := unix.Dup(int(file.Fd()))
+		assert.NoError(t, err)
+		assert.NotEqual(t, int(file.Fd()), fd2)
+
+		written, err := file.WriteAt(content, int64(0))
+		assert.NoError(t, err)
+		assert.Equal(t, len(content), written)
+		written, err = unix.Pwrite(fd1, content, int64(8*1024*1024))
+		assert.NoError(t, err)
+		assert.Equal(t, len(content), written)
+		written, err = unix.Pwrite(fd2, content, int64(16*1024*1024))
+		assert.NoError(t, err)
+		assert.Equal(t, len(content), written)
+
+		err = file.Close()
+		assert.NoError(t, err)
+		err = unix.Close(fd1)
+		assert.NoError(t, err)
+		err = unix.Close(fd2)
+		assert.NoError(t, err)
+	}
+
+	checkFileIntegrity(t, filename)
+	removeFiles(t, filename)
+}
diff --git a/test/scenarios/blk_cache_integrity_test.go b/test/scenarios/blk_cache_integrity_test.go
new file mode 100644
index 000000000..33b48feea
--- /dev/null
+++ b/test/scenarios/blk_cache_integrity_test.go
@@ -0,0 +1,991 @@
+/*
+   Licensed under the MIT License .
+
+   Copyright © 2020-2025 Microsoft Corporation. All rights reserved.
+   Author :
+
+   Permission is hereby granted, free of charge, to any person obtaining a copy
+   of this software and associated documentation files (the "Software"), to deal
+   in the Software without restriction, including without limitation the rights
+   to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+   copies of the Software, and to permit persons to whom the Software is
+   furnished to do so, subject to the following conditions:
+
+   The above copyright notice and this permission notice shall be included in all
+   copies or substantial portions of the Software.
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE +*/ + +package scenarios + +import ( + "crypto/md5" + "crypto/rand" + "encoding/hex" + "flag" + "fmt" + "io" + "os" + "os/user" + "path/filepath" + "strings" + "sync" + "testing" + + "github.com/stretchr/testify/assert" +) + +// Specify Mountpoints to check the file integrity across filesystems. +// Specifying one Mountpoint will check all the files for the errors. +var mountpoints []string + +func calculateMD5(t *testing.T, filePath string) (string, error) { + file, err := os.Open(filePath) + if err != nil { + return "", err + } + defer func() { + err := file.Close() + assert.NoError(t, err) + }() + + hash := md5.New() + if _, err := io.Copy(hash, file); err != nil { + return "", err + } + + return hex.EncodeToString(hash.Sum(nil)), nil +} + +func checkFileIntegrity(t *testing.T, filename string) { + if len(mountpoints) > 1 { + var referenceMD5 string + var referenceSize int64 + for i, mnt := range mountpoints { + filePath := filepath.Join(mnt, filename) + fi, err := os.Stat(filePath) + assert.NoError(t, err) + md5sum, err := calculateMD5(t, filePath) + assert.NoError(t, err) + + if i == 0 { + referenceMD5 = md5sum + referenceSize = fi.Size() + } else { + assert.Equal(t, referenceMD5, md5sum, "File content mismatch between mountpoints") + assert.Equal(t, referenceSize, fi.Size(), "File Size mismatch between mountpoints") + } + } + } +} + +func removeFiles(t *testing.T, filename string) { + for _, mnt := range mountpoints { + filePath := filepath.Join(mnt, filename) + err := os.Remove(filePath) + assert.NoError(t, err) + } +} + +func TestFileOpen(t *testing.T) { + t.Parallel() + filename := "testfile_open.txt" + for _, mnt := range mountpoints { + filePath := filepath.Join(mnt, filename) + file, err := os.Create(filePath) + assert.NoError(t, err) + err = file.Close() + assert.NoError(t, err) + + file, err = os.Open(filePath) + assert.NoError(t, err) + err = file.Close() + assert.NoError(t, err) + } + + checkFileIntegrity(t, filename) + removeFiles(t, filename) +} + +func TestFileRead(t *testing.T) { + t.Parallel() + filename := "testfile_read.txt" + content := []byte("Hello, World!") + for _, mnt := range mountpoints { + filePath := filepath.Join(mnt, filename) + err := os.WriteFile(filePath, content, 0644) + assert.NoError(t, err) + + file, err := os.Open(filePath) + assert.NoError(t, err) + + readContent := make([]byte, len(content)) + _, err = file.Read(readContent) + assert.True(t, err == nil || err == io.EOF) + + assert.Equal(t, string(content), string(readContent)) + + err = file.Close() + assert.NoError(t, err) + } + + checkFileIntegrity(t, filename) + removeFiles(t, filename) +} + +func TestFileWrite(t *testing.T) { + t.Parallel() + filename := "testfile_write.txt" + content := []byte("Hello, World!") + for _, mnt := range mountpoints { + filePath := filepath.Join(mnt, filename) + file, err := os.Create(filePath) + assert.NoError(t, err) + + _, err = file.Write(content) + assert.NoError(t, err) + + err = file.Close() + assert.NoError(t, err) + + readContent, err := os.ReadFile(filePath) + assert.NoError(t, err) + + 
assert.Equal(t, string(content), string(readContent))
+	}
+
+	checkFileIntegrity(t, filename)
+	removeFiles(t, filename)
+}
+
+func TestFsync(t *testing.T) {
+	t.Parallel()
+	filename := "testfile_fsync.txt"
+	content := []byte("Hello, World!")
+	for _, mnt := range mountpoints {
+		filePath := filepath.Join(mnt, filename)
+		file, err := os.Create(filePath)
+		assert.NoError(t, err)
+
+		_, err = file.Write(content)
+		assert.NoError(t, err)
+
+		err = file.Sync()
+		assert.NoError(t, err)
+
+		readContent, err := os.ReadFile(filePath)
+		assert.NoError(t, err)
+
+		assert.Equal(t, string(content), string(readContent))
+
+		err = file.Close()
+		assert.NoError(t, err)
+	}
+
+	checkFileIntegrity(t, filename)
+	removeFiles(t, filename)
+}
+
+func TestFsyncWhileWriting(t *testing.T) {
+	t.Parallel()
+	var err error
+	filename := "testfile_fsync_while_writing.txt"
+	readBufSize := 4 * 1024
+	content := make([]byte, readBufSize)
+	_, err = io.ReadFull(rand.Reader, content)
+	assert.NoError(t, err)
+	expectedContent := make([]byte, 4*1024, 10*1024*1024)
+	copy(expectedContent, content)
+	actualContent := make([]byte, 10*1024*1024)
+	for _, mnt := range mountpoints {
+		filePath := filepath.Join(mnt, filename)
+		file, err := os.Create(filePath)
+		assert.NoError(t, err)
+
+		// Write 9MB of data in 4K buffers; after each write, read the data back through
+		// another handle.
+		for i := 0; i*readBufSize < 9*1024*1024; i++ {
+			bytesWritten, err := file.Write(content)
+			assert.NoError(t, err)
+			assert.Equal(t, len(content), bytesWritten)
+
+			// We cannot do an fsync for every 4K write, as the test would take too long
+			// to finish; do it for every 512K.
+			if (i*readBufSize)%(512*1024) == 0 {
+				err = file.Sync()
+				assert.NoError(t, err)
+			}
+
+			file1, err := os.Open(filePath)
+			assert.NoError(t, err)
+			bytesRead, err := file1.Read(actualContent)
+			assert.Equal(t, (i+1)*readBufSize, bytesRead)
+			assert.NoError(t, err)
+			err = file1.Close()
+			assert.NoError(t, err)
+
+			assert.Equal(t, expectedContent[:(i+1)*readBufSize], actualContent[:(i+1)*readBufSize])
+			expectedContent = append(expectedContent, content...)
+		}
+
+		err = file.Close()
+		assert.NoError(t, err)
+	}
+
+	checkFileIntegrity(t, filename)
+	removeFiles(t, filename)
+}
+
+// Add tests for reading and writing to the newly created and modified blocks while truncating.
+const (
+	truncate int = iota
+	ftruncate
+)
+
+// tests for the truncate function, which works on a path
+func FileTruncate(t *testing.T, filename string, initialSize int, finalSize int, call int) {
+	content := make([]byte, initialSize)
+	_, err := io.ReadFull(rand.Reader, content)
+	assert.NoError(t, err)
+
+	for _, mnt := range mountpoints {
+		filePath := filepath.Join(mnt, filename)
+		err := os.WriteFile(filePath, content, 0644)
+		assert.NoError(t, err)
+
+		switch call {
+		case truncate:
+			err = os.Truncate(filePath, int64(finalSize))
+			assert.NoError(t, err)
+		case ftruncate:
+			file, err := os.OpenFile(filePath, os.O_RDWR, 0644)
+			assert.NoError(t, err)
+			err = file.Truncate(int64(finalSize))
+			assert.NoError(t, err)
+			err = file.Close()
+			assert.NoError(t, err)
+		}
+
+		readContent, err := os.ReadFile(filePath)
+		assert.NoError(t, err)
+
+		expectedContent := make([]byte, initialSize)
+		copy(expectedContent, content)
+		if finalSize > initialSize {
+			expectedContent = append(expectedContent, make([]byte, finalSize-initialSize)...)
+ } else { + expectedContent = expectedContent[:finalSize] + } + assert.Equal(t, string(expectedContent), string(readContent)) + } + + checkFileIntegrity(t, filename) + removeFiles(t, filename) +} + +func TestFileTruncateSameSize(t *testing.T) { + t.Parallel() + filename := "testfile_truncate_same_size.txt" + FileTruncate(t, filename, 10, 10, truncate) + FileTruncate(t, filename, 9*1024*1024, 9*1024*1024, truncate) + FileTruncate(t, filename, 8*1024*1024, 8*1024*1024, truncate) +} + +func TestFileTruncateShrink(t *testing.T) { + t.Parallel() + + filename := "testfile_truncate_shrink.txt" + var wg sync.WaitGroup + + // Define table tests + tests := []struct { + name string + initial int + final int + truncation int + }{ + {fmt.Sprintf("%s_20_5_truncate", filename), 20, 5, truncate}, + {fmt.Sprintf("%s_10M_5K_truncate", filename), 10 * 1024 * 1024, 5 * 1024, truncate}, + {fmt.Sprintf("%s_20M_5K_truncate", filename), 20 * 1024 * 1024, 5 * 1024, truncate}, + { + fmt.Sprintf("%s_30M_20M_truncate", filename), + 30 * 1024 * 1024, + 20 * 1024 * 1024, + truncate, + }, + {fmt.Sprintf("%s_20_5_ftruncate", filename), 20, 5, ftruncate}, + {fmt.Sprintf("%s_10M_5K_ftruncate", filename), 10 * 1024 * 1024, 5 * 1024, ftruncate}, + {fmt.Sprintf("%s_20M_5K_ftruncate", filename), 20 * 1024 * 1024, 5 * 1024, ftruncate}, + { + fmt.Sprintf("%s_30M_20M_ftruncate", filename), + 30 * 1024 * 1024, + 20 * 1024 * 1024, + ftruncate, + }, + } + + // Add the number of test cases to the WaitGroup + wg.Add(len(tests)) + + // Iterate over the test cases + for _, tt := range tests { + go func(tt struct { + name string + initial int + final int + truncation int + }) { + defer wg.Done() + FileTruncate(t, tt.name, tt.initial, tt.final, tt.truncation) + }(tt) + } + + // Wait for all goroutines to complete + wg.Wait() +} + +func TestFileTruncateExpand(t *testing.T) { + t.Parallel() + + filename := "testfile_truncate_expand.txt" + var wg sync.WaitGroup + + // Define table tests + tests := []struct { + name string + initial int + final int + truncation int + }{ + {fmt.Sprintf("%s_5_20_truncate", filename), 5, 20, truncate}, + {fmt.Sprintf("%s_5K_10M_truncate", filename), 5 * 1024, 10 * 1024 * 1024, truncate}, + {fmt.Sprintf("%s_5K_20M_truncate", filename), 5 * 1024, 20 * 1024 * 1024, truncate}, + { + fmt.Sprintf("%s_20M_30M_truncate", filename), + 20 * 1024 * 1024, + 30 * 1024 * 1024, + truncate, + }, + {fmt.Sprintf("%s_5_20_ftruncate", filename), 5, 20, ftruncate}, + {fmt.Sprintf("%s_5K_10M_ftruncate", filename), 5 * 1024, 10 * 1024 * 1024, ftruncate}, + {fmt.Sprintf("%s_5K_20M_ftruncate", filename), 5 * 1024, 20 * 1024 * 1024, ftruncate}, + { + fmt.Sprintf("%s_20M_30M_ftruncate", filename), + 20 * 1024 * 1024, + 30 * 1024 * 1024, + ftruncate, + }, + } + + // Add the number of test cases to the WaitGroup + wg.Add(len(tests)) + + // Iterate over the test cases + for _, tt := range tests { + go func(tt struct { + name string + initial int + final int + truncation int + }) { + defer wg.Done() + FileTruncate(t, tt.name, tt.initial, tt.final, tt.truncation) + }(tt) + } + + // Wait for all goroutines to complete + wg.Wait() +} + +func TestTruncateNoFile(t *testing.T) { + t.Parallel() + filename := "testfile_truncate_no_file.txt" + + for _, mnt := range mountpoints { + filePath := filepath.Join(mnt, filename) + err := os.Truncate(filePath, 5) + assert.Error(t, err) + assert.ErrorContains(t, err, "no such file or directory") + } +} + +func WriteTruncateClose(t *testing.T, filename string, writeSize int, truncSize int, call int) { + content 
:= make([]byte, writeSize) + _, err := io.ReadFull(rand.Reader, content) + assert.NoError(t, err) + + for _, mnt := range mountpoints { + filePath := filepath.Join(mnt, filename) + file, err := os.Create(filePath) + assert.NoError(t, err) + written, err := file.Write(content) + assert.NoError(t, err) + assert.Equal(t, writeSize, written) + if call == truncate { + err := os.Truncate(filePath, int64(truncSize)) + assert.NoError(t, err) + } else { + err := file.Truncate(int64(truncSize)) + assert.NoError(t, err) + } + err = file.Close() + assert.NoError(t, err) + } + + checkFileIntegrity(t, filename) + removeFiles(t, filename) +} + +func TestWriteTruncateClose(t *testing.T) { + t.Parallel() + + var wg sync.WaitGroup + + // Define table tests + tests := []struct { + name string + initial int + final int + truncation int + }{ + {"testWriteTruncateClose1M7M_truncate", 1 * 1024 * 1024, 7 * 1024 * 1024, truncate}, + {"testWriteTruncateClose1M13M_truncate", 1 * 1024 * 1024, 13 * 1024 * 1024, truncate}, + {"testWriteTruncateClose1M20M_truncate", 1 * 1024 * 1024, 20 * 1024 * 1024, truncate}, + {"testWriteTruncateClose7M1M_truncate", 7 * 1024 * 1024, 1 * 1024 * 1024, truncate}, + {"testWriteTruncateClose13M1M_truncate", 13 * 1024 * 1024, 1 * 1024 * 1024, truncate}, + {"testWriteTruncateClose20M1M_truncate", 20 * 1024 * 1024, 1 * 1024 * 1024, truncate}, + {"testWriteTruncateClose1M7M_ftruncate", 1 * 1024 * 1024, 7 * 1024 * 1024, ftruncate}, + {"testWriteTruncateClose1M13M_ftruncate", 1 * 1024 * 1024, 13 * 1024 * 1024, ftruncate}, + {"testWriteTruncateClose1M20M_ftruncate", 1 * 1024 * 1024, 20 * 1024 * 1024, ftruncate}, + {"testWriteTruncateClose7M1M_ftruncate", 7 * 1024 * 1024, 1 * 1024 * 1024, ftruncate}, + {"testWriteTruncateClose13M1M_ftruncate", 13 * 1024 * 1024, 1 * 1024 * 1024, ftruncate}, + {"testWriteTruncateClose20M1M_ftruncate", 20 * 1024 * 1024, 1 * 1024 * 1024, ftruncate}, + } + + // Add the number of test cases to the WaitGroup + wg.Add(len(tests)) + + // Iterate over the test cases + for _, tt := range tests { + go func(tt struct { + name string + initial int + final int + truncation int + }) { + defer wg.Done() + WriteTruncateClose(t, tt.name, tt.initial, tt.final, tt.truncation) + }(tt) + } + + // Wait for all goroutines to complete + wg.Wait() +} + +func TestWrite10MB(t *testing.T) { + t.Parallel() + filename := "testfile_write_10mb.txt" + content := make([]byte, 10*1024*1024) // 10MB of data + _, err := io.ReadFull(rand.Reader, content) + assert.NoError(t, err) + + for _, mnt := range mountpoints { + filePath := filepath.Join(mnt, filename) + err := os.WriteFile(filePath, content, 0644) + assert.NoError(t, err) + + readContent, err := os.ReadFile(filePath) + assert.NoError(t, err) + assert.Equal(t, content, readContent) + assert.Len(t, readContent, len(content)) + } + + checkFileIntegrity(t, filename) + removeFiles(t, filename) +} + +// Test Read Write From Same handle +func TestOpenWriteRead(t *testing.T) { + t.Parallel() + filename := "testfile_open_write_read.txt" + tempbuffer := make([]byte, 4*1024) + databuffer := make([]byte, 4*1024) // 4KB buffer + _, err := io.ReadFull(rand.Reader, databuffer) + assert.NoError(t, err) + + for _, mnt := range mountpoints { + filePath := filepath.Join(mnt, filename) + file, err := os.Create(filePath) + assert.NoError(t, err) + written, err := file.WriteAt(databuffer, 200) + assert.NoError(t, err) + assert.Equal(t, 4096, written) + read, err := file.Read(tempbuffer) + assert.NoError(t, err) + assert.Equal(t, 4096, read) + err = file.Close() + 
assert.NoError(t, err)
+	}
+
+	checkFileIntegrity(t, filename)
+	removeFiles(t, filename)
+
+}
+
+// Test for writing from 1 fd and reading from another fd.
+func TestOpenWriteReadMultipleHandles(t *testing.T) {
+	t.Parallel()
+	filename := "testfile_open_write_read_multiple_handles.txt"
+	tempbuffer := make([]byte, 4*1024)
+	databuffer := make([]byte, 4*1024) // 4KB buffer
+	_, err := io.ReadFull(rand.Reader, databuffer)
+	assert.NoError(t, err)
+
+	for _, mnt := range mountpoints {
+		filePath := filepath.Join(mnt, filename)
+		file, err := os.Create(filePath)
+		assert.NoError(t, err)
+		file2, err := os.OpenFile(filePath, os.O_RDWR, 0644)
+		assert.NoError(t, err)
+
+		for range 10 { // Write the buffer 10 times from file
+			written, err := file.Write(databuffer)
+			assert.NoError(t, err)
+			assert.Equal(t, 4*1024, written)
+		}
+		for range 10 { // Read the buffer 10 times
+			read, err := file2.Read(tempbuffer)
+			assert.NoError(t, err)
+			assert.Equal(t, 4*1024, read)
+			assert.Equal(t, databuffer, tempbuffer)
+		}
+		err = file.Close()
+		assert.NoError(t, err)
+		err = file2.Close()
+		assert.NoError(t, err)
+	}
+
+	checkFileIntegrity(t, filename)
+	removeFiles(t, filename)
+}
+
+// Test random sparse writing on a file.
+func TestRandSparseWriting(t *testing.T) {
+	t.Parallel()
+	filename := "testfile_sparse_write.txt"
+	for _, mnt := range mountpoints {
+		filePath := filepath.Join(mnt, filename)
+		file, err := os.Create(filePath)
+		assert.NoError(t, err)
+
+		written, err := file.WriteAt([]byte("Hello"), 1024*1024) // Write at 1MB offset, 1st block
+		assert.NoError(t, err)
+		assert.Equal(t, 5, written)
+
+		written, err = file.WriteAt(
+			[]byte("World"),
+			12*1024*1024,
+		) // Write at 12MB offset, 2nd block
+		assert.NoError(t, err)
+		assert.Equal(t, 5, written)
+
+		written, err = file.WriteAt(
+			[]byte("Cosmos"),
+			30*1024*1024,
+		) // Write at 30MB offset, 4th block
+		assert.NoError(t, err)
+		assert.Equal(t, 6, written)
+
+		err = file.Close()
+		assert.NoError(t, err)
+	}
+
+	checkFileIntegrity(t, filename)
+	removeFiles(t, filename)
+}
+
+// Test sparse writing on block overlap: assuming a block size of 8MB,
+// write 4K buffers on the overlapping zones of blocks.
+func TestSparseWritingBlockOverlap(t *testing.T) {
+	t.Parallel()
+	filename := "testfile_block_overlap.txt"
+	blockSize := 8 * 1024 * 1024 // 8MB
+	bufferSize := 4 * 1024       // 4KB
+	databuf := make([]byte, bufferSize)
+	_, err := io.ReadFull(rand.Reader, databuf)
+	assert.NoError(t, err)
+
+	for _, mnt := range mountpoints {
+		filePath := filepath.Join(mnt, filename)
+		file, err := os.Create(filePath)
+		assert.NoError(t, err)
+
+		for i := 1; i <= 2; i++ {
+			offset := i * blockSize
+			offset -= 2 * 1024
+			_, err = file.WriteAt(databuf, int64(offset))
+			assert.NoError(t, err)
+		}
+
+		err = file.Close()
+		assert.NoError(t, err)
+	}
+
+	checkFileIntegrity(t, filename)
+	removeFiles(t, filename)
+}
+
+// Test writing at the end of the file and calling truncate to expand it in the middle of the writes.
+// Test writing at the end of the file and calling truncate to shrink it in the middle of the writes.
+// Test open, shrink, write, close; this should result in a hole in the middle.
+// Test open, expand, write at the middle, close; this should change the file size.
+// Test open, expand, write at the end, close; this should change the file size.
+// Test stripe writing with goroutines.
+
+// Test stripe writing.
+// Stripe writing means opening the file several times, writing some data at a different offset
+// through each descriptor, and finally closing all the file descriptors.
+func TestStripeWriting(t *testing.T) {
+	t.Parallel()
+	filename := "testfile_stripe_writing.txt"
+	content := []byte("Stripe writing test data")
+	for _, mnt := range mountpoints {
+		filePath := filepath.Join(mnt, filename)
+		file0, err := os.Create(filePath)
+		assert.NoError(t, err)
+		file1, err := os.OpenFile(filePath, os.O_RDWR, 0644)
+		assert.NoError(t, err)
+		file2, err := os.OpenFile(filePath, os.O_RDWR, 0644)
+		assert.NoError(t, err)
+
+		written, err := file0.WriteAt(content, int64(0)) //write at 0MB
+		assert.NoError(t, err)
+		assert.Equal(t, len(content), written)
+		written, err = file1.WriteAt(content, int64(8*1024*1024)) //write at 8MB
+		assert.NoError(t, err)
+		assert.Equal(t, len(content), written)
+		written, err = file2.WriteAt(content, int64(16*1024*1024)) //write at 16MB
+		assert.NoError(t, err)
+		assert.Equal(t, len(content), written)
+
+		err = file0.Close()
+		assert.NoError(t, err)
+		err = file1.Close()
+		assert.NoError(t, err)
+		err = file2.Close()
+		assert.NoError(t, err)
+	}
+
+	checkFileIntegrity(t, filename)
+	removeFiles(t, filename)
+}
+
+// Test stripe reading. Create a large file, say 32M, then open the file at different offsets and
+// check whether the data matches.
+func TestStripeReading(t *testing.T) {
+	t.Parallel()
+	filename := "testfile_stripe_reading.txt"
+	content := []byte("Stripe Reading Test data")
+	tempbuf := make([]byte, len(content))
+	offsets := []int64{69, 8*1024*1024 + 69, 16*1024*1024 + 69}
+	for _, mnt := range mountpoints {
+		filePath := filepath.Join(mnt, filename)
+		file, err := os.Create(filePath)
+		assert.NoError(t, err)
+		// Write to the file.
+		for _, off := range offsets {
+			written, err := file.WriteAt(content, int64(off))
+			assert.NoError(t, err)
+			assert.Equal(t, len(content), written)
+		}
+		err = file.Close()
+		assert.NoError(t, err)
+		// Read from the different offsets using different file descriptors.
+		file0, err := os.OpenFile(filePath, os.O_RDWR, 0644)
+		assert.NoError(t, err)
+		file1, err := os.OpenFile(filePath, os.O_RDWR, 0644)
+		assert.NoError(t, err)
+		file2, err := os.OpenFile(filePath, os.O_RDWR, 0644)
+		assert.NoError(t, err)
+
+		bytesread, err := file0.ReadAt(tempbuf, offsets[0]) //read at 0MB
+		assert.NoError(t, err)
+		assert.Equal(t, len(tempbuf), bytesread)
+		assert.Equal(t, content, tempbuf)
+		bytesread, err = file1.ReadAt(tempbuf, offsets[1]) //read at 8MB
+		assert.NoError(t, err)
+		assert.Equal(t, len(tempbuf), bytesread)
+		assert.Equal(t, content, tempbuf)
+		bytesread, err = file2.ReadAt(tempbuf, offsets[2]) //read at 16MB
+		assert.NoError(t, err)
+		assert.Equal(t, len(tempbuf), bytesread)
+		assert.Equal(t, content, tempbuf)
+
+		err = file0.Close()
+		assert.NoError(t, err)
+		err = file1.Close()
+		assert.NoError(t, err)
+		err = file2.Close()
+		assert.NoError(t, err)
+	}
+
+	checkFileIntegrity(t, filename)
+	removeFiles(t, filename)
+}
+
+// Test O_TRUNC flag
+func TestOTruncFlag(t *testing.T) {
+	t.Parallel()
+	filename := "testfile_trunc.txt"
+	content := []byte("Hello, World!")
+	for _, mnt := range mountpoints {
+		filePath := filepath.Join(mnt, filename)
+		err := os.WriteFile(filePath, content, 0644)
+		assert.NoError(t, err)
+
+		file, err := os.OpenFile(filePath, os.O_WRONLY|os.O_TRUNC, 0644)
+		assert.NoError(t, err)
+		err = file.Close()
+		assert.NoError(t, err)
+
+		readContent, err := os.ReadFile(filePath)
+		assert.NoError(t, err)
+		assert.Empty(t, readContent)
+	}
+
+	checkFileIntegrity(t, filename)
+	removeFiles(t, filename)
+}
+
+func TestOTruncWhileWriting(t *testing.T) {
+	t.Parallel()
+	OTruncWhileWritingHelper(t, 64*1024)
+	OTruncWhileWritingHelper(t, 10*1024*1024)
+	OTruncWhileWritingHelper(t, 24*1024*1024)
+}
+
+func OTruncWhileWritingHelper(t *testing.T, size int) {
+	filename := "testfile_O_trunc_while_writing.txt"
+	databuf := make([]byte, 4096)
+	_, err := io.ReadFull(rand.Reader, databuf)
+	assert.NoError(t, err)
+	for _, mnt := range mountpoints {
+		filePath := filepath.Join(mnt, filename)
+
+		file, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE, 0644)
+		assert.NoError(t, err)
+
+		for i := 0; i < size; i += 4096 {
+			bytesWritten, err := file.Write(databuf)
+			assert.Equal(t, 4096, bytesWritten)
+			assert.NoError(t, err)
+		}
+		// Let's open the file with O_TRUNC.
+		file2, err := os.OpenFile(filePath, os.O_TRUNC, 0644)
+		assert.NoError(t, err)
+
+		// Continue the write on the first fd.
+		bytesWritten, err := file.Write(databuf)
+		assert.Equal(t, 4096, bytesWritten)
+		assert.NoError(t, err)
+		// Now a big hole is formed at the start of the file.
+		err = file2.Close()
+		assert.NoError(t, err)
+		err = file.Close()
+		assert.NoError(t, err)
+	}
+
+	checkFileIntegrity(t, filename)
+	removeFiles(t, filename)
+}
+
+func TestOTruncWhileReading(t *testing.T) {
+	t.Parallel()
+	OTruncWhileReadingHelper(t, 64*1024)
+	OTruncWhileReadingHelper(t, 10*1024*1024)
+	OTruncWhileReadingHelper(t, 24*1024*1024)
+}
+
+func OTruncWhileReadingHelper(t *testing.T, size int) {
+	filename := "testfile_O_trunc_while_reading.txt"
+	databuf := make([]byte, 4096)
+	_, err := io.ReadFull(rand.Reader, databuf)
+	assert.NoError(t, err)
+	for _, mnt := range mountpoints {
+		filePath := filepath.Join(mnt, filename)
+		// Create the file with the desired size before starting the test.
+		file, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE, 0644)
+		assert.NoError(t, err)
+
+		for i := 0; i < size; i += 4096 {
+			bytesWritten, err := file.Write(databuf)
+			assert.Equal(t, 4096, bytesWritten)
+			assert.NoError(t, err)
+		}
+		err = file.Close()
+		assert.NoError(t, err)
+		//------------------------------------------------------
+		// Start reading the file.
+		file, err = os.OpenFile(filePath, os.O_RDONLY, 0644)
+		assert.NoError(t, err)
+		bytesread, err := file.Read(databuf)
+		assert.Equal(t, 4096, bytesread)
+		assert.NoError(t, err)
+
+		// Let's open the file with O_TRUNC.
+		file2, err := os.OpenFile(filePath, os.O_TRUNC, 0644)
+		assert.NoError(t, err)
+
+		// Continue the reading on the first fd.
+		bytesRead, err := file.Read(databuf)
+		assert.Equal(t, 0, bytesRead)
+		assert.Equal(t, io.EOF, err)
+
+		err = file2.Close()
+		assert.NoError(t, err)
+		err = file.Close()
+		assert.NoError(t, err)
+	}
+
+	checkFileIntegrity(t, filename)
+	removeFiles(t, filename)
+}
+
+// Test unlink on open
+func TestUnlinkOnOpen(t *testing.T) {
+	t.Parallel()
+	filename := "testfile_unlink.txt"
+	content := []byte("Hello, World!")
+	content2 := []byte("Hello, Cosmos")
+	for _, mnt := range mountpoints {
+		filePath := filepath.Join(mnt, filename)
+		// Open the file.
+		file, err := os.Create(filePath)
+		assert.NoError(t, err)
+		written, err := file.Write(content)
+		assert.Equal(t, 13, written)
+		assert.NoError(t, err)
+
+		// Delete the file.
+		err = os.Remove(filePath)
+		assert.NoError(t, err)
+		// Read the content of the file after deleting it.
+		readContent := make([]byte, len(content))
+		_, err = file.ReadAt(readContent, 0)
+		assert.NoError(t, err)
+		assert.Equal(t, string(content), string(readContent))
+
+		err = file.Close()
+		assert.NoError(t, err)
+
+		// Open the file again.
+		_, err = os.Open(filePath)
+		assert.Error(t, err)
+		if err != nil {
+			assert.Contains(t, err.Error(), "no such file or directory")
+		}
+
+		// Write to the file.
+		err = os.WriteFile(filePath, content2, 0644)
+		assert.NoError(t, err)
+
+		file2, err := os.Open(filePath)
+		assert.NoError(t, err)
+
+		// This read should be served from the newly created file.
+		_, err = file2.Read(readContent)
+		assert.NoError(t, err)
+		assert.Equal(t, string(content2), string(readContent))
+
+		err = file2.Close()
+		assert.NoError(t, err)
+	}
+	checkFileIntegrity(t, filename)
+	removeFiles(t, filename)
+}
+
+// Test for multiple handles, parallel flush calls while writing.
+
+func TestParallelFlushCalls(t *testing.T) {
+	t.Parallel()
+	filename := "testfile_parallel_flush_calls.txt"
+	databuffer := make([]byte, 4*1024) // 4KB buffer
+	_, err := io.ReadFull(rand.Reader, databuffer)
+	assert.NoError(t, err)
+
+	for _, mnt := range mountpoints {
+		filePath := filepath.Join(mnt, filename)
+		file0, err := os.Create(filePath)
+		assert.NoError(t, err)
+		file1, err := os.OpenFile(filePath, os.O_RDWR, 0644)
+		assert.NoError(t, err)
+
+		// For each 1MB written, trigger a flush call from another goroutine.
+		trigger_flush := make(chan struct{}, 1)
+		var wg sync.WaitGroup
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for {
+				_, ok := <-trigger_flush
+				if !ok {
+					break
+				}
+				err := file1.Sync()
+				assert.NoError(t, err)
+				if err != nil {
+					fmt.Printf("%s", err.Error())
+				}
+			}
+		}()
+		// Write 40M data
+		for i := 0; i < 40*1024*1024; i += 4 * 1024 {
+			if i%(1*1024*1024) == 0 {
+				trigger_flush <- struct{}{}
+			}
+			byteswritten, err := file0.Write(databuffer)
+			assert.Equal(t, 4*1024, byteswritten)
+			assert.NoError(t, err)
+		}
+		close(trigger_flush)
+		wg.Wait()
+		err = file0.Close()
+		assert.NoError(t, err)
+		err = file1.Close()
+		assert.NoError(t, err)
+	}
+
+	checkFileIntegrity(t, filename)
+	removeFiles(t, filename)
+}
+
+// Aggressive random write on large file.
+
+func expandPath(path string) (string, error) {
+	if strings.HasPrefix(path, "~/") {
+		usr, err := user.Current()
+		if err != nil {
+			return "", err
+		}
+		path = filepath.Join(usr.HomeDir, path[2:])
+	}
+	return filepath.Abs(path)
+}
+
+func TestMain(m *testing.M) {
+	mountpointsFlag := flag.String("mountpoints", "", "Comma-separated list of mountpoints")
+	flag.Parse()
+
+	if *mountpointsFlag != "" {
+		mountpoints = strings.Split(*mountpointsFlag, ",")
+		for i, mnt := range mountpoints {
+			absPath, err := expandPath(mnt)
+			if err != nil {
+				panic(err)
+			}
+			mountpoints[i] = absPath
+		}
+	}
+
+	os.Exit(m.Run())
+}
diff --git a/testdata/config/azure_key_directio.yaml b/testdata/config/azure_key_directio.yaml
index 17b4b6c56..f41f32446 100644
--- a/testdata/config/azure_key_directio.yaml
+++ b/testdata/config/azure_key_directio.yaml
@@ -20,7 +20,7 @@ libfuse:
 file_cache:
   path: { 1 }
   timeout-sec: 0
-  max-size-mb: 2048
+  max-size-mb: 20480
   allow-non-empty-temp: true
   cleanup-on-start: true
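A design note on the interface change that threads through this patch: `ReadInBuffer` and `WriteFile` now take `*ReadInBufferOptions` and `*WriteFileOptions` instead of values, so every component in the pipeline shares one options struct rather than copying it at each hop. A minimal sketch of the pattern, using simplified stand-in types rather than cloudfuse's real ones:

```go
package main

import "fmt"

// Stand-ins for internal.ReadInBufferOptions and the component chain; the
// real cloudfuse types also carry handles, paths, and more fields.
type ReadInBufferOptions struct {
	Offset int64
	Data   []byte
}

type Component interface {
	ReadInBuffer(opt *ReadInBufferOptions) (int, error)
}

// terminal is the last component in the chain; it fills the caller's buffer.
type terminal struct{}

func (terminal) ReadInBuffer(opt *ReadInBufferOptions) (int, error) {
	return copy(opt.Data, "payload"), nil
}

// aligner is a hypothetical middle component that clamps the request before
// forwarding. Because options travel by pointer, its adjustment is visible
// to the caller after the call returns, and no struct copy is made per hop.
type aligner struct{ next Component }

func (a aligner) ReadInBuffer(opt *ReadInBufferOptions) (int, error) {
	if opt.Offset < 0 {
		opt.Offset = 0
	}
	return a.next.ReadInBuffer(opt)
}

func main() {
	opt := &ReadInBufferOptions{Offset: -5, Data: make([]byte, 16)}
	n, _ := aligner{next: terminal{}}.ReadInBuffer(opt)
	fmt.Println(n, opt.Offset, string(opt.Data[:n])) // 7 0 payload
}
```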
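Similarly, on `TruncateFileOptions`: replacing the single `Size` field with `OldSize`, `NewSize`, and `BlockSize` gives a truncate handler enough context to plan block-level work without an extra stat call. A hypothetical helper, not cloudfuse's actual logic, showing the kind of arithmetic those three fields enable:

```go
package main

import "fmt"

// truncatePlan illustrates why OldSize, NewSize, and BlockSize are useful
// together: the handler can tell which block straddles the new size (it needs
// zero-padding on expand or trimming on shrink when NewSize is not aligned)
// and how many whole blocks are added or dropped.
func truncatePlan(oldSize, newSize, blockSize int64) (straddleBlock int64, wholeBlockDelta int64) {
	oldBlocks := (oldSize + blockSize - 1) / blockSize // ceil division
	newBlocks := (newSize + blockSize - 1) / blockSize
	straddleBlock = newSize / blockSize
	return straddleBlock, newBlocks - oldBlocks
}

func main() {
	// Shrink a 20M file to 5K with 8M blocks: rewrite part of block 0,
	// drop two whole blocks.
	block, delta := truncatePlan(20*1024*1024, 5*1024, 8*1024*1024)
	fmt.Println(block, delta) // 0 -2
}
```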
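On the `internal/pipeline.go` change: `Start` and `Stop` now collect every component failure and return them through `errors.Join` instead of bailing out at the first error. The standard-library behavior the new code relies on, with hypothetical error values standing in for component failures:

```go
package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-ins for component start/stop failures; the real
// pipeline appends whatever each component's Start or Stop returns.
var errStart = errors.New("azstorage: start failed")
var errStop = errors.New("libfuse: stop failed")

func main() {
	// errors.Join discards nil errors and wraps the rest, so a single
	// return value can carry every failure seen while unwinding.
	joined := errors.Join(errStart, nil, errStop)

	fmt.Println(joined)                     // both messages, newline-separated
	fmt.Println(errors.Is(joined, errStop)) // true: callers can still match each cause
}
```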
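On the new fio jobs: with `bs=1M`, `rw=write:3M` writes 1M and then skips 3M, a 4M stride, so `number_ios=2560` spans the full `size=10G` while leaving 3M holes inside every 8M storage block (the block size the job file's comment assumes). A quick check of those numbers:

```go
package main

import "fmt"

func main() {
	const (
		bs     int64 = 1 << 20 // 1M per write (bs=1M)
		skip   int64 = 3 << 20 // hole after each write (rw=write:3M)
		nIOs   int64 = 2560    // number_ios
		stride       = bs + skip
	)
	// Each IO covers a 4M stride: 1M of data followed by a 3M hole.
	fmt.Printf("file span: %d GiB\n", nIOs*stride/(1<<30)) // 10: matches size=10G
	fmt.Printf("real data: %d MiB\n", nIOs*bs/(1<<20))     // 2560: a quarter is data
	// Two strides fit in one 8M block, i.e. two 3M holes per block.
	fmt.Printf("hole bytes per 8M block: %d MiB\n", (8<<20)/stride*skip/(1<<20)) // 6
}
```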
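Finally, the `unix.Dup`-based scenarios lean on a POSIX detail worth spelling out: a duplicated descriptor refers to the same open file description as the original, so both fd numbers share one file offset, whereas a fresh `open()` of the same path gets an independent offset. A small Linux-only illustration, using a hypothetical `/tmp` path:

```go
//go:build linux

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Create("/tmp/dup_demo.txt")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	// Dup creates a second fd number referring to the SAME open file
	// description, so writes through either fd advance one shared offset.
	fd, err := unix.Dup(int(f.Fd()))
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	_, _ = f.Write([]byte("abc"))
	_, _ = unix.Write(fd, []byte("def")) // continues at offset 3, not 0

	data, _ := os.ReadFile(f.Name())
	fmt.Println(string(data)) // abcdef
}
```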