[Storage Explorer] Add support for High Throughput Append Blob (#2480)
* initial changes for highthroughput appendblob

* clean up tests

* pulled constant into error message

* clean up tests + update comment

* updated comments + added mb constant
siminsavani-msft committed Jan 2, 2024
1 parent 7e0e215 commit a612861
Showing 4 changed files with 68 additions and 9 deletions.
6 changes: 3 additions & 3 deletions cmd/copy.go
@@ -360,9 +360,9 @@ func (raw rawCopyCmdArgs) cook() (CookedCopyCmdArgs, error) {
 	}

 	// If the given blobType is AppendBlob, block-size-mb should not be greater than
-	// 4MB.
+	// common.MaxAppendBlobBlockSize.
 	if cookedSize, _ := blockSizeInBytes(raw.blockSizeMB); cooked.blobType == common.EBlobType.AppendBlob() && cookedSize > common.MaxAppendBlobBlockSize {
-		return cooked, fmt.Errorf("block size cannot be greater than 4MB for AppendBlob blob type")
+		return cooked, fmt.Errorf("block size cannot be greater than %dMB for AppendBlob blob type", common.MaxAppendBlobBlockSize/common.MegaByte)
 	}

 	err = cooked.blockBlobTier.Parse(raw.blockBlobTier)
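The check converts the user-supplied --block-size-mb value to bytes before comparing it against the cap, and derives the number shown in the error message from the constants rather than hard-coding it. A standalone sketch of that pattern, assuming blockSizeInBytes is roughly a MiB-to-bytes conversion helper (the helper body below is illustrative, not the repo's code):

```go
package main

import (
	"fmt"
	"math"
)

const (
	MegaByte               = 1024 * 1024    // mirrors common.MegaByte added in this commit
	MaxAppendBlobBlockSize = 100 * MegaByte // mirrors common.MaxAppendBlobBlockSize after this commit
)

// blockSizeInBytes converts a fractional MiB flag value to bytes.
// This body is a guess at the helper's behavior, for illustration only.
func blockSizeInBytes(mb float64) (int64, error) {
	if mb < 0 {
		return 0, fmt.Errorf("block size cannot be negative")
	}
	return int64(math.Round(mb * MegaByte)), nil
}

func main() {
	cookedSize, _ := blockSizeInBytes(200) // e.g. --block-size-mb=200 with --blob-type=AppendBlob
	if cookedSize > MaxAppendBlobBlockSize {
		// Same message shape as the diff: the cap is derived from the constants.
		err := fmt.Errorf("block size cannot be greater than %dMB for AppendBlob blob type",
			MaxAppendBlobBlockSize/MegaByte)
		fmt.Println(err) // block size cannot be greater than 100MB for AppendBlob blob type
	}
}
```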
@@ -1540,7 +1540,7 @@ func (cca *CookedCopyCmdArgs) processCopyJobPartOrders() (err error) {
 	options := createClientOptions(common.AzcopyCurrentJobLogger)
 	var azureFileSpecificOptions any
 	if cca.FromTo.From() == common.ELocation.File() {
-		azureFileSpecificOptions = &common.FileClientOptions {
+		azureFileSpecificOptions = &common.FileClientOptions{
 			AllowTrailingDot: cca.trailingDot == common.ETrailingDotOption.Enable(),
 		}
 	}
9 changes: 5 additions & 4 deletions common/fe-ste-models.go
@@ -332,7 +332,7 @@ func (ExitCode) Error() ExitCode { return ExitCode(1) }
 // NoExit is used as a marker, to suppress the normal exit behaviour
 func (ExitCode) NoExit() ExitCode { return ExitCode(99) }

-////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 type LogLevel uint8

 const (
@@ -397,15 +397,15 @@ func (ll LogLevel) String() string {
 	}
 }

-////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 // LogSanitizer can be implemented to clean secrets from lines logged by ForceLog
 // By default no implementation is provided here, because pipeline may be used in many different
 // contexts, so the correct implementation is context-dependent
 type LogSanitizer interface {
 	SanitizeLogMessage(raw string) string
 }

-////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 var EJobPriority = JobPriority(0)

 // JobPriority defines the transfer priorities supported by the Storage Transfer Engine's channels
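This hunk is only a gofmt-style comment reflow, but the LogSanitizer interface it touches is easy to illustrate. A minimal sketch of an implementation; the SAS-redaction pattern is hypothetical, not code from the repo:

```go
package main

import (
	"fmt"
	"regexp"
)

// LogSanitizer matches the interface in common/fe-ste-models.go.
type LogSanitizer interface {
	SanitizeLogMessage(raw string) string
}

// sasSanitizer is a hypothetical implementation that redacts SAS signatures.
type sasSanitizer struct {
	sigPattern *regexp.Regexp
}

func (s sasSanitizer) SanitizeLogMessage(raw string) string {
	return s.sigPattern.ReplaceAllString(raw, "sig=REDACTED")
}

func main() {
	s := sasSanitizer{sigPattern: regexp.MustCompile(`sig=[^&\s]+`)}
	fmt.Println(s.SanitizeLogMessage("GET https://acct.blob.core.windows.net/c/b?sig=abc123"))
}
```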
@@ -1073,13 +1073,14 @@ func (i *InvalidMetadataHandleOption) UnmarshalJSON(b []byte) error {
 const (
 	DefaultBlockBlobBlockSize      = 8 * 1024 * 1024
 	MaxBlockBlobBlockSize          = 4000 * 1024 * 1024
-	MaxAppendBlobBlockSize         = 4 * 1024 * 1024
+	MaxAppendBlobBlockSize         = 100 * 1024 * 1024
 	DefaultPageBlobChunkSize       = 4 * 1024 * 1024
 	DefaultAzureFileChunkSize      = 4 * 1024 * 1024
 	MaxRangeGetSize                = 4 * 1024 * 1024
 	MaxNumberOfBlocksPerBlob       = 50000
 	BlockSizeThreshold             = 256 * 1024 * 1024
 	MinParallelChunkCountThreshold = 4 /* minimum number of chunks in parallel for AzCopy to be performant. */
+	MegaByte                       = 1024 * 1024
 )

 // This struct represent a single transfer entry with source and destination details
58 changes: 58 additions & 0 deletions e2etest/zt_basic_copy_sync_remove_test.go
@@ -31,6 +31,7 @@ import (
"os"
"path/filepath"
"runtime"
"strings"
"syscall"
"testing"
"time"
@@ -77,6 +78,63 @@ func TestBasic_CopyUploadLargeBlob(t *testing.T) {
 	}, EAccountType.Standard(), EAccountType.Standard(), "")
 }

+func TestBasic_CopyUploadLargeAppendBlob(t *testing.T) {
+	dst := common.EBlobType.AppendBlob()
+
+	RunScenarios(t, eOperation.Copy(), eTestFromTo.Other(common.EFromTo.BlobBlob(), common.EFromTo.LocalBlob()), eValidate.Auto(), anonymousAuthOnly, anonymousAuthOnly, params{
+		recursive: true,
+		blobType:  dst.String(),
+	}, &hooks{
+		afterValidation: func(h hookHelper) {
+			props := h.GetDestination().getAllProperties(h.GetAsserter())
+			h.GetAsserter().Assert(len(props), equals(), 1)
+			bprops := &objectProperties{}
+			for key, _ := range props {
+				// we try to match the test.txt substring because local test files have a randomizing prefix on their names
+				if strings.Contains(key, "test.txt") {
+					bprops = props[key]
+				}
+			}
+			h.GetAsserter().Assert(bprops.blobType, equals(), dst)
+		},
+	}, testFiles{
+		defaultSize: "101M",
+
+		shouldTransfer: []interface{}{
+			f("test.txt", with{blobType: dst}),
+		},
+	}, EAccountType.Standard(), EAccountType.Standard(), "")
+}
+
+func TestBasic_CopyUploadLargeAppendBlobBlockSizeFlag(t *testing.T) {
+	dst := common.EBlobType.AppendBlob()
+
+	RunScenarios(t, eOperation.Copy(), eTestFromTo.Other(common.EFromTo.BlobBlob(), common.EFromTo.LocalBlob()), eValidate.Auto(), anonymousAuthOnly, anonymousAuthOnly, params{
+		recursive:   true,
+		blobType:    dst.String(),
+		blockSizeMB: 100, // 100 MB
+	}, &hooks{
+		afterValidation: func(h hookHelper) {
+			props := h.GetDestination().getAllProperties(h.GetAsserter())
+			h.GetAsserter().Assert(len(props), equals(), 1)
+			bprops := &objectProperties{}
+			for key, _ := range props {
+				// we try to match the test.txt substring because local test files have a randomizing prefix on their names
+				if strings.Contains(key, "test.txt") {
+					bprops = props[key]
+				}
+			}
+			h.GetAsserter().Assert(bprops.blobType, equals(), dst)
+		},
+	}, testFiles{
+		defaultSize: "101M",
+
+		shouldTransfer: []interface{}{
+			f("test.txt", with{blobType: dst}),
+		},
+	}, EAccountType.Standard(), EAccountType.Standard(), "")
+}
+
 func TestBasic_CopyDownloadSingleBlob(t *testing.T) {
 	RunScenarios(t, eOperation.CopyAndSync(), eTestFromTo.AllDownloads(), eValidate.Auto(), allCredentialTypes, anonymousAuthOnly, params{
 		recursive: true,
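The 101M default file size in these tests is deliberate: with the cap now at 100 MB, the upload cannot fit in a single append operation, so the tests exercise the multi-chunk append path (on the command line, the second test corresponds roughly to passing --blob-type AppendBlob --block-size-mb 100 to azcopy copy). A standalone sketch of the chunk-count arithmetic, with illustrative names:

```go
package main

import "fmt"

func main() {
	const megaByte = 1024 * 1024
	const maxAppendBlobBlockSize = 100 * megaByte

	fileSize := int64(101 * megaByte) // the tests' defaultSize of "101M"
	chunkSize := int64(maxAppendBlobBlockSize)

	// Ceiling division: a 101 MB file with 100 MB chunks needs 2 append operations.
	numChunks := (fileSize + chunkSize - 1) / chunkSize
	fmt.Println(numChunks) // 2
}
```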
4 changes: 2 additions & 2 deletions ste/sender-appendBlob.go
@@ -63,8 +63,8 @@ func newAppendBlobSenderBase(jptm IJobPartTransferMgr, destination string, pacer

 	// compute chunk count
 	chunkSize := transferInfo.BlockSize
-	// If the given chunk Size for the Job is greater than maximum append blob block size i.e 4 MB,
-	// then set chunkSize as 4 MB.
+	// If the given chunk Size for the Job is greater than the maximum append blob block size, i.e. common.MaxAppendBlobBlockSize,
+	// then set chunkSize to common.MaxAppendBlobBlockSize.
 	chunkSize = common.Iff(
 		chunkSize > common.MaxAppendBlobBlockSize,
 		common.MaxAppendBlobBlockSize,
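common.Iff reads like a ternary expression: when the job's configured block size exceeds the append blob maximum, the sender clamps it to the cap instead of failing. A minimal sketch of that clamp; the generic Iff signature here is an assumption inferred from the call site above, not copied from the repo:

```go
package main

import "fmt"

// Iff returns trueVal when test is true, otherwise falseVal
// (inferred from how the diff calls common.Iff).
func Iff[T any](test bool, trueVal, falseVal T) T {
	if test {
		return trueVal
	}
	return falseVal
}

func main() {
	const megaByte = 1024 * 1024
	const maxAppendBlobBlockSize = int64(100 * megaByte)

	chunkSize := int64(256 * megaByte) // e.g. a block size tuned for block blobs
	chunkSize = Iff(chunkSize > maxAppendBlobBlockSize, maxAppendBlobBlockSize, chunkSize)
	fmt.Println(chunkSize / megaByte) // 100, clamped to the append blob maximum
}
```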

