diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml
index 532588839..73e01a3fb 100644
--- a/.github/workflows/codespell.yml
+++ b/.github/workflows/codespell.yml
@@ -1,24 +1,24 @@
-# GitHub Action to automate the identification of common misspellings in text files.
-# https://github.com/codespell-project/actions-codespell
-# https://github.com/codespell-project/codespell
-name: codespell
-on:
-  push:
-    branches:
-      - dev
-      - main
-  pull_request:
-    branches:
-      - dev
-      - main
-jobs:
-  codespell:
-    name: Check for spelling errors
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v2
-      - uses: codespell-project/actions-codespell@master
-        with:
-          check_filenames: true
-          skip: ./sddl/sddlPortable_test.go,./sddl/sddlHelper_linux.go
-          ignore_words_list: "resue,pase,cancl,cacl,froms"
+# GitHub Action to automate the identification of common misspellings in text files.
+# https://github.com/codespell-project/actions-codespell
+# https://github.com/codespell-project/codespell
+name: codespell
+on:
+  push:
+    branches:
+      - dev
+      - main
+  pull_request:
+    branches:
+      - dev
+      - main
+jobs:
+  codespell:
+    name: Check for spelling errors
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: codespell-project/actions-codespell@master
+        with:
+          check_filenames: true
+          skip: ./sddl/sddlPortable_test.go,./sddl/sddlHelper_linux.go,./go.sum
+          ignore_words_list: "resue,pase,cancl,cacl,froms"
\ No newline at end of file
diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml
new file mode 100644
index 000000000..dbd0fdb59
--- /dev/null
+++ b/.github/workflows/golangci-lint.yml
@@ -0,0 +1,40 @@
+name: golangci-lint
+on:
+  push:
+    branches:
+      - dev
+      - main
+  pull_request:
+    branches:
+      - dev
+      - main
+permissions:
+  contents: read
+  # Optional: allow read access to pull request. Use with `only-new-issues` option.
+  # pull-requests: read
+jobs:
+  golangci:
+    strategy:
+      matrix:
+        go: [1.18]
+        os: [ubuntu-latest, windows-latest, macos-latest]
+    name: lint
+    runs-on: ${{ matrix.os }}
+    steps:
+      - uses: actions/setup-go@v3
+        with:
+          go-version: ${{ matrix.go }}
+      - uses: actions/checkout@v3
+      - name: golangci-lint
+        uses: golangci/golangci-lint-action@v3
+        with:
+          # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
+          version: latest
+          # Optional: working directory, useful for monorepos
+          # working-directory: somedir
+
+          # Optional: golangci-lint command line arguments.
+          args: --tests=false --max-issues-per-linter=0 --skip-files=azbfs/zz_generated_* --skip-dirs=e2etest --exclude=S1008 --max-same-issues=0 --timeout 5m0s
+
+          # Optional: show only new issues if it's a pull request. The default value is `false`.
+          # only-new-issues: true
\ No newline at end of file
diff --git a/ChangeLog.md b/ChangeLog.md
index 86e0338fb..d9a37b12e 100644
--- a/ChangeLog.md
+++ b/ChangeLog.md
@@ -1,6 +1,32 @@
 # Change Log
 
+## Version 10.18.0
+
+### New features
+
+1. Added support for `Content-MD5` in the `list` command. Users can now list the MD5 hashes of the blobs in the target container.
+2. Added support for resuming incomplete blobs. Users can now resume a blob upload that was interrupted partway through.
+3. Added support for downloading POSIX properties.
+4. Added support for persisting symlink data.
+
+### Bug fixes
+
+1. Fixed [Issue 2120](https://github.com/Azure/azure-storage-azcopy/pull/2120)
+2. Fixed [Issue 2062](https://github.com/Azure/azure-storage-azcopy/pull/2062)
+3. Fixed [Issue 2046](https://github.com/Azure/azure-storage-azcopy/pull/2048)
+4. Fixed [Issue 1762](https://github.com/Azure/azure-storage-azcopy/pull/2125)
+
+### Documentation
+
+1. Added an example for `--include-pattern`.
+2. Added documentation for `--compare-hash`.
+
+### Security fixes
+
+1. CPK-related headers are now sanitized from the logs.
+2. Updated dependencies to address security vulnerabilities.
+
 ## Version 10.17.0
 
 ### New features
diff --git a/azbfs/zc_credential_anonymous.go b/azbfs/zc_credential_anonymous.go
index 7e9a70197..2e9c891c4 100644
--- a/azbfs/zc_credential_anonymous.go
+++ b/azbfs/zc_credential_anonymous.go
@@ -12,15 +12,6 @@ type Credential interface {
 	credentialMarker()
 }
 
-type credentialFunc pipeline.FactoryFunc
-
-func (f credentialFunc) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
-	return f(next, po)
-}
-
-// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
-func (credentialFunc) credentialMarker() {}
-
 //////////////////////////////
 
 // NewAnonymousCredential creates an anonymous credential for use with HTTP(S) requests that read public resource
@@ -36,11 +27,15 @@ type anonymousCredentialPolicyFactory struct {
 }
 
 // New creates a credential policy object.
+//nolint:unused
 func (f *anonymousCredentialPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
+	// Note: We are not deleting this "unused" code since this is a publicly exported function; we do not want to break
+	// anyone that has a dependency on the azbfs library (like blobfuse).
	return &anonymousCredentialPolicy{next: next}
 }
 
 // credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
+//nolint:unused
 func (*anonymousCredentialPolicyFactory) credentialMarker() {}
 
 // anonymousCredentialPolicy is the credential's policy object.
diff --git a/azbfs/zc_credential_shared_key.go b/azbfs/zc_credential_shared_key.go
index dead933ef..f41f7d171 100644
--- a/azbfs/zc_credential_shared_key.go
+++ b/azbfs/zc_credential_shared_key.go
@@ -62,25 +62,19 @@ func (f *SharedKeyCredential) New(next pipeline.Policy, po *pipeline.PolicyOptio
 // credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
 func (*SharedKeyCredential) credentialMarker() {}
 
-// Constants ensuring that header names are correctly spelled and consistently cased.
 const (
-	headerAuthorization      = "Authorization"
-	headerCacheControl       = "Cache-Control"
-	headerContentEncoding    = "Content-Encoding"
-	headerContentDisposition = "Content-Disposition"
-	headerContentLanguage    = "Content-Language"
-	headerContentLength      = "Content-Length"
-	headerContentMD5         = "Content-MD5"
-	headerContentType        = "Content-Type"
-	headerDate               = "Date"
-	headerIfMatch            = "If-Match"
-	headerIfModifiedSince    = "If-Modified-Since"
-	headerIfNoneMatch        = "If-None-Match"
-	headerIfUnmodifiedSince  = "If-Unmodified-Since"
-	headerRange              = "Range"
-	headerUserAgent          = "User-Agent"
-	headerXmsDate            = "x-ms-date"
-	headerXmsVersion         = "x-ms-version"
+	headerAuthorization     = "Authorization"
+	headerContentEncoding   = "Content-Encoding"
+	headerContentLanguage   = "Content-Language"
+	headerContentLength     = "Content-Length"
+	headerContentMD5        = "Content-MD5"
+	headerContentType       = "Content-Type"
+	headerIfMatch           = "If-Match"
+	headerIfModifiedSince   = "If-Modified-Since"
+	headerIfNoneMatch       = "If-None-Match"
+	headerIfUnmodifiedSince = "If-Unmodified-Since"
+	headerRange             = "Range"
+	headerXmsDate           = "x-ms-date"
 )
 
 // ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS.
@@ -143,7 +137,7 @@ func buildCanonicalizedHeader(headers http.Header) string {
 		ch.WriteRune(':')
 		ch.WriteString(strings.Join(cm[key], ","))
 	}
-	return string(ch.Bytes())
+	return ch.String()
 }
 
 func (f *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) string {
@@ -190,5 +184,5 @@ func (f *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) string {
 			cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ","))
 		}
 	}
-	return string(cr.Bytes())
+	return cr.String()
 }
diff --git a/azbfs/zc_mmf_unix.go b/azbfs/zc_mmf_unix.go
deleted file mode 100644
index 9c6df8289..000000000
--- a/azbfs/zc_mmf_unix.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// +build linux darwin
-
-package azbfs
-
-import (
-	"os"
-	"syscall"
-)
-
-type mmf []byte
-
-func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) {
-	prot, flags := syscall.PROT_READ, syscall.MAP_SHARED // Assume read-only
-	if writable {
-		prot, flags = syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED
-	}
-	addr, err := syscall.Mmap(int(file.Fd()), offset, length, prot, flags)
-	return mmf(addr), err
-}
-
-func (m *mmf) unmap() {
-	err := syscall.Munmap(*m)
-	*m = nil
-	if err != nil {
-		panic(err)
-	}
-}
diff --git a/azbfs/zc_mmf_windows.go b/azbfs/zc_mmf_windows.go
deleted file mode 100644
index 550ab85b5..000000000
--- a/azbfs/zc_mmf_windows.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package azbfs
-
-import (
-	"os"
-	"reflect"
-	"syscall"
-	"unsafe"
-)
-
-type mmf []byte
-
-func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) {
-	prot, access := uint32(syscall.PAGE_READONLY), uint32(syscall.FILE_MAP_READ) // Assume read-only
-	if writable {
-		prot, access = uint32(syscall.PAGE_READWRITE), uint32(syscall.FILE_MAP_WRITE)
-	}
-	maxSize := int64(offset + int64(length))
-	hMMF, errno := syscall.CreateFileMapping(syscall.Handle(file.Fd()), nil, prot, uint32(maxSize>>32), uint32(maxSize&0xffffffff), nil)
-	if hMMF == 0 {
-		return nil, os.NewSyscallError("CreateFileMapping", errno)
-	}
-	defer syscall.CloseHandle(hMMF)
-	addr, errno := syscall.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length))
-	m := mmf{}
-	h := (*reflect.SliceHeader)(unsafe.Pointer(&m))
-	h.Data = addr
-	h.Len = length
-	h.Cap = h.Len
-	return m, nil
-}
-
-func (m *mmf) unmap() {
-	addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0])))
-	*m = mmf{}
-	err := syscall.UnmapViewOfFile(addr)
-	if err != nil {
-		panic(err)
-	}
-}
diff --git a/azbfs/zc_policy_request_log.go b/azbfs/zc_policy_request_log.go
index 2aaa16ef9..5035ad2a6 100644
--- a/azbfs/zc_policy_request_log.go
+++ b/azbfs/zc_policy_request_log.go
@@ -76,9 +76,9 @@ func NewRequestLogPolicyFactory_Deprecated(o RequestLogOptions) pipeline.Factory
 			sc := response.Response().StatusCode
 			if ((sc >= 400 && sc <= 499) && sc != http.StatusNotFound && sc != http.StatusConflict && sc != http.StatusPreconditionFailed && sc != http.StatusRequestedRangeNotSatisfiable) || (sc >= 500 && sc <= 599) {
 				logLevel, forceLog = pipeline.LogError, !o.SyslogDisabled // Promote to Error any 4xx (except those listed is an error) or any 5xx
-			} else {
-				// For other status codes, we leave the level as is.
 			}
+			// For other status codes, we leave the level as is.
+		} else {
 			// This error did not get an HTTP response from the service; upgrade the severity to Error
 			logLevel, forceLog = pipeline.LogError, !o.SyslogDisabled
 		}
diff --git a/azbfs/zc_policy_retry.go b/azbfs/zc_policy_retry.go
index a7f56a045..6fa214bc3 100644
--- a/azbfs/zc_policy_retry.go
+++ b/azbfs/zc_policy_retry.go
@@ -3,7 +3,6 @@ package azbfs
 import (
 	"context"
 	"io"
-	"io/ioutil"
 	"math/rand"
 	"net"
 	"net/http"
@@ -134,7 +133,7 @@ func (o RetryOptions) calcDelay(try int32) time.Duration { // try is >=1; never
 // Note: forked from the standard package url.go
 // The content is exactly the same but the spaces are encoded as %20 instead of +
 // TODO: remove after the service fix
-// Encode encodes the values into ``URL encoded'' form
+// Encode encodes the values into “URL encoded” form
 // ("bar=baz&foo=quux") sorted by key.
 func alternativeEncode(v url.Values) string {
 	if v == nil {
@@ -217,7 +216,7 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
 			// Set the server-side timeout query parameter "timeout=[seconds]"
 			timeout := int32(o.TryTimeout.Seconds()) // Max seconds per try
 			if deadline, ok := ctx.Deadline(); ok { // If user's ctx has a deadline, make the timeout the smaller of the two
-				t := int32(deadline.Sub(time.Now()).Seconds()) // Duration from now until user's ctx reaches its deadline
+				t := int32(time.Until(deadline).Seconds()) // Duration from now until user's ctx reaches its deadline
 				logf("MaxTryTimeout=%d secs, TimeTilDeadline=%d sec\n", timeout, t)
 				if t < timeout {
 					timeout = t
@@ -254,7 +253,7 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
 				action = "Retry: Secondary URL returned 404"
 			case err != nil:
 				// NOTE: Protocol Responder returns non-nil if REST API returns invalid status code for the invoked operation
-				if netErr, ok := err.(net.Error); ok && (netErr.Temporary() || netErr.Timeout()) {
+				if netErr, ok := err.(net.Error); ok && (netErr.Temporary() || netErr.Timeout()) { //nolint:staticcheck
 					action = "Retry: net.Error and Temporary() or Timeout()"
 				} else if err == io.ErrUnexpectedEOF {
 					// Some of our methods under the zz_ files do use io.Copy and other related methods that can throw an unexpectedEOF.
@@ -284,7 +283,7 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
 			}
 			if response != nil && response.Response() != nil {
 				// If we're going to retry and we got a previous response, then flush its body to avoid leaking its TCP connection
-				io.Copy(ioutil.Discard, response.Response().Body)
+				_, _ = io.Copy(io.Discard, response.Response().Body)
 				response.Response().Body.Close()
 			}
 			// If retrying, cancel the current per-try timeout context
diff --git a/azbfs/zc_sas_query_params.go b/azbfs/zc_sas_query_params.go
index 57ef32d32..132a3099a 100644
--- a/azbfs/zc_sas_query_params.go
+++ b/azbfs/zc_sas_query_params.go
@@ -200,7 +200,7 @@ func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool
 		case "st":
 			p.startTime, p.stTimeFormat, _ = parseSASTimeString(val)
 		case "se":
-			p.expiryTime, p.stTimeFormat, _ = parseSASTimeString(val)
+			p.expiryTime, p.seTimeFormat, _ = parseSASTimeString(val)
 		case "sip":
 			dashIndex := strings.Index(val, "-")
 			if dashIndex == -1 {
@@ -255,7 +255,7 @@ func (p *SASQueryParameters) addToValues(v url.Values) url.Values {
 		v.Add("st", formatSASTime(&p.startTime, p.stTimeFormat))
 	}
 	if !p.expiryTime.IsZero() {
-		v.Add("se", formatSASTime(&p.expiryTime, p.stTimeFormat))
+		v.Add("se", formatSASTime(&p.expiryTime, p.seTimeFormat))
 	}
 	if len(p.ipRange.Start) > 0 {
 		v.Add("sip", p.ipRange.String())
diff --git a/azbfs/zc_util_validate.go b/azbfs/zc_util_validate.go
index b10a56985..4841bf42e 100644
--- a/azbfs/zc_util_validate.go
+++ b/azbfs/zc_util_validate.go
@@ -58,7 +58,10 @@ func validateSeekableStreamAt0AndGetCount(body io.ReadSeeker) int64 {
 	if err != nil {
 		panic("failed to seek stream")
 	}
-	body.Seek(0, io.SeekStart)
+	_, err = body.Seek(0, io.SeekStart)
+	if err != nil {
+		logf("error seeking stream (%s)", err.Error())
+	}
 	return count
 }
 
diff --git a/azbfs/zc_uuid.go b/azbfs/zc_uuid.go
index 0f90c9fd1..f14972d2f 100644
--- a/azbfs/zc_uuid.go
+++ b/azbfs/zc_uuid.go
@@ -3,15 +3,11 @@ package azbfs
 import (
 	"crypto/rand"
 	"fmt"
-	"strconv"
 )
 
 // The UUID reserved variants.
 const (
-	reservedNCS       byte = 0x80
-	reservedRFC4122   byte = 0x40
-	reservedMicrosoft byte = 0x20
-	reservedFuture    byte = 0x00
+	reservedRFC4122 byte = 0x40
 )
 
 // A UUID representation compliant with specification in RFC 4122 document.
@@ -36,45 +32,3 @@ func newUUID() (u uuid) {
 func (u uuid) String() string {
 	return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
 }
-
-// ParseUUID parses a string formatted as "003020100-0504-0706-0809-0a0b0c0d0e0f"
-// or "{03020100-0504-0706-0809-0a0b0c0d0e0f}" into a UUID.
-func parseUUID(uuidStr string) uuid {
-	char := func(hexString string) byte {
-		i, _ := strconv.ParseUint(hexString, 16, 8)
-		return byte(i)
-	}
-	if uuidStr[0] == '{' {
-		uuidStr = uuidStr[1:] // Skip over the '{'
-	}
-	// 03020100 - 05 04 - 07 06 - 08 09 - 0a 0b 0c 0d 0e 0f
-	// 1 11 1 11 11 1 12 22 2 22 22 22 33 33 33
-	// 01234567 8 90 12 3 45 67 8 90 12 3 45 67 89 01 23 45
-	uuidVal := uuid{
-		char(uuidStr[0:2]),
-		char(uuidStr[2:4]),
-		char(uuidStr[4:6]),
-		char(uuidStr[6:8]),
-
-		char(uuidStr[9:11]),
-		char(uuidStr[11:13]),
-
-		char(uuidStr[14:16]),
-		char(uuidStr[16:18]),
-
-		char(uuidStr[19:21]),
-		char(uuidStr[21:23]),
-
-		char(uuidStr[24:26]),
-		char(uuidStr[26:28]),
-		char(uuidStr[28:30]),
-		char(uuidStr[30:32]),
-		char(uuidStr[32:34]),
-		char(uuidStr[34:36]),
-	}
-	return uuidVal
-}
-
-func (u uuid) bytes() []byte {
-	return u[:]
-}
diff --git a/azbfs/zt_url_file_test.go b/azbfs/zt_url_file_test.go
index 6d7b2d78a..b6e8b77ee 100644
--- a/azbfs/zt_url_file_test.go
+++ b/azbfs/zt_url_file_test.go
@@ -16,7 +16,6 @@ import (
 	"net/url"
 
 	//"strings"
-	"io/ioutil"
 	"net/http"
 
 	"github.com/Azure/azure-storage-azcopy/v10/azbfs"
@@ -213,7 +212,7 @@ func (s *FileURLSuite) TestFileGetProperties(c *chk.C) {
 //	c.Assert(resp.ContentType(), chk.Equals, "application/octet-stream")
 //	c.Assert(resp.Status(), chk.Not(chk.Equals), "")
 //
-//	download, err := ioutil.ReadAll(resp.Response().Body)
+//	download, err := io.ReadAll(resp.Response().Body)
 //	c.Assert(err, chk.IsNil)
 //	c.Assert(download, chk.DeepEquals, contentD[:1024])
 //}
@@ -250,14 +249,14 @@ func (s *FileURLSuite) TestUnexpectedEOFRecovery(c *chk.C) {
 
 	// Verify that we can inject errors first.
 	reader := dResp.Body(azbfs.InjectErrorInRetryReaderOptions(errors.New("unrecoverable error")))
-	_, err = ioutil.ReadAll(reader)
+	_, err = io.ReadAll(reader)
 	c.Assert(err, chk.NotNil)
 	c.Assert(err.Error(), chk.Equals, "unrecoverable error")
 
 	// Then inject the retryable error.
 	reader = dResp.Body(azbfs.InjectErrorInRetryReaderOptions(io.ErrUnexpectedEOF))
 
-	buf, err := ioutil.ReadAll(reader)
+	buf, err := io.ReadAll(reader)
 	c.Assert(err, chk.IsNil)
 	c.Assert(buf, chk.DeepEquals, contentD)
 }
@@ -309,7 +308,7 @@ func (s *FileURLSuite) TestUploadDownloadRoundTrip(c *chk.C) {
 	c.Assert(resp.Status(), chk.Not(chk.Equals), "")
 
 	// Verify the partial data
-	download, err := ioutil.ReadAll(resp.Response().Body)
+	download, err := io.ReadAll(resp.Response().Body)
 	c.Assert(err, chk.IsNil)
 	c.Assert(download, chk.DeepEquals, contentD1[:1024])
 
@@ -325,7 +324,7 @@ func (s *FileURLSuite) TestUploadDownloadRoundTrip(c *chk.C) {
 	c.Assert(resp.Version(), chk.Not(chk.Equals), "")
 
 	// Verify the entire content
-	download, err = ioutil.ReadAll(resp.Response().Body)
+	download, err = io.ReadAll(resp.Response().Body)
 	c.Assert(err, chk.IsNil)
 	c.Assert(download[:2048], chk.DeepEquals, contentD1[:])
 	c.Assert(download[2048:], chk.DeepEquals, contentD2[:])
diff --git a/azbfs/zz_response_model.go b/azbfs/zz_response_model.go
index 84ab04701..e333db190 100644
--- a/azbfs/zz_response_model.go
+++ b/azbfs/zz_response_model.go
@@ -295,9 +295,8 @@ func (dlr *DirectoryListResponse) Directories() []string {
 func (dlr *DirectoryListResponse) FilesAndDirectories() []Path {
 	var entities []Path
 	lSchema := (PathList)(*dlr)
-	for _, path := range lSchema.Paths {
-		entities = append(entities, path)
-	}
+	// Assuming this file is not generated, since the azbfs swagger README states that generated files are prefixed with zz_generated.
+	entities = append(entities, lSchema.Paths...)
 	return entities
 }
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 74a2116b5..3524a367f 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -1,3 +1,6 @@
+variables:
+  AZCOPY_GOLANG_VERSION: $(AZCOPY_GO_VER)
+
 trigger:
   branches:
     include:
@@ -29,16 +32,7 @@ jobs:
         env:
           GO111MODULE: 'on'
         inputs:
-          version: '1.19.2'
-
-      - script: |
-          curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.46.2
-          echo 'Installation complete'
-          ./bin/golangci-lint --version
-          ./bin/golangci-lint run e2etest
-        displayName: 'Golang Lint Check - Linux'
-        workingDirectory: $(System.DefaultWorkingDirectory)
-        condition: eq(variables.type, 'linux')
+          version: $(AZCOPY_GOLANG_VERSION)
 
       - script: |
          echo 'Running GO Vet'
@@ -121,7 +115,7 @@ jobs:
     steps:
      - task: GoTool@0
        inputs:
-          version: '1.19.2'
+          version: $(AZCOPY_GOLANG_VERSION)
 
      # Running E2E Tests on Linux - AMD64
      - script: |
@@ -219,7 +213,7 @@ jobs:
      - task: GoTool@0
        name: 'Set_up_Golang'
        inputs:
-          version: '1.19.2'
+          version: $(AZCOPY_GOLANG_VERSION)
      - task: DownloadSecureFile@1
        name: ciGCSServiceAccountKey
        displayName: 'Download GCS Service Account Key'
diff --git a/cmd/copy.go b/cmd/copy.go
index 31a4f9d6b..cb60a3175 100644
--- a/cmd/copy.go
+++ b/cmd/copy.go
@@ -86,6 +86,9 @@ type rawCopyCmdArgs struct {
 	legacyExclude string // used only for warnings
 	listOfVersionIDs string
 
+	// Indicates the user wants to upload the symlink itself, not the file on the other end
+	preserveSymlinks bool
+
 	// filters from flags
 	listOfFilesToCopy string
 	recursive bool
@@ -332,12 +335,19 @@ func (raw rawCopyCmdArgs) cook() (CookedCopyCmdArgs, error) {
 	cooked.FromTo = fromTo
 	cooked.Recursive = raw.recursive
-	cooked.FollowSymlinks = raw.followSymlinks
 	cooked.ForceIfReadOnly = raw.forceIfReadOnly
 	if err = validateForceIfReadOnly(cooked.ForceIfReadOnly, cooked.FromTo); err != nil {
 		return cooked, err
 	}
 
+	if err = cooked.SymlinkHandling.Determine(raw.followSymlinks, raw.preserveSymlinks); err != nil {
+		return cooked, err
+	}
+
+	if err = validateSymlinkHandlingMode(cooked.SymlinkHandling, cooked.FromTo); err != nil {
+		return cooked, err
+	}
+
 	// copy&transform flags to type-safety
 	err = cooked.ForceWrite.Parse(raw.forceWrite)
 	if err != nil {
@@ -687,7 +697,7 @@ func (raw rawCopyCmdArgs) cook() (CookedCopyCmdArgs, error) {
 
 	cooked.IncludeDirectoryStubs = raw.includeDirectoryStubs || (cooked.isHNStoHNS && cooked.preservePermissions.IsTruthy())
 
-	if err = crossValidateSymlinksAndPermissions(cooked.FollowSymlinks, cooked.preservePermissions.IsTruthy()); err != nil {
+	if err = crossValidateSymlinksAndPermissions(cooked.SymlinkHandling, cooked.preservePermissions.IsTruthy()); err != nil {
 		return cooked, err
 	}
@@ -774,7 +784,7 @@ func (raw rawCopyCmdArgs) cook() (CookedCopyCmdArgs, error) {
 	case common.EFromTo.BlobLocal(),
 		common.EFromTo.FileLocal(),
 		common.EFromTo.BlobFSLocal():
-		if cooked.FollowSymlinks {
+		if cooked.SymlinkHandling.Follow() {
 			return cooked, fmt.Errorf("follow-symlinks flag is not supported while downloading")
 		}
 		if cooked.blockBlobTier != common.EBlockBlobTier.None() ||
@@ -808,7 +818,7 @@ func (raw rawCopyCmdArgs) cook() (CookedCopyCmdArgs, error) {
 		if cooked.preserveLastModifiedTime {
 			return cooked, fmt.Errorf("preserve-last-modified-time is not supported while copying from service to service")
 		}
-		if cooked.FollowSymlinks {
+		if cooked.SymlinkHandling.Follow() {
 			return cooked, fmt.Errorf("follow-symlinks flag is not supported while copying from service to service")
 		}
 		// blob type is not supported if destination is not blob
@@ -946,8 +956,8 @@ func areBothLocationsSMBAware(fromTo common.FromTo) bool {
 func areBothLocationsPOSIXAware(fromTo common.FromTo) bool {
 	// POSIX properties are stored in blob metadata-- They don't need a special persistence strategy for BlobBlob.
 	return runtime.GOOS == "linux" && (
-	// fromTo == common.EFromTo.BlobLocal() || TODO
-	fromTo == common.EFromTo.LocalBlob()) ||
+	fromTo == common.EFromTo.BlobLocal() ||
+		fromTo == common.EFromTo.LocalBlob()) ||
 		fromTo == common.EFromTo.BlobBlob()
 }
 
@@ -977,9 +987,24 @@ func validatePreserveOwner(preserve bool, fromTo common.FromTo) error {
 	return nil
 }
 
-func crossValidateSymlinksAndPermissions(followSymlinks, preservePermissions bool) error {
-	if followSymlinks && preservePermissions {
-		return errors.New("cannot follow symlinks when preserving permissions (since the correct permission inheritance behaviour for symlink targets is undefined)")
+func validateSymlinkHandlingMode(symlinkHandling common.SymlinkHandlingType, fromTo common.FromTo) error {
+	if symlinkHandling.Preserve() {
+		switch fromTo {
+		case common.EFromTo.LocalBlob(), common.EFromTo.BlobLocal():
+			return nil // Fine on all OSes that support symlink via the OS package. (Win, MacOS, and Linux do, and that's what we officially support.)
+		case common.EFromTo.BlobBlob():
+			return nil // Blob->Blob doesn't involve any local requirements
+		default:
+			return fmt.Errorf("flag --%s can only be used on Blob<->Blob or Local<->Blob", common.PreserveSymlinkFlagName)
+		}
+	}
+
+	return nil // other older symlink handling modes can work on all OSes
+}
+
+func crossValidateSymlinksAndPermissions(symlinkHandling common.SymlinkHandlingType, preservePermissions bool) error {
+	if symlinkHandling != common.ESymlinkHandlingType.Skip() && preservePermissions {
+		return errors.New("cannot handle symlinks when preserving permissions (since the correct permission inheritance behaviour for symlink targets is undefined)")
 	}
 	return nil
 }
@@ -1070,7 +1095,7 @@ func validateMetadataString(metadata string) error {
 	if err != nil {
 		return err
 	}
-	for k, _ := range metadataMap {
+	for k := range metadataMap {
 		if strings.ContainsAny(k, " !#$%^&*,<>{}|\\:.()+'\"?/") {
 			return fmt.Errorf("invalid metadata key value '%s': can't have spaces or special characters", k)
 		}
@@ -1108,7 +1133,7 @@ type CookedCopyCmdArgs struct {
 	ListOfFilesChannel chan string // Channels are nullable.
 	Recursive bool
 	StripTopDir bool
-	FollowSymlinks bool
+	SymlinkHandling common.SymlinkHandlingType
 	ForceWrite common.OverwriteOption // says whether we should try to overwrite
 	ForceIfReadOnly bool // says whether we should _force_ any overwrites (triggered by forceWrite) to work on Azure Files objects that are set to read-only
 	IsSourceDir bool
@@ -1379,11 +1404,9 @@ func (cca *CookedCopyCmdArgs) processRedirectionUpload(blobResource common.Resou
 
 // get source credential - if there is a token it will be used to get passed along our pipeline
 func (cca *CookedCopyCmdArgs) getSrcCredential(ctx context.Context, jpo *common.CopyJobPartOrderRequest) (common.CredentialInfo, error) {
-	srcCredInfo := common.CredentialInfo{}
-	var err error
-	var isPublic bool
-
-	if srcCredInfo, isPublic, err = GetCredentialInfoForLocation(ctx, cca.FromTo.From(), cca.Source.Value, cca.Source.SAS, true, cca.CpkOptions); err != nil {
+	srcCredInfo, isPublic, err := GetCredentialInfoForLocation(ctx, cca.FromTo.From(), cca.Source.Value, cca.Source.SAS, true, cca.CpkOptions)
+	if err != nil {
 		return srcCredInfo, err
 		// If S2S and source takes OAuthToken as its cred type (OR) source takes anonymous as its cred type, but it's not public and there's no SAS
 	} else if cca.FromTo.IsS2S() &&
@@ -1393,7 +1416,7 @@ func (cca *CookedCopyCmdArgs) getSrcCredential(ctx context.Context, jpo *common.
 	}
 
 	if cca.Source.SAS != "" && cca.FromTo.IsS2S() && jpo.CredentialInfo.CredentialType == common.ECredentialType.OAuthToken() {
-		//glcm.Info("Authentication: If the source and destination accounts are in the same AAD tenant & the user/spn/msi has appropriate permissions on both, the source SAS token is not required and OAuth can be used round-trip.")
+		glcm.Info("Authentication: If the source and destination accounts are in the same AAD tenant & the user/spn/msi has appropriate permissions on both, the source SAS token is not required and OAuth can be used round-trip.")
 	}
 
 	if cca.FromTo.IsS2S() {
@@ -1429,6 +1452,14 @@ func (cca *CookedCopyCmdArgs) processCopyJobPartOrders() (err error) {
 		jobsAdmin.JobsAdmin.SetConcurrencySettingsToAuto()
 	}
 
+	if err := common.VerifyIsURLResolvable(cca.Source.Value); cca.FromTo.From().IsRemote() && err != nil {
+		return fmt.Errorf("failed to resolve source: %w", err)
+	}
+
+	if err := common.VerifyIsURLResolvable(cca.Destination.Value); cca.FromTo.To().IsRemote() && err != nil {
+		return fmt.Errorf("failed to resolve destination: %w", err)
+	}
+
 	// Note: credential info here is only used by remove at the moment.
 	// TODO: Get the entirety of remove into the new copyEnumeratorInit script so we can remove this
 	// and stop having two places in copy that we get credential info
@@ -1461,14 +1492,15 @@ func (cca *CookedCopyCmdArgs) processCopyJobPartOrders() (err error) {
 	// initialize the fields that are constant across all job part orders,
 	// and for which we have sufficient info now to set them
 	jobPartOrder := common.CopyJobPartOrderRequest{
-		JobID:           cca.jobID,
-		FromTo:          cca.FromTo,
-		ForceWrite:      cca.ForceWrite,
-		ForceIfReadOnly: cca.ForceIfReadOnly,
-		AutoDecompress:  cca.autoDecompress,
-		Priority:        common.EJobPriority.Normal(),
-		LogLevel:        azcopyLogVerbosity,
-		ExcludeBlobType: cca.excludeBlobType,
+		JobID:               cca.jobID,
+		FromTo:              cca.FromTo,
+		ForceWrite:          cca.ForceWrite,
+		ForceIfReadOnly:     cca.ForceIfReadOnly,
+		AutoDecompress:      cca.autoDecompress,
+		Priority:            common.EJobPriority.Normal(),
+		LogLevel:            azcopyLogVerbosity,
+		ExcludeBlobType:     cca.excludeBlobType,
+		SymlinkHandlingType: cca.SymlinkHandling,
 		BlobAttributes: common.BlobTransferAttributes{
 			BlobType:         cca.blobType,
 			BlockSizeInBytes: cca.blockSize,
@@ -1675,7 +1707,7 @@ func (cca *CookedCopyCmdArgs) ReportProgressOrExit(lcm common.LifecycleMgr) (tot
 	totalKnownCount = summary.TotalTransfers
 
 	// if json is not desired, and job is done, then we generate a special end message to conclude the job
-	duration := time.Now().Sub(cca.jobStartTime) // report the total run time of the job
+	duration := time.Since(cca.jobStartTime) // report the total run time of the job
 
 	var computeThroughput = func() float64 {
 		// compute the average throughput for the last time interval
@@ -1746,6 +1778,7 @@ Job %s summary
 Elapsed Time (Minutes): %v
 Number of File Transfers: %v
 Number of Folder Property Transfers: %v
+Number of Symlink Transfers: %v
 Total Number of Transfers: %v
 Number of File Transfers Completed: %v
 Number of Folder Transfers Completed: %v
@@ -1760,6 +1793,7 @@ Final Job Status: %v%s%s
 		jobsAdmin.ToFixed(duration.Minutes(), 4),
 		summary.FileTransfers,
 		summary.FolderPropertyTransfers,
+		summary.SymlinkTransfers,
 		summary.TotalTransfers,
 		summary.TransfersCompleted-summary.FoldersCompleted,
 		summary.FoldersCompleted,
@@ -1907,7 +1941,7 @@ func init() {
 		if userFromTo == common.EFromTo.PipeBlob() {
 			// Case 1: PipeBlob. Check for the std input pipe
 			stdinPipeIn, err := isStdinPipeIn()
-			if stdinPipeIn == false || err != nil {
+			if !stdinPipeIn || err != nil {
 				return fmt.Errorf("fatal: failed to read from Stdin due to error: %s", err)
 			}
 			raw.src = pipeLocation
@@ -1994,6 +2028,7 @@ func init() {
 	cpCmd.PersistentFlags().BoolVar(&raw.preserveOwner, common.PreserveOwnerFlagName, common.PreserveOwnerDefault, "Only has an effect in downloads, and only when --preserve-smb-permissions is used. If true (the default), the file Owner and Group are preserved in downloads. If set to false, --preserve-smb-permissions will still preserve ACLs but Owner and Group will be based on the user running AzCopy")
 	cpCmd.PersistentFlags().BoolVar(&raw.preserveSMBInfo, "preserve-smb-info", (runtime.GOOS == "windows"), "Preserves SMB property info (last write time, creation time, attribute bits) between SMB-aware resources (Windows and Azure Files). On windows, this flag will be set to true by default. If the source or destination is a volume mounted on Linux using SMB protocol, this flag will have to be explicitly set to true. Only the attribute bits supported by Azure Files will be transferred; any others will be ignored. This flag applies to both files and folders, unless a file-only filter is specified (e.g. include-pattern). The info transferred for folders is the same as that for files, except for Last Write Time which is never preserved for folders.")
 	cpCmd.PersistentFlags().BoolVar(&raw.preservePOSIXProperties, "preserve-posix-properties", false, "'Preserves' property info gleaned from stat or statx into object metadata.")
+	cpCmd.PersistentFlags().BoolVar(&raw.preserveSymlinks, common.PreserveSymlinkFlagName, false, "If enabled, symlink destinations are preserved as the blob content, rather than uploading the file/folder on the other end of the symlink")
 	cpCmd.PersistentFlags().BoolVar(&raw.forceIfReadOnly, "force-if-read-only", false, "When overwriting an existing file on Windows or Azure Files, force the overwrite to work even if the existing file has its read-only attribute set")
 	cpCmd.PersistentFlags().BoolVar(&raw.backupMode, common.BackupModeFlagName, false, "Activates Windows' SeBackupPrivilege for uploads, or SeRestorePrivilege for downloads, to allow AzCopy to see read all files, regardless of their file system permissions, and to restore all permissions. Requires that the account running AzCopy already has these permissions (e.g. has Administrator rights or is a member of the 'Backup Operators' group). All this flag does is activate privileges that the account already has")
 	cpCmd.PersistentFlags().BoolVar(&raw.putMd5, "put-md5", false, "Create an MD5 hash of each file, and save the hash as the Content-MD5 property of the destination blob or file. (By default the hash is NOT created.) Only available when uploading.")
@@ -2033,20 +2068,20 @@ func init() {
 
 	// permanently hidden
 	// Hide the list-of-files flag since it is implemented only for Storage Explorer.
-	cpCmd.PersistentFlags().MarkHidden("list-of-files")
-	cpCmd.PersistentFlags().MarkHidden("s2s-get-properties-in-backend")
+	_ = cpCmd.PersistentFlags().MarkHidden("list-of-files")
+	_ = cpCmd.PersistentFlags().MarkHidden("s2s-get-properties-in-backend")
 
 	// temp, to assist users with change in param names, by providing a clearer message when these obsolete ones are accidentally used
 	cpCmd.PersistentFlags().StringVar(&raw.legacyInclude, "include", "", "Legacy include param. DO NOT USE")
 	cpCmd.PersistentFlags().StringVar(&raw.legacyExclude, "exclude", "", "Legacy exclude param. DO NOT USE")
-	cpCmd.PersistentFlags().MarkHidden("include")
-	cpCmd.PersistentFlags().MarkHidden("exclude")
+	_ = cpCmd.PersistentFlags().MarkHidden("include")
+	_ = cpCmd.PersistentFlags().MarkHidden("exclude")
 
 	// Hide the flush-threshold flag since it is implemented only for CI.
 	cpCmd.PersistentFlags().Uint32Var(&ste.ADLSFlushThreshold, "flush-threshold", 7500, "Adjust the number of blocks to flush at once on accounts that have a hierarchical namespace.")
-	cpCmd.PersistentFlags().MarkHidden("flush-threshold")
+	_ = cpCmd.PersistentFlags().MarkHidden("flush-threshold")
 
 	// Deprecate the old persist-smb-permissions flag
-	cpCmd.PersistentFlags().MarkHidden("preserve-smb-permissions")
+	_ = cpCmd.PersistentFlags().MarkHidden("preserve-smb-permissions")
 	cpCmd.PersistentFlags().BoolVar(&raw.preservePermissions, PreservePermissionsFlag, false, "False by default. Preserves ACLs between aware resources (Windows and Azure Files, or ADLS Gen 2 to ADLS Gen 2). For Hierarchical Namespace accounts, you will need a container SAS or OAuth token with Modify Ownership and Modify Permissions permissions. For downloads, you will also need the --backup flag to restore permissions where the new Owner will not be the user running AzCopy. This flag applies to both files and folders, unless a file-only filter is specified (e.g. include-pattern).")
 }
diff --git a/cmd/copyEnumeratorHelper.go b/cmd/copyEnumeratorHelper.go
index 83cf6957b..8d7f7c0fd 100644
--- a/cmd/copyEnumeratorHelper.go
+++ b/cmd/copyEnumeratorHelper.go
@@ -3,11 +3,9 @@ package cmd
 import (
 	"fmt"
 	"github.com/Azure/azure-pipeline-go/pipeline"
+	"github.com/Azure/azure-storage-azcopy/v10/common"
 	"github.com/Azure/azure-storage-azcopy/v10/jobsAdmin"
 	"math/rand"
-	"strings"
-
-	"github.com/Azure/azure-storage-azcopy/v10/common"
 )
 
 var EnumerationParallelism = 1
@@ -15,9 +13,7 @@ var EnumerationParallelStatFiles = false
 
 // addTransfer accepts a new transfer, if the threshold is reached, dispatch a job part order.
 func addTransfer(e *common.CopyJobPartOrderRequest, transfer common.CopyTransfer, cca *CookedCopyCmdArgs) error {
-	// Remove the source and destination roots from the path to save space in the plan files
-	transfer.Source = strings.TrimPrefix(transfer.Source, e.SourceRoot.Value)
-	transfer.Destination = strings.TrimPrefix(transfer.Destination, e.DestinationRoot.Value)
+	// Source and destination paths are and should be relative paths.
 
 	// dispatch the transfers once the number reaches NumOfFilesPerDispatchJobPart
 	// we do this so that in the case of large transfer, the transfer engine can get started
@@ -42,13 +38,16 @@ func addTransfer(e *common.CopyJobPartOrderRequest, transfer common.CopyTransfer
 	// only append the transfer after we've checked and dispatched a part
 	// so that there is at least one transfer for the final part
 	{
-		//Should this block be a function?
+		// Should this block be a function?
 		e.Transfers.List = append(e.Transfers.List, transfer)
 		e.Transfers.TotalSizeInBytes += uint64(transfer.SourceSize)
-		if transfer.EntityType == common.EEntityType.File() {
+		switch transfer.EntityType {
+		case common.EEntityType.File():
 			e.Transfers.FileTransferCount++
-		} else {
+		case common.EEntityType.Folder():
 			e.Transfers.FolderTransferCount++
+		case common.EEntityType.Symlink():
+			e.Transfers.SymlinkTransferCount++
 		}
 	}
diff --git a/cmd/copyEnumeratorHelper_test.go b/cmd/copyEnumeratorHelper_test.go
index ba4920388..8ef304997 100644
--- a/cmd/copyEnumeratorHelper_test.go
+++ b/cmd/copyEnumeratorHelper_test.go
@@ -41,23 +41,24 @@ func newRemoteRes(url string) common.ResourceString {
 	return r
 }
 
-func (s *copyEnumeratorHelperTestSuite) TestAddTransferPathRootsTrimmed(c *chk.C) {
+func (s *copyEnumeratorHelperTestSuite) TestRelativePath(c *chk.C) {
 	// setup
-	request := common.CopyJobPartOrderRequest{
-		SourceRoot:      newLocalRes("a/b/"),
-		DestinationRoot: newLocalRes("y/z/"),
+	cca := CookedCopyCmdArgs{
+		Source:      newLocalRes("a/b/"),
+		Destination: newLocalRes("y/z/"),
 	}
 
-	transfer := common.CopyTransfer{
-		Source:      "a/b/c.txt",
-		Destination: "y/z/c.txt",
+	object := StoredObject{
+		name:         "c.txt",
+		entityType:   1,
+		relativePath: "c.txt",
 	}
 
 	// execute
-	err := addTransfer(&request, transfer, &CookedCopyCmdArgs{})
+	srcRelPath := cca.MakeEscapedRelativePath(true, false, false, object)
+	destRelPath := cca.MakeEscapedRelativePath(false, true, false, object)
 
 	// assert
-	c.Assert(err, chk.IsNil)
-	c.Assert(request.Transfers.List[0].Source, chk.Equals, "c.txt")
-	c.Assert(request.Transfers.List[0].Destination, chk.Equals, "c.txt")
+	c.Assert(srcRelPath, chk.Equals, "/c.txt")
+	c.Assert(destRelPath, chk.Equals, "/c.txt")
 }
diff --git a/cmd/copyEnumeratorInit.go b/cmd/copyEnumeratorInit.go
index c3541046e..1a39d94f0 100755
--- a/cmd/copyEnumeratorInit.go
+++ b/cmd/copyEnumeratorInit.go
@@ -62,16 +62,17 @@ func (cca *CookedCopyCmdArgs) initEnumerator(jobPartOrder common.CopyJobPartOrde
 		(cca.FromTo.From() == common.ELocation.File() && !cca.FromTo.To().IsRemote()) || // If it's a download, we still need LMT and MD5 from files.
 		(cca.FromTo.From() == common.ELocation.File() && cca.FromTo.To().IsRemote() && (cca.s2sSourceChangeValidation || cca.IncludeAfter != nil || cca.IncludeBefore != nil)) || // If S2S from File to *, and sourceChangeValidation is enabled, we get properties so that we have LMTs. Likewise, if we are using includeAfter or includeBefore, which require LMTs.
 		(cca.FromTo.From().IsRemote() && cca.FromTo.To().IsRemote() && cca.s2sPreserveProperties && !cca.s2sGetPropertiesInBackend) // If S2S and preserve properties AND get properties in backend is on, turn this off, as properties will be obtained in the backend.
-	jobPartOrder.S2SGetPropertiesInBackend = cca.s2sPreserveProperties && !getRemoteProperties && cca.s2sGetPropertiesInBackend // Infer GetProperties if GetPropertiesInBackend is enabled.
+	jobPartOrder.S2SGetPropertiesInBackend = cca.s2sPreserveProperties && !getRemoteProperties && cca.s2sGetPropertiesInBackend // Infer GetProperties if GetPropertiesInBackend is enabled.
 	jobPartOrder.S2SSourceChangeValidation = cca.s2sSourceChangeValidation
 	jobPartOrder.DestLengthValidation = cca.CheckLength
 	jobPartOrder.S2SInvalidMetadataHandleOption = cca.s2sInvalidMetadataHandleOption
 	jobPartOrder.S2SPreserveBlobTags = cca.S2sPreserveBlobTags
 
 	traverser, err = InitResourceTraverser(cca.Source, cca.FromTo.From(), &ctx, &srcCredInfo,
-		&cca.FollowSymlinks, cca.ListOfFilesChannel, cca.Recursive, getRemoteProperties,
+		cca.SymlinkHandling, cca.ListOfFilesChannel, cca.Recursive, getRemoteProperties,
 		cca.IncludeDirectoryStubs, cca.permanentDeleteOption, func(common.EntityType) {}, cca.ListOfVersionIDs,
-		cca.S2sPreserveBlobTags, common.ESyncHashType.None(), azcopyLogVerbosity.ToPipelineLogLevel(), cca.CpkOptions, nil /* errorChannel */)
+		cca.S2sPreserveBlobTags, common.ESyncHashType.None(), cca.preservePermissions, azcopyLogVerbosity.ToPipelineLogLevel(),
+		cca.CpkOptions, nil /* errorChannel */, cca.StripTopDir)
 
 	if err != nil {
 		return nil, err
@@ -117,8 +118,8 @@ func (cca *CookedCopyCmdArgs) initEnumerator(jobPartOrder common.CopyJobPartOrde
 		return nil, errors.New("cannot use --as-subdir=false with a service level destination")
 	}
 
-	// When copying a container directly to a container, strip the top directory
-	if srcLevel == ELocationLevel.Container() && dstLevel == ELocationLevel.Container() && cca.FromTo.From().IsRemote() && cca.FromTo.To().IsRemote() {
+	// When copying a container directly to a container, strip the top directory, unless we're attempting to persist permissions.
+	if srcLevel == ELocationLevel.Container() && dstLevel == ELocationLevel.Container() && cca.FromTo.From().IsRemote() && cca.FromTo.To().IsRemote() && !cca.preservePermissions.IsTruthy() {
 		cca.StripTopDir = true
 	}
 
@@ -223,7 +224,7 @@ func (cca *CookedCopyCmdArgs) initEnumerator(jobPartOrder common.CopyJobPartOrde
 
 	// decide our folder transfer strategy
 	var message string
-	jobPartOrder.Fpo, message = newFolderPropertyOption(cca.FromTo, cca.Recursive, cca.StripTopDir, filters, cca.preserveSMBInfo, cca.preservePermissions.IsTruthy(), cca.preservePOSIXProperties, cca.isHNStoHNS, strings.EqualFold(cca.Destination.Value, common.Dev_Null), cca.IncludeDirectoryStubs)
+	jobPartOrder.Fpo, message = NewFolderPropertyOption(cca.FromTo, cca.Recursive, cca.StripTopDir, filters, cca.preserveSMBInfo, cca.preservePermissions.IsTruthy(), cca.preservePOSIXProperties, cca.isHNStoHNS, strings.EqualFold(cca.Destination.Value, common.Dev_Null), cca.IncludeDirectoryStubs)
 	if !cca.dryrunMode {
 		glcm.Info(message)
 	}
@@ -265,12 +266,7 @@ func (cca *CookedCopyCmdArgs) initEnumerator(jobPartOrder common.CopyJobPartOrde
 		srcRelPath := cca.MakeEscapedRelativePath(true, isDestDir, cca.asSubdir, object)
 		dstRelPath := cca.MakeEscapedRelativePath(false, isDestDir, cca.asSubdir, object)
 
-		transfer, shouldSendToSte := object.ToNewCopyTransfer(
-			cca.autoDecompress && cca.FromTo.IsDownload(),
-			srcRelPath, dstRelPath,
-			cca.s2sPreserveAccessTier,
-			jobPartOrder.Fpo,
-		)
+		transfer, shouldSendToSte := object.ToNewCopyTransfer(cca.autoDecompress && cca.FromTo.IsDownload(), srcRelPath, dstRelPath, cca.s2sPreserveAccessTier, jobPartOrder.Fpo, cca.SymlinkHandling)
 		if !cca.S2sPreserveBlobTags {
 			transfer.BlobTags = cca.blobTags
 		}
@@ -341,9 +337,10 @@ func (cca *CookedCopyCmdArgs) isDestDirectory(dst common.ResourceString, ctx *co
 		return false
 	}
 
-	rt, err := InitResourceTraverser(dst, cca.FromTo.To(), ctx, &dstCredInfo, nil,
+	rt, err := InitResourceTraverser(dst, cca.FromTo.To(), ctx, &dstCredInfo, common.ESymlinkHandlingType.Skip(), nil,
 		false, false, false, common.EPermanentDeleteOption.None(),
-		func(common.EntityType) {}, cca.ListOfVersionIDs, false, common.ESyncHashType.None(), pipeline.LogNone, cca.CpkOptions, nil /* errorChannel */)
+		func(common.EntityType) {}, cca.ListOfVersionIDs, false, common.ESyncHashType.None(), cca.preservePermissions, pipeline.LogNone,
+		cca.CpkOptions, nil /* errorChannel */, cca.StripTopDir)
 
 	if err != nil {
 		return false
@@ -435,10 +432,11 @@ func (cca *CookedCopyCmdArgs) createDstContainer(containerName string, dstWithSA
 	}
 	existingContainers[containerName] = true
 
-	dstCredInfo := common.CredentialInfo{}
+	var dstCredInfo common.CredentialInfo
 
 	// 3minutes is enough time to list properties of a container, and create new if it does not exist.
-	ctx, _ := context.WithTimeout(parentCtx, time.Minute*3)
+	ctx, cancel := context.WithTimeout(parentCtx, time.Minute*3)
+	defer cancel()
 	if dstCredInfo, _, err = GetCredentialInfoForLocation(ctx, cca.FromTo.To(), cca.Destination.Value, cca.Destination.SAS, false, cca.CpkOptions); err != nil {
 		return err
 	}
@@ -520,7 +518,6 @@ func (cca *CookedCopyCmdArgs) createDstContainer(containerName string, dstWithSA
 	default:
 		panic(fmt.Sprintf("cannot create a destination container at location %s.", cca.FromTo.To()))
 	}
-
 	return
 }
 
@@ -550,7 +547,7 @@ var reverseEncodedChars = map[string]rune{
 }
 
 func pathEncodeRules(path string, fromTo common.FromTo, disableAutoDecoding bool, source bool) string {
-	loc := common.ELocation.Unknown()
+	var loc common.Location
 
 	if source {
 		loc = fromTo.From()
@@ -619,11 +616,6 @@ func (cca *CookedCopyCmdArgs) MakeEscapedRelativePath(source bool, dstIsDir bool
 		return pathEncodeRules(relativePath, cca.FromTo, cca.disableAutoDecoding, source)
 	}
 
-	// user is not placing the source as a subdir
-	if object.isSourceRootFolder() && !asSubdir {
-		relativePath = ""
-	}
-
 	// If it's out here, the object is contained in a folder, or was found via a wildcard, or object.isSourceRootFolder == true
 	if object.isSourceRootFolder() {
 		relativePath = "" // otherwise we get "/" from the line below, and that breaks some clients, e.g. blobFS
@@ -666,7 +658,7 @@ func (cca *CookedCopyCmdArgs) MakeEscapedRelativePath(source bool, dstIsDir bool
 }
 
 // we assume that preserveSmbPermissions and preserveSmbInfo have already been validated, such that they are only true if both resource types support them
-func newFolderPropertyOption(fromTo common.FromTo, recursive, stripTopDir bool, filters []ObjectFilter, preserveSmbInfo, preserveSmbPermissions, preservePosixProperties, isDfsDfs, isDstNull, includeDirectoryStubs bool) (common.FolderPropertyOption, string) {
+func NewFolderPropertyOption(fromTo common.FromTo, recursive, stripTopDir bool, filters []ObjectFilter, preserveSmbInfo, preserveSmbPermissions, preservePosixProperties, isDfsDfs, isDstNull, includeDirectoryStubs bool) (common.FolderPropertyOption, string) {
 
 	getSuffix := func(willProcess bool) string {
 		willProcessString := common.IffString(willProcess, "will be processed", "will not be processed")
diff --git a/cmd/copyEnumeratorInit_test.go b/cmd/copyEnumeratorInit_test.go
index d99d2e5ca..8a66a088c 100644
--- a/cmd/copyEnumeratorInit_test.go
+++ b/cmd/copyEnumeratorInit_test.go
@@ -49,7 +49,7 @@ func (ce *copyEnumeratorSuite) TestValidateSourceDirThatExists(c *chk.C) {
 
 	// List
 	rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, dirName)
-	blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false)
+	blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None())
 
 	// dir but recursive flag not set - fail
 	cca := CookedCopyCmdArgs{StripTopDir: false, Recursive: false}
@@ -78,7 +78,7 @@ func (ce *copyEnumeratorSuite) TestValidateSourceDirDoesNotExist(c *chk.C) {
 
 	// List
 	rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, dirName)
-	blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false)
+	blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None())
 
 	// dir but recursive flag not set - fail
 	cca := CookedCopyCmdArgs{StripTopDir: false, Recursive: false}
@@ -108,7 +108,7 @@ func (ce *copyEnumeratorSuite) TestValidateSourceFileExists(c *chk.C) {
 
 	// List
 	rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, fileName)
-	blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false)
+	blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None())
 
 	cca := CookedCopyCmdArgs{StripTopDir: false, Recursive: false}
 	err := cca.validateSourceDir(blobTraverser)
@@ -131,7 +131,7 @@ func (ce *copyEnumeratorSuite) TestValidateSourceFileDoesNotExist(c *chk.C) {
 
 	// List
 	rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, fileName)
-	blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false)
+	blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None())
 
 	cca := CookedCopyCmdArgs{StripTopDir: false, Recursive: false}
 	err := cca.validateSourceDir(blobTraverser)
@@ -154,7 +154,7 @@ func (ce *copyEnumeratorSuite) TestValidateSourceWithWildCard(c *chk.C) {
 
 	// List
 	rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, dirName)
-	blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false)
+	blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None())
 
 	// dir but recursive flag not set - fail
 	cca := CookedCopyCmdArgs{StripTopDir: true, Recursive: false}
diff --git a/cmd/copyUtil.go b/cmd/copyUtil.go
index f7dc3bdfa..b5b7d3e98 100644
--- a/cmd/copyUtil.go
+++ b/cmd/copyUtil.go
@@ -44,8 +44,6 @@ type copyHandlerUtil struct{}
 // TODO: Need be replaced with anonymous embedded field technique.
 var gCopyUtil = copyHandlerUtil{}
 
-const wildCard = "*"
-
 // checks if a given url points to a container or virtual directory, as opposed to a blob or prefix match
 func (util copyHandlerUtil) urlIsContainerOrVirtualDirectory(url *url.URL) bool {
 	if azblob.NewBlobURLParts(*url).IPEndpointStyleInfo.AccountName == "" {
@@ -159,31 +157,6 @@ func (util copyHandlerUtil) urlIsAzureFileDirectory(ctx context.Context, url *ur
 	return true
 }
 
-// append a file name to the container path to generate a blob path
-func (copyHandlerUtil) generateObjectPath(destinationPath, fileName string) string {
-	if strings.LastIndex(destinationPath, "/") == len(destinationPath)-1 {
-		return fmt.Sprintf("%s%s", destinationPath, fileName)
-	}
-	return fmt.Sprintf("%s/%s", destinationPath, fileName)
-}
-
-func (util copyHandlerUtil) getBlobNameFromURL(path string) string {
-	// return everything after the second /
-	return strings.SplitAfterN(path[1:], common.AZCOPY_PATH_SEPARATOR_STRING, 2)[1]
-}
-
-func (util copyHandlerUtil) firstIndexOfWildCard(name string) int {
-	return strings.Index(name, wildCard)
-}
-func (util copyHandlerUtil) getContainerURLFromString(url url.URL) url.URL {
-	blobParts := azblob.NewBlobURLParts(url)
-	blobParts.BlobName = ""
-	return blobParts.URL()
-	//containerName := strings.SplitAfterN(url.Path[1:], "/", 2)[0]
-	//url.Path = "/" + containerName
-	//return url
-}
-
 func (util copyHandlerUtil) getContainerUrl(blobParts azblob.BlobURLParts) url.URL {
 	blobParts.BlobName = ""
 	return blobParts.URL()
diff --git a/cmd/credentialUtil.go b/cmd/credentialUtil.go
index a5692ead3..06948b92a 100644
--- a/cmd/credentialUtil.go
+++ b/cmd/credentialUtil.go
@@ -342,7 +342,7 @@ func oAuthTokenExists() (oauthTokenExists bool) {
 
 	if hasCachedToken, err := uotm.HasCachedToken(); hasCachedToken {
 		oauthTokenExists = true
-	} else if err != nil {
+	} else if err != nil { //nolint:staticcheck
 		// Log the error if fail to get cached token, as these are unhandled errors, and should not influence the logic flow.
 		// Uncomment for debugging.
 		// glcm.Info(fmt.Sprintf("No cached token found, %v", err))
@@ -357,10 +357,6 @@ func getAzureFileCredentialType() (common.CredentialType, error) {
 	return common.ECredentialType.Anonymous(), nil
 }
 
-// envVarCredentialType used for passing credential type into AzCopy through environment variable.
-// Note: This is only used for internal integration, and not encouraged to be used directly.
-const envVarCredentialType = "AZCOPY_CRED_TYPE"
-
 var stashedEnvCredType = ""
 
 // GetCredTypeFromEnvVar tries to get credential type from environment variable defined by envVarCredentialType.
@@ -490,10 +486,9 @@ func checkAuthSafeForTarget(ct common.CredentialType, resource, extraSuffixesAAD
 			return fmt.Errorf("Google Application Credentials to %s is not valid", resourceType.String())
 		}
 
-		host := ""
 		u, err := url.Parse(resource)
 		if err == nil {
-			host = u.Host
+			host := u.Host
 			_, err := common.NewGCPURLParts(*u)
 			if err != nil {
 				return fmt.Errorf("GCP authentication to %s is not currently supported", host)
@@ -614,9 +609,6 @@ func GetCredentialInfoForLocation(ctx context.Context, location common.Location,
 		} else {
 			credInfo.OAuthTokenInfo = *tokenInfo
 		}
-	} else if credInfo.CredentialType == common.ECredentialType.S3AccessKey() || credInfo.CredentialType == common.ECredentialType.S3PublicBucket() {
-		// nothing to do here. The extra fields for S3 are fleshed out at the time
-		// we make the S3Client
 	}
 
 	return
diff --git a/cmd/helpMessages.go b/cmd/helpMessages.go
index 52d208dbe..b60db2fa9 100644
--- a/cmd/helpMessages.go
+++ b/cmd/helpMessages.go
@@ -130,6 +130,10 @@ Download all the versions of a blob from Azure Storage to local directory. Ensur
 
   - azcopy cp "https://[srcaccount].blob.core.windows.net/[containername]/[blobname]" "/path/to/dir" --list-of-versions="/another/path/to/dir/[versionidsFile]"
 
+Copy a subset of files within a flat container by using a wildcard symbol (*) in the container name without listing all files in the container.
+
+  - azcopy cp "https://[srcaccount].blob.core.windows.net/[containername]/*" "/path/to/dir" --include-pattern="1*"
+
 Copy a single blob to another blob by using a SAS token.
 
   - azcopy cp "https://[srcaccount].blob.core.windows.net/[container]/[path/to/blob]?[SAS]" "https://[destaccount].blob.core.windows.net/[container]/[path/to/blob]?[SAS]"
@@ -254,7 +258,7 @@ const listCmdShortDescription = "List the entities in a given resource"
 const listCmdLongDescription = `List the entities in a given resource. Blob, Files, and ADLS Gen 2 containers, folders, and accounts are supported.`
 
 const listCmdExample = "azcopy list [containerURL] --properties [semicolon(;) separated list of attributes " +
-	"(LastModifiedTime, VersionId, BlobType, BlobAccessTier, ContentType, ContentEncoding, LeaseState, LeaseDuration, LeaseStatus) " +
+	"(LastModifiedTime, VersionId, BlobType, BlobAccessTier, ContentType, ContentEncoding, ContentMD5, LeaseState, LeaseDuration, LeaseStatus) " +
 	"enclosed in double quotes (\")]"
 
 // ===================================== LOGIN COMMAND ===================================== //
@@ -379,7 +383,7 @@ Remove a single directory from a Blob Storage account that has a hierarchical na
 const syncCmdShortDescription = "Replicate source to the destination location"
 
 const syncCmdLongDescription = `
-The last modified times are used for comparison. The file is skipped if the last modified time in the destination is more recent. The supported pairs are:
+The last modified times are used for comparison. The file is skipped if the last modified time in the destination is more recent. Alternatively, you can use the --compare-hash flag to transfer only files which differ in their MD5 hash. The supported pairs are:
 
 - Local <-> Azure Blob / Azure File (either SAS or OAuth authentication can be used)
 - Azure Blob <-> Azure Blob (Source must include a SAS or is publicly accessible; either SAS or OAuth authentication can be used for destination)
@@ -404,7 +408,7 @@ The built-in lookup table is small but on Unix it is augmented by the local syst
 
 On Windows, MIME types are extracted from the registry.
 
-Please also note that sync works off of the last modified times exclusively. So in the case of Azure File <-> Azure File,
+By default, sync works off of the last modified times unless you override that default behavior by using the --compare-hash flag. So in the case of Azure File <-> Azure File,
 the header field Last-Modified is used instead of x-ms-file-change-time, which means that metadata changes at the source can also trigger a full copy.
 `
diff --git a/cmd/jobsClean.go b/cmd/jobsClean.go
index a9ffe2027..21ae47f55 100644
--- a/cmd/jobsClean.go
+++ b/cmd/jobsClean.go
@@ -61,7 +61,7 @@ func init() {
 			if err == nil {
 				if withStatus == common.EJobStatus.All() {
 					glcm.Exit(func(format common.OutputFormat) string {
-						return fmt.Sprintf("Successfully removed all jobs.")
+						return "Successfully removed all jobs."
 					}, common.EExitCode.Success())
 				} else {
 					glcm.Exit(func(format common.OutputFormat) string {
diff --git a/cmd/jobsRemove.go b/cmd/jobsRemove.go
index 84d9a0124..1fade4fe7 100644
--- a/cmd/jobsRemove.go
+++ b/cmd/jobsRemove.go
@@ -23,7 +23,6 @@ package cmd
 import (
 	"errors"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path"
 	"strings"
@@ -108,7 +107,7 @@ func handleRemoveSingleJob(jobID common.JobID) error {
 // remove all files whose names are approved by the predicate in the targetFolder
 func removeFilesWithPredicate(targetFolder string, predicate func(string) bool) (int, error) {
 	count := 0
-	files, err := ioutil.ReadDir(targetFolder)
+	files, err := os.ReadDir(targetFolder)
 	if err != nil {
 		return count, err
 	}
diff --git a/cmd/jobsResume.go b/cmd/jobsResume.go
index 97edac966..be9d91280 100644
--- a/cmd/jobsResume.go
+++ b/cmd/jobsResume.go
@@ -93,7 +93,7 @@ func (cca *resumeJobController) ReportProgressOrExit(lcm common.LifecycleMgr) (t
 	totalKnownCount = summary.TotalTransfers
 
 	// if json is not desired, and job is done, then we generate a special end message to conclude the job
-	duration := time.Now().Sub(cca.jobStartTime) // report the total run time of the job
+	duration := time.Since(cca.jobStartTime) // report the total run time of the job
 
 	var computeThroughput = func() float64 {
 		// compute the average throughput for the last time interval
@@ -158,6 +158,7 @@ Job %s summary
 Elapsed Time (Minutes): %v
 Number of File Transfers: %v
 Number of Folder Property Transfers: %v
+Number of Symlink Transfers: %v
 Total Number Of Transfers: %v
 Number of File Transfers Completed: %v
 Number of Folder Transfers Completed: %v
@@ -172,6 +173,7 @@ Final Job Status: %v
 		jobsAdmin.ToFixed(duration.Minutes(), 4),
 		summary.FileTransfers,
 		summary.FolderPropertyTransfers,
+		summary.SymlinkTransfers,
 		summary.TotalTransfers,
 		summary.TransfersCompleted-summary.FoldersCompleted,
 		summary.FoldersCompleted,
diff --git a/cmd/jobsShow.go b/cmd/jobsShow.go
index 921db7942..05b7dd525 100644
--- a/cmd/jobsShow.go
+++ b/cmd/jobsShow.go
@@ -79,10 +79,9 @@ func init() {
 // handles the list command
 // dispatches the list order to the transfer engine
 func HandleShowCommand(listRequest common.ListRequest) error {
-	rpcCmd := common.ERpcCmd.None()
 	if listRequest.OfStatus == "" {
 		resp := common.ListJobSummaryResponse{}
-		rpcCmd = common.ERpcCmd.ListJobSummary()
common.ERpcCmd.ListJobSummary() + rpcCmd := common.ERpcCmd.ListJobSummary() Rpc(rpcCmd, &listRequest.JobID, &resp) PrintJobProgressSummary(resp) } else { @@ -95,7 +94,7 @@ func HandleShowCommand(listRequest common.ListRequest) error { return fmt.Errorf("cannot parse the given Transfer Status %s", listRequest.OfStatus) } resp := common.ListJobTransfersResponse{} - rpcCmd = common.ERpcCmd.ListJobTransfers() + rpcCmd := common.ERpcCmd.ListJobTransfers() Rpc(rpcCmd, lsRequest, &resp) PrintJobTransfers(resp) } @@ -151,6 +150,7 @@ func PrintJobProgressSummary(summary common.ListJobSummaryResponse) { Job %s summary Number of File Transfers: %v Number of Folder Property Transfers: %v +Number of Symlink Transfers: %v Total Number Of Transfers: %v Number of File Transfers Completed: %v Number of Folder Transfers Completed: %v @@ -164,6 +164,7 @@ Final Job Status: %v summary.JobID.String(), summary.FileTransfers, summary.FolderPropertyTransfers, + summary.SymlinkTransfers, summary.TotalTransfers, summary.TransfersCompleted-summary.FoldersCompleted, summary.FoldersCompleted, diff --git a/cmd/list.go b/cmd/list.go index ef2f14b75..919e9c933 100755 --- a/cmd/list.go +++ b/cmd/list.go @@ -26,6 +26,7 @@ import ( "fmt" "strconv" "strings" + "encoding/base64" "github.com/Azure/azure-pipeline-go/pipeline" @@ -54,6 +55,7 @@ const ( blobAccessTier validProperty = "BlobAccessTier" contentType validProperty = "ContentType" contentEncoding validProperty = "ContentEncoding" + contentMD5 validProperty = "ContentMD5" leaseState validProperty = "LeaseState" leaseDuration validProperty = "LeaseDuration" leaseStatus validProperty = "LeaseStatus" @@ -63,7 +65,7 @@ const ( // validProperties returns an array of possible values for the validProperty const type. func validProperties() []validProperty { return []validProperty{lastModifiedTime, versionId, blobType, blobAccessTier, - contentType, contentEncoding, leaseState, leaseDuration, leaseStatus, archiveStatus} + contentType, contentEncoding, contentMD5, leaseState, leaseDuration, leaseStatus, archiveStatus} } func (raw *rawListCmdArgs) parseProperties(rawProperties string) []validProperty { @@ -177,6 +179,8 @@ func (cooked cookedListCmdArgs) processProperties(object StoredObject) string { builder.WriteString(propertyStr + ": " + object.contentType + "; ") case contentEncoding: builder.WriteString(propertyStr + ": " + object.contentEncoding + "; ") + case contentMD5: + builder.WriteString(propertyStr + ": " + base64.StdEncoding.EncodeToString(object.md5) + "; ") case leaseState: builder.WriteString(propertyStr + ": " + string(object.leaseState) + "; ") case leaseStatus: @@ -202,6 +206,10 @@ func (cooked cookedListCmdArgs) HandleListContainerCommand() (err error) { return err } + if err := common.VerifyIsURLResolvable(raw.sourcePath); cooked.location.IsRemote() && err != nil { + return fmt.Errorf("failed to resolve target: %w", err) + } + level, err := DetermineLocationLevel(source.Value, cooked.location, true) if err != nil { @@ -222,9 +230,10 @@ func (cooked cookedListCmdArgs) HandleListContainerCommand() (err error) { } } - traverser, err := InitResourceTraverser(source, cooked.location, &ctx, &credentialInfo, nil, nil, + traverser, err := InitResourceTraverser(source, cooked.location, &ctx, &credentialInfo, common.ESymlinkHandlingType.Skip(), nil, true, false, false, common.EPermanentDeleteOption.None(), func(common.EntityType) {}, - nil, false, common.ESyncHashType.None(), pipeline.LogNone, common.CpkOptions{}, nil /* errorChannel */) + nil, false, 
common.ESyncHashType.None(), common.EPreservePermissionsOption.None(), + pipeline.LogNone, common.CpkOptions{}, nil /* errorChannel */, false) if err != nil { return fmt.Errorf("failed to initialize traverser: %s", err.Error()) diff --git a/cmd/login.go b/cmd/login.go index 1c45f742c..bf122dabd 100644 --- a/cmd/login.go +++ b/cmd/login.go @@ -105,11 +105,6 @@ type loginCmdArgs struct { persistToken bool } -type argValidity struct { - Required string - Invalid string -} - func (lca loginCmdArgs) validate() error { // Only support one kind of oauth login at same time. switch { diff --git a/cmd/make.go b/cmd/make.go index b9c877a55..db42b8cf1 100644 --- a/cmd/make.go +++ b/cmd/make.go @@ -77,6 +77,10 @@ func (cookedArgs cookedMakeCmdArgs) process() (err error) { return err } + if err := common.VerifyIsURLResolvable(resourceStringParts.Value); cookedArgs.resourceLocation.IsRemote() && err != nil { + return fmt.Errorf("failed to resolve target: %w", err) + } + credentialInfo, _, err := GetCredentialInfoForLocation(ctx, cookedArgs.resourceLocation, resourceStringParts.Value, resourceStringParts.SAS, false, common.CpkOptions{}) if err != nil { return err diff --git a/cmd/pathUtils.go b/cmd/pathUtils.go index 7cbaa3f57..8d5bad4bb 100644 --- a/cmd/pathUtils.go +++ b/cmd/pathUtils.go @@ -315,7 +315,7 @@ func splitQueryFromSaslessResource(resource string, loc common.Location) (mainUr // All of the below functions only really do one thing at the moment. // They've been separated from copyEnumeratorInit.go in order to make the code more maintainable, should we want more destinations in the future. func getPathBeforeFirstWildcard(path string) string { - if strings.Index(path, "*") == -1 { + if !strings.Contains(path, "*") { return path } diff --git a/cmd/removeEnumerator.go b/cmd/removeEnumerator.go index 71f6e9c85..3894186d4 100755 --- a/cmd/removeEnumerator.go +++ b/cmd/removeEnumerator.go @@ -49,9 +49,10 @@ func newRemoveEnumerator(cca *CookedCopyCmdArgs) (enumerator *CopyEnumerator, er // Include-path is handled by ListOfFilesChannel. sourceTraverser, err = InitResourceTraverser(cca.Source, cca.FromTo.From(), &ctx, &cca.credentialInfo, - nil, cca.ListOfFilesChannel, cca.Recursive, false, cca.IncludeDirectoryStubs, + common.ESymlinkHandlingType.Skip(), cca.ListOfFilesChannel, cca.Recursive, false, cca.IncludeDirectoryStubs, cca.permanentDeleteOption, func(common.EntityType) {}, cca.ListOfVersionIDs, false, - common.ESyncHashType.None(), azcopyLogVerbosity.ToPipelineLogLevel(), cca.CpkOptions, nil /* errorChannel */) + common.ESyncHashType.None(), common.EPreservePermissionsOption.None(), azcopyLogVerbosity.ToPipelineLogLevel(), + cca.CpkOptions, nil /* errorChannel */, cca.StripTopDir) // report failure to create traverser if err != nil { @@ -79,7 +80,7 @@ func newRemoveEnumerator(cca *CookedCopyCmdArgs) (enumerator *CopyEnumerator, er // (Must enumerate folders when deleting from a folder-aware location. Can't do folder deletion just based on file // deletion, because that would not handle folders that were empty at the start of the job). // isHNStoHNS is IGNORED here, because BlobFS locations don't take this route currently. 
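	// Worked example of the point above (illustrative only): given a share holding
	// "dir1/file.txt" and an empty "dir2/", a purely file-driven delete would remove
	// file.txt (and dir1 once emptied), but dir2 would never be visited at all.
	// Enumerating folders as transfers in their own right is what lets the job
	// delete dir2 too.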
- fpo, message := newFolderPropertyOption(cca.FromTo, cca.Recursive, cca.StripTopDir, filters, false, false, false, false, false, cca.IncludeDirectoryStubs) + fpo, message := NewFolderPropertyOption(cca.FromTo, cca.Recursive, cca.StripTopDir, filters, false, false, false, false, false, cca.IncludeDirectoryStubs) // do not print Info message if in dry run mode if !cca.dryrunMode { glcm.Info(message) @@ -317,7 +318,7 @@ func removeSingleBfsResource(ctx context.Context, urlParts azbfs.BfsURLParts, p for _, v := range listResp.Paths { entityType := "directory" - if v.IsDirectory == nil || *v.IsDirectory == false { + if v.IsDirectory == nil || !*v.IsDirectory { entityType = "file" } diff --git a/cmd/removeProcessor.go b/cmd/removeProcessor.go index ea95d2535..55f318f78 100644 --- a/cmd/removeProcessor.go +++ b/cmd/removeProcessor.go @@ -27,13 +27,14 @@ import ( // extract the right info from cooked arguments and instantiate a generic copy transfer processor from it func newRemoveTransferProcessor(cca *CookedCopyCmdArgs, numOfTransfersPerPart int, fpo common.FolderPropertyOption) *copyTransferProcessor { copyJobTemplate := &common.CopyJobPartOrderRequest{ - JobID: cca.jobID, - CommandString: cca.commandString, - FromTo: cca.FromTo, - Fpo: fpo, - SourceRoot: cca.Source.CloneWithConsolidatedSeparators(), // TODO: why do we consolidate here, but not in "copy"? Is it needed in both places or neither? Or is copy just covering the same need differently? - CredentialInfo: cca.credentialInfo, - ForceIfReadOnly: cca.ForceIfReadOnly, + JobID: cca.jobID, + CommandString: cca.commandString, + FromTo: cca.FromTo, + Fpo: fpo, + SymlinkHandlingType: common.ESymlinkHandlingType.Preserve(), // We want to delete symlinks + SourceRoot: cca.Source.CloneWithConsolidatedSeparators(), // TODO: why do we consolidate here, but not in "copy"? Is it needed in both places or neither? Or is copy just covering the same need differently? + CredentialInfo: cca.credentialInfo, + ForceIfReadOnly: cca.ForceIfReadOnly, // flags LogLevel: azcopyLogVerbosity, diff --git a/cmd/root.go b/cmd/root.go index f6c1543f8..ee62aba1e 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -227,12 +227,12 @@ func init() { rootCmd.PersistentFlags().StringVar(&debugSkipFiles, "debug-skip-files", "", "Used when debugging, to tell AzCopy to cancel the job midway. List of relative paths to skip in the STE.") // reserved for partner teams - rootCmd.PersistentFlags().MarkHidden("cancel-from-stdin") + _ = rootCmd.PersistentFlags().MarkHidden("cancel-from-stdin") // debug-only - rootCmd.PersistentFlags().MarkHidden("await-continue") - rootCmd.PersistentFlags().MarkHidden("await-open") - rootCmd.PersistentFlags().MarkHidden("debug-skip-files") + _ = rootCmd.PersistentFlags().MarkHidden("await-continue") + _ = rootCmd.PersistentFlags().MarkHidden("await-open") + _ = rootCmd.PersistentFlags().MarkHidden("debug-skip-files") } // always spins up a new goroutine, because sometimes the aka.ms URL can't be reached (e.g. 
a constrained environment where diff --git a/cmd/rpc.go b/cmd/rpc.go index 482c8b655..49f4967d2 100644 --- a/cmd/rpc.go +++ b/cmd/rpc.go @@ -21,14 +21,9 @@ package cmd import ( - "bytes" - "encoding/json" "fmt" - "github.com/Azure/azure-storage-azcopy/v10/jobsAdmin" - "io/ioutil" - "net/http" - "github.com/Azure/azure-storage-azcopy/v10/common" + "github.com/Azure/azure-storage-azcopy/v10/jobsAdmin" ) // Global singleton for sending RPC requests from the frontend to the STE @@ -55,9 +50,8 @@ func inprocSend(rpcCmd common.RpcCmd, requestData interface{}, responseData inte case common.ERpcCmd.ListJobTransfers(): *(responseData.(*common.ListJobTransfersResponse)) = jobsAdmin.ListJobTransfers(requestData.(common.ListJobTransfersRequest)) - case common.ERpcCmd.PauseJob(): - responseData = jobsAdmin.CancelPauseJobOrder(requestData.(common.JobID), common.EJobStatus.Paused()) + *(responseData.(*common.CancelPauseResumeResponse)) = jobsAdmin.CancelPauseJobOrder(requestData.(common.JobID), common.EJobStatus.Paused()) case common.ERpcCmd.CancelJob(): *(responseData.(*common.CancelPauseResumeResponse)) = jobsAdmin.CancelPauseJobOrder(requestData.(common.JobID), common.EJobStatus.Cancelling()) @@ -73,50 +67,3 @@ func inprocSend(rpcCmd common.RpcCmd, requestData interface{}, responseData inte } return nil } - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -// NewHttpClient returns the instance of struct containing an instance of http.client and url -func NewHttpClient(url string) *HTTPClient { - return &HTTPClient{ - client: &http.Client{}, - url: url, - } -} - -// todo : use url in case of string -type HTTPClient struct { - client *http.Client - url string -} - -// Send method on HttpClient sends the data passed in the interface for given command type to the client url -func (httpClient *HTTPClient) send(rpcCmd common.RpcCmd, requestData interface{}, responseData interface{}) error { - // Create HTTP request with command in query parameter & request data as JSON payload - requestJson, err := json.Marshal(requestData) - if err != nil { - return fmt.Errorf("error marshalling request payload for command type %q", rpcCmd.String()) - } - request, err := http.NewRequest("POST", httpClient.url, bytes.NewReader(requestJson)) - // adding the commandType as a query param - q := request.URL.Query() - q.Add("commandType", rpcCmd.String()) - request.URL.RawQuery = q.Encode() - - response, err := httpClient.client.Do(request) - if err != nil { - return err - } - - // Read response data, deserialize it and return it (via out responseData parameter) & error - responseJson, err := ioutil.ReadAll(response.Body) - response.Body.Close() - if err != nil { - return fmt.Errorf("error reading response for the request") - } - err = json.Unmarshal(responseJson, responseData) - common.PanicIfErr(err) - return nil -} - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/cmd/setPropertiesEnumerator.go b/cmd/setPropertiesEnumerator.go index d2e35afd9..29058c878 100755 --- a/cmd/setPropertiesEnumerator.go +++ b/cmd/setPropertiesEnumerator.go @@ -37,7 +37,7 @@ func setPropertiesEnumerator(cca *CookedCopyCmdArgs) (enumerator *CopyEnumerator ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) - srcCredInfo := common.CredentialInfo{} + var srcCredInfo common.CredentialInfo if srcCredInfo, _, err = GetCredentialInfoForLocation(ctx, 
cca.FromTo.From(), cca.Source.Value, cca.Source.SAS, true, cca.CpkOptions); err != nil { return nil, err @@ -48,9 +48,11 @@ func setPropertiesEnumerator(cca *CookedCopyCmdArgs) (enumerator *CopyEnumerator // Include-path is handled by ListOfFilesChannel. sourceTraverser, err = InitResourceTraverser(cca.Source, cca.FromTo.From(), &ctx, &cca.credentialInfo, - nil, cca.ListOfFilesChannel, cca.Recursive, false, cca.IncludeDirectoryStubs, + common.ESymlinkHandlingType.Preserve(), // preserve because we want to index all blobs, including symlink blobs + cca.ListOfFilesChannel, cca.Recursive, false, cca.IncludeDirectoryStubs, cca.permanentDeleteOption, func(common.EntityType) {}, cca.ListOfVersionIDs, false, - common.ESyncHashType.None(), azcopyLogVerbosity.ToPipelineLogLevel(), cca.CpkOptions, nil /* errorChannel */) + common.ESyncHashType.None(), common.EPreservePermissionsOption.None(), azcopyLogVerbosity.ToPipelineLogLevel(), + cca.CpkOptions, nil /* errorChannel */, cca.StripTopDir) // report failure to create traverser if err != nil { @@ -67,7 +69,7 @@ func setPropertiesEnumerator(cca *CookedCopyCmdArgs) (enumerator *CopyEnumerator filters = append(filters, excludePathFilters...) filters = append(filters, includeSoftDelete...) - fpo, message := newFolderPropertyOption(cca.FromTo, cca.Recursive, cca.StripTopDir, filters, false, false, false, cca.isHNStoHNS, strings.EqualFold(cca.Destination.Value, common.Dev_Null), cca.IncludeDirectoryStubs) + fpo, message := NewFolderPropertyOption(cca.FromTo, cca.Recursive, cca.StripTopDir, filters, false, false, false, cca.isHNStoHNS, strings.EqualFold(cca.Destination.Value, common.Dev_Null), cca.IncludeDirectoryStubs) // do not print Info message if in dry run mode if !cca.dryrunMode { glcm.Info(message) diff --git a/cmd/setPropertiesProcessor.go b/cmd/setPropertiesProcessor.go index 721d1b1a6..3a912d2c8 100644 --- a/cmd/setPropertiesProcessor.go +++ b/cmd/setPropertiesProcessor.go @@ -26,13 +26,14 @@ import ( func setPropertiesTransferProcessor(cca *CookedCopyCmdArgs, numOfTransfersPerPart int, fpo common.FolderPropertyOption) *copyTransferProcessor { copyJobTemplate := &common.CopyJobPartOrderRequest{ - JobID: cca.jobID, - CommandString: cca.commandString, - FromTo: cca.FromTo, - Fpo: fpo, - SourceRoot: cca.Source.CloneWithConsolidatedSeparators(), - CredentialInfo: cca.credentialInfo, - ForceIfReadOnly: cca.ForceIfReadOnly, + JobID: cca.jobID, + CommandString: cca.commandString, + FromTo: cca.FromTo, + Fpo: fpo, + SymlinkHandlingType: common.ESymlinkHandlingType.Preserve(), // we want to set properties on symlink blobs + SourceRoot: cca.Source.CloneWithConsolidatedSeparators(), + CredentialInfo: cca.credentialInfo, + ForceIfReadOnly: cca.ForceIfReadOnly, // flags LogLevel: azcopyLogVerbosity, diff --git a/cmd/sync.go b/cmd/sync.go index 7bfa922a2..32fa68b7f 100644 --- a/cmd/sync.go +++ b/cmd/sync.go @@ -64,6 +64,7 @@ type rawSyncCmdArgs struct { preserveSMBInfo bool preservePOSIXProperties bool followSymlinks bool + preserveSymlinks bool backupMode bool putMd5 bool md5ValidationOption string @@ -212,8 +213,10 @@ func (raw *rawSyncCmdArgs) cook() (cookedSyncCmdArgs, error) { return cooked, err } - cooked.followSymlinks = raw.followSymlinks - if err = crossValidateSymlinksAndPermissions(cooked.followSymlinks, true /* replace with real value when available */); err != nil { + if err = cooked.symlinkHandling.Determine(raw.followSymlinks, raw.preserveSymlinks); err != nil { + return cooked, err + } + if err = 
crossValidateSymlinksAndPermissions(cooked.symlinkHandling, true /* replace with real value when available */); err != nil { return cooked, err } cooked.recursive = raw.recursive @@ -386,7 +389,7 @@ type cookedSyncCmdArgs struct { // filters recursive bool - followSymlinks bool + symlinkHandling common.SymlinkHandlingType includePatterns []string excludePatterns []string excludePaths []string @@ -559,7 +562,7 @@ func (cca *cookedSyncCmdArgs) getJsonOfSyncJobSummary(summary common.ListJobSumm } func (cca *cookedSyncCmdArgs) ReportProgressOrExit(lcm common.LifecycleMgr) (totalKnownCount uint32) { - duration := time.Now().Sub(cca.jobStartTime) // report the total run time of the job + duration := time.Since(cca.jobStartTime) // report the total run time of the job var summary common.ListJobSummaryResponse var throughput float64 var jobDone bool @@ -668,6 +671,14 @@ func (cca *cookedSyncCmdArgs) process() (err error) { return err } + if err := common.VerifyIsURLResolvable(cca.source.Value); cca.fromTo.From().IsRemote() && err != nil { + return fmt.Errorf("failed to resolve source: %w", err) + } + + if err := common.VerifyIsURLResolvable(cca.destination.Value); cca.fromTo.To().IsRemote() && err != nil { + return fmt.Errorf("failed to resolve destination: %w", err) + } + // Verifies credential type and initializes credential info. // Note that this is for the destination. cca.credentialInfo, _, err = GetCredentialInfoForLocation(ctx, cca.fromTo.To(), cca.destination.Value, cca.destination.SAS, false, cca.cpkOptions) @@ -810,8 +821,8 @@ func init() { // temp, to assist users with change in param names, by providing a clearer message when these obsolete ones are accidentally used syncCmd.PersistentFlags().StringVar(&raw.legacyInclude, "include", "", "Legacy include param. DO NOT USE") syncCmd.PersistentFlags().StringVar(&raw.legacyExclude, "exclude", "", "Legacy exclude param. DO NOT USE") - syncCmd.PersistentFlags().MarkHidden("include") - syncCmd.PersistentFlags().MarkHidden("exclude") + _ = syncCmd.PersistentFlags().MarkHidden("include") + _ = syncCmd.PersistentFlags().MarkHidden("exclude") // TODO follow sym link is not implemented, clarify behavior first // syncCmd.PersistentFlags().BoolVar(&raw.followSymlinks, "follow-symlinks", false, "follow symbolic links when performing sync from local file system.") @@ -819,6 +830,6 @@ func init() { // TODO sync does not support all BlobAttributes on the command line, this functionality should be added // Deprecate the old persist-smb-permissions flag - syncCmd.PersistentFlags().MarkHidden("preserve-smb-permissions") + _ = syncCmd.PersistentFlags().MarkHidden("preserve-smb-permissions") syncCmd.PersistentFlags().BoolVar(&raw.preservePermissions, PreservePermissionsFlag, false, "False by default. Preserves ACLs between aware resources (Windows and Azure Files, or ADLS Gen 2 to ADLS Gen 2). For Hierarchical Namespace accounts, you will need a container SAS or OAuth token with Modify Ownership and Modify Permissions permissions. For downloads, you will also need the --backup flag to restore permissions where the new Owner will not be the user running AzCopy. This flag applies to both files and folders, unless a file-only filter is specified (e.g. 
include-pattern).") } diff --git a/cmd/syncEnumerator.go b/cmd/syncEnumerator.go index 1a8898ad6..bc947d520 100644 --- a/cmd/syncEnumerator.go +++ b/cmd/syncEnumerator.go @@ -60,12 +60,13 @@ func (cca *cookedSyncCmdArgs) initEnumerator(ctx context.Context) (enumerator *s // TODO: Consider passing an errorChannel so that enumeration errors during sync can be conveyed to the caller. // GetProperties is enabled by default as sync supports both upload and download. // This property only supports Files and S3 at the moment, but provided that Files sync is coming soon, enable to avoid stepping on Files sync work - sourceTraverser, err := InitResourceTraverser(cca.source, cca.fromTo.From(), &ctx, &srcCredInfo, nil, + sourceTraverser, err := InitResourceTraverser(cca.source, cca.fromTo.From(), &ctx, &srcCredInfo, common.ESymlinkHandlingType.Skip(), nil, cca.recursive, true, cca.isHNSToHNS, common.EPermanentDeleteOption.None(), func(entityType common.EntityType) { if entityType == common.EEntityType.File() { atomic.AddUint64(&cca.atomicSourceFilesScanned, 1) } - }, nil, cca.s2sPreserveBlobTags, cca.compareHash, azcopyLogVerbosity.ToPipelineLogLevel(), cca.cpkOptions, nil /* errorChannel */) + }, nil, cca.s2sPreserveBlobTags, cca.compareHash, cca.preservePermissions, azcopyLogVerbosity.ToPipelineLogLevel(), + cca.cpkOptions, nil /* errorChannel */, false) if err != nil { return nil, err @@ -82,11 +83,12 @@ func (cca *cookedSyncCmdArgs) initEnumerator(ctx context.Context) (enumerator *s // TODO: enable symlink support in a future release after evaluating the implications // GetProperties is enabled by default as sync supports both upload and download. // This property only supports Files and S3 at the moment, but provided that Files sync is coming soon, enable to avoid stepping on Files sync work - destinationTraverser, err := InitResourceTraverser(cca.destination, cca.fromTo.To(), &ctx, &dstCredInfo, nil, nil, cca.recursive, true, cca.isHNSToHNS, common.EPermanentDeleteOption.None(), func(entityType common.EntityType) { + destinationTraverser, err := InitResourceTraverser(cca.destination, cca.fromTo.To(), &ctx, &dstCredInfo, common.ESymlinkHandlingType.Skip(), nil, cca.recursive, true, cca.isHNSToHNS, common.EPermanentDeleteOption.None(), func(entityType common.EntityType) { if entityType == common.EEntityType.File() { atomic.AddUint64(&cca.atomicDestinationFilesScanned, 1) } - }, nil, cca.s2sPreserveBlobTags, cca.compareHash, azcopyLogVerbosity.ToPipelineLogLevel(), cca.cpkOptions, nil /* errorChannel */) + }, nil, cca.s2sPreserveBlobTags, cca.compareHash, cca.preservePermissions, azcopyLogVerbosity.ToPipelineLogLevel(), + cca.cpkOptions, nil /* errorChannel */, false) if err != nil { return nil, err } @@ -129,7 +131,7 @@ func (cca *cookedSyncCmdArgs) initEnumerator(ctx context.Context) (enumerator *s } // decide our folder transfer strategy - fpo, folderMessage := newFolderPropertyOption(cca.fromTo, cca.recursive, true, filters, cca.preserveSMBInfo, cca.preservePermissions.IsTruthy(), false, cca.isHNSToHNS, strings.EqualFold(cca.destination.Value, common.Dev_Null), false) // sync always acts like stripTopDir=true + fpo, folderMessage := NewFolderPropertyOption(cca.fromTo, cca.recursive, true, filters, cca.preserveSMBInfo, cca.preservePermissions.IsTruthy(), false, cca.isHNSToHNS, strings.EqualFold(cca.destination.Value, common.Dev_Null), false) // sync always acts like stripTopDir=true if !cca.dryrunMode { glcm.Info(folderMessage) } diff --git a/cmd/syncProcessor.go b/cmd/syncProcessor.go index 
6ad2f9213..52222cdae 100644 --- a/cmd/syncProcessor.go +++ b/cmd/syncProcessor.go @@ -39,13 +39,14 @@ import ( // extract the right info from cooked arguments and instantiate a generic copy transfer processor from it func newSyncTransferProcessor(cca *cookedSyncCmdArgs, numOfTransfersPerPart int, fpo common.FolderPropertyOption) *copyTransferProcessor { copyJobTemplate := &common.CopyJobPartOrderRequest{ - JobID: cca.jobID, - CommandString: cca.commandString, - FromTo: cca.fromTo, - Fpo: fpo, - SourceRoot: cca.source.CloneWithConsolidatedSeparators(), - DestinationRoot: cca.destination.CloneWithConsolidatedSeparators(), - CredentialInfo: cca.credentialInfo, + JobID: cca.jobID, + CommandString: cca.commandString, + FromTo: cca.fromTo, + Fpo: fpo, + SymlinkHandlingType: cca.symlinkHandling, + SourceRoot: cca.source.CloneWithConsolidatedSeparators(), + DestinationRoot: cca.destination.CloneWithConsolidatedSeparators(), + CredentialInfo: cca.credentialInfo, // flags BlobAttributes: common.BlobTransferAttributes{ diff --git a/cmd/zc_attr_filter_windows.go b/cmd/zc_attr_filter_windows.go index 8f26941c1..f90bf835c 100644 --- a/cmd/zc_attr_filter_windows.go +++ b/cmd/zc_attr_filter_windows.go @@ -46,7 +46,7 @@ func (f *attrFilter) AppliesOnlyToFiles() bool { func (f *attrFilter) DoesPass(storedObject StoredObject) bool { fileName := "" - if strings.Index(f.filePath, "*") == -1 { + if !strings.Contains(f.filePath, "*") { fileName = common.GenerateFullPath(f.filePath, storedObject.relativePath) } else { basePath := getPathBeforeFirstWildcard(f.filePath) diff --git a/cmd/zc_enumerator.go b/cmd/zc_enumerator.go index 3410e6201..6c123889d 100755 --- a/cmd/zc_enumerator.go +++ b/cmd/zc_enumerator.go @@ -113,13 +113,13 @@ func (s *StoredObject) isSourceRootFolder() bool { return s.relativePath == "" && s.entityType == common.EEntityType.Folder() } -// isCompatibleWithFpo serves as our universal filter for filtering out folders in the cases where we should not +// isCompatibleWithEntitySettings serves as our universal filter for filtering out folders in the cases where we should not // process them. (If we didn't have a filter like this, we'd have to put the filtering into // every enumerator, which would complicated them.) // We can't just implement this filtering in ToNewCopyTransfer, because delete transfers (from sync) // do not pass through that routine. So we need to make the filtering available in a separate function // so that the sync deletion code path(s) can access it. -func (s *StoredObject) isCompatibleWithFpo(fpo common.FolderPropertyOption) bool { +func (s *StoredObject) isCompatibleWithEntitySettings(fpo common.FolderPropertyOption, sht common.SymlinkHandlingType) bool { if s.entityType == common.EEntityType.File() { return true } else if s.entityType == common.EEntityType.Folder() { @@ -133,11 +133,14 @@ func (s *StoredObject) isCompatibleWithFpo(fpo common.FolderPropertyOption) bool default: panic("undefined folder properties option") } + } else if s.entityType == common.EEntityType.Symlink() { + return sht == common.ESymlinkHandlingType.Preserve() } else { panic("undefined entity type") } } + // ErrorNoHashPresent , ErrorHashNoLongerValid, and ErrorHashNotCompatible indicate a hash is not present, not obtainable, and/or not usable. 
// For the sake of best-effort, when these errors are emitted, depending on the sync hash policy var ErrorNoHashPresent = errors.New("no hash present on file") @@ -154,7 +157,7 @@ var ErrorHashAsyncCalculation = errors.New("hash is calculating asynchronously") // We use this, so that we can easily test for compatibility in the sync deletion code (which expects an objectProcessor) func newFpoAwareProcessor(fpo common.FolderPropertyOption, inner objectProcessor) objectProcessor { return func(s StoredObject) error { - if s.isCompatibleWithFpo(fpo) { + if s.isCompatibleWithEntitySettings(fpo, common.ESymlinkHandlingType.Skip()) { return inner(s) } else { return nil // nothing went wrong, because we didn't do anything @@ -162,14 +165,9 @@ func newFpoAwareProcessor(fpo common.FolderPropertyOption, inner objectProcessor } } -func (s *StoredObject) ToNewCopyTransfer( - steWillAutoDecompress bool, - Source string, - Destination string, - preserveBlobTier bool, - folderPropertiesOption common.FolderPropertyOption) (transfer common.CopyTransfer, shouldSendToSte bool) { +func (s *StoredObject) ToNewCopyTransfer(steWillAutoDecompress bool, Source string, Destination string, preserveBlobTier bool, folderPropertiesOption common.FolderPropertyOption, symlinkHandlingType common.SymlinkHandlingType) (transfer common.CopyTransfer, shouldSendToSte bool) { - if !s.isCompatibleWithFpo(folderPropertiesOption) { + if !s.isCompatibleWithEntitySettings(folderPropertiesOption, symlinkHandlingType) { return common.CopyTransfer{}, false } @@ -327,14 +325,14 @@ type enumerationCounterFunc func(entityType common.EntityType) // source, location, recursive, and incrementEnumerationCounter are always required. // ctx, pipeline are only required for remote resources. -// followSymlinks is only required for local resources (defaults to false) +// symlinkHandling is only required for local resources (defaults to false) // errorOnDirWOutRecursive is used by copy. // If errorChannel is non-nil, all errors encountered during enumeration will be conveyed through this channel. // To avoid slowdowns, use a buffered channel of enough capacity. 
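// Illustrative call shape (mirrors the list command's call site; the concrete
// option values below are placeholders, not recommendations):
//
//	traverser, err := InitResourceTraverser(source, location, &ctx, &credentialInfo,
//		common.ESymlinkHandlingType.Skip(), nil /* listOfFilesChannel */, true /* recursive */,
//		false /* getProperties */, false /* includeDirectoryStubs */,
//		common.EPermanentDeleteOption.None(), func(common.EntityType) {},
//		nil /* listOfVersionIds */, false /* s2sPreserveBlobTags */,
//		common.ESyncHashType.None(), common.EPreservePermissionsOption.None(),
//		pipeline.LogNone, common.CpkOptions{}, nil /* errorChannel */, false /* stripTopDir */)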
func InitResourceTraverser(resource common.ResourceString, location common.Location, ctx *context.Context, - credential *common.CredentialInfo, followSymlinks *bool, listOfFilesChannel chan string, recursive, getProperties, + credential *common.CredentialInfo, symlinkHandling common.SymlinkHandlingType, listOfFilesChannel chan string, recursive, getProperties, includeDirectoryStubs bool, permanentDeleteOption common.PermanentDeleteOption, incrementEnumerationCounter enumerationCounterFunc, listOfVersionIds chan string, - s2sPreserveBlobTags bool, syncHashType common.SyncHashType, logLevel pipeline.LogLevel, cpkOptions common.CpkOptions, errorChannel chan ErrorFileInfo) (ResourceTraverser, error) { + s2sPreserveBlobTags bool, syncHashType common.SyncHashType, preservePermissions common.PreservePermissionsOption, logLevel pipeline.LogLevel, cpkOptions common.CpkOptions, errorChannel chan ErrorFileInfo, stripTopDir bool) (ResourceTraverser, error) { var output ResourceTraverser var p *pipeline.Pipeline @@ -369,11 +367,6 @@ func InitResourceTraverser(resource common.ResourceString, location common.Locat p = &tmppipe } - toFollow := false - if followSymlinks != nil { - toFollow = *followSymlinks - } - // Feed list of files channel into new list traverser if listOfFilesChannel != nil { if location.IsLocal() { @@ -386,8 +379,8 @@ func InitResourceTraverser(resource common.ResourceString, location common.Locat } } - output = newListTraverser(resource, location, credential, ctx, recursive, toFollow, getProperties, - listOfFilesChannel, includeDirectoryStubs, incrementEnumerationCounter, s2sPreserveBlobTags, logLevel, cpkOptions) + output = newListTraverser(resource, location, credential, ctx, recursive, symlinkHandling, getProperties, + listOfFilesChannel, includeDirectoryStubs, incrementEnumerationCounter, s2sPreserveBlobTags, logLevel, cpkOptions, syncHashType, preservePermissions) return output, nil } @@ -396,7 +389,7 @@ func InitResourceTraverser(resource common.ResourceString, location common.Locat _, err := common.OSStat(resource.ValueLocal()) // If wildcard is present and this isn't an existing file/folder, glob and feed the globbed list into a list enum. 
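	// For example (illustrative): for a local resource "/data/logs/*",
	// getPathBeforeFirstWildcard yields the base path "/data/logs/",
	// filepath.Glob produces the concrete matches, and those are fed through
	// globChan into a list traverser. With stripTopDir set, this glob path is
	// taken even when the raw, un-globbed path happens to stat successfully.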
- if strings.Index(resource.ValueLocal(), "*") != -1 && err != nil { + if strings.Contains(resource.ValueLocal(), "*") && (stripTopDir || err != nil) { basePath := getPathBeforeFirstWildcard(resource.ValueLocal()) matches, err := filepath.Glob(resource.ValueLocal()) @@ -414,13 +407,13 @@ func InitResourceTraverser(resource common.ResourceString, location common.Locat }() baseResource := resource.CloneWithValue(cleanLocalPath(basePath)) - output = newListTraverser(baseResource, location, nil, nil, recursive, toFollow, getProperties, - globChan, includeDirectoryStubs, incrementEnumerationCounter, s2sPreserveBlobTags, logLevel, cpkOptions) + output = newListTraverser(baseResource, location, nil, nil, recursive, symlinkHandling, getProperties, + globChan, includeDirectoryStubs, incrementEnumerationCounter, s2sPreserveBlobTags, logLevel, cpkOptions, syncHashType, preservePermissions) } else { if ctx != nil { - output = newLocalTraverser(*ctx, resource.ValueLocal(), recursive, toFollow, syncHashType, incrementEnumerationCounter, errorChannel) + output = newLocalTraverser(*ctx, resource.ValueLocal(), recursive, stripTopDir, symlinkHandling, syncHashType, incrementEnumerationCounter, errorChannel) } else { - output = newLocalTraverser(context.TODO(), resource.ValueLocal(), recursive, toFollow, syncHashType, incrementEnumerationCounter, errorChannel) + output = newLocalTraverser(context.TODO(), resource.ValueLocal(), recursive, stripTopDir, symlinkHandling, syncHashType, incrementEnumerationCounter, errorChannel) } } case common.ELocation.Benchmark(): @@ -450,11 +443,11 @@ func InitResourceTraverser(resource common.ResourceString, location common.Locat return nil, errors.New(accountTraversalInherentlyRecursiveError) } - output = newBlobAccountTraverser(resourceURL, *p, *ctx, includeDirectoryStubs, incrementEnumerationCounter, s2sPreserveBlobTags, cpkOptions) + output = newBlobAccountTraverser(resourceURL, *p, *ctx, includeDirectoryStubs, incrementEnumerationCounter, s2sPreserveBlobTags, cpkOptions, preservePermissions) } else if listOfVersionIds != nil { output = newBlobVersionsTraverser(resourceURL, *p, *ctx, recursive, includeDirectoryStubs, incrementEnumerationCounter, listOfVersionIds, cpkOptions) } else { - output = newBlobTraverser(resourceURL, *p, *ctx, recursive, includeDirectoryStubs, incrementEnumerationCounter, s2sPreserveBlobTags, cpkOptions, includeDeleted, includeSnapshot, includeVersion) + output = newBlobTraverser(resourceURL, *p, *ctx, recursive, includeDirectoryStubs, incrementEnumerationCounter, s2sPreserveBlobTags, cpkOptions, includeDeleted, includeSnapshot, includeVersion, preservePermissions) } case common.ELocation.File(): resourceURL, err := resource.FullURL() @@ -735,7 +728,7 @@ func (e *CopyEnumerator) enumerate() (err error) { // -------------------------------------- Helper Funcs -------------------------------------- \\ func passedFilters(filters []ObjectFilter, storedObject StoredObject) bool { - if filters != nil && len(filters) > 0 { + if len(filters) > 0 { // loop through the filters, if any of them fail, then return false for _, filter := range filters { msg, supported := filter.DoesSupportThisOS() diff --git a/cmd/zc_processor.go b/cmd/zc_processor.go index 2d4720fe6..9dc499064 100644 --- a/cmd/zc_processor.go +++ b/cmd/zc_processor.go @@ -47,6 +47,7 @@ type copyTransferProcessor struct { preserveAccessTier bool folderPropertiesOption common.FolderPropertyOption + symlinkHandlingType common.SymlinkHandlingType dryrunMode bool } @@ -62,6 +63,7 @@ func 
newCopyTransferProcessor(copyJobTemplate *common.CopyJobPartOrderRequest, n reportFinalPartDispatched: reportFinalPartDispatched, preserveAccessTier: preserveAccessTier, folderPropertiesOption: copyJobTemplate.Fpo, + symlinkHandlingType: copyJobTemplate.SymlinkHandlingType, dryrunMode: dryrunMode, } } @@ -73,13 +75,7 @@ func (s *copyTransferProcessor) scheduleCopyTransfer(storedObject StoredObject) srcRelativePath := pathEncodeRules(storedObject.relativePath, s.copyJobTemplate.FromTo, false, true) dstRelativePath := pathEncodeRules(storedObject.relativePath, s.copyJobTemplate.FromTo, false, false) - copyTransfer, shouldSendToSte := storedObject.ToNewCopyTransfer( - false, // sync has no --decompress option - srcRelativePath, - dstRelativePath, - s.preserveAccessTier, - s.folderPropertiesOption, - ) + copyTransfer, shouldSendToSte := storedObject.ToNewCopyTransfer(false, srcRelativePath, dstRelativePath, s.preserveAccessTier, s.folderPropertiesOption, s.symlinkHandlingType) if s.copyJobTemplate.FromTo.To() == common.ELocation.None() { copyTransfer.BlobTier = s.copyJobTemplate.BlobAttributes.BlockBlobTier.ToAccessTierType() @@ -114,22 +110,22 @@ func (s *copyTransferProcessor) scheduleCopyTransfer(storedObject StoredObject) common.PanicIfErr(err) // if remove then To() will equal to common.ELocation.Unknown() - if s.copyJobTemplate.FromTo.To() == common.ELocation.Unknown() { //remove + if s.copyJobTemplate.FromTo.To() == common.ELocation.Unknown() { // remove return fmt.Sprintf("DRYRUN: remove %v/%v", s.copyJobTemplate.SourceRoot.Value, prettySrcRelativePath) } - if s.copyJobTemplate.FromTo.To() == common.ELocation.None() { //set-properties + if s.copyJobTemplate.FromTo.To() == common.ELocation.None() { // set-properties return fmt.Sprintf("DRYRUN: set-properties %v/%v", s.copyJobTemplate.SourceRoot.Value, prettySrcRelativePath) - } else { //copy for sync + } else { // copy for sync if s.copyJobTemplate.FromTo.From() == common.ELocation.Local() { // formatting from local source dryrunValue := fmt.Sprintf("DRYRUN: copy %v", common.ToShortPath(s.copyJobTemplate.SourceRoot.Value)) if runtime.GOOS == "windows" { dryrunValue += "\\" + strings.ReplaceAll(prettySrcRelativePath, "/", "\\") - } else { //linux and mac + } else { // linux and mac dryrunValue += "/" + prettySrcRelativePath } dryrunValue += fmt.Sprintf(" to %v/%v", strings.Trim(s.copyJobTemplate.DestinationRoot.Value, "/"), prettyDstRelativePath) @@ -141,7 +137,7 @@ func (s *copyTransferProcessor) scheduleCopyTransfer(storedObject StoredObject) common.ToShortPath(s.copyJobTemplate.DestinationRoot.Value)) if runtime.GOOS == "windows" { dryrunValue += "\\" + strings.ReplaceAll(prettyDstRelativePath, "/", "\\") - } else { //linux and mac + } else { // linux and mac dryrunValue += "/" + prettyDstRelativePath } return dryrunValue @@ -175,10 +171,14 @@ func (s *copyTransferProcessor) scheduleCopyTransfer(storedObject StoredObject) // so that there is at least one transfer for the final part s.copyJobTemplate.Transfers.List = append(s.copyJobTemplate.Transfers.List, copyTransfer) s.copyJobTemplate.Transfers.TotalSizeInBytes += uint64(copyTransfer.SourceSize) - if copyTransfer.EntityType == common.EEntityType.File() { + + switch copyTransfer.EntityType { + case common.EEntityType.File(): s.copyJobTemplate.Transfers.FileTransferCount++ - } else { + case common.EEntityType.Folder(): s.copyJobTemplate.Transfers.FolderTransferCount++ + case common.EEntityType.Symlink(): + s.copyJobTemplate.Transfers.SymlinkTransferCount++ } return nil diff --git 
a/cmd/zc_traverser_blob.go b/cmd/zc_traverser_blob.go index 142de3b58..c37b962f5 100644 --- a/cmd/zc_traverser_blob.go +++ b/cmd/zc_traverser_blob.go @@ -25,6 +25,7 @@ import ( "fmt" "net/url" "strings" + "time" "github.com/Azure/azure-storage-azcopy/v10/common/parallel" @@ -56,13 +57,13 @@ type blobTraverser struct { cpkOptions common.CpkOptions + preservePermissions common.PreservePermissionsOption + includeDeleted bool includeSnapshot bool includeVersion bool - - stripTopDir bool } func (t *blobTraverser) IsDirectory(isSource bool) (bool, error) { @@ -199,13 +200,14 @@ func (t *blobTraverser) Traverse(preprocessor objectMorpher, processor objectPro if azcopyScanningLogger != nil { azcopyScanningLogger.Log(pipeline.LogDebug, "Detected the root as a blob.") + azcopyScanningLogger.Log(pipeline.LogDebug, fmt.Sprintf("Root entity type: %s", getEntityType(blobProperties.NewMetadata()))) } storedObject := newStoredObject( preprocessor, getObjectNameOnly(strings.TrimSuffix(blobUrlParts.BlobName, common.AZCOPY_PATH_SEPARATOR_STRING)), "", - common.EntityType(common.IffUint8(isBlob, uint8(common.EEntityType.File()), uint8(common.EEntityType.Folder()))), + getEntityType(blobProperties.NewMetadata()), blobProperties.LastModified(), blobProperties.ContentLength(), blobProperties, @@ -224,7 +226,7 @@ func (t *blobTraverser) Traverse(preprocessor objectMorpher, processor objectPro } } if t.incrementEnumerationCounter != nil { - t.incrementEnumerationCounter(common.EEntityType.File()) + t.incrementEnumerationCounter(storedObject.entityType) } err := processIfPassedFilters(filters, storedObject, processor) @@ -234,6 +236,34 @@ func (t *blobTraverser) Traverse(preprocessor objectMorpher, processor objectPro if !t.includeDeleted && (isBlob || err != nil) { return err } + } else if blobUrlParts.BlobName == "" && t.preservePermissions.IsTruthy() { + // if the root is a container and we're copying "folders", we should persist the ACLs there too. 
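+		// Sketch of the intent here (descriptive): this StoredObject is synthetic.
+		// The empty name and relative path map it onto the container root itself,
+		// time.Now() merely stands in for a last-modified time the service does not
+		// surface at this level, and the zero size with empty content/blob props
+		// marks it as a folder entry whose only job is to carry permissions.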
+ if azcopyScanningLogger != nil { + azcopyScanningLogger.Log(pipeline.LogDebug, "Detected the root as a container.") + } + + storedObject := newStoredObject( + preprocessor, + "", + "", + common.EEntityType.Folder(), + time.Now(), + 0, + noContentProps, + noBlobProps, + common.Metadata{}, + blobUrlParts.ContainerName, + ) + + if t.incrementEnumerationCounter != nil { + t.incrementEnumerationCounter(common.EEntityType.Folder()) + } + + err := processIfPassedFilters(filters, storedObject, processor) + _, err = getProcessingError(err) + if err != nil { + return err + } } // get the container URL so that we can list the blobs @@ -370,12 +400,12 @@ func (t *blobTraverser) parallelList(containerURL azblob.ContainerURL, container // initiate parallel scanning, starting at the root path workerContext, cancelWorkers := context.WithCancel(t.ctx) + defer cancelWorkers() cCrawled := parallel.Crawl(workerContext, searchPrefix+extraSearchPrefix, enumerateOneDir, EnumerationParallelism) for x := range cCrawled { item, workerError := x.Item() if workerError != nil { - cancelWorkers() return workerError } @@ -387,23 +417,34 @@ func (t *blobTraverser) parallelList(containerURL azblob.ContainerURL, container processErr := processIfPassedFilters(filters, object, processor) _, processErr = getProcessingError(processErr) if processErr != nil { - cancelWorkers() return processErr } } return nil } +func getEntityType(blobInfo azblob.Metadata) common.EntityType { + if _, isfolder := blobInfo["hdi_isfolder"]; isfolder { + return common.EEntityType.Folder() + } else if _, isSymlink := blobInfo["is_symlink"]; isSymlink { + return common.EEntityType.Symlink() + } + + return common.EEntityType.File() +} func (t *blobTraverser) createStoredObjectForBlob(preprocessor objectMorpher, blobInfo azblob.BlobItemInternal, relativePath string, containerName string) StoredObject { adapter := blobPropertiesAdapter{blobInfo.Properties} - _, isFolder := blobInfo.Metadata["hdi_isfolder"] + if azcopyScanningLogger != nil { + azcopyScanningLogger.Log(pipeline.LogDebug, fmt.Sprintf("Blob %s entity type: %s", relativePath, getEntityType(blobInfo.Metadata))) + } + object := newStoredObject( preprocessor, getObjectNameOnly(blobInfo.Name), relativePath, - common.EntityType(common.IffUint8(isFolder, uint8(common.EEntityType.Folder()), uint8(common.EEntityType.File()))), + getEntityType(blobInfo.Metadata), blobInfo.Properties.LastModified, *blobInfo.Properties.ContentLength, adapter, @@ -482,7 +523,7 @@ func (t *blobTraverser) serialList(containerURL azblob.ContainerURL, containerNa return nil } -func newBlobTraverser(rawURL *url.URL, p pipeline.Pipeline, ctx context.Context, recursive, includeDirectoryStubs bool, incrementEnumerationCounter enumerationCounterFunc, s2sPreserveSourceTags bool, cpkOptions common.CpkOptions, includeDeleted, includeSnapshot, includeVersion bool) (t *blobTraverser) { +func newBlobTraverser(rawURL *url.URL, p pipeline.Pipeline, ctx context.Context, recursive, includeDirectoryStubs bool, incrementEnumerationCounter enumerationCounterFunc, s2sPreserveSourceTags bool, cpkOptions common.CpkOptions, includeDeleted, includeSnapshot, includeVersion bool, preservePermissions common.PreservePermissionsOption) (t *blobTraverser) { t = &blobTraverser{ rawURL: rawURL, p: p, @@ -496,6 +537,7 @@ func newBlobTraverser(rawURL *url.URL, p pipeline.Pipeline, ctx context.Context, includeDeleted: includeDeleted, includeSnapshot: includeSnapshot, includeVersion: includeVersion, + preservePermissions: preservePermissions, } 
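	// Behavior of getEntityType above, sketched (values shown only for clarity;
	// the lookup keys off the presence of the metadata key, not its value):
	//
	//	getEntityType(azblob.Metadata{"hdi_isfolder": "true"}) // -> common.EEntityType.Folder()
	//	getEntityType(azblob.Metadata{"is_symlink": "true"})   // -> common.EEntityType.Symlink()
	//	getEntityType(azblob.Metadata{})                       // -> common.EEntityType.File()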
disableHierarchicalScanning := strings.ToLower(glcm.GetEnvironmentVariable(common.EEnvironmentVariable.DisableHierarchicalScanning())) diff --git a/cmd/zc_traverser_blob_account.go b/cmd/zc_traverser_blob_account.go index 6c946e01c..b5c3f3980 100644 --- a/cmd/zc_traverser_blob_account.go +++ b/cmd/zc_traverser_blob_account.go @@ -45,6 +45,7 @@ type blobAccountTraverser struct { s2sPreserveSourceTags bool cpkOptions common.CpkOptions + preservePermissions common.PreservePermissionsOption } func (t *blobAccountTraverser) IsDirectory(_ bool) (bool, error) { @@ -100,7 +101,7 @@ func (t *blobAccountTraverser) Traverse(preprocessor objectMorpher, processor ob for _, v := range cList { containerURL := t.accountURL.NewContainerURL(v).URL() - containerTraverser := newBlobTraverser(&containerURL, t.p, t.ctx, true, t.includeDirectoryStubs, t.incrementEnumerationCounter, t.s2sPreserveSourceTags, t.cpkOptions, false, false, false) + containerTraverser := newBlobTraverser(&containerURL, t.p, t.ctx, true, t.includeDirectoryStubs, t.incrementEnumerationCounter, t.s2sPreserveSourceTags, t.cpkOptions, false, false, false, t.preservePermissions) preprocessorForThisChild := preprocessor.FollowedBy(newContainerDecorator(v)) @@ -115,7 +116,7 @@ func (t *blobAccountTraverser) Traverse(preprocessor objectMorpher, processor ob return nil } -func newBlobAccountTraverser(rawURL *url.URL, p pipeline.Pipeline, ctx context.Context, includeDirectoryStubs bool, incrementEnumerationCounter enumerationCounterFunc, s2sPreserveSourceTags bool, cpkOptions common.CpkOptions) (t *blobAccountTraverser) { +func newBlobAccountTraverser(rawURL *url.URL, p pipeline.Pipeline, ctx context.Context, includeDirectoryStubs bool, incrementEnumerationCounter enumerationCounterFunc, s2sPreserveSourceTags bool, cpkOptions common.CpkOptions, preservePermissions common.PreservePermissionsOption) (t *blobAccountTraverser) { bURLParts := azblob.NewBlobURLParts(*rawURL) cPattern := bURLParts.ContainerName @@ -133,6 +134,7 @@ func newBlobAccountTraverser(rawURL *url.URL, p pipeline.Pipeline, ctx context.C includeDirectoryStubs: includeDirectoryStubs, s2sPreserveSourceTags: s2sPreserveSourceTags, cpkOptions: cpkOptions, + preservePermissions: preservePermissions, } return diff --git a/cmd/zc_traverser_blobfs.go b/cmd/zc_traverser_blobfs.go index 7ed5405b6..566a08ceb 100644 --- a/cmd/zc_traverser_blobfs.go +++ b/cmd/zc_traverser_blobfs.go @@ -170,7 +170,7 @@ func (t *blobFSTraverser) Traverse(preprocessor objectMorpher, processor objectP for _, v := range dlr.Paths { var entityType common.EntityType lmt := v.LastModifiedTime() - if v.IsDirectory == nil || *v.IsDirectory == false { + if v.IsDirectory == nil || !*v.IsDirectory { entityType = common.EEntityType.File() contentProps = md5OnlyAdapter{md5: t.getContentMd5(t.ctx, dirUrl, v)} size = *v.ContentLength @@ -219,7 +219,7 @@ func (t *blobFSTraverser) Traverse(preprocessor objectMorpher, processor objectP var fileListBuilder strings.Builder for _, v := range dlr.Paths { - if v.IsDirectory == nil || *v.IsDirectory == false { + if v.IsDirectory == nil || !*v.IsDirectory { // it's a file fmt.Fprintf(&fileListBuilder, " %s,", *v.Name) } else { diff --git a/cmd/zc_traverser_blobfs_account.go b/cmd/zc_traverser_blobfs_account.go index b768b7b0d..9c13fb2e6 100644 --- a/cmd/zc_traverser_blobfs_account.go +++ b/cmd/zc_traverser_blobfs_account.go @@ -103,13 +103,17 @@ func (t *BlobFSAccountTraverser) Traverse(preprocessor objectMorpher, processor // listContainers will return the cached filesystem list if 
filesystems have already been listed by this traverser. fsList, err := t.listContainers() + if err != nil { + return err + } + for _, v := range fsList { fileSystemURL := t.accountURL.NewFileSystemURL(v).URL() fileSystemTraverser := newBlobFSTraverser(&fileSystemURL, t.p, t.ctx, true, t.incrementEnumerationCounter) preprocessorForThisChild := preprocessor.FollowedBy(newContainerDecorator(v)) - err = fileSystemTraverser.Traverse(preprocessorForThisChild, processor, filters) + err := fileSystemTraverser.Traverse(preprocessorForThisChild, processor, filters) if err != nil { WarnStdoutAndScanningLog(fmt.Sprintf("failed to list files in filesystem %s: %s", v, err)) diff --git a/cmd/zc_traverser_gcp.go b/cmd/zc_traverser_gcp.go index 278df9327..15a657b48 100644 --- a/cmd/zc_traverser_gcp.go +++ b/cmd/zc_traverser_gcp.go @@ -130,7 +130,6 @@ func (t *gcpTraverser) Traverse(preprocessor objectMorpher, processor objectProc } } } - return nil } func newGCPTraverser(rawURL *url.URL, ctx context.Context, recursive, getProperties bool, incrementEnumerationCounter enumerationCounterFunc) (*gcpTraverser, error) { diff --git a/cmd/zc_traverser_gcp_service.go b/cmd/zc_traverser_gcp_service.go index e4bf2ff89..500595c80 100644 --- a/cmd/zc_traverser_gcp_service.go +++ b/cmd/zc_traverser_gcp_service.go @@ -110,5 +110,5 @@ func newGCPServiceTraverser(rawURL *url.URL, ctx context.Context, getProperties t.gcpURL = gcpURLParts t.gcpClient, err = common.CreateGCPClient(t.ctx) - return t, nil + return t, err } diff --git a/cmd/zc_traverser_list.go b/cmd/zc_traverser_list.go index ec578634e..c25ca15a1 100755 --- a/cmd/zc_traverser_list.go +++ b/cmd/zc_traverser_list.go @@ -90,12 +90,10 @@ func (l *listTraverser) Traverse(preprocessor objectMorpher, processor objectPro } func newListTraverser(parent common.ResourceString, parentType common.Location, credential *common.CredentialInfo, - ctx *context.Context, recursive, followSymlinks, getProperties bool, listChan chan string, + ctx *context.Context, recursive bool, handleSymlinks common.SymlinkHandlingType, getProperties bool, listChan chan string, includeDirectoryStubs bool, incrementEnumerationCounter enumerationCounterFunc, s2sPreserveBlobTags bool, - logLevel pipeline.LogLevel, cpkOptions common.CpkOptions) ResourceTraverser { - var traverserGenerator childTraverserGenerator - - traverserGenerator = func(relativeChildPath string) (ResourceTraverser, error) { + logLevel pipeline.LogLevel, cpkOptions common.CpkOptions, syncHashType common.SyncHashType, preservePermissions common.PreservePermissionsOption) ResourceTraverser { + traverserGenerator := func(relativeChildPath string) (ResourceTraverser, error) { source := parent.Clone() if parentType != common.ELocation.Local() { // assume child path is not URL-encoded yet, this is consistent with the behavior of previous implementation @@ -108,9 +106,9 @@ func newListTraverser(parent common.ResourceString, parentType common.Location, } // Construct a traverser that goes through the child - traverser, err := InitResourceTraverser(source, parentType, ctx, credential, &followSymlinks, + traverser, err := InitResourceTraverser(source, parentType, ctx, credential, handleSymlinks, nil, recursive, getProperties, includeDirectoryStubs, common.EPermanentDeleteOption.None(), incrementEnumerationCounter, - nil, s2sPreserveBlobTags, common.ESyncHashType.None(), logLevel, cpkOptions, nil /* errorChannel */) + nil, s2sPreserveBlobTags, syncHashType, preservePermissions, logLevel, cpkOptions, nil /* errorChannel */, false) if err != 
nil { return nil, err } diff --git a/cmd/zc_traverser_local.go b/cmd/zc_traverser_local.go index ef44ec0b7..9629abeb9 100755 --- a/cmd/zc_traverser_local.go +++ b/cmd/zc_traverser_local.go @@ -31,7 +31,6 @@ import ( "github.com/Azure/azure-storage-azcopy/v10/common/parallel" "hash" "io" - "io/ioutil" "os" "path" "path/filepath" @@ -44,10 +43,11 @@ import ( const MAX_SYMLINKS_TO_FOLLOW = 40 type localTraverser struct { - fullPath string - recursive bool - followSymlinks bool - appCtx context.Context + fullPath string + recursive bool + stripTopDir bool + symlinkHandling common.SymlinkHandlingType + appCtx context.Context // a generic function to notify that a new stored object has been enumerated incrementEnumerationCounter enumerationCounterFunc errorChannel chan ErrorFileInfo @@ -72,6 +72,10 @@ func (t *localTraverser) IsDirectory(bool) (bool, error) { } func (t *localTraverser) getInfoIfSingleFile() (os.FileInfo, bool, error) { + if t.stripTopDir { + return nil, false, nil // StripTopDir can NEVER be a single file. If a user wants to target a single file, they must escape the *. + } + fileInfo, err := common.OSStat(t.fullPath) if err != nil { @@ -196,7 +200,7 @@ func writeToErrorChannel(errorChannel chan ErrorFileInfo, err ErrorFileInfo) { // Separate this from the traverser for two purposes: // 1) Cleaner code // 2) Easier to test individually than to test the entire traverser. -func WalkWithSymlinks(appCtx context.Context, fullPath string, walkFunc filepath.WalkFunc, followSymlinks bool, errorChannel chan ErrorFileInfo) (err error) { +func WalkWithSymlinks(appCtx context.Context, fullPath string, walkFunc filepath.WalkFunc, symlinkHandling common.SymlinkHandlingType, errorChannel chan ErrorFileInfo) (err error) { // We want to re-queue symlinks up in their evaluated form because filepath.Walk doesn't evaluate them for us. // So, what is the plan of attack? @@ -217,7 +221,7 @@ func WalkWithSymlinks(appCtx context.Context, fullPath string, walkFunc filepath // do NOT put fullPath: true into the map at this time, because we want to match the semantics of filepath.Walk, where the walkfunc is called for the root // When following symlinks, our current implementation tracks folders and files. Which may consume GB's of RAM when there are 10s of millions of files. var seenPaths seenPathsRecorder = &nullSeenPathsRecorder{} // uses no RAM - if followSymlinks { + if symlinkHandling.Follow() { // only if we're following we need to worry about this seenPaths = &realSeenPathsRecorder{make(map[string]struct{})} // have to use the RAM if we are dealing with symlinks, to prevent cycles } @@ -240,23 +244,35 @@ func WalkWithSymlinks(appCtx context.Context, fullPath string, walkFunc filepath computedRelativePath = "" } - // TODO: Later we might want to transfer these special files as such. 
- unsupportedFileTypes := (os.ModeSocket | os.ModeNamedPipe | os.ModeIrregular | os.ModeDevice) - if fileInfo == nil { err := fmt.Errorf("fileInfo is nil for file %s", filePath) WarnStdoutAndScanningLog(err.Error()) return nil } - if (fileInfo.Mode() & unsupportedFileTypes) != 0 { - err := fmt.Errorf("Unsupported file type %s: %v", filePath, fileInfo.Mode()) - WarnStdoutAndScanningLog(err.Error()) - return nil - } - if fileInfo.Mode()&os.ModeSymlink != 0 { - if !followSymlinks { + if symlinkHandling.Preserve() { + // Handle it like it's not a symlink + result, err := filepath.Abs(filePath) + + if err != nil { + WarnStdoutAndScanningLog(fmt.Sprintf("Failed to get absolute path of %s: %s", filePath, err)) + return nil + } + + err = walkFunc(common.GenerateFullPath(fullPath, computedRelativePath), fileInfo, fileError) + // Since this doesn't directly manipulate the error, and only checks for a specific error, it's OK to use in a generic function. + skipped, err := getProcessingError(err) + + // If the file was skipped, don't record it. + if !skipped { + seenPaths.Record(common.ToExtendedPath(result)) + } + + return err + } + + if symlinkHandling.None() { return nil // skip it } @@ -653,7 +669,9 @@ func (t *localTraverser) Traverse(preprocessor objectMorpher, processor objectPr } var entityType common.EntityType - if fileInfo.IsDir() { + if fileInfo.Mode()&os.ModeSymlink == os.ModeSymlink { + entityType = common.EEntityType.Symlink() + } else if fileInfo.IsDir() { newFileInfo, err := WrapFolder(filePath, fileInfo) if err != nil { WarnStdoutAndScanningLog(fmt.Sprintf("Failed to get last change of target at %s: %s", filePath, err.Error())) @@ -668,8 +686,8 @@ func (t *localTraverser) Traverse(preprocessor objectMorpher, processor objectPr } relPath := strings.TrimPrefix(strings.TrimPrefix(cleanLocalPath(filePath), cleanLocalPath(t.fullPath)), common.DeterminePathSeparator(t.fullPath)) - if !t.followSymlinks && fileInfo.Mode()&os.ModeSymlink != 0 { - WarnStdoutAndScanningLog(fmt.Sprintf("Skipping over symlink at %s because --follow-symlinks is false", common.GenerateFullPath(t.fullPath, relPath))) + if t.symlinkHandling.None() && fileInfo.Mode()&os.ModeSymlink != 0 { + WarnStdoutAndScanningLog(fmt.Sprintf("Skipping over symlink at %s because symlinks are not handled (--follow-symlinks or --preserve-symlinks)", common.GenerateFullPath(t.fullPath, relPath))) return nil } @@ -696,27 +714,32 @@ func (t *localTraverser) Traverse(preprocessor objectMorpher, processor objectPr } // note: Walk includes root, so no need here to separately create StoredObject for root (as we do for other folder-aware sources) - return finalizer(WalkWithSymlinks(t.appCtx, t.fullPath, processFile, t.followSymlinks, t.errorChannel)) + return finalizer(WalkWithSymlinks(t.appCtx, t.fullPath, processFile, t.symlinkHandling, t.errorChannel)) } else { // if recursive is off, we only need to scan the files immediately under the fullPath // We don't transfer any directory properties here, not even the root. (Because the root's // properties won't be transferred, because the only way to do a non-recursive directory transfer // is with /* (aka stripTopDir). - files, err := ioutil.ReadDir(t.fullPath) + entries, err := os.ReadDir(t.fullPath) if err != nil { return err } + entityType := common.EEntityType.File() + // go through the files and return if any of them fail to process - for _, singleFile := range files { + for _, entry := range entries { // This won't change. It's purely to hand info off to STE about where the symlink lives. 
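			// The three symlink modes as exercised below, summarized (illustrative):
			// None() skips the entry entirely, Preserve() tags it as
			// common.EEntityType.Symlink() so the link itself can be persisted, and
			// Follow() resolves one level via UnfurlSymlinks/OSStat and transfers
			// the target as an ordinary file.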
- relativePath := singleFile.Name() - if singleFile.Mode()&os.ModeSymlink != 0 { - if !t.followSymlinks { + relativePath := entry.Name() + fileInfo, _ := entry.Info() + if fileInfo.Mode()&os.ModeSymlink != 0 { + if t.symlinkHandling.None() { continue - } else { + } else if t.symlinkHandling.Preserve() { // Mark the entity type as a symlink. + entityType = common.EEntityType.Symlink() + } else if t.symlinkHandling.Follow() { // Because this only goes one layer deep, we can just append the filename to fullPath and resolve with it. - symlinkPath := common.GenerateFullPath(t.fullPath, singleFile.Name()) + symlinkPath := common.GenerateFullPath(t.fullPath, entry.Name()) // Evaluate the symlink result, err := UnfurlSymlinks(symlinkPath) @@ -732,7 +755,7 @@ func (t *localTraverser) Traverse(preprocessor objectMorpher, processor objectPr } // Replace the current FileInfo with - singleFile, err = common.OSStat(result) + fileInfo, err = common.OSStat(result) if err != nil { return err @@ -740,7 +763,7 @@ func (t *localTraverser) Traverse(preprocessor objectMorpher, processor objectPr } } - if singleFile.IsDir() { + if entry.IsDir() { continue // it doesn't make sense to transfer directory properties when not recurring } @@ -752,11 +775,11 @@ func (t *localTraverser) Traverse(preprocessor objectMorpher, processor objectPr err := processIfPassedFilters(filters, newStoredObject( preprocessor, - singleFile.Name(), + entry.Name(), strings.ReplaceAll(relativePath, common.DeterminePathSeparator(t.fullPath), common.AZCOPY_PATH_SEPARATOR_STRING), // Consolidate relative paths to the azcopy path separator for sync - common.EEntityType.File(), // TODO: add code path for folders - singleFile.ModTime(), - singleFile.Size(), + entityType, // TODO: add code path for folders + fileInfo.ModTime(), + fileInfo.Size(), noContentProps, // Local MD5s are computed in the STE, and other props don't apply to local files noBlobProps, noMetdata, @@ -775,15 +798,17 @@ func (t *localTraverser) Traverse(preprocessor objectMorpher, processor objectPr return finalizer(err) } -func newLocalTraverser(ctx context.Context, fullPath string, recursive bool, followSymlinks bool, syncHashType common.SyncHashType, incrementEnumerationCounter enumerationCounterFunc, errorChannel chan ErrorFileInfo) *localTraverser { +func newLocalTraverser(ctx context.Context, fullPath string, recursive bool, stripTopDir bool, symlinkHandling common.SymlinkHandlingType, syncHashType common.SyncHashType, incrementEnumerationCounter enumerationCounterFunc, errorChannel chan ErrorFileInfo) *localTraverser { traverser := localTraverser{ fullPath: cleanLocalPath(fullPath), recursive: recursive, - followSymlinks: followSymlinks, + symlinkHandling: symlinkHandling, appCtx: ctx, incrementEnumerationCounter: incrementEnumerationCounter, errorChannel: errorChannel, - targetHashType: syncHashType} + targetHashType: syncHashType, + stripTopDir: stripTopDir, + } return &traverser } diff --git a/cmd/zt_copy_blob_download_test.go b/cmd/zt_copy_blob_download_test.go index 62b488ee4..b13c0850c 100644 --- a/cmd/zt_copy_blob_download_test.go +++ b/cmd/zt_copy_blob_download_test.go @@ -174,7 +174,7 @@ func (s *cmdIntegrationSuite) TestDownloadAccount(c *chk.C) { // Traverse the account ahead of time and determine the relative paths for testing. 
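Alongside the symlink work, the non-recursive path above migrates from the deprecated ioutil.ReadDir to os.ReadDir. The latter returns lightweight fs.DirEntry values instead of os.FileInfo, so size and mod-time need an explicit entry.Info() call, which is why the hunk introduces the separate fileInfo variable. A small sketch of the pattern (listDir is a hypothetical name; note this version checks the Info() error that the hunk discards):

```go
package main

import (
	"fmt"
	"log"
	"os"
)

func listDir(dir string) error {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return err
	}
	for _, entry := range entries {
		info, err := entry.Info() // lstat-equivalent; may fail if the file vanished mid-listing
		if err != nil {
			log.Printf("skipping %s: %v", entry.Name(), err)
			continue
		}
		fmt.Printf("%s\t%d bytes\tsymlink=%v\n",
			entry.Name(), info.Size(), info.Mode()&os.ModeSymlink != 0)
	}
	return nil
}

func main() { _ = listDir(".") }
```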
relPaths := make([]string, 0) // Use a map for easy lookup - blobTraverser := newBlobAccountTraverser(&rawBSU, p, ctx, false, func(common.EntityType) {}, false, common.CpkOptions{}) + blobTraverser := newBlobAccountTraverser(&rawBSU, p, ctx, false, func(common.EntityType) {}, false, common.CpkOptions{}, common.EPreservePermissionsOption.None()) processor := func(object StoredObject) error { // Append the container name to the relative path relPath := "/" + object.ContainerName + "/" + object.relativePath @@ -222,7 +222,7 @@ func (s *cmdIntegrationSuite) TestDownloadAccountWildcard(c *chk.C) { // Traverse the account ahead of time and determine the relative paths for testing. relPaths := make([]string, 0) // Use a map for easy lookup - blobTraverser := newBlobAccountTraverser(&rawBSU, p, ctx, false, func(common.EntityType) {}, false, common.CpkOptions{}) + blobTraverser := newBlobAccountTraverser(&rawBSU, p, ctx, false, func(common.EntityType) {}, false, common.CpkOptions{}, common.EPreservePermissionsOption.None()) processor := func(object StoredObject) error { // Append the container name to the relative path relPath := "/" + object.ContainerName + "/" + object.relativePath diff --git a/cmd/zt_generic_processor_test.go b/cmd/zt_generic_processor_test.go index e26d140f0..48d590987 100644 --- a/cmd/zt_generic_processor_test.go +++ b/cmd/zt_generic_processor_test.go @@ -57,7 +57,7 @@ func (processorTestSuiteHelper) getExpectedTransferFromStoredObjectList(storedOb } func (processorTestSuiteHelper) getCopyJobTemplate() *common.CopyJobPartOrderRequest { - return &common.CopyJobPartOrderRequest{Fpo: common.EFolderPropertiesOption.NoFolders()} + return &common.CopyJobPartOrderRequest{Fpo: common.EFolderPropertiesOption.NoFolders(), SymlinkHandlingType: common.ESymlinkHandlingType.Skip()} } func (s *genericProcessorSuite) TestCopyTransferProcessorMultipleFiles(c *chk.C) { diff --git a/cmd/zt_generic_service_traverser_test.go b/cmd/zt_generic_service_traverser_test.go index 33450e609..09c5427e2 100644 --- a/cmd/zt_generic_service_traverser_test.go +++ b/cmd/zt_generic_service_traverser_test.go @@ -58,7 +58,7 @@ func (s *genericTraverserSuite) TestBlobFSServiceTraverserWithManyObjects(c *chk scenarioHelper{}.generateLocalFilesFromList(c, dstDirName, objectList) // Create a local traversal - localTraverser := newLocalTraverser(context.TODO(), dstDirName, true, true, common.ESyncHashType.None(), func(common.EntityType) {}, nil) + localTraverser := newLocalTraverser(context.TODO(), dstDirName, true, false, common.ESymlinkHandlingType.Follow(), common.ESyncHashType.None(), func(common.EntityType) {}, nil) // Invoke the traversal with an indexer so the results are indexed for easy validation localIndexer := newObjectIndexer() @@ -174,7 +174,7 @@ func (s *genericTraverserSuite) TestServiceTraverserWithManyObjects(c *chk.C) { scenarioHelper{}.generateLocalFilesFromList(c, dstDirName, objectList) // Create a local traversal - localTraverser := newLocalTraverser(context.TODO(), dstDirName, true, true, common.ESyncHashType.None(), func(common.EntityType) {}, nil) + localTraverser := newLocalTraverser(context.TODO(), dstDirName, true, false, common.ESymlinkHandlingType.Follow(), common.ESyncHashType.None(), func(common.EntityType) {}, nil) // Invoke the traversal with an indexer so the results are indexed for easy validation localIndexer := newObjectIndexer() @@ -184,7 +184,7 @@ func (s *genericTraverserSuite) TestServiceTraverserWithManyObjects(c *chk.C) { // construct a blob account traverser blobPipeline := 
azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) rawBSU := scenarioHelper{}.getRawBlobServiceURLWithSAS(c) - blobAccountTraverser := newBlobAccountTraverser(&rawBSU, blobPipeline, ctx, false, func(common.EntityType) {}, false, common.CpkOptions{}) + blobAccountTraverser := newBlobAccountTraverser(&rawBSU, blobPipeline, ctx, false, func(common.EntityType) {}, false, common.CpkOptions{}, common.EPreservePermissionsOption.None()) // invoke the blob account traversal with a dummy processor blobDummyProcessor := dummyProcessor{} @@ -358,7 +358,7 @@ func (s *genericTraverserSuite) TestServiceTraverserWithWildcards(c *chk.C) { scenarioHelper{}.generateLocalFilesFromList(c, dstDirName, objectList) // Create a local traversal - localTraverser := newLocalTraverser(context.TODO(), dstDirName, true, true, common.ESyncHashType.None(), func(common.EntityType) {}, nil) + localTraverser := newLocalTraverser(context.TODO(), dstDirName, true, false, common.ESymlinkHandlingType.Follow(), common.ESyncHashType.None(), func(common.EntityType) {}, nil) // Invoke the traversal with an indexer so the results are indexed for easy validation localIndexer := newObjectIndexer() @@ -369,7 +369,7 @@ func (s *genericTraverserSuite) TestServiceTraverserWithWildcards(c *chk.C) { blobPipeline := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) rawBSU := scenarioHelper{}.getRawBlobServiceURLWithSAS(c) rawBSU.Path = "/objectmatch*" // set the container name to contain a wildcard - blobAccountTraverser := newBlobAccountTraverser(&rawBSU, blobPipeline, ctx, false, func(common.EntityType) {}, false, common.CpkOptions{}) + blobAccountTraverser := newBlobAccountTraverser(&rawBSU, blobPipeline, ctx, false, func(common.EntityType) {}, false, common.CpkOptions{}, common.EPreservePermissionsOption.None()) // invoke the blob account traversal with a dummy processor blobDummyProcessor := dummyProcessor{} diff --git a/cmd/zt_generic_traverser_test.go b/cmd/zt_generic_traverser_test.go index 0c9602dfb..c094a0590 100644 --- a/cmd/zt_generic_traverser_test.go +++ b/cmd/zt_generic_traverser_test.go @@ -22,10 +22,11 @@ package cmd import ( "context" + "github.com/Azure/azure-pipeline-go/pipeline" "io" - "io/ioutil" "os" "path/filepath" + "runtime" "strings" "time" @@ -56,6 +57,66 @@ func trySymlink(src, dst string, c *chk.C) { } } +func (s *genericTraverserSuite) TestLocalWildcardOverlap(c *chk.C) { + if runtime.GOOS == "windows" { + c.Skip("invalid filename used") + return + } + + /* + Wildcard support is not actually a part of the local traverser, believe it or not. + It's instead implemented in InitResourceTraverser as a short-circuit to a list traverser + utilizing the filepath.Glob function, which then initializes local traversers to achieve the same effect. 
+ */ + tmpDir := scenarioHelper{}.generateLocalDirectory(c) + defer func(path string) { _ = os.RemoveAll(path) }(tmpDir) + + scenarioHelper{}.generateLocalFilesFromList(c, tmpDir, []string{ + "test.txt", + "tes*t.txt", + "foobarbaz/test.txt", + }) + + resource, err := SplitResourceString(filepath.Join(tmpDir, "tes*t.txt"), common.ELocation.Local()) + c.Assert(err, chk.IsNil) + + traverser, err := InitResourceTraverser( + resource, + common.ELocation.Local(), + nil, + nil, + common.ESymlinkHandlingType.Follow(), + nil, + true, + false, + false, + common.EPermanentDeleteOption.None(), + nil, + nil, + false, + common.ESyncHashType.None(), + common.EPreservePermissionsOption.None(), + pipeline.LogInfo, + common.CpkOptions{}, + nil, + true, + ) + c.Assert(err, chk.IsNil) + + seenFiles := make(map[string]bool) + + err = traverser.Traverse(nil, func(storedObject StoredObject) error { + seenFiles[storedObject.relativePath] = true + return nil + }, []ObjectFilter{}) + c.Assert(err, chk.IsNil) + + c.Assert(seenFiles, chk.DeepEquals, map[string]bool{ + "test.txt": true, + "tes*t.txt": true, + }) +} + // GetProperties tests. // GetProperties does not exist on Blob, as the properties come in the list call. // While BlobFS could get properties in the future, it's currently disabled as BFS source S2S isn't set up right now, and likely won't be. @@ -276,7 +337,7 @@ func (s *genericTraverserSuite) TestWalkWithSymlinks_ToFolder(c *chk.C) { fileCount++ return nil }, - true, nil), chk.IsNil) + common.ESymlinkHandlingType.Follow(), nil), chk.IsNil) // 3 files live in base, 3 files live in symlink c.Assert(fileCount, chk.Equals, 6) @@ -341,7 +402,7 @@ func (s *genericTraverserSuite) TestWalkWithSymlinksBreakLoop(c *chk.C) { fileCount++ return nil }, - true, nil), chk.IsNil) + common.ESymlinkHandlingType.Follow(), nil), chk.IsNil) c.Assert(fileCount, chk.Equals, 3) } @@ -351,7 +412,7 @@ func (s *genericTraverserSuite) TestWalkWithSymlinksDedupe(c *chk.C) { fileNames := []string{"stonks.txt", "jaws but its a baby shark.mp3", "my crow soft.txt"} tmpDir := scenarioHelper{}.generateLocalDirectory(c) defer os.RemoveAll(tmpDir) - symlinkTmpDir, err := ioutil.TempDir(tmpDir, "subdir") + symlinkTmpDir, err := os.MkdirTemp(tmpDir, "subdir") c.Assert(err, chk.IsNil) scenarioHelper{}.generateLocalFilesFromList(c, tmpDir, fileNames) @@ -371,7 +432,7 @@ func (s *genericTraverserSuite) TestWalkWithSymlinksDedupe(c *chk.C) { fileCount++ return nil }, - true, nil), chk.IsNil) + common.ESymlinkHandlingType.Follow(), nil), chk.IsNil) c.Assert(fileCount, chk.Equals, 6) } @@ -402,7 +463,7 @@ func (s *genericTraverserSuite) TestWalkWithSymlinksMultitarget(c *chk.C) { fileCount++ return nil }, - true, nil), chk.IsNil) + common.ESymlinkHandlingType.Follow(), nil), chk.IsNil) // 3 files live in base, 3 files live in first symlink, second & third symlink is ignored. c.Assert(fileCount, chk.Equals, 6) @@ -416,7 +477,7 @@ func (s *genericTraverserSuite) TestWalkWithSymlinksToParentAndChild(c *chk.C) { root2 := scenarioHelper{}.generateLocalDirectory(c) defer os.RemoveAll(root2) - child, err := ioutil.TempDir(root2, "childdir") + child, err := os.MkdirTemp(root2, "childdir") c.Assert(err, chk.IsNil) scenarioHelper{}.generateLocalFilesFromList(c, root2, fileNames) @@ -435,7 +496,7 @@ func (s *genericTraverserSuite) TestWalkWithSymlinksToParentAndChild(c *chk.C) { fileCount++ return nil }, - true, nil), chk.IsNil) + common.ESymlinkHandlingType.Follow(), nil), chk.IsNil) // 6 files total live under toroot. 
tochild should be ignored (or if tochild was traversed first, child will be ignored on toroot). c.Assert(fileCount, chk.Equals, 6) @@ -484,7 +545,7 @@ func (s *genericTraverserSuite) TestTraverserWithSingleObject(c *chk.C) { scenarioHelper{}.generateLocalFilesFromList(c, dstDirName, blobList) // construct a local traverser - localTraverser := newLocalTraverser(context.TODO(), filepath.Join(dstDirName, dstFileName), false, false, common.ESyncHashType.None(), func(common.EntityType) {}, nil) + localTraverser := newLocalTraverser(context.TODO(), filepath.Join(dstDirName, dstFileName), false, false, common.ESymlinkHandlingType.Follow(), common.ESyncHashType.None(), func(common.EntityType) {}, nil) // invoke the local traversal with a dummy processor localDummyProcessor := dummyProcessor{} @@ -496,7 +557,7 @@ func (s *genericTraverserSuite) TestTraverserWithSingleObject(c *chk.C) { ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, blobList[0]) - blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, false, false, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false) + blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, false, false, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None()) // invoke the blob traversal with a dummy processor blobDummyProcessor := dummyProcessor{} @@ -644,7 +705,7 @@ func (s *genericTraverserSuite) TestTraverserContainerAndLocalDirectory(c *chk.C // test two scenarios, either recursive or not for _, isRecursiveOn := range []bool{true, false} { // construct a local traverser - localTraverser := newLocalTraverser(context.TODO(), dstDirName, isRecursiveOn, false, common.ESyncHashType.None(), func(common.EntityType) {}, nil) + localTraverser := newLocalTraverser(context.TODO(), dstDirName, isRecursiveOn, false, common.ESymlinkHandlingType.Follow(), common.ESyncHashType.None(), func(common.EntityType) {}, nil) // invoke the local traversal with an indexer // so that the results are indexed for easy validation @@ -656,7 +717,7 @@ func (s *genericTraverserSuite) TestTraverserContainerAndLocalDirectory(c *chk.C ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName) - blobTraverser := newBlobTraverser(&rawContainerURLWithSAS, p, ctx, isRecursiveOn, false, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false) + blobTraverser := newBlobTraverser(&rawContainerURLWithSAS, p, ctx, isRecursiveOn, false, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None()) // invoke the local traversal with a dummy processor blobDummyProcessor := dummyProcessor{} @@ -805,7 +866,7 @@ func (s *genericTraverserSuite) TestTraverserWithVirtualAndLocalDirectory(c *chk // test two scenarios, either recursive or not for _, isRecursiveOn := range []bool{true, false} { // construct a local traverser - localTraverser := newLocalTraverser(context.TODO(), filepath.Join(dstDirName, virDirName), isRecursiveOn, false, common.ESyncHashType.None(), func(common.EntityType) {}, nil) + localTraverser := 
newLocalTraverser(context.TODO(), filepath.Join(dstDirName, virDirName), isRecursiveOn, false, common.ESymlinkHandlingType.Follow(), common.ESyncHashType.None(), func(common.EntityType) {}, nil) // invoke the local traversal with an indexer // so that the results are indexed for easy validation @@ -817,7 +878,7 @@ func (s *genericTraverserSuite) TestTraverserWithVirtualAndLocalDirectory(c *chk ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) rawVirDirURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, virDirName) - blobTraverser := newBlobTraverser(&rawVirDirURLWithSAS, p, ctx, isRecursiveOn, false, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false) + blobTraverser := newBlobTraverser(&rawVirDirURLWithSAS, p, ctx, isRecursiveOn, false, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None()) // invoke the local traversal with a dummy processor blobDummyProcessor := dummyProcessor{} @@ -925,10 +986,10 @@ func (s *genericTraverserSuite) TestSerialAndParallelBlobTraverser(c *chk.C) { ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion) p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{}) rawVirDirURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, virDirName) - parallelBlobTraverser := newBlobTraverser(&rawVirDirURLWithSAS, p, ctx, isRecursiveOn, false, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false) + parallelBlobTraverser := newBlobTraverser(&rawVirDirURLWithSAS, p, ctx, isRecursiveOn, false, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None()) // construct a serial blob traverser - serialBlobTraverser := newBlobTraverser(&rawVirDirURLWithSAS, p, ctx, isRecursiveOn, false, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false) + serialBlobTraverser := newBlobTraverser(&rawVirDirURLWithSAS, p, ctx, isRecursiveOn, false, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None()) serialBlobTraverser.parallelListing = false // invoke the parallel traversal with a dummy processor diff --git a/cmd/zt_scenario_helpers_for_test.go b/cmd/zt_scenario_helpers_for_test.go index 5fe024534..8bbcf9622 100644 --- a/cmd/zt_scenario_helpers_for_test.go +++ b/cmd/zt_scenario_helpers_for_test.go @@ -24,7 +24,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "net/url" "os" "path" @@ -64,7 +63,7 @@ var specialNames = []string{ // note: this is to emulate the list-of-files flag func (scenarioHelper) generateListOfFiles(c *chk.C, fileList []string) (path string) { - parentDirName, err := ioutil.TempDir("", "AzCopyLocalTest") + parentDirName, err := os.MkdirTemp("", "AzCopyLocalTest") c.Assert(err, chk.IsNil) // create the file @@ -74,13 +73,13 @@ func (scenarioHelper) generateListOfFiles(c *chk.C, fileList []string) (path str // pipe content into it content := strings.Join(fileList, "\n") - err = ioutil.WriteFile(path, []byte(content), common.DEFAULT_FILE_PERM) + err = os.WriteFile(path, []byte(content), common.DEFAULT_FILE_PERM) c.Assert(err, chk.IsNil) return } func (scenarioHelper) generateLocalDirectory(c *chk.C) (dstDirName string) { - dstDirName, err := ioutil.TempDir("", "AzCopyLocalTest") + 
dstDirName, err := os.MkdirTemp("", "AzCopyLocalTest") c.Assert(err, chk.IsNil) return } @@ -97,7 +96,7 @@ func (scenarioHelper) generateLocalFile(filePath string, fileSize int) ([]byte, } // write to file and return the data - err = ioutil.WriteFile(filePath, bigBuff, common.DEFAULT_FILE_PERM) + err = os.WriteFile(filePath, bigBuff, common.DEFAULT_FILE_PERM) return bigBuff, err } diff --git a/cmd/zt_sync_blob_local_test.go b/cmd/zt_sync_blob_local_test.go index 67b705bfa..aacf391bd 100644 --- a/cmd/zt_sync_blob_local_test.go +++ b/cmd/zt_sync_blob_local_test.go @@ -23,7 +23,6 @@ package cmd import ( "bytes" "context" - "io/ioutil" "os" "path/filepath" "strings" @@ -211,7 +210,7 @@ func (s *cmdIntegrationSuite) TestSyncDownloadWithMismatchedDestination(c *chk.C validateDownloadTransfersAreScheduled(c, "", "", expectedOutput, mockedRPC) // make sure the extra files were deleted - currentDstFileList, err := ioutil.ReadDir(dstDirName) + currentDstFileList, err := os.ReadDir(dstDirName) extraFilesFound := false for _, file := range currentDstFileList { if strings.Contains(file.Name(), "extra") { diff --git a/cmd/zt_test.go b/cmd/zt_test.go index 76eba7e0a..eae7790ca 100644 --- a/cmd/zt_test.go +++ b/cmd/zt_test.go @@ -26,7 +26,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "math/rand" "net/url" "os" @@ -778,7 +777,7 @@ func disableSoftDelete(c *chk.C, bsu azblob.ServiceURL) { func validateUpload(c *chk.C, blobURL azblob.BlockBlobURL) { resp, err := blobURL.Download(ctx, 0, 0, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{}) c.Assert(err, chk.IsNil) - data, _ := ioutil.ReadAll(resp.Response().Body) + data, _ := io.ReadAll(resp.Response().Body) c.Assert(data, chk.HasLen, 0) } diff --git a/cmd/zt_traverser_blob_test.go b/cmd/zt_traverser_blob_test.go index cd2eb4879..e77d49cd6 100644 --- a/cmd/zt_traverser_blob_test.go +++ b/cmd/zt_traverser_blob_test.go @@ -48,7 +48,7 @@ func (s *traverserBlobSuite) TestIsSourceDirWithStub(c *chk.C) { // List rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, dirName) - blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false) + blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None()) isDir, err := blobTraverser.IsDirectory(true) c.Assert(isDir, chk.Equals, true) @@ -69,7 +69,7 @@ func (s *traverserBlobSuite) TestIsSourceDirWithNoStub(c *chk.C) { // List rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, dirName) - blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false) + blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None()) isDir, err := blobTraverser.IsDirectory(true) c.Assert(isDir, chk.Equals, true) @@ -92,7 +92,7 @@ func (s *traverserBlobSuite) TestIsSourceFileExists(c *chk.C) { // List rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, fileName) - blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false) + blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, 
false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None()) isDir, err := blobTraverser.IsDirectory(true) c.Assert(isDir, chk.Equals, false) @@ -113,7 +113,7 @@ func (s *traverserBlobSuite) TestIsSourceFileDoesNotExist(c *chk.C) { // List rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, fileName) - blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false) + blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, true, true, func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false, common.EPreservePermissionsOption.None()) isDir, err := blobTraverser.IsDirectory(true) c.Assert(isDir, chk.Equals, false) diff --git a/common/CountPerSecond.go b/common/CountPerSecond.go index 43d391ab0..c2d9af608 100644 --- a/common/CountPerSecond.go +++ b/common/CountPerSecond.go @@ -33,7 +33,7 @@ func (cps *countPerSecond) Add(delta uint64) uint64 { func (cps *countPerSecond) LatestRate() float64 { cps.nocopy.Check() - dur := time.Now().Sub(time.Unix(cps.start, 0)) + dur := time.Since(time.Unix(cps.start, 0)) if dur <= 0 { dur = 1 } diff --git a/common/chunkStatusLogger.go b/common/chunkStatusLogger.go index 7b5f43ba1..7affd794f 100644 --- a/common/chunkStatusLogger.go +++ b/common/chunkStatusLogger.go @@ -310,10 +310,9 @@ func (csl *chunkStatusLogger) FlushLog() { // In order to be idempotent, we don't close any channel here, we just flush it - csl.unsavedEntries <- nil // tell writer that it it must flush, then wait until it has done so - select { - case <-csl.flushDone: - } + csl.unsavedEntries <- nil // tell writer that it must flush, then wait until it has done so + + <-csl.flushDone } // CloseLogger close the chunklogger thread. diff --git a/common/cpuMonitor.go b/common/cpuMonitor.go index fb53a14c6..e25252069 100644 --- a/common/cpuMonitor.go +++ b/common/cpuMonitor.go @@ -57,7 +57,7 @@ func NewCalibratedCpuUsageMonitor() CPUMonitor { // start it running and wait until it has self-calibrated calibration := make(chan struct{}) go c.computationWorker(calibration) - _ = <-calibration + <-calibration return c } @@ -92,7 +92,7 @@ func (c *cpuUsageMonitor) computationWorker(calibrationDone chan struct{}) { // run a separate loop to do the probes/measurements go c.monitoringWorker(waitTime, durations) - _ = <-durations // discard first value, it doesn't seem very reliable + <-durations // discard first value, it doesn't seem very reliable // get the next 3 and average them, as our baseline. We chose 3 somewhat arbitrarily x := <-durations @@ -140,10 +140,7 @@ func (c *cpuUsageMonitor) monitoringWorker(waitTime time.Duration, d chan time.D for { start := time.Now() - select { - case <-time.After(waitTime): - // noop - } + <-time.After(waitTime) // noop duration := time.Since(start) // how much longer than expected did it take for us to wake up? diff --git a/common/credCacheInternal_linux.go b/common/credCacheInternal_linux.go index 8d6a4c4d3..8c358f3ca 100644 --- a/common/credCacheInternal_linux.go +++ b/common/credCacheInternal_linux.go @@ -29,7 +29,6 @@ import ( // CredCacheInternalIntegration manages credential caches with Gnome keyring. // Note: This should be only used for internal integration. 
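Several hunks in this stretch (CountPerSecond, chunkStatusLogger, cpuMonitor) are mechanical gosimple cleanups rather than behaviour changes. The equivalences they rely on, in a compact sketch:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Unix(0, 0)
	d := time.Since(start) // preferred shorthand for time.Now().Sub(start); same result
	fmt.Println(d > 0)

	done := make(chan struct{}, 1)
	done <- struct{}{}
	// A one-case select with no default blocks exactly like a plain receive,
	// so `select { case <-done: }` and `_ = <-done` both reduce to:
	<-done
	fmt.Println("received")
}
```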
type CredCacheInternalIntegration struct { - state string accountName string serviceName string keyName string diff --git a/common/credCache_linux.go b/common/credCache_linux.go index 6ce9bf38f..9efd551cc 100644 --- a/common/credCache_linux.go +++ b/common/credCache_linux.go @@ -49,7 +49,7 @@ func NewCredCache(options CredCacheOptions) *CredCache { } runtime.SetFinalizer(c, func(CredCache *CredCache) { - if CredCache.isPermSet == false && CredCache.key != nil { + if !CredCache.isPermSet && CredCache.key != nil { // Indicates Permission is by default ProcessAll, which is not safe and try to recycle the key. // Note: there is no method to grant permission during adding key, // this mechanism is added to ensure key exists only if its permission is set properly. diff --git a/common/credCache_windows.go b/common/credCache_windows.go index 69b4d8787..8df60cf81 100644 --- a/common/credCache_windows.go +++ b/common/credCache_windows.go @@ -23,7 +23,6 @@ package common import ( "errors" "fmt" - "io/ioutil" "os" "path" "path/filepath" @@ -133,7 +132,7 @@ func (c *CredCache) removeCachedTokenInternal() error { // loadTokenInternal restores a Token object from file cache. func (c *CredCache) loadTokenInternal() (*OAuthTokenInfo, error) { tokenFilePath := c.tokenFilePath() - b, err := ioutil.ReadFile(tokenFilePath) + b, err := os.ReadFile(tokenFilePath) if err != nil { return nil, fmt.Errorf("failed to read token file %q during loading token: %v", tokenFilePath, err) } @@ -163,7 +162,7 @@ func (c *CredCache) saveTokenInternal(token OAuthTokenInfo) error { return fmt.Errorf("failed to create directory %q to store token in, %v", dir, err) } - newFile, err := ioutil.TempFile(dir, "token") + newFile, err := os.CreateTemp(dir, "token") if err != nil { return fmt.Errorf("failed to create the temp file to write the token, %v", err) } @@ -250,7 +249,7 @@ func encrypt(data []byte, entropy *dataBlob) ([]byte, error) { var outblob dataBlob defer func() { if outblob.pbData != nil { - mLocalFree.Call(uintptr(unsafe.Pointer(outblob.pbData))) + _, _, _ = mLocalFree.Call(uintptr(unsafe.Pointer(outblob.pbData))) } }() @@ -277,7 +276,7 @@ func decrypt(data []byte, entropy *dataBlob) ([]byte, error) { var outblob dataBlob defer func() { if outblob.pbData != nil { - mLocalFree.Call(uintptr(unsafe.Pointer(outblob.pbData))) + _, _, _ = mLocalFree.Call(uintptr(unsafe.Pointer(outblob.pbData))) } }() diff --git a/common/decompressingWriter.go b/common/decompressingWriter.go index 420ea554a..ea75bc6c8 100644 --- a/common/decompressingWriter.go +++ b/common/decompressingWriter.go @@ -91,8 +91,6 @@ func (d decompressingWriter) worker(tp CompressionType, preader *io.PipeReader, b := decompressingWriterBufferPool.RentSlice(decompressingWriterCopyBufferSize) _, err = io.CopyBuffer(destination, dec, b) // returns err==nil if hits EOF, as per docs decompressingWriterBufferPool.ReturnSlice(b) - - return } // Write, conceptually, takes a slice of compressed data, decompresses it, and writes it into the final destination. 
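For context on the decompressingWriter change above (which only drops a redundant bare return): the type pumps compressed Writes through an io.Pipe into a decoder goroutine, which copies plaintext to the destination. A compact sketch of that shape, assuming gzip as the codec and plain io.Copy where the real worker uses a pooled copy buffer:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

type decompressingWriter struct {
	pw   *io.PipeWriter
	done chan error
}

func newDecompressingWriter(dst io.Writer) *decompressingWriter {
	pr, pw := io.Pipe()
	d := &decompressingWriter{pw: pw, done: make(chan error, 1)}
	go func() {
		dec, err := gzip.NewReader(pr) // blocks until the header bytes arrive
		if err == nil {
			_, err = io.Copy(dst, dec) // returns nil on EOF, as the comment above notes
		}
		pr.CloseWithError(err) // unblock any in-flight Write on failure
		d.done <- err
	}()
	return d
}

func (d *decompressingWriter) Write(p []byte) (int, error) { return d.pw.Write(p) }

func (d *decompressingWriter) Close() error {
	d.pw.Close() // signal EOF to the decoder
	return <-d.done
}

func main() {
	var out bytes.Buffer
	w := newDecompressingWriter(&out)
	zw := gzip.NewWriter(w) // feed it compressed data to verify the round trip
	_, _ = zw.Write([]byte("hello"))
	_ = zw.Close()
	_ = w.Close()
	fmt.Println(out.String()) // hello
}
```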
diff --git a/common/emptyChunkReader.go b/common/emptyChunkReader.go index c8610f513..127c17295 100644 --- a/common/emptyChunkReader.go +++ b/common/emptyChunkReader.go @@ -62,5 +62,5 @@ func (cr *emptyChunkReader) Length() int64 { } func (cr *emptyChunkReader) WriteBufferTo(h hash.Hash) { - return // no content to write + // no content to write } diff --git a/common/environment.go b/common/environment.go index 85b12a1c3..d1333f00e 100644 --- a/common/environment.go +++ b/common/environment.go @@ -370,3 +370,11 @@ func (EnvironmentVariable) DownloadToTempPath() EnvironmentVariable { Description: "Configures azcopy to download to a temp path before actual download. Allowed values are true/false", } } + +func (EnvironmentVariable) DisableBlobTransferResume() EnvironmentVariable { + return EnvironmentVariable { + Name: "AZCOPY_DISABLE_INCOMPLETE_BLOB_TRANSFER", + DefaultValue: "false", + Description: "An incomplete transfer to blob endpoint will be resumed from start if set to true", + } +} \ No newline at end of file diff --git a/common/extensions.go b/common/extensions.go index 3cf77743a..4e721973c 100644 --- a/common/extensions.go +++ b/common/extensions.go @@ -2,6 +2,8 @@ package common import ( "bytes" + "encoding/base64" + "fmt" "net/http" "net/url" "runtime" @@ -12,7 +14,7 @@ import ( "github.com/Azure/azure-storage-file-go/azfile" ) -///////////////////////////////////////////////////////////////////////////////////////////////// +// /////////////////////////////////////////////////////////////////////////////////////////////// type URLStringExtension string func (s URLStringExtension) RedactSecretQueryParamForLogging() string { @@ -25,7 +27,7 @@ func (s URLStringExtension) RedactSecretQueryParamForLogging() string { return URLExtension{*u}.RedactSecretQueryParamForLogging() } -///////////////////////////////////////////////////////////////////////////////////////////////// +// /////////////////////////////////////////////////////////////////////////////////////////////// type URLExtension struct { url.URL } @@ -82,7 +84,7 @@ func RedactSecretQueryParam(rawQuery, queryKeyNeedRedact string) (bool, string) return sigFound, values.Encode() } -///////////////////////////////////////////////////////////////////////////////////////////////// +// /////////////////////////////////////////////////////////////////////////////////////////////// type FileURLPartsExtension struct { azfile.FileURLParts } @@ -98,7 +100,7 @@ func (parts FileURLPartsExtension) GetServiceURL() url.URL { return parts.URL() } -///////////////////////////////////////////////////////////////////////////////////////////////// +// /////////////////////////////////////////////////////////////////////////////////////////////// type HTTPResponseExtension struct { *http.Response } @@ -116,7 +118,7 @@ func (r HTTPResponseExtension) IsSuccessStatusCode(successStatusCodes ...int) bo return false } -///////////////////////////////////////////////////////////////////////////////////////////////// +// /////////////////////////////////////////////////////////////////////////////////////////////// type ByteSlice []byte type ByteSliceExtension struct { ByteSlice @@ -178,3 +180,14 @@ func GenerateFullPathWithQuery(rootPath, childPath, extraQuery string) string { return p + "?" + extraQuery } } + +// Current size of block names in AzCopy is 48B. To be consistent with this, +// we have to generate a 36B string and then base64-encode this to retain the +// same size. +// Block Names of blobs are of format noted below. 
+// <5B empty placeholder> <16B GUID of AzCopy re-interpreted as string><5B PartNum><5B Index in the jobPart><5B blockNum> +const AZCOPY_BLOCKNAME_LENGTH = 48 +func GenerateBlockBlobBlockID(blockNamePrefix string, index int32) string { + blockID := []byte(fmt.Sprintf("%s%05d", blockNamePrefix, index)) + return base64.StdEncoding.EncodeToString(blockID) +} diff --git a/common/extensions_test.go b/common/extensions_test.go index dcaf0cfb7..50f573aa2 100644 --- a/common/extensions_test.go +++ b/common/extensions_test.go @@ -1,9 +1,14 @@ package common import ( - chk "gopkg.in/check.v1" + "fmt" + "math/rand" "net/url" "strings" + "unsafe" + + "github.com/Azure/azure-storage-blob-go/azblob" + chk "gopkg.in/check.v1" ) type extensionsTestSuite struct{} @@ -141,3 +146,29 @@ func (*extensionsTestSuite) TestRedaction(c *chk.C) { } } } + + +func (*extensionsTestSuite) TestBlockblobBlockIDGeneration(c *chk.C) { + // Make sure that for a given JobID, jobPart, an index in job part and a block index, + // the blockID generated is consistent. + numOfFilesPerDispatchJobPart :=int32(10000) // == cmd.NumOfFilesPerDispatchJobPart + maxNumberOfParts := int32(99999) // Depends on our plan file Name, we support max of 99999 parts + azCopyBlockLength := 48 // Current size of blocks in AzCopy + + placeHolder := "00000" // 5B placeholder + jobId := NewUUID() + jobIdStr := string((*[16]byte)(unsafe.Pointer(&jobId))[:]) // 16Byte jobID + partNum := rand.Int31n(maxNumberOfParts) // 5B partNumber + fileIndex := rand.Int31n(numOfFilesPerDispatchJobPart) // 5Byte index of file in part + blockIndex := rand.Int31n(azblob.BlockBlobMaxBlocks) // 5B blockIndex + + blockNamePrefix := fmt.Sprintf("%s%s%05d%05d", placeHolder, jobIdStr, partNum, fileIndex) + blockName := GenerateBlockBlobBlockID(blockNamePrefix, blockIndex) + c.Assert(len(blockName), chk.Equals, azCopyBlockLength) + + for i := 1; i <= 10; i++ { + tmp := GenerateBlockBlobBlockID(blockNamePrefix, blockIndex) + c.Assert(tmp, chk.Equals, blockName) + } + +} \ No newline at end of file diff --git a/common/fe-ste-models.go b/common/fe-ste-models.go index f4555979b..8d197c968 100644 --- a/common/fe-ste-models.go +++ b/common/fe-ste-models.go @@ -697,6 +697,8 @@ func (TransferStatus) Success() TransferStatus { return TransferStatus(2) } // Folder was created, but properties have not been persisted yet. Equivalent to Started, but never intended to be set on anything BUT folders. func (TransferStatus) FolderCreated() TransferStatus { return TransferStatus(3) } +func (TransferStatus) Restarted() TransferStatus { return TransferStatus(4) } + // Transfer failed due to some error. 
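Returning to GenerateBlockBlobBlockID above: the 48-character invariant holds because the prefix contributes 5+16+5+5 = 31 bytes, the block index adds 5 more, and base64 maps each 3-byte group to 4 output characters, so 36 input bytes encode to exactly 48 with no padding. A self-contained check with arbitrary sample values:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	placeholder := "00000"     // 5B empty placeholder
	guid := "0123456789abcdef" // stands in for the 16B job ID reinterpreted as a string
	// + 5B part number + 5B file index = 31B prefix, as in the test above
	prefix := fmt.Sprintf("%s%s%05d%05d", placeholder, guid, 7, 42)
	// + 5B block index = 36B, which base64 encodes to 48 characters
	blockID := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s%05d", prefix, 3)))
	fmt.Println(len(blockID)) // 48
}
```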
func (TransferStatus) Failed() TransferStatus { return TransferStatus(-1) } @@ -711,10 +713,6 @@ func (TransferStatus) TierAvailabilityCheckFailure() TransferStatus { return Tra func (TransferStatus) Cancelled() TransferStatus { return TransferStatus(-6) } -func (ts TransferStatus) ShouldTransfer() bool { - return ts == ETransferStatus.NotStarted() || ts == ETransferStatus.Started() || ts == ETransferStatus.FolderCreated() -} - // Transfer can be in any of the three possible states (InProgress, Completed or Failed) func (TransferStatus) All() TransferStatus { return TransferStatus(math.MaxInt8) } func (ts TransferStatus) String() string { @@ -759,6 +757,7 @@ func (BlockBlobTier) None() BlockBlobTier { return BlockBlobTier(0) } func (BlockBlobTier) Hot() BlockBlobTier { return BlockBlobTier(1) } func (BlockBlobTier) Cool() BlockBlobTier { return BlockBlobTier(2) } func (BlockBlobTier) Archive() BlockBlobTier { return BlockBlobTier(3) } +func (BlockBlobTier) Cold() BlockBlobTier { return BlockBlobTier(4) } func (bbt BlockBlobTier) String() string { return enum.StringInt(bbt, reflect.TypeOf(bbt)) @@ -1229,7 +1228,7 @@ func ToCommonBlobTagsMap(blobTagsString string) BlobTags { const metadataRenamedKeyPrefix = "rename_" const metadataKeyForRenamedOriginalKeyPrefix = "rename_key_" -var metadataKeyInvalidCharRegex = regexp.MustCompile("\\W") +var metadataKeyInvalidCharRegex = regexp.MustCompile(`\W`) var metadataKeyRenameErrStr = "failed to rename invalid metadata key %q" // ResolveInvalidKey resolves invalid metadata key with following steps: @@ -1470,8 +1469,10 @@ var EEntityType = EntityType(0) type EntityType uint8 -func (EntityType) File() EntityType { return EntityType(0) } -func (EntityType) Folder() EntityType { return EntityType(1) } +func (EntityType) File() EntityType { return EntityType(0) } +func (EntityType) Folder() EntityType { return EntityType(1) } +func (EntityType) Symlink() EntityType { return EntityType(2) } +func (EntityType) FileProperties() EntityType { return EntityType(3) } func (e EntityType) String() string { return enum.StringInt(e, reflect.TypeOf(e)) @@ -1571,6 +1572,10 @@ type CpkInfo struct { EncryptionKeySha256 *string } +func (csi CpkInfo) Empty() bool { + return csi.EncryptionKey == nil || csi.EncryptionKeySha256 == nil +} + func (csi CpkInfo) Marshal() (string, error) { result, err := json.Marshal(csi) if err != nil { @@ -1580,7 +1585,7 @@ func (csi CpkInfo) Marshal() (string, error) { } func ToClientProvidedKeyOptions(cpkInfo CpkInfo, cpkScopeInfo CpkScopeInfo) azblob.ClientProvidedKeyOptions { - if (cpkInfo.EncryptionKey == nil || cpkInfo.EncryptionKeySha256 == nil) && cpkScopeInfo.EncryptionScope == nil { + if cpkInfo.Empty() && cpkScopeInfo.EncryptionScope == nil { return azblob.ClientProvidedKeyOptions{} } @@ -1666,7 +1671,7 @@ func (rpt RehydratePriorityType) ToRehydratePriorityType() azblob.RehydratePrior } } -// ////////////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////////// type SyncHashType uint8 var ESyncHashType SyncHashType = 0 @@ -1690,3 +1695,33 @@ func (ht *SyncHashType) Parse(s string) error { func (ht SyncHashType) String() string { return enum.StringInt(ht, reflect.TypeOf(ht)) } + +//////////////////////////////////////////////////////////////////////////////// +type SymlinkHandlingType uint8 // SymlinkHandlingType is only utilized internally to avoid having to carry around two contradictory flags. Thus, it doesn't have a parse method.
+ +// for reviewers: This is different from how we usually implement enums, but it's something I've found to be more pleasant in personal projects, especially for bitflags. Should we change the pattern to match this in the future? + +type eSymlinkHandlingType uint8 + +var ESymlinkHandlingType = eSymlinkHandlingType(0) + +func (eSymlinkHandlingType) Skip() SymlinkHandlingType { return SymlinkHandlingType(0) } +func (eSymlinkHandlingType) Follow() SymlinkHandlingType { return SymlinkHandlingType(1) } // Upload what's on the other end of the symlink +func (eSymlinkHandlingType) Preserve() SymlinkHandlingType { return SymlinkHandlingType(2) } // Copy the link + +func (sht SymlinkHandlingType) None() bool { return sht == 0 } +func (sht SymlinkHandlingType) Follow() bool { return sht == 1 } +func (sht SymlinkHandlingType) Preserve() bool { return sht == 2 } + +func (sht *SymlinkHandlingType) Determine(Follow, Preserve bool) error { + switch { + case Follow && Preserve: + return errors.New("cannot both follow and preserve symlinks (--preserve-symlinks and --follow-symlinks contradict)") + case Preserve: + *sht = ESymlinkHandlingType.Preserve() + case Follow: + *sht = ESymlinkHandlingType.Follow() + } + + return nil +} diff --git a/common/lifecyleMgr.go b/common/lifecyleMgr.go index 9320ea624..397842162 100644 --- a/common/lifecyleMgr.go +++ b/common/lifecyleMgr.go @@ -11,7 +11,9 @@ import ( "strconv" "strings" "sync/atomic" + "syscall" "time" + "unicode" "github.com/Azure/azure-pipeline-go/pipeline" ) @@ -136,6 +138,17 @@ func (lcm *lifecycleMgr) watchInputs() { default: } + allCharsAreWhiteSpace := true + for _, ch := range msg { + if !unicode.IsSpace(ch) { + allCharsAreWhiteSpace = false + break + } + } + if allCharsAreWhiteSpace { + continue + } + var req LCMMsgReq if lcm.allowCancelFromStdIn && strings.EqualFold(msg, "cancel") { lcm.cancelChannel <- os.Interrupt @@ -428,9 +441,7 @@ func (lcm *lifecycleMgr) processNoneOutput(msgToOutput outputMessage) { lcm.closeFunc() os.Exit(int(msgToOutput.exitCode)) } - // ignore all other outputs - return } func (lcm *lifecycleMgr) processJSONOutput(msgToOutput outputMessage) { @@ -555,7 +566,7 @@ func (lcm *lifecycleMgr) InitiateProgressReporting(jc WorkController) { lastFetchTime := time.Now().Add(-wait) // So that we start fetching time immediately // cancelChannel will be notified when os receives os.Interrupt and os.Kill signals - signal.Notify(lcm.cancelChannel, os.Interrupt, os.Kill) + signal.Notify(lcm.cancelChannel, os.Interrupt, syscall.SIGTERM) cancelCalled := false diff --git a/common/logger.go b/common/logger.go index 56e708e6b..6b30fda39 100644 --- a/common/logger.go +++ b/common/logger.go @@ -52,56 +52,6 @@ type ILoggerResetable interface { //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// -func NewAppLogger(minimumLevelToLog pipeline.LogLevel, logFileFolder string) ILoggerCloser { - // TODO: Put start date time in file Name - // TODO: log life time management. - // appLogFile, err := os.OpenFile(path.Join(logFileFolder, "azcopy.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) // TODO: Make constant for 0666 - // PanicIfErr(err) - return &appLogger{ - minimumLevelToLog: minimumLevelToLog, - // file: appLogFile, - // logger: log.New(appLogFile, "", log.LstdFlags|log.LUTC), - } -} - -type appLogger struct { - // maximum loglevel represents the maximum severity of log messages which can be logged to Job Log file. - // any message with severity higher than this will be ignored.
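A hedged usage sketch for the Determine method defined above, assuming the module import path used elsewhere in this diff; it shows how the two mutually exclusive CLI flags collapse into the single tri-state value the traversers now carry (the logger cleanup continues below):

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-storage-azcopy/v10/common"
)

func main() {
	var handling common.SymlinkHandlingType
	// --follow-symlinks=true, --preserve-symlinks=false
	if err := handling.Determine(true, false); err != nil {
		fmt.Println(err) // only fires when both flags are set
		return
	}
	// With neither flag set, handling stays Skip and handling.None() is true.
	fmt.Println(handling.Follow(), handling.Preserve(), handling.None()) // true false false
}
```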
- minimumLevelToLog pipeline.LogLevel // The maximum customer-desired log level for this job - file *os.File // The job's log file - logger *log.Logger // The Job's logger -} - -func (al *appLogger) ShouldLog(level pipeline.LogLevel) bool { - if level == pipeline.LogNone { - return false - } - return level <= al.minimumLevelToLog -} - -func (al *appLogger) CloseLog() { - // TODO consider delete completely to get rid of app logger - // al.logger.Println("Closing Log") - // err := al.file.Close() - // PanicIfErr(err) -} - -func (al *appLogger) Log(loglevel pipeline.LogLevel, msg string) { - // TODO consider delete completely to get rid of app logger - // TODO: see also the workaround in jobsAdmin.LogToJobLog - // TODO: if we DON'T delete, use azCopyLogSanitizer - // if al.ShouldLog(loglevel) { - // al.logger.Println(msg) - // } -} - -func (al *appLogger) Panic(err error) { - // TODO consider delete completely to get rid of app logger - // al.logger.Panic(err) -} - -//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// - type jobLogger struct { // maximum loglevel represents the maximum severity of log messages which can be logged to Job Log file. // any message with severity higher than this will be ignored. diff --git a/common/logger_unix.go b/common/logger_unix.go index 63e31a52e..3277d5adb 100644 --- a/common/logger_unix.go +++ b/common/logger_unix.go @@ -1,4 +1,6 @@ +//go:build linux || darwin // +build linux darwin + // Copyright Microsoft // // Permission is hereby granted, free of charge, to any person obtaining a copy @@ -23,29 +25,28 @@ package common import ( "fmt" - "runtime" "log/syslog" + "runtime" "github.com/Azure/azure-pipeline-go/pipeline" ) -////////////////////////////////////////// +// //////////////////////////////////////// type sysLogger struct { // minimum loglevel represents the minimum severity of log messages which can be logged to Job Log file. // any message with severity higher than this will be ignored. 
jobID JobID minimumLevelToLog pipeline.LogLevel // The maximum customer-desired log level for this job - writer *syslog.Writer // The Job's logger + writer *syslog.Writer // The Job's logger logSuffix string sanitizer pipeline.LogSanitizer } - func NewSysLogger(jobID JobID, minimumLevelToLog LogLevel, logSuffix string) ILoggerResetable { return &sysLogger{ jobID: jobID, minimumLevelToLog: minimumLevelToLog.ToPipelineLogLevel(), - logSuffix: logSuffix, + logSuffix: logSuffix, sanitizer: NewAzCopyLogSanitizer(), } } @@ -53,16 +54,16 @@ func NewSysLogger(jobID JobID, minimumLevelToLog LogLevel, logSuffix string) ILo func (sl *sysLogger) OpenLog() { if sl.minimumLevelToLog == pipeline.LogNone { return - } + } writer, err := syslog.New(syslog.LOG_NOTICE, fmt.Sprintf("%s %s", sl.logSuffix, sl.jobID.String())) PanicIfErr(err) sl.writer = writer // Log the Azcopy Version - sl.writer.Notice("AzcopyVersion " + AzcopyVersion) + _ = sl.writer.Notice("AzcopyVersion " + AzcopyVersion) // Log the OS Environment and OS Architecture - sl.writer.Notice("OS-Environment " + runtime.GOOS) - sl.writer.Notice("OS-Architecture " + runtime.GOARCH) + _ = sl.writer.Notice("OS-Environment " + runtime.GOOS) + _ = sl.writer.Notice("OS-Architecture " + runtime.GOARCH) } func (sl *sysLogger) MinimumLogLevel() pipeline.LogLevel { @@ -81,13 +82,12 @@ func (sl *sysLogger) CloseLog() { return } - sl.writer.Notice("Closing Log") + _ = sl.writer.Notice("Closing Log") sl.writer.Close() } - func (sl *sysLogger) Panic(err error) { - sl.writer.Crit(err.Error()) // We do NOT panic here as the app would terminate; + _ = sl.writer.Crit(err.Error()) // We do NOT panic here as the app would terminate; //we just log it. We should never reach this line of code! } @@ -103,16 +103,16 @@ func (sl *sysLogger) Log(loglevel pipeline.LogLevel, msg string) { case pipeline.LogNone: //nothing to do case pipeline.LogFatal: - w.Emerg(msg) + _ = w.Emerg(msg) case pipeline.LogPanic: - w.Crit(msg) + _ = w.Crit(msg) case pipeline.LogError: - w.Err(msg) + _ = w.Err(msg) case pipeline.LogWarning: - w.Warning(msg) + _ = w.Warning(msg) case pipeline.LogInfo: - w.Info(msg) + _ = w.Info(msg) case pipeline.LogDebug: - w.Debug(msg) + _ = w.Debug(msg) } -} \ No newline at end of file +} diff --git a/common/mmf_linux.go b/common/mmf_linux.go index 911e34506..d21b8c10a 100644 --- a/common/mmf_linux.go +++ b/common/mmf_linux.go @@ -1,3 +1,4 @@ +//go:build linux || darwin // +build linux darwin // Copyright © 2017 Microsoft @@ -55,7 +56,7 @@ func NewMMF(file *os.File, writable bool, offset int64, length int64) (*MMF, err } addr, err := syscall.Mmap(int(file.Fd()), offset, int(length), prot, flags) if !writable { - syscall.Madvise(addr, syscall.MADV_SEQUENTIAL|syscall.MADV_WILLNEED) + _ = syscall.Madvise(addr, syscall.MADV_SEQUENTIAL|syscall.MADV_WILLNEED) } return &MMF{slice: (addr), isMapped: true, lock: sync.RWMutex{}}, err } diff --git a/common/mmf_windows.go b/common/mmf_windows.go index bf1192a4e..0b3f79cf7 100644 --- a/common/mmf_windows.go +++ b/common/mmf_windows.go @@ -60,8 +60,8 @@ func NewMMF(file *os.File, writable bool, offset int64, length int64) (*MMF, err if hMMF == 0 { return nil, os.NewSyscallError("CreateFileMapping", errno) } - defer syscall.CloseHandle(hMMF) - addr, errno := syscall.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length)) + defer syscall.CloseHandle(hMMF) //nolint:errcheck + addr, _ := syscall.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length)) if 
!writable { // pre-fetch the memory mapped file so that performance is better when it is read @@ -92,7 +92,7 @@ func (m *MMF) Unmap() { // "lazily" to disk; that is, modifications may be cached in memory and written to disk // at a later time. To avoid modifications to be cached in memory,explicitly flushing // modified pages using the FlushViewOfFile function. - syscall.FlushViewOfFile(addr, uintptr(m.length)) + _ = syscall.FlushViewOfFile(addr, uintptr(m.length)) err := syscall.UnmapViewOfFile(addr) PanicIfErr(err) m.isMapped = false diff --git a/common/oauthTokenManager.go b/common/oauthTokenManager.go index 48fdd2f98..fc271cd4a 100644 --- a/common/oauthTokenManager.go +++ b/common/oauthTokenManager.go @@ -30,7 +30,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "net/http" "os" @@ -92,7 +91,7 @@ func newAzcopyHTTPClient() *http.Client { Timeout: 10 * time.Second, KeepAlive: 10 * time.Second, DualStack: true, - }).Dial, /*Context*/ + }).Dial, /*Context*/ MaxIdleConns: 0, // No limit MaxIdleConnsPerHost: 1000, IdleConnTimeout: 180 * time.Second, @@ -258,8 +257,8 @@ func (credInfo *OAuthTokenInfo) GetNewTokenFromSecret(ctx context.Context) (*ada // Read a potentially encrypted PKCS block func readPKCSBlock(block *pem.Block, secret []byte, parseFunc func([]byte) (interface{}, error)) (pk interface{}, err error) { // Reduce code duplication by baking the parse functions into this - if x509.IsEncryptedPEMBlock(block) { - data, err := x509.DecryptPEMBlock(block, secret) + if x509.IsEncryptedPEMBlock(block) { //nolint:staticcheck + data, err := x509.DecryptPEMBlock(block, secret) //nolint:staticcheck if err == nil { pk, err = parseFunc(data) @@ -303,7 +302,7 @@ func certLoginNoUOTM(tenantID, activeDirectoryEndpoint, certPath, certPass, appl return nil, err } - certData, err := ioutil.ReadFile(certPath) + certData, err := os.ReadFile(certPath) if err != nil { return nil, err } @@ -743,7 +742,7 @@ func (credInfo *OAuthTokenInfo) queryIMDS(ctx context.Context, msiEndpoint strin req.Header.Set("Metadata", "true") // Set context. - req.WithContext(ctx) + req = req.WithContext(ctx) // In case of some other process (Http Server) listening at 127.0.0.1:40342 , we do not want to wait forever for it to serve request msiTokenHTTPClient.Timeout = 10 * time.Second // Send request @@ -805,7 +804,7 @@ func (credInfo *OAuthTokenInfo) GetNewTokenFromMSI(ctx context.Context) (*adal.T req, resp, errArcVM := credInfo.queryIMDS(ctx, MSIEndpointArcVM, targetResource, IMDSAPIVersionArcVM) if errArcVM != nil { // Try Azure VM since there was an error in trying Arc VM - reqAzureVM, respAzureVM, errAzureVM := credInfo.queryIMDS(ctx, MSIEndpointAzureVM, targetResource, IMDSAPIVersionAzureVM) + reqAzureVM, respAzureVM, errAzureVM := credInfo.queryIMDS(ctx, MSIEndpointAzureVM, targetResource, IMDSAPIVersionAzureVM) //nolint:staticcheck if errAzureVM != nil { var serr syscall.Errno if errors.As(errArcVM, &serr) { @@ -833,17 +832,17 @@ func (credInfo *OAuthTokenInfo) GetNewTokenFromMSI(ctx context.Context) (*adal.T } // Arc IMDS failed with error, but Azure IMDS succeeded - req, resp = reqAzureVM, respAzureVM + req, resp = reqAzureVM, respAzureVM //nolint:staticcheck } else if !isValidArcResponse(resp) { // Not valid response from ARC IMDS endpoint. Perhaps some other process listening on it. Try Azure IMDS endpoint as fallback option. 
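The queryIMDS change above (req = req.WithContext(ctx)) fixes a classic net/http pitfall: Request.WithContext returns a shallow copy and leaves the receiver untouched, so discarding the result silently drops the context. A minimal demonstration, with a placeholder URL rather than the real IMDS endpoint:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	req, _ := http.NewRequest(http.MethodGet, "http://example.com/token", nil)

	req.WithContext(ctx)       // bug: returned copy discarded, req still uses context.Background()
	req = req.WithContext(ctx) // fix: reassign, as the diff does

	fmt.Println(req.Context().Err()) // <nil> until the timeout fires
}
```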
- reqAzureVM, respAzureVM, errAzureVM := credInfo.queryIMDS(ctx, MSIEndpointAzureVM, targetResource, IMDSAPIVersionAzureVM) + reqAzureVM, respAzureVM, errAzureVM := credInfo.queryIMDS(ctx, MSIEndpointAzureVM, targetResource, IMDSAPIVersionAzureVM) //nolint:staticcheck if errAzureVM != nil { // Neither Arc nor Azure VM IMDS endpoint available. Can't use MSI. return nil, fmt.Errorf("invalid response received from Arc IMDS endpoint (%s), probably some unknown process listening. If this an Azure VM, please check whether MSI is enabled, to enable MSI please refer to https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm#enable-system-assigned-identity-on-an-existing-vm: %v", MSIEndpointArcVM, errAzureVM) } // Azure VM IMDS endpoint ok! - req, resp = reqAzureVM, respAzureVM + req, resp = reqAzureVM, respAzureVM //nolint:staticcheck } else { // Valid response received from ARC IMDS endpoint. Proceed with the next step. challengeTokenPath := strings.Split(resp.Header["Www-Authenticate"][0], "=")[1] @@ -880,7 +879,7 @@ func (credInfo *OAuthTokenInfo) GetNewTokenFromMSI(ctx context.Context) (*adal.T } defer func() { // resp and Body should not be nil - io.Copy(ioutil.Discard, resp.Body) + _, _ = io.Copy(io.Discard, resp.Body) resp.Body.Close() }() @@ -890,7 +889,7 @@ func (credInfo *OAuthTokenInfo) GetNewTokenFromMSI(ctx context.Context) (*adal.T return nil, fmt.Errorf("failed to get token from msi, status code: %v", resp.StatusCode) } - b, err := ioutil.ReadAll(resp.Body) + b, err := io.ReadAll(resp.Body) if err != nil { return nil, err } diff --git a/common/parallel/FileSystemCrawler.go b/common/parallel/FileSystemCrawler.go index f8cf4a4a3..61f2c69bb 100644 --- a/common/parallel/FileSystemCrawler.go +++ b/common/parallel/FileSystemCrawler.go @@ -100,6 +100,7 @@ func Walk(appCtx context.Context, root string, parallelism int, parallelStat boo defer reader.Close() ctx, cancel = context.WithCancel(appCtx) + defer cancel() ch := CrawlLocalDirectory(ctx, root, remainingParallelism, reader) for crawlResult := range ch { entry, err := crawlResult.Item() @@ -116,7 +117,6 @@ func Walk(appCtx context.Context, root string, parallelism int, parallelStat boo } } if err != nil { - cancel() return } } diff --git a/common/parallel/zt_FileSystemCrawlerTest_test.go b/common/parallel/zt_FileSystemCrawlerTest_test.go index af184c4b3..362925811 100644 --- a/common/parallel/zt_FileSystemCrawlerTest_test.go +++ b/common/parallel/zt_FileSystemCrawlerTest_test.go @@ -37,7 +37,6 @@ func Test(t *testing.T) { chk.TestingT(t) } type fileSystemCrawlerSuite struct{} var _ = chk.Suite(&fileSystemCrawlerSuite{}) -var ctx = context.Background() var windowsSystemDirectory = "" diff --git a/common/rpc-models.go b/common/rpc-models.go index 24782b538..6d69525a0 100644 --- a/common/rpc-models.go +++ b/common/rpc-models.go @@ -110,24 +110,26 @@ func ConsolidatePathSeparators(path string) string { // Transfers describes each file/folder being transferred in a given JobPartOrder, and // other auxiliary details of this order. 
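The FileSystemCrawler hunk above replaces a cancel() call on one error path with an unconditional defer cancel() immediately after context.WithCancel. Every WithCancel context must have its cancel function invoked on all return paths or its resources leak; this is the pattern go vet's lostcancel analyzer exists to catch. A minimal sketch:

```go
package main

import (
	"context"
	"fmt"
)

func crawl(appCtx context.Context) error {
	ctx, cancel := context.WithCancel(appCtx)
	defer cancel() // runs on every exit path, error or not

	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		fmt.Println("crawling...")
		return nil
	}
}

func main() { _ = crawl(context.Background()) }
```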
type Transfers struct { - List []CopyTransfer - TotalSizeInBytes uint64 - FileTransferCount uint32 - FolderTransferCount uint32 + List []CopyTransfer + TotalSizeInBytes uint64 + FileTransferCount uint32 + FolderTransferCount uint32 + SymlinkTransferCount uint32 } // This struct represents the job info (a single part) to be sent to the storage engine type CopyJobPartOrderRequest struct { - Version Version // version of azcopy - JobID JobID // Guid - job identifier - PartNum PartNumber // part number of the job - IsFinalPart bool // to determine the final part for a specific job - ForceWrite OverwriteOption // to determine if the existing needs to be overwritten or not. If set to true, existing blobs are overwritten - ForceIfReadOnly bool // Supplements ForceWrite with addition setting for Azure Files objects with read-only attribute - AutoDecompress bool // if true, source data with encodings that represent compression are automatically decompressed when downloading - Priority JobPriority // priority of the task - FromTo FromTo - Fpo FolderPropertyOption // passed in from front-end to ensure that front-end and STE agree on the desired behaviour for the job + Version Version // version of azcopy + JobID JobID // Guid - job identifier + PartNum PartNumber // part number of the job + IsFinalPart bool // to determine the final part for a specific job + ForceWrite OverwriteOption // to determine if the existing needs to be overwritten or not. If set to true, existing blobs are overwritten + ForceIfReadOnly bool // Supplements ForceWrite with an additional setting for Azure Files objects with read-only attribute + AutoDecompress bool // if true, source data with encodings that represent compression are automatically decompressed when downloading + Priority JobPriority // priority of the task + FromTo FromTo + Fpo FolderPropertyOption // passed in from front-end to ensure that front-end and STE agree on the desired behaviour for the job + SymlinkHandlingType SymlinkHandlingType // list of blobTypes to exclude. ExcludeBlobType []azblob.BlobType @@ -260,6 +262,7 @@ type ListJobSummaryResponse struct { // FileTransfers. FileTransfers uint32 `json:",string"` FolderPropertyTransfers uint32 `json:",string"` + SymlinkTransfers uint32 `json:",string"` FoldersCompleted uint32 `json:",string"` // Files can be figured out by TransfersCompleted - FoldersCompleted TransfersCompleted uint32 `json:",string"` diff --git a/common/singleChunkReader.go b/common/singleChunkReader.go index d362c2ea8..4586368ea 100644 --- a/common/singleChunkReader.go +++ b/common/singleChunkReader.go @@ -25,7 +25,6 @@ import ( "errors" "hash" "io" - "runtime" "sync" "github.com/Azure/azure-pipeline-go/pipeline" @@ -454,7 +453,7 @@ func (cr *singleChunkReader) GetPrologueState() PrologueState { // unuse before Seek, since Seek is public cr.unuse() // MUST re-wind, so that the bytes we read will get transferred too!
- _, err = cr.Seek(0, io.SeekStart) + _, _ = cr.Seek(0, io.SeekStart) return PrologueState{LeadingBytes: leadingBytes} } @@ -473,14 +472,3 @@ func (cr *singleChunkReader) WriteBufferTo(h hash.Hash) { panic("documentation of hash.Hash.Write says it will never return an error") } } - -func stack() []byte { - buf := make([]byte, 2048) - for { - n := runtime.Stack(buf, false) - if n < len(buf) { - return buf[:n] - } - buf = make([]byte, 2*len(buf)) - } -} diff --git a/common/unixStatAdapter.go b/common/unixStatAdapter.go index c55a3c744..6f5b89abc 100644 --- a/common/unixStatAdapter.go +++ b/common/unixStatAdapter.go @@ -2,6 +2,7 @@ package common import ( "github.com/Azure/azure-storage-blob-go/azblob" + "os" "strconv" "time" ) @@ -45,6 +46,11 @@ var AllLinuxProperties = []string{ POSIXOwnerMeta, POSIXGroupMeta, POSIXModeMeta, + LINUXStatxMaskMeta, + LINUXAttributeMaskMeta, + POSIXCTimeMeta, + POSIXModTimeMeta, + LINUXAttributeMeta, } //goland:noinspection GoCommentStart @@ -338,7 +344,22 @@ func ClearStatFromBlobMetadata(metadata azblob.Metadata) { } func AddStatToBlobMetadata(s UnixStatAdapter, metadata azblob.Metadata) { - // TODO: File mode properties (hdi_isfolder, etc.) + applyMode := func(mode os.FileMode) { + modes := map[uint32]string{ + S_IFCHR: POSIXCharDeviceMeta, + S_IFBLK: POSIXBlockDeviceMeta, + S_IFSOCK: POSIXSocketMeta, + S_IFIFO: POSIXFIFOMeta, + S_IFDIR: POSIXFolderMeta, + } + + for modeToTest, metaToApply := range modes { + if mode & os.FileMode(modeToTest) == os.FileMode(modeToTest) { + tryAddMetadata(metadata, metaToApply, "true") + } + } + } + if s.Extended() { // try to poll the other properties mask := s.StatxMask() @@ -350,7 +371,7 @@ func AddStatToBlobMetadata(s UnixStatAdapter, metadata azblob.Metadata) { tryAddMetadata(metadata, LINUXBTimeMeta, strconv.FormatInt(s.BTime().UnixNano(), 10)) } - if StatXReturned(mask, STATX_MODE) { + if StatXReturned(mask, STATX_NLINK) { tryAddMetadata(metadata, POSIXNlinkMeta, strconv.FormatUint(s.NLink(), 10)) } @@ -364,6 +385,7 @@ if StatXReturned(mask, STATX_MODE) { tryAddMetadata(metadata, POSIXModeMeta, strconv.FormatUint(uint64(s.FileMode()), 10)) + applyMode(os.FileMode(s.FileMode())) } if StatXReturned(mask, STATX_INO) { @@ -395,6 +417,7 @@ tryAddMetadata(metadata, POSIXOwnerMeta, strconv.FormatUint(uint64(s.Owner()), 10)) tryAddMetadata(metadata, POSIXGroupMeta, strconv.FormatUint(uint64(s.Group()), 10)) tryAddMetadata(metadata, POSIXModeMeta, strconv.FormatUint(uint64(s.FileMode()), 10)) + applyMode(os.FileMode(s.FileMode())) tryAddMetadata(metadata, POSIXINodeMeta, strconv.FormatUint(s.INode(), 10)) tryAddMetadata(metadata, POSIXDevMeta, strconv.FormatUint(s.Device(), 10)) diff --git a/common/util.go b/common/util.go index 1e4307184..12a2468c4 100644 --- a/common/util.go +++ b/common/util.go @@ -1,4 +1,20 @@ package common +import ( + "net" + "net/url" +) + var AzcopyJobPlanFolder string -var AzcopyCurrentJobLogger ILoggerResetable \ No newline at end of file +var AzcopyCurrentJobLogger ILoggerResetable + + +func VerifyIsURLResolvable(rawURL string) error { + parsed, err := url.Parse(rawURL) + if err != nil { + return err + } + + _, err = net.LookupIP(parsed.Host) + return err +} \ No newline at end of file diff --git a/common/util_test.go b/common/util_test.go new file mode 100644 index 000000000..ad5453054 --- /dev/null +++ b/common/util_test.go @@ -0,0 +1,17 @@
+package common + +import chk "gopkg.in/check.v1" + +type utilityFunctionsSuite struct{} + +var _ = chk.Suite(&utilityFunctionsSuite{}) + +func (*utilityFunctionsSuite) Test_VerifyIsURLResolvable(c *chk.C) { + validURL := "https://github.com/" + invalidURL := "someString" + invalidURL2 := "https://$invalidAccount.blob.core.windows.net/" + + c.Assert(VerifyIsURLResolvable(validURL), chk.IsNil) + c.Assert(VerifyIsURLResolvable(invalidURL), chk.NotNil) + c.Assert(VerifyIsURLResolvable(invalidURL2), chk.NotNil) +} \ No newline at end of file diff --git a/common/uuid.go index 8c4dcf9c9..fd1099a8d 100644 --- a/common/uuid.go +++ b/common/uuid.go @@ -9,11 +9,8 @@ import ( // The JobID reserved variants. const ( - reservedNCS byte = 0x80 - reservedRFC4122 byte = 0x40 - reservedMicrosoft byte = 0x20 - reservedFuture byte = 0x00 - guidFormat = "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x" + reservedRFC4122 byte = 0x40 + guidFormat = "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x" ) // A UUID representation compliant with specification in RFC 4122 document. diff --git a/common/version.go index 55222b260..202e0da9b 100644 --- a/common/version.go +++ b/common/version.go @@ -1,6 +1,6 @@ package common -const AzcopyVersion = "10.17.0" +const AzcopyVersion = "10.18.0" const UserAgent = "AzCopy/" + AzcopyVersion const S3ImportUserAgent = "S3Import " + UserAgent const GCPImportUserAgent = "GCPImport " + UserAgent diff --git a/common/writeThoughFile.go index 16bc4f100..2fe184762 100644 --- a/common/writeThoughFile.go +++ b/common/writeThoughFile.go @@ -30,6 +30,7 @@ const IncludeBeforeFlagName = "include-before" const IncludeAfterFlagName = "include-after" const BackupModeFlagName = "backup" // original name, backup mode, matches the name used for the same thing in Robocopy const PreserveOwnerFlagName = "preserve-owner" +const PreserveSymlinkFlagName = "preserve-symlinks" const PreserveOwnerDefault = true // The regex doesn't require a / on the ending, it just requires something similar to the following @@ -41,15 +42,28 @@ const PreserveOwnerDefault = true var RootDriveRegex = regexp.MustCompile(`(?i)(^[A-Z]:\/?$)`) var RootShareRegex = regexp.MustCompile(`(^\/\/[^\/]*\/?$)`) +func isRootPath(s string) bool { + shortParentDir := strings.ReplaceAll(ToShortPath(s), OS_PATH_SEPARATOR, AZCOPY_PATH_SEPARATOR_STRING) + return RootDriveRegex.MatchString(shortParentDir) || + RootShareRegex.MatchString(shortParentDir) || + strings.EqualFold(shortParentDir, "/") +} + + func CreateParentDirectoryIfNotExist(destinationPath string, tracker FolderCreationTracker) error { - // find the parent directory - directory := destinationPath[:strings.LastIndex(destinationPath, DeterminePathSeparator(destinationPath))] + // If we're pointing at the root of a drive, don't try because it won't work. + if isRootPath(destinationPath) { + return nil + } + + lastIndex := strings.LastIndex(destinationPath, DeterminePathSeparator(destinationPath)) + directory := destinationPath[:lastIndex] return CreateDirectoryIfNotExist(directory, tracker) } func CreateDirectoryIfNotExist(directory string, tracker FolderCreationTracker) error { // If we're pointing at the root of a drive, don't try because it won't work.
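The isRootPath helper factored out in writeThoughFile.go above is easiest to understand through concrete inputs. Here is a standalone approximation (the AzCopy-specific ToShortPath conversion is replaced by a plain separator normalization, so this is a sketch rather than the real helper):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// The same two patterns as RootDriveRegex and RootShareRegex above.
var (
	rootDriveRegex = regexp.MustCompile(`(?i)(^[A-Z]:\/?$)`)
	rootShareRegex = regexp.MustCompile(`(^\/\/[^\/]*\/?$)`)
)

// isRootPath reports whether s denotes a drive root ("C:", "C:/"),
// a bare UNC share ("//server", "//server/"), or the filesystem root "/".
// Such a "directory" cannot be created, so callers skip the mkdir entirely.
func isRootPath(s string) bool {
	p := strings.ReplaceAll(s, `\`, "/")
	return rootDriveRegex.MatchString(p) ||
		rootShareRegex.MatchString(p) ||
		strings.EqualFold(p, "/")
}

func main() {
	for _, p := range []string{`C:\`, "C:", "//server/", "/", "/home/user", `C:\data`} {
		fmt.Printf("%-12q -> %v\n", p, isRootPath(p))
	}
}
```

The first four inputs print true and the last two false, which is exactly the set of cases CreateParentDirectoryIfNotExist and CreateDirectoryIfNotExist now short-circuit on.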
- if shortParentDir := strings.ReplaceAll(ToShortPath(directory), OS_PATH_SEPARATOR, AZCOPY_PATH_SEPARATOR_STRING); RootDriveRegex.MatchString(shortParentDir) || RootShareRegex.MatchString(shortParentDir) || strings.EqualFold(shortParentDir, "/") { + if isRootPath(directory) { return nil } @@ -59,7 +73,7 @@ func CreateDirectoryIfNotExist(directory string, tracker FolderCreationTracker) // stat errors can be present in write-only scenarios, when the directory isn't present, etc. // as a result, we care more about the mkdir error than the stat error, because that's the tell. // first make sure the parent directory exists but we ignore any error that comes back - CreateParentDirectoryIfNotExist(directory, tracker) + _ = CreateParentDirectoryIfNotExist(directory, tracker) // then create the directory mkDirErr := tracker.CreateFolder(directory, func() error { diff --git a/common/writeThoughFile_windows.go b/common/writeThoughFile_windows.go index d218bdf5d..c1ca1e2bb 100644 --- a/common/writeThoughFile_windows.go +++ b/common/writeThoughFile_windows.go @@ -125,13 +125,6 @@ func CreateFileOfSizeWithWriteThroughOption(destinationPath string, fileSize int fd, err := doOpen() if err != nil { - // Because a hidden file isn't necessarily a intentional lock on a file, we choose to make it a default override. - toMatchSet := FILE_ATTRIBUTE_HIDDEN - // But, by the opposite nature, readonly is a intentional lock, so we make it a required option. - if forceIfReadOnly { - toMatchSet |= FILE_ATTRIBUTE_READONLY - } - // Let's check what we might need to clear, and if we should retry toClearFlagSet, allFlags, toRetry := getFlagMatches(FILE_ATTRIBUTE_READONLY | FILE_ATTRIBUTE_HIDDEN) diff --git a/e2etest/declarativeHelpers.go b/e2etest/declarativeHelpers.go index ef09d4622..f3a825430 100644 --- a/e2etest/declarativeHelpers.go +++ b/e2etest/declarativeHelpers.go @@ -52,7 +52,6 @@ func equals() comparison { return comparison{true} } -// nolint func notEquals() comparison { return comparison{false} } @@ -116,7 +115,7 @@ func (a *testingAsserter) AssertNoErr(err error, comment ...string) { a.t.Helper() // exclude this method from the logged callstack redactedErr := sanitizer.SanitizeLogMessage(err.Error()) a.t.Logf("Error %s%s", redactedErr, a.formatComments(comment)) - a.t.FailNow() + a.t.Fail() } } @@ -172,6 +171,7 @@ type params struct { accessTier azblob.AccessTierType checkMd5 common.HashValidationOption compareHash common.SyncHashType + symlinkHandling common.SymlinkHandlingType destNull bool diff --git a/e2etest/declarativeResourceAdapters.go b/e2etest/declarativeResourceAdapters.go index faadbb05a..f992e8569 100644 --- a/e2etest/declarativeResourceAdapters.go +++ b/e2etest/declarativeResourceAdapters.go @@ -60,8 +60,13 @@ func (a blobResourceAdapter) toHeaders() azblob.BlobHTTPHeaders { func (a blobResourceAdapter) toMetadata() azblob.Metadata { if a.obj.creationProperties.nameValueMetadata == nil { - return azblob.Metadata{} + a.obj.creationProperties.nameValueMetadata = azblob.Metadata{} } + + if a.obj.creationProperties.posixProperties != nil { + a.obj.creationProperties.posixProperties.AddToMetadata(a.obj.creationProperties.nameValueMetadata) + } + return a.obj.creationProperties.nameValueMetadata } diff --git a/e2etest/declarativeResourceManagers.go b/e2etest/declarativeResourceManagers.go index 4b86ef673..67d0fe89a 100644 --- a/e2etest/declarativeResourceManagers.go +++ b/e2etest/declarativeResourceManagers.go @@ -44,7 +44,6 @@ type downloadContentOptions struct { downloadFileContentOptions } -// 
nolint type downloadBlobContentOptions struct { containerURL azblob.ContainerURL cpkInfo common.CpkInfo @@ -123,6 +122,7 @@ func (r *resourceLocal) createFiles(a asserter, s *scenario, isSource bool) { generateFromListOptions: generateFromListOptions{ fs: s.fs.allObjects(isSource), defaultSize: s.fs.defaultSize, + preservePosixProperties: s.p.preservePOSIXProperties, }, }) } diff --git a/e2etest/declarativeRunner.go b/e2etest/declarativeRunner.go index e0615bb34..568c607e3 100644 --- a/e2etest/declarativeRunner.go +++ b/e2etest/declarativeRunner.go @@ -109,7 +109,7 @@ func RunScenarios( operations Operation, testFromTo TestFromTo, validate Validate, // TODO: do we really want the test author to have to nominate which validation should happen? Pros: better perf of tests. Cons: they have to tell us, and if they tell us wrong test may not test what they think it tests - // _ interface{}, // TODO if we want it??, blockBlobsOnly or specific/all blob types +// _ interface{}, // TODO if we want it??, blockBlobsOnly or specific/all blob types // It would be a pain to list out every combo by hand, // In addition to the fact that not every credential type is sensible. diff --git a/e2etest/declarativeScenario.go b/e2etest/declarativeScenario.go index d79f1525f..ccec1dd6a 100644 --- a/e2etest/declarativeScenario.go +++ b/e2etest/declarativeScenario.go @@ -27,6 +27,7 @@ import ( "os" "path" "path/filepath" + "strings" "time" "github.com/Azure/azure-storage-azcopy/v10/common" @@ -256,7 +257,7 @@ func (s *scenario) runAzCopy(logDirectory string) { result, wasClean, err := r.ExecuteAzCopyCommand( s.operation, s.state.source.getParam(s.stripTopDir, needsSAS(s.credTypes[0]), tf.objectTarget), - s.state.dest.getParam(false, needsSAS(s.credTypes[1]), common.IffString(tf.destTarget != "", tf.destTarget, tf.objectTarget)), + s.state.dest.getParam(false, needsSAS(s.credTypes[1]), common.IffString(tf.destTarget != "", tf.destTarget, tf.objectTarget)), s.credTypes[0] == common.ECredentialType.OAuthToken() || s.credTypes[1] == common.ECredentialType.OAuthToken(), // needsOAuth afterStart, s.chToStdin, logDirectory) @@ -355,7 +356,7 @@ func (s *scenario) validateTransferStates(azcopyDir string) { actualTransfers, err := s.state.result.GetTransferList(statusToTest, azcopyDir) s.a.AssertNoErr(err) - Validator{}.ValidateCopyTransfersAreScheduled(s.a, isSrcEncoded, isDstEncoded, srcRoot, dstRoot, expectedTransfers, actualTransfers, statusToTest, s.FromTo(), s.srcAccountType, s.destAccountType) + Validator{}.ValidateCopyTransfersAreScheduled(s.a, isSrcEncoded, isDstEncoded, srcRoot, dstRoot, expectedTransfers, actualTransfers, statusToTest, expectFolders) // TODO: how are we going to validate folder transfers???? 
} @@ -367,16 +368,26 @@ func (s *scenario) getTransferInfo() (srcRoot string, dstRoot string, expectFold srcRoot = s.state.source.getParam(false, false, "") dstRoot = s.state.dest.getParam(false, false, "") + srcBase := filepath.Base(srcRoot) + srcRootURL, err := url.Parse(srcRoot) + if err == nil { + snapshotID := srcRootURL.Query().Get("sharesnapshot") + if snapshotID != "" { + srcBase = filepath.Base(strings.TrimSuffix(srcRoot, "?sharesnapshot="+snapshotID)) + } + } + // do we expect folder transfers expectFolders = (s.fromTo.From().IsFolderAware() && s.fromTo.To().IsFolderAware() && s.p.allowsFolderTransfers()) || - (s.p.preserveSMBPermissions && s.FromTo() == common.EFromTo.BlobBlob()) + (s.p.preserveSMBPermissions && s.FromTo() == common.EFromTo.BlobBlob()) || + (s.p.preservePOSIXProperties && (s.FromTo() == common.EFromTo.LocalBlob() || s.FromTo() == common.EFromTo.BlobBlob() || s.FromTo() == common.EFromTo.BlobLocal())) expectRootFolder := expectFolders // compute dest, taking into account our stripTopDir rules addedDirAtDest = "" - areBothContainerLike := s.state.source.isContainerLike() && s.state.dest.isContainerLike() + areBothContainerLike := s.state.source.isContainerLike() && s.state.dest.isContainerLike() && !s.p.preserveSMBPermissions // There are no permission-compatible sources and destinations that do not feature support for root folder perms anymore* tf := s.GetTestFiles() if s.stripTopDir || s.operation == eOperation.Sync() || areBothContainerLike { @@ -385,16 +396,25 @@ func (s *scenario) getTransferInfo() (srcRoot string, dstRoot string, expectFold // Yes, this is arguably inconsistent. But it's the way it's always been, and it does seem to match user expectations for copies // of that kind. expectRootFolder = false + } else if expectRootFolder && s.fromTo == common.EFromTo.BlobLocal() && s.destAccountType != EAccountType.HierarchicalNamespaceEnabled() && tf.objectTarget == "" { + expectRootFolder = false // we can only persist the root folder if it's a subfolder of the container on Blob.
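The new srcBase computation above exists because a share-snapshot source URL ends in a query string, and naively taking filepath.Base would bake the ?sharesnapshot=... suffix into the destination folder name. A self-contained sketch of the derivation (the sample URL is illustrative):

```go
package main

import (
	"fmt"
	"net/url"
	"path"
	"strings"
)

// baseNameWithoutSnapshot trims a sharesnapshot query parameter, if any,
// before taking the base name, mirroring the getTransferInfo change above.
func baseNameWithoutSnapshot(src string) string {
	if u, err := url.Parse(src); err == nil {
		if id := u.Query().Get("sharesnapshot"); id != "" {
			src = strings.TrimSuffix(src, "?sharesnapshot="+id)
		}
	}
	return path.Base(src)
}

func main() {
	src := "https://account.file.core.windows.net/share/dir?sharesnapshot=2023-02-09T00:00:00.0000000Z"
	fmt.Println(baseNameWithoutSnapshot(src)) // prints "dir", not "dir?sharesnapshot=..."
}
```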
+ + if tf.objectTarget == "" && tf.destTarget == "" { + addedDirAtDest = path.Base(srcRoot) + } else if tf.destTarget != "" { + addedDirAtDest = tf.destTarget + } + dstRoot = fmt.Sprintf("%s/%s", dstRoot, addedDirAtDest) } else if s.fromTo.From().IsLocal() { if tf.objectTarget == "" && tf.destTarget == "" { - addedDirAtDest = filepath.Base(srcRoot) + addedDirAtDest = srcBase } else if tf.destTarget != "" { addedDirAtDest = tf.destTarget } dstRoot = fmt.Sprintf("%s%c%s", dstRoot, os.PathSeparator, addedDirAtDest) } else { if tf.objectTarget == "" && tf.destTarget == "" { - addedDirAtDest = path.Base(srcRoot) + addedDirAtDest = srcBase } else if tf.destTarget != "" { addedDirAtDest = tf.destTarget } @@ -447,7 +467,9 @@ func (s *scenario) validateProperties() { } // validate all the different things - s.validateMetadata(expected.nameValueMetadata, actual.nameValueMetadata, expected.isFolder) + s.validatePOSIXProperties(f, actual.nameValueMetadata) + s.validateSymlink(f, actual.nameValueMetadata) + s.validateMetadata(expected.nameValueMetadata, actual.nameValueMetadata) s.validateBlobTags(expected.blobTags, actual.blobTags) s.validateContentHeaders(expected.contentHeaders, actual.contentHeaders) s.validateCreateTime(expected.creationTime, actual.creationTime) @@ -484,7 +506,7 @@ func (s *scenario) validateContent() { if f.creationProperties.contentHeaders == nil { s.a.Failed() } - if !f.isFolder() { + if f.hasContentToValidate() { expectedContentMD5 := f.creationProperties.contentHeaders.contentMD5 resourceRelPath := fixSlashes(path.Join(addedDirAtDest, f.name), s.fromTo.To()) actualContent := s.state.dest.downloadContent(s.a, downloadContentOptions{ @@ -503,15 +525,91 @@ func (s *scenario) validateContent() { } } -// // Individual property validation routines +func (s *scenario) validatePOSIXProperties(f *testObject, metadata map[string]string) { + if !s.p.preservePOSIXProperties { + return + } + + _, _, _, _, addedDirAtDest := s.getTransferInfo() + + var adapter common.UnixStatAdapter + switch s.fromTo.To() { + case common.ELocation.Local(): + adapter = osScenarioHelper{}.GetUnixStatAdapterForFile(s.a, filepath.Join(s.state.dest.(*resourceLocal).dirPath, addedDirAtDest, f.name)) + case common.ELocation.Blob(): + var err error + adapter, err = common.ReadStatFromMetadata(metadata, 0) + s.a.AssertNoErr(err, "reading stat from metadata") + } + + s.a.Assert(f.verificationProperties.posixProperties.EquivalentToStatAdapter(adapter), equals(), "", "POSIX properties were mismatched") +} + +func (s *scenario) validateSymlink(f *testObject, metadata map[string]string) { + c := s.GetAsserter() + + prepareSymlinkForComparison := func(oldName string) string { + switch s.fromTo { + case common.EFromTo.LocalBlob(): + source := s.state.source.(*resourceLocal) -func (s *scenario) validateMetadata(expected, actual map[string]string, isFolder bool) { - if isFolder { // hdi_isfolder is service-relevant metadata, not something we'd be testing for. This can pop up when specifying a folder() on blob. 
- delete(expected, "hdi_isfolder") - delete(actual, "hdi_isfolder") + return strings.TrimPrefix(oldName, source.dirPath + common.OS_PATH_SEPARATOR) + case common.EFromTo.BlobLocal(): + dest := s.state.dest.(*resourceLocal) + _, _, _, _, addedDirAtDest := s.getTransferInfo() + + return strings.TrimPrefix(oldName, path.Join(dest.dirPath, addedDirAtDest) + common.OS_PATH_SEPARATOR) + case common.EFromTo.BlobBlob(): + return oldName // no adjustment necessary + default: + c.Error("Symlink persistence is only available on Local<->Blob and Blob->Blob") + return "" + } + } + + if f.verificationProperties.entityType == common.EEntityType.Symlink() { + c.Assert(s.p.symlinkHandling, equals(), common.ESymlinkHandlingType.Preserve()) // we should only be doing this if we're persisting symlinks + + dest := s.GetDestination() + _, _, _, _, addedDirAtDest := s.getTransferInfo() + switch s.fromTo.To() { + case common.ELocation.Local(): + symlinkDest := path.Join(dest.(*resourceLocal).dirPath, addedDirAtDest, f.name) + stat, err := os.Lstat(symlinkDest) + c.AssertNoErr(err) + c.Assert(stat.Mode() & os.ModeSymlink, equals(), os.ModeSymlink, "the file is not a symlink") + + oldName, err := os.Readlink(symlinkDest) + c.AssertNoErr(err) + c.Assert(prepareSymlinkForComparison(oldName), equals(), *f.verificationProperties.symlinkTarget) + case common.ELocation.Blob(): + val, ok := metadata[common.POSIXSymlinkMeta] + c.Assert(ok, equals(), true) + c.Assert(val, equals(), "true") + + content := dest.downloadContent(c, downloadContentOptions{ + resourceRelPath: fixSlashes(path.Join(addedDirAtDest, f.name), common.ELocation.Blob()), + downloadBlobContentOptions: downloadBlobContentOptions{ + cpkInfo: common.GetCpkInfo(s.p.cpkByValue), + cpkScopeInfo: common.GetCpkScopeInfo(s.p.cpkByName), + }, + }) + + c.Assert(prepareSymlinkForComparison(string(content)), equals(), *f.verificationProperties.symlinkTarget) + default: + c.Error("Cannot validate symlink from endpoint other than local/blob") + } + } +} + +// // Individual property validation routines +func (s *scenario) validateMetadata(expected, actual map[string]string) { + for _, v := range common.AllLinuxProperties { // properties are evaluated elsewhere + delete(expected, v) + delete(actual, v) } - s.a.Assert(len(expected), equals(), len(actual), "Both should have same number of metadata entries") + s.a.Assert(len(actual), equals(), len(expected), "Both should have same number of metadata entries") for key := range expected { exValue := expected[key] actualValue, ok := actual[key] @@ -625,7 +723,6 @@ func (s *scenario) validateLastWriteTime(expected, actual *time.Time) { expected, actual)) } -// nolint func (s *scenario) validateSMBAttrs(expected, actual *uint32) { if expected == nil { // These properties were not explicitly stated for verification diff --git a/e2etest/declarativeTestFiles.go b/e2etest/declarativeTestFiles.go index 41a607b4f..18ee0920f 100644 --- a/e2etest/declarativeTestFiles.go +++ b/e2etest/declarativeTestFiles.go @@ -25,6 +25,7 @@ import ( "fmt" "math" "reflect" + "strconv" "strings" "time" @@ -86,7 +87,9 @@ func (h *contentHeaders) String() string { // This is exposed to the declarativeResourceManagers, to create/check the objects. // All fields are pointers or interfaces to make them nil-able. Nil means "unspecified".
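validateSymlink above exercises the local half of the round trip with os.Lstat and os.Readlink. The core of that check, extracted into a runnable sketch (paths are illustrative):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// verifySymlink checks a path the same way the validator above does:
// Lstat (not Stat, which would follow the link) to confirm the symlink
// mode bit, then Readlink to recover the target for comparison.
func verifySymlink(link, wantTarget string) error {
	fi, err := os.Lstat(link)
	if err != nil {
		return err
	}
	if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
		return fmt.Errorf("%s is not a symlink", link)
	}
	target, err := os.Readlink(link)
	if err != nil {
		return err
	}
	if target != wantTarget {
		return fmt.Errorf("symlink points at %s, want %s", target, wantTarget)
	}
	return nil
}

func main() {
	dir, _ := os.MkdirTemp("", "symdemo")
	defer os.RemoveAll(dir)

	target := filepath.Join(dir, "target.txt")
	link := filepath.Join(dir, "link")
	_ = os.WriteFile(target, []byte("data"), 0o644)
	_ = os.Symlink(target, link)

	fmt.Println(verifySymlink(link, target)) // <nil>
}
```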
type objectProperties struct { - isFolder bool // if false, the object is a file + entityType common.EntityType + symlinkTarget *string + posixProperties *objectUnixStatContainer size *int64 contentHeaders *contentHeaders nameValueMetadata map[string]string @@ -101,6 +104,112 @@ type objectProperties struct { cpkScopeInfo *common.CpkScopeInfo } +type objectUnixStatContainer struct { + // mode can contain THE FOLLOWING file type specifier bits (common.S_IFSOCK, common.S_IFIFO) + // common.S_IFDIR and common.S_IFLNK are achievable using folder() and symlink(). + // TODO/Spike: common.S_IFBLK and common.S_IFCHR may be difficult to replicate consistently in a test environment + mode *uint32 + + accessTime *time.Time + modTime *time.Time +} + +func (o *objectUnixStatContainer) Empty() bool { + if o == nil { + return true + } + + return o.mode == nil && + o.accessTime == nil && + o.modTime == nil +} + +func (o *objectUnixStatContainer) DeepCopy() *objectUnixStatContainer { + if o == nil { + return nil + } + out := &objectUnixStatContainer{} + + if o.mode != nil { + mode := *o.mode + out.mode = &mode + } + + if o.accessTime != nil { + accessTime := *o.accessTime + out.accessTime = &accessTime + } + + if o.modTime != nil { + modTime := *o.modTime + out.modTime = &modTime + } + + return out +} + +func (o *objectUnixStatContainer) EquivalentToStatAdapter(s common.UnixStatAdapter) string { + if o == nil { + return "" // no comparison to make + } + + mismatched := make([]string, 0) + // only compare if we set it + if o.mode != nil { + if s.FileMode() != *o.mode { + mismatched = append(mismatched, "mode") + } + } + + if o.accessTime != nil { + if o.accessTime.UnixNano() != s.ATime().UnixNano() { + mismatched = append(mismatched, "atime") + } + } + + if o.modTime != nil { + if o.modTime.UnixNano() != s.MTime().UnixNano() { + mismatched = append(mismatched, "mtime") + } + } + + return strings.Join(mismatched, ", ") +} + +func (o *objectUnixStatContainer) AddToMetadata(metadata map[string]string) { + if o == nil { + return + } + + mask := uint32(0) + + if o.mode != nil { // always overwrite; perhaps it got changed in one of the hooks. 
+ mask |= common.STATX_MODE + metadata[common.POSIXModeMeta] = strconv.FormatUint(uint64(*o.mode), 10) + + delete(metadata, common.POSIXFIFOMeta) + delete(metadata, common.POSIXSocketMeta) + switch { + case *o.mode & common.S_IFIFO == common.S_IFIFO: + metadata[common.POSIXFIFOMeta] = "true" + case *o.mode & common.S_IFSOCK == common.S_IFSOCK: + metadata[common.POSIXSocketMeta] = "true" + } + } + + if o.accessTime != nil { + mask |= common.STATX_ATIME + metadata[common.POSIXATimeMeta] = strconv.FormatInt(o.accessTime.UnixNano(), 10) + } + + if o.modTime != nil { + mask |= common.STATX_MTIME + metadata[common.POSIXModTimeMeta] = strconv.FormatInt(o.modTime.UnixNano(), 10) + } + + metadata[common.LINUXStatxMaskMeta] = strconv.FormatUint(uint64(mask), 10) +} + // returns op.size, if present, else defaultSize func (op objectProperties) sizeBytes(a asserter, defaultSize string) int { if op.size != nil { @@ -122,7 +231,16 @@ func (op objectProperties) DeepCopy() objectProperties { ret := objectProperties{} - ret.isFolder = op.isFolder + ret.entityType = op.entityType + + if op.symlinkTarget != nil { + target := *op.symlinkTarget + ret.symlinkTarget = &target + } + + if !op.posixProperties.Empty() { + ret.posixProperties = op.posixProperties.DeepCopy() + } if op.size != nil { val := op.size @@ -214,11 +332,20 @@ func (t *testObject) DeepCopy() *testObject { return &ret } +func (t *testObject) hasContentToValidate() bool { + if t.verificationProperties != nil && t.creationProperties.entityType != t.verificationProperties.entityType { + panic("entityType property is misconfigured") + } + + return t.creationProperties.entityType == common.EEntityType.File() +} + func (t *testObject) isFolder() bool { - if t.verificationProperties != nil && t.creationProperties.isFolder != t.verificationProperties.isFolder { - panic("isFolder properties are misconfigured") + if t.verificationProperties != nil && t.creationProperties.entityType != t.verificationProperties.entityType { + panic("entityType property is misconfigured") } - return t.creationProperties.isFolder + + return t.creationProperties.entityType == common.EEntityType.Folder() } func (t *testObject) isRootFolder() bool { @@ -293,6 +420,21 @@ func f(n string, properties ...withPropertyProvider) *testObject { return result } +func symlink(new, target string) *testObject { + name := strings.TrimLeft(new, "/") + result := f(name) + + // result.creationProperties + result.creationProperties.entityType = common.EEntityType.Symlink() + result.creationProperties.symlinkTarget = &target + + result.verificationProperties = &objectProperties{} + result.verificationProperties.entityType = common.EEntityType.Symlink() + result.verificationProperties.symlinkTarget = &target + + return result +} + // define a folder, in the expectations lists on a testFiles struct func folder(n string, properties ...withPropertyProvider) *testObject { name := strings.TrimLeft(n, "/") @@ -300,9 +442,9 @@ func folder(n string, properties ...withPropertyProvider) *testObject { // isFolder is at properties level, not testObject level, because we need it at properties level when reading // the properties back from the destination (where we don't read testObjects, we just read objectProperties) - result.creationProperties.isFolder = true + result.creationProperties.entityType = common.EEntityType.Folder() if result.verificationProperties != nil { - result.verificationProperties.isFolder = true +
result.verificationProperties.entityType = common.EEntityType.Folder() } return result @@ -379,14 +521,15 @@ func (*testFiles) copyList(src []interface{}) []interface{} { // or force them to use f() for every file? func (*testFiles) toTestObjects(rawList []interface{}, isFail bool) []*testObject { result := make([]*testObject, 0, len(rawList)) - for _, r := range rawList { + for k, r := range rawList { if asTestObject, ok := r.(*testObject); ok { if asTestObject.expectedFailureMessage != "" && !isFail { panic("expected failures are only allowed in the shouldFail list. They are not allowed for other test files") } result = append(result, asTestObject) } else if asString, ok := r.(string); ok { - result = append(result, &testObject{name: asString}) + rawList[k] = &testObject{name: asString} // convert to a full deal so we can apply md5 + result = append(result, rawList[k].(*testObject)) } else { panic("testFiles lists may contain only strings and testObjects. Create your test objects with the f() and folder() functions") } diff --git a/e2etest/declarativeWithPropertyProviders.go b/e2etest/declarativeWithPropertyProviders.go index 801ec4c12..839c31a10 100644 --- a/e2etest/declarativeWithPropertyProviders.go +++ b/e2etest/declarativeWithPropertyProviders.go @@ -36,6 +36,10 @@ import ( type with struct { size string // uses our standard K, M, G suffix + symlinkTarget string + + posixProperties objectUnixStatContainer + cacheControl string contentDisposition string contentEncoding string @@ -83,6 +87,10 @@ func (w with) createObjectProperties() *objectProperties { } // content headers + if w.symlinkTarget != "" { + populated = true + result.symlinkTarget = &w.symlinkTarget + } if w.cacheControl != "" { populated = true ensureContentPropsExist() @@ -148,6 +156,10 @@ func (w with) createObjectProperties() *objectProperties { populated = true result.adlsPermissionsACL = &w.adlsPermissionsACL } + if !w.posixProperties.Empty() { + populated = true + result.posixProperties = &w.posixProperties + } if w.cpkByName != "" { populated = true @@ -173,12 +185,10 @@ func (w with) createObjectProperties() *objectProperties { // use createOnly if you want to define properties that should be used when creating an object, but not // used when verifying the state of the transferred object. Generally you'll have no use for this. // Just use "with", and the test framework will do the right thing. 
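AddToMetadata above records a statx-style mask alongside the values it writes, so the reading side knows which fields were genuinely captured rather than merely zero-valued. A condensed sketch of that contract, using bit values matching the Linux statx ABI (the metadata keys here are illustrative stand-ins for the common.POSIX*Meta constants):

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// Bit values matching the Linux statx ABI (STATX_ATIME, STATX_MTIME).
const (
	statxATime uint32 = 0x20
	statxMTime uint32 = 0x40
)

// addTimesToMetadata writes only the timestamps it was given and ORs a
// bit into the mask for each, mirroring AddToMetadata's contract: a
// consumer trusts a field only if its mask bit is set.
func addTimesToMetadata(md map[string]string, atime, mtime *time.Time) {
	var mask uint32
	if atime != nil {
		mask |= statxATime
		md["atime"] = strconv.FormatInt(atime.UnixNano(), 10)
	}
	if mtime != nil {
		mask |= statxMTime
		md["mtime"] = strconv.FormatInt(mtime.UnixNano(), 10)
	}
	md["mask"] = strconv.FormatUint(uint64(mask), 10)
}

func main() {
	md := map[string]string{}
	now := time.Now()
	addTimesToMetadata(md, nil, &now) // only mtime was captured
	fmt.Println(md)                   // mask carries just the STATX_MTIME bit
}
```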
-//nolint type createOnly struct { with } -//nolint func (createOnly) appliesToVerification() bool { return false } @@ -186,7 +196,7 @@ func (createOnly) appliesToVerification() bool { //// // Use verifyOnly if you need to specify some properties that should NOT be applied to the file when it is created, -// but should be present on it afte) the transfer +// but should be present on it after the transfer type verifyOnly struct { with } diff --git a/e2etest/factory.go b/e2etest/factory.go index 5217efca0..c8fb9c2b3 100644 --- a/e2etest/factory.go +++ b/e2etest/factory.go @@ -184,7 +184,7 @@ func (TestResourceFactory) CreateNewFileShareSnapshot(c asserter, fileShare azfi } func (TestResourceFactory) CreateLocalDirectory(c asserter) (dstDirName string) { - dstDirName, err := os.MkdirTemp("","AzCopyLocalTest") + dstDirName, err := os.MkdirTemp("", "AzCopyLocalTest") c.AssertNoErr(err) return } @@ -249,7 +249,6 @@ func getTestName(t *testing.T) (pseudoSuite, test string) { return pseudoSuite, removeUnderscores(testName) } -// nolint // This function generates an entity name by concatenating the passed prefix, // the name of the test requesting the entity name, and the minute, second, and nanoseconds of the call. // This should make it easy to associate the entities with their test, uniquely identify @@ -280,7 +279,6 @@ func (TestResourceNameGenerator) GenerateContainerName(c asserter) string { return uuid.New().String() } -// nolint func (TestResourceNameGenerator) generateBlobName(c asserter) string { return generateName(c, blobPrefix, 0) } diff --git a/e2etest/helpers.go b/e2etest/helpers.go index 44563e4c1..9e9e0cdf9 100644 --- a/e2etest/helpers.go +++ b/e2etest/helpers.go @@ -65,54 +65,44 @@ const ( ) // if S3_TESTS_OFF is set at all, S3 tests are disabled.
-// nolint func isS3Disabled() bool { return strings.ToLower(os.Getenv("S3_TESTS_OFF")) != "" } -// nolint func skipIfS3Disabled(c asserter) { if isS3Disabled() { c.Skip("S3 testing is disabled for this unit test suite run.") } } -// nolint func generateContainerName(c asserter) string { return generateName(c, containerPrefix, 63) } -// nolint func generateBlobName(c asserter) string { return generateName(c, blobPrefix, 0) } -// nolint func generateBucketName(c asserter) string { return generateName(c, bucketPrefix, 63) } -// nolint func generateBucketNameWithCustomizedPrefix(c asserter, customizedPrefix string) string { return generateName(c, customizedPrefix, 63) } -// nolint func generateObjectName(c asserter) string { return generateName(c, objectPrefix, 0) } -// nolint func generateShareName(c asserter) string { return generateName(c, sharePrefix, 63) } -// nolint func generateFilesystemName(c asserter) string { return generateName(c, blobfsPrefix, 63) } -// nolint func getShareURL(c asserter, fsu azfile.ServiceURL) (share azfile.ShareURL, name string) { name = generateShareName(c) share = fsu.NewShareURL(name) @@ -120,17 +110,14 @@ func getShareURL(c asserter, fsu azfile.ServiceURL) (share azfile.ShareURL, name return share, name } -// nolint func generateAzureFileName(c asserter) string { return generateName(c, azureFilePrefix, 0) } -// nolint func generateBfsFileName(c asserter) string { return generateName(c, blobfsPrefix, 0) } -// nolint func getContainerURL(c asserter, bsu azblob.ServiceURL) (container azblob.ContainerURL, name string) { name = generateContainerName(c) container = bsu.NewContainerURL(name) @@ -138,7 +125,6 @@ func getContainerURL(c asserter, bsu azblob.ServiceURL) (container azblob.Contai return container, name } -// nolint func getFilesystemURL(c asserter, bfssu azbfs.ServiceURL) (filesystem azbfs.FileSystemURL, name string) { name = generateFilesystemName(c) filesystem = bfssu.NewFileSystemURL(name) @@ -146,7 +132,6 @@ func getFilesystemURL(c asserter, bfssu azbfs.ServiceURL) (filesystem azbfs.File return } -// nolint func getBlockBlobURL(c asserter, container azblob.ContainerURL, prefix string) (blob azblob.BlockBlobURL, name string) { name = prefix + generateBlobName(c) blob = container.NewBlockBlobURL(name) @@ -154,7 +139,6 @@ func getBlockBlobURL(c asserter, container azblob.ContainerURL, prefix string) ( return blob, name } -// nolint func getBfsFileURL(c asserter, filesystemURL azbfs.FileSystemURL, prefix string) (file azbfs.FileURL, name string) { name = prefix + generateBfsFileName(c) file = filesystemURL.NewRootDirectoryURL().NewFileURL(name) @@ -162,7 +146,6 @@ func getBfsFileURL(c asserter, filesystemURL azbfs.FileSystemURL, prefix string) return } -// nolint func getAppendBlobURL(c asserter, container azblob.ContainerURL, prefix string) (blob azblob.AppendBlobURL, name string) { name = generateBlobName(c) blob = container.NewAppendBlobURL(prefix + name) @@ -170,7 +153,6 @@ func getAppendBlobURL(c asserter, container azblob.ContainerURL, prefix string) return blob, name } -// nolint func getPageBlobURL(c asserter, container azblob.ContainerURL, prefix string) (blob azblob.PageBlobURL, name string) { name = generateBlobName(c) blob = container.NewPageBlobURL(prefix + name) @@ -178,7 +160,6 @@ func getPageBlobURL(c asserter, container azblob.ContainerURL, prefix string) (b return } -// nolint func getAzureFileURL(c asserter, shareURL azfile.ShareURL, prefix string) (fileURL azfile.FileURL, name string) { name = prefix + generateAzureFileName(c) 
fileURL = shareURL.NewRootDirectoryURL().NewFileURL(name) @@ -186,7 +167,6 @@ func getAzureFileURL(c asserter, shareURL azfile.ShareURL, prefix string) (fileU return } -// nolint func getReaderToRandomBytes(n int) *bytes.Reader { r, _ := getRandomDataAndReader(n) return r @@ -200,7 +180,6 @@ func getRandomDataAndReader(n int) (*bytes.Reader, []byte) { return bytes.NewReader(data), data } -// nolint func createNewContainer(c asserter, bsu azblob.ServiceURL) (container azblob.ContainerURL, name string) { container, name = getContainerURL(c, bsu) @@ -210,7 +189,6 @@ func createNewContainer(c asserter, bsu azblob.ServiceURL) (container azblob.Con return container, name } -// nolint func createNewFilesystem(c asserter, bfssu azbfs.ServiceURL) (filesystem azbfs.FileSystemURL, name string) { filesystem, name = getFilesystemURL(c, bfssu) @@ -220,7 +198,6 @@ func createNewFilesystem(c asserter, bfssu azbfs.ServiceURL) (filesystem azbfs.F return } -// nolint func createNewBfsFile(c asserter, filesystem azbfs.FileSystemURL, prefix string) (file azbfs.FileURL, name string) { file, name = getBfsFileURL(c, filesystem, prefix) @@ -239,7 +216,6 @@ func createNewBfsFile(c asserter, filesystem azbfs.FileSystemURL, prefix string) return } -// nolint func createNewBlockBlob(c asserter, container azblob.ContainerURL, prefix string) (blob azblob.BlockBlobURL, name string) { blob, name = getBlockBlobURL(c, container, prefix) @@ -252,7 +228,6 @@ func createNewBlockBlob(c asserter, container azblob.ContainerURL, prefix string return } -// nolint func createNewAzureShare(c asserter, fsu azfile.ServiceURL) (share azfile.ShareURL, name string) { share, name = getShareURL(c, fsu) @@ -262,7 +237,6 @@ func createNewAzureShare(c asserter, fsu azfile.ServiceURL) (share azfile.ShareU return share, name } -// nolint func createNewAzureFile(c asserter, share azfile.ShareURL, prefix string) (file azfile.FileURL, name string) { file, name = getAzureFileURL(c, share, prefix) @@ -287,7 +261,6 @@ func generateParentsForAzureFile(c asserter, fileURL azfile.FileURL) { c.AssertNoErr(err) } -// nolint func createNewAppendBlob(c asserter, container azblob.ContainerURL, prefix string) (blob azblob.AppendBlobURL, name string) { blob, name = getAppendBlobURL(c, container, prefix) @@ -298,7 +271,6 @@ func createNewAppendBlob(c asserter, container azblob.ContainerURL, prefix strin return } -// nolint func createNewPageBlob(c asserter, container azblob.ContainerURL, prefix string) (blob azblob.PageBlobURL, name string) { blob, name = getPageBlobURL(c, container, prefix) @@ -309,26 +281,22 @@ func createNewPageBlob(c asserter, container azblob.ContainerURL, prefix string) return } -// nolint func deleteContainer(c asserter, container azblob.ContainerURL) { resp, err := container.Delete(ctx, azblob.ContainerAccessConditions{}) c.AssertNoErr(err) c.Assert(resp.StatusCode(), equals(), 202) } -// nolint func deleteFilesystem(c asserter, filesystem azbfs.FileSystemURL) { resp, err := filesystem.Delete(ctx) c.AssertNoErr(err) c.Assert(resp.StatusCode(), equals(), 202) } -// nolint type createS3ResOptions struct { Location string } -// nolint func createS3ClientWithMinio(c asserter, o createS3ResOptions) (*minio.Client, error) { skipIfS3Disabled(c) @@ -347,7 +315,6 @@ func createS3ClientWithMinio(c asserter, o createS3ResOptions) (*minio.Client, e return s3Client, nil } -// nolint func createNewBucket(c asserter, client *minio.Client, o createS3ResOptions) string { bucketName := generateBucketName(c) err := client.MakeBucket(bucketName, o.Location) 
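The S3 helpers in this file sit on top of minio-go; assuming the v6-era API that the calls above (minio.New, client.MakeBucket) suggest, a client-plus-bucket bootstrap looks roughly like this (endpoint and credentials are placeholders, and the helper name is invented for illustration):

```go
package s3demo

import (
	minio "github.com/minio/minio-go"
)

// newClientAndBucket sketches the createS3ClientWithMinio/createNewBucket
// pair: construct a client from static credentials, then create a bucket
// in the default region. Signatures follow the minio-go v6 API assumed above.
func newClientAndBucket(endpoint, accessKey, secretKey, bucket string) (*minio.Client, error) {
	client, err := minio.New(endpoint, accessKey, secretKey, true /* secure (HTTPS) */)
	if err != nil {
		return nil, err
	}
	if err := client.MakeBucket(bucket, "" /* default location */); err != nil {
		return nil, err
	}
	return client, nil
}
```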
@@ -356,13 +323,11 @@ func createNewBucket(c asserter, client *minio.Client, o createS3ResOptions) str return bucketName } -// nolint func createNewBucketWithName(c asserter, client *minio.Client, bucketName string, o createS3ResOptions) { err := client.MakeBucket(bucketName, o.Location) c.AssertNoErr(err) } -// nolint func createNewObject(c asserter, client *minio.Client, bucketName string, prefix string) (objectKey string) { objectKey = prefix + generateObjectName(c) @@ -375,7 +340,6 @@ return } -// nolint func deleteBucket(_ asserter, client *minio.Client, bucketName string, waitQuarterMinute bool) { // If we error out in this function, simply just skip over deleting the bucket. // Some of our buckets have become "ghost" buckets in the past. @@ -420,7 +384,6 @@ } } -// nolint func cleanS3Account(c asserter, client *minio.Client) { buckets, err := client.ListBuckets() if err != nil { @@ -437,7 +400,6 @@ } time.Sleep(time.Minute) } -// nolint func cleanBlobAccount(c asserter, serviceURL azblob.ServiceURL) { marker := azblob.Marker{} for marker.NotDone() { @@ -453,7 +415,6 @@ } } -// nolint func cleanFileAccount(c asserter, serviceURL azfile.ServiceURL) { marker := azfile.Marker{} for marker.NotDone() { @@ -471,7 +432,6 @@ time.Sleep(time.Minute) } -// nolint func getGenericCredentialForFile(accountType string) (*azfile.SharedKeyCredential, error) { accountNameEnvVar := accountType + "ACCOUNT_NAME" accountKeyEnvVar := accountType + "ACCOUNT_KEY" @@ -492,7 +452,6 @@ func deleteShare(c asserter, share azfile.ShareURL) { // those changes not being reflected yet, we will wait 30 seconds and try the test again. If it fails this time for any reason, // we fail the test. It is the responsibility of the testImplFunc to determine which error string indicates the test should be retried. // There can only be one such string. All errors that cannot be due to this detail should be asserted and not returned as an error string.
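The retry contract described in the comment above is worth seeing in skeleton form: exactly one designated error string is treated as "service-property change not yet propagated", and it buys exactly one retry after a fixed wait. A generic sketch (the function name and parameters are illustrative; only the 30-second wait and single-retry shape come from the comment):

```go
package retrydemo

import (
	"strings"
	"time"
)

// runWithOnePropagationRetry runs test once. If and only if the failure
// contains retryableCode (the single designated "not propagated yet"
// marker), it waits and retries exactly once; the second result is final.
func runWithOnePropagationRetry(test func() error, retryableCode string) error {
	err := test()
	if err == nil || !strings.Contains(err.Error(), retryableCode) {
		return err
	}
	time.Sleep(30 * time.Second) // allow the service-property change to propagate
	return test()
}
```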
-// nolint func runTestRequiringServiceProperties(c asserter, bsu azblob.ServiceURL, code string, enableServicePropertyFunc func(asserter, azblob.ServiceURL), testImplFunc func(asserter, azblob.ServiceURL) error, @@ -508,7 +467,6 @@ func runTestRequiringServiceProperties(c asserter, bsu azblob.ServiceURL, code s } } -// nolint func getContainerURLWithSAS(c asserter, credential azblob.SharedKeyCredential, containerName string) azblob.ContainerURL { sasQueryParams, err := azblob.BlobSASSignatureValues{ Protocol: azblob.SASProtocolHTTPS, @@ -531,7 +489,6 @@ func getContainerURLWithSAS(c asserter, credential azblob.SharedKeyCredential, c return azblob.NewContainerURL(*fullURL, azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})) } -// nolint func getBlobServiceURLWithSAS(c asserter, credential azblob.SharedKeyCredential) azblob.ServiceURL { sasQueryParams, err := azblob.AccountSASSignatureValues{ Protocol: azblob.SASProtocolHTTPS, @@ -554,7 +511,6 @@ func getBlobServiceURLWithSAS(c asserter, credential azblob.SharedKeyCredential) return azblob.NewServiceURL(*fullURL, azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})) } -// nolint func getFileServiceURLWithSAS(c asserter, credential azfile.SharedKeyCredential) azfile.ServiceURL { sasQueryParams, err := azfile.AccountSASSignatureValues{ Protocol: azfile.SASProtocolHTTPS, @@ -574,7 +530,6 @@ func getFileServiceURLWithSAS(c asserter, credential azfile.SharedKeyCredential) return azfile.NewServiceURL(*fullURL, azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{})) } -// nolint func getShareURLWithSAS(c asserter, credential azfile.SharedKeyCredential, shareName string) azfile.ShareURL { sasQueryParams, err := azfile.FileSASSignatureValues{ Protocol: azfile.SASProtocolHTTPS, @@ -597,7 +552,6 @@ func getShareURLWithSAS(c asserter, credential azfile.SharedKeyCredential, share return azfile.NewShareURL(*fullURL, azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{})) } -// nolint func getAdlsServiceURLWithSAS(c asserter, credential azbfs.SharedKeyCredential) azbfs.ServiceURL { sasQueryParams, err := azbfs.AccountSASSignatureValues{ Protocol: azbfs.SASProtocolHTTPS, @@ -621,13 +575,11 @@ func getAdlsServiceURLWithSAS(c asserter, credential azbfs.SharedKeyCredential) } // check.v1 style "StringContains" checker -// nolint type stringContainsChecker struct { *chk.CheckerInfo } // Check -// nolint func (checker *stringContainsChecker) Check(params []interface{}, _ []string) (result bool, error string) { if len(params) < 2 { return false, "StringContains requires two parameters" diff --git a/e2etest/runner.go b/e2etest/runner.go index 27b397683..415e314c5 100644 --- a/e2etest/runner.go +++ b/e2etest/runner.go @@ -110,6 +110,13 @@ func (t *TestRunner) SetAllFlags(p params, o Operation) { if o == eOperation.Copy() { set("s2s-preserve-access-tier", p.s2sPreserveAccessTier, true) set("preserve-posix-properties", p.preservePOSIXProperties, "") + + switch p.symlinkHandling { + case common.ESymlinkHandlingType.Follow(): + set("follow-symlinks", true, nil) + case common.ESymlinkHandlingType.Preserve(): + set("preserve-symlinks", true, nil) + } } else if o == eOperation.Sync() { set("preserve-posix-properties", p.preservePOSIXProperties, false) set("compare-hash", p.compareHash.String(), "None") diff --git a/e2etest/scenario_helpers.go b/e2etest/scenario_helpers.go index 2ad0157b3..2d12c2ebe 100644 --- a/e2etest/scenario_helpers.go +++ b/e2etest/scenario_helpers.go @@ 
-35,6 +35,7 @@ import ( "path" "path/filepath" "runtime" + "strconv" "strings" "time" @@ -52,7 +53,6 @@ const defaultStringFileSize = "1k" type scenarioHelper struct{} -// nolint var specialNames = []string{ "打麻将.txt", "wow such space so much space", @@ -68,7 +68,6 @@ var specialNames = []string{ } // note: this is to emulate the list-of-files flag -// nolint func (scenarioHelper) generateListOfFiles(c asserter, fileList []string) (path string) { parentDirName, err := os.MkdirTemp("", "AzCopyLocalTest") c.AssertNoErr(err) @@ -85,7 +84,6 @@ func (scenarioHelper) generateListOfFiles(c asserter, fileList []string) (path s return } -// nolint func (scenarioHelper) generateLocalDirectory(c asserter) (dstDirName string) { dstDirName, err := os.MkdirTemp("", "AzCopyLocalTest") c.AssertNoErr(err) @@ -118,8 +116,10 @@ type generateLocalFilesFromList struct { func (s scenarioHelper) generateLocalFilesFromList(c asserter, options *generateLocalFilesFromList) { for _, file := range options.fs { var err error + destFile := filepath.Join(options.dirPath, file.name) + if file.isFolder() { - err = os.MkdirAll(filepath.Join(options.dirPath, file.name), os.ModePerm) + err = os.MkdirAll(destFile, os.ModePerm) c.AssertNoErr(err) // TODO: You'll need to set up things like attributes, and other relevant things from // file.creationProperties here. (Use all the properties of file.creationProperties that are supported @@ -129,32 +129,45 @@ func (s scenarioHelper) generateLocalFilesFromList(c asserter, options *generate osScenarioHelper{}.setFileSDDLString(c, filepath.Join(options.dirPath, file.name), *file.creationProperties.smbPermissionsSddl) } if file.creationProperties.lastWriteTime != nil { - c.AssertNoErr(os.Chtimes(filepath.Join(options.dirPath, file.name), time.Now(), *file.creationProperties.lastWriteTime), "set times") + c.AssertNoErr(os.Chtimes(destFile, time.Now(), *file.creationProperties.lastWriteTime), "set times") } - } else { - sourceData, err := s.generateLocalFile( - filepath.Join(options.dirPath, file.name), - file.creationProperties.sizeBytes(c, options.defaultSize), file.body) - if file.creationProperties.contentHeaders == nil { - file.creationProperties.contentHeaders = &contentHeaders{} + } else if file.creationProperties.entityType == common.EEntityType.File() { + var mode uint32 + if file.creationProperties.posixProperties != nil && file.creationProperties.posixProperties.mode != nil { + mode = *file.creationProperties.posixProperties.mode } + switch { + case mode & common.S_IFIFO == common.S_IFIFO || mode & common.S_IFSOCK == common.S_IFSOCK: + osScenarioHelper{}.Mknod(c, destFile, mode, 0) + default: + sourceData, err := s.generateLocalFile( + destFile, + file.creationProperties.sizeBytes(c, options.defaultSize), file.body) + if file.creationProperties.contentHeaders == nil { + file.creationProperties.contentHeaders = &contentHeaders{} + } - if file.creationProperties.contentHeaders.contentMD5 == nil { - contentMD5 := md5.Sum(sourceData) - file.creationProperties.contentHeaders.contentMD5 = contentMD5[:] - } + if file.creationProperties.contentHeaders.contentMD5 == nil { + contentMD5 := md5.Sum(sourceData) + file.creationProperties.contentHeaders.contentMD5 = contentMD5[:] + } - c.AssertNoErr(err) + c.AssertNoErr(err) + } // TODO: You'll need to set up things like attributes, and other relevant things from // file.creationProperties here. (Use all the properties of file.creationProperties that are supported // by local files. E.g. not contentHeaders or metadata). 
if file.creationProperties.smbPermissionsSddl != nil { - osScenarioHelper{}.setFileSDDLString(c, filepath.Join(options.dirPath, file.name), *file.creationProperties.smbPermissionsSddl) + osScenarioHelper{}.setFileSDDLString(c, destFile, *file.creationProperties.smbPermissionsSddl) } if file.creationProperties.lastWriteTime != nil { - c.AssertNoErr(os.Chtimes(filepath.Join(options.dirPath, file.name), time.Now(), *file.creationProperties.lastWriteTime), "set times") + c.AssertNoErr(os.Chtimes(destFile, time.Now(), *file.creationProperties.lastWriteTime), "set times") } + } else if file.creationProperties.entityType == common.EEntityType.Symlink() { + c.Assert(file.creationProperties.symlinkTarget, notEquals(), nil) + oldName := filepath.Join(options.dirPath, *file.creationProperties.symlinkTarget) + c.AssertNoErr(os.Symlink(oldName, destFile)) } } @@ -188,8 +201,15 @@ func (s scenarioHelper) enumerateLocalProperties(a asserter, dirpath string) map pSmbAttributes = osScenarioHelper{}.getFileAttrs(a, fullpath) pSmbPermissionsSddl = osScenarioHelper{}.getFileSDDLString(a, fullpath) } + entityType := common.EEntityType.File() + if info.IsDir() { + entityType = common.EEntityType.Folder() + } else if info.Mode()&os.ModeSymlink == os.ModeSymlink { + entityType = common.EEntityType.Symlink() + } + props := objectProperties{ - isFolder: info.IsDir(), + entityType: entityType, size: &size, creationTime: pCreationTime, lastWriteTime: &lastWriteTime, @@ -206,7 +226,6 @@ func (s scenarioHelper) enumerateLocalProperties(a asserter, dirpath string) map return result } -// nolint func (s scenarioHelper) generateCommonRemoteScenarioForLocal(c asserter, dirPath string, prefix string) (fileList []string) { fileList = make([]string, 50) for i := 0; i < 10; i++ { @@ -230,7 +249,6 @@ func (s scenarioHelper) generateCommonRemoteScenarioForLocal(c asserter, dirPath return } -// nolint func (scenarioHelper) generateCommonRemoteScenarioForBlob(c asserter, containerURL azblob.ContainerURL, prefix string) (blobList []string) { // make 50 blobs with random names // 10 of them at the top level @@ -259,7 +277,6 @@ func (scenarioHelper) generateCommonRemoteScenarioForBlob(c asserter, containerU return } -// nolint func (scenarioHelper) generateCommonRemoteScenarioForBlobFS(c asserter, filesystemURL azbfs.FileSystemURL, prefix string) (pathList []string) { pathList = make([]string, 50) @@ -282,7 +299,6 @@ func (scenarioHelper) generateCommonRemoteScenarioForBlobFS(c asserter, filesyst return } -// nolint func (scenarioHelper) generateCommonRemoteScenarioForAzureFile(c asserter, shareURL azfile.ShareURL, prefix string) (fileList []string) { fileList = make([]string, 50) @@ -305,7 +321,6 @@ func (scenarioHelper) generateCommonRemoteScenarioForAzureFile(c asserter, share return } -// nolint func (s scenarioHelper) generateBlobContainersAndBlobsFromLists(c asserter, serviceURL azblob.ServiceURL, containerList []string, blobList []*testObject) { for _, containerName := range containerList { curl := serviceURL.NewContainerURL(containerName) @@ -321,7 +336,6 @@ func (s scenarioHelper) generateBlobContainersAndBlobsFromLists(c asserter, serv } } -// nolint func (s scenarioHelper) generateFileSharesAndFilesFromLists(c asserter, serviceURL azfile.ServiceURL, shareList []string, fileList []*testObject) { for _, shareName := range shareList { sURL := serviceURL.NewShareURL(shareName) @@ -336,7 +350,6 @@ func (s scenarioHelper) generateFileSharesAndFilesFromLists(c asserter, serviceU } } -// nolint func (s scenarioHelper) 
generateFilesystemsAndFilesFromLists(c asserter, serviceURL azbfs.ServiceURL, fsList []string, fileList []string, data string) { for _, filesystemName := range fsList { fsURL := serviceURL.NewFileSystemURL(filesystemName) @@ -347,7 +360,6 @@ func (s scenarioHelper) generateFilesystemsAndFilesFromLists(c asserter, service } } -// nolint func (s scenarioHelper) generateS3BucketsAndObjectsFromLists(c asserter, s3Client *minio.Client, bucketList []string, objectList []string, data string) { for _, bucketName := range bucketList { err := s3Client.MakeBucket(bucketName, "") @@ -360,6 +372,7 @@ func (s scenarioHelper) generateS3BucketsAndObjectsFromLists(c asserter, s3Clien type generateFromListOptions struct { fs []*testObject defaultSize string + preservePosixProperties bool accountType AccountType } @@ -375,8 +388,46 @@ type generateBlobFromListOptions struct { // create the demanded blobs func (scenarioHelper) generateBlobsFromList(c asserter, options *generateBlobFromListOptions) { for _, b := range options.fs { - if b.isFolder() { - continue // no real folders in blob + switch b.creationProperties.entityType { + case common.EEntityType.Folder(): // it's fine to create folders even when we're not explicitly testing them, UNLESS we're testing CPK-- AzCopy can't properly pick that up! + if !options.cpkInfo.Empty() || b.name == "" { + continue // can't write root, and can't handle dirs with CPK + } + + if b.creationProperties.nameValueMetadata == nil { + b.creationProperties.nameValueMetadata = map[string]string{} + } + + b.body = make([]byte, 0) + b.creationProperties.nameValueMetadata[common.POSIXFolderMeta] = "true" + mode := uint64(os.FileMode(common.DEFAULT_FILE_PERM) | os.ModeDir) + b.creationProperties.nameValueMetadata[common.POSIXModeMeta] = strconv.FormatUint(mode, 10) + b.creationProperties.posixProperties.AddToMetadata(b.creationProperties.nameValueMetadata) + case common.EEntityType.Symlink(): + if b.creationProperties.nameValueMetadata == nil { + b.creationProperties.nameValueMetadata = map[string]string{} + } + + b.body = []byte(*b.creationProperties.symlinkTarget) + b.creationProperties.nameValueMetadata[common.POSIXSymlinkMeta] = "true" + mode := uint64(os.FileMode(common.DEFAULT_FILE_PERM) | os.ModeSymlink) + b.creationProperties.nameValueMetadata[common.POSIXModeMeta] = strconv.FormatUint(mode, 10) + b.creationProperties.posixProperties.AddToMetadata(b.creationProperties.nameValueMetadata) + default: + if b.creationProperties.nameValueMetadata == nil { + b.creationProperties.nameValueMetadata = map[string]string{} + } + + b.creationProperties.posixProperties.AddToMetadata(b.creationProperties.nameValueMetadata) + + if b.creationProperties.posixProperties != nil && b.creationProperties.posixProperties.mode != nil { + mode := *b.creationProperties.posixProperties.mode + + // todo: support for device rep files may be difficult in a testing environment. 
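The generateBlobsFromList switch above encodes every non-file entity as a blob whose metadata carries the POSIX story: folders and symlinks become zero-length (or target-containing) bodies plus a marker key and a numeric os.FileMode. A condensed sketch of the scheme (the metadata keys and default permission are illustrative stand-ins for common.POSIXFolderMeta, common.POSIXSymlinkMeta, common.POSIXModeMeta, and common.DEFAULT_FILE_PERM):

```go
package blobmeta

import (
	"os"
	"strconv"
)

// encodeEntity mirrors the scheme above: the blob body holds the symlink
// target (or nothing, for a folder), while the metadata carries an entity
// marker plus the full numeric os.FileMode, so a POSIX-aware download can
// reconstruct the entry.
func encodeEntity(isFolder bool, symlinkTarget string) (body []byte, md map[string]string) {
	md = map[string]string{}
	const basePerm = 0o666 // illustrative default permission bits

	switch {
	case isFolder:
		md["is_folder"] = "true" // stand-in for common.POSIXFolderMeta
		md["mode"] = strconv.FormatUint(uint64(os.FileMode(basePerm)|os.ModeDir), 10)
		body = []byte{}
	case symlinkTarget != "":
		md["is_symlink"] = "true" // stand-in for common.POSIXSymlinkMeta
		md["mode"] = strconv.FormatUint(uint64(os.FileMode(basePerm)|os.ModeSymlink), 10)
		body = []byte(symlinkTarget) // the blob body is the link target itself
	}
	return body, md
}
```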
+ if mode & common.S_IFSOCK == common.S_IFSOCK || mode & common.S_IFIFO == common.S_IFIFO { + b.body = make([]byte, 0) + } + } } ad := blobResourceAdapter{b} var reader *bytes.Reader @@ -534,7 +585,7 @@ func (s scenarioHelper) enumerateContainerBlobProperties(a asserter, containerUR md := map[string]string(blobInfo.Metadata) props := objectProperties{ - isFolder: false, // no folders in Blob + entityType: common.EEntityType.File(), // todo: posix properties includes folders size: bp.ContentLength, contentHeaders: &h, nameValueMetadata: md, @@ -579,7 +630,6 @@ func (s scenarioHelper) downloadBlobContent(a asserter, options downloadContentO return destData[:] } -// nolint func (scenarioHelper) generatePageBlobsFromList(c asserter, containerURL azblob.ContainerURL, blobList []string, data string) { for _, blobName := range blobList { // Create the blob (PUT blob) @@ -616,7 +666,6 @@ func (scenarioHelper) generatePageBlobsFromList(c asserter, containerURL azblob. time.Sleep(time.Millisecond * 1050) } -// nolint func (scenarioHelper) generateAppendBlobsFromList(c asserter, containerURL azblob.ContainerURL, blobList []string, data string) { for _, blobName := range blobList { // Create the blob (PUT blob) @@ -647,7 +696,6 @@ func (scenarioHelper) generateAppendBlobsFromList(c asserter, containerURL azblo time.Sleep(time.Millisecond * 1050) } -// nolint func (scenarioHelper) generateBlockBlobWithAccessTier(c asserter, containerURL azblob.ContainerURL, blobName string, accessTier azblob.AccessTierType) { blob := containerURL.NewBlockBlobURL(blobName) cResp, err := blob.Upload(ctx, strings.NewReader(blockBlobDefaultData), azblob.BlobHTTPHeaders{}, @@ -657,7 +705,6 @@ func (scenarioHelper) generateBlockBlobWithAccessTier(c asserter, containerURL a } // create the demanded objects -// nolint func (scenarioHelper) generateObjects(c asserter, client *minio.Client, bucketName string, objectList []string) { size := int64(len(objectDefaultData)) for _, objectName := range objectList { @@ -668,7 +715,6 @@ func (scenarioHelper) generateObjects(c asserter, client *minio.Client, bucketNa } // create the demanded files -// nolint func (scenarioHelper) generateFlatFiles(c asserter, shareURL azfile.ShareURL, fileList []string) { for _, fileName := range fileList { file := shareURL.NewRootDirectoryURL().NewFileURL(fileName) @@ -680,7 +726,6 @@ func (scenarioHelper) generateFlatFiles(c asserter, shareURL azfile.ShareURL, fi time.Sleep(time.Millisecond * 1050) } -// nolint func (scenarioHelper) generateCommonRemoteScenarioForS3(c asserter, client *minio.Client, bucketName string, prefix string, returnObjectListWithBucketName bool) (objectList []string) { // make 50 objects with random names // 10 of them at the top level @@ -768,7 +813,7 @@ func (scenarioHelper) generateAzureFilesFromList(c asserter, options *generateAz // TODO: I'm pretty sure we don't prserve lastWritetime or contentProperties (headers) for folders, so the above if statement doesn't test those // Is that the correct decision? 
- } else { + } else if f.creationProperties.entityType == common.EEntityType.File() { file := options.shareURL.NewRootDirectoryURL().NewFileURL(f.name) // create parents first @@ -781,8 +826,10 @@ func (scenarioHelper) generateAzureFilesFromList(c asserter, options *generateAz if f.body != nil { contentR = bytes.NewReader(f.body) contentD = f.body + fileSize = contentR.Size() } else { contentR, contentD = getRandomDataAndReader(int(fileSize)) + f.body = contentD } if f.creationProperties.contentHeaders == nil { f.creationProperties.contentHeaders = &contentHeaders{} @@ -834,6 +881,8 @@ func (scenarioHelper) generateAzureFilesFromList(c asserter, options *generateAz } // TODO: do we want to put some random content into it? + } else { + panic(fmt.Sprintf("file %s unsupported entity type %s", f.name, f.creationProperties.entityType.String())) } } @@ -886,7 +935,7 @@ func (s scenarioHelper) enumerateShareFileProperties(a asserter, shareURL azfile } props := objectProperties{ - isFolder: false, // no folders in Blob + entityType: common.EEntityType.File(), // only enumerating files in list call size: &fileSize, nameValueMetadata: fProps.NewMetadata(), contentHeaders: &h, @@ -927,7 +976,7 @@ func (s scenarioHelper) enumerateShareFileProperties(a asserter, shareURL azfile // Set up properties props := objectProperties{ - isFolder: true, + entityType: common.EEntityType.Folder(), // Only enumerating directories in list call nameValueMetadata: dProps.NewMetadata(), creationTime: &creationTime, lastWriteTime: &lastWriteTime, @@ -965,7 +1014,6 @@ func (s scenarioHelper) downloadFileContent(a asserter, options downloadContentO return destData } -// nolint func (scenarioHelper) generateBFSPathsFromList(c asserter, filesystemURL azbfs.FileSystemURL, fileList []string) { for _, bfsPath := range fileList { file := filesystemURL.NewRootDirectoryURL().NewFileURL(bfsPath) @@ -997,7 +1045,6 @@ func (scenarioHelper) convertListToMap(list []*testObject, converter func(*testO return lookupMap } -// nolint func (scenarioHelper) shaveOffPrefix(list []string, prefix string) []string { cleanList := make([]string, len(list)) for i, item := range list { @@ -1006,7 +1053,6 @@ func (scenarioHelper) shaveOffPrefix(list []string, prefix string) []string { return cleanList } -// nolint func (scenarioHelper) addPrefix(list []string, prefix string) []string { modifiedList := make([]string, len(list)) for i, item := range list { @@ -1015,7 +1061,6 @@ func (scenarioHelper) addPrefix(list []string, prefix string) []string { return modifiedList } -// nolint func (scenarioHelper) getRawContainerURLWithSAS(c asserter, containerName string) url.URL { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) @@ -1024,7 +1069,6 @@ func (scenarioHelper) getRawContainerURLWithSAS(c asserter, containerName string return containerURLWithSAS.URL() } -// nolint func (scenarioHelper) getRawBlobURLWithSAS(c asserter, containerName string, blobName string) url.URL { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) @@ -1034,7 +1078,6 @@ func (scenarioHelper) getRawBlobURLWithSAS(c asserter, containerName string, blo return blobURLWithSAS.URL() } -// nolint func (scenarioHelper) getRawBlobServiceURLWithSAS(c asserter) url.URL { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) credential, 
err := azblob.NewSharedKeyCredential(accountName, accountKey) @@ -1043,7 +1086,6 @@ func (scenarioHelper) getRawBlobServiceURLWithSAS(c asserter) url.URL { return getBlobServiceURLWithSAS(c, *credential).URL() } -// nolint func (scenarioHelper) getRawFileServiceURLWithSAS(c asserter) url.URL { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) credential, err := azfile.NewSharedKeyCredential(accountName, accountKey) @@ -1052,7 +1094,6 @@ func (scenarioHelper) getRawFileServiceURLWithSAS(c asserter) url.URL { return getFileServiceURLWithSAS(c, *credential).URL() } -// nolint func (scenarioHelper) getRawAdlsServiceURLWithSAS(c asserter) azbfs.ServiceURL { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) credential := azbfs.NewSharedKeyCredential(accountName, accountKey) @@ -1060,7 +1101,6 @@ func (scenarioHelper) getRawAdlsServiceURLWithSAS(c asserter) azbfs.ServiceURL { return getAdlsServiceURLWithSAS(c, *credential) } -// nolint func (scenarioHelper) getBlobServiceURL(c asserter) azblob.ServiceURL { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) @@ -1074,7 +1114,6 @@ func (scenarioHelper) getBlobServiceURL(c asserter) azblob.ServiceURL { return azblob.NewServiceURL(*fullURL, azblob.NewPipeline(credential, azblob.PipelineOptions{})) } -// nolint func (s scenarioHelper) getContainerURL(c asserter, containerName string) azblob.ContainerURL { serviceURL := s.getBlobServiceURL(c) containerURL := serviceURL.NewContainerURL(containerName) @@ -1082,7 +1121,6 @@ func (s scenarioHelper) getContainerURL(c asserter, containerName string) azblob return containerURL } -// nolint func (scenarioHelper) getRawS3AccountURL(c asserter, region string) url.URL { rawURL := fmt.Sprintf("https://s3%s.amazonaws.com", common.IffString(region == "", "", "-"+region)) @@ -1093,7 +1131,6 @@ func (scenarioHelper) getRawS3AccountURL(c asserter, region string) url.URL { } // TODO: Possibly add virtual-hosted-style and dual stack support. Currently use path style for testing. 
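As a hedged illustration of the TODO above (not part of this change; the bucket, region, and object names are made-up assumptions), this sketch contrasts the two S3 addressing styles, mirroring the fmt.Sprintf pattern these helpers already use:

// exampleS3URLStyles is a hypothetical helper contrasting path-style
// addressing (what getRawS3AccountURL and friends build) with the
// virtual-hosted style the TODO above contemplates.
func exampleS3URLStyles() (pathStyle, virtualHosted string) {
	const region, bucket, object = "us-west-2", "mybucket", "obj.txt"
	// Path style: the bucket is the first path segment of the URL.
	pathStyle = fmt.Sprintf("https://s3-%s.amazonaws.com/%s/%s", region, bucket, object)
	// Virtual-hosted style: the bucket moves into the hostname.
	virtualHosted = fmt.Sprintf("https://%s.s3-%s.amazonaws.com/%s", bucket, region, object)
	return pathStyle, virtualHosted
}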
-// nolint func (scenarioHelper) getRawS3BucketURL(c asserter, region string, bucketName string) url.URL { rawURL := fmt.Sprintf("https://s3%s.amazonaws.com/%s", common.IffString(region == "", "", "-"+region), bucketName) @@ -1103,7 +1140,6 @@ func (scenarioHelper) getRawS3BucketURL(c asserter, region string, bucketName st return *fullURL } -// nolint func (scenarioHelper) getRawS3ObjectURL(c asserter, region string, bucketName string, objectName string) url.URL { rawURL := fmt.Sprintf("https://s3%s.amazonaws.com/%s/%s", common.IffString(region == "", "", "-"+region), bucketName, objectName) @@ -1113,7 +1149,6 @@ func (scenarioHelper) getRawS3ObjectURL(c asserter, region string, bucketName st return *fullURL } -// nolint func (scenarioHelper) getRawFileURLWithSAS(c asserter, shareName string, fileName string) url.URL { credential, err := getGenericCredentialForFile("") c.AssertNoErr(err) @@ -1122,7 +1157,6 @@ func (scenarioHelper) getRawFileURLWithSAS(c asserter, shareName string, fileNam return fileURLWithSAS.URL() } -// nolint func (scenarioHelper) getRawShareURLWithSAS(c asserter, shareName string) url.URL { accountName, accountKey := GlobalInputManager{}.GetAccountAndKey(EAccountType.Standard()) credential, err := azfile.NewSharedKeyCredential(accountName, accountKey) diff --git a/e2etest/scenario_os_helpers.go b/e2etest/scenario_os_helpers.go index 57038bd0c..30a59d7dc 100644 --- a/e2etest/scenario_os_helpers.go +++ b/e2etest/scenario_os_helpers.go @@ -1,4 +1,5 @@ -// +build !windows +//go:build !windows && !linux + // Copyright © Microsoft // // Permission is hereby granted, free of charge, to any person obtaining a copy @@ -24,40 +25,45 @@ package e2etest import ( + "github.com/Azure/azure-storage-azcopy/v10/common" "time" ) type osScenarioHelper struct{} // set file attributes to test file -//nolint func (osScenarioHelper) setAttributesForLocalFile() error { panic("should never be called") } -//nolint func (osScenarioHelper) setAttributesForLocalFiles(c asserter, dirPath string, fileList []string, attrList []string) { panic("should never be called") } -//nolint func (osScenarioHelper) getFileDates(c asserter, filePath string) (createdTime, lastWriteTime time.Time) { panic("should never be called") } -//nolint func (osScenarioHelper) getFileAttrs(c asserter, filepath string) *uint32 { var ret uint32 return &ret } -//nolint func (osScenarioHelper) getFileSDDLString(c asserter, filepath string) *string { ret := "" return &ret } -//nolint func (osScenarioHelper) setFileSDDLString(c asserter, filepath string, sddldata string) { panic("should never be called") } + +//nolint +func (osScenarioHelper) Mknod(c asserter, path string, mode uint32, dev int) { + panic("should never be called") +} + +//nolint +func (osScenarioHelper) GetUnixStatAdapterForFile(c asserter, filepath string) common.UnixStatAdapter { + panic("should never be called") +} diff --git a/e2etest/scenario_os_helpers_for_linux.go b/e2etest/scenario_os_helpers_for_linux.go new file mode 100644 index 000000000..e710e2a5c --- /dev/null +++ b/e2etest/scenario_os_helpers_for_linux.go @@ -0,0 +1,95 @@ +//go:build linux + +// Copyright © Microsoft +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the 
Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package e2etest
+
+import (
+	"github.com/Azure/azure-storage-azcopy/v10/common"
+	"github.com/Azure/azure-storage-azcopy/v10/ste"
+	"golang.org/x/sys/unix"
+	"time"
+)
+
+type osScenarioHelper struct{}
+
+// set file attributes to test file
+//nolint
+func (osScenarioHelper) setAttributesForLocalFile() error {
+	panic("should never be called")
+}
+
+//nolint
+func (osScenarioHelper) setAttributesForLocalFiles(c asserter, dirPath string, fileList []string, attrList []string) {
+	panic("should never be called")
+}
+
+//nolint
+func (osScenarioHelper) getFileDates(c asserter, filePath string) (createdTime, lastWriteTime time.Time) {
+	panic("should never be called")
+}
+
+//nolint
+func (osScenarioHelper) getFileAttrs(c asserter, filepath string) *uint32 {
+	var ret uint32
+	return &ret
+}
+
+//nolint
+func (osScenarioHelper) getFileSDDLString(c asserter, filepath string) *string {
+	ret := ""
+	return &ret
+}
+
+//nolint
+func (osScenarioHelper) setFileSDDLString(c asserter, filepath string, sddldata string) {
+	panic("should never be called")
+}
+
+func (osScenarioHelper) Mknod(c asserter, path string, mode uint32, dev int) {
+	c.AssertNoErr(unix.Mknod(path, mode, dev))
+}
+
+func (osScenarioHelper) GetUnixStatAdapterForFile(c asserter, filepath string) common.UnixStatAdapter {
+	{ // attempt to call statx; if ENOSYS is returned, statx is unavailable
+		var stat unix.Statx_t
+
+		statxFlags := unix.AT_STATX_SYNC_AS_STAT | unix.AT_SYMLINK_NOFOLLOW
+		// dirfd is a null pointer, because we should only ever be passing relative paths here, and directories will be passed via transferInfo.Source.
+		// AT_SYMLINK_NOFOLLOW is passed above, so statx inspects the symlink itself rather than automagically resolving it. TODO: add an option to follow symlinks, dropping AT_SYMLINK_NOFOLLOW when resolving is enabled.
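+		// Illustrative note, not part of the change itself: statx(2) only exists
+		// on newer kernels, and where it is missing the syscall fails with
+		// unix.ENOSYS. That is the one error the check below tolerates; it routes
+		// execution to the plain unix.Stat fallback at the bottom of this
+		// function instead of failing the assertion.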
+ err := unix.Statx(0, filepath, + statxFlags, + unix.STATX_ALL, + &stat) + + if err != nil && err != unix.ENOSYS { // catch if statx is unsupported + c.AssertNoErr(err, "for file " + filepath) + } else if err == nil { + return ste.StatxTAdapter(stat) + } + } + + var stat unix.Stat_t + err := unix.Stat(filepath, &stat) + c.AssertNoErr(err) + + return ste.StatTAdapter(stat) +} diff --git a/e2etest/scenario_os_helpers_for_windows.go b/e2etest/scenario_os_helpers_for_windows.go index ad7fde9ae..b75ce9605 100644 --- a/e2etest/scenario_os_helpers_for_windows.go +++ b/e2etest/scenario_os_helpers_for_windows.go @@ -136,3 +136,13 @@ func (osScenarioHelper) setFileSDDLString(c asserter, filepath string, sddldata err = windows.SetNamedSecurityInfo(filepath, windows.SE_FILE_OBJECT, secInfo, o, g, d, nil) c.AssertNoErr(err) } + +//nolint +func (osScenarioHelper) Mknod(c asserter, path string, mode uint32, dev int) { + panic("should never be called") +} + +//nolint +func (osScenarioHelper) GetUnixStatAdapterForFile(c asserter, filepath string) common.UnixStatAdapter { + panic("should never be called") +} diff --git a/e2etest/validator.go b/e2etest/validator.go index c72a2445c..a4ea912f3 100644 --- a/e2etest/validator.go +++ b/e2etest/validator.go @@ -55,7 +55,7 @@ func (Validator) ValidateRemoveTransfer(c asserter, isSrcEncoded bool, isDstEnco // TODO: Think of how to validate files in case of remove } func (Validator) ValidateCopyTransfersAreScheduled(c asserter, isSrcEncoded bool, isDstEncoded bool, - sourcePrefix string, destinationPrefix string, expectedTransfers []*testObject, actualTransfers []common.TransferDetail, statusToTest common.TransferStatus, fromTo common.FromTo, srcAccountType, dstAccountType AccountType) { + sourcePrefix string, destinationPrefix string, expectedTransfers []*testObject, actualTransfers []common.TransferDetail, statusToTest common.TransferStatus, expectFolders bool) { sourcePrefix = makeSlashesComparable(sourcePrefix) destinationPrefix = makeSlashesComparable(destinationPrefix) @@ -83,7 +83,7 @@ func (Validator) ValidateCopyTransfersAreScheduled(c asserter, isSrcEncoded bool return s + "/" } lookupMap := scenarioHelper{}.convertListToMap(expectedTransfers, func(to *testObject) string { - if to.isFolder() && (fromTo.To() != common.ELocation.Blob() || dstAccountType == EAccountType.HierarchicalNamespaceEnabled()) { + if to.isFolder() && expectFolders { return addFolderSuffix(to.name) } else { return to.name diff --git a/e2etest/zt_preserve_access_tier_test.go b/e2etest/zt_preserve_access_tier_test.go index 041b1cb73..f0f96c24a 100644 --- a/e2etest/zt_preserve_access_tier_test.go +++ b/e2etest/zt_preserve_access_tier_test.go @@ -26,6 +26,19 @@ import ( "testing" ) +//func TestTier_UploadCold(t *testing.T) { +// RunScenarios(t, eOperation.Copy(), eTestFromTo.Other(common.EFromTo.LocalBlob()), eValidate.Auto(), anonymousAuthOnly, anonymousAuthOnly, params{ +// recursive: true, +// accessTier: common.EBlockBlobTier.Cold().ToAccessTierType(), // this is not valid yet on the service, so this test is disabled. 
+// }, nil, testFiles{ +// defaultSize: "4M", +// shouldTransfer: []interface{}{ +// folder(""), // root folder +// f("filea"), +// }, +// }, EAccountType.Classic(), EAccountType.Standard(), "") +//} + func TestTier_V2ToClassicAccount(t *testing.T) { RunScenarios(t, eOperation.Copy(), eTestFromTo.Other(common.EFromTo.BlobBlob()), eValidate.AutoPlusContent(), anonymousAuthOnly, anonymousAuthOnly, params{ diff --git a/e2etest/zt_preserve_posix_properties_test.go b/e2etest/zt_preserve_posix_properties_test.go new file mode 100644 index 000000000..49c1c51b0 --- /dev/null +++ b/e2etest/zt_preserve_posix_properties_test.go @@ -0,0 +1,111 @@ +//go:build linux +// +build linux + +package e2etest + +import ( + "github.com/Azure/azure-storage-azcopy/v10/common" + "testing" +) + +// Block/char device rep is untested due to difficulty to test +func TestPOSIX_SpecialFilesToBlob(t *testing.T) { + ptr := func(u uint32) *uint32 { + return &u + } + + RunScenarios( + t, + eOperation.Copy(), + eTestFromTo.Other(common.EFromTo.LocalBlob(), common.EFromTo.BlobLocal()), // no blobblob since that's just metadata and we already test that + eValidate.Auto(), + anonymousAuthOnly, // this is a small test, so running it with all cred types (which will really just be oauth and anon) is fine + anonymousAuthOnly, + params{ + recursive: true, + preservePOSIXProperties: true, + symlinkHandling: common.ESymlinkHandlingType.Preserve(), + }, + nil, + testFiles{ + defaultSize: "1K", + shouldTransfer: []interface{}{ + folder(""), + f("fifo", with{ posixProperties: objectUnixStatContainer{ mode: ptr(common.DEFAULT_FILE_PERM | common.S_IFIFO) } }), // fifo should work + f("sock", with{ posixProperties: objectUnixStatContainer{ mode: ptr(common.DEFAULT_FILE_PERM | common.S_IFSOCK) } }), // sock should work + "a", + symlink("b", "a"), //symlink to real target should succeed + symlink("d", "c"), //symlink to nowhere should succeed + }, + }, + EAccountType.Standard(), EAccountType.Standard(), "", + ) +} + +// Block/char device rep is untested due to difficulty to test +func TestPOSIX_SpecialFilesToHNS(t *testing.T) { + ptr := func(u uint32) *uint32 { + return &u + } + + RunScenarios( + t, + eOperation.Copy(), + eTestFromTo.Other(common.EFromTo.LocalBlob()), // no blobblob since that's just metadata and we already test that + eValidate.Auto(), + anonymousAuthOnly, // this is a small test, so running it with all cred types (which will really just be oauth and anon) is fine + anonymousAuthOnly, + params{ + recursive: true, + preservePOSIXProperties: true, + symlinkHandling: common.ESymlinkHandlingType.Preserve(), + }, + nil, + testFiles{ + defaultSize: "1K", + shouldTransfer: []interface{}{ + folder(""), + f("fifo", with{ posixProperties: objectUnixStatContainer{ mode: ptr(common.DEFAULT_FILE_PERM | common.S_IFIFO) } }), // fifo should work + f("sock", with{ posixProperties: objectUnixStatContainer{ mode: ptr(common.DEFAULT_FILE_PERM | common.S_IFSOCK) } }), // sock should work + "a", + symlink("b", "a"), //symlink to real target should succeed + symlink("d", "c"), //symlink to nowhere should succeed + }, + }, + EAccountType.HierarchicalNamespaceEnabled(), EAccountType.Standard(), "", + ) +} + +// Block/char device rep is untested due to difficulty to test +func TestPOSIX_SpecialFilesFromHNS(t *testing.T) { + ptr := func(u uint32) *uint32 { + return &u + } + + RunScenarios( + t, + eOperation.Copy(), + eTestFromTo.Other(common.EFromTo.BlobLocal()), // no blobblob since that's just metadata and we already test that + eValidate.Auto(), + 
anonymousAuthOnly, // this is a small test, so running it with all cred types (which will really just be oauth and anon) is fine + anonymousAuthOnly, + params{ + recursive: true, + preservePOSIXProperties: true, + symlinkHandling: common.ESymlinkHandlingType.Preserve(), + }, + nil, + testFiles{ + defaultSize: "1K", + shouldTransfer: []interface{}{ + folder(""), + f("fifo", with{ posixProperties: objectUnixStatContainer{ mode: ptr(common.DEFAULT_FILE_PERM | common.S_IFIFO) } }), // fifo should work + f("sock", with{ posixProperties: objectUnixStatContainer{ mode: ptr(common.DEFAULT_FILE_PERM | common.S_IFSOCK) } }), // sock should work + "a", + symlink("b", "a"), //symlink to real target should succeed + symlink("d", "c"), //symlink to nowhere should succeed + }, + }, + EAccountType.Standard(), EAccountType.HierarchicalNamespaceEnabled(), "", + ) +} \ No newline at end of file diff --git a/e2etest/zt_preserve_properties_test.go b/e2etest/zt_preserve_properties_test.go index d94a9514e..a37c66014 100644 --- a/e2etest/zt_preserve_properties_test.go +++ b/e2etest/zt_preserve_properties_test.go @@ -62,7 +62,7 @@ func TestProperties_HNSACLs(t *testing.T) { }, nil, testFiles{ defaultSize: "1K", shouldTransfer: []interface{}{ - folder(""), + folder("", with{adlsPermissionsACL: "user::rwx,group::rw-,other::r--"}), f("filea", with{adlsPermissionsACL: "user::rwx,group::rwx,other::r--"}), folder("a", with{adlsPermissionsACL: "user::rwx,group::rwx,other::-w-"}), f("a/fileb", with{adlsPermissionsACL: "user::rwx,group::rwx,other::--x"}), diff --git a/go.mod b/go.mod index 4d9e974bf..caf8ce0d6 100644 --- a/go.mod +++ b/go.mod @@ -1,7 +1,7 @@ module github.com/Azure/azure-storage-azcopy/v10 require ( - cloud.google.com/go/storage v1.21.0 + cloud.google.com/go/storage v1.29.0 github.com/Azure/azure-pipeline-go v0.2.4-0.20220425205405-09e6f201e1e4 github.com/Azure/azure-storage-blob-go v0.15.0 github.com/Azure/azure-storage-file-go v0.6.1-0.20201111053559-3c1754dc00a5 @@ -20,43 +20,43 @@ require ( github.com/wastore/keychain v0.0.0-20180920053336-f2c902a3d807 github.com/wastore/keyctl v0.3.1 golang.org/x/crypto v0.0.0-20220314234724-5d542ad81a58 - golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5 - google.golang.org/api v0.72.0 + golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 + golang.org/x/sync v0.1.0 + golang.org/x/sys v0.5.0 + google.golang.org/api v0.106.0 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c ) require ( - cloud.google.com/go v0.100.2 // indirect - cloud.google.com/go/compute v1.5.0 // indirect - cloud.google.com/go/iam v0.3.0 // indirect + cloud.google.com/go v0.107.0 // indirect + cloud.google.com/go/compute v1.14.0 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + cloud.google.com/go/iam v0.8.0 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-ini/ini v1.66.4 // indirect github.com/golang-jwt/jwt/v4 v4.3.0 // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/google/go-cmp v0.5.7 // indirect - github.com/googleapis/gax-go/v2 v2.1.1 // indirect + github.com/google/go-cmp v0.5.9 // indirect + 
github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect + github.com/googleapis/gax-go/v2 v2.7.0 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/kr/text v0.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/objx v0.3.0 // indirect - go.opencensus.io v0.23.0 // indirect - golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect - golang.org/x/text v0.3.7 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + go.opencensus.io v0.24.0 // indirect + golang.org/x/net v0.7.0 // indirect + golang.org/x/text v0.7.0 // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106 // indirect - google.golang.org/grpc v1.45.0 // indirect - google.golang.org/protobuf v1.27.1 // indirect + google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect + google.golang.org/grpc v1.51.0 // indirect + google.golang.org/protobuf v1.28.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index 2183e517e..bf1bf8cbd 100644 --- a/go.sum +++ b/go.sum @@ -27,35 +27,365 @@ cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= -cloud.google.com/go v0.100.2 h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0 h1:qkj22L7bgkl6vIeZDlOY2po43Mx/TIa2Wsa7VR+PEww= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= 
+cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod 
h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.2.0/go.mod h1:xlogom/6gr8RJGBe7nT2eGsQYAFUbbv8dbC29qE3Xmw= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0 h1:b1zWmYuuHz7gO9kDcM/EpHGr06UgsYNRpNJzI2kFiLM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0 h1:hfm2+FfxVmnRlh6LpB7cg1ZNU+5edAHmW679JePztk0= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata 
v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v0.1.1/go.mod h1:CKqrcnI/suGpybEHxZ7BMehL0oA4LpdyJdUlTl9jVMw= -cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= 
+cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= 
+cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0 h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= 
+cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/privatecatalog v0.5.0/go.mod 
h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod 
h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.21.0 h1:HwnT2u2D309SFDHQII6m18HlrCi3jAXhUMTLOWXYH14= -cloud.google.com/go/storage v1.21.0/go.mod h1:XmRlxkgPjlBONznT2dDUU/5XlpU2OjMnKuqnZI01LAA= 
+cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storage v1.29.0 h1:6weCgzRvMg7lzuUurI4697AqIRPU1SvzHhynwpW31jI= +cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= 
+cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-pipeline-go v0.2.4-0.20220425205405-09e6f201e1e4 h1:hDJImUzpTAeIw/UasFUUDB/+UsZm5Q/6x2/jKKvEUiw= @@ -86,6 +416,7 @@ github.com/PuerkitoBio/goquery v1.7.1/go.mod h1:XY0pP4kfraEmmV1O7Uf6XyjoslwsneBb github.com/andybalholm/cascadia v1.2.0/go.mod h1:YCyR8vOZT9aZ1CHEd8ap0gMVm2aFgxBp0T0eFw1RUQY= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -99,6 +430,7 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= @@ -116,6 +448,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= 
@@ -175,8 +508,10 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -202,11 +537,23 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1 h1:RY7tHKZcRlk788d5WSo/e83gOyyy742E8GSs771ySpg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa1KqdU= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -256,14 +603,17 @@ 
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.3.0 h1:NGXK3lHquSN08v5vWalVI/L8XU9hdzE/G6xsrze47As= -github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/wastore/keychain v0.0.0-20180920053336-f2c902a3d807 h1:Uzh85j0tl46Sf2OOx1wDePSWkz3Eq8XdCFkLXqaX8Bg= github.com/wastore/keychain v0.0.0-20180920053336-f2c902a3d807/go.mod h1:zI8umr7xnBSyT9ZJ8wn48RiQ0EWXo4xmYLNw9FQvC9w= github.com/wastore/keyctl v0.3.1 h1:wMkYW9y9jGbQ1ARBLGLwnDdbgrkbuSeuIQeHy+BZOU0= @@ -273,14 +623,16 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -327,6 +679,7 @@ golang.org/x/mod 
v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -369,8 +722,19 @@ golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220107192237-5cfca573fb4d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -387,8 +751,16 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a h1:qfl7ob3DIEs3Ml9oLuPwY2N04gymzAW04WsUQHIClgM= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod 
h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -399,8 +771,12 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -455,13 +831,24 @@ golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220110181412-a018aaa089fe/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5 h1:y/woIyUBFbpQGKS0u1aHF/40WUDnek3fPOyD08H5Vng= -golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -470,11 +857,17 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -526,11 +919,16 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -562,13 +960,28 @@ google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.64.0/go.mod h1:931CdxA8Rm4t6zqTFGSsgwbAEZ2+GMYurbndwSimebM= -google.golang.org/api v0.66.0/go.mod h1:I1dmXYpX7HGwz/ejRxwQp2qj5bFAz93HiCU1C1oYd9M= google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.69.0/go.mod h1:boanBiw+h5c3s+tBPgEzLDRHfFLWV0qXxRHz3ws7C80= google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.72.0 h1:rPZI0IqY9chaZ4Wq1bDz8YVIPT58pCnO6KnkIPq8xe0= -google.golang.org/api v0.72.0/go.mod h1:lbd/q6BRFJbdpV6OUCXstVeiI5mL/d3/WifG7iNKnjI= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod 
h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.106.0 h1:ffmW0faWCwKkpbbtvlY/K/8fUl+JKvNS5CVzRoyfCv8= +google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -616,6 +1029,7 @@ google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= @@ -637,19 +1051,56 @@ google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220114231437-d2e6a121cae0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220201184016-50beb8ab5c44/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod 
h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220211171837-173942840c17/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220216160803-4663080d8bc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106 h1:ErU+UA6wxadoU8nWrsy5MZUVBs75K17zUCsUCIfrXCE= -google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod 
h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -677,8 +1128,16 @@ google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod 
h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -692,8 +1151,10 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -704,8 +1165,9 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/jobsAdmin/JobsAdmin.go b/jobsAdmin/JobsAdmin.go index f9458640d..00b4d3445 100755 --- a/jobsAdmin/JobsAdmin.go +++ b/jobsAdmin/JobsAdmin.go @@ -239,13 +239,12 @@ func (ja *jobsAdmin) recordTuningCompleted(showOutput bool) { // There will be only 1 instance of the jobsAdmin type. // The coordinator uses this to manage all the running jobs and their job parts. 
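The hunk that follows trims an unused counter from the jobsAdmin struct, and the surviving field order is not cosmetic: the trailing comment ("align 64 bit integers for 32 bit arch") refers to the sync/atomic rule that 64-bit operands must be 64-bit aligned on 32-bit platforms, and only the first word of an allocated struct is guaranteed that alignment. A hedged sketch of the idea, with hypothetical names:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type counters struct {
	bytesTransferredWhileTuning int64 // 64-bit atomics first: first-word alignment is guaranteed
	tuningEndSeconds            int64 // contiguous int64s stay 64-bit aligned
	currentMainPoolSize         int32 // narrower, non-atomic-64 fields follow
}

func main() {
	var c counters
	atomic.AddInt64(&c.bytesTransferredWhileTuning, 1024)
	fmt.Println(atomic.LoadInt64(&c.bytesTransferredWhileTuning))
}
```

Removing atomicSuccessfulBytesInActiveFiles stays safe under this rule because the remaining int64 fields still open the struct.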
type jobsAdmin struct { - atomicSuccessfulBytesInActiveFiles int64 - atomicBytesTransferredWhileTuning int64 - atomicTuningEndSeconds int64 - atomicCurrentMainPoolSize int32 // align 64 bit integers for 32 bit arch - concurrency ste.ConcurrencySettings - logger common.ILoggerCloser - jobIDToJobMgr jobIDToJobMgr // Thread-safe map from each JobID to its JobInfo + atomicBytesTransferredWhileTuning int64 + atomicTuningEndSeconds int64 + atomicCurrentMainPoolSize int32 // align 64 bit integers for 32 bit arch + concurrency ste.ConcurrencySettings + logger common.ILoggerCloser + jobIDToJobMgr jobIDToJobMgr // Thread-safe map from each JobID to its JobInfo // Other global state can be stored in more fields here... logDir string // Where log files are stored planDir string // Initialize to directory where Job Part Plans are stored @@ -367,7 +366,7 @@ func (ja *jobsAdmin) ResurrectJob(jobId common.JobID, sourceSAS string, destinat // are include in the result files := func(prefix, ext string) []os.FileInfo { var files []os.FileInfo - filepath.Walk(ja.planDir, func(path string, fileInfo os.FileInfo, _ error) error { + _ = filepath.Walk(ja.planDir, func(path string, fileInfo os.FileInfo, _ error) error { if !fileInfo.IsDir() && fileInfo.Size() != 0 && strings.HasPrefix(fileInfo.Name(), prefix) && strings.HasSuffix(fileInfo.Name(), ext) { files = append(files, fileInfo) } @@ -404,7 +403,7 @@ func (ja *jobsAdmin) ResurrectJobParts() { // Get all the Job part plan files in the plan directory files := func(ext string) []os.FileInfo { var files []os.FileInfo - filepath.Walk(ja.planDir, func(path string, fileInfo os.FileInfo, _ error) error { + _ = filepath.Walk(ja.planDir, func(path string, fileInfo os.FileInfo, _ error) error { if !fileInfo.IsDir() && fileInfo.Size() != 0 && strings.HasSuffix(fileInfo.Name(), ext) { files = append(files, fileInfo) } @@ -431,7 +430,7 @@ func (ja *jobsAdmin) ListJobs(givenStatus common.JobStatus) common.ListJobsRespo ret := common.ListJobsResponse{JobIDDetails: []common.JobIDDetails{}} files := func(ext string) []os.FileInfo { var files []os.FileInfo - filepath.Walk(ja.planDir, func(path string, fileInfo os.FileInfo, _ error) error { + _ = filepath.Walk(ja.planDir, func(path string, fileInfo os.FileInfo, _ error) error { if !fileInfo.IsDir() && strings.HasSuffix(fileInfo.Name(), ext) { files = append(files, fileInfo) } @@ -466,7 +465,7 @@ func (ja *jobsAdmin) ListJobs(givenStatus common.JobStatus) common.ListJobsRespo func (ja *jobsAdmin) SetConcurrencySettingsToAuto() { // Setting initial pool size to 4 and max pool size to 3,000 ja.concurrency.InitialMainPoolSize = 4 - ja.concurrency.MaxMainPoolSize = &ste.ConfiguredInt{3000, false, common.EEnvironmentVariable.ConcurrencyValue().Name, "auto-tuning limit"} + ja.concurrency.MaxMainPoolSize = &ste.ConfiguredInt{Value: 3000, IsUserSpecified: false, EnvVarName: common.EEnvironmentVariable.ConcurrencyValue().Name, DefaultSourceDesc: "auto-tuning limit"} // recreate the concurrency tuner. // Tuner isn't called until the first job part is scheduled for transfer, so it is safe to update it before that. @@ -584,13 +583,6 @@ func (ja *jobsAdmin) TryGetPerformanceAdvice(bytesInJob uint64, filesInJob uint3 return a.GetAdvice() } -//Structs for messageHandler - -/* PerfAdjustment message. 
*/ -type jaPerfAdjustmentMsg struct { - Throughput int64 `json:"cap-mbps,string"` -} - func (ja *jobsAdmin) messageHandler(inputChan <-chan *common.LCMMsg) { toBitsPerSec := func(megaBitsPerSec int64) int64 { return megaBitsPerSec * 1000 * 1000 / 8 @@ -603,7 +595,7 @@ func (ja *jobsAdmin) messageHandler(inputChan <-chan *common.LCMMsg) { for { msg := <-inputChan var msgType common.LCMMsgType - msgType.Parse(msg.Req.MsgType) // MsgType is already verified by LCM + _ = msgType.Parse(msg.Req.MsgType) // MsgType is already verified by LCM switch msgType { case common.ELCMMsgType.PerformanceAdjustment(): var resp common.PerfAdjustmentResp diff --git a/jobsAdmin/init.go b/jobsAdmin/init.go index 92d9e6024..11a0a5da9 100755 --- a/jobsAdmin/init.go +++ b/jobsAdmin/init.go @@ -24,9 +24,10 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "math" "net/http" + "os" "time" "github.com/Azure/azure-pipeline-go/pipeline" @@ -64,7 +65,7 @@ func MainSTE(concurrency ste.ConcurrencySettings, targetRateInMegaBitsPerSec flo // if we've a custom mime map if path := common.GetLifecycleMgr().GetEnvironmentVariable(common.EEnvironmentVariable.MimeMapping()); path != "" { - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return err } @@ -81,12 +82,12 @@ func MainSTE(concurrency ste.ConcurrencySettings, targetRateInMegaBitsPerSec flo deserialize := func(request *http.Request, v interface{}) { // TODO: Check the HTTP verb here? // reading the entire request body and closing the request body - body, err := ioutil.ReadAll(request.Body) + body, err := io.ReadAll(request.Body) request.Body.Close() if err != nil { JobsAdmin.Panic(fmt.Errorf("error deserializing HTTP request")) } - json.Unmarshal(body, v) + _ = json.Unmarshal(body, v) } serialize := func(v interface{}, response http.ResponseWriter) { payload, err := json.Marshal(response) @@ -95,7 +96,7 @@ func MainSTE(concurrency ste.ConcurrencySettings, targetRateInMegaBitsPerSec flo } // sending successful response back to front end response.WriteHeader(http.StatusAccepted) - response.Write(payload) + _, _ = response.Write(payload) } http.HandleFunc(common.ERpcCmd.CopyJobPartOrder().Pattern(), func(writer http.ResponseWriter, request *http.Request) { @@ -188,6 +189,7 @@ func ExecuteNewCopyJobPartOrder(order common.CopyJobPartOrderRequest) common.Cop IsFinalPart: order.IsFinalPart, TotalBytesEnumerated: order.Transfers.TotalSizeInBytes, FileTransfers: order.Transfers.FileTransferCount, + SymlinkTransfers: order.Transfers.SymlinkTransferCount, FolderTransfer: order.Transfers.FolderTransferCount}) return common.CopyJobPartOrderResponse{JobStarted: true} @@ -422,7 +424,7 @@ func ResumeJobOrder(req common.ResumeJobRequest) common.CancelPauseResumeRespons // If the transfer status is less than -1, it means the transfer failed because of some reason. // Transfer Status needs to reset. 
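The init.go hunk above swaps the deprecated io/ioutil helpers for their Go 1.16 successors: ioutil.ReadFile becomes os.ReadFile and ioutil.ReadAll becomes io.ReadAll, with identical signatures and behavior. A minimal sketch of the replacements (the file path is hypothetical):

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	// was: ioutil.ReadFile(path) — same signature and semantics.
	data, err := os.ReadFile("mime.types") // hypothetical custom mime map
	if err != nil {
		fmt.Println("read file:", err)
	} else {
		fmt.Println("read", len(data), "bytes")
	}

	// was: ioutil.ReadAll(r) — same signature and semantics.
	body, _ := io.ReadAll(strings.NewReader(`{"cmd":"resume"}`))
	fmt.Println(string(body))
}
```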
if jppt.TransferStatus() <= common.ETransferStatus.Failed() { - jppt.SetTransferStatus(common.ETransferStatus.Started(), true) + jppt.SetTransferStatus(common.ETransferStatus.Restarted(), true) jppt.SetErrorCode(0, true) } } @@ -563,10 +565,13 @@ func resurrectJobSummary(jm ste.IJobMgr) common.ListJobSummaryResponse { jppt := jpp.Transfer(t) js.TotalBytesEnumerated += uint64(jppt.SourceSize) - if jppt.EntityType == common.EEntityType.File() { + switch jppt.EntityType { + case common.EEntityType.File(): js.FileTransfers++ - } else { + case common.EEntityType.Folder(): js.FolderPropertyTransfers++ + case common.EEntityType.Symlink(): + js.SymlinkTransfers++ } // check for all completed transfer to calculate the progress percentage at the end @@ -574,6 +579,7 @@ func resurrectJobSummary(jm ste.IJobMgr) common.ListJobSummaryResponse { case common.ETransferStatus.NotStarted(), common.ETransferStatus.FolderCreated(), common.ETransferStatus.Started(), + common.ETransferStatus.Restarted(), common.ETransferStatus.Cancelled(): js.TotalBytesExpected += uint64(jppt.SourceSize) case common.ETransferStatus.Success(): diff --git a/main.go b/main.go index 1985a05a3..abdc81a25 100644 --- a/main.go +++ b/main.go @@ -26,7 +26,6 @@ import ( "os" "path" "runtime" - "runtime/debug" "time" "github.com/Azure/azure-pipeline-go/pipeline" @@ -67,7 +66,7 @@ func main() { } if err := os.MkdirAll(azcopyJobPlanFolder, os.ModeDir|os.ModePerm); err != nil && !os.IsExist(err) { - log.Fatalf("Problem making .azcopy directory. Try setting AZCOPY_PLAN_FILE_LOCATION env variable. %v", err) + log.Fatalf("Problem making .azcopy directory. Try setting AZCOPY_JOB_PLAN_LOCATION env variable. %v", err) } jobID := common.NewJobID() @@ -78,7 +77,6 @@ func main() { } configureGoMaxProcs() - configureGC() // Perform os specific initialization maxFileAndSocketHandles, err := ProcessOSSpecificInitialization() @@ -90,16 +88,6 @@ func main() { glcm.Exit(nil, common.EExitCode.Success()) } -// Golang's default behaviour is to GC when new objects = (100% of) total of objects surviving previous GC. -// But our "survivors" add up to many GB, so its hard for users to be confident that we don't have -// a memory leak (since with that default setting new GCs are very rare in our case). So configure them to be more frequent. -func configureGC() { - go func() { - time.Sleep(20 * time.Second) // wait a little, so that our initial pool of buffers can get allocated without heaps of (unnecessary) GC activity - debug.SetGCPercent(20) // activate more aggressive/frequent GC than the default - }() -} - // Ensure we always have more than 1 OS thread running goroutines, since there are issues with having just 1. // (E.g. version check doesn't happen at login time, if have only one go proc. Not sure why that happens if have only one // proc. 
Is presumably due to the high CPU usage we see on login if only 1 CPU, even tho can't see any busy-wait in that code) diff --git a/main_windows.go b/main_windows.go index dd05e37ed..c3ca18496 100644 --- a/main_windows.go +++ b/main_windows.go @@ -21,27 +21,15 @@ package main import ( + "github.com/minio/minio-go" "math" "net/http" - "os/exec" "path" "strings" - "syscall" - - "github.com/minio/minio-go" "github.com/Azure/azure-storage-azcopy/v10/common" ) -func osModifyProcessCommand(cmd *exec.Cmd) *exec.Cmd { - // On Windows, create the child process in new process group to avoid receiving signals - // (Ctrl+C, Ctrl+Break) from the console - cmd.SysProcAttr = &syscall.SysProcAttr{ - CreationFlags: syscall.CREATE_NEW_PROCESS_GROUP, - } - return cmd -} - // ProcessOSSpecificInitialization changes the soft limit for filedescriptor for process // return the filedescriptor limit for process. If the function fails with some, it returns // the error diff --git a/perf-test.yaml b/perf-test.yaml new file mode 100644 index 000000000..6c935d9f7 --- /dev/null +++ b/perf-test.yaml @@ -0,0 +1,152 @@ +trigger: none +pr: none + +stages: +- stage: Smallfiles + jobs: + - job: PerformanceTest + timeoutInMinutes: 720 + strategy: + matrix: + Ubuntu-22: + imageName: "azcopyPerfTestUbuntu22.04" + Description: "AzCopy Perf Test" + + pool: + name: "AzCopyPerfTestUbuntu" + demands: + - ImageOverride -equals $(imageName) + + variables: + - group: AzCopyPerfTestTargets + - name: localPath + value: "/mnt/storage" + + steps: + - script: | + echo $(Description) + hostnamectl + displayName: 'Print Agent Info' + + - task: GoTool@0 + inputs: + version: '1.19.3' + + - script: | + go build -o $GOROOT/bin/azcopy + azcopy --version + displayName: 'Build Azcopy' + + - script: | + time azcopy copy $(Blob2BlobLargeFilesSrc) $(Blob2BlobLargeFilesDst) --recursive --block-size-mb=128 --log-level=ERROR --cap-mbps=40000 + displayName: 'Blob2Blob - Large Files' + condition: always() + env: + AZCOPY_AUTO_LOGIN_TYPE: $(AZCOPY_AUTO_LOGIN_TYPE) + AZCOPY_MSI_CLIENT_ID: $(AZCOPY_MSI_CLIENT_ID) + AZCOPY_LOG_LOCATION: $(Build.ArtifactStagingDirectory)/logs + AZCOPY_CONCURRENCY_VALUE: "256" + AZCOPY_SHOW_PERF_STATES: "1" + + - script: | + time azcopy copy $(Blob2BlobSmallAndMedFilesSrc) $(Blob2BlobSmallAndMedFilesDst) --recursive --block-size-mb=128 --log-level=ERROR + displayName: 'Blob2Blob - Small to Medium sized files' + condition: always() + env: + AZCOPY_AUTO_LOGIN_TYPE: $(AZCOPY_AUTO_LOGIN_TYPE) + AZCOPY_MSI_CLIENT_ID: $(AZCOPY_MSI_CLIENT_ID) + AZCOPY_LOG_LOCATION: $(Build.ArtifactStagingDirectory)/logs + AZCOPY_CONCURRENCY_VALUE: "256" + AZCOPY_SHOW_PERF_STATES: "1" + + - script: | + time azcopy copy $(Blob2BlobSmallFilesSrc) $(Blob2BlobSmallFilesDst) --recursive --check-length=false --log-level=ERROR + displayName: 'Blob2Blob - Small Files' + condition: always() + env: + AZCOPY_AUTO_LOGIN_TYPE: $(AZCOPY_AUTO_LOGIN_TYPE) + AZCOPY_MSI_CLIENT_ID: $(AZCOPY_MSI_CLIENT_ID) + AZCOPY_LOG_LOCATION: $(Build.ArtifactStagingDirectory)/logs + AZCOPY_CONCURRENCY_VALUE: "256" + AZCOPY_SHOW_PERF_STATES: "1" + + - script: | + sudo mkdir -m 777 $(localPath)/largeFiles/ + time azcopy copy $(Blob2BlobLargeFilesSrc) /dev/null --recursive --log-level=ERROR + displayName: 'Download - Large files' + condition: always() + env: + AZCOPY_AUTO_LOGIN_TYPE: $(AZCOPY_AUTO_LOGIN_TYPE) + AZCOPY_MSI_CLIENT_ID: $(AZCOPY_MSI_CLIENT_ID) + AZCOPY_SHOW_PERF_STATES: "1" + AZCOPY_LOG_LOCATION: $(Build.ArtifactStagingDirectory)/logs + + - script: | + time azcopy bench 
$(Blob2BlobLargeFilesDst) --log-level=ERROR --size-per-file=50G --file-count=50 --put-md5=false --delete-test-data=false + sudo rm -rf $(localPath)/* + displayName: 'Upload - Large files' + condition: always() + env: + AZCOPY_AUTO_LOGIN_TYPE: $(AZCOPY_AUTO_LOGIN_TYPE) + AZCOPY_MSI_CLIENT_ID: $(AZCOPY_MSI_CLIENT_ID) + AZCOPY_SHOW_PERF_STATES: "1" + AZCOPY_LOG_LOCATION: $(Build.ArtifactStagingDirectory)/logs + + - script: | + sudo mkdir -m 777 $(localPath)/smallToMediumFiles/ + time azcopy copy $(Blob2BlobSmallAndMedFilesSrc) $(localPath)/smallToMediumFiles --recursive --log-level=ERROR + displayName: 'Download - Small to Medium sized files' + condition: always() + env: + AZCOPY_AUTO_LOGIN_TYPE: $(AZCOPY_AUTO_LOGIN_TYPE) + AZCOPY_MSI_CLIENT_ID: $(AZCOPY_MSI_CLIENT_ID) + AZCOPY_SHOW_PERF_STATES: "1" + AZCOPY_LOG_LOCATION: $(Build.ArtifactStagingDirectory)/logs + + - script: | + time azcopy copy $(localPath)/smallToMediumFiles/ $(Blob2BlobSmallAndMedFilesDst) --recursive --log-level=ERROR + sudo rm -rf $(localPath)/* + displayName: 'Upload - Small to Medium sized files' + condition: always() + env: + AZCOPY_AUTO_LOGIN_TYPE: $(AZCOPY_AUTO_LOGIN_TYPE) + AZCOPY_MSI_CLIENT_ID: $(AZCOPY_MSI_CLIENT_ID) + AZCOPY_SHOW_PERF_STATES: "1" + AZCOPY_LOG_LOCATION: $(Build.ArtifactStagingDirectory)/logs + + - script: | + sudo mkdir -m 777 $(localPath)/smallFiles/ + time azcopy copy $(Blob2BlobSmallFilesSrc) /dev/null --recursive --check-length=false --log-level=ERROR + displayName: 'Download - Small Files' + condition: always() + env: + AZCOPY_AUTO_LOGIN_TYPE: $(AZCOPY_AUTO_LOGIN_TYPE) + AZCOPY_MSI_CLIENT_ID: $(AZCOPY_MSI_CLIENT_ID) + AZCOPY_SHOW_PERF_STATES: "1" + AZCOPY_LOG_LOCATION: $(Build.ArtifactStagingDirectory)/logs + + - script: | + time azcopy bench $(Blob2BlobSmallFilesDst) --size-per-file=5k --file-count=8000000 --check-length=false --log-level=ERROR --delete-test-data=false + sudo rm -rf $(localPath)/* + displayName: 'Upload - Small Files' + condition: always() + env: + AZCOPY_AUTO_LOGIN_TYPE: $(AZCOPY_AUTO_LOGIN_TYPE) + AZCOPY_MSI_CLIENT_ID: $(AZCOPY_MSI_CLIENT_ID) + AZCOPY_SHOW_PERF_STATES: "1" + AZCOPY_LOG_LOCATION: $(Build.ArtifactStagingDirectory)/logs + + - task: PublishBuildArtifacts@1 + condition: always() + inputs: + pathToPublish: $(Build.ArtifactStagingDirectory) + artifactName: Logs + + - script: | + curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash + az login --identity --username $(AZCOPY_MSI_CLIENT_ID) + for container in `az storage container list --account-name $(DestinationAccount) --query "[*].[name]" --output tsv --auth-mode login`; do + az storage container delete --account-name $(DestinationAccount) --name $container --auth-mode login + done + displayName: 'Clean destination storage Account' + condition: always() \ No newline at end of file diff --git a/sddl/sddlHelper_linux.go b/sddl/sddlHelper_linux.go index 874550d7f..5d5384ee8 100644 --- a/sddl/sddlHelper_linux.go +++ b/sddl/sddlHelper_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux // Copyright Microsoft @@ -872,6 +873,7 @@ func aceRightsToString(aceRights uint32) string { // Does the aceType correspond to an object ACE? // We don't support object ACEs. 
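The sddlHelper_linux.go hunk above (and several hunks below) places the //go:build constraint introduced in Go 1.17 directly above the legacy // +build line. Keeping both spellings is deliberate; a sketch of the resulting file-header shape:

```go
//go:build linux
// +build linux

// Both lines above select the same builds: //go:build (Go 1.17+) takes a
// boolean expression, while the legacy // +build spelling keeps pre-1.17
// toolchains working. gofmt maintains the pair automatically.
package sddl

import "runtime"

// goosSketch is a hypothetical helper; under the constraint above it can
// only ever observe "linux".
func goosSketch() string { return runtime.GOOS }
```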
+//nolint:deadcode,unused func isObjectAce(aceType byte) bool { switch aceType { case ACCESS_ALLOWED_OBJECT_ACE_TYPE, diff --git a/ste/JobPartPlan.go b/ste/JobPartPlan.go index ef7cd7638..1d2b8bee7 100644 --- a/ste/JobPartPlan.go +++ b/ste/JobPartPlan.go @@ -262,7 +262,7 @@ func (jpph *JobPartPlanHeader) TransferSrcPropertiesAndMetadata(transferIndex ui if t.SrcBlobTagsLength != 0 { blobTagsString := jpph.getString(offset, t.SrcBlobTagsLength) blobTags = common.ToCommonBlobTagsMap(blobTagsString) - offset += int64(t.SrcBlobTagsLength) + offset += int64(t.SrcBlobTagsLength) //nolint:ineffassign } return } diff --git a/ste/JobPartPlanFileName.go b/ste/JobPartPlanFileName.go index 13ac8cfe7..0da1e5f60 100644 --- a/ste/JobPartPlanFileName.go +++ b/ste/JobPartPlanFileName.go @@ -42,7 +42,8 @@ func (jpfn JobPartPlanFileName) Parse() (jobID common.JobID, partNumber common.P jpfnSplit := strings.Split(string(jpfn), "--") jobId, err := common.ParseJobID(jpfnSplit[0]) if err != nil { - err = fmt.Errorf("failed to parse the JobId from JobPartFileName %s. Failed with error %s", string(jpfn), err.Error()) + err = fmt.Errorf("failed to parse the JobId from JobPartFileName %s. Failed with error %s", string(jpfn), err.Error()) //nolint:staticcheck + // TODO: return here on error? or ignore } jobID = jobId n, err := fmt.Sscanf(jpfnSplit[1], "%05d.steV%d", &partNumber, &dataSchemaVersion) @@ -119,7 +120,7 @@ func (jpfn JobPartPlanFileName) Create(order common.CopyJobPartOrderRequest) { rv := reflect.ValueOf(v) structSize := reflect.TypeOf(v).Elem().Size() slice := reflect.SliceHeader{Data: rv.Pointer(), Len: int(structSize), Cap: int(structSize)} - byteSlice := *(*[]byte)(unsafe.Pointer(&slice)) + byteSlice := *(*[]byte)(unsafe.Pointer(&slice)) //nolint:govet err := binary.Write(writer, binary.LittleEndian, byteSlice) common.PanicIfErr(err) return int64(structSize) diff --git a/ste/concurrencyTuner.go b/ste/concurrencyTuner.go index ed9b0e50b..3b03dad6f 100644 --- a/ste/concurrencyTuner.go +++ b/ste/concurrencyTuner.go @@ -21,7 +21,6 @@ package ste import ( - "github.com/Azure/azure-storage-azcopy/v10/common" "sync" "sync/atomic" ) @@ -73,7 +72,6 @@ type autoConcurrencyTuner struct { } initialConcurrency int maxConcurrency int - cpuMonitor common.CPUMonitor callbacksWhenStable chan func() finalReason string finalConcurrency int @@ -233,10 +231,10 @@ func (t *autoConcurrencyTuner) worker() { } if multiplier < minMulitplier { - break // no point in tuning any more + break // no point in tuning anymore } else { - lastReason = t.setConcurrency(concurrency, concurrencyReasonBackoff) - lastSpeed, _ = t.getCurrentSpeed() // must re-measure immediately after backing off + lastReason = t.setConcurrency(concurrency, concurrencyReasonBackoff) //nolint:staticcheck + lastSpeed, _ = t.getCurrentSpeed() // must re-measure immediately after backing off } } } diff --git a/ste/downloader-azureFiles_linux.go b/ste/downloader-azureFiles_linux.go index 87e11198c..6847fde83 100644 --- a/ste/downloader-azureFiles_linux.go +++ b/ste/downloader-azureFiles_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package ste @@ -56,7 +57,7 @@ func (*azureFilesDownloader) PutSMBProperties(sip ISMBPropertyBearingSourceInfoP var ts [2]unix.Timespec // Don't set atime. - ts[0] = unix.Timespec{unix.UTIME_OMIT, unix.UTIME_OMIT} + ts[0] = unix.Timespec{Sec: unix.UTIME_OMIT, Nsec: unix.UTIME_OMIT} // Set mtime to smbLastWrite. 
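Several hunks here silence linters line-by-line instead of refactoring: JobPartPlanFileName.Parse above builds an error that a later assignment overwrites, so the new //nolint:staticcheck directive plus a TODO records the open question rather than changing behavior. A hedged, self-contained imitation of that pattern (not the azcopy code itself):

```go
package main

import "fmt"

// parse imitates the Parse hunk above: err is assigned, then overwritten
// before it is ever read, which staticcheck reports (an SA4006-style
// finding); //nolint suppresses exactly that line while the TODO keeps the
// unresolved question visible in review.
func parse(raw string) error {
	var err error
	if len(raw) == 0 {
		err = fmt.Errorf("failed to parse job part file name %q", raw) //nolint:staticcheck
		// TODO: return here on error? or ignore
	}
	err = nil // overwrites the error above — the assignment the linter flags
	return err
}

func main() {
	fmt.Println(parse(""))
}
```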
ts[1] = unix.NsecToTimespec(smbLastWrite.UnixNano()) diff --git a/ste/downloader-azureFiles_windows.go b/ste/downloader-azureFiles_windows.go index 6da4845e6..021b65b16 100644 --- a/ste/downloader-azureFiles_windows.go +++ b/ste/downloader-azureFiles_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package ste @@ -77,7 +78,8 @@ func (*azureFilesDownloader) PutSMBProperties(sip ISMBPropertyBearingSourceInfoP err = windows.SetFileTime(fd, &smbCreationFileTime, nil, pLastWriteTime) if err != nil { - err = fmt.Errorf("attempted update file times: %w", err) + err = fmt.Errorf("attempted update file times: %w", err) //nolint:staticcheck,ineffassign + // TODO: return here on error? or ignore } return nil } diff --git a/ste/downloader-blob.go b/ste/downloader-blob.go index 335442b36..5f25fade9 100644 --- a/ste/downloader-blob.go +++ b/ste/downloader-blob.go @@ -22,6 +22,7 @@ package ste import ( "net/url" + "os" "github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-storage-azcopy/v10/common" @@ -38,12 +39,22 @@ type blobDownloader struct { // used to avoid downloading zero ranges of page blobs pageRangeOptimizer *pageRangeOptimizer - // used to avoid re-setting file mode - setMode bool + jptm IJobPartTransferMgr + txInfo TransferInfo +} + +func (bd *blobDownloader) CreateSymlink(jptm IJobPartTransferMgr) error { + sip, err := newBlobSourceInfoProvider(jptm) + if err != nil { + return err + } + symsip := sip.(ISymlinkBearingSourceInfoProvider) // blob always implements this + symlinkInfo, _ := symsip.ReadLink() - jptm IJobPartTransferMgr - txInfo TransferInfo - fileMode uint32 + // create the link + err = os.Symlink(symlinkInfo, jptm.Info().Destination) + + return err } func newBlobDownloader() downloader { @@ -68,6 +79,27 @@ func (bd *blobDownloader) Prologue(jptm IJobPartTransferMgr, srcPipeline pipelin } func (bd *blobDownloader) Epilogue() { + if bd.jptm != nil { + if bd.jptm.IsLive() && bd.jptm.Info().PreservePOSIXProperties { + bsip, err := newBlobSourceInfoProvider(bd.jptm) + if err != nil { + bd.jptm.FailActiveDownload("get blob source info provider", err) + } + unixstat, _ := bsip.(IUNIXPropertyBearingSourceInfoProvider) + if ubd, ok := (interface{})(bd).(unixPropertyAwareDownloader); ok && unixstat.HasUNIXProperties() { + adapter, err := unixstat.GetUNIXProperties() + if err != nil { + bd.jptm.FailActiveDownload("get unix properties", err) + } + + stage, err := ubd.ApplyUnixProperties(adapter) + if err != nil { + bd.jptm.FailActiveDownload("set unix properties: "+stage, err) + } + } + } + } + _ = bd.filePacer.Close() } diff --git a/ste/downloader-blob_linux.go b/ste/downloader-blob_linux.go new file mode 100644 index 000000000..527cda65a --- /dev/null +++ b/ste/downloader-blob_linux.go @@ -0,0 +1,190 @@ +// +build linux + +package ste + +import ( + "fmt" + "github.com/Azure/azure-storage-azcopy/v10/common" + "golang.org/x/sys/unix" + "io" + "os" + "syscall" + "time" +) + +// CreateFile covers the following UNIX properties: +// File Mode, File Type +func (bd *blobDownloader) CreateFile(jptm IJobPartTransferMgr, destination string, size int64, writeThrough bool, t FolderCreationTracker) (file io.WriteCloser, needChunks bool, err error) { + var sip ISourceInfoProvider + sip, err = newBlobSourceInfoProvider(jptm) + if err != nil { + return + } + + unixSIP := sip.(IUNIXPropertyBearingSourceInfoProvider) // Blob may have unix properties. 
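// The unchecked assertion above is intentional: blob source info providers always
// implement IUNIXPropertyBearingSourceInfoProvider, so a failure here would be a
// programming error. Where the capability is genuinely optional, this PR probes with
// the comma-ok form instead; a sketch of that pattern:
//
//	if unixSIP, ok := sip.(IUNIXPropertyBearingSourceInfoProvider); ok && unixSIP.HasUNIXProperties() {
//		stat, err := unixSIP.GetUNIXProperties()
//		// ... apply stat, handle err
//	}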
+
+	err = common.CreateParentDirectoryIfNotExist(destination, t)
+	if err != nil {
+		return
+	}
+
+	// try to remove the file before we create something else over it
+	_ = os.Remove(destination)
+
+	needChunks = size > 0
+	needMakeFile := true
+	var mode = uint32(common.DEFAULT_FILE_PERM)
+	if jptm.Info().PreservePOSIXProperties && unixSIP.HasUNIXProperties() {
+		var stat common.UnixStatAdapter
+		stat, err = unixSIP.GetUNIXProperties()
+		if err != nil { // stat is nil on failure; bail out before dereferencing it
+			return
+		}
+
+		if stat.Extended() {
+			if stat.StatxMask()&common.STATX_MODE == common.STATX_MODE { // We need to retain access to the file until we're well & done with it
+				mode = stat.FileMode() | common.DEFAULT_FILE_PERM
+			}
+		} else {
+			mode = stat.FileMode() | common.DEFAULT_FILE_PERM
+		}
+
+		if mode != 0 { // Folders & Symlinks are not necessary to handle
+			switch {
+			case mode&common.S_IFBLK == common.S_IFBLK || mode&common.S_IFCHR == common.S_IFCHR:
+				// the file is representative of a device and does not need to be written to
+				err = unix.Mknod(destination, mode, int(stat.RDevice()))
+
+				needChunks = false
+				needMakeFile = false
+			case mode&common.S_IFIFO == common.S_IFIFO || mode&common.S_IFSOCK == common.S_IFSOCK:
+				// the file is a pipe and does not need to be written to
+				err = unix.Mknod(destination, mode, 0)
+
+				needChunks = false
+				needMakeFile = false
+			}
+		}
+	}
+
+	if !needMakeFile {
+		return
+	}
+
+	flags := os.O_RDWR | os.O_CREATE | os.O_TRUNC
+	if writeThrough {
+		flags |= os.O_SYNC
+	}
+
+	file, err = os.OpenFile(destination, flags, os.FileMode(mode)) // os.FileMode is uint32 on Linux.
+	if err != nil {
+		return
+	}
+
+	if size == 0 {
+		return
+	}
+
+	err = syscall.Fallocate(int(file.(*os.File).Fd()), 0, 0, size)
+	if err == syscall.ENOTSUP {
+		err = file.(*os.File).Truncate(size) // err will get returned at the end
+	}
+
+	return
+}
+
+func (bd *blobDownloader) ApplyUnixProperties(adapter common.UnixStatAdapter) (stage string, err error) {
+	destination := bd.txInfo.getDownloadPath()
+
+	// First, grab our file descriptor and such.
+	fi, err := os.Stat(destination)
+	if err != nil {
+		return "stat", err
+	}
+
+	// At this point, mode has already been applied. Let's work out what we need to apply, and apply the rest.
+	if adapter.Extended() {
+		stat := fi.Sys().(*syscall.Stat_t)
+		mask := adapter.StatxMask()
+
+		// stx_attributes is not persisted.
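// A statx(2) field is only trustworthy when the kernel sets the matching bit in
// stx_mask, which is why every extended read below is gated on common.StatXReturned.
// A sketch of that helper, assuming it is a plain mask test (the real definition
// lives in the common package and is not shown in this diff):
//
//	func StatXReturned(mask uint32, want uint32) bool {
//		return (mask & want) == want
//	}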
+ + mode := os.FileMode(common.DEFAULT_FILE_PERM) + if common.StatXReturned(mask, common.STATX_MODE) { + mode = os.FileMode(adapter.FileMode()) + } + + err = os.Chmod(destination, mode) + if err != nil { + return "chmod", err + } + + uid := stat.Uid + if common.StatXReturned(mask, common.STATX_UID) { + uid = adapter.Owner() + } + + gid := stat.Gid + if common.StatXReturned(mask, common.STATX_GID) { + gid = adapter.Group() + } + // set ownership + err = os.Chown(destination, int(uid), int(gid)) + if err != nil { + return "chown", err + } + + atime := time.Unix(stat.Atim.Unix()) + if common.StatXReturned(mask, common.STATX_ATIME) || !adapter.ATime().IsZero() { // workaround for noatime when underlying fs supports atime + atime = adapter.ATime() + } + + mtime := time.Unix(stat.Mtim.Unix()) + if common.StatXReturned(mask, common.STATX_MTIME) { + mtime = adapter.MTime() + } + + // adapt times + err = os.Chtimes(destination, atime, mtime) + if err != nil { + return "chtimes", err + } + } else { + err = os.Chmod(destination, os.FileMode(adapter.FileMode())) // only write permissions + if err != nil { + return "chmod", err + } + err = os.Chown(destination, int(adapter.Owner()), int(adapter.Group())) + if err != nil { + return "chown", err + } + err = os.Chtimes(destination, adapter.ATime(), adapter.MTime()) + if err != nil { + return "chtimes", err + } + } + + return +} + +func (bd *blobDownloader) SetFolderProperties(jptm IJobPartTransferMgr) error { + sip, err := newBlobSourceInfoProvider(jptm) + if err != nil { + return err + } + + bd.txInfo = jptm.Info() // inform our blobDownloader a bit. + + usip := sip.(IUNIXPropertyBearingSourceInfoProvider) + if usip.HasUNIXProperties() { + props, err := usip.GetUNIXProperties() + if err != nil { + return err + } + stage, err := bd.ApplyUnixProperties(props) + + if err != nil { + return fmt.Errorf("set unix properties: %s; %w", stage, err) + } + } + + return nil +} diff --git a/ste/downloader-blob_other.go b/ste/downloader-blob_other.go new file mode 100644 index 000000000..fe44a0e1f --- /dev/null +++ b/ste/downloader-blob_other.go @@ -0,0 +1,7 @@ +// +build !linux + +package ste + +func (bd *blobDownloader) SetFolderProperties(jptm IJobPartTransferMgr) error { + return nil +} diff --git a/ste/downloader.go b/ste/downloader.go index 5f17f53f7..8e1557bea 100644 --- a/ste/downloader.go +++ b/ste/downloader.go @@ -23,6 +23,7 @@ package ste import ( "github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-storage-azcopy/v10/common" + "io" ) // Abstraction of the methods needed to download files/blobs from a remote location @@ -40,12 +41,33 @@ type downloader interface { Epilogue() } +// creationTimeDownloader is a downloader that has custom functionality for creating files +// This is currently only utilized on Linux for persisting file type and reference device (folder, symlink, FIFO, etc.) +type creationTimeDownloader interface { + downloader + // CreateFile is expected to handle + // in some cases (e.g. symlinks) the file may be > 0 bytes, but not need any chunks written. 
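// A caller-side sketch of this contract, mirroring the remoteToLocal_file changes
// later in this diff: when needChunks comes back false (device node, FIFO,
// zero-byte file, ...), the destination is already fully materialized and the
// transfer can go straight to the epilogue:
//
//	dstFile, needChunks, err := ctdl.CreateFile(jptm, info.getDownloadPath(), size, writeThrough, jptm.GetFolderCreationTracker())
//	if err != nil {
//		failFileCreation(err)
//		return
//	}
//	if !needChunks {
//		dl.Prologue(jptm, p)
//		epilogueWithCleanupDownload(jptm, dl, nil, nil)
//		return
//	}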
+	CreateFile(jptm IJobPartTransferMgr, destination string, size int64, writeThrough bool, t FolderCreationTracker) (file io.WriteCloser, needWriteChunks bool, err error)
+}
+
+type unixPropertyAwareDownloader interface {
+	downloader
+
+	ApplyUnixProperties(adapter common.UnixStatAdapter) (stage string, err error)
+}
+
 // folderDownloader is a downloader that can also process folder properties
 type folderDownloader interface {
 	downloader
 	SetFolderProperties(jptm IJobPartTransferMgr) error
 }
 
+// symlinkDownloader is a downloader that can also handle symbolic links.
+type symlinkDownloader interface {
+	downloader
+	CreateSymlink(jptm IJobPartTransferMgr) error
+}
+
 // smbPropertyAwareDownloader is a windows-triggered interface.
 // Code outside of windows-specific files shouldn't implement this ever.
 type smbPropertyAwareDownloader interface {
diff --git a/ste/emptyCloseableReaderAt.go b/ste/emptyCloseableReaderAt.go
new file mode 100644
index 000000000..77022c75e
--- /dev/null
+++ b/ste/emptyCloseableReaderAt.go
@@ -0,0 +1,17 @@
+package ste
+
+import "io"
+
+type emptyCloseableReaderAt struct {
+}
+
+// ReadAt reports end-of-input immediately: the io.ReaderAt contract requires a
+// non-nil error whenever fewer bytes than requested are returned.
+func (e emptyCloseableReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
+	return 0, io.EOF
+}
+
+func (e emptyCloseableReaderAt) Close() error {
+	// no-op
+	return nil
+}
diff --git a/ste/jobStatusManager.go b/ste/jobStatusManager.go
index 62d0c4ac5..7d2a1a827 100755
--- a/ste/jobStatusManager.go
+++ b/ste/jobStatusManager.go
@@ -32,6 +32,7 @@ type JobPartCreatedMsg struct {
 	TotalBytesEnumerated uint64
 	FileTransfers uint32
 	FolderTransfer uint32
+	SymlinkTransfers uint32
 }
 
 type xferDoneMsg = common.TransferDetail
@@ -110,6 +111,7 @@ func (jm *jobMgr) handleStatusUpdateMessage() {
 	js.TotalTransfers += msg.TotalTransfers
 	js.FileTransfers += msg.FileTransfers
 	js.FolderPropertyTransfers += msg.FolderTransfer
+	js.SymlinkTransfers += msg.SymlinkTransfers
 	js.TotalBytesEnumerated += msg.TotalBytesEnumerated
 	js.TotalBytesExpected += msg.TotalBytesEnumerated
diff --git a/ste/mgr-JobMgr.go b/ste/mgr-JobMgr.go
index 762e23917..8a8b05647 100755
--- a/ste/mgr-JobMgr.go
+++ b/ste/mgr-JobMgr.go
@@ -321,9 +321,6 @@ type jobMgr struct {
 	// only a single instance of the prompter is needed for all transfers
 	overwritePrompter *overwritePrompter
 
-	// must have a single instance of this, for the whole job
-	folderCreationTracker FolderCreationTracker
-
 	initMu *sync.Mutex
 	initState *jobMgrInitState
@@ -1014,9 +1011,7 @@ func (jm *jobMgr) transferProcessor(workerID int) {
 			jptm.ReportTransferDone()
 		} else {
 			// TODO fix preceding space
-			if jptm.ShouldLog(pipeline.LogInfo) {
-				jptm.Log(pipeline.LogInfo, fmt.Sprintf("has worker %d which is processing TRANSFER %d", workerID, jptm.(*jobPartTransferMgr).transferIndex))
-			}
+			jptm.Log(pipeline.LogDebug, fmt.Sprintf("has worker %d which is processing TRANSFER %d", workerID, jptm.(*jobPartTransferMgr).transferIndex))
 			jptm.StartJobXfer()
 		}
 	}
diff --git a/ste/mgr-JobPartMgr.go b/ste/mgr-JobPartMgr.go
index d97acc7d3..8b5a8f447 100644
--- a/ste/mgr-JobPartMgr.go
+++ b/ste/mgr-JobPartMgr.go
@@ -174,6 +174,17 @@ func NewBlobPipeline(c azblob.Credential, o azblob.PipelineOptions, r XferRetryO
 		pipeline.MethodFactoryMarker(), // indicates at what stage in the pipeline the method factory is invoked
 		// NewPacerPolicyFactory(p),
 		NewVersionPolicyFactory(),
+		// Bump the service version when using the Cold access tier.
+		pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
+			// TODO: Remove me when bumping the service version is no longer relevant.
+ return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { + if request.Header.Get("x-ms-access-tier") == common.EBlockBlobTier.Cold().String() { + request.Header.Set("x-ms-version", "2021-12-02") + } + + return next.Do(ctx, request) + } + }), NewRequestLogPolicyFactory(RequestLogOptions{ LogWarningIfTryOverThreshold: o.RequestLog.LogWarningIfTryOverThreshold, SyslogDisabled: common.IsForceLoggingDisabled(), @@ -423,7 +434,7 @@ func (jpm *jobPartMgr) ScheduleTransfers(jobCtx context.Context, sourceBlobToken // If the transfer was failed, then while rescheduling the transfer marking it Started. if ts == common.ETransferStatus.Failed() { - jppt.SetTransferStatus(common.ETransferStatus.Started(), true) + jppt.SetTransferStatus(common.ETransferStatus.Restarted(), true) } if _, dst, isFolder := plan.TransferSrcDstStrings(t); isFolder { @@ -454,9 +465,7 @@ func (jpm *jobPartMgr) ScheduleTransfers(jobCtx context.Context, sourceBlobToken // TODO: insert the factory func interface in jptm. // numChunks will be set by the transfer's prologue method } - if jpm.ShouldLog(pipeline.LogInfo) { - jpm.Log(pipeline.LogInfo, fmt.Sprintf("scheduling JobID=%v, Part#=%d, Transfer#=%d, priority=%v", plan.JobID, plan.PartNum, t, plan.Priority)) - } + jpm.Log(pipeline.LogDebug, fmt.Sprintf("scheduling JobID=%v, Part#=%d, Transfer#=%d, priority=%v", plan.JobID, plan.PartNum, t, plan.Priority)) // ===== TEST KNOB relSrc, relDst := plan.TransferSrcDstRelatives(t) @@ -470,7 +479,7 @@ func (jpm *jobPartMgr) ScheduleTransfers(jobCtx context.Context, sourceBlobToken if plan.FromTo.To().IsRemote() { relDst, err = url.PathUnescape(relDst) } - relDst = strings.TrimPrefix(relSrc, common.AZCOPY_PATH_SEPARATOR_STRING) + relDst = strings.TrimPrefix(relDst, common.AZCOPY_PATH_SEPARATOR_STRING) common.PanicIfErr(err) _, srcOk := DebugSkipFiles[relSrc] @@ -525,15 +534,16 @@ func (jpm *jobPartMgr) createPipelines(ctx context.Context, sourceBlobToken azbl if jpm.credInfo.CredentialType == common.ECredentialType.Unknown() { credInfo = jpm.jobMgr.getInMemoryTransitJobState().CredentialInfo } - userAgent := common.UserAgent + var userAgent string if fromTo.From() == common.ELocation.S3() { userAgent = common.S3ImportUserAgent } else if fromTo.From() == common.ELocation.GCP() { userAgent = common.GCPImportUserAgent } else if fromTo.From() == common.ELocation.Benchmark() || fromTo.To() == common.ELocation.Benchmark() { userAgent = common.BenchmarkUserAgent + } else { + userAgent = common.GetLifecycleMgr().AddUserAgentPrefix(common.UserAgent) } - userAgent = common.GetLifecycleMgr().AddUserAgentPrefix(common.UserAgent) credOption := common.CredentialOpOptions{ LogInfo: func(str string) { jpm.Log(pipeline.LogInfo, str) }, @@ -552,8 +562,8 @@ func (jpm *jobPartMgr) createPipelines(ctx context.Context, sourceBlobToken azbl var statsAccForSip *PipelineNetworkStats = nil // we don't accumulate stats on the source info provider - // Create source info provider's pipeline for S2S copy. - if fromTo == common.EFromTo.BlobBlob() || fromTo == common.EFromTo.BlobFile() { + // Create source info provider's pipeline for S2S copy or download (in some cases). 
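// Note for the condition below: BlobLocal joins the S2S cases because the new
// symlink and POSIX-property download paths read the source blob directly. For
// example, blobSourceInfoProvider.ReadLink (later in this diff) issues a Download
// over this source pipeline; a sketch of the consuming side:
//
//	symsip := sip.(ISymlinkBearingSourceInfoProvider) // blob always implements this
//	target, err := symsip.ReadLink()                  // GETs the blob body (the link target)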
+ if fromTo == common.EFromTo.BlobBlob() || fromTo == common.EFromTo.BlobFile() || fromTo == common.EFromTo.BlobLocal() { var sourceCred azblob.Credential = azblob.NewAnonymousCredential() jobState := jpm.jobMgr.getInMemoryTransitJobState() if fromTo.To() == common.ELocation.Blob() && jobState.S2SSourceCredentialType.IsAzureOAuth() { @@ -870,12 +880,6 @@ func (jpm *jobPartMgr) ReportTransferDone(status common.TransferStatus) (transfe transfersDone = atomic.AddUint32(&jpm.atomicTransfersDone, 1) jpm.updateJobPartProgress(status) - // Add a safety count-check - - if jpm.ShouldLog(pipeline.LogInfo) { - plan := jpm.Plan() - jpm.Log(pipeline.LogInfo, fmt.Sprintf("JobID=%v, Part#=%d, TransfersDone=%d of %d", plan.JobID, plan.PartNum, transfersDone, plan.NumTransfers)) - } if transfersDone == jpm.planMMF.Plan().NumTransfers { jppi := jobPartProgressInfo{ transfersCompleted: int(atomic.LoadUint32(&jpm.atomicTransfersCompleted)), @@ -886,6 +890,10 @@ func (jpm *jobPartMgr) ReportTransferDone(status common.TransferStatus) (transfe jpm.Plan().SetJobPartStatus(common.EJobStatus.EnhanceJobStatusInfo(jppi.transfersSkipped > 0, jppi.transfersFailed > 0, jppi.transfersCompleted > 0)) jpm.jobMgr.ReportJobPartDone(jppi) + + jpm.Log(pipeline.LogInfo, fmt.Sprintf("JobID=%v, Part#=%d, TransfersDone=%d of %d", + jpm.planMMF.Plan().JobID, jpm.planMMF.Plan().PartNum, transfersDone, + jpm.planMMF.Plan().NumTransfers)) } return transfersDone } diff --git a/ste/mgr-JobPartTransferMgr.go b/ste/mgr-JobPartTransferMgr.go index 1a7160761..1d4ae2028 100644 --- a/ste/mgr-JobPartTransferMgr.go +++ b/ste/mgr-JobPartTransferMgr.go @@ -96,6 +96,8 @@ type IJobPartTransferMgr interface { PropertiesToTransfer() common.SetPropertiesFlags ResetSourceSize() // sets source size to 0 (made to be used by setProperties command to make number of bytes transferred = 0) SuccessfulBytesTransferred() int64 + TransferIndex() (partNum, transferIndex uint32) + RestartedTransfer() bool } type TransferInfo struct { @@ -120,12 +122,14 @@ type TransferInfo struct { SrcBlobType azblob.BlobType // used for both S2S and for downloads to local from blob S2SSrcBlobTier azblob.AccessTierType // AccessTierType (string) is used to accommodate service-side support matrix change. - // NumChunks is the number of chunks in which transfer will be split into while uploading the transfer. - // NumChunks is not used in case of AppendBlob transfer. 
-	NumChunks uint16
 	RehydratePriority azblob.RehydratePriorityType
 }
+
+func (i TransferInfo) IsFilePropertiesTransfer() bool {
+	return i.EntityType == common.EEntityType.FileProperties()
+}
+
 func (i TransferInfo) IsFolderPropertiesTransfer() bool {
 	return i.EntityType == common.EEntityType.Folder()
 }
@@ -150,6 +154,8 @@ func (i TransferInfo) ShouldTransferLastWriteTime() bool {
 func (i TransferInfo) entityTypeLogIndicator() string {
 	if i.IsFolderPropertiesTransfer() {
 		return "(folder properties) "
+	} else if i.IsFilePropertiesTransfer() {
+		return "(file properties) "
 	} else {
 		return ""
 	}
@@ -341,7 +347,7 @@ func (jptm *jobPartTransferMgr) Info() TransferInfo {
 	// does not exceed 50000 (max number of blocks per blob)
 	if blockSize == 0 {
 		blockSize = common.DefaultBlockBlobBlockSize
-		for ; sourceSize >= common.MaxNumberOfBlocksPerBlob * blockSize; blockSize = 2 * blockSize {
+		for ; sourceSize >= common.MaxNumberOfBlocksPerBlob*blockSize; blockSize = 2 * blockSize {
 			if blockSize > common.BlockSizeThreshold {
 				/*
 				 * For a RAM usage of 0.5G/core, we would have 4G memory on typical 8 core device, meaning at a blockSize of 256M,
@@ -433,9 +439,9 @@ func (jptm *jobPartTransferMgr) FileCountLimiter() common.CacheLimiter {
 // As at Oct 2019, cases where we mutate destination names are
 // (i) when destination is Windows or Azure Files, and source contains characters unsupported at the destination
 // (ii) when downloading with --decompress and there are two files that differ only in an extension that we will strip
-//e.g. foo.txt and foo.txt.gz (if we decompress the latter, we'll strip the extension and the names will collide)
+// e.g. foo.txt and foo.txt.gz (if we decompress the latter, we'll strip the extension and the names will collide)
 // (iii) For completeness, there's also bucket->container name resolution when copying from S3, but that is not expected to ever
-//create collisions, since it already takes steps to prevent them.
+// create collisions, since it already takes steps to prevent them.
 func (jptm *jobPartTransferMgr) WaitUntilLockDestination(ctx context.Context) error {
 	if strings.EqualFold(jptm.Info().Destination, common.Dev_Null) {
 		return nil // nothing to lock
@@ -551,6 +557,16 @@ func (jptm *jobPartTransferMgr) ResetSourceSize() {
 	jptm.transferInfo.SourceSize = 0
 }
 
+// This will identify a transfer (file) within a job
+func (jptm *jobPartTransferMgr) TransferIndex() (partNum, transferIndex uint32) {
+	return uint32(jptm.jobPartMgr.Plan().PartNum), jptm.transferIndex
+}
+
+func (jptm *jobPartTransferMgr) RestartedTransfer() bool {
+	return (jptm.jobPartMgr.Plan().FromTo.To() == common.ELocation.Blob() &&
+		jptm.TransferStatusIgnoringCancellation() == common.ETransferStatus.Restarted())
+}
+
 // JobHasLowFileCount returns an estimate of whether we only have a very small number of files in the overall job
 // (An "estimate" because it actually only looks at the current job part)
 func (jptm *jobPartTransferMgr) JobHasLowFileCount() bool {
diff --git a/ste/s2sCopier-URLToBlob.go b/ste/s2sCopier-URLToBlob.go
index 82618cfd2..bb1fca4f7 100644
--- a/ste/s2sCopier-URLToBlob.go
+++ b/ste/s2sCopier-URLToBlob.go
@@ -21,7 +21,6 @@ package ste
 
 import (
-	"errors"
 	"fmt"
 	"net/url"
 
@@ -56,7 +55,7 @@ func newURLToBlobCopier(jptm IJobPartTransferMgr, destination string, p pipeline
 	// I don't think it would ever reach here if the source URL failed to parse, but this is a sanity check.
 	if err != nil {
-		return nil, errors.New(fmt.Sprintf("Failed to parse URL %s in scheduler. Check sanity.", jptm.Info().Source))
+		return nil, fmt.Errorf("Failed to parse URL %s in scheduler. Check sanity.", jptm.Info().Source)
 	}
 
 	fileName := srcURL.Path
@@ -79,6 +78,8 @@ func newURLToBlobCopier(jptm IJobPartTransferMgr, destination string, p pipeline
 	if jptm.Info().IsFolderPropertiesTransfer() {
 		return newBlobFolderSender(jptm, destination, p, pacer, srcInfoProvider)
+	} else if jptm.Info().EntityType == common.EEntityType.Symlink() {
+		return newBlobSymlinkSender(jptm, destination, p, pacer, srcInfoProvider)
 	}
 
 	switch targetBlobType {
diff --git a/ste/sender-appendBlob.go b/ste/sender-appendBlob.go
index 5cef8331c..55c502de8 100644
--- a/ste/sender-appendBlob.go
+++ b/ste/sender-appendBlob.go
@@ -47,6 +47,8 @@ type appendBlobSenderBase struct {
 	blobTagsToApply azblob.BlobTagsMap
 	cpkToApply azblob.ClientProvidedKeyOptions
 
+	sip ISourceInfoProvider
+
 	soleChunkFuncSemaphore *semaphore.Weighted
 }
 
@@ -91,6 +93,7 @@ func newAppendBlobSenderBase(jptm IJobPartTransferMgr, destination string, p pip
 		headersToApply: props.SrcHTTPHeaders.ToAzBlobHTTPHeaders(),
 		metadataToApply: props.SrcMetadata.ToAzBlobMetadata(),
 		blobTagsToApply: props.SrcBlobTags.ToAzBlobTagsMap(),
+		sip: srcInfoProvider,
 		cpkToApply: cpkToApply,
 		soleChunkFuncSemaphore: semaphore.NewWeighted(1)}, nil
 }
diff --git a/ste/sender-blobFolders.go b/ste/sender-blobFolders.go
index 4b4904055..2a2f53fba 100644
--- a/ste/sender-blobFolders.go
+++ b/ste/sender-blobFolders.go
@@ -46,9 +46,9 @@ func newBlobFolderSender(jptm IJobPartTransferMgr, destination string, p pipelin
 	}
 	fromTo := jptm.FromTo()
 	if fromTo.IsUpload() {
-		out = &dummyUploader{fsend}
+		out = &dummyFolderUploader{fsend}
 	} else {
-		out = &dummys2sCopier{fsend}
+		out = &dummyFolderS2SCopier{fsend}
 	}
 
 	return out, nil
@@ -76,22 +76,25 @@ func (b *blobFolderSender) setDatalakeACLs() {
 func (b *blobFolderSender) overwriteDFSProperties() (string, error) {
 	b.jptm.Log(pipeline.LogWarning, "It is impossible to completely overwrite a folder with existing content under it on a hierarchical namespace storage account. A best-effort attempt will be made, but if CPK does not match the transfer will fail.")
 
-	b.metadataToApply["hdi_isfolder"] = "true" // Set folder metadata flag
 	err := b.getExtraProperties()
 	if err != nil {
 		return "Get Extra Properties", fmt.Errorf("when getting additional folder properties: %w", err)
 	}
 
+	// do not set the folder flag, as it's invalid to modify a folder with it
+	delete(b.metadataToApply, "hdi_isfolder")
+
 	// SetMetadata can set CPK if it wasn't specified prior. This is not a "full" overwrite, but a best-effort overwrite.
 	_, err = b.destination.SetMetadata(b.jptm.Context(), b.metadataToApply, azblob.BlobAccessConditions{}, b.cpkToApply)
 	if err != nil {
 		return "Set Metadata", fmt.Errorf("A best-effort overwrite was attempted; CPK errors cannot be handled when the blob cannot be deleted.\n%w", err)
 	}
-	_, err = b.destination.SetTags(b.jptm.Context(), nil, nil, nil, b.blobTagsToApply)
-	if err != nil {
-		return "Set Blob Tags", err
-	}
+	// SetTags is not yet supported by the blob API on HNS accounts (it errors); re-enable later.
+ //_, err = b.destination.SetTags(b.jptm.Context(), nil, nil, nil, b.blobTagsToApply) + //if err != nil { + // return "Set Blob Tags", err + //} _, err = b.destination.SetHTTPHeaders(b.jptm.Context(), b.headersToAppply, azblob.BlobAccessConditions{}) if err != nil { return "Set HTTP Headers", err @@ -105,9 +108,36 @@ func (b *blobFolderSender) overwriteDFSProperties() (string, error) { return "", nil } +func (b *blobFolderSender) SetContainerACL() error { + bURLParts := azblob.NewBlobURLParts(b.destination.URL()) + bURLParts.BlobName = "/" // Container-level ACLs NEED a / + bURLParts.Host = strings.ReplaceAll(bURLParts.Host, ".blob", ".dfs") + // todo: jank, and violates the principle of interfaces + fileURL := azbfs.NewFileSystemURL(bURLParts.URL(), b.jptm.(*jobPartTransferMgr).jobPartMgr.(*jobPartMgr).secondaryPipeline) + + // We know for a fact our source is a "blob". + acl, err := b.sip.(*blobSourceInfoProvider).AccessControl() + if err != nil { + b.jptm.FailActiveSend("Grabbing source ACLs", err) + return folderPropertiesSetInCreation{} // standard completion will detect failure + } + acl.Permissions = "" // Since we're sending the full ACL, Permissions is irrelevant. + _, err = fileURL.SetAccessControl(b.jptm.Context(), acl) + if err != nil { + b.jptm.FailActiveSend("Putting ACLs", err) + return folderPropertiesSetInCreation{} // standard completion will detect failure + } + + return folderPropertiesSetInCreation{} // standard completion will handle the rest +} + func (b *blobFolderSender) EnsureFolderExists() error { t := b.jptm.GetFolderCreationTracker() + if azblob.NewBlobURLParts(b.destination.URL()).BlobName == "" { + return b.SetContainerACL() // Can't do much with a container, but it is here. + } + _, err := b.destination.GetProperties(b.jptm.Context(), azblob.BlobAccessConditions{}, b.cpkToApply) if err != nil { if stgErr, ok := err.(azblob.StorageError); !(ok && stgErr.ServiceCode() == azblob.ServiceCodeBlobNotFound) { @@ -177,12 +207,6 @@ func (b *blobFolderSender) EnsureFolderExists() error { } return nil - - if err != nil { - return err - } - - return folderPropertiesSetInCreation{} } func (b *blobFolderSender) SetFolderProperties() error { @@ -230,25 +254,25 @@ func (b *blobFolderSender) GetDestinationLength() (int64, error) { // implement uploader to handle commonSenderCompletion -type dummyUploader struct { +type dummyFolderUploader struct { blobFolderSender } -func (d dummyUploader) GenerateUploadFunc(chunkID common.ChunkID, blockIndex int32, reader common.SingleChunkReader, chunkIsWholeFile bool) chunkFunc { +func (d dummyFolderUploader) GenerateUploadFunc(chunkID common.ChunkID, blockIndex int32, reader common.SingleChunkReader, chunkIsWholeFile bool) chunkFunc { panic("this sender only sends folders.") } -func (d dummyUploader) Md5Channel() chan<- []byte { +func (d dummyFolderUploader) Md5Channel() chan<- []byte { panic("this sender only sends folders.") } // ditto for s2sCopier -type dummys2sCopier struct { +type dummyFolderS2SCopier struct { blobFolderSender } -func (d dummys2sCopier) GenerateCopyFunc(chunkID common.ChunkID, blockIndex int32, adjustedChunkSize int64, chunkIsWholeFile bool) chunkFunc { +func (d dummyFolderS2SCopier) GenerateCopyFunc(chunkID common.ChunkID, blockIndex int32, adjustedChunkSize int64, chunkIsWholeFile bool) chunkFunc { // TODO implement me panic("implement me") } diff --git a/ste/sender-blobSymlinks.go b/ste/sender-blobSymlinks.go new file mode 100644 index 000000000..96f3f35e9 --- /dev/null +++ b/ste/sender-blobSymlinks.go @@ -0,0 
+1,130 @@ +package ste + +import ( + "fmt" + "github.com/Azure/azure-pipeline-go/pipeline" + "github.com/Azure/azure-storage-azcopy/v10/common" + "github.com/Azure/azure-storage-blob-go/azblob" + "net/url" + "strings" + "time" +) + +type blobSymlinkSender struct { + destBlockBlobURL azblob.BlockBlobURL + jptm IJobPartTransferMgr + sip ISourceInfoProvider + headersToApply azblob.BlobHTTPHeaders + metadataToApply azblob.Metadata + destBlobTier azblob.AccessTierType + blobTagsToApply azblob.BlobTagsMap + cpkToApply azblob.ClientProvidedKeyOptions +} + +func newBlobSymlinkSender(jptm IJobPartTransferMgr, destination string, p pipeline.Pipeline, pacer pacer, sip ISourceInfoProvider) (sender, error) { + destURL, err := url.Parse(destination) + if err != nil { + return nil, err + } + + destBlockBlobURL := azblob.NewBlockBlobURL(*destURL, p) + + props, err := sip.Properties() + if err != nil { + return nil, err + } + + destBlobTier := azblob.AccessTierNone + blockBlobTierOverride, _ := jptm.BlobTiers() + if blockBlobTierOverride != common.EBlockBlobTier.None() { + destBlobTier = blockBlobTierOverride.ToAccessTierType() + } + + var out sender + ssend := blobSymlinkSender{ + jptm: jptm, + sip: sip, + destBlockBlobURL: destBlockBlobURL, + metadataToApply: props.SrcMetadata.Clone().ToAzBlobMetadata(), // We're going to modify it, so we should clone it. + headersToApply: props.SrcHTTPHeaders.ToAzBlobHTTPHeaders(), + blobTagsToApply: props.SrcBlobTags.ToAzBlobTagsMap(), + cpkToApply: common.ToClientProvidedKeyOptions(jptm.CpkInfo(), jptm.CpkScopeInfo()), + destBlobTier: destBlobTier, + } + fromTo := jptm.FromTo() + if fromTo.IsUpload() { + out = &dummySymlinkUploader{ssend} + } else { + out = &dummySymlinkS2SCopier{ssend} + } + + return out, nil +} + +func (s *blobSymlinkSender) SendSymlink(linkData string) error { + err := s.getExtraProperties() + if err != nil { + return fmt.Errorf("when getting additional folder properties: %w", err) + } + s.metadataToApply["is_symlink"] = "true" + + _, err = s.destBlockBlobURL.Upload(s.jptm.Context(), strings.NewReader(linkData), s.headersToApply, s.metadataToApply, azblob.BlobAccessConditions{}, s.destBlobTier, s.blobTagsToApply, s.cpkToApply, azblob.ImmutabilityPolicyOptions{}) + return err +} + +// ===== Implement sender so that it can be returned in newBlobUploader. ===== +/* + It's OK to just panic all of these out, as they will never get called in a symlink transfer. 
+*/
+
+func (s *blobSymlinkSender) ChunkSize() int64 {
+	panic("this sender only sends symlinks.")
+}
+
+func (s *blobSymlinkSender) NumChunks() uint32 {
+	panic("this sender only sends symlinks.")
+}
+
+func (s *blobSymlinkSender) RemoteFileExists() (bool, time.Time, error) {
+	panic("this sender only sends symlinks.")
+}
+
+func (s *blobSymlinkSender) Prologue(state common.PrologueState) (destinationModified bool) {
+	panic("this sender only sends symlinks.")
+}
+
+func (s *blobSymlinkSender) Epilogue() {
+	panic("this sender only sends symlinks.")
+}
+
+func (s *blobSymlinkSender) Cleanup() {
+	panic("this sender only sends symlinks.")
+}
+
+func (s *blobSymlinkSender) GetDestinationLength() (int64, error) {
+	panic("this sender only sends symlinks.")
+}
+
+// implement uploader to handle commonSenderCompletion
+
+type dummySymlinkUploader struct {
+	blobSymlinkSender
+}
+
+func (d dummySymlinkUploader) GenerateUploadFunc(chunkID common.ChunkID, blockIndex int32, reader common.SingleChunkReader, chunkIsWholeFile bool) chunkFunc {
+	panic("this sender only sends symlinks.")
+}
+
+func (d dummySymlinkUploader) Md5Channel() chan<- []byte {
+	panic("this sender only sends symlinks.")
+}
+
+// ditto for s2sCopier
+
+type dummySymlinkS2SCopier struct {
+	blobSymlinkSender
+}
+
+func (d dummySymlinkS2SCopier) GenerateCopyFunc(chunkID common.ChunkID, blockIndex int32, adjustedChunkSize int64, chunkIsWholeFile bool) chunkFunc {
+	panic("this sender only sends symlinks.")
+}
diff --git a/ste/sender-blobSymlinks_linux.go b/ste/sender-blobSymlinks_linux.go
new file mode 100644
index 000000000..b4e55d4e3
--- /dev/null
+++ b/ste/sender-blobSymlinks_linux.go
@@ -0,0 +1,31 @@
+package ste
+
+import (
+	"errors"
+	"fmt"
+	"github.com/Azure/azure-pipeline-go/pipeline"
+	"github.com/Azure/azure-storage-azcopy/v10/common"
+)
+
+func (s *blobSymlinkSender) getExtraProperties() error {
+	if s.jptm.Info().PreservePOSIXProperties {
+		if unixSIP, ok := s.sip.(IUNIXPropertyBearingSourceInfoProvider); ok {
+			// Clone the metadata before we write to it, we shouldn't be writing to the same metadata as every other blob.
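// The Clone below matters because the parsed source properties are shared: every
// transfer in the job part sees the same underlying map. The hazard in miniature:
//
//	shared := props.SrcMetadata     // same map handed to many senders
//	shared["is_symlink"] = "true"   // would leak into unrelated blobs' metadata
//	private := shared.Clone()       // safe: mutate the copy instead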
+ s.metadataToApply = common.Metadata(s.metadataToApply).Clone().ToAzBlobMetadata() + + statAdapter, err := unixSIP.GetUNIXProperties() + if err != nil { + return err + } + + s.jptm.Log(pipeline.LogInfo, fmt.Sprintf("MODE: %b", statAdapter.FileMode())) + if !(statAdapter.FileMode()&common.S_IFLNK == common.S_IFLNK) { // sanity check this is actually targeting the symlink + return errors.New("sanity check: GetUNIXProperties did not return symlink properties") + } + + common.AddStatToBlobMetadata(statAdapter, s.metadataToApply) + } + } + + return nil +} diff --git a/ste/sender-blobSymlinks_other.go b/ste/sender-blobSymlinks_other.go new file mode 100644 index 000000000..62d6652a1 --- /dev/null +++ b/ste/sender-blobSymlinks_other.go @@ -0,0 +1,7 @@ +// +build !linux + +package ste + +func (s *blobSymlinkSender) getExtraProperties() error { + return nil +} diff --git a/ste/sender-blockBlob.go b/ste/sender-blockBlob.go index 383d52fc2..9bd6bf793 100644 --- a/ste/sender-blockBlob.go +++ b/ste/sender-blockBlob.go @@ -26,10 +26,12 @@ import ( "errors" "fmt" "net/url" + "strconv" "strings" "sync" "sync/atomic" "time" + "unsafe" "github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-storage-azcopy/v10/azbfs" @@ -61,6 +63,8 @@ type blockBlobSenderBase struct { atomicChunksWritten int32 atomicPutListIndicator int32 muBlockIDs *sync.Mutex + blockNamePrefix string + completedBlockList map[int]string } func getVerifiedChunkParams(transferInfo TransferInfo, memLimit int64) (chunkSize int64, numChunks uint32, err error) { @@ -104,6 +108,17 @@ func getVerifiedChunkParams(transferInfo TransferInfo, memLimit int64) (chunkSiz return } +// Current size of block names in AzCopy is 48B. To be consistent with this, +// we have to generate a 36B string and then base64-encode this to conform +// to the same size. We generate prefix here. +// Block Names of blobs are of format noted below. 
+// <5B empty placeholder><16B GUID of AzCopy re-interpreted as string><5B PartNum><5B Index in the jobPart><5B blockNum> +func getBlockNamePrefix(jobID common.JobID, partNum uint32, transferIndex uint32) string { + jobIdStr := string((*[16]byte)(unsafe.Pointer(&jobID))[:]) + placeHolderPrefix := "00000" + return fmt.Sprintf("%s%s%05d%05d", placeHolderPrefix, jobIdStr, partNum, transferIndex) +} + func newBlockBlobSenderBase(jptm IJobPartTransferMgr, destination string, p pipeline.Pipeline, pacer pacer, srcInfoProvider ISourceInfoProvider, inferredAccessTierType azblob.AccessTierType) (*blockBlobSenderBase, error) { // compute chunk count chunkSize, numChunks, err := getVerifiedChunkParams(jptm.Info(), jptm.CacheLimiter().Limit()) @@ -138,6 +153,8 @@ func newBlockBlobSenderBase(jptm IJobPartTransferMgr, destination string, p pipe // Once track2 goes live, we'll not need to do this conversion/casting and can directly use CpkInfo & CpkScopeInfo cpkToApply := common.ToClientProvidedKeyOptions(jptm.CpkInfo(), jptm.CpkScopeInfo()) + partNum, transferIndex := jptm.TransferIndex() + return &blockBlobSenderBase{ jptm: jptm, sip: srcInfoProvider, @@ -151,7 +168,9 @@ func newBlockBlobSenderBase(jptm IJobPartTransferMgr, destination string, p pipe blobTagsToApply: props.SrcBlobTags.ToAzBlobTagsMap(), destBlobTier: destBlobTier, cpkToApply: cpkToApply, - muBlockIDs: &sync.Mutex{}}, nil + muBlockIDs: &sync.Mutex{}, + blockNamePrefix: getBlockNamePrefix(jptm.Info().JobID, partNum, transferIndex), + }, nil } func (s *blockBlobSenderBase) SendableEntityType() common.EntityType { @@ -171,6 +190,9 @@ func (s *blockBlobSenderBase) RemoteFileExists() (bool, time.Time, error) { } func (s *blockBlobSenderBase) Prologue(ps common.PrologueState) (destinationModified bool) { + if s.jptm.RestartedTransfer() { + s.buildCommittedBlockMap() + } if s.jptm.ShouldInferContentType() { s.headersToApply.ContentType = ps.GetInferredContentType(s.jptm) } @@ -273,21 +295,21 @@ func (s *blockBlobSenderBase) Cleanup() { } } -//Currently we've common Metadata Copier across all senders for block blob. +// Currently we've common Metadata Copier across all senders for block blob. func (s *blockBlobSenderBase) GenerateCopyMetadata(id common.ChunkID) chunkFunc { return createChunkFunc(true, s.jptm, id, func() { if unixSIP, ok := s.sip.(IUNIXPropertyBearingSourceInfoProvider); ok { // Clone the metadata before we write to it, we shouldn't be writing to the same metadata as every other blob. 
s.metadataToApply = common.Metadata(s.metadataToApply).Clone().ToAzBlobMetadata() - + statAdapter, err := unixSIP.GetUNIXProperties() if err != nil { s.jptm.FailActiveSend("GetUNIXProperties", err) } - + common.AddStatToBlobMetadata(statAdapter, s.metadataToApply) } - _, err := s.destBlockBlobURL.SetMetadata(s.jptm.Context(), s.metadataToApply, azblob.BlobAccessConditions{}, s.cpkToApply) + _, err := s.destBlockBlobURL.SetMetadata(s.jptm.Context(), s.metadataToApply, azblob.BlobAccessConditions{}, s.cpkToApply) if err != nil { s.jptm.FailActiveSend("Setting Metadata", err) return @@ -304,7 +326,69 @@ func (s *blockBlobSenderBase) setBlockID(index int32, value string) { s.blockIDs[index] = value } -func (s *blockBlobSenderBase) generateEncodedBlockID() string { - blockID := common.NewUUID().String() - return base64.StdEncoding.EncodeToString([]byte(blockID)) +func (s *blockBlobSenderBase) generateEncodedBlockID(index int32) string { + return common.GenerateBlockBlobBlockID(s.blockNamePrefix, index) +} + +func (s *blockBlobSenderBase) buildCommittedBlockMap() { + invalidAzCopyBlockNameMsg := "buildCommittedBlockMap: Found blocks which are not committed by AzCopy. Restarting whole file" + changedChunkSize := "buildCommittedBlockMap: Chunksize mismatch on uncommitted blocks" + list := make(map[int]string) + + if common.GetLifecycleMgr().GetEnvironmentVariable(common.EEnvironmentVariable.DisableBlobTransferResume()) == "true" { + return + } + + blockList, err := s.destBlockBlobURL.GetBlockList(s.jptm.Context(), azblob.BlockListUncommitted, azblob.LeaseAccessConditions{}) + if err != nil { + s.jptm.LogAtLevelForCurrentTransfer(pipeline.LogError, "Failed to get blocklist. Restarting whole file.") + return + } + + if len(blockList.UncommittedBlocks) == 0 { + s.jptm.LogAtLevelForCurrentTransfer(pipeline.LogDebug, "No uncommitted chunks found.") + return + } + + // We return empty list if + // 1. We find chunks by a different actor + // 2. 
Chunk size differs
+	for _, block := range blockList.UncommittedBlocks {
+		if len(block.Name) != common.AZCOPY_BLOCKNAME_LENGTH {
+			s.jptm.LogAtLevelForCurrentTransfer(pipeline.LogDebug, invalidAzCopyBlockNameMsg)
+			return
+		}
+
+		tmp, err := base64.StdEncoding.DecodeString(block.Name)
+		decodedBlockName := string(tmp)
+		if err != nil || !strings.HasPrefix(decodedBlockName, s.blockNamePrefix) {
+			s.jptm.LogAtLevelForCurrentTransfer(pipeline.LogDebug, invalidAzCopyBlockNameMsg)
+			return
+		}
+
+		index, err := strconv.Atoi(decodedBlockName[len(s.blockNamePrefix):])
+		if err != nil || index < 0 || index > int(s.numChunks) {
+			s.jptm.LogAtLevelForCurrentTransfer(pipeline.LogDebug, invalidAzCopyBlockNameMsg)
+			return
+		}
+
+		// Last chunk may have different blockSize
+		if block.Size != s.ChunkSize() && index != int(s.numChunks) {
+			s.jptm.LogAtLevelForCurrentTransfer(pipeline.LogDebug, changedChunkSize)
+			return
+		}
+
+		list[index] = decodedBlockName
+	}
+
+	// We are here only if all the uncommitted blocks were uploaded by this job with the same blockSize
+	s.completedBlockList = list
+}
+
+func (s *blockBlobSenderBase) ChunkAlreadyTransferred(index int32) bool {
+	if s.completedBlockList == nil { // no committed-block map was built, so nothing can be skipped
+		return false
+	}
+	_, ok := s.completedBlockList[int(index)]
+	return ok
+}
diff --git a/ste/sender-blockBlobFromLocal.go b/ste/sender-blockBlobFromLocal.go
index fdaf3f460..9aead673e 100644
--- a/ste/sender-blockBlobFromLocal.go
+++ b/ste/sender-blockBlobFromLocal.go
@@ -22,6 +22,7 @@ package ste
 
 import (
 	"bytes"
+	"fmt"
 	"sync/atomic"
 
 	"github.com/Azure/azure-pipeline-go/pipeline"
@@ -86,7 +87,14 @@ func (u *blockBlobUploader) GenerateUploadFunc(id common.ChunkID, blockIndex int
 func (u *blockBlobUploader) generatePutBlock(id common.ChunkID, blockIndex int32, reader common.SingleChunkReader) chunkFunc {
 	return createSendToRemoteChunkFunc(u.jptm, id, func() {
 		// step 1: generate block ID
-		encodedBlockID := u.generateEncodedBlockID()
+		encodedBlockID := u.generateEncodedBlockID(blockIndex)
 
 		// step 2: save the block ID into the list of block IDs
 		u.setBlockID(blockIndex, encodedBlockID)
+
+		// a previously uploaded block can skip the wire, but only after its ID is recorded above, so the final PutBlockList still commits it
+		if u.ChunkAlreadyTransferred(blockIndex) {
+			u.jptm.LogAtLevelForCurrentTransfer(pipeline.LogDebug,
+				fmt.Sprintf("Skipping chunk %d as it was already transferred.", blockIndex))
+			atomic.AddInt32(&u.atomicChunksWritten, 1)
+			return
+		}
diff --git a/ste/sender-blockBlobFromURL.go b/ste/sender-blockBlobFromURL.go
index 4a872ff79..df95adfb1 100644
--- a/ste/sender-blockBlobFromURL.go
+++ b/ste/sender-blockBlobFromURL.go
@@ -22,6 +22,7 @@ package ste
 
 import (
 	"bytes"
+	"fmt"
 	"net/url"
 	"sync/atomic"
 
@@ -126,11 +127,17 @@ func (c *urlToBlockBlobCopier) generateCreateEmptyBlob(id common.ChunkID) chunkF
 func (c *urlToBlockBlobCopier) generatePutBlockFromURL(id common.ChunkID, blockIndex int32, adjustedChunkSize int64) chunkFunc {
 	return createSendToRemoteChunkFunc(c.jptm, id, func() {
 		// step 1: generate block ID
-		encodedBlockID := c.generateEncodedBlockID()
+		encodedBlockID := c.generateEncodedBlockID(blockIndex)
 
 		// step 2: save the block ID into the list of block IDs
 		c.setBlockID(blockIndex, encodedBlockID)
 
+		if c.ChunkAlreadyTransferred(blockIndex) {
+			c.jptm.LogAtLevelForCurrentTransfer(pipeline.LogDebug, fmt.Sprintf("Skipping chunk %d as it was already transferred.", blockIndex))
+			atomic.AddInt32(&c.atomicChunksWritten, 1)
+			return
+		}
+
 		// step 3: put block to remote
 		c.jptm.LogChunkStatus(id, common.EWaitReason.S2SCopyOnWire())
diff --git a/ste/sender-pageBlob.go b/ste/sender-pageBlob.go
index 8b52e95bf..4adcaf3d7 100644
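Stepping back before the page-blob changes: the resume logic above depends entirely on the deterministic 48-byte block-name scheme documented at getBlockNamePrefix. A self-contained sketch of the round trip (generateBlockID is a hypothetical stand-in for common.GenerateBlockBlobBlockID, whose definition is not part of this diff):

package main

import (
	"encoding/base64"
	"fmt"
	"strconv"
	"strings"
)

// 31-byte prefix (5B placeholder + 16B job ID + 5B part + 5B transfer) plus a
// 5-digit block index = 36 bytes; base64 expands that to the fixed 48-byte
// name that buildCommittedBlockMap validates against AZCOPY_BLOCKNAME_LENGTH.
func generateBlockID(prefix string, index int32) string {
	return base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s%05d", prefix, index)))
}

func main() {
	prefix := strings.Repeat("x", 31) // stand-in for the real job/part/transfer prefix
	name := generateBlockID(prefix, 7)
	fmt.Println(len(name)) // 48

	// decode side, as buildCommittedBlockMap does it
	raw, _ := base64.StdEncoding.DecodeString(name)
	index, _ := strconv.Atoi(string(raw)[len(prefix):])
	fmt.Println(index) // 7
}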
--- a/ste/sender-pageBlob.go +++ b/ste/sender-pageBlob.go @@ -206,8 +206,8 @@ func (s *pageBlobSenderBase) Prologue(ps common.PrologueState) (destinationModif return } if s.srcSize != p.ContentLength() { - sizeErr := errors.New(fmt.Sprintf("source file is not same size as the destination page blob. Source size is %d bytes but destination size is %d bytes. Re-create the destination with exactly the right size. E.g. see parameter UploadSizeInBytes in PowerShell's New-AzDiskConfig. Ensure the source is a fixed-size VHD", - s.srcSize, p.ContentLength())) + sizeErr := fmt.Errorf("source file is not same size as the destination page blob. Source size is %d bytes but destination size is %d bytes. Re-create the destination with exactly the right size. E.g. see parameter UploadSizeInBytes in PowerShell's New-AzDiskConfig. Ensure the source is a fixed-size VHD", + s.srcSize, p.ContentLength()) s.jptm.FailActiveSend("Checking size of managed disk blob", sizeErr) return } diff --git a/ste/sender.go b/ste/sender.go index 4e59733c5..152c3c7d2 100644 --- a/ste/sender.go +++ b/ste/sender.go @@ -100,6 +100,13 @@ func (f folderPropertiesNotOverwroteInCreation) Error() string { panic("Not a real error") } +///////////////////////////////////////////////////////////////////////////////////////////////// +// symlinkSender is a sender that also knows how to send symlink properties +///////////////////////////////////////////////////////////////////////////////////////////////// +type symlinkSender interface { + SendSymlink(linkData string) error +} + type senderFactory func(jptm IJobPartTransferMgr, destination string, p pipeline.Pipeline, pacer pacer, sip ISourceInfoProvider) (sender, error) ///////////////////////////////////////////////////////////////////////////////////////////////// @@ -116,9 +123,6 @@ type s2sCopier interface { GenerateCopyFunc(chunkID common.ChunkID, blockIndex int32, adjustedChunkSize int64, chunkIsWholeFile bool) chunkFunc } -type s2sCopierFactory func(jptm IJobPartTransferMgr, srcInfoProvider IRemoteSourceInfoProvider, destination string, p pipeline.Pipeline, pacer pacer) (s2sCopier, error) - -// /////////////////////////////////////////////////////////////////////////////////////////////// // Abstraction of the methods needed to upload one file to a remote location // /////////////////////////////////////////////////////////////////////////////////////////////// type uploader interface { @@ -220,6 +224,8 @@ func newBlobUploader(jptm IJobPartTransferMgr, destination string, p pipeline.Pi if jptm.Info().IsFolderPropertiesTransfer() { return newBlobFolderSender(jptm, destination, p, pacer, sip) + } else if jptm.Info().EntityType == common.EEntityType.Symlink() { + return newBlobSymlinkSender(jptm, destination, p, pacer, sip) } switch intendedType { diff --git a/ste/sourceInfoProvider-Blob.go b/ste/sourceInfoProvider-Blob.go index 4e9b3d27a..f0de67003 100644 --- a/ste/sourceInfoProvider-Blob.go +++ b/ste/sourceInfoProvider-Blob.go @@ -21,6 +21,7 @@ package ste import ( + "io" "strings" "time" @@ -35,6 +36,38 @@ type blobSourceInfoProvider struct { defaultRemoteSourceInfoProvider } +func (p *blobSourceInfoProvider) ReadLink() (string, error) { + uri, err := p.PreSignedSourceURL() + if err != nil { + return "", err + } + + pl := p.jptm.SourceProviderPipeline() + ctx := p.jptm.Context() + + blobURL := azblob.NewBlockBlobURL(*uri, pl) + + clientProvidedKey := azblob.ClientProvidedKeyOptions{} + if p.jptm.IsSourceEncrypted() { + clientProvidedKey = 
common.ToClientProvidedKeyOptions(p.jptm.CpkInfo(), p.jptm.CpkScopeInfo()) + } + + resp, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, clientProvidedKey) + if err != nil { + return "", err + } + + symlinkBuf, err := io.ReadAll(resp.Body(azblob.RetryReaderOptions{ + MaxRetryRequests: 5, + NotifyFailedRead: common.NewReadLogFunc(p.jptm, uri), + })) + if err != nil { + return "", err + } + + return string(symlinkBuf), nil +} + func (p *blobSourceInfoProvider) GetUNIXProperties() (common.UnixStatAdapter, error) { prop, err := p.Properties() if err != nil { @@ -79,10 +112,14 @@ func (p *blobSourceInfoProvider) AccessControl() (azbfs.BlobFSAccessControl, err bURLParts := azblob.NewBlobURLParts(*presignedURL) bURLParts.Host = strings.ReplaceAll(bURLParts.Host, ".blob", ".dfs") - bURLParts.BlobName = strings.TrimSuffix(bURLParts.BlobName, "/") // BlobFS doesn't handle folders correctly like this. + if bURLParts.BlobName != "" { + bURLParts.BlobName = strings.TrimSuffix(bURLParts.BlobName, "/") // BlobFS doesn't handle folders correctly like this. + } else { + bURLParts.BlobName = "/" // container level perms MUST have a / + } + // todo: jank, and violates the principle of interfaces fURL := azbfs.NewFileURL(bURLParts.URL(), p.jptm.(*jobPartTransferMgr).jobPartMgr.(*jobPartMgr).secondarySourceProviderPipeline) - return fURL.GetAccessControl(p.jptm.Context()) } diff --git a/ste/sourceInfoProvider-GCP.go b/ste/sourceInfoProvider-GCP.go index 17af5c0c9..73f6ded67 100644 --- a/ste/sourceInfoProvider-GCP.go +++ b/ste/sourceInfoProvider-GCP.go @@ -6,7 +6,8 @@ import ( "github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-storage-azcopy/v10/common" "golang.org/x/oauth2/google" - "io/ioutil" + "os" + "net/url" "time" ) @@ -52,7 +53,7 @@ func newGCPSourceInfoProvider(jptm IJobPartTransferMgr) (ISourceInfoProvider, er return nil, err } glcm := common.GetLifecycleMgr() - jsonKey, err = ioutil.ReadFile(glcm.GetEnvironmentVariable(common.EEnvironmentVariable.GoogleAppCredentials())) + jsonKey, err = os.ReadFile(glcm.GetEnvironmentVariable(common.EEnvironmentVariable.GoogleAppCredentials())) if err != nil { return nil, fmt.Errorf("Cannot read JSON key file. Please verify you have correctly set GOOGLE_APPLICATION_CREDENTIALS environment variable") } diff --git a/ste/sourceInfoProvider-Local.go b/ste/sourceInfoProvider-Local.go index b5700aef2..b8a7cdb2e 100644 --- a/ste/sourceInfoProvider-Local.go +++ b/ste/sourceInfoProvider-Local.go @@ -33,6 +33,10 @@ type localFileSourceInfoProvider struct { transferInfo TransferInfo } +func (f localFileSourceInfoProvider) ReadLink() (string, error) { + return os.Readlink(f.jptm.Info().Source) +} + func newLocalSourceInfoProvider(jptm IJobPartTransferMgr) (ISourceInfoProvider, error) { return &localFileSourceInfoProvider{jptm, jptm.Info()}, nil } @@ -63,6 +67,23 @@ func (f localFileSourceInfoProvider) IsLocal() bool { func (f localFileSourceInfoProvider) OpenSourceFile() (common.CloseableReaderAt, error) { path := f.jptm.Info().Source + hasMode := func(fi os.FileInfo, mode os.FileMode) bool { + return fi.Mode()&mode == mode + } + + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + + // These files have no data for us to upload, and will cause AzCopy to hang upon attempting to open. 
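// (Why the hang: open(2) on a FIFO blocks until the other end is opened, and
// device nodes can block or have side effects; hence the substitute reader.)
// The emptyCloseableReaderAt added earlier in this diff satisfies the interface
// by construction, which could be pinned with the usual compile-time assertion:
//
//	var _ common.CloseableReaderAt = emptyCloseableReaderAt{}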
+ if hasMode(fi, os.ModeNamedPipe) || + hasMode(fi, os.ModeDevice) || + hasMode(fi, os.ModeCharDevice) || + hasMode(fi, os.ModeSocket) { + return emptyCloseableReaderAt{}, nil + } + if custom, ok := interface{}(f).(ICustomLocalOpener); ok { return custom.Open(path) } diff --git a/ste/sourceInfoProvider-Local_linux.go b/ste/sourceInfoProvider-Local_linux.go index 73a03f429..55a160d99 100644 --- a/ste/sourceInfoProvider-Local_linux.go +++ b/ste/sourceInfoProvider-Local_linux.go @@ -20,17 +20,22 @@ func (f localFileSourceInfoProvider) HasUNIXProperties() bool { func (f localFileSourceInfoProvider) GetUNIXProperties() (common.UnixStatAdapter, error) { { // attempt to call statx, if ENOSYS is returned, statx is unavailable var stat unix.Statx_t + + statxFlags := unix.AT_STATX_SYNC_AS_STAT + if f.EntityType() == common.EEntityType.Symlink() { + statxFlags |= unix.AT_SYMLINK_NOFOLLOW + } // dirfd is a null pointer, because we should only ever be passing relative paths here, and directories will be passed via transferInfo.Source. // AT_SYMLINK_NOFOLLOW is not used, because we automagically resolve symlinks. TODO: Add option to not follow symlinks, and use AT_SYMLINK_NOFOLLOW when resolving is disabled. err := unix.Statx(0, f.transferInfo.Source, - unix.AT_STATX_SYNC_AS_STAT, + statxFlags, unix.STATX_ALL, &stat) if err != nil && err != unix.ENOSYS { return nil, err } else if err == nil { - return statxTAdapter(stat), nil + return StatxTAdapter(stat), nil } } @@ -40,130 +45,130 @@ func (f localFileSourceInfoProvider) GetUNIXProperties() (common.UnixStatAdapter return nil, err } - return statTAdapter(stat), nil + return StatTAdapter(stat), nil } -type statxTAdapter unix.Statx_t +type StatxTAdapter unix.Statx_t -func (s statxTAdapter) Extended() bool { +func (s StatxTAdapter) Extended() bool { return true } -func (s statxTAdapter) StatxMask() uint32 { +func (s StatxTAdapter) StatxMask() uint32 { return s.Mask } -func (s statxTAdapter) Attribute() uint64 { +func (s StatxTAdapter) Attribute() uint64 { return s.Attributes } -func (s statxTAdapter) AttributeMask() uint64 { +func (s StatxTAdapter) AttributeMask() uint64 { return s.Attributes_mask } -func (s statxTAdapter) BTime() time.Time { +func (s StatxTAdapter) BTime() time.Time { return time.Unix(s.Btime.Sec, int64(s.Btime.Nsec)) } -func (s statxTAdapter) NLink() uint64 { +func (s StatxTAdapter) NLink() uint64 { return uint64(s.Nlink) } -func (s statxTAdapter) Owner() uint32 { +func (s StatxTAdapter) Owner() uint32 { return s.Uid } -func (s statxTAdapter) Group() uint32 { +func (s StatxTAdapter) Group() uint32 { return s.Gid } -func (s statxTAdapter) FileMode() uint32 { +func (s StatxTAdapter) FileMode() uint32 { return uint32(s.Mode) } -func (s statxTAdapter) INode() uint64 { +func (s StatxTAdapter) INode() uint64 { return s.Ino } -func (s statxTAdapter) Device() uint64 { +func (s StatxTAdapter) Device() uint64 { return unix.Mkdev(s.Dev_major, s.Dev_minor) } -func (s statxTAdapter) RDevice() uint64 { +func (s StatxTAdapter) RDevice() uint64 { return unix.Mkdev(s.Rdev_major, s.Rdev_minor) } -func (s statxTAdapter) ATime() time.Time { +func (s StatxTAdapter) ATime() time.Time { return time.Unix(s.Atime.Sec, int64(s.Atime.Nsec)) } -func (s statxTAdapter) MTime() time.Time { +func (s StatxTAdapter) MTime() time.Time { return time.Unix(s.Mtime.Sec, int64(s.Mtime.Nsec)) } -func (s statxTAdapter) CTime() time.Time { +func (s StatxTAdapter) CTime() time.Time { return time.Unix(s.Ctime.Sec, int64(s.Ctime.Nsec)) } -type statTAdapter unix.Stat_t +type 
StatTAdapter unix.Stat_t -func (s statTAdapter) Extended() bool { +func (s StatTAdapter) Extended() bool { return false } -func (s statTAdapter) StatxMask() uint32 { +func (s StatTAdapter) StatxMask() uint32 { return 0 } -func (s statTAdapter) Attribute() uint64 { +func (s StatTAdapter) Attribute() uint64 { return 0 } -func (s statTAdapter) AttributeMask() uint64 { +func (s StatTAdapter) AttributeMask() uint64 { return 0 } -func (s statTAdapter) BTime() time.Time { +func (s StatTAdapter) BTime() time.Time { return time.Time{} } -func (s statTAdapter) NLink() uint64 { +func (s StatTAdapter) NLink() uint64 { return uint64(s.Nlink) // On amd64, this is a uint64. On arm64, this is a uint32. Do not remove this typecast. } -func (s statTAdapter) Owner() uint32 { +func (s StatTAdapter) Owner() uint32 { return s.Uid } -func (s statTAdapter) Group() uint32 { +func (s StatTAdapter) Group() uint32 { return s.Gid } -func (s statTAdapter) FileMode() uint32 { +func (s StatTAdapter) FileMode() uint32 { return s.Mode } -func (s statTAdapter) INode() uint64 { +func (s StatTAdapter) INode() uint64 { return s.Ino } -func (s statTAdapter) Device() uint64 { +func (s StatTAdapter) Device() uint64 { return s.Dev } -func (s statTAdapter) RDevice() uint64 { +func (s StatTAdapter) RDevice() uint64 { return s.Rdev } -func (s statTAdapter) ATime() time.Time { +func (s StatTAdapter) ATime() time.Time { return time.Unix(s.Atim.Unix()) } -func (s statTAdapter) MTime() time.Time { +func (s StatTAdapter) MTime() time.Time { return time.Unix(s.Mtim.Unix()) } -func (s statTAdapter) CTime() time.Time { +func (s StatTAdapter) CTime() time.Time { return time.Unix(s.Ctim.Unix()) } diff --git a/ste/sourceInfoProvider.go b/ste/sourceInfoProvider.go index c0b552214..e153c48cb 100644 --- a/ste/sourceInfoProvider.go +++ b/ste/sourceInfoProvider.go @@ -98,6 +98,12 @@ type IUNIXPropertyBearingSourceInfoProvider interface { HasUNIXProperties() bool } +type ISymlinkBearingSourceInfoProvider interface { + ISourceInfoProvider + + ReadLink() (string, error) +} + type ICustomLocalOpener interface { ISourceInfoProvider Open(path string) (*os.File, error) diff --git a/ste/xfer-anyToRemote-file.go b/ste/xfer-anyToRemote-file.go index 6433be093..42108292a 100644 --- a/ste/xfer-anyToRemote-file.go +++ b/ste/xfer-anyToRemote-file.go @@ -116,7 +116,7 @@ func BlobTierAllowed(destTier azblob.AccessTierType) bool { // Standard storage account. If it's Hot, Cool, or Archive, we're A-OK. // Page blobs, however, don't have an access tier on Standard accounts. // However, this is also OK, because the pageblob sender code prevents us from using a standard access tier type. 
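// Cold is admitted below via the enum round-trip rather than an azblob constant
// because the vendored azblob predates the Cold tier; the x-ms-version bump policy
// added earlier in this diff papers over the same gap. Sketch of the equivalence:
//
//	cold := common.EBlockBlobTier.Cold().ToAccessTierType() // AccessTierType("Cold")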
- return destTier == azblob.AccessTierArchive || destTier == azblob.AccessTierCool || destTier == azblob.AccessTierHot + return destTier == azblob.AccessTierArchive || destTier == azblob.AccessTierCool || destTier == common.EBlockBlobTier.Cold().ToAccessTierType() || destTier == azblob.AccessTierHot } } @@ -179,12 +179,19 @@ func anyToRemote(jptm IJobPartTransferMgr, p pipeline.Pipeline, pacer pacer, sen } } - if info.IsFolderPropertiesTransfer() { + switch info.EntityType { + case common.EEntityType.Folder(): anyToRemote_folder(jptm, info, p, pacer, senderFactory, sipf) - } else if (jptm.GetOverwriteOption() == common.EOverwriteOption.PosixProperties() && info.EntityType == common.EEntityType.File()) { + case common.EEntityType.FileProperties(): anyToRemote_fileProperties(jptm, info, p, pacer, senderFactory, sipf) - } else { - anyToRemote_file(jptm, info, p, pacer, senderFactory, sipf) + case common.EEntityType.File(): + if jptm.GetOverwriteOption() == common.EOverwriteOption.PosixProperties() { + anyToRemote_fileProperties(jptm, info, p, pacer, senderFactory, sipf) + } else { + anyToRemote_file(jptm, info, p, pacer, senderFactory, sipf) + } + case common.EEntityType.Symlink(): + anyToRemote_symlink(jptm, info, p, pacer, senderFactory, sipf) } } @@ -538,7 +545,7 @@ func epilogueWithCleanupSendToRemote(jptm IJobPartTransferMgr, s sender, sip ISo shouldCheckLength = false checkLengthFailureOnReadOnlyDst.Do(func() { var glcm = common.GetLifecycleMgr() - msg := fmt.Sprintf("Could not read destination length. If the destination is write-only, use --check-length=false on the command line.") + msg := "Could not read destination length. If the destination is write-only, use --check-length=false on the command line." glcm.Info(msg) if jptm.ShouldLog(pipeline.LogError) { jptm.Log(pipeline.LogError, msg) diff --git a/ste/xfer-anyToRemote-symlink.go b/ste/xfer-anyToRemote-symlink.go new file mode 100644 index 000000000..906ee02ac --- /dev/null +++ b/ste/xfer-anyToRemote-symlink.go @@ -0,0 +1,67 @@ +package ste + +import ( + "github.com/Azure/azure-pipeline-go/pipeline" + "github.com/Azure/azure-storage-azcopy/v10/common" +) + +func anyToRemote_symlink(jptm IJobPartTransferMgr, info TransferInfo, p pipeline.Pipeline, pacer pacer, senderFactory senderFactory, sipf sourceInfoProviderFactory) { + // Check if cancelled + if jptm.WasCanceled() { + /* This is earliest we detect that jptm has been cancelled before we reach destination */ + jptm.SetStatus(common.ETransferStatus.Cancelled()) + jptm.ReportTransferDone() + return + } + + // Create SIP + srcInfoProvider, err := sipf(jptm) + if err != nil { + jptm.LogSendError(info.Source, info.Destination, err.Error(), 0) + jptm.SetStatus(common.ETransferStatus.Failed()) + jptm.ReportTransferDone() + return + } + if srcInfoProvider.EntityType() != common.EEntityType.Symlink() { + panic("configuration error. 
Source Info Provider does not have symlink entity type") + } + symSIP, ok := srcInfoProvider.(ISymlinkBearingSourceInfoProvider) + if !ok { + jptm.LogSendError(info.Source, info.Destination, "source info provider implementation does not support symlinks", 0) + jptm.SetStatus(common.ETransferStatus.Failed()) + jptm.ReportTransferDone() + return + } + + path, err := symSIP.ReadLink() + if err != nil { + jptm.FailActiveSend("getting symlink path", err) + jptm.SetStatus(common.ETransferStatus.Failed()) + jptm.ReportTransferDone() + return + } + + baseSender, err := senderFactory(jptm, info.Destination, p, pacer, srcInfoProvider) + if err != nil { + jptm.LogSendError(info.Source, info.Destination, err.Error(), 0) + jptm.SetStatus(common.ETransferStatus.Failed()) + jptm.ReportTransferDone() + return + } + + s, ok := baseSender.(symlinkSender) // todo: symlinkSender + if !ok { + jptm.LogSendError(info.Source, info.Destination, "sender implementation does not support symlinks", 0) + jptm.SetStatus(common.ETransferStatus.Failed()) + jptm.ReportTransferDone() + return + } + + // write the symlink + err = s.SendSymlink(path) + if err != nil { + jptm.FailActiveSend("creating destination symlink representative", err) + } + + commonSenderCompletion(jptm, baseSender, info) +} diff --git a/ste/xfer-remoteToLocal-file.go b/ste/xfer-remoteToLocal-file.go index 611a120af..4e18926ae 100644 --- a/ste/xfer-remoteToLocal-file.go +++ b/ste/xfer-remoteToLocal-file.go @@ -41,6 +41,8 @@ func remoteToLocal(jptm IJobPartTransferMgr, p pipeline.Pipeline, pacer pacer, d info := jptm.Info() if info.IsFolderPropertiesTransfer() { remoteToLocal_folder(jptm, p, pacer, df) + } else if info.EntityType == common.EEntityType.Symlink() { + remoteToLocal_symlink(jptm, p, pacer, df) } else { remoteToLocal_file(jptm, p, pacer, df) } @@ -122,49 +124,34 @@ func remoteToLocal_file(jptm IJobPartTransferMgr, p pipeline.Pipeline, pacer pac // } var dstFile io.WriteCloser - // step 4b: special handling for empty files - if fileSize == 0 { - if strings.EqualFold(info.Destination, common.Dev_Null) { - // do nothing - } else { - err := jptm.WaitUntilLockDestination(jptm.Context()) - if err == nil { - err = createEmptyFile(jptm, info.Destination) - } - if err != nil { - jptm.LogDownloadError(info.Source, info.Destination, "Empty File Creation error "+err.Error(), 0) - jptm.SetStatus(common.ETransferStatus.Failed()) - } + if ctdl, ok := dl.(creationTimeDownloader); info.Destination != os.DevNull && ok { // ctdl never needs to handle devnull + failFileCreation := func(err error) { + jptm.LogDownloadError(info.Source, info.Destination, "File Creation Error "+err.Error(), 0) + jptm.SetStatus(common.ETransferStatus.Failed()) + // use standard epilogue for consistency, but force release of file count (without an actual file) if necessary + epilogueWithCleanupDownload(jptm, dl, nil, nil) + } + // block until we can safely use a file handle + err := jptm.WaitUntilLockDestination(jptm.Context()) + if err != nil { + failFileCreation(err) + return } - // Run the prologue anyway, as some downloaders (files) require this. - // Note that this doesn't actually have adverse effects (at the moment). - // For files, it just sets a few properties. - // For blobs, it sets up a page blob pacer if it's a page blob. - // For blobFS, it's a noop. 
- dl.Prologue(jptm, p) - epilogueWithCleanupDownload(jptm, dl, nil, nil) // need standard epilogue, rather than a quick exit, so we can preserve modification dates - return - } - - // step 4c: normal file creation when source has content - failFileCreation := func(err error) { - jptm.LogDownloadError(info.Source, info.Destination, "File Creation Error "+err.Error(), 0) - jptm.SetStatus(common.ETransferStatus.Failed()) - // use standard epilogue for consistency, but force release of file count (without an actual file) if necessary - epilogueWithCleanupDownload(jptm, dl, nil, nil) - } - // block until we can safely use a file handle - err := jptm.WaitUntilLockDestination(jptm.Context()) - if err != nil { - failFileCreation(err) - return - } + size := fileSize + ct := common.ECompressionType.None() + if jptm.ShouldDecompress() { + size = 0 // we don't know what the final size will be, so we can't pre-size it + ct, err = jptm.GetSourceCompressionType() // calls the same decompression getter routine as the front-end does + if err != nil { // check this, and return error, before we create any disk file, since if we return err, then no cleanup of file will be required + failFileCreation(err) + return + } + // Why get the decompression type again here, when we already looked at it at enumeration time? + // Because we have better ability to report unsupported compression types here, with clear "transfer failed" handling, + // and we still need to set size to zero here, so relying on enumeration more wouldn't simplify this code much, if at all. - if strings.EqualFold(info.Destination, common.Dev_Null) { - // the user wants to discard the downloaded data - dstFile = devNullWriter{} - } else { // Normal scenario, create the destination file as expected // Use pseudo chunk id to allow our usual state tracking mechanism to keep count of how many // file creations are running at any given instant, for perf diagnostics @@ -173,12 +160,88 @@ func remoteToLocal_file(jptm IJobPartTransferMgr, p pipeline.Pipeline, pacer pac // to correct name. pseudoId := common.NewPseudoChunkIDForWholeFile(info.Source) jptm.LogChunkStatus(pseudoId, common.EWaitReason.CreateLocalFile()) - dstFile, err = createDestinationFile(jptm, info.getDownloadPath(), fileSize, writeThrough) + var needChunks bool + dstFile, needChunks, err = ctdl.CreateFile(jptm, info.getDownloadPath(), size, writeThrough, jptm.GetFolderCreationTracker()) jptm.LogChunkStatus(pseudoId, common.EWaitReason.ChunkDone()) // normal setting to done doesn't apply to these pseudo ids if err != nil { failFileCreation(err) return } + + if !needChunks { // If no chunks need to be transferred (e.g. this is 0-bytes long, a symlink, etc.), treat it as a 0-byte transfer + dl.Prologue(jptm, p) + epilogueWithCleanupDownload(jptm, dl, nil, nil) + return + } + + if jptm.ShouldDecompress() { // Wrap the file in the decompressor if necessary + jptm.LogAtLevelForCurrentTransfer(pipeline.LogInfo, "will be decompressed from "+ct.String()) + + // wrap for automatic decompression + dstFile = common.NewDecompressingWriter(dstFile, ct) + // why don't we just let Go's network stack automatically decompress for us? Because + // 1. Then we can't check the MD5 hash (since logically, any stored hash should be the hash of the file that exists in Storage, i.e. the compressed one) + // 2. Then we can't pre-plan a certain number of fixed-size chunks (which is required by the way our architecture currently works). 
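+ // (Net effect: chunk planning still operates on the known, compressed remote size, while this writer inflates the bytes as they are written to disk.)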
+ } + } else { + // step 4b: special handling for empty files + if fileSize == 0 { + if strings.EqualFold(info.Destination, common.Dev_Null) { + // do nothing + } else { + err := jptm.WaitUntilLockDestination(jptm.Context()) + if err == nil { + err = createEmptyFile(jptm, info.Destination) + } + if err != nil { + jptm.LogDownloadError(info.Source, info.Destination, "Empty File Creation error "+err.Error(), 0) + jptm.SetStatus(common.ETransferStatus.Failed()) + } + } + // Run the prologue anyway, as some downloaders (files) require this. + // Note that this doesn't actually have adverse effects (at the moment). + // For files, it just sets a few properties. + // For blobs, it sets up a page blob pacer if it's a page blob. + // For blobFS, it's a noop. + dl.Prologue(jptm, p) + epilogueWithCleanupDownload(jptm, dl, nil, nil) // need standard epilogue, rather than a quick exit, so we can preserve modification dates + return + } + + // step 4c: normal file creation when source has content + + failFileCreation := func(err error) { + jptm.LogDownloadError(info.Source, info.Destination, "File Creation Error "+err.Error(), 0) + jptm.SetStatus(common.ETransferStatus.Failed()) + // use standard epilogue for consistency, but force release of file count (without an actual file) if necessary + epilogueWithCleanupDownload(jptm, dl, nil, nil) + } + // block until we can safely use a file handle + err := jptm.WaitUntilLockDestination(jptm.Context()) + if err != nil { + failFileCreation(err) + return + } + + if strings.EqualFold(info.Destination, common.Dev_Null) { + // the user wants to discard the downloaded data + dstFile = devNullWriter{} + } else { + // Normal scenario, create the destination file as expected + // Use pseudo chunk id to allow our usual state tracking mechanism to keep count of how many + // file creations are running at any given instant, for perf diagnostics + // + // We create the file to a temporary location with name .azcopy-- and then move it + // to correct name. + pseudoId := common.NewPseudoChunkIDForWholeFile(info.Source) + jptm.LogChunkStatus(pseudoId, common.EWaitReason.CreateLocalFile()) + dstFile, err = createDestinationFile(jptm, info.getDownloadPath(), fileSize, writeThrough) + jptm.LogChunkStatus(pseudoId, common.EWaitReason.ChunkDone()) // normal setting to done doesn't apply to these pseudo ids + if err != nil { + failFileCreation(err) + return + } + } } // TODO: Question: do we need to Stat the file, to check its size, after explicitly making it with the desired size? @@ -496,7 +559,7 @@ func tryDeleteFile(info TransferInfo, jptm IJobPartTransferMgr) { // download to a temp path we return a temp path in format // /actual/parent/path/.azDownload-- func (info *TransferInfo) getDownloadPath() string { - if common.GetLifecycleMgr().DownloadToTempPath() { + if common.GetLifecycleMgr().DownloadToTempPath() && info.SourceSize > 0 { // 0-byte files don't need a rename. 
parent, fileName := filepath.Split(info.Destination) fileName = fmt.Sprintf(azcopyTempDownloadPrefix, info.JobID.String()) + fileName return filepath.Join(parent, fileName) diff --git a/ste/xfer-remoteToLocal-symlink.go b/ste/xfer-remoteToLocal-symlink.go new file mode 100644 index 000000000..463e29dd0 --- /dev/null +++ b/ste/xfer-remoteToLocal-symlink.go @@ -0,0 +1,34 @@ +package ste + +import ( + "github.com/Azure/azure-pipeline-go/pipeline" + "github.com/Azure/azure-storage-azcopy/v10/common" +) + +func remoteToLocal_symlink(jptm IJobPartTransferMgr, p pipeline.Pipeline, pacer pacer, df downloaderFactory) { + info := jptm.Info() + + // Perform initial checks + // If the transfer was cancelled, then report transfer as done + if jptm.WasCanceled() { + /* This is the earliest we detect that jptm was cancelled, before we go to the destination */ + jptm.SetStatus(common.ETransferStatus.Cancelled()) + jptm.ReportTransferDone() + return + } + + dl, ok := df().(symlinkDownloader) + if !ok { + jptm.LogDownloadError(info.Source, info.Destination, "downloader implementation does not support symlinks", 0) + jptm.SetStatus(common.ETransferStatus.Failed()) + jptm.ReportTransferDone() + return + } + + err := dl.CreateSymlink(jptm) + if err != nil { + jptm.FailActiveSend("creating destination symlink", err) + } + + commonDownloaderCompletion(jptm, info, common.EEntityType.Symlink()) +} diff --git a/ste/xferLogPolicy.go b/ste/xferLogPolicy.go index ae3a2b7f3..3c9efeeba 100644 --- a/ste/xferLogPolicy.go +++ b/ste/xferLogPolicy.go @@ -254,7 +254,7 @@ func prepareRequestForServiceLogging(request pipeline.Request) *http.Request { // contains header x-ms-copy-source which could contain secrets for authentication. // Prepare the headers for logging, redacting secrets in the x-ms-copy-source header. if exist, key := doesHeaderExistCaseInsensitive(req.Header, xMsCopySourceHeader); exist { - req = request.Copy() + req = req.Copy() url, err := url.Parse(req.Header.Get(key)) if err == nil { rawQuery := url.RawQuery @@ -267,10 +267,22 @@ } } } + // Redact headers that have to do with CPK keys. 
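+ // x-ms-encryption-key carries the customer-provided encryption key itself, so letting it reach a log would defeat the point of CPK; its SHA256 digest is redacted as well for good measure.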
+ if exist, key := doesHeaderExistCaseInsensitive(req.Header, xMsEncryptionKey); exist { + req = req.Copy() + req.Header.Set(key, "REDACTED") + } + if exist, key := doesHeaderExistCaseInsensitive(req.Header, xMsEncryptionKeySha256); exist { + req = req.Copy() + req.Header.Set(key, "REDACTED") + } + return req.Request } const xMsCopySourceHeader = "x-ms-copy-source" +const xMsEncryptionKey = "x-ms-encryption-key" +const xMsEncryptionKeySha256 = "x-ms-encryption-key-sha256" func doesHeaderExistCaseInsensitive(header http.Header, key string) (bool, string) { for keyInHeader := range header { diff --git a/ste/xferRetrypolicy.go b/ste/xferRetrypolicy.go index ec8865875..b715ef01e 100644 --- a/ste/xferRetrypolicy.go +++ b/ste/xferRetrypolicy.go @@ -3,7 +3,6 @@ package ste import ( "context" "io" - "io/ioutil" "math/rand" "net" "net/http" @@ -192,7 +191,7 @@ func NewBFSXferRetryPolicyFactory(o XferRetryOptions) pipeline.Factory { // Set the server-side timeout query parameter "timeout=[seconds]" timeout := int32(o.TryTimeout.Seconds()) // Max seconds per try if deadline, ok := ctx.Deadline(); ok { // If user's ctx has a deadline, make the timeout the smaller of the two - t := int32(deadline.Sub(time.Now()).Seconds()) // Duration from now until user's ctx reaches its deadline + t := int32(time.Until(deadline).Seconds()) // Duration from now until user's ctx reaches its deadline logf("MaxTryTimeout=%d secs, TimeTilDeadline=%d sec\n", timeout, t) if t < timeout { timeout = t @@ -282,8 +281,8 @@ func NewBFSXferRetryPolicyFactory(o XferRetryOptions) pipeline.Factory { break // Don't retry } if response.Response() != nil { - // If we're going to retry and we got a previous response, then flush its body to avoid leaking its TCP connection - io.Copy(ioutil.Discard, response.Response().Body) + // If we're going to retry, and we got a previous response, then flush its body to avoid leaking its TCP connection + _, _ = io.Copy(io.Discard, response.Response().Body) response.Response().Body.Close() } // If retrying, cancel the current per-try timeout context @@ -363,7 +362,7 @@ func NewBlobXferRetryPolicyFactory(o XferRetryOptions) pipeline.Factory { // Set the server-side timeout query parameter "timeout=[seconds]" timeout := int32(o.TryTimeout.Seconds()) // Max seconds per try if deadline, ok := ctx.Deadline(); ok { // If user's ctx has a deadline, make the timeout the smaller of the two - t := int32(deadline.Sub(time.Now()).Seconds()) // Duration from now until user's ctx reaches its deadline + t := int32(time.Until(deadline).Seconds()) // Duration from now until user's ctx reaches its deadline logf("MaxTryTimeout=%d secs, TimeTilDeadline=%d sec\n", timeout, t) if t < timeout { timeout = t @@ -455,7 +454,7 @@ func NewBlobXferRetryPolicyFactory(o XferRetryOptions) pipeline.Factory { } if response.Response() != nil { // If we're going to retry and we got a previous response, then flush its body to avoid leaking its TCP connection - io.Copy(ioutil.Discard, response.Response().Body) + _, _ = io.Copy(io.Discard, response.Response().Body) response.Response().Body.Close() } // If retrying, cancel the current per-try timeout context diff --git a/ste/xferStatsPolicy.go b/ste/xferStatsPolicy.go index 7b489b79d..e40792817 100644 --- a/ste/xferStatsPolicy.go +++ b/ste/xferStatsPolicy.go @@ -25,7 +25,7 @@ import ( "context" "github.com/Azure/azure-pipeline-go/pipeline" "github.com/Azure/azure-storage-azcopy/v10/common" - "io/ioutil" + "io" "net/http" "strings" "sync/atomic" @@ -206,9 +206,9 @@ func 
transparentlyReadBody(r *http.Response) string { if r.Body == http.NoBody { return "" } - buf, _ := ioutil.ReadAll(r.Body) // error responses are short fragments of XML, so safe to read all - _ = r.Body.Close() // must close the real body - r.Body = ioutil.NopCloser(bytes.NewReader(buf)) // replace it with something that will read the same data we just read + buf, _ := io.ReadAll(r.Body) // error responses are short fragments of XML, so safe to read all + _ = r.Body.Close() // must close the real body + r.Body = io.NopCloser(bytes.NewReader(buf)) // replace it with something that will read the same data we just read return string(buf) // copy to string } diff --git a/testSuite/cmd/clean.go b/testSuite/cmd/clean.go index 8dfbf57f3..03ed0e769 100644 --- a/testSuite/cmd/clean.go +++ b/testSuite/cmd/clean.go @@ -363,7 +363,7 @@ func cleanBfsFile(fileURLStr string) { fileURL := azbfs.NewFileURL(*u, createBlobFSPipeline(*u)) _, err = fileURL.Delete(ctx) if err != nil { - fmt.Println(fmt.Sprintf("error deleting the blob FS file, %v", err)) + fmt.Printf("error deleting the blob FS file, %v\n", err) os.Exit(1) } } @@ -547,7 +547,7 @@ func deleteGCPBucket(client *gcpUtils.Client, bucketName string) { break } if err == nil { - err = bucket.Object(attrs.Name).Delete(nil) + err = bucket.Object(attrs.Name).Delete(context.TODO()) if err != nil { fmt.Println("Could not clear GCS Buckets.") return diff --git a/testSuite/cmd/create.go b/testSuite/cmd/create.go index b0cb39592..73ac0bb7e 100644 --- a/testSuite/cmd/create.go +++ b/testSuite/cmd/create.go @@ -11,7 +11,6 @@ import ( "time" "io" - "io/ioutil" "math/rand" "net/http" "strings" @@ -277,11 +276,11 @@ func createBlob(blobURL string, blobSize uint32, metadata azblob.Metadata, blobH azblob.ClientProvidedKeyOptions{}, azblob.ImmutabilityPolicyOptions{}) if err != nil { - fmt.Println(fmt.Sprintf("error uploading the blob %v", err)) + fmt.Printf("error uploading the blob %v\n", err) os.Exit(1) } if putBlobResp.Response() != nil { - io.Copy(ioutil.Discard, putBlobResp.Response().Body) + _, _ = io.Copy(io.Discard, putBlobResp.Response().Body) putBlobResp.Response().Body.Close() } } @@ -355,7 +354,7 @@ func createFile(fileURLStr string, fileSize uint32, metadata azfile.Metadata, fi Metadata: metadata, }) if err != nil { - fmt.Println(fmt.Sprintf("error uploading the file %v", err)) + fmt.Printf("error uploading the file %v\n", err) os.Exit(1) } } @@ -460,7 +459,7 @@ func createGCPObject(objectURLStr string, objectSize uint32, o gcpUtils.ObjectAt os.Exit(1) } - gcpClient, err := createGCPClientWithGCSSDK() + gcpClient, _ := createGCPClientWithGCSSDK() randomString := createStringWithRandomChars(int(objectSize)) if o.ContentType == "" { @@ -470,8 +469,8 @@ func createGCPObject(objectURLStr string, objectSize uint32, o gcpUtils.ObjectAt obj := gcpClient.Bucket(gcpURLParts.BucketName).Object(gcpURLParts.ObjectKey) wc := obj.NewWriter(context.Background()) reader := strings.NewReader(randomString) - _, err = io.Copy(wc, reader) - err = wc.Close() + _, _ = io.Copy(wc, reader) + _ = wc.Close() _, err = obj.Update(context.Background(), o) if err != nil { diff --git a/testSuite/cmd/list.go b/testSuite/cmd/list.go index 2820463a4..b1f27c7c8 100644 --- a/testSuite/cmd/list.go +++ b/testSuite/cmd/list.go @@ -107,7 +107,7 @@ func listContainer(resourceUrl string, numberOfresource int64) { listBlob, err := containerUrl.ListBlobsFlatSegment(context.TODO(), marker, azblob.ListBlobsSegmentOptions{Prefix: searchPrefix}) if err != nil { - fmt.Println(fmt.Sprintf("cannot list 
blobs for download. Failed with error %s", err.Error())) + fmt.Printf("cannot list blobs for download. Failed with error %s\n", err.Error()) os.Exit(1) } @@ -116,7 +116,7 @@ func listContainer(resourceUrl string, numberOfresource int64) { blobName := blobInfo.Name if len(searchPrefix) > 0 { // strip away search prefix from the blob name. - blobName = strings.Replace(blobName, searchPrefix, "", 1) + blobName = strings.Replace(blobName, searchPrefix, "", 1) //nolint:ineffassign,staticcheck } numberOfblobs++ } @@ -124,7 +124,7 @@ func listContainer(resourceUrl string, numberOfresource int64) { } if numberOfblobs != numberOfresource { - fmt.Println(fmt.Sprintf("expected number of blobs / file %d inside the resource does not match the actual %d", numberOfresource, numberOfblobs)) + fmt.Printf("expected number of blobs / file %d inside the resource does not match the actual %d\n", numberOfresource, numberOfblobs) os.Exit(1) } } diff --git a/testSuite/cmd/mmap_windows.go b/testSuite/cmd/mmap_windows.go index 628c62bb1..d9b28cfe1 100644 --- a/testSuite/cmd/mmap_windows.go +++ b/testSuite/cmd/mmap_windows.go @@ -18,8 +18,8 @@ func NewMMF(file *os.File, writable bool, offset int64, length int64) (MMF, erro if hMMF == 0 { return nil, os.NewSyscallError("CreateFileMapping", errno) } - defer syscall.CloseHandle(hMMF) - addr, errno := syscall.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length)) + defer syscall.CloseHandle(hMMF) //nolint:errcheck + addr, _ := syscall.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length)) m := MMF{} h := (*reflect.SliceHeader)(unsafe.Pointer(&m)) h.Data = addr diff --git a/testSuite/cmd/testblob.go b/testSuite/cmd/testblob.go index 78741ac28..0f6090d42 100644 --- a/testSuite/cmd/testblob.go +++ b/testSuite/cmd/testblob.go @@ -5,7 +5,6 @@ import ( "crypto/md5" "fmt" "io" - "io/ioutil" "net/http" "net/url" "os" @@ -189,14 +188,14 @@ func verifyBlockBlobDirUpload(testBlobCmd TestBlobCommand) { 0, *size, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{}) if err != nil { - fmt.Println(fmt.Sprintf("error downloading the blob %s", blobInfo.Name)) + fmt.Printf("error downloading the blob %s\n", blobInfo.Name) os.Exit(1) } // read all bytes. 
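+ // (ioutil.ReadAll has been deprecated since Go 1.16; io.ReadAll is its direct replacement.)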
- blobBytesDownloaded, err := ioutil.ReadAll(get.Body(azblob.RetryReaderOptions{})) + blobBytesDownloaded, err := io.ReadAll(get.Body(azblob.RetryReaderOptions{})) if err != nil { - fmt.Println(fmt.Sprintf("error reading the body of blob %s downloaded and failed with error %s", blobInfo.Name, err.Error())) + fmt.Printf("error reading the body of blob %s downloaded and failed with error %s\n", blobInfo.Name, err.Error()) os.Exit(1) } // remove the search prefix from the blob name @@ -258,7 +257,7 @@ func validateMetadata(expectedMetaDataString string, actualMetaData azblob.Metad // iterating through each key value pair of actual metaData and comparing the key value pair in expected metadata for key, value := range actualMetaData { if expectedMetaData[key] != value { - fmt.Println(fmt.Sprintf("value of user given key %s is %s in actual data while it is %s in expected metadata", key, value, expectedMetaData[key])) + fmt.Printf("value of user given key %s is %s in actual data while it is %s in expected metadata\n", key, value, expectedMetaData[key]) return false } } @@ -323,17 +322,17 @@ func verifySinglePageBlobUpload(testBlobCmd TestBlobCommand) { if azblob.AccessTierType(testBlobCmd.BlobTier) != azblob.AccessTierNone { blobProperties, err := pageBlobUrl.GetProperties(testCtx, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) if err != nil { - fmt.Println(fmt.Sprintf("error getting the properties of the blob. failed with error %s", err.Error())) + fmt.Printf("error getting the properties of the blob. failed with error %s\n", err.Error()) os.Exit(1) } // If the blob tier does not match the expected blob tier. if !strings.EqualFold(blobProperties.AccessTier(), testBlobCmd.BlobTier) { - fmt.Println(fmt.Sprintf("Access blob tier type %s does not match the expected %s tier type", blobProperties.AccessTier(), testBlobCmd.BlobTier)) + fmt.Printf("Access blob tier type %s does not match the expected %s tier type\n", blobProperties.AccessTier(), testBlobCmd.BlobTier) os.Exit(1) } // Closing the blobProperties response body. if blobProperties.Response() != nil { - io.Copy(ioutil.Discard, blobProperties.Response().Body) + _, _ = io.Copy(io.Discard, blobProperties.Response().Body) blobProperties.Response().Body.Close() } } @@ -344,7 +343,7 @@ func verifySinglePageBlobUpload(testBlobCmd TestBlobCommand) { os.Exit(1) } // reading all the bytes downloaded. - blobBytesDownloaded, err := ioutil.ReadAll(get.Body(azblob.RetryReaderOptions{})) + blobBytesDownloaded, err := io.ReadAll(get.Body(azblob.RetryReaderOptions{})) if get.Response().Body != nil { get.Response().Body.Close() } @@ -462,7 +461,7 @@ func verifySingleBlockBlob(testBlobCmd TestBlobCommand) { sourceSas := testBlobCmd.Subject sourceURL, err := url.Parse(sourceSas) if err != nil { - fmt.Println(fmt.Sprintf("Error parsing the blob url source %s", testBlobCmd.Object)) + fmt.Printf("Error parsing the blob url source %s\n", testBlobCmd.Object) os.Exit(1) } @@ -499,17 +498,17 @@ func verifySingleBlockBlob(testBlobCmd TestBlobCommand) { if azblob.AccessTierType(testBlobCmd.BlobTier) != azblob.AccessTierNone { blobProperties, err := blobUrl.GetProperties(testCtx, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) if err != nil { - fmt.Println(fmt.Sprintf("error getting the blob properties. Failed with error %s", err.Error())) + fmt.Printf("error getting the blob properties. Failed with error %s\n", err.Error()) os.Exit(1) } // Match the Access Tier Type with Expected Tier Type. 
if !strings.EqualFold(blobProperties.AccessTier(), testBlobCmd.BlobTier) { - fmt.Println(fmt.Sprintf("block blob access tier %s does not matches the expected tier %s", blobProperties.AccessTier(), testBlobCmd.BlobTier)) + fmt.Printf("block blob access tier %s does not match the expected tier %s\n", blobProperties.AccessTier(), testBlobCmd.BlobTier) os.Exit(1) } // Closing the blobProperties response. if blobProperties.Response() != nil { - io.Copy(ioutil.Discard, blobProperties.Response().Body) + _, _ = io.Copy(io.Discard, blobProperties.Response().Body) blobProperties.Response().Body.Close() } // If the access tier type of blob is set to Archive, then the blob is offline and reading the blob is not allowed, @@ -525,7 +524,7 @@ os.Exit(1) } // reading all the blob bytes. - blobBytesDownloaded, err := ioutil.ReadAll(get.Body(azblob.RetryReaderOptions{})) + blobBytesDownloaded, err := io.ReadAll(get.Body(azblob.RetryReaderOptions{})) if get.Response().Body != nil { get.Response().Body.Close() } @@ -537,7 +536,7 @@ // If the fileSize is 0 and the len of downloaded bytes is not 0 // validation fails if len(blobBytesDownloaded) != 0 { - fmt.Println(fmt.Sprintf("validation failed since the actual file size %d differs from the downloaded file size %d", fileInfo.Size(), len(blobBytesDownloaded))) + fmt.Printf("validation failed since the actual file size %d differs from the downloaded file size %d\n", fileInfo.Size(), len(blobBytesDownloaded)) os.Exit(1) } // If both the actual and downloaded file size is 0, @@ -598,7 +597,7 @@ mmap.Unmap() err = file.Close() if err != nil { - fmt.Println(fmt.Sprintf("error closing the file %s and failed with error %s. Error could be while validating the blob.", file.Name(), err.Error())) + fmt.Printf("error closing the file %s and failed with error %s. Error could be while validating the blob.\n", file.Name(), err.Error()) os.Exit(1) } @@ -634,7 +633,7 @@ func verifySingleAppendBlob(testBlobCmd TestBlobCommand) { // getting the shared access signature of the resource. sourceURL, err := url.Parse(testBlobCmd.Subject) if err != nil { - fmt.Println(fmt.Sprintf("Error parsing the blob url source %s", testBlobCmd.Object)) + fmt.Printf("Error parsing the blob url source %s\n", testBlobCmd.Object) os.Exit(1) } @@ -668,17 +667,17 @@ if azblob.AccessTierType(testBlobCmd.BlobTier) != azblob.AccessTierNone { blobProperties, err := appendBlobURL.GetProperties(testCtx, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) if err != nil { - fmt.Println(fmt.Sprintf("error getting the properties of the blob. failed with error %s", err.Error())) + fmt.Printf("error getting the properties of the blob. failed with error %s\n", err.Error()) os.Exit(1) } // If the blob tier does not match the expected blob tier. if !strings.EqualFold(blobProperties.AccessTier(), testBlobCmd.BlobTier) { - fmt.Println(fmt.Sprintf("Access blob tier type %s does not match the expected %s tier type", blobProperties.AccessTier(), testBlobCmd.BlobTier)) + fmt.Printf("Access blob tier type %s does not match the expected %s tier type\n", blobProperties.AccessTier(), testBlobCmd.BlobTier) os.Exit(1) } // Closing the blobProperties response body. 
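+ // Draining the response body before closing it lets the HTTP client reuse the underlying TCP connection instead of leaking it.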
if blobProperties.Response() != nil { - io.Copy(ioutil.Discard, blobProperties.Response().Body) + _, _ = io.Copy(io.Discard, blobProperties.Response().Body) blobProperties.Response().Body.Close() } } @@ -689,7 +688,7 @@ os.Exit(1) } // reading all the bytes downloaded. - blobBytesDownloaded, err := ioutil.ReadAll(get.Body(azblob.RetryReaderOptions{})) + blobBytesDownloaded, err := io.ReadAll(get.Body(azblob.RetryReaderOptions{})) if get.Response().Body != nil { get.Response().Body.Close() } diff --git a/testSuite/cmd/testblobFS.go b/testSuite/cmd/testblobFS.go index 4973617e8..1a0d773ff 100644 --- a/testSuite/cmd/testblobFS.go +++ b/testSuite/cmd/testblobFS.go @@ -100,7 +100,7 @@ func (tbfsc TestBlobFSCommand) verifyRemoteFile() { fileUrl := azbfs.NewFileURL(*subjectUrl, p) dResp, err := fileUrl.Download(context.Background(), 0, 0) if err != nil { - fmt.Println(fmt.Sprintf("error downloading the subject %s. Failed with error %s", fileUrl.String(), err.Error())) + fmt.Printf("error downloading the subject %s. Failed with error %s\n", fileUrl.String(), err.Error()) os.Exit(1) } // get the size of the downloaded file @@ -122,7 +122,7 @@ // If the length of the file at the two locations is not the same // validation has failed if downloadedLength != fInfo.Size() { - fmt.Println(fmt.Sprintf("validation failed because there is difference in the source size %d and destination size %d", fInfo.Size(), downloadedLength)) + fmt.Printf("validation failed because there is a difference in the source size %d and destination size %d\n", fInfo.Size(), downloadedLength) os.Exit(1) } // If the size of the file is 0 both locally and remote @@ -193,11 +193,11 @@ func (tbfsc TestBlobFSCommand) verifyRemoteDir() { // local and remote objectInfo, err := os.Stat(tbfsc.Object) if err != nil { - fmt.Println(fmt.Sprintf("error getting the file info for dir %s. failed with error %s", tbfsc.Object, err.Error())) + fmt.Printf("error getting the file info for dir %s. failed with error %s\n", tbfsc.Object, err.Error()) os.Exit(1) } if !objectInfo.IsDir() { - fmt.Println(fmt.Sprintf("the source provided %s is not a directory path", tbfsc.Object)) + fmt.Printf("the source provided %s is not a directory path\n", tbfsc.Object) os.Exit(1) } // break the remote Url into parts @@ -211,12 +211,12 @@ var firstListing bool = true dResp, err := dirUrl.ListDirectorySegment(context.Background(), &continuationMarker, true) if err != nil { - fmt.Println(fmt.Sprintf("error listing the directory path defined by url %s. Failed with error %s", dirUrl.String(), err.Error())) + fmt.Printf("error listing the directory path defined by url %s. Failed with error %s\n", dirUrl.String(), err.Error()) os.Exit(1) } // numberOfFilesinSubject keeps the count of the number of files at the destination numberOfFilesinSubject := int(0) - for continuationMarker != "" || firstListing == true { + for continuationMarker != "" || firstListing { firstListing = false continuationMarker = dResp.XMsContinuation() files := dResp.Files() @@ -235,13 +235,13 @@ // open the filePath locally and calculate the md5 fpLocal, err := os.Open(filepathLocal) if err != nil { - fmt.Println(fmt.Sprintf("error opening the file %s. failed with error %s", filepathLocal, err.Error())) + fmt.Printf("error opening the file %s. 
failed with error %s\n", filepathLocal, err.Error()) os.Exit(1) } // Get the fileInfo to get size. fpLocalInfo, err := fpLocal.Stat() if err != nil { - fmt.Println(fmt.Sprintf("error getting the file info for file %s. failed with error %s", filepathLocal, err.Error())) + fmt.Printf("error getting the file info for file %s. failed with error %s\n", filepathLocal, err.Error()) os.Exit(1) } // Check the size of file @@ -260,7 +260,7 @@ // memory map the file fpMMf, err := NewMMF(fpLocal, false, 0, fpLocalInfo.Size()) if err != nil { - fmt.Println(fmt.Sprintf("error memory mapping the file %s. failed with error %s", filepathLocal, err.Error())) + fmt.Printf("error memory mapping the file %s. failed with error %s\n", filepathLocal, err.Error()) os.Exit(1) } @@ -274,7 +274,7 @@ fileUrl := azbfs.NewFileURL(tempUrlParts.URL(), p) fResp, err := fileUrl.Download(context.Background(), 0, 0) if err != nil { - fmt.Println(fmt.Sprintf("error downloading the file %s. failed with error %s", fileUrl.String(), err.Error())) + fmt.Printf("error downloading the file %s. failed with error %s\n", fileUrl.String(), err.Error()) os.Exit(1) } downloadedBuffer := make([]byte, *file.ContentLength) // byte buffer in which file will be downloaded to @@ -287,7 +287,7 @@ // calculate the downloaded file Md5 subjMd5 := md5.Sum(downloadedBuffer) if objMd5 != subjMd5 { - fmt.Println(fmt.Sprintf("source file %s doesn't match the remote file %s", filepathLocal, fileUrl.String())) + fmt.Printf("source file %s doesn't match the remote file %s\n", filepathLocal, fileUrl.String()) os.Exit(1) } } @@ -304,15 +304,15 @@ return nil }) if err != nil { - fmt.Println(fmt.Sprintf("validation failed with error %s walking inside the source %s", err.Error(), tbfsc.Object)) + fmt.Printf("validation failed with error %s walking inside the source %s\n", err.Error(), tbfsc.Object) os.Exit(1) } // If the number of files inside the directories locally and remote // is not the same, validation fails. if numberOFFilesInObject != numberOfFilesinSubject { - fmt.Println(fmt.Sprintf("validation failed since there is difference in the number of files in source and destination")) + fmt.Println("validation failed since there is a difference in the number of files in source and destination") os.Exit(1) } - fmt.Println(fmt.Sprintf("successfully validated the source %s and destination %s", tbfsc.Object, tbfsc.Subject)) + fmt.Printf("successfully validated the source %s and destination %s\n", tbfsc.Object, tbfsc.Subject) } diff --git a/testSuite/cmd/testfile.go b/testSuite/cmd/testfile.go index af5e515af..735f2b005 100644 --- a/testSuite/cmd/testfile.go +++ b/testSuite/cmd/testfile.go @@ -4,7 +4,7 @@ import ( "context" "crypto/md5" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "os" @@ -132,7 +132,7 @@ func validateAzureDirWithLocalFile(curAzureDirURL azfile.DirectoryURL, baseAzure // look for all files in the current directory listFile, err := curAzureDirURL.ListFilesAndDirectoriesSegment(context.Background(), marker, azfile.ListFilesAndDirectoriesOptions{}) if err != nil { - // fmt.Println(fmt.Sprintf("fail to list files and directories inside the directory. 
Please check the directory sas, %v\n", err) os.Exit(1) } @@ -149,23 +149,23 @@ func validateAzureDirWithLocalFile(curAzureDirURL azfile.DirectoryURL, baseAzure get, err := curFileURL.Download(context.Background(), 0, azfile.CountToEnd, false) if err != nil { - fmt.Println(fmt.Sprintf("fail to download the file %s", fileInfo.Name)) + fmt.Printf("fail to download the file %s\n", fileInfo.Name) os.Exit(1) } retryReader := get.Body(azfile.RetryReaderOptions{MaxRetryRequests: 3}) // read all bytes. - fileBytesDownloaded, err := ioutil.ReadAll(retryReader) + fileBytesDownloaded, err := io.ReadAll(retryReader) if err != nil { - fmt.Println(fmt.Sprintf("fail to read the body of file %s downloaded and failed with error %s", fileInfo.Name, err.Error())) + fmt.Printf("fail to read the body of file %s downloaded and failed with error %s\n", fileInfo.Name, err.Error()) os.Exit(1) } retryReader.Close() tokens := strings.SplitAfterN(curFileURL.URL().Path, baseAzureDirPath, 2) if len(tokens) < 2 { - fmt.Println(fmt.Sprintf("fail to get sub directory and file name, file URL '%s', original dir path '%s'", curFileURL.String(), baseAzureDirPath)) + fmt.Printf("fail to get sub directory and file name, file URL '%s', original dir path '%s'\n", curFileURL.String(), baseAzureDirPath) os.Exit(1) } @@ -232,7 +232,7 @@ func validateMetadataForFile(expectedMetaDataString string, actualMetaData azfil // iterating through each key value pair of actual metaData and comparing the key value pair in expected metadata for key, value := range actualMetaData { if expectedMetaData[key] != value { - fmt.Println(fmt.Sprintf("value of user given key %s is %s in actual data while it is %s in expected metadata", key, value, expectedMetaData[key])) + fmt.Printf("value of user given key %s is %s in actual data while it is %s in expected metadata\n", key, value, expectedMetaData[key]) return false } } @@ -261,7 +261,7 @@ func verifySingleFileUpload(testFileCmd TestFileCommand) { // getting the shared access signature of the resource. sourceURL, err := url.Parse(testFileCmd.Subject) if err != nil { - // fmt.Println(fmt.Sprintf("Error parsing the file url source %s", testFileCmd.Object)) + // fmt.Printf("Error parsing the file url source %s\n", testFileCmd.Object) os.Exit(1) } @@ -277,7 +277,7 @@ func verifySingleFileUpload(testFileCmd TestFileCommand) { // reading all the bytes downloaded. 
retryReader := get.Body(azfile.RetryReaderOptions{MaxRetryRequests: 3}) defer retryReader.Close() - fileBytesDownloaded, err := ioutil.ReadAll(retryReader) + fileBytesDownloaded, err := io.ReadAll(retryReader) if err != nil { fmt.Println("error reading the bytes from response and failed with error ", err.Error()) os.Exit(1) @@ -287,7 +287,7 @@ // If the fileSize is 0 and the len of downloaded bytes is not 0 // validation fails if len(fileBytesDownloaded) != 0 { - fmt.Println(fmt.Sprintf("validation failed since the actual file size %d differs from the downloaded file size %d", fileInfo.Size(), len(fileBytesDownloaded))) + fmt.Printf("validation failed since the actual file size %d differs from the downloaded file size %d\n", fileInfo.Size(), len(fileBytesDownloaded)) os.Exit(1) } // If both the actual and downloaded file size is 0, diff --git a/testSuite/cmd/upload.go b/testSuite/cmd/upload.go index fe6c75843..826af1c4d 100644 --- a/testSuite/cmd/upload.go +++ b/testSuite/cmd/upload.go @@ -146,7 +146,7 @@ func (u *testUploader) uploadToGCP() { } obj := gcpClient.Bucket(gcpURLPartsForFile.BucketName).Object(gcpURLPartsForFile.ObjectKey) wc := obj.NewWriter(context.Background()) - _, err = io.Copy(wc, f) + _, _ = io.Copy(wc, f) err = wc.Close() if err != nil { return err } @@ -262,7 +262,6 @@ func getRelativePath(rootPath, filePath string) string { if len(rootPath) == 0 { return filePath } - result := filePath // replace the path separator in filepath with AZCOPY_PATH_SEPARATOR // this replacement is required to handle the windows filepath @@ -276,7 +275,7 @@ scrubAway = rootPath[:strings.LastIndex(rootPath, common.AZCOPY_PATH_SEPARATOR_STRING)+1] } - result = strings.Replace(filePath, scrubAway, "", 1) + result := strings.Replace(filePath, scrubAway, "", 1) return result } diff --git a/testSuite/scripts/test_blob_download.py b/testSuite/scripts/test_blob_download.py index 0e0fa9f3c..24642c1af 100644 --- a/testSuite/scripts/test_blob_download.py +++ b/testSuite/scripts/test_blob_download.py @@ -41,6 +41,29 @@ def test_download_1kb_blob_to_null(self): dst = os.devnull result = util.Command("copy").add_arguments(src).add_arguments(dst).add_flags("log-level", "info") + def test_download_1kb_blob_to_root(self): + # create file of size 1kb + filename = "test_1kb_blob_upload_download_null.txt" + file_path = util.create_test_file(filename, 1024) + + # upload 1kb using azcopy + src = file_path + dst = util.test_container_url + result = util.Command("copy").add_arguments(src).add_arguments(dst). \ + add_flags("log-level", "info").execute_azcopy_copy_command() + self.assertTrue(result) + + # verify the uploaded blob + resource_url = util.get_resource_sas(filename) + result = util.Command("testBlob").add_arguments(file_path).add_arguments(resource_url).execute_azcopy_verify() + self.assertTrue(result) + + # downloading the uploaded blob to the filesystem root + # note we have no tests to verify the success of check-md5. TODO: remove this when fault induction is introduced + src = util.get_resource_sas(filename) + dst = "/" + result = util.Command("copy").add_arguments(src).add_arguments(dst).add_flags("log-level", "info") + # test_download_1kb_blob verifies the download of 1Kb blob using azcopy. def test_download_1kb_blob(self): # create file of size 1KB. 
diff --git a/testSuite/scripts/utility.py b/testSuite/scripts/utility.py index 8b25559a4..13eb28894 100644 --- a/testSuite/scripts/utility.py +++ b/testSuite/scripts/utility.py @@ -100,7 +100,7 @@ def execute_azcopy_info(self): def execute_testsuite_upload(self): return verify_operation(self.string()) -# processes oauth command according to swtiches +# processes oauth command according to switches def process_oauth_command( cmd, fromTo=""): diff --git a/website/src/index.html b/website/src/index.html index 20a0acac0..3649f22e5 100644 --- a/website/src/index.html +++ b/website/src/index.html @@ -184,11 +184,11 @@