diff --git a/cmd/podman/artifact/push.go b/cmd/podman/artifact/push.go index e20315fea4..63b32c3812 100644 --- a/cmd/podman/artifact/push.go +++ b/cmd/podman/artifact/push.go @@ -19,12 +19,13 @@ import ( // CLI-only fields into the API types. type pushOptionsWrapper struct { entities.ArtifactPushOptions - TLSVerifyCLI bool // CLI only - CredentialsCLI string - signing common.SigningCLIOnlyOptions - EncryptionKeys []string - EncryptLayers []int - DigestFile string + TLSVerifyCLI bool // CLI only + CredentialsCLI string + signing common.SigningCLIOnlyOptions + EncryptionKeys []string + EncryptLayers []int + DigestFile string + DigestAlgorithm string } var ( @@ -89,6 +90,10 @@ func pushFlags(cmd *cobra.Command) { flags.BoolVar(&pushOptions.TLSVerifyCLI, "tls-verify", true, "Require HTTPS and verify certificates when contacting registries") + digestFlagName := "digest" + flags.StringVar(&pushOptions.DigestAlgorithm, digestFlagName, "", "Digest algorithm to use for content addressing (sha256, sha512). Defaults to value from storage.conf or sha256") + _ = cmd.RegisterFlagCompletionFunc(digestFlagName, completion.AutocompleteNone) + // TODO I think these two can be removed? /* compFormat := "compression-format" @@ -203,6 +208,14 @@ func artifactPush(cmd *cobra.Command, args []string) error { } */ + // Validate and copy the digest algorithm from CLI wrapper to ArtifactPushOptions + if pushOptions.DigestAlgorithm != "" { + if pushOptions.DigestAlgorithm != "sha256" && pushOptions.DigestAlgorithm != "sha512" { + return fmt.Errorf("invalid digest algorithm %q: must be sha256 or sha512", pushOptions.DigestAlgorithm) + } + } + pushOptions.ArtifactPushOptions.ImagePushOptions.DigestAlgorithm = pushOptions.DigestAlgorithm + _, err = registry.ImageEngine().ArtifactPush(registry.Context(), source, pushOptions.ArtifactPushOptions) return err } diff --git a/cmd/podman/common/build.go b/cmd/podman/common/build.go index 804977e185..12334cea24 100644 --- a/cmd/podman/common/build.go +++ b/cmd/podman/common/build.go @@ -47,6 +47,8 @@ type BuildFlagsWrapper struct { SquashAll bool // Cleanup removes built images from remote connections on success Cleanup bool + // DigestAlgorithm specifies the digest algorithm to use for content addressing + DigestAlgorithm string } // FarmBuildHiddenFlags are the flags hidden from the farm build command because they are either not @@ -115,6 +117,11 @@ func DefineBuildFlags(cmd *cobra.Command, buildOpts *BuildFlagsWrapper, isFarmBu fromAndBudFlagsCompletions := buildahCLI.GetFromAndBudFlagsCompletions() completion.CompleteCommandFlags(cmd, fromAndBudFlagsCompletions) flags.SetNormalizeFunc(buildahCLI.AliasFlags) + + // Digest algorithm flag + digestFlagName := "digest" + flags.StringVar(&buildOpts.DigestAlgorithm, digestFlagName, "", "Digest algorithm to use for content addressing (sha256, sha512). Defaults to value from storage.conf or sha256") + _ = cmd.RegisterFlagCompletionFunc(digestFlagName, completion.AutocompleteNone) if registry.IsRemote() { // Unset the isolation default as we never want to send this over the API // as it can be wrong (root vs rootless). 
@@ -263,6 +270,16 @@ func ParseBuildOpts(cmd *cobra.Command, args []string, buildOpts *BuildFlagsWrap apiBuildOpts.ContainerFiles = containerFiles apiBuildOpts.Authfile = buildOpts.Authfile + // Validate and process digest algorithm + if buildOpts.DigestAlgorithm != "" { + if buildOpts.DigestAlgorithm != "sha256" && buildOpts.DigestAlgorithm != "sha512" { + return nil, fmt.Errorf("invalid digest algorithm %q: must be sha256 or sha512", buildOpts.DigestAlgorithm) + } + // Store the digest algorithm preference in the apiBuildOpts + // This will be used to configure the build environment + apiBuildOpts.DigestAlgorithm = buildOpts.DigestAlgorithm + } + return &apiBuildOpts, err } diff --git a/cmd/podman/images/list.go b/cmd/podman/images/list.go index d7f429e091..68b4090679 100644 --- a/cmd/podman/images/list.go +++ b/cmd/podman/images/list.go @@ -343,7 +343,21 @@ func (i imageReporter) ID() string { if !listFlag.noTrunc && len(i.ImageSummary.ID) >= 12 { return i.ImageSummary.ID[0:12] } - return "sha256:" + i.ImageSummary.ID + + // Determine digest algorithm based on ID length + // SHA256 = 64 hex chars, SHA512 = 128 hex chars + var prefix string + switch len(i.ImageSummary.ID) { + case 128: + prefix = "sha512:" + case 64: + prefix = "sha256:" + default: + // For unknown lengths, default to sha256 for backward compatibility + prefix = "sha256:" + } + + return prefix + i.ImageSummary.ID } func (i imageReporter) Created() string { diff --git a/cmd/podman/images/push.go b/cmd/podman/images/push.go index 953ee28f47..4740b41445 100644 --- a/cmd/podman/images/push.go +++ b/cmd/podman/images/push.go @@ -19,12 +19,13 @@ import ( // CLI-only fields into the API types. type pushOptionsWrapper struct { entities.ImagePushOptions - TLSVerifyCLI bool // CLI only - CredentialsCLI string - signing common.SigningCLIOnlyOptions - EncryptionKeys []string - EncryptLayers []int - DigestFile string + TLSVerifyCLI bool // CLI only + CredentialsCLI string + signing common.SigningCLIOnlyOptions + EncryptionKeys []string + EncryptLayers []int + DigestFile string + DigestAlgorithm string } var ( @@ -137,6 +138,10 @@ func pushFlags(cmd *cobra.Command) { flags.IntSliceVar(&pushOptions.EncryptLayers, encryptLayersFlagName, nil, "Layers to encrypt, 0-indexed layer indices with support for negative indexing (e.g. 0 is the first layer, -1 is the last layer). If not defined, will encrypt all layers if encryption-key flag is specified") _ = cmd.RegisterFlagCompletionFunc(encryptLayersFlagName, completion.AutocompleteDefault) + digestFlagName := "digest" + flags.StringVar(&pushOptions.DigestAlgorithm, digestFlagName, "", "Digest algorithm to use for content addressing (sha256, sha512). Defaults to value from storage.conf or sha256") + _ = cmd.RegisterFlagCompletionFunc(digestFlagName, completion.AutocompleteNone) + if registry.IsRemote() { _ = flags.MarkHidden("cert-dir") _ = flags.MarkHidden("compress") @@ -229,6 +234,14 @@ func imagePush(cmd *cobra.Command, args []string) error { } } + // Validate and copy the digest algorithm from CLI wrapper to ImagePushOptions + if pushOptions.DigestAlgorithm != "" { + if pushOptions.DigestAlgorithm != "sha256" && pushOptions.DigestAlgorithm != "sha512" { + return fmt.Errorf("invalid digest algorithm %q: must be sha256 or sha512", pushOptions.DigestAlgorithm) + } + } + pushOptions.ImagePushOptions.DigestAlgorithm = pushOptions.DigestAlgorithm + // Let's do all the remaining Yoga in the API to prevent us from scattering // logic across (too) many parts of the code. 
report, err := registry.ImageEngine().Push(registry.Context(), source, destination, pushOptions.ImagePushOptions) diff --git a/cmd/podman/manifest/push.go b/cmd/podman/manifest/push.go index 0c0942ad38..97409d26d2 100644 --- a/cmd/podman/manifest/push.go +++ b/cmd/podman/manifest/push.go @@ -25,6 +25,7 @@ type manifestPushOptsWrapper struct { CredentialsCLI string signing common.SigningCLIOnlyOptions DigestFile string + DigestAlgorithm string } var ( @@ -96,6 +97,10 @@ func init() { flags.Int(compressionLevel, 0, "compression level to use") _ = pushCmd.RegisterFlagCompletionFunc(compressionLevel, completion.AutocompleteNone) + digestFlagName := "digest" + flags.StringVar(&manifestPushOpts.DigestAlgorithm, digestFlagName, "", "Digest algorithm to use for content addressing (sha256, sha512). Defaults to value from storage.conf or sha256") + _ = pushCmd.RegisterFlagCompletionFunc(digestFlagName, completion.AutocompleteNone) + if registry.IsRemote() { _ = flags.MarkHidden("cert-dir") } @@ -165,6 +170,14 @@ func push(cmd *cobra.Command, args []string) error { } } + // Validate and copy the digest algorithm from CLI wrapper to ImagePushOptions + if manifestPushOpts.DigestAlgorithm != "" { + if manifestPushOpts.DigestAlgorithm != "sha256" && manifestPushOpts.DigestAlgorithm != "sha512" { + return fmt.Errorf("invalid digest algorithm %q: must be sha256 or sha512", manifestPushOpts.DigestAlgorithm) + } + } + manifestPushOpts.ImagePushOptions.DigestAlgorithm = manifestPushOpts.DigestAlgorithm + digest, err := registry.ImageEngine().ManifestPush(registry.Context(), listImageSpec, destSpec, manifestPushOpts.ImagePushOptions) if err != nil { return err diff --git a/go.mod b/go.mod index e3572acef6..c78530f6af 100644 --- a/go.mod +++ b/go.mod @@ -3,14 +3,14 @@ module github.com/containers/podman/v5 // Warning: if there is a "toolchain" directive anywhere in this file (and most of the // time there shouldn't be), its version must be an exact match to the "go" directive. 
-go 1.24.0 +go 1.24.2 require ( github.com/Microsoft/go-winio v0.6.2 github.com/blang/semver/v4 v4.0.0 github.com/checkpoint-restore/checkpointctl v1.4.0 github.com/checkpoint-restore/go-criu/v7 v7.2.0 - github.com/containernetworking/plugins v1.7.1 + github.com/containernetworking/plugins v1.8.0 github.com/containers/buildah v1.41.1-0.20250829135344-3367a9bc2c9f github.com/containers/conmon v2.0.20+incompatible github.com/containers/gvisor-tap-vsock v0.8.7 @@ -55,7 +55,7 @@ require ( github.com/opencontainers/runtime-spec v1.2.1 github.com/opencontainers/runtime-tools v0.9.1-0.20250523060157-0ea5ed0382a2 github.com/opencontainers/selinux v1.12.0 - github.com/openshift/imagebuilder v1.2.16-0.20250828154754-e22ebd3ff511 + github.com/openshift/imagebuilder v1.2.17 github.com/rootless-containers/rootlesskit/v2 v2.3.5 github.com/shirou/gopsutil/v4 v4.25.8 github.com/sirupsen/logrus v1.9.3 @@ -110,7 +110,7 @@ require ( github.com/ebitengine/purego v0.8.4 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect - github.com/fsouza/go-dockerclient v1.12.1 // indirect + github.com/fsouza/go-dockerclient v1.12.2 // indirect github.com/go-jose/go-jose/v4 v4.0.5 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -119,9 +119,9 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-cmp v0.7.0 // indirect - github.com/google/go-containerregistry v0.20.3 // indirect + github.com/google/go-containerregistry v0.20.6 // indirect github.com/google/go-intervals v0.0.2 // indirect - github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect + github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.8 // indirect @@ -136,7 +136,7 @@ require ( github.com/mdlayher/socket v0.5.1 // indirect github.com/miekg/pkcs11 v1.1.1 // indirect github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect - github.com/moby/buildkit v0.23.2 // indirect + github.com/moby/buildkit v0.24.0 // indirect github.com/moby/go-archive v0.1.0 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/mountinfo v0.7.2 // indirect @@ -145,7 +145,7 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/morikuni/aec v1.0.0 // indirect - github.com/opencontainers/runc v1.3.0 // indirect + github.com/opencontainers/runc v1.3.1 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pkg/sftp v1.13.9 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect @@ -156,14 +156,14 @@ require ( github.com/seccomp/libseccomp-golang v0.11.1 // indirect github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect github.com/segmentio/ksuid v1.0.4 // indirect - github.com/sigstore/fulcio v1.6.6 // indirect + github.com/sigstore/fulcio v1.7.1 // indirect github.com/sigstore/protobuf-specs v0.4.1 // indirect github.com/sigstore/sigstore v1.9.5 // indirect github.com/skeema/knownhosts v1.3.1 // indirect github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect github.com/smallstep/pkcs7 v0.1.1 // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect - github.com/sylabs/sif/v2 v2.21.1 // indirect + 
github.com/sylabs/sif/v2 v2.22.0 // indirect github.com/tchap/go-patricia/v2 v2.3.3 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect github.com/tklauser/go-sysconf v0.3.15 // indirect @@ -174,21 +174,29 @@ require ( github.com/vishvananda/netns v0.0.5 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect - go.opentelemetry.io/otel v1.35.0 // indirect - go.opentelemetry.io/otel/metric v1.35.0 // indirect - go.opentelemetry.io/otel/trace v1.35.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect + go.opentelemetry.io/otel v1.36.0 // indirect + go.opentelemetry.io/otel/metric v1.36.0 // indirect + go.opentelemetry.io/otel/trace v1.36.0 // indirect go.uber.org/automaxprocs v1.6.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/mod v0.27.0 // indirect - golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/oauth2 v0.31.0 // indirect golang.org/x/text v0.29.0 // indirect golang.org/x/time v0.11.0 // indirect golang.org/x/tools v0.36.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e // indirect google.golang.org/grpc v1.72.2 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect tags.cncf.io/container-device-interface/specs-go v1.0.0 // indirect ) + +replace go.podman.io/common => /home/lsm5/repositories/containers/container-libs/common/ + +replace go.podman.io/image/v5 => /home/lsm5/repositories/containers/container-libs/image + +replace go.podman.io/storage => /home/lsm5/repositories/containers/container-libs/storage + +replace github.com/containers/buildah => /home/lsm5/repositories/containers/buildah diff --git a/go.sum b/go.sum index e0b17043db..4e035b2927 100644 --- a/go.sum +++ b/go.sum @@ -53,10 +53,8 @@ github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++ github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= github.com/containernetworking/cni v1.3.0 h1:v6EpN8RznAZj9765HhXQrtXgX+ECGebEYEmnuFjskwo= github.com/containernetworking/cni v1.3.0/go.mod h1:Bs8glZjjFfGPHMw6hQu82RUgEPNGEaBb9KS5KtNMnJ4= -github.com/containernetworking/plugins v1.7.1 h1:CNAR0jviDj6FS5Vg85NTgKWLDzZPfi/lj+VJfhMDTIs= -github.com/containernetworking/plugins v1.7.1/go.mod h1:xuMdjuio+a1oVQsHKjr/mgzuZ24leAsqUYRnzGoXHy0= -github.com/containers/buildah v1.41.1-0.20250829135344-3367a9bc2c9f h1:t2zdi9mHtJoGmRMXa3i+oD/7xlYHIgoA+/Jtd0Ysf6c= -github.com/containers/buildah v1.41.1-0.20250829135344-3367a9bc2c9f/go.mod h1:LtwfkfBed4dUOFTcBG+O+9Vcu5znw/PLYWDJ1mieHic= +github.com/containernetworking/plugins v1.8.0 h1:WjGbV/0UQyo8A4qBsAh6GaDAtu1hevxVxsEuqtBqUFk= +github.com/containernetworking/plugins v1.8.0/go.mod h1:JG3BxoJifxxHBhG3hFyxyhid7JgRVBu/wtooGEvWf1c= github.com/containers/common v0.62.2 h1:xO45OOoeq17EZMIDZoSyRqg7GXGcRHa9sXlrr75zH+U= github.com/containers/common v0.62.2/go.mod h1:veFiR9iq2j3CHXtB4YnPHuOkSRdhIQ3bAY8AFMP/5bE= github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg= @@ -102,8 +100,8 @@ 
github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWh github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v28.3.3+incompatible h1:fp9ZHAr1WWPGdIWBM1b3zLtgCF+83gRdVMTJsUeiyAo= -github.com/docker/cli v28.3.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v28.4.0+incompatible h1:RBcf3Kjw2pMtwui5V0DIMdyeab8glEw5QY0UUU4C9kY= +github.com/docker/cli v28.4.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v28.4.0+incompatible h1:KVC7bz5zJY/4AZe/78BIvCnPsLaC9T/zh72xnlrTTOk= @@ -129,8 +127,8 @@ github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= -github.com/fsouza/go-dockerclient v1.12.1 h1:FMoLq+Zhv9Oz/rFmu6JWkImfr6CBgZOPcL+bHW4gS0o= -github.com/fsouza/go-dockerclient v1.12.1/go.mod h1:OqsgJJcpCwqyM3JED7TdfM9QVWS5O7jSYwXxYKmOooY= +github.com/fsouza/go-dockerclient v1.12.2 h1:+pbP/SacoHfqaVZuiudvcdYGd9jzU7y9EcgoBOHivEI= +github.com/fsouza/go-dockerclient v1.12.2/go.mod h1:ZGCkAsnBGjnTRG9wV6QaICPJ5ig2KlaxTccDQy5WQ38= github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -156,15 +154,15 @@ github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI= -github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI= +github.com/google/go-containerregistry v0.20.6 h1:cvWX87UxxLgaH76b4hIvya6Dzz9qHB31qAwjAohdSTU= +github.com/google/go-containerregistry v0.20.6/go.mod h1:T0x8MuoAoKX/873bkeSfLD2FAkwCDf9/HZgsFJ02E2Y= github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM= github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/goterm v0.0.0-20200907032337-555d40f16ae2 h1:CVuJwN34x4xM2aT4sIKhmeib40NeBPhRihNjQmpJsA4= github.com/google/goterm v0.0.0-20200907032337-555d40f16ae2/go.mod h1:nOFQdrUlIlx6M6ODdSpBj1NVA+VgLC6kmw60mkw34H4= -github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= -github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= +github.com/google/pprof 
v0.0.0-20250820193118-f64d9cf942d6 h1:EEHtgt9IwisQ2AZ4pIsMjahcegHh6rmhqxzIRQIyepY= +github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -176,8 +174,8 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/schema v1.4.1 h1:jUg5hUjCSDZpNGLuXQOgIWGdlgrIdYvgQ0wZtdK1M3E= github.com/gorilla/schema v1.4.1/go.mod h1:Dg5SSm5PV60mhF2NFaTV1xuYYj8tV8NOPRo4FggUMnM= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -249,8 +247,8 @@ github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mistifyio/go-zfs/v3 v3.0.1 h1:YaoXgBePoMA12+S1u/ddkv+QqxcfiZK4prI6HPnkFiU= github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= -github.com/moby/buildkit v0.23.2 h1:gt/dkfcpgTXKx+B9I310kV767hhVqTvEyxGgI3mqsGQ= -github.com/moby/buildkit v0.23.2/go.mod h1:iEjAfPQKIuO+8y6OcInInvzqTMiKMbb2RdJz1K/95a0= +github.com/moby/buildkit v0.24.0 h1:qYfTl7W1SIJzWDIDCcPT8FboHIZCYfi++wvySi3eyFE= +github.com/moby/buildkit v0.24.0/go.mod h1:4qovICAdR2H4C7+EGMRva5zgHW1gyhT4/flHI7F5F9k= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ= @@ -292,16 +290,16 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= -github.com/opencontainers/runc v1.3.0 h1:cvP7xbEvD0QQAs0nZKLzkVog2OPZhI/V2w3WmTmUSXI= -github.com/opencontainers/runc v1.3.0/go.mod h1:9wbWt42gV+KRxKRVVugNP6D5+PQciRbenB4fLVsqGPs= +github.com/opencontainers/runc v1.3.1 h1:c/yY0oh2wK7tzDuD56REnSxyU8ubh8hoAIOLGLrm4SM= +github.com/opencontainers/runc v1.3.1/go.mod h1:9wbWt42gV+KRxKRVVugNP6D5+PQciRbenB4fLVsqGPs= github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww= github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.9.1-0.20250523060157-0ea5ed0382a2 
h1:2xZEHOdeQBV6PW8ZtimN863bIOl7OCW/X10K0cnxKeA= github.com/opencontainers/runtime-tools v0.9.1-0.20250523060157-0ea5ed0382a2/go.mod h1:MXdPzqAA8pHC58USHqNCSjyLnRQ6D+NjbpP+02Z1U/0= github.com/opencontainers/selinux v1.12.0 h1:6n5JV4Cf+4y0KNXW48TLj5DwfXpvWlxXplUkdTrmPb8= github.com/opencontainers/selinux v1.12.0/go.mod h1:BTPX+bjVbWGXw7ZZWUbdENt8w0htPSrlgOOysQaU62U= -github.com/openshift/imagebuilder v1.2.16-0.20250828154754-e22ebd3ff511 h1:8pU6rEt+HyVdXlszfbWIwUDf8THLXvjXX5n+5EkxlW8= -github.com/openshift/imagebuilder v1.2.16-0.20250828154754-e22ebd3ff511/go.mod h1:I9FlC4LVo0z/8GM8wdWVhxmw3tUVNM6tiwb8tRv4jvQ= +github.com/openshift/imagebuilder v1.2.17 h1:xusHiBvK7PpBsEeMGTg61Zx5kSajybjUAbBzVEJKH6g= +github.com/openshift/imagebuilder v1.2.17/go.mod h1:I9FlC4LVo0z/8GM8wdWVhxmw3tUVNM6tiwb8tRv4jvQ= github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -323,8 +321,8 @@ github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/ github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= -github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= +github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k= +github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= @@ -337,20 +335,20 @@ github.com/rootless-containers/rootlesskit/v2 v2.3.5/go.mod h1:83EIYLeMX8UeNgLHk github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ= github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= -github.com/sebdah/goldie/v2 v2.5.5 h1:rx1mwF95RxZ3/83sdS4Yp7t2C5TCokvWP4TBRbAyEWY= -github.com/sebdah/goldie/v2 v2.5.5/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI= +github.com/sebdah/goldie/v2 v2.7.1 h1:PkBHymaYdtvEkZV7TmyqKxdmn5/Vcj+8TpATWZjnG5E= +github.com/sebdah/goldie/v2 v2.7.1/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI= github.com/seccomp/libseccomp-golang v0.11.1 h1:wuk4ZjSx6kyQII4rj6G6fvVzRHQaSiPvccJazDagu4g= github.com/seccomp/libseccomp-golang v0.11.1/go.mod h1:5m1Lk8E9OwgZTTVz4bBOer7JuazaBa+xTkM895tDiWc= github.com/secure-systems-lab/go-securesystemslib v0.9.1 h1:nZZaNz4DiERIQguNy0cL5qTdn9lR8XKHf4RUyG1Sx3g= github.com/secure-systems-lab/go-securesystemslib v0.9.1/go.mod h1:np53YzT0zXGMv6x4iEWc9Z59uR+x+ndLwCLqPYpLXVU= github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= -github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= -github.com/sergi/go-diff v1.3.1/go.mod 
h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shirou/gopsutil/v4 v4.25.8 h1:NnAsw9lN7587WHxjJA9ryDnqhJpFH6A+wagYWTOH970= github.com/shirou/gopsutil/v4 v4.25.8/go.mod h1:q9QdMmfAOVIw7a+eF86P7ISEU6ka+NLgkUxlopV4RwI= -github.com/sigstore/fulcio v1.6.6 h1:XaMYX6TNT+8n7Npe8D94nyZ7/ERjEsNGFC+REdi/wzw= -github.com/sigstore/fulcio v1.6.6/go.mod h1:BhQ22lwaebDgIxVBEYOOqLRcN5+xOV+C9bh/GUXRhOk= +github.com/sigstore/fulcio v1.7.1 h1:RcoW20Nz49IGeZyu3y9QYhyyV3ZKQ85T+FXPKkvE+aQ= +github.com/sigstore/fulcio v1.7.1/go.mod h1:7lYY+hsd8Dt+IvKQRC+KEhWpCZ/GlmNvwIa5JhypMS8= github.com/sigstore/protobuf-specs v0.4.1 h1:5SsMqZbdkcO/DNHudaxuCUEjj6x29tS2Xby1BxGU7Zc= github.com/sigstore/protobuf-specs v0.4.1/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= github.com/sigstore/sigstore v1.9.5 h1:Wm1LT9yF4LhQdEMy5A2JeGRHTrAWGjT3ubE5JUSrGVU= @@ -379,8 +377,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -github.com/sylabs/sif/v2 v2.21.1 h1:GZ0b5//AFAqJEChd8wHV/uSKx/l1iuGYwjR8nx+4wPI= -github.com/sylabs/sif/v2 v2.21.1/go.mod h1:YoqEGQnb5x/ItV653bawXHZJOXQaEWpGwHsSD3YePJI= +github.com/sylabs/sif/v2 v2.22.0 h1:Y+xXufp4RdgZe02SR3nWEg7S6q4tPWN237WHYzkDSKA= +github.com/sylabs/sif/v2 v2.22.0/go.mod h1:W1XhWTmG1KcG7j5a3KSYdMcUIFvbs240w/MMVW627hs= github.com/tchap/go-patricia/v2 v2.3.3 h1:xfNEsODumaEcCcY3gI0hYPZ/PcpVv5ju6RMAhgwZDDc= github.com/tchap/go-patricia/v2 v2.3.3/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= @@ -431,30 +429,24 @@ go.etcd.io/bbolt v1.4.3 h1:dEadXpI6G79deX5prL3QRNP6JB8UxVkqo4UPnHaNXJo= go.etcd.io/bbolt v1.4.3/go.mod h1:tKQlpPaYCVFctUIgFKFnAlvbmB3tpy1vkTnDWohtc0E= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= -go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= -go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 
h1:xJ2qHD0C1BeYVTLLR9sX12+Qb95kfeD/byKj6Ky1pXg= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk= -go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= -go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= -go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= -go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= -go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= -go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= -go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= -go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= +go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= -go.podman.io/common v0.65.0 h1:8JNl25U4VpKDkFHSymSPm4te7ZQHJbfAB/l2FqtmYEg= -go.podman.io/common v0.65.0/go.mod h1:+lJu8KHeoDQsD9HDdiFaMaOUiqPLQnK406WuLnqM7Z0= -go.podman.io/image/v5 v5.37.0 h1:yzgQybwuWIIeK63hu+mQqna/wOh96XD5cpVc6j8Dg5M= -go.podman.io/image/v5 v5.37.0/go.mod h1:+s2Sx5dia/jVeT8tI3r2NAPrARMiDdbEq3QPIQogx3I= -go.podman.io/storage v1.60.0 h1:bWNSrR58nxg39VNFDSx3m0AswbvyzPGOo5XsUfomTao= -go.podman.io/storage v1.60.0/go.mod h1:NK+rsWJVuQeCM7ifv7cxD3abegWxwtW/3OkuSUJJoE4= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= @@ -498,8 +490,8 @@ golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I= golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= -golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= -golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= +golang.org/x/oauth2 v0.31.0 h1:8Fq0yVZLh4j4YA47vHKFTa9Ew5XIrCP8LC6UeNZnLxo= +golang.org/x/oauth2 v0.31.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -578,10 
+570,10 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= -google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 h1:iK2jbkWL86DXjEx0qiHcRE9dE4/Ahua5k6V8OWFb//c= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= +google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e h1:UdXH7Kzbj+Vzastr5nVfccbmFsmYNygVLSPk1pEfDoY= +google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e/go.mod h1:085qFyf2+XaZlRdCgKNCIZ3afY2p4HHZdoIRpId8F4A= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e h1:ztQaXfzEXTmCBvbtWYRhJxW+0iJcz2qXfd38/e9l7bA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8= google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= diff --git a/libpod/runtime_img.go b/libpod/runtime_img.go index 98474ee90a..f5bcb6f7cd 100644 --- a/libpod/runtime_img.go +++ b/libpod/runtime_img.go @@ -14,6 +14,7 @@ import ( "github.com/sirupsen/logrus" "go.podman.io/common/libimage" "go.podman.io/image/v5/docker/reference" + "go.podman.io/storage" ) // Runtime API @@ -115,6 +116,11 @@ func (r *Runtime) newImageBuildCompleteEvent(idOrName string) { // Build adds the runtime to the imagebuildah call func (r *Runtime) Build(ctx context.Context, options buildahDefine.BuildOptions, dockerfiles ...string) (string, reference.Canonical, error) { + return r.BuildWithDigest(ctx, options, "", dockerfiles...) +} + +// BuildWithDigest adds the runtime to the imagebuildah call with optional digest algorithm override +func (r *Runtime) BuildWithDigest(ctx context.Context, options buildahDefine.BuildOptions, digestAlgorithm string, dockerfiles ...string) (string, reference.Canonical, error) { if options.Runtime == "" { options.Runtime = r.GetOCIRuntimePath() } @@ -122,7 +128,35 @@ func (r *Runtime) Build(ctx context.Context, options buildahDefine.BuildOptions, // share the network interface between podman and buildah options.NetworkInterface = r.network - id, ref, err := imagebuildah.BuildDockerfiles(ctx, r.store, options, dockerfiles...) 
+
+	// Determine which store to use for the build
+	var buildStore storage.Store
+
+	if digestAlgorithm != "" && digestAlgorithm != r.store.GetDigestType() {
+		// Temporarily modify the store's digest type for the build
+		originalDigestType := r.store.GetDigestType()
+		logrus.Debugf("Temporarily setting store digest algorithm from %s to %s", originalDigestType, digestAlgorithm)
+		r.store.SetDigestType(digestAlgorithm)
+
+		// Ensure we restore the original digest type even if the build fails
+		defer func() {
+			r.store.SetDigestType(originalDigestType)
+			logrus.Debugf("Restored store digest algorithm to %s", originalDigestType)
+		}()
+
+		buildStore = r.store
+	} else {
+		// Use the existing store
+		buildStore = r.store
+		if digestAlgorithm != "" {
+			logrus.Debugf("Using existing store digest algorithm: %s (matches requested)", digestAlgorithm)
+		}
+	}
+
+	// No separate store is created here; the build reuses r.store with its
+	// digest type temporarily overridden above.
+
+	id, ref, err := imagebuildah.BuildDockerfiles(ctx, buildStore, options, dockerfiles...)
 	// Write event for build completion
 	r.newImageBuildCompleteEvent(id)
 	return id, ref, err
diff --git a/pkg/domain/entities/images.go b/pkg/domain/entities/images.go
index 491f153277..3c329d1526 100644
--- a/pkg/domain/entities/images.go
+++ b/pkg/domain/entities/images.go
@@ -162,6 +162,9 @@ type ImagePushOptions struct {
 	// CompressionFormat is used exclusively, and blobs of other compression
 	// algorithms are not reused.
 	ForceCompressionFormat bool
+	// DigestAlgorithm specifies the digest algorithm to use for content addressing.
+	// If empty, defaults to the digest algorithm configured in storage.conf or SHA256.
+	DigestAlgorithm string
 }
 
 // ImagePushReport is the response from pushing an image.
diff --git a/pkg/domain/entities/types/types.go b/pkg/domain/entities/types/types.go
index e9bbb14d61..549e02bb5b 100644
--- a/pkg/domain/entities/types/types.go
+++ b/pkg/domain/entities/types/types.go
@@ -68,6 +68,8 @@ type BuildOptions struct {
 	// so need to pass this to the main build functions
 	LogFileToClose *os.File
 	TmpDirToClose  string
+	// DigestAlgorithm specifies the digest algorithm to use for content addressing
+	DigestAlgorithm string
 }
 
 // BuildReport is the image-build report.
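The sha256/sha512 check above is repeated at each CLI call site (artifact push, build, image push, manifest push). A minimal, self-contained sketch of how that validation maps onto go-digest algorithms — the parseDigestAlgorithm helper name is illustrative only and not part of this patch:

```go
package main

import (
	_ "crypto/sha256" // register the hash implementations go-digest relies on
	_ "crypto/sha512"
	"fmt"

	"github.com/opencontainers/go-digest"
)

// parseDigestAlgorithm mirrors the per-command validation: an empty value
// defers to the storage.conf / SHA256 default, otherwise only sha256 and
// sha512 are accepted.
func parseDigestAlgorithm(name string) (digest.Algorithm, error) {
	switch name {
	case "":
		return digest.Canonical, nil // digest.Canonical is SHA256
	case "sha256":
		return digest.SHA256, nil
	case "sha512":
		return digest.SHA512, nil
	default:
		return "", fmt.Errorf("invalid digest algorithm %q: must be sha256 or sha512", name)
	}
}

func main() {
	alg, err := parseDigestAlgorithm("sha512")
	if err != nil {
		panic(err)
	}
	// Hash an arbitrary blob (e.g. a config or manifest) with the selected algorithm.
	fmt.Println(alg.FromBytes([]byte(`{"architecture":"amd64"}`)))
}
```

In the patch itself the validated string travels through ImagePushOptions / BuildOptions and is only mapped to a digest.Algorithm on the backend, as in abi/images.go below.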
diff --git a/pkg/domain/infra/abi/images.go b/pkg/domain/infra/abi/images.go
index 5775b7c92c..307c35020c 100644
--- a/pkg/domain/infra/abi/images.go
+++ b/pkg/domain/infra/abi/images.go
@@ -422,7 +422,43 @@ func (ir *ImageEngine) Push(ctx context.Context, source string, destination stri
 	pushedManifestBytes, pushError := ir.Libpod.LibimageRuntime().Push(ctx, source, destination, pushOptions)
 	if pushError == nil {
-		manifestDigest, err := manifest.Digest(pushedManifestBytes)
+		// Determine the digest algorithm to use for the manifest computation.
+		var digestAlgorithm digest.Algorithm = digest.Canonical // digest.Canonical is SHA256
+
+		// If no algorithm was requested, try to detect it from the source image ID.
+		if options.DigestAlgorithm == "" {
+			if sourceImage, _, err := ir.Libpod.LibimageRuntime().LookupImage(source, nil); err == nil {
+				imageID := sourceImage.ID()
+				// Detect the digest algorithm from the image ID length:
+				// SHA256 = 64 hex chars, SHA512 = 128 hex chars.
+				switch len(imageID) {
+				case 64:
+					digestAlgorithm = digest.SHA256
+					logrus.Debugf("Push: Auto-detected SHA256 from source image ID length")
+				case 128:
+					digestAlgorithm = digest.SHA512
+					logrus.Debugf("Push: Auto-detected SHA512 from source image ID length")
+				default:
+					logrus.Debugf("Push: Unknown image ID length %d, using default %s", len(imageID), digestAlgorithm.String())
+				}
+			} else {
+				logrus.Debugf("Push: Could not lookup source image %s for digest algorithm detection: %v", source, err)
+			}
+		}
+
+		// An explicitly requested algorithm takes precedence over auto-detection.
+		if options.DigestAlgorithm != "" {
+			switch options.DigestAlgorithm {
+			case "sha256":
+				digestAlgorithm = digest.SHA256
+			case "sha512":
+				digestAlgorithm = digest.SHA512
+			default:
+				logrus.Warnf("Unknown digest algorithm %q, falling back to auto-detected or default", options.DigestAlgorithm)
+			}
+		}
+
+		manifestDigest, err := manifest.DigestWithAlgorithm(pushedManifestBytes, digestAlgorithm)
 		if err != nil {
 			return nil, err
 		}
@@ -581,7 +617,13 @@ func (ir *ImageEngine) Config(_ context.Context) (*config.Config, error) {
 }
 
 func (ir *ImageEngine) Build(ctx context.Context, containerFiles []string, opts entities.BuildOptions) (*entities.BuildReport, error) {
-	id, _, err := ir.Libpod.Build(ctx, opts.BuildOptions, containerFiles...)
+	// Log the requested digest algorithm, if any.
+	if opts.DigestAlgorithm != "" {
+		logrus.Debugf("Using digest algorithm for build operation: %s", opts.DigestAlgorithm)
+	}
+
+	// Use BuildWithDigest, which supports overriding the digest algorithm.
+	id, _, err := ir.Libpod.BuildWithDigest(ctx, opts.BuildOptions, opts.DigestAlgorithm, containerFiles...)
if err != nil { return nil, err } diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml index 832abc07d6..66bbc75c9c 100644 --- a/vendor/github.com/containers/buildah/.cirrus.yml +++ b/vendor/github.com/containers/buildah/.cirrus.yml @@ -35,7 +35,7 @@ env: DEBIAN_NAME: "debian-13" # Image identifiers - IMAGE_SUFFIX: "c20250422t130822z-f42f41d13" + IMAGE_SUFFIX: "c20250910t092246z-f42f41d13" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}" DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}" @@ -124,7 +124,7 @@ vendor_task: # Runs within Cirrus's "community cluster" container: - image: docker.io/library/golang:1.23.3 + image: docker.io/library/golang:1.24.0 cpu: 1 memory: 1 diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index 9de72ca8dc..27d67b6331 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -132,8 +132,8 @@ type Builder struct { ImageHistoryComment string `json:"history-comment,omitempty"` // Image metadata and runtime settings, in multiple formats. - OCIv1 v1.Image `json:"ociv1,omitempty"` - Docker docker.V2Image `json:"docker,omitempty"` + OCIv1 v1.Image `json:"ociv1"` + Docker docker.V2Image `json:"docker"` // DefaultMountsFilePath is the file path holding the mounts to be mounted in "host-path:container-path" format. DefaultMountsFilePath string `json:"defaultMountsFilePath,omitempty"` diff --git a/vendor/github.com/containers/buildah/chroot/run_common.go b/vendor/github.com/containers/buildah/chroot/run_common.go index 0e50cf0e8e..fbd0689f6a 100644 --- a/vendor/github.com/containers/buildah/chroot/run_common.go +++ b/vendor/github.com/containers/buildah/chroot/run_common.go @@ -12,6 +12,7 @@ import ( "os/signal" "path/filepath" "runtime" + "slices" "strconv" "strings" "sync" @@ -743,6 +744,15 @@ func runUsingChrootExecMain() { os.Exit(1) } + // Set $PATH to the value for the container, so that when args[0] is not an absolute path, + // exec.Command() can find it using exec.LookPath(). + for _, env := range slices.Backward(options.Spec.Process.Env) { + if val, ok := strings.CutPrefix(env, "PATH="); ok { + os.Setenv("PATH", val) + break + } + } + // Actually run the specified command. cmd := exec.Command(args[0], args[1:]...) 
setPdeathsig(cmd) diff --git a/vendor/github.com/containers/buildah/copier/copier.go b/vendor/github.com/containers/buildah/copier/copier.go index 6ffe52d86f..9aa662cb68 100644 --- a/vendor/github.com/containers/buildah/copier/copier.go +++ b/vendor/github.com/containers/buildah/copier/copier.go @@ -162,13 +162,13 @@ type request struct { preservedDirectory string Globs []string `json:",omitempty"` // used by stat, get preservedGlobs []string - StatOptions StatOptions `json:",omitempty"` - GetOptions GetOptions `json:",omitempty"` - PutOptions PutOptions `json:",omitempty"` - MkdirOptions MkdirOptions `json:",omitempty"` - RemoveOptions RemoveOptions `json:",omitempty"` - EnsureOptions EnsureOptions `json:",omitempty"` - ConditionalRemoveOptions ConditionalRemoveOptions `json:",omitempty"` + StatOptions StatOptions + GetOptions GetOptions + PutOptions PutOptions + MkdirOptions MkdirOptions + RemoveOptions RemoveOptions + EnsureOptions EnsureOptions + ConditionalRemoveOptions ConditionalRemoveOptions } func (req *request) Excludes() []string { @@ -248,15 +248,15 @@ func (req *request) GIDMap() []idtools.IDMap { // Response encodes a single response. type response struct { - Error string `json:",omitempty"` - Stat statResponse `json:",omitempty"` - Eval evalResponse `json:",omitempty"` - Get getResponse `json:",omitempty"` - Put putResponse `json:",omitempty"` - Mkdir mkdirResponse `json:",omitempty"` - Remove removeResponse `json:",omitempty"` - Ensure ensureResponse `json:",omitempty"` - ConditionalRemove conditionalRemoveResponse `json:",omitempty"` + Error string `json:",omitempty"` + Stat statResponse + Eval evalResponse + Get getResponse + Put putResponse + Mkdir mkdirResponse + Remove removeResponse + Ensure ensureResponse + ConditionalRemove conditionalRemoveResponse } // statResponse encodes a response for a single Stat request. 
@@ -801,7 +801,7 @@ func copierWithSubprocess(bulkReader io.Reader, bulkWriter io.Writer, req reques } loggedOutput := strings.TrimSuffix(errorBuffer.String(), "\n") if len(loggedOutput) > 0 { - for _, output := range strings.Split(loggedOutput, "\n") { + for output := range strings.SplitSeq(loggedOutput, "\n") { logrus.Debug(output) } } @@ -1588,8 +1588,8 @@ func mapWithPrefixedKeysWithoutKeyPrefix[K any](m map[string]K, p string) map[st } cloned := make(map[string]K, len(m)) for k, v := range m { - if strings.HasPrefix(k, p) { - cloned[strings.TrimPrefix(k, p)] = v + if after, ok := strings.CutPrefix(k, p); ok { + cloned[after] = v } } return cloned @@ -1819,7 +1819,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM return fmt.Errorf("%q is not a subdirectory of %q: %w", directory, req.Root, err) } subdir := "" - for _, component := range strings.Split(rel, string(os.PathSeparator)) { + for component := range strings.SplitSeq(rel, string(os.PathSeparator)) { subdir = filepath.Join(subdir, component) path := filepath.Join(req.Root, subdir) if err := os.Mkdir(path, 0o700); err == nil { @@ -2187,7 +2187,7 @@ func copierHandlerPut(bulkReader io.Reader, req request, idMappings *idtools.IDM } func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response, func() error, error) { - errorResponse := func(fmtspec string, args ...any) (*response, func() error, error) { + errorResponse := func(fmtspec string, args ...any) (*response, func() error, error) { //nolint:unparam return &response{Error: fmt.Sprintf(fmtspec, args...), Mkdir: mkdirResponse{}}, nil, nil } dirUID, dirGID := 0, 0 @@ -2219,7 +2219,7 @@ func copierHandlerMkdir(req request, idMappings *idtools.IDMappings) (*response, subdir := "" var created []string - for _, component := range strings.Split(rel, string(os.PathSeparator)) { + for component := range strings.SplitSeq(rel, string(os.PathSeparator)) { subdir = filepath.Join(subdir, component) path := filepath.Join(req.Root, subdir) if err := os.Mkdir(path, 0o700); err == nil { diff --git a/vendor/github.com/containers/buildah/copier/xattrs.go b/vendor/github.com/containers/buildah/copier/xattrs.go index 2e4b944adb..73fc4b6df9 100644 --- a/vendor/github.com/containers/buildah/copier/xattrs.go +++ b/vendor/github.com/containers/buildah/copier/xattrs.go @@ -65,7 +65,7 @@ func Lgetxattrs(path string) (map[string]string, error) { return nil, fmt.Errorf("unable to read list of attributes for %q: size would have been too big", path) } m := make(map[string]string) - for _, attribute := range strings.Split(string(list), string('\000')) { + for attribute := range strings.SplitSeq(string(list), string('\000')) { if isRelevantXattr(attribute) { attributeSize := initialXattrValueSize var attributeValue []byte diff --git a/vendor/github.com/containers/buildah/docker/types.go b/vendor/github.com/containers/buildah/docker/types.go index 997698709d..352ae4799f 100644 --- a/vendor/github.com/containers/buildah/docker/types.go +++ b/vendor/github.com/containers/buildah/docker/types.go @@ -124,7 +124,7 @@ type V1Compatibility struct { Created time.Time `json:"created"` ContainerConfig struct { Cmd []string - } `json:"container_config,omitempty"` + } `json:"container_config"` Author string `json:"author,omitempty"` ThrowAway bool `json:"throwaway,omitempty"` } @@ -143,7 +143,7 @@ type V1Image struct { // Container is the id of the container used to commit Container string `json:"container,omitempty"` // ContainerConfig is the configuration of the 
container that is committed into the image - ContainerConfig Config `json:"container_config,omitempty"` + ContainerConfig Config `json:"container_config"` // DockerVersion specifies the version of Docker that was used to build the image DockerVersion string `json:"docker_version,omitempty"` // Author is the name of the author that was specified when committing the image diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go index 469db66107..cba08218b8 100644 --- a/vendor/github.com/containers/buildah/image.go +++ b/vendor/github.com/containers/buildah/image.go @@ -535,7 +535,7 @@ func (mb *dockerSchema2ManifestBuilder) manifestAndConfig() ([]byte, []byte, err logrus.Debugf("Docker v2s2 config = %s", dconfig) // Add the configuration blob to the manifest. - mb.dmanifest.Config.Digest = digest.Canonical.FromBytes(dconfig) + mb.dmanifest.Config.Digest = mb.i.store.GetDigestAlgorithm().FromBytes(dconfig) mb.dmanifest.Config.Size = int64(len(dconfig)) mb.dmanifest.Config.MediaType = manifest.DockerV2Schema2ConfigMediaType @@ -772,7 +772,7 @@ func (mb *ociManifestBuilder) manifestAndConfig() ([]byte, []byte, error) { logrus.Debugf("OCIv1 config = %s", oconfig) // Add the configuration blob to the manifest. - mb.omanifest.Config.Digest = digest.Canonical.FromBytes(oconfig) + mb.omanifest.Config.Digest = mb.i.store.GetDigestAlgorithm().FromBytes(oconfig) mb.omanifest.Config.Size = int64(len(oconfig)) mb.omanifest.Config.MediaType = v1.MediaTypeImageConfig @@ -1068,7 +1068,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, _ *types.SystemC } } } - srcHasher := digest.Canonical.Digester() + srcHasher := i.store.GetDigestAlgorithm().Digester() // Set up to write the possibly-recompressed blob. 
layerFile, err := os.OpenFile(filepath.Join(path, "layer"), os.O_CREATE|os.O_WRONLY, 0o600) if err != nil { @@ -1085,7 +1085,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, _ *types.SystemC diffBeingAltered = diffBeingAltered || i.layerModTime != nil || i.layerLatestModTime != nil diffBeingAltered = diffBeingAltered || len(layerExclusions) != 0 if diffBeingAltered { - destHasher = digest.Canonical.Digester() + destHasher = i.store.GetDigestAlgorithm().Digester() multiWriter = io.MultiWriter(counter, destHasher.Hash()) } else { destHasher = srcHasher @@ -1172,7 +1172,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, _ *types.SystemC names: i.names, compression: i.compression, config: config, - configDigest: digest.Canonical.FromBytes(config), + configDigest: i.store.GetDigestAlgorithm().FromBytes(config), manifest: imageManifest, manifestType: i.preferredManifestType, blobDirectory: i.blobDirectory, @@ -1313,7 +1313,7 @@ func (i *containerImageRef) makeExtraImageContentDiff(includeFooter bool, timest os.Remove(diff.Name()) } }() - digester := digest.Canonical.Digester() + digester := i.store.GetDigestAlgorithm().Digester() counter := ioutils.NewWriteCounter(digester.Hash()) tw := tar.NewWriter(io.MultiWriter(diff, counter)) if timestamp == nil { @@ -1473,7 +1473,7 @@ func (b *Builder) makeLinkedLayerInfos(layers []LinkedLayer, layerType string, l } } - digester := digest.Canonical.Digester() + digester := b.store.GetDigestAlgorithm().Digester() sizeCountedFile := ioutils.NewWriteCounter(io.MultiWriter(digester.Hash(), f)) wc := makeFilteredLayerWriteCloser(ioutils.NopWriteCloser(sizeCountedFile), layerModTime, layerLatestModTime, nil) _, copyErr := io.Copy(wc, rc) diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go index 904849a914..02bdde5934 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go @@ -836,12 +836,12 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image } case "ADD", "COPY": for _, flag := range child.Flags { // flags for this instruction - if strings.HasPrefix(flag, "--from=") { + if after, ok := strings.CutPrefix(flag, "--from="); ok { // TODO: this didn't undergo variable and // arg expansion, so if the previous stage // was named using argument values, we might // not record the right value here. - rootfs := strings.TrimPrefix(flag, "--from=") + rootfs := after b.rootfsMap[rootfs] = struct{}{} logrus.Debugf("rootfs needed for COPY in stage %d: %q", stageIndex, rootfs) // Populate dependency tree and check @@ -885,8 +885,8 @@ func (b *Executor) Build(ctx context.Context, stages imagebuilder.Stages) (image // dependency calculation. 
if strings.HasPrefix(flag, "--mount=") && strings.Contains(flag, "from") { mountFlags := strings.TrimPrefix(flag, "--mount=") - fields := strings.Split(mountFlags, ",") - for _, field := range fields { + fields := strings.SplitSeq(mountFlags, ",") + for field := range fields { if mountFrom, hasFrom := strings.CutPrefix(field, "from="); hasFrom { // Check if this base is a stage if yes // add base to current stage's dependency tree diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go index 00a96bfbcd..7a57d80eff 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go @@ -1913,7 +1913,7 @@ func (s *StageExecutor) getCreatedBy(node *parser.Node, addedContentSummary stri switch command { case "ARG": - for _, variable := range strings.Fields(node.Original) { + for variable := range strings.FieldsSeq(node.Original) { if variable != "ARG" { s.argsFromContainerfile = append(s.argsFromContainerfile, variable) } diff --git a/vendor/github.com/containers/buildah/info.go b/vendor/github.com/containers/buildah/info.go index 56a28429c9..d0dd48932f 100644 --- a/vendor/github.com/containers/buildah/info.go +++ b/vendor/github.com/containers/buildah/info.go @@ -183,11 +183,11 @@ func getHostDistributionInfo() map[string]string { l := bufio.NewScanner(f) for l.Scan() { - if strings.HasPrefix(l.Text(), "ID=") { - dist["Distribution"] = strings.TrimPrefix(l.Text(), "ID=") + if after, ok := strings.CutPrefix(l.Text(), "ID="); ok { + dist["Distribution"] = after } - if strings.HasPrefix(l.Text(), "VERSION_ID=") { - dist["Version"] = strings.Trim(strings.TrimPrefix(l.Text(), "VERSION_ID="), "\"") + if after, ok := strings.CutPrefix(l.Text(), "VERSION_ID="); ok { + dist["Version"] = strings.Trim(after, "\"") } } return dist diff --git a/vendor/github.com/containers/buildah/internal/config/override.go b/vendor/github.com/containers/buildah/internal/config/override.go index 7d391bdafc..55933de468 100644 --- a/vendor/github.com/containers/buildah/internal/config/override.go +++ b/vendor/github.com/containers/buildah/internal/config/override.go @@ -2,6 +2,7 @@ package config import ( "fmt" + "maps" "os" "slices" "strings" @@ -136,9 +137,7 @@ func OverrideOCI(oconfig *v1.ImageConfig, overrideChanges []string, overrideConf if oconfig.Labels == nil { oconfig.Labels = make(map[string]string) } - for k, v := range overrideConfig.Labels { - oconfig.Labels[k] = v - } + maps.Copy(oconfig.Labels, overrideConfig.Labels) } oconfig.StopSignal = overrideConfig.StopSignal } @@ -206,9 +205,7 @@ func OverrideDocker(dconfig *docker.Config, overrideChanges []string, overrideCo if dconfig.Labels == nil { dconfig.Labels = make(map[string]string) } - for k, v := range overrideConfig.Labels { - dconfig.Labels[k] = v - } + maps.Copy(dconfig.Labels, overrideConfig.Labels) } dconfig.StopSignal = overrideConfig.StopSignal dconfig.StopTimeout = overrideConfig.StopTimeout diff --git a/vendor/github.com/containers/buildah/internal/mkcw/archive.go b/vendor/github.com/containers/buildah/internal/mkcw/archive.go index f462a8fa9f..d136298658 100644 --- a/vendor/github.com/containers/buildah/internal/mkcw/archive.go +++ b/vendor/github.com/containers/buildah/internal/mkcw/archive.go @@ -543,7 +543,7 @@ func slop(size int64, slop string) int64 { if slop == "" { return size * 5 / 4 } - for _, factor := range strings.Split(slop, "+") { + for factor := range 
strings.SplitSeq(slop, "+") { factor = strings.TrimSpace(factor) if factor == "" { continue diff --git a/vendor/github.com/containers/buildah/internal/mkcw/attest.go b/vendor/github.com/containers/buildah/internal/mkcw/attest.go index 3b7273976d..b974ca5242 100644 --- a/vendor/github.com/containers/buildah/internal/mkcw/attest.go +++ b/vendor/github.com/containers/buildah/internal/mkcw/attest.go @@ -240,8 +240,8 @@ func GenerateMeasurement(workloadConfig WorkloadConfig, firmwareLibrary string) scanner := bufio.NewScanner(&stdout) for scanner.Scan() { line := scanner.Text() - if strings.HasPrefix(line, prefix+":") { - return strings.TrimSpace(strings.TrimPrefix(line, prefix+":")), nil + if after, ok := strings.CutPrefix(line, prefix+":"); ok { + return strings.TrimSpace(after), nil } } return "", fmt.Errorf("generating measurement: no line starting with %q found in output from krunfw_measurement", prefix+":") diff --git a/vendor/github.com/containers/buildah/internal/sbom/merge.go b/vendor/github.com/containers/buildah/internal/sbom/merge.go index dddea84c51..57487b083b 100644 --- a/vendor/github.com/containers/buildah/internal/sbom/merge.go +++ b/vendor/github.com/containers/buildah/internal/sbom/merge.go @@ -202,7 +202,7 @@ func Merge(mergeStrategy define.SBOMMergeStrategy, inputOutputSBOM, inputSBOM, o Dependencies []string `json:"dependencies,omitempty"` } type purlDocument struct { - ImageContents purlImageContents `json:"image_contents,omitempty"` + ImageContents purlImageContents `json:"image_contents"` } purls := []string{} seenPurls := make(map[string]struct{}) diff --git a/vendor/github.com/containers/buildah/pkg/cli/build.go b/vendor/github.com/containers/buildah/pkg/cli/build.go index bdbd1105f8..9449ac883a 100644 --- a/vendor/github.com/containers/buildah/pkg/cli/build.go +++ b/vendor/github.com/containers/buildah/pkg/cli/build.go @@ -474,7 +474,7 @@ func readBuildArgFile(buildargfile string, args map[string]string) error { if err != nil { return err } - for _, arg := range strings.Split(string(argfile), "\n") { + for arg := range strings.SplitSeq(string(argfile), "\n") { if len(arg) == 0 || arg[0] == '#' { continue } diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go index 053c043478..911d5dedac 100644 --- a/vendor/github.com/containers/buildah/pkg/parse/parse.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go @@ -733,7 +733,7 @@ func GetBuildOutput(buildOutput string) (define.BuildOutputOption, error) { isStdout := false typeSelected := "" pathSelected := "" - for _, option := range strings.Split(buildOutput, ",") { + for option := range strings.SplitSeq(buildOutput, ",") { key, value, found := strings.Cut(option, "=") if !found { return define.BuildOutputOption{}, fmt.Errorf("invalid build output options %q, expected format key=value", buildOutput) @@ -789,7 +789,7 @@ func GetConfidentialWorkloadOptions(arg string) (define.ConfidentialWorkloadOpti TempDir: GetTempDir(), } defaults := options - for _, option := range strings.Split(arg, ",") { + for option := range strings.SplitSeq(arg, ",") { var err error switch { case strings.HasPrefix(option, "type="): @@ -936,7 +936,7 @@ func GetAutoOptions(base string) (*storageTypes.AutoUserNsOptions, error) { if len(parts) == 1 { return &options, nil } - for _, o := range strings.Split(parts[1], ",") { + for o := range strings.SplitSeq(parts[1], ",") { v := strings.SplitN(o, "=", 2) if len(v) != 2 { return nil, fmt.Errorf("invalid option 
specified: %q", o) diff --git a/vendor/github.com/containers/buildah/run_common.go b/vendor/github.com/containers/buildah/run_common.go index 0863ec2fb2..bd501f3bd3 100644 --- a/vendor/github.com/containers/buildah/run_common.go +++ b/vendor/github.com/containers/buildah/run_common.go @@ -2121,6 +2121,12 @@ func (b *Builder) createMountTargets(spec *specs.Spec) ([]copier.ConditionalRemo // forced permissions mode = &perms } + if mode == nil && destination != cleanedDestination { + // parent directories default to 0o755, for + // the sake of commands running as UID != 0 + perms := os.FileMode(0o755) + mode = &perms + } targets.Paths = append(targets.Paths, copier.EnsurePath{ Path: destination, Typeflag: typeFlag, diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/archive_deprecated.go deleted file mode 100644 index 5bdbdef200..0000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_deprecated.go +++ /dev/null @@ -1,259 +0,0 @@ -// Package archive provides helper functions for dealing with archive files. -package archive - -import ( - "archive/tar" - "io" - "os" - - "github.com/docker/docker/pkg/idtools" - "github.com/moby/go-archive" - "github.com/moby/go-archive/compression" - "github.com/moby/go-archive/tarheader" -) - -// ImpliedDirectoryMode represents the mode (Unix permissions) applied to directories that are implied by files in a -// tar, but that do not have their own header entry. -// -// Deprecated: use [archive.ImpliedDirectoryMode] instead. -const ImpliedDirectoryMode = archive.ImpliedDirectoryMode - -type ( - // Compression is the state represents if compressed or not. - // - // Deprecated: use [compression.Compression] instead. - Compression = compression.Compression - // WhiteoutFormat is the format of whiteouts unpacked - // - // Deprecated: use [archive.WhiteoutFormat] instead. - WhiteoutFormat = archive.WhiteoutFormat - - // TarOptions wraps the tar options. - // - // Deprecated: use [archive.TarOptions] instead. - TarOptions struct { - IncludeFiles []string - ExcludePatterns []string - Compression compression.Compression - NoLchown bool - IDMap idtools.IdentityMapping - ChownOpts *idtools.Identity - IncludeSourceDir bool - // WhiteoutFormat is the expected on disk format for whiteout files. - // This format will be converted to the standard format on pack - // and from the standard format on unpack. - WhiteoutFormat archive.WhiteoutFormat - // When unpacking, specifies whether overwriting a directory with a - // non-directory is allowed and vice versa. - NoOverwriteDirNonDir bool - // For each include when creating an archive, the included name will be - // replaced with the matching name from this map. - RebaseNames map[string]string - InUserNS bool - // Allow unpacking to succeed in spite of failures to set extended - // attributes on the unpacked files due to the destination filesystem - // not supporting them or a lack of permissions. Extended attributes - // were probably in the archive for a reason, so set this option at - // your own peril. - BestEffortXattrs bool - } -) - -// Archiver implements the Archiver interface and allows the reuse of most utility functions of -// this package with a pluggable Untar function. Also, to facilitate the passing of specific id -// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. -// -// Deprecated: use [archive.Archiver] instead. 
-type Archiver struct { - Untar func(io.Reader, string, *TarOptions) error - IDMapping idtools.IdentityMapping -} - -// NewDefaultArchiver returns a new Archiver without any IdentityMapping -// -// Deprecated: use [archive.NewDefaultArchiver] instead. -func NewDefaultArchiver() *Archiver { - return &Archiver{Untar: Untar} -} - -const ( - Uncompressed = compression.None // Deprecated: use [compression.None] instead. - Bzip2 = compression.Bzip2 // Deprecated: use [compression.Bzip2] instead. - Gzip = compression.Gzip // Deprecated: use [compression.Gzip] instead. - Xz = compression.Xz // Deprecated: use [compression.Xz] instead. - Zstd = compression.Zstd // Deprecated: use [compression.Zstd] instead. -) - -const ( - AUFSWhiteoutFormat = archive.AUFSWhiteoutFormat // Deprecated: use [archive.AUFSWhiteoutFormat] instead. - OverlayWhiteoutFormat = archive.OverlayWhiteoutFormat // Deprecated: use [archive.OverlayWhiteoutFormat] instead. -) - -// IsArchivePath checks if the (possibly compressed) file at the given path -// starts with a tar file header. -// -// Deprecated: use [archive.IsArchivePath] instead. -func IsArchivePath(path string) bool { - return archive.IsArchivePath(path) -} - -// DetectCompression detects the compression algorithm of the source. -// -// Deprecated: use [compression.Detect] instead. -func DetectCompression(source []byte) archive.Compression { - return compression.Detect(source) -} - -// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. -// -// Deprecated: use [compression.DecompressStream] instead. -func DecompressStream(arch io.Reader) (io.ReadCloser, error) { - return compression.DecompressStream(arch) -} - -// CompressStream compresses the dest with specified compression algorithm. -// -// Deprecated: use [compression.CompressStream] instead. -func CompressStream(dest io.Writer, comp compression.Compression) (io.WriteCloser, error) { - return compression.CompressStream(dest, comp) -} - -// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper. -// -// Deprecated: use [archive.TarModifierFunc] instead. -type TarModifierFunc = archive.TarModifierFunc - -// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. -// -// Deprecated: use [archive.ReplaceFileTarWrapper] instead. -func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]archive.TarModifierFunc) io.ReadCloser { - return archive.ReplaceFileTarWrapper(inputTarStream, mods) -} - -// FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi. -// -// Deprecated: use [tarheader.FileInfoHeaderNoLookups] instead. -func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) { - return tarheader.FileInfoHeaderNoLookups(fi, link) -} - -// FileInfoHeader creates a populated Header from fi. -// -// Deprecated: use [archive.FileInfoHeader] instead. -func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { - return archive.FileInfoHeader(name, fi, link) -} - -// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem -// to a tar header -// -// Deprecated: use [archive.ReadSecurityXattrToTarHeader] instead. -func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { - return archive.ReadSecurityXattrToTarHeader(path, hdr) -} - -// Tar creates an archive from the directory at `path`, and returns it as a -// stream of bytes. -// -// Deprecated: use [archive.Tar] instead. 
-func Tar(path string, compression archive.Compression) (io.ReadCloser, error) { - return archive.TarWithOptions(path, &archive.TarOptions{Compression: compression}) -} - -// TarWithOptions creates an archive with the given options. -// -// Deprecated: use [archive.TarWithOptions] instead. -func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { - return archive.TarWithOptions(srcPath, toArchiveOpt(options)) -} - -// Tarballer is a lower-level interface to TarWithOptions. -// -// Deprecated: use [archive.Tarballer] instead. -type Tarballer = archive.Tarballer - -// NewTarballer constructs a new tarballer using TarWithOptions. -// -// Deprecated: use [archive.Tarballer] instead. -func NewTarballer(srcPath string, options *TarOptions) (*archive.Tarballer, error) { - return archive.NewTarballer(srcPath, toArchiveOpt(options)) -} - -// Unpack unpacks the decompressedArchive to dest with options. -// -// Deprecated: use [archive.Unpack] instead. -func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { - return archive.Unpack(decompressedArchive, dest, toArchiveOpt(options)) -} - -// Untar reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// -// Deprecated: use [archive.Untar] instead. -func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { - return archive.Untar(tarArchive, dest, toArchiveOpt(options)) -} - -// UntarUncompressed reads a stream of bytes from `tarArchive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive must be an uncompressed stream. -// -// Deprecated: use [archive.UntarUncompressed] instead. -func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { - return archive.UntarUncompressed(tarArchive, dest, toArchiveOpt(options)) -} - -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. -func (archiver *Archiver) TarUntar(src, dst string) error { - return (&archive.Archiver{ - Untar: func(reader io.Reader, s string, options *archive.TarOptions) error { - return archiver.Untar(reader, s, &TarOptions{ - IDMap: archiver.IDMapping, - }) - }, - IDMapping: idtools.ToUserIdentityMapping(archiver.IDMapping), - }).TarUntar(src, dst) -} - -// UntarPath untar a file from path to a destination, src is the source tar file path. -func (archiver *Archiver) UntarPath(src, dst string) error { - return (&archive.Archiver{ - Untar: func(reader io.Reader, s string, options *archive.TarOptions) error { - return archiver.Untar(reader, s, &TarOptions{ - IDMap: archiver.IDMapping, - }) - }, - IDMapping: idtools.ToUserIdentityMapping(archiver.IDMapping), - }).UntarPath(src, dst) -} - -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. -func (archiver *Archiver) CopyWithTar(src, dst string) error { - return (&archive.Archiver{ - Untar: func(reader io.Reader, s string, options *archive.TarOptions) error { - return archiver.Untar(reader, s, nil) - }, - IDMapping: idtools.ToUserIdentityMapping(archiver.IDMapping), - }).CopyWithTar(src, dst) -} - -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. 
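The files being deleted here are thin deprecation shims; callers are expected to use github.com/moby/go-archive directly, which is exactly what the fsouza/go-dockerclient hunk later in this diff does. A hedged sketch of the direct calls, mirroring only the delegations the shims themselves show; the /tmp paths are placeholders:

package main

import (
	"log"

	"github.com/moby/go-archive"
	"github.com/moby/go-archive/compression"
)

func main() {
	// Tar up a directory with gzip compression, then unpack it elsewhere,
	// calling moby/go-archive directly rather than the deleted wrappers.
	rc, err := archive.TarWithOptions("/tmp/src", &archive.TarOptions{
		Compression: compression.Gzip,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	if err := archive.Untar(rc, "/tmp/dst", nil); err != nil {
		log.Fatal(err)
	}
}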
-func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { - return (&archive.Archiver{ - Untar: func(reader io.Reader, s string, options *archive.TarOptions) error { - return archiver.Untar(reader, s, nil) - }, - IDMapping: idtools.ToUserIdentityMapping(archiver.IDMapping), - }).CopyFileWithTar(src, dst) -} - -// IdentityMapping returns the IdentityMapping of the archiver. -func (archiver *Archiver) IdentityMapping() idtools.IdentityMapping { - return archiver.IDMapping -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/changes_deprecated.go deleted file mode 100644 index 48c75235c3..0000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_deprecated.go +++ /dev/null @@ -1,56 +0,0 @@ -package archive - -import ( - "io" - - "github.com/docker/docker/pkg/idtools" - "github.com/moby/go-archive" -) - -// ChangeType represents the change -// -// Deprecated: use [archive.ChangeType] instead. -type ChangeType = archive.ChangeType - -const ( - ChangeModify = archive.ChangeModify // Deprecated: use [archive.ChangeModify] instead. - ChangeAdd = archive.ChangeAdd // Deprecated: use [archive.ChangeAdd] instead. - ChangeDelete = archive.ChangeDelete // Deprecated: use [archive.ChangeDelete] instead. -) - -// Change represents a change. -// -// Deprecated: use [archive.Change] instead. -type Change = archive.Change - -// Changes walks the path rw and determines changes for the files in the path, -// with respect to the parent layers -// -// Deprecated: use [archive.Changes] instead. -func Changes(layers []string, rw string) ([]archive.Change, error) { - return archive.Changes(layers, rw) -} - -// FileInfo describes the information of a file. -// -// Deprecated: use [archive.FileInfo] instead. -type FileInfo = archive.FileInfo - -// ChangesDirs compares two directories and generates an array of Change objects describing the changes. -// -// Deprecated: use [archive.ChangesDirs] instead. -func ChangesDirs(newDir, oldDir string) ([]archive.Change, error) { - return archive.ChangesDirs(newDir, oldDir) -} - -// ChangesSize calculates the size in bytes of the provided changes, based on newDir. -// -// Deprecated: use [archive.ChangesSize] instead. -func ChangesSize(newDir string, changes []archive.Change) int64 { - return archive.ChangesSize(newDir, changes) -} - -// ExportChanges produces an Archive from the provided changes, relative to dir. -func ExportChanges(dir string, changes []archive.Change, idMap idtools.IdentityMapping) (io.ReadCloser, error) { - return archive.ExportChanges(dir, changes, idtools.ToUserIdentityMapping(idMap)) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/copy_deprecated.go deleted file mode 100644 index 1901e55c53..0000000000 --- a/vendor/github.com/docker/docker/pkg/archive/copy_deprecated.go +++ /dev/null @@ -1,130 +0,0 @@ -package archive - -import ( - "io" - - "github.com/moby/go-archive" - "github.com/moby/go-archive/compression" -) - -var ( - ErrNotDirectory = archive.ErrNotDirectory // Deprecated: use [archive.ErrNotDirectory] instead. - ErrDirNotExists = archive.ErrDirNotExists // Deprecated: use [archive.ErrDirNotExists] instead. - ErrCannotCopyDir = archive.ErrCannotCopyDir // Deprecated: use [archive.ErrCannotCopyDir] instead. - ErrInvalidCopySource = archive.ErrInvalidCopySource // Deprecated: use [archive.ErrInvalidCopySource] instead. 
-) - -// PreserveTrailingDotOrSeparator returns the given cleaned path. -// -// Deprecated: use [archive.PreserveTrailingDotOrSeparator] instead. -func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string) string { - return archive.PreserveTrailingDotOrSeparator(cleanedPath, originalPath) -} - -// SplitPathDirEntry splits the given path between its directory name and its -// basename. -// -// Deprecated: use [archive.SplitPathDirEntry] instead. -func SplitPathDirEntry(path string) (dir, base string) { - return archive.SplitPathDirEntry(path) -} - -// TarResource archives the resource described by the given CopyInfo to a Tar -// archive. -// -// Deprecated: use [archive.TarResource] instead. -func TarResource(sourceInfo archive.CopyInfo) (content io.ReadCloser, err error) { - return archive.TarResource(sourceInfo) -} - -// TarResourceRebase is like TarResource but renames the first path element of -// items in the resulting tar archive to match the given rebaseName if not "". -// -// Deprecated: use [archive.TarResourceRebase] instead. -func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, _ error) { - return archive.TarResourceRebase(sourcePath, rebaseName) -} - -// TarResourceRebaseOpts does not preform the Tar, but instead just creates the rebase -// parameters to be sent to TarWithOptions. -// -// Deprecated: use [archive.TarResourceRebaseOpts] instead. -func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions { - filter := []string{sourceBase} - return &TarOptions{ - Compression: compression.None, - IncludeFiles: filter, - IncludeSourceDir: true, - RebaseNames: map[string]string{ - sourceBase: rebaseName, - }, - } -} - -// CopyInfo holds basic info about the source or destination path of a copy operation. -// -// Deprecated: use [archive.CopyInfo] instead. -type CopyInfo = archive.CopyInfo - -// CopyInfoSourcePath stats the given path to create a CopyInfo struct. -// struct representing that resource for the source of an archive copy -// operation. -// -// Deprecated: use [archive.CopyInfoSourcePath] instead. -func CopyInfoSourcePath(path string, followLink bool) (archive.CopyInfo, error) { - return archive.CopyInfoSourcePath(path, followLink) -} - -// CopyInfoDestinationPath stats the given path to create a CopyInfo -// struct representing that resource for the destination of an archive copy -// operation. -// -// Deprecated: use [archive.CopyInfoDestinationPath] instead. -func CopyInfoDestinationPath(path string) (info archive.CopyInfo, err error) { - return archive.CopyInfoDestinationPath(path) -} - -// PrepareArchiveCopy prepares the given srcContent archive. -// -// Deprecated: use [archive.PrepareArchiveCopy] instead. -func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo archive.CopyInfo) (dstDir string, content io.ReadCloser, err error) { - return archive.PrepareArchiveCopy(srcContent, srcInfo, dstInfo) -} - -// RebaseArchiveEntries rewrites the given srcContent archive replacing -// an occurrence of oldBase with newBase at the beginning of entry names. -// -// Deprecated: use [archive.RebaseArchiveEntries] instead. -func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser { - return archive.RebaseArchiveEntries(srcContent, oldBase, newBase) -} - -// CopyResource performs an archive copy from the given source path to the -// given destination path. -// -// Deprecated: use [archive.CopyResource] instead. 
-func CopyResource(srcPath, dstPath string, followLink bool) error { - return archive.CopyResource(srcPath, dstPath, followLink) -} - -// CopyTo handles extracting the given content whose -// entries should be sourced from srcInfo to dstPath. -// -// Deprecated: use [archive.CopyTo] instead. -func CopyTo(content io.Reader, srcInfo archive.CopyInfo, dstPath string) error { - return archive.CopyTo(content, srcInfo, dstPath) -} - -// ResolveHostSourcePath decides real path need to be copied. -// -// Deprecated: use [archive.ResolveHostSourcePath] instead. -func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, _ error) { - return archive.ResolveHostSourcePath(path, followLink) -} - -// GetRebaseName normalizes and compares path and resolvedPath. -// -// Deprecated: use [archive.GetRebaseName] instead. -func GetRebaseName(path, resolvedPath string) (string, string) { - return archive.GetRebaseName(path, resolvedPath) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/diff_deprecated.go deleted file mode 100644 index dd5e0d5ef5..0000000000 --- a/vendor/github.com/docker/docker/pkg/archive/diff_deprecated.go +++ /dev/null @@ -1,37 +0,0 @@ -package archive - -import ( - "io" - - "github.com/moby/go-archive" -) - -// UnpackLayer unpack `layer` to a `dest`. -// -// Deprecated: use [archive.UnpackLayer] instead. -func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { - return archive.UnpackLayer(dest, layer, toArchiveOpt(options)) -} - -// ApplyLayer parses a diff in the standard layer format from `layer`, -// and applies it to the directory `dest`. -// -// Deprecated: use [archive.ApplyLayer] instead. -func ApplyLayer(dest string, layer io.Reader) (int64, error) { - return archive.ApplyLayer(dest, layer) -} - -// ApplyUncompressedLayer parses a diff in the standard layer format from -// `layer`, and applies it to the directory `dest`. -// -// Deprecated: use [archive.ApplyUncompressedLayer] instead. -func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { - return archive.ApplyUncompressedLayer(dest, layer, toArchiveOpt(options)) -} - -// IsEmpty checks if the tar archive is empty (doesn't contain any entries). -// -// Deprecated: use [archive.IsEmpty] instead. -func IsEmpty(rd io.Reader) (bool, error) { - return archive.IsEmpty(rd) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/path_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/path_deprecated.go deleted file mode 100644 index 0fa74de68f..0000000000 --- a/vendor/github.com/docker/docker/pkg/archive/path_deprecated.go +++ /dev/null @@ -1,10 +0,0 @@ -package archive - -import "github.com/moby/go-archive" - -// CheckSystemDriveAndRemoveDriveLetter verifies that a path is the system drive. -// -// Deprecated: use [archive.CheckSystemDriveAndRemoveDriveLetter] instead. -func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { - return archive.CheckSystemDriveAndRemoveDriveLetter(path) -} diff --git a/vendor/github.com/docker/docker/pkg/archive/utils.go b/vendor/github.com/docker/docker/pkg/archive/utils.go deleted file mode 100644 index 692cf1602d..0000000000 --- a/vendor/github.com/docker/docker/pkg/archive/utils.go +++ /dev/null @@ -1,42 +0,0 @@ -package archive - -import ( - "github.com/docker/docker/pkg/idtools" - "github.com/moby/go-archive" -) - -// ToArchiveOpt converts an [TarOptions] to a [archive.TarOptions]. 
-// -// Deprecated: use [archive.TarOptions] instead, this utility is for internal use to transition to the [github.com/moby/go-archive] module. -func ToArchiveOpt(options *TarOptions) *archive.TarOptions { - return toArchiveOpt(options) -} - -func toArchiveOpt(options *TarOptions) *archive.TarOptions { - if options == nil { - return nil - } - - var chownOpts *archive.ChownOpts - if options.ChownOpts != nil { - chownOpts = &archive.ChownOpts{ - UID: options.ChownOpts.UID, - GID: options.ChownOpts.GID, - } - } - - return &archive.TarOptions{ - IncludeFiles: options.IncludeFiles, - ExcludePatterns: options.ExcludePatterns, - Compression: options.Compression, - NoLchown: options.NoLchown, - IDMap: idtools.ToUserIdentityMapping(options.IDMap), - ChownOpts: chownOpts, - IncludeSourceDir: options.IncludeSourceDir, - WhiteoutFormat: options.WhiteoutFormat, - NoOverwriteDirNonDir: options.NoOverwriteDirNonDir, - RebaseNames: options.RebaseNames, - InUserNS: options.InUserNS, - BestEffortXattrs: options.BestEffortXattrs, - } -} diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts_deprecated.go deleted file mode 100644 index 0ab8590b14..0000000000 --- a/vendor/github.com/docker/docker/pkg/archive/whiteouts_deprecated.go +++ /dev/null @@ -1,10 +0,0 @@ -package archive - -import "github.com/moby/go-archive" - -const ( - WhiteoutPrefix = archive.WhiteoutPrefix // Deprecated: use [archive.WhiteoutPrefix] instead. - WhiteoutMetaPrefix = archive.WhiteoutMetaPrefix // Deprecated: use [archive.WhiteoutMetaPrefix] instead. - WhiteoutLinkDir = archive.WhiteoutLinkDir // Deprecated: use [archive.WhiteoutLinkDir] instead. - WhiteoutOpaqueDir = archive.WhiteoutOpaqueDir // Deprecated: use [archive.WhiteoutOpaqueDir] instead. -) diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap_deprecated.go b/vendor/github.com/docker/docker/pkg/archive/wrap_deprecated.go deleted file mode 100644 index e5d3fa9a95..0000000000 --- a/vendor/github.com/docker/docker/pkg/archive/wrap_deprecated.go +++ /dev/null @@ -1,14 +0,0 @@ -package archive - -import ( - "io" - - "github.com/moby/go-archive" -) - -// Generate generates a new archive from the content provided as input. -// -// Deprecated: use [archive.Generate] instead. -func Generate(input ...string) (io.Reader, error) { - return archive.Generate(input...) -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go deleted file mode 100644 index 982f81d4f2..0000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools.go +++ /dev/null @@ -1,223 +0,0 @@ -package idtools - -import ( - "fmt" - "os" - - "github.com/moby/sys/user" -) - -// IDMap contains a single entry for user namespace range remapping. An array -// of IDMap entries represents the structure that will be provided to the Linux -// kernel for creating a user namespace. -// -// Deprecated: use [user.IDMap] instead. -type IDMap struct { - ContainerID int `json:"container_id"` - HostID int `json:"host_id"` - Size int `json:"size"` -} - -// MkdirAllAndChown creates a directory (include any along the path) and then modifies -// ownership to the requested uid/gid. If the directory already exists, this -// function will still change ownership and permissions. -// -// Deprecated: use [user.MkdirAllAndChown] instead. 
-func MkdirAllAndChown(path string, mode os.FileMode, owner Identity) error { - return user.MkdirAllAndChown(path, mode, owner.UID, owner.GID) -} - -// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. -// If the directory already exists, this function still changes ownership and permissions. -// Note that unlike os.Mkdir(), this function does not return IsExist error -// in case path already exists. -// -// Deprecated: use [user.MkdirAndChown] instead. -func MkdirAndChown(path string, mode os.FileMode, owner Identity) error { - return user.MkdirAndChown(path, mode, owner.UID, owner.GID) -} - -// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies -// ownership ONLY of newly created directories to the requested uid/gid. If the -// directories along the path exist, no change of ownership or permissions will be performed -// -// Deprecated: use [user.MkdirAllAndChown] with the [user.WithOnlyNew] option instead. -func MkdirAllAndChownNew(path string, mode os.FileMode, owner Identity) error { - return user.MkdirAllAndChown(path, mode, owner.UID, owner.GID, user.WithOnlyNew) -} - -// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. -// If the maps are empty, then the root uid/gid will default to "real" 0/0 -// -// Deprecated: use [(user.IdentityMapping).RootPair] instead. -func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { - return getRootUIDGID(uidMap, gidMap) -} - -// getRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. -// If the maps are empty, then the root uid/gid will default to "real" 0/0 -func getRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { - uid, err := toHost(0, uidMap) - if err != nil { - return -1, -1, err - } - gid, err := toHost(0, gidMap) - if err != nil { - return -1, -1, err - } - return uid, gid, nil -} - -// toContainer takes an id mapping, and uses it to translate a -// host ID to the remapped ID. If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id -func toContainer(hostID int, idMap []IDMap) (int, error) { - if idMap == nil { - return hostID, nil - } - for _, m := range idMap { - if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { - contID := m.ContainerID + (hostID - m.HostID) - return contID, nil - } - } - return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) -} - -// toHost takes an id mapping and a remapped ID, and translates the -// ID to the mapped host ID. If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id # -func toHost(contID int, idMap []IDMap) (int, error) { - if idMap == nil { - return contID, nil - } - for _, m := range idMap { - if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { - hostID := m.HostID + (contID - m.ContainerID) - return hostID, nil - } - } - return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) -} - -// Identity is either a UID and GID pair or a SID (but not both) -type Identity struct { - UID int - GID int - SID string -} - -// Chown changes the numeric uid and gid of the named file to id.UID and id.GID. -// -// Deprecated: this method is deprecated and will be removed in the next release. -func (id Identity) Chown(name string) error { - return os.Chown(name, id.UID, id.GID) -} - -// IdentityMapping contains a mappings of UIDs and GIDs. -// The zero value represents an empty mapping. 
-// -// Deprecated: this type is deprecated and will be removed in the next release. -type IdentityMapping struct { - UIDMaps []IDMap `json:"UIDMaps"` - GIDMaps []IDMap `json:"GIDMaps"` -} - -// FromUserIdentityMapping converts a [user.IdentityMapping] to an [idtools.IdentityMapping]. -// -// Deprecated: use [user.IdentityMapping] directly, this is transitioning to user package. -func FromUserIdentityMapping(u user.IdentityMapping) IdentityMapping { - return IdentityMapping{ - UIDMaps: fromUserIDMap(u.UIDMaps), - GIDMaps: fromUserIDMap(u.GIDMaps), - } -} - -func fromUserIDMap(u []user.IDMap) []IDMap { - if u == nil { - return nil - } - m := make([]IDMap, len(u)) - for i := range u { - m[i] = IDMap{ - ContainerID: int(u[i].ID), - HostID: int(u[i].ParentID), - Size: int(u[i].Count), - } - } - return m -} - -// ToUserIdentityMapping converts an [idtools.IdentityMapping] to a [user.IdentityMapping]. -// -// Deprecated: use [user.IdentityMapping] directly, this is transitioning to user package. -func ToUserIdentityMapping(u IdentityMapping) user.IdentityMapping { - return user.IdentityMapping{ - UIDMaps: toUserIDMap(u.UIDMaps), - GIDMaps: toUserIDMap(u.GIDMaps), - } -} - -func toUserIDMap(u []IDMap) []user.IDMap { - if u == nil { - return nil - } - m := make([]user.IDMap, len(u)) - for i := range u { - m[i] = user.IDMap{ - ID: int64(u[i].ContainerID), - ParentID: int64(u[i].HostID), - Count: int64(u[i].Size), - } - } - return m -} - -// RootPair returns a uid and gid pair for the root user. The error is ignored -// because a root user always exists, and the defaults are correct when the uid -// and gid maps are empty. -func (i IdentityMapping) RootPair() Identity { - uid, gid, _ := getRootUIDGID(i.UIDMaps, i.GIDMaps) - return Identity{UID: uid, GID: gid} -} - -// ToHost returns the host UID and GID for the container uid, gid. -// Remapping is only performed if the ids aren't already the remapped root ids -func (i IdentityMapping) ToHost(pair Identity) (Identity, error) { - var err error - target := i.RootPair() - - if pair.UID != target.UID { - target.UID, err = toHost(pair.UID, i.UIDMaps) - if err != nil { - return target, err - } - } - - if pair.GID != target.GID { - target.GID, err = toHost(pair.GID, i.GIDMaps) - } - return target, err -} - -// ToContainer returns the container UID and GID for the host uid and gid -func (i IdentityMapping) ToContainer(pair Identity) (int, int, error) { - uid, err := toContainer(pair.UID, i.UIDMaps) - if err != nil { - return -1, -1, err - } - gid, err := toContainer(pair.GID, i.GIDMaps) - return uid, gid, err -} - -// Empty returns true if there are no id mappings -func (i IdentityMapping) Empty() bool { - return len(i.UIDMaps) == 0 && len(i.GIDMaps) == 0 -} - -// CurrentIdentity returns the identity of the current process -// -// Deprecated: use [os.Getuid] and [os.Getegid] instead. 
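The idtools helpers being removed above are wrappers around github.com/moby/sys/user. A brief sketch of calling that package directly, using only the signatures the deleted shims themselves demonstrate; the paths and uid/gid values are placeholders:

package main

import (
	"log"
	"os"

	"github.com/moby/sys/user"
)

func main() {
	const uid, gid = 1000, 1000 // placeholder remapped root pair

	// Create the full path and chown every component
	// (what the deprecated MkdirAllAndChown forwarded to).
	if err := user.MkdirAllAndChown("/tmp/demo/a/b", os.FileMode(0o755), uid, gid); err != nil {
		log.Fatal(err)
	}

	// Same call with WithOnlyNew: only directories created by this call are chowned
	// (what the deprecated MkdirAllAndChownNew forwarded to).
	if err := user.MkdirAllAndChown("/tmp/demo/a/c", os.FileMode(0o755), uid, gid, user.WithOnlyNew); err != nil {
		log.Fatal(err)
	}
}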
-func CurrentIdentity() Identity { - return Identity{UID: os.Getuid(), GID: os.Getegid()} -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go deleted file mode 100644 index f83f59f30c..0000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go +++ /dev/null @@ -1,12 +0,0 @@ -package idtools - -const ( - SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" -) - -// TODO(thaJeztah): these magic consts need a source of reference, and should be defined in a canonical location -const ( - ContainerAdministratorSidString = "S-1-5-93-2-1" - - ContainerUserSidString = "S-1-5-93-2-2" -) diff --git a/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml b/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml index cdbf80dec9..f6e588a65a 100644 --- a/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml +++ b/vendor/github.com/fsouza/go-dockerclient/.golangci.yaml @@ -1,5 +1,24 @@ +version: "2" linters: - disable-all: true + default: none + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ +formatters: enable: - gofumpt - goimports + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/fsouza/go-dockerclient/container_update.go b/vendor/github.com/fsouza/go-dockerclient/container_update.go index e8de21365b..d2116dbd73 100644 --- a/vendor/github.com/fsouza/go-dockerclient/container_update.go +++ b/vendor/github.com/fsouza/go-dockerclient/container_update.go @@ -30,7 +30,7 @@ type UpdateContainerOptions struct { // // See https://goo.gl/Y6fXUy for more details. func (c *Client) UpdateContainer(id string, opts UpdateContainerOptions) error { - resp, err := c.do(http.MethodPost, fmt.Sprintf("/containers/"+id+"/update"), doOptions{ + resp, err := c.do(http.MethodPost, fmt.Sprintf("/containers/%s/update", id), doOptions{ data: opts, forceJSON: true, context: opts.Context, diff --git a/vendor/github.com/fsouza/go-dockerclient/event.go b/vendor/github.com/fsouza/go-dockerclient/event.go index ce1fb5021b..22d64f6ab7 100644 --- a/vendor/github.com/fsouza/go-dockerclient/event.go +++ b/vendor/github.com/fsouza/go-dockerclient/event.go @@ -271,11 +271,13 @@ func (eventState *eventMonitoringState) monitorEvents(c *Client, opts EventsOpti return } if ev == EOFEvent { - eventState.disableEventMonitoring() + go eventState.disableEventMonitoring() return } - eventState.updateLastSeen(ev) - eventState.sendEvent(ev) + go func(ev *APIEvents) { + eventState.updateLastSeen(ev) + eventState.sendEvent(ev) + }(ev) case err = <-eventState.errC: if errors.Is(err, ErrNoListeners) { eventState.disableEventMonitoring() diff --git a/vendor/github.com/fsouza/go-dockerclient/tar.go b/vendor/github.com/fsouza/go-dockerclient/tar.go index b764285b92..0d40c644ab 100644 --- a/vendor/github.com/fsouza/go-dockerclient/tar.go +++ b/vendor/github.com/fsouza/go-dockerclient/tar.go @@ -12,7 +12,7 @@ import ( "path/filepath" "strings" - "github.com/docker/docker/pkg/archive" + "github.com/moby/go-archive" "github.com/moby/patternmatcher" ) diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/digest.go b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go index 28f6967ba7..5b8eb4ff46 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/name/digest.go +++ 
b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go @@ -17,6 +17,7 @@ package name import ( // nolint: depguard _ "crypto/sha256" // Recommended by go-digest. + "encoding" "encoding/json" "strings" @@ -32,8 +33,11 @@ type Digest struct { original string } -// Ensure Digest implements Reference var _ Reference = (*Digest)(nil) +var _ encoding.TextMarshaler = (*Digest)(nil) +var _ encoding.TextUnmarshaler = (*Digest)(nil) +var _ json.Marshaler = (*Digest)(nil) +var _ json.Unmarshaler = (*Digest)(nil) // Context implements Reference. func (d Digest) Context() Repository { @@ -79,6 +83,21 @@ func (d *Digest) UnmarshalJSON(data []byte) error { return nil } +// MarshalText formats the digest into a string for text serialization. +func (d Digest) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText parses a text string into a Digest. +func (d *Digest) UnmarshalText(data []byte) error { + n, err := NewDigest(string(data)) + if err != nil { + return err + } + *d = n + return nil +} + // NewDigest returns a new Digest representing the given name. func NewDigest(name string, opts ...Option) (Digest, error) { // Split on "@" diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/registry.go b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go index 5b0d01769c..5e6b6e62a0 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/name/registry.go +++ b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go @@ -15,6 +15,8 @@ package name import ( + "encoding" + "encoding/json" "net" "net/url" "path" @@ -37,6 +39,11 @@ type Registry struct { registry string } +var _ encoding.TextMarshaler = (*Registry)(nil) +var _ encoding.TextUnmarshaler = (*Registry)(nil) +var _ json.Marshaler = (*Registry)(nil) +var _ json.Unmarshaler = (*Registry)(nil) + // RegistryStr returns the registry component of the Registry. func (r Registry) RegistryStr() string { return r.registry @@ -140,3 +147,33 @@ func NewInsecureRegistry(name string, opts ...Option) (Registry, error) { opts = append(opts, Insecure) return NewRegistry(name, opts...) } + +// MarshalJSON formats the Registry into a string for JSON serialization. +func (r Registry) MarshalJSON() ([]byte, error) { return json.Marshal(r.String()) } + +// UnmarshalJSON parses a JSON string into a Registry. +func (r *Registry) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + n, err := NewRegistry(s) + if err != nil { + return err + } + *r = n + return nil +} + +// MarshalText formats the registry into a string for text serialization. +func (r Registry) MarshalText() ([]byte, error) { return []byte(r.String()), nil } + +// UnmarshalText parses a text string into a Registry. 
+func (r *Registry) UnmarshalText(data []byte) error { + n, err := NewRegistry(string(data)) + if err != nil { + return err + } + *r = n + return nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/repository.go b/vendor/github.com/google/go-containerregistry/pkg/name/repository.go index 9250e36252..290797575e 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/name/repository.go +++ b/vendor/github.com/google/go-containerregistry/pkg/name/repository.go @@ -15,6 +15,8 @@ package name import ( + "encoding" + "encoding/json" "fmt" "strings" ) @@ -31,6 +33,11 @@ type Repository struct { repository string } +var _ encoding.TextMarshaler = (*Repository)(nil) +var _ encoding.TextUnmarshaler = (*Repository)(nil) +var _ json.Marshaler = (*Repository)(nil) +var _ json.Unmarshaler = (*Repository)(nil) + // See https://docs.docker.com/docker-hub/official_repos func hasImplicitNamespace(repo string, reg Registry) bool { return !strings.ContainsRune(repo, '/') && reg.RegistryStr() == DefaultRegistry @@ -119,3 +126,33 @@ func (r Repository) Digest(identifier string) Digest { d.original = d.Name() return d } + +// MarshalJSON formats the Repository into a string for JSON serialization. +func (r Repository) MarshalJSON() ([]byte, error) { return json.Marshal(r.String()) } + +// UnmarshalJSON parses a JSON string into a Repository. +func (r *Repository) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + n, err := NewRepository(s) + if err != nil { + return err + } + *r = n + return nil +} + +// MarshalText formats the repository name into a string for text serialization. +func (r Repository) MarshalText() ([]byte, error) { return []byte(r.String()), nil } + +// UnmarshalText parses a text string into a Repository. +func (r *Repository) UnmarshalText(data []byte) error { + n, err := NewRepository(string(data)) + if err != nil { + return err + } + *r = n + return nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/tag.go b/vendor/github.com/google/go-containerregistry/pkg/name/tag.go index 66bd1bec3d..cfa923f59d 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/name/tag.go +++ b/vendor/github.com/google/go-containerregistry/pkg/name/tag.go @@ -15,6 +15,8 @@ package name import ( + "encoding" + "encoding/json" "strings" ) @@ -31,8 +33,11 @@ type Tag struct { original string } -// Ensure Tag implements Reference var _ Reference = (*Tag)(nil) +var _ encoding.TextMarshaler = (*Tag)(nil) +var _ encoding.TextUnmarshaler = (*Tag)(nil) +var _ json.Marshaler = (*Tag)(nil) +var _ json.Unmarshaler = (*Tag)(nil) // Context implements Reference. func (t Tag) Context() Repository { @@ -80,6 +85,9 @@ func NewTag(name string, opts ...Option) (Tag, error) { if len(parts) > 1 && !strings.Contains(parts[len(parts)-1], regRepoDelimiter) { base = strings.Join(parts[:len(parts)-1], tagDelim) tag = parts[len(parts)-1] + if tag == "" { + return Tag{}, newErrBadName("%s must specify a tag name after the colon", name) + } } // We don't require a tag, but if we get one check it's valid, @@ -106,3 +114,33 @@ func NewTag(name string, opts ...Option) (Tag, error) { original: name, }, nil } + +// MarshalJSON formats the Tag into a string for JSON serialization. +func (t Tag) MarshalJSON() ([]byte, error) { return json.Marshal(t.String()) } + +// UnmarshalJSON parses a JSON string into a Tag. 
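With the JSON and text marshalling methods added in these name-package hunks, Registry, Repository, Tag, and Digest values round-trip through encoding/json as plain reference strings. A hedged sketch using Tag; the image reference is made up:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
)

type pinned struct {
	Tag name.Tag `json:"tag"`
}

func main() {
	t, err := name.NewTag("registry.example.com/app:v1.2.3")
	if err != nil {
		log.Fatal(err)
	}

	// MarshalJSON renders the reference in its string form.
	out, err := json.Marshal(pinned{Tag: t})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out)) // {"tag":"registry.example.com/app:v1.2.3"}

	// UnmarshalJSON parses the string back through name.NewTag.
	var back pinned
	if err := json.Unmarshal(out, &back); err != nil {
		log.Fatal(err)
	}
	fmt.Println(back.Tag.RepositoryStr(), back.Tag.TagStr()) // app v1.2.3
}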
+func (t *Tag) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + n, err := NewTag(s) + if err != nil { + return err + } + *t = n + return nil +} + +// MarshalText formats the tag into a string for text serialization. +func (t Tag) MarshalText() ([]byte, error) { return []byte(t.String()), nil } + +// UnmarshalText parses a text string into a Tag. +func (t *Tag) UnmarshalText(data []byte) error { + n, err := NewTag(string(data)) + if err != nil { + return err + } + *t = n + return nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go b/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go index f78a5fa89e..d81593bd59 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go @@ -16,12 +16,12 @@ package v1 import ( "crypto" + "encoding" "encoding/hex" "encoding/json" "fmt" "hash" "io" - "strconv" "strings" ) @@ -34,6 +34,11 @@ type Hash struct { Hex string } +var _ encoding.TextMarshaler = (*Hash)(nil) +var _ encoding.TextUnmarshaler = (*Hash)(nil) +var _ json.Marshaler = (*Hash)(nil) +var _ json.Unmarshaler = (*Hash)(nil) + // String reverses NewHash returning the string-form of the hash. func (h Hash) String() string { return fmt.Sprintf("%s:%s", h.Algorithm, h.Hex) @@ -49,14 +54,12 @@ func NewHash(s string) (Hash, error) { } // MarshalJSON implements json.Marshaler -func (h Hash) MarshalJSON() ([]byte, error) { - return json.Marshal(h.String()) -} +func (h Hash) MarshalJSON() ([]byte, error) { return json.Marshal(h.String()) } // UnmarshalJSON implements json.Unmarshaler func (h *Hash) UnmarshalJSON(data []byte) error { - s, err := strconv.Unquote(string(data)) - if err != nil { + var s string + if err := json.Unmarshal(data, &s); err != nil { return err } return h.parse(s) @@ -64,15 +67,11 @@ func (h *Hash) UnmarshalJSON(data []byte) error { // MarshalText implements encoding.TextMarshaler. This is required to use // v1.Hash as a key in a map when marshalling JSON. -func (h Hash) MarshalText() (text []byte, err error) { - return []byte(h.String()), nil -} +func (h Hash) MarshalText() ([]byte, error) { return []byte(h.String()), nil } // UnmarshalText implements encoding.TextUnmarshaler. This is required to use // v1.Hash as a key in a map when unmarshalling JSON. -func (h *Hash) UnmarshalText(text []byte) error { - return h.parse(string(text)) -} +func (h *Hash) UnmarshalText(text []byte) error { return h.parse(string(text)) } // Hasher returns a hash.Hash for the named algorithm (e.g. "sha256") func Hasher(name string) (hash.Hash, error) { diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go index ba4d746407..8a51690be4 100644 --- a/vendor/github.com/google/pprof/profile/merge.go +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -17,6 +17,7 @@ package profile import ( "encoding/binary" "fmt" + "slices" "sort" "strconv" "strings" @@ -78,12 +79,10 @@ func Merge(srcs []*Profile) (*Profile, error) { } } - for _, s := range p.Sample { - if isZeroSample(s) { - // If there are any zero samples, re-merge the profile to GC - // them. - return Merge([]*Profile{p}) - } + if slices.ContainsFunc(p.Sample, isZeroSample) { + // If there are any zero samples, re-merge the profile to GC + // them. 
+ return Merge([]*Profile{p}) } return p, nil diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go index f47a243903..43f561d445 100644 --- a/vendor/github.com/google/pprof/profile/profile.go +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -24,6 +24,7 @@ import ( "math" "path/filepath" "regexp" + "slices" "sort" "strings" "sync" @@ -734,12 +735,7 @@ func (p *Profile) RemoveLabel(key string) { // HasLabel returns true if a sample has a label with indicated key and value. func (s *Sample) HasLabel(key, value string) bool { - for _, v := range s.Label[key] { - if v == value { - return true - } - } - return false + return slices.Contains(s.Label[key], value) } // SetNumLabel sets the specified key to the specified value for all samples in the @@ -852,7 +848,17 @@ func (p *Profile) HasFileLines() bool { // "[vdso]", "[vsyscall]" and some others, see the code. func (m *Mapping) Unsymbolizable() bool { name := filepath.Base(m.File) - return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon" + switch { + case strings.HasPrefix(name, "["): + case strings.HasPrefix(name, "linux-vdso"): + case strings.HasPrefix(m.File, "/dev/dri/"): + case m.File == "//anon": + case m.File == "": + case strings.HasPrefix(m.File, "/memfd:"): + default: + return false + } + return true } // Copy makes a fully independent copy of a profile. diff --git a/vendor/github.com/google/pprof/profile/prune.go b/vendor/github.com/google/pprof/profile/prune.go index b2f9fd5466..7bba31e8ce 100644 --- a/vendor/github.com/google/pprof/profile/prune.go +++ b/vendor/github.com/google/pprof/profile/prune.go @@ -19,6 +19,7 @@ package profile import ( "fmt" "regexp" + "slices" "strings" ) @@ -40,13 +41,7 @@ func simplifyFunc(f string) string { // Account for unsimplified names -- try to remove the argument list by trimming // starting from the first '(', but skipping reserved names that have '('. for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) { - foundReserved := false - for _, res := range reservedNames { - if funcName[ind[0]:ind[1]] == res { - foundReserved = true - break - } - } + foundReserved := slices.Contains(reservedNames, funcName[ind[0]:ind[1]]) if !foundReserved { funcName = funcName[:ind[0]] break diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index a01bfafbe0..6bd50d4c9b 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -176,6 +176,10 @@ func WithMessageEvents(events ...event) Option { // WithSpanNameFormatter takes a function that will be called on every // request and the returned string will become the Span Name. +// +// When using [http.ServeMux] (or any middleware that sets the Pattern of [http.Request]), +// the span name formatter will run twice. Once when the span is created, and +// second time after the middleware, so the pattern can be used. 
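The new doc comment above records that the span name formatter now runs a second time after the inner handler, so a route pattern set by http.ServeMux (http.Request.Pattern, Go 1.23+) can feed the span name. A sketch of a formatter written with that in mind; the route, port, and operation name are placeholders:

package main

import (
	"log"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("GET /users/{id}", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	handler := otelhttp.NewHandler(mux, "server",
		otelhttp.WithSpanNameFormatter(func(operation string, r *http.Request) string {
			// On the second invocation, after the mux has matched a route,
			// Pattern is non-empty (e.g. "GET /users/{id}").
			if r.Pattern != "" {
				return r.Pattern
			}
			return operation + " " + r.Method
		}))

	log.Fatal(http.ListenAndServe(":8080", handler))
}

Preferring the matched pattern over the raw URL keeps span names low-cardinality once routing has happened.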
func WithSpanNameFormatter(f func(operation string, r *http.Request) string) Option { return optionFunc(func(c *config) { c.SpanNameFormatter = f diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index 3ea05d0199..937f9b4e73 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -98,7 +98,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) opts := []trace.SpanStartOption{ - trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r)...), + trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r, semconv.RequestTraceAttrsOpts{})...), } opts = append(opts, h.spanStartOptions...) @@ -176,7 +176,12 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http ctx = ContextWithLabeler(ctx, labeler) } - next.ServeHTTP(w, r.WithContext(ctx)) + r = r.WithContext(ctx) + next.ServeHTTP(w, r) + + if r.Pattern != "" { + span.SetName(h.spanNameFormatter(h.operation, r)) + } statusCode := rww.StatusCode() bytesWritten := rww.BytesWritten() diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go index 866aa21dce..d032aa841b 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go @@ -1,9 +1,11 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/request/body_wrapper.go.tmpl // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package request provides types and functionality to handle HTTP request +// handling. package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" import ( @@ -56,7 +58,7 @@ func (w *BodyWrapper) updateReadData(n int64, err error) { } } -// Closes closes the io.ReadCloser. +// Close closes the io.ReadCloser. func (w *BodyWrapper) Close() error { return w.ReadCloser.Close() } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go index 73184e7d00..ca2e4c14c7 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/request/resp_writer_wrapper.go.tmpl // Copyright The OpenTelemetry Authors @@ -105,7 +105,7 @@ func (w *RespWriterWrapper) BytesWritten() int64 { return w.written } -// BytesWritten returns the HTTP status code that was sent. +// StatusCode returns the HTTP status code that was sent. 
func (w *RespWriterWrapper) StatusCode() int { w.mu.RLock() defer w.mu.RUnlock() diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go index 4693a01949..7cb9693d98 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/semconv/env.go.tmpl // Copyright The OpenTelemetry Authors @@ -20,7 +20,7 @@ import ( ) // OTelSemConvStabilityOptIn is an environment variable. -// That can be set to "old" or "http/dup" to opt into the new HTTP semantic conventions. +// That can be set to "http/dup" to keep getting the old HTTP semantic conventions. const OTelSemConvStabilityOptIn = "OTEL_SEMCONV_STABILITY_OPT_IN" type ResponseTelemetry struct { @@ -61,19 +61,23 @@ type HTTPServer struct { // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. -func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { +func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request, opts RequestTraceAttrsOpts) []attribute.KeyValue { + attrs := CurrentHTTPServer{}.RequestTraceAttrs(server, req, opts) if s.duplicate { - return append(OldHTTPServer{}.RequestTraceAttrs(server, req), CurrentHTTPServer{}.RequestTraceAttrs(server, req)...) + return OldHTTPServer{}.RequestTraceAttrs(server, req, attrs) } - return OldHTTPServer{}.RequestTraceAttrs(server, req) + return attrs } func (s HTTPServer) NetworkTransportAttr(network string) []attribute.KeyValue { if s.duplicate { - return append([]attribute.KeyValue{OldHTTPServer{}.NetworkTransportAttr(network)}, CurrentHTTPServer{}.NetworkTransportAttr(network)) + return []attribute.KeyValue{ + OldHTTPServer{}.NetworkTransportAttr(network), + CurrentHTTPServer{}.NetworkTransportAttr(network), + } } return []attribute.KeyValue{ - OldHTTPServer{}.NetworkTransportAttr(network), + CurrentHTTPServer{}.NetworkTransportAttr(network), } } @@ -81,15 +85,16 @@ func (s HTTPServer) NetworkTransportAttr(network string) []attribute.KeyValue { // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + attrs := CurrentHTTPServer{}.ResponseTraceAttrs(resp) if s.duplicate { - return append(OldHTTPServer{}.ResponseTraceAttrs(resp), CurrentHTTPServer{}.ResponseTraceAttrs(resp)...) + return OldHTTPServer{}.ResponseTraceAttrs(resp, attrs) } - return OldHTTPServer{}.ResponseTraceAttrs(resp) + return attrs } // Route returns the attribute for the route. 
func (s HTTPServer) Route(route string) attribute.KeyValue { - return OldHTTPServer{}.Route(route) + return CurrentHTTPServer{}.Route(route) } // Status returns a span status code and message for an HTTP status code @@ -121,6 +126,8 @@ type MetricAttributes struct { type MetricData struct { RequestSize int64 + + // The request duration, in milliseconds ElapsedTime float64 } @@ -139,7 +146,19 @@ var ( ) func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { - if s.requestBytesCounter != nil && s.responseBytesCounter != nil && s.serverLatencyMeasure != nil { + if s.requestDurationHistogram != nil && s.requestBodySizeHistogram != nil && s.responseBodySizeHistogram != nil { + attributes := CurrentHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption) + *recordOpts = append(*recordOpts, o) + s.requestBodySizeHistogram.Record(ctx, md.RequestSize, *recordOpts...) + s.responseBodySizeHistogram.Record(ctx, md.ResponseSize, *recordOpts...) + s.requestDurationHistogram.Record(ctx, md.ElapsedTime/1000.0, o) + *recordOpts = (*recordOpts)[:0] + metricRecordOptionPool.Put(recordOpts) + } + + if s.duplicate && s.requestBytesCounter != nil && s.responseBytesCounter != nil && s.serverLatencyMeasure != nil { attributes := OldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) o := metric.WithAttributeSet(attribute.NewSet(attributes...)) addOpts := metricAddOptionPool.Get().(*[]metric.AddOption) @@ -150,29 +169,28 @@ func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { *addOpts = (*addOpts)[:0] metricAddOptionPool.Put(addOpts) } +} - if s.duplicate && s.requestDurationHistogram != nil && s.requestBodySizeHistogram != nil && s.responseBodySizeHistogram != nil { - attributes := CurrentHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) - o := metric.WithAttributeSet(attribute.NewSet(attributes...)) - recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption) - *recordOpts = append(*recordOpts, o) - s.requestBodySizeHistogram.Record(ctx, md.RequestSize, *recordOpts...) - s.responseBodySizeHistogram.Record(ctx, md.ResponseSize, *recordOpts...) - s.requestDurationHistogram.Record(ctx, md.ElapsedTime, o) - *recordOpts = (*recordOpts)[:0] - metricRecordOptionPool.Put(recordOpts) +// hasOptIn returns true if the comma-separated version string contains the +// exact optIn value. 
+func hasOptIn(version, optIn string) bool { + for _, v := range strings.Split(version, ",") { + if strings.TrimSpace(v) == optIn { + return true + } } + return false } func NewHTTPServer(meter metric.Meter) HTTPServer { env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn)) - duplicate := env == "http/dup" + duplicate := hasOptIn(env, "http/dup") server := HTTPServer{ duplicate: duplicate, } - server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter) + server.requestBodySizeHistogram, server.responseBodySizeHistogram, server.requestDurationHistogram = CurrentHTTPServer{}.createMeasures(meter) if duplicate { - server.requestBodySizeHistogram, server.responseBodySizeHistogram, server.requestDurationHistogram = CurrentHTTPServer{}.createMeasures(meter) + server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter) } return server } @@ -192,13 +210,13 @@ type HTTPClient struct { func NewHTTPClient(meter metric.Meter) HTTPClient { env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn)) - duplicate := env == "http/dup" + duplicate := hasOptIn(env, "http/dup") client := HTTPClient{ duplicate: duplicate, } - client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = OldHTTPClient{}.createMeasures(meter) + client.requestBodySize, client.requestDuration = CurrentHTTPClient{}.createMeasures(meter) if duplicate { - client.requestBodySize, client.requestDuration = CurrentHTTPClient{}.createMeasures(meter) + client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = OldHTTPClient{}.createMeasures(meter) } return client @@ -206,19 +224,20 @@ func NewHTTPClient(meter metric.Meter) HTTPClient { // RequestTraceAttrs returns attributes for an HTTP request made by a client. func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + attrs := CurrentHTTPClient{}.RequestTraceAttrs(req) if c.duplicate { - return append(OldHTTPClient{}.RequestTraceAttrs(req), CurrentHTTPClient{}.RequestTraceAttrs(req)...) + return OldHTTPClient{}.RequestTraceAttrs(req, attrs) } - return OldHTTPClient{}.RequestTraceAttrs(req) + return attrs } // ResponseTraceAttrs returns metric attributes for an HTTP request made by a client. func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + attrs := CurrentHTTPClient{}.ResponseTraceAttrs(resp) if c.duplicate { - return append(OldHTTPClient{}.ResponseTraceAttrs(resp), CurrentHTTPClient{}.ResponseTraceAttrs(resp)...) 
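The opt-in handling above no longer compares OTEL_SEMCONV_STABILITY_OPT_IN against "http/dup" as a whole; the variable may hold a comma-separated list of tokens. A small self-contained sketch of the same parsing, with illustrative sample values:

package main

import (
	"fmt"
	"strings"
)

// hasOptIn reports whether the comma-separated value contains the exact
// (whitespace-trimmed) optIn token, mirroring the helper in the diff.
func hasOptIn(value, optIn string) bool {
	for _, v := range strings.Split(value, ",") {
		if strings.TrimSpace(v) == optIn {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(hasOptIn("http/dup", "http/dup"))                // true
	fmt.Println(hasOptIn("database/dup, http/dup", "http/dup"))  // true: trimmed token match
	fmt.Println(hasOptIn("http/duplicate", "http/dup"))          // false: no substring matching
}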
+ return OldHTTPClient{}.ResponseTraceAttrs(resp, attrs) } - - return OldHTTPClient{}.ResponseTraceAttrs(resp) + return attrs } func (c HTTPClient) Status(code int) (codes.Code, string) { @@ -232,11 +251,7 @@ func (c HTTPClient) Status(code int) (codes.Code, string) { } func (c HTTPClient) ErrorType(err error) attribute.KeyValue { - if c.duplicate { - return CurrentHTTPClient{}.ErrorType(err) - } - - return attribute.KeyValue{} + return CurrentHTTPClient{}.ErrorType(err) } type MetricOpts struct { @@ -255,17 +270,17 @@ func (o MetricOpts) AddOptions() metric.AddOption { func (c HTTPClient) MetricOptions(ma MetricAttributes) map[string]MetricOpts { opts := map[string]MetricOpts{} - attributes := OldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) + attributes := CurrentHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) set := metric.WithAttributeSet(attribute.NewSet(attributes...)) - opts["old"] = MetricOpts{ + opts["new"] = MetricOpts{ measurement: set, addOptions: set, } if c.duplicate { - attributes := CurrentHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) + attributes := OldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) set := metric.WithAttributeSet(attribute.NewSet(attributes...)) - opts["new"] = MetricOpts{ + opts["old"] = MetricOpts{ measurement: set, addOptions: set, } @@ -275,17 +290,17 @@ func (c HTTPClient) MetricOptions(ma MetricAttributes) map[string]MetricOpts { } func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts map[string]MetricOpts) { - if s.requestBytesCounter == nil || s.latencyMeasure == nil { + if s.requestBodySize == nil || s.requestDuration == nil { // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). return } - s.requestBytesCounter.Add(ctx, md.RequestSize, opts["old"].AddOptions()) - s.latencyMeasure.Record(ctx, md.ElapsedTime, opts["old"].MeasurementOption()) + s.requestBodySize.Record(ctx, md.RequestSize, opts["new"].MeasurementOption()) + s.requestDuration.Record(ctx, md.ElapsedTime/1000, opts["new"].MeasurementOption()) if s.duplicate { - s.requestBodySize.Record(ctx, md.RequestSize, opts["new"].MeasurementOption()) - s.requestDuration.Record(ctx, md.ElapsedTime, opts["new"].MeasurementOption()) + s.requestBytesCounter.Add(ctx, md.RequestSize, opts["old"].AddOptions()) + s.latencyMeasure.Record(ctx, md.ElapsedTime, opts["old"].MeasurementOption()) } } @@ -299,9 +314,10 @@ func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, } func (s HTTPClient) TraceAttributes(host string) []attribute.KeyValue { + attrs := CurrentHTTPClient{}.TraceAttributes(host) if s.duplicate { - return append(OldHTTPClient{}.TraceAttributes(host), CurrentHTTPClient{}.TraceAttributes(host)...) + return OldHTTPClient{}.TraceAttributes(host, attrs) } - return OldHTTPClient{}.TraceAttributes(host) + return attrs } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go index 8b85eff90a..53976b0d5a 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go @@ -1,9 +1,11 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. 
// source: internal/shared/semconv/httpconv.go.tmpl // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package semconv provides OpenTelemetry semantic convention types and +// functionality. package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" import ( @@ -20,9 +22,14 @@ import ( semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) +type RequestTraceAttrsOpts struct { + // If set, this is used as value for the "http.client_ip" attribute. + HTTPClientIP string +} + type CurrentHTTPServer struct{} -// TraceRequest returns trace attributes for an HTTP request received by a +// RequestTraceAttrs returns trace attributes for an HTTP request received by a // server. // // The server must be the primary server name if it is known. For example this @@ -38,7 +45,7 @@ type CurrentHTTPServer struct{} // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. -func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { +func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request, opts RequestTraceAttrsOpts) []attribute.KeyValue { count := 3 // ServerAddress, Method, Scheme var host string @@ -65,7 +72,8 @@ func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request) [ scheme := n.scheme(req.TLS != nil) - if peer, peerPort := SplitHostPort(req.RemoteAddr); peer != "" { + peer, peerPort := SplitHostPort(req.RemoteAddr) + if peer != "" { // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a // file-path that would be interpreted with a sock family. count++ @@ -79,7 +87,17 @@ func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request) [ count++ } - clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) + // For client IP, use, in order: + // 1. The value passed in the options + // 2. The value in the X-Forwarded-For header + // 3. The peer address + clientIP := opts.HTTPClientIP + if clientIP == "" { + clientIP = serverClientIP(req.Header.Get("X-Forwarded-For")) + if clientIP == "" { + clientIP = peer + } + } if clientIP != "" { count++ } @@ -96,6 +114,11 @@ func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request) [ count++ } + route := httpRoute(req.Pattern) + if route != "" { + count++ + } + attrs := make([]attribute.KeyValue, 0, count) attrs = append(attrs, semconvNew.ServerAddress(host), @@ -119,7 +142,7 @@ func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request) [ } } - if useragent := req.UserAgent(); useragent != "" { + if useragent != "" { attrs = append(attrs, semconvNew.UserAgentOriginal(useragent)) } @@ -138,10 +161,14 @@ func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request) [ attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) } + if route != "" { + attrs = append(attrs, n.Route(route)) + } + return attrs } -func (o CurrentHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue { +func (n CurrentHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue { switch network { case "tcp", "tcp4", "tcp6": return semconvNew.NetworkTransportTCP @@ -176,9 +203,11 @@ func (n CurrentHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:rev return semconvNew.URLScheme("http") } -// TraceResponse returns trace attributes for telemetry from an HTTP response. 
+// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP +// response. // -// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. +// If any of the fields in the ResponseTelemetry are not set the attribute will +// be omitted. func (n CurrentHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { var count int @@ -241,6 +270,7 @@ func (n CurrentHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Histo semconvNew.HTTPServerRequestDurationName, metric.WithUnit(semconvNew.HTTPServerRequestDurationUnit), metric.WithDescription(semconvNew.HTTPServerRequestDurationDescription), + metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10), ) handleErr(err) @@ -459,6 +489,7 @@ func (n CurrentHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Histo semconvNew.HTTPClientRequestDurationName, metric.WithUnit(semconvNew.HTTPClientRequestDurationUnit), metric.WithDescription(semconvNew.HTTPClientRequestDurationDescription), + metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10), ) handleErr(err) @@ -501,7 +532,7 @@ func (n CurrentHTTPClient) MetricAttributes(req *http.Request, statusCode int, a attributes = append(attributes, semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), semconvNew.ServerAddress(requestHost), - n.scheme(req.TLS != nil), + n.scheme(req), ) if port > 0 { @@ -520,15 +551,18 @@ func (n CurrentHTTPClient) MetricAttributes(req *http.Request, statusCode int, a return attributes } -// Attributes for httptrace. +// TraceAttributes returns attributes for httptrace. func (n CurrentHTTPClient) TraceAttributes(host string) []attribute.KeyValue { return []attribute.KeyValue{ semconvNew.ServerAddress(host), } } -func (n CurrentHTTPClient) scheme(https bool) attribute.KeyValue { // nolint:revive - if https { +func (n CurrentHTTPClient) scheme(req *http.Request) attribute.KeyValue { + if req.URL != nil && req.URL.Scheme != "" { + return semconvNew.URLScheme(req.URL.Scheme) + } + if req.TLS != nil { return semconvNew.URLScheme("https") } return semconvNew.URLScheme("http") diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go index 315d3dd29c..bc1f7751db 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/semconv/util.go.tmpl // Copyright The OpenTelemetry Authors @@ -28,17 +28,17 @@ func SplitHostPort(hostport string) (host string, port int) { port = -1 if strings.HasPrefix(hostport, "[") { - addrEnd := strings.LastIndex(hostport, "]") + addrEnd := strings.LastIndexByte(hostport, ']') if addrEnd < 0 { // Invalid hostport. 
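Both request-duration histograms above now pin explicit bucket boundaries from 5 ms up to 10 s, and the recording paths earlier in this diff divide the millisecond ElapsedTime by 1000 so the value is reported in seconds. A hedged sketch of creating and recording such a histogram through the public metric API; the meter name and attributes are illustrative:

package main

import (
	"context"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example") // instrumentation scope name is illustrative
	duration, err := meter.Float64Histogram(
		"http.server.request.duration",
		metric.WithUnit("s"),
		metric.WithDescription("Duration of HTTP server requests."),
		metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1, 2.5, 5, 7.5, 10),
	)
	if err != nil {
		panic(err)
	}

	start := time.Now()
	// ... handle a request ...
	elapsedMs := float64(time.Since(start)) / float64(time.Millisecond)

	// The histogram unit is seconds, so a millisecond measurement is divided
	// by 1000 before recording, mirroring ElapsedTime/1000.0 in the diff.
	duration.Record(context.Background(), elapsedMs/1000.0,
		metric.WithAttributes(attribute.String("http.request.method", "GET")))
}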
return } - if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 { + if i := strings.LastIndexByte(hostport[addrEnd:], ':'); i < 0 { host = hostport[1:addrEnd] return } } else { - if i := strings.LastIndex(hostport, ":"); i < 0 { + if i := strings.LastIndexByte(hostport, ':'); i < 0 { host = hostport return } @@ -70,12 +70,19 @@ func requiredHTTPPort(https bool, port int) int { // nolint:revive } func serverClientIP(xForwardedFor string) string { - if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + if idx := strings.IndexByte(xForwardedFor, ','); idx >= 0 { xForwardedFor = xForwardedFor[:idx] } return xForwardedFor } +func httpRoute(pattern string) string { + if idx := strings.IndexByte(pattern, '/'); idx >= 0 { + return pattern[idx:] + } + return "" +} + func netProtocol(proto string) (name string, version string) { name, version, _ = strings.Cut(proto, "/") switch name { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go index 742c2113e1..ba7fccf1ef 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/semconv/v120.0.go.tmpl // Copyright The OpenTelemetry Authors @@ -37,8 +37,8 @@ type OldHTTPServer struct{} // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. -func (o OldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { - return semconvutil.HTTPServerRequest(server, req) +func (o OldHTTPServer) RequestTraceAttrs(server string, req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue { + return semconvutil.HTTPServerRequest(server, req, semconvutil.HTTPServerRequestOptions{}, attrs) } func (o OldHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue { @@ -48,9 +48,7 @@ func (o OldHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue { // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. 
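The httpRoute helper above keeps only the path portion of a ServeMux pattern for the route attribute. A standalone sketch of its behavior on a few illustrative patterns:

package main

import (
	"fmt"
	"strings"
)

// httpRoute strips an optional method prefix ("GET /users/{id}") and returns
// the pattern from its first '/', or "" when there is no path to report.
func httpRoute(pattern string) string {
	if idx := strings.IndexByte(pattern, '/'); idx >= 0 {
		return pattern[idx:]
	}
	return ""
}

func main() {
	fmt.Println(httpRoute("GET /users/{id}")) // "/users/{id}"
	fmt.Println(httpRoute("/healthz"))        // "/healthz"
	fmt.Println(httpRoute(""))                // "" (no route attribute emitted)
}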
-func (o OldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { - attributes := []attribute.KeyValue{} - +func (o OldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry, attributes []attribute.KeyValue) []attribute.KeyValue { if resp.ReadBytes > 0 { attributes = append(attributes, semconv.HTTPRequestContentLength(int(resp.ReadBytes))) } @@ -179,12 +177,12 @@ func (o OldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive type OldHTTPClient struct{} -func (o OldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { - return semconvutil.HTTPClientRequest(req) +func (o OldHTTPClient) RequestTraceAttrs(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue { + return semconvutil.HTTPClientRequest(req, attrs) } -func (o OldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { - return semconvutil.HTTPClientResponse(resp) +func (o OldHTTPClient) ResponseTraceAttrs(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue { + return semconvutil.HTTPClientResponse(resp, attrs) } func (o OldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { @@ -269,9 +267,7 @@ func (o OldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, return requestBytesCounter, responseBytesCounter, latencyMeasure } -// Attributes for httptrace. -func (c OldHTTPClient) TraceAttributes(host string) []attribute.KeyValue { - return []attribute.KeyValue{ - semconv.NetHostName(host), - } +// TraceAttributes returns attributes for httptrace. +func (c OldHTTPClient) TraceAttributes(host string, attrs []attribute.KeyValue) []attribute.KeyValue { + return append(attrs, semconv.NetHostName(host)) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go index a73bb06e90..b997354793 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go @@ -1,14 +1,16 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/semconvutil/httpconv.go.tmpl // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package semconvutil provides OpenTelemetry semantic convention utilities. package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" import ( "fmt" "net/http" + "slices" "strings" "go.opentelemetry.io/otel/attribute" @@ -16,6 +18,11 @@ import ( semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) +type HTTPServerRequestOptions struct { + // If set, this is used as value for the "http.client_ip" attribute. + HTTPClientIP string +} + // HTTPClientResponse returns trace attributes for an HTTP response received by a // client from a server. It will return the following attributes if the related // values are defined in resp: "http.status.code", @@ -26,9 +33,9 @@ import ( // attributes. If a complete set of attributes can be generated using the // request contained in resp. For example: // -// append(HTTPClientResponse(resp), ClientRequest(resp.Request)...) 
-func HTTPClientResponse(resp *http.Response) []attribute.KeyValue { - return hc.ClientResponse(resp) +// HTTPClientResponse(resp, ClientRequest(resp.Request))) +func HTTPClientResponse(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue { + return hc.ClientResponse(resp, attrs) } // HTTPClientRequest returns trace attributes for an HTTP request made by a client. @@ -36,8 +43,8 @@ func HTTPClientResponse(resp *http.Response) []attribute.KeyValue { // "net.peer.name". The following attributes are returned if the related values // are defined in req: "net.peer.port", "user_agent.original", // "http.request_content_length". -func HTTPClientRequest(req *http.Request) []attribute.KeyValue { - return hc.ClientRequest(req) +func HTTPClientRequest(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue { + return hc.ClientRequest(req, attrs) } // HTTPClientRequestMetrics returns metric attributes for an HTTP request made by a client. @@ -75,8 +82,8 @@ func HTTPClientStatus(code int) (codes.Code, string) { // "http.target", "net.host.name". The following attributes are returned if // they related values are defined in req: "net.host.port", "net.sock.peer.addr", // "net.sock.peer.port", "user_agent.original", "http.client_ip". -func HTTPServerRequest(server string, req *http.Request) []attribute.KeyValue { - return hc.ServerRequest(server, req) +func HTTPServerRequest(server string, req *http.Request, opts HTTPServerRequestOptions, attrs []attribute.KeyValue) []attribute.KeyValue { + return hc.ServerRequest(server, req, opts, attrs) } // HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a @@ -153,8 +160,8 @@ var hc = &httpConv{ // attributes. If a complete set of attributes can be generated using the // request contained in resp. For example: // -// append(ClientResponse(resp), ClientRequest(resp.Request)...) -func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue { +// ClientResponse(resp, ClientRequest(resp.Request)) +func (c *httpConv) ClientResponse(resp *http.Response, attrs []attribute.KeyValue) []attribute.KeyValue { /* The following semantic conventions are returned if present: http.status_code int http.response_content_length int @@ -166,8 +173,11 @@ func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue { if resp.ContentLength > 0 { n++ } + if n == 0 { + return attrs + } - attrs := make([]attribute.KeyValue, 0, n) + attrs = slices.Grow(attrs, n) if resp.StatusCode > 0 { attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode)) } @@ -182,7 +192,7 @@ func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue { // "net.peer.name". The following attributes are returned if the related values // are defined in req: "net.peer.port", "user_agent.original", // "http.request_content_length", "user_agent.original". 
-func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { +func (c *httpConv) ClientRequest(req *http.Request, attrs []attribute.KeyValue) []attribute.KeyValue { /* The following semantic conventions are returned if present: http.method string user_agent.original string @@ -221,8 +231,7 @@ func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { n++ } - attrs := make([]attribute.KeyValue, 0, n) - + attrs = slices.Grow(attrs, n) attrs = append(attrs, c.method(req.Method)) var u string @@ -305,7 +314,7 @@ func (c *httpConv) ClientRequestMetrics(req *http.Request) []attribute.KeyValue // related values are defined in req: "net.host.port", "net.sock.peer.addr", // "net.sock.peer.port", "user_agent.original", "http.client_ip", // "net.protocol.name", "net.protocol.version". -func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue { +func (c *httpConv) ServerRequest(server string, req *http.Request, opts HTTPServerRequestOptions, attrs []attribute.KeyValue) []attribute.KeyValue { /* The following semantic conventions are returned if present: http.method string http.scheme string @@ -358,7 +367,17 @@ func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.K n++ } - clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) + // For client IP, use, in order: + // 1. The value passed in the options + // 2. The value in the X-Forwarded-For header + // 3. The peer address + clientIP := opts.HTTPClientIP + if clientIP == "" { + clientIP = serverClientIP(req.Header.Get("X-Forwarded-For")) + if clientIP == "" { + clientIP = peer + } + } if clientIP != "" { n++ } @@ -378,7 +397,7 @@ func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.K n++ } - attrs := make([]attribute.KeyValue, 0, n) + attrs = slices.Grow(attrs, n) attrs = append(attrs, c.method(req.Method)) attrs = append(attrs, c.scheme(req.TLS != nil)) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go index de74fa252a..df97255e41 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -1,4 +1,4 @@ -// Code created by gotmpl. DO NOT MODIFY. +// Code generated by gotmpl. DO NOT MODIFY. // source: internal/shared/semconvutil/netconv.go.tmpl // Copyright The OpenTelemetry Authors diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go index ea504e396f..d62ce44b00 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go @@ -35,14 +35,14 @@ func (l *Labeler) Get() []attribute.KeyValue { type labelerContextKeyType int -const lablelerContextKey labelerContextKeyType = 0 +const labelerContextKey labelerContextKeyType = 0 // ContextWithLabeler returns a new context with the provided Labeler instance. // Attributes added to the specified labeler will be injected into metrics // emitted by the instrumentation. Only one labeller can be injected into the // context. Injecting it multiple times will override the previous calls. 
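The signature changes in this file thread a caller-supplied attrs slice through the old-convention helpers and reserve capacity with slices.Grow, so that in duplicate mode the old and new attributes end up in one backing array instead of two allocations. A hedged sketch of that pattern; the helper names and attribute keys below are stand-ins, not the package's API:

package main

import (
	"fmt"
	"slices"

	"go.opentelemetry.io/otel/attribute"
)

// newAttrs and oldAttrs are illustrative stand-ins for the current- and
// old-convention attribute helpers.
func newAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{attribute.String("url.scheme", "http")}
}

func oldAttrs(attrs []attribute.KeyValue) []attribute.KeyValue {
	// Reserve room up front so the append below does not reallocate.
	attrs = slices.Grow(attrs, 1)
	return append(attrs, attribute.String("http.scheme", "http"))
}

func main() {
	// Duplicate mode: compute the new-convention attributes once, then let the
	// old-convention helper extend the same backing array.
	attrs := oldAttrs(newAttrs())
	for _, kv := range attrs {
		fmt.Println(kv.Key, kv.Value.Emit())
	}
}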
func ContextWithLabeler(parent context.Context, l *Labeler) context.Context { - return context.WithValue(parent, lablelerContextKey, l) + return context.WithValue(parent, labelerContextKey, l) } // LabelerFromContext retrieves a Labeler instance from the provided context if @@ -50,7 +50,7 @@ func ContextWithLabeler(parent context.Context, l *Labeler) context.Context { // Labeler is returned and the second return value is false. In this case it is // safe to use the Labeler but any attributes added to it will not be used. func LabelerFromContext(ctx context.Context) (*Labeler, bool) { - l, ok := ctx.Value(lablelerContextKey).(*Labeler) + l, ok := ctx.Value(labelerContextKey).(*Labeler) if !ok { l = &Labeler{} } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 1ec9a00c7a..6be4c1fde2 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,13 +5,6 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. func Version() string { - return "0.60.0" + return "0.61.0" // This string is updated by the pre_release.sh script during release } - -// SemVersion is the semantic version to be supplied to tracer/meter creation. -// -// Deprecated: Use [Version] instead. -func SemVersion() string { - return Version() -} diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index c58e48ab0c..888e5da802 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -1,13 +1,9 @@ -# See https://github.com/golangci/golangci-lint#config-file +version: "2" run: - issues-exit-code: 1 #Default - tests: true #Default - + issues-exit-code: 1 + tests: true linters: - # Disable everything by default so upgrades to not include new "default - # enabled" linters. - disable-all: true - # Specifically enable linters we want to use. + default: none enable: - asasalint - bodyclose @@ -15,10 +11,7 @@ linters: - errcheck - errorlint - godot - - gofumpt - - goimports - gosec - - gosimple - govet - ineffassign - misspell @@ -26,227 +19,230 @@ linters: - revive - staticcheck - testifylint - - typecheck - unconvert - - unused - unparam + - unused - usestdlibvars - usetesting - + settings: + depguard: + rules: + auto/sdk: + files: + - '!internal/global/trace.go' + - ~internal/global/trace_test.go + deny: + - pkg: go.opentelemetry.io/auto/sdk + desc: Do not use SDK from automatic instrumentation. + non-tests: + files: + - '!$test' + - '!**/*test/*.go' + - '!**/internal/matchers/*.go' + deny: + - pkg: testing + - pkg: github.com/stretchr/testify + - pkg: crypto/md5 + - pkg: crypto/sha1 + - pkg: crypto/**/pkix + otel-internal: + files: + - '**/sdk/*.go' + - '**/sdk/**/*.go' + - '**/exporters/*.go' + - '**/exporters/**/*.go' + - '**/schema/*.go' + - '**/schema/**/*.go' + - '**/metric/*.go' + - '**/metric/**/*.go' + - '**/bridge/*.go' + - '**/bridge/**/*.go' + - '**/trace/*.go' + - '**/trace/**/*.go' + - '**/log/*.go' + - '**/log/**/*.go' + deny: + - pkg: go.opentelemetry.io/otel/internal$ + desc: Do not use cross-module internal packages. + - pkg: go.opentelemetry.io/otel/internal/internaltest + desc: Do not use cross-module internal packages. 
+ - pkg: go.opentelemetry.io/otel/internal/matchers + desc: Do not use cross-module internal packages. + otlp-internal: + files: + - '!**/exporters/otlp/internal/**/*.go' + deny: + - pkg: go.opentelemetry.io/otel/exporters/otlp/internal + desc: Do not use cross-module internal packages. + otlpmetric-internal: + files: + - '!**/exporters/otlp/otlpmetric/internal/*.go' + - '!**/exporters/otlp/otlpmetric/internal/**/*.go' + deny: + - pkg: go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal + desc: Do not use cross-module internal packages. + otlptrace-internal: + files: + - '!**/exporters/otlp/otlptrace/*.go' + - '!**/exporters/otlp/otlptrace/internal/**.go' + deny: + - pkg: go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal + desc: Do not use cross-module internal packages. + godot: + exclude: + # Exclude links. + - '^ *\[[^]]+\]:' + # Exclude sentence fragments for lists. + - ^[ ]*[-•] + # Exclude sentences prefixing a list. + - :$ + misspell: + locale: US + ignore-rules: + - cancelled + perfsprint: + int-conversion: true + err-error: true + errorf: true + sprintf1: true + strconcat: true + revive: + confidence: 0.01 + rules: + - name: blank-imports + - name: bool-literal-in-expr + - name: constant-logical-expr + - name: context-as-argument + arguments: + - allowTypesBefore: '*testing.T' + disabled: true + - name: context-keys-type + - name: deep-exit + - name: defer + arguments: + - - call-chain + - loop + - name: dot-imports + - name: duplicated-imports + - name: early-return + arguments: + - preserveScope + - name: empty-block + - name: empty-lines + - name: error-naming + - name: error-return + - name: error-strings + - name: errorf + - name: exported + arguments: + - sayRepetitiveInsteadOfStutters + - name: flag-parameter + - name: identical-branches + - name: if-return + - name: import-shadowing + - name: increment-decrement + - name: indent-error-flow + arguments: + - preserveScope + - name: package-comments + - name: range + - name: range-val-in-closure + - name: range-val-address + - name: redefines-builtin-id + - name: string-format + arguments: + - - panic + - /^[^\n]*$/ + - must not contain line breaks + - name: struct-tag + - name: superfluous-else + arguments: + - preserveScope + - name: time-equal + - name: unconditional-recursion + - name: unexported-return + - name: unhandled-error + arguments: + - fmt.Fprint + - fmt.Fprintf + - fmt.Fprintln + - fmt.Print + - fmt.Printf + - fmt.Println + - name: unnecessary-stmt + - name: useless-break + - name: var-declaration + - name: var-naming + arguments: + - ["ID"] # AllowList + - ["Otel", "Aws", "Gcp"] # DenyList + - name: waitgroup-by-value + testifylint: + enable-all: true + disable: + - float-compare + - go-require + - require-error + exclusions: + generated: lax + presets: + - common-false-positives + - legacy + - std-error-handling + rules: + # TODO: Having appropriate comments for exported objects helps development, + # even for objects in internal packages. Appropriate comments for all + # exported objects should be added and this exclusion removed. + - linters: + - revive + path: .*internal/.* + text: exported (method|function|type|const) (.+) should have comment or be unexported + # Yes, they are, but it's okay in a test. + - linters: + - revive + path: _test\.go + text: exported func.*returns unexported type.*which can be annoying to use + # Example test functions should be treated like main. 
+ - linters: + - revive + path: example.*_test\.go + text: calls to (.+) only in main[(][)] or init[(][)] functions + # It's okay to not run gosec and perfsprint in a test. + - linters: + - gosec + - perfsprint + path: _test\.go + # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) + # as we commonly use it in tests and examples. + - linters: + - gosec + text: 'G404:' + # Ignoring gosec G402: TLS MinVersion too low + # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. + - linters: + - gosec + text: 'G402: TLS MinVersion too low.' + paths: + - third_party$ + - builtin$ + - examples$ issues: - # Maximum issues count per one linter. - # Set to 0 to disable. - # Default: 50 - # Setting to unlimited so the linter only is run once to debug all issues. max-issues-per-linter: 0 - # Maximum count of issues with the same text. - # Set to 0 to disable. - # Default: 3 - # Setting to unlimited so the linter only is run once to debug all issues. max-same-issues: 0 - # Excluding configuration per-path, per-linter, per-text and per-source. - exclude-rules: - # TODO: Having appropriate comments for exported objects helps development, - # even for objects in internal packages. Appropriate comments for all - # exported objects should be added and this exclusion removed. - - path: '.*internal/.*' - text: "exported (method|function|type|const) (.+) should have comment or be unexported" - linters: - - revive - # Yes, they are, but it's okay in a test. - - path: _test\.go - text: "exported func.*returns unexported type.*which can be annoying to use" - linters: - - revive - # Example test functions should be treated like main. - - path: example.*_test\.go - text: "calls to (.+) only in main[(][)] or init[(][)] functions" - linters: - - revive - # It's okay to not run gosec and perfsprint in a test. - - path: _test\.go - linters: - - gosec - - perfsprint - # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) - # as we commonly use it in tests and examples. - - text: "G404:" - linters: - - gosec - # Ignoring gosec G402: TLS MinVersion too low - # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. - - text: "G402: TLS MinVersion too low." - linters: - - gosec - include: - # revive exported should have comment or be unexported. - - EXC0012 - # revive package comment should be of the form ... - - EXC0013 - -linters-settings: - depguard: - rules: - non-tests: - files: - - "!$test" - - "!**/*test/*.go" - - "!**/internal/matchers/*.go" - deny: - - pkg: "testing" - - pkg: "github.com/stretchr/testify" - - pkg: "crypto/md5" - - pkg: "crypto/sha1" - - pkg: "crypto/**/pkix" - auto/sdk: - files: - - "!internal/global/trace.go" - - "~internal/global/trace_test.go" - deny: - - pkg: "go.opentelemetry.io/auto/sdk" - desc: Do not use SDK from automatic instrumentation. - otlp-internal: - files: - - "!**/exporters/otlp/internal/**/*.go" - deny: - - pkg: "go.opentelemetry.io/otel/exporters/otlp/internal" - desc: Do not use cross-module internal packages. - otlptrace-internal: - files: - - "!**/exporters/otlp/otlptrace/*.go" - - "!**/exporters/otlp/otlptrace/internal/**.go" - deny: - - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal" - desc: Do not use cross-module internal packages. 
- otlpmetric-internal: - files: - - "!**/exporters/otlp/otlpmetric/internal/*.go" - - "!**/exporters/otlp/otlpmetric/internal/**/*.go" - deny: - - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal" - desc: Do not use cross-module internal packages. - otel-internal: - files: - - "**/sdk/*.go" - - "**/sdk/**/*.go" - - "**/exporters/*.go" - - "**/exporters/**/*.go" - - "**/schema/*.go" - - "**/schema/**/*.go" - - "**/metric/*.go" - - "**/metric/**/*.go" - - "**/bridge/*.go" - - "**/bridge/**/*.go" - - "**/trace/*.go" - - "**/trace/**/*.go" - - "**/log/*.go" - - "**/log/**/*.go" - deny: - - pkg: "go.opentelemetry.io/otel/internal$" - desc: Do not use cross-module internal packages. - - pkg: "go.opentelemetry.io/otel/internal/attribute" - desc: Do not use cross-module internal packages. - - pkg: "go.opentelemetry.io/otel/internal/internaltest" - desc: Do not use cross-module internal packages. - - pkg: "go.opentelemetry.io/otel/internal/matchers" - desc: Do not use cross-module internal packages. - godot: - exclude: - # Exclude links. - - '^ *\[[^]]+\]:' - # Exclude sentence fragments for lists. - - '^[ ]*[-•]' - # Exclude sentences prefixing a list. - - ':$' - goimports: - local-prefixes: go.opentelemetry.io - misspell: - locale: US - ignore-words: - - cancelled - perfsprint: - err-error: true - errorf: true - int-conversion: true - sprintf1: true - strconcat: true - revive: - # Sets the default failure confidence. - # This means that linting errors with less than 0.8 confidence will be ignored. - # Default: 0.8 - confidence: 0.01 - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md - rules: - - name: blank-imports - - name: bool-literal-in-expr - - name: constant-logical-expr - - name: context-as-argument - disabled: true - arguments: - - allowTypesBefore: "*testing.T" - - name: context-keys-type - - name: deep-exit - - name: defer - arguments: - - ["call-chain", "loop"] - - name: dot-imports - - name: duplicated-imports - - name: early-return - arguments: - - "preserveScope" - - name: empty-block - - name: empty-lines - - name: error-naming - - name: error-return - - name: error-strings - - name: errorf - - name: exported - arguments: - - "sayRepetitiveInsteadOfStutters" - - name: flag-parameter - - name: identical-branches - - name: if-return - - name: import-shadowing - - name: increment-decrement - - name: indent-error-flow - arguments: - - "preserveScope" - - name: package-comments - - name: range - - name: range-val-in-closure - - name: range-val-address - - name: redefines-builtin-id - - name: string-format - arguments: - - - panic - - '/^[^\n]*$/' - - must not contain line breaks - - name: struct-tag - - name: superfluous-else - arguments: - - "preserveScope" - - name: time-equal - - name: unconditional-recursion - - name: unexported-return - - name: unhandled-error - arguments: - - "fmt.Fprint" - - "fmt.Fprintf" - - "fmt.Fprintln" - - "fmt.Print" - - "fmt.Printf" - - "fmt.Println" - - name: unnecessary-stmt - - name: useless-break - - name: var-declaration - - name: var-naming - arguments: - - ["ID"] # AllowList - - ["Otel", "Aws", "Gcp"] # DenyList - - name: waitgroup-by-value - testifylint: - enable-all: true - disable: - - float-compare - - go-require - - require-error +formatters: + enable: + - gofumpt + - goimports + - golines + settings: + goimports: + local-prefixes: + - go.opentelemetry.io + golines: + max-len: 120 + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git 
a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index c076db2823..648e4abab8 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,57 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.36.0/0.58.0/0.12.0] 2025-05-20 + +### Added + +- Add exponential histogram support in `go.opentelemetry.io/otel/exporters/prometheus`. (#6421) +- The `go.opentelemetry.io/otel/semconv/v1.31.0` package. + The package contains semantic conventions from the `v1.31.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.31.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.30.0`. (#6479) +- Add `Recording`, `Scope`, and `Record` types in `go.opentelemetry.io/otel/log/logtest`. (#6507) +- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#6751) +- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#6752) +- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6688) +- Add `ValuesGetter` in `go.opentelemetry.io/otel/propagation`, a `TextMapCarrier` that supports retrieving multiple values for a single key. (#5973) +- Add `Values` method to `HeaderCarrier` to implement the new `ValuesGetter` interface in `go.opentelemetry.io/otel/propagation`. (#5973) +- Update `Baggage` in `go.opentelemetry.io/otel/propagation` to retrieve multiple values for a key when the carrier implements `ValuesGetter`. (#5973) +- Add `AssertEqual` function in `go.opentelemetry.io/otel/log/logtest`. (#6662) +- The `go.opentelemetry.io/otel/semconv/v1.32.0` package. + The package contains semantic conventions from the `v1.32.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.32.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.31.0`(#6782) +- Add `Transform` option in `go.opentelemetry.io/otel/log/logtest`. (#6794) +- Add `Desc` option in `go.opentelemetry.io/otel/log/logtest`. (#6796) + +### Removed + +- Drop support for [Go 1.22]. (#6381, #6418) +- Remove `Resource` field from `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. (#6494) +- Remove `RecordFactory` type from `go.opentelemetry.io/otel/log/logtest`. (#6492) +- Remove `ScopeRecords`, `EmittedRecord`, and `RecordFactory` types from `go.opentelemetry.io/otel/log/logtest`. (#6507) +- Remove `AssertRecordEqual` function in `go.opentelemetry.io/otel/log/logtest`, use `AssertEqual` instead. (#6662) + +### Changed + +- ⚠️ Update `github.com/prometheus/client_golang` to `v1.21.1`, which changes the `NameValidationScheme` to `UTF8Validation`. + This allows metrics names to keep original delimiters (e.g. `.`), rather than replacing with underscores. + This can be reverted by setting `github.com/prometheus/common/model.NameValidationScheme` to `LegacyValidation` in `github.com/prometheus/common/model`. (#6433) +- Initialize map with `len(keys)` in `NewAllowKeysFilter` and `NewDenyKeysFilter` to avoid unnecessary allocations in `go.opentelemetry.io/otel/attribute`. (#6455) +- `go.opentelemetry.io/otel/log/logtest` is now a separate Go module.
(#6465) +- `go.opentelemetry.io/otel/sdk/log/logtest` is now a separate Go module. (#6466) +- `Recorder` in `go.opentelemetry.io/otel/log/logtest` no longer separately stores records emitted by loggers with the same instrumentation scope. (#6507) +- Improve performance of `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` by not exporting when exporter cannot accept more. (#6569, #6641) + +### Deprecated + +- Deprecate support for `model.LegacyValidation` for `go.opentelemetry.io/otel/exporters/prometheus`. (#6449) + +### Fixes + +- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6392) +- Ensure the `noopSpan.tracerProvider` method is not inlined in `go.opentelemetry.io/otel/trace` so the `go.opentelemetry.io/auto` instrumentation can instrument non-recording spans. (#6456) +- Use a `sync.Pool` instead of allocating `metricdata.ResourceMetrics` in `go.opentelemetry.io/otel/exporters/prometheus`. (#6472) + ## [1.35.0/0.57.0/0.11.0] 2025-03-05 This release is the last to support [Go 1.22]. @@ -3237,7 +3288,8 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.35.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.36.0...HEAD +[1.36.0/0.58.0/0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.36.0 [1.35.0/0.57.0/0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.35.0 [1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0 [1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0 diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 7b8af585aa..1902dac057 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -643,6 +643,7 @@ should be canceled. 
### Triagers +- [Alex Kats](https://github.com/akats7), Capital One - [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent ### Approvers diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index 226410d742..62a56f4d34 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -43,8 +43,11 @@ $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink SEMCONVKIT = $(TOOLS)/semconvkit $(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit +VERIFYREADMES = $(TOOLS)/verifyreadmes +$(TOOLS)/verifyreadmes: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/verifyreadmes + GOLANGCI_LINT = $(TOOLS)/golangci-lint -$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint +$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/v2/cmd/golangci-lint MISSPELL = $(TOOLS)/misspell $(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell @@ -68,7 +71,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(VERIFYREADMES) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker @@ -213,11 +216,8 @@ go-mod-tidy/%: crosslink && cd $(DIR) \ && $(GO) mod tidy -compat=1.21 -.PHONY: lint-modules -lint-modules: go-mod-tidy - .PHONY: lint -lint: misspell lint-modules golangci-lint govulncheck +lint: misspell go-mod-tidy golangci-lint govulncheck .PHONY: vanity-import-check vanity-import-check: $(PORTO) @@ -319,10 +319,11 @@ add-tags: verify-mods @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} +MARKDOWNIMAGE := $(shell awk '$$4=="markdown" {print $$2}' $(DEPENDENCIES_DOCKERFILE)) .PHONY: lint-markdown lint-markdown: - docker run -v "$(CURDIR):$(WORKDIR)" avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md + docker run --rm -u $(DOCKER_USER) -v "$(CURDIR):$(WORKDIR)" $(MARKDOWNIMAGE) -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md .PHONY: verify-readmes -verify-readmes: - ./verify_readmes.sh +verify-readmes: $(VERIFYREADMES) + $(VERIFYREADMES) diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 8421cd7e59..b600788121 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -6,6 +6,7 @@ [![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) [![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/open-telemetry/opentelemetry-go/badge)](https://scorecard.dev/viewer/?uri=github.com/open-telemetry/opentelemetry-go) [![OpenSSF Best Practices](https://www.bestpractices.dev/projects/9996/badge)](https://www.bestpractices.dev/projects/9996) +[![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/opentelemetry-go.svg)](https://issues.oss-fuzz.com/issues?q=project:opentelemetry-go) [![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) OpenTelemetry-Go is the [Go](https://golang.org/) implementation of 
[OpenTelemetry](https://opentelemetry.io/). @@ -53,25 +54,18 @@ Currently, this project supports the following environments. |----------|------------|--------------| | Ubuntu | 1.24 | amd64 | | Ubuntu | 1.23 | amd64 | -| Ubuntu | 1.22 | amd64 | | Ubuntu | 1.24 | 386 | | Ubuntu | 1.23 | 386 | -| Ubuntu | 1.22 | 386 | | Ubuntu | 1.24 | arm64 | | Ubuntu | 1.23 | arm64 | -| Ubuntu | 1.22 | arm64 | | macOS 13 | 1.24 | amd64 | | macOS 13 | 1.23 | amd64 | -| macOS 13 | 1.22 | amd64 | | macOS | 1.24 | arm64 | | macOS | 1.23 | arm64 | -| macOS | 1.22 | arm64 | | Windows | 1.24 | amd64 | | Windows | 1.23 | amd64 | -| Windows | 1.22 | amd64 | | Windows | 1.24 | 386 | | Windows | 1.23 | 386 | -| Windows | 1.22 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 1e13ae54f7..7c1a9119dc 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -1,5 +1,9 @@ # Release Process +## Create a `Version Release` issue + +Create a `Version Release` issue to track the release process. + ## Semantic Convention Generation New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated. @@ -123,6 +127,16 @@ Importantly, bump any package versions referenced to be the latest one you just [Go instrumentation documentation]: https://opentelemetry.io/docs/languages/go/ [content/en/docs/languages/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/languages/go +### Close the milestone + +Once a release is made, ensure all issues that were fixed and PRs that were merged as part of this release are added to the corresponding milestone. +This helps track what changes were included in each release. + +- To find issues that haven't been included in a milestone, use this [GitHub search query](https://github.com/open-telemetry/opentelemetry-go/issues?q=is%3Aissue%20no%3Amilestone%20is%3Aclosed%20sort%3Aupdated-desc%20reason%3Acompleted%20-label%3AStale%20linked%3Apr) +- To find merged PRs that haven't been included in a milestone, use this [GitHub search query](https://github.com/open-telemetry/opentelemetry-go/pulls?q=is%3Apr+no%3Amilestone+is%3Amerged). + +Once all related issues and PRs have been added to the milestone, close the milestone. + ### Demo Repository Bump the dependencies in the following Go services: @@ -130,3 +144,7 @@ Bump the dependencies in the following Go services: - [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting) - [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout) - [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog) + +### Close the `Version Release` issue + +Once the todo list in the `Version Release` issue is complete, close the issue. 
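The next hunk pre-sizes the lookup maps built by attribute.NewAllowKeysFilter and NewDenyKeysFilter with len(keys). For context, a hedged usage sketch of that public API; the keys and values are illustrative:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// The returned Filter reports whether a given attribute should be kept.
	allow := attribute.NewAllowKeysFilter("http.request.method", "url.scheme")

	kvs := []attribute.KeyValue{
		attribute.String("http.request.method", "GET"),
		attribute.String("url.scheme", "https"),
		attribute.String("user_agent.original", "curl/8.0"),
	}

	for _, kv := range kvs {
		fmt.Println(kv.Key, "kept:", allow(kv)) // only the allowed keys report true
	}
}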
diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go index be9cd922d8..3eeaa5d442 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/filter.go +++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go @@ -19,7 +19,7 @@ func NewAllowKeysFilter(keys ...Key) Filter { return func(kv KeyValue) bool { return false } } - allowed := make(map[Key]struct{}) + allowed := make(map[Key]struct{}, len(keys)) for _, k := range keys { allowed[k] = struct{}{} } @@ -38,7 +38,7 @@ func NewDenyKeysFilter(keys ...Key) Filter { return func(kv KeyValue) bool { return true } } - forbid := make(map[Key]struct{}) + forbid := make(map[Key]struct{}, len(keys)) for _, k := range keys { forbid[k] = struct{}{} } diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go similarity index 97% rename from vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go rename to vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go index 691d96c755..b76d2bbfdb 100644 --- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +++ b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go @@ -5,7 +5,7 @@ Package attribute provide several helper functions for some commonly used logic of processing attributes. */ -package attribute // import "go.opentelemetry.io/otel/internal/attribute" +package attribute // import "go.opentelemetry.io/otel/attribute/internal" import ( "reflect" diff --git a/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go b/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go new file mode 100644 index 0000000000..5791c6e7aa --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "math" +) + +func boolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. + if b { + return 1 + } + return 0 +} + +func rawToBool(r uint64) bool { + return r != 0 +} + +func int64ToRaw(i int64) uint64 { + // Assumes original was a valid int64 (overflow not checked). + return uint64(i) // nolint: gosec +} + +func rawToInt64(r uint64) int64 { + // Assumes original was a valid int64 (overflow not checked). 
+ return int64(r) // nolint: gosec +} + +func float64ToRaw(f float64) uint64 { + return math.Float64bits(f) +} + +func rawToFloat64(r uint64) float64 { + return math.Float64frombits(r) +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go index 9ea0ecbbd2..817eecacf1 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -9,8 +9,7 @@ import ( "reflect" "strconv" - "go.opentelemetry.io/otel/internal" - "go.opentelemetry.io/otel/internal/attribute" + attribute "go.opentelemetry.io/otel/attribute/internal" ) //go:generate stringer -type=Type @@ -51,7 +50,7 @@ const ( func BoolValue(v bool) Value { return Value{ vtype: BOOL, - numeric: internal.BoolToRaw(v), + numeric: boolToRaw(v), } } @@ -82,7 +81,7 @@ func IntSliceValue(v []int) Value { func Int64Value(v int64) Value { return Value{ vtype: INT64, - numeric: internal.Int64ToRaw(v), + numeric: int64ToRaw(v), } } @@ -95,7 +94,7 @@ func Int64SliceValue(v []int64) Value { func Float64Value(v float64) Value { return Value{ vtype: FLOAT64, - numeric: internal.Float64ToRaw(v), + numeric: float64ToRaw(v), } } @@ -125,7 +124,7 @@ func (v Value) Type() Type { // AsBool returns the bool value. Make sure that the Value's type is // BOOL. func (v Value) AsBool() bool { - return internal.RawToBool(v.numeric) + return rawToBool(v.numeric) } // AsBoolSlice returns the []bool value. Make sure that the Value's type is @@ -144,7 +143,7 @@ func (v Value) asBoolSlice() []bool { // AsInt64 returns the int64 value. Make sure that the Value's type is // INT64. func (v Value) AsInt64() int64 { - return internal.RawToInt64(v.numeric) + return rawToInt64(v.numeric) } // AsInt64Slice returns the []int64 value. Make sure that the Value's type is @@ -163,7 +162,7 @@ func (v Value) asInt64Slice() []int64 { // AsFloat64 returns the float64 value. Make sure that the Value's // type is FLOAT64. func (v Value) AsFloat64() float64 { - return internal.RawToFloat64(v.numeric) + return rawToFloat64(v.numeric) } // AsFloat64Slice returns the []float64 value. Make sure that the Value's type is diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile index e4c4a753c8..51fb76b30d 100644 --- a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile +++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile @@ -1,3 +1,4 @@ # This is a renovate-friendly source of Docker images. -FROM python:3.13.2-slim-bullseye@sha256:31b581c8218e1f3c58672481b3b7dba8e898852866b408c6a984c22832523935 AS python -FROM otel/weaver:v0.13.2@sha256:ae7346b992e477f629ea327e0979e8a416a97f7956ab1f7e95ac1f44edf1a893 AS weaver +FROM python:3.13.3-slim-bullseye@sha256:9e3f9243e06fd68eb9519074b49878eda20ad39a855fac51aaffb741de20726e AS python +FROM otel/weaver:v0.15.0@sha256:1cf1c72eaed57dad813c2e359133b8a15bd4facf305aae5b13bdca6d3eccff56 AS weaver +FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh deleted file mode 100644 index 93e80ea306..0000000000 --- a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env bash - -# Copyright The OpenTelemetry Authors -# SPDX-License-Identifier: Apache-2.0 - -set -euo pipefail - -top_dir='.' 
-if [[ $# -gt 0 ]]; then - top_dir="${1}" -fi - -p=$(pwd) -mod_dirs=() - -# Note `mapfile` does not exist in older bash versions: -# https://stackoverflow.com/questions/41475261/need-alternative-to-readarray-mapfile-for-script-on-older-version-of-bash - -while IFS= read -r line; do - mod_dirs+=("$line") -done < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort) - -for mod_dir in "${mod_dirs[@]}"; do - cd "${mod_dir}" - - while IFS= read -r line; do - echo ".${line#${p}}" - done < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... | grep '^main|' | cut -f 2- -d '|') - cd "${p}" -done diff --git a/vendor/go.opentelemetry.io/otel/internal/gen.go b/vendor/go.opentelemetry.io/otel/internal/gen.go deleted file mode 100644 index 4259f0320d..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal/gen.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package internal // import "go.opentelemetry.io/otel/internal" - -//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go -//go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go -//go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go - -//go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go -//go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go -//go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go -//go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go -//go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go -//go:generate gotmpl --body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go -//go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go -//go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go -//go:generate gotmpl --body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go index c657ff8e75..2e47b2964c 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/handler.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go @@ -1,6 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 +// Package global provides the OpenTelemetry global API. 
package global // import "go.opentelemetry.io/otel/internal/global" import ( diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index a6acd8dca6..adb37b5b0e 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -169,7 +169,10 @@ func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) return i, nil } -func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { +func (m *meter) Int64UpDownCounter( + name string, + options ...metric.Int64UpDownCounterOption, +) (metric.Int64UpDownCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -238,7 +241,10 @@ func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (met return i, nil } -func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { +func (m *meter) Int64ObservableCounter( + name string, + options ...metric.Int64ObservableCounterOption, +) (metric.Int64ObservableCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -261,7 +267,10 @@ func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64Obser return i, nil } -func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { +func (m *meter) Int64ObservableUpDownCounter( + name string, + options ...metric.Int64ObservableUpDownCounterOption, +) (metric.Int64ObservableUpDownCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -284,7 +293,10 @@ func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int6 return i, nil } -func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { +func (m *meter) Int64ObservableGauge( + name string, + options ...metric.Int64ObservableGaugeOption, +) (metric.Int64ObservableGauge, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -330,7 +342,10 @@ func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOpti return i, nil } -func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { +func (m *meter) Float64UpDownCounter( + name string, + options ...metric.Float64UpDownCounterOption, +) (metric.Float64UpDownCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -353,7 +368,10 @@ func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDow return i, nil } -func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { +func (m *meter) Float64Histogram( + name string, + options ...metric.Float64HistogramOption, +) (metric.Float64Histogram, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -399,7 +417,10 @@ func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) return i, nil } -func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { +func (m *meter) Float64ObservableCounter( + name string, + options ...metric.Float64ObservableCounterOption, +) (metric.Float64ObservableCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -422,7 +443,10 @@ func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64O return i, nil 
} -func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { +func (m *meter) Float64ObservableUpDownCounter( + name string, + options ...metric.Float64ObservableUpDownCounterOption, +) (metric.Float64ObservableUpDownCounter, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -445,7 +469,10 @@ func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Fl return i, nil } -func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { +func (m *meter) Float64ObservableGauge( + name string, + options ...metric.Float64ObservableGaugeOption, +) (metric.Float64ObservableGauge, error) { m.mtx.Lock() defer m.mtx.Unlock() diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index 8982aa0dc5..49e4ac4faa 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -158,7 +158,18 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart // a nonRecordingSpan by default. var autoInstEnabled = new(bool) -func (t *tracer) newSpan(ctx context.Context, autoSpan *bool, name string, opts []trace.SpanStartOption) (context.Context, trace.Span) { +// newSpan is called by tracer.Start so auto-instrumentation can attach an eBPF +// uprobe to this code. +// +// "noinline" pragma prevents the method from ever being inlined. +// +//go:noinline +func (t *tracer) newSpan( + ctx context.Context, + autoSpan *bool, + name string, + opts []trace.SpanStartOption, +) (context.Context, trace.Span) { // autoInstEnabled is passed to newSpan via the autoSpan parameter. This is // so the auto-instrumentation can define a uprobe for (*t).newSpan and be // provided with the address of the bool autoInstEnabled points to. It diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go deleted file mode 100644 index b2fe3e41d3..0000000000 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package internal // import "go.opentelemetry.io/otel/internal" - -import ( - "math" - "unsafe" -) - -func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. - if b { - return 1 - } - return 0 -} - -func RawToBool(r uint64) bool { - return r != 0 -} - -func Int64ToRaw(i int64) uint64 { - // Assumes original was a valid int64 (overflow not checked). - return uint64(i) // nolint: gosec -} - -func RawToInt64(r uint64) int64 { - // Assumes original was a valid int64 (overflow not checked). - return int64(r) // nolint: gosec -} - -func Float64ToRaw(f float64) uint64 { - return math.Float64bits(f) -} - -func RawToFloat64(r uint64) float64 { - return math.Float64frombits(r) -} - -func RawPtrToFloat64Ptr(r *uint64) *float64 { - // Assumes original was a valid *float64 (overflow not checked). - return (*float64)(unsafe.Pointer(r)) // nolint: gosec -} - -func RawPtrToInt64Ptr(r *uint64) *int64 { - // Assumes original was a valid *int64 (overflow not checked). 
- return (*int64)(unsafe.Pointer(r)) // nolint: gosec -} diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go index f8435d8f28..b7fc973a66 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -106,7 +106,9 @@ type Float64ObservableUpDownCounterConfig struct { // NewFloat64ObservableUpDownCounterConfig returns a new // [Float64ObservableUpDownCounterConfig] with all opts applied. -func NewFloat64ObservableUpDownCounterConfig(opts ...Float64ObservableUpDownCounterOption) Float64ObservableUpDownCounterConfig { +func NewFloat64ObservableUpDownCounterConfig( + opts ...Float64ObservableUpDownCounterOption, +) Float64ObservableUpDownCounterConfig { var config Float64ObservableUpDownCounterConfig for _, o := range opts { config = o.applyFloat64ObservableUpDownCounter(config) @@ -239,12 +241,16 @@ type float64CallbackOpt struct { cback Float64Callback } -func (o float64CallbackOpt) applyFloat64ObservableCounter(cfg Float64ObservableCounterConfig) Float64ObservableCounterConfig { +func (o float64CallbackOpt) applyFloat64ObservableCounter( + cfg Float64ObservableCounterConfig, +) Float64ObservableCounterConfig { cfg.callbacks = append(cfg.callbacks, o.cback) return cfg } -func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(cfg Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { +func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter( + cfg Float64ObservableUpDownCounterConfig, +) Float64ObservableUpDownCounterConfig { cfg.callbacks = append(cfg.callbacks, o.cback) return cfg } diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go index e079aaef16..4404b71a22 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go @@ -105,7 +105,9 @@ type Int64ObservableUpDownCounterConfig struct { // NewInt64ObservableUpDownCounterConfig returns a new // [Int64ObservableUpDownCounterConfig] with all opts applied. 
-func NewInt64ObservableUpDownCounterConfig(opts ...Int64ObservableUpDownCounterOption) Int64ObservableUpDownCounterConfig { +func NewInt64ObservableUpDownCounterConfig( + opts ...Int64ObservableUpDownCounterOption, +) Int64ObservableUpDownCounterConfig { var config Int64ObservableUpDownCounterConfig for _, o := range opts { config = o.applyInt64ObservableUpDownCounter(config) @@ -242,7 +244,9 @@ func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounter return cfg } -func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(cfg Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { +func (o int64CallbackOpt) applyInt64ObservableUpDownCounter( + cfg Int64ObservableUpDownCounterConfig, +) Int64ObservableUpDownCounterConfig { cfg.callbacks = append(cfg.callbacks, o.cback) return cfg } diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go index a535782e1d..9f48d5f117 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -63,7 +63,9 @@ func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) return c } -func (o descOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { +func (o descOpt) applyFloat64ObservableUpDownCounter( + c Float64ObservableUpDownCounterConfig, +) Float64ObservableUpDownCounterConfig { c.description = string(o) return c } @@ -98,7 +100,9 @@ func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int return c } -func (o descOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { +func (o descOpt) applyInt64ObservableUpDownCounter( + c Int64ObservableUpDownCounterConfig, +) Int64ObservableUpDownCounterConfig { c.description = string(o) return c } @@ -138,7 +142,9 @@ func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) return c } -func (o unitOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { +func (o unitOpt) applyFloat64ObservableUpDownCounter( + c Float64ObservableUpDownCounterConfig, +) Float64ObservableUpDownCounterConfig { c.unit = string(o) return c } @@ -173,7 +179,9 @@ func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int return c } -func (o unitOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { +func (o unitOpt) applyInt64ObservableUpDownCounter( + c Int64ObservableUpDownCounterConfig, +) Int64ObservableUpDownCounterConfig { c.unit = string(o) return c } diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go index 14e08c24a4..fdd2a7011c 100644 --- a/vendor/go.opentelemetry.io/otel/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -110,7 +110,10 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. 
- Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) + Int64ObservableUpDownCounter( + name string, + options ...Int64ObservableUpDownCounterOption, + ) (Int64ObservableUpDownCounter, error) // Int64ObservableGauge returns a new Int64ObservableGauge instrument // identified by name and configured with options. The instrument is used @@ -194,7 +197,10 @@ type Meter interface { // The name needs to conform to the OpenTelemetry instrument name syntax. // See the Instrument Name section of the package documentation for more // information. - Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) + Float64ObservableUpDownCounter( + name string, + options ...Float64ObservableUpDownCounterOption, + ) (Float64ObservableUpDownCounter, error) // Float64ObservableGauge returns a new Float64ObservableGauge instrument // identified by name and configured with options. The instrument is used diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go index ca6fcbdc09..9afb69e583 100644 --- a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go +++ b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go @@ -86,13 +86,19 @@ func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, // Int64ObservableCounter returns an ObservableCounter used to record int64 // measurements that produces no telemetry. -func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { +func (Meter) Int64ObservableCounter( + string, + ...metric.Int64ObservableCounterOption, +) (metric.Int64ObservableCounter, error) { return Int64ObservableCounter{}, nil } // Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to // record int64 measurements that produces no telemetry. -func (Meter) Int64ObservableUpDownCounter(string, ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { +func (Meter) Int64ObservableUpDownCounter( + string, + ...metric.Int64ObservableUpDownCounterOption, +) (metric.Int64ObservableUpDownCounter, error) { return Int64ObservableUpDownCounter{}, nil } @@ -128,19 +134,28 @@ func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64G // Float64ObservableCounter returns an ObservableCounter used to record int64 // measurements that produces no telemetry. -func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { +func (Meter) Float64ObservableCounter( + string, + ...metric.Float64ObservableCounterOption, +) (metric.Float64ObservableCounter, error) { return Float64ObservableCounter{}, nil } // Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to // record int64 measurements that produces no telemetry. -func (Meter) Float64ObservableUpDownCounter(string, ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { +func (Meter) Float64ObservableUpDownCounter( + string, + ...metric.Float64ObservableUpDownCounterOption, +) (metric.Float64ObservableUpDownCounter, error) { return Float64ObservableUpDownCounter{}, nil } // Float64ObservableGauge returns an ObservableGauge used to record int64 // measurements that produces no telemetry. 
-func (Meter) Float64ObservableGauge(string, ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { +func (Meter) Float64ObservableGauge( + string, + ...metric.Float64ObservableGaugeOption, +) (metric.Float64ObservableGauge, error) { return Float64ObservableGauge{}, nil } diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go index 552263ba73..ebda5026d6 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/baggage.go +++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go @@ -28,7 +28,21 @@ func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { } // Extract returns a copy of parent with the baggage from the carrier added. +// If carrier implements [ValuesGetter] (e.g. [HeaderCarrier]), Values is invoked +// for multiple values extraction. Otherwise, Get is called. func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context { + if multiCarrier, ok := carrier.(ValuesGetter); ok { + return extractMultiBaggage(parent, multiCarrier) + } + return extractSingleBaggage(parent, carrier) +} + +// Fields returns the keys who's values are set with Inject. +func (b Baggage) Fields() []string { + return []string{baggageHeader} +} + +func extractSingleBaggage(parent context.Context, carrier TextMapCarrier) context.Context { bStr := carrier.Get(baggageHeader) if bStr == "" { return parent @@ -41,7 +55,23 @@ func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context return baggage.ContextWithBaggage(parent, bag) } -// Fields returns the keys who's values are set with Inject. -func (b Baggage) Fields() []string { - return []string{baggageHeader} +func extractMultiBaggage(parent context.Context, carrier ValuesGetter) context.Context { + bVals := carrier.Values(baggageHeader) + if len(bVals) == 0 { + return parent + } + var members []baggage.Member + for _, bStr := range bVals { + currBag, err := baggage.Parse(bStr) + if err != nil { + continue + } + members = append(members, currBag.Members()...) + } + + b, err := baggage.New(members...) + if err != nil || b.Len() == 0 { + return parent + } + return baggage.ContextWithBaggage(parent, b) } diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go index 8c8286aab4..5c8c26ea2e 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/propagation.go +++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go @@ -9,6 +9,7 @@ import ( ) // TextMapCarrier is the storage medium used by a TextMapPropagator. +// See ValuesGetter for how a TextMapCarrier can get multiple values for a key. type TextMapCarrier interface { // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. @@ -29,6 +30,18 @@ type TextMapCarrier interface { // must never be done outside of a new major release. } +// ValuesGetter can return multiple values for a single key, +// with contrast to TextMapCarrier.Get which returns a single value. +type ValuesGetter interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Values returns all values associated with the passed key. + Values(key string) []string + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. 
+} + // MapCarrier is a TextMapCarrier that uses a map held in memory as a storage // medium for propagated key-value pairs. type MapCarrier map[string]string @@ -55,14 +68,25 @@ func (c MapCarrier) Keys() []string { return keys } -// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface. +// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier and ValuesGetter interfaces. type HeaderCarrier http.Header -// Get returns the value associated with the passed key. +// Compile time check that HeaderCarrier implements ValuesGetter. +var _ TextMapCarrier = HeaderCarrier{} + +// Compile time check that HeaderCarrier implements TextMapCarrier. +var _ ValuesGetter = HeaderCarrier{} + +// Get returns the first value associated with the passed key. func (hc HeaderCarrier) Get(key string) string { return http.Header(hc).Get(key) } +// Values returns all values associated with the passed key. +func (hc HeaderCarrier) Values(key string) []string { + return http.Header(hc).Values(key) +} + // Set stores the key-value pair. func (hc HeaderCarrier) Set(key string, value string) { http.Header(hc).Set(key, value) @@ -89,6 +113,8 @@ type TextMapPropagator interface { // must never be done outside of a new major release. // Extract reads cross-cutting concerns from the carrier into a Context. + // Implementations may check if the carrier implements ValuesGetter, + // to support extraction of multiple values per key. Extract(ctx context.Context, carrier TextMapCarrier) context.Context // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json index a6fa353f95..fa5acf2d3b 100644 --- a/vendor/go.opentelemetry.io/otel/renovate.json +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -1,7 +1,8 @@ { "$schema": "https://docs.renovatebot.com/renovate-schema.json", "extends": [ - "config:best-practices" + "config:best-practices", + "helpers:pinGitHubActionDigestsToSemver" ], "ignorePaths": [], "labels": ["Skip Changelog", "dependencies"], @@ -25,6 +26,10 @@ { "matchPackageNames": ["golang.org/x/**"], "groupName": "golang.org/x" + }, + { + "matchPackageNames": ["go.opentelemetry.io/otel/sdk/log/logtest"], + "enabled": false } ] } diff --git a/vendor/go.opentelemetry.io/otel/trace/auto.go b/vendor/go.opentelemetry.io/otel/trace/auto.go index 7e2910025a..d90af8f673 100644 --- a/vendor/go.opentelemetry.io/otel/trace/auto.go +++ b/vendor/go.opentelemetry.io/otel/trace/auto.go @@ -57,14 +57,15 @@ type autoTracer struct { var _ Tracer = autoTracer{} func (t autoTracer) Start(ctx context.Context, name string, opts ...SpanStartOption) (context.Context, Span) { - var psc SpanContext + var psc, sc SpanContext sampled := true span := new(autoSpan) // Ask eBPF for sampling decision and span context info. 
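// Illustrative sketch, not part of the vendored diff: the new ValuesGetter
// path matters when a request carries the same header more than once. With
// net/http, Get returns only the first value while Values returns all of
// them, which is what allows several "baggage" headers to be merged.
package main

import (
	"fmt"
	"net/http"
)

func main() {
	h := http.Header{}
	h.Add("Baggage", "userId=alice")
	h.Add("Baggage", "tenant=acme")

	fmt.Println(h.Get("Baggage"))    // userId=alice (first value only)
	fmt.Println(h.Values("Baggage")) // [userId=alice tenant=acme]
}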
- t.start(ctx, span, &psc, &sampled, &span.spanContext) + t.start(ctx, span, &psc, &sampled, &sc) span.sampled.Store(sampled) + span.spanContext = sc ctx = ContextWithSpan(ctx, span) diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go index 3c5e1cdb1b..e7ca62c660 100644 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go @@ -251,13 +251,20 @@ func (s *Span) UnmarshalJSON(data []byte) error { type SpanFlags int32 const ( + // SpanFlagsTraceFlagsMask is a mask for trace-flags. + // // Bits 0-7 are used for trace flags. SpanFlagsTraceFlagsMask SpanFlags = 255 - // Bits 8 and 9 are used to indicate that the parent span or link span is remote. - // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. - // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + // SpanFlagsContextHasIsRemoteMask is a mask for HAS_IS_REMOTE status. + // + // Bits 8 and 9 are used to indicate that the parent span or link span is + // remote. Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. SpanFlagsContextHasIsRemoteMask SpanFlags = 256 - // SpanFlagsContextHasIsRemoteMask indicates the Span is remote. + // SpanFlagsContextIsRemoteMask is a mask for IS_REMOTE status. + // + // Bits 8 and 9 are used to indicate that the parent span or link span is + // remote. Bit 9 (`IS_REMOTE`) indicates whether the span or link is + // remote. SpanFlagsContextIsRemoteMask SpanFlags = 512 ) @@ -266,27 +273,31 @@ const ( type SpanKind int32 const ( - // Indicates that the span represents an internal operation within an application, - // as opposed to an operation happening at the boundaries. Default value. + // SpanKindInternal indicates that the span represents an internal + // operation within an application, as opposed to an operation happening at + // the boundaries. SpanKindInternal SpanKind = 1 - // Indicates that the span covers server-side handling of an RPC or other - // remote network request. + // SpanKindServer indicates that the span covers server-side handling of an + // RPC or other remote network request. SpanKindServer SpanKind = 2 - // Indicates that the span describes a request to some remote service. + // SpanKindClient indicates that the span describes a request to some + // remote service. SpanKindClient SpanKind = 3 - // Indicates that the span describes a producer sending a message to a broker. - // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship - // between producer and consumer spans. A PRODUCER span ends when the message was accepted - // by the broker while the logical processing of the message might span a much longer time. + // SpanKindProducer indicates that the span describes a producer sending a + // message to a broker. Unlike SpanKindClient and SpanKindServer, there is + // often no direct critical path latency relationship between producer and + // consumer spans. A SpanKindProducer span ends when the message was + // accepted by the broker while the logical processing of the message might + // span a much longer time. SpanKindProducer SpanKind = 4 - // Indicates that the span describes consumer receiving a message from a broker. - // Like the PRODUCER kind, there is often no direct critical path latency relationship - // between producer and consumer spans. 
+ // SpanKindConsumer indicates that the span describes a consumer receiving + // a message from a broker. Like SpanKindProducer, there is often no direct + // critical path latency relationship between producer and consumer spans. SpanKindConsumer SpanKind = 5 ) -// Event is a time-stamped annotation of the span, consisting of user-supplied -// text description and key-value pairs. +// SpanEvent is a time-stamped annotation of the span, consisting of +// user-supplied text description and key-value pairs. type SpanEvent struct { // time_unix_nano is the time the event occurred. Time time.Time `json:"timeUnixNano,omitempty"` @@ -369,10 +380,11 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error { return nil } -// A pointer from the current span to another span in the same trace or in a -// different trace. For example, this can be used in batching operations, -// where a single batch handler processes multiple requests from different -// traces or when the handler receives a request from a different project. +// SpanLink is a reference from the current span to another span in the same +// trace or in a different trace. For example, this can be used in batching +// operations, where a single batch handler processes multiple requests from +// different traces or when the handler receives a request from a different +// project. type SpanLink struct { // A unique identifier of a trace that this linked span is part of. The ID is a // 16-byte array. diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go index 1d013a8fa8..1039bf40cd 100644 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go @@ -3,17 +3,19 @@ package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" +// StatusCode is the status of a Span. +// // For the semantics of status codes see // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status type StatusCode int32 const ( - // The default status. + // StatusCodeUnset is the default status. StatusCodeUnset StatusCode = 0 - // The Span has been validated by an Application developer or Operator to - // have completed successfully. + // StatusCodeOK is used when the Span has been validated by an Application + // developer or Operator to have completed successfully. StatusCodeOK StatusCode = 1 - // The Span contains an error. + // StatusCodeError is used when the Span contains an error. StatusCodeError StatusCode = 2 ) @@ -30,7 +32,7 @@ func (s StatusCode) String() string { return "" } -// The Status type defines a logical error model that is suitable for different +// Status defines a logical error model that is suitable for different // programming environments, including REST APIs and RPC APIs. type Status struct { // A developer-facing human readable error message. diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go index b039407081..e5f10767ca 100644 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go @@ -71,7 +71,7 @@ func (td *Traces) UnmarshalJSON(data []byte) error { return nil } -// A collection of ScopeSpans from a Resource. +// ResourceSpans is a collection of ScopeSpans from a Resource. 
type ResourceSpans struct { // The resource for the spans in this message. // If this field is not set then no resource info is known. @@ -128,7 +128,7 @@ func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { return nil } -// A collection of Spans produced by an InstrumentationScope. +// ScopeSpans is a collection of Spans produced by an InstrumentationScope. type ScopeSpans struct { // The instrumentation scope information for the spans in this message. // Semantically when InstrumentationScope isn't set, it is equivalent with diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go index 7251492da0..ae9ce102a9 100644 --- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go +++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go @@ -316,7 +316,7 @@ func (v Value) String() string { case ValueKindBool: return strconv.FormatBool(v.asBool()) case ValueKindBytes: - return fmt.Sprint(v.asBytes()) + return string(v.asBytes()) case ValueKindMap: return fmt.Sprint(v.asMap()) case ValueKindSlice: diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go index c8b1ae5d67..0f56e4dbb3 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -95,6 +95,8 @@ var autoInstEnabled = new(bool) // tracerProvider return a noopTracerProvider if autoEnabled is false, // otherwise it will return a TracerProvider from the sdk package used in // auto-instrumentation. +// +//go:noinline func (noopSpan) tracerProvider(autoEnabled *bool) TracerProvider { if *autoEnabled { return newAutoTracerProvider() diff --git a/vendor/go.opentelemetry.io/otel/verify_readmes.sh b/vendor/go.opentelemetry.io/otel/verify_readmes.sh deleted file mode 100644 index 1e87855eea..0000000000 --- a/vendor/go.opentelemetry.io/otel/verify_readmes.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -# Copyright The OpenTelemetry Authors -# SPDX-License-Identifier: Apache-2.0 - -set -euo pipefail - -dirs=$(find . -type d -not -path "*/internal*" -not -path "*/test*" -not -path "*/example*" -not -path "*/.*" | sort) - -missingReadme=false -for dir in $dirs; do - if [ ! -f "$dir/README.md" ]; then - echo "couldn't find README.md for $dir" - missingReadme=true - fi -done - -if [ "$missingReadme" = true ] ; then - echo "Error: some READMEs couldn't be found." - exit 1 -fi diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index d5fa71f674..ac3c0b15da 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
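// Illustrative sketch, not part of the vendored diff: the Value.String hunk
// above swaps fmt.Sprint(v.asBytes()) for string(v.asBytes()). The two are
// not equivalent for a []byte value:
package main

import "fmt"

func main() {
	b := []byte("hello")
	fmt.Println(fmt.Sprint(b)) // [104 101 108 108 111] - decimal byte values
	fmt.Println(string(b))     // hello - the bytes interpreted as text
}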
func Version() string { - return "1.35.0" + return "1.36.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 2b4cb4b418..79f82f3d05 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.35.0 + version: v1.36.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -23,11 +23,11 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.57.0 + version: v0.58.0 modules: - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.11.0 + version: v0.12.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log @@ -40,4 +40,6 @@ module-sets: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools + - go.opentelemetry.io/otel/log/logtest + - go.opentelemetry.io/otel/sdk/log/logtest - go.opentelemetry.io/otel/trace/internal/telemetry/test diff --git a/vendor/go.podman.io/common/libimage/copier.go b/vendor/go.podman.io/common/libimage/copier.go index e81dfaf0f8..3c7d538104 100644 --- a/vendor/go.podman.io/common/libimage/copier.go +++ b/vendor/go.podman.io/common/libimage/copier.go @@ -9,6 +9,7 @@ import ( "io" "net" "os" + "slices" "strings" "time" @@ -396,7 +397,7 @@ func (c *Copier) copyInternal(ctx context.Context, source, destination types.Ima // TimeoutStartSec=, the service manager will allow the service to continue to start, provided the // service repeats "EXTEND_TIMEOUT_USEC=..." within the interval specified until the service startup // status is finished by "READY=1"." - extendValue := []byte(fmt.Sprintf("EXTEND_TIMEOUT_USEC=%d", extension.Microseconds())) + extendValue := fmt.Appendf(nil, "EXTEND_TIMEOUT_USEC=%d", extension.Microseconds()) extendTimeout := func() { if _, err := conn.Write(extendValue); err != nil { logrus.Errorf("Increasing EXTEND_TIMEOUT_USEC failed: %v", err) @@ -555,11 +556,9 @@ func checkRegistrySourcesAllows(dest types.ImageReference) (insecure *bool, err return nil, fmt.Errorf("registry %q denied by policy: not in allowed registries list (%s)", reference.Domain(dref), registrySources) } - for _, insecureDomain := range sources.InsecureRegistries { - if insecureDomain == reference.Domain(dref) { - insecure := true - return &insecure, nil - } + if slices.Contains(sources.InsecureRegistries, reference.Domain(dref)) { + insecure := true + return &insecure, nil } return nil, nil diff --git a/vendor/go.podman.io/common/libimage/filter/filter.go b/vendor/go.podman.io/common/libimage/filter/filter.go index 2f3f03baa4..b3be95d57a 100644 --- a/vendor/go.podman.io/common/libimage/filter/filter.go +++ b/vendor/go.podman.io/common/libimage/filter/filter.go @@ -24,25 +24,25 @@ type SearchFilter struct { func ParseSearchFilter(filter []string) (*SearchFilter, error) { sFilter := new(SearchFilter) for _, f := range filter { - arr := strings.SplitN(f, "=", 2) - switch arr[0] { + keyword, value, ok := strings.Cut(f, "=") + switch keyword { case define.SearchFilterStars: - if len(arr) < 2 { + if !ok { return nil, fmt.Errorf("invalid filter %q, should be stars=", filter) } - stars, err := strconv.Atoi(arr[1]) + stars, err := strconv.Atoi(value) if err != nil { return nil, fmt.Errorf("incorrect value type for stars filter: %w", err) } sFilter.Stars = stars case define.SearchFilterAutomated: - if len(arr) == 2 && arr[1] == "false" { + if ok && 
value == "false" { sFilter.IsAutomated = types.OptionalBoolFalse } else { sFilter.IsAutomated = types.OptionalBoolTrue } case define.SearchFilterOfficial: - if len(arr) == 2 && arr[1] == "false" { + if ok && value == "false" { sFilter.IsOfficial = types.OptionalBoolFalse } else { sFilter.IsOfficial = types.OptionalBoolTrue diff --git a/vendor/go.podman.io/common/libimage/filters.go b/vendor/go.podman.io/common/libimage/filters.go index dd55713c8d..c0f20e1e47 100644 --- a/vendor/go.podman.io/common/libimage/filters.go +++ b/vendor/go.podman.io/common/libimage/filters.go @@ -11,6 +11,7 @@ import ( "strings" "time" + "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" filtersPkg "go.podman.io/common/pkg/filters" "go.podman.io/common/pkg/timetype" @@ -89,18 +90,16 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp var key, value string var filter filterFunc negate := false - split := strings.SplitN(f, "!=", 2) - if len(split) == 2 { + key, value, ok := strings.Cut(f, "!=") + if ok { negate = true } else { - split = strings.SplitN(f, "=", 2) - if len(split) != 2 { + key, value, ok = strings.Cut(f, "=") + if !ok { return nil, false, fmt.Errorf(filterInvalidValue, f) } } - key = split[0] - value = split[1] switch key { case "after", "since": img, err := r.time(key, value) @@ -483,9 +482,45 @@ func filterID(value string) filterFunc { // filterDigest creates a digest filter for matching the specified value. func filterDigest(value string) (filterFunc, error) { - if !strings.HasPrefix(value, "sha256:") { - return nil, fmt.Errorf("invalid value %q for digest filter", value) + // Check if it's a valid complete digest + if _, err := digest.Parse(value); err == nil { + // Valid complete digest - use it as is + return func(img *Image, _ *layerTree) (bool, error) { + return img.containsDigestPrefix(value), nil + }, nil + } + + // Not a complete digest - check if it's a valid partial digest with algorithm prefix + if !strings.Contains(value, ":") { + return nil, fmt.Errorf("invalid value %q for digest filter: must have algorithm prefix (e.g., sha256:)", value) + } + + parts := strings.SplitN(value, ":", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid value %q for digest filter: invalid format", value) + } + + algorithm := parts[0] + hashPart := parts[1] + + // Validate the algorithm is known + switch algorithm { + case "sha256", "sha512": // common algorithms + // Valid algorithm prefix + default: + return nil, fmt.Errorf("invalid value %q for digest filter: unsupported algorithm %q", value, algorithm) + } + + // Validate hash part contains only hex characters + if len(hashPart) == 0 { + return nil, fmt.Errorf("invalid value %q for digest filter: empty hash part", value) } + for _, c := range hashPart { + if (c < '0' || c > '9') && (c < 'a' || c > 'f') && (c < 'A' || c > 'F') { + return nil, fmt.Errorf("invalid value %q for digest filter: hash part contains non-hex characters", value) + } + } + return func(img *Image, _ *layerTree) (bool, error) { return img.containsDigestPrefix(value), nil }, nil diff --git a/vendor/go.podman.io/common/libimage/image.go b/vendor/go.podman.io/common/libimage/image.go index d0eb1b4c0e..366914114e 100644 --- a/vendor/go.podman.io/common/libimage/image.go +++ b/vendor/go.podman.io/common/libimage/image.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "path/filepath" + "slices" "sort" "strings" "time" @@ -173,12 +174,7 @@ func (i *Image) Digests() []digest.Digest { // hasDigest returns whether the specified value matches 
any digest of the // image. func (i *Image) hasDigest(wantedDigest digest.Digest) bool { - for _, d := range i.Digests() { - if d == wantedDigest { - return true - } - } - return false + return slices.Contains(i.Digests(), wantedDigest) } // containsDigestPrefix returns whether the specified value matches any digest of the @@ -478,7 +474,7 @@ func (i *Image) removeRecursive(ctx context.Context, rmMap map[string]*RemoveIma // error. if referencedBy != "" && numNames != 1 { byID := strings.HasPrefix(i.ID(), referencedBy) - byDigest := strings.HasPrefix(referencedBy, "sha256:") + byDigest := isDigestReference(referencedBy) if !options.Force { if byID && numNames > 1 { return processedIDs, fmt.Errorf("unable to delete image %q by ID with more than one tag (%s): please force removal", i.ID(), i.Names()) @@ -581,7 +577,7 @@ var errTagDigest = errors.New("tag by digest not supported") // Tag the image with the specified name and store it in the local containers // storage. The name is normalized according to the rules of NormalizeName. func (i *Image) Tag(name string) error { - if strings.HasPrefix(name, "sha256:") { // ambiguous input + if isDigestReference(name) { // ambiguous input return fmt.Errorf("%s: %w", name, errTagDigest) } @@ -617,7 +613,7 @@ var errUntagDigest = errors.New("untag by digest not supported") // the local containers storage. The name is normalized according to the rules // of NormalizeName. func (i *Image) Untag(name string) error { - if strings.HasPrefix(name, "sha256:") { // ambiguous input + if isDigestReference(name) { // ambiguous input return fmt.Errorf("%s: %w", name, errUntagDigest) } @@ -638,16 +634,9 @@ func (i *Image) Untag(name string) error { name = ref.String() - foundName := false - for _, n := range i.Names() { - if n == name { - foundName = true - break - } - } // Return an error if the name is not found, the c/storage // RemoveNames() API does not create one if no match is found. - if !foundName { + if !slices.Contains(i.Names(), name) { return fmt.Errorf("%s: %w", name, errTagUnknown) } @@ -1039,6 +1028,25 @@ func getImageID(ctx context.Context, src types.ImageReference, sys *types.System return "@" + imageDigest.Encoded(), nil } +// getImageDigestString creates an image object and returns the full digest string +// (with algorithm prefix) of the config blob for use in image names. +func getImageDigestString(ctx context.Context, src types.ImageReference, sys *types.SystemContext) (string, error) { + newImg, err := src.NewImage(ctx, sys) + if err != nil { + return "", err + } + defer func() { + if err := newImg.Close(); err != nil { + logrus.Errorf("Failed to close image: %q", err) + } + }() + imageDigest := newImg.ConfigInfo().Digest + if err = imageDigest.Validate(); err != nil { + return "", fmt.Errorf("getting config info: %w", err) + } + return imageDigest.String(), nil +} + // Checks whether the image matches the specified platform. 
// Returns // - 1) a matching error that can be used for logging (or returning) what does not match diff --git a/vendor/go.podman.io/common/libimage/image_config.go b/vendor/go.podman.io/common/libimage/image_config.go index 6c6bb063e6..a34bc3be92 100644 --- a/vendor/go.podman.io/common/libimage/image_config.go +++ b/vendor/go.podman.io/common/libimage/image_config.go @@ -40,17 +40,17 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint: for _, change := range changes { // First, let's assume proper Dockerfile format - space // separator between instruction and value - split := strings.SplitN(change, " ", 2) + outerKey, value, ok := strings.Cut(change, " ") - if len(split) != 2 { - split = strings.SplitN(change, "=", 2) - if len(split) != 2 { + if !ok { + outerKey, value, ok = strings.Cut(change, "=") + if !ok { return nil, fmt.Errorf("invalid change %q - must be formatted as KEY VALUE", change) } } - outerKey := strings.ToUpper(strings.TrimSpace(split[0])) - value := strings.TrimSpace(split[1]) + outerKey = strings.ToUpper(strings.TrimSpace(outerKey)) + value = strings.TrimSpace(value) switch outerKey { case "USER": // Assume literal contents are the user. @@ -96,18 +96,11 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint: // For now: we only support key=value // We will attempt to strip quotation marks if present. - var key, val string - - splitEnv := strings.SplitN(value, "=", 2) - key = splitEnv[0] + key, val, _ := strings.Cut(value, "=") // val is "" if there is no "=" // We do need a key if key == "" { return nil, fmt.Errorf("invalid change %q - ENV must have at least one argument", change) } - // Perfectly valid to not have a value - if len(splitEnv) == 2 { - val = splitEnv[1] - } if strings.HasPrefix(key, `"`) && strings.HasSuffix(key, `"`) { key = strings.TrimPrefix(strings.TrimSuffix(key, `"`), `"`) @@ -192,17 +185,11 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint: // Potentially problematic: LABEL might theoretically // allow an = in the key? If people really do this, we // may need to investigate more advanced parsing. 
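// Illustrative sketch, not part of the vendored diff: the parsing hunks above
// replace strings.SplitN(s, sep, 2) with strings.Cut, which returns the two
// halves plus an explicit "found" flag instead of a slice whose length has to
// be checked.
package main

import (
	"fmt"
	"strings"
)

func main() {
	key, val, ok := strings.Cut("LABEL=maintainer=alice", "=")
	fmt.Println(key, val, ok) // LABEL maintainer=alice true

	key, val, ok = strings.Cut("ENTRYPOINT", "=")
	fmt.Println(key, val, ok) // ENTRYPOINT  false
}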
- var ( - key, val string - ) - - splitLabel := strings.SplitN(value, "=", 2) + key, val, ok := strings.Cut(value, "=") // Unlike ENV, LABEL must have a value - if len(splitLabel) != 2 { + if !ok { return nil, fmt.Errorf("invalid change %q - LABEL must be formatted key=value", change) } - key = splitLabel[0] - val = splitLabel[1] if strings.HasPrefix(key, `"`) && strings.HasSuffix(key, `"`) { key = strings.TrimPrefix(strings.TrimSuffix(key, `"`), `"`) diff --git a/vendor/go.podman.io/common/libimage/import.go b/vendor/go.podman.io/common/libimage/import.go index 54a77e4e95..c6e34fef0d 100644 --- a/vendor/go.podman.io/common/libimage/import.go +++ b/vendor/go.podman.io/common/libimage/import.go @@ -128,5 +128,11 @@ func (r *Runtime) Import(ctx context.Context, path string, options *ImportOption } } - return "sha256:" + name, nil + // Get the proper digest string with the correct algorithm + digestString, err := getImageDigestString(ctx, srcRef, r.systemContextCopy()) + if err != nil { + return "", err + } + + return digestString, nil } diff --git a/vendor/go.podman.io/common/libimage/manifest_list.go b/vendor/go.podman.io/common/libimage/manifest_list.go index 1aa31b1d79..f084bbe55f 100644 --- a/vendor/go.podman.io/common/libimage/manifest_list.go +++ b/vendor/go.podman.io/common/libimage/manifest_list.go @@ -303,10 +303,8 @@ func (m *ManifestList) LookupInstance(ctx context.Context, architecture, os, var } for _, image := range allImages { - for _, imageDigest := range append(image.Digests(), image.Digest()) { - if imageDigest == instanceDigest { - return image, nil - } + if slices.Contains(image.Digests(), instanceDigest) || instanceDigest == image.Digest() { + return image, nil } } diff --git a/vendor/go.podman.io/common/libimage/manifests/manifests.go b/vendor/go.podman.io/common/libimage/manifests/manifests.go index b99d8fc2c2..2c77653ede 100644 --- a/vendor/go.podman.io/common/libimage/manifests/manifests.go +++ b/vendor/go.podman.io/common/libimage/manifests/manifests.go @@ -252,10 +252,8 @@ func (l *list) InstanceByFile(file string) (digest.Digest, error) { return "", err } for instanceDigest, files := range l.artifacts.Files { - for _, file := range files { - if file == abs { - return instanceDigest, nil - } + if slices.Contains(files, abs) { + return instanceDigest, nil } } return "", os.ErrNotExist @@ -698,6 +696,7 @@ type AddArtifactOptions struct { Annotations map[string]string // optional, default is none SubjectReference types.ImageReference // optional ExcludeTitles bool // don't add "org.opencontainers.image.title" annotations set to file base names + DigestAlgorithm *digest.Algorithm // optional digest algorithm for content addressing, defaults to SHA256 } // AddArtifact creates an artifact manifest describing the specified file or @@ -707,6 +706,11 @@ type AddArtifactOptions struct { // the image index and get the same end-result, but this should save them some // work. 
func (l *list) AddArtifact(ctx context.Context, sys *types.SystemContext, options AddArtifactOptions, files ...string) (digest.Digest, error) { + // Determine the digest algorithm to use, defaulting to SHA256 for OCI compatibility + digestAlgorithm := digest.SHA256 + if options.DigestAlgorithm != nil { + digestAlgorithm = *options.DigestAlgorithm + } // If we were given a subject, build a descriptor for it first, since // it might be remote, and anything else we do before looking at it // might have to get thrown away if we can't get to it for whatever @@ -765,7 +769,7 @@ func (l *list) AddArtifact(ctx context.Context, sys *types.SystemContext, option defer f.Close() // Hang on to a copy of the first 512 bytes, but digest the whole thing. - digester := digest.Canonical.Digester() + digester := digestAlgorithm.Digester() writeCounter := ioutils.NewWriteCounter(digester.Hash()) var detectableData bytes.Buffer _, err = io.CopyN(writeCounter, io.TeeReader(f, &detectableData), 512) @@ -855,7 +859,7 @@ func (l *list) AddArtifact(ctx context.Context, sys *types.SystemContext, option if err != nil { return "", fmt.Errorf("recording artifact config data file %q: %w", options.ConfigFile, err) } - digester := digest.Canonical.Digester() + digester := digestAlgorithm.Digester() counter := ioutils.NewWriteCounter(digester.Hash()) if err := func() error { f, err := os.Open(filePath) @@ -876,7 +880,7 @@ func (l *list) AddArtifact(ctx context.Context, sys *types.SystemContext, option configFilePath = filePath } else { decoder := bytes.NewReader(configDescriptor.Data) - digester := digest.Canonical.Digester() + digester := digestAlgorithm.Digester() counter := ioutils.NewWriteCounter(digester.Hash()) if _, err := io.Copy(counter, decoder); err != nil { return "", fmt.Errorf("digesting inlined artifact config data: %w", err) @@ -886,7 +890,7 @@ func (l *list) AddArtifact(ctx context.Context, sys *types.SystemContext, option } } else { configDescriptor.Data = nil - configDescriptor.Digest = digest.Canonical.FromString("") + configDescriptor.Digest = digestAlgorithm.FromString("") } // Construct the manifest. @@ -964,7 +968,7 @@ func LockerForImage(store storage.Store, image string) (lockfile.Locker, error) if err != nil { return nil, fmt.Errorf("locating image %q for locating lock: %w", image, err) } - d := digest.NewDigestFromEncoded(digest.Canonical, img.ID) + d := digest.NewDigestFromEncoded(store.GetDigestAlgorithm(), img.ID) if err := d.Validate(); err != nil { return nil, fmt.Errorf("coercing image ID for %q into a digest: %w", image, err) } diff --git a/vendor/go.podman.io/common/libimage/pull.go b/vendor/go.podman.io/common/libimage/pull.go index 32a391fea6..d50b2a4b6c 100644 --- a/vendor/go.podman.io/common/libimage/pull.go +++ b/vendor/go.podman.io/common/libimage/pull.go @@ -28,6 +28,55 @@ import ( "go.podman.io/storage" ) +// isDigestReference checks if the given name is a digest reference (e.g., "sha256:..." or "sha512:..."). 
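// Illustrative sketch, not part of the vendored diff: AddArtifact above now
// selects a digest.Algorithm (defaulting to SHA-256) and digests content
// through it, so the same code path yields either sha256 or sha512 IDs. The
// "artifact payload" string below is just a stand-in for real file content.
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	for _, algo := range []digest.Algorithm{digest.SHA256, digest.SHA512} {
		digester := algo.Digester()
		digester.Hash().Write([]byte("artifact payload")) // hash.Hash.Write never fails
		d := digester.Digest()
		fmt.Println(d.Algorithm(), len(d.Encoded())) // sha256 64, then sha512 128
	}
}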
+func isDigestReference(name string) bool { + // Check if it has the format of a digest reference: algorithm:hexstring + if !strings.Contains(name, ":") { + return false + } + + parts := strings.SplitN(name, ":", 2) + if len(parts) != 2 { + return false + } + + algorithm := parts[0] + hashPart := parts[1] + + // Check if the algorithm is a known digest algorithm + switch algorithm { + case "sha256", "sha512": + // Valid algorithm, now check if hash looks reasonable + if len(hashPart) == 0 { + return false + } + // Check if it's all hex characters + for _, c := range hashPart { + if (c < '0' || c > '9') && (c < 'a' || c > 'f') && (c < 'A' || c > 'F') { + return false + } + } + return true + default: + return false + } +} + +// trimDigestPrefix removes any digest algorithm prefix from the name. +// If the name is not a digest reference, returns the original name. +func trimDigestPrefix(name string) string { + if !isDigestReference(name) { + return name + } + + parts := strings.SplitN(name, ":", 2) + if len(parts) != 2 { + return name + } + + return parts[1] +} + // PullOptions allows for customizing image pulls. type PullOptions struct { CopyOptions @@ -101,7 +150,7 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP // If the image clearly refers to a local one, we can look it up directly. // In fact, we need to since they are not parseable. - if strings.HasPrefix(name, "sha256:") || (len(name) == 64 && !strings.ContainsAny(name, "/.:@")) { + if isDigestReference(name) || (len(name) == 64 && !strings.ContainsAny(name, "/.:@")) { if pullPolicy == config.PullPolicyAlways { return nil, fmt.Errorf("pull policy is always but image has been referred to by ID (%s)", name) } @@ -253,17 +302,20 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, storageName = imageName case ociTransport.Transport.Name(): - split := strings.SplitN(ref.StringWithinTransport(), ":", 2) - if len(split) == 1 || split[1] == "" { + _, refName, ok := strings.Cut(ref.StringWithinTransport(), ":") + if !ok || refName == "" { // Same trick as for the dir transport: we cannot use // the path to a directory as the name. storageName, err = getImageID(ctx, ref, nil) if err != nil { return nil, nil, err } - imageName = "sha256:" + storageName[1:] + imageName, err = getImageDigestString(ctx, ref, nil) + if err != nil { + return nil, nil, err + } } else { // If the OCI-reference includes an image reference, use it - storageName = split[1] + storageName = refName imageName = storageName } @@ -280,7 +332,10 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, if err != nil { return nil, nil, err } - imageName = "sha256:" + storageName[1:] + imageName, err = getImageDigestString(ctx, ref, nil) + if err != nil { + return nil, nil, err + } default: named, err := NormalizeName(storageName) if err != nil { @@ -306,7 +361,10 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, if err != nil { return nil, nil, err } - imageName = "sha256:" + storageName[1:] + imageName, err = getImageDigestString(ctx, ref, nil) + if err != nil { + return nil, nil, err + } } // Create a storage reference. @@ -340,8 +398,12 @@ func (r *Runtime) storageReferencesReferencesFromArchiveReader(ctx context.Conte } destNames = append(destNames, destName) // Make sure the image can be loaded after the pull by - // replacing the @ with sha256:. 
- imageNames = append(imageNames, "sha256:"+destName[1:]) + // using the proper digest string with correct algorithm. + digestString, err := getImageDigestString(ctx, readerRef, &r.systemContext) + if err != nil { + return nil, nil, err + } + imageNames = append(imageNames, digestString) } else { for i := range destNames { ref, err := NormalizeName(destNames[i]) diff --git a/vendor/go.podman.io/common/libimage/runtime.go b/vendor/go.podman.io/common/libimage/runtime.go index 3378e6120a..a79a46ffc2 100644 --- a/vendor/go.podman.io/common/libimage/runtime.go +++ b/vendor/go.podman.io/common/libimage/runtime.go @@ -273,9 +273,9 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, byDigest := false originalName := name - if strings.HasPrefix(name, "sha256:") { + if isDigestReference(name) { byDigest = true - name = strings.TrimPrefix(name, "sha256:") + name = trimDigestPrefix(name) } byFullID := reference.IsFullIdentifier(name) diff --git a/vendor/go.podman.io/common/libimage/search.go b/vendor/go.podman.io/common/libimage/search.go index 513852ec85..cda0930672 100644 --- a/vendor/go.podman.io/common/libimage/search.go +++ b/vendor/go.podman.io/common/libimage/search.go @@ -94,11 +94,11 @@ func (r *Runtime) Search(ctx context.Context, term string, options *SearchOption // that we cannot use the reference parser from the containers/image // library as the search term may container arbitrary input such as // wildcards. See bugzilla.redhat.com/show_bug.cgi?id=1846629. - spl := strings.SplitN(term, "/", 2) + perhapsRegistry, perhapsTerm, ok := strings.Cut(term, "/") switch { - case len(spl) > 1: - searchRegistries = []string{spl[0]} - term = spl[1] + case ok: + searchRegistries = []string{perhapsRegistry} + term = perhapsTerm case len(options.Registries) > 0: searchRegistries = options.Registries default: @@ -203,15 +203,9 @@ func (r *Runtime) searchImageInRegistry(ctx context.Context, term, registry stri // limit is the number of results to output // if the total number of results is less than the limit, output all // if the limit has been set by the user, output those number of queries - limit = searchMaxQueries - if len(results) < limit { - limit = len(results) - } + limit = min(len(results), searchMaxQueries) if options.Limit != 0 { - limit = len(results) - if options.Limit < len(results) { - limit = options.Limit - } + limit = min(len(results), options.Limit) } paramsArr := []SearchResult{} @@ -264,15 +258,9 @@ func searchRepositoryTags(ctx context.Context, sys *types.SystemContext, registr if err != nil { return nil, fmt.Errorf("getting repository tags: %v", err) } - limit := searchMaxQueries - if len(tags) < limit { - limit = len(tags) - } + limit := min(len(tags), searchMaxQueries) if options.Limit != 0 { - limit = len(tags) - if options.Limit < limit { - limit = options.Limit - } + limit = min(len(tags), options.Limit) } paramsArr := []SearchResult{} for i := range limit { diff --git a/vendor/go.podman.io/common/libnetwork/cni/cni_conversion.go b/vendor/go.podman.io/common/libnetwork/cni/cni_conversion.go index 7b5d4eab75..9cbb82953f 100644 --- a/vendor/go.podman.io/common/libnetwork/cni/cni_conversion.go +++ b/vendor/go.podman.io/common/libnetwork/cni/cni_conversion.go @@ -15,6 +15,7 @@ import ( "time" "github.com/containernetworking/cni/libcni" + "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" internalutil "go.podman.io/common/libnetwork/internal/util" "go.podman.io/common/libnetwork/types" @@ -22,10 +23,10 @@ import ( 
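// Illustrative sketch, not part of the vendored diff: the search.go hunks
// above collapse the hand-rolled "take the smaller of two lengths" logic into
// the built-in min function (Go 1.21+). searchMaxQueries and the user limit
// below are stand-ins for the real values.
package main

import "fmt"

func main() {
	const searchMaxQueries = 25
	results := make([]string, 40)

	limit := min(len(results), searchMaxQueries)
	fmt.Println(limit) // 25

	if userLimit := 10; userLimit != 0 {
		limit = min(len(results), userLimit)
	}
	fmt.Println(limit) // 10
}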
"golang.org/x/sys/unix" ) -func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath string) (*types.Network, error) { +func createNetworkFromCNIConfigList(conf *libcni.NetworkConfigList, confPath string, digestAlgorithm digest.Algorithm) (*types.Network, error) { network := types.Network{ Name: conf.Name, - ID: getNetworkIDFromName(conf.Name), + ID: getNetworkIDFromName(conf.Name, digestAlgorithm), Labels: map[string]string{}, Options: map[string]string{}, IPAMOptions: map[string]string{}, @@ -355,9 +356,7 @@ func convertSpecgenPortsToCNIPorts(ports []types.PortMapping) ([]cniPortMapEntry if port.Protocol == "" { return nil, errors.New("port protocol should not be empty") } - protocols := strings.Split(port.Protocol, ",") - - for _, protocol := range protocols { + for protocol := range strings.SplitSeq(port.Protocol, ",") { if !slices.Contains([]string{"tcp", "udp", "sctp"}, protocol) { return nil, fmt.Errorf("unknown port protocol %s", protocol) } diff --git a/vendor/go.podman.io/common/libnetwork/cni/config.go b/vendor/go.podman.io/common/libnetwork/cni/config.go index a8060bd723..03386ea68e 100644 --- a/vendor/go.podman.io/common/libnetwork/cni/config.go +++ b/vendor/go.podman.io/common/libnetwork/cni/config.go @@ -9,6 +9,7 @@ import ( "os" "slices" + "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" internalutil "go.podman.io/common/libnetwork/internal/util" "go.podman.io/common/libnetwork/types" @@ -105,7 +106,7 @@ func (n *cniNetwork) networkCreate(newNetwork *types.Network, defaultNet bool) ( } // generate the network ID - newNetwork.ID = getNetworkIDFromName(newNetwork.Name) + newNetwork.ID = getNetworkIDFromName(newNetwork.Name, digest.SHA256) // when we do not have ipam we must disable dns internalutil.IpamNoneDisableDNS(newNetwork) diff --git a/vendor/go.podman.io/common/libnetwork/cni/network.go b/vendor/go.podman.io/common/libnetwork/cni/network.go index 8a22773388..8462875941 100644 --- a/vendor/go.podman.io/common/libnetwork/cni/network.go +++ b/vendor/go.podman.io/common/libnetwork/cni/network.go @@ -4,8 +4,6 @@ package cni import ( "context" - "crypto/sha256" - "encoding/hex" "errors" "fmt" "io/fs" @@ -15,6 +13,7 @@ import ( "time" "github.com/containernetworking/cni/libcni" + "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" "go.podman.io/common/libnetwork/internal/rootlessnetns" "go.podman.io/common/libnetwork/types" @@ -212,7 +211,7 @@ func (n *cniNetwork) loadNetworks() error { continue } - net, err := createNetworkFromCNIConfigList(conf, file) + net, err := createNetworkFromCNIConfigList(conf, file, digest.SHA256) if err != nil { // ignore ENOENT as the config has been removed in the meantime so we can just ignore this case if !errors.Is(err, fs.ErrNotExist) { @@ -286,11 +285,14 @@ func (n *cniNetwork) getNetwork(nameOrID string) (*network, error) { return nil, fmt.Errorf("unable to find network with name or ID %s: %w", nameOrID, types.ErrNoSuchNetwork) } -// getNetworkIDFromName creates a network ID from the name. It is just the -// sha256 hash so it is not safe but it should be safe enough for our use case. -func getNetworkIDFromName(name string) string { - hash := sha256.Sum256([]byte(name)) - return hex.EncodeToString(hash[:]) +// getNetworkIDFromName creates a network ID from the name using the specified digest algorithm. 
+func getNetworkIDFromName(name string, algorithm digest.Algorithm) string { + // Use the digest library's built-in functionality + digester := algorithm.Digester() + digester.Hash().Write([]byte(name)) + digest := digester.Digest() + // Return just the hex part without the algorithm prefix + return digest.Encoded() } // Implement the NetUtil interface for easy code sharing with other network interfaces. diff --git a/vendor/go.podman.io/common/libnetwork/cni/run.go b/vendor/go.podman.io/common/libnetwork/cni/run.go index ef2b158f10..7877891a9a 100644 --- a/vendor/go.podman.io/common/libnetwork/cni/run.go +++ b/vendor/go.podman.io/common/libnetwork/cni/run.go @@ -177,9 +177,9 @@ func getRuntimeConfig(netns, conName, conID, networkName string, ports []cniPort } // Propagate environment CNI_ARGS - for _, kvpairs := range strings.Split(os.Getenv("CNI_ARGS"), ";") { - if keyval := strings.SplitN(kvpairs, "=", 2); len(keyval) == 2 { - rt.Args = append(rt.Args, [2]string{keyval[0], keyval[1]}) + for kvpairs := range strings.SplitSeq(os.Getenv("CNI_ARGS"), ";") { + if key, val, ok := strings.Cut(kvpairs, "="); ok { + rt.Args = append(rt.Args, [2]string{key, val}) } } diff --git a/vendor/go.podman.io/common/libnetwork/etchosts/hosts.go b/vendor/go.podman.io/common/libnetwork/etchosts/hosts.go index dbde190fe0..2e3ccaa27c 100644 --- a/vendor/go.podman.io/common/libnetwork/etchosts/hosts.go +++ b/vendor/go.podman.io/common/libnetwork/etchosts/hosts.go @@ -236,24 +236,23 @@ func checkIfEntryExists(current HostEntry, entries HostEntries) bool { func parseExtraHosts(extraHosts []string, hostContainersInternalIP string) (HostEntries, error) { entries := make(HostEntries, 0, len(extraHosts)) for _, entry := range extraHosts { - values := strings.SplitN(entry, ":", 2) - if len(values) != 2 { + namesString, ip, ok := strings.Cut(entry, ":") + if !ok { return nil, fmt.Errorf("unable to parse host entry %q: incorrect format", entry) } - if values[0] == "" { + if namesString == "" { return nil, fmt.Errorf("hostname in host entry %q is empty", entry) } - if values[1] == "" { + if ip == "" { return nil, fmt.Errorf("IP address in host entry %q is empty", entry) } - ip := values[1] - if values[1] == HostGateway { + if ip == HostGateway { if hostContainersInternalIP == "" { return nil, fmt.Errorf("unable to replace %q of host entry %q: host containers internal IP address is empty", HostGateway, entry) } ip = hostContainersInternalIP } - names := strings.Split(values[0], ";") + names := strings.Split(namesString, ";") e := HostEntry{IP: ip, Names: names} entries = append(entries, e) } diff --git a/vendor/go.podman.io/common/libnetwork/pasta/pasta_linux.go b/vendor/go.podman.io/common/libnetwork/pasta/pasta_linux.go index 33043e8275..a72e7adfde 100644 --- a/vendor/go.podman.io/common/libnetwork/pasta/pasta_linux.go +++ b/vendor/go.podman.io/common/libnetwork/pasta/pasta_linux.go @@ -212,8 +212,7 @@ func createPastaArgs(opts *SetupOptions) ([]string, []string, []string, error) { } for _, i := range opts.Ports { - protocols := strings.Split(i.Protocol, ",") - for _, protocol := range protocols { + for protocol := range strings.SplitSeq(i.Protocol, ",") { var addr string if i.HostIP != "" { diff --git a/vendor/go.podman.io/common/libnetwork/resolvconf/resolvconf.go b/vendor/go.podman.io/common/libnetwork/resolvconf/resolvconf.go index 5724dfcc2e..a3647528fc 100644 --- a/vendor/go.podman.io/common/libnetwork/resolvconf/resolvconf.go +++ b/vendor/go.podman.io/common/libnetwork/resolvconf/resolvconf.go @@ -76,9 +76,8 @@ 
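The algorithm parameter threaded through getNetworkIDFromName is what makes sha512-based network IDs possible. A minimal sketch of the effect, assuming the opencontainers/go-digest module and a made-up network name; networkID here simply mirrors the new helper.

package main

import (
	_ "crypto/sha256" // register sha256 with go-digest
	_ "crypto/sha512" // register sha512 with go-digest
	"fmt"

	"github.com/opencontainers/go-digest"
)

// networkID mirrors the new getNetworkIDFromName: hash the name and
// return only the hex-encoded part, without the "algo:" prefix.
func networkID(name string, algorithm digest.Algorithm) string {
	return algorithm.FromString(name).Encoded()
}

func main() {
	fmt.Println(networkID("podman", digest.SHA256)) // 64 hex characters
	fmt.Println(networkID("podman", digest.SHA512)) // 128 hex characters
}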
func filterResolvDNS(resolvConf []byte, ipv6Enabled bool, netnsEnabled bool) []b // getLines parses input into lines and strips away comments. func getLines(input []byte) [][]byte { - lines := bytes.Split(input, []byte("\n")) var output [][]byte - for _, currentLine := range lines { + for currentLine := range bytes.SplitSeq(input, []byte("\n")) { commentIndex := bytes.Index(currentLine, []byte("#")) if commentIndex == -1 { output = append(output, currentLine) diff --git a/vendor/go.podman.io/common/libnetwork/slirp4netns/const.go b/vendor/go.podman.io/common/libnetwork/slirp4netns/const.go index d75785025b..82f3bff3a0 100644 --- a/vendor/go.podman.io/common/libnetwork/slirp4netns/const.go +++ b/vendor/go.podman.io/common/libnetwork/slirp4netns/const.go @@ -3,14 +3,7 @@ package slirp4netns import "net" const ( - ipv6ConfDefaultAcceptDadSysctl = "/proc/sys/net/ipv6/conf/default/accept_dad" - BinaryName = "slirp4netns" - - // defaultMTU the default MTU override. - defaultMTU = 65520 - - // default slirp4ns subnet. - defaultSubnet = "10.0.2.0/24" + BinaryName = "slirp4netns" ) // SetupResult return type from Setup(). diff --git a/vendor/go.podman.io/common/libnetwork/slirp4netns/const_linux.go b/vendor/go.podman.io/common/libnetwork/slirp4netns/const_linux.go new file mode 100644 index 0000000000..8e2742fe3f --- /dev/null +++ b/vendor/go.podman.io/common/libnetwork/slirp4netns/const_linux.go @@ -0,0 +1,11 @@ +package slirp4netns + +const ( + ipv6ConfDefaultAcceptDadSysctl = "/proc/sys/net/ipv6/conf/default/accept_dad" + + // defaultMTU the default MTU override. + defaultMTU = 65520 + + // default slirp4ns subnet. + defaultSubnet = "10.0.2.0/24" +) diff --git a/vendor/go.podman.io/common/libnetwork/slirp4netns/slirp4netns.go b/vendor/go.podman.io/common/libnetwork/slirp4netns/slirp4netns.go index c4020cae25..083a4e5fcc 100644 --- a/vendor/go.podman.io/common/libnetwork/slirp4netns/slirp4netns.go +++ b/vendor/go.podman.io/common/libnetwork/slirp4netns/slirp4netns.go @@ -124,11 +124,10 @@ func parseNetworkOptions(config *config.Config, extraOptions []string) (*network enableIPv6: true, } for _, o := range options { - parts := strings.SplitN(o, "=", 2) - if len(parts) < 2 { + option, value, ok := strings.Cut(o, "=") + if !ok { return nil, fmt.Errorf("unknown option for slirp4netns: %q", o) } - option, value := parts[0], parts[1] switch option { case "cidr": ipv4, _, err := net.ParseCIDR(value) @@ -639,8 +638,7 @@ func setupRootlessPortMappingViaSlirp(ports []types.PortMapping, cmd *exec.Cmd, // for each port we want to add we need to open a connection to the slirp4netns control socket // and send the add_hostfwd command. for _, port := range ports { - protocols := strings.Split(port.Protocol, ",") - for _, protocol := range protocols { + for protocol := range strings.SplitSeq(port.Protocol, ",") { hostIP := port.HostIP if hostIP == "" { hostIP = "0.0.0.0" diff --git a/vendor/go.podman.io/common/pkg/apparmor/apparmor_linux.go b/vendor/go.podman.io/common/pkg/apparmor/apparmor_linux.go index 9b0c766653..d677729d8c 100644 --- a/vendor/go.podman.io/common/pkg/apparmor/apparmor_linux.go +++ b/vendor/go.podman.io/common/pkg/apparmor/apparmor_linux.go @@ -208,14 +208,14 @@ func parseAAParserVersion(output string) (int, error) { // AppArmor parser version 2.9.1 // Copyright (C) 1999-2008 Novell Inc. // Copyright 2009-2012 Canonical Ltd. 
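Several hunks above replace strings.Split loops with Go 1.24's strings.SplitSeq / bytes.SplitSeq, which iterate the pieces without allocating an intermediate slice. A minimal sketch of the pattern as used for the comma-separated protocol lists; the input is hypothetical.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Same shape as the port-protocol loops above: range over the
	// iterator directly instead of building a []string first.
	for protocol := range strings.SplitSeq("tcp,udp,sctp", ",") {
		fmt.Println(protocol)
	}
}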
- lines := strings.SplitN(output, "\n", 2) - words := strings.Split(lines[0], " ") + firstLine, _, _ := strings.Cut(output, "\n") + words := strings.Split(firstLine, " ") version := words[len(words)-1] // trim "-beta1" suffix from version="3.0.0-beta1" if exists - version = strings.SplitN(version, "-", 2)[0] + version, _, _ = strings.Cut(version, "-") // also trim "~..." suffix used historically (https://gitlab.com/apparmor/apparmor/-/commit/bca67d3d27d219d11ce8c9cc70612bd637f88c10) - version = strings.SplitN(version, "~", 2)[0] + version, _, _ = strings.Cut(version, "~") // split by major minor version v := strings.Split(version, ".") diff --git a/vendor/go.podman.io/common/pkg/auth/auth.go b/vendor/go.podman.io/common/pkg/auth/auth.go index 8cb9f3a633..b6f7228546 100644 --- a/vendor/go.podman.io/common/pkg/auth/auth.go +++ b/vendor/go.podman.io/common/pkg/auth/auth.go @@ -233,8 +233,7 @@ func parseCredentialsKey(arg string, acceptRepositories bool) (key, registry str return "", "", err } - split := strings.Split(key, "/") - registry = split[0] + registry, _, _ = strings.Cut(key, "/") if !acceptRepositories { return registry, registry, nil diff --git a/vendor/go.podman.io/common/pkg/cgroups/cgroups_linux.go b/vendor/go.podman.io/common/pkg/cgroups/cgroups_linux.go index 322436a93e..1c66a8d9cc 100644 --- a/vendor/go.podman.io/common/pkg/cgroups/cgroups_linux.go +++ b/vendor/go.podman.io/common/pkg/cgroups/cgroups_linux.go @@ -111,7 +111,7 @@ func getAvailableControllers(exclude map[string]controllerHandler, cgroup2 bool) if err != nil { return nil, fmt.Errorf("failed while reading controllers for cgroup v2: %w", err) } - for _, controllerName := range strings.Fields(string(controllersFileBytes)) { + for controllerName := range strings.FieldsSeq(string(controllersFileBytes)) { c := controller{ name: controllerName, symlink: false, @@ -197,10 +197,9 @@ func getCgroupPathForCurrentProcess() (string, error) { s := bufio.NewScanner(f) for s.Scan() { text := s.Text() - procEntries := strings.SplitN(text, "::", 2) // set process cgroupPath only if entry is valid - if len(procEntries) > 1 { - cgroupPath = procEntries[1] + if _, p, ok := strings.Cut(text, "::"); ok { + cgroupPath = p } } if err := s.Err(); err != nil { @@ -278,10 +277,10 @@ func readFileByKeyAsUint64(path, key string) (uint64, error) { if err != nil { return 0, err } - for _, line := range strings.Split(string(content), "\n") { - fields := strings.SplitN(line, " ", 2) - if fields[0] == key { - v := cleanString(fields[1]) + for line := range strings.SplitSeq(string(content), "\n") { + k, v, _ := strings.Cut(line, " ") + if k == key { + v := cleanString(v) if v == "max" { return math.MaxUint64, nil } @@ -684,7 +683,7 @@ func readAcctList(ctr *CgroupControl, name string) ([]uint64, error) { return nil, err } r := []uint64{} - for _, s := range strings.Split(string(data), " ") { + for s := range strings.SplitSeq(string(data), " ") { s = cleanString(s) if s == "" { break @@ -874,7 +873,7 @@ func rmDirRecursively(path string) error { } // kill all the processes that are still part of the cgroup if procs, err := os.ReadFile(filepath.Join(path, "cgroup.procs")); err == nil { - for _, pidS := range strings.Split(string(procs), "\n") { + for pidS := range strings.SplitSeq(string(procs), "\n") { if pid, err := strconv.Atoi(pidS); err == nil { _ = unix.Kill(pid, signal) } diff --git a/vendor/go.podman.io/common/pkg/cgroups/cgroups_unsupported.go b/vendor/go.podman.io/common/pkg/cgroups/cgroups_unsupported.go index 1602912122..5940dc82d9 100644 
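The strings.Cut rewrites above share one shape: a single split plus an explicit found flag instead of indexing into a SplitN result. A small sketch of the key/value case with made-up input lines.

package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, line := range []string{"nr_periods 284", "max", ""} {
		if key, value, ok := strings.Cut(line, " "); ok {
			fmt.Printf("key=%q value=%q\n", key, value)
		} else {
			fmt.Printf("no separator in %q\n", line)
		}
	}
}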
--- a/vendor/go.podman.io/common/pkg/cgroups/cgroups_unsupported.go +++ b/vendor/go.podman.io/common/pkg/cgroups/cgroups_unsupported.go @@ -2,10 +2,6 @@ package cgroups -import ( - "os" -) - // IsCgroup2UnifiedMode returns whether we are running in cgroup 2 cgroup2 mode. func IsCgroup2UnifiedMode() (bool, error) { return false, nil @@ -16,7 +12,3 @@ func IsCgroup2UnifiedMode() (bool, error) { func UserOwnsCurrentSystemdCgroup() (bool, error) { return false, nil } - -func rmDirRecursively(path string) error { - return os.RemoveAll(path) -} diff --git a/vendor/go.podman.io/common/pkg/cgroups/systemd_linux.go b/vendor/go.podman.io/common/pkg/cgroups/systemd_linux.go index 4ae3c0af46..c0bc6d9d38 100644 --- a/vendor/go.podman.io/common/pkg/cgroups/systemd_linux.go +++ b/vendor/go.podman.io/common/pkg/cgroups/systemd_linux.go @@ -280,7 +280,7 @@ func resourcesToProps(res *cgroups.Resources, v2 bool) (map[string]uint64, map[s func rangeToBits(str string) ([]byte, error) { bits := new(big.Int) - for _, r := range strings.Split(str, ",") { + for r := range strings.SplitSeq(str, ",") { // allow extra spaces around r = strings.TrimSpace(r) // allow empty elements (extra commas) diff --git a/vendor/go.podman.io/common/pkg/cgroups/utils_linux.go b/vendor/go.podman.io/common/pkg/cgroups/utils_linux.go index 2143358e67..a1b18a9695 100644 --- a/vendor/go.podman.io/common/pkg/cgroups/utils_linux.go +++ b/vendor/go.podman.io/common/pkg/cgroups/utils_linux.go @@ -270,7 +270,7 @@ func MoveUnderCgroup(cgroup, subtree string, processes []uint32) error { if err != nil { return err } - for _, pid := range bytes.Split(processesData, []byte("\n")) { + for pid := range bytes.SplitSeq(processesData, []byte("\n")) { if len(pid) == 0 { continue } diff --git a/vendor/go.podman.io/common/pkg/cgroupv2/cgroups_unsupported.go b/vendor/go.podman.io/common/pkg/cgroupv2/cgroups_unsupported.go index 56269aa42d..8de8e60d80 100644 --- a/vendor/go.podman.io/common/pkg/cgroupv2/cgroups_unsupported.go +++ b/vendor/go.podman.io/common/pkg/cgroupv2/cgroups_unsupported.go @@ -2,7 +2,7 @@ package cgroupv2 -// Enabled returns whether we are running on cgroup v2 +// Enabled returns whether we are running on cgroup v2. 
func Enabled() (bool, error) { return false, nil } diff --git a/vendor/go.podman.io/common/pkg/completion/completion.go b/vendor/go.podman.io/common/pkg/completion/completion.go index fef95b7f35..f487d68630 100644 --- a/vendor/go.podman.io/common/pkg/completion/completion.go +++ b/vendor/go.podman.io/common/pkg/completion/completion.go @@ -72,7 +72,7 @@ func autocompleteSubIDName(filename string) ([]string, cobra.ShellCompDirective) var names []string scanner := bufio.NewScanner(file) for scanner.Scan() { - name := strings.SplitN(scanner.Text(), ":", 2)[0] + name, _, _ := strings.Cut(scanner.Text(), ":") names = append(names, name) } if err = scanner.Err(); err != nil { diff --git a/vendor/go.podman.io/common/pkg/config/config.go b/vendor/go.podman.io/common/pkg/config/config.go index 9d5a339375..2a11ccb005 100644 --- a/vendor/go.podman.io/common/pkg/config/config.go +++ b/vendor/go.podman.io/common/pkg/config/config.go @@ -761,9 +761,9 @@ func (c *Config) CheckCgroupsAndAdjustConfig() { } } } else { - for _, part := range strings.Split(session, ",") { - if strings.HasPrefix(part, "unix:path=") { - err := fileutils.Exists(strings.TrimPrefix(part, "unix:path=")) + for part := range strings.SplitSeq(session, ",") { + if path, ok := strings.CutPrefix(part, "unix:path="); ok { + err := fileutils.Exists(path) hasSession = err == nil break } @@ -1158,17 +1158,17 @@ func (c *Config) ImageCopyTmpDir() (string, error) { // setupEnv sets the environment variables for the engine. func (c *Config) setupEnv() error { for _, env := range c.Engine.Env.Get() { - splitEnv := strings.SplitN(env, "=", 2) - if len(splitEnv) != 2 { + key, value, ok := strings.Cut(env, "=") + if !ok { logrus.Warnf("invalid environment variable for engine %s, valid configuration is KEY=value pair", env) continue } // skip if the env is already defined - if _, ok := os.LookupEnv(splitEnv[0]); ok { - logrus.Debugf("environment variable %s is already defined, skip the settings from containers.conf", splitEnv[0]) + if _, ok := os.LookupEnv(key); ok { + logrus.Debugf("environment variable %s is already defined, skip the settings from containers.conf", key) continue } - if err := os.Setenv(splitEnv[0], splitEnv[1]); err != nil { + if err := os.Setenv(key, value); err != nil { return err } } @@ -1202,7 +1202,7 @@ func (e eventsLogMaxSize) MarshalText() ([]byte, error) { v := []byte{} return v, nil } - return []byte(fmt.Sprintf("%d", e)), nil + return fmt.Appendf(nil, "%d", e), nil } func ValidateImageVolumeMode(mode string) error { diff --git a/vendor/go.podman.io/common/pkg/config/config_unsupported.go b/vendor/go.podman.io/common/pkg/config/config_unsupported.go index 793a20ea8b..94938e243b 100644 --- a/vendor/go.podman.io/common/pkg/config/config_unsupported.go +++ b/vendor/go.podman.io/common/pkg/config/config_unsupported.go @@ -7,7 +7,7 @@ func selinuxEnabled() bool { } // Capabilities returns the capabilities parses the Add and Drop capability -// list from the default capabilities for the container +// list from the default capabilities for the container. 
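The session-bus check above now combines strings.SplitSeq with strings.CutPrefix (Go 1.20+), which tests for and strips the unix:path= prefix in one call. A short sketch with a hypothetical DBUS_SESSION_BUS_ADDRESS value.

package main

import (
	"fmt"
	"strings"
)

func main() {
	session := "unix:path=/run/user/1000/bus,guid=abc123" // hypothetical value
	for part := range strings.SplitSeq(session, ",") {
		if path, ok := strings.CutPrefix(part, "unix:path="); ok {
			fmt.Println("socket path:", path)
			break
		}
	}
}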
func (c *Config) Capabilities(user string, addCapabilities, dropCapabilities []string) ([]string, error) { return nil, nil } diff --git a/vendor/go.podman.io/common/pkg/config/containers.conf b/vendor/go.podman.io/common/pkg/config/containers.conf index d0da362002..57a9fcfa4c 100644 --- a/vendor/go.podman.io/common/pkg/config/containers.conf +++ b/vendor/go.podman.io/common/pkg/config/containers.conf @@ -317,11 +317,13 @@ default_sysctls = [ # #umask = "0022" -# Default way to to create a User namespace for the container +# Default way to create a USER namespace for the container. # Options are: -# `auto` Create unique User Namespace for the container. -# `host` Share host User Namespace with the container. -# +# `private` Create private USER Namespace for the container, without adding any UID mappings. +# `host` Share host USER Namespace with the container. Root in the container is mapped to the host user UID. +# `auto` Automatically create a USER namespace with a unique mapping. +# `keep-id` Like `private`, but container UIDs are mapped to the host user's subordinate UIDs listed in `/etc/subuid`, and the current user's `UID:GID` are mapped to the same values in the container. +# `no-map` Like `keep-id`, but the current user's `UID:GID` does not map to any `UID:GID` inside the container. #userns = "host" # Default way to to create a UTS namespace for the container diff --git a/vendor/go.podman.io/common/pkg/config/default.go b/vendor/go.podman.io/common/pkg/config/default.go index c48185646f..3bf0bc1692 100644 --- a/vendor/go.podman.io/common/pkg/config/default.go +++ b/vendor/go.podman.io/common/pkg/config/default.go @@ -145,16 +145,6 @@ var ( // helper binary in a different location. additionalHelperBinariesDir string - defaultUnixComposeProviders = []string{ - "$HOME/.docker/cli-plugins/docker-compose", - "/usr/local/lib/docker/cli-plugins/docker-compose", - "/usr/local/libexec/docker/cli-plugins/docker-compose", - "/usr/lib/docker/cli-plugins/docker-compose", - "/usr/libexec/docker/cli-plugins/docker-compose", - "docker-compose", - "podman-compose", - } - defaultContainerEnv = []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"} ) diff --git a/vendor/go.podman.io/common/pkg/config/default_bsd.go b/vendor/go.podman.io/common/pkg/config/default_bsd.go index 2e87dc8b0f..6d7d0d6dc6 100644 --- a/vendor/go.podman.io/common/pkg/config/default_bsd.go +++ b/vendor/go.podman.io/common/pkg/config/default_bsd.go @@ -28,7 +28,3 @@ func getLibpodTmpDir() string { func getDefaultMachineVolumes() []string { return []string{"$HOME:$HOME"} } - -func getDefaultComposeProviders() []string { - return defaultUnixComposeProviders -} diff --git a/vendor/go.podman.io/common/pkg/config/default_darwin.go b/vendor/go.podman.io/common/pkg/config/default_darwin.go index 86fa6d5087..03d12a2a30 100644 --- a/vendor/go.podman.io/common/pkg/config/default_darwin.go +++ b/vendor/go.podman.io/common/pkg/config/default_darwin.go @@ -12,7 +12,7 @@ func getLibpodTmpDir() string { return "/run/libpod" } -// getDefaultMachineVolumes returns default mounted volumes (possibly with env vars, which will be expanded) +// getDefaultMachineVolumes returns default mounted volumes (possibly with env vars, which will be expanded). 
func getDefaultMachineVolumes() []string { return []string{ "/Users:/Users", diff --git a/vendor/go.podman.io/common/pkg/config/default_linux.go b/vendor/go.podman.io/common/pkg/config/default_linux.go index ae9810fad7..fdcc6f3912 100644 --- a/vendor/go.podman.io/common/pkg/config/default_linux.go +++ b/vendor/go.podman.io/common/pkg/config/default_linux.go @@ -29,7 +29,3 @@ func getLibpodTmpDir() string { func getDefaultMachineVolumes() []string { return []string{"$HOME:$HOME"} } - -func getDefaultComposeProviders() []string { - return defaultUnixComposeProviders -} diff --git a/vendor/go.podman.io/common/pkg/config/default_unix_notdarwin.go b/vendor/go.podman.io/common/pkg/config/default_unix_notdarwin.go new file mode 100644 index 0000000000..913dd774e8 --- /dev/null +++ b/vendor/go.podman.io/common/pkg/config/default_unix_notdarwin.go @@ -0,0 +1,17 @@ +//go:build linux || freebsd || netbsd || openbsd + +package config + +var defaultUnixComposeProviders = []string{ + "$HOME/.docker/cli-plugins/docker-compose", + "/usr/local/lib/docker/cli-plugins/docker-compose", + "/usr/local/libexec/docker/cli-plugins/docker-compose", + "/usr/lib/docker/cli-plugins/docker-compose", + "/usr/libexec/docker/cli-plugins/docker-compose", + "docker-compose", + "podman-compose", +} + +func getDefaultComposeProviders() []string { + return defaultUnixComposeProviders +} diff --git a/vendor/go.podman.io/common/pkg/config/default_unsupported.go b/vendor/go.podman.io/common/pkg/config/default_unsupported.go index 46653e3996..6b0ed468f0 100644 --- a/vendor/go.podman.io/common/pkg/config/default_unsupported.go +++ b/vendor/go.podman.io/common/pkg/config/default_unsupported.go @@ -4,17 +4,7 @@ package config import "os" -// isCgroup2UnifiedMode returns whether we are running in cgroup2 mode. -func isCgroup2UnifiedMode() (isUnified bool, isUnifiedErr error) { - return false, nil -} - -// getDefaultProcessLimits returns the nofile and nproc for the current process in ulimits format -func getDefaultProcessLimits() []string { - return []string{} -} - -// getDefaultTmpDir for linux +// getDefaultTmpDir for linux. func getDefaultTmpDir() string { // first check the TMPDIR env var if path, found := os.LookupEnv("TMPDIR"); found { diff --git a/vendor/go.podman.io/common/pkg/config/default_windows.go b/vendor/go.podman.io/common/pkg/config/default_windows.go index d57e775b5a..c21fff76ab 100644 --- a/vendor/go.podman.io/common/pkg/config/default_windows.go +++ b/vendor/go.podman.io/common/pkg/config/default_windows.go @@ -9,16 +9,6 @@ import ( "go.podman.io/storage/pkg/homedir" ) -// isCgroup2UnifiedMode returns whether we are running in cgroup2 mode. 
-func isCgroup2UnifiedMode() (isUnified bool, isUnifiedErr error) { - return false, nil -} - -// getDefaultProcessLimits returns the nofile and nproc for the current process in ulimits format -func getDefaultProcessLimits() []string { - return []string{} -} - // getDefaultTmpDir for windows func getDefaultTmpDir() string { // first check the Temp env var diff --git a/vendor/go.podman.io/common/pkg/config/systemd.go b/vendor/go.podman.io/common/pkg/config/systemd.go index 5996302b72..e7c15b5909 100644 --- a/vendor/go.podman.io/common/pkg/config/systemd.go +++ b/vendor/go.podman.io/common/pkg/config/systemd.go @@ -5,16 +5,14 @@ package config import ( "os" "path/filepath" - "strings" "sync" "go.podman.io/common/pkg/cgroupv2" + "go.podman.io/common/pkg/systemd" "go.podman.io/storage/pkg/unshare" ) var ( - systemdOnce sync.Once - usesSystemd bool journaldOnce sync.Once usesJournald bool ) @@ -51,14 +49,7 @@ func defaultLogDriver() string { } func useSystemd() bool { - systemdOnce.Do(func() { - dat, err := os.ReadFile("/proc/1/comm") - if err == nil { - val := strings.TrimSuffix(string(dat), "\n") - usesSystemd = (val == "systemd") - } - }) - return usesSystemd + return systemd.RunsOnSystemd() } func useJournald() bool { diff --git a/vendor/go.podman.io/common/pkg/manifests/manifests.go b/vendor/go.podman.io/common/pkg/manifests/manifests.go index 5198fc0ae6..6884c13d04 100644 --- a/vendor/go.podman.io/common/pkg/manifests/manifests.go +++ b/vendor/go.podman.io/common/pkg/manifests/manifests.go @@ -4,6 +4,7 @@ import ( "encoding/json" "errors" "fmt" + "maps" "os" "slices" "strings" @@ -238,9 +239,7 @@ func (l *list) SetAnnotations(instanceDigest *digest.Digest, annotations map[str if *a == nil { (*a) = make(map[string]string) } - for k, v := range annotations { - (*a)[k] = v - } + maps.Copy((*a), annotations) if len(*a) == 0 { *a = nil } @@ -259,9 +258,7 @@ func (l *list) Annotations(instanceDigest *digest.Digest) (map[string]string, er a = oci.Annotations } annotations := make(map[string]string) - for k, v := range a { - annotations[k] = v - } + maps.Copy(annotations, a) return annotations, nil } diff --git a/vendor/go.podman.io/common/pkg/report/formatter.go b/vendor/go.podman.io/common/pkg/report/formatter.go index dcc09fdb16..e05e9762f4 100644 --- a/vendor/go.podman.io/common/pkg/report/formatter.go +++ b/vendor/go.podman.io/common/pkg/report/formatter.go @@ -2,6 +2,7 @@ package report import ( "io" + "maps" "strings" "text/tabwriter" "text/template" @@ -109,12 +110,8 @@ func (f *Formatter) Parse(origin Origin, text string) (*Formatter, error) { // A default template function will be replaced if there is a key collision. 
func (f *Formatter) Funcs(funcMap template.FuncMap) *Formatter { m := make(template.FuncMap, len(DefaultFuncs)+len(funcMap)) - for k, v := range DefaultFuncs { - m[k] = v - } - for k, v := range funcMap { - m[k] = v - } + maps.Copy(m, DefaultFuncs) + maps.Copy(m, funcMap) f.template = f.template.Funcs(funcMap) return f } diff --git a/vendor/go.podman.io/common/pkg/report/template.go b/vendor/go.podman.io/common/pkg/report/template.go index bb6ce7bea5..a1113d869c 100644 --- a/vendor/go.podman.io/common/pkg/report/template.go +++ b/vendor/go.podman.io/common/pkg/report/template.go @@ -3,6 +3,7 @@ package report import ( "bytes" "encoding/json" + "maps" "reflect" "strings" "text/template" @@ -95,7 +96,7 @@ func truncateWithLength(source string, length int) string { // 3) --format 'table {{.ID}}' # includes headers func Headers(object any, overrides map[string]string) []map[string]string { value := reflect.ValueOf(object) - if value.Kind() == reflect.Ptr { + if value.Kind() == reflect.Pointer { value = value.Elem() } @@ -106,9 +107,7 @@ func Headers(object any, overrides map[string]string) []map[string]string { // Recurse to find field names from promoted structs if field.Type.Kind() == reflect.Struct && field.Anonymous { h := Headers(reflect.New(field.Type).Interface(), nil) - for k, v := range h[0] { - headers[k] = v - } + maps.Copy(headers, h[0]) continue } name := strings.Join(camelcase.Split(field.Name), " ") @@ -146,12 +145,8 @@ func (t *Template) Parse(text string) (*Template, error) { // A default template function will be replace if there is a key collision. func (t *Template) Funcs(funcMap FuncMap) *Template { m := make(FuncMap) - for k, v := range DefaultFuncs { - m[k] = v - } - for k, v := range funcMap { - m[k] = v - } + maps.Copy(m, DefaultFuncs) + maps.Copy(m, funcMap) return &Template{Template: t.Template.Funcs(template.FuncMap(m)), isTable: t.isTable} } diff --git a/vendor/go.podman.io/common/pkg/seccomp/filter.go b/vendor/go.podman.io/common/pkg/seccomp/filter_linux.go similarity index 100% rename from vendor/go.podman.io/common/pkg/seccomp/filter.go rename to vendor/go.podman.io/common/pkg/seccomp/filter_linux.go diff --git a/vendor/go.podman.io/common/pkg/seccomp/seccomp_unsupported.go b/vendor/go.podman.io/common/pkg/seccomp/seccomp_unsupported.go index 1bf8155ddc..1311de0c75 100644 --- a/vendor/go.podman.io/common/pkg/seccomp/seccomp_unsupported.go +++ b/vendor/go.podman.io/common/pkg/seccomp/seccomp_unsupported.go @@ -14,12 +14,12 @@ import ( var errNotSupported = errors.New("seccomp not enabled in this build") -// LoadProfile returns an error on unsupported systems +// LoadProfile returns an error on unsupported systems. func LoadProfile(body string, rs *specs.Spec) (*specs.LinuxSeccomp, error) { return nil, errNotSupported } -// GetDefaultProfile returns an error on unsupported systems +// GetDefaultProfile returns an error on unsupported systems. func GetDefaultProfile(rs *specs.Spec) (*specs.LinuxSeccomp, error) { return nil, errNotSupported } @@ -29,7 +29,7 @@ func LoadProfileFromBytes(body []byte, rs *specs.Spec) (*specs.LinuxSeccomp, err return nil, errNotSupported } -// LoadProfileFromConfig takes a Seccomp struct and a spec to retrieve a LinuxSeccomp +// LoadProfileFromConfig takes a Seccomp struct and a spec to retrieve a LinuxSeccomp. 
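The Funcs rewrites above rely on maps.Copy (Go 1.21+) copying DefaultFuncs first and the caller's map second, so the later copy wins on a key collision, matching the documented override behavior. A standalone sketch with toy maps.

package main

import (
	"fmt"
	"maps"
)

func main() {
	defaults := map[string]string{"join": "builtin", "lower": "builtin"}
	overrides := map[string]string{"join": "custom"}

	merged := make(map[string]string, len(defaults)+len(overrides))
	maps.Copy(merged, defaults)  // defaults first
	maps.Copy(merged, overrides) // later copy wins on collision
	fmt.Println(merged)          // map[join:custom lower:builtin]
}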
func LoadProfileFromConfig(config *Seccomp, specgen *specs.Spec) (*specs.LinuxSeccomp, error) { return nil, errNotSupported } diff --git a/vendor/go.podman.io/common/pkg/seccomp/validate.go b/vendor/go.podman.io/common/pkg/seccomp/validate_linux.go similarity index 100% rename from vendor/go.podman.io/common/pkg/seccomp/validate.go rename to vendor/go.podman.io/common/pkg/seccomp/validate_linux.go diff --git a/vendor/go.podman.io/common/pkg/secrets/shelldriver/shelldriver.go b/vendor/go.podman.io/common/pkg/secrets/shelldriver/shelldriver.go index 5cf6aa48d2..c8b806d2a0 100644 --- a/vendor/go.podman.io/common/pkg/secrets/shelldriver/shelldriver.go +++ b/vendor/go.podman.io/common/pkg/secrets/shelldriver/shelldriver.go @@ -89,8 +89,7 @@ func (d *Driver) List() (secrets []string, err error) { return nil, err } - parts := bytes.Split(buf.Bytes(), []byte("\n")) - for _, part := range parts { + for part := range bytes.SplitSeq(buf.Bytes(), []byte("\n")) { id := strings.Trim(string(part), " \r\n") if len(id) > 0 { secrets = append(secrets, id) diff --git a/vendor/go.podman.io/common/pkg/ssh/connection_golang.go b/vendor/go.podman.io/common/pkg/ssh/connection_golang.go index 591e1a7575..c8e3c5bd85 100644 --- a/vendor/go.podman.io/common/pkg/ssh/connection_golang.go +++ b/vendor/go.podman.io/common/pkg/ssh/connection_golang.go @@ -166,8 +166,7 @@ func golangConnectionScp(options ConnectionScpOptions) (*ConnectionScpReport, er parent := filepath.Dir(remoteFile) path := string(filepath.Separator) - dirs := strings.Split(parent, path) - for _, dir := range dirs { + for dir := range strings.SplitSeq(parent, path) { path = filepath.Join(path, dir) // ignore errors due to most of the dirs already existing _ = sc.Mkdir(path) diff --git a/vendor/go.podman.io/common/pkg/ssh/connection_native.go b/vendor/go.podman.io/common/pkg/ssh/connection_native.go index 39127bbab4..a274db88ab 100644 --- a/vendor/go.podman.io/common/pkg/ssh/connection_native.go +++ b/vendor/go.podman.io/common/pkg/ssh/connection_native.go @@ -38,8 +38,8 @@ func nativeConnectionCreate(options ConnectionCreateOptions) error { return err } - if strings.Contains(uri.Host, "/run") { - uri.Host = strings.Split(uri.Host, "/run")[0] + if host, _, ok := strings.Cut(uri.Host, "/run"); ok { + uri.Host = host } conf, err := config.Default() if err != nil { @@ -114,8 +114,8 @@ func nativeConnectionExec(options ConnectionExecOptions, input io.Reader) (*Conn output := &bytes.Buffer{} errors := &bytes.Buffer{} - if strings.Contains(uri.Host, "/run") { - uri.Host = strings.Split(uri.Host, "/run")[0] + if host, _, ok := strings.Cut(uri.Host, "/run"); ok { + uri.Host = host } options.Args = append([]string{uri.User.String() + "@" + uri.Hostname()}, options.Args...) diff --git a/vendor/go.podman.io/common/pkg/subscriptions/subscriptions.go b/vendor/go.podman.io/common/pkg/subscriptions/subscriptions.go index a907ea6758..a8921941ba 100644 --- a/vendor/go.podman.io/common/pkg/subscriptions/subscriptions.go +++ b/vendor/go.podman.io/common/pkg/subscriptions/subscriptions.go @@ -144,15 +144,12 @@ func getMounts(filePath string) []string { } // getHostAndCtrDir separates the host:container paths. 
-func getMountsMap(path string) (string, string, error) { //nolint - arr := strings.SplitN(path, ":", 2) - switch len(arr) { - case 1: - return arr[0], arr[0], nil - case 2: - return arr[0], arr[1], nil - } - return "", "", fmt.Errorf("unable to get host and container dir from path: %s", path) +func getMountsMap(path string) (string, string) { + host, ctr, ok := strings.Cut(path, ":") + if !ok { + return path, path + } + return host, ctr } // Return true iff the system is in FIPS mode as determined by reading @@ -238,10 +235,7 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string defaultMountsPaths := getMounts(filePath) mounts := make([]rspec.Mount, 0, len(defaultMountsPaths)) for _, path := range defaultMountsPaths { - hostDirOrFile, ctrDirOrFile, err := getMountsMap(path) - if err != nil { - return nil, err - } + hostDirOrFile, ctrDirOrFile := getMountsMap(path) // skip if the hostDirOrFile path doesn't exist fileInfo, err := os.Stat(hostDirOrFile) if err != nil { diff --git a/vendor/go.podman.io/common/pkg/timetype/timestamp.go b/vendor/go.podman.io/common/pkg/timetype/timestamp.go index 2fc99d0e39..ca404c84f3 100644 --- a/vendor/go.podman.io/common/pkg/timetype/timestamp.go +++ b/vendor/go.podman.io/common/pkg/timetype/timestamp.go @@ -117,19 +117,19 @@ func ParseTimestamps(value string, def int64) (secs, nanoSecs int64, err error) } func parseTimestamp(value string) (int64, int64, error) { - sa := strings.SplitN(value, ".", 2) - s, err := strconv.ParseInt(sa[0], 10, 64) + secStr, nsStr, ok := strings.Cut(value, ".") + s, err := strconv.ParseInt(secStr, 10, 64) if err != nil { return s, 0, err } - if len(sa) != 2 { + if !ok { return s, 0, nil } - n, err := strconv.ParseInt(sa[1], 10, 64) + n, err := strconv.ParseInt(nsStr, 10, 64) if err != nil { return s, n, err } // should already be in nanoseconds but just in case convert n to nanoseconds - n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1])))) + n = int64(float64(n) * math.Pow(float64(10), float64(9-len(nsStr)))) return s, n, nil } diff --git a/vendor/go.podman.io/common/pkg/timezone/timezone.go b/vendor/go.podman.io/common/pkg/timezone/timezone.go index 2dd55d8358..40333841e2 100644 --- a/vendor/go.podman.io/common/pkg/timezone/timezone.go +++ b/vendor/go.podman.io/common/pkg/timezone/timezone.go @@ -1,4 +1,4 @@ -//go:build !windows +//go:build linux || freebsd package timezone @@ -104,3 +104,7 @@ func copyTimezoneFile(containerRunDir, zonePath string) (string, error) { } return localtimeCopy, err } + +func openDirectory(path string) (fd int, err error) { + return unix.Open(path, unix.O_RDONLY|O_PATH|unix.O_CLOEXEC, 0) +} diff --git a/vendor/go.podman.io/common/pkg/timezone/timezone_freebsd.go b/vendor/go.podman.io/common/pkg/timezone/timezone_freebsd.go new file mode 100644 index 0000000000..008b474f72 --- /dev/null +++ b/vendor/go.podman.io/common/pkg/timezone/timezone_freebsd.go @@ -0,0 +1,7 @@ +//go:build !windows && !linux + +package timezone + +// O_PATH value on freebsd. We must define O_PATH ourselves +// until https://github.com/golang/go/issues/54355 is fixed. 
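getMountsMap above now treats a path without a colon as the same path on host and in the container instead of returning an error. A small sketch of both cases; splitMount mirrors the simplified helper and the paths are only examples.

package main

import (
	"fmt"
	"strings"
)

// splitMount mirrors the simplified getMountsMap: "host:ctr" or a bare path.
func splitMount(path string) (host, ctr string) {
	host, ctr, ok := strings.Cut(path, ":")
	if !ok {
		return path, path
	}
	return host, ctr
}

func main() {
	fmt.Println(splitMount("/usr/share/rhel/secrets:/run/secrets"))
	fmt.Println(splitMount("/etc/pki/entitlement"))
}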
+const O_PATH = 0x00400000 //nolint:staticcheck // ST1003: should not use ALL_CAPS diff --git a/vendor/go.podman.io/common/pkg/timezone/timezone_linux.go b/vendor/go.podman.io/common/pkg/timezone/timezone_linux.go index ef096af59d..56e5eaa718 100644 --- a/vendor/go.podman.io/common/pkg/timezone/timezone_linux.go +++ b/vendor/go.podman.io/common/pkg/timezone/timezone_linux.go @@ -4,6 +4,5 @@ import ( "golang.org/x/sys/unix" ) -func openDirectory(path string) (fd int, err error) { - return unix.Open(path, unix.O_RDONLY|unix.O_PATH|unix.O_CLOEXEC, 0) -} +// O_PATH value on linux. +const O_PATH = unix.O_PATH //nolint:staticcheck // ST1003: should not use ALL_CAPS diff --git a/vendor/go.podman.io/common/pkg/timezone/timezone_unix.go b/vendor/go.podman.io/common/pkg/timezone/timezone_unix.go deleted file mode 100644 index bb57036f82..0000000000 --- a/vendor/go.podman.io/common/pkg/timezone/timezone_unix.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build !windows && !linux - -package timezone - -import ( - "golang.org/x/sys/unix" -) - -func openDirectory(path string) (fd int, err error) { - const O_PATH = 0x00400000 - return unix.Open(path, unix.O_RDONLY|O_PATH|unix.O_CLOEXEC, 0) -} diff --git a/vendor/go.podman.io/common/pkg/timezone/timezone_windows.go b/vendor/go.podman.io/common/pkg/timezone/timezone_windows.go deleted file mode 100644 index d89090eeb9..0000000000 --- a/vendor/go.podman.io/common/pkg/timezone/timezone_windows.go +++ /dev/null @@ -1,5 +0,0 @@ -package timezone - -func ConfigureContainerTimeZone(timezone, containerRunDir, mountPoint, etcPath, containerID string) (string, error) { - return "", nil -} diff --git a/vendor/go.podman.io/common/pkg/version/version.go b/vendor/go.podman.io/common/pkg/version/version.go index 9b13f484d1..80261c125e 100644 --- a/vendor/go.podman.io/common/pkg/version/version.go +++ b/vendor/go.podman.io/common/pkg/version/version.go @@ -28,10 +28,9 @@ func queryPackageVersion(cmdArg ...string) string { switch cmdArg[0] { case "/usr/bin/dlocate": // can return multiple matches - l := strings.Split(output, "\n") - output = l[0] - r := strings.Split(output, ": ") - regexpFormat := `^..\s` + r[0] + `\s` + output, _, _ = strings.Cut(output, "\n") + r, _, _ := strings.Cut(output, ": ") + regexpFormat := `^..\s` + r + `\s` cmd = exec.Command(cmdArg[0], "-P", regexpFormat, "-l") cmd.Env = []string{"COLUMNS=160"} // show entire value // dlocate always returns exit code 1 for list command @@ -46,9 +45,9 @@ } } case "/usr/bin/dpkg": - r := strings.Split(output, ": ") + r, _, _ := strings.Cut(output, ": ") queryFormat := `${Package}_${Version}_${Architecture}` - cmd = exec.Command("/usr/bin/dpkg-query", "-f", queryFormat, "-W", r[0]) + cmd = exec.Command("/usr/bin/dpkg-query", "-f", queryFormat, "-W", r) if outp, err := cmd.Output(); err == nil { output = string(outp) } diff --git a/vendor/go.podman.io/common/version/version.go b/vendor/go.podman.io/common/version/version.go index e13dd10e59..bdfd10c645 100644 --- a/vendor/go.podman.io/common/version/version.go +++ b/vendor/go.podman.io/common/version/version.go @@ -1,4 +1,4 @@ package version // Version is the version of the build.
-const Version = "0.65.0" +const Version = "0.66.0-dev" diff --git a/vendor/go.podman.io/image/v5/copy/compression.go b/vendor/go.podman.io/image/v5/copy/compression.go index 0ecc851866..e3e5230292 100644 --- a/vendor/go.podman.io/image/v5/copy/compression.go +++ b/vendor/go.podman.io/image/v5/copy/compression.go @@ -27,9 +27,10 @@ var ( // expectedBaseCompressionFormats is used to check if a blob with a specified media type is compressed // using the algorithm that the media type says it should be compressed with expectedBaseCompressionFormats = map[string]*compressiontypes.Algorithm{ - imgspecv1.MediaTypeImageLayerGzip: &compression.Gzip, - imgspecv1.MediaTypeImageLayerZstd: &compression.Zstd, - manifest.DockerV2Schema2LayerMediaType: &compression.Gzip, + imgspecv1.MediaTypeImageLayerGzip: &compression.Gzip, + imgspecv1.MediaTypeImageLayerZstd: &compression.Zstd, + manifest.DockerV2Schema2LayerMediaType: &compression.Gzip, + manifest.DockerV2SchemaLayerMediaTypeZstd: &compression.Zstd, } ) diff --git a/vendor/go.podman.io/image/v5/copy/single.go b/vendor/go.podman.io/image/v5/copy/single.go index 5c81fd2d53..e72b84bd8f 100644 --- a/vendor/go.podman.io/image/v5/copy/single.go +++ b/vendor/go.podman.io/image/v5/copy/single.go @@ -379,7 +379,14 @@ func (ic *imageCopier) noPendingManifestUpdates() bool { // compareImageDestinationManifestEqual compares the source and destination image manifests (reading the manifest from the // (possibly remote) destination). If they are equal, it returns a full copySingleImageResult, nil otherwise. func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context, targetInstance *digest.Digest) (*copySingleImageResult, error) { - srcManifestDigest, err := manifest.Digest(ic.src.ManifestBlob) + // Get the digest algorithm from the destination store if available + digestAlgorithm := digest.Canonical // Default fallback + if digestProvider, ok := ic.c.dest.(interface{ GetDigestAlgorithm() digest.Algorithm }); ok { + digestAlgorithm = digestProvider.GetDigestAlgorithm() + } + + + srcManifestDigest, err := manifest.DigestWithAlgorithm(ic.src.ManifestBlob, digestAlgorithm) if err != nil { return nil, fmt.Errorf("calculating manifest digest: %w", err) } @@ -397,7 +404,7 @@ func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context, return nil, nil } - destManifestDigest, err := manifest.Digest(destManifest) + destManifestDigest, err := manifest.DigestWithAlgorithm(destManifest, digestAlgorithm) if err != nil { return nil, fmt.Errorf("calculating manifest digest: %w", err) } @@ -601,7 +608,14 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc } ic.c.Printf("Writing manifest to image destination\n") - manifestDigest, err := manifest.Digest(man) + // Get the digest algorithm from the destination store if available + digestAlgorithm := digest.Canonical // Default fallback + if digestProvider, ok := ic.c.dest.(interface{ GetDigestAlgorithm() digest.Algorithm }); ok { + digestAlgorithm = digestProvider.GetDigestAlgorithm() + } + + + manifestDigest, err := manifest.DigestWithAlgorithm(man, digestAlgorithm) if err != nil { return nil, "", err } @@ -856,7 +870,13 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to } defer srcStream.Close() - blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, 
emptyLayer) + // Get the digest algorithm from the destination store if available + digestAlgorithm := digest.Canonical // Default fallback + if digestProvider, ok := ic.c.dest.(interface{ GetDigestAlgorithm() digest.Algorithm }); ok { + digestAlgorithm = digestProvider.GetDigestAlgorithm() + } + + blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer, digestAlgorithm) if err != nil { return types.BlobInfo{}, "", err } @@ -929,12 +949,12 @@ func updatedBlobInfoFromReuse(inputInfo types.BlobInfo, reusedBlob private.Reuse return res } -// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate ā€œdeferā€ scope. +// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate "defer" scope. // it copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest, // perhaps (de/re/)compressing the stream, // and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller. func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo, - diffIDIsNeeded bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) { + diffIDIsNeeded bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool, digestAlgorithm digest.Algorithm) (types.BlobInfo, <-chan diffIDResult, error) { var getDiffIDRecorder func(compressiontypes.DecompressorFunc) io.Writer // = nil var diffIDChan chan diffIDResult @@ -948,13 +968,13 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea getDiffIDRecorder = func(decompressor compressiontypes.DecompressorFunc) io.Writer { // If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further - // reading from the pipe has failed, we don’t really care. + // reading from the pipe has failed, we don't really care. // We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it, // the return value includes an error indication, which we do check. // // If this gets never called, pipeReader will not be used anywhere, but pipeWriter will only be // closed above, so we are happy enough with both pipeReader and pipeWriter to just get collected by GC. - go diffIDComputationGoroutine(diffIDChan, pipeReader, decompressor) // Closes pipeReader + go diffIDComputationGoroutine(diffIDChan, pipeReader, decompressor, digestAlgorithm) // Closes pipeReader return pipeWriter } } @@ -965,7 +985,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea } // diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest. 
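The copy path above discovers the destination's preferred digest algorithm through an optional-interface type assertion rather than widening the destination interface itself. A reduced sketch of that pattern; pickAlgorithm, sha512Dest and legacyDest are hypothetical names for illustration.

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// sha512Dest stands in for a destination that opted in to sha512.
type sha512Dest struct{}

func (sha512Dest) GetDigestAlgorithm() digest.Algorithm { return digest.SHA512 }

// legacyDest stands in for a destination that does not implement the method.
type legacyDest struct{}

// pickAlgorithm mirrors the assertion used in the copy code above.
func pickAlgorithm(dest any) digest.Algorithm {
	algo := digest.Canonical // default fallback (sha256)
	if p, ok := dest.(interface{ GetDigestAlgorithm() digest.Algorithm }); ok {
		algo = p.GetDigestAlgorithm()
	}
	return algo
}

func main() {
	fmt.Println(pickAlgorithm(sha512Dest{})) // sha512
	fmt.Println(pickAlgorithm(legacyDest{})) // sha256
}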
-func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compressiontypes.DecompressorFunc) { +func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compressiontypes.DecompressorFunc, digestAlgorithm digest.Algorithm) { result := diffIDResult{ digest: "", err: errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"), @@ -973,11 +993,11 @@ func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadClo defer func() { dest <- result }() defer layerStream.Close() // We do not care to bother the other end of the pipe with other failures; we send them to dest instead. - result.digest, result.err = computeDiffID(layerStream, decompressor) + result.digest, result.err = computeDiffID(layerStream, decompressor, digestAlgorithm) } // computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest. -func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorFunc) (digest.Digest, error) { +func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorFunc, digestAlgorithm digest.Algorithm) (digest.Digest, error) { if decompressor != nil { s, err := decompressor(stream) if err != nil { @@ -987,7 +1007,8 @@ func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorF stream = s } - return digest.Canonical.FromReader(stream) + // Use the provided digest algorithm for digest agility support + return digestAlgorithm.FromReader(stream) } // algorithmsByNames returns slice of Algorithms from a sequence of Algorithm Names diff --git a/vendor/go.podman.io/image/v5/docker/docker_client.go b/vendor/go.podman.io/image/v5/docker/docker_client.go index a83e19a36a..1c0d67105e 100644 --- a/vendor/go.podman.io/image/v5/docker/docker_client.go +++ b/vendor/go.podman.io/image/v5/docker/docker_client.go @@ -1066,6 +1066,8 @@ func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info ty cache.RecordKnownLocation(ref.Transport(), bicTransportScope(ref), info.Digest, newBICLocationReference(ref)) blobSize, err := getBlobSize(res) if err != nil { + // See above, we don't guarantee returning a size + logrus.Debugf("failed to get blob size: %v", err) blobSize = -1 } diff --git a/vendor/go.podman.io/image/v5/docker/internal/tarfile/src.go b/vendor/go.podman.io/image/v5/docker/internal/tarfile/src.go index 56421afa13..335c9c5e0d 100644 --- a/vendor/go.podman.io/image/v5/docker/internal/tarfile/src.go +++ b/vendor/go.podman.io/image/v5/docker/internal/tarfile/src.go @@ -109,6 +109,8 @@ func (s *Source) ensureCachedDataIsPresentPrivate() error { // Success; commit. s.tarManifest = tarManifest s.configBytes = configBytes + // Note: Using canonical digest for config digest storage. + // This is used internally and should be consistent. 
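With the new parameter, computeDiffID boils down to Algorithm.FromReader on the (decompressed) layer stream. A minimal sketch using an in-memory stream and both supported algorithms; the layer bytes are made up.

package main

import (
	"bytes"
	_ "crypto/sha256" // register sha256 with go-digest
	_ "crypto/sha512" // register sha512 with go-digest
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	layer := []byte("uncompressed layer bytes") // stand-in for the diff stream

	for _, algo := range []digest.Algorithm{digest.SHA256, digest.SHA512} {
		d, err := algo.FromReader(bytes.NewReader(layer))
		if err != nil {
			panic(err)
		}
		fmt.Println(d) // e.g. sha256:..., sha512:...
	}
}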
s.configDigest = digest.FromBytes(configBytes) s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs s.knownLayers = knownLayers diff --git a/vendor/go.podman.io/image/v5/internal/image/docker_schema2.go b/vendor/go.podman.io/image/v5/internal/image/docker_schema2.go index 9305524a08..62022a8d0b 100644 --- a/vendor/go.podman.io/image/v5/internal/image/docker_schema2.go +++ b/vendor/go.podman.io/image/v5/internal/image/docker_schema2.go @@ -110,9 +110,16 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) { if err != nil { return nil, err } - computedDigest := digest.FromBytes(blob) - if computedDigest != m.m.ConfigDescriptor.Digest { - return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest) + // Use the same digest algorithm as the expected digest for verification + expectedDigest := m.m.ConfigDescriptor.Digest + var computedDigest digest.Digest + if expectedDigest != "" && expectedDigest.Algorithm().Available() { + computedDigest = expectedDigest.Algorithm().FromBytes(blob) + } else { + computedDigest = digest.FromBytes(blob) + } + if computedDigest != expectedDigest { + return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, expectedDigest) } m.configBlob = blob } @@ -218,6 +225,8 @@ func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.Ma config := imgspecv1.Descriptor{ MediaType: imgspecv1.MediaTypeImageConfig, Size: int64(len(configOCIBytes)), + // Note: Using canonical digest for manifest generation. + // For digest agility, this could be configurable in the future. Digest: digest.FromBytes(configOCIBytes), } @@ -233,6 +242,8 @@ func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.Ma layers[idx].MediaType = imgspecv1.MediaTypeImageLayer case manifest.DockerV2Schema2LayerMediaType: layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip + case manifest.DockerV2SchemaLayerMediaTypeZstd: + layers[idx].MediaType = imgspecv1.MediaTypeImageLayerZstd default: return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType) } diff --git a/vendor/go.podman.io/image/v5/internal/image/oci.go b/vendor/go.podman.io/image/v5/internal/image/oci.go index 8b73b91ffa..ee2324bb31 100644 --- a/vendor/go.podman.io/image/v5/internal/image/oci.go +++ b/vendor/go.podman.io/image/v5/internal/image/oci.go @@ -74,9 +74,16 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) { if err != nil { return nil, err } - computedDigest := digest.FromBytes(blob) - if computedDigest != m.m.Config.Digest { - return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest) + // Use the same digest algorithm as the expected digest for verification + expectedDigest := m.m.Config.Digest + var computedDigest digest.Digest + if expectedDigest != "" && expectedDigest.Algorithm().Available() { + computedDigest = expectedDigest.Algorithm().FromBytes(blob) + } else { + computedDigest = digest.FromBytes(blob) + } + if computedDigest != expectedDigest { + return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, expectedDigest) } m.configBlob = blob } @@ -288,7 +295,7 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, options *type case imgspecv1.MediaTypeImageLayerGzip: layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType case 
imgspecv1.MediaTypeImageLayerZstd: - return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType) + return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not officially supported for docker images", layers[idx].MediaType) case ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc, ociencspec.MediaTypeLayerZstdEnc, ociencspec.MediaTypeLayerNonDistributableEnc, ociencspec.MediaTypeLayerNonDistributableGzipEnc, ociencspec.MediaTypeLayerNonDistributableZstdEnc: return nil, fmt.Errorf("during manifest conversion: encrypted layers (%q) are not supported in docker images", layers[idx].MediaType) diff --git a/vendor/go.podman.io/image/v5/internal/manifest/manifest.go b/vendor/go.podman.io/image/v5/internal/manifest/manifest.go index 7dfe77844e..530ef0a30f 100644 --- a/vendor/go.podman.io/image/v5/internal/manifest/manifest.go +++ b/vendor/go.podman.io/image/v5/internal/manifest/manifest.go @@ -26,6 +26,8 @@ const ( DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip" // DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers. DockerV2SchemaLayerMediaTypeUncompressed = "application/vnd.docker.image.rootfs.diff.tar" + // DockerV2SchemaLayerMediaTypeZstd is the MIME type used for zstd compressed layers. + DockerV2SchemaLayerMediaTypeZstd = "application/vnd.docker.image.rootfs.diff.tar.zstd" // DockerV2ListMediaType MIME type represents Docker manifest schema 2 list DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json" // DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers. @@ -121,9 +123,31 @@ func Digest(manifest []byte) (digest.Digest, error) { } } + // Note: This uses the canonical digest algorithm. For digest agility, + // callers should use a specific algorithm via digest.Algorithm.FromBytes() return digest.FromBytes(manifest), nil } +// DigestWithAlgorithm returns the digest of a docker manifest using the specified digest algorithm, +// with any necessary implied transformations like stripping v1s1 signatures. +func DigestWithAlgorithm(manifest []byte, algorithm digest.Algorithm) (digest.Digest, error) { + if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType { + sig, err := libtrust.ParsePrettySignature(manifest, "signatures") + if err != nil { + return "", err + } + manifest, err = sig.Payload() + if err != nil { + // Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string + // that libtrust itself has josebase64UrlEncode()d + return "", err + } + } + + // Use the provided digest algorithm for digest agility + return algorithm.FromBytes(manifest), nil +} + // MatchesDigest returns true iff the manifest matches expectedDigest. // Error may be set if this returns false.
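The ConfigBlob changes above verify a downloaded blob with the algorithm taken from the expected digest itself, so a sha512-pinned config is checked with sha512 rather than unconditionally with sha256. A reduced sketch of that verification step; verifyBlob and the sample blob are illustrative only.

package main

import (
	_ "crypto/sha256" // register sha256 with go-digest
	_ "crypto/sha512" // register sha512 with go-digest
	"fmt"

	"github.com/opencontainers/go-digest"
)

// verifyBlob mirrors the verification pattern used in the hunks above.
func verifyBlob(blob []byte, expected digest.Digest) error {
	var computed digest.Digest
	if expected != "" && expected.Algorithm().Available() {
		computed = expected.Algorithm().FromBytes(blob)
	} else {
		computed = digest.FromBytes(blob) // canonical fallback
	}
	if computed != expected {
		return fmt.Errorf("digest %s does not match expected %s", computed, expected)
	}
	return nil
}

func main() {
	blob := []byte(`{"config":{}}`)
	expected := digest.SHA512.FromBytes(blob) // pretend the manifest pinned sha512
	fmt.Println(verifyBlob(blob, expected))   // <nil>
}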
// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified, diff --git a/vendor/go.podman.io/image/v5/internal/putblobdigest/put_blob_digest.go b/vendor/go.podman.io/image/v5/internal/putblobdigest/put_blob_digest.go index ce50542751..b3018c8f61 100644 --- a/vendor/go.podman.io/image/v5/internal/putblobdigest/put_blob_digest.go +++ b/vendor/go.podman.io/image/v5/internal/putblobdigest/put_blob_digest.go @@ -13,22 +13,29 @@ type Digester struct { digester digest.Digester // Or nil } -// newDigester initiates computation of a digest.Canonical digest of stream, +// newDigester initiates computation of a digest of stream using the same algorithm as knownDigest if available, // if !validDigest; otherwise it just records knownDigest to be returned later. // The caller MUST use the returned stream instead of the original value. func newDigester(stream io.Reader, knownDigest digest.Digest, validDigest bool) (Digester, io.Reader) { if validDigest { return Digester{knownDigest: knownDigest}, stream } else { + // Use the algorithm from knownDigest if available and valid, otherwise fall back to canonical + algorithm := digest.Canonical + if knownDigest != "" { + if algo := knownDigest.Algorithm(); algo.Available() { + algorithm = algo + } + } res := Digester{ - digester: digest.Canonical.Digester(), + digester: algorithm.Digester(), } stream = io.TeeReader(stream, res.digester.Hash()) return res, stream } } -// DigestIfUnknown initiates computation of a digest.Canonical digest of stream, +// DigestIfUnknown initiates computation of a digest of stream using the same algorithm as the provided digest, // if no digest is supplied in the provided blobInfo; otherwise blobInfo.Digest will // be used (accepting any algorithm). // The caller MUST use the returned stream instead of the original value. @@ -37,7 +44,7 @@ func DigestIfUnknown(stream io.Reader, blobInfo types.BlobInfo) (Digester, io.Re return newDigester(stream, d, d != "") } -// DigestIfCanonicalUnknown initiates computation of a digest.Canonical digest of stream, +// DigestIfCanonicalUnknown initiates computation of a digest using the same algorithm as the provided digest, // if a digest.Canonical digest is not supplied in the provided blobInfo; // otherwise blobInfo.Digest will be used. // The caller MUST use the returned stream instead of the original value. diff --git a/vendor/go.podman.io/image/v5/manifest/docker_schema1.go b/vendor/go.podman.io/image/v5/manifest/docker_schema1.go index 28c9fea30e..c3f327c380 100644 --- a/vendor/go.podman.io/image/v5/manifest/docker_schema1.go +++ b/vendor/go.podman.io/image/v5/manifest/docker_schema1.go @@ -342,5 +342,7 @@ func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) { if err != nil { return "", err } + // Note: Using canonical digest for ImageID generation to maintain compatibility. + // ImageIDs need to be consistent across different systems. return digest.FromBytes(image).Encoded(), nil } diff --git a/vendor/go.podman.io/image/v5/manifest/manifest.go b/vendor/go.podman.io/image/v5/manifest/manifest.go index 45118fa4e1..2f2f9499ab 100644 --- a/vendor/go.podman.io/image/v5/manifest/manifest.go +++ b/vendor/go.podman.io/image/v5/manifest/manifest.go @@ -26,6 +26,9 @@ const ( DockerV2Schema2LayerMediaType = manifest.DockerV2Schema2LayerMediaType // DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers. 
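newDigester above picks its Digester from the caller-supplied digest when one is present and falls back to digest.Canonical otherwise, then tees the blob stream through it. A minimal sketch of that flow with a made-up blob; the known digest value is only an example.

package main

import (
	"bytes"
	_ "crypto/sha256" // register sha256 with go-digest
	_ "crypto/sha512" // register sha512 with go-digest
	"fmt"
	"io"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Pretend the caller supplied a sha512 digest for this blob.
	known := digest.SHA512.FromString("pretend-blob-descriptor")

	algorithm := digest.Canonical // sha256 fallback
	if known != "" && known.Algorithm().Available() {
		algorithm = known.Algorithm() // follow the algorithm of the supplied digest
	}

	digester := algorithm.Digester()
	stream := io.TeeReader(bytes.NewReader([]byte("blob body")), digester.Hash())

	// Consume the stream as PutBlob would; the digest accumulates as a side effect.
	if _, err := io.Copy(io.Discard, stream); err != nil {
		panic(err)
	}
	fmt.Println(digester.Digest()) // sha512:...
}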
DockerV2SchemaLayerMediaTypeUncompressed = manifest.DockerV2SchemaLayerMediaTypeUncompressed + // DockerV2SchemaLayerMediaTypeZstd is the mediaType used for zstd layers. + // Warning: This mediaType is not officially supported in https://github.com/distribution/distribution/blob/main/docs/content/spec/manifest-v2-2.md but some images may exhibit it. Support is partial. + DockerV2SchemaLayerMediaTypeZstd = manifest.DockerV2SchemaLayerMediaTypeZstd // DockerV2ListMediaType MIME type represents Docker manifest schema 2 list DockerV2ListMediaType = manifest.DockerV2ListMediaType // DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers. @@ -41,7 +44,7 @@ type NonImageArtifactError = manifest.NonImageArtifactError // SupportedSchema2MediaType checks if the specified string is a supported Docker v2s2 media type. func SupportedSchema2MediaType(m string) error { switch m { - case DockerV2ListMediaType, DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, DockerV2Schema2ConfigMediaType, DockerV2Schema2ForeignLayerMediaType, DockerV2Schema2ForeignLayerMediaTypeGzip, DockerV2Schema2LayerMediaType, DockerV2Schema2MediaType, DockerV2SchemaLayerMediaTypeUncompressed: + case DockerV2ListMediaType, DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, DockerV2Schema2ConfigMediaType, DockerV2Schema2ForeignLayerMediaType, DockerV2Schema2ForeignLayerMediaTypeGzip, DockerV2Schema2LayerMediaType, DockerV2Schema2MediaType, DockerV2SchemaLayerMediaTypeUncompressed, DockerV2SchemaLayerMediaTypeZstd: return nil default: return fmt.Errorf("unsupported docker v2s2 media type: %q", m) @@ -110,6 +113,12 @@ func Digest(manifestBlob []byte) (digest.Digest, error) { return manifest.Digest(manifestBlob) } +// DigestWithAlgorithm returns the digest of a docker manifest using the specified digest algorithm, +// with any necessary implied transformations like stripping v1s1 signatures. +func DigestWithAlgorithm(manifestBlob []byte, algorithm digest.Algorithm) (digest.Digest, error) { + return manifest.DigestWithAlgorithm(manifestBlob, algorithm) +} + // MatchesDigest returns true iff the manifest matches expectedDigest. // Error may be set if this returns false. 
// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified, diff --git a/vendor/go.podman.io/image/v5/openshift/openshift-copies.go b/vendor/go.podman.io/image/v5/openshift/openshift-copies.go index 1180d2dc8e..2799038644 100644 --- a/vendor/go.podman.io/image/v5/openshift/openshift-copies.go +++ b/vendor/go.podman.io/image/v5/openshift/openshift-copies.go @@ -843,10 +843,9 @@ func transportNew(config *restConfig) (http.RoundTripper, error) { func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) { // we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it noProxyEnv := os.Getenv("NO_PROXY") - noProxyRules := strings.Split(noProxyEnv, ",") cidrs := []netip.Prefix{} - for _, noProxyRule := range noProxyRules { + for noProxyRule := range strings.SplitSeq(noProxyEnv, ",") { prefix, err := netip.ParsePrefix(noProxyRule) if err == nil { cidrs = append(cidrs, prefix) diff --git a/vendor/go.podman.io/image/v5/pkg/compression/compression.go b/vendor/go.podman.io/image/v5/pkg/compression/compression.go index 6a6c4d4a39..a56b6e6cb9 100644 --- a/vendor/go.podman.io/image/v5/pkg/compression/compression.go +++ b/vendor/go.podman.io/image/v5/pkg/compression/compression.go @@ -7,6 +7,7 @@ import ( "io" "github.com/klauspost/pgzip" + "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" "github.com/ulikunitz/xz" "go.podman.io/image/v5/pkg/compression/internal" @@ -76,7 +77,7 @@ func XzDecompressor(r io.Reader) (io.ReadCloser, error) { } // gzipCompressor is a CompressorFunc for the gzip compression algorithm. -func gzipCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { +func gzipCompressor(r io.Writer, metadata map[string]string, level *int, digestAlgorithm digest.Algorithm) (io.WriteCloser, error) { if level != nil { return pgzip.NewWriterLevel(r, *level) } @@ -84,19 +85,18 @@ func gzipCompressor(r io.Writer, metadata map[string]string, level *int) (io.Wri } // bzip2Compressor is a CompressorFunc for the bzip2 compression algorithm. -func bzip2Compressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { +func bzip2Compressor(r io.Writer, metadata map[string]string, level *int, digestAlgorithm digest.Algorithm) (io.WriteCloser, error) { return nil, fmt.Errorf("bzip2 compression not supported") } // xzCompressor is a CompressorFunc for the xz compression algorithm. -func xzCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { +func xzCompressor(r io.Writer, metadata map[string]string, level *int, digestAlgorithm digest.Algorithm) (io.WriteCloser, error) { return xz.NewWriter(r) } // CompressStream returns the compressor by its name func CompressStream(dest io.Writer, algo Algorithm, level *int) (io.WriteCloser, error) { - m := map[string]string{} - return internal.AlgorithmCompressor(algo)(dest, m, level) + return CompressStreamWithDigest(dest, algo, level, digest.SHA256) } // CompressStreamWithMetadata returns the compressor by its name. @@ -112,7 +112,29 @@ func CompressStream(dest io.Writer, algo Algorithm, level *int) (io.WriteCloser, // // If the compression generates such metadata, it is written to the provided metadata map. 
func CompressStreamWithMetadata(dest io.Writer, metadata map[string]string, algo Algorithm, level *int) (io.WriteCloser, error) { - return internal.AlgorithmCompressor(algo)(dest, metadata, level) + return CompressStreamWithMetadataAndDigest(dest, metadata, algo, level, digest.SHA256) +} + +// CompressStreamWithDigest returns the compressor by its name with the specified digest algorithm. +func CompressStreamWithDigest(dest io.Writer, algo Algorithm, level *int, digestAlgorithm digest.Algorithm) (io.WriteCloser, error) { + m := map[string]string{} + return internal.AlgorithmCompressor(algo)(dest, m, level, digestAlgorithm) +} + +// CompressStreamWithMetadataAndDigest returns the compressor by its name with the specified digest algorithm. +// +// Compressing a stream may create integrity data that allows consuming the compressed byte stream +// while only using subsets of the compressed data (if the compressed data is seekable and most +// of the uncompressed data is already present via other means), while still protecting integrity +// of the compressed stream against unwanted modification. (In OCI container images, this metadata +// is usually carried in manifest annotations.) +// +// Such a partial decompression is not implemented by this package; it is consumed e.g. by +// github.com/containers/storage/pkg/chunked . +// +// If the compression generates such metadata, it is written to the provided metadata map. +func CompressStreamWithMetadataAndDigest(dest io.Writer, metadata map[string]string, algo Algorithm, level *int, digestAlgorithm digest.Algorithm) (io.WriteCloser, error) { + return internal.AlgorithmCompressor(algo)(dest, metadata, level, digestAlgorithm) } // DetectCompressionFormat returns an Algorithm and DecompressorFunc if the input is recognized as a compressed format, an invalid diff --git a/vendor/go.podman.io/image/v5/pkg/compression/internal/types.go b/vendor/go.podman.io/image/v5/pkg/compression/internal/types.go index e715705b43..15611a6b7d 100644 --- a/vendor/go.podman.io/image/v5/pkg/compression/internal/types.go +++ b/vendor/go.podman.io/image/v5/pkg/compression/internal/types.go @@ -1,19 +1,30 @@ package internal -import "io" +import ( + "io" + + "github.com/opencontainers/go-digest" +) // CompressorFunc writes the compressed stream to the given writer using the specified compression level. +// The metadata value is filled by the compressor, and can be used to store information about +// the compressed stream. The metadata values which are meaningful for _this_ layer data +// _may_ be _copied_ from a previous layer (passed to this compressor), but that is not guaranteed, +// and the metadata is going to be recorded for this layer, so this function should set metadata +// values only for the current layer, not for any old / reused layer data. +// Metadata keys must be unique within a single layer, and include a namespace, e.g. gzip stores +// the header information in metadata["gzip.header"] = base64(header), in a compressed tar stream. 
// -// Compressing a stream may create integrity data that allows consuming the compressed byte stream -// while only using subsets of the compressed data (if the compressed data is seekable and most -// of the uncompressed data is already present via other means), while still protecting integrity +// As for the overall file digest, it is the caller's responsibility to record it in annotations or +// elsewhere; the per-layer metadata produced here is integrity data that allows consuming the compressed +// byte stream while only using subsets of the compressed data, while still protecting the integrity // of the compressed stream against unwanted modification. (In OCI container images, this metadata // is usually carried in manifest annotations.) // // If the compression generates such metadata, it is written to the provided metadata map. // // The caller must call Close() on the stream (even if the input stream does not need closing!). -type CompressorFunc func(io.Writer, map[string]string, *int) (io.WriteCloser, error) +type CompressorFunc func(io.Writer, map[string]string, *int, digest.Algorithm) (io.WriteCloser, error) // DecompressorFunc returns the decompressed stream, given a compressed stream. // The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!). diff --git a/vendor/go.podman.io/image/v5/pkg/compression/zstd.go b/vendor/go.podman.io/image/v5/pkg/compression/zstd.go index 39ae014d2e..bbc575cdca 100644 --- a/vendor/go.podman.io/image/v5/pkg/compression/zstd.go +++ b/vendor/go.podman.io/image/v5/pkg/compression/zstd.go @@ -4,6 +4,7 @@ import ( "io" "github.com/klauspost/compress/zstd" + "github.com/opencontainers/go-digest" ) type wrapperZstdDecoder struct { @@ -46,7 +47,7 @@ func zstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) { } // zstdCompressor is a CompressorFunc for the zstd compression algorithm. -func zstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { +func zstdCompressor(r io.Writer, metadata map[string]string, level *int, digestAlgorithm digest.Algorithm) (io.WriteCloser, error) { if level == nil { return zstdWriter(r) } diff --git a/vendor/go.podman.io/image/v5/storage/storage_dest.go b/vendor/go.podman.io/image/v5/storage/storage_dest.go index 7ea6cd0531..4687c1d669 100644 --- a/vendor/go.podman.io/image/v5/storage/storage_dest.go +++ b/vendor/go.podman.io/image/v5/storage/storage_dest.go @@ -193,6 +193,12 @@ func (s *storageImageDestination) Reference() types.ImageReference { return s.imageRef } +// GetDigestAlgorithm returns the digest algorithm configured for the storage destination. +// This enables digest agility for layer DiffID computation. +func (s *storageImageDestination) GetDigestAlgorithm() digest.Algorithm { + return s.imageRef.transport.store.GetDigestAlgorithm() +} + // Close cleans up the temporary directory and additional layer store handlers. func (s *storageImageDestination) Close() error { // This is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock. @@ -1033,7 +1039,16 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si return false, err } } else if trusted.diffID != untrustedDiffID { - return false, fmt.Errorf("layer %d (blob %s) does not match config's DiffID %q", index, trusted.logString(), untrustedDiffID) + // For digest agility: if the digests use different algorithms but both are valid, + // skip the validation rather than failing.
This allows images with non-canonical + // DiffIDs to be pulled successfully. + if trusted.diffID != "" && untrustedDiffID != "" && + trusted.diffID.Algorithm() != untrustedDiffID.Algorithm() && + trusted.diffID.Algorithm().Available() && untrustedDiffID.Algorithm().Available() { + logrus.Debugf("Skipping DiffID validation for layer %d: computed %s, config expects %s (different algorithms)", index, trusted.diffID, untrustedDiffID) + } else { + return false, fmt.Errorf("layer %d (blob %s) does not match config's DiffID %q", index, trusted.logString(), untrustedDiffID) + } } } @@ -1122,7 +1137,7 @@ func (s *storageImageDestination) createNewLayer(index int, trusted trustedLayer } } - flags := make(map[string]interface{}) + flags := make(map[string]any) if untrustedUncompressedDigest != "" { flags[expectedLayerDiffIDFlag] = untrustedUncompressedDigest.String() logrus.Debugf("Setting uncompressed digest to %q for layer %q", untrustedUncompressedDigest, newLayerID) @@ -1490,13 +1505,13 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{ Key: s.lockProtected.configDigest.String(), Data: v, - Digest: digest.Canonical.FromBytes(v), + Digest: s.imageRef.transport.store.GetDigestAlgorithm().FromBytes(v), }) } // Set up to save the options.UnparsedToplevel's manifest if it differs from // the per-platform one, which is saved below. if !bytes.Equal(toplevelManifest, s.manifest) { - manifestDigest, err := manifest.Digest(toplevelManifest) + manifestDigest, err := manifest.DigestWithAlgorithm(toplevelManifest, s.imageRef.transport.store.GetDigestAlgorithm()) if err != nil { return fmt.Errorf("digesting top-level manifest: %w", err) } @@ -1645,7 +1660,7 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options // PutManifest writes the manifest to the destination. 
func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error { - digest, err := manifest.Digest(manifestBlob) + digest, err := manifest.DigestWithAlgorithm(manifestBlob, s.imageRef.transport.store.GetDigestAlgorithm()) if err != nil { return err } diff --git a/vendor/go.podman.io/image/v5/storage/storage_src.go b/vendor/go.podman.io/image/v5/storage/storage_src.go index d48381b59d..e76b9ceb03 100644 --- a/vendor/go.podman.io/image/v5/storage/storage_src.go +++ b/vendor/go.podman.io/image/v5/storage/storage_src.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "io" + "maps" "os" "slices" "sync" @@ -26,6 +27,7 @@ import ( "go.podman.io/image/v5/types" "go.podman.io/storage" "go.podman.io/storage/pkg/archive" + "go.podman.io/storage/pkg/chunked/toc" "go.podman.io/storage/pkg/ioutils" ) @@ -295,14 +297,17 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige } uncompressedLayerType := "" + gzipCompressedLayerType := "" switch manifestType { case imgspecv1.MediaTypeImageManifest: uncompressedLayerType = imgspecv1.MediaTypeImageLayer + gzipCompressedLayerType = imgspecv1.MediaTypeImageLayerGzip case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: uncompressedLayerType = manifest.DockerV2SchemaLayerMediaTypeUncompressed + gzipCompressedLayerType = manifest.DockerV2Schema2LayerMediaType } - physicalBlobInfos := []types.BlobInfo{} // Built reversed + physicalBlobInfos := []layerForCopy{} // Built reversed layerID := s.image.TopLayer for layerID != "" { layer, err := s.imageRef.transport.store.Layer(layerID) @@ -337,44 +342,69 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige s.getBlobMutex.Lock() s.getBlobMutexProtected.digestToLayerID[blobDigest] = layer.ID s.getBlobMutex.Unlock() - blobInfo := types.BlobInfo{ - Digest: blobDigest, - Size: size, - MediaType: uncompressedLayerType, + layerInfo := layerForCopy{ + digest: blobDigest, + size: size, + mediaType: uncompressedLayerType, } - physicalBlobInfos = append(physicalBlobInfos, blobInfo) + physicalBlobInfos = append(physicalBlobInfos, layerInfo) layerID = layer.Parent } slices.Reverse(physicalBlobInfos) - res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos) + res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos, gzipCompressedLayerType) if err != nil { return nil, fmt.Errorf("creating LayerInfosForCopy of image %q: %w", s.image.ID, err) } return res, nil } +// layerForCopy is information about a physical layer, an edit to be made by buildLayerInfosForCopy. +type layerForCopy struct { + digest digest.Digest + size int64 + mediaType string +} + // buildLayerInfosForCopy builds a LayerInfosForCopy return value based on manifestInfos from the original manifest, // but using layer data which we can actually produce — physicalInfos for non-empty layers, -// and image.GzippedEmptyLayer for empty ones. +// and image.GzippedEmptyLayer with gzipCompressedLayerType for empty ones. // (This is split basically only to allow easily unit-testing the part that has no dependencies on the external environment.) 
-func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []types.BlobInfo) ([]types.BlobInfo, error) { +func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []layerForCopy, gzipCompressedLayerType string) ([]types.BlobInfo, error) { nextPhysical := 0 res := make([]types.BlobInfo, len(manifestInfos)) for i, mi := range manifestInfos { if mi.EmptyLayer { res[i] = types.BlobInfo{ - Digest: image.GzippedEmptyLayerDigest, - Size: int64(len(image.GzippedEmptyLayer)), - MediaType: mi.MediaType, + Digest: image.GzippedEmptyLayerDigest, + Size: int64(len(image.GzippedEmptyLayer)), + URLs: mi.URLs, + Annotations: mi.Annotations, + MediaType: gzipCompressedLayerType, } } else { if nextPhysical >= len(physicalInfos) { return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physicalInfos)) } - res[i] = physicalInfos[nextPhysical] // FIXME? Should we preserve more data in manifestInfos? Notably the current approach correctly removes zstd:chunked metadata annotations. + res[i] = types.BlobInfo{ + Digest: physicalInfos[nextPhysical].digest, + Size: physicalInfos[nextPhysical].size, + URLs: mi.URLs, + Annotations: mi.Annotations, + MediaType: physicalInfos[nextPhysical].mediaType, + } nextPhysical++ } + // We have changed the compression format, so strip compression-related annotations. + if res[i].Annotations != nil { + maps.DeleteFunc(res[i].Annotations, func(key string, _ string) bool { + _, ok := toc.ChunkedAnnotations[key] + return ok + }) + if len(res[i].Annotations) == 0 { + res[i].Annotations = nil + } + } } if nextPhysical != len(physicalInfos) { return nil, fmt.Errorf("used only %d out of %d physical layers", nextPhysical, len(physicalInfos)) diff --git a/vendor/go.podman.io/image/v5/storage/storage_transport.go b/vendor/go.podman.io/image/v5/storage/storage_transport.go index 2f0a18787c..aa45688132 100644 --- a/vendor/go.podman.io/image/v5/storage/storage_transport.go +++ b/vendor/go.podman.io/image/v5/storage/storage_transport.go @@ -409,6 +409,17 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { // validateImageID returns nil if id is a valid (full) image ID, or an error func validateImageID(id string) error { - _, err := digest.Parse("sha256:" + id) + // For digest agility, determine the algorithm based on the hex length + var algorithm string + switch len(id) { + case 64: // SHA-256 + algorithm = "sha256" + case 128: // SHA-512 + algorithm = "sha512" + default: + // Fall back to SHA-256 for backward compatibility + algorithm = "sha256" + } + _, err := digest.Parse(algorithm + ":" + id) return err } diff --git a/vendor/go.podman.io/image/v5/version/version.go b/vendor/go.podman.io/image/v5/version/version.go index e577735ee1..ac62a17cec 100644 --- a/vendor/go.podman.io/image/v5/version/version.go +++ b/vendor/go.podman.io/image/v5/version/version.go @@ -6,12 +6,12 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 37 + VersionMinor = 38 // VersionPatch is for backwards-compatible bug fixes VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "" + VersionDev = "-dev" ) // Version is the specification version that the package types support. 
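For reference, validateImageID above infers the digest algorithm from the bare hex length of an image ID: 64 hex characters means sha256, 128 means sha512, and anything else falls back to sha256 for backward compatibility. The following stand-alone sketch is not part of the patch; it only illustrates that mapping with go-digest, and the helper name and sample IDs are made up for the example.

package main

import (
	_ "crypto/sha256" // registers sha256 so go-digest reports it as available
	_ "crypto/sha512" // registers sha512 so go-digest reports it as available
	"fmt"

	"github.com/opencontainers/go-digest"
)

// algorithmForImageID mirrors the length-based detection in validateImageID:
// a 128-character hex ID is treated as sha512, everything else as sha256.
func algorithmForImageID(id string) digest.Algorithm {
	if len(id) == 128 {
		return digest.SHA512
	}
	return digest.SHA256
}

func main() {
	ids := []string{
		digest.SHA256.FromString("example").Encoded(), // 64 hex characters
		digest.SHA512.FromString("example").Encoded(), // 128 hex characters
	}
	for _, id := range ids {
		alg := algorithmForImageID(id)
		d := digest.NewDigestFromEncoded(alg, id)
		fmt.Printf("%s -> %s (valid: %v)\n", id[:12], alg, d.Validate() == nil)
	}
}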
diff --git a/vendor/go.podman.io/storage/VERSION b/vendor/go.podman.io/storage/VERSION index 4d5fde5bd1..8a37c6cd9c 100644 --- a/vendor/go.podman.io/storage/VERSION +++ b/vendor/go.podman.io/storage/VERSION @@ -1 +1 @@ -1.60.0 +1.61.0-dev diff --git a/vendor/go.podman.io/storage/check.go b/vendor/go.podman.io/storage/check.go index f0902dfced..3fb648d8c1 100644 --- a/vendor/go.podman.io/storage/check.go +++ b/vendor/go.podman.io/storage/check.go @@ -989,15 +989,15 @@ func (c *checkDirectory) add(path string, typeflag byte, uid, gid int, size int6 // remove removes an item from a checkDirectory func (c *checkDirectory) remove(path string) { - components := strings.Split(path, "/") - if len(components) == 1 { - delete(c.directory, components[0]) - delete(c.file, components[0]) + parent, rest, ok := strings.Cut(path, "/") + if !ok { + delete(c.directory, parent) + delete(c.file, parent) return } - subdirectory := c.directory[components[0]] + subdirectory := c.directory[parent] if subdirectory != nil { - subdirectory.remove(strings.Join(components[1:], "/")) + subdirectory.remove(rest) } } @@ -1019,7 +1019,7 @@ func (c *checkDirectory) header(hdr *tar.Header) { // root directory of the archive, which is not always the // same as being relative to hdr.Name directory := c - for _, component := range strings.Split(path.Clean(hdr.Linkname), "/") { + for component := range strings.SplitSeq(path.Clean(hdr.Linkname), "/") { if component == "." || component == ".." { continue } diff --git a/vendor/go.podman.io/storage/containers.go b/vendor/go.podman.io/storage/containers.go index 5c10453771..33b317c6dc 100644 --- a/vendor/go.podman.io/storage/containers.go +++ b/vendor/go.podman.io/storage/containers.go @@ -145,9 +145,10 @@ type rwContainerStore interface { type containerStore struct { // The following fields are only set when constructing containerStore, and must never be modified afterwards. // They are safe to access without any other locking. - lockfile *lockfile.LockFile // Synchronizes readers vs. writers of the _filesystem data_, both cross-process and in-process. - dir string - jsonPath [numContainerLocationIndex]string + lockfile *lockfile.LockFile // Synchronizes readers vs. writers of the _filesystem data_, both cross-process and in-process. + dir string + digestType string + jsonPath [numContainerLocationIndex]string inProcessLock sync.RWMutex // Can _only_ be obtained with lockfile held. // The following fields can only be read/written with read/write ownership of inProcessLock, respectively. 
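The containerStore above (and the imageStore later in this diff) now carries a digestType string, and the SetBigData hunk below resolves it through a getDigestAlgorithmFromType helper whose definition does not appear in this portion of the diff. The sketch below is only an assumption about what that mapping looks like, included for readability; the real helper may differ.

package storage // placement and defaults are assumptions, not taken from this diff

import "github.com/opencontainers/go-digest"

// getDigestAlgorithmFromType maps a storage-level digest type string
// ("sha256" or "sha512") to a go-digest Algorithm. Assumed behavior:
// empty or unrecognized values fall back to the canonical sha256 algorithm.
func getDigestAlgorithmFromType(digestType string) digest.Algorithm {
	if digestType == "sha512" {
		return digest.SHA512
	}
	// "", "sha256", and unknown values all use the canonical (sha256) algorithm.
	return digest.Canonical
}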
@@ -571,7 +572,7 @@ func (r *containerStore) saveFor(modifiedContainer *Container) error { return r.save(containerLocation(modifiedContainer)) } -func newContainerStore(dir string, runDir string, transient bool) (rwContainerStore, error) { +func newContainerStore(dir string, runDir string, transient bool, digestType string) (rwContainerStore, error) { if err := os.MkdirAll(dir, 0o700); err != nil { return nil, err } @@ -587,8 +588,9 @@ func newContainerStore(dir string, runDir string, transient bool) (rwContainerSt return nil, err } cstore := containerStore{ - lockfile: lockfile, - dir: dir, + lockfile: lockfile, + dir: dir, + digestType: digestType, jsonPath: [numContainerLocationIndex]string{ filepath.Join(dir, "containers.json"), filepath.Join(volatileDir, "volatile-containers.json"), @@ -932,7 +934,8 @@ func (r *containerStore) SetBigData(id, key string, data []byte) error { c.BigDataDigests = make(map[string]digest.Digest) } oldDigest, digestOk := c.BigDataDigests[key] - newDigest := digest.Canonical.FromBytes(data) + digestAlgorithm := getDigestAlgorithmFromType(r.digestType) + newDigest := digestAlgorithm.FromBytes(data) c.BigDataDigests[key] = newDigest if !sizeOk || oldSize != c.BigDataSizes[key] || !digestOk || oldDigest != newDigest { save = true diff --git a/vendor/go.podman.io/storage/drivers/aufs/aufs.go b/vendor/go.podman.io/storage/drivers/aufs/aufs.go deleted file mode 100644 index b3eb33730a..0000000000 --- a/vendor/go.podman.io/storage/drivers/aufs/aufs.go +++ /dev/null @@ -1,795 +0,0 @@ -//go:build linux - -/* - -aufs driver directory structure - - . - ā”œā”€ā”€ layers // Metadata of layers - │ ā”œā”€ā”€ 1 - │ ā”œā”€ā”€ 2 - │ └── 3 - ā”œā”€ā”€ diff // Content of the layer - │ ā”œā”€ā”€ 1 // Contains layers that need to be mounted for the id - │ ā”œā”€ā”€ 2 - │ └── 3 - └── mnt // Mount points for the rw layers to be mounted - ā”œā”€ā”€ 1 - ā”œā”€ā”€ 2 - └── 3 - -*/ - -package aufs - -import ( - "bufio" - "errors" - "fmt" - "io" - "io/fs" - "os" - "path" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/opencontainers/selinux/go-selinux/label" - "github.com/sirupsen/logrus" - "github.com/vbatts/tar-split/tar/storage" - graphdriver "go.podman.io/storage/drivers" - "go.podman.io/storage/internal/tempdir" - "go.podman.io/storage/pkg/archive" - "go.podman.io/storage/pkg/chrootarchive" - "go.podman.io/storage/pkg/directory" - "go.podman.io/storage/pkg/fileutils" - "go.podman.io/storage/pkg/idtools" - "go.podman.io/storage/pkg/locker" - mountpk "go.podman.io/storage/pkg/mount" - "go.podman.io/storage/pkg/parsers" - "go.podman.io/storage/pkg/system" - "go.podman.io/storage/pkg/unshare" - "golang.org/x/sys/unix" -) - -var ( - // ErrAufsNotSupported is returned if aufs is not supported by the host. - ErrAufsNotSupported = fmt.Errorf("aufs was not found in /proc/filesystems") - // ErrAufsNested means aufs cannot be used bc we are in a user namespace - ErrAufsNested = fmt.Errorf("aufs cannot be used in non-init user namespace") - backingFs = "" - - enableDirpermLock sync.Once - enableDirperm bool -) - -const defaultPerms = os.FileMode(0o555) - -func init() { - graphdriver.MustRegister("aufs", Init) -} - -// Driver contains information about the filesystem mounted. -type Driver struct { - sync.Mutex - root string - ctr *graphdriver.RefCounter - pathCacheLock sync.Mutex - pathCache map[string]string - naiveDiff graphdriver.DiffDriver - locker *locker.Locker - mountOptions string -} - -// Init returns a new AUFS driver. -// An error is returned if AUFS is not supported. 
-func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) { - // Try to load the aufs kernel module - if err := supportsAufs(); err != nil { - return nil, fmt.Errorf("kernel does not support aufs: %w", graphdriver.ErrNotSupported) - } - - fsMagic, err := graphdriver.GetFSMagic(home) - if err != nil { - return nil, err - } - if fsName, ok := graphdriver.FsNames[fsMagic]; ok { - backingFs = fsName - } - - switch fsMagic { - case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs: - logrus.Errorf("AUFS is not supported over %s", backingFs) - return nil, fmt.Errorf("aufs is not supported over %q: %w", backingFs, graphdriver.ErrIncompatibleFS) - } - - var mountOptions string - for _, option := range options.DriverOptions { - key, val, err := parsers.ParseKeyValueOpt(option) - if err != nil { - return nil, err - } - key = strings.ToLower(key) - switch key { - case "aufs.mountopt": - mountOptions = val - default: - return nil, fmt.Errorf("option %s not supported", option) - } - } - paths := []string{ - "mnt", - "diff", - "layers", - } - - a := &Driver{ - root: home, - pathCache: make(map[string]string), - ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)), - locker: locker.New(), - mountOptions: mountOptions, - } - - // Create the root aufs driver dir and return - // if it already exists - // If not populate the dir structure - if err := os.MkdirAll(home, 0o700); err != nil { - if os.IsExist(err) { - return a, nil - } - return nil, err - } - - if err := mountpk.MakePrivate(home); err != nil { - return nil, err - } - - // Populate the dir structure - for _, p := range paths { - if err := os.MkdirAll(path.Join(home, p), 0o700); err != nil { - return nil, err - } - } - logger := logrus.WithFields(logrus.Fields{ - "module": "graphdriver", - "driver": "aufs", - }) - - for _, path := range []string{"mnt", "diff"} { - p := filepath.Join(home, path) - entries, err := os.ReadDir(p) - if err != nil { - logger.WithError(err).WithField("dir", p).Error("error reading dir entries") - continue - } - for _, entry := range entries { - if !entry.IsDir() { - continue - } - if strings.HasSuffix(entry.Name(), "-removing") { - logger.WithField("dir", entry.Name()).Debug("Cleaning up stale layer dir") - if err := system.EnsureRemoveAll(filepath.Join(p, entry.Name())); err != nil { - logger.WithField("dir", entry.Name()).WithError(err).Error("Error removing stale layer dir") - } - } - } - } - - a.naiveDiff = graphdriver.NewNaiveDiffDriver(a, a) - return a, nil -} - -// Return a nil error if the kernel supports aufs -func supportsAufs() error { - if unshare.IsRootless() { - return ErrAufsNested - } - - f, err := os.Open("/proc/filesystems") - if err != nil { - return err - } - defer f.Close() - - s := bufio.NewScanner(f) - for s.Scan() { - if strings.Contains(s.Text(), "aufs") { - return nil - } - } - return ErrAufsNotSupported -} - -func (a *Driver) rootPath() string { - return a.root -} - -func (*Driver) String() string { - return "aufs" -} - -// Status returns current information about the filesystem such as root directory, number of directories mounted, etc. 
-func (a *Driver) Status() [][2]string { - ids, _ := loadIds(path.Join(a.rootPath(), "layers")) - return [][2]string{ - {"Root Dir", a.rootPath()}, - {"Backing Filesystem", backingFs}, - {"Dirs", fmt.Sprintf("%d", len(ids))}, - {"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())}, - } -} - -// Metadata not implemented -func (a *Driver) Metadata(id string) (map[string]string, error) { - return nil, nil //nolint: nilnil -} - -// Exists returns true if the given id is registered with -// this driver -func (a *Driver) Exists(id string) bool { - if err := fileutils.Lexists(path.Join(a.rootPath(), "layers", id)); err != nil { - return false - } - return true -} - -// ListLayers() returns all of the layers known to the driver. -func (a *Driver) ListLayers() ([]string, error) { - diffsDir := filepath.Join(a.rootPath(), "diff") - entries, err := os.ReadDir(diffsDir) - if err != nil { - return nil, err - } - results := make([]string, 0, len(entries)) - for _, entry := range entries { - if !entry.IsDir() { - continue - } - results = append(results, entry.Name()) - } - return results, nil -} - -// AdditionalImageStores returns additional image stores supported by the driver -func (a *Driver) AdditionalImageStores() []string { - return nil -} - -// CreateFromTemplate creates a layer with the same contents and parent as another layer. -func (a *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error { - if opts == nil { - opts = &graphdriver.CreateOpts{} - } - return graphdriver.NaiveCreateFromTemplate(a, id, template, templateIDMappings, parent, parentIDMappings, opts, readWrite) -} - -// CreateReadWrite creates a layer that is writable for use as a container -// file system. -func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - return a.Create(id, parent, opts) -} - -// Create three folders for each id -// mnt, layers, and diff -func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { - if opts != nil && len(opts.StorageOpt) != 0 { - return fmt.Errorf("--storage-opt is not supported for aufs") - } - - if err := a.createDirsFor(id, parent); err != nil { - return err - } - // Write the layers metadata - f, err := os.Create(path.Join(a.rootPath(), "layers", id)) - if err != nil { - return err - } - defer f.Close() - - if parent != "" { - ids, err := getParentIDs(a.rootPath(), parent) - if err != nil { - return err - } - - if _, err := fmt.Fprintln(f, parent); err != nil { - return err - } - for _, i := range ids { - if _, err := fmt.Fprintln(f, i); err != nil { - return err - } - } - } - - return nil -} - -// createDirsFor creates two directories for the given id. -// mnt and diff -func (a *Driver) createDirsFor(id, parent string) error { - paths := []string{ - "mnt", - "diff", - } - - // Directory permission is 0555. - // The path of directories are /mnt/ - // and /diff/ - for _, p := range paths { - rootPair := idtools.IDPair{UID: 0, GID: 0} - rootPerms := defaultPerms - if parent != "" { - st, err := system.Stat(path.Join(a.rootPath(), p, parent)) - if err != nil { - return err - } - rootPerms = os.FileMode(st.Mode()) - rootPair.UID = int(st.UID()) - rootPair.GID = int(st.GID()) - } - if err := idtools.MkdirAllAndChownNew(path.Join(a.rootPath(), p, id), rootPerms, rootPair); err != nil { - return err - } - } - return nil -} - -// Remove will unmount and remove the given id. 
-func (a *Driver) Remove(id string) error { - a.locker.Lock(id) - defer func() { - _ = a.locker.Unlock(id) - }() - a.pathCacheLock.Lock() - mountpoint, exists := a.pathCache[id] - a.pathCacheLock.Unlock() - if !exists { - mountpoint = a.getMountpoint(id) - } - - logger := logrus.WithFields(logrus.Fields{ - "module": "graphdriver", - "driver": "aufs", - "layer": id, - }) - - var retries int - for { - mounted, err := a.mounted(mountpoint) - if err != nil { - if os.IsNotExist(err) { - break - } - return err - } - if !mounted { - break - } - - err = a.unmount(mountpoint) - if err == nil { - break - } - - if err != unix.EBUSY { - return fmt.Errorf("aufs: unmount error: %s: %w", mountpoint, err) - } - if retries >= 5 { - return fmt.Errorf("aufs: unmount error after retries: %s: %w", mountpoint, err) - } - // If unmount returns EBUSY, it could be a transient error. Sleep and retry. - retries++ - logger.Warnf("unmount failed due to EBUSY: retry count: %d", retries) - time.Sleep(100 * time.Millisecond) - } - - // Remove the layers file for the id - if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("removing layers dir for %s: %w", id, err) - } - - if err := atomicRemove(a.getDiffPath(id)); err != nil { - return fmt.Errorf("could not remove diff path for id %s: %w", id, err) - } - - // Atomically remove each directory in turn by first moving it out of the - // way (so that container runtime doesn't find it anymore) before doing removal of - // the whole tree. - if err := atomicRemove(mountpoint); err != nil { - if errors.Is(err, unix.EBUSY) { - logger.WithField("dir", mountpoint).WithError(err).Warn("error performing atomic remove due to EBUSY") - } - return fmt.Errorf("could not remove mountpoint for id %s: %w", id, err) - } - - a.pathCacheLock.Lock() - delete(a.pathCache, id) - a.pathCacheLock.Unlock() - return nil -} - -func atomicRemove(source string) error { - target := source + "-removing" - - err := os.Rename(source, target) - switch { - case err == nil, os.IsNotExist(err): - case os.IsExist(err): - // Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove - if e := fileutils.Exists(source); !os.IsNotExist(e) { - return fmt.Errorf("target rename dir '%s' exists but should not, this needs to be manually cleaned up: %w", target, err) - } - default: - return fmt.Errorf("preparing atomic delete: %w", err) - } - - return system.EnsureRemoveAll(target) -} - -// Get returns the rootfs path for the id. -// This will mount the dir at its given path -func (a *Driver) Get(id string, options graphdriver.MountOpts) (string, error) { - a.locker.Lock(id) - defer func() { - _ = a.locker.Unlock(id) - }() - - parents, err := a.getParentLayerPaths(id) - if err != nil && !os.IsNotExist(err) { - return "", err - } - - a.pathCacheLock.Lock() - m, exists := a.pathCache[id] - a.pathCacheLock.Unlock() - - if !exists { - m = a.getDiffPath(id) - if len(parents) > 0 { - m = a.getMountpoint(id) - } - } - if count := a.ctr.Increment(m); count > 1 { - return m, nil - } - - // If a dir does not have a parent ( no layers )do not try to mount - // just return the diff path to the data - if len(parents) > 0 { - if err := a.mount(id, m, parents, options); err != nil { - return "", err - } - } - - a.pathCacheLock.Lock() - a.pathCache[id] = m - a.pathCacheLock.Unlock() - return m, nil -} - -// Put unmounts and updates list of active mounts. 
-func (a *Driver) Put(id string) error { - a.locker.Lock(id) - defer func() { - _ = a.locker.Unlock(id) - }() - - a.pathCacheLock.Lock() - m, exists := a.pathCache[id] - if !exists { - m = a.getMountpoint(id) - a.pathCache[id] = m - } - a.pathCacheLock.Unlock() - if count := a.ctr.Decrement(m); count > 0 { - return nil - } - - err := a.unmount(m) - if err != nil { - logrus.Debugf("Failed to unmount %s aufs: %v", id, err) - } - return err -} - -// ReadWriteDiskUsage returns the disk usage of the writable directory for the ID. -// For AUFS, it queries the mountpoint for this ID. -func (a *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) { - a.locker.Lock(id) - defer func() { - _ = a.locker.Unlock(id) - }() - a.pathCacheLock.Lock() - m, exists := a.pathCache[id] - if !exists { - m = a.getMountpoint(id) - a.pathCache[id] = m - } - a.pathCacheLock.Unlock() - return directory.Usage(m) -} - -// isParent returns if the passed in parent is the direct parent of the passed in layer -func (a *Driver) isParent(id, parent string) bool { - parents, _ := getParentIDs(a.rootPath(), id) - if parent == "" && len(parents) > 0 { - return false - } - return len(parents) == 0 || parent == parents[0] -} - -// Diff produces an archive of the changes between the specified -// layer and its parent layer which may be "". -func (a *Driver) Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error) { - if !a.isParent(id, parent) { - return a.naiveDiff.Diff(id, idMappings, parent, parentMappings, mountLabel) - } - - if idMappings == nil { - idMappings = &idtools.IDMappings{} - } - - // AUFS doesn't need the parent layer to produce a diff. - return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ - Compression: archive.Uncompressed, - ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir}, - UIDMaps: idMappings.UIDs(), - GIDMaps: idMappings.GIDs(), - }) -} - -type fileGetNilCloser struct { - storage.FileGetter -} - -func (f fileGetNilCloser) Close() error { - return nil -} - -// DiffGetter returns a FileGetCloser that can read files from the directory that -// contains files for the layer differences. Used for direct access for tar-split. -func (a *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { - p := path.Join(a.rootPath(), "diff", id) - return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil -} - -func (a *Driver) applyDiff(id string, idMappings *idtools.IDMappings, diff io.Reader) error { - if idMappings == nil { - idMappings = &idtools.IDMappings{} - } - return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ - UIDMaps: idMappings.UIDs(), - GIDMaps: idMappings.GIDs(), - }) -} - -// DiffSize calculates the changes between the specified id -// and its parent and returns the size in bytes of the changes -// relative to its base filesystem directory. -func (a *Driver) DiffSize(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error) { - if !a.isParent(id, parent) { - return a.naiveDiff.DiffSize(id, idMappings, parent, parentMappings, mountLabel) - } - // AUFS doesn't need the parent layer to calculate the diff size. 
- return directory.Size(path.Join(a.rootPath(), "diff", id)) -} - -// ApplyDiff extracts the changeset from the given diff into the -// layer with the specified id and parent, returning the size of the -// new layer in bytes. -func (a *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) (size int64, err error) { - if !a.isParent(id, parent) { - return a.naiveDiff.ApplyDiff(id, parent, options) - } - - // AUFS doesn't need the parent id to apply the diff if it is the direct parent. - if err = a.applyDiff(id, options.Mappings, options.Diff); err != nil { - return - } - - return directory.Size(path.Join(a.rootPath(), "diff", id)) -} - -// Changes produces a list of changes between the specified layer -// and its parent layer. If parent is "", then all changes will be ADD changes. -func (a *Driver) Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error) { - if !a.isParent(id, parent) { - return a.naiveDiff.Changes(id, idMappings, parent, parentMappings, mountLabel) - } - - // AUFS doesn't have snapshots, so we need to get changes from all parent - // layers. - layers, err := a.getParentLayerPaths(id) - if err != nil { - return nil, err - } - return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) -} - -func (a *Driver) getParentLayerPaths(id string) ([]string, error) { - parentIds, err := getParentIDs(a.rootPath(), id) - if err != nil { - return nil, err - } - layers := make([]string, len(parentIds)) - - // Get the diff paths for all the parent ids - for i, p := range parentIds { - layers[i] = path.Join(a.rootPath(), "diff", p) - } - return layers, nil -} - -func (a *Driver) mount(id string, target string, layers []string, options graphdriver.MountOpts) error { - a.Lock() - defer a.Unlock() - - // If the id is mounted or we get an error return - if mounted, err := a.mounted(target); err != nil || mounted { - return err - } - - rw := a.getDiffPath(id) - - if err := a.aufsMount(layers, rw, target, options); err != nil { - return fmt.Errorf("creating aufs mount to %s: %w", target, err) - } - return nil -} - -func (a *Driver) unmount(mountPath string) error { - a.Lock() - defer a.Unlock() - - if mounted, err := a.mounted(mountPath); err != nil || !mounted { - return err - } - if err := Unmount(mountPath); err != nil { - return err - } - return nil -} - -func (a *Driver) mounted(mountpoint string) (bool, error) { - return graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint) -} - -// Cleanup aufs and unmount all mountpoints -func (a *Driver) Cleanup() error { - var dirs []string - if err := filepath.WalkDir(a.mntPath(), func(path string, d fs.DirEntry, err error) error { - if err != nil { - return err - } - if !d.IsDir() { - return nil - } - dirs = append(dirs, path) - return nil - }); err != nil { - return err - } - - for _, m := range dirs { - if err := a.unmount(m); err != nil { - logrus.Debugf("aufs error unmounting %s: %s", m, err) - } - } - return mountpk.Unmount(a.root) -} - -func (a *Driver) aufsMount(ro []string, rw, target string, options graphdriver.MountOpts) (err error) { - defer func() { - if err != nil { - if err1 := Unmount(target); err1 != nil { - logrus.Warnf("Unmount %q: %v", target, err1) - } - } - }() - - // Mount options are clipped to page size(4096 bytes). If there are more - // layers then these are remounted individually using append. 
- - offset := 54 - if useDirperm() { - offset += len(",dirperm1") - } - b := make([]byte, unix.Getpagesize()-len(options.MountLabel)-offset) // room for xino & mountLabel - bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) - - index := 0 - for ; index < len(ro); index++ { - layer := fmt.Sprintf(":%s=ro+wh", ro[index]) - if bp+len(layer) > len(b) { - break - } - bp += copy(b[bp:], layer) - } - - opts := "dio,xino=/dev/shm/aufs.xino" - mountOptions := a.mountOptions - if len(options.Options) > 0 { - mountOptions = strings.Join(options.Options, ",") - } - if mountOptions != "" { - opts += fmt.Sprintf(",%s", mountOptions) - } - - if useDirperm() { - opts += ",dirperm1" - } - data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), options.MountLabel) - if err = mount("none", target, "aufs", 0, data); err != nil { - return - } - - for ; index < len(ro); index++ { - layer := fmt.Sprintf(":%s=ro+wh", ro[index]) - data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), options.MountLabel) - if err = mount("none", target, "aufs", unix.MS_REMOUNT, data); err != nil { - return - } - } - - return -} - -// useDirperm checks dirperm1 mount option can be used with the current -// version of aufs. -func useDirperm() bool { - enableDirpermLock.Do(func() { - base, err := os.MkdirTemp("", "storage-aufs-base") - if err != nil { - logrus.Errorf("Checking dirperm1: %v", err) - return - } - defer os.RemoveAll(base) - - union, err := os.MkdirTemp("", "storage-aufs-union") - if err != nil { - logrus.Errorf("Checking dirperm1: %v", err) - return - } - defer os.RemoveAll(union) - - opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base) - if err := mount("none", union, "aufs", 0, opts); err != nil { - return - } - enableDirperm = true - if err := Unmount(union); err != nil { - logrus.Errorf("Checking dirperm1: failed to unmount %v", err) - } - }) - return enableDirperm -} - -// UpdateLayerIDMap updates ID mappings in a layer from matching the ones -// specified by toContainer to those specified by toHost. -func (a *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error { - return fmt.Errorf("aufs doesn't support changing ID mappings") -} - -// SupportsShifting tells whether the driver support shifting of the UIDs/GIDs to the provided mapping in an userNS -func (a *Driver) SupportsShifting(uidmap, gidmap []idtools.IDMap) bool { - return false -} - -// Dedup performs deduplication of the driver's storage. -func (a *Driver) Dedup(req graphdriver.DedupArgs) (graphdriver.DedupResult, error) { - return graphdriver.DedupResult{}, nil -} - -// DeferredRemove is not implemented. -// It calls Remove directly. -func (a *Driver) DeferredRemove(id string) (tempdir.CleanupTempDirFunc, error) { - return nil, a.Remove(id) -} - -// GetTempDirRootDirs is not implemented. 
-func (a *Driver) GetTempDirRootDirs() []string { - return []string{} -} diff --git a/vendor/go.podman.io/storage/drivers/aufs/dirs.go b/vendor/go.podman.io/storage/drivers/aufs/dirs.go deleted file mode 100644 index 9587bf63c6..0000000000 --- a/vendor/go.podman.io/storage/drivers/aufs/dirs.go +++ /dev/null @@ -1,63 +0,0 @@ -//go:build linux - -package aufs - -import ( - "bufio" - "os" - "path" -) - -// Return all the directories -func loadIds(root string) ([]string, error) { - dirs, err := os.ReadDir(root) - if err != nil { - return nil, err - } - out := []string{} - for _, d := range dirs { - if !d.IsDir() { - out = append(out, d.Name()) - } - } - return out, nil -} - -// Read the layers file for the current id and return all the -// layers represented by new lines in the file -// -// If there are no lines in the file then the id has no parent -// and an empty slice is returned. -func getParentIDs(root, id string) ([]string, error) { - f, err := os.Open(path.Join(root, "layers", id)) - if err != nil { - return nil, err - } - defer f.Close() - - out := []string{} - s := bufio.NewScanner(f) - - for s.Scan() { - if t := s.Text(); t != "" { - out = append(out, s.Text()) - } - } - return out, s.Err() -} - -func (a *Driver) getMountpoint(id string) string { - return path.Join(a.mntPath(), id) -} - -func (a *Driver) mntPath() string { - return path.Join(a.rootPath(), "mnt") -} - -func (a *Driver) getDiffPath(id string) string { - return path.Join(a.diffPath(), id) -} - -func (a *Driver) diffPath() string { - return path.Join(a.rootPath(), "diff") -} diff --git a/vendor/go.podman.io/storage/drivers/aufs/mount.go b/vendor/go.podman.io/storage/drivers/aufs/mount.go deleted file mode 100644 index 51b3d6dfa6..0000000000 --- a/vendor/go.podman.io/storage/drivers/aufs/mount.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build linux - -package aufs - -import ( - "os/exec" - - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -// Unmount the target specified. 
-func Unmount(target string) error { - if err := exec.Command("auplink", target, "flush").Run(); err != nil { - logrus.Warnf("Couldn't run auplink before unmount %s: %s", target, err) - } - if err := unix.Unmount(target, 0); err != nil { - return err - } - return nil -} diff --git a/vendor/go.podman.io/storage/drivers/aufs/mount_linux.go b/vendor/go.podman.io/storage/drivers/aufs/mount_linux.go deleted file mode 100644 index 937104ba3f..0000000000 --- a/vendor/go.podman.io/storage/drivers/aufs/mount_linux.go +++ /dev/null @@ -1,7 +0,0 @@ -package aufs - -import "golang.org/x/sys/unix" - -func mount(source string, target string, fstype string, flags uintptr, data string) error { - return unix.Mount(source, target, fstype, flags, data) -} diff --git a/vendor/go.podman.io/storage/drivers/chown_darwin.go b/vendor/go.podman.io/storage/drivers/chown_darwin.go index 7490973212..eea633520c 100644 --- a/vendor/go.podman.io/storage/drivers/chown_darwin.go +++ b/vendor/go.podman.io/storage/drivers/chown_darwin.go @@ -37,7 +37,7 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai i := inode{ Dev: uint64(st.Dev), - Ino: uint64(st.Ino), + Ino: st.Ino, } c.mutex.Lock() _, found := c.inodes[i] diff --git a/vendor/go.podman.io/storage/drivers/driver_linux.go b/vendor/go.podman.io/storage/drivers/driver_linux.go index 7a95a1eaa6..9e49d98af4 100644 --- a/vendor/go.podman.io/storage/drivers/driver_linux.go +++ b/vendor/go.podman.io/storage/drivers/driver_linux.go @@ -93,7 +93,6 @@ var ( // Slice of drivers that should be used in an order Priority = []string{ "overlay", - "aufs", "btrfs", "zfs", "vfs", diff --git a/vendor/go.podman.io/storage/drivers/fsdiff.go b/vendor/go.podman.io/storage/drivers/fsdiff.go index d80d00368c..37684466d2 100644 --- a/vendor/go.podman.io/storage/drivers/fsdiff.go +++ b/vendor/go.podman.io/storage/drivers/fsdiff.go @@ -22,7 +22,6 @@ var ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer // capability of the Diffing methods which it may or may not // support on its own. See the comment on the exported // NewNaiveDiffDriver function below. -// Notably, the AUFS driver doesn't need to be wrapped like this. type NaiveDiffDriver struct { ProtoDriver LayerIDMapUpdater diff --git a/vendor/go.podman.io/storage/drivers/overlay/mount.go b/vendor/go.podman.io/storage/drivers/overlay/mount.go index 250a185a93..97af37c772 100644 --- a/vendor/go.podman.io/storage/drivers/overlay/mount.go +++ b/vendor/go.podman.io/storage/drivers/overlay/mount.go @@ -101,7 +101,7 @@ func mountOverlayFromMain() { // Split out the various options, since we need to manipulate the // paths, but we don't want to mess with other options. var upperk, upperv, workk, workv, lowerk, lowerv, labelk, labelv, others string - for _, arg := range strings.Split(options.Label, ",") { + for arg := range strings.SplitSeq(options.Label, ",") { key, val, _ := strings.Cut(arg, "=") switch key { case "upperdir": @@ -139,10 +139,9 @@ func mountOverlayFromMain() { // Get a descriptor for each lower, and use that descriptor's name as // the new value for the list of lowers, because it's shorter. 
if lowerv != "" { - lowers := strings.Split(lowerv, ":") var newLowers []string dataOnly := false - for _, lowerPath := range lowers { + for lowerPath := range strings.SplitSeq(lowerv, ":") { if lowerPath == "" { dataOnly = true continue diff --git a/vendor/go.podman.io/storage/drivers/overlay/overlay.go b/vendor/go.podman.io/storage/drivers/overlay/overlay.go index f65b2c515a..c08e060466 100644 --- a/vendor/go.podman.io/storage/drivers/overlay/overlay.go +++ b/vendor/go.podman.io/storage/drivers/overlay/overlay.go @@ -501,7 +501,7 @@ func parseOptions(options []string) (*overlayOptions, error) { if val == "" { continue } - for _, store := range strings.Split(val, ",") { + for store := range strings.SplitSeq(val, ",") { store = filepath.Clean(store) if !filepath.IsAbs(store) { return nil, fmt.Errorf("overlay: image path %q is not absolute. Can not be relative", store) @@ -521,7 +521,7 @@ func parseOptions(options []string) (*overlayOptions, error) { if val == "" { continue } - for _, lstore := range strings.Split(val, ",") { + for lstore := range strings.SplitSeq(val, ",") { elems := strings.Split(lstore, ":") lstore = filepath.Clean(elems[0]) if !filepath.IsAbs(lstore) { @@ -1196,8 +1196,8 @@ func (d *Driver) getLower(parent string) (string, error) { parentLower, err := os.ReadFile(path.Join(parentDir, lowerFile)) if err == nil { - parentLowers := strings.Split(string(parentLower), ":") - lowers = append(lowers, parentLowers...) + parentLowers := strings.SplitSeq(string(parentLower), ":") + lowers = slices.AppendSeq(lowers, parentLowers) } return strings.Join(lowers, ":"), nil } @@ -1247,7 +1247,7 @@ func (d *Driver) getLowerDirs(id string) ([]string, error) { var lowersArray []string lowers, err := os.ReadFile(path.Join(d.dir(id), lowerFile)) if err == nil { - for _, s := range strings.Split(string(lowers), ":") { + for s := range strings.SplitSeq(string(lowers), ":") { lower := d.dir(s) lp, err := os.Readlink(lower) // if the link does not exist, we lost the symlinks during a sudden reboot. diff --git a/vendor/go.podman.io/storage/drivers/register/register_aufs.go b/vendor/go.podman.io/storage/drivers/register/register_aufs.go deleted file mode 100644 index 595c25c219..0000000000 --- a/vendor/go.podman.io/storage/drivers/register/register_aufs.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build !exclude_graphdriver_aufs && linux - -package register - -import ( - // register the aufs graphdriver - _ "go.podman.io/storage/drivers/aufs" -) diff --git a/vendor/go.podman.io/storage/drivers/vfs/driver.go b/vendor/go.podman.io/storage/drivers/vfs/driver.go index c86e174e77..ffd3bd24ea 100644 --- a/vendor/go.podman.io/storage/drivers/vfs/driver.go +++ b/vendor/go.podman.io/storage/drivers/vfs/driver.go @@ -6,6 +6,7 @@ import ( "os" "path/filepath" "runtime" + "slices" "strconv" "strings" @@ -52,7 +53,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) key = strings.ToLower(key) switch key { case "vfs.imagestore", ".imagestore": - d.additionalHomes = append(d.additionalHomes, strings.Split(val, ",")...) 
+ d.additionalHomes = slices.AppendSeq(d.additionalHomes, strings.SplitSeq(val, ",")) continue case "vfs.mountopt": return nil, fmt.Errorf("vfs driver does not support mount options") diff --git a/vendor/go.podman.io/storage/drivers/zfs/zfs.go b/vendor/go.podman.io/storage/drivers/zfs/zfs.go index b804cf0b3b..8660dbaa3e 100644 --- a/vendor/go.podman.io/storage/drivers/zfs/zfs.go +++ b/vendor/go.podman.io/storage/drivers/zfs/zfs.go @@ -188,8 +188,8 @@ func (d *Driver) Cleanup() error { // Currently it return 'Zpool', 'Zpool Health', 'Parent Dataset', 'Space Used By Parent', // 'Space Available', 'Parent Quota' and 'Compression'. func (d *Driver) Status() [][2]string { - parts := strings.Split(d.dataset.Name, "/") - pool, err := zfs.GetZpool(parts[0]) + fsName, _, _ := strings.Cut(d.dataset.Name, "/") + pool, err := zfs.GetZpool(fsName) var poolName, poolHealth string if err == nil { diff --git a/vendor/go.podman.io/storage/images.go b/vendor/go.podman.io/storage/images.go index e535b541e3..c10deb34fc 100644 --- a/vendor/go.podman.io/storage/images.go +++ b/vendor/go.podman.io/storage/images.go @@ -164,8 +164,9 @@ type rwImageStore interface { type imageStore struct { // The following fields are only set when constructing imageStore, and must never be modified afterwards. // They are safe to access without any other locking. - lockfile *lockfile.LockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only image stores. - dir string + lockfile *lockfile.LockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only image stores. + dir string + digestType string inProcessLock sync.RWMutex // Can _only_ be obtained with lockfile held. // The following fields can only be read/written with read/write ownership of inProcessLock, respectively. 
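Unrelated to digests, several hunks above (openshift-copies.go, check.go, overlay/mount.go, overlay.go, and the vfs driver) replace strings.Split loops with the iterator-returning strings.SplitSeq added in Go 1.24, sometimes combined with slices.AppendSeq. The stand-alone comparison below is illustrative only; the input string is made up.

package main

import (
	"fmt"
	"slices"
	"strings"
)

func main() {
	const stores = "overlay,btrfs,zfs,vfs"

	// Old pattern: strings.Split allocates the whole []string up front.
	for _, s := range strings.Split(stores, ",") {
		_ = s
	}

	// New pattern: strings.SplitSeq yields the pieces lazily, with no
	// intermediate slice allocation.
	for s := range strings.SplitSeq(stores, ",") {
		_ = s
	}

	// Collecting into a slice in one call, as the vfs.imagestore hunk does.
	all := slices.AppendSeq([]string(nil), strings.SplitSeq(stores, ","))
	fmt.Println(all)
}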
@@ -589,7 +590,7 @@ func (r *imageStore) Save() error { return nil } -func newImageStore(dir string) (rwImageStore, error) { +func newImageStore(dir string, digestType string) (rwImageStore, error) { if err := os.MkdirAll(dir, 0o700); err != nil { return nil, err } @@ -598,8 +599,9 @@ func newImageStore(dir string) (rwImageStore, error) { return nil, err } istore := imageStore{ - lockfile: lockfile, - dir: dir, + lockfile: lockfile, + dir: dir, + digestType: digestType, images: []*Image{}, byid: make(map[string]*Image), @@ -620,14 +622,15 @@ func newImageStore(dir string) (rwImageStore, error) { return &istore, nil } -func newROImageStore(dir string) (roImageStore, error) { +func newROImageStore(dir string, digestType string) (roImageStore, error) { lockfile, err := lockfile.GetROLockFile(filepath.Join(dir, "images.lock")) if err != nil { return nil, err } istore := imageStore{ - lockfile: lockfile, - dir: dir, + lockfile: lockfile, + dir: dir, + digestType: digestType, images: []*Image{}, byid: make(map[string]*Image), @@ -763,7 +766,8 @@ func (r *imageStore) create(id string, names []string, layer string, options Ima } for _, item := range options.BigData { if item.Digest == "" { - item.Digest = digest.Canonical.FromBytes(item.Data) + digestAlgorithm := getDigestAlgorithmFromType(r.digestType) + item.Digest = digestAlgorithm.FromBytes(item.Data) } if err = r.setBigData(image, item.Key, item.Data, item.Digest); err != nil { return nil, err @@ -988,7 +992,8 @@ func (r *imageStore) SetBigData(id, key string, data []byte, digestManifest func return fmt.Errorf("digesting manifest: %w", err) } } else { - newDigest = digest.Canonical.FromBytes(data) + digestAlgorithm := getDigestAlgorithmFromType(r.digestType) + newDigest = digestAlgorithm.FromBytes(data) } return r.setBigData(image, key, data, newDigest) } diff --git a/vendor/go.podman.io/storage/internal/dedup/dedup.go b/vendor/go.podman.io/storage/internal/dedup/dedup.go index 56d746a304..49b6d41558 100644 --- a/vendor/go.podman.io/storage/internal/dedup/dedup.go +++ b/vendor/go.podman.io/storage/internal/dedup/dedup.go @@ -2,6 +2,7 @@ package dedup import ( "crypto/sha256" + "crypto/sha512" "encoding/binary" "errors" "fmt" @@ -20,6 +21,7 @@ const ( DedupHashCRC DedupHashFileSize DedupHashSHA256 + DedupHashSHA512 ) type DedupHashMethod int @@ -50,6 +52,14 @@ func getFileChecksum(hashMethod DedupHashMethod, path string, info fs.FileInfo) } return string(h.Sum(nil)), nil }) + case DedupHashSHA512: + return readAllFile(path, info, func(buf []byte) (string, error) { + h := sha512.New() + if _, err := h.Write(buf); err != nil { + return "", err + } + return string(h.Sum(nil)), nil + }) case DedupHashCRC: return readAllFile(path, info, func(buf []byte) (string, error) { c := crc64.New(crc64.MakeTable(crc64.ECMA)) diff --git a/vendor/go.podman.io/storage/internal/tempdir/tempdir.go b/vendor/go.podman.io/storage/internal/tempdir/tempdir.go index 91959b75c8..666742c5a0 100644 --- a/vendor/go.podman.io/storage/internal/tempdir/tempdir.go +++ b/vendor/go.podman.io/storage/internal/tempdir/tempdir.go @@ -3,6 +3,7 @@ package tempdir import ( "errors" "fmt" + "io/fs" "os" "path/filepath" "strings" @@ -102,10 +103,10 @@ func listPotentialStaleDirs(rootDir string) (map[string]struct{}, error) { dirContent, err := os.ReadDir(rootDir) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil, nil } - return nil, fmt.Errorf("error reading temp dir %s: %w", rootDir, err) + return nil, fmt.Errorf("error reading temp dir: %w", 
err) } for _, entry := range dirContent { @@ -128,7 +129,7 @@ func listPotentialStaleDirs(rootDir string) (map[string]struct{}, error) { func RecoverStaleDirs(rootDir string) error { potentialStaleDirs, err := listPotentialStaleDirs(rootDir) if err != nil { - return fmt.Errorf("error listing potential stale temp dirs in %s: %w", rootDir, err) + return fmt.Errorf("error listing potential stale temp dirs: %w", err) } if len(potentialStaleDirs) == 0 { @@ -147,11 +148,11 @@ func RecoverStaleDirs(rootDir string) error { continue } - if rmErr := os.RemoveAll(tempDirPath); rmErr != nil && !os.IsNotExist(rmErr) { - recoveryErrors = append(recoveryErrors, fmt.Errorf("error removing stale temp dir %s: %w", tempDirPath, rmErr)) + if rmErr := os.RemoveAll(tempDirPath); rmErr != nil { + recoveryErrors = append(recoveryErrors, fmt.Errorf("error removing stale temp dir: %w", rmErr)) } if unlockErr := instanceLock.UnlockAndDelete(); unlockErr != nil { - recoveryErrors = append(recoveryErrors, fmt.Errorf("error unlocking and deleting stale lock file %s: %w", lockPath, unlockErr)) + recoveryErrors = append(recoveryErrors, fmt.Errorf("error unlocking and deleting stale lock file: %w", unlockErr)) } } @@ -164,7 +165,7 @@ func RecoverStaleDirs(rootDir string) error { // Note: The caller MUST ensure that returned TempDir instance is cleaned up with .Cleanup(). func NewTempDir(rootDir string) (*TempDir, error) { if err := os.MkdirAll(rootDir, 0o700); err != nil { - return nil, fmt.Errorf("creating root temp directory %s failed: %w", rootDir, err) + return nil, fmt.Errorf("creating root temp directory failed: %w", err) } td := &TempDir{ @@ -172,7 +173,7 @@ func NewTempDir(rootDir string) (*TempDir, error) { } tempDirLock, tempDirLockFileName, err := staging_lockfile.CreateAndLock(td.RootDir, tempdirLockPrefix) if err != nil { - return nil, fmt.Errorf("creating and locking temp dir instance lock in %s failed: %w", td.RootDir, err) + return nil, fmt.Errorf("creating and locking temp dir instance lock failed: %w", err) } td.tempDirLock = tempDirLock td.tempDirLockPath = filepath.Join(td.RootDir, tempDirLockFileName) @@ -181,7 +182,7 @@ func NewTempDir(rootDir string) (*TempDir, error) { id := strings.TrimPrefix(tempDirLockFileName, tempdirLockPrefix) actualTempDirPath := filepath.Join(td.RootDir, tempDirPrefix+id) if err := os.MkdirAll(actualTempDirPath, 0o700); err != nil { - return nil, fmt.Errorf("creating temp directory %s failed: %w", actualTempDirPath, err) + return nil, fmt.Errorf("creating temp directory failed: %w", err) } td.tempDirPath = actualTempDirPath td.counter = 0 @@ -217,8 +218,8 @@ func (td *TempDir) Cleanup() error { return nil } - if err := os.RemoveAll(td.tempDirPath); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("removing temp dir %s failed: %w", td.tempDirPath, err) + if err := os.RemoveAll(td.tempDirPath); err != nil { + return fmt.Errorf("removing temp dir failed: %w", err) } lock := td.tempDirLock diff --git a/vendor/go.podman.io/storage/layers.go b/vendor/go.podman.io/storage/layers.go index c6752927e3..8bb2339f06 100644 --- a/vendor/go.podman.io/storage/layers.go +++ b/vendor/go.podman.io/storage/layers.go @@ -444,6 +444,9 @@ type layerStore struct { // FIXME: This field is only set when constructing layerStore, but locking rules of the driver // interface itself are not documented here. 
driver drivers.Driver + // store is a reference to the parent store for accessing digest + // configuration + store *store } func copyLayer(l *Layer) *Layer { @@ -1190,6 +1193,7 @@ func (s *store) newLayerStore(rundir, layerdir, imagedir string, driver drivers. bymount: make(map[string]*Layer), driver: driver, + store: s, } if err := rlstore.startWritingWithReload(false); err != nil { return nil, err @@ -1207,7 +1211,7 @@ func (s *store) newLayerStore(rundir, layerdir, imagedir string, driver drivers. return &rlstore, nil } -func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roLayerStore, error) { +func newROLayerStore(rundir string, layerdir string, driver drivers.Driver, store *store) (roLayerStore, error) { lockfile, err := lockfile.GetROLockFile(filepath.Join(layerdir, "layers.lock")) if err != nil { return nil, err @@ -1228,6 +1232,7 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roL bymount: make(map[string]*Layer), driver: driver, + store: store, } if err := rlstore.startReadingWithReload(false); err != nil { return nil, err @@ -2420,17 +2425,26 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions, // Decide if we need to compute digests var compressedDigest, uncompressedDigest digest.Digest // = "" var compressedDigester, uncompressedDigester digest.Digester // = nil - if layerOptions != nil && layerOptions.OriginalDigest != "" && - layerOptions.OriginalDigest.Algorithm() == digest.Canonical { + digestAlgorithm := r.store.GetDigestAlgorithm() + if layerOptions != nil && layerOptions.OriginalDigest != "" { + // Use the existing digest if available compressedDigest = layerOptions.OriginalDigest } else { - compressedDigester = digest.Canonical.Digester() + compressedDigester = digestAlgorithm.Digester() } - if layerOptions != nil && layerOptions.UncompressedDigest != "" && - layerOptions.UncompressedDigest.Algorithm() == digest.Canonical { + if layerOptions != nil && layerOptions.UncompressedDigest != "" { + // Use the existing uncompressed digest if available uncompressedDigest = layerOptions.UncompressedDigest } else if compression != archive.Uncompressed { - uncompressedDigester = digest.Canonical.Digester() + // For computing new uncompressed digests, prefer the algorithm from the expected compressed digest + // if available and valid, otherwise fall back to the storage's digest algorithm + compressedDigestAlgorithm := digestAlgorithm + if layerOptions != nil && layerOptions.OriginalDigest != "" { + if algo := layerOptions.OriginalDigest.Algorithm(); algo.Available() { + compressedDigestAlgorithm = algo + } + } + uncompressedDigester = compressedDigestAlgorithm.Digester() } var compressedWriter io.Writer diff --git a/vendor/go.podman.io/storage/pkg/archive/archive_other.go b/vendor/go.podman.io/storage/pkg/archive/archive_other.go index b342ff75ee..f7c7352fd3 100644 --- a/vendor/go.podman.io/storage/pkg/archive/archive_other.go +++ b/vendor/go.podman.io/storage/pkg/archive/archive_other.go @@ -2,7 +2,7 @@ package archive -func GetWhiteoutConverter(format WhiteoutFormat, data interface{}) TarWhiteoutConverter { +func GetWhiteoutConverter(format WhiteoutFormat, data any) TarWhiteoutConverter { return nil } diff --git a/vendor/go.podman.io/storage/pkg/archive/archive_unix.go b/vendor/go.podman.io/storage/pkg/archive/archive_unix.go index 2d9d68de2c..0ff5716584 100644 --- a/vendor/go.podman.io/storage/pkg/archive/archive_unix.go +++ b/vendor/go.podman.io/storage/pkg/archive/archive_unix.go @@ 
-32,8 +32,10 @@ func statUnix(fi os.FileInfo, hdr *tar.Header) error { if s.Mode&unix.S_IFBLK != 0 || s.Mode&unix.S_IFCHR != 0 { - hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert - hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert + // _nolint_: Whether this conversion is required is hardware- and OS-dependent: the value might be uint64 on Linux, int32 on macOS. + // So, this might trigger either "unconvert" (if the conversion is unnecessary) or "nolintlint" (if it is required) + hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint:unconvert,nolintlint + hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint:unconvert,nolintlint } return nil @@ -74,8 +76,10 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat any) (err erro // Currently go does not fill in the major/minors if s.Mode&unix.S_IFBLK != 0 || s.Mode&unix.S_IFCHR != 0 { - hdr.Devmajor = int64(major(uint64(s.Rdev))) //nolint: unconvert - hdr.Devminor = int64(minor(uint64(s.Rdev))) //nolint: unconvert + // _nolint_: Whether this conversion is required is hardware- and OS-dependent: the value might be uint64 on Linux, int32 on macOS. + // So, this might trigger either "unconvert" (if the conversion is unnecessary) or "nolintlint" (if it is required) + hdr.Devmajor = int64(major(uint64(s.Rdev))) //nolint: unconvert,nolintlint + hdr.Devminor = int64(minor(uint64(s.Rdev))) //nolint: unconvert,nolintlint } } diff --git a/vendor/go.podman.io/storage/pkg/archive/changes.go b/vendor/go.podman.io/storage/pkg/archive/changes.go index 051ab69528..e9e3519819 100644 --- a/vendor/go.podman.io/storage/pkg/archive/changes.go +++ b/vendor/go.podman.io/storage/pkg/archive/changes.go @@ -281,8 +281,7 @@ func (info *FileInfo) LookUp(path string) *FileInfo { return info } - pathElements := strings.Split(path, string(os.PathSeparator)) - for _, elem := range pathElements { + for elem := range strings.SplitSeq(path, string(os.PathSeparator)) { if elem != "" { child := parent.children[elem] if child == nil { diff --git a/vendor/go.podman.io/storage/pkg/archive/fflags_bsd.go b/vendor/go.podman.io/storage/pkg/archive/fflags_bsd.go index 829c95ef14..7584b14ce5 100644 --- a/vendor/go.podman.io/storage/pkg/archive/fflags_bsd.go +++ b/vendor/go.podman.io/storage/pkg/archive/fflags_bsd.go @@ -77,7 +77,7 @@ var ( func parseFileFlags(fflags string) (uint32, uint32, error) { var set, clear uint32 = 0, 0 - for _, fflag := range strings.Split(fflags, ",") { + for fflag := range strings.SplitSeq(fflags, ",") { isClear := false if clean, ok := strings.CutPrefix(fflag, "no"); ok { isClear = true diff --git a/vendor/go.podman.io/storage/pkg/chunked/cache_linux.go b/vendor/go.podman.io/storage/pkg/chunked/cache_linux.go index 87bd065a80..fdbcab497a 100644 --- a/vendor/go.podman.io/storage/pkg/chunked/cache_linux.go +++ b/vendor/go.podman.io/storage/pkg/chunked/cache_linux.go @@ -246,7 +246,7 @@ func (c *layersCache) createCacheFileFromTOC(layerID string) (*layer, error) { return nil, fmt.Errorf("read manifest file: %w", err) } - cacheFile, err := writeCache(manifest, lcd.Format, layerID, c.store) + cacheFile, err := writeCache(manifest, lcd.Format, layerID, c.store, c.store.GetDigestAlgorithm()) if err != nil { return nil, err } @@ -321,8 +321,8 @@ func (c *layersCache) load() error { // calculateHardLinkFingerprint calculates a hash that can be used to verify if a file // is usable for deduplication with hardlinks. // To calculate the digest, it uses the file payload digest, UID, GID, mode and xattrs.
-func calculateHardLinkFingerprint(f *fileMetadata) (string, error) { - digester := digest.Canonical.Digester() +func calculateHardLinkFingerprint(f *fileMetadata, digestAlgorithm digest.Algorithm) (string, error) { + digester := digestAlgorithm.Digester() modeString := fmt.Sprintf("%d:%d:%o", f.UID, f.GID, f.Mode) hash := digester.Hash() @@ -479,7 +479,7 @@ func writeCacheFileToWriter(writer io.Writer, bloomFilter *bloomFilter, tags [][ // - digest(file.payload)) // - digest(digest(file.payload) + file.UID + file.GID + file.mode + file.xattrs) // - digest(i) for each i in chunks(file payload) -func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id string, dest setBigData) (*cacheFile, error) { +func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id string, dest setBigData, digestAlgorithm digest.Algorithm) (*cacheFile, error) { var vdata, tagsBuffer, fnames bytes.Buffer tagLen := 0 digestLen := 0 @@ -533,7 +533,7 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin } tags = append(tags, tag) - fp, err := calculateHardLinkFingerprint(k) + fp, err := calculateHardLinkFingerprint(k, digestAlgorithm) if err != nil { return nil, err } @@ -836,7 +836,7 @@ func (c *layersCache) findFileInOtherLayers(file *fileMetadata, useHardLinks boo digest := file.Digest if useHardLinks { var err error - digest, err = calculateHardLinkFingerprint(file) + digest, err = calculateHardLinkFingerprint(file, c.store.GetDigestAlgorithm()) if err != nil { return "", "", err } diff --git a/vendor/go.podman.io/storage/pkg/chunked/compression.go b/vendor/go.podman.io/storage/pkg/chunked/compression.go index 29d4f28644..a112cc13ad 100644 --- a/vendor/go.podman.io/storage/pkg/chunked/compression.go +++ b/vendor/go.podman.io/storage/pkg/chunked/compression.go @@ -3,6 +3,7 @@ package chunked import ( "io" + "github.com/opencontainers/go-digest" "go.podman.io/storage/pkg/chunked/compressor" "go.podman.io/storage/pkg/chunked/internal/minimal" ) @@ -21,5 +22,5 @@ const ( // ZstdCompressor is a CompressorFunc for the zstd compression algorithm. // Deprecated: Use pkg/chunked/compressor.ZstdCompressor. func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { - return compressor.ZstdCompressor(r, metadata, level) + return compressor.ZstdCompressor(r, metadata, level, digest.SHA256) } diff --git a/vendor/go.podman.io/storage/pkg/chunked/compression_linux.go b/vendor/go.podman.io/storage/pkg/chunked/compression_linux.go index 502bb7f4a0..18f833884b 100644 --- a/vendor/go.podman.io/storage/pkg/chunked/compression_linux.go +++ b/vendor/go.podman.io/storage/pkg/chunked/compression_linux.go @@ -48,7 +48,7 @@ func typeToTarType(t string) (byte, error) { // readEstargzChunkedManifest reads the estargz manifest from the seekable stream blobStream. // It may return an error matching ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert. 
-func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, tocDigest digest.Digest) ([]byte, int64, error) { +func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, tocDigest digest.Digest, digestAlgorithm digest.Algorithm) ([]byte, int64, error) { // information on the format here https://github.com/containerd/stargz-snapshotter/blob/main/docs/stargz-estargz.md footerSize := int64(51) if blobSize <= footerSize { @@ -146,7 +146,7 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, return nil, 0, errors.New("manifest not found") } - manifestDigester := digest.Canonical.Digester() + manifestDigester := digestAlgorithm.Digester() manifestChecksum := manifestDigester.Hash() if _, err := manifestChecksum.Write(manifestUncompressed); err != nil { return nil, 0, err @@ -188,7 +188,7 @@ func openTmpFileNoTmpFile(tmpDir string) (*os.File, error) { // The compressed parameter indicates whether the manifest and tar-split data are zstd-compressed // (true) or stored uncompressed (false). Uncompressed data is used only for an optimization to convert // a regular OCI layer to zstd:chunked when convert_images is set, and it is not used for distributed images. -func readZstdChunkedManifest(tmpDir string, blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string, compressed bool) (_ []byte, _ *minimal.TOC, _ *os.File, _ int64, retErr error) { +func readZstdChunkedManifest(tmpDir string, blobStream ImageSourceSeekable, tocDigest digest.Digest, annotations map[string]string, compressed bool, digestAlgorithm digest.Algorithm) (_ []byte, _ *minimal.TOC, _ *os.File, _ int64, retErr error) { offsetMetadata := annotations[minimal.ManifestInfoKey] if offsetMetadata == "" { return nil, nil, nil, 0, fmt.Errorf("%q annotation missing", minimal.ManifestInfoKey) diff --git a/vendor/go.podman.io/storage/pkg/chunked/compressor/compressor.go b/vendor/go.podman.io/storage/pkg/chunked/compressor/compressor.go index 23bcbda519..935e8a9042 100644 --- a/vendor/go.podman.io/storage/pkg/chunked/compressor/compressor.go +++ b/vendor/go.podman.io/storage/pkg/chunked/compressor/compressor.go @@ -173,7 +173,7 @@ func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) { return false, -1, err } if holeLen > 0 { - for j := int64(0); j < holeLen; j++ { + for range holeLen { rc.rollsum.Roll(0) } rc.pendingHole = holeLen @@ -205,9 +205,9 @@ type tarSplitData struct { packer storage.Packer } -func newTarSplitData(createZstdWriter minimal.CreateZstdWriterFunc) (*tarSplitData, error) { +func newTarSplitData(createZstdWriter minimal.CreateZstdWriterFunc, digestAlgorithm digest.Algorithm) (*tarSplitData, error) { compressed := bytes.NewBuffer(nil) - digester := digest.Canonical.Digester() + digester := digestAlgorithm.Digester() zstdWriter, err := createZstdWriter(io.MultiWriter(compressed, digester.Hash())) if err != nil { @@ -226,11 +226,11 @@ func newTarSplitData(createZstdWriter minimal.CreateZstdWriterFunc) (*tarSplitDa }, nil } -func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, createZstdWriter minimal.CreateZstdWriterFunc) error { +func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, createZstdWriter minimal.CreateZstdWriterFunc, digestAlgorithm digest.Algorithm) error { // total written so far. 
Used to retrieve partial offsets in the file dest := ioutils.NewWriteCounter(destFile) - tarSplitData, err := newTarSplitData(createZstdWriter) + tarSplitData, err := newTarSplitData(createZstdWriter, digestAlgorithm) if err != nil { return err } @@ -287,8 +287,8 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r return err } - payloadDigester := digest.Canonical.Digester() - chunkDigester := digest.Canonical.Digester() + payloadDigester := digestAlgorithm.Digester() + chunkDigester := digestAlgorithm.Digester() // Now handle the payload, if any startOffset := int64(0) @@ -353,7 +353,7 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r lastOffset = off lastChunkOffset = rcReader.WrittenOut - chunkDigester = digest.Canonical.Digester() + chunkDigester = digestAlgorithm.Digester() payloadDest = io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter) } if errRead == io.EOF { @@ -419,7 +419,7 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r UncompressedSize: tarSplitData.uncompressedCounter.Count, } - return minimal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, createZstdWriter) + return minimal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, createZstdWriter, digestAlgorithm) } type zstdChunkedWriter struct { @@ -461,12 +461,12 @@ func (w zstdChunkedWriter) Write(p []byte) (int, error) { // [SKIPPABLE FRAME 1]: [ZSTD SKIPPABLE FRAME, SIZE=MANIFEST LENGTH][MANIFEST] // [SKIPPABLE FRAME 2]: [ZSTD SKIPPABLE FRAME, SIZE=16][MANIFEST_OFFSET][MANIFEST_LENGTH][MANIFEST_LENGTH_UNCOMPRESSED][MANIFEST_TYPE][CHUNKED_ZSTD_MAGIC_NUMBER] // MANIFEST_OFFSET, MANIFEST_LENGTH, MANIFEST_LENGTH_UNCOMPRESSED and CHUNKED_ZSTD_MAGIC_NUMBER are 64 bits unsigned in little endian format. -func makeZstdChunkedWriter(out io.Writer, metadata map[string]string, createZstdWriter minimal.CreateZstdWriterFunc) (io.WriteCloser, error) { +func makeZstdChunkedWriter(out io.Writer, metadata map[string]string, createZstdWriter minimal.CreateZstdWriterFunc, digestAlgorithm digest.Algorithm) (io.WriteCloser, error) { ch := make(chan error, 1) r, w := io.Pipe() go func() { - ch <- writeZstdChunkedStream(out, metadata, r, createZstdWriter) + ch <- writeZstdChunkedStream(out, metadata, r, createZstdWriter, digestAlgorithm) _, _ = io.Copy(io.Discard, r) // Ordinarily writeZstdChunkedStream consumes all of r. If it fails, ensure the write end never blocks and eventually terminates. r.Close() close(ch) @@ -479,7 +479,7 @@ func makeZstdChunkedWriter(out io.Writer, metadata map[string]string, createZstd } // ZstdCompressor is a CompressorFunc for the zstd compression algorithm. 
-func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) { +func ZstdCompressor(r io.Writer, metadata map[string]string, level *int, digestAlgorithm digest.Algorithm) (io.WriteCloser, error) { if level == nil { l := 10 level = &l @@ -489,7 +489,7 @@ func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.Wri return minimal.ZstdWriterWithLevel(dest, *level) } - return makeZstdChunkedWriter(r, metadata, createZstdWriter) + return makeZstdChunkedWriter(r, metadata, createZstdWriter, digestAlgorithm) } type noCompression struct { @@ -516,9 +516,9 @@ func (n *noCompression) Reset(dest io.Writer) { // // Such an output does not follow the zstd:chunked spec and cannot be generally consumed; this function // only exists for internal purposes and should not be called from outside c/storage. -func NoCompression(r io.Writer, metadata map[string]string) (io.WriteCloser, error) { +func NoCompression(r io.Writer, metadata map[string]string, digestAlgorithm digest.Algorithm) (io.WriteCloser, error) { createZstdWriter := func(dest io.Writer) (minimal.ZstdWriter, error) { return &noCompression{dest: dest}, nil } - return makeZstdChunkedWriter(r, metadata, createZstdWriter) + return makeZstdChunkedWriter(r, metadata, createZstdWriter, digestAlgorithm) } diff --git a/vendor/go.podman.io/storage/pkg/chunked/internal/minimal/compression.go b/vendor/go.podman.io/storage/pkg/chunked/internal/minimal/compression.go index 256365759c..0e5b9a7a4c 100644 --- a/vendor/go.podman.io/storage/pkg/chunked/internal/minimal/compression.go +++ b/vendor/go.podman.io/storage/pkg/chunked/internal/minimal/compression.go @@ -146,7 +146,9 @@ const ( // TarSplitChecksumKey is no longer used and is replaced by the TOC.TarSplitDigest field instead. // The value is retained here as a constant as a historical reference for older zstd:chunked images. - // TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum" + // + // Deprecated: This field should never be relied on - use the digest in the TOC instead. + TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum" // ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file. 
ManifestTypeCRFS = 1 @@ -188,7 +190,7 @@ type TarSplitData struct { UncompressedSize int64 } -func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, tarSplitData *TarSplitData, metadata []FileMetadata, createZstdWriter CreateZstdWriterFunc) error { +func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, tarSplitData *TarSplitData, metadata []FileMetadata, createZstdWriter CreateZstdWriterFunc, digestAlgorithm digest.Algorithm) error { // 8 is the size of the zstd skippable frame header + the frame size const zstdSkippableFrameHeader = 8 manifestOffset := offset + zstdSkippableFrameHeader @@ -220,7 +222,7 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off } compressedManifest := compressedBuffer.Bytes() - manifestDigester := digest.Canonical.Digester() + manifestDigester := digestAlgorithm.Digester() manifestChecksum := manifestDigester.Hash() if _, err := manifestChecksum.Write(compressedManifest); err != nil { return err diff --git a/vendor/go.podman.io/storage/pkg/chunked/internal/path/path.go b/vendor/go.podman.io/storage/pkg/chunked/internal/path/path.go index 55ba74550e..e33038e94a 100644 --- a/vendor/go.podman.io/storage/pkg/chunked/internal/path/path.go +++ b/vendor/go.podman.io/storage/pkg/chunked/internal/path/path.go @@ -19,7 +19,7 @@ func CleanAbsPath(path string) string { // // The caller MUST ensure d is a valid digest (in particular, that it contains no path separators or .. entries) func RegularFilePathForValidatedDigest(d digest.Digest) (string, error) { - if algo := d.Algorithm(); algo != digest.SHA256 { + if algo := d.Algorithm(); algo != digest.SHA256 && algo != digest.SHA512 { return "", fmt.Errorf("unexpected digest algorithm %q", algo) } e := d.Encoded() diff --git a/vendor/go.podman.io/storage/pkg/chunked/storage_linux.go b/vendor/go.podman.io/storage/pkg/chunked/storage_linux.go index e6e3c9c6d1..780b4b0238 100644 --- a/vendor/go.podman.io/storage/pkg/chunked/storage_linux.go +++ b/vendor/go.podman.io/storage/pkg/chunked/storage_linux.go @@ -170,13 +170,13 @@ func (c *chunkedDiffer) convertTarToZstdChunked(destDirectory string, payload *o } newAnnotations := make(map[string]string) - chunked, err := compressor.NoCompression(f, newAnnotations) + chunked, err := compressor.NoCompression(f, newAnnotations, c.layersCache.store.GetDigestAlgorithm()) if err != nil { f.Close() return 0, nil, "", nil, err } - convertedOutputDigester := digest.Canonical.Digester() + convertedOutputDigester := c.layersCache.store.GetDigestAlgorithm().Digester() copied, err := io.CopyBuffer(io.MultiWriter(chunked, convertedOutputDigester.Hash()), diff, c.copyBuffer) if err != nil { f.Close() @@ -340,7 +340,7 @@ func makeConvertFromRawDiffer(store storage.Store, blobDigest digest.Digest, blo // makeZstdChunkedDiffer sets up a chunkedDiffer for a zstd:chunked layer. // It may return an error matching ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert. 
func makeZstdChunkedDiffer(store storage.Store, blobSize int64, tocDigest digest.Digest, annotations map[string]string, iss ImageSourceSeekable, pullOptions pullOptions) (_ *chunkedDiffer, retErr error) { - manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(store.RunRoot(), iss, tocDigest, annotations, true) + manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(store.RunRoot(), iss, tocDigest, annotations, true, store.GetDigestAlgorithm()) if err != nil { // May be ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) } @@ -399,7 +399,7 @@ func makeEstargzChunkedDiffer(store storage.Store, blobSize int64, tocDigest dig } } - manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, tocDigest) + manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, tocDigest, store.GetDigestAlgorithm()) if err != nil { // May be ErrFallbackToOrdinaryLayerDownload / errFallbackCanConvert return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) } @@ -772,7 +772,7 @@ type destinationFile struct { recordFsVerity recordFsVerityFunc } -func openDestinationFile(dirfd int, metadata *fileMetadata, options *archive.TarOptions, skipValidation bool, recordFsVerity recordFsVerityFunc) (*destinationFile, error) { +func openDestinationFile(dirfd int, metadata *fileMetadata, options *archive.TarOptions, skipValidation bool, recordFsVerity recordFsVerityFunc, digestAlgorithm digest.Algorithm) (*destinationFile, error) { file, err := openFileUnderRoot(dirfd, metadata.Name, newFileFlags, 0) if err != nil { return nil, err @@ -785,7 +785,7 @@ func openDestinationFile(dirfd int, metadata *fileMetadata, options *archive.Tar if skipValidation { to = file } else { - digester = digest.Canonical.Digester() + digester = digestAlgorithm.Digester() hash = digester.Hash() to = io.MultiWriter(file, hash) } @@ -985,7 +985,7 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan if c.useFsVerity == graphdriver.DifferFsVerityDisabled { recordFsVerity = nil } - destFile, err = openDestinationFile(dirfd, mf.File, options, c.skipValidation, recordFsVerity) + destFile, err = openDestinationFile(dirfd, mf.File, options, c.skipValidation, recordFsVerity, c.layersCache.store.GetDigestAlgorithm()) if err != nil { Err = err goto exit @@ -1363,7 +1363,7 @@ func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest, return "", err } - originalRawDigester := digest.Canonical.Digester() + originalRawDigester := c.layersCache.store.GetDigestAlgorithm().Digester() for soe := range streamsOrErrors { if soe.stream != nil { r := io.TeeReader(soe.stream, originalRawDigester.Hash()) @@ -1466,7 +1466,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff if tocDigest == nil { return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("internal error: just-created zstd:chunked missing TOC digest") } - manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(dest, fileSource, *tocDigest, annotations, false) + manifest, toc, tarSplit, tocOffset, err := readZstdChunkedManifest(dest, fileSource, *tocDigest, annotations, false, c.layersCache.store.GetDigestAlgorithm()) if err != nil { return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("read zstd:chunked manifest: %w", err) } @@ -1821,7 +1821,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff if err != nil { return output, err } - if offset >= 0 && 
validateChunkChecksum(chunk, root, path, offset, c.copyBuffer) { + if offset >= 0 && validateChunkChecksum(chunk, root, path, offset, c.copyBuffer, c.layersCache.store.GetDigestAlgorithm()) { missingPartsSize -= size mp.OriginFile = &originFile{ Root: root, @@ -1878,7 +1878,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff } metadata := tsStorage.NewJSONUnpacker(output.TarSplit) fg := newStagedFileGetter(dirFile, flatPathNameMap) - digester := digest.Canonical.Digester() + digester := c.layersCache.store.GetDigestAlgorithm().Digester() if err := asm.WriteOutputTarStream(fg, metadata, digester.Hash()); err != nil { return output, fmt.Errorf("digesting staged uncompressed stream: %w", err) } @@ -1986,7 +1986,7 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []m // validateChunkChecksum checks if the file at $root/$path[offset:chunk.ChunkSize] has the // same digest as chunk.ChunkDigest -func validateChunkChecksum(chunk *minimal.FileMetadata, root, path string, offset int64, copyBuffer []byte) bool { +func validateChunkChecksum(chunk *minimal.FileMetadata, root, path string, offset int64, copyBuffer []byte, digestAlgorithm digest.Algorithm) bool { parentDirfd, err := unix.Open(root, unix.O_PATH|unix.O_CLOEXEC, 0) if err != nil { return false @@ -2004,7 +2004,7 @@ func validateChunkChecksum(chunk *minimal.FileMetadata, root, path string, offse } r := io.LimitReader(fd, chunk.ChunkSize) - digester := digest.Canonical.Digester() + digester := digestAlgorithm.Digester() if _, err := io.CopyBuffer(digester.Hash(), r, copyBuffer); err != nil { return false diff --git a/vendor/go.podman.io/storage/pkg/chunked/toc/toc.go b/vendor/go.podman.io/storage/pkg/chunked/toc/toc.go index 7059b5406d..fafa40e607 100644 --- a/vendor/go.podman.io/storage/pkg/chunked/toc/toc.go +++ b/vendor/go.podman.io/storage/pkg/chunked/toc/toc.go @@ -7,6 +7,20 @@ import ( "go.podman.io/storage/pkg/chunked/internal/minimal" ) +// ChunkedAnnotations contains various annotations that might be set or used by the pkg/chunked-supported +// compression formats. +// +// This set does not define their semantics in detail as a public API. +// The _only_ intended use of this set is: code that _changes_ layer compression to a format +// which is not chunked can/should remove these annotations. +var ChunkedAnnotations = map[string]struct{}{ + minimal.ManifestChecksumKey: {}, + minimal.ManifestInfoKey: {}, + minimal.TarSplitInfoKey: {}, + minimal.TarSplitChecksumKey: {}, //nolint:staticcheck // The field is deprecated, so removing it when changing compressionn is all the more desirable. + tocJSONDigestAnnotation: {}, +} + // tocJSONDigestAnnotation is the annotation key for the digest of the estargz // TOC JSON. // It is defined in github.com/containerd/stargz-snapshotter/estargz as TOCJSONDigestAnnotation diff --git a/vendor/go.podman.io/storage/pkg/config/config.go b/vendor/go.podman.io/storage/pkg/config/config.go index 560df3cf5e..942ff12ed7 100644 --- a/vendor/go.podman.io/storage/pkg/config/config.go +++ b/vendor/go.podman.io/storage/pkg/config/config.go @@ -5,11 +5,6 @@ import ( "os" ) -type AufsOptionsConfig struct { - // MountOpt specifies extra mount options used when mounting - MountOpt string `toml:"mountopt,omitempty"` -} - type BtrfsOptionsConfig struct { // MinSpace is the minimal spaces allocated to the device MinSpace string `toml:"min_space,omitempty"` @@ -98,9 +93,6 @@ type OptionsConfig struct { // created automatically. 
AutoUsernsMaxSize uint32 `toml:"auto-userns-max-size,omitempty"` - // Aufs container options to be handed to aufs drivers - Aufs struct{ AufsOptionsConfig } `toml:"aufs,omitempty"` - // Btrfs container options to be handed to btrfs drivers Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs,omitempty"` @@ -131,19 +123,17 @@ type OptionsConfig struct { // DisableVolatile doesn't allow volatile mounts when it is set. DisableVolatile bool `toml:"disable-volatile,omitempty"` + + // DigestType specifies the hash algorithm to use for content addressing. + // Supported values are: "sha256", "sha512" + // Default is "sha256" + DigestType string `toml:"digest_type,omitempty"` } // GetGraphDriverOptions returns the driver specific options func GetGraphDriverOptions(driverName string, options OptionsConfig) []string { var doptions []string switch driverName { - case "aufs": - if options.Aufs.MountOpt != "" { - return append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Aufs.MountOpt)) - } else if options.MountOpt != "" { - doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt)) - } - case "btrfs": if options.Btrfs.MinSpace != "" { return append(doptions, fmt.Sprintf("%s.min_space=%s", driverName, options.Btrfs.MinSpace)) diff --git a/vendor/go.podman.io/storage/pkg/fileutils/fileutils.go b/vendor/go.podman.io/storage/pkg/fileutils/fileutils.go index 85ce2d5260..434979825c 100644 --- a/vendor/go.podman.io/storage/pkg/fileutils/fileutils.go +++ b/vendor/go.podman.io/storage/pkg/fileutils/fileutils.go @@ -51,7 +51,6 @@ func NewPatternMatcher(patterns []string) (*PatternMatcher, error) { return nil, err } newp.cleanedPattern = p - newp.dirs = strings.Split(p, string(os.PathSeparator)) pm.patterns = append(pm.patterns, newp) } return pm, nil @@ -168,7 +167,6 @@ func (pm *PatternMatcher) Patterns() []*Pattern { // Pattern defines a single regexp used to filter file paths. 
type Pattern struct { cleanedPattern string - dirs []string regexp *regexp.Regexp exclusion bool } diff --git a/vendor/go.podman.io/storage/pkg/idtools/idtools_unix.go b/vendor/go.podman.io/storage/pkg/idtools/idtools_unix.go index 817b59aed1..e7c2643676 100644 --- a/vendor/go.podman.io/storage/pkg/idtools/idtools_unix.go +++ b/vendor/go.podman.io/storage/pkg/idtools/idtools_unix.go @@ -8,7 +8,6 @@ import ( "io" "os" "path/filepath" - "strings" "sync" "syscall" @@ -112,7 +111,7 @@ func LookupUser(username string) (user.User, error) { return usr, nil } // local files lookup failed; attempt to call `getent` to query configured passwd dbs - usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username)) + usr, err = getentUser(username) if err != nil { return user.User{}, err } @@ -128,11 +127,11 @@ func LookupUID(uid int) (user.User, error) { return usr, nil } // local files lookup failed; attempt to call `getent` to query configured passwd dbs - return getentUser(fmt.Sprintf("%s %d", "passwd", uid)) + return getentUser(fmt.Sprintf("%d", uid)) } -func getentUser(args string) (user.User, error) { - reader, err := callGetent(args) +func getentUser(key string) (user.User, error) { + reader, err := callGetent("passwd", key) if err != nil { return user.User{}, err } @@ -141,7 +140,7 @@ func getentUser(args string) (user.User, error) { return user.User{}, err } if len(users) == 0 { - return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1]) + return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", key) } return users[0], nil } @@ -155,7 +154,7 @@ func LookupGroup(groupname string) (user.Group, error) { return group, nil } // local files lookup failed; attempt to call `getent` to query configured group dbs - return getentGroup(fmt.Sprintf("%s %s", "group", groupname)) + return getentGroup(groupname) } // LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, @@ -167,11 +166,11 @@ func LookupGID(gid int) (user.Group, error) { return group, nil } // local files lookup failed; attempt to call `getent` to query configured group dbs - return getentGroup(fmt.Sprintf("%s %d", "group", gid)) + return getentGroup(fmt.Sprintf("%d", gid)) } -func getentGroup(args string) (user.Group, error) { - reader, err := callGetent(args) +func getentGroup(key string) (user.Group, error) { + reader, err := callGetent("group", key) if err != nil { return user.Group{}, err } @@ -180,18 +179,18 @@ func getentGroup(args string) (user.Group, error) { return user.Group{}, err } if len(groups) == 0 { - return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1]) + return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", key) } return groups[0], nil } -func callGetent(args string) (io.Reader, error) { +func callGetent(db, key string) (io.Reader, error) { entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) // if no `getent` command on host, can't do anything else if getentCmd == "" { return nil, fmt.Errorf("") } - out, err := execCmd(getentCmd, args) + out, err := execCmd(getentCmd, db, key) if err != nil { exitCode, errC := system.GetExitCode(err) if errC != nil { @@ -201,8 +200,7 @@ func callGetent(args string) (io.Reader, error) { case 1: return nil, fmt.Errorf("getent reported invalid parameters/database unknown") case 2: - terms := strings.Split(args, " ") - return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0]) 
+ return nil, fmt.Errorf("getent unable to find entry %q in %s database", key, db) case 3: return nil, fmt.Errorf("getent database doesn't support enumeration") default: diff --git a/vendor/go.podman.io/storage/pkg/idtools/usergroupadd_linux.go b/vendor/go.podman.io/storage/pkg/idtools/usergroupadd_linux.go index d2ff4466c1..ee80ce6a78 100644 --- a/vendor/go.podman.io/storage/pkg/idtools/usergroupadd_linux.go +++ b/vendor/go.podman.io/storage/pkg/idtools/usergroupadd_linux.go @@ -2,6 +2,7 @@ package idtools import ( "fmt" + "slices" "sort" "strconv" "strings" @@ -17,19 +18,12 @@ import ( var ( once sync.Once - userCommand string - - cmdTemplates = map[string]string{ - "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s", - "useradd": "-r -s /bin/false %s", - "usermod": "-%s %d-%d %s", - } + userCommand []string // command, args…, to be finished by adding an user name idOutRegexp = regexp.Delayed(`uid=([0-9]+).*gid=([0-9]+)`) // default length for a UID/GID subordinate range defaultRangeLen = 65536 defaultRangeStart = 100000 - userMod = "usermod" ) // AddNamespaceRangesUser takes a username and uses the standard system @@ -72,16 +66,16 @@ func addUser(userName string) error { once.Do(func() { // set up which commands are used for adding users/groups dependent on distro if _, err := resolveBinary("adduser"); err == nil { - userCommand = "adduser" + userCommand = []string{"adduser", "--system", "--shell", "/bin/false", "--no-create-home", "--disabled-login", "--disabled-password", "--group"} } else if _, err := resolveBinary("useradd"); err == nil { - userCommand = "useradd" + userCommand = []string{"useradd", "-r", "-s", "/bin/false"} } }) - if userCommand == "" { + if userCommand == nil { return fmt.Errorf("cannot add user; no useradd/adduser binary found") } - args := fmt.Sprintf(cmdTemplates[userCommand], userName) - out, err := execCmd(userCommand, args) + args := append(slices.Clone(userCommand), userName) + out, err := execCmd(args[0], args[1:]...) 
if err != nil { return fmt.Errorf("failed to add user with error: %w; output: %q", err, string(out)) } @@ -101,7 +95,7 @@ func createSubordinateRanges(name string) error { if err != nil { return fmt.Errorf("can't find available subuid range: %w", err) } - out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) + out, err := execCmd("usermod", "-v", fmt.Sprintf("%d-%d", startID, startID+defaultRangeLen-1), name) if err != nil { return fmt.Errorf("unable to add subuid range to user: %q; output: %s, err: %w", name, out, err) } @@ -117,7 +111,7 @@ func createSubordinateRanges(name string) error { if err != nil { return fmt.Errorf("can't find available subgid range: %w", err) } - out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) + out, err := execCmd("usermod", "-w", fmt.Sprintf("%d-%d", startID, startID+defaultRangeLen-1), name) if err != nil { return fmt.Errorf("unable to add subgid range to user: %q; output: %s, err: %w", name, out, err) } diff --git a/vendor/go.podman.io/storage/pkg/idtools/utils_unix.go b/vendor/go.podman.io/storage/pkg/idtools/utils_unix.go index f34462a23a..10606fba8a 100644 --- a/vendor/go.podman.io/storage/pkg/idtools/utils_unix.go +++ b/vendor/go.podman.io/storage/pkg/idtools/utils_unix.go @@ -6,7 +6,6 @@ import ( "fmt" "os/exec" "path/filepath" - "strings" ) func resolveBinary(binname string) (string, error) { @@ -26,7 +25,7 @@ func resolveBinary(binname string) (string, error) { return "", fmt.Errorf("binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) } -func execCmd(cmd, args string) ([]byte, error) { - execCmd := exec.Command(cmd, strings.Split(args, " ")...) +func execCmd(cmd string, args ...string) ([]byte, error) { + execCmd := exec.Command(cmd, args...) return execCmd.CombinedOutput() } diff --git a/vendor/go.podman.io/storage/pkg/ioutils/readers.go b/vendor/go.podman.io/storage/pkg/ioutils/readers.go index 146e1a5ff0..716724e45a 100644 --- a/vendor/go.podman.io/storage/pkg/ioutils/readers.go +++ b/vendor/go.podman.io/storage/pkg/ioutils/readers.go @@ -2,9 +2,9 @@ package ioutils import ( "context" - "crypto/sha256" - "encoding/hex" "io" + + "github.com/opencontainers/go-digest" ) type readCloserWrapper struct { @@ -62,13 +62,13 @@ func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { } } -// HashData returns the sha256 sum of src. -func HashData(src io.Reader) (string, error) { - h := sha256.New() - if _, err := io.Copy(h, src); err != nil { +// HashData returns the digest of src computed with the specified algorithm. +func HashData(src io.Reader, algorithm digest.Algorithm) (string, error) { + digester := algorithm.Digester() + if _, err := io.Copy(digester.Hash(), src); err != nil { return "", err } - return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil + return digester.Digest().String(), nil } // OnEOFReader wraps an io.ReadCloser and a function diff --git a/vendor/go.podman.io/storage/pkg/locker/README.md b/vendor/go.podman.io/storage/pkg/locker/README.md deleted file mode 100644 index ad15e89af1..0000000000 --- a/vendor/go.podman.io/storage/pkg/locker/README.md +++ /dev/null @@ -1,65 +0,0 @@ -Locker -===== - -locker provides a mechanism for creating finer-grained locking to help -free up more global locks to handle other tasks.
- -The implementation looks close to a sync.Mutex, however, the user must provide a -reference to use to refer to the underlying lock when locking and unlocking, -and unlock may generate an error. - -If a lock with a given name does not exist when `Lock` is called, one is -created. -Lock references are automatically cleaned up on `Unlock` if nothing else is -waiting for the lock. - - -## Usage - -```go -package important - -import ( - "sync" - "time" - - "github.com/containers/storage/pkg/locker" -) - -type important struct { - locks *locker.Locker - data map[string]interface{} - mu sync.Mutex -} - -func (i *important) Get(name string) interface{} { - i.locks.Lock(name) - defer i.locks.Unlock(name) - return data[name] -} - -func (i *important) Create(name string, data interface{}) { - i.locks.Lock(name) - defer i.locks.Unlock(name) - - i.createImportant(data) - - s.mu.Lock() - i.data[name] = data - s.mu.Unlock() -} - -func (i *important) createImportant(data interface{}) { - time.Sleep(10 * time.Second) -} -``` - -For functions dealing with a given name, always lock at the beginning of the -function (or before doing anything with the underlying state), this ensures any -other function that is dealing with the same name will block. - -When needing to modify the underlying data, use the global lock to ensure nothing -else is modifying it at the same time. -Since name lock is already in place, no reads will occur while the modification -is being performed. - diff --git a/vendor/go.podman.io/storage/pkg/locker/locker.go b/vendor/go.podman.io/storage/pkg/locker/locker.go deleted file mode 100644 index 0b22ddfab8..0000000000 --- a/vendor/go.podman.io/storage/pkg/locker/locker.go +++ /dev/null @@ -1,112 +0,0 @@ -/* -Package locker provides a mechanism for creating finer-grained locking to help -free up more global locks to handle other tasks. - -The implementation looks close to a sync.Mutex, however the user must provide a -reference to use to refer to the underlying lock when locking and unlocking, -and unlock may generate an error. - -If a lock with a given name does not exist when `Lock` is called, one is -created. -Lock references are automatically cleaned up on `Unlock` if nothing else is -waiting for the lock. -*/ -package locker - -import ( - "errors" - "sync" - "sync/atomic" -) - -// ErrNoSuchLock is returned when the requested lock does not exist -var ErrNoSuchLock = errors.New("no such lock") - -// Locker provides a locking mechanism based on the passed in reference name -type Locker struct { - mu sync.Mutex - locks map[string]*lockCtr -} - -// lockCtr is used by Locker to represent a lock with a given name. -type lockCtr struct { - mu sync.Mutex - // waiters is the number of waiters waiting to acquire the lock - // this is int32 instead of uint32 so we can add `-1` in `dec()` - waiters int32 -} - -// inc increments the number of waiters waiting for the lock -func (l *lockCtr) inc() { - atomic.AddInt32(&l.waiters, 1) -} - -// dec decrements the number of waiters waiting on the lock -func (l *lockCtr) dec() { - atomic.AddInt32(&l.waiters, -1) -} - -// count gets the current number of waiters -func (l *lockCtr) count() int32 { - return atomic.LoadInt32(&l.waiters) -} - -// Lock locks the mutex -func (l *lockCtr) Lock() { - l.mu.Lock() -} - -// Unlock unlocks the mutex -func (l *lockCtr) Unlock() { - l.mu.Unlock() -} - -// New creates a new Locker -func New() *Locker { - return &Locker{ - locks: make(map[string]*lockCtr), - } -} - -// Lock locks a mutex with the given name. 
If it doesn't exist, one is created -func (l *Locker) Lock(name string) { - l.mu.Lock() - if l.locks == nil { - l.locks = make(map[string]*lockCtr) - } - - nameLock, exists := l.locks[name] - if !exists { - nameLock = &lockCtr{} - l.locks[name] = nameLock - } - - // increment the nameLock waiters while inside the main mutex - // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently - nameLock.inc() - l.mu.Unlock() - - // Lock the nameLock outside the main mutex so we don't block other operations - // once locked then we can decrement the number of waiters for this lock - nameLock.Lock() - nameLock.dec() -} - -// Unlock unlocks the mutex with the given name -// If the given lock is not being waited on by any other callers, it is deleted -func (l *Locker) Unlock(name string) error { - l.mu.Lock() - nameLock, exists := l.locks[name] - if !exists { - l.mu.Unlock() - return ErrNoSuchLock - } - - if nameLock.count() == 0 { - delete(l.locks, name) - } - nameLock.Unlock() - - l.mu.Unlock() - return nil -} diff --git a/vendor/go.podman.io/storage/pkg/mount/flags.go b/vendor/go.podman.io/storage/pkg/mount/flags.go index 40a229932b..9325e25977 100644 --- a/vendor/go.podman.io/storage/pkg/mount/flags.go +++ b/vendor/go.podman.io/storage/pkg/mount/flags.go @@ -119,7 +119,7 @@ func ParseOptions(options string) (int, string) { data []string ) - for _, o := range strings.Split(options, ",") { + for o := range strings.SplitSeq(options, ",") { // If the option does not exist in the flags table or the flag // is not supported on the platform, // then it is a data value for a specific fs type @@ -139,7 +139,7 @@ func ParseOptions(options string) (int, string) { // ParseTmpfsOptions parse fstab type mount options into flags and data func ParseTmpfsOptions(options string) (int, string, error) { flags, data := ParseOptions(options) - for _, o := range strings.Split(data, ",") { + for o := range strings.SplitSeq(data, ",") { opt, _, _ := strings.Cut(o, "=") if !validFlags[opt] { return 0, "", fmt.Errorf("invalid tmpfs option %q", opt) diff --git a/vendor/go.podman.io/storage/pkg/mount/mounter_freebsd.go b/vendor/go.podman.io/storage/pkg/mount/mounter_freebsd.go index 61d6d1c595..1c99ff4618 100644 --- a/vendor/go.podman.io/storage/pkg/mount/mounter_freebsd.go +++ b/vendor/go.podman.io/storage/pkg/mount/mounter_freebsd.go @@ -33,8 +33,7 @@ func mount(device, target, mType string, flag uintptr, data string) error { options := []string{"fspath", target} if data != "" { - xs := strings.Split(data, ",") - for _, x := range xs { + for x := range strings.SplitSeq(data, ",") { if x == "bind" { isNullFS = true continue diff --git a/vendor/go.podman.io/storage/pkg/parsers/parsers.go b/vendor/go.podman.io/storage/pkg/parsers/parsers.go index 7b20b06287..d87358e6e0 100644 --- a/vendor/go.podman.io/storage/pkg/parsers/parsers.go +++ b/vendor/go.podman.io/storage/pkg/parsers/parsers.go @@ -38,10 +38,9 @@ func ParseUintList(val string) (map[int]bool, error) { } availableInts := make(map[int]bool) - split := strings.Split(val, ",") errInvalidFormat := fmt.Errorf("invalid format: %s", val) - for _, r := range split { + for r := range strings.SplitSeq(val, ",") { minS, maxS, ok := strings.Cut(r, "-") if !ok { v, err := strconv.Atoi(r) diff --git a/vendor/go.podman.io/storage/pkg/system/xattrs_darwin.go b/vendor/go.podman.io/storage/pkg/system/xattrs_darwin.go index 27ada2083e..d574e9e61c 100644 --- a/vendor/go.podman.io/storage/pkg/system/xattrs_darwin.go +++ 
b/vendor/go.podman.io/storage/pkg/system/xattrs_darwin.go @@ -74,7 +74,7 @@ func Llistxattr(path string) ([]string, error) { } var attrs []string - for _, token := range bytes.Split(dest[:sz], []byte{0}) { + for token := range bytes.SplitSeq(dest[:sz], []byte{0}) { if len(token) > 0 { attrs = append(attrs, string(token)) } diff --git a/vendor/go.podman.io/storage/pkg/system/xattrs_linux.go b/vendor/go.podman.io/storage/pkg/system/xattrs_linux.go index 12462cca33..3322707a43 100644 --- a/vendor/go.podman.io/storage/pkg/system/xattrs_linux.go +++ b/vendor/go.podman.io/storage/pkg/system/xattrs_linux.go @@ -77,7 +77,7 @@ func Llistxattr(path string) ([]string, error) { } var attrs []string - for _, token := range bytes.Split(dest[:sz], []byte{0}) { + for token := range bytes.SplitSeq(dest[:sz], []byte{0}) { if len(token) > 0 { attrs = append(attrs, string(token)) } diff --git a/vendor/go.podman.io/storage/storage.conf b/vendor/go.podman.io/storage/storage.conf index 2fff0cecf2..723737cec1 100644 --- a/vendor/go.podman.io/storage/storage.conf +++ b/vendor/go.podman.io/storage/storage.conf @@ -49,6 +49,11 @@ graphroot = "/var/lib/containers/storage" [storage.options] # Storage options to be passed to underlying storage drivers +# DigestType specifies the hash algorithm to use for content addressing. +# Supported values are: "sha256", "sha512" +# Default is "sha256" +# digest_type = "sha256" + # AdditionalImageStores is used to pass paths to additional Read/Only image stores # Must be comma separated list. additionalimagestores = [ diff --git a/vendor/go.podman.io/storage/store.go b/vendor/go.podman.io/storage/store.go index fa34f4eecc..85733767c7 100644 --- a/vendor/go.podman.io/storage/store.go +++ b/vendor/go.podman.io/storage/store.go @@ -175,6 +175,7 @@ const ( DedupHashCRC = dedup.DedupHashCRC DedupHashFileSize = dedup.DedupHashFileSize DedupHashSHA256 = dedup.DedupHashSHA256 + DedupHashSHA512 = dedup.DedupHashSHA512 ) type ( @@ -203,6 +204,15 @@ type Store interface { UIDMap() []idtools.IDMap GIDMap() []idtools.IDMap + // GetDigestType returns the configured digest type for the store. + GetDigestType() string + + // SetDigestType temporarily sets the digest type for the store. + SetDigestType(digestType string) + + // GetDigestAlgorithm returns the digest algorithm based on the configured digest type. + GetDigestAlgorithm() digest.Algorithm + // GraphDriver obtains and returns a handle to the graph Driver object used // by the Store. GraphDriver() (drivers.Driver, error) @@ -774,6 +784,7 @@ type store struct { digestLockRoot string disableVolatile bool transientStore bool + digestType string // The following fields can only be accessed with graphLock held. 
graphLockLastWrite lockfile.LastWrite @@ -905,6 +916,7 @@ func GetStore(options types.StoreOptions) (Store, error) { autoNsMaxSize: autoNsMaxSize, disableVolatile: options.DisableVolatile, transientStore: options.TransientStore, + digestType: options.DigestType, additionalUIDs: nil, additionalGIDs: nil, @@ -987,7 +999,7 @@ func (s *store) load() error { if err := os.MkdirAll(gipath, 0o700); err != nil { return err } - imageStore, err := newImageStore(gipath) + imageStore, err := newImageStore(gipath, s.digestType) if err != nil { return err } @@ -1004,7 +1016,7 @@ func (s *store) load() error { return err } - rcs, err := newContainerStore(gcpath, rcpath, s.transientStore) + rcs, err := newContainerStore(gcpath, rcpath, s.transientStore, s.digestType) if err != nil { return err } @@ -1021,14 +1033,14 @@ func (s *store) load() error { var ris roImageStore // both the graphdriver and the imagestore must be used read-write. if store == s.imageStoreDir || store == s.graphRoot { - imageStore, err := newImageStore(gipath) + imageStore, err := newImageStore(gipath, s.digestType) if err != nil { return err } s.rwImageStores = append(s.rwImageStores, imageStore) ris = imageStore } else { - ris, err = newROImageStore(gipath) + ris, err = newROImageStore(gipath, s.digestType) if err != nil { if errors.Is(err, syscall.EROFS) { logrus.Debugf("Ignoring creation of lockfiles on read-only file systems %q, %v", gipath, err) @@ -1178,7 +1190,7 @@ func (s *store) getROLayerStoresLocked() ([]roLayerStore, error) { for _, store := range s.graphDriver.AdditionalImageStores() { glpath := filepath.Join(store, driverPrefix+"layers") - rls, err := newROLayerStore(rlpath, glpath, s.graphDriver) + rls, err := newROLayerStore(rlpath, glpath, s.graphDriver, s) if err != nil { return nil, err } @@ -3496,7 +3508,6 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) { return struct{}{}, true, err } for _, image := range imageList { - image := image if image.TopLayer == layer.ID || stringutils.InSlice(image.MappedTopLayers, layer.ID) { images = append(images, &image) } @@ -3991,3 +4002,33 @@ func (s *store) Dedup(req DedupArgs) (drivers.DedupResult, error) { return rlstore.dedup(r) }) } + +func (s *store) GetDigestType() string { + if s.digestType == "" { + return "sha256" // default value + } + return s.digestType +} + +// SetDigestType temporarily sets the digest type for this store. +// This is used for build operations that need a specific digest algorithm. 
+func (s *store) SetDigestType(digestType string) { + s.digestType = digestType +} + +// GetDigestAlgorithm returns the digest algorithm based on the configured digest type +func (s *store) GetDigestAlgorithm() digest.Algorithm { + return getDigestAlgorithmFromType(s.GetDigestType()) +} + +// getDigestAlgorithmFromType returns the digest algorithm for a given digest type string +func getDigestAlgorithmFromType(digestType string) digest.Algorithm { + switch digestType { + case "sha512": + return digest.SHA512 + case "sha256": + return digest.SHA256 + default: + return digest.Canonical + } +} diff --git a/vendor/go.podman.io/storage/types/options.go b/vendor/go.podman.io/storage/types/options.go index 2c2be5c22d..9f37c0f578 100644 --- a/vendor/go.podman.io/storage/types/options.go +++ b/vendor/go.podman.io/storage/types/options.go @@ -5,6 +5,7 @@ import ( "fmt" "os" "path/filepath" + "slices" "strings" "sync" "time" @@ -269,6 +270,10 @@ type StoreOptions struct { DisableVolatile bool `json:"disable-volatile,omitempty"` // If transient, don't persist containers over boot (stores db in runroot) TransientStore bool `json:"transient_store,omitempty"` + // DigestType specifies the hash algorithm to use for content addressing. + // Supported values are: "sha256", "sha512" + // Default is "sha256" + DigestType string `json:"digest_type,omitempty"` } // isRootlessDriver returns true if the given storage driver is valid for containers running as non root @@ -362,7 +367,7 @@ func getRootlessStorageOpts(systemOpts StoreOptions) (StoreOptions, error) { } if os.Getenv("STORAGE_OPTS") != "" { - opts.GraphDriverOptions = append(opts.GraphDriverOptions, strings.Split(os.Getenv("STORAGE_OPTS"), ",")...) + opts.GraphDriverOptions = slices.AppendSeq(opts.GraphDriverOptions, strings.SplitSeq(os.Getenv("STORAGE_OPTS"), ",")) } return opts, nil @@ -493,6 +498,12 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro storeOptions.DisableVolatile = config.Storage.Options.DisableVolatile storeOptions.TransientStore = config.Storage.TransientStore + if config.Storage.Options.DigestType != "" { + storeOptions.DigestType = config.Storage.Options.DigestType + } else { + storeOptions.DigestType = "sha256" // default value + } + storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, cfg.GetGraphDriverOptions(storeOptions.GraphDriverName, config.Storage.Options)...) if opts, ok := os.LookupEnv("STORAGE_OPTS"); ok { diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go index 8b462f3dfe..0b789e2c5e 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
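Editor's note: the store.go and types/options.go hunks above are where the storage.conf digest_type value becomes a go-digest Algorithm. The following standalone sketch is not part of the patch; digestAlgorithmFromType merely mirrors the patch's getDigestAlgorithmFromType helper (unknown or empty values fall back to digest.Canonical, i.e. sha256), and everything else is the public github.com/opencontainers/go-digest API, used the same way the patch replaces hard-coded digest.Canonical call sites.

package main

import (
	_ "crypto/sha256" // ensure sha256 is registered with go-digest
	_ "crypto/sha512" // ensure sha384/sha512 are registered with go-digest
	"fmt"

	"github.com/opencontainers/go-digest"
)

// digestAlgorithmFromType mirrors the patch's getDigestAlgorithmFromType:
// map the storage.conf "digest_type" string onto a digest.Algorithm,
// defaulting to digest.Canonical (sha256) for anything unrecognized.
func digestAlgorithmFromType(digestType string) digest.Algorithm {
	switch digestType {
	case "sha512":
		return digest.SHA512
	case "sha256":
		return digest.SHA256
	default:
		return digest.Canonical
	}
}

func main() {
	data := []byte("example manifest bytes")

	for _, t := range []string{"", "sha256", "sha512"} {
		algo := digestAlgorithmFromType(t)

		// One-shot hashing, as used for image big-data items.
		fmt.Printf("digest_type=%q -> %s\n", t, algo.FromBytes(data))

		// Streaming hashing, as used for layer diffs and TOC manifests.
		digester := algo.Digester()
		if _, err := digester.Hash().Write(data); err != nil {
			panic(err)
		}
		fmt.Println("  streaming:", digester.Digest())
	}
}
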
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index db7806cb99..f840481726 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go index 08505ba3fe..5d583b8660 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go index a462e7d013..53e9dd1e99 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go index c93b4f5248..d30fcee4ce 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go index a1c543a948..175974a869 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go index 2b54db3045..b8c4aa71f2 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go index 498020e33c..a69c1d4734 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/modules.txt b/vendor/modules.txt index cbf1019431..e425127569 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -75,11 +75,11 @@ github.com/containernetworking/cni/pkg/types/create github.com/containernetworking/cni/pkg/types/internal github.com/containernetworking/cni/pkg/utils github.com/containernetworking/cni/pkg/version -# github.com/containernetworking/plugins v1.7.1 -## explicit; go 1.23.0 +# github.com/containernetworking/plugins v1.8.0 +## explicit; go 1.24.2 github.com/containernetworking/plugins/pkg/ns -# github.com/containers/buildah v1.41.1-0.20250829135344-3367a9bc2c9f -## explicit; go 1.23.3 +# github.com/containers/buildah v1.41.1-0.20250829135344-3367a9bc2c9f => /home/lsm5/repositories/containers/buildah +## explicit; go 1.24.2 github.com/containers/buildah github.com/containers/buildah/bind github.com/containers/buildah/chroot @@ -236,9 +236,7 @@ github.com/docker/docker/api/types/time github.com/docker/docker/api/types/versions github.com/docker/docker/api/types/volume github.com/docker/docker/client -github.com/docker/docker/pkg/archive github.com/docker/docker/pkg/homedir -github.com/docker/docker/pkg/idtools github.com/docker/docker/pkg/jsonmessage github.com/docker/docker/pkg/meminfo github.com/docker/docker/pkg/namesgenerator @@ -274,8 +272,8 @@ github.com/felixge/httpsnoop ## explicit; go 1.17 github.com/fsnotify/fsnotify github.com/fsnotify/fsnotify/internal -# github.com/fsouza/go-dockerclient v1.12.1 -## explicit; go 1.23 +# github.com/fsouza/go-dockerclient v1.12.2 +## explicit; go 1.24.0 github.com/fsouza/go-dockerclient # github.com/go-jose/go-jose/v4 v4.0.5 ## explicit; go 1.21 @@ -312,16 +310,16 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/go-containerregistry v0.20.3 -## explicit; go 1.23.0 +# github.com/google/go-containerregistry v0.20.6 +## explicit; go 1.24 github.com/google/go-containerregistry/pkg/name github.com/google/go-containerregistry/pkg/v1 github.com/google/go-containerregistry/pkg/v1/types # github.com/google/go-intervals v0.0.2 ## explicit; go 1.12 github.com/google/go-intervals/intervalset -# github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 -## explicit; go 1.23 +# github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 +## explicit; go 1.24.0 github.com/google/pprof/profile # github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 ## explicit; go 1.13 @@ -428,7 +426,7 @@ github.com/miekg/pkcs11 # github.com/mistifyio/go-zfs/v3 v3.0.1 ## explicit; go 1.14 github.com/mistifyio/go-zfs/v3 -# github.com/moby/buildkit v0.23.2 +# github.com/moby/buildkit v0.24.0 ## explicit; go 1.23.0 github.com/moby/buildkit/frontend/dockerfile/command github.com/moby/buildkit/frontend/dockerfile/parser @@ -534,7 +532,7 @@ github.com/opencontainers/go-digest ## explicit; go 1.18 github.com/opencontainers/image-spec/specs-go 
github.com/opencontainers/image-spec/specs-go/v1 -# github.com/opencontainers/runc v1.3.0 +# github.com/opencontainers/runc v1.3.1 ## explicit; go 1.23.0 github.com/opencontainers/runc/libcontainer/apparmor github.com/opencontainers/runc/libcontainer/devices @@ -552,7 +550,7 @@ github.com/opencontainers/runtime-tools/validate/capabilities github.com/opencontainers/selinux/go-selinux github.com/opencontainers/selinux/go-selinux/label github.com/opencontainers/selinux/pkg/pwalkdir -# github.com/openshift/imagebuilder v1.2.16-0.20250828154754-e22ebd3ff511 +# github.com/openshift/imagebuilder v1.2.17 ## explicit; go 1.23.3 github.com/openshift/imagebuilder github.com/openshift/imagebuilder/dockerfile/command @@ -614,8 +612,8 @@ github.com/shirou/gopsutil/v4/internal/common github.com/shirou/gopsutil/v4/mem github.com/shirou/gopsutil/v4/net github.com/shirou/gopsutil/v4/process -# github.com/sigstore/fulcio v1.6.6 -## explicit; go 1.23.3 +# github.com/sigstore/fulcio v1.7.1 +## explicit; go 1.24.0 github.com/sigstore/fulcio/pkg/api github.com/sigstore/fulcio/pkg/certificate # github.com/sigstore/protobuf-specs v0.4.1 @@ -657,8 +655,8 @@ github.com/stefanberger/go-pkcs11uri github.com/stretchr/testify/assert github.com/stretchr/testify/assert/yaml github.com/stretchr/testify/require -# github.com/sylabs/sif/v2 v2.21.1 -## explicit; go 1.23.0 +# github.com/sylabs/sif/v2 v2.22.0 +## explicit; go 1.24.0 github.com/sylabs/sif/v2/pkg/sif # github.com/tchap/go-patricia/v2 v2.3.3 ## explicit; go 1.16 @@ -712,38 +710,37 @@ go.etcd.io/bbolt/internal/freelist ## explicit; go 1.22.0 go.opentelemetry.io/auto/sdk go.opentelemetry.io/auto/sdk/internal/telemetry -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 +## explicit; go 1.23.0 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.35.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/otel v1.36.0 +## explicit; go 1.23.0 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute +go.opentelemetry.io/otel/attribute/internal go.opentelemetry.io/otel/baggage go.opentelemetry.io/otel/codes -go.opentelemetry.io/otel/internal -go.opentelemetry.io/otel/internal/attribute go.opentelemetry.io/otel/internal/baggage go.opentelemetry.io/otel/internal/global go.opentelemetry.io/otel/propagation go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.26.0 -# go.opentelemetry.io/otel/metric v1.35.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/otel/metric v1.36.0 +## explicit; go 1.23.0 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/trace v1.35.0 -## explicit; go 1.22.0 +# go.opentelemetry.io/otel/trace v1.36.0 +## explicit; go 1.23.0 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded go.opentelemetry.io/otel/trace/internal/telemetry go.opentelemetry.io/otel/trace/noop -# go.podman.io/common v0.65.0 -## explicit; go 1.23.3 +# go.podman.io/common v0.65.0 => /home/lsm5/repositories/containers/container-libs/common/ +## explicit; go 1.24.2 go.podman.io/common/internal go.podman.io/common/internal/attributedstring go.podman.io/common/libimage @@ 
-809,8 +806,8 @@ go.podman.io/common/pkg/umask go.podman.io/common/pkg/util go.podman.io/common/pkg/version go.podman.io/common/version -# go.podman.io/image/v5 v5.37.0 -## explicit; go 1.23.3 +# go.podman.io/image/v5 v5.37.0 => /home/lsm5/repositories/containers/container-libs/image +## explicit; go 1.24.0 go.podman.io/image/v5/copy go.podman.io/image/v5/directory go.podman.io/image/v5/directory/explicitfilepath @@ -882,11 +879,10 @@ go.podman.io/image/v5/transports go.podman.io/image/v5/transports/alltransports go.podman.io/image/v5/types go.podman.io/image/v5/version -# go.podman.io/storage v1.60.0 -## explicit; go 1.23.3 +# go.podman.io/storage v1.60.0 => /home/lsm5/repositories/containers/container-libs/storage +## explicit; go 1.24.0 go.podman.io/storage go.podman.io/storage/drivers -go.podman.io/storage/drivers/aufs go.podman.io/storage/drivers/btrfs go.podman.io/storage/drivers/copy go.podman.io/storage/drivers/overlay @@ -916,7 +912,6 @@ go.podman.io/storage/pkg/homedir go.podman.io/storage/pkg/idmap go.podman.io/storage/pkg/idtools go.podman.io/storage/pkg/ioutils -go.podman.io/storage/pkg/locker go.podman.io/storage/pkg/lockfile go.podman.io/storage/pkg/longpath go.podman.io/storage/pkg/loopback @@ -994,8 +989,8 @@ golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace -# golang.org/x/oauth2 v0.30.0 -## explicit; go 1.23.0 +# golang.org/x/oauth2 v0.31.0 +## explicit; go 1.24.0 golang.org/x/oauth2 golang.org/x/oauth2/internal # golang.org/x/sync v0.17.0 @@ -1043,11 +1038,11 @@ golang.org/x/time/rate golang.org/x/tools/cover golang.org/x/tools/go/ast/edge golang.org/x/tools/go/ast/inspector -# google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb +# google.golang.org/genproto/googleapis/api v0.0.0-20250414145226-207652e42e2e ## explicit; go 1.23.0 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations -# google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e ## explicit; go 1.23.0 google.golang.org/genproto/googleapis/rpc/status # google.golang.org/grpc v1.72.2 @@ -1172,3 +1167,7 @@ tags.cncf.io/container-device-interface/pkg/parser # tags.cncf.io/container-device-interface/specs-go v1.0.0 ## explicit; go 1.19 tags.cncf.io/container-device-interface/specs-go +# go.podman.io/common => /home/lsm5/repositories/containers/container-libs/common/ +# go.podman.io/image/v5 => /home/lsm5/repositories/containers/container-libs/image +# go.podman.io/storage => /home/lsm5/repositories/containers/container-libs/storage +# github.com/containers/buildah => /home/lsm5/repositories/containers/buildah
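For reference, a hedged sketch of the go.mod replace block implied by the trailing modules.txt markers above, assuming the same local checkout paths recorded there (these point at in-development local copies and are what causes vendor/modules.txt to carry the `# module => path` lines):

    replace (
    	github.com/containers/buildah => /home/lsm5/repositories/containers/buildah
    	go.podman.io/common => /home/lsm5/repositories/containers/container-libs/common/
    	go.podman.io/image/v5 => /home/lsm5/repositories/containers/container-libs/image
    	go.podman.io/storage => /home/lsm5/repositories/containers/container-libs/storage
    )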