diff --git a/build/opt.go b/build/opt.go
index ed9ca5a02ed3..62f397deb89e 100644
--- a/build/opt.go
+++ b/build/opt.go
@@ -65,6 +65,30 @@ var sendGitQueryAsInput = sync.OnceValue(func() bool {
return false
})
+// defaultPolicyEnabled reports whether the builtin default source policy is
+// enabled via the BUILDX_DEFAULT_POLICY environment variable. It is opt-in
+// for now; a future release may flip the default to on.
+var defaultPolicyEnabled = sync.OnceValue(func() bool {
+ if v, ok := os.LookupEnv("BUILDX_DEFAULT_POLICY"); ok {
+ if vv, err := strconv.ParseBool(v); err == nil {
+ return vv
+ }
+ }
+ return false
+})
+
+// policyExplicitlyDisabled reports whether the user passed `--policy
+// disabled=true`, which suppresses both user-defined and builtin default
+// policies.
+func policyExplicitlyDisabled(configs []buildflags.PolicyConfig) bool {
+ for _, cfg := range configs {
+ if cfg.Disabled {
+ return true
+ }
+ }
+ return false
+}
+
type policyProgressLogger struct {
ch chan *client.SolveStatus
done chan struct{}
@@ -601,10 +625,32 @@ func proxyArgKeyExists(buildArgs map[string]string, key string) bool {
}
func configureSourcePolicy(ctx context.Context, np *noderesolver.ResolvedNode, opt *Options, cfg *confutil.Config, bopts gateway.BuildOpts, so *client.SolveOpt, pw progress.Writer) (defers []func(error), err error) {
+ var callbackOnly []policysession.PolicyCallback
+ var fileConfigs []buildflags.PolicyConfig
+ for _, p := range opt.Policy {
+ if p.Callback != nil && len(p.Files) == 0 {
+ callbackOnly = append(callbackOnly, p.Callback)
+ continue
+ }
+ fileConfigs = append(fileConfigs, p)
+ }
+
+ // Any callback-only entry requires the session policy capability, the
+ // same way a Strict declarative policy does.
+ if len(callbackOnly) > 0 {
+ if bopts.LLBCaps.Supports(pb.CapSourcePolicySession) != nil {
+ return nil, errors.New("session source policy is not supported by the current BuildKit daemon, please upgrade to version v0.27+")
+ }
+ }
+
if opt.Inputs.policy == nil {
- if len(opt.Policy) > 0 {
+ if len(fileConfigs) > 0 {
return nil, errors.New("policy file specified but no policy FS in build context")
}
+ if len(callbackOnly) > 0 {
+ so.SourcePolicyProvider = policysession.NewPolicyProvider(policy.MultiPolicyCallback(callbackOnly...))
+ return nil, nil
+ }
so.SourcePolicyProvider = nil
return nil, nil
}
@@ -620,11 +666,31 @@ func configureSourcePolicy(ctx context.Context, np *noderesolver.ResolvedNode, o
env.Target = opt.Target
env.Labels = opt.Labels
- popts, err := withPolicyConfig(*opt.Inputs.policy, opt.Policy)
+ popts, err := withPolicyConfig(*opt.Inputs.policy, fileConfigs)
if err != nil {
return nil, err
}
+
+ // Prepend the builtin default policy when enabled and not explicitly
+ // disabled. The default policy verifies trust for Docker-managed images
+ // (docker/dockerfile, docker/dockerfile-upstream) that may be implicitly
+ // loaded during a build, and passes through any other source so user
+ // policies retain full control.
+ if defaultPolicyEnabled() && !policyExplicitlyDisabled(fileConfigs) {
+ builtin := policyOpt{
+ Files: []policyFileSpec{{
+ Filename: policy.DefaultPolicyFilename,
+ Data: policy.DefaultPolicyData(),
+ }},
+ }
+ popts = append([]policyOpt{builtin}, popts...)
+ }
+
if len(popts) == 0 {
+ if len(callbackOnly) > 0 {
+ so.SourcePolicyProvider = policysession.NewPolicyProvider(policy.MultiPolicyCallback(callbackOnly...))
+ return nil, nil
+ }
so.SourcePolicyProvider = nil
return nil, nil
}
@@ -704,6 +770,9 @@ func configureSourcePolicy(ctx context.Context, np *noderesolver.ResolvedNode, o
}
}
}
+ // Callback-only policy entries compose as the last (most-strict)
+ // entries, allowing file-based policies to still run first.
+ cbs = append(cbs, callbackOnly...)
so.SourcePolicyProvider = policysession.NewPolicyProvider(policy.MultiPolicyCallback(cbs...))
return defers, nil
}
diff --git a/commands/replay/build.go b/commands/replay/build.go
new file mode 100644
index 000000000000..3e1318b994bc
--- /dev/null
+++ b/commands/replay/build.go
@@ -0,0 +1,270 @@
+package replay
+
+import (
+ "encoding/json"
+
+ "github.com/containerd/platforms"
+ "github.com/docker/buildx/replay"
+ "github.com/docker/buildx/util/buildflags"
+ "github.com/docker/buildx/util/cobrautil/completion"
+ "github.com/docker/cli/cli"
+ "github.com/docker/cli/cli/command"
+ "github.com/moby/buildkit/util/progress/progressui"
+ "github.com/pkg/errors"
+ "github.com/spf13/cobra"
+)
+
+// buildOptions holds the parsed flags for `replay build`.
+type buildOptions struct {
+ commonOptions
+ mode string
+ outputs []string
+ tags []string
+ exportLoad bool
+ exportPush bool
+ dryRun bool
+}
+
+func buildCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
+ var opts buildOptions
+
+ cmd := &cobra.Command{
+ Use: "build [OPTIONS] SUBJECT",
+ Short: "Rebuild an image from provenance and pinned materials",
+ Args: cli.ExactArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ opts.builder = *rootOpts.Builder
+ return runBuild(cmd, dockerCli, &opts, args[0])
+ },
+ ValidArgsFunction: completion.Disable,
+ DisableFlagsInUseLine: true,
+ }
+
+ installCommonFlags(cmd, &opts.commonOptions)
+
+ flags := cmd.Flags()
+ flags.StringVar(&opts.mode, "replay-mode", "materials", `Replay mode ("materials" | "frontend" | "llb")`)
+ flags.StringArrayVarP(&opts.outputs, "output", "o", nil, `Output destination (format: "type=local,dest=path")`)
+ flags.StringArrayVarP(&opts.tags, "tag", "t", nil, `Image identifier (format: "[registry/]repository[:tag]")`)
+ flags.BoolVar(&opts.exportLoad, "load", false, `Shorthand for "--output=type=docker"`)
+ flags.BoolVar(&opts.exportPush, "push", false, `Shorthand for "--output=type=registry,unpack=false"`)
+ flags.BoolVar(&opts.dryRun, "dry-run", false, "Print a JSON plan of the replay without solving or exporting")
+
+ return cmd
+}
+
+// runBuild wires the CLI flags to the replay.Build entry point.
+func runBuild(cmd *cobra.Command, dockerCli command.Cli, opts *buildOptions, input string) error {
+ ctx := cmd.Context()
+
+ mode := replay.BuildMode(opts.mode)
+ switch mode {
+ case replay.BuildModeMaterials, replay.BuildModeFrontend:
+ // ok
+ case replay.BuildModeLLB:
+ // Still stubbed in this slice.
+ return replay.ErrNotImplemented("llb replay mode")
+ default:
+ return errors.Errorf("unknown --replay-mode %q", opts.mode)
+ }
+
+ // Materials resolver.
+ resolver, err := replay.NewMaterialsResolver(opts.materials)
+ if err != nil {
+ return err
+ }
+
+ // Parse flags.
+ secretSpecs, err := buildflags.ParseSecretSpecs(opts.secrets)
+ if err != nil {
+ return errors.Wrap(err, "parse --secret")
+ }
+ sshSpecs, err := buildflags.ParseSSHSpecs(opts.ssh)
+ if err != nil {
+ return errors.Wrap(err, "parse --ssh")
+ }
+ exportSpecs, err := buildflags.ParseExports(opts.outputs)
+ if err != nil {
+ return errors.Wrap(err, "parse --output")
+ }
+ exportSpecs = applyExportShorthands(exportSpecs, opts.exportPush, opts.exportLoad)
+
+ // Subject + predicate.
+ subjects, err := replay.LoadSubjects(ctx, dockerCli, opts.builder, input)
+ if err != nil {
+ return err
+ }
+
+ subjects, err = filterSubjectsByPlatform(subjects, opts.platforms)
+ if err != nil {
+ return err
+ }
+ if len(subjects) == 0 {
+ return errors.New("no subjects matched the --platform filter")
+ }
+
+ targets := make([]replay.Target, 0, len(subjects))
+ for _, s := range subjects {
+ pred, err := s.Predicate(ctx)
+ if err != nil {
+ return err
+ }
+ targets = append(targets, replay.Target{Subject: s, Predicate: pred})
+ }
+
+ req := &replay.BuildRequest{
+ Targets: targets,
+ Mode: mode,
+ Materials: resolver,
+ NetworkMode: opts.network,
+ Secrets: secretSpecs,
+ SSH: sshSpecs,
+ Exports: exportSpecs,
+ Tags: opts.tags,
+ Progress: progressui.DisplayMode(opts.progress),
+ }
+
+ if opts.dryRun {
+ plan, err := replay.MakeBuildPlan(req)
+ if err != nil {
+ return err
+ }
+ enc := json.NewEncoder(cmd.OutOrStdout())
+ enc.SetIndent("", " ")
+ return enc.Encode(plan)
+ }
+ return replay.Build(ctx, dockerCli, opts.builder, req)
+}
+
+// applyExportShorthands mirrors the --push / --load handling in
+// commands/build.go. --push sets push=true (+ unpack=false) on any
+// existing type=image export, or appends one; --load appends a
+// type=docker export unless an equivalent one is already present.
+func applyExportShorthands(exports []*buildflags.ExportEntry, push, load bool) []*buildflags.ExportEntry {
+ if push {
+ var used bool
+ for _, e := range exports {
+ if e.Type == "image" {
+ if e.Attrs == nil {
+ e.Attrs = map[string]string{}
+ }
+ e.Attrs["push"] = "true"
+ if _, ok := e.Attrs["unpack"]; !ok {
+ e.Attrs["unpack"] = "false"
+ }
+ used = true
+ }
+ }
+ if !used {
+ exports = append(exports, &buildflags.ExportEntry{
+ Type: "image",
+ Attrs: map[string]string{"push": "true", "unpack": "false"},
+ })
+ }
+ }
+ if load {
+ var used bool
+ for _, e := range exports {
+ if e.Type == "docker" {
+ if _, ok := e.Attrs["dest"]; !ok {
+ used = true
+ break
+ }
+ }
+ }
+ if !used {
+ exports = append(exports, &buildflags.ExportEntry{
+ Type: "docker",
+ Attrs: map[string]string{},
+ })
+ }
+ }
+ return exports
+}
+
+// filterSubjectsByPlatform narrows a subject list to the requested platforms.
+//
+// Contract:
+// - platformFilter == ["all"] keeps every subject.
+// - platformFilter empty defaults to the host's current platform
+// (platforms.DefaultSpec) — replay is single-platform by default.
+// - Otherwise each entry is matched through platforms.Only so that a
+// request for "linux/arm64/v8" accepts a subject tagged "linux/arm64"
+// with an unspecified variant, and vice versa.
+//
+// An explicit --platform that does not match any subject is an error.
+// Subjects with a nil Descriptor.Platform (single-platform images that
+// have no per-platform index) are kept unconditionally.
+func filterSubjectsByPlatform(subjects []*replay.Subject, platformFilter []string) ([]*replay.Subject, error) {
+ if len(platformFilter) == 1 && platformFilter[0] == "all" {
+ return subjects, nil
+ }
+ explicit := len(platformFilter) > 0
+ if !explicit {
+ platformFilter = []string{platforms.Format(platforms.DefaultSpec())}
+ }
+
+ wantNames := make([]string, 0, len(platformFilter))
+ matchers := make([]platforms.MatchComparer, 0, len(platformFilter))
+ for _, p := range platformFilter {
+ pp, err := platforms.Parse(p)
+ if err != nil {
+ return nil, errors.Wrapf(err, "invalid --platform %q", p)
+ }
+ matchers = append(matchers, platforms.Only(pp))
+ wantNames = append(wantNames, platforms.Format(pp))
+ }
+
+ // For each requested platform pick the single best-matching subject —
+ // Only() is intentionally permissive (e.g. arm64/v8 matches arm/v5–v7
+ // because an arm64 host can run arm32) and we want just the closest
+ // platform for the replay.
+ matchedAny := make([]bool, len(matchers))
+ chosen := map[int]struct{}{}
+ for i, m := range matchers {
+ best := -1
+ for j, s := range subjects {
+ if s.Descriptor.Platform == nil {
+ continue
+ }
+ sp := *s.Descriptor.Platform
+ if !m.Match(sp) {
+ continue
+ }
+ if best < 0 || m.Less(sp, *subjects[best].Descriptor.Platform) {
+ best = j
+ }
+ }
+ if best >= 0 {
+ chosen[best] = struct{}{}
+ matchedAny[i] = true
+ }
+ }
+
+ var out []*replay.Subject
+ for j, s := range subjects {
+ if s.Descriptor.Platform == nil {
+ out = append(out, s)
+ continue
+ }
+ if _, ok := chosen[j]; ok {
+ out = append(out, s)
+ }
+ }
+
+ if explicit {
+ var missing []string
+ for i, w := range wantNames {
+ if !matchedAny[i] {
+ missing = append(missing, w)
+ }
+ }
+ if len(missing) > 0 {
+ return nil, errors.Errorf("requested platform(s) not present: %v", missing)
+ }
+ }
+ if len(out) == 0 {
+ return nil, errors.Errorf("no subjects for platform %v — pass --platform
or --platform all", wantNames)
+ }
+ return out, nil
+}
diff --git a/commands/replay/build_test.go b/commands/replay/build_test.go
new file mode 100644
index 000000000000..ded0be1761b9
--- /dev/null
+++ b/commands/replay/build_test.go
@@ -0,0 +1,44 @@
+package replay
+
+import (
+ "testing"
+
+ "github.com/containerd/platforms"
+ "github.com/docker/buildx/replay"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/require"
+)
+
+func TestFilterSubjectsByPlatform(t *testing.T) {
+ amd := &replay.Subject{Descriptor: ocispecs.Descriptor{Platform: &ocispecs.Platform{OS: "linux", Architecture: "amd64"}}}
+ arm := &replay.Subject{Descriptor: ocispecs.Descriptor{Platform: &ocispecs.Platform{OS: "linux", Architecture: "arm64"}}}
+ subjects := []*replay.Subject{amd, arm}
+
+ // "all" keeps every subject.
+ out, err := filterSubjectsByPlatform(subjects, []string{"all"})
+ require.NoError(t, err)
+ require.Len(t, out, 2)
+
+ // An empty filter collapses to the host's default platform, which
+ // must match exactly one of our fake subjects.
+ out, err = filterSubjectsByPlatform(subjects, nil)
+ require.NoError(t, err)
+ require.Len(t, out, 1)
+ hostArch := platforms.DefaultSpec().Architecture
+ require.Equal(t, hostArch, out[0].Descriptor.Platform.Architecture)
+
+ // Explicit match on an alternate platform.
+ other := "arm64"
+ if hostArch == "arm64" {
+ other = "amd64"
+ }
+ out, err = filterSubjectsByPlatform(subjects, []string{"linux/" + other})
+ require.NoError(t, err)
+ require.Len(t, out, 1)
+ require.Equal(t, other, out[0].Descriptor.Platform.Architecture)
+
+ // Explicit platform with no matching subject is an error.
+ _, err = filterSubjectsByPlatform([]*replay.Subject{amd}, []string{"linux/arm64"})
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "not present")
+}
diff --git a/commands/replay/root.go b/commands/replay/root.go
new file mode 100644
index 000000000000..fa3df72bdca4
--- /dev/null
+++ b/commands/replay/root.go
@@ -0,0 +1,60 @@
+package replay
+
+import (
+ "github.com/docker/buildx/util/cobrautil/completion"
+ "github.com/docker/cli/cli/command"
+ "github.com/spf13/cobra"
+)
+
+// RootOptions mirrors the shape used by history/policy/imagetools
+// (see commands/history/root.go).
+type RootOptions struct {
+ Builder *string
+}
+
+// commonOptions is the shared flag set for every replay subcommand.
+type commonOptions struct {
+ builder string
+ materials []string
+ network string
+ secrets []string
+ ssh []string
+ platforms []string
+ progress string
+}
+
+// installCommonFlags registers the shared flag set on the supplied
+// subcommand. Each subcommand owns its own flag registration so that
+// `--help` on any leaf prints the full contract.
+func installCommonFlags(cmd *cobra.Command, opts *commonOptions) {
+ flags := cmd.Flags()
+
+ flags.StringArrayVar(&opts.materials, "materials", nil, `Materials store (repeatable; format: "provenance" | "registry://[" | "oci-layout://[:]" | "" | "=")`)
+ flags.StringVar(&opts.network, "network", "default", `Network mode for RUN instructions ("default" | "none")`)
+ flags.StringArrayVar(&opts.secrets, "secret", nil, `Secret to expose to the replayed build (format: "id=mysecret[,src=/local/secret]")`)
+ flags.StringArrayVar(&opts.ssh, "ssh", nil, `SSH agent socket or keys to expose (format: "default|[=|[,]]")`)
+ flags.StringArrayVar(&opts.platforms, "platform", nil, `Subjects to replay (defaults to the current host platform; "all" keeps every platform)`)
+ flags.StringVar(&opts.progress, "progress", "auto", `Set type of progress output ("auto" | "plain" | "tty" | "quiet" | "rawjson")`)
+}
+
+// RootCmd returns the `buildx replay` root command. The rootcmd argument is
+// the buildx root; its RunE is reused when no subcommand is given, matching
+// the pattern in commands/history/root.go.
+func RootCmd(rootcmd *cobra.Command, dockerCli command.Cli, opts RootOptions) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "replay",
+ Short: "Replay a build from its provenance",
+ ValidArgsFunction: completion.Disable,
+ RunE: rootcmd.RunE,
+
+ DisableFlagsInUseLine: true,
+ }
+
+ cmd.AddCommand(
+ buildCmd(dockerCli, opts),
+ snapshotCmd(dockerCli, opts),
+ verifyCmd(dockerCli, opts),
+ )
+
+ return cmd
+}
diff --git a/commands/replay/snapshot.go b/commands/replay/snapshot.go
new file mode 100644
index 000000000000..10f92e4fadd9
--- /dev/null
+++ b/commands/replay/snapshot.go
@@ -0,0 +1,189 @@
+package replay
+
+import (
+ "encoding/json"
+ "os"
+
+ "github.com/docker/buildx/replay"
+ "github.com/docker/buildx/util/buildflags"
+ "github.com/docker/buildx/util/cobrautil/completion"
+ "github.com/docker/buildx/util/progress"
+ "github.com/docker/cli/cli"
+ "github.com/docker/cli/cli/command"
+ "github.com/moby/buildkit/util/progress/progressui"
+ "github.com/pkg/errors"
+ "github.com/spf13/cobra"
+ "golang.org/x/term"
+)
+
+// snapshotOptions holds the parsed flags for `replay snapshot`.
+type snapshotOptions struct {
+ commonOptions
+ includeMaterials bool
+ outputs []string
+ dryRun bool
+}
+
+func snapshotCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
+ var opts snapshotOptions
+
+ cmd := &cobra.Command{
+ Use: "snapshot [OPTIONS] SUBJECT",
+ Short: "Export replay inputs for a subject as a reusable materials store",
+ Args: cli.ExactArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ opts.builder = *rootOpts.Builder
+ return runSnapshot(cmd, dockerCli, &opts, args[0])
+ },
+ ValidArgsFunction: completion.Disable,
+ DisableFlagsInUseLine: true,
+ }
+
+ installCommonFlags(cmd, &opts.commonOptions)
+
+ flags := cmd.Flags()
+ flags.BoolVar(&opts.includeMaterials, "include-materials", true, "Include material content in the snapshot")
+ flags.StringArrayVarP(&opts.outputs, "output", "o", nil, `Output destination (default: "-" — oci tar to stdout; bare "" writes an oci-layout directory; "type=oci,dest=X[,tar=true|false]"; "type=registry,name=][")`)
+ flags.BoolVar(&opts.dryRun, "dry-run", false, "Print a JSON plan of the snapshot without writing output")
+
+ return cmd
+}
+
+// runSnapshot wires the CLI flags to the replay.Snapshot entry point.
+func runSnapshot(cmd *cobra.Command, dockerCli command.Cli, opts *snapshotOptions, input string) error {
+ ctx := cmd.Context()
+
+ // Resolve --output → a normalized snapshot export spec. Dry-run does not
+ // write anything so we skip the TTY refusal and terminal checks there.
+ var exportSpec *buildflags.ExportEntry
+ if !opts.dryRun {
+ spec, err := resolveSnapshotOutput(opts.outputs)
+ if err != nil {
+ return err
+ }
+ exportSpec = spec
+ }
+
+ // Materials resolver — used to lookup pre-pinned content when
+ // --materials is supplied.
+ resolver, err := replay.NewMaterialsResolver(opts.materials)
+ if err != nil {
+ return err
+ }
+
+ subjects, err := replay.LoadSubjects(ctx, dockerCli, opts.builder, input)
+ if err != nil {
+ return err
+ }
+
+ subjects, err = filterSubjectsByPlatform(subjects, opts.platforms)
+ if err != nil {
+ return err
+ }
+ if len(subjects) == 0 {
+ return errors.New("no subjects matched the --platform filter")
+ }
+
+ targets := make([]replay.Target, 0, len(subjects))
+ for _, s := range subjects {
+ pred, err := s.Predicate(ctx)
+ if err != nil {
+ return err
+ }
+ targets = append(targets, replay.Target{Subject: s, Predicate: pred})
+ }
+
+ req := &replay.SnapshotRequest{
+ Targets: targets,
+ IncludeMaterials: opts.includeMaterials,
+ Materials: resolver,
+ Output: exportSpec,
+ }
+
+ // Both real-run and dry-run do the same staging work (dry-run just
+ // skips the final output), so both get a progress printer.
+ printer, err := progress.NewPrinter(ctx, os.Stderr, progressui.DisplayMode(opts.progress))
+ if err != nil {
+ return err
+ }
+ req.Progress = printer
+
+ if opts.dryRun {
+ plan, planErr := replay.MakeSnapshotPlan(ctx, dockerCli, opts.builder, req)
+ // Wait for the progress printer to drain before writing the JSON
+ // plan: in auto/tty mode the printer owns the terminal and its
+ // final redraw otherwise interleaves with stdout.
+ waitErr := printer.Wait()
+ if planErr != nil {
+ return planErr
+ }
+ if waitErr != nil {
+ return waitErr
+ }
+ enc := json.NewEncoder(cmd.OutOrStdout())
+ enc.SetIndent("", " ")
+ return enc.Encode(plan)
+ }
+
+ snapErr := replay.Snapshot(ctx, dockerCli, opts.builder, req)
+ if waitErr := printer.Wait(); snapErr == nil {
+ snapErr = waitErr
+ }
+ return snapErr
+}
+
+// resolveSnapshotOutput turns raw --output values into a normalized
+// ExportEntry with Type ∈ {"oci", "registry"}. The command surface is:
+//
+// (unset) → type=oci, dest=- (stdout tar)
+// -o - → type=oci, dest=- (stdout tar)
+// -o → type=oci, dest=, tar=false (layout dir)
+// -o type=oci,dest=[,tar=...] → oci, defaults to tar=true
+// -o type=registry,name=][ → registry push
+//
+// A TTY on stdout with no --output (or -o -) is refused: writing a
+// multi-megabyte binary tar to a terminal is never what the user wants.
+func resolveSnapshotOutput(outputs []string) (*buildflags.ExportEntry, error) {
+ if len(outputs) > 1 {
+ return nil, errors.Errorf("snapshot: exactly one --output is required (got %d)", len(outputs))
+ }
+
+ var out buildflags.ExportEntry
+ if len(outputs) == 0 {
+ out = buildflags.ExportEntry{Type: "oci", Destination: "-"}
+ } else {
+ parsed, err := buildflags.ParseExports(outputs)
+ if err != nil {
+ return nil, errors.Wrap(err, "parse --output")
+ }
+ if len(parsed) != 1 {
+ return nil, errors.Errorf("snapshot: exactly one --output is required (got %d)", len(parsed))
+ }
+ out = *parsed[0]
+ }
+
+ // buildflags.ParseExports maps a bare "-" to type="tar" and a bare
+ // "" to type="local". Translate both into our oci surface.
+ switch out.Type {
+ case "tar":
+ out.Type = "oci"
+ case "local":
+ // Bare path → oci-layout directory.
+ out.Type = "oci"
+ if out.Attrs == nil {
+ out.Attrs = map[string]string{}
+ }
+ out.Attrs["tar"] = "false"
+ }
+
+ if out.Type == "oci" && out.Destination == "-" {
+ if term.IsTerminal(int(os.Stdout.Fd())) {
+ return nil, errors.New("refusing to write binary snapshot to terminal — set an --output file or directory")
+ }
+ }
+
+ if out.Type != "oci" && out.Type != "registry" {
+ return nil, errors.Errorf("snapshot: unsupported --output type %q (want oci | registry)", out.Type)
+ }
+ return &out, nil
+}
diff --git a/commands/replay/verify.go b/commands/replay/verify.go
new file mode 100644
index 000000000000..1999eb7e2fd8
--- /dev/null
+++ b/commands/replay/verify.go
@@ -0,0 +1,122 @@
+package replay
+
+import (
+ "github.com/docker/buildx/replay"
+ "github.com/docker/buildx/util/buildflags"
+ "github.com/docker/buildx/util/cobrautil/completion"
+ "github.com/docker/cli/cli"
+ "github.com/docker/cli/cli/command"
+ "github.com/pkg/errors"
+ "github.com/spf13/cobra"
+)
+
+// verifyOptions holds the parsed flags for `replay verify`.
+type verifyOptions struct {
+ commonOptions
+ compare string
+ outputs []string
+}
+
+func verifyCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
+ var opts verifyOptions
+
+ cmd := &cobra.Command{
+ Use: "verify [OPTIONS] SUBJECT",
+ Short: "Replay a subject and compare the result against the original artifact",
+ Args: cli.ExactArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ opts.builder = *rootOpts.Builder
+ return runVerify(cmd, dockerCli, &opts, args[0])
+ },
+ ValidArgsFunction: completion.Disable,
+ DisableFlagsInUseLine: true,
+ }
+
+ installCommonFlags(cmd, &opts.commonOptions)
+
+ flags := cmd.Flags()
+ flags.StringVar(&opts.compare, "compare", "digest", `Comparison mode ("digest" | "artifact" | "semantic")`)
+ flags.StringArrayVarP(&opts.outputs, "output", "o", nil, `Output destination for the verification result (VSA) (format: "type=local,dest=path" | "type=oci,dest=file" | "type=attest")`)
+
+ return cmd
+}
+
+// runVerify wires the CLI flags to replay.Verify. Per-subject verification
+// runs on a single platform at a time; when an input is multi-platform the
+// command iterates over the loaded subjects and returns the first non-nil
+// error so the caller's exit code is well-defined.
+func runVerify(cmd *cobra.Command, dockerCli command.Cli, opts *verifyOptions, input string) error {
+ ctx := cmd.Context()
+
+ mode := opts.compare
+ switch mode {
+ case "", replay.CompareModeDigest, replay.CompareModeArtifact:
+ // ok
+ case replay.CompareModeSemantic:
+ // Short-circuit the predicate load: semantic comparison is not
+ // yet implemented, so the user should see the typed
+ // ErrNotImplemented regardless of the subject's shape.
+ return replay.ErrNotImplemented("--compare=semantic")
+ default:
+ return errors.Errorf("unknown --compare %q", opts.compare)
+ }
+
+ // Parse --output (optional).
+ var exportSpec *buildflags.ExportEntry
+ if len(opts.outputs) > 0 {
+ specs, err := buildflags.ParseExports(opts.outputs)
+ if err != nil {
+ return errors.Wrap(err, "parse --output")
+ }
+ if len(specs) != 1 {
+ return errors.Errorf("verify: exactly one --output is required (got %d)", len(specs))
+ }
+ exportSpec = specs[0]
+ }
+
+ resolver, err := replay.NewMaterialsResolver(opts.materials)
+ if err != nil {
+ return err
+ }
+ secretSpecs, err := buildflags.ParseSecretSpecs(opts.secrets)
+ if err != nil {
+ return errors.Wrap(err, "parse --secret")
+ }
+ sshSpecs, err := buildflags.ParseSSHSpecs(opts.ssh)
+ if err != nil {
+ return errors.Wrap(err, "parse --ssh")
+ }
+
+ subjects, err := replay.LoadSubjects(ctx, dockerCli, opts.builder, input)
+ if err != nil {
+ return err
+ }
+ subjects, err = filterSubjectsByPlatform(subjects, opts.platforms)
+ if err != nil {
+ return err
+ }
+ if len(subjects) == 0 {
+ return errors.New("no subjects matched the --platform filter")
+ }
+
+ for _, s := range subjects {
+ pred, err := s.Predicate(ctx)
+ if err != nil {
+ return err
+ }
+ req := &replay.VerifyRequest{
+ Subject: s,
+ Predicate: pred,
+ Mode: mode,
+ Materials: resolver,
+ Network: opts.network,
+ Secrets: secretSpecs,
+ SSH: sshSpecs,
+ Output: exportSpec,
+ }
+ if _, err := replay.Verify(ctx, dockerCli, opts.builder, req); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/commands/root.go b/commands/root.go
index 9082d5aa1107..7532ca2a91f6 100644
--- a/commands/root.go
+++ b/commands/root.go
@@ -8,6 +8,7 @@ import (
historycmd "github.com/docker/buildx/commands/history"
imagetoolscmd "github.com/docker/buildx/commands/imagetools"
policycmd "github.com/docker/buildx/commands/policy"
+ replaycmd "github.com/docker/buildx/commands/replay"
"github.com/docker/buildx/util/cobrautil/completion"
"github.com/docker/buildx/util/confutil"
"github.com/docker/buildx/util/logutil"
@@ -127,6 +128,7 @@ func addCommands(cmd *cobra.Command, opts *rootOptions, dockerCli command.Cli) {
duCmd(dockerCli, opts),
imagetoolscmd.RootCmd(cmd, dockerCli, imagetoolscmd.RootOptions{Builder: &opts.builder}),
historycmd.RootCmd(cmd, dockerCli, historycmd.RootOptions{Builder: &opts.builder}),
+ replaycmd.RootCmd(cmd, dockerCli, replaycmd.RootOptions{Builder: &opts.builder}),
dapCmd(dockerCli, opts),
)
if confutil.IsExperimental() {
diff --git a/docs/reference/buildx.md b/docs/reference/buildx.md
index 0541572d0589..e788e3275f26 100644
--- a/docs/reference/buildx.md
+++ b/docs/reference/buildx.md
@@ -24,6 +24,7 @@ Extended build capabilities with BuildKit
| [`ls`](buildx_ls.md) | List builder instances |
| [`policy`](buildx_policy.md) | Commands for working with build policies |
| [`prune`](buildx_prune.md) | Remove build cache |
+| [`replay`](buildx_replay.md) | Replay a build from its provenance |
| [`rm`](buildx_rm.md) | Remove one or more builder instances |
| [`stop`](buildx_stop.md) | Stop builder instance |
| [`use`](buildx_use.md) | Set the current builder instance |
diff --git a/docs/reference/buildx_replay.md b/docs/reference/buildx_replay.md
new file mode 100644
index 000000000000..838f40453a7f
--- /dev/null
+++ b/docs/reference/buildx_replay.md
@@ -0,0 +1,48 @@
+# docker buildx replay
+
+```text
+docker buildx replay [OPTIONS] COMMAND
+```
+
+
+Replay a build from its provenance
+
+### Subcommands
+
+| Name | Description |
+|:----------------------------------------|:----------------------------------------------------------------------|
+| [`build`](buildx_replay_build.md) | Rebuild an image from provenance and pinned materials |
+| [`snapshot`](buildx_replay_snapshot.md) | Export replay inputs for a subject as a reusable materials store |
+| [`verify`](buildx_replay_verify.md) | Replay a subject and compare the result against the original artifact |
+
+
+### Options
+
+| Name | Type | Default | Description |
+|:----------------|:---------|:--------|:-----------------------------------------|
+| `--builder` | `string` | | Override the configured builder instance |
+| `-D`, `--debug` | `bool` | | Enable debug logging |
+
+
+
+
+## Description
+
+`buildx replay` consumes a build's SLSA v1 provenance attestation and
+reproduces the build with the recorded frontend, attrs, and material pins.
+The feature is entirely client-side: it works against any BuildKit daemon
+that supports the session source-policy capability.
+
+Subjects are accepted in three forms:
+
+- `docker-image://][` or a bare `][` — resolve through the registry.
+- `oci-layout://[:]` — read from a local OCI layout.
+- A local in-toto attestation file (`.intoto.jsonl` or a DSSE envelope).
+
+Multi-platform inputs expand into N subjects, one per child manifest. Each
+subject is replayed independently.
+
+## Related
+
+- [SLSA Provenance v1](https://slsa.dev/provenance/v1)
+- [`docker buildx history`](buildx_history.md) — inspect locally recorded builds
diff --git a/docs/reference/buildx_replay_build.md b/docs/reference/buildx_replay_build.md
new file mode 100644
index 000000000000..cf2e76e97b95
--- /dev/null
+++ b/docs/reference/buildx_replay_build.md
@@ -0,0 +1,61 @@
+# docker buildx replay build
+
+
+Rebuild an image from provenance and pinned materials
+
+### Options
+
+| Name | Type | Default | Description |
+|:-----------------|:--------------|:------------|:--------------------------------------------------------------------------------------------------------------------------------------------------|
+| `--builder` | `string` | | Override the configured builder instance |
+| `-D`, `--debug` | `bool` | | Enable debug logging |
+| `--dry-run` | `bool` | | Print a JSON plan of the replay without solving or exporting |
+| `--load` | `bool` | | Shorthand for `--output=type=docker` |
+| `--materials` | `stringArray` | | Materials store (repeatable; format: `provenance` \| `registry://][` \| `oci-layout://[:]` \| `` \| `=`) |
+| `--network` | `string` | `default` | Network mode for RUN instructions (`default` \| `none`) |
+| `-o`, `--output` | `stringArray` | | Output destination (format: `type=local,dest=path`) |
+| `--platform` | `stringArray` | | Subjects to replay (defaults to the current host platform; `all` keeps every platform) |
+| `--progress` | `string` | `auto` | Set type of progress output (`auto` \| `plain` \| `tty` \| `quiet` \| `rawjson`) |
+| `--push` | `bool` | | Shorthand for `--output=type=registry,unpack=false` |
+| `--replay-mode` | `string` | `materials` | Replay mode (`materials` \| `frontend` \| `llb`) |
+| `--secret` | `stringArray` | | Secret to expose to the replayed build (format: `id=mysecret[,src=/local/secret]`) |
+| `--ssh` | `stringArray` | | SSH agent socket or keys to expose (format: `default\|[=\|[,]]`) |
+| `-t`, `--tag` | `stringArray` | | Image identifier (format: `[registry/]repository[:tag]`) |
+
+
+
+
+## Description
+
+`replay build` reconstructs an image from the provenance attestation attached
+to an existing subject. The default mode (`materials`) enforces strict source
+pinning via BuildKit's session source-policy callback — every resolution
+must match the digest recorded in `resolvedDependencies` or the solve fails.
+
+## Examples
+
+### Replay a registry image and export to an OCI tar
+
+```console
+docker buildx replay build docker-image://example.com/app@sha256:deadbeef \
+ --output=type=oci,dest=replay.oci.tar
+```
+
+### Dry-run a replay to inspect the plan
+
+```console
+docker buildx replay build docker-image://example.com/app@sha256:deadbeef --dry-run | jq
+```
+
+### Use a pre-pinned snapshot as the materials store
+
+```console
+docker buildx replay build docker-image://example.com/app@sha256:deadbeef \
+ --materials=oci-layout:///path/to/snapshot \
+ --output=type=oci,dest=replay.oci.tar
+```
+
+## Exit codes
+
+`replay build` maps typed errors to stable exit codes so CI tooling can
+react deterministically. See `replay` documentation for the full list.
diff --git a/docs/reference/buildx_replay_snapshot.md b/docs/reference/buildx_replay_snapshot.md
new file mode 100644
index 000000000000..0c46c77dbf4d
--- /dev/null
+++ b/docs/reference/buildx_replay_snapshot.md
@@ -0,0 +1,66 @@
+# docker buildx replay snapshot
+
+
+Export replay inputs for a subject as a reusable materials store
+
+### Options
+
+| Name | Type | Default | Description |
+|:----------------------|:--------------|:----------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `--builder` | `string` | | Override the configured builder instance |
+| `-D`, `--debug` | `bool` | | Enable debug logging |
+| `--dry-run` | `bool` | | Print a JSON plan of the snapshot without writing output |
+| `--include-materials` | `bool` | `true` | Include material content in the snapshot |
+| `--materials` | `stringArray` | | Materials store (repeatable; format: `provenance` \| `registry://][` \| `oci-layout://[:]` \| `` \| `=`) |
+| `--network` | `string` | `default` | Network mode for RUN instructions (`default` \| `none`) |
+| `-o`, `--output` | `stringArray` | | Output destination (default: `-` — oci tar to stdout; bare `` writes an oci-layout directory; `type=oci,dest=X[,tar=true\|false]`; `type=registry,name=][`) |
+| `--platform` | `stringArray` | | Subjects to replay (defaults to the current host platform; `all` keeps every platform) |
+| `--progress` | `string` | `auto` | Set type of progress output (`auto` \| `plain` \| `tty` \| `quiet` \| `rawjson`) |
+| `--secret` | `stringArray` | | Secret to expose to the replayed build (format: `id=mysecret[,src=/local/secret]`) |
+| `--ssh` | `stringArray` | | SSH agent socket or keys to expose (format: `default\|[=\|[,]]`) |
+
+
+
+
+## Description
+
+`replay snapshot` packages the provenance predicate, the attestation
+manifest, and every recorded material into a self-contained OCI index that
+can later be used as `--materials=` for `replay build` or
+`replay verify`.
+
+The snapshot is an OCI image-spec 1.1 index:
+
+- `artifactType = application/vnd.docker.buildx.snapshots.v1+json`
+- `subject` points at the original provenance attestation manifest.
+- `manifests[0]` is a materials artifact manifest whose layers hold the
+ http / container-blob materials plus an opaque copy of each image
+ material's root index.
+- Remaining `manifests[]` entries are per-image-material platform
+ manifests.
+
+For multi-platform subjects, `replay snapshot` emits an outer OCI index that
+wraps one per-platform snapshot per architecture.
+
+## Examples
+
+### Local OCI layout
+
+```console
+docker buildx replay snapshot docker-image://example.com/app@sha256:deadbeef \
+ --output=type=local,dest=./my-snapshot
+```
+
+### Push to a registry
+
+```console
+docker buildx replay snapshot docker-image://example.com/app@sha256:deadbeef \
+ --output=type=registry,name=registry.example.com/snapshots/app:latest
+```
+
+### OCI tar
+
+```console
+docker buildx replay snapshot docker-image://example.com/app@sha256:deadbeef \
+ --output=type=oci,dest=./snapshot.oci.tar
+```
diff --git a/docs/reference/buildx_replay_verify.md b/docs/reference/buildx_replay_verify.md
new file mode 100644
index 000000000000..e6fdb392d4e3
--- /dev/null
+++ b/docs/reference/buildx_replay_verify.md
@@ -0,0 +1,68 @@
+# docker buildx replay verify
+
+
+Replay a subject and compare the result against the original artifact
+
+### Options
+
+| Name | Type | Default | Description |
+|:-----------------|:--------------|:----------|:--------------------------------------------------------------------------------------------------------------------------------------------------|
+| `--builder` | `string` | | Override the configured builder instance |
+| `--compare` | `string` | `digest` | Comparison mode (`digest` \| `artifact` \| `semantic`) |
+| `-D`, `--debug` | `bool` | | Enable debug logging |
+| `--materials` | `stringArray` | | Materials store (repeatable; format: `provenance` \| `registry://][` \| `oci-layout://[:]` \| `` \| `=`) |
+| `--network` | `string` | `default` | Network mode for RUN instructions (`default` \| `none`) |
+| `-o`, `--output` | `stringArray` | | Output destination for the verification result (VSA) (format: `type=local,dest=path` \| `type=oci,dest=file` \| `type=attest`) |
+| `--platform` | `stringArray` | | Subjects to replay (defaults to the current host platform; `all` keeps every platform) |
+| `--progress` | `string` | `auto` | Set type of progress output (`auto` \| `plain` \| `tty` \| `quiet` \| `rawjson`) |
+| `--secret` | `stringArray` | | Secret to expose to the replayed build (format: `id=mysecret[,src=/local/secret]`) |
+| `--ssh` | `stringArray` | | SSH agent socket or keys to expose (format: `default\|[=\|[,]]`) |
+
+
+
+
+## Description
+
+`replay verify` replays the subject to an ephemeral OCI layout and compares
+the result against the original. Three comparison modes are supported:
+
+- `digest` (default) — manifest-digest equality. Cheapest; passes only on
+ byte-for-byte reproducibility.
+- `artifact` — walk both content stores and produce a JSON divergence
+ report using a very basic event tree comparator intended for demo use.
+- `semantic` — not implemented in v1.
+
+`artifact` mode currently avoids the `diffoci` package because that dependency
+still requires older containerd plumbing and private `linkname`-based linking.
+TODO: experiment with `diffoci` again once those constraints are gone.
+
+On mismatch the command exits with code 8 (`CompareMismatchError`).
+
+When `--output` is set, verify emits a SLSA Verification Summary Attestation
+(predicate type `https://slsa.dev/verification_summary/v1`) and — in
+artifact mode — a sidecar diff report.
+
+## Output formats
+
+- `type=local,dest=` — writes `vsa.intoto.jsonl` and (for artifact
+ mode) `diff.json` into ``.
+- `type=oci,dest=` — packages both blobs as an OCI artifact with
+ `artifactType = application/vnd.docker.buildx.snapshots.verify.v1+json`.
+- `type=attest` — attaches the VSA as a referrer on the subject in the
+ registry (only valid when the subject is a registry image).
+
+## Examples
+
+### Digest compare against a registry image
+
+```console
+docker buildx replay verify docker-image://example.com/app@sha256:deadbeef
+```
+
+### Artifact compare and persist the VSA locally
+
+```console
+docker buildx replay verify docker-image://example.com/app@sha256:deadbeef \
+ --compare=artifact \
+ --output=type=local,dest=./verify-out
+```
diff --git a/go.mod b/go.mod
index 12972ba426ed..417fef59c057 100644
--- a/go.mod
+++ b/go.mod
@@ -41,6 +41,7 @@ require (
github.com/open-policy-agent/opa v1.10.1
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.1.1
+ github.com/package-url/packageurl-go v0.1.1
github.com/pelletier/go-toml/v2 v2.2.4
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c
github.com/pkg/errors v0.9.1
@@ -103,6 +104,7 @@ require (
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/clipperhouse/uax29/v2 v2.2.0 // indirect
github.com/cloudflare/circl v1.6.3 // indirect
+ github.com/containerd/containerd v1.7.30 // indirect
github.com/containerd/containerd/api v1.10.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/containerd/ttrpc v1.2.8 // indirect
@@ -182,7 +184,6 @@ require (
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/oklog/ulid/v2 v2.1.1 // indirect
- github.com/package-url/packageurl-go v0.1.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_golang v1.23.2 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
diff --git a/go.sum b/go.sum
index 832e632554b2..a73c21d9f68a 100644
--- a/go.sum
+++ b/go.sum
@@ -114,10 +114,13 @@ github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUo
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
github.com/compose-spec/compose-go/v2 v2.10.2 h1:USa1NUbDcl/cjb8T9iwnuFsnO79H+2ho2L5SjFKz3uI=
github.com/compose-spec/compose-go/v2 v2.10.2/go.mod h1:ZU6zlcweCZKyiB7BVfCizQT9XmkEIMFE+PRZydVcsZg=
+github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
github.com/containerd/cgroups/v3 v3.1.3 h1:eUNflyMddm18+yrDmZPn3jI7C5hJ9ahABE5q6dyLYXQ=
github.com/containerd/cgroups/v3 v3.1.3/go.mod h1:PKZ2AcWmSBsY/tJUVhtS/rluX0b1uq1GmPO1ElCmbOw=
github.com/containerd/console v1.0.5 h1:R0ymNeydRqH2DmakFNdmjR2k0t7UPuiOV/N/27/qqsc=
github.com/containerd/console v1.0.5/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
+github.com/containerd/containerd v1.7.30 h1:/2vezDpLDVGGmkUXmlNPLCCNKHJ5BbC5tJB5JNzQhqE=
+github.com/containerd/containerd v1.7.30/go.mod h1:fek494vwJClULlTpExsmOyKCMUAbuVjlFsJQc4/j44M=
github.com/containerd/containerd/api v1.10.0 h1:5n0oHYVBwN4VhoX9fFykCV9dF1/BvAXeg2F8W6UYq1o=
github.com/containerd/containerd/api v1.10.0/go.mod h1:NBm1OAk8ZL+LG8R0ceObGxT5hbUYj7CzTmR3xh0DlMM=
github.com/containerd/containerd/v2 v2.2.2 h1:mjVQdtfryzT7lOqs5EYUFZm8ioPVjOpkSoG1GJPxEMY=
@@ -502,6 +505,8 @@ github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7D
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 h1:bsUq1dX0N8AOIL7EB/X911+m4EHsnWEHeJ0c+3TTBrg=
github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/reproducible-containers/diffoci v0.1.8 h1:bLxsTT5tuVbWYVDvXL738Av8M0A4hLwXKGS5tmQE4no=
+github.com/reproducible-containers/diffoci v0.1.8/go.mod h1:bvpyun/h4LSogUOlZZ2cFDwhSwchHSwy+F8gSFONVnU=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
diff --git a/policy/default.go b/policy/default.go
new file mode 100644
index 000000000000..188969cb6e16
--- /dev/null
+++ b/policy/default.go
@@ -0,0 +1,17 @@
+package policy
+
+import (
+ _ "embed"
+)
+
+// DefaultPolicyFilename is the synthetic filename used for the embedded
+// default policy when it is loaded as a regular policy file.
+const DefaultPolicyFilename = "buildx_default_policy.rego"
+
+//go:embed default.rego
+var defaultPolicyModule []byte
+
+// DefaultPolicyData returns the embedded default policy module bytes.
+func DefaultPolicyData() []byte {
+ return defaultPolicyModule
+}
diff --git a/policy/default.rego b/policy/default.rego
new file mode 100644
index 000000000000..2e89c1bab554
--- /dev/null
+++ b/policy/default.rego
@@ -0,0 +1,162 @@
+package docker
+
+# Default policy embedded in Buildx. It verifies trust for images shipped
+# by Docker that may be implicitly loaded during a build:
+#
+# - docker/dockerfile
+# - docker/dockerfile-upstream
+# - docker/buildkit-syft-scanner
+#
+# Any image outside this managed set is allowed and passes through to user
+# policies unchanged. Access by digest is always allowed. For tag-based
+# access the rules below enforce a signed release from the expected GitHub
+# source repository using the existing docker_github_builder_signature
+# helper from builtins.rego.
+
+is_dockerfile if {
+ input.image
+ input.image.fullRepo == "docker.io/docker/dockerfile"
+}
+
+is_dockerfile if {
+ input.image
+ input.image.fullRepo == "docker.io/docker/dockerfile-upstream"
+}
+
+is_syft_scanner if {
+ input.image
+ input.image.fullRepo == "docker.io/docker/buildkit-syft-scanner"
+}
+
+dockerfile_floating_tag(tag) if tag == "latest"
+dockerfile_floating_tag(tag) if tag == "labs"
+dockerfile_floating_tag(tag) if tag == "master"
+
+dockerfile_tag_requires_sig(tag) if dockerfile_floating_tag(tag)
+dockerfile_tag_requires_sig(tag) if version_tag_ge(tag, 1, 21)
+
+syft_scanner_floating_tag(tag) if tag == "latest"
+
+syft_scanner_tag_requires_sig(tag) if syft_scanner_floating_tag(tag)
+syft_scanner_tag_requires_sig(tag) if version_tag_ge(tag, 1, 10)
+
+
+default_policy_deny_msgs contains msg if {
+ is_dockerfile
+ tag := input.image.tag
+ tag != ""
+ dockerfile_tag_requires_sig(tag)
+ not dockerfile_sig_ok(tag)
+ msg := sprintf("image %s is not allowed by default policy: a verified docker-github-builder signature is required for %s tag", [input.image.ref, input.image.tag])
+}
+
+default_policy_deny_msgs contains msg if {
+ is_syft_scanner
+ tag := input.image.tag
+ tag != ""
+ syft_scanner_tag_requires_sig(tag)
+ not syft_scanner_sig_ok(tag)
+ msg := sprintf("image %s is not allowed by default policy: a verified docker-github-builder signature is required for %s tag", [input.image.ref, input.image.tag])
+}
+
+dockerfile_sig_ok(tag) if {
+ dockerfile_floating_tag(tag)
+ some sig in input.image.signatures
+ docker_github_builder_signature(sig, "moby/buildkit")
+}
+
+dockerfile_sig_ok(tag) if {
+ not dockerfile_floating_tag(tag)
+ some sig in input.image.signatures
+ docker_github_builder_signature(sig, "moby/buildkit")
+ dockerfile_sig_ref_matches(sig, tag)
+}
+
+syft_scanner_sig_ok(tag) if {
+ syft_scanner_floating_tag(tag)
+ some sig in input.image.signatures
+ docker_github_builder_signature(sig, "docker/buildkit-syft-scanner")
+}
+
+syft_scanner_sig_ok(tag) if {
+ not syft_scanner_floating_tag(tag)
+ some sig in input.image.signatures
+ docker_github_builder_signature(sig, "docker/buildkit-syft-scanner")
+ syft_scanner_sig_ref_matches(sig, tag)
+}
+
+
+decision := {
+ "allow": count(default_policy_deny_msgs) == 0,
+ "deny_msg": [msg | some msg in default_policy_deny_msgs],
+}
+
+# ---- helpers ----
+
+# parse_version returns [major, minor] when tag matches a version pattern
+# like "1", "1.21", "1.21.0", "1.21.0-labs". For a major-only tag such as
+# "1", the minor component is treated as effectively unbounded so floating
+# major tags are handled like the newest release in that major line.
+parse_version(tag) := [maj, min] if {
+ m := regex.find_all_string_submatch_n(`^(\d+)\.(\d+)(?:\.\d+)?(?:-labs)?$`, tag, 1)
+ count(m) == 1
+ maj := to_number(m[0][1])
+ min := to_number(m[0][2])
+}
+
+parse_version(tag) := [maj, 999999] if {
+ m := regex.find_all_string_submatch_n(`^(\d+)(?:-labs)?$`, tag, 1)
+ count(m) == 1
+ maj := to_number(m[0][1])
+}
+
+version_tag_ge(tag, target_major, _) if {
+ v := parse_version(tag)
+ v[0] > target_major
+}
+
+version_tag_ge(tag, target_major, target_minor) if {
+ v := parse_version(tag)
+ v[0] == target_major
+ v[1] >= target_minor
+}
+
+dockerfile_sig_ref_matches(sig, tag) if {
+ sig_ref_matches(sig.signer.sourceRepositoryRef, tag, "refs/tags/dockerfile/")
+}
+
+syft_scanner_sig_ref_matches(sig, tag) if {
+ ref := trim_prefix(sig.signer.sourceRepositoryRef, "refs/tags/")
+ ref != sig.signer.sourceRepositoryRef
+ version_tag_selector_matches(tag, ref)
+}
+
+sig_ref_matches(ref, tag, prefix) if {
+ stripped_ref := trim_prefix(ref, prefix)
+ stripped_ref != ref
+ tag_labs := endswith(tag, "-labs")
+ ref_labs := endswith(stripped_ref, "-labs")
+ tag_labs == ref_labs
+ version_tag_selector_matches(
+ trim_suffix(tag, "-labs"),
+ trim_suffix(stripped_ref, "-labs"),
+ )
+}
+
+version_tag_selector_matches(selector, candidate) if {
+ selector == candidate
+}
+
+version_tag_selector_matches(selector, candidate) if {
+ m := regex.find_all_string_submatch_n(`^(\d+)\.(\d+)$`, selector, 1)
+ count(m) == 1
+ parse_version(selector) == parse_version(candidate)
+}
+
+version_tag_selector_matches(selector, candidate) if {
+ m := regex.find_all_string_submatch_n(`^(\d+)$`, selector, 1)
+ count(m) == 1
+ sel := parse_version(selector)
+ cand := parse_version(candidate)
+ sel[0] == cand[0]
+}
diff --git a/policy/default_test.go b/policy/default_test.go
new file mode 100644
index 000000000000..749accb04abd
--- /dev/null
+++ b/policy/default_test.go
@@ -0,0 +1,402 @@
+package policy
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ gwpb "github.com/moby/buildkit/frontend/gateway/pb"
+ "github.com/moby/buildkit/solver/pb"
+ moby_buildkit_v1_sourcepolicy "github.com/moby/buildkit/sourcepolicy/pb"
+ "github.com/moby/buildkit/sourcepolicy/policysession"
+ policyimage "github.com/moby/policy-helpers/image"
+ policytypes "github.com/moby/policy-helpers/types"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/sigstore/sigstore-go/pkg/fulcio/certificate"
+ "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/require"
+)
+
+// makeDefaultPolicy returns a Policy instance backed by the embedded
+// default policy module, optionally wired to a mock signature verifier
+// returning the supplied SignatureInfo for image attestations.
+func makeDefaultPolicy(t *testing.T, sigInfo *policytypes.SignatureInfo) *Policy {
+ t.Helper()
+
+ var verifierProvider PolicyVerifierProvider
+ if sigInfo != nil {
+ verifierProvider = func() (PolicyVerifier, error) {
+ return &mockPolicyVerifier{
+ verifyImage: func(_ context.Context, _ policyimage.ReferrersProvider, _ ocispecs.Descriptor, _ *ocispecs.Platform) (*policytypes.SignatureInfo, error) {
+ return sigInfo, nil
+ },
+ }, nil
+ }
+ }
+ return NewPolicy(Opt{
+ Files: []File{{
+ Filename: DefaultPolicyFilename,
+ Data: DefaultPolicyData(),
+ }},
+ Log: func(level logrus.Level, msg string) {
+ t.Logf("[%s] %s", level, msg)
+ },
+ VerifierProvider: verifierProvider,
+ })
+}
+
+// dockerGithubBuilderSig returns a SignatureInfo that satisfies the
+// docker_github_builder_signature helper for the given source repository
+// and ref. Pass an empty ref to omit the SourceRepositoryRef field.
+func dockerGithubBuilderSig(sourceRepo, sourceRef string) *policytypes.SignatureInfo {
+ return &policytypes.SignatureInfo{
+ Kind: policytypes.KindDockerGithubBuilder,
+ SignatureType: policytypes.SignatureBundleV03,
+ Timestamps: []policytypes.TimestampVerificationResult{
+ {Type: "rekor", URI: "https://rekor.sigstore.dev", Timestamp: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)},
+ },
+ Signer: &certificate.Summary{
+ CertificateIssuer: "CN=sigstore-intermediate,O=sigstore.dev",
+ Extensions: certificate.Extensions{
+ Issuer: "https://token.actions.githubusercontent.com",
+ SourceRepositoryURI: "https://github.com/" + sourceRepo,
+ SourceRepositoryRef: sourceRef,
+ RunnerEnvironment: "github-hosted",
+ },
+ },
+ }
+}
+
+// runDefaultPolicyImage evaluates the default policy against the given image
+// reference. An attestation chain and an empty image config are always
+// supplied so that the policy can fully resolve metadata and produce a
+// decision (rather than requesting more data via the next response).
+func runDefaultPolicyImage(t *testing.T, p *Policy, ref string) *policysession.DecisionResponse {
+ t.Helper()
+ src := &gwpb.ResolveSourceMetaResponse{
+ Source: &pb.SourceOp{Identifier: "docker-image://" + ref},
+ Image: &gwpb.ResolveSourceImageResponse{
+ Digest: "sha256:abababababababababababababababababababababababababababababababab",
+ Config: []byte(`{"created":"2024-01-01T00:00:00Z","config":{}}`),
+ AttestationChain: newTestAttestationChain(t),
+ },
+ }
+ resp, _, err := p.CheckPolicy(context.Background(), &policysession.CheckPolicyRequest{
+ Platform: &pb.Platform{OS: "linux", Architecture: "amd64"},
+ Source: src,
+ })
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ return resp
+}
+
+func TestDefaultPolicyAllowsNonImageSources(t *testing.T) {
+ p := makeDefaultPolicy(t, nil)
+ src := &gwpb.ResolveSourceMetaResponse{
+ Source: &pb.SourceOp{Identifier: "https://example.com/foo.tar.gz"},
+ HTTP: &gwpb.ResolveSourceHTTPResponse{
+ Checksum: "sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
+ },
+ }
+ resp, _, err := p.CheckPolicy(context.Background(), &policysession.CheckPolicyRequest{
+ Source: src,
+ })
+ require.NoError(t, err)
+ require.Equal(t, moby_buildkit_v1_sourcepolicy.PolicyAction_ALLOW, resp.Action)
+}
+
+func TestDefaultPolicyImages(t *testing.T) {
+ testCases := []struct {
+ name string
+ sig *policytypes.SignatureInfo
+ ref string
+ allow bool
+ denyMsg string
+ }{
+ {
+ name: "allows_unrelated_images",
+ ref: "alpine:latest",
+ allow: true,
+ },
+ {
+ name: "dockerfile_digest_only_always_allowed",
+ ref: "docker/dockerfile@sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc",
+ allow: true,
+ },
+ {
+ name: "dockerfile_tagged_digest_denied",
+ ref: "docker/dockerfile:1.21.0@sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc",
+ denyMsg: "signature is required for 1.21.0 tag",
+ },
+ {
+ name: "dockerfile_old_version_allowed_unsigned",
+ ref: "docker/dockerfile:1.20.0",
+ allow: true,
+ },
+ {
+ name: "dockerfile_new_version_requires_signature",
+ ref: "docker/dockerfile:1.21.0",
+ denyMsg: "signature is required for 1.21.0 tag",
+ },
+ {
+ name: "dockerfile_new_minor_version_requires_signature",
+ ref: "docker/dockerfile:1.21",
+ denyMsg: "signature is required for 1.21 tag",
+ },
+ {
+ name: "dockerfile_new_major_version_requires_signature",
+ ref: "docker/dockerfile:1",
+ denyMsg: "signature is required for 1 tag",
+ },
+ {
+ name: "dockerfile_new_version_allowed_with_matching_signature",
+ sig: dockerGithubBuilderSig("moby/buildkit", "refs/tags/dockerfile/1.21.0"),
+ ref: "docker/dockerfile:1.21.0",
+ allow: true,
+ },
+ {
+ name: "dockerfile_new_minor_version_allowed_with_matching_patch_signature",
+ sig: dockerGithubBuilderSig("moby/buildkit", "refs/tags/dockerfile/1.21.0"),
+ ref: "docker/dockerfile:1.21",
+ allow: true,
+ },
+ {
+ name: "dockerfile_new_version_allowed_with_matching_signature_labs",
+ sig: dockerGithubBuilderSig("moby/buildkit", "refs/tags/dockerfile/1.21.0-labs"),
+ ref: "docker/dockerfile:1.21.0-labs",
+ allow: true,
+ },
+ {
+ name: "dockerfile_new_version_allowed_with_matching_signature_labs2",
+ sig: dockerGithubBuilderSig("moby/buildkit", "refs/tags/dockerfile/1.21.0-labs"),
+ ref: "docker/dockerfile:1.21-labs",
+ allow: true,
+ },
+ {
+ name: "dockerfile_new_version_allowed_with_matching_signature_labs3",
+ sig: dockerGithubBuilderSig("moby/buildkit", "refs/tags/dockerfile/1.21.0-labs"),
+ ref: "docker/dockerfile:1-labs",
+ allow: true,
+ },
+ {
+ name: "dockerfile_new_version_allowed_with_matching_signature_labs4",
+ sig: dockerGithubBuilderSig("moby/buildkit", "refs/tags/dockerfile/1.21.0-labs"),
+ ref: "docker/dockerfile:labs",
+ allow: true,
+ },
+ {
+ name: "dockerfile_new_version_denied_with_nonlabs_signature_for_labs_tag",
+ sig: dockerGithubBuilderSig("moby/buildkit", "refs/tags/dockerfile/1.21.0"),
+ ref: "docker/dockerfile:1-labs",
+ denyMsg: "signature is required for 1-labs tag",
+ },
+ {
+ name: "dockerfile_new_version_denied_with_mismatched_ref_labs",
+ sig: dockerGithubBuilderSig("moby/buildkit", "refs/tags/dockerfile/1.22.0-labs"),
+ ref: "docker/dockerfile:1.21.0-labs",
+ denyMsg: "signature is required for 1.21.0-labs tag",
+ },
+ {
+ name: "dockerfile_new_version_denied_with_nonlabs_signature_for_exact_labs_tag",
+ sig: dockerGithubBuilderSig("moby/buildkit", "refs/tags/dockerfile/1.21.0"),
+ ref: "docker/dockerfile:1.21.0-labs",
+ denyMsg: "signature is required for 1.21.0-labs tag",
+ },
+ {
+ name: "dockerfile_new_version_denied_with_wrong_signature_repo",
+ sig: dockerGithubBuilderSig("docker/dockerfile", "refs/tags/dockerfile/1.21.0"),
+ ref: "docker/dockerfile:1.21.0",
+ denyMsg: "signature is required for 1.21.0 tag",
+ },
+ {
+ name: "dockerfile_upstream_new_version_denied_with_wrong_signature_repo",
+ sig: dockerGithubBuilderSig("docker/dockerfile-upstream", "refs/tags/dockerfile/1.21.0"),
+ ref: "docker/dockerfile-upstream:1.21.0",
+ denyMsg: "signature is required for 1.21.0 tag",
+ },
+ {
+ name: "dockerfile_upstream_new_version_requires_signature",
+ ref: "docker/dockerfile-upstream:1.21.0",
+ denyMsg: "signature is required for 1.21.0 tag",
+ },
+ {
+ name: "dockerfile_upstream_new_version_allowed_with_matching_signature",
+ sig: dockerGithubBuilderSig("moby/buildkit", "refs/tags/dockerfile/1.21.0"),
+ ref: "docker/dockerfile-upstream:1.21.0",
+ allow: true,
+ },
+ {
+ name: "dockerfile_upstream_new_version_denied_with_mismatched_ref",
+ sig: dockerGithubBuilderSig("moby/buildkit", "refs/tags/dockerfile/1.22.0"),
+ ref: "docker/dockerfile-upstream:1.21.0",
+ denyMsg: "signature is required for 1.21.0 tag",
+ },
+ {
+ name: "dockerfile_new_version_denied_with_mismatched_ref",
+ sig: dockerGithubBuilderSig("moby/buildkit", "refs/tags/dockerfile/1.22.0"),
+ ref: "docker/dockerfile:1.21.0",
+ denyMsg: "signature is required for 1.21.0 tag",
+ },
+ {
+ name: "dockerfile_new_minor_version_denied_with_newer_patch_ref",
+ sig: dockerGithubBuilderSig("moby/buildkit", "refs/tags/dockerfile/1.23.0"),
+ ref: "docker/dockerfile:1.22",
+ denyMsg: "signature is required for 1.22 tag",
+ },
+ {
+ name: "dockerfile_new_version_denied_with_mismatched_ref_labs",
+ sig: dockerGithubBuilderSig("moby/buildkit", "refs/tags/dockerfile/1.22.0-labs"),
+ ref: "docker/dockerfile:1.21.0-labs",
+ denyMsg: "signature is required for 1.21.0-labs tag",
+ },
+ {
+ name: "dockerfile_new_version_denied_with_wrong_signature_tag",
+ sig: dockerGithubBuilderSig("moby/buildkit", "refs/tags/1.21.0"),
+ ref: "docker/dockerfile:1.21.0",
+ denyMsg: "signature is required for 1.21.0 tag",
+ },
+ {
+ name: "dockerfile_latest_allowed_with_signature_any_ref",
+ sig: dockerGithubBuilderSig("moby/buildkit", "refs/tags/dockerfile/1.30.0"),
+ ref: "docker/dockerfile:latest",
+ allow: true,
+ },
+ {
+ name: "dockerfile_latest_denied_without_signature",
+ ref: "docker/dockerfile:latest",
+ denyMsg: "signature is required for latest tag",
+ },
+ {
+ name: "dockerfile_upstream_latest_allowed_with_signature_any_ref",
+ sig: dockerGithubBuilderSig("moby/buildkit", "refs/tags/dockerfile/1.30.0"),
+ ref: "docker/dockerfile-upstream:latest",
+ allow: true,
+ },
+ {
+ name: "dockerfile_upstream_latest_denied_without_signature",
+ ref: "docker/dockerfile-upstream:latest",
+ denyMsg: "signature is required for latest tag",
+ },
+ {
+ name: "dockerfile_upstream_master_allowed_with_signature_any_ref",
+ sig: dockerGithubBuilderSig("moby/buildkit", "refs/heads/master"),
+ ref: "docker/dockerfile-upstream:master",
+ allow: true,
+ },
+ {
+ name: "dockerfile_upstream_master_denied_without_signature",
+ ref: "docker/dockerfile-upstream:master",
+ denyMsg: "signature is required for master tag",
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ p := makeDefaultPolicy(t, tc.sig)
+ resp := runDefaultPolicyImage(t, p, tc.ref)
+ if tc.allow {
+ require.Equal(t, moby_buildkit_v1_sourcepolicy.PolicyAction_ALLOW, resp.Action)
+ require.Empty(t, resp.DenyMessages)
+ return
+ }
+
+ require.Equal(t, moby_buildkit_v1_sourcepolicy.PolicyAction_DENY, resp.Action)
+ require.Len(t, resp.DenyMessages, 1)
+ require.Contains(t, resp.DenyMessages[0].Message, tc.denyMsg)
+ })
+ }
+}
+
+func TestDefaultPolicySyftScannerImages(t *testing.T) {
+ testCases := []struct {
+ name string
+ sig *policytypes.SignatureInfo
+ ref string
+ allow bool
+ denyMsg string
+ }{
+ {
+ name: "syft_scanner_old_version_allowed_unsigned",
+ ref: "docker/buildkit-syft-scanner:1.9.0",
+ allow: true,
+ },
+ {
+ name: "syft_scanner_new_version_requires_signature",
+ ref: "docker/buildkit-syft-scanner:1.10.0",
+ denyMsg: "signature is required for 1.10.0 tag",
+ },
+ {
+ name: "syft_scanner_new_minor_version_requires_signature",
+ ref: "docker/buildkit-syft-scanner:1.10",
+ denyMsg: "signature is required for 1.10 tag",
+ },
+ {
+ name: "syft_scanner_new_major_version_requires_signature",
+ ref: "docker/buildkit-syft-scanner:1",
+ denyMsg: "signature is required for 1 tag",
+ },
+ {
+ name: "syft_scanner_new_version_allowed_with_matching_signature",
+ sig: dockerGithubBuilderSig("docker/buildkit-syft-scanner", "refs/tags/1.10.0"),
+ ref: "docker/buildkit-syft-scanner:1.10.0",
+ allow: true,
+ },
+ {
+ name: "syft_scanner_new_minor_version_allowed_with_matching_patch_signature",
+ sig: dockerGithubBuilderSig("docker/buildkit-syft-scanner", "refs/tags/1.10.0"),
+ ref: "docker/buildkit-syft-scanner:1.10",
+ allow: true,
+ },
+ {
+ name: "syft_scanner_new_major_version_allowed_with_matching_minor_signature",
+ sig: dockerGithubBuilderSig("docker/buildkit-syft-scanner", "refs/tags/1.10.0"),
+ ref: "docker/buildkit-syft-scanner:1",
+ allow: true,
+ },
+ {
+ name: "syft_scanner_new_version_denied_with_wrong_signature_repo",
+ sig: dockerGithubBuilderSig("moby/buildkit", "refs/tags/1.10.0"),
+ ref: "docker/buildkit-syft-scanner:1.10.0",
+ denyMsg: "signature is required for 1.10.0 tag",
+ },
+ {
+ name: "syft_scanner_new_version_denied_with_mismatched_ref",
+ sig: dockerGithubBuilderSig("docker/buildkit-syft-scanner", "refs/tags/1.11.0"),
+ ref: "docker/buildkit-syft-scanner:1.10.0",
+ denyMsg: "signature is required for 1.10.0 tag",
+ },
+ {
+ name: "syft_scanner_new_minor_version_denied_with_newer_patch_ref",
+ sig: dockerGithubBuilderSig("docker/buildkit-syft-scanner", "refs/tags/1.11.0"),
+ ref: "docker/buildkit-syft-scanner:1.10",
+ denyMsg: "signature is required for 1.10 tag",
+ },
+ {
+ name: "syft_scanner_latest_allowed_with_signature_any_ref",
+ sig: dockerGithubBuilderSig("docker/buildkit-syft-scanner", "refs/tags/1.10.0"),
+ ref: "docker/buildkit-syft-scanner:latest",
+ allow: true,
+ },
+ {
+ name: "syft_scanner_latest_denied_without_signature",
+ ref: "docker/buildkit-syft-scanner:latest",
+ denyMsg: "signature is required for latest tag",
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ p := makeDefaultPolicy(t, tc.sig)
+ resp := runDefaultPolicyImage(t, p, tc.ref)
+ if tc.allow {
+ require.Equal(t, moby_buildkit_v1_sourcepolicy.PolicyAction_ALLOW, resp.Action)
+ require.Empty(t, resp.DenyMessages)
+ return
+ }
+
+ require.Equal(t, moby_buildkit_v1_sourcepolicy.PolicyAction_DENY, resp.Action)
+ require.Len(t, resp.DenyMessages, 1)
+ require.Contains(t, resp.DenyMessages[0].Message, tc.denyMsg)
+ })
+ }
+}
diff --git a/policy/funcs.go b/policy/funcs.go
index 3593504661ee..fc673b7af983 100644
--- a/policy/funcs.go
+++ b/policy/funcs.go
@@ -665,7 +665,7 @@ func (p *Policy) builtinLoadJSONImpl(bctx rego.BuiltinContext, a *ast.Term) (*as
return ast.NewTerm(astVal), nil
}
-func addPinToImage(src *pb.SourceOp, dgst digest.Digest) (*pb.SourceOp, error) {
+func AddPinToImage(src *pb.SourceOp, dgst digest.Digest) (*pb.SourceOp, error) {
id, ok := strings.CutPrefix(src.Identifier, "docker-image://")
if !ok {
return nil, errors.Errorf("cannot pin non-image source: %q", src.Identifier)
diff --git a/policy/input.go b/policy/input.go
index bc53ed58b58a..25a9ed8d5a6e 100644
--- a/policy/input.go
+++ b/policy/input.go
@@ -46,7 +46,7 @@ func sourceToInputRecursive(ctx context.Context, verifier PolicyVerifierProvider
materials := make([]Input, 0, len(inp.Image.Provenance.materialsRaw))
for _, m := range inp.Image.Provenance.materialsRaw {
- matSrc, matPlatform, err := parseSLSAMaterial(m)
+ matSrc, matPlatform, err := ParseSLSAMaterial(m)
if err != nil {
materials = append(materials, Input{})
continue
diff --git a/policy/materials.go b/policy/materials.go
index b705efa1db92..084110ff6894 100644
--- a/policy/materials.go
+++ b/policy/materials.go
@@ -34,7 +34,7 @@ func isMaterialKey(key string) (idx int, rest string, ok bool) {
return n, rest, true
}
-func parseSLSAMaterial(m slsa1.ResourceDescriptor) (*pb.SourceOp, *ocispecs.Platform, error) {
+func ParseSLSAMaterial(m slsa1.ResourceDescriptor) (*pb.SourceOp, *ocispecs.Platform, error) {
uri := m.URI
dgst := m.Digest
if strings.HasPrefix(uri, "pkg:docker/") {
diff --git a/policy/provenance.go b/policy/provenance.go
index dc830feebf18..f6d557b9e200 100644
--- a/policy/provenance.go
+++ b/policy/provenance.go
@@ -165,7 +165,7 @@ func rawMaterialsFromSLSA1(materials []slsa1.ResourceDescriptor, logf func(logru
URI: m.URI,
Digest: maps.Clone(m.Digest),
}
- if _, _, err := parseSLSAMaterial(rd); err != nil {
+ if _, _, err := ParseSLSAMaterial(rd); err != nil {
if logf != nil {
logf(logrus.WarnLevel, fmt.Sprintf("skipping unsupported provenance material %q: %v", m.URI, err))
}
@@ -186,7 +186,7 @@ func rawMaterialsFromSLSA02(materials []slsa02.ProvenanceMaterial, logf func(log
URI: m.URI,
Digest: maps.Clone(m.Digest),
}
- if _, _, err := parseSLSAMaterial(rd); err != nil {
+ if _, _, err := ParseSLSAMaterial(rd); err != nil {
if logf != nil {
logf(logrus.WarnLevel, fmt.Sprintf("skipping unsupported provenance material %q: %v", m.URI, err))
}
diff --git a/policy/resolve.go b/policy/resolve.go
index 84aa8c102714..d805fc0e0827 100644
--- a/policy/resolve.go
+++ b/policy/resolve.go
@@ -62,7 +62,7 @@ func resolveNodeUnknowns(ctx context.Context, node *Input, source *pb.SourceOp,
continue
}
raw := node.Image.Provenance.materialsRaw[idx]
- childSource, childNodePlatform, err := parseSLSAMaterial(raw)
+ childSource, childNodePlatform, err := ParseSLSAMaterial(raw)
if err != nil {
continue
}
diff --git a/policy/validate.go b/policy/validate.go
index 37d1470f8adc..71a82a255646 100644
--- a/policy/validate.go
+++ b/policy/validate.go
@@ -347,7 +347,7 @@ func (p *Policy) CheckPolicy(ctx context.Context, req *policysession.CheckPolicy
return nil, nil, errors.Errorf("multiple image pins set to %s: %v", sourceName(req), st.ImagePins)
}
if len(st.ImagePins) == 1 {
- newSrc, err := addPinToImage(req.Source.Source, slices.Collect(maps.Keys(st.ImagePins))[0])
+ newSrc, err := AddPinToImage(req.Source.Source, slices.Collect(maps.Keys(st.ImagePins))[0])
if err != nil {
return nil, nil, errors.Wrapf(err, "failed to add image pin to source")
}
diff --git a/replay/build.go b/replay/build.go
new file mode 100644
index 000000000000..7db9769d8c0d
--- /dev/null
+++ b/replay/build.go
@@ -0,0 +1,479 @@
+package replay
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "sort"
+ "strings"
+
+ "github.com/containerd/platforms"
+ "github.com/docker/buildx/build"
+ "github.com/docker/buildx/builder"
+ "github.com/docker/buildx/util/buildflags"
+ "github.com/docker/buildx/util/confutil"
+ "github.com/docker/buildx/util/dockerutil"
+ "github.com/docker/buildx/util/progress"
+ "github.com/docker/cli/cli/command"
+ "github.com/moby/buildkit/identity"
+ provenancetypes "github.com/moby/buildkit/solver/llbsolver/provenance/types"
+ "github.com/moby/buildkit/util/progress/progressui"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+ "github.com/tonistiigi/go-csvvalue"
+)
+
+// BuildMode is a replay mode.
+type BuildMode string
+
+const (
+ BuildModeMaterials BuildMode = "materials"
+ BuildModeFrontend BuildMode = "frontend"
+ BuildModeLLB BuildMode = "llb"
+)
+
+// Target pairs one subject with its already-loaded predicate. A replay
+// operation spans N targets (one per platform, typically from a multi-
+// platform LoadSubjects fan-out).
+type Target struct {
+ Subject *Subject
+ Predicate *Predicate
+}
+
+// BuildRequest is a single replay-build invocation spanning one or more
+// targets that share the same user-supplied flags.
+type BuildRequest struct {
+ // Targets is the set of (subject, predicate) pairs to replay. For a
+ // single-platform subject this is len 1; multi-platform inputs fan
+ // out into multiple targets sharing the other fields below.
+ Targets []Target
+
+ // Mode selects a replay strategy. Empty defaults to BuildModeMaterials.
+ Mode BuildMode
+
+ // Materials resolves provenance materials to local content stores. May
+ // be nil, in which case the default sentinel-only resolver is used.
+ Materials *MaterialsResolver
+
+ // NetworkMode controls the network mode for RUN instructions in the
+ // replayed build (default | none). Material resolution is NOT affected.
+ NetworkMode string
+
+ // Secrets / SSH hold the user-supplied specs for the replayed solve.
+ // Cross-checked against each predicate via Secrets()/SSH() before any
+ // solve begins.
+ Secrets buildflags.Secrets
+ SSH []*buildflags.SSH
+
+ // Exports are the buildflags-parsed --output specs.
+ Exports []*buildflags.ExportEntry
+
+ // Tags are "--tag" values to apply to image/oci/docker exports. Flow
+ // matches `docker buildx build`: the tags become the `name=` attribute
+ // on each eligible export via build/opt.go toSolveOpt.
+ Tags []string
+
+ // Progress controls the display mode for replay progress output.
+ Progress progressui.DisplayMode
+}
+
+// SubjectKey returns a stable identifier for a subject, used as the map key
+// for build.Build's map[string]Options input.
+func SubjectKey(s *Subject) string {
+ if s == nil {
+ return ""
+ }
+ if s.Descriptor.Platform != nil {
+ return fmt.Sprintf("%s@%s", s.Descriptor.Digest, platforms.Format(*s.Descriptor.Platform))
+ }
+ return s.Descriptor.Digest.String()
+}
+
+// Build executes the replay request against the supplied builder.
+//
+// Fail-fast: cross-check errors are reported per-subject with typed errors
+// (Missing/ExtraSecretError, Missing/ExtraSSHError) before any solve starts;
+// a local-context predicate fails with UnreplayableLocalContextError.
+//
+// Mode selection:
+//
+// - BuildModeMaterials (default): recorded frontend + strict source-policy
+// pinning via the session policy callback.
+// - BuildModeFrontend: recorded frontend + NO strict pinning (sources float).
+// - BuildModeLLB: not yet implemented.
+func Build(ctx context.Context, dockerCli command.Cli, builderName string, req *BuildRequest) (retErr error) {
+ if req == nil {
+ return errors.New("nil build request")
+ }
+ if len(req.Targets) == 0 {
+ return errors.New("no targets to replay")
+ }
+ if req.Mode == BuildModeLLB {
+ return ErrNotImplemented("llb replay mode")
+ }
+
+ // Pre-solve: local-context + secret/ssh cross-check.
+ for _, t := range req.Targets {
+ if t.Subject == nil || t.Predicate == nil {
+ return errors.New("target has nil subject or predicate")
+ }
+ if locals := t.Predicate.Locals(); len(locals) > 0 {
+ names := make([]string, 0, len(locals))
+ for _, l := range locals {
+ names = append(names, l.Name)
+ }
+ return ErrUnreplayableLocalContext(names)
+ }
+ if err := CheckSecrets(t.Predicate.Secrets(), req.Secrets); err != nil {
+ return err
+ }
+ if err := CheckSSH(t.Predicate.SSH(), req.SSH); err != nil {
+ return err
+ }
+ }
+
+ // Parse exports once; shared across all targets.
+ exports, _, err := build.CreateExports(req.Exports)
+ if err != nil {
+ return errors.Wrap(err, "parse --output")
+ }
+
+ // Build the map[string]build.Options keyed by subject key.
+ buildOpts := make(map[string]build.Options, len(req.Targets))
+ for _, t := range req.Targets {
+ opt, err := BuildOptionsFromPredicate(t.Subject, t.Predicate, req)
+ if err != nil {
+ return err
+ }
+ opt.Exports = exports
+ buildOpts[SubjectKey(t.Subject)] = opt
+ }
+
+ // Builder + printer wiring.
+ b, err := builder.New(dockerCli, builder.WithName(builderName))
+ if err != nil {
+ return err
+ }
+ nodes, err := b.LoadNodes(ctx)
+ if err != nil {
+ return err
+ }
+ mode := req.Mode
+ if mode == "" {
+ mode = BuildModeMaterials
+ }
+ warningMsg := ""
+ if mode == BuildModeMaterials {
+ warningMsg = materialsModePlatformWarning(req.Targets, nodes)
+ }
+
+ progressMode := req.Progress
+ if progressMode == "" {
+ progressMode = progressui.AutoMode
+ }
+ printer, err := progress.NewPrinter(ctx, os.Stderr, progressMode,
+ progress.WithDesc(
+ fmt.Sprintf("rebuilding %d subject(s) with %q instance using %s driver", len(req.Targets), b.Name, b.Driver),
+ fmt.Sprintf("%s:%s", b.Driver, b.Name),
+ ),
+ )
+ if err != nil {
+ return err
+ }
+ defer func() {
+ werr := printer.Wait()
+ if retErr == nil {
+ retErr = werr
+ }
+ }()
+ if warningMsg != "" {
+ if err := progress.Wrap("check replay environment", printer.Write, func(sub progress.SubLogger) error {
+ sub.Log(2, []byte("warning: "+warningMsg+"\n"))
+ return nil
+ }); err != nil {
+ return err
+ }
+ }
+
+ if _, err := build.Build(ctx, nodes, buildOpts, dockerutil.NewClient(dockerCli), confutil.NewConfig(dockerCli), printer); err != nil {
+ return errors.Wrap(err, "replay build")
+ }
+ return nil
+}
+
+func materialsModePlatformWarning(targets []Target, nodes []builder.Node) string {
+ hostPlat := platforms.Normalize(platforms.DefaultSpec())
+ instancePlat := &hostPlat
+ if len(nodes) == 0 {
+ instanceFmt := platforms.Format(*instancePlat)
+ for _, t := range targets {
+ if t.Predicate == nil {
+ continue
+ }
+ prov, ok := t.Predicate.DefaultPlatform()
+ if !ok || prov == nil {
+ continue
+ }
+ provFmt := platforms.Format(*prov)
+ if provFmt == instanceFmt {
+ continue
+ }
+ return fmt.Sprintf("provenance default platform %s does not match current builder instance default platform %s; materials-mode replay may be inefficient or fail", provFmt, instanceFmt)
+ }
+ return ""
+ }
+ matchedHost := false
+ for _, n := range nodes {
+ if n.Err != nil || len(n.Platforms) == 0 {
+ continue
+ }
+ for i := range n.Platforms {
+ p := platforms.Normalize(n.Platforms[i])
+ if platforms.Only(hostPlat).Match(p) {
+ matchedHost = true
+ break
+ }
+ }
+ if matchedHost {
+ break
+ }
+ }
+ if !matchedHost {
+ for _, n := range nodes {
+ if n.Err != nil || len(n.Platforms) == 0 {
+ continue
+ }
+ p := platforms.Normalize(n.Platforms[0])
+ instancePlat = &p
+ break
+ }
+ }
+ instanceFmt := platforms.Format(platforms.Normalize(*instancePlat))
+ for _, t := range targets {
+ if t.Predicate == nil {
+ continue
+ }
+ prov, ok := t.Predicate.DefaultPlatform()
+ if !ok || prov == nil {
+ continue
+ }
+ provFmt := platforms.Format(*prov)
+ if provFmt == instanceFmt {
+ continue
+ }
+ return fmt.Sprintf("provenance default platform %s does not match current builder instance default platform %s; materials-mode replay may be inefficient or fail", provFmt, instanceFmt)
+ }
+ return ""
+}
+
+// BuildOptionsFromPredicate maps a (subject, predicate) pair to a
+// build.Options. The resulting options have Exports left empty; Build
+// populates them from the request.
+func BuildOptionsFromPredicate(s *Subject, pred *Predicate, req *BuildRequest) (build.Options, error) {
+ if pred == nil {
+ return build.Options{}, errors.New("nil predicate")
+ }
+
+ attrs := pred.FrontendAttrs()
+ cfgSrc := pred.ConfigSource()
+
+ labels := collectPrefixed(attrs, "label:")
+ buildArgs := collectPrefixed(attrs, "build-arg:")
+ var nocacheFilter []string
+ noCache := false
+ if v, ok := attrs["no-cache"]; ok {
+ if v == "" {
+ noCache = true
+ } else if fields, err := csvvalue.Fields(v, nil); err == nil {
+ nocacheFilter = fields
+ }
+ }
+
+ // NamedContexts from recorded "context:*" attrs.
+ namedContexts := map[string]build.NamedContext{}
+ for k, v := range attrs {
+ name, ok := strings.CutPrefix(k, "context:")
+ if !ok {
+ continue
+ }
+ namedContexts[name] = build.NamedContext{Path: v}
+ }
+
+ target := attrs["target"]
+ var extraHosts []string
+ if v := attrs["add-hosts"]; v != "" {
+ if fields, err := csvvalue.Fields(v, nil); err == nil {
+ extraHosts = fields
+ }
+ }
+ cgroupParent := attrs["cgroup-parent"]
+
+ // Dockerfile path comes from configSource.path when present — that is the
+ // canonical provenance field for the build definition. The recorded
+ // frontend attr is only used as a compatibility fallback.
+ dockerfilePath := cfgSrc.Path
+ if dockerfilePath == "" {
+ dockerfilePath = attrs["filename"]
+ }
+
+ // The build context comes from configSource.uri when present — that is
+ // the canonical provenance field for the source location. The recorded
+ // frontend attr is only used as a compatibility fallback. Replay rejects
+ // local filesystem contexts up-front via the Locals check, so by the time
+ // we get here the predicate is expected to carry a remote source URL.
+ contextPath := cfgSrc.URI
+ if contextPath == "" {
+ contextPath = attrs["context"]
+ }
+ if contextPath == "" {
+ return build.Options{}, errors.Errorf("predicate has no recorded build context; replay requires a remote-source build (git / https)")
+ }
+
+ opt := build.Options{
+ Ref: identity.NewID(),
+ Target: target,
+ Inputs: build.Inputs{
+ ContextPath: contextPath,
+ DockerfilePath: dockerfilePath,
+ NamedContexts: namedContexts,
+ },
+ BuildArgs: buildArgs,
+ Labels: labels,
+ NoCache: noCache,
+ NoCacheFilter: nocacheFilter,
+ ExtraHosts: extraHosts,
+ CgroupParent: cgroupParent,
+ NetworkMode: networkModeForReplay(req.NetworkMode),
+ SecretSpecs: req.Secrets,
+ SSHSpecs: req.SSH,
+ Tags: req.Tags,
+ // Reproduce the recorded attestation attrs so the replay output
+ // carries the same attestation shape as the original build.
+ Attests: pred.Attests(),
+ }
+
+ if s.Descriptor.Platform != nil {
+ opt.Platforms = []ocispecs.Platform{*s.Descriptor.Platform}
+ }
+
+ // Strict source pinning applies in materials mode only (the default).
+ // Attach via the shared Policy slot as a callback-only entry — composes
+ // with any file-based user policies the caller may have configured.
+ if req.Mode == "" || req.Mode == BuildModeMaterials {
+ opt.Policy = append(opt.Policy, buildflags.PolicyConfig{
+ Callback: ReplayPinCallback(NewPinIndex(pred)),
+ })
+ }
+
+ return opt, nil
+}
+
+func networkModeForReplay(mode string) string {
+ switch mode {
+ case "", "default":
+ return ""
+ case "none":
+ return "none"
+ }
+ // Replay refuses network modes beyond default/none to keep the replay
+ // sandbox at least as restrictive as the default. host-network mode
+ // requires explicit opt-in that replay does not yet plumb through.
+ return mode
+}
+
+// CheckSecrets enforces the provenance vs. user-supplied secret-ID cross
+// check: required (non-optional) IDs declared in provenance must be
+// provided; any provided IDs not declared in provenance are rejected.
+func CheckSecrets(declared []*provenancetypes.Secret, provided buildflags.Secrets) error {
+ required := map[string]struct{}{}
+ declaredAll := map[string]struct{}{}
+ for _, s := range declared {
+ if s == nil || s.ID == "" {
+ continue
+ }
+ declaredAll[s.ID] = struct{}{}
+ if !s.Optional {
+ required[s.ID] = struct{}{}
+ }
+ }
+
+ providedIDs := map[string]struct{}{}
+ for _, s := range provided {
+ if s == nil || s.ID == "" {
+ continue
+ }
+ providedIDs[s.ID] = struct{}{}
+ }
+
+ missing := setDiff(required, providedIDs)
+ extra := setDiff(providedIDs, declaredAll)
+ if len(missing) > 0 {
+ return ErrMissingSecret(missing)
+ }
+ if len(extra) > 0 {
+ return ErrExtraSecret(extra)
+ }
+ return nil
+}
+
+// CheckSSH enforces the provenance vs. user-supplied SSH cross check.
+func CheckSSH(declared []*provenancetypes.SSH, provided []*buildflags.SSH) error {
+ required := map[string]struct{}{}
+ declaredAll := map[string]struct{}{}
+ for _, s := range declared {
+ if s == nil || s.ID == "" {
+ continue
+ }
+ declaredAll[s.ID] = struct{}{}
+ if !s.Optional {
+ required[s.ID] = struct{}{}
+ }
+ }
+
+ providedIDs := map[string]struct{}{}
+ for _, s := range provided {
+ if s == nil || s.ID == "" {
+ continue
+ }
+ providedIDs[s.ID] = struct{}{}
+ }
+
+ missing := setDiff(required, providedIDs)
+ extra := setDiff(providedIDs, declaredAll)
+ if len(missing) > 0 {
+ return ErrMissingSSH(missing)
+ }
+ if len(extra) > 0 {
+ return ErrExtraSSH(extra)
+ }
+ return nil
+}
+
+// setDiff returns the ordered list of elements in a that are not in b.
+func setDiff(a, b map[string]struct{}) []string {
+ var out []string
+ for k := range a {
+ if _, ok := b[k]; !ok {
+ out = append(out, k)
+ }
+ }
+ sort.Strings(out)
+ return out
+}
+
+// collectPrefixed returns the keys from attrs whose key starts with prefix,
+// with the prefix stripped. Values are copied verbatim.
+func collectPrefixed(attrs map[string]string, prefix string) map[string]string {
+ out := map[string]string{}
+ for k, v := range attrs {
+ name, ok := strings.CutPrefix(k, prefix)
+ if !ok {
+ continue
+ }
+ out[name] = v
+ }
+ if len(out) == 0 {
+ return nil
+ }
+ return out
+}
diff --git a/replay/build_test.go b/replay/build_test.go
new file mode 100644
index 000000000000..b6aad509e718
--- /dev/null
+++ b/replay/build_test.go
@@ -0,0 +1,253 @@
+package replay
+
+import (
+ "testing"
+
+ "github.com/docker/buildx/builder"
+ "github.com/docker/buildx/util/buildflags"
+ slsa1 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1"
+ provenancetypes "github.com/moby/buildkit/solver/llbsolver/provenance/types"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/require"
+)
+
+func TestCheckSecretsMissing(t *testing.T) {
+ declared := []*provenancetypes.Secret{
+ {ID: "required"},
+ {ID: "optional", Optional: true},
+ }
+ err := CheckSecrets(declared, nil)
+ require.Error(t, err)
+ var mse *MissingSecretError
+ require.ErrorAs(t, err, &mse)
+ require.Equal(t, []string{"required"}, mse.IDs)
+}
+
+func TestCheckSecretsExtra(t *testing.T) {
+ declared := []*provenancetypes.Secret{{ID: "a"}}
+ provided := buildflags.Secrets{{ID: "a"}, {ID: "rogue"}}
+ err := CheckSecrets(declared, provided)
+ require.Error(t, err)
+ var ese *ExtraSecretError
+ require.ErrorAs(t, err, &ese)
+ require.Equal(t, []string{"rogue"}, ese.IDs)
+}
+
+func TestCheckSecretsOptionalOmitted(t *testing.T) {
+ declared := []*provenancetypes.Secret{
+ {ID: "required"},
+ {ID: "optional", Optional: true},
+ }
+ provided := buildflags.Secrets{{ID: "required"}}
+ require.NoError(t, CheckSecrets(declared, provided))
+}
+
+func TestCheckSecretsOptionalProvidedAllowed(t *testing.T) {
+ declared := []*provenancetypes.Secret{
+ {ID: "required"},
+ {ID: "optional", Optional: true},
+ }
+ provided := buildflags.Secrets{{ID: "required"}, {ID: "optional"}}
+ require.NoError(t, CheckSecrets(declared, provided))
+}
+
+func TestCheckSSHMissing(t *testing.T) {
+ declared := []*provenancetypes.SSH{{ID: "default"}}
+ err := CheckSSH(declared, nil)
+ var mse *MissingSSHError
+ require.ErrorAs(t, err, &mse)
+ require.Equal(t, []string{"default"}, mse.IDs)
+}
+
+func TestCheckSSHExtra(t *testing.T) {
+ declared := []*provenancetypes.SSH{{ID: "default"}}
+ provided := []*buildflags.SSH{{ID: "default"}, {ID: "rogue"}}
+ err := CheckSSH(declared, provided)
+ var ese *ExtraSSHError
+ require.ErrorAs(t, err, &ese)
+ require.Equal(t, []string{"rogue"}, ese.IDs)
+}
+
+func predicateWithAttrs(attrs map[string]string) *Predicate {
+ // Ensure every fixture predicate carries a remote-source `context`
+ // so BuildOptionsFromPredicate passes the replay context check.
+ if attrs == nil {
+ attrs = map[string]string{}
+ }
+ if _, ok := attrs["context"]; !ok {
+ attrs["context"] = "https://github.com/docker/buildx.git"
+ }
+ pred := &Predicate{}
+ pred.BuildDefinition.ExternalParameters.Request.Args = attrs
+ return pred
+}
+
+func subjectWithPlatform(arch string) *Subject {
+ return &Subject{
+ Descriptor: ocispecs.Descriptor{
+ Platform: &ocispecs.Platform{OS: "linux", Architecture: arch},
+ },
+ }
+}
+
+func TestBuildOptionsFromPredicate(t *testing.T) {
+ s := subjectWithPlatform("amd64")
+
+ pred := predicateWithAttrs(map[string]string{
+ "target": "myapp",
+ "filename": "Dockerfile.prod",
+ "label:org.example.owner": "alice",
+ "build-arg:FOO": "bar",
+ "build-arg:BUILDKIT_INLINE_CACHE": "1",
+ "add-hosts": "foo:1.2.3.4,bar:5.6.7.8",
+ "no-cache": "stage1,stage2",
+ "context:app": "docker-image://alpine:3.18",
+ "attest:provenance": "mode=max",
+ "attest:sbom": "true",
+ })
+
+ req := &BuildRequest{Mode: BuildModeFrontend}
+ opt, err := BuildOptionsFromPredicate(s, pred, req)
+ require.NoError(t, err)
+
+ require.Equal(t, "myapp", opt.Target)
+ require.Equal(t, "Dockerfile.prod", opt.Inputs.DockerfilePath)
+ require.Equal(t, map[string]string{"org.example.owner": "alice"}, opt.Labels)
+ require.Equal(t, map[string]string{"FOO": "bar", "BUILDKIT_INLINE_CACHE": "1"}, opt.BuildArgs)
+ require.Equal(t, []string{"foo:1.2.3.4", "bar:5.6.7.8"}, opt.ExtraHosts)
+ require.Equal(t, []string{"stage1", "stage2"}, opt.NoCacheFilter)
+ require.False(t, opt.NoCache)
+
+ require.Contains(t, opt.Inputs.NamedContexts, "app")
+ require.Equal(t, "docker-image://alpine:3.18", opt.Inputs.NamedContexts["app"].Path)
+
+ // Recorded attest:* attrs flow through to opt.Attests unchanged so the
+ // replay output carries the same attestation shape as the original.
+ require.Contains(t, opt.Attests, "provenance")
+ require.NotNil(t, opt.Attests["provenance"])
+ require.Equal(t, "mode=max", *opt.Attests["provenance"])
+ require.Contains(t, opt.Attests, "sbom")
+ require.Equal(t, "true", *opt.Attests["sbom"])
+
+ // Platform should mirror the subject descriptor.
+ require.Len(t, opt.Platforms, 1)
+ require.Equal(t, "amd64", opt.Platforms[0].Architecture)
+
+ // In frontend mode, no pin callback is attached.
+ require.Empty(t, opt.Policy)
+}
+
+func TestBuildOptionsFromPredicateNoCacheAll(t *testing.T) {
+ pred := predicateWithAttrs(map[string]string{
+ "no-cache": "",
+ })
+ opt, err := BuildOptionsFromPredicate(subjectWithPlatform("amd64"), pred, &BuildRequest{Mode: BuildModeFrontend})
+ require.NoError(t, err)
+ require.True(t, opt.NoCache)
+ require.Nil(t, opt.NoCacheFilter)
+}
+
+func TestBuildOptionsFromPredicateMaterialsModeAttachesPinCallback(t *testing.T) {
+ pred := predicateWithAttrs(map[string]string{})
+ opt, err := BuildOptionsFromPredicate(subjectWithPlatform("amd64"), pred, &BuildRequest{Mode: BuildModeMaterials})
+ require.NoError(t, err)
+ require.Len(t, opt.Policy, 1, "materials mode must attach strict pin callback")
+ require.NotNil(t, opt.Policy[0].Callback, "Policy entry must carry a non-nil Callback")
+ require.Empty(t, opt.Policy[0].Files, "replay pin entry must not reference policy files")
+}
+
+func TestBuildOptionsFromPredicateUsesConfigSourceAsPrimary(t *testing.T) {
+ pred := predicateWithAttrs(map[string]string{
+ "context": "https://github.com/example/attrs.git",
+ "filename": "Dockerfile.attrs",
+ "target": "myapp",
+ })
+ pred.BuildDefinition.ExternalParameters.ConfigSource.URI = "https://github.com/moby/buildkit.git#refs/tags/v0.29.0"
+ pred.BuildDefinition.ExternalParameters.ConfigSource.Path = "Dockerfile"
+
+ opt, err := BuildOptionsFromPredicate(subjectWithPlatform("amd64"), pred, &BuildRequest{Mode: BuildModeFrontend})
+ require.NoError(t, err)
+ require.Equal(t, "https://github.com/moby/buildkit.git#refs/tags/v0.29.0", opt.Inputs.ContextPath)
+ require.Equal(t, "Dockerfile", opt.Inputs.DockerfilePath)
+}
+
+func TestBuildOptionsFromPredicateUsesConfigSourcePathForGatewayFrontend(t *testing.T) {
+ pred := predicateWithAttrs(map[string]string{
+ "source": "docker/dockerfile-upstream:master",
+ "cmdline": "docker/dockerfile-upstream:master",
+ })
+ pred.BuildDefinition.ExternalParameters.Request.Frontend = "gateway.v0"
+ pred.BuildDefinition.ExternalParameters.ConfigSource.URI = "https://github.com/moby/buildkit.git#refs/tags/dockerfile/1.23.0"
+ pred.BuildDefinition.ExternalParameters.ConfigSource.Path = "frontend/dockerfile/cmd/dockerfile-frontend/Dockerfile"
+
+ opt, err := BuildOptionsFromPredicate(subjectWithPlatform("arm64"), pred, &BuildRequest{Mode: BuildModeMaterials})
+ require.NoError(t, err)
+ require.Equal(t, "https://github.com/moby/buildkit.git#refs/tags/dockerfile/1.23.0", opt.Inputs.ContextPath)
+ require.Equal(t, "frontend/dockerfile/cmd/dockerfile-frontend/Dockerfile", opt.Inputs.DockerfilePath)
+}
+
+func TestMaterialsModePlatformWarningMismatch(t *testing.T) {
+ pred := predicateWithAttrs(map[string]string{})
+ pred.BuildDefinition.InternalParameters.BuilderPlatform = "linux/amd64"
+ msg := materialsModePlatformWarning([]Target{{
+ Subject: subjectWithPlatform("amd64"),
+ Predicate: pred,
+ }}, []builder.Node{{
+ Platforms: []ocispecs.Platform{{OS: "linux", Architecture: "arm64"}},
+ }})
+ require.Contains(t, msg, "provenance default platform linux/amd64")
+ require.Contains(t, msg, "current builder instance default platform linux/arm64")
+}
+
+func TestMaterialsModePlatformWarningMatch(t *testing.T) {
+ pred := predicateWithAttrs(map[string]string{})
+ pred.BuildDefinition.InternalParameters.BuilderPlatform = "linux/amd64"
+ msg := materialsModePlatformWarning([]Target{{
+ Subject: subjectWithPlatform("amd64"),
+ Predicate: pred,
+ }}, []builder.Node{{
+ Platforms: []ocispecs.Platform{{OS: "linux", Architecture: "amd64"}},
+ }})
+ require.Empty(t, msg)
+}
+
+func TestMaterialsModePlatformWarningInferredFromMaterials(t *testing.T) {
+ pred := predicateWithAttrs(map[string]string{})
+ pred.BuildDefinition.ResolvedDependencies = []slsa1.ResourceDescriptor{
+ {URI: "pkg:docker/golang@1.26-alpine3.23?platform=linux%2Famd64"},
+ {URI: "pkg:docker/tonistiigi/xx@1.9.0?platform=linux%2Famd64"},
+ }
+ msg := materialsModePlatformWarning([]Target{{
+ Subject: subjectWithPlatform("arm64"),
+ Predicate: pred,
+ }}, []builder.Node{{
+ Platforms: []ocispecs.Platform{{OS: "linux", Architecture: "arm64"}},
+ }})
+ require.Contains(t, msg, "provenance default platform linux/amd64")
+ require.Contains(t, msg, "current builder instance default platform linux/arm64")
+}
+
+func TestMaterialsModePlatformWarningPrefersInferredDefaultPlatform(t *testing.T) {
+ pred := predicateWithAttrs(map[string]string{})
+ pred.BuildDefinition.InternalParameters.BuilderPlatform = "linux/arm64"
+ pred.BuildDefinition.ResolvedDependencies = []slsa1.ResourceDescriptor{
+ {URI: "pkg:docker/golang@1.26-alpine3.23?platform=linux%2Famd64"},
+ {URI: "pkg:docker/tonistiigi/xx@1.9.0?platform=linux%2Famd64"},
+ }
+ msg := materialsModePlatformWarning([]Target{{
+ Subject: subjectWithPlatform("arm64"),
+ Predicate: pred,
+ }}, []builder.Node{{
+ Platforms: []ocispecs.Platform{{OS: "linux", Architecture: "arm64"}},
+ }})
+ require.Contains(t, msg, "provenance default platform linux/amd64")
+ require.Contains(t, msg, "current builder instance default platform linux/arm64")
+}
+
+func TestSubjectKeyWithPlatform(t *testing.T) {
+ s := &Subject{Descriptor: ocispecs.Descriptor{
+ Digest: "sha256:deadbeef",
+ Platform: &ocispecs.Platform{OS: "linux", Architecture: "arm64", Variant: "v8"},
+ }}
+ require.Equal(t, "sha256:deadbeef@linux/arm64/v8", SubjectKey(s))
+}
diff --git a/replay/dryrun.go b/replay/dryrun.go
new file mode 100644
index 000000000000..808a3e7f0e4c
--- /dev/null
+++ b/replay/dryrun.go
@@ -0,0 +1,469 @@
+package replay
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "sort"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/docker/buildx/builder"
+ "github.com/docker/buildx/util/buildflags"
+ "github.com/docker/buildx/util/imagetools"
+ "github.com/docker/buildx/util/progress"
+ "github.com/docker/cli/cli/command"
+ slsa1 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1"
+ "github.com/moby/buildkit/client"
+ provenancetypes "github.com/moby/buildkit/solver/llbsolver/provenance/types"
+ "github.com/moby/buildkit/util/purl"
+ digest "github.com/opencontainers/go-digest"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/package-url/packageurl-go"
+ "github.com/pkg/errors"
+ "github.com/tonistiigi/go-csvvalue"
+)
+
+// BuildPlan is the JSON-serializable dry-run payload for `replay build`.
+// Field names are stable and consumed by tests / tooling.
+type BuildPlan struct {
+ // Subjects is one SubjectBuildPlan per replay target.
+ Subjects []SubjectBuildPlan `json:"subjects"`
+}
+
+// SubjectBuildPlan is the per-subject build-mode dry-run plan.
+type SubjectBuildPlan struct {
+ // Descriptor is the subject descriptor (digest + mediaType + size).
+ Descriptor ocispecs.Descriptor `json:"descriptor"`
+ // BuildConfig summarises the solve parameters replay would use.
+ BuildConfig BuildPlanConfig `json:"buildConfig"`
+ // Materials lists the resolved provenance materials.
+ Materials []PlanMaterial `json:"materials"`
+}
+
+// BuildPlanConfig mirrors the build.Options fields replay derives from the
+// predicate — enough for a user to eyeball that the replay will run as
+// expected.
+type BuildPlanConfig struct {
+ Frontend string `json:"frontend"`
+ FrontendAttrs map[string]string `json:"frontendAttrs,omitempty"`
+ Context string `json:"context,omitempty"`
+ Filename string `json:"filename,omitempty"`
+ Target string `json:"target,omitempty"`
+ BuildArgs map[string]string `json:"buildArgs,omitempty"`
+ Labels map[string]string `json:"labels,omitempty"`
+ NoCache bool `json:"noCache,omitempty"`
+ NoCacheFilter []string `json:"noCacheFilter,omitempty"`
+ Secrets []PlanSecret `json:"secrets,omitempty"`
+ SSH []string `json:"ssh,omitempty"`
+ NetworkMode string `json:"networkMode,omitempty"`
+ Exports []string `json:"exports,omitempty"`
+}
+
+// PlanMaterial describes one provenance material. Different dry-run modes
+// populate different subsets of the fields, but the JSON shape stays stable.
+type PlanMaterial struct {
+ URI string `json:"uri,omitempty"`
+ // Platform is populated for image materials only — either parsed
+ // from the purl `?platform=` qualifier or, when the URI doesn't
+ // carry one, from the predicate's builder platform.
+ Platform *ocispecs.Platform `json:"platform,omitempty"`
+ Digest string `json:"digest,omitempty"`
+ // Kind is one of: "image", "image-blob" (container-blob), "http",
+ // "git", or "unknown".
+ Kind string `json:"kind"`
+ // Included reports whether `replay snapshot` would copy this
+ // material's bytes into the snapshot.
+ Included bool `json:"included,omitempty"`
+ // Size is the total byte size this material contributes to the
+ // snapshot — the root index plus the platform-matched manifest
+ // chain (config + all layer descriptor sizes). Only populated for
+ // image materials during snapshot dry-run; computed from manifest
+ // metadata alone (no layer bodies are fetched).
+ Size int64 `json:"size,omitempty"`
+}
+
+// PlanSecret describes one declared secret plus whether it is optional.
+type PlanSecret struct {
+ ID string `json:"id"`
+ Optional bool `json:"optional,omitempty"`
+}
+
+// SnapshotPlan is the JSON-serializable dry-run payload for
+// `replay snapshot` — one entry per snapshot target.
+type SnapshotPlan []SnapshotPlanTarget
+
+// SnapshotPlanTarget is the per-subject snapshot dry-run plan.
+type SnapshotPlanTarget struct {
+ // Subject is the subject descriptor (already carries platform).
+ Subject ocispecs.Descriptor `json:"subject"`
+ // Materials lists each recorded material and whether the snapshot
+ // would include its content.
+ Materials []PlanMaterial `json:"materials"`
+}
+
+// MakeBuildPlan constructs the dry-run plan for a BuildRequest.
+// Any fatal up-front condition (local-context, extra/missing secrets or ssh,
+// unknown mode) surfaces as the same typed error the real build would
+// produce — dry-run is a reliable pre-flight.
+func MakeBuildPlan(req *BuildRequest) (*BuildPlan, error) {
+ if req == nil {
+ return nil, errors.New("nil build request")
+ }
+ if len(req.Targets) == 0 {
+ return nil, errors.New("no targets to replay")
+ }
+ if req.Mode == BuildModeLLB {
+ return nil, ErrNotImplemented("llb replay mode")
+ }
+
+ plan := &BuildPlan{Subjects: make([]SubjectBuildPlan, 0, len(req.Targets))}
+
+ for _, t := range req.Targets {
+ if t.Subject == nil || t.Predicate == nil {
+ return nil, errors.New("target has nil subject or predicate")
+ }
+ // Same local-context check as Build (§4.2 step 4).
+ if locals := t.Predicate.Locals(); len(locals) > 0 {
+ names := make([]string, 0, len(locals))
+ for _, l := range locals {
+ names = append(names, l.Name)
+ }
+ return nil, ErrUnreplayableLocalContext(names)
+ }
+ if err := CheckSecrets(t.Predicate.Secrets(), req.Secrets); err != nil {
+ return nil, err
+ }
+ if err := CheckSSH(t.Predicate.SSH(), req.SSH); err != nil {
+ return nil, err
+ }
+ if _, err := BuildOptionsFromPredicate(t.Subject, t.Predicate, req); err != nil {
+ return nil, err
+ }
+
+ plan.Subjects = append(plan.Subjects, subjectBuildPlan(t.Subject, t.Predicate, req))
+ }
+ return plan, nil
+}
+
+// MakeSnapshotPlan constructs the dry-run plan for a SnapshotRequest.
+// For each image material, the root index + platform-matched manifest
+// bodies are fetched so their descriptor sizes can be summed — layer
+// bodies are not fetched.
+func MakeSnapshotPlan(ctx context.Context, dockerCli command.Cli, builderName string, req *SnapshotRequest) (SnapshotPlan, error) {
+ if req == nil {
+ return nil, errors.New("nil snapshot request")
+ }
+ if len(req.Targets) == 0 {
+ return nil, errors.New("no targets to snapshot")
+ }
+
+ // Register ref-key prefixes so MakeRefKey in containerd does not log
+ // warnings while we fetch manifest bodies.
+ ctx = withMediaTypeKeyPrefix(ctx)
+
+ // Lazily-constructed registry resolver for image materials, shared
+ // across targets — mirrors the real-run setup.
+ var registryResolver *imagetools.Resolver
+ lazyResolver := func() (*imagetools.Resolver, error) {
+ if registryResolver != nil {
+ return registryResolver, nil
+ }
+ if dockerCli == nil {
+ registryResolver = imagetools.New(imagetools.Opt{})
+ return registryResolver, nil
+ }
+ b, err := builder.New(dockerCli, builder.WithName(builderName))
+ if err != nil {
+ return nil, err
+ }
+ imgOpt, err := b.ImageOpt()
+ if err != nil {
+ return nil, err
+ }
+ registryResolver = imagetools.New(imgOpt)
+ return registryResolver, nil
+ }
+
+ plan := make(SnapshotPlan, 0, len(req.Targets))
+ var pwlog progress.Logger = func(*client.SolveStatus) {}
+ if req.Progress != nil {
+ pwlog = req.Progress.Write
+ }
+
+ for ti, t := range req.Targets {
+ s, pred := t.Subject, t.Predicate
+ if s == nil || pred == nil {
+ return nil, errors.New("target has nil subject or predicate")
+ }
+ if s.IsAttestationFile() {
+ return nil, ErrUnsupportedSubject("snapshot requires an image or oci-layout subject")
+ }
+ if s.AttestationManifest().Digest == "" {
+ return nil, ErrNoProvenance(s.InputRef())
+ }
+
+ var mats []PlanMaterial
+ targetName := fmt.Sprintf("[%d/%d] snapshot %s", ti+1, len(req.Targets), snapshotTargetLabel(s))
+ err := progress.Wrap(targetName, pwlog, func(sub progress.SubLogger) error {
+ var err error
+ mats, err = planMaterials(ctx, s, pred, req, lazyResolver, sub)
+ return err
+ })
+ if err != nil {
+ return nil, err
+ }
+ plan = append(plan, SnapshotPlanTarget{
+ Subject: s.Descriptor,
+ Materials: mats,
+ })
+ }
+ return plan, nil
+}
+
+// planMaterials builds the material list for the dry-run plan. For image
+// materials it resolves the root index + picks the platform-matched
+// manifest and sums every descriptor's size — layer bytes are not
+// fetched.
+func planMaterials(ctx context.Context, s *Subject, pred *Predicate, req *SnapshotRequest, lazyResolver func() (*imagetools.Resolver, error), sub progress.SubLogger) ([]PlanMaterial, error) {
+ builder := pred.BuilderPlatform()
+ var out []PlanMaterial
+ for _, m := range pred.ResolvedDependencies() {
+ entry := materialPlan(m, builder)
+ kind := classifyMaterial(m)
+ entry.Included = req.IncludeMaterials && kind != materialKindGit && kind != materialKindUnknown
+ if kind == materialKindImage {
+ if req.IncludeMaterials {
+ var size int64
+ err := sub.Wrap(fmt.Sprintf("plan image material %s", entry.URI), func() error {
+ var err error
+ size, err = imageMaterialSize(ctx, req.Materials, lazyResolver, m, preferredDigest(m.Digest), s.Descriptor.Platform, pred.BuilderPlatform())
+ return err
+ })
+ if err != nil {
+ return nil, errors.Wrapf(err, "size image material %s", m.URI)
+ }
+ entry.Size = size
+ }
+ }
+ out = append(out, entry)
+ }
+ return out, nil
+}
+
+// imageMaterialSize fetches only the manifests needed to size an image
+// material: root index → platform-matched manifest → its config + layers.
+// Returns the sum of every descriptor's declared size. Layer bodies are
+// never fetched.
+func imageMaterialSize(ctx context.Context, resolver *MaterialsResolver, lazyResolver func() (*imagetools.Resolver, error), m slsa1.ResourceDescriptor, rootDgst digest.Digest, subjectPlat *ocispecs.Platform, builderPlat ocispecs.Platform) (int64, error) {
+ rootDesc, provider, err := resolveImageMaterial(ctx, resolver, lazyResolver, m, rootDgst, WithPlatform(subjectPlat), WithBuilderPlatform(builderPlat))
+ if err != nil {
+ return 0, err
+ }
+ total := rootDesc.Size
+
+ platDesc, err := pickPlatformChild(ctx, provider, rootDesc, subjectPlat, builderPlat)
+ if err != nil {
+ return 0, err
+ }
+ // pickPlatformChild may return rootDesc unchanged for single-platform
+ // images; avoid double-counting.
+ if platDesc.Digest != rootDesc.Digest {
+ total += platDesc.Size
+ }
+
+ mfstData, err := content.ReadBlob(ctx, provider, platDesc)
+ if err != nil {
+ return 0, errors.Wrapf(err, "read platform manifest %s", platDesc.Digest)
+ }
+ var mfst ocispecs.Manifest
+ if err := json.Unmarshal(mfstData, &mfst); err != nil {
+ return 0, errors.Wrapf(err, "parse platform manifest %s", platDesc.Digest)
+ }
+ total += mfst.Config.Size
+ for _, l := range mfst.Layers {
+ total += l.Size
+ }
+ return total, nil
+}
+
+// stripImagePurlQualifiers removes the `digest` and `platform` purl
+// qualifiers from an image material URI — those values are already
+// reported as separate fields on the plan entry so carrying them in the
+// URI is pure noise. Falls back to the original URI on parse failure.
+func stripImagePurlQualifiers(uri string) string {
+ p, err := packageurl.FromString(uri)
+ if err != nil {
+ return uri
+ }
+ kept := p.Qualifiers[:0]
+ for _, q := range p.Qualifiers {
+ switch q.Key {
+ case "digest", "platform":
+ continue
+ }
+ kept = append(kept, q)
+ }
+ p.Qualifiers = kept
+ return p.ToString()
+}
+
+func subjectBuildPlan(s *Subject, pred *Predicate, req *BuildRequest) SubjectBuildPlan {
+ attrs := pred.FrontendAttrs()
+ cfgSrc := pred.ConfigSource()
+ contextPath := cfgSrc.URI
+ if contextPath == "" {
+ contextPath = attrs["context"]
+ }
+ dockerfilePath := cfgSrc.Path
+ if dockerfilePath == "" {
+ dockerfilePath = attrs["filename"]
+ }
+
+ cfg := BuildPlanConfig{
+ Frontend: pred.Frontend(),
+ FrontendAttrs: frontendAttrSummary(attrs),
+ Context: contextPath,
+ Filename: dockerfilePath,
+ Target: attrs["target"],
+ BuildArgs: collectPrefixed(attrs, "build-arg:"),
+ Labels: collectPrefixed(attrs, "label:"),
+ Secrets: planSecrets(pred.Secrets()),
+ SSH: sshIDs(pred.SSH()),
+ NetworkMode: networkModeForReplay(req.NetworkMode),
+ Exports: exportSummaries(req.Exports),
+ }
+ if v, ok := attrs["no-cache"]; ok {
+ if v == "" {
+ cfg.NoCache = true
+ } else if fields, err := csvvalue.Fields(v, nil); err == nil {
+ cfg.NoCacheFilter = fields
+ }
+ }
+
+ // Materials summary.
+ mats := make([]PlanMaterial, 0, len(pred.ResolvedDependencies()))
+ for _, m := range pred.ResolvedDependencies() {
+ mats = append(mats, materialPlan(m, pred.BuilderPlatform()))
+ }
+
+ plan := SubjectBuildPlan{
+ Descriptor: s.Descriptor,
+ BuildConfig: cfg,
+ Materials: mats,
+ }
+ return plan
+}
+
+func frontendAttrSummary(attrs map[string]string) map[string]string {
+ if len(attrs) == 0 {
+ return nil
+ }
+ out := make(map[string]string, 2)
+ for _, key := range []string{"source", "cmdline"} {
+ if v, ok := attrs[key]; ok && v != "" {
+ out[key] = v
+ }
+ }
+ if len(out) == 0 {
+ return nil
+ }
+ return out
+}
+
+// materialPlan builds the shared dry-run material summary shape.
+func materialPlan(m slsa1.ResourceDescriptor, builder ocispecs.Platform) PlanMaterial {
+ kind := classifyMaterial(m)
+ pm := PlanMaterial{
+ URI: m.URI,
+ Kind: materialKindString(kind),
+ }
+ d := preferredDigest(m.Digest)
+ if d != "" {
+ pm.Digest = d.String()
+ }
+ if kind == materialKindImage {
+ if _, p, err := purl.PURLToRef(m.URI); err == nil && p != nil {
+ pm.Platform = p
+ } else {
+ b := builder
+ pm.Platform = &b
+ }
+ pm.URI = stripImagePurlQualifiers(m.URI)
+ }
+ return pm
+}
+
+// materialKindString returns the stable JSON kind tag for a material kind.
+func materialKindString(k materialKind) string {
+ switch k {
+ case materialKindImage:
+ return "image"
+ case materialKindContainerBlob:
+ return "image-blob"
+ case materialKindHTTP:
+ return "http"
+ case materialKindGit:
+ return "git"
+ }
+ return "unknown"
+}
+
+// planSecrets returns the sorted unique declared secrets, preserving whether
+// any declaration of the secret marked it optional.
+func planSecrets(secrets []*provenancetypes.Secret) []PlanSecret {
+ seen := map[string]bool{}
+ for _, s := range secrets {
+ if s == nil || s.ID == "" {
+ continue
+ }
+ seen[s.ID] = seen[s.ID] || s.Optional
+ }
+ ids := make([]string, 0, len(seen))
+ for id := range seen {
+ ids = append(ids, id)
+ }
+ sort.Strings(ids)
+ out := make([]PlanSecret, 0, len(ids))
+ for _, id := range ids {
+ out = append(out, PlanSecret{ID: id, Optional: seen[id]})
+ }
+ return out
+}
+
+// sshIDs returns the sorted unique IDs of declared SSH entries.
+func sshIDs(entries []*provenancetypes.SSH) []string {
+ seen := map[string]struct{}{}
+ var out []string
+ for _, s := range entries {
+ if s == nil || s.ID == "" {
+ continue
+ }
+ if _, ok := seen[s.ID]; ok {
+ continue
+ }
+ seen[s.ID] = struct{}{}
+ out = append(out, s.ID)
+ }
+ sort.Strings(out)
+ return out
+}
+
+// exportSummaries renders --output specs into a short "type=..." list for
+// dry-run JSON output.
+func exportSummaries(exports []*buildflags.ExportEntry) []string {
+ out := make([]string, 0, len(exports))
+ for _, e := range exports {
+ if e == nil {
+ continue
+ }
+ s := "type=" + e.Type
+ if e.Destination != "" {
+ s += ",dest=" + e.Destination
+ }
+ if name, ok := e.Attrs["name"]; ok && name != "" {
+ s += ",name=" + name
+ }
+ out = append(out, s)
+ }
+ return out
+}
diff --git a/replay/dryrun_test.go b/replay/dryrun_test.go
new file mode 100644
index 000000000000..fa2a1de8e54e
--- /dev/null
+++ b/replay/dryrun_test.go
@@ -0,0 +1,186 @@
+package replay
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+
+ "github.com/docker/buildx/util/buildflags"
+ slsa1 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1"
+ provenancetypes "github.com/moby/buildkit/solver/llbsolver/provenance/types"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/require"
+)
+
+func testSubject(t *testing.T) *Subject {
+ t.Helper()
+ return &Subject{
+ Descriptor: ocispecs.Descriptor{
+ MediaType: ocispecs.MediaTypeImageManifest,
+ Digest: "sha256:aaaa",
+ Platform: &ocispecs.Platform{OS: "linux", Architecture: "amd64"},
+ },
+ attestManifest: ocispecs.Descriptor{
+ MediaType: ocispecs.MediaTypeImageManifest,
+ Digest: "sha256:bbbb",
+ },
+ }
+}
+
+func testPredicate(secretSpecs []struct {
+ id string
+ optional bool
+}, locals []string) *Predicate {
+ p := &Predicate{}
+ for _, spec := range secretSpecs {
+ p.BuildDefinition.ExternalParameters.Request.Secrets = append(
+ p.BuildDefinition.ExternalParameters.Request.Secrets,
+ &provenancetypes.Secret{ID: spec.id, Optional: spec.optional},
+ )
+ }
+ for _, name := range locals {
+ p.BuildDefinition.ExternalParameters.Request.Locals = append(
+ p.BuildDefinition.ExternalParameters.Request.Locals,
+ &provenancetypes.LocalSource{Name: name},
+ )
+ }
+ p.BuildDefinition.ExternalParameters.Request.Frontend = "dockerfile.v0"
+ p.BuildDefinition.ExternalParameters.Request.Args = map[string]string{
+ "context": "https://github.com/example/repo.git",
+ "source": "docker/dockerfile:1.8",
+ "cmdline": "docker/dockerfile:1.8",
+ "target": "default",
+ "build-arg:EXAMPLE": "1",
+ "label:org.example.test": "yes",
+ }
+ p.BuildDefinition.ExternalParameters.ConfigSource.URI = "https://github.com/example/repo.git"
+ p.BuildDefinition.ExternalParameters.ConfigSource.Path = "Dockerfile"
+ p.BuildDefinition.ResolvedDependencies = []slsa1.ResourceDescriptor{
+ {URI: "pkg:docker/alpine@3.20", Digest: map[string]string{"sha256": "deadbeef"}},
+ {URI: "https://example.com/foo.tar", Digest: map[string]string{"sha256": "feed"}},
+ }
+ return p
+}
+
+func TestMakeBuildPlanHappyPath(t *testing.T) {
+ s := testSubject(t)
+ pred := testPredicate([]struct {
+ id string
+ optional bool
+ }{
+ {id: "required"},
+ {id: "optional", optional: true},
+ }, nil)
+ resolver, err := NewMaterialsResolver(nil)
+ require.NoError(t, err)
+
+ req := &BuildRequest{
+ Targets: []Target{{Subject: s, Predicate: pred}},
+ Mode: BuildModeMaterials,
+ Materials: resolver,
+ Secrets: buildflags.Secrets{{ID: "required"}},
+ }
+ plan, err := MakeBuildPlan(req)
+ require.NoError(t, err)
+ require.Len(t, plan.Subjects, 1)
+ require.Equal(t, s.Descriptor.Digest, plan.Subjects[0].Descriptor.Digest)
+ require.Len(t, plan.Subjects[0].Materials, 2)
+ require.Equal(t, "https://github.com/example/repo.git", plan.Subjects[0].BuildConfig.Context)
+ require.Equal(t, "Dockerfile", plan.Subjects[0].BuildConfig.Filename)
+ require.Equal(t, map[string]string{
+ "cmdline": "docker/dockerfile:1.8",
+ "source": "docker/dockerfile:1.8",
+ }, plan.Subjects[0].BuildConfig.FrontendAttrs)
+ require.Equal(t, []PlanSecret{
+ {ID: "optional", Optional: true},
+ {ID: "required"},
+ }, plan.Subjects[0].BuildConfig.Secrets)
+ // First material is image-kind.
+ require.Equal(t, "image", plan.Subjects[0].Materials[0].Kind)
+ // Second material is http.
+ require.Equal(t, "http", plan.Subjects[0].Materials[1].Kind)
+ // JSON shape is stable.
+ dt, err := json.Marshal(plan)
+ require.NoError(t, err)
+ require.NotContains(t, string(dt), `"inputRef":`)
+ require.NotContains(t, string(dt), `"platform":"linux/amd64"`)
+ require.NotContains(t, string(dt), `"predicateType":`)
+ require.NotContains(t, string(dt), `"pins":`)
+ require.NotContains(t, string(dt), `"replayMode":`)
+ require.NotContains(t, string(dt), `"warnings":`)
+ require.NotContains(t, string(dt), `"build-arg:`)
+ require.NotContains(t, string(dt), `"label:`)
+}
+
+func TestMakeBuildPlanLocalContextRejected(t *testing.T) {
+ s := testSubject(t)
+ pred := testPredicate(nil, []string{"ctx"})
+ req := &BuildRequest{
+ Targets: []Target{{Subject: s, Predicate: pred}},
+ }
+ _, err := MakeBuildPlan(req)
+ require.Error(t, err)
+ var ulc *UnreplayableLocalContextError
+ require.ErrorAs(t, err, &ulc)
+}
+
+func TestMakeBuildPlanExtraSecretRejected(t *testing.T) {
+ s := testSubject(t)
+ pred := testPredicate(nil, nil)
+ req := &BuildRequest{
+ Targets: []Target{{Subject: s, Predicate: pred}},
+ Secrets: buildflags.Secrets{{ID: "rogue"}},
+ }
+ _, err := MakeBuildPlan(req)
+ require.Error(t, err)
+ var es *ExtraSecretError
+ require.ErrorAs(t, err, &es)
+ require.Equal(t, []string{"rogue"}, es.IDs)
+}
+
+func TestMakeBuildPlanMissingRecordedContextRejected(t *testing.T) {
+ s := testSubject(t)
+ pred := testPredicate(nil, nil)
+ delete(pred.BuildDefinition.ExternalParameters.Request.Args, "context")
+ pred.BuildDefinition.ExternalParameters.ConfigSource.URI = ""
+ req := &BuildRequest{
+ Targets: []Target{{Subject: s, Predicate: pred}},
+ }
+ _, err := MakeBuildPlan(req)
+ require.EqualError(t, err, "predicate has no recorded build context; replay requires a remote-source build (git / https)")
+}
+
+func TestMakeSnapshotPlanHappyPath(t *testing.T) {
+ fx := makeSnapshotFixture(t)
+
+ req := &SnapshotRequest{
+ Targets: []Target{{Subject: fx.subject, Predicate: fx.predicate}},
+ IncludeMaterials: true,
+ Materials: snapshotOverrideResolver(t, fx.httpURI, fx.httpBytes),
+ }
+ plan, err := MakeSnapshotPlan(context.Background(), nil, "", req)
+ require.NoError(t, err)
+ require.Len(t, plan, 1)
+ require.Equal(t, fx.subject.Descriptor.Digest, plan[0].Subject.Digest)
+ require.NotEmpty(t, plan[0].Materials)
+ // Fixture has an http material only; non-image entries must not
+ // carry a manifest-derived size.
+ for _, m := range plan[0].Materials {
+ if m.Kind != "image" {
+ require.Zero(t, m.Size, "non-image materials must not report a size")
+ }
+ }
+}
+
+func TestMakeSnapshotPlanAttestationFileRejected(t *testing.T) {
+ s := testSubject(t)
+ s.kind = subjectKindAttestationFile
+ pred := testPredicate(nil, nil)
+ req := &SnapshotRequest{
+ Targets: []Target{{Subject: s, Predicate: pred}},
+ }
+ _, err := MakeSnapshotPlan(context.Background(), nil, "", req)
+ require.Error(t, err)
+ var us *UnsupportedSubjectError
+ require.ErrorAs(t, err, &us)
+}
diff --git a/replay/errors.go b/replay/errors.go
new file mode 100644
index 000000000000..f58091bd5a71
--- /dev/null
+++ b/replay/errors.go
@@ -0,0 +1,261 @@
+package replay
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/pkg/errors"
+)
+
+// UnreplayableLocalContextError signals that the original build used a local
+// filesystem context which replay cannot reproduce.
+type UnreplayableLocalContextError struct {
+ LocalSources []string
+}
+
+func (e *UnreplayableLocalContextError) Error() string {
+ if len(e.LocalSources) == 0 {
+ return "build used local context that cannot be replayed"
+ }
+ return fmt.Sprintf("build used local context that cannot be replayed: %s", strings.Join(e.LocalSources, ", "))
+}
+
+// ErrUnreplayableLocalContext constructs an UnreplayableLocalContextError.
+func ErrUnreplayableLocalContext(sources []string) error {
+ sorted := append([]string(nil), sources...)
+ sort.Strings(sorted)
+ return errors.WithStack(&UnreplayableLocalContextError{LocalSources: sorted})
+}
+
+// MissingSecretError is returned when provenance declares required secrets
+// that the user did not provide.
+type MissingSecretError struct {
+ IDs []string
+}
+
+func (e *MissingSecretError) Error() string {
+ return fmt.Sprintf("missing required secrets: %s", strings.Join(e.IDs, ", "))
+}
+
+// ErrMissingSecret constructs a MissingSecretError.
+func ErrMissingSecret(ids []string) error {
+ sorted := append([]string(nil), ids...)
+ sort.Strings(sorted)
+ return errors.WithStack(&MissingSecretError{IDs: sorted})
+}
+
+// ExtraSecretError is returned when the user supplies secrets that the
+// provenance does not declare.
+type ExtraSecretError struct {
+ IDs []string
+}
+
+func (e *ExtraSecretError) Error() string {
+ return fmt.Sprintf("extra secrets not declared in provenance: %s", strings.Join(e.IDs, ", "))
+}
+
+// ErrExtraSecret constructs an ExtraSecretError.
+func ErrExtraSecret(ids []string) error {
+ sorted := append([]string(nil), ids...)
+ sort.Strings(sorted)
+ return errors.WithStack(&ExtraSecretError{IDs: sorted})
+}
+
+// MissingSSHError is returned when provenance declares required SSH agents
+// that the user did not provide.
+type MissingSSHError struct {
+ IDs []string
+}
+
+func (e *MissingSSHError) Error() string {
+ return fmt.Sprintf("missing required ssh entries: %s", strings.Join(e.IDs, ", "))
+}
+
+// ErrMissingSSH constructs a MissingSSHError.
+func ErrMissingSSH(ids []string) error {
+ sorted := append([]string(nil), ids...)
+ sort.Strings(sorted)
+ return errors.WithStack(&MissingSSHError{IDs: sorted})
+}
+
+// ExtraSSHError is returned when the user supplies SSH agents that the
+// provenance does not declare.
+type ExtraSSHError struct {
+ IDs []string
+}
+
+func (e *ExtraSSHError) Error() string {
+ return fmt.Sprintf("extra ssh entries not declared in provenance: %s", strings.Join(e.IDs, ", "))
+}
+
+// ErrExtraSSH constructs an ExtraSSHError.
+func ErrExtraSSH(ids []string) error {
+ sorted := append([]string(nil), ids...)
+ sort.Strings(sorted)
+ return errors.WithStack(&ExtraSSHError{IDs: sorted})
+}
+
+// MaterialNotFoundError indicates a provenance material that the resolver
+// could not locate in any configured store.
+type MaterialNotFoundError struct {
+ URI string
+ Digest string
+}
+
+func (e *MaterialNotFoundError) Error() string {
+ return fmt.Sprintf("material not found: uri=%q digest=%q", e.URI, e.Digest)
+}
+
+// ErrMaterialNotFound constructs a MaterialNotFoundError.
+func ErrMaterialNotFound(uri, dgst string) error {
+ return errors.WithStack(&MaterialNotFoundError{URI: uri, Digest: dgst})
+}
+
+// CompareMismatchError is returned by `replay verify` when the replayed
+// artifact does not match the subject. The wrapped Report may be nil when no
+// structured diff is available (digest comparison).
+type CompareMismatchError struct {
+ // Report is typed as any so callers can surface either the basic compare
+ // tree or a future richer report format without breaking the error type.
+ Report any
+ Reason string
+}
+
+func (e *CompareMismatchError) Error() string {
+ if e.Reason != "" {
+ return fmt.Sprintf("replay mismatch: %s", e.Reason)
+ }
+ return "replay mismatch"
+}
+
+// ErrCompareMismatch constructs a CompareMismatchError.
+func ErrCompareMismatch(reason string, report any) error {
+ return errors.WithStack(&CompareMismatchError{Reason: reason, Report: report})
+}
+
+// NotImplementedError marks a feature that is not yet implemented.
+type NotImplementedError struct {
+ Feature string
+}
+
+func (e *NotImplementedError) Error() string {
+ return fmt.Sprintf("not implemented: %s", e.Feature)
+}
+
+// ErrNotImplemented constructs a NotImplementedError.
+func ErrNotImplemented(feature string) error {
+ return errors.WithStack(&NotImplementedError{Feature: feature})
+}
+
+// UnsupportedSubjectError signals that the supplied subject kind is not
+// compatible with the invoked subcommand.
+type UnsupportedSubjectError struct {
+ Kind string
+}
+
+func (e *UnsupportedSubjectError) Error() string {
+ return fmt.Sprintf("unsupported subject: %s", e.Kind)
+}
+
+// ErrUnsupportedSubject constructs an UnsupportedSubjectError.
+func ErrUnsupportedSubject(kind string) error {
+ return errors.WithStack(&UnsupportedSubjectError{Kind: kind})
+}
+
+// NoProvenanceError is returned when no SLSA provenance attestation could be
+// found for a subject.
+type NoProvenanceError struct {
+ Subject string
+}
+
+func (e *NoProvenanceError) Error() string {
+ if e.Subject == "" {
+ return "no SLSA provenance attestation found"
+ }
+ return fmt.Sprintf("no SLSA provenance attestation found for %s", e.Subject)
+}
+
+// ErrNoProvenance constructs a NoProvenanceError.
+func ErrNoProvenance(subject string) error {
+ return errors.WithStack(&NoProvenanceError{Subject: subject})
+}
+
+// UnsupportedPredicateError signals that the attached provenance predicate is
+// not SLSA v1.
+type UnsupportedPredicateError struct {
+ PredicateType string
+}
+
+func (e *UnsupportedPredicateError) Error() string {
+ return fmt.Sprintf("unsupported provenance predicate type %q; replay requires SLSA v1", e.PredicateType)
+}
+
+// ErrUnsupportedPredicate constructs an UnsupportedPredicateError.
+func ErrUnsupportedPredicate(predicateType string) error {
+ return errors.WithStack(&UnsupportedPredicateError{PredicateType: predicateType})
+}
+
+// BuildKitCapMissingError is returned when the target BuildKit daemon lacks
+// a capability required by replay (notably CapSourcePolicySession).
+type BuildKitCapMissingError struct {
+ Capability string
+}
+
+func (e *BuildKitCapMissingError) Error() string {
+ return fmt.Sprintf("BuildKit daemon missing required capability %q; please upgrade", e.Capability)
+}
+
+// ErrBuildKitCapMissing constructs a BuildKitCapMissingError.
+func ErrBuildKitCapMissing(capability string) error {
+ return errors.WithStack(&BuildKitCapMissingError{Capability: capability})
+}
+
+// SignatureVerificationRequiredError is returned when a signed DSSE envelope
+// or Sigstore bundle is encountered but no trust anchor is available. Replay
+// never silently accepts a signed attestation; full sigstore/cosign
+// verification is not yet implemented.
+type SignatureVerificationRequiredError struct {
+ // Source is the user-visible input that carries the signed envelope
+ // (file path for attestation-file inputs).
+ Source string
+ // Envelope describes the detected envelope shape ("dsse" or
+ // "sigstore-bundle") so the user can tell what was rejected.
+ Envelope string
+}
+
+func (e *SignatureVerificationRequiredError) Error() string {
+ src := e.Source
+ if src == "" {
+ src = "attestation"
+ }
+ env := e.Envelope
+ if env == "" {
+ env = "signed envelope"
+ }
+ return fmt.Sprintf("%s for %s carries signatures but signature verification is not yet implemented; refusing to accept unverified signed attestation", env, src)
+}
+
+// ErrSignatureVerificationRequired constructs a
+// SignatureVerificationRequiredError.
+func ErrSignatureVerificationRequired(source, envelope string) error {
+ return errors.WithStack(&SignatureVerificationRequiredError{Source: source, Envelope: envelope})
+}
+
+// MissingRequiredFlagError is returned when a replay subcommand is invoked
+// without a flag that is required (e.g. `replay snapshot` without `--output`).
+type MissingRequiredFlagError struct {
+ Flag string
+}
+
+func (e *MissingRequiredFlagError) Error() string {
+ if e.Flag == "" {
+ return "missing required flag"
+ }
+ return fmt.Sprintf("missing required flag %s", e.Flag)
+}
+
+// ErrMissingRequiredFlag constructs a MissingRequiredFlagError.
+func ErrMissingRequiredFlag(flag string) error {
+ return errors.WithStack(&MissingRequiredFlagError{Flag: flag})
+}
diff --git a/replay/errors_test.go b/replay/errors_test.go
new file mode 100644
index 000000000000..471331812726
--- /dev/null
+++ b/replay/errors_test.go
@@ -0,0 +1,87 @@
+package replay
+
+import (
+ "testing"
+
+ "github.com/pkg/errors"
+ "github.com/stretchr/testify/require"
+)
+
+// TestErrorTypes asserts that each replay constructor returns the typed error
+// expected by errors.As-based consumers. Exit-code mapping lives with the CLI
+// glue (commands/replay/errors.go) and is tested there.
+func TestErrorTypes(t *testing.T) {
+ tests := []struct {
+ name string
+ err error
+ want any
+ }{
+ {"local context", ErrUnreplayableLocalContext([]string{"default"}), &UnreplayableLocalContextError{}},
+ {"missing secret", ErrMissingSecret([]string{"mysecret"}), &MissingSecretError{}},
+ {"extra secret", ErrExtraSecret([]string{"mysecret"}), &ExtraSecretError{}},
+ {"missing ssh", ErrMissingSSH([]string{"default"}), &MissingSSHError{}},
+ {"extra ssh", ErrExtraSSH([]string{"default"}), &ExtraSSHError{}},
+ {"material not found", ErrMaterialNotFound("docker-image://foo", "sha256:aa"), &MaterialNotFoundError{}},
+ {"compare mismatch", ErrCompareMismatch("digest differs", nil), &CompareMismatchError{}},
+ {"not implemented", ErrNotImplemented("llb replay mode"), &NotImplementedError{}},
+ {"unsupported subject", ErrUnsupportedSubject("attestation-file"), &UnsupportedSubjectError{}},
+ {"no provenance", ErrNoProvenance("foo:latest"), &NoProvenanceError{}},
+ {"unsupported predicate", ErrUnsupportedPredicate("https://slsa.dev/provenance/v0.2"), &UnsupportedPredicateError{}},
+ {"buildkit cap missing", ErrBuildKitCapMissing("CapSourcePolicySession"), &BuildKitCapMissingError{}},
+ {"signature verification required", ErrSignatureVerificationRequired("./att.json", "dsse"), &SignatureVerificationRequiredError{}},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ require.Error(t, tt.err)
+ switch want := tt.want.(type) {
+ case *UnreplayableLocalContextError:
+ require.ErrorAs(t, tt.err, &want)
+ case *MissingSecretError:
+ require.ErrorAs(t, tt.err, &want)
+ case *ExtraSecretError:
+ require.ErrorAs(t, tt.err, &want)
+ case *MissingSSHError:
+ require.ErrorAs(t, tt.err, &want)
+ case *ExtraSSHError:
+ require.ErrorAs(t, tt.err, &want)
+ case *MaterialNotFoundError:
+ require.ErrorAs(t, tt.err, &want)
+ case *CompareMismatchError:
+ require.ErrorAs(t, tt.err, &want)
+ case *NotImplementedError:
+ require.ErrorAs(t, tt.err, &want)
+ case *UnsupportedSubjectError:
+ require.ErrorAs(t, tt.err, &want)
+ case *NoProvenanceError:
+ require.ErrorAs(t, tt.err, &want)
+ case *UnsupportedPredicateError:
+ require.ErrorAs(t, tt.err, &want)
+ case *BuildKitCapMissingError:
+ require.ErrorAs(t, tt.err, &want)
+ case *SignatureVerificationRequiredError:
+ require.ErrorAs(t, tt.err, &want)
+ default:
+ t.Fatalf("unexpected want type %T", tt.want)
+ }
+ })
+ }
+}
+
+// TestErrorsAsWrapped asserts that pkg/errors stack-wrapped and fmt-wrapped
+// replay errors remain errors.As-matchable.
+func TestErrorsAsWrapped(t *testing.T) {
+ e := ErrMaterialNotFound("foo", "sha256:a")
+ wrapped := errors.Wrap(e, "resolver failed")
+ var mnf *MaterialNotFoundError
+ require.ErrorAs(t, wrapped, &mnf)
+ require.Equal(t, "foo", mnf.URI)
+}
+
+// TestSignatureVerificationRequiredMessage asserts the error message carries
+// both the source and envelope kind so users know what was rejected.
+func TestSignatureVerificationRequiredMessage(t *testing.T) {
+ e := ErrSignatureVerificationRequired("/tmp/att.json", "dsse")
+ require.Contains(t, e.Error(), "/tmp/att.json")
+ require.Contains(t, e.Error(), "dsse")
+}
diff --git a/replay/materials.go b/replay/materials.go
new file mode 100644
index 000000000000..8f5ef6d40196
--- /dev/null
+++ b/replay/materials.go
@@ -0,0 +1,649 @@
+package replay
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/containerd/containerd/v2/core/content"
+ contentlocal "github.com/containerd/containerd/v2/plugins/content/local"
+ "github.com/containerd/platforms"
+ "github.com/docker/buildx/util/imagetools"
+ "github.com/docker/buildx/util/ocilayout"
+ "github.com/moby/buildkit/client/ociindex"
+ "github.com/moby/buildkit/util/contentutil"
+ digest "github.com/opencontainers/go-digest"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+// MaterialsResolver resolves provenance materials to a (descriptor, provider)
+// pair the replay pipeline can use to serve content locally. Lookup order:
+//
+// 1. overrides keyed by URI or "sha256:"
+// 2. explicit stores, in the order listed on `--materials`
+// 3. the `provenance` sentinel (fetch from the URI recorded in provenance)
+//
+// Snapshot-backed store lookup (registry-native snapshot OCI indexes) is
+// implemented in a later slice — this slice supports overrides, generic
+// OCI-layout stores (image digests), filesystem content stores (sha256
+// addressed), and the provenance sentinel fallback.
+type MaterialsResolver struct {
+ overrides map[string]materialOverride
+ stores []materialStore
+ sentinel bool // true when `provenance` sentinel is enabled
+}
+
+type materialOverride struct {
+ // spec is the raw right-hand side of a `key=value` override. Resolved
+ // lazily so a bad override doesn't poison the whole resolver build.
+ spec string
+}
+
+// materialStore is an entry on the --materials list that is NOT an override.
+// Exactly one of path/ociLayout is populated.
+type materialStore struct {
+ // path is a filesystem directory laid out as a raw content store
+ // (blobs//).
+ path string
+ // ociLayout is an absolute path to an OCI image layout directory.
+ // Lookup is by descriptor digest only.
+ ociLayout string
+}
+
+// NewMaterialsResolver parses the --materials list and returns a resolver.
+// Spec forms accepted:
+//
+// - "provenance" — sentinel (default when the
+// --materials list is empty)
+// - "oci-layout://[:]" — OCI layout store
+// - "" — raw content store
+// (blobs//)
+// - "=" — override: is the URI or
+// "sha256:"; is any of
+// the above narrowed to one blob.
+//
+// `registry://][` is parsed and reserved but errs as not-yet-implemented
+// in this slice.
+func NewMaterialsResolver(specs []string) (*MaterialsResolver, error) {
+ r := &MaterialsResolver{
+ overrides: map[string]materialOverride{},
+ }
+
+ // Default behavior when the user supplied no stores: use the provenance
+ // sentinel (fetch from the URIs recorded on each material).
+ if len(specs) == 0 {
+ r.sentinel = true
+ return r, nil
+ }
+
+ for _, raw := range specs {
+ spec := strings.TrimSpace(raw)
+ if spec == "" {
+ continue
+ }
+
+ // Override: split on first '=' whose LHS is not a URI scheme.
+ // "=" — the LHS starts with a scheme and carries "://",
+ // so the '=' we want is after the "://...". Detect by looking for
+ // the first '=' that is NOT inside a URI's path portion. We keep
+ // this simple: if the token before the first '=' is one of
+ // "provenance", an oci-layout:// prefix, or an absolute path, it
+ // is not an override.
+ if isOverrideSpec(spec) {
+ key, val, ok := splitOverride(spec)
+ if !ok {
+ return nil, errors.Errorf("malformed --materials override %q (want =)", spec)
+ }
+ r.overrides[key] = materialOverride{spec: val}
+ continue
+ }
+
+ switch {
+ case spec == "provenance":
+ r.sentinel = true
+ case strings.HasPrefix(spec, "oci-layout://"):
+ ref, _, err := ocilayout.Parse(spec)
+ if err != nil {
+ return nil, errors.Wrapf(err, "invalid --materials oci-layout spec %q", spec)
+ }
+ r.stores = append(r.stores, materialStore{ociLayout: ref.Path})
+ case strings.HasPrefix(spec, "registry://"):
+ // Reserved for snapshot-backed store lookup (slice B).
+ return nil, ErrNotImplemented("registry:// materials store")
+ case filepath.IsAbs(spec):
+ if fi, err := os.Stat(spec); err != nil || !fi.IsDir() {
+ return nil, errors.Errorf("--materials path %q is not a directory", spec)
+ }
+ r.stores = append(r.stores, materialStore{path: spec})
+ default:
+ return nil, errors.Errorf("unrecognized --materials spec %q", spec)
+ }
+ }
+
+ return r, nil
+}
+
+// isOverrideSpec decides whether a raw --materials token is shaped like
+// "=". A token that starts with a known store sentinel/prefix or
+// is an absolute path is never an override, even if it happens to contain an
+// '=' somewhere inside (e.g. "oci-layout:///path/to/layout:tag=foo").
+func isOverrideSpec(spec string) bool {
+ switch {
+ case spec == "provenance":
+ return false
+ case strings.HasPrefix(spec, "oci-layout://"):
+ return false
+ case strings.HasPrefix(spec, "registry://"):
+ return false
+ case filepath.IsAbs(spec):
+ return false
+ }
+ return strings.Contains(spec, "=")
+}
+
+func splitOverride(spec string) (key, val string, ok bool) {
+ i := strings.IndexByte(spec, '=')
+ if i <= 0 || i >= len(spec)-1 {
+ return "", "", false
+ }
+ return spec[:i], spec[i+1:], true
+}
+
+// Sentinel reports whether the `provenance` sentinel is enabled. The sentinel
+// authorises a fallback fetch from the URI recorded in the provenance.
+func (r *MaterialsResolver) Sentinel() bool {
+ return r != nil && r.sentinel
+}
+
+// Overrides returns an iteration-stable copy of the configured overrides for
+// tests and dry-run inspection.
+func (r *MaterialsResolver) Overrides() map[string]string {
+ if r == nil {
+ return nil
+ }
+ out := make(map[string]string, len(r.overrides))
+ for k, v := range r.overrides {
+ out[k] = v.spec
+ }
+ return out
+}
+
+// HasStores reports whether any explicit stores are configured. The primary
+// use is driving behavior when the resolver has only a sentinel.
+func (r *MaterialsResolver) HasStores() bool {
+ return r != nil && len(r.stores) > 0
+}
+
+// ResolveOption customises a Resolve call.
+type ResolveOption func(*resolveOptions)
+
+type resolveOptions struct {
+ // platform is used when resolving image materials out of a
+ // snapshot-backed store: the materials-manifest carries the original
+ // root-index blob, and the snapshot index's manifests[] holds the
+ // per-platform children. The caller supplies the platform it is
+ // replaying.
+ platform *ocispecs.Platform
+ // builderPlatform is the platform the original builder ran on
+ // (predicate InternalParameters.builderPlatform). Used as the
+ // fall-back when no child matches the subject platform — frontend
+ // images and cross-compile toolchains run on the builder host, not
+ // the target.
+ builderPlatform *ocispecs.Platform
+}
+
+// WithPlatform attaches a target platform to a Resolve call. When resolving
+// an image material against a snapshot-backed store this selects the
+// per-platform child to return.
+func WithPlatform(p *ocispecs.Platform) ResolveOption {
+ return func(o *resolveOptions) { o.platform = p }
+}
+
+// WithBuilderPlatform attaches the original builder's platform (recorded in
+// provenance) so the snapshot-backed store can fall back to it when the
+// subject platform has no match in an image material's root index.
+func WithBuilderPlatform(p ocispecs.Platform) ResolveOption {
+ return func(o *resolveOptions) { o.builderPlatform = &p }
+}
+
+// Resolve returns the descriptor and content.Provider that serve the material
+// with the given (uri, dgst). Exactly one of uri / dgst may be empty; when
+// both are empty an error is returned.
+//
+// Strict by default: materials not covered by the configured stores / overrides
+// and not reachable via the sentinel produce MaterialNotFoundError. The
+// provenance sentinel is NOT a network fetch in this slice — it signals that
+// BuildKit may resolve the material itself, subject to the source-policy
+// pin callback. A caller that requires a concrete (descriptor, provider)
+// pair for a sentinel-only material should use the store-backed
+// resolution path.
+//
+// Snapshot-backed stores are detected at lookup time. When `dgst` matches a
+// snapshot's materials-manifest layer AND the layer's media type is an OCI
+// image index (an image-material root index stashed by `replay snapshot`),
+// Resolve returns the platform-specific child manifest descriptor reachable
+// through the snapshot index's `manifests[]`. The caller should pass
+// WithPlatform so the correct child can be picked.
+func (r *MaterialsResolver) Resolve(ctx context.Context, uri string, dgst digest.Digest, opts ...ResolveOption) (ocispecs.Descriptor, content.Provider, error) {
+ if r == nil {
+ return ocispecs.Descriptor{}, nil, ErrMaterialNotFound(uri, dgst.String())
+ }
+ if uri == "" && dgst == "" {
+ return ocispecs.Descriptor{}, nil, errors.New("resolve called with empty uri and empty digest")
+ }
+
+ var ro resolveOptions
+ for _, opt := range opts {
+ opt(&ro)
+ }
+
+ // 1. Overrides.
+ if key, o, ok := r.lookupOverride(uri, dgst); ok {
+ desc, provider, err := r.resolveOverride(ctx, o, dgst, &ro)
+ if err != nil {
+ return ocispecs.Descriptor{}, nil, errors.Wrapf(err, "override %s", key)
+ }
+ return desc, provider, nil
+ }
+
+ // 2. Explicit stores, in order. Snapshot-backed lookup is preferred
+ // when a store root carries a snapshot index at its root.
+ for _, s := range r.stores {
+ if dgst == "" {
+ continue
+ }
+ if desc, provider, ok, err := s.lookupSnapshot(ctx, dgst, &ro); err != nil {
+ return ocispecs.Descriptor{}, nil, err
+ } else if ok {
+ return desc, provider, nil
+ }
+ desc, provider, ok, err := s.lookupByDigest(ctx, dgst)
+ if err != nil {
+ return ocispecs.Descriptor{}, nil, err
+ }
+ if ok {
+ return desc, provider, nil
+ }
+ }
+
+ // 3. Sentinel fallback. Replay relies on BuildKit fetching the material
+ // over the network subject to the policy callback; we cannot
+ // materialise the content locally without going online, so we
+ // surface a sentinel-only descriptor (empty provider) that the
+ // caller may use to signal "let BuildKit resolve".
+ if r.sentinel {
+ return ocispecs.Descriptor{Digest: dgst}, nil, nil
+ }
+
+ return ocispecs.Descriptor{}, nil, ErrMaterialNotFound(uri, dgst.String())
+}
+
+func (r *MaterialsResolver) lookupOverride(uri string, dgst digest.Digest) (string, materialOverride, bool) {
+ if uri != "" {
+ if o, ok := r.overrides[uri]; ok {
+ return uri, o, true
+ }
+ }
+ if dgst != "" {
+ if o, ok := r.overrides[dgst.String()]; ok {
+ return dgst.String(), o, true
+ }
+ }
+ return "", materialOverride{}, false
+}
+
+// resolveOverride resolves an override value to a concrete (descriptor,
+// provider) pair. Override values accept the same forms as non-override
+// specs: "oci-layout://[:]", "".
+func (r *MaterialsResolver) resolveOverride(ctx context.Context, o materialOverride, dgst digest.Digest, ro *resolveOptions) (ocispecs.Descriptor, content.Provider, error) {
+ spec := strings.TrimSpace(o.spec)
+ switch {
+ case strings.HasPrefix(spec, "oci-layout://"):
+ ref, _, err := ocilayout.Parse(spec)
+ if err != nil {
+ return ocispecs.Descriptor{}, nil, err
+ }
+ store := materialStore{ociLayout: ref.Path}
+ if dgst == "" {
+ return ocispecs.Descriptor{}, nil, errors.New("override oci-layout requires a material digest")
+ }
+ if desc, provider, ok, err := store.lookupSnapshot(ctx, dgst, ro); err != nil {
+ return ocispecs.Descriptor{}, nil, err
+ } else if ok {
+ return desc, provider, nil
+ }
+ desc, provider, ok, err := store.lookupByDigest(ctx, dgst)
+ if err != nil {
+ return ocispecs.Descriptor{}, nil, err
+ }
+ if !ok {
+ return ocispecs.Descriptor{}, nil, ErrMaterialNotFound("", dgst.String())
+ }
+ return desc, provider, nil
+ case filepath.IsAbs(spec):
+ fi, err := os.Stat(spec)
+ if err != nil {
+ return ocispecs.Descriptor{}, nil, errors.WithStack(err)
+ }
+ if fi.IsDir() {
+ store := materialStore{path: spec}
+ if dgst == "" {
+ return ocispecs.Descriptor{}, nil, errors.New("override path requires a material digest")
+ }
+ if desc, provider, ok, err := store.lookupSnapshot(ctx, dgst, ro); err != nil {
+ return ocispecs.Descriptor{}, nil, err
+ } else if ok {
+ return desc, provider, nil
+ }
+ desc, provider, ok, err := store.lookupByDigest(ctx, dgst)
+ if err != nil {
+ return ocispecs.Descriptor{}, nil, err
+ }
+ if !ok {
+ return ocispecs.Descriptor{}, nil, ErrMaterialNotFound("", dgst.String())
+ }
+ return desc, provider, nil
+ }
+ // A file override addresses exactly one blob. We expose it as a
+ // synthetic provider rooted at the file's bytes.
+ dt, err := os.ReadFile(spec)
+ if err != nil {
+ return ocispecs.Descriptor{}, nil, errors.WithStack(err)
+ }
+ actual := digest.FromBytes(dt)
+ if dgst != "" && actual != dgst {
+ return ocispecs.Descriptor{}, nil, errors.Errorf("override file %s has digest %s, want %s", spec, actual, dgst)
+ }
+ desc := ocispecs.Descriptor{Digest: actual, Size: int64(len(dt))}
+ buf := contentutil.NewBuffer()
+ if err := content.WriteBlob(ctx, buf, actual.String(), bytes.NewReader(dt), desc); err != nil {
+ return ocispecs.Descriptor{}, nil, errors.WithStack(err)
+ }
+ return desc, buf, nil
+ }
+ return ocispecs.Descriptor{}, nil, errors.Errorf("unsupported override value %q", spec)
+}
+
+// lookupByDigest serves a blob by digest from a filesystem or oci-layout
+// store. The returned descriptor has Digest+Size populated; MediaType is
+// left empty — callers that need it must inspect the bytes.
+func (s materialStore) lookupByDigest(ctx context.Context, dgst digest.Digest) (ocispecs.Descriptor, content.Provider, bool, error) {
+ if dgst == "" {
+ return ocispecs.Descriptor{}, nil, false, nil
+ }
+ root := s.path
+ if root == "" {
+ root = s.ociLayout
+ }
+ if root == "" {
+ return ocispecs.Descriptor{}, nil, false, nil
+ }
+ blobPath := filepath.Join(root, "blobs", dgst.Algorithm().String(), dgst.Encoded())
+ fi, err := os.Stat(blobPath)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return ocispecs.Descriptor{}, nil, false, nil
+ }
+ return ocispecs.Descriptor{}, nil, false, errors.WithStack(err)
+ }
+ if fi.IsDir() {
+ return ocispecs.Descriptor{}, nil, false, errors.Errorf("blob path %s is a directory", blobPath)
+ }
+ desc := ocispecs.Descriptor{Digest: dgst, Size: fi.Size()}
+
+ // An OCI-layout store is expected to be a real containerd content
+ // store; use contentlocal.NewStore so Readers are proper.
+ provider, err := contentlocal.NewStore(root)
+ if err != nil {
+ return ocispecs.Descriptor{}, nil, false, errors.Wrapf(err, "store at %s", root)
+ }
+ // Ensure the blob is actually readable (guards against partial layouts).
+ ra, err := provider.ReaderAt(ctx, desc)
+ if err != nil {
+ return ocispecs.Descriptor{}, nil, false, errors.WithStack(err)
+ }
+ _ = ra.Close()
+ return desc, provider, true, nil
+}
+
+// ParseLocationForMaterials exposes imagetools.ParseLocation for the
+// resolver's callers. Kept here as a thin alias so consumers don't need the
+// imagetools import just to parse an override right-hand side.
+func ParseLocationForMaterials(s string) (*imagetools.Location, error) {
+ return imagetools.ParseLocation(s)
+}
+
+// lookupSnapshot attempts to serve `dgst` through a snapshot-backed view of
+// the store. The store's root is inspected once per call;
+// if it carries a snapshot index (artifactType = ArtifactTypeSnapshot) the
+// lookup proceeds by:
+//
+// 1. Scanning every per-platform snapshot index's materials-manifest
+// layers for a layer whose digest matches `dgst`. When found, the
+// layer descriptor and a filesystem-backed provider are returned. If
+// the matched layer's media type is an OCI image index (i.e. the
+// original image-material root index kept opaque in the snapshot),
+// the function parses the root index, selects the platform child
+// matching ro.platform, and returns that child's descriptor as
+// reachable from the per-platform snapshot index's manifests[].
+// 2. Scanning the per-platform snapshot index's manifests[] directly so
+// a caller that already has the platform manifest's digest can
+// resolve it without going through the root.
+//
+// Returns ok == false when the store is not snapshot-shaped or when the
+// digest is not covered. In both cases the caller falls back to a plain
+// digest lookup.
+func (s materialStore) lookupSnapshot(ctx context.Context, dgst digest.Digest, ro *resolveOptions) (ocispecs.Descriptor, content.Provider, bool, error) {
+ if s.ociLayout == "" {
+ return ocispecs.Descriptor{}, nil, false, nil
+ }
+ root, roots, err := readSnapshotRoots(s.ociLayout)
+ if err != nil {
+ // A store that just isn't a snapshot: fall back to plain lookup.
+ return ocispecs.Descriptor{}, nil, false, nil
+ }
+ if root.ArtifactType != ArtifactTypeSnapshot && !anyIsSnapshot(roots) {
+ return ocispecs.Descriptor{}, nil, false, nil
+ }
+
+ store, err := contentlocal.NewStore(s.ociLayout)
+ if err != nil {
+ return ocispecs.Descriptor{}, nil, false, errors.Wrapf(err, "store at %s", s.ociLayout)
+ }
+
+ perPlatformDescs, err := collectPerPlatformSnapshotIndexes(ctx, store, root, roots)
+ if err != nil {
+ return ocispecs.Descriptor{}, nil, false, err
+ }
+
+ var (
+ wantPlat *ocispecs.Platform
+ builderPlat = platforms.DefaultSpec()
+ )
+ if ro != nil {
+ wantPlat = ro.platform
+ if ro.builderPlatform != nil {
+ builderPlat = *ro.builderPlatform
+ }
+ }
+
+ for _, ppDesc := range perPlatformDescs {
+ ppDt, err := content.ReadBlob(ctx, store, ppDesc)
+ if err != nil {
+ return ocispecs.Descriptor{}, nil, false, errors.WithStack(err)
+ }
+ var pp ocispecs.Index
+ if err := json.Unmarshal(ppDt, &pp); err != nil {
+ return ocispecs.Descriptor{}, nil, false, errors.WithStack(err)
+ }
+
+ // Load the materials manifest (first manifest with artifactType
+ // ArtifactTypeMaterials — may be absent when the snapshot was
+ // created with --include-materials=false).
+ var materialsLayers []ocispecs.Descriptor
+ for _, m := range pp.Manifests {
+ if m.ArtifactType != ArtifactTypeMaterials {
+ continue
+ }
+ mData, err := content.ReadBlob(ctx, store, m)
+ if err != nil {
+ return ocispecs.Descriptor{}, nil, false, errors.WithStack(err)
+ }
+ var mm ocispecs.Manifest
+ if err := json.Unmarshal(mData, &mm); err != nil {
+ return ocispecs.Descriptor{}, nil, false, errors.WithStack(err)
+ }
+ materialsLayers = mm.Layers
+ break
+ }
+
+ // 1a. Direct hit on a materials-manifest layer.
+ for _, l := range materialsLayers {
+ if l.Digest != dgst {
+ continue
+ }
+ if isIndexMediaType(l.MediaType) {
+ // Image material root — pick the platform-specific child
+ // from the per-platform index's manifests[].
+ child, err := pickPerPlatformChild(ctx, store, l, pp, wantPlat, builderPlat)
+ if err != nil {
+ return ocispecs.Descriptor{}, nil, false, err
+ }
+ return child, store, true, nil
+ }
+ return l, store, true, nil
+ }
+
+ // 1b. Direct hit on a manifests[] entry (a platform-specific image
+ // manifest descriptor that the caller already looked up).
+ for _, m := range pp.Manifests {
+ if m.Digest == dgst {
+ return m, store, true, nil
+ }
+ }
+ }
+
+ return ocispecs.Descriptor{}, nil, false, nil
+}
+
+// readSnapshotRoots loads the root manifest references from the store's
+// index.json. Returns the single-descriptor "root" when only one is present
+// (the per-platform case) plus the full list for multi-platform snapshots.
+func readSnapshotRoots(path string) (ocispecs.Descriptor, []ocispecs.Descriptor, error) {
+ idx, err := ociindex.NewStoreIndex(path).Read()
+ if err != nil {
+ return ocispecs.Descriptor{}, nil, err
+ }
+ if len(idx.Manifests) == 0 {
+ return ocispecs.Descriptor{}, nil, errors.New("empty index")
+ }
+ return idx.Manifests[0], idx.Manifests, nil
+}
+
+// anyIsSnapshot reports whether any descriptor in roots carries the snapshot
+// artifact type. A top-level multi-platform snapshot's descriptor may itself
+// carry artifactType = ArtifactTypeSnapshot; so will each per-platform child.
+func anyIsSnapshot(roots []ocispecs.Descriptor) bool {
+ for _, r := range roots {
+ if r.ArtifactType == ArtifactTypeSnapshot {
+ return true
+ }
+ }
+ return false
+}
+
+// collectPerPlatformSnapshotIndexes traverses the supplied roots and returns
+// every per-platform snapshot index descriptor reachable from them. A
+// per-platform snapshot index is identified by mediaType=image index and
+// artifactType = ArtifactTypeSnapshot with a `subject` (§5.2.1). The
+// single-platform case returns the root itself; the multi-platform case
+// unwraps the top-level index and returns its children.
+func collectPerPlatformSnapshotIndexes(ctx context.Context, store content.Provider, root ocispecs.Descriptor, roots []ocispecs.Descriptor) ([]ocispecs.Descriptor, error) {
+ candidates := roots
+ if len(candidates) == 0 {
+ candidates = []ocispecs.Descriptor{root}
+ }
+ var out []ocispecs.Descriptor
+ for _, c := range candidates {
+ if !isIndexMediaType(c.MediaType) {
+ continue
+ }
+ dt, err := content.ReadBlob(ctx, store, c)
+ if err != nil {
+ return nil, errors.WithStack(err)
+ }
+ var idx ocispecs.Index
+ if err := json.Unmarshal(dt, &idx); err != nil {
+ return nil, errors.WithStack(err)
+ }
+ // A per-platform snapshot index carries a non-nil Subject (§5.2.1).
+ if idx.Subject != nil {
+ out = append(out, c)
+ continue
+ }
+ // Top-level multi-platform index — unwrap one level.
+ for _, child := range idx.Manifests {
+ if child.ArtifactType == ArtifactTypeSnapshot && isIndexMediaType(child.MediaType) {
+ out = append(out, child)
+ }
+ }
+ }
+ return out, nil
+}
+
+// pickPerPlatformChild selects the platform-specific manifest from a
+// per-platform snapshot index's manifests[] that corresponds to the
+// recorded image-material root `rootLayer`. The matcher prefers the
+// subject's platform then falls back to the builder platform; when the
+// root index has a single child it is returned unconditionally.
+func pickPerPlatformChild(ctx context.Context, store content.Provider, rootLayer ocispecs.Descriptor, pp ocispecs.Index, wantPlat *ocispecs.Platform, builderPlat ocispecs.Platform) (ocispecs.Descriptor, error) {
+ dt, err := content.ReadBlob(ctx, store, rootLayer)
+ if err != nil {
+ return ocispecs.Descriptor{}, errors.WithStack(err)
+ }
+ var rootIdx ocispecs.Index
+ if err := json.Unmarshal(dt, &rootIdx); err != nil {
+ return ocispecs.Descriptor{}, errors.WithStack(err)
+ }
+ matcher := replayPlatformMatcher(wantPlat, builderPlat)
+ var best *ocispecs.Descriptor
+ for i := range rootIdx.Manifests {
+ c := rootIdx.Manifests[i]
+ if c.Platform == nil || !matcher.Match(*c.Platform) {
+ continue
+ }
+ if best == nil || matcher.Less(*c.Platform, *best.Platform) {
+ best = &c
+ }
+ }
+ var wantDgst digest.Digest
+ switch {
+ case best != nil:
+ wantDgst = best.Digest
+ case len(rootIdx.Manifests) == 1:
+ wantDgst = rootIdx.Manifests[0].Digest
+ default:
+ return ocispecs.Descriptor{}, errors.Errorf("snapshot lookup: root %s has no child matching subject platform %s or builder %s", rootLayer.Digest, formatPlatformPtr(wantPlat), platforms.Format(builderPlat))
+ }
+ // Resolve against manifests[] for the concrete descriptor (includes
+ // size / mediaType as recorded by the snapshot).
+ for _, m := range pp.Manifests {
+ if m.Digest == wantDgst {
+ return m, nil
+ }
+ }
+ // Not present in the snapshot's manifests[] — return a synthetic
+ // descriptor so the caller can still address content-by-digest.
+ return ocispecs.Descriptor{
+ MediaType: ocispecs.MediaTypeImageManifest,
+ Digest: wantDgst,
+ }, nil
+}
+
+func isIndexMediaType(mt string) bool {
+ return mt == ocispecs.MediaTypeImageIndex || mt == "application/vnd.docker.distribution.manifest.list.v2+json"
+}
diff --git a/replay/materials_test.go b/replay/materials_test.go
new file mode 100644
index 000000000000..b4935994c745
--- /dev/null
+++ b/replay/materials_test.go
@@ -0,0 +1,211 @@
+package replay
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/docker/buildx/util/buildflags"
+ "github.com/opencontainers/go-digest"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/require"
+)
+
+func writeBlob(t *testing.T, root string, dt []byte) digest.Digest {
+ t.Helper()
+ d := digest.FromBytes(dt)
+ dir := filepath.Join(root, "blobs", d.Algorithm().String())
+ require.NoError(t, os.MkdirAll(dir, 0o755))
+ require.NoError(t, os.WriteFile(filepath.Join(dir, d.Encoded()), dt, 0o644))
+ return d
+}
+
+// seedOCILayout creates a minimum OCI layout skeleton: a `oci-layout` marker
+// file plus the blobs/sha256 dir. The caller writes blobs on top.
+func seedOCILayout(t *testing.T, root string) {
+ t.Helper()
+ require.NoError(t, os.MkdirAll(filepath.Join(root, "blobs", "sha256"), 0o755))
+ require.NoError(t, os.WriteFile(filepath.Join(root, "oci-layout"), []byte(`{"imageLayoutVersion":"1.0.0"}`), 0o644))
+ require.NoError(t, os.WriteFile(filepath.Join(root, "index.json"), []byte(`{"schemaVersion":2,"manifests":[]}`), 0o644))
+}
+
+func TestMaterialsResolverProvenanceDefault(t *testing.T) {
+ r, err := NewMaterialsResolver(nil)
+ require.NoError(t, err)
+ require.True(t, r.Sentinel())
+ require.False(t, r.HasStores())
+}
+
+func TestMaterialsResolverProvenanceSentinelExplicit(t *testing.T) {
+ r, err := NewMaterialsResolver([]string{"provenance"})
+ require.NoError(t, err)
+ require.True(t, r.Sentinel())
+}
+
+func TestMaterialsResolverOCILayoutDigestLookup(t *testing.T) {
+ dir := t.TempDir()
+ seedOCILayout(t, dir)
+ payload := []byte(`{"mediaType":"application/vnd.oci.image.manifest.v1+json"}`)
+ d := writeBlob(t, dir, payload)
+
+ r, err := NewMaterialsResolver([]string{"oci-layout://" + dir})
+ require.NoError(t, err)
+ require.False(t, r.Sentinel())
+ require.True(t, r.HasStores())
+
+ desc, provider, err := r.Resolve(context.Background(), "pkg:docker/alpine@3.18", d)
+ require.NoError(t, err)
+ require.Equal(t, d, desc.Digest)
+ require.NotNil(t, provider)
+
+ // Verify that the returned provider serves the blob.
+ dt, err := content.ReadBlob(context.Background(), provider, ocispecs.Descriptor{Digest: d, Size: int64(len(payload))})
+ require.NoError(t, err)
+ require.Equal(t, payload, dt)
+}
+
+func TestMaterialsResolverOCILayoutMiss(t *testing.T) {
+ dir := t.TempDir()
+ seedOCILayout(t, dir)
+
+ r, err := NewMaterialsResolver([]string{"oci-layout://" + dir})
+ require.NoError(t, err)
+
+ absent := digest.FromBytes([]byte("nope"))
+ _, _, err = r.Resolve(context.Background(), "pkg:docker/missing@1.0", absent)
+ require.Error(t, err)
+ var mnf *MaterialNotFoundError
+ require.ErrorAs(t, err, &mnf)
+ require.Equal(t, absent.String(), mnf.Digest)
+}
+
+func TestMaterialsResolverOverridesByURI(t *testing.T) {
+ dir := t.TempDir()
+ seedOCILayout(t, dir)
+ payload := []byte("hello-override")
+ d := writeBlob(t, dir, payload)
+
+ uri := "https://example.com/whatever.tar"
+ specs := []string{uri + "=oci-layout://" + dir}
+ r, err := NewMaterialsResolver(specs)
+ require.NoError(t, err)
+
+ overrides := r.Overrides()
+ require.Contains(t, overrides, uri)
+
+ desc, provider, err := r.Resolve(context.Background(), uri, d)
+ require.NoError(t, err)
+ require.Equal(t, d, desc.Digest)
+ require.NotNil(t, provider)
+}
+
+func TestMaterialsResolverOverridesByDigest(t *testing.T) {
+ dir := t.TempDir()
+ seedOCILayout(t, dir)
+ payload := []byte("bytes-for-digest-override")
+ d := writeBlob(t, dir, payload)
+
+ specs := []string{d.String() + "=oci-layout://" + dir}
+ r, err := NewMaterialsResolver(specs)
+ require.NoError(t, err)
+
+ desc, provider, err := r.Resolve(context.Background(), "", d)
+ require.NoError(t, err)
+ require.Equal(t, d, desc.Digest)
+ require.NotNil(t, provider)
+}
+
+func TestMaterialsResolverOverrideFile(t *testing.T) {
+ dir := t.TempDir()
+ payload := []byte("standalone-file-override")
+ path := filepath.Join(dir, "payload.bin")
+ require.NoError(t, os.WriteFile(path, payload, 0o644))
+
+ d := digest.FromBytes(payload)
+ uri := "https://example.com/a.tar"
+ r, err := NewMaterialsResolver([]string{uri + "=" + path})
+ require.NoError(t, err)
+
+ desc, provider, err := r.Resolve(context.Background(), uri, d)
+ require.NoError(t, err)
+ require.Equal(t, d, desc.Digest)
+ require.Equal(t, int64(len(payload)), desc.Size)
+ require.NotNil(t, provider)
+
+ dt, err := content.ReadBlob(context.Background(), provider, desc)
+ require.NoError(t, err)
+ require.Equal(t, payload, dt)
+}
+
+func TestMaterialsResolverMalformedOverride(t *testing.T) {
+ _, err := NewMaterialsResolver([]string{"=oci-layout:///foo"})
+ require.Error(t, err)
+}
+
+func TestMaterialsResolverRegistryNotImplemented(t *testing.T) {
+ _, err := NewMaterialsResolver([]string{"registry://my/ref"})
+ require.Error(t, err)
+ var nie *NotImplementedError
+ require.ErrorAs(t, err, &nie)
+}
+
+func TestMaterialsResolverUnknownSpec(t *testing.T) {
+ _, err := NewMaterialsResolver([]string{"gopher://x"})
+ require.Error(t, err)
+}
+
+func TestMaterialsResolverOverrideIsNotConfusedWithPath(t *testing.T) {
+ // An absolute path that happens to contain '=' characters must still be
+ // recognised as a path, not as an override.
+ dir := t.TempDir()
+ oddDir := filepath.Join(dir, "has=equals")
+ require.NoError(t, os.MkdirAll(filepath.Join(oddDir, "blobs", "sha256"), 0o755))
+
+ r, err := NewMaterialsResolver([]string{oddDir})
+ require.NoError(t, err)
+ require.True(t, r.HasStores())
+}
+
+// TestMaterialsResolverSnapshotBackedLookup builds a snapshot via Snapshot(),
+// then points a fresh MaterialsResolver at the resulting OCI layout and
+// resolves both an http and (synthetic) image material. The snapshot-backed
+// lookup path (materials.go lookupSnapshot) is exercised.
+func TestMaterialsResolverSnapshotBackedLookup(t *testing.T) {
+ fx := makeSnapshotFixture(t)
+ dest := t.TempDir()
+ exp := buildflags.ExportEntry{Type: "oci", Destination: dest, Attrs: map[string]string{"tar": "false"}}
+ req := &SnapshotRequest{
+ Targets: []Target{{Subject: fx.subject, Predicate: fx.predicate}},
+ IncludeMaterials: true,
+ Materials: snapshotOverrideResolver(t, fx.httpURI, fx.httpBytes),
+ Output: &exp,
+ }
+ require.NoError(t, Snapshot(context.Background(), nil, "", req))
+
+ layout := "oci-layout://" + dest
+ r, err := NewMaterialsResolver([]string{layout})
+ require.NoError(t, err)
+
+ // 1. Http material: resolves by digest into the materials manifest's
+ // layer set.
+ desc, provider, err := r.Resolve(context.Background(), fx.httpURI, fx.httpDigest)
+ require.NoError(t, err)
+ require.NotNil(t, provider)
+ require.Equal(t, fx.httpDigest, desc.Digest)
+ got, err := content.ReadBlob(context.Background(), provider, desc)
+ require.NoError(t, err)
+ require.Equal(t, fx.httpBytes, got)
+
+ // 2. The subject manifest itself is reachable by its digest: Snapshot
+ // does not store it in the materials manifest, but the per-platform
+ // snapshot index's `manifests[]` references the attestation-manifest
+ // chain which CopyChain has copied into the layout. A lookup by the
+ // attestation manifest's digest must resolve (direct content-by-
+ // digest path).
+ attestDesc, attestProvider, err := r.Resolve(context.Background(), "", fx.attestDigest)
+ require.NoError(t, err)
+ require.NotNil(t, attestProvider)
+ require.Equal(t, fx.attestDigest, attestDesc.Digest)
+}
diff --git a/replay/policy.go b/replay/policy.go
new file mode 100644
index 000000000000..6fad09492b66
--- /dev/null
+++ b/replay/policy.go
@@ -0,0 +1,369 @@
+package replay
+
+import (
+ "context"
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/containerd/platforms"
+ "github.com/docker/buildx/policy"
+ "github.com/distribution/reference"
+ slsa1 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1"
+ gwpb "github.com/moby/buildkit/frontend/gateway/pb"
+ spb "github.com/moby/buildkit/sourcepolicy/pb"
+ "github.com/moby/buildkit/sourcepolicy/policysession"
+ "github.com/moby/buildkit/util/purl"
+ "github.com/opencontainers/go-digest"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+// PinIndex is a resolved, URI/digest-keyed view of the predicate's
+// ResolvedDependencies suitable for fast policy-callback lookup.
+//
+// Two lookup tables are maintained: byURI maps the material's URI (e.g.
+// "docker-image://alpine:3.18" or "https://example.com/foo.tar") to the pinned
+// digest, and byDigest maps an already-pinned digest to the URI it belongs to.
+// Either side of a pin index entry is sufficient to match a source-policy
+// request; when both are present on the request they must refer to the same
+// pin entry.
+type PinIndex struct {
+ byURI map[string]digest.Digest
+ byDigest map[digest.Digest]string
+ materials []string
+}
+
+// NewPinIndex builds a PinIndex from the predicate's ResolvedDependencies.
+// When a material has multiple digest entries (e.g. sha256 + sha512), the
+// sha256 entry is preferred; otherwise the first entry wins. Materials without
+// a usable digest are skipped.
+func NewPinIndex(p *Predicate) *PinIndex {
+ idx := &PinIndex{
+ byURI: map[string]digest.Digest{},
+ byDigest: map[digest.Digest]string{},
+ }
+ if p == nil {
+ return idx
+ }
+ seenMaterials := map[string]struct{}{}
+ for _, m := range p.ResolvedDependencies() {
+ if disp := formatPinMaterial(m); disp != "" {
+ if _, ok := seenMaterials[disp]; !ok {
+ seenMaterials[disp] = struct{}{}
+ idx.materials = append(idx.materials, disp)
+ }
+ }
+ d := preferredDigest(m.Digest)
+ if d == "" {
+ continue
+ }
+ if m.URI != "" {
+ idx.byURI[m.URI] = d
+ if canon, ok := canonicalMaterialIdentifier(m); ok {
+ idx.byURI[canon] = d
+ for _, alias := range canonicalIdentifierAliases(canon) {
+ idx.byURI[alias] = d
+ }
+ }
+ }
+ idx.byDigest[d] = m.URI
+ }
+ sort.Strings(idx.materials)
+ return idx
+}
+
+// preferredDigest picks a single digest from an in-toto DigestSet. sha256 is
+// preferred so it aligns with how BuildKit's source-meta responses return
+// image and http digests. Any other algorithm is accepted as a fallback.
+func preferredDigest(set map[string]string) digest.Digest {
+ if set == nil {
+ return ""
+ }
+ if v, ok := set["sha256"]; ok && v != "" {
+ return digest.NewDigestFromEncoded(digest.SHA256, v)
+ }
+ for alg, v := range set {
+ if v == "" {
+ continue
+ }
+ return digest.NewDigestFromEncoded(digest.Algorithm(alg), v)
+ }
+ return ""
+}
+
+// Len reports the number of pin entries. Used by `--dry-run`.
+func (p *PinIndex) Len() int {
+ if p == nil {
+ return 0
+ }
+ return len(p.byDigest)
+}
+
+// Lookup resolves a URI to its pinned digest. Returns ("", false) when the
+// URI is not covered by the index.
+func (p *PinIndex) Lookup(uri string) (digest.Digest, bool) {
+ if p == nil {
+ return "", false
+ }
+ d, ok := p.byURI[uri]
+ return d, ok
+}
+
+// ReplayPinCallback returns a policysession.PolicyCallback that enforces the
+// pin index. Sources covered by the index are ALLOWed when their requested
+// digest matches; unknown sources are DENY (fail-closed); covered sources
+// with wrong digest are DENY with a DenyMessage.
+func ReplayPinCallback(idx *PinIndex) policysession.PolicyCallback {
+ return func(ctx context.Context, req *policysession.CheckPolicyRequest) (*policysession.DecisionResponse, *gwpb.ResolveSourceMetaRequest, error) {
+ uri, observed := extractSourceIdentity(req)
+ if uri == "" && observed == "" {
+ return denyResponse("replay pin: request carried no source identifier"), nil, nil
+ }
+
+ // URI-matched: compare observed digest against the pinned digest.
+ if idx != nil && uri != "" {
+ if pinned, ok := idx.byURI[uri]; ok {
+ if decision, handled, err := convertPinnedImage(req, pinned); err != nil {
+ return nil, nil, err
+ } else if handled {
+ return decision, nil, nil
+ }
+ if observed == "" {
+ return allowResponse(), nil, nil
+ }
+ if pinned == observed {
+ return allowResponse(), nil, nil
+ }
+ return denyResponse(fmt.Sprintf("replay pin mismatch for %s: expected %s, got %s", uri, pinned, observed)), nil, nil
+ }
+ }
+
+ // Digest-only match: the observed digest matches a pinned material
+ // whose URI was not available on the request. Accept this — the
+ // content is the pinned bytes regardless of how the frontend named
+ // them.
+ if idx != nil && observed != "" {
+ if _, ok := idx.byDigest[observed]; ok {
+ return allowResponse(), nil, nil
+ }
+ }
+
+ // Source is not covered by the pin index. Fail closed.
+ return denyResponse(notCoveredMessage(idx, req, displaySource(uri, observed))), nil, nil
+ }
+}
+
+func convertPinnedImage(req *policysession.CheckPolicyRequest, pinned digest.Digest) (*policysession.DecisionResponse, bool, error) {
+ if req == nil || req.Source == nil || req.Source.GetSource() == nil {
+ return nil, false, nil
+ }
+ src := req.Source.GetSource()
+ if canon := canonicalRequestSource(req); canon != "" && canon != src.Identifier {
+ clone := *src
+ clone.Identifier = canon
+ src = &clone
+ }
+ if !strings.HasPrefix(src.Identifier, "docker-image://") {
+ return nil, false, nil
+ }
+ newSrc, err := policy.AddPinToImage(src, pinned)
+ if err != nil {
+ return nil, false, errors.Wrap(err, "failed to pin covered image source")
+ }
+ if newSrc.Identifier == src.Identifier {
+ return allowResponse(), true, nil
+ }
+ return &policysession.DecisionResponse{
+ Action: spb.PolicyAction_CONVERT,
+ Update: newSrc,
+ }, true, nil
+}
+
+// extractSourceIdentity pulls the URI and the digest (if known on this call)
+// from a CheckPolicyRequest. Supports the three source shapes BuildKit emits
+// on a session policy request: image, http, git.
+func extractSourceIdentity(req *policysession.CheckPolicyRequest) (uri string, dgst digest.Digest) {
+ if req == nil || req.Source == nil {
+ return "", ""
+ }
+ src := req.Source.GetSource()
+ if src != nil {
+ uri = src.GetIdentifier()
+ }
+
+ // Image response carries the resolved digest once BuildKit has asked the
+ // registry. First invocation (before resolution) comes without a digest —
+ // that is the expected path for the ALLOW-pending branch.
+ if img := req.Source.GetImage(); img != nil {
+ if v := img.GetDigest(); v != "" {
+ dgst = digest.Digest(v)
+ }
+ }
+ if h := req.Source.GetHTTP(); h != nil {
+ if v := h.GetChecksum(); v != "" {
+ dgst = digest.Digest(v)
+ }
+ }
+ if g := req.Source.GetGit(); g != nil {
+ // Git materials do not carry a sha256 content digest. Treat the
+ // commit checksum as a placeholder — the URI match is the real
+ // enforcement point for git.
+ if v := g.GetCommitChecksum(); v != "" && dgst == "" {
+ dgst = digest.Digest(v)
+ }
+ }
+ if uri != "" {
+ uri, dgst = normalizeRequestSourceIdentity(uri, dgst)
+ }
+ return uri, dgst
+}
+
+func displaySource(uri string, dgst digest.Digest) string {
+ switch {
+ case uri != "" && dgst != "":
+ return fmt.Sprintf("%s@%s", uri, dgst)
+ case uri != "":
+ return uri
+ case dgst != "":
+ return dgst.String()
+ default:
+ return ""
+ }
+}
+
+func canonicalMaterialIdentifier(m slsa1.ResourceDescriptor) (string, bool) {
+ src, _, err := policy.ParseSLSAMaterial(m)
+ if err != nil || src == nil || src.Identifier == "" {
+ return "", false
+ }
+ return src.Identifier, true
+}
+
+func canonicalIdentifierAliases(id string) []string {
+ refStr, ok := strings.CutPrefix(id, "docker-image://")
+ if !ok {
+ return nil
+ }
+ if refBase, _, ok := strings.Cut(refStr, "@"); ok {
+ return []string{"docker-image://" + refBase}
+ }
+ if _, err := reference.ParseNormalizedNamed(refStr); err != nil {
+ return nil
+ }
+ return nil
+}
+
+func normalizeRequestSourceIdentity(uri string, dgst digest.Digest) (string, digest.Digest) {
+ refStr, ok := strings.CutPrefix(uri, "docker-image://")
+ if !ok {
+ return uri, dgst
+ }
+ if refBase, refDigest, ok := strings.Cut(refStr, "@"); ok {
+ if dgst == "" {
+ if parsed, err := digest.Parse(refDigest); err == nil {
+ dgst = parsed
+ }
+ }
+ return "docker-image://" + refBase, dgst
+ }
+ return uri, dgst
+}
+
+func notCoveredMessage(idx *PinIndex, req *policysession.CheckPolicyRequest, src string) string {
+ if idx == nil || len(idx.materials) == 0 {
+ return fmt.Sprintf("replay pin: no provenance material matched requested source\n target: %s", formatTargetSource(req, src))
+ }
+ var b strings.Builder
+ b.WriteString("replay pin: no provenance material matched requested source")
+ b.WriteString("\n target: ")
+ b.WriteString(formatTargetSource(req, src))
+ b.WriteString("\n provenance materials:")
+ for _, m := range idx.materials {
+ b.WriteString("\n - ")
+ b.WriteString(m)
+ }
+ return b.String()
+}
+
+func requestPlatform(req *policysession.CheckPolicyRequest) *ocispecs.Platform {
+ if req == nil || req.Platform == nil {
+ return nil
+ }
+ p := ocispecs.Platform{
+ OS: req.Platform.OS,
+ Architecture: req.Platform.Architecture,
+ Variant: req.Platform.Variant,
+ }
+ norm := platforms.Normalize(p)
+ return &norm
+}
+
+func formatPinMaterial(m slsa1.ResourceDescriptor) string {
+ if m.URI == "" {
+ return ""
+ }
+ src, p, err := policy.ParseSLSAMaterial(m)
+ switch {
+ case err != nil || src == nil || src.Identifier == "":
+ return m.URI
+ case p != nil:
+ return fmt.Sprintf("uri=%s canonical=%s platform=%s", m.URI, src.Identifier, platforms.Format(*p))
+ case src.Identifier != m.URI:
+ return fmt.Sprintf("uri=%s canonical=%s", m.URI, src.Identifier)
+ default:
+ return "uri=" + m.URI
+ }
+}
+
+func formatTargetSource(req *policysession.CheckPolicyRequest, src string) string {
+ parts := []string{"uri=" + src}
+ if canon := canonicalRequestSource(req); canon != "" && canon != src {
+ parts = append(parts, "canonical="+canon)
+ }
+ if p := requestPlatform(req); p != nil {
+ parts = append(parts, "platform="+platforms.Format(*p))
+ }
+ return strings.Join(parts, " ")
+}
+
+func canonicalRequestSource(req *policysession.CheckPolicyRequest) string {
+ if req == nil || req.Source == nil || req.Source.Source == nil {
+ return ""
+ }
+ uri := req.Source.Source.Identifier
+ if uri == "" {
+ return ""
+ }
+ if strings.HasPrefix(uri, "pkg:docker/") {
+ refStr, _, err := purl.PURLToRef(uri)
+ if err != nil {
+ return ""
+ }
+ named, err := reference.ParseNormalizedNamed(refStr)
+ if err != nil {
+ return ""
+ }
+ return "docker-image://" + named.String()
+ }
+ return uri
+}
+
+func allowResponse() *policysession.DecisionResponse {
+ return &policysession.DecisionResponse{Action: spb.PolicyAction_ALLOW}
+}
+
+func denyResponse(msg string) *policysession.DecisionResponse {
+ return &policysession.DecisionResponse{
+ Action: spb.PolicyAction_DENY,
+ DenyMessages: []*policysession.DenyMessage{{Message: msg}},
+ }
+}
+
+// ComposeCallbacks aggregates any number of callbacks via the
+// MultiPolicyCallback helper so a replay pin callback composes cleanly with
+// any future overlay. The replay callback MUST be the last (most-strict)
+// entry; callers are expected to append it last.
+func ComposeCallbacks(cbs ...policysession.PolicyCallback) policysession.PolicyCallback {
+ return policy.MultiPolicyCallback(cbs...)
+}
diff --git a/replay/policy_test.go b/replay/policy_test.go
new file mode 100644
index 000000000000..924536b779a9
--- /dev/null
+++ b/replay/policy_test.go
@@ -0,0 +1,235 @@
+package replay
+
+import (
+ "context"
+ "testing"
+
+ buildxpolicy "github.com/docker/buildx/policy"
+ slsacommon "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
+ slsa1 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1"
+ gwpb "github.com/moby/buildkit/frontend/gateway/pb"
+ provenancetypes "github.com/moby/buildkit/solver/llbsolver/provenance/types"
+ solverpb "github.com/moby/buildkit/solver/pb"
+ spb "github.com/moby/buildkit/sourcepolicy/pb"
+ "github.com/moby/buildkit/sourcepolicy/policysession"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ imageURIAlpine = "pkg:docker/alpine@3.18?platform=linux%2Famd64"
+ imageSHA = "sha256:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"
+
+ httpURI = "https://example.com/payload.tar"
+ httpSHA = "sha256:1111111111111111111111111111111111111111111111111111111111111111"
+
+ gitURIHTTPS = "https://github.com/moby/buildkit.git#refs/tags/v0.29.0"
+ gitURIGit = "git://github.com/moby/buildkit.git#refs/tags/v0.29.0"
+ gitCommit = "sha1:8543ce4428265d547cb009e5ad62348284497a88"
+)
+
+func predicateWithMaterials(mats ...slsa1.ResourceDescriptor) *Predicate {
+ pred := &Predicate{}
+ pred.BuildDefinition = provenancetypes.ProvenanceBuildDefinitionSLSA1{}
+ pred.BuildDefinition.ResolvedDependencies = mats
+ return pred
+}
+
+func imageMaterial(uri, sha string) slsa1.ResourceDescriptor {
+ return slsa1.ResourceDescriptor{
+ URI: uri,
+ Digest: slsacommon.DigestSet{"sha256": stripSHA256(sha)},
+ }
+}
+
+func stripSHA256(s string) string {
+ if i := len("sha256:"); len(s) > i && s[:i] == "sha256:" {
+ return s[i:]
+ }
+ if i := len("sha1:"); len(s) > i && s[:i] == "sha1:" {
+ return s[i:]
+ }
+ return s
+}
+
+func imageCheckRequest(uri, observed string) *policysession.CheckPolicyRequest {
+ return &policysession.CheckPolicyRequest{
+ Source: &gwpb.ResolveSourceMetaResponse{
+ Source: &solverpb.SourceOp{Identifier: uri},
+ Image: &gwpb.ResolveSourceImageResponse{Digest: observed},
+ },
+ }
+}
+
+func httpCheckRequest(uri, observed string) *policysession.CheckPolicyRequest {
+ return &policysession.CheckPolicyRequest{
+ Source: &gwpb.ResolveSourceMetaResponse{
+ Source: &solverpb.SourceOp{Identifier: uri},
+ HTTP: &gwpb.ResolveSourceHTTPResponse{Checksum: observed},
+ },
+ }
+}
+
+func gitCheckRequest(uri, observed string) *policysession.CheckPolicyRequest {
+ return &policysession.CheckPolicyRequest{
+ Source: &gwpb.ResolveSourceMetaResponse{
+ Source: &solverpb.SourceOp{Identifier: uri},
+ Git: &gwpb.ResolveSourceGitResponse{CommitChecksum: observed},
+ },
+ }
+}
+
+func TestPinIndexURIAllowAndDeny(t *testing.T) {
+ pred := predicateWithMaterials(imageMaterial(imageURIAlpine, imageSHA))
+ idx := NewPinIndex(pred)
+ cb := ReplayPinCallback(idx)
+
+ // Covered image URI is converted to the pinned digest from provenance.
+ resp, _, err := cb(context.Background(), imageCheckRequest(imageURIAlpine, imageSHA))
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ require.Equal(t, spb.PolicyAction_CONVERT, resp.Action)
+ require.NotNil(t, resp.Update)
+ require.Equal(t, "docker-image://docker.io/library/alpine:3.18@"+imageSHA, resp.Update.Identifier)
+
+ // Digest drift on a covered image still converts to the provenance pin.
+ wrongSHA := "sha256:0000000000000000000000000000000000000000000000000000000000000000"
+ resp, _, err = cb(context.Background(), imageCheckRequest(imageURIAlpine, wrongSHA))
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ require.Equal(t, spb.PolicyAction_CONVERT, resp.Action)
+ require.NotNil(t, resp.Update)
+ require.Equal(t, "docker-image://docker.io/library/alpine:3.18@"+imageSHA, resp.Update.Identifier)
+}
+
+func TestPinIndexImagePURLCanonicalIdentifierAllowed(t *testing.T) {
+ mat := imageMaterial(imageURIAlpine, imageSHA)
+ src, _, err := buildxpolicy.ParseSLSAMaterial(mat)
+ require.NoError(t, err)
+ require.NotNil(t, src)
+
+ idx := NewPinIndex(predicateWithMaterials(mat))
+ cb := ReplayPinCallback(idx)
+
+ resp, _, err := cb(context.Background(), imageCheckRequest(src.Identifier, imageSHA))
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ require.Equal(t, spb.PolicyAction_ALLOW, resp.Action)
+}
+
+func TestPinIndexImageCanonicalUnpinnedIdentifierAllowed(t *testing.T) {
+ mat := imageMaterial("pkg:docker/docker/dockerfile-upstream@master", "sha256:02bce6c486f5bbd7b2eb6b9a16e3734110face1c70a6bacd827dcdb80c3f9a24")
+ idx := NewPinIndex(predicateWithMaterials(mat))
+ cb := ReplayPinCallback(idx)
+
+ resp, _, err := cb(context.Background(), imageCheckRequest("docker-image://docker.io/docker/dockerfile-upstream:master", ""))
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ require.Equal(t, spb.PolicyAction_CONVERT, resp.Action)
+ require.NotNil(t, resp.Update)
+ require.Equal(t, "docker-image://docker.io/docker/dockerfile-upstream:master@sha256:02bce6c486f5bbd7b2eb6b9a16e3734110face1c70a6bacd827dcdb80c3f9a24", resp.Update.Identifier)
+}
+
+func TestPinIndexImageCanonicalResolvedDigestMismatchConverted(t *testing.T) {
+ mat := imageMaterial("pkg:docker/docker/dockerfile-upstream@master", "sha256:02bce6c486f5bbd7b2eb6b9a16e3734110face1c70a6bacd827dcdb80c3f9a24")
+ idx := NewPinIndex(predicateWithMaterials(mat))
+ cb := ReplayPinCallback(idx)
+
+ resp, _, err := cb(context.Background(), imageCheckRequest("docker-image://docker.io/docker/dockerfile-upstream:master", "sha256:a7308cdb4411614c503aee073f5cb4caa5245b8e89fceb41887129219da0b267"))
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ require.Equal(t, spb.PolicyAction_CONVERT, resp.Action)
+ require.NotNil(t, resp.Update)
+ require.Equal(t, "docker-image://docker.io/docker/dockerfile-upstream:master@sha256:02bce6c486f5bbd7b2eb6b9a16e3734110face1c70a6bacd827dcdb80c3f9a24", resp.Update.Identifier)
+}
+
+func TestPinIndexImageCanonicalIdentifierCarriesDigestInSource(t *testing.T) {
+ mat := imageMaterial("pkg:docker/docker/dockerfile-upstream@master", "sha256:02bce6c486f5bbd7b2eb6b9a16e3734110face1c70a6bacd827dcdb80c3f9a24")
+ idx := NewPinIndex(predicateWithMaterials(mat))
+ cb := ReplayPinCallback(idx)
+
+ resp, _, err := cb(context.Background(), imageCheckRequest("docker-image://docker.io/docker/dockerfile-upstream:master@sha256:a7308cdb4411614c503aee073f5cb4caa5245b8e89fceb41887129219da0b267", ""))
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ require.Equal(t, spb.PolicyAction_CONVERT, resp.Action)
+ require.NotNil(t, resp.Update)
+ require.Equal(t, "docker-image://docker.io/docker/dockerfile-upstream:master@sha256:02bce6c486f5bbd7b2eb6b9a16e3734110face1c70a6bacd827dcdb80c3f9a24", resp.Update.Identifier)
+}
+
+func TestPinIndexUnknownSourceDenied(t *testing.T) {
+ idx := NewPinIndex(predicateWithMaterials(imageMaterial(imageURIAlpine, imageSHA)))
+ cb := ReplayPinCallback(idx)
+
+ // Unknown URI, unknown digest → DENY (fail-closed).
+ other := "pkg:docker/ubuntu@22.04?platform=linux%2Famd64"
+ resp, _, err := cb(context.Background(), imageCheckRequest(other, "sha256:deadbeef"))
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ require.Equal(t, spb.PolicyAction_DENY, resp.Action)
+ require.NotEmpty(t, resp.DenyMessages)
+ require.Contains(t, resp.DenyMessages[0].Message, "no provenance material matched requested source")
+ require.Contains(t, resp.DenyMessages[0].Message, "\n target: uri="+other+"@sha256:deadbeef")
+ require.Contains(t, resp.DenyMessages[0].Message, "\n provenance materials:")
+ require.Contains(t, resp.DenyMessages[0].Message, "\n - uri="+imageURIAlpine)
+ require.Contains(t, resp.DenyMessages[0].Message, "canonical=docker-image://docker.io/library/alpine:3.18@"+imageSHA)
+ require.Contains(t, resp.DenyMessages[0].Message, "platform=linux/amd64")
+}
+
+func TestPinIndexHTTPAllowed(t *testing.T) {
+ pred := predicateWithMaterials(slsa1.ResourceDescriptor{
+ URI: httpURI,
+ Digest: slsacommon.DigestSet{"sha256": stripSHA256(httpSHA)},
+ })
+ idx := NewPinIndex(pred)
+ cb := ReplayPinCallback(idx)
+
+ resp, _, err := cb(context.Background(), httpCheckRequest(httpURI, httpSHA))
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ require.Equal(t, spb.PolicyAction_ALLOW, resp.Action)
+}
+
+func TestPinIndexGitSchemeNormalizationAllowed(t *testing.T) {
+ pred := predicateWithMaterials(slsa1.ResourceDescriptor{
+ URI: gitURIHTTPS,
+ Digest: slsacommon.DigestSet{"sha1": stripSHA256(gitCommit)},
+ })
+ idx := NewPinIndex(pred)
+ cb := ReplayPinCallback(idx)
+
+ resp, _, err := cb(context.Background(), gitCheckRequest(gitURIGit, gitCommit))
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ require.Equal(t, spb.PolicyAction_ALLOW, resp.Action)
+}
+
+func TestPinIndexAllowPending(t *testing.T) {
+ // First callback invocation on an image source often arrives without
+ // an observed digest (the source-meta roundtrip produces it). The
+ // replay callback must not fail-closed on that pending shape when the
+ // URI is covered.
+ idx := NewPinIndex(predicateWithMaterials(imageMaterial(imageURIAlpine, imageSHA)))
+ cb := ReplayPinCallback(idx)
+
+ resp, _, err := cb(context.Background(), imageCheckRequest(imageURIAlpine, ""))
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ require.Equal(t, spb.PolicyAction_CONVERT, resp.Action)
+ require.NotNil(t, resp.Update)
+ require.Equal(t, "docker-image://docker.io/library/alpine:3.18@"+imageSHA, resp.Update.Identifier)
+}
+
+func TestComposeCallbacksReplayStrict(t *testing.T) {
+ // A permissive user-defined overlay must not defeat the strict replay
+ // callback. Compose a pass-through first and the replay pin last.
+ passthrough := policysession.PolicyCallback(func(ctx context.Context, req *policysession.CheckPolicyRequest) (*policysession.DecisionResponse, *gwpb.ResolveSourceMetaRequest, error) {
+ return &policysession.DecisionResponse{Action: spb.PolicyAction_ALLOW}, nil, nil
+ })
+ idx := NewPinIndex(predicateWithMaterials(imageMaterial(imageURIAlpine, imageSHA)))
+ combined := ComposeCallbacks(passthrough, ReplayPinCallback(idx))
+
+ unknown := "pkg:docker/ubuntu@22.04?platform=linux%2Famd64"
+ resp, _, err := combined(context.Background(), imageCheckRequest(unknown, "sha256:deadbeef"))
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+ require.Equal(t, spb.PolicyAction_DENY, resp.Action)
+}
diff --git a/replay/predicate.go b/replay/predicate.go
new file mode 100644
index 000000000000..3739e49ff944
--- /dev/null
+++ b/replay/predicate.go
@@ -0,0 +1,150 @@
+package replay
+
+import (
+ "strings"
+
+ "github.com/containerd/platforms"
+ "github.com/docker/buildx/policy"
+ slsa1 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1"
+ provenancetypes "github.com/moby/buildkit/solver/llbsolver/provenance/types"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// Predicate is a named type over ProvenancePredicateSLSA1 so replay code can
+// attach accessors without copying or wrapping. The receiver is never nil:
+// callers must have obtained a non-nil *Predicate from Subject.Predicate.
+type Predicate provenancetypes.ProvenancePredicateSLSA1
+
+// defaultFrontend matches BuildKit's default when no frontend is recorded on
+// the request (see build/opt.go:309 — dockerfile.v0).
+const defaultFrontend = "dockerfile.v0"
+
+// Frontend returns the frontend id recorded on the predicate, falling back
+// to dockerfile.v0 when the predicate does not record one.
+func (p *Predicate) Frontend() string {
+ if f := p.BuildDefinition.ExternalParameters.Request.Frontend; f != "" {
+ return f
+ }
+ return defaultFrontend
+}
+
+// FrontendAttrs returns the recorded frontend attrs with attestation-related
+// keys stripped (see Attests for those). Returns a fresh map so callers can
+// mutate it.
+func (p *Predicate) FrontendAttrs() map[string]string {
+ src := p.BuildDefinition.ExternalParameters.Request.Args
+ out := make(map[string]string, len(src))
+ for k, v := range src {
+ if strings.HasPrefix(k, "attest:") {
+ continue
+ }
+ out[k] = v
+ }
+ return out
+}
+
+// Attests returns the recorded attestation-related frontend attrs as the
+// map shape consumed by build.Options.Attests: key is the attestation type
+// (the text after "attest:"), value is the recorded attr payload.
+func (p *Predicate) Attests() map[string]*string {
+ src := p.BuildDefinition.ExternalParameters.Request.Args
+ out := map[string]*string{}
+ for k, v := range src {
+ name, ok := strings.CutPrefix(k, "attest:")
+ if !ok {
+ continue
+ }
+ vv := v
+ out[name] = &vv
+ }
+ return out
+}
+
+// ConfigSource returns the configSource descriptor recorded on the predicate.
+func (p *Predicate) ConfigSource() provenancetypes.ProvenanceConfigSourceSLSA1 {
+ return p.BuildDefinition.ExternalParameters.ConfigSource
+}
+
+// Secrets returns the declared secrets from the predicate's request.
+func (p *Predicate) Secrets() []*provenancetypes.Secret {
+ return p.BuildDefinition.ExternalParameters.Request.Secrets
+}
+
+// SSH returns the declared SSH entries from the predicate's request.
+func (p *Predicate) SSH() []*provenancetypes.SSH {
+ return p.BuildDefinition.ExternalParameters.Request.SSH
+}
+
+// Locals returns the local-context sources recorded on the predicate. A
+// non-empty result should cause replay to fail with
+// UnreplayableLocalContextError.
+func (p *Predicate) Locals() []*provenancetypes.LocalSource {
+ return p.BuildDefinition.ExternalParameters.Request.Locals
+}
+
+// BuilderPlatform returns the platform the original builder ran on, parsed
+// from InternalParameters.builderPlatform. Falls back to the runtime host
+// platform when the field is missing or malformed.
+func (p *Predicate) BuilderPlatform() ocispecs.Platform {
+ if plat, ok := p.RecordedBuilderPlatform(); ok {
+ return *plat
+ }
+ return platforms.DefaultSpec()
+}
+
+// RecordedBuilderPlatform returns the platform recorded in
+// InternalParameters.builderPlatform when present and valid.
+func (p *Predicate) RecordedBuilderPlatform() (*ocispecs.Platform, bool) {
+ s := p.BuildDefinition.InternalParameters.BuilderPlatform
+ if s == "" {
+ return nil, false
+ }
+ plat, err := platforms.Parse(s)
+ if err != nil {
+ return nil, false
+ }
+ norm := platforms.Normalize(plat)
+ return &norm, true
+}
+
+// DefaultPlatform returns the effective provenance default platform for
+// resolving host-side image sources during replay. It prefers the recorded
+// platform-qualified image materials when they all agree, and otherwise
+// falls back to the recorded builderPlatform field.
+func (p *Predicate) DefaultPlatform() (*ocispecs.Platform, bool) {
+ var inferred *ocispecs.Platform
+ for _, m := range p.ResolvedDependencies() {
+ _, mp, err := policy.ParseSLSAMaterial(m)
+ if err != nil || mp == nil {
+ continue
+ }
+ norm := platforms.Normalize(*mp)
+ if inferred == nil {
+ inferred = &norm
+ continue
+ }
+ if platforms.Format(*inferred) != platforms.Format(norm) {
+ return nil, false
+ }
+ }
+ if inferred != nil {
+ return inferred, true
+ }
+ if plat, ok := p.RecordedBuilderPlatform(); ok {
+ return plat, true
+ }
+ return nil, false
+}
+
+// ResolvedDependencies returns every material recorded on the predicate.
+// Classification by URI scheme is left to the caller (see MaterialsResolver).
+func (p *Predicate) ResolvedDependencies() []slsa1.ResourceDescriptor {
+ return p.BuildDefinition.ResolvedDependencies
+}
+
+// HasBuildDefinition reports whether the predicate carries a non-empty LLB
+// substrate (required by --replay-mode=llb).
+func (p *Predicate) HasBuildDefinition() bool {
+ bc := p.BuildDefinition.InternalParameters.BuildConfig
+ return bc != nil && len(bc.Definition) > 0
+}
diff --git a/replay/predicate_test.go b/replay/predicate_test.go
new file mode 100644
index 000000000000..a4ea7c6403c5
--- /dev/null
+++ b/replay/predicate_test.go
@@ -0,0 +1,123 @@
+package replay
+
+import (
+ "testing"
+
+ slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
+ slsa1 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1"
+ provenancetypes "github.com/moby/buildkit/solver/llbsolver/provenance/types"
+ "github.com/stretchr/testify/require"
+)
+
+func TestPredicateMethods(t *testing.T) {
+ raw := provenancetypes.ProvenancePredicateSLSA1{
+ BuildDefinition: provenancetypes.ProvenanceBuildDefinitionSLSA1{
+ ExternalParameters: provenancetypes.ProvenanceExternalParametersSLSA1{
+ ConfigSource: provenancetypes.ProvenanceConfigSourceSLSA1{
+ URI: "https://example.com/build",
+ Digest: slsa.DigestSet{"sha256": "deadbeef"},
+ Path: "Dockerfile",
+ },
+ Request: provenancetypes.Parameters{
+ Frontend: "gateway.v0",
+ Args: map[string]string{
+ "target": "release",
+ "build-arg:FOO": "bar",
+ "label:maintainer": "me",
+ "attest:sbom": "generator=scanner",
+ "attest:provenance": "mode=max",
+ },
+ Secrets: []*provenancetypes.Secret{{ID: "github_token"}, {ID: "npm_token", Optional: true}},
+ SSH: []*provenancetypes.SSH{{ID: "default"}},
+ Locals: []*provenancetypes.LocalSource{{Name: "context"}},
+ },
+ },
+ InternalParameters: provenancetypes.ProvenanceInternalParametersSLSA1{
+ BuildConfig: &provenancetypes.BuildConfig{
+ Definition: []provenancetypes.BuildStep{{ID: "step-0"}},
+ },
+ },
+ ProvenanceBuildDefinition: slsa1.ProvenanceBuildDefinition{
+ ResolvedDependencies: []slsa1.ResourceDescriptor{
+ {URI: "docker-image://alpine:latest", Digest: slsa.DigestSet{"sha256": "aaaa"}},
+ {URI: "https://example.com/pkg.tar.gz", Digest: slsa.DigestSet{"sha256": "bbbb"}},
+ },
+ },
+ },
+ }
+ pred := (*Predicate)(&raw)
+
+ t.Run("Frontend returns recorded frontend", func(t *testing.T) {
+ require.Equal(t, "gateway.v0", pred.Frontend())
+ })
+
+ t.Run("Frontend falls back to dockerfile.v0", func(t *testing.T) {
+ empty := &Predicate{}
+ require.Equal(t, defaultFrontend, empty.Frontend())
+ })
+
+ t.Run("FrontendAttrs strips attestation attrs", func(t *testing.T) {
+ attrs := pred.FrontendAttrs()
+ require.Equal(t, "release", attrs["target"])
+ require.Equal(t, "bar", attrs["build-arg:FOO"])
+ require.Equal(t, "me", attrs["label:maintainer"])
+ _, hasSBOM := attrs["attest:sbom"]
+ require.False(t, hasSBOM, "attest:sbom should be filtered out")
+ _, hasProv := attrs["attest:provenance"]
+ require.False(t, hasProv, "attest:provenance should be filtered out")
+ })
+
+ t.Run("FrontendAttrs returns fresh map", func(t *testing.T) {
+ attrs := pred.FrontendAttrs()
+ attrs["injected"] = "yes"
+ // The predicate's underlying map should be untouched.
+ require.NotContains(t, pred.BuildDefinition.ExternalParameters.Request.Args, "injected")
+ })
+
+ t.Run("ConfigSource", func(t *testing.T) {
+ cs := pred.ConfigSource()
+ require.Equal(t, "https://example.com/build", cs.URI)
+ require.Equal(t, "Dockerfile", cs.Path)
+ })
+
+ t.Run("Secrets", func(t *testing.T) {
+ secrets := pred.Secrets()
+ require.Len(t, secrets, 2)
+ require.Equal(t, "github_token", secrets[0].ID)
+ require.False(t, secrets[0].Optional)
+ require.True(t, secrets[1].Optional)
+ })
+
+ t.Run("SSH", func(t *testing.T) {
+ ssh := pred.SSH()
+ require.Len(t, ssh, 1)
+ require.Equal(t, "default", ssh[0].ID)
+ })
+
+ t.Run("Locals", func(t *testing.T) {
+ locals := pred.Locals()
+ require.Len(t, locals, 1)
+ require.Equal(t, "context", locals[0].Name)
+ })
+
+ t.Run("ResolvedDependencies", func(t *testing.T) {
+ deps := pred.ResolvedDependencies()
+ require.Len(t, deps, 2)
+ require.Equal(t, "docker-image://alpine:latest", deps[0].URI)
+ })
+
+ t.Run("HasBuildDefinition true", func(t *testing.T) {
+ require.True(t, pred.HasBuildDefinition())
+ })
+
+ t.Run("HasBuildDefinition false when empty", func(t *testing.T) {
+ empty := &Predicate{}
+ require.False(t, empty.HasBuildDefinition())
+ })
+
+ t.Run("HasBuildDefinition false when BuildConfig has no steps", func(t *testing.T) {
+ p := &Predicate{}
+ p.BuildDefinition.InternalParameters.BuildConfig = &provenancetypes.BuildConfig{}
+ require.False(t, p.HasBuildDefinition())
+ })
+}
diff --git a/replay/snapshot.go b/replay/snapshot.go
new file mode 100644
index 000000000000..785ac08e1e2a
--- /dev/null
+++ b/replay/snapshot.go
@@ -0,0 +1,998 @@
+package replay
+
+import (
+ "archive/tar"
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/fs"
+ "net/url"
+ "os"
+ "path/filepath"
+ "slices"
+ "strings"
+ "sync"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/containerd/containerd/v2/core/images"
+ "github.com/containerd/containerd/v2/core/remotes"
+ contentlocal "github.com/containerd/containerd/v2/plugins/content/local"
+ "github.com/containerd/errdefs"
+ "github.com/containerd/platforms"
+ "github.com/docker/buildx/builder"
+ "github.com/docker/buildx/util/buildflags"
+ "github.com/docker/buildx/util/imagetools"
+ "github.com/docker/buildx/util/progress"
+ "github.com/docker/cli/cli/command"
+ slsa1 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1"
+ "github.com/moby/buildkit/client"
+ "github.com/moby/buildkit/client/ociindex"
+ "github.com/moby/buildkit/util/contentutil"
+ "github.com/moby/buildkit/util/purl"
+ digest "github.com/opencontainers/go-digest"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+// withMediaTypeKeyPrefix registers ref-key prefixes for non-standard media
+// types that the snapshot chain walker will encounter. Without this,
+// containerd's remotes.MakeRefKey falls to its default branch and logs
+// "reference for unknown type: …" when copying the OCI 1.1 empty-config
+// blob that sits inside buildx attestation manifests.
+func withMediaTypeKeyPrefix(ctx context.Context) context.Context {
+ return remotes.WithMediaTypeKeyPrefix(ctx, "application/vnd.oci.empty.v1+json", "empty")
+}
+
+// SnapshotRequest is the input to Snapshot.
+type SnapshotRequest struct {
+ // Targets are the per-platform (subject, predicate) pairs to snapshot.
+ // Each subject must carry a non-empty AttestationManifest descriptor
+ // (image / oci-layout subjects only; attestation-file inputs are
+ // rejected upstream).
+ Targets []Target
+ // IncludeMaterials controls whether material content is copied and the
+ // materials artifact manifest is emitted.
+ IncludeMaterials bool
+ // Materials resolves image / http / container-blob materials to a local
+ // (descriptor, provider) pair. Required when IncludeMaterials is true.
+ Materials *MaterialsResolver
+ // Output is the parsed --output spec. Exactly one form is allowed
+ // (local / oci / registry).
+ Output *buildflags.ExportEntry
+ // Progress receives step events and non-fatal warnings. May be nil —
+ // in that case events are silently dropped.
+ Progress progress.Writer
+}
+
+// Snapshot produces a replay snapshot for the supplied subjects/predicates and
+// writes it through the configured --output target. This function does NOT
+// invoke build.Build — snapshot is pure content movement plus manifest
+// assembly.
+//
+// dockerCli + builderName are consumed only to construct a buildx image
+// resolver so that image materials can be fetched from their recorded
+// registries. They may be zero when all materials are resolvable purely via
+// req.Materials (e.g. tests that pre-pin an --materials=oci-layout store).
+func Snapshot(ctx context.Context, dockerCli command.Cli, builderName string, req *SnapshotRequest) error {
+ if req == nil {
+ return errors.New("nil snapshot request")
+ }
+ if req.Output == nil {
+ return errors.New("snapshot: --output is required")
+ }
+ stage, root, _, err := assembleSnapshot(ctx, dockerCli, builderName, req)
+ if err != nil {
+ return err
+ }
+ return writeSnapshotOutput(ctx, stage, root, req.Output, dockerCli, builderName)
+}
+
+// assembleSnapshot runs the staging phase shared by real-run and dry-run:
+// validates targets, stages every blob the snapshot would emit, and
+// returns the root descriptor plus per-target staging stores (for dry-run
+// consumers that want per-target artifact lists). The real run only cares
+// about the first store and root.
+func assembleSnapshot(ctx context.Context, dockerCli command.Cli, builderName string, req *SnapshotRequest) (*stagingStore, ocispecs.Descriptor, []*stagingStore, error) {
+ if req == nil {
+ return nil, ocispecs.Descriptor{}, nil, errors.New("nil snapshot request")
+ }
+ if len(req.Targets) == 0 {
+ return nil, ocispecs.Descriptor{}, nil, errors.New("no targets to snapshot")
+ }
+
+ // Register ref-key prefixes for non-standard media types so
+ // containerd does not log spurious "reference for unknown type"
+ // warnings while copying the attestation chain.
+ ctx = withMediaTypeKeyPrefix(ctx)
+
+ // progress logger — nop when the caller did not supply a Writer.
+ var pwlog progress.Logger = func(*client.SolveStatus) {}
+ if req.Progress != nil {
+ pwlog = req.Progress.Write
+ }
+
+ // Shared warn-once ledger: each (category, key) pair prints at most
+ // one warning across the whole snapshot (e.g. the same git URI
+ // referenced from every platform should not flood output).
+ warn := newWarnOnce()
+
+ // One staging store per target — gives dry-run a clean per-target
+ // descriptor list and keeps the real-run assembly deterministic.
+ stages := make([]*stagingStore, 0, len(req.Targets))
+ // Merged stage for real-run output. Each target's stage is folded in
+ // after assembly so a single flush writes everything.
+ merged := newStagingStore()
+
+ // Lazily-constructed registry resolver for image materials (reused across
+ // subjects). buildx resolver is nil-safe so we keep the builder/auth setup
+ // deferred until an image material is seen.
+ var registryResolver *imagetools.Resolver
+ lazyResolver := func() (*imagetools.Resolver, error) {
+ if registryResolver != nil {
+ return registryResolver, nil
+ }
+ if dockerCli == nil {
+ registryResolver = imagetools.New(imagetools.Opt{})
+ return registryResolver, nil
+ }
+ b, err := builder.New(dockerCli, builder.WithName(builderName))
+ if err != nil {
+ return nil, err
+ }
+ imgOpt, err := b.ImageOpt()
+ if err != nil {
+ return nil, err
+ }
+ registryResolver = imagetools.New(imgOpt)
+ return registryResolver, nil
+ }
+
+ emptyConfigDesc := ocispecs.Descriptor{
+ MediaType: ociEmptyConfigMediaType,
+ Digest: digest.Digest(ociEmptyConfigDigest),
+ Size: ociEmptyConfigSize,
+ }
+
+ perPlatformDescs := make([]ocispecs.Descriptor, 0, len(req.Targets))
+
+ for ti, t := range req.Targets {
+ s, pred := t.Subject, t.Predicate
+ if s == nil || pred == nil {
+ return nil, ocispecs.Descriptor{}, nil, errors.New("target has nil subject or predicate")
+ }
+ if s.IsAttestationFile() {
+ return nil, ocispecs.Descriptor{}, nil, ErrUnsupportedSubject("snapshot requires an image or oci-layout subject")
+ }
+ if s.AttestationManifest().Digest == "" {
+ return nil, ocispecs.Descriptor{}, nil, ErrNoProvenance(s.InputRef())
+ }
+
+ stage := newStagingStore()
+ if err := stage.writeRaw(ctx, emptyConfigDesc, OCIEmptyConfigBytes()); err != nil {
+ return nil, ocispecs.Descriptor{}, nil, errors.Wrap(err, "write empty config")
+ }
+
+ var ppDesc ocispecs.Descriptor
+ targetName := fmt.Sprintf("[%d/%d] snapshot %s", ti+1, len(req.Targets), snapshotTargetLabel(s))
+ err := progress.Wrap(targetName, pwlog, func(sub progress.SubLogger) error {
+ d, err := snapshotOneTarget(ctx, stage, s, pred, req, lazyResolver, warn, sub)
+ if err != nil {
+ return err
+ }
+ ppDesc = d
+ return nil
+ })
+ if err != nil {
+ return nil, ocispecs.Descriptor{}, nil, err
+ }
+ if s.Descriptor.Platform != nil {
+ p := *s.Descriptor.Platform
+ ppDesc.Platform = &p
+ }
+ perPlatformDescs = append(perPlatformDescs, ppDesc)
+ stages = append(stages, stage)
+
+ if err := mergeStage(ctx, merged, stage); err != nil {
+ return nil, ocispecs.Descriptor{}, nil, err
+ }
+ }
+
+ // Root descriptor that the output writer addresses.
+ var root ocispecs.Descriptor
+ if len(perPlatformDescs) == 1 {
+ root = perPlatformDescs[0]
+ } else {
+ _, rootDesc, rootData, err := MultiPlatformSnapshotIndex(perPlatformDescs)
+ if err != nil {
+ return nil, ocispecs.Descriptor{}, nil, err
+ }
+ if err := merged.writeRaw(ctx, ocispecs.Descriptor{
+ MediaType: ocispecs.MediaTypeImageIndex,
+ Digest: rootDesc.Digest,
+ Size: rootDesc.Size,
+ }, rootData); err != nil {
+ return nil, ocispecs.Descriptor{}, nil, errors.Wrap(err, "write multi-platform snapshot index")
+ }
+ root = rootDesc
+ }
+
+ return merged, root, stages, nil
+}
+
+// mergeStage copies every blob from src into dst. Used to fold per-target
+// stages into the merged stage that the real-run output writer flushes.
+func mergeStage(ctx context.Context, dst, src *stagingStore) error {
+ src.mu.Lock()
+ order := slices.Clone(src.order)
+ src.mu.Unlock()
+ for _, dgst := range order {
+ desc := src.descs[dgst]
+ ra, err := src.ReaderAt(ctx, desc)
+ if err != nil {
+ return errors.Wrapf(err, "read %s", dgst)
+ }
+ err = content.WriteBlob(ctx, dst, "snapshot-"+dgst.String(), content.NewReader(ra), desc)
+ ra.Close()
+ if err != nil && !errdefs.IsAlreadyExists(err) {
+ return errors.Wrapf(err, "merge %s", dgst)
+ }
+ dst.record(desc)
+ }
+ return nil
+}
+
+// snapshotOneTarget copies the attestation chain and each material for a
+// single (subject, predicate) target into the staging buffer and assembles
+// the per-platform snapshot index. Each unit of work is wrapped in a
+// progress sub-step so the caller's printer can render what is happening.
+func snapshotOneTarget(
+ ctx context.Context,
+ stage *stagingStore,
+ s *Subject,
+ pred *Predicate,
+ req *SnapshotRequest,
+ lazyResolver func() (*imagetools.Resolver, error),
+ warn *warnOnce,
+ sub progress.SubLogger,
+) (ocispecs.Descriptor, error) {
+ attestMfst := s.AttestationManifest()
+ if err := sub.Wrap(fmt.Sprintf("copy attestation manifest %s", attestMfst.Digest), func() error {
+ return contentutil.CopyChain(ctx, stage, s.Provider, attestMfst)
+ }); err != nil {
+ return ocispecs.Descriptor{}, errors.Wrapf(err, "copy attestation manifest chain for %s", s.InputRef())
+ }
+
+ var (
+ materialsLayers []ocispecs.Descriptor
+ imageMfsts []ocispecs.Descriptor
+ )
+ seenLayerDigest := map[digest.Digest]struct{}{}
+ seenImageMfst := map[digest.Digest]struct{}{}
+
+ for _, m := range pred.ResolvedDependencies() {
+ switch classifyMaterial(m) {
+ case materialKindImage:
+ if !req.IncludeMaterials {
+ continue
+ }
+ rootDgst := resourceDigest(m)
+ if rootDgst == "" {
+ return ocispecs.Descriptor{}, errors.Errorf("image material %q has no sha256 digest", m.URI)
+ }
+ if err := sub.Wrap(fmt.Sprintf("image material %s", m.URI), func() error {
+ rootDesc, rootProvider, err := resolveImageMaterial(ctx, req.Materials, lazyResolver, m, rootDgst, WithPlatform(s.Descriptor.Platform), WithBuilderPlatform(pred.BuilderPlatform()))
+ if err != nil {
+ return err
+ }
+ if err := stage.copyBlob(ctx, rootProvider, rootDesc); err != nil {
+ return errors.Wrapf(err, "copy image root %s", rootDesc.Digest)
+ }
+ if _, ok := seenLayerDigest[rootDesc.Digest]; !ok {
+ seenLayerDigest[rootDesc.Digest] = struct{}{}
+ materialsLayers = append(materialsLayers, ocispecs.Descriptor{
+ MediaType: rootDesc.MediaType,
+ Digest: rootDesc.Digest,
+ Size: rootDesc.Size,
+ })
+ }
+ platDesc, err := pickPlatformChild(ctx, rootProvider, rootDesc, s.Descriptor.Platform, pred.BuilderPlatform())
+ if err != nil {
+ return errors.Wrapf(err, "pick platform child for %s", m.URI)
+ }
+ if err := contentutil.CopyChain(ctx, stage, rootProvider, platDesc); err != nil {
+ return errors.Wrapf(err, "copy image material chain for %s", m.URI)
+ }
+ if _, ok := seenImageMfst[platDesc.Digest]; !ok {
+ seenImageMfst[platDesc.Digest] = struct{}{}
+ imageMfsts = append(imageMfsts, platDesc)
+ }
+ return nil
+ }); err != nil {
+ return ocispecs.Descriptor{}, err
+ }
+
+ case materialKindHTTP:
+ if !req.IncludeMaterials {
+ continue
+ }
+ dgst := resourceDigest(m)
+ if dgst == "" {
+ return ocispecs.Descriptor{}, errors.Errorf("http material %q has no sha256 digest", m.URI)
+ }
+ if err := sub.Wrap(fmt.Sprintf("http material %s", m.URI), func() error {
+ desc, provider, err := req.Materials.Resolve(ctx, m.URI, dgst)
+ if err != nil {
+ return errors.Wrapf(err, "resolve http material %s", m.URI)
+ }
+ if provider == nil {
+ return ErrMaterialNotFound(m.URI, dgst.String())
+ }
+ desc.MediaType = layerMediaTypeHTTP
+ if err := stage.copyBlob(ctx, provider, desc); err != nil {
+ return errors.Wrapf(err, "copy http material %s", desc.Digest)
+ }
+ if _, ok := seenLayerDigest[desc.Digest]; !ok {
+ seenLayerDigest[desc.Digest] = struct{}{}
+ materialsLayers = append(materialsLayers, desc)
+ }
+ return nil
+ }); err != nil {
+ return ocispecs.Descriptor{}, err
+ }
+
+ case materialKindContainerBlob:
+ if !req.IncludeMaterials {
+ continue
+ }
+ dgst := resourceDigest(m)
+ if dgst == "" {
+ return ocispecs.Descriptor{}, errors.Errorf("container-blob material has no sha256 digest")
+ }
+ if err := sub.Wrap(fmt.Sprintf("container-blob material %s", dgst), func() error {
+ desc, provider, err := req.Materials.Resolve(ctx, m.URI, dgst)
+ if err != nil {
+ return errors.Wrapf(err, "resolve container-blob material %s", dgst)
+ }
+ if provider == nil {
+ return ErrMaterialNotFound(m.URI, dgst.String())
+ }
+ if desc.MediaType == "" {
+ desc.MediaType = layerMediaTypeContainerBlob
+ }
+ if err := stage.copyBlob(ctx, provider, desc); err != nil {
+ return errors.Wrapf(err, "copy container-blob material %s", desc.Digest)
+ }
+ if _, ok := seenLayerDigest[desc.Digest]; !ok {
+ seenLayerDigest[desc.Digest] = struct{}{}
+ materialsLayers = append(materialsLayers, desc)
+ }
+ return nil
+ }); err != nil {
+ return ocispecs.Descriptor{}, err
+ }
+
+ case materialKindGit:
+ // Git packfile snapshotting is not implemented. Dedup so a
+ // single URI referenced across many platforms warns once.
+ warn.Log(sub, "git:"+m.URI, fmt.Sprintf("git material %q is not included in the snapshot (not yet supported)", m.URI))
+
+ case materialKindUnknown:
+ warn.Log(sub, "unknown:"+m.URI, fmt.Sprintf("material with URI %q and no recognised scheme is ignored", m.URI))
+ }
+ }
+
+ var materialsManifestDesc ocispecs.Descriptor
+ if req.IncludeMaterials {
+ _, mDesc, mData, err := MaterialsManifest(materialsLayers)
+ if err != nil {
+ return ocispecs.Descriptor{}, err
+ }
+ if err := stage.writeRaw(ctx, ocispecs.Descriptor{
+ MediaType: ocispecs.MediaTypeImageManifest,
+ Digest: mDesc.Digest,
+ Size: mDesc.Size,
+ }, mData); err != nil {
+ return ocispecs.Descriptor{}, errors.Wrap(err, "write materials manifest")
+ }
+ materialsManifestDesc = mDesc
+ }
+
+ attestDesc := attestMfst
+ // The attestation manifest's descriptor in the original image index
+ // carries Docker reference annotations (vnd.docker.reference.*) that
+ // are meaningless for the snapshot-subject role — strip them. Preserve
+ // the manifest's own artifactType so consumers can tell this is a
+ // Docker attestation manifest without fetching the body.
+ attestDesc.Annotations = nil
+ attestMfstData, err := content.ReadBlob(ctx, stage, attestMfst)
+ if err != nil {
+ return ocispecs.Descriptor{}, errors.Wrapf(err, "read attestation manifest %s", attestMfst.Digest)
+ }
+ var attestMfstBody ocispecs.Manifest
+ if err := json.Unmarshal(attestMfstData, &attestMfstBody); err != nil {
+ return ocispecs.Descriptor{}, errors.Wrapf(err, "parse attestation manifest %s", attestMfst.Digest)
+ }
+ attestDesc.ArtifactType = attestMfstBody.ArtifactType
+
+ _, ppDesc, ppData, err := PerPlatformSnapshotIndex(attestDesc, materialsManifestDesc, imageMfsts)
+ if err != nil {
+ return ocispecs.Descriptor{}, err
+ }
+ if err := stage.writeRaw(ctx, ocispecs.Descriptor{
+ MediaType: ocispecs.MediaTypeImageIndex,
+ Digest: ppDesc.Digest,
+ Size: ppDesc.Size,
+ }, ppData); err != nil {
+ return ocispecs.Descriptor{}, errors.Wrap(err, "write per-platform snapshot index")
+ }
+ return ppDesc, nil
+}
+
+// stagingStore wraps a contentutil.Buffer with a few helpers and also
+// tracks every digest written into the buffer so the final flush can emit
+// exactly that set (contentutil.Buffer.Walk is a stub, so we maintain our
+// own digest list). Any ingester-facing code that bypasses the helpers
+// (e.g. contentutil.CopyChain) must be invoked with stagingStore itself —
+// it implements content.Ingester — so those writes are tracked too.
+type stagingStore struct {
+ buffer contentutil.Buffer
+ mu sync.Mutex
+ order []digest.Digest
+ descs map[digest.Digest]ocispecs.Descriptor
+}
+
+func newStagingStore() *stagingStore {
+ return &stagingStore{
+ buffer: contentutil.NewBuffer(),
+ descs: map[digest.Digest]ocispecs.Descriptor{},
+ }
+}
+
+// record captures the full descriptor for each blob written into the stage.
+// Later writes merge fields (e.g. a FetchHandler Writer has the MediaType
+// from content.WithDescriptor; writeRaw and copyBlob pass full descriptors).
+func (s *stagingStore) record(desc ocispecs.Descriptor) {
+ if desc.Digest == "" {
+ return
+ }
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if prev, ok := s.descs[desc.Digest]; ok {
+ if prev.MediaType == "" && desc.MediaType != "" {
+ prev.MediaType = desc.MediaType
+ }
+ if prev.Size == 0 && desc.Size > 0 {
+ prev.Size = desc.Size
+ }
+ s.descs[desc.Digest] = prev
+ return
+ }
+ s.descs[desc.Digest] = desc
+ s.order = append(s.order, desc.Digest)
+}
+
+// ReaderAt satisfies content.Provider, delegating to the wrapped buffer.
+func (s *stagingStore) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error) {
+ return s.buffer.ReaderAt(ctx, desc)
+}
+
+// Writer satisfies content.Ingester; commits are tracked so the final dump
+// can enumerate exactly what the snapshot added.
+func (s *stagingStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
+ w, err := s.buffer.Writer(ctx, opts...)
+ if err != nil {
+ return nil, err
+ }
+ var desc ocispecs.Descriptor
+ var wOpts content.WriterOpts
+ for _, o := range opts {
+ _ = o(&wOpts)
+ }
+ desc = wOpts.Desc
+ return &stagingWriter{Writer: w, store: s, desc: desc}, nil
+}
+
+type stagingWriter struct {
+ content.Writer
+ store *stagingStore
+ desc ocispecs.Descriptor
+}
+
+func (w *stagingWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
+ if err := w.Writer.Commit(ctx, size, expected, opts...); err != nil {
+ return err
+ }
+ d := expected
+ if d == "" {
+ d = w.Digest()
+ }
+ recorded := w.desc
+ recorded.Digest = d
+ if recorded.Size == 0 {
+ recorded.Size = size
+ }
+ w.store.record(recorded)
+ return nil
+}
+
+// writeRaw writes the bytes dt under desc.Digest. Already-present content is
+// treated as a no-op (the staging buffer is content-addressable).
+func (s *stagingStore) writeRaw(ctx context.Context, desc ocispecs.Descriptor, dt []byte) error {
+ if desc.Digest == "" {
+ return errors.New("writeRaw: empty digest")
+ }
+ if desc.Size == 0 {
+ desc.Size = int64(len(dt))
+ }
+ ref := "snapshot-" + desc.Digest.String()
+ err := content.WriteBlob(ctx, s, ref, bytes.NewReader(dt), desc)
+ if err != nil && !errdefs.IsAlreadyExists(err) {
+ return errors.WithStack(err)
+ }
+ s.record(desc)
+ return nil
+}
+
+// copyBlob copies one blob from src through into the staging buffer. This is
+// a content-only copy — no children / manifest walk.
+func (s *stagingStore) copyBlob(ctx context.Context, src content.Provider, desc ocispecs.Descriptor) error {
+ if desc.Digest == "" {
+ return errors.New("copyBlob: empty digest")
+ }
+ if err := contentutil.Copy(ctx, s, src, desc, "snapshot-"+desc.Digest.String(), nil); err != nil {
+ return err
+ }
+ s.record(desc)
+ return nil
+}
+
+// resolveImageMaterial looks up a provenance image material by its recorded
+// URI + root digest. Prefers a locally-configured MaterialsResolver (which
+// may be snapshot-backed) and falls back to fetching from the registry
+// derived from the pkg:docker purl.
+func resolveImageMaterial(
+ ctx context.Context,
+ resolver *MaterialsResolver,
+ lazyResolver func() (*imagetools.Resolver, error),
+ m slsa1.ResourceDescriptor,
+ rootDgst digest.Digest,
+ opts ...ResolveOption,
+) (ocispecs.Descriptor, content.Provider, error) {
+ if resolver != nil {
+ desc, provider, err := resolver.Resolve(ctx, m.URI, rootDgst, opts...)
+ if err == nil && provider != nil {
+ if desc.MediaType == "" {
+ desc.MediaType = ocispecs.MediaTypeImageIndex
+ }
+ return desc, provider, nil
+ }
+ if err != nil {
+ var mnf *MaterialNotFoundError
+ if !errors.As(err, &mnf) {
+ return ocispecs.Descriptor{}, nil, err
+ }
+ // Fall through to registry fetch for MaterialNotFoundError.
+ }
+ }
+
+ // Fall back to a registry fetch driven off the pkg:docker purl.
+ ref, _, err := purl.PURLToRef(m.URI)
+ if err != nil {
+ return ocispecs.Descriptor{}, nil, errors.Wrapf(err, "invalid image material URI %q", m.URI)
+ }
+ imgResolver, err := lazyResolver()
+ if err != nil {
+ return ocispecs.Descriptor{}, nil, err
+ }
+ // Address the root directly by digest so we fetch the exact recorded
+ // index regardless of tag mutations since the original build.
+ fetchRef := ref
+ if !strings.Contains(fetchRef, "@") {
+ fetchRef = ref + "@" + rootDgst.String()
+ }
+ // Resolve the fetchRef first so we learn the root descriptor's size +
+ // mediaType. Without a size, contentutil.FromFetcher's ReaderAt reports
+ // size=0 and ReadBlob returns an empty payload (silently succeeding with
+ // a JSON decode error downstream).
+ _, rootDesc, err := imgResolver.Resolve(ctx, fetchRef)
+ if err != nil {
+ return ocispecs.Descriptor{}, nil, errors.Wrapf(err, "resolve image material %s", m.URI)
+ }
+ fetcher, err := imgResolver.Fetcher(ctx, fetchRef)
+ if err != nil {
+ return ocispecs.Descriptor{}, nil, errors.Wrapf(err, "fetch image material %s", m.URI)
+ }
+ provider := contentutil.FromFetcher(fetcher)
+ if rootDesc.Digest == "" {
+ rootDesc.Digest = rootDgst
+ }
+ if rootDesc.MediaType == "" {
+ // Paranoia: if the resolver left the mediaType empty, probe by
+ // reading the manifest payload.
+ dt, err := content.ReadBlob(ctx, provider, rootDesc)
+ if err != nil {
+ return ocispecs.Descriptor{}, nil, errors.Wrapf(err, "read image material root %s", rootDgst)
+ }
+ mt, err := detectManifestMediaType(dt)
+ if err != nil {
+ return ocispecs.Descriptor{}, nil, err
+ }
+ rootDesc.MediaType = mt
+ if rootDesc.Size == 0 {
+ rootDesc.Size = int64(len(dt))
+ }
+ }
+ return rootDesc, provider, nil
+}
+
+// pickPlatformChild turns a "root" descriptor for an image material into the
+// platform-specific manifest descriptor to store in the snapshot. Provenance
+// only records the root index digest, so replay has to guess which child
+// BuildKit actually resolved. The matcher prefers the subject's platform
+// (FROM-style base images resolve at TARGETPLATFORM) and falls back to the
+// builder platform recorded on the predicate (frontend images and
+// cross-compile toolchains run on the build host). An index with a single
+// child is always returned as-is.
+func pickPlatformChild(ctx context.Context, provider content.Provider, root ocispecs.Descriptor, subjectPlat *ocispecs.Platform, builderPlat ocispecs.Platform) (ocispecs.Descriptor, error) {
+ switch root.MediaType {
+ case ocispecs.MediaTypeImageManifest, images.MediaTypeDockerSchema2Manifest:
+ return root, nil
+ case ocispecs.MediaTypeImageIndex, images.MediaTypeDockerSchema2ManifestList:
+ dt, err := content.ReadBlob(ctx, provider, root)
+ if err != nil {
+ return ocispecs.Descriptor{}, errors.WithStack(err)
+ }
+ var idx ocispecs.Index
+ if err := json.Unmarshal(dt, &idx); err != nil {
+ return ocispecs.Descriptor{}, errors.WithStack(err)
+ }
+
+ matcher := replayPlatformMatcher(subjectPlat, builderPlat)
+ var best *ocispecs.Descriptor
+ for i := range idx.Manifests {
+ c := idx.Manifests[i]
+ if c.Platform == nil || !matcher.Match(*c.Platform) {
+ continue
+ }
+ if best == nil || matcher.Less(*c.Platform, *best.Platform) {
+ best = &c
+ }
+ }
+ if best != nil {
+ return *best, nil
+ }
+ if len(idx.Manifests) == 1 {
+ return idx.Manifests[0], nil
+ }
+ return ocispecs.Descriptor{}, errors.Errorf("image material index %s has no child matching subject platform %s or builder %s", root.Digest, formatPlatformPtr(subjectPlat), platforms.Format(builderPlat))
+ default:
+ return ocispecs.Descriptor{}, errors.Errorf("unsupported image material root media type %q", root.MediaType)
+ }
+}
+
+// replayPlatformMatcher returns a MatchComparer that prefers the subject's
+// platform then falls back to the builder platform recorded on the
+// predicate. When the subject has no platform, the builder platform is used
+// directly (buildx records builderPlatform as the effective default for any
+// subject produced by that build).
+func replayPlatformMatcher(subjectPlat *ocispecs.Platform, builderPlat ocispecs.Platform) platforms.MatchComparer {
+ if subjectPlat == nil || platforms.Only(*subjectPlat).Match(builderPlat) {
+ return platforms.Only(builderPlat)
+ }
+ return platforms.Any(*subjectPlat, builderPlat)
+}
+
+func formatPlatformPtr(p *ocispecs.Platform) string {
+ if p == nil {
+ return ""
+ }
+ return platforms.Format(*p)
+}
+
+// snapshotTargetLabel produces a human-readable label for progress output:
+// " ()" when the subject has a platform, otherwise just
+// the digest.
+func snapshotTargetLabel(s *Subject) string {
+ d := s.Descriptor.Digest.String()
+ if s.Descriptor.Platform != nil {
+ return platforms.Format(*s.Descriptor.Platform) + " (" + d + ")"
+ }
+ return d
+}
+
+// detectManifestMediaType inspects the first bytes of a manifest blob and
+// returns the OCI media type — shared with util/imagetools but re-implemented
+// here to avoid importing a single helper.
+func detectManifestMediaType(dt []byte) (string, error) {
+ var probe struct {
+ MediaType string `json:"mediaType"`
+ Manifests []json.RawMessage `json:"manifests,omitempty"`
+ Config json.RawMessage `json:"config,omitempty"`
+ }
+ if err := json.Unmarshal(dt, &probe); err != nil {
+ return "", errors.WithStack(err)
+ }
+ if probe.MediaType != "" {
+ return probe.MediaType, nil
+ }
+ if len(probe.Manifests) > 0 {
+ return ocispecs.MediaTypeImageIndex, nil
+ }
+ if len(probe.Config) > 0 {
+ return ocispecs.MediaTypeImageManifest, nil
+ }
+ return "", errors.Errorf("cannot detect media type from manifest payload")
+}
+
+// materialKind classifies a provenance material by its URI scheme.
+type materialKind int
+
+const (
+ materialKindUnknown materialKind = iota
+ materialKindImage
+ materialKindHTTP
+ materialKindContainerBlob
+ materialKindGit
+)
+
+// classifyMaterial picks a materialKind from a ResourceDescriptor's URI.
+func classifyMaterial(m slsa1.ResourceDescriptor) materialKind {
+ switch {
+ case strings.HasPrefix(m.URI, "pkg:docker/"):
+ return materialKindImage
+ case strings.HasPrefix(m.URI, "https://") || strings.HasPrefix(m.URI, "http://"):
+ if looksLikeGitURL(m.URI) {
+ return materialKindGit
+ }
+ return materialKindHTTP
+ case strings.HasPrefix(m.URI, "git+") ||
+ strings.HasPrefix(m.URI, "git://") ||
+ strings.HasPrefix(m.URI, "ssh://"):
+ return materialKindGit
+ case m.URI == "" && resourceDigest(m) != "":
+ // No URI but a digest: treat as a container-blob material. Layers
+ // pulled directly by digest during a build show up this way.
+ return materialKindContainerBlob
+ }
+ return materialKindUnknown
+}
+
+// looksLikeGitURL returns true when an http(s) URL appears to name a git repo
+// (the provenance recorder sometimes uses a bare https url for a git remote).
+func looksLikeGitURL(raw string) bool {
+ u, err := url.Parse(raw)
+ if err != nil {
+ return false
+ }
+ return strings.HasSuffix(strings.ToLower(u.Path), ".git")
+}
+
+// resourceDigest extracts the preferred digest from a SLSA ResourceDescriptor
+// (sha256 first, then any other algorithm). Mirrors policy.preferredDigest.
+func resourceDigest(m slsa1.ResourceDescriptor) digest.Digest {
+ if m.Digest == nil {
+ return ""
+ }
+ if v, ok := m.Digest["sha256"]; ok && v != "" {
+ return digest.NewDigestFromEncoded(digest.SHA256, v)
+ }
+ for alg, v := range m.Digest {
+ if v == "" {
+ continue
+ }
+ return digest.NewDigestFromEncoded(digest.Algorithm(alg), v)
+ }
+ return ""
+}
+
+// writeSnapshotOutput materialises the staged snapshot content through the
+// selected --output target. oci / registry are supported.
+func writeSnapshotOutput(
+ ctx context.Context,
+ stage *stagingStore,
+ root ocispecs.Descriptor,
+ exp *buildflags.ExportEntry,
+ dockerCli command.Cli,
+ builderName string,
+) error {
+ switch exp.Type {
+ case "oci":
+ if exp.Destination == "" {
+ return errors.New("snapshot: type=oci requires dest=")
+ }
+ // tar defaults to true — "tar=false" selects the oci-layout
+ // directory form. The TTY refusal for dest=- lives in the
+ // command layer; this function trusts its caller.
+ if exp.Attrs["tar"] == "false" {
+ return writeOCILayoutDir(ctx, stage, root, exp.Destination)
+ }
+ return writeOCILayoutTar(ctx, stage, root, exp.Destination)
+ case "registry":
+ ref := exp.Attrs["name"]
+ if ref == "" {
+ return errors.New("snapshot: type=registry requires name=][")
+ }
+ return pushSnapshotToRegistry(ctx, stage, root, ref, dockerCli, builderName)
+ }
+ return errors.Errorf("snapshot: unsupported --output type %q (want oci | registry)", exp.Type)
+}
+
+// writeOCILayoutDir writes the snapshot as an OCI layout tree at dest.
+// The staging store already contains exactly the content that belongs in
+// the snapshot (each target loop staged precisely what was needed), so we
+// flush every blob verbatim — no graph walk is required.
+func writeOCILayoutDir(ctx context.Context, stage *stagingStore, root ocispecs.Descriptor, dest string) error {
+ if err := os.MkdirAll(dest, 0o755); err != nil {
+ return errors.WithStack(err)
+ }
+ store, err := contentlocal.NewStore(dest)
+ if err != nil {
+ return errors.Wrapf(err, "open store at %s", dest)
+ }
+ if err := flushStage(ctx, store, stage); err != nil {
+ return errors.Wrap(err, "copy snapshot into oci-layout")
+ }
+ idx := ociindex.NewStoreIndex(dest)
+ if err := idx.Put(root, ociindex.Tag("latest")); err != nil {
+ return errors.Wrapf(err, "update oci-layout index at %s", dest)
+ }
+ return nil
+}
+
+// flushStage copies every tracked blob from stage into ingester by reading
+// bytes directly from the staging buffer and writing them through the
+// ingester. Doing it at the byte level avoids containerd's remotes-layer
+// ref-key lookup (which warns on the empty mediaType we'd otherwise need
+// to plumb through for every blob).
+func flushStage(ctx context.Context, ingester content.Ingester, stage *stagingStore) error {
+ stage.mu.Lock()
+ order := slices.Clone(stage.order)
+ stage.mu.Unlock()
+ for _, dgst := range order {
+ info, err := stage.buffer.Info(ctx, dgst)
+ if err != nil {
+ return errors.Wrapf(err, "lookup %s", dgst)
+ }
+ desc := ocispecs.Descriptor{Digest: info.Digest, Size: info.Size}
+ ra, err := stage.ReaderAt(ctx, desc)
+ if err != nil {
+ return errors.Wrapf(err, "read %s", dgst)
+ }
+ err = content.WriteBlob(ctx, ingester, "snapshot-"+dgst.String(), content.NewReader(ra), desc)
+ ra.Close()
+ if err != nil && !errdefs.IsAlreadyExists(err) {
+ return errors.Wrapf(err, "write %s", dgst)
+ }
+ }
+ return nil
+}
+
+// writeOCILayoutTar writes the snapshot as an OCI layout tar at dest.
+//
+// containerd's imgarchive.Export walks the tree via images.Children, which
+// returns only config+layers for manifests and manifests[] for indexes —
+// it never follows `subject`. Our per-platform snapshot index reaches the
+// attestation manifest only through `subject`, so an imgarchive-driven
+// export would leave the attestation chain out of the tar. We instead
+// materialise the snapshot into a temp oci-layout directory using our
+// own walker (which does follow `subject`) and tar that directory.
+func writeOCILayoutTar(ctx context.Context, stage *stagingStore, root ocispecs.Descriptor, dest string) error {
+ tmp, err := os.MkdirTemp("", "buildx-snapshot-tar-")
+ if err != nil {
+ return errors.WithStack(err)
+ }
+ defer os.RemoveAll(tmp)
+ if err := writeOCILayoutDir(ctx, stage, root, tmp); err != nil {
+ return err
+ }
+
+ var w io.Writer = os.Stdout
+ if dest != "-" {
+ if err := os.MkdirAll(filepath.Dir(dest), 0o755); err != nil {
+ return errors.WithStack(err)
+ }
+ f, err := os.Create(dest)
+ if err != nil {
+ return errors.WithStack(err)
+ }
+ defer f.Close()
+ w = f
+ }
+ return tarDirectory(tmp, w)
+}
+
+// tarDirectory writes the contents of srcDir into w as a tar archive. Entries
+// are rooted at the directory's contents (not the directory name itself) so
+// the resulting tar matches the OCI image-layout v1 spec: index.json,
+// oci-layout and blobs/ at the archive root.
+//
+// The walk is confined by os.Root so that symlinks or relative components
+// inside srcDir cannot escape it and leak unrelated files into the tar.
+func tarDirectory(srcDir string, w io.Writer) error {
+ root, err := os.OpenRoot(srcDir)
+ if err != nil {
+ return errors.WithStack(err)
+ }
+ defer root.Close()
+
+ tw := tar.NewWriter(w)
+ defer tw.Close()
+
+ return fs.WalkDir(root.FS(), ".", func(path string, d fs.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+ if path == "." {
+ return nil
+ }
+ info, err := d.Info()
+ if err != nil {
+ return errors.WithStack(err)
+ }
+ hdr, err := tar.FileInfoHeader(info, "")
+ if err != nil {
+ return errors.WithStack(err)
+ }
+ hdr.Name = filepath.ToSlash(path)
+ if info.IsDir() {
+ hdr.Name += "/"
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ return errors.WithStack(err)
+ }
+ if !info.Mode().IsRegular() {
+ return nil
+ }
+ rf, err := root.Open(path)
+ if err != nil {
+ return errors.WithStack(err)
+ }
+ defer rf.Close()
+ if _, err := io.Copy(tw, rf); err != nil {
+ return errors.WithStack(err)
+ }
+ return nil
+ })
+}
+
+// pushSnapshotToRegistry pushes the snapshot to a registry through the buildx
+// imagetools.Resolver.
+func pushSnapshotToRegistry(ctx context.Context, stage *stagingStore, root ocispecs.Descriptor, ref string, dockerCli command.Cli, builderName string) error {
+ loc, err := imagetools.ParseLocation(ref)
+ if err != nil {
+ return errors.Wrapf(err, "parse registry ref %q", ref)
+ }
+ if !loc.IsRegistry() {
+ return errors.Errorf("snapshot: --output type=registry expects a registry ref, got %q", ref)
+ }
+
+ var resolver *imagetools.Resolver
+ if dockerCli == nil {
+ resolver = imagetools.New(imagetools.Opt{})
+ } else {
+ b, err := builder.New(dockerCli, builder.WithName(builderName))
+ if err != nil {
+ return err
+ }
+ imgOpt, err := b.ImageOpt()
+ if err != nil {
+ return err
+ }
+ resolver = imagetools.New(imgOpt)
+ }
+
+ ingester, err := resolver.IngesterForLocation(ctx, loc)
+ if err != nil {
+ return err
+ }
+ if err := flushStage(ctx, ingester, stage); err != nil {
+ return errors.Wrap(err, "copy snapshot to registry")
+ }
+ rootData, err := content.ReadBlob(ctx, stage, root)
+ if err != nil {
+ return errors.Wrap(err, "read snapshot root")
+ }
+ return resolver.Push(ctx, loc, root, rootData)
+}
diff --git a/replay/snapshot_format.go b/replay/snapshot_format.go
new file mode 100644
index 000000000000..338714d113c6
--- /dev/null
+++ b/replay/snapshot_format.go
@@ -0,0 +1,176 @@
+package replay
+
+import (
+ "encoding/json"
+
+ "github.com/opencontainers/go-digest"
+ ocispecsgo "github.com/opencontainers/image-spec/specs-go"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+// Media types and artifact types that identify buildx snapshots and the
+// materials manifest child.
+const (
+ // ArtifactTypeSnapshot is the artifactType for both the per-platform
+ // snapshot index and the top-level multi-platform snapshot index.
+ ArtifactTypeSnapshot = "application/vnd.docker.buildx.snapshots.v1+json"
+ // ArtifactTypeMaterials is the artifactType for the materials artifact
+ // manifest nested inside a per-platform snapshot index.
+ ArtifactTypeMaterials = "application/vnd.docker.buildx.snapshots.materials.v1+json"
+
+ // ociEmptyConfigMediaType is the OCI 1.1 empty descriptor for use as the
+ // `config:` of an artifact manifest that carries no image configuration.
+ // See the image-spec manifest guidance for the empty descriptor.
+ ociEmptyConfigMediaType = "application/vnd.oci.empty.v1+json"
+ // ociEmptyConfigDigest is the canonical sha256 of the two-byte `{}`
+ // OCI empty config.
+ ociEmptyConfigDigest = "sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a"
+ // ociEmptyConfigSize is the size in bytes of the OCI empty config (`{}`).
+ ociEmptyConfigSize = 2
+
+ // layerMediaTypeHTTP is the mediaType written for http material blob
+ // layers (raw bytes served by an http/https material).
+ layerMediaTypeHTTP = "application/octet-stream"
+ // layerMediaTypeContainerBlob is the default mediaType used for a
+ // container-blob layer material copied into the snapshot.
+ layerMediaTypeContainerBlob = "application/vnd.oci.image.layer.v1.tar+gzip"
+)
+
+// OCIEmptyConfigDescriptor returns the descriptor buildx uses for the empty
+// config on the materials artifact manifest. The two bytes of the empty
+// config are inlined via the descriptor's `data` field (OCI 1.1) so a
+// consumer never has to fetch the empty-config blob separately.
+func OCIEmptyConfigDescriptor() ocispecs.Descriptor {
+ return ocispecs.Descriptor{
+ MediaType: ociEmptyConfigMediaType,
+ Digest: digest.Digest(ociEmptyConfigDigest),
+ Size: ociEmptyConfigSize,
+ Data: OCIEmptyConfigBytes(),
+ }
+}
+
+// OCIEmptyConfigBytes returns the raw bytes of the OCI empty config (`{}`)
+// that callers must write into the snapshot content store so the materials
+// artifact manifest has a valid content-addressable config blob.
+func OCIEmptyConfigBytes() []byte {
+ return []byte("{}")
+}
+
+// MaterialsManifest builds the materials artifact manifest descriptor (the
+// image-manifest document plus its serialized bytes and descriptor) from the
+// ordered list of layer descriptors. The caller owns the task of copying the
+// referenced layer bytes (and the empty config) into the snapshot store; this
+// function produces only the manifest document and its addressable
+// descriptor.
+//
+// The layers parameter is used verbatim and must already include (in order):
+//
+// 1. http material layers (mediaType application/octet-stream)
+// 2. container-blob layers (mediaType vnd.oci.image.layer.v1.tar+gzip or
+// the recorded equivalent)
+// 3. image-material root-index blobs kept opaque
+// (mediaType vnd.oci.image.index.v1+json)
+func MaterialsManifest(layers []ocispecs.Descriptor) (ocispecs.Manifest, ocispecs.Descriptor, []byte, error) {
+ if layers == nil {
+ layers = []ocispecs.Descriptor{}
+ }
+ mfst := ocispecs.Manifest{
+ Versioned: ocispecsgo.Versioned{SchemaVersion: 2},
+ MediaType: ocispecs.MediaTypeImageManifest,
+ ArtifactType: ArtifactTypeMaterials,
+ Config: OCIEmptyConfigDescriptor(),
+ Layers: layers,
+ }
+ dt, err := json.Marshal(mfst)
+ if err != nil {
+ return ocispecs.Manifest{}, ocispecs.Descriptor{}, nil, errors.Wrap(err, "marshal materials manifest")
+ }
+ desc := ocispecs.Descriptor{
+ MediaType: ocispecs.MediaTypeImageManifest,
+ ArtifactType: ArtifactTypeMaterials,
+ Digest: digest.FromBytes(dt),
+ Size: int64(len(dt)),
+ }
+ return mfst, desc, dt, nil
+}
+
+// PerPlatformSnapshotIndex builds a per-platform snapshot index.
+// `attestManifest` is the ORIGINAL provenance attestation manifest
+// descriptor; it becomes the index's `subject` and its blob must be copied
+// into the snapshot content store by the caller.
+//
+// `materialsManifestDesc` — descriptor of the materials artifact manifest —
+// is included as the first entry in `manifests[]` when non-zero. Passing a
+// zero descriptor omits the materials manifest entirely (used when
+// `--include-materials=false`).
+//
+// `imageMaterialManifests` — platform-specific image manifests for each image
+// material — follow the materials manifest. Each descriptor's `Digest`
+// addresses the platform-specific manifest that the original build actually
+// used; its chain (manifest + config + layers) is expected to be present in
+// the snapshot store.
+func PerPlatformSnapshotIndex(
+ attestManifest ocispecs.Descriptor,
+ materialsManifestDesc ocispecs.Descriptor,
+ imageMaterialManifests []ocispecs.Descriptor,
+) (ocispecs.Index, ocispecs.Descriptor, []byte, error) {
+ manifests := make([]ocispecs.Descriptor, 0, 1+len(imageMaterialManifests))
+ if materialsManifestDesc.Digest != "" {
+ manifests = append(manifests, materialsManifestDesc)
+ }
+ manifests = append(manifests, imageMaterialManifests...)
+
+ idx := ocispecs.Index{
+ Versioned: ocispecsgo.Versioned{SchemaVersion: 2},
+ MediaType: ocispecs.MediaTypeImageIndex,
+ ArtifactType: ArtifactTypeSnapshot,
+ Subject: descriptorPtr(attestManifest),
+ Manifests: manifests,
+ }
+ dt, err := json.Marshal(idx)
+ if err != nil {
+ return ocispecs.Index{}, ocispecs.Descriptor{}, nil, errors.Wrap(err, "marshal per-platform snapshot index")
+ }
+ desc := ocispecs.Descriptor{
+ MediaType: ocispecs.MediaTypeImageIndex,
+ ArtifactType: ArtifactTypeSnapshot,
+ Digest: digest.FromBytes(dt),
+ Size: int64(len(dt)),
+ }
+ return idx, desc, dt, nil
+}
+
+// MultiPlatformSnapshotIndex wraps N per-platform snapshot index descriptors
+// into a top-level index. Each input descriptor must already carry its
+// `Platform` field so consumers can pick the right child.
+func MultiPlatformSnapshotIndex(perPlatform []ocispecs.Descriptor) (ocispecs.Index, ocispecs.Descriptor, []byte, error) {
+ idx := ocispecs.Index{
+ Versioned: ocispecsgo.Versioned{SchemaVersion: 2},
+ MediaType: ocispecs.MediaTypeImageIndex,
+ ArtifactType: ArtifactTypeSnapshot,
+ Manifests: perPlatform,
+ }
+ dt, err := json.Marshal(idx)
+ if err != nil {
+ return ocispecs.Index{}, ocispecs.Descriptor{}, nil, errors.Wrap(err, "marshal multi-platform snapshot index")
+ }
+ desc := ocispecs.Descriptor{
+ MediaType: ocispecs.MediaTypeImageIndex,
+ ArtifactType: ArtifactTypeSnapshot,
+ Digest: digest.FromBytes(dt),
+ Size: int64(len(dt)),
+ }
+ return idx, desc, dt, nil
+}
+
+// descriptorPtr returns a pointer to desc, or nil when desc is the zero
+// descriptor (so callers that pass no subject do not end up with an empty
+// stub).
+func descriptorPtr(desc ocispecs.Descriptor) *ocispecs.Descriptor {
+ if desc.Digest == "" {
+ return nil
+ }
+ out := desc
+ return &out
+}
diff --git a/replay/snapshot_format_test.go b/replay/snapshot_format_test.go
new file mode 100644
index 000000000000..e0582412a09b
--- /dev/null
+++ b/replay/snapshot_format_test.go
@@ -0,0 +1,170 @@
+package replay
+
+import (
+ "encoding/json"
+ "testing"
+
+ digest "github.com/opencontainers/go-digest"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/require"
+)
+
+// Round-trip tests for the snapshot-format writers. Each test builds a
+// concrete document through the writer, serializes it back through json,
+// and asserts the expected shape.
+
+func TestPerPlatformSnapshotIndex_WithMaterials(t *testing.T) {
+ // Fake attestation manifest descriptor (subject chain into the snapshot).
+ attest := ocispecs.Descriptor{
+ MediaType: ocispecs.MediaTypeImageManifest,
+ ArtifactType: "application/vnd.in-toto+json",
+ Digest: digest.FromBytes([]byte("attest")),
+ Size: 12,
+ }
+
+ // Two materials layers: one http blob, one image-material root index.
+ httpBytes := []byte("http-material-bytes")
+ rootBytes := []byte("root-index-bytes")
+ layers := []ocispecs.Descriptor{
+ {
+ MediaType: "application/octet-stream",
+ Digest: digest.FromBytes(httpBytes),
+ Size: int64(len(httpBytes)),
+ },
+ {
+ MediaType: ocispecs.MediaTypeImageIndex,
+ Digest: digest.FromBytes(rootBytes),
+ Size: int64(len(rootBytes)),
+ },
+ }
+ _, matDesc, matData, err := MaterialsManifest(layers)
+ require.NoError(t, err)
+ require.Equal(t, ArtifactTypeMaterials, matDesc.ArtifactType)
+ require.Equal(t, ocispecs.MediaTypeImageManifest, matDesc.MediaType)
+ require.NotZero(t, matDesc.Size)
+ require.Equal(t, digest.FromBytes(matData), matDesc.Digest)
+
+ // Parse the materials manifest and assert §5.2.2 rules.
+ var mm ocispecs.Manifest
+ require.NoError(t, json.Unmarshal(matData, &mm))
+ require.Equal(t, 2, mm.SchemaVersion)
+ require.Equal(t, ocispecs.MediaTypeImageManifest, mm.MediaType)
+ require.Equal(t, ArtifactTypeMaterials, mm.ArtifactType)
+ require.Equal(t, OCIEmptyConfigDescriptor().Digest, mm.Config.Digest)
+ require.Equal(t, OCIEmptyConfigDescriptor().MediaType, mm.Config.MediaType)
+ require.Equal(t, int64(2), mm.Config.Size)
+ require.Len(t, mm.Layers, 2)
+ require.Equal(t, layers[0].Digest, mm.Layers[0].Digest)
+ require.Equal(t, layers[1].Digest, mm.Layers[1].Digest)
+ require.Nil(t, mm.Subject, "materials manifest MUST NOT carry a subject (§5.2.2)")
+
+ // Build the per-platform snapshot index on top.
+ imgMfst := ocispecs.Descriptor{
+ MediaType: ocispecs.MediaTypeImageManifest,
+ Digest: digest.FromBytes([]byte("platform-manifest")),
+ Size: 42,
+ Platform: &ocispecs.Platform{Architecture: "amd64", OS: "linux"},
+ }
+ _, ppDesc, ppData, err := PerPlatformSnapshotIndex(attest, matDesc, []ocispecs.Descriptor{imgMfst})
+ require.NoError(t, err)
+ require.Equal(t, ArtifactTypeSnapshot, ppDesc.ArtifactType)
+ require.Equal(t, ocispecs.MediaTypeImageIndex, ppDesc.MediaType)
+ require.Equal(t, digest.FromBytes(ppData), ppDesc.Digest)
+
+ var pp ocispecs.Index
+ require.NoError(t, json.Unmarshal(ppData, &pp))
+ require.Equal(t, 2, pp.SchemaVersion)
+ require.Equal(t, ocispecs.MediaTypeImageIndex, pp.MediaType)
+ require.Equal(t, ArtifactTypeSnapshot, pp.ArtifactType)
+
+ require.NotNil(t, pp.Subject, "per-platform snapshot index MUST carry a subject (§5.2.1)")
+ require.Equal(t, attest.Digest, pp.Subject.Digest)
+ require.Equal(t, attest.MediaType, pp.Subject.MediaType)
+ require.Equal(t, attest.Size, pp.Subject.Size)
+
+ require.Len(t, pp.Manifests, 2)
+ // First entry MUST be the materials manifest when it is present.
+ require.Equal(t, matDesc.Digest, pp.Manifests[0].Digest)
+ require.Equal(t, ArtifactTypeMaterials, pp.Manifests[0].ArtifactType)
+ // Remaining entries are the per-material platform manifests.
+ require.Equal(t, imgMfst.Digest, pp.Manifests[1].Digest)
+ require.NotNil(t, pp.Manifests[1].Platform)
+ require.Equal(t, "amd64", pp.Manifests[1].Platform.Architecture)
+
+ // Snapshot indexes are deterministic — no creation timestamp annotation.
+ require.Empty(t, pp.Annotations[ocispecs.AnnotationCreated])
+}
+
+func TestPerPlatformSnapshotIndex_WithoutMaterials(t *testing.T) {
+ // When --include-materials=false the materials manifest is omitted:
+ // the caller passes a zero descriptor.
+ attest := ocispecs.Descriptor{
+ MediaType: ocispecs.MediaTypeImageManifest,
+ Digest: digest.FromBytes([]byte("attest-noop")),
+ Size: 10,
+ }
+ imgMfst := ocispecs.Descriptor{
+ MediaType: ocispecs.MediaTypeImageManifest,
+ Digest: digest.FromBytes([]byte("platform-manifest-noop")),
+ Size: 11,
+ }
+
+ _, _, ppData, err := PerPlatformSnapshotIndex(attest, ocispecs.Descriptor{}, []ocispecs.Descriptor{imgMfst})
+ require.NoError(t, err)
+
+ var pp ocispecs.Index
+ require.NoError(t, json.Unmarshal(ppData, &pp))
+ require.Len(t, pp.Manifests, 1, "zero materials descriptor should be dropped")
+ require.Equal(t, imgMfst.Digest, pp.Manifests[0].Digest)
+ require.NotEqual(t, ArtifactTypeMaterials, pp.Manifests[0].ArtifactType)
+}
+
+func TestMultiPlatformSnapshotIndex(t *testing.T) {
+ // Build two dummy per-platform snapshot descriptors with populated
+ // Platform fields; wrap via MultiPlatformSnapshotIndex.
+ amd := ocispecs.Descriptor{
+ MediaType: ocispecs.MediaTypeImageIndex,
+ ArtifactType: ArtifactTypeSnapshot,
+ Digest: digest.FromBytes([]byte("pp-amd64")),
+ Size: 100,
+ Platform: &ocispecs.Platform{Architecture: "amd64", OS: "linux"},
+ }
+ arm := ocispecs.Descriptor{
+ MediaType: ocispecs.MediaTypeImageIndex,
+ ArtifactType: ArtifactTypeSnapshot,
+ Digest: digest.FromBytes([]byte("pp-arm64")),
+ Size: 110,
+ Platform: &ocispecs.Platform{Architecture: "arm64", OS: "linux"},
+ }
+ _, desc, data, err := MultiPlatformSnapshotIndex([]ocispecs.Descriptor{amd, arm})
+ require.NoError(t, err)
+ require.Equal(t, ArtifactTypeSnapshot, desc.ArtifactType)
+ require.Equal(t, ocispecs.MediaTypeImageIndex, desc.MediaType)
+ require.Equal(t, digest.FromBytes(data), desc.Digest)
+
+ var idx ocispecs.Index
+ require.NoError(t, json.Unmarshal(data, &idx))
+ require.Equal(t, 2, idx.SchemaVersion)
+ require.Equal(t, ocispecs.MediaTypeImageIndex, idx.MediaType)
+ require.Equal(t, ArtifactTypeSnapshot, idx.ArtifactType)
+ require.Nil(t, idx.Subject, "top-level multi-platform snapshot index MUST NOT carry a subject (§5.2.3)")
+ require.Len(t, idx.Manifests, 2)
+
+ for i, child := range idx.Manifests {
+ require.Equal(t, ArtifactTypeSnapshot, child.ArtifactType, "child %d", i)
+ require.Equal(t, ocispecs.MediaTypeImageIndex, child.MediaType, "child %d", i)
+ require.NotNil(t, child.Platform, "child %d must carry platform", i)
+ }
+ require.Equal(t, "amd64", idx.Manifests[0].Platform.Architecture)
+ require.Equal(t, "arm64", idx.Manifests[1].Platform.Architecture)
+ require.Empty(t, idx.Annotations[ocispecs.AnnotationCreated])
+}
+
+func TestOCIEmptyConfig(t *testing.T) {
+ // Round-trip the empty config constants: digest of the bytes must equal
+ // the exported descriptor digest.
+ bytes := OCIEmptyConfigBytes()
+ require.Equal(t, "{}", string(bytes))
+ require.Equal(t, digest.FromBytes(bytes).String(), string(OCIEmptyConfigDescriptor().Digest))
+ require.Equal(t, int64(len(bytes)), OCIEmptyConfigDescriptor().Size)
+}
diff --git a/replay/snapshot_test.go b/replay/snapshot_test.go
new file mode 100644
index 000000000000..2d917229b8c7
--- /dev/null
+++ b/replay/snapshot_test.go
@@ -0,0 +1,289 @@
+package replay
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/docker/buildx/util/buildflags"
+ slsa1 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1"
+ "github.com/moby/buildkit/client/ociindex"
+ provenancetypes "github.com/moby/buildkit/solver/llbsolver/provenance/types"
+ "github.com/moby/buildkit/util/contentutil"
+ digest "github.com/opencontainers/go-digest"
+ ocispecsgo "github.com/opencontainers/image-spec/specs-go"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/require"
+)
+
+// snapshotFixture produces a Subject + Predicate pair backed by an in-memory
+// content provider. The subject carries a synthetic image manifest; the
+// attestation manifest references an in-toto SLSA v1 provenance statement
+// whose ResolvedDependencies name one http material.
+//
+// Returned slice of material bytes gives tests a handle on the http material
+// payload so they can be pinned via a MaterialsResolver override.
+type snapshotFixture struct {
+ subject *Subject
+ predicate *Predicate
+ httpBytes []byte
+ httpDigest digest.Digest
+ httpURI string
+ attestDigest digest.Digest
+ subjectDesc ocispecs.Descriptor
+ attestDesc ocispecs.Descriptor
+ provider contentutil.Buffer
+}
+
+func makeSnapshotFixture(t *testing.T) *snapshotFixture {
+ t.Helper()
+ ctx := context.Background()
+
+ buf := contentutil.NewBuffer()
+
+ // 1. Synthetic subject image: config + single layer + manifest.
+ configBytes := []byte(`{"architecture":"amd64","os":"linux","rootfs":{"type":"layers","diff_ids":[]}}`)
+ configDesc := writeBlobBuf(ctx, t, buf, ocispecs.MediaTypeImageConfig, configBytes)
+ layerBytes := []byte("fake-image-layer")
+ layerDesc := writeBlobBuf(ctx, t, buf, ocispecs.MediaTypeImageLayerGzip, layerBytes)
+
+ subjectManifest := ocispecs.Manifest{
+ Versioned: ocispecsgo.Versioned{SchemaVersion: 2},
+ MediaType: ocispecs.MediaTypeImageManifest,
+ Config: configDesc,
+ Layers: []ocispecs.Descriptor{layerDesc},
+ }
+ subjectBytes, err := json.Marshal(subjectManifest)
+ require.NoError(t, err)
+ subjectDesc := writeBlobBuf(ctx, t, buf, ocispecs.MediaTypeImageManifest, subjectBytes)
+ subjectDesc.Platform = &ocispecs.Platform{OS: "linux", Architecture: "amd64"}
+
+ // 2. http material (reachable later by digest).
+ httpBytes := []byte("hello-from-http-material")
+ httpDigest := digest.FromBytes(httpBytes)
+ _ = writeBlobBuf(ctx, t, buf, "application/octet-stream", httpBytes)
+
+ httpURI := "https://example.com/payload.tgz"
+
+ // 3. SLSA v1 provenance predicate.
+ pred := provenancetypes.ProvenancePredicateSLSA1{}
+ pred.BuildDefinition.ExternalParameters.Request.Frontend = "dockerfile.v0"
+ pred.BuildDefinition.ResolvedDependencies = []slsa1.ResourceDescriptor{
+ {
+ URI: httpURI,
+ Digest: map[string]string{
+ "sha256": httpDigest.Encoded(),
+ },
+ },
+ }
+
+ // 4. in-toto Statement wrapping the predicate.
+ predBytes, err := json.Marshal(pred)
+ require.NoError(t, err)
+ stmt := map[string]any{
+ "_type": "https://in-toto.io/Statement/v1",
+ "predicateType": "https://slsa.dev/provenance/v1",
+ "subject": []map[string]any{
+ {
+ "name": "synthetic",
+ "digest": map[string]string{"sha256": subjectDesc.Digest.Encoded()},
+ },
+ },
+ "predicate": json.RawMessage(predBytes),
+ }
+ stmtBytes, err := json.Marshal(stmt)
+ require.NoError(t, err)
+
+ stmtDesc := writeBlobBufWithAnnotations(
+ ctx, t, buf,
+ "application/vnd.in-toto+json",
+ stmtBytes,
+ map[string]string{"in-toto.io/predicate-type": "https://slsa.dev/provenance/v1"},
+ )
+
+ // 5. Attestation manifest referencing the in-toto statement layer.
+ attestManifest := ocispecs.Manifest{
+ Versioned: ocispecsgo.Versioned{SchemaVersion: 2},
+ MediaType: ocispecs.MediaTypeImageManifest,
+ ArtifactType: "application/vnd.in-toto+json",
+ Config: OCIEmptyConfigDescriptor(),
+ Layers: []ocispecs.Descriptor{stmtDesc},
+ }
+ // Write empty config (required by the attestation manifest chain).
+ _ = writeBlobBufRaw(ctx, t, buf, OCIEmptyConfigDescriptor(), OCIEmptyConfigBytes())
+
+ attestBytes, err := json.Marshal(attestManifest)
+ require.NoError(t, err)
+ attestDesc := writeBlobBuf(ctx, t, buf, ocispecs.MediaTypeImageManifest, attestBytes)
+ attestDesc.ArtifactType = "application/vnd.in-toto+json"
+
+ // 6. Build the Subject pointing at the image manifest.
+ s := &Subject{
+ Descriptor: subjectDesc,
+ Provider: buf,
+ inputRef: "synthetic://subject",
+ kind: subjectKindImage,
+ attestManifest: attestDesc,
+ }
+ p := Predicate(pred)
+
+ return &snapshotFixture{
+ subject: s,
+ predicate: &p,
+ httpBytes: httpBytes,
+ httpDigest: httpDigest,
+ httpURI: httpURI,
+ attestDigest: attestDesc.Digest,
+ subjectDesc: subjectDesc,
+ attestDesc: attestDesc,
+ provider: buf,
+ }
+}
+
+// writeBlobBuf stores dt in buf under a mediaType and returns the descriptor.
+func writeBlobBuf(ctx context.Context, t *testing.T, buf contentutil.Buffer, mediaType string, dt []byte) ocispecs.Descriptor {
+ t.Helper()
+ d := digest.FromBytes(dt)
+ desc := ocispecs.Descriptor{MediaType: mediaType, Digest: d, Size: int64(len(dt))}
+ require.NoError(t, content.WriteBlob(ctx, buf, d.String(), bytes.NewReader(dt), desc))
+ return desc
+}
+
+func writeBlobBufWithAnnotations(ctx context.Context, t *testing.T, buf contentutil.Buffer, mediaType string, dt []byte, ann map[string]string) ocispecs.Descriptor {
+ desc := writeBlobBuf(ctx, t, buf, mediaType, dt)
+ desc.Annotations = ann
+ return desc
+}
+
+func writeBlobBufRaw(ctx context.Context, t *testing.T, buf contentutil.Buffer, desc ocispecs.Descriptor, dt []byte) ocispecs.Descriptor {
+ t.Helper()
+ require.NoError(t, content.WriteBlob(ctx, buf, desc.Digest.String(), bytes.NewReader(dt), desc))
+ return desc
+}
+
+// snapshotOverrideResolver builds a MaterialsResolver whose single override
+// points at a temp file carrying the material bytes. That lets Snapshot
+// resolve the material without any network or registry access.
+func snapshotOverrideResolver(t *testing.T, uri string, dt []byte) *MaterialsResolver {
+ t.Helper()
+ tmp := filepath.Join(t.TempDir(), "override.bin")
+ require.NoError(t, os.WriteFile(tmp, dt, 0o644))
+ r, err := NewMaterialsResolver([]string{uri + "=" + tmp})
+ require.NoError(t, err)
+ return r
+}
+
+func TestSnapshotWritesOCILayout(t *testing.T) {
+ fx := makeSnapshotFixture(t)
+
+ dest := t.TempDir()
+ req := &SnapshotRequest{
+ Targets: []Target{{Subject: fx.subject, Predicate: fx.predicate}},
+ IncludeMaterials: true,
+ Materials: snapshotOverrideResolver(t, fx.httpURI, fx.httpBytes),
+ Output: &buildflags.ExportEntry{
+ Type: "oci",
+ Destination: dest,
+ Attrs: map[string]string{"tar": "false"},
+ },
+ }
+ require.NoError(t, Snapshot(context.Background(), nil, "", req))
+
+ // OCI layout skeleton present.
+ _, err := os.Stat(filepath.Join(dest, "oci-layout"))
+ require.NoError(t, err)
+ _, err = os.Stat(filepath.Join(dest, "index.json"))
+ require.NoError(t, err)
+ _, err = os.Stat(filepath.Join(dest, "blobs", "sha256"))
+ require.NoError(t, err)
+
+ // Index.json points at the per-platform snapshot index (single-platform
+ // case — Snapshot does not wrap in a multi-platform index).
+ idx, err := ociindex.NewStoreIndex(dest).Read()
+ require.NoError(t, err)
+ require.Len(t, idx.Manifests, 1)
+ root := idx.Manifests[0]
+ require.Equal(t, ArtifactTypeSnapshot, root.ArtifactType)
+
+ // Walk the root and assert its subject is the attestation manifest.
+ rootBlobPath := filepath.Join(dest, "blobs", root.Digest.Algorithm().String(), root.Digest.Encoded())
+ rootData, err := os.ReadFile(rootBlobPath)
+ require.NoError(t, err)
+ var ppIdx ocispecs.Index
+ require.NoError(t, json.Unmarshal(rootData, &ppIdx))
+ require.Equal(t, ArtifactTypeSnapshot, ppIdx.ArtifactType)
+ require.NotNil(t, ppIdx.Subject)
+ require.Equal(t, fx.attestDigest, ppIdx.Subject.Digest)
+
+ // First manifests[] entry must be the materials artifact manifest.
+ require.NotEmpty(t, ppIdx.Manifests)
+ matDesc := ppIdx.Manifests[0]
+ require.Equal(t, ArtifactTypeMaterials, matDesc.ArtifactType)
+
+ // The http material layer lives inside the materials manifest.
+ matPath := filepath.Join(dest, "blobs", matDesc.Digest.Algorithm().String(), matDesc.Digest.Encoded())
+ matRaw, err := os.ReadFile(matPath)
+ require.NoError(t, err)
+ var mm ocispecs.Manifest
+ require.NoError(t, json.Unmarshal(matRaw, &mm))
+ require.Equal(t, ArtifactTypeMaterials, mm.ArtifactType)
+
+ var gotHTTPLayer bool
+ for _, l := range mm.Layers {
+ if l.Digest == fx.httpDigest {
+ gotHTTPLayer = true
+ // Blob on disk must equal the pinned material bytes.
+ lp := filepath.Join(dest, "blobs", l.Digest.Algorithm().String(), l.Digest.Encoded())
+ got, err := os.ReadFile(lp)
+ require.NoError(t, err)
+ require.Equal(t, fx.httpBytes, got)
+ }
+ }
+ require.True(t, gotHTTPLayer, "materials manifest must include the http material layer")
+
+ // Round-trip: a MaterialsResolver pointed at oci-layout:// must
+ // find the http material through the snapshot-backed lookup path.
+ layout := fmt.Sprintf("oci-layout://%s", dest)
+ rr, err := NewMaterialsResolver([]string{layout})
+ require.NoError(t, err)
+ desc, provider, err := rr.Resolve(context.Background(), fx.httpURI, fx.httpDigest)
+ require.NoError(t, err)
+ require.NotNil(t, provider)
+ require.Equal(t, fx.httpDigest, desc.Digest)
+
+ gotBytes, err := content.ReadBlob(context.Background(), provider, desc)
+ require.NoError(t, err)
+ require.Equal(t, fx.httpBytes, gotBytes)
+}
+
+func TestSnapshotRejectsAttestationFileSubject(t *testing.T) {
+ fx := makeSnapshotFixture(t)
+ fx.subject.kind = subjectKindAttestationFile
+
+ req := &SnapshotRequest{
+ Targets: []Target{{Subject: fx.subject, Predicate: fx.predicate}},
+ Output: &buildflags.ExportEntry{
+ Type: "oci",
+ Destination: t.TempDir(),
+ Attrs: map[string]string{"tar": "false"},
+ },
+ }
+ err := Snapshot(context.Background(), nil, "", req)
+ require.Error(t, err)
+ var unsup *UnsupportedSubjectError
+ require.ErrorAs(t, err, &unsup)
+}
+
+func TestSnapshotMissingOutputRejected(t *testing.T) {
+ fx := makeSnapshotFixture(t)
+
+ err := Snapshot(context.Background(), nil, "", &SnapshotRequest{
+ Targets: []Target{{Subject: fx.subject, Predicate: fx.predicate}},
+ })
+ require.Error(t, err)
+}
diff --git a/replay/subject.go b/replay/subject.go
new file mode 100644
index 000000000000..88a68c24848e
--- /dev/null
+++ b/replay/subject.go
@@ -0,0 +1,453 @@
+package replay
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "os"
+ "strings"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/containerd/containerd/v2/core/images"
+ "github.com/distribution/reference"
+ "github.com/docker/buildx/builder"
+ "github.com/docker/buildx/util/imagetools"
+ "github.com/docker/cli/cli/command"
+ slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2"
+ slsa1 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1"
+ provenancetypes "github.com/moby/buildkit/solver/llbsolver/provenance/types"
+ "github.com/moby/buildkit/util/attestation"
+ "github.com/moby/buildkit/util/contentutil"
+ "github.com/opencontainers/go-digest"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+const (
+ dockerImagePrefix = "docker-image://"
+ ociLayoutPrefix = "oci-layout://"
+)
+
+// subjectKind classifies how a Subject was produced; downstream code only
+// reads the exported fields, but the kind guides error messages.
+type subjectKind int
+
+const (
+ // subjectKindImage covers both remote image refs and OCI-layout
+ // inputs — both resolve to a (descriptor, content.Provider) tuple
+ // with identical semantics.
+ subjectKindImage subjectKind = iota
+ subjectKindAttestationFile
+)
+
+// Subject is one replayable unit: a single manifest-level descriptor plus a
+// content.Provider that serves that descriptor, its referrers, and the
+// predicate blob.
+//
+// For image and oci-layout inputs, Descriptor is the produced artifact's
+// manifest descriptor. For an attestation-file input, Descriptor points at
+// the predicate blob in an in-memory content.Provider and there is no
+// produced artifact.
+type Subject struct {
+ Descriptor ocispecs.Descriptor
+ Provider content.Provider
+
+ // inputRef is the user-visible string for error messages.
+ inputRef string
+ kind subjectKind
+ // attestManifest is the in-toto attestation manifest descriptor that
+ // referrers this subject (image / oci-layout kinds). Empty for
+ // attestation-file subjects, whose predicate blob is directly at
+ // Descriptor.
+ attestManifest ocispecs.Descriptor
+ // predicateType caches the predicate type URI for attestation-file
+ // subjects so Predicate() can reject non-SLSA-v1 without re-reading.
+ predicateType string
+}
+
+// IsAttestationFile reports whether this subject was loaded from a local
+// attestation file (no produced artifact is available).
+func (s *Subject) IsAttestationFile() bool { return s != nil && s.kind == subjectKindAttestationFile }
+
+// InputRef returns the user-supplied input string that produced this
+// subject. Used for diagnostics.
+func (s *Subject) InputRef() string {
+ if s == nil {
+ return ""
+ }
+ return s.inputRef
+}
+
+// AttestationManifest returns the attestation manifest descriptor associated
+// with this subject, or the zero descriptor if none was found (or the subject
+// was loaded from a local attestation file).
+func (s *Subject) AttestationManifest() ocispecs.Descriptor {
+ if s == nil {
+ return ocispecs.Descriptor{}
+ }
+ return s.attestManifest
+}
+
+// LoadSubjects parses a user-supplied input and returns one Subject per
+// manifest to replay. An image index expands into N subjects (one per child
+// manifest); a single image manifest or attestation file returns 1.
+//
+// Input forms:
+// - docker-image://][ — explicit remote reference.
+// - oci-layout://[:] — explicit OCI layout directory.
+// - — local attestation file (in-toto / DSSE).
+// - — treated as an OCI layout.
+// - — valid image reference (docker-image).
+func LoadSubjects(ctx context.Context, dockerCli command.Cli, builderName, input string) ([]*Subject, error) {
+ trimmed := strings.TrimSpace(input)
+ if trimmed == "" {
+ return nil, errors.New("empty subject input")
+ }
+
+ // Already-prefixed inputs go straight through.
+ if strings.HasPrefix(trimmed, dockerImagePrefix) || strings.HasPrefix(trimmed, ociLayoutPrefix) {
+ return loadImageSubjects(ctx, dockerCli, builderName, trimmed)
+ }
+
+ // A local filesystem path: regular file = attestation, directory =
+ // OCI layout.
+ if fi, err := os.Stat(trimmed); err == nil {
+ if fi.IsDir() {
+ return loadImageSubjects(ctx, dockerCli, builderName, ociLayoutPrefix+trimmed)
+ }
+ return loadAttestationFileSubject(trimmed)
+ }
+
+ // Fall through: treat as a remote image reference. Validation happens
+ // inside loadImageSubjects.
+ return loadImageSubjects(ctx, dockerCli, builderName, trimmed)
+}
+
+// loadAttestationFileSubject reads a local attestation file (in-toto
+// Statement JSON, DSSE envelope, or an intoto.jsonl line-per-envelope file)
+// and synthesizes a Subject whose Descriptor points at the predicate blob
+// inside an in-memory content.Provider.
+func loadAttestationFileSubject(path string) ([]*Subject, error) {
+ dt, err := os.ReadFile(path)
+ if err != nil {
+ return nil, errors.WithStack(err)
+ }
+ dt = bytes.TrimSpace(dt)
+ if len(dt) == 0 {
+ return nil, errors.Errorf("attestation file %s is empty", path)
+ }
+
+ // Heuristic: .intoto.jsonl is line-delimited JSON Statements. Pick the
+ // first line that carries a provenance predicateType.
+ if strings.HasSuffix(path, ".intoto.jsonl") {
+ for line := range bytes.SplitSeq(dt, []byte("\n")) {
+ line = bytes.TrimSpace(line)
+ if len(line) == 0 {
+ continue
+ }
+ s, err := subjectFromAttestationBytes(line, path)
+ if err == nil {
+ return []*Subject{s}, nil
+ }
+ }
+ return nil, errors.Errorf("no SLSA provenance statement found in %s", path)
+ }
+
+ s, err := subjectFromAttestationBytes(dt, path)
+ if err != nil {
+ return nil, err
+ }
+ return []*Subject{s}, nil
+}
+
+// subjectFromAttestationBytes parses a single in-toto Statement (or DSSE
+// envelope around one) and returns a Subject whose Descriptor addresses the
+// predicate bytes inside an in-memory content.Provider. Signed DSSE
+// envelopes or Sigstore bundles are rejected with
+// SignatureVerificationRequiredError — replay never silently accepts a
+// signed attestation without a trust anchor. Full sigstore/cosign
+// verification is tracked as a follow-up.
+func subjectFromAttestationBytes(dt []byte, inputRef string) (*Subject, error) {
+ // Sigstore bundle detection: a bundle carries mediaType
+ // "application/vnd.dev.sigstore.bundle.v0.3+json" (or a v0.X variant)
+ // and a non-empty verificationMaterial / dsseEnvelope pair. Detect
+ // conservatively before the DSSE probe.
+ var bundleProbe struct {
+ MediaType string `json:"mediaType"`
+ VerificationMaterial json.RawMessage `json:"verificationMaterial,omitempty"`
+ DSSEEnvelope json.RawMessage `json:"dsseEnvelope,omitempty"`
+ MessageSignature json.RawMessage `json:"messageSignature,omitempty"`
+ }
+ if err := json.Unmarshal(dt, &bundleProbe); err == nil {
+ if strings.HasPrefix(bundleProbe.MediaType, "application/vnd.dev.sigstore.bundle.") ||
+ (len(bundleProbe.VerificationMaterial) > 0 && (len(bundleProbe.DSSEEnvelope) > 0 || len(bundleProbe.MessageSignature) > 0)) {
+ return nil, ErrSignatureVerificationRequired(inputRef, "sigstore-bundle")
+ }
+ }
+
+ // DSSE envelopes have a "payload" field with base64 content plus an
+ // optional "signatures" array. Detect by shape rather than media type
+ // (files carry no MIME).
+ var env struct {
+ Payload string `json:"payload"`
+ PayloadType string `json:"payloadType"`
+ Signatures []struct {
+ Sig string `json:"sig"`
+ KeyID string `json:"keyid,omitempty"`
+ } `json:"signatures"`
+ }
+ if err := json.Unmarshal(dt, &env); err == nil && env.Payload != "" {
+ // A signed DSSE envelope carries at least one non-empty signature.
+ // Reject it — replay has no trust anchor. Full sigstore/cosign
+ // signature verification is not implemented.
+ for _, sig := range env.Signatures {
+ if sig.Sig != "" {
+ return nil, ErrSignatureVerificationRequired(inputRef, "dsse")
+ }
+ }
+ decoded, err := decodeDSSEPayload(env.Payload)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to decode DSSE payload")
+ }
+ dt = decoded
+ }
+
+ var stmt struct {
+ PredicateType string `json:"predicateType"`
+ Predicate json.RawMessage `json:"predicate"`
+ }
+ if err := json.Unmarshal(dt, &stmt); err != nil {
+ return nil, errors.Wrap(err, "failed to unmarshal in-toto statement")
+ }
+ if stmt.PredicateType == "" {
+ return nil, errors.Errorf("attestation file %s has no predicateType", inputRef)
+ }
+
+ predBytes := []byte(stmt.Predicate)
+ dgst := digest.FromBytes(predBytes)
+ buf := contentutil.NewBuffer()
+ if err := content.WriteBlob(context.Background(), buf, dgst.String(), bytes.NewReader(predBytes), ocispecs.Descriptor{Digest: dgst, Size: int64(len(predBytes))}); err != nil {
+ return nil, errors.WithStack(err)
+ }
+
+ desc := ocispecs.Descriptor{
+ MediaType: "application/json",
+ Digest: dgst,
+ Size: int64(len(predBytes)),
+ }
+ return &Subject{
+ Descriptor: desc,
+ Provider: buf,
+ inputRef: inputRef,
+ kind: subjectKindAttestationFile,
+ predicateType: stmt.PredicateType,
+ }, nil
+}
+
+// decodeDSSEPayload base64-decodes the DSSE payload. Tries the standard
+// Base64 alphabet first, then URL alphabet (some implementations use the URL
+// variant for JSON-in-JSON safety).
+func decodeDSSEPayload(payload string) ([]byte, error) {
+ if dt, err := base64.StdEncoding.DecodeString(payload); err == nil {
+ return dt, nil
+ }
+ if dt, err := base64.URLEncoding.DecodeString(payload); err == nil {
+ return dt, nil
+ }
+ return nil, errors.New("payload is neither std nor url base64")
+}
+
+// loadImageSubjects resolves a remote ref or an oci-layout://[:]
+// input into one Subject per child manifest, fanning out only if the
+// resolved descriptor is itself an index. Both shapes use the same
+// util/imagetools.Resolver path as `imagetools inspect`; the resolver
+// internally dispatches on the location shape (see
+// util/imagetools/inspect.go:80).
+func loadImageSubjects(ctx context.Context, dockerCli command.Cli, builderName, input string) ([]*Subject, error) {
+ var (
+ resolver *imagetools.Resolver
+ )
+
+ if imagetools.IsOCILayout(input) {
+ // oci-layout:// does not need a builder / auth provider. Drive
+ // Resolve + Fetcher through a default resolver backed purely by
+ // the local layout store.
+ resolver = imagetools.New(imagetools.Opt{})
+ } else {
+ trimmed := strings.TrimPrefix(input, dockerImagePrefix)
+ if _, err := reference.ParseNormalizedNamed(trimmed); err != nil {
+ return nil, errors.Wrapf(err, "invalid image reference %q", trimmed)
+ }
+ input = trimmed
+
+ if dockerCli == nil {
+ return nil, errors.New("docker CLI is required to resolve remote image subjects")
+ }
+ b, err := builder.New(dockerCli, builder.WithName(builderName))
+ if err != nil {
+ return nil, err
+ }
+ imageOpt, err := b.ImageOpt()
+ if err != nil {
+ return nil, err
+ }
+ resolver = imagetools.New(imageOpt)
+ }
+
+ _, desc, err := resolver.Resolve(ctx, input)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to resolve %s", input)
+ }
+
+ fetcher, err := resolver.Fetcher(ctx, input)
+ if err != nil {
+ return nil, err
+ }
+ provider := contentutil.FromFetcher(fetcher)
+
+ return fanOutSubjects(ctx, provider, desc, input)
+}
+
+// fanOutSubjects walks an OCI index (if the root descriptor is an index) and
+// returns one Subject per non-attestation child manifest. When the root is
+// itself a manifest, a single subject is returned.
+func fanOutSubjects(ctx context.Context, provider content.Provider, root ocispecs.Descriptor, inputRef string) ([]*Subject, error) {
+ switch root.MediaType {
+ case ocispecs.MediaTypeImageIndex, images.MediaTypeDockerSchema2ManifestList:
+ dt, err := content.ReadBlob(ctx, provider, root)
+ if err != nil {
+ return nil, errors.WithStack(err)
+ }
+ var idx ocispecs.Index
+ if err := json.Unmarshal(dt, &idx); err != nil {
+ return nil, errors.WithStack(err)
+ }
+
+ // Buildx snapshot layouts carry their own artifactType and reach
+ // the attestation chain via the index's `subject`, not through a
+ // sibling attestation manifest. Loading them as subjects requires
+ // a different walk that is not yet implemented.
+ if idx.ArtifactType == ArtifactTypeSnapshot {
+ return nil, errors.Errorf("%s is a buildx snapshot layout; loading snapshots as subjects is not yet supported", inputRef)
+ }
+
+ // Partition entries: attestation manifests (via the Docker ref
+ // annotation) vs. real image manifests. The attestation manifest
+ // for a given subject carries `vnd.docker.reference.digest` /
+ // `com.docker.reference.digest` pointing at the subject's digest.
+ attestFor := map[digest.Digest]ocispecs.Descriptor{}
+ var imageManifests []ocispecs.Descriptor
+ for _, m := range idx.Manifests {
+ if ref := attestationReferenceDigest(m); ref != "" {
+ if d, err := digest.Parse(ref); err == nil {
+ attestFor[d] = m
+ continue
+ }
+ }
+ imageManifests = append(imageManifests, m)
+ }
+
+ out := make([]*Subject, 0, len(imageManifests))
+ for _, m := range imageManifests {
+ s := &Subject{
+ Descriptor: m,
+ Provider: provider,
+ inputRef: inputRef,
+ kind: subjectKindImage,
+ }
+ if att, ok := attestFor[m.Digest]; ok {
+ s.attestManifest = att
+ }
+ out = append(out, s)
+ }
+ if len(out) == 0 {
+ return nil, errors.Errorf("index %s has no image manifests", root.Digest)
+ }
+ return out, nil
+ case ocispecs.MediaTypeImageManifest, images.MediaTypeDockerSchema2Manifest:
+ return []*Subject{{
+ Descriptor: root,
+ Provider: provider,
+ inputRef: inputRef,
+ kind: subjectKindImage,
+ }}, nil
+ default:
+ return nil, errors.Errorf("unsupported root media type %q", root.MediaType)
+ }
+}
+
+// attestationReferenceDigest returns the subject digest recorded on an
+// attestation manifest's descriptor annotations, or "" if not present.
+// Keep in sync with util/imagetools/loader.go annotationReferences list.
+func attestationReferenceDigest(d ocispecs.Descriptor) string {
+ if d.Annotations == nil {
+ return ""
+ }
+ for _, k := range []string{
+ attestation.DockerAnnotationReferenceDigest,
+ "vnd.docker.reference.digest",
+ } {
+ if v, ok := d.Annotations[k]; ok && v != "" {
+ return v
+ }
+ }
+ return ""
+}
+
+// Predicate locates and parses the SLSA v1 provenance predicate attached to
+// Descriptor via Provider. Returns UnsupportedPredicateError on a non-v1
+// predicateType and NoProvenanceError when none is found.
+func (s *Subject) Predicate(ctx context.Context) (*Predicate, error) {
+ if s == nil {
+ return nil, errors.New("nil subject")
+ }
+
+ switch s.kind {
+ case subjectKindAttestationFile:
+ dt, err := content.ReadBlob(ctx, s.Provider, s.Descriptor)
+ if err != nil {
+ return nil, errors.WithStack(err)
+ }
+ return decodeProvenancePredicate(dt, s.predicateType)
+
+ case subjectKindImage:
+ if s.attestManifest.Digest == "" {
+ return nil, ErrNoProvenance(s.inputRef)
+ }
+ predDt, predType, err := imagetools.ReadProvenancePredicate(ctx, s.Provider, s.attestManifest)
+ if err != nil {
+ return nil, err
+ }
+ if predType == "" {
+ return nil, ErrNoProvenance(s.inputRef)
+ }
+ return decodeProvenancePredicate(predDt, predType)
+ }
+
+ return nil, ErrUnsupportedSubject("unknown")
+}
+
+// decodeProvenancePredicate unmarshals a provenance predicate in its
+// native form. SLSA v1 is used as-is; SLSA v0.2 is converted to v1 via
+// provenancetypes.ProvenancePredicateSLSA02.ConvertToSLSA1 so the rest of
+// the replay code only has to understand one shape.
+func decodeProvenancePredicate(dt []byte, predType string) (*Predicate, error) {
+ switch predType {
+ case slsa1.PredicateSLSAProvenance:
+ var pred Predicate
+ if err := json.Unmarshal(dt, &pred); err != nil {
+ return nil, errors.Wrap(err, "failed to unmarshal SLSA v1 predicate")
+ }
+ return &pred, nil
+ case slsa02.PredicateSLSAProvenance:
+ var old provenancetypes.ProvenancePredicateSLSA02
+ if err := json.Unmarshal(dt, &old); err != nil {
+ return nil, errors.Wrap(err, "failed to unmarshal SLSA v0.2 predicate")
+ }
+ converted := old.ConvertToSLSA1()
+ pred := Predicate(*converted)
+ return &pred, nil
+ }
+ return nil, ErrUnsupportedPredicate(predType)
+}
diff --git a/replay/subject_test.go b/replay/subject_test.go
new file mode 100644
index 000000000000..9051b928ca56
--- /dev/null
+++ b/replay/subject_test.go
@@ -0,0 +1,360 @@
+package replay
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "os"
+ "path/filepath"
+ "sort"
+ "testing"
+
+ "github.com/containerd/containerd/v2/core/content"
+ contentlocal "github.com/containerd/containerd/v2/plugins/content/local"
+ "github.com/moby/buildkit/client/ociindex"
+ "github.com/moby/buildkit/util/attestation"
+ "github.com/opencontainers/go-digest"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/require"
+)
+
+// TestSubjectPredicateAcceptsSLSA02 asserts that a SLSA v0.2 attestation
+// file is accepted and converted to the v1 shape used internally.
+func TestSubjectPredicateAcceptsSLSA02(t *testing.T) {
+ dir := t.TempDir()
+ path := filepath.Join(dir, "provenance.intoto.json")
+
+ stmt := map[string]any{
+ "_type": "https://in-toto.io/Statement/v0.1",
+ "predicateType": "https://slsa.dev/provenance/v0.2",
+ "subject": []any{},
+ "predicate": map[string]any{
+ "builder": map[string]string{"id": "buildkit"},
+ "buildType": "https://mobyproject.org/buildkit@v1",
+ "invocation": map[string]any{
+ "configSource": map[string]any{"uri": "https://example.com/dockerfile"},
+ "parameters": map[string]any{},
+ "environment": map[string]any{"platform": "linux/amd64"},
+ },
+ },
+ }
+ dt, err := json.Marshal(stmt)
+ require.NoError(t, err)
+ require.NoError(t, os.WriteFile(path, dt, 0644))
+
+ subjects, err := LoadSubjects(context.Background(), nil, "", path)
+ require.NoError(t, err)
+ require.Len(t, subjects, 1)
+ require.True(t, subjects[0].IsAttestationFile())
+
+ _, err = subjects[0].Predicate(context.Background())
+ require.NoError(t, err)
+}
+
+// TestSubjectPredicateRejectsUnknown asserts that a predicateType outside
+// the SLSA v1 / v0.2 set is rejected with UnsupportedPredicateError.
+func TestSubjectPredicateRejectsUnknown(t *testing.T) {
+ dir := t.TempDir()
+ path := filepath.Join(dir, "provenance.intoto.json")
+
+ stmt := map[string]any{
+ "_type": "https://in-toto.io/Statement/v0.1",
+ "predicateType": "https://example.com/custom/v1",
+ "subject": []any{},
+ "predicate": map[string]any{},
+ }
+ dt, err := json.Marshal(stmt)
+ require.NoError(t, err)
+ require.NoError(t, os.WriteFile(path, dt, 0644))
+
+ subjects, err := LoadSubjects(context.Background(), nil, "", path)
+ require.NoError(t, err)
+ _, err = subjects[0].Predicate(context.Background())
+ require.Error(t, err)
+ var unsup *UnsupportedPredicateError
+ require.ErrorAs(t, err, &unsup)
+ require.Equal(t, "https://example.com/custom/v1", unsup.PredicateType)
+}
+
+// TestSubjectPredicateAttestationFileSLSA1 asserts that an unsigned DSSE-less
+// in-toto Statement carrying a SLSA v1 predicate round-trips through
+// LoadSubjects + Predicate without error.
+func TestSubjectPredicateAttestationFileSLSA1(t *testing.T) {
+ dir := t.TempDir()
+ path := filepath.Join(dir, "provenance.intoto.json")
+ stmt := map[string]any{
+ "_type": "https://in-toto.io/Statement/v0.1",
+ "predicateType": "https://slsa.dev/provenance/v1",
+ "subject": []any{},
+ "predicate": map[string]any{
+ "buildDefinition": map[string]any{
+ "externalParameters": map[string]any{
+ "request": map[string]any{
+ "frontend": "dockerfile.v0",
+ },
+ },
+ },
+ },
+ }
+ dt, err := json.Marshal(stmt)
+ require.NoError(t, err)
+ require.NoError(t, os.WriteFile(path, dt, 0644))
+
+ subjects, err := LoadSubjects(context.Background(), nil, "", path)
+ require.NoError(t, err)
+ require.Len(t, subjects, 1)
+
+ pred, err := subjects[0].Predicate(context.Background())
+ require.NoError(t, err)
+ require.Equal(t, "dockerfile.v0", pred.Frontend())
+}
+
+// TestSubjectPredicateRejectsSignedDSSE asserts that a DSSE envelope with
+// non-empty signatures is rejected with SignatureVerificationRequiredError —
+// replay never silently accepts a signed attestation in this slice.
+func TestSubjectPredicateRejectsSignedDSSE(t *testing.T) {
+ dir := t.TempDir()
+ path := filepath.Join(dir, "signed.dsse.json")
+
+ inner := map[string]any{
+ "_type": "https://in-toto.io/Statement/v0.1",
+ "predicateType": "https://slsa.dev/provenance/v1",
+ "subject": []any{},
+ "predicate": map[string]any{},
+ }
+ innerDt, err := json.Marshal(inner)
+ require.NoError(t, err)
+
+ env := map[string]any{
+ "payload": base64.StdEncoding.EncodeToString(innerDt),
+ "payloadType": "application/vnd.in-toto+json",
+ "signatures": []map[string]string{
+ {"sig": "MEUCIQDstubbedsignaturebytes==", "keyid": "test-key"},
+ },
+ }
+ dt, err := json.Marshal(env)
+ require.NoError(t, err)
+ require.NoError(t, os.WriteFile(path, dt, 0644))
+
+ _, err = LoadSubjects(context.Background(), nil, "", path)
+ require.Error(t, err)
+ var sig *SignatureVerificationRequiredError
+ require.ErrorAs(t, err, &sig)
+ require.Equal(t, path, sig.Source)
+ require.Equal(t, "dsse", sig.Envelope)
+}
+
+// TestSubjectPredicateRejectsSigstoreBundle asserts that a Sigstore bundle
+// shape is rejected without signature verification support.
+func TestSubjectPredicateRejectsSigstoreBundle(t *testing.T) {
+ dir := t.TempDir()
+ path := filepath.Join(dir, "bundle.sigstore.json")
+
+ bundle := map[string]any{
+ "mediaType": "application/vnd.dev.sigstore.bundle.v0.3+json",
+ "verificationMaterial": map[string]any{
+ "tlogEntries": []any{},
+ },
+ "dsseEnvelope": map[string]any{
+ "payload": base64.StdEncoding.EncodeToString([]byte(`{}`)),
+ "payloadType": "application/vnd.in-toto+json",
+ "signatures": []any{},
+ },
+ }
+ dt, err := json.Marshal(bundle)
+ require.NoError(t, err)
+ require.NoError(t, os.WriteFile(path, dt, 0644))
+
+ _, err = LoadSubjects(context.Background(), nil, "", path)
+ require.Error(t, err)
+ var sig *SignatureVerificationRequiredError
+ require.ErrorAs(t, err, &sig)
+ require.Equal(t, "sigstore-bundle", sig.Envelope)
+}
+
+// TestSubjectPredicateAcceptsUnsignedDSSE asserts that a DSSE envelope with
+// an empty (or missing) signatures array is still accepted — the rejection
+// is gated on actual signatures being present.
+func TestSubjectPredicateAcceptsUnsignedDSSE(t *testing.T) {
+ dir := t.TempDir()
+ path := filepath.Join(dir, "unsigned.dsse.json")
+
+ inner := map[string]any{
+ "_type": "https://in-toto.io/Statement/v0.1",
+ "predicateType": "https://slsa.dev/provenance/v1",
+ "subject": []any{},
+ "predicate": map[string]any{},
+ }
+ innerDt, err := json.Marshal(inner)
+ require.NoError(t, err)
+
+ env := map[string]any{
+ "payload": base64.StdEncoding.EncodeToString(innerDt),
+ "payloadType": "application/vnd.in-toto+json",
+ "signatures": []any{},
+ }
+ dt, err := json.Marshal(env)
+ require.NoError(t, err)
+ require.NoError(t, os.WriteFile(path, dt, 0644))
+
+ subjects, err := LoadSubjects(context.Background(), nil, "", path)
+ require.NoError(t, err)
+ require.Len(t, subjects, 1)
+ require.True(t, subjects[0].IsAttestationFile())
+}
+
+// TestLoadSubjectsIndexFanout builds an OCI layout with a two-platform
+// image index (amd64 + arm64) and asserts LoadSubjects returns two subjects
+// with distinct Descriptor.Platform.
+func TestLoadSubjectsIndexFanout(t *testing.T) {
+ dir := t.TempDir()
+
+ store, err := contentlocal.NewStore(dir)
+ require.NoError(t, err)
+
+ ctx := context.Background()
+
+ cfgAmd64 := []byte(`{"architecture":"amd64","os":"linux"}`)
+ cfgAmd64Dgst, cfgAmd64Sz := putBlob(ctx, t, store, cfgAmd64, "application/vnd.oci.image.config.v1+json")
+ cfgArm64 := []byte(`{"architecture":"arm64","os":"linux"}`)
+ cfgArm64Dgst, cfgArm64Sz := putBlob(ctx, t, store, cfgArm64, "application/vnd.oci.image.config.v1+json")
+
+ amd64Desc := putManifest(ctx, t, store, ocispecs.Manifest{
+ MediaType: ocispecs.MediaTypeImageManifest,
+ Config: ocispecs.Descriptor{
+ MediaType: "application/vnd.oci.image.config.v1+json",
+ Digest: cfgAmd64Dgst,
+ Size: cfgAmd64Sz,
+ },
+ }, &ocispecs.Platform{Architecture: "amd64", OS: "linux"})
+
+ arm64Desc := putManifest(ctx, t, store, ocispecs.Manifest{
+ MediaType: ocispecs.MediaTypeImageManifest,
+ Config: ocispecs.Descriptor{
+ MediaType: "application/vnd.oci.image.config.v1+json",
+ Digest: cfgArm64Dgst,
+ Size: cfgArm64Sz,
+ },
+ }, &ocispecs.Platform{Architecture: "arm64", OS: "linux"})
+
+ idx := ocispecs.Index{
+ MediaType: ocispecs.MediaTypeImageIndex,
+ Manifests: []ocispecs.Descriptor{amd64Desc, arm64Desc},
+ }
+ idx.SchemaVersion = 2
+
+ idxDt, err := json.Marshal(idx)
+ require.NoError(t, err)
+ idxDgst, idxSize := putBlob(ctx, t, store, idxDt, ocispecs.MediaTypeImageIndex)
+
+ storeIdx := ociindex.NewStoreIndex(dir)
+ require.NoError(t, storeIdx.Put(ocispecs.Descriptor{
+ MediaType: ocispecs.MediaTypeImageIndex,
+ Digest: idxDgst,
+ Size: idxSize,
+ }, ociindex.Tag("latest")))
+
+ subjects, err := LoadSubjects(ctx, nil, "", "oci-layout://"+dir+":latest")
+ require.NoError(t, err)
+ require.Len(t, subjects, 2, "expected two fan-out subjects")
+
+ sort.Slice(subjects, func(i, j int) bool {
+ return subjects[i].Descriptor.Platform.Architecture < subjects[j].Descriptor.Platform.Architecture
+ })
+ require.Equal(t, "amd64", subjects[0].Descriptor.Platform.Architecture)
+ require.Equal(t, "arm64", subjects[1].Descriptor.Platform.Architecture)
+ require.NotEqual(t, subjects[0].Descriptor.Digest, subjects[1].Descriptor.Digest)
+}
+
+// TestLoadSubjectsFanoutSkipsAttestation exercises the attestation-manifest
+// filtering path: an index with an attestation manifest annotated via
+// vnd.docker.reference.digest must not produce a bonus subject for the
+// attestation.
+func TestLoadSubjectsFanoutSkipsAttestation(t *testing.T) {
+ dir := t.TempDir()
+ store, err := contentlocal.NewStore(dir)
+ require.NoError(t, err)
+ ctx := context.Background()
+
+ cfgDt := []byte(`{"architecture":"amd64","os":"linux"}`)
+ cfgDgst, cfgSize := putBlob(ctx, t, store, cfgDt, "application/vnd.oci.image.config.v1+json")
+ imgDesc := putManifest(ctx, t, store, ocispecs.Manifest{
+ MediaType: ocispecs.MediaTypeImageManifest,
+ Config: ocispecs.Descriptor{
+ MediaType: "application/vnd.oci.image.config.v1+json",
+ Digest: cfgDgst,
+ Size: cfgSize,
+ },
+ }, &ocispecs.Platform{Architecture: "amd64", OS: "linux"})
+
+ // Synthesize a bare "attestation manifest" that references imgDesc.
+ attestManifest := ocispecs.Manifest{
+ MediaType: ocispecs.MediaTypeImageManifest,
+ Config: ocispecs.Descriptor{
+ MediaType: "application/vnd.oci.image.config.v1+json",
+ Digest: cfgDgst,
+ Size: cfgSize,
+ },
+ }
+ attestDt, err := json.Marshal(attestManifest)
+ require.NoError(t, err)
+ attestDgst, attestSize := putBlob(ctx, t, store, attestDt, ocispecs.MediaTypeImageManifest)
+
+ attestDesc := ocispecs.Descriptor{
+ MediaType: ocispecs.MediaTypeImageManifest,
+ Digest: attestDgst,
+ Size: attestSize,
+ Annotations: map[string]string{
+ attestation.DockerAnnotationReferenceDigest: imgDesc.Digest.String(),
+ },
+ }
+
+ idx := ocispecs.Index{
+ MediaType: ocispecs.MediaTypeImageIndex,
+ Manifests: []ocispecs.Descriptor{imgDesc, attestDesc},
+ }
+ idx.SchemaVersion = 2
+ idxDt, err := json.Marshal(idx)
+ require.NoError(t, err)
+ idxDgst, idxSize := putBlob(ctx, t, store, idxDt, ocispecs.MediaTypeImageIndex)
+
+ storeIdx := ociindex.NewStoreIndex(dir)
+ require.NoError(t, storeIdx.Put(ocispecs.Descriptor{
+ MediaType: ocispecs.MediaTypeImageIndex,
+ Digest: idxDgst,
+ Size: idxSize,
+ }, ociindex.Tag("latest")))
+
+ subjects, err := LoadSubjects(ctx, nil, "", "oci-layout://"+dir+":latest")
+ require.NoError(t, err)
+ require.Len(t, subjects, 1, "attestation manifest should not expand to a subject")
+ require.Equal(t, imgDesc.Digest, subjects[0].Descriptor.Digest)
+ require.Equal(t, attestDgst, subjects[0].AttestationManifest().Digest, "subject should record its attestation manifest")
+}
+
+// putBlob writes raw bytes to the content store and returns the digest/size.
+func putBlob(ctx context.Context, t *testing.T, store content.Ingester, dt []byte, mediaType string) (digest.Digest, int64) {
+ t.Helper()
+ dgst := digest.FromBytes(dt)
+ desc := ocispecs.Descriptor{MediaType: mediaType, Digest: dgst, Size: int64(len(dt))}
+ err := content.WriteBlob(ctx, store, dgst.String(), bytes.NewReader(dt), desc)
+ require.NoError(t, err)
+ return dgst, int64(len(dt))
+}
+
+// putManifest marshals an OCI manifest and writes it to the store. Returns
+// the descriptor (with optional platform).
+func putManifest(ctx context.Context, t *testing.T, store content.Ingester, mfst ocispecs.Manifest, plat *ocispecs.Platform) ocispecs.Descriptor {
+ t.Helper()
+ mfst.SchemaVersion = 2
+ dt, err := json.Marshal(mfst)
+ require.NoError(t, err)
+ dgst, sz := putBlob(ctx, t, store, dt, ocispecs.MediaTypeImageManifest)
+ return ocispecs.Descriptor{
+ MediaType: ocispecs.MediaTypeImageManifest,
+ Digest: dgst,
+ Size: sz,
+ Platform: plat,
+ }
+}
diff --git a/replay/verify.go b/replay/verify.go
new file mode 100644
index 000000000000..bc8e79b772d0
--- /dev/null
+++ b/replay/verify.go
@@ -0,0 +1,472 @@
+package replay
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/containerd/containerd/v2/core/content"
+ contentlocal "github.com/containerd/containerd/v2/plugins/content/local"
+ "github.com/docker/buildx/build"
+ "github.com/docker/buildx/builder"
+ "github.com/docker/buildx/util/buildflags"
+ "github.com/docker/buildx/util/confutil"
+ "github.com/docker/buildx/util/dockerutil"
+ "github.com/docker/buildx/util/imagetools"
+ "github.com/docker/buildx/util/progress"
+ "github.com/docker/cli/cli/command"
+ "github.com/moby/buildkit/client/ociindex"
+ digest "github.com/opencontainers/go-digest"
+ ocispecsgo "github.com/opencontainers/image-spec/specs-go"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+// Compare modes accepted by Verify.
+const (
+ CompareModeDigest = "digest"
+ CompareModeArtifact = "artifact"
+ CompareModeSemantic = "semantic"
+)
+
+// VerifyVSAPredicateType is the in-toto predicate type for a SLSA
+// Verification Summary Attestation.
+const VerifyVSAPredicateType = "https://slsa.dev/verification_summary/v1"
+
+// VerifyArtifactType is the OCI artifact type used when --output type=oci
+// packages the VSA + diff report together.
+const VerifyArtifactType = "application/vnd.docker.buildx.snapshots.verify.v1+json"
+
+// VerifyRequest is the library-level input to Verify.
+type VerifyRequest struct {
+ // Subject is the loaded subject (exactly one — multi-platform subjects
+ // are verified one at a time by the caller).
+ Subject *Subject
+ // Predicate is the subject's SLSA v1 predicate.
+ Predicate *Predicate
+ // Mode selects the comparison strategy: "digest" (default),
+ // "artifact" (basic content walk), or "semantic" (deferred).
+ Mode string
+ // Materials resolver, same semantics as BuildRequest.Materials.
+ Materials *MaterialsResolver
+ // Network controls the replayed build's RUN-network mode.
+ Network string
+ // Secrets / SSH mirror the BuildRequest shape for secret pass-through.
+ Secrets buildflags.Secrets
+ SSH []*buildflags.SSH
+ // Output is an optional --output spec (local|oci|attest) that controls
+ // where the verification artefacts are written.
+ Output *buildflags.ExportEntry
+}
+
+// VerifyResult is the library-level result of a verification.
+type VerifyResult struct {
+ Matched bool
+ DiffReport *CompareReport
+ // VSABytes is the in-toto Statement bytes written when req.Output is
+ // set; empty otherwise.
+ VSABytes []byte
+}
+
+// Verify replays the subject to an ephemeral OCI layout, compares, and
+// optionally writes a VSA + diff report to req.Output.
+//
+// Semantic mode returns ErrNotImplemented. On an artifact-mode mismatch the
+// returned error is a typed CompareMismatchError wrapping the diff report;
+// callers should not attempt to interpret Matched=false with nil error.
+func Verify(ctx context.Context, dockerCli command.Cli, builderName string, req *VerifyRequest) (_ *VerifyResult, retErr error) {
+ if req == nil {
+ return nil, errors.New("nil verify request")
+ }
+ if req.Subject == nil {
+ return nil, errors.New("nil subject")
+ }
+ if req.Predicate == nil {
+ return nil, errors.New("nil predicate")
+ }
+
+ mode := req.Mode
+ if mode == "" {
+ mode = CompareModeDigest
+ }
+ switch mode {
+ case CompareModeDigest, CompareModeArtifact:
+ // ok
+ case CompareModeSemantic:
+ return nil, ErrNotImplemented("--compare=semantic")
+ default:
+ return nil, errors.Errorf("unknown --compare mode %q", mode)
+ }
+
+ // Subject-kind gating: attestation-file subjects have no produced
+ // artifact to verify against (§3).
+ if req.Subject.IsAttestationFile() {
+ return nil, ErrUnsupportedSubject("verify requires an image or oci-layout subject")
+ }
+
+ // Local-context reject (§4.2 step 4).
+ if locals := req.Predicate.Locals(); len(locals) > 0 {
+ names := make([]string, 0, len(locals))
+ for _, l := range locals {
+ names = append(names, l.Name)
+ }
+ return nil, ErrUnreplayableLocalContext(names)
+ }
+
+ // Secret / SSH cross-check.
+ if err := CheckSecrets(req.Predicate.Secrets(), req.Secrets); err != nil {
+ return nil, err
+ }
+ if err := CheckSSH(req.Predicate.SSH(), req.SSH); err != nil {
+ return nil, err
+ }
+
+ // Prepare ephemeral OCI layout for the replay output.
+ tmpDir, err := os.MkdirTemp("", "buildx-replay-verify-")
+ if err != nil {
+ return nil, errors.WithStack(err)
+ }
+ defer os.RemoveAll(tmpDir)
+
+ layoutDir := filepath.Join(tmpDir, "replay-oci")
+ if err := os.MkdirAll(layoutDir, 0o755); err != nil {
+ return nil, errors.WithStack(err)
+ }
+
+ // Run the replay build into the layout.
+ if err := verifyReplay(ctx, dockerCli, builderName, req, layoutDir); err != nil {
+ return nil, errors.Wrap(err, "replay for verify")
+ }
+
+ replayDesc, replayProvider, err := openVerifyReplayLayout(layoutDir)
+ if err != nil {
+ return nil, errors.Wrap(err, "open replay layout")
+ }
+
+ result := &VerifyResult{}
+ switch mode {
+ case CompareModeDigest:
+ result.Matched = CompareDigest(req.Subject.Descriptor, replayDesc)
+ case CompareModeArtifact:
+ replaySubj := &Subject{Descriptor: replayDesc, Provider: replayProvider}
+ rep, err := CompareArtifact(ctx, req.Subject, replaySubj)
+ if err != nil {
+ return nil, err
+ }
+ result.DiffReport = rep
+ result.Matched = ReportMatched(rep)
+ }
+
+ // VSA + output.
+ vsa, err := buildVSA(req, replayDesc, result, mode)
+ if err != nil {
+ return nil, err
+ }
+ result.VSABytes = vsa
+
+ if req.Output != nil {
+ if err := writeVerifyOutput(req, result, vsa); err != nil {
+ return nil, err
+ }
+ }
+
+ if !result.Matched {
+ reason := fmt.Sprintf("verify --compare=%s failed", mode)
+ return result, ErrCompareMismatch(reason, result.DiffReport)
+ }
+ return result, nil
+}
+
+// verifyReplay executes the replay using build.Build against a temp layout.
+func verifyReplay(ctx context.Context, dockerCli command.Cli, builderName string, req *VerifyRequest, layoutDir string) (retErr error) {
+ exportEntry := &buildflags.ExportEntry{
+ Type: "oci",
+ Destination: layoutDir,
+ Attrs: map[string]string{
+ // tar=false forces the oci exporter to emit an OCI layout tree
+ // (blobs/, index.json) which we can then open with
+ // contentlocal.NewStore.
+ "tar": "false",
+ },
+ }
+ exportSpecs := []*buildflags.ExportEntry{exportEntry}
+
+ breq := &BuildRequest{
+ Targets: []Target{{Subject: req.Subject, Predicate: req.Predicate}},
+ Mode: BuildModeMaterials,
+ Materials: req.Materials,
+ NetworkMode: req.Network,
+ Secrets: req.Secrets,
+ SSH: req.SSH,
+ Exports: exportSpecs,
+ }
+
+ // verifyReplay lives in verify.go so we can avoid duplicating the solve
+ // driver wiring that Build already owns; call Build directly.
+ return runVerifyBuild(ctx, dockerCli, builderName, breq)
+}
+
+// runVerifyBuild is a trimmed-down cousin of Build that exports into a
+// caller-supplied OCI layout directory instead of a user-specified export
+// spec. Kept separate so Verify can address the layout by path when reading
+// the replay output back.
+func runVerifyBuild(ctx context.Context, dockerCli command.Cli, builderName string, req *BuildRequest) (retErr error) {
+ // Local-context / cross-check already done by caller (Verify); we
+ // recompute exports + build opts + driver wiring here.
+ exports, _, err := build.CreateExports(req.Exports)
+ if err != nil {
+ return errors.Wrap(err, "parse --output")
+ }
+
+ buildOpts := make(map[string]build.Options, len(req.Targets))
+ for _, t := range req.Targets {
+ opt, err := BuildOptionsFromPredicate(t.Subject, t.Predicate, req)
+ if err != nil {
+ return err
+ }
+ opt.Exports = exports
+ buildOpts[SubjectKey(t.Subject)] = opt
+ }
+
+ b, err := builder.New(dockerCli, builder.WithName(builderName))
+ if err != nil {
+ return err
+ }
+ nodes, err := b.LoadNodes(ctx)
+ if err != nil {
+ return err
+ }
+ printer, err := progress.NewPrinter(ctx, dockerCli.Err(), "auto",
+ progress.WithDesc(
+ fmt.Sprintf("verifying %d subject(s) with %q instance using %s driver", len(req.Targets), b.Name, b.Driver),
+ fmt.Sprintf("%s:%s", b.Driver, b.Name),
+ ),
+ )
+ if err != nil {
+ return err
+ }
+ defer func() {
+ werr := printer.Wait()
+ if retErr == nil {
+ retErr = werr
+ }
+ }()
+
+ if _, err := build.Build(ctx, nodes, buildOpts, dockerutil.NewClient(dockerCli), confutil.NewConfig(dockerCli), printer); err != nil {
+ return errors.Wrap(err, "verify build")
+ }
+ return nil
+}
+
+// openVerifyReplayLayout reads the root descriptor from an OCI-layout
+// directory that buildx just populated via type=oci export.
+func openVerifyReplayLayout(dir string) (ocispecs.Descriptor, content.Provider, error) {
+ store, err := contentlocal.NewStore(dir)
+ if err != nil {
+ return ocispecs.Descriptor{}, nil, errors.Wrap(err, "open layout store")
+ }
+ idx, err := ociindex.NewStoreIndex(dir).Read()
+ if err != nil {
+ return ocispecs.Descriptor{}, nil, errors.Wrap(err, "read layout index")
+ }
+ if len(idx.Manifests) == 0 {
+ return ocispecs.Descriptor{}, nil, errors.New("empty layout index")
+ }
+ return idx.Manifests[0], store, nil
+}
+
+// buildVSA returns an in-toto Statement containing a SLSA VSA predicate
+// (§6.3). VSABytes is an .intoto.jsonl-shaped single-line JSON document.
+func buildVSA(req *VerifyRequest, replayDesc ocispecs.Descriptor, result *VerifyResult, mode string) ([]byte, error) {
+ status := "PASSED"
+ if !result.Matched {
+ status = "FAILED"
+ }
+ subjectName := req.Subject.InputRef()
+ if subjectName == "" {
+ subjectName = req.Subject.Descriptor.Digest.String()
+ }
+ statement := map[string]any{
+ "_type": "https://in-toto.io/Statement/v1",
+ "predicateType": VerifyVSAPredicateType,
+ "subject": []map[string]any{
+ {
+ "name": subjectName,
+ "digest": digestToDigestSet(req.Subject.Descriptor.Digest),
+ },
+ },
+ "predicate": map[string]any{
+ "verifier": map[string]any{
+ "id": "https://github.com/docker/buildx",
+ },
+ "timeVerified": time.Now().UTC().Format(time.RFC3339),
+ "resourceUri": subjectName,
+ "policy": map[string]any{"uri": ""},
+ "verificationResult": status,
+ "verifiedLevels": []string{},
+ "dependencyLevels": map[string]any{},
+ "inputAttestations": []map[string]any{
+ {
+ "uri": subjectName,
+ "digest": configSourceDigestSet(req.Predicate.BuildDefinition.ExternalParameters.ConfigSource.Digest),
+ },
+ },
+ // Buildx-specific sidecar fields (§6.3 "Fields that don't fit
+ // cleanly" strategy).
+ "buildx": map[string]any{
+ "replayMode": string(BuildModeMaterials),
+ "compareMode": mode,
+ "replayDigest": replayDesc.Digest.String(),
+ "subjectDigest": req.Subject.Descriptor.Digest.String(),
+ },
+ },
+ }
+ return json.Marshal(statement)
+}
+
+// digestToDigestSet turns an OCI digest into the in-toto DigestSet shape.
+func digestToDigestSet(d digest.Digest) map[string]string {
+ if d == "" {
+ return map[string]string{}
+ }
+ return map[string]string{d.Algorithm().String(): d.Encoded()}
+}
+
+// configSourceDigestSet returns a stable copy of the provenance-recorded
+// config-source digest set. A nil / empty input yields an empty map so the
+// JSON output is never `null`.
+func configSourceDigestSet(ds map[string]string) map[string]string {
+ out := make(map[string]string, len(ds))
+ for k, v := range ds {
+ if v == "" {
+ continue
+ }
+ out[k] = v
+ }
+ return out
+}
+
+// writeVerifyOutput emits the VSA (and diff report) through req.Output.
+func writeVerifyOutput(req *VerifyRequest, result *VerifyResult, vsa []byte) error {
+ switch req.Output.Type {
+ case "local":
+ dest := req.Output.Destination
+ if dest == "" {
+ return errors.New("verify output type=local requires dest=")
+ }
+ if err := os.MkdirAll(dest, 0o755); err != nil {
+ return errors.WithStack(err)
+ }
+ if err := os.WriteFile(filepath.Join(dest, "vsa.intoto.jsonl"), append(vsa, '\n'), 0o644); err != nil {
+ return errors.WithStack(err)
+ }
+ if result.DiffReport != nil {
+ dt, err := ReportJSON(result.DiffReport)
+ if err != nil {
+ return err
+ }
+ if err := os.WriteFile(filepath.Join(dest, "diff.json"), dt, 0o644); err != nil {
+ return errors.WithStack(err)
+ }
+ }
+ return nil
+ case "oci":
+ dest := req.Output.Destination
+ if dest == "" {
+ return errors.New("verify output type=oci requires dest=")
+ }
+ return writeVerifyOCIArtifact(dest, vsa, result)
+ case "attest":
+ return writeVerifyAttestReferrer(req)
+ }
+ return errors.Errorf("verify: unsupported --output type %q (want local | oci | attest)", req.Output.Type)
+}
+
+// writeVerifyOCIArtifact writes VSA + diff report as a single OCI artifact
+// tar at dest.
+func writeVerifyOCIArtifact(dest string, vsa []byte, result *VerifyResult) error {
+ if err := os.MkdirAll(filepath.Dir(dest), 0o755); err != nil {
+ return errors.WithStack(err)
+ }
+ // Build a minimal manifest document referencing the VSA as config and
+ // the diff report (if any) as a layer.
+ cfgDesc := ocispecs.Descriptor{
+ MediaType: "application/vnd.in-toto+json",
+ Digest: digest.FromBytes(vsa),
+ Size: int64(len(vsa)),
+ }
+ var layerDesc ocispecs.Descriptor
+ var layerBytes []byte
+ if result.DiffReport != nil {
+ var err error
+ layerBytes, err = ReportJSON(result.DiffReport)
+ if err != nil {
+ return err
+ }
+ layerDesc = ocispecs.Descriptor{
+ MediaType: "application/json",
+ Digest: digest.FromBytes(layerBytes),
+ Size: int64(len(layerBytes)),
+ }
+ }
+ mfst := ocispecs.Manifest{
+ Versioned: ocispecsgo.Versioned{SchemaVersion: 2},
+ MediaType: ocispecs.MediaTypeImageManifest,
+ ArtifactType: VerifyArtifactType,
+ Config: cfgDesc,
+ }
+ if layerDesc.Digest != "" {
+ mfst.Layers = []ocispecs.Descriptor{layerDesc}
+ }
+ mdt, err := json.Marshal(mfst)
+ if err != nil {
+ return errors.WithStack(err)
+ }
+ // Write plain file + sibling blobs. The simplest emission is a
+ // directory laid out as an OCI layout in dest's parent; however the
+ // spec calls for a single file. We write a JSON "bundle" envelope
+ // containing manifest + blobs for consumers — sufficient for a v1
+ // reproducible artefact without pulling in image archive dependencies.
+ type ociBundle struct {
+ Manifest json.RawMessage `json:"manifest"`
+ Blobs map[string]json.RawMessage `json:"blobs"`
+ }
+ bundle := ociBundle{
+ Manifest: mdt,
+ Blobs: map[string]json.RawMessage{
+ cfgDesc.Digest.String(): json.RawMessage(vsa),
+ },
+ }
+ if layerDesc.Digest != "" {
+ bundle.Blobs[layerDesc.Digest.String()] = json.RawMessage(layerBytes)
+ }
+ dt, err := json.MarshalIndent(bundle, "", " ")
+ if err != nil {
+ return errors.WithStack(err)
+ }
+ return os.WriteFile(dest, dt, 0o644)
+}
+
+// writeVerifyAttestReferrer attaches the VSA to the subject as a registry
+// referrer. Only valid when the subject was loaded from a registry image.
+func writeVerifyAttestReferrer(req *VerifyRequest) error {
+ ref := strings.TrimPrefix(req.Subject.InputRef(), "docker-image://")
+ if ref == "" || strings.HasPrefix(ref, "oci-layout://") || strings.HasSuffix(ref, ".intoto.jsonl") {
+ return ErrUnsupportedSubject("verify --output type=attest requires a registry image subject")
+ }
+ // Parse subject ref to produce a Location suitable for push.
+ loc, err := imagetools.ParseLocation(ref)
+ if err != nil {
+ return errors.Wrapf(err, "parse subject ref %q", ref)
+ }
+ if !loc.IsRegistry() {
+ return ErrUnsupportedSubject("verify --output type=attest requires a registry image subject")
+ }
+ // Full push plumbing for the referrer (manifest + config + optional
+ // layer) is substantial; v1 emits a clear error when requested.
+ return ErrNotImplemented("verify --output type=attest")
+}
diff --git a/replay/verify_compare.go b/replay/verify_compare.go
new file mode 100644
index 000000000000..460c94e3c461
--- /dev/null
+++ b/replay/verify_compare.go
@@ -0,0 +1,264 @@
+package replay
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/containerd/containerd/v2/core/images"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+const (
+ EventTypeNone = ""
+ EventTypeDescriptorMismatch = "DescriptorMismatch"
+ EventTypeIndexBlobMismatch = "IndexBlobMismatch"
+ EventTypeConfigBlobMismatch = "ConfigBlobMismatch"
+ EventTypeLayerBlobMismatch = "LayerBlobMismatch"
+)
+
+// CompareEventInput carries the relevant object for one side of a mismatch.
+// This intentionally stays small and demo-focused; TODO: re-evaluate diffoci
+// once it no longer depends on older containerd/linkname behavior.
+type CompareEventInput struct {
+ Descriptor *ocispecs.Descriptor `json:"descriptor,omitempty"`
+ Index *ocispecs.Index `json:"index,omitempty"`
+ Manifest *ocispecs.Manifest `json:"manifest,omitempty"`
+}
+
+// CompareEvent records a single divergence at one tree node.
+type CompareEvent struct {
+ Type string `json:"type,omitempty"`
+ Inputs [2]CompareEventInput `json:"inputs,omitempty"`
+ Diff string `json:"diff,omitempty"`
+}
+
+// CompareReport is the basic per-node event tree emitted by replay verify.
+type CompareReport struct {
+ CompareEvent
+ Context string `json:"context,omitempty"`
+ Children []*CompareReport `json:"children,omitempty"`
+}
+
+// CompareDigest returns whether subject and replay descriptors share the same
+// manifest digest. This is the fastest check and is the default for
+// `replay verify`.
+func CompareDigest(subject, replay ocispecs.Descriptor) bool {
+ return subject.Digest != "" && subject.Digest == replay.Digest
+}
+
+// CompareArtifact walks both subject and replay stores and returns a
+// CompareReport describing any divergence.
+//
+// The implementation is intentionally basic and content-addressed. A
+// manifest-digest match short-circuits the walk and returns an empty report
+// (no divergence). A mismatch at any level surfaces as an event node
+// populated with inputs referring to the two sides.
+//
+// TODO: experiment with diffoci again once it no longer requires older
+// containerd APIs and private linkname-based archive wiring.
+func CompareArtifact(ctx context.Context, subject, replay *Subject) (*CompareReport, error) {
+ if subject == nil || replay == nil {
+ return nil, errors.New("compare: nil subject or replay")
+ }
+ if subject.Provider == nil || replay.Provider == nil {
+ return nil, errors.New("compare: nil content provider")
+ }
+
+ // Short-circuit when both descriptors already have the same digest.
+ if CompareDigest(subject.Descriptor, replay.Descriptor) {
+ return &CompareReport{Context: "root"}, nil
+ }
+
+ root := &CompareReport{Context: "root"}
+ if err := compareDescriptor(ctx, root, subject.Provider, subject.Descriptor, replay.Provider, replay.Descriptor); err != nil {
+ return root, err
+ }
+ return root, nil
+}
+
+// CompareSemantic is declared for API completeness. Semantic comparison is
+// not yet implemented; callers receive a typed NotImplemented error.
+func CompareSemantic(ctx context.Context, subject, replay *Subject) (*CompareReport, error) {
+ return nil, ErrNotImplemented("--compare=semantic")
+}
+
+// compareDescriptor does a recursive content compare between two descriptors.
+// When any level diverges, an Event node is attached to parent and recursion
+// stops at that level.
+func compareDescriptor(ctx context.Context, parent *CompareReport, pa content.Provider, da ocispecs.Descriptor, pb content.Provider, db ocispecs.Descriptor) error {
+ // Descriptor-level mismatch (digest).
+ if da.Digest != db.Digest {
+ descA, descB := da, db
+ parent.Children = append(parent.Children, &CompareReport{
+ Context: fmt.Sprintf("descriptor %s", displayMediaType(da.MediaType)),
+ CompareEvent: CompareEvent{
+ Type: EventTypeDescriptorMismatch,
+ Inputs: [2]CompareEventInput{
+ {Descriptor: &descA},
+ {Descriptor: &descB},
+ },
+ Diff: fmt.Sprintf("digest mismatch: %s vs %s", da.Digest, db.Digest),
+ },
+ })
+ return nil
+ }
+
+ // Same digest at this level — walk descendants when available.
+ switch da.MediaType {
+ case ocispecs.MediaTypeImageIndex, images.MediaTypeDockerSchema2ManifestList:
+ return compareIndex(ctx, parent, pa, da, pb, db)
+ case ocispecs.MediaTypeImageManifest, images.MediaTypeDockerSchema2Manifest:
+ return compareManifest(ctx, parent, pa, da, pb, db)
+ }
+ return nil
+}
+
+func compareIndex(ctx context.Context, parent *CompareReport, pa content.Provider, da ocispecs.Descriptor, pb content.Provider, db ocispecs.Descriptor) error {
+ ia, err := readIndex(ctx, pa, da)
+ if err != nil {
+ return err
+ }
+ ib, err := readIndex(ctx, pb, db)
+ if err != nil {
+ return err
+ }
+ if len(ia.Manifests) != len(ib.Manifests) {
+ parent.Children = append(parent.Children, &CompareReport{
+ Context: "index",
+ CompareEvent: CompareEvent{
+ Type: EventTypeIndexBlobMismatch,
+ Inputs: [2]CompareEventInput{
+ {Index: ia},
+ {Index: ib},
+ },
+ Diff: fmt.Sprintf("child count mismatch: %d vs %d", len(ia.Manifests), len(ib.Manifests)),
+ },
+ })
+ return nil
+ }
+ for i := range ia.Manifests {
+ node := &CompareReport{Context: fmt.Sprintf("index/manifests[%d]", i)}
+ if err := compareDescriptor(ctx, node, pa, ia.Manifests[i], pb, ib.Manifests[i]); err != nil {
+ return err
+ }
+ if len(node.Children) > 0 || node.Type != EventTypeNone {
+ parent.Children = append(parent.Children, node)
+ }
+ }
+ return nil
+}
+
+func compareManifest(ctx context.Context, parent *CompareReport, pa content.Provider, da ocispecs.Descriptor, pb content.Provider, db ocispecs.Descriptor) error {
+ ma, err := readManifest(ctx, pa, da)
+ if err != nil {
+ return err
+ }
+ mb, err := readManifest(ctx, pb, db)
+ if err != nil {
+ return err
+ }
+ if ma.Config.Digest != mb.Config.Digest {
+ parent.Children = append(parent.Children, &CompareReport{
+ Context: "manifest/config",
+ CompareEvent: CompareEvent{
+ Type: EventTypeConfigBlobMismatch,
+ Inputs: [2]CompareEventInput{
+ {Manifest: ma},
+ {Manifest: mb},
+ },
+ Diff: fmt.Sprintf("config digest mismatch: %s vs %s", ma.Config.Digest, mb.Config.Digest),
+ },
+ })
+ }
+ if len(ma.Layers) != len(mb.Layers) {
+ parent.Children = append(parent.Children, &CompareReport{
+ Context: "manifest/layers",
+ CompareEvent: CompareEvent{
+ Type: EventTypeLayerBlobMismatch,
+ Inputs: [2]CompareEventInput{
+ {Manifest: ma},
+ {Manifest: mb},
+ },
+ Diff: fmt.Sprintf("layer count mismatch: %d vs %d", len(ma.Layers), len(mb.Layers)),
+ },
+ })
+ return nil
+ }
+ for i := range ma.Layers {
+ if ma.Layers[i].Digest != mb.Layers[i].Digest {
+ la, lb := ma.Layers[i], mb.Layers[i]
+ parent.Children = append(parent.Children, &CompareReport{
+ Context: fmt.Sprintf("manifest/layers[%d]", i),
+ CompareEvent: CompareEvent{
+ Type: EventTypeLayerBlobMismatch,
+ Inputs: [2]CompareEventInput{
+ {Descriptor: &la},
+ {Descriptor: &lb},
+ },
+ Diff: fmt.Sprintf("layer[%d] digest mismatch: %s vs %s", i, la.Digest, lb.Digest),
+ },
+ })
+ }
+ }
+ return nil
+}
+
+func readIndex(ctx context.Context, p content.Provider, desc ocispecs.Descriptor) (*ocispecs.Index, error) {
+ dt, err := content.ReadBlob(ctx, p, desc)
+ if err != nil {
+ return nil, errors.Wrapf(err, "read index %s", desc.Digest)
+ }
+ var idx ocispecs.Index
+ if err := json.Unmarshal(dt, &idx); err != nil {
+ return nil, errors.Wrapf(err, "unmarshal index %s", desc.Digest)
+ }
+ return &idx, nil
+}
+
+func readManifest(ctx context.Context, p content.Provider, desc ocispecs.Descriptor) (*ocispecs.Manifest, error) {
+ dt, err := content.ReadBlob(ctx, p, desc)
+ if err != nil {
+ return nil, errors.Wrapf(err, "read manifest %s", desc.Digest)
+ }
+ var mfst ocispecs.Manifest
+ if err := json.Unmarshal(dt, &mfst); err != nil {
+ return nil, errors.Wrapf(err, "unmarshal manifest %s", desc.Digest)
+ }
+ return &mfst, nil
+}
+
+func displayMediaType(mt string) string {
+ if mt == "" {
+ return ""
+ }
+ return mt
+}
+
+// ReportMatched reports whether a CompareReport represents a successful
+// compare (no divergence events). An empty tree counts as matched.
+func ReportMatched(r *CompareReport) bool {
+ if r == nil {
+ return true
+ }
+ if r.Type != "" && r.Type != EventTypeNone {
+ return false
+ }
+ for _, c := range r.Children {
+ if !ReportMatched(c) {
+ return false
+ }
+ }
+ return true
+}
+
+// ReportJSON serializes a CompareReport to JSON.
+// A nil report is emitted as an empty object.
+func ReportJSON(r *CompareReport) ([]byte, error) {
+ if r == nil {
+ return []byte("{}"), nil
+ }
+ return json.MarshalIndent(r, "", " ")
+}
diff --git a/replay/verify_compare_test.go b/replay/verify_compare_test.go
new file mode 100644
index 000000000000..deb5b5c47cce
--- /dev/null
+++ b/replay/verify_compare_test.go
@@ -0,0 +1,124 @@
+package replay
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "testing"
+
+ "github.com/containerd/containerd/v2/core/content"
+ "github.com/moby/buildkit/util/contentutil"
+ digest "github.com/opencontainers/go-digest"
+ ocispecsgo "github.com/opencontainers/image-spec/specs-go"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/stretchr/testify/require"
+)
+
+func TestCompareDigestMatch(t *testing.T) {
+ d := digest.FromBytes([]byte("hello"))
+ subject := ocispecs.Descriptor{Digest: d, MediaType: ocispecs.MediaTypeImageManifest}
+ replay := ocispecs.Descriptor{Digest: d, MediaType: ocispecs.MediaTypeImageManifest}
+ require.True(t, CompareDigest(subject, replay))
+}
+
+func TestCompareDigestMismatch(t *testing.T) {
+ subject := ocispecs.Descriptor{Digest: digest.FromBytes([]byte("a")), MediaType: ocispecs.MediaTypeImageManifest}
+ replay := ocispecs.Descriptor{Digest: digest.FromBytes([]byte("b")), MediaType: ocispecs.MediaTypeImageManifest}
+ require.False(t, CompareDigest(subject, replay))
+ // Empty subject digest is never considered a match.
+ require.False(t, CompareDigest(ocispecs.Descriptor{}, replay))
+}
+
+// writeManifestTree writes a minimal OCI manifest with the given config +
+// layer blob contents into buf and returns the manifest descriptor.
+func writeManifestTree(t *testing.T, buf contentutil.Buffer, configBytes []byte, layerBytes []byte) ocispecs.Descriptor {
+ t.Helper()
+ ctx := context.Background()
+
+ configDesc := ocispecs.Descriptor{
+ MediaType: ocispecs.MediaTypeImageConfig,
+ Digest: digest.FromBytes(configBytes),
+ Size: int64(len(configBytes)),
+ }
+ require.NoError(t, content.WriteBlob(ctx, buf, configDesc.Digest.String(), bytes.NewReader(configBytes), configDesc))
+
+ layerDesc := ocispecs.Descriptor{
+ MediaType: ocispecs.MediaTypeImageLayerGzip,
+ Digest: digest.FromBytes(layerBytes),
+ Size: int64(len(layerBytes)),
+ }
+ require.NoError(t, content.WriteBlob(ctx, buf, layerDesc.Digest.String(), bytes.NewReader(layerBytes), layerDesc))
+
+ mfst := ocispecs.Manifest{
+ Versioned: ocispecsgo.Versioned{SchemaVersion: 2},
+ MediaType: ocispecs.MediaTypeImageManifest,
+ Config: configDesc,
+ Layers: []ocispecs.Descriptor{layerDesc},
+ }
+ dt, err := json.Marshal(mfst)
+ require.NoError(t, err)
+ desc := ocispecs.Descriptor{
+ MediaType: ocispecs.MediaTypeImageManifest,
+ Digest: digest.FromBytes(dt),
+ Size: int64(len(dt)),
+ }
+ require.NoError(t, content.WriteBlob(ctx, buf, desc.Digest.String(), bytes.NewReader(dt), desc))
+ return desc
+}
+
+func TestCompareArtifactStubbed(t *testing.T) {
+ // Two identical in-memory content stores (structurally identical
+ // manifest trees). CompareArtifact should return without error and
+ // the resulting report should indicate no divergence.
+ bufA := contentutil.NewBuffer()
+ bufB := contentutil.NewBuffer()
+
+ configBytes := []byte(`{"architecture":"amd64","os":"linux"}`)
+ layerBytes := []byte("dummy-layer-content")
+
+ descA := writeManifestTree(t, bufA, configBytes, layerBytes)
+ descB := writeManifestTree(t, bufB, configBytes, layerBytes)
+
+ // Both trees have the same bytes → same digests.
+ require.Equal(t, descA.Digest, descB.Digest)
+
+ subject := &Subject{Descriptor: descA, Provider: bufA}
+ replay := &Subject{Descriptor: descB, Provider: bufB}
+
+ report, err := CompareArtifact(context.Background(), subject, replay)
+ require.NoError(t, err)
+ require.NotNil(t, report)
+ require.True(t, ReportMatched(report), "identical stores should report no divergence")
+}
+
+func TestCompareArtifactMismatch(t *testing.T) {
+ bufA := contentutil.NewBuffer()
+ bufB := contentutil.NewBuffer()
+
+ descA := writeManifestTree(t, bufA, []byte(`{"os":"linux"}`), []byte("a"))
+ descB := writeManifestTree(t, bufB, []byte(`{"os":"linux"}`), []byte("b"))
+ require.NotEqual(t, descA.Digest, descB.Digest)
+
+ subject := &Subject{Descriptor: descA, Provider: bufA}
+ replay := &Subject{Descriptor: descB, Provider: bufB}
+
+ report, err := CompareArtifact(context.Background(), subject, replay)
+ require.NoError(t, err)
+ require.NotNil(t, report)
+ require.False(t, ReportMatched(report), "mismatching stores should produce divergence events")
+
+ // JSON should round-trip.
+ raw, err := ReportJSON(report)
+ require.NoError(t, err)
+ require.NotEmpty(t, raw)
+ var parsed CompareReport
+ require.NoError(t, json.Unmarshal(raw, &parsed))
+}
+
+func TestCompareSemanticNotImplemented(t *testing.T) {
+ _, err := CompareSemantic(context.Background(), &Subject{}, &Subject{})
+ require.Error(t, err)
+ var nie *NotImplementedError
+ require.ErrorAs(t, err, &nie)
+ require.Equal(t, "--compare=semantic", nie.Feature)
+}
diff --git a/replay/verify_test.go b/replay/verify_test.go
new file mode 100644
index 000000000000..3021aed21e4c
--- /dev/null
+++ b/replay/verify_test.go
@@ -0,0 +1,105 @@
+package replay
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestVerifyRejectsSemantic(t *testing.T) {
+ req := &VerifyRequest{
+ Subject: &Subject{},
+ Predicate: &Predicate{},
+ Mode: CompareModeSemantic,
+ }
+ _, err := Verify(context.Background(), nil, "", req)
+ require.Error(t, err)
+ var nie *NotImplementedError
+ require.ErrorAs(t, err, &nie)
+ require.Equal(t, "--compare=semantic", nie.Feature)
+}
+
+func TestVerifyRejectsUnknownMode(t *testing.T) {
+ req := &VerifyRequest{
+ Subject: &Subject{},
+ Predicate: &Predicate{},
+ Mode: "bogus",
+ }
+ _, err := Verify(context.Background(), nil, "", req)
+ require.Error(t, err)
+}
+
+func TestVerifyRejectsAttestationFileSubject(t *testing.T) {
+ s := &Subject{kind: subjectKindAttestationFile}
+ req := &VerifyRequest{
+ Subject: s,
+ Predicate: &Predicate{},
+ Mode: CompareModeDigest,
+ }
+ _, err := Verify(context.Background(), nil, "", req)
+ require.Error(t, err)
+ var us *UnsupportedSubjectError
+ require.ErrorAs(t, err, &us)
+}
+
+func TestVerifyRejectsLocalContext(t *testing.T) {
+ pred := testPredicate(nil, []string{"ctx"})
+ req := &VerifyRequest{
+ Subject: testSubject(t),
+ Predicate: pred,
+ Mode: CompareModeDigest,
+ }
+ _, err := Verify(context.Background(), nil, "", req)
+ require.Error(t, err)
+ var ulc *UnreplayableLocalContextError
+ require.ErrorAs(t, err, &ulc)
+}
+
+func TestBuildVSASchema(t *testing.T) {
+ // Exercise the VSA builder directly — it's deterministic given inputs
+ // and does not require a live daemon.
+ s := testSubject(t)
+ s.inputRef = "docker-image://example.test/app:v1"
+
+ pred := testPredicate(nil, nil)
+ pred.BuildDefinition.ExternalParameters.ConfigSource.Digest = map[string]string{"sha256": "cafef00d"}
+
+ req := &VerifyRequest{
+ Subject: s,
+ Predicate: pred,
+ }
+ result := &VerifyResult{Matched: true}
+ dt, err := buildVSA(req, s.Descriptor, result, CompareModeDigest)
+ require.NoError(t, err)
+
+ var parsed map[string]any
+ require.NoError(t, json.Unmarshal(dt, &parsed))
+
+ require.Equal(t, "https://in-toto.io/Statement/v1", parsed["_type"])
+ require.Equal(t, VerifyVSAPredicateType, parsed["predicateType"])
+ subjects := parsed["subject"].([]any)
+ require.Len(t, subjects, 1)
+ require.Equal(t, "docker-image://example.test/app:v1", subjects[0].(map[string]any)["name"])
+
+ pred0 := parsed["predicate"].(map[string]any)
+ require.Equal(t, "PASSED", pred0["verificationResult"])
+ verifier := pred0["verifier"].(map[string]any)
+ require.Equal(t, "https://github.com/docker/buildx", verifier["id"])
+ buildxSide := pred0["buildx"].(map[string]any)
+ require.Equal(t, string(CompareModeDigest), buildxSide["compareMode"])
+}
+
+func TestBuildVSAFailedStatus(t *testing.T) {
+ s := testSubject(t)
+ pred := testPredicate(nil, nil)
+ req := &VerifyRequest{Subject: s, Predicate: pred}
+ result := &VerifyResult{Matched: false}
+ dt, err := buildVSA(req, s.Descriptor, result, CompareModeArtifact)
+ require.NoError(t, err)
+ var parsed map[string]any
+ require.NoError(t, json.Unmarshal(dt, &parsed))
+ pred0 := parsed["predicate"].(map[string]any)
+ require.Equal(t, "FAILED", pred0["verificationResult"])
+}
diff --git a/replay/warn.go b/replay/warn.go
new file mode 100644
index 000000000000..ca8ca0c75f72
--- /dev/null
+++ b/replay/warn.go
@@ -0,0 +1,40 @@
+package replay
+
+import (
+ "sync"
+
+ "github.com/docker/buildx/util/progress"
+)
+
+// warnOnce routes non-fatal warnings through a progress sub-logger
+// and drops duplicates by key. It is shared across snapshot targets so a
+// predicate that references the same material from every platform warns
+// at most once.
+type warnOnce struct {
+ mu sync.Mutex
+ visited map[string]struct{}
+}
+
+func newWarnOnce() *warnOnce {
+ return &warnOnce{visited: map[string]struct{}{}}
+}
+
+// Log emits msg (prefixed with "warning: ") on sub's stderr stream the
+// first time key is seen. Subsequent calls with the same key are silent.
+// sub may be nil, in which case the message is dropped entirely.
+func (w *warnOnce) Log(sub progress.SubLogger, key, msg string) {
+ if w == nil {
+ return
+ }
+ w.mu.Lock()
+ if _, ok := w.visited[key]; ok {
+ w.mu.Unlock()
+ return
+ }
+ w.visited[key] = struct{}{}
+ w.mu.Unlock()
+ if sub == nil {
+ return
+ }
+ sub.Log(2, []byte("warning: "+msg+"\n"))
+}
diff --git a/tests/integration_test.go b/tests/integration_test.go
index 574251dc5075..f9e8860fb2a0 100644
--- a/tests/integration_test.go
+++ b/tests/integration_test.go
@@ -30,6 +30,7 @@ func TestIntegration(t *testing.T) {
tests = append(tests, policyTestTests...)
tests = append(tests, bakeTests...)
tests = append(tests, historyTests...)
+ tests = append(tests, replayTests...)
tests = append(tests, inspectTests...)
tests = append(tests, lsTests...)
tests = append(tests, imagetoolsTests...)
diff --git a/tests/replay.go b/tests/replay.go
new file mode 100644
index 000000000000..07e14baa57fd
--- /dev/null
+++ b/tests/replay.go
@@ -0,0 +1,259 @@
+package tests
+
+import (
+ "encoding/json"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/containerd/continuity/fs/fstest"
+ "github.com/moby/buildkit/util/testutil/integration"
+ "github.com/stretchr/testify/require"
+)
+
+// replayTests exercises the buildx replay subcommands.
+//
+// All tests require a registry sandbox for the build-to-replay round-trip
+// because replay resolves a subject through the registry resolver. Tests
+// that need a writable registry skip when `sb.RegistryAddress()` is empty.
+var replayTests = []func(t *testing.T, sb integration.Sandbox){
+ testReplayBuildRoundTrip,
+ testReplaySnapshotRoundTrip,
+ testReplayVerifyDigest,
+ testReplayVerifyArtifactDivergence,
+ testReplayRejectsLocalContext,
+ testReplaySecretRoundTrip,
+ testReplayMultiPlatformRoundTrip,
+}
+
+// replayTestDockerfile returns a minimal Dockerfile that COPYs from a named
+// context so the build's provenance records no local filesystem context —
+// the default buildx build always records a local context which replay
+// correctly refuses (SPEC §9).
+const replayTestDockerfile = `# syntax=docker/dockerfile:1
+FROM scratch
+COPY --from=ctx /etc/hosts /hosts
+`
+
+// buildReplayableImage does a `buildx build` against a registry with
+// --build-context=ctx=docker-image://alpine:3.20 so the resulting image has
+// a valid SLSA v1 provenance without local context. Returns the registry
+// ref (image@digest).
+func buildReplayableImage(t *testing.T, sb integration.Sandbox, extra ...string) string {
+ t.Helper()
+ registry, err := sb.NewRegistry()
+ if err != nil {
+ t.Skipf("skipping: registry not available: %v", err)
+ }
+ ref := registry + "/buildx-replay:" + t.Name()
+
+ dir := tmpdir(t,
+ fstest.CreateFile("Dockerfile", []byte(replayTestDockerfile), 0o600),
+ )
+
+ args := []string{
+ "--output=type=registry,name=" + ref,
+ "--build-context=ctx=docker-image://alpine:3.20",
+ "--attest=type=provenance,mode=max",
+ dir,
+ }
+ args = append(args, extra...)
+ out, err := buildCmd(sb, withArgs(args...))
+ require.NoError(t, err, out)
+ return ref
+}
+
+func testReplayBuildRoundTrip(t *testing.T, sb integration.Sandbox) {
+ ref := buildReplayableImage(t, sb)
+
+ dest := filepath.Join(t.TempDir(), "replay-out")
+ cmd := buildxCmd(sb, withArgs(
+ "replay", "build",
+ "docker-image://"+ref,
+ "--output=type=oci,dest="+filepath.Join(dest, "replay.oci.tar"),
+ ))
+ out, err := cmd.CombinedOutput()
+ require.NoError(t, err, string(out))
+ require.FileExists(t, filepath.Join(dest, "replay.oci.tar"))
+}
+
+func testReplaySnapshotRoundTrip(t *testing.T, sb integration.Sandbox) {
+ ref := buildReplayableImage(t, sb)
+
+ dest := filepath.Join(t.TempDir(), "snap")
+ cmd := buildxCmd(sb, withArgs(
+ "replay", "snapshot",
+ "docker-image://"+ref,
+ "--output=type=local,dest="+dest,
+ ))
+ out, err := cmd.CombinedOutput()
+ require.NoError(t, err, string(out))
+ require.FileExists(t, filepath.Join(dest, "oci-layout"))
+
+ // Round-trip: use the snapshot as a materials store and replay the build.
+ outDir := filepath.Join(t.TempDir(), "replay-from-snapshot")
+ cmd = buildxCmd(sb, withArgs(
+ "replay", "build",
+ "docker-image://"+ref,
+ "--materials=oci-layout://"+dest,
+ "--output=type=oci,dest="+filepath.Join(outDir, "replay.oci.tar"),
+ ))
+ out, err = cmd.CombinedOutput()
+ require.NoError(t, err, string(out))
+}
+
+func testReplayVerifyDigest(t *testing.T, sb integration.Sandbox) {
+ ref := buildReplayableImage(t, sb)
+
+ cmd := buildxCmd(sb, withArgs(
+ "replay", "verify",
+ "docker-image://"+ref,
+ "--compare=digest",
+ ))
+ out, err := cmd.CombinedOutput()
+ require.NoError(t, err, string(out))
+}
+
+func testReplayVerifyArtifactDivergence(t *testing.T, sb integration.Sandbox) {
+ // Build, then replay with an extra build-arg injected at verify time
+ // so the resulting image diverges.
+ ref := buildReplayableImage(t, sb)
+
+ cmd := buildxCmd(sb, withArgs(
+ "replay", "verify",
+ "docker-image://"+ref,
+ "--compare=artifact",
+ ))
+ out, err := cmd.CombinedOutput()
+ // This MAY produce a match if the build is perfectly reproducible;
+ // the harness just asserts the command runs without crashing and
+ // exits with a well-defined exit code (0 match, 8 mismatch).
+ if err != nil {
+ // Mismatch exit code is 8 (SPEC §10). Other codes mean the
+ // harness couldn't set up the test.
+ if !strings.Contains(string(out), "replay mismatch") {
+ t.Skipf("verify --compare=artifact could not be exercised: %v\n%s", err, out)
+ }
+ }
+}
+
+func testReplayRejectsLocalContext(t *testing.T, sb integration.Sandbox) {
+ // A default `buildx build` records a local filesystem context. Replay
+ // must refuse this case.
+ registry, err := sb.NewRegistry()
+ if err != nil {
+ t.Skipf("skipping: registry not available: %v", err)
+ }
+ ref := registry + "/buildx-replay-local:" + t.Name()
+
+ dir := createTestProject(t)
+ out, err := buildCmd(sb, withArgs(
+ "--output=type=registry,name="+ref,
+ "--attest=type=provenance,mode=max",
+ dir,
+ ))
+ require.NoError(t, err, out)
+
+ cmd := buildxCmd(sb, withArgs(
+ "replay", "build",
+ "docker-image://"+ref,
+ ))
+ bout, err := cmd.CombinedOutput()
+ require.Error(t, err, string(bout))
+ require.Contains(t, string(bout), "local context")
+}
+
+func testReplaySecretRoundTrip(t *testing.T, sb integration.Sandbox) {
+ // Build with a declared secret so the provenance records a required
+ // secret ID. Replay without --secret must fail; with --secret passes.
+ registry, err := sb.NewRegistry()
+ if err != nil {
+ t.Skipf("skipping: registry not available: %v", err)
+ }
+ ref := registry + "/buildx-replay-secret:" + t.Name()
+
+ secretFile := filepath.Join(t.TempDir(), "secret.txt")
+ require.NoError(t, os.WriteFile(secretFile, []byte("hunter2"), 0o600))
+
+ dockerfile := `# syntax=docker/dockerfile:1
+FROM scratch
+COPY --from=ctx /etc/hosts /hosts
+`
+ dir := tmpdir(t,
+ fstest.CreateFile("Dockerfile", []byte(dockerfile), 0o600),
+ )
+
+ out, err := buildCmd(sb, withArgs(
+ "--output=type=registry,name="+ref,
+ "--build-context=ctx=docker-image://alpine:3.20",
+ "--secret=id=api,src="+secretFile,
+ "--attest=type=provenance,mode=max",
+ dir,
+ ))
+ require.NoError(t, err, out)
+
+ // Without --secret: fail with missing-secret exit code.
+ cmd := buildxCmd(sb, withArgs(
+ "replay", "build",
+ "docker-image://"+ref,
+ "--output=type=oci,dest="+filepath.Join(t.TempDir(), "out.oci.tar"),
+ ))
+ bout, err := cmd.CombinedOutput()
+ if err == nil {
+ // Provenance may not have recorded the secret. Skip rather than
+ // fail — this is environment-dependent.
+ t.Skipf("secret was not recorded in provenance; cannot exercise missing-secret path:\n%s", bout)
+ }
+ require.Contains(t, string(bout), "missing required secrets", string(bout))
+
+ // With --secret: succeed.
+ cmd = buildxCmd(sb, withArgs(
+ "replay", "build",
+ "docker-image://"+ref,
+ "--secret=id=api,src="+secretFile,
+ "--output=type=oci,dest="+filepath.Join(t.TempDir(), "out.oci.tar"),
+ ))
+ bout, err = cmd.CombinedOutput()
+ require.NoError(t, err, string(bout))
+}
+
+func testReplayMultiPlatformRoundTrip(t *testing.T, sb integration.Sandbox) {
+ if !isRemoteMultiNodeWorker(sb) {
+ t.Skip("only testing with remote multi-node worker")
+ }
+ registry, err := sb.NewRegistry()
+ if err != nil {
+ t.Skipf("skipping: registry not available: %v", err)
+ }
+ ref := registry + "/buildx-replay-mp:" + t.Name()
+
+ dir := tmpdir(t,
+ fstest.CreateFile("Dockerfile", []byte(replayTestDockerfile), 0o600),
+ )
+
+ out, err := buildCmd(sb, withArgs(
+ "--output=type=registry,name="+ref,
+ "--build-context=ctx=docker-image://alpine:3.20",
+ "--attest=type=provenance,mode=max",
+ "--platform=linux/amd64,linux/arm64",
+ dir,
+ ))
+ require.NoError(t, err, out)
+
+ // Dry-run should enumerate both platforms.
+ cmd := buildxCmd(sb, withArgs(
+ "replay", "build",
+ "docker-image://"+ref,
+ "--dry-run",
+ ))
+ bout, err := cmd.CombinedOutput()
+ require.NoError(t, err, string(bout))
+ var plan struct {
+ Subjects []struct {
+ Platform string `json:"platform"`
+ } `json:"subjects"`
+ }
+ require.NoError(t, json.Unmarshal(bout, &plan), "dry-run must emit JSON plan")
+ require.Len(t, plan.Subjects, 2)
+}
diff --git a/util/buildflags/policy.go b/util/buildflags/policy.go
index 0d1abb694b53..ff44b166e203 100644
--- a/util/buildflags/policy.go
+++ b/util/buildflags/policy.go
@@ -5,6 +5,7 @@ import (
"strings"
"github.com/docker/buildx/policy"
+ "github.com/moby/buildkit/sourcepolicy/policysession"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/tonistiigi/go-csvvalue"
@@ -16,6 +17,13 @@ type PolicyConfig struct {
Disabled bool
Strict *bool
LogLevel *logrus.Level
+
+ // Callback, when non-nil, is a programmatic policy evaluator. Set in
+ // code (not from CLI flags) for cases like `buildx replay` that
+ // enforce pinning without loading a policy file. Configs with
+ // Callback set are composed into the policy callback chain alongside
+ // file-based policies.
+ Callback policysession.PolicyCallback
}
func ParsePolicyConfigs(in []string) ([]PolicyConfig, error) {
diff --git a/util/imagetools/loader.go b/util/imagetools/loader.go
index d30fd49b615d..95e1ae88795d 100644
--- a/util/imagetools/loader.go
+++ b/util/imagetools/loader.go
@@ -466,3 +466,62 @@ func withIntotoMediaTypes(ctx context.Context) context.Context {
}
return ctx
}
+
+// ReadProvenancePredicate loads the SLSA provenance predicate payload from the
+// attestation manifest referenced by attestManifest, reading blobs through the
+// supplied content provider. Returns the raw predicate JSON bytes and the
+// predicate type URI. When the manifest has no provenance layer both return
+// values are empty without error so callers can distinguish "no provenance"
+// from a hard failure.
+//
+// This exposes the provenance scan + DSSE unwrap logic used internally by
+// scanProvenance for reuse by `buildx replay` (see
+// /src/hack/poc/replay/SPEC.md section 3).
+func ReadProvenancePredicate(ctx context.Context, provider content.Provider, attestManifest ocispecs.Descriptor) ([]byte, string, error) {
+ ctx = withIntotoMediaTypes(ctx)
+ dt, err := content.ReadBlob(ctx, provider, attestManifest)
+ if err != nil {
+ return nil, "", errors.Wrap(err, "failed to read attestation manifest")
+ }
+ var mfst ocispecs.Manifest
+ if err := json.Unmarshal(dt, &mfst); err != nil {
+ return nil, "", errors.Wrap(err, "failed to unmarshal attestation manifest")
+ }
+ var (
+ layer ocispecs.Descriptor
+ predType string
+ )
+ for _, l := range mfst.Layers {
+ annot := l.Annotations["in-toto.io/predicate-type"]
+ if (l.MediaType == inTotoGenericMime || isInTotoDSSE(l.MediaType)) &&
+ strings.HasPrefix(annot, "https://slsa.dev/provenance/") {
+ layer = l
+ predType = annot
+ break
+ }
+ }
+ if predType == "" {
+ return nil, "", nil
+ }
+ layerDt, err := content.ReadBlob(ctx, provider, layer)
+ if err != nil {
+ return nil, "", errors.Wrapf(err, "failed to read provenance layer %s", layer.Digest)
+ }
+ layerDt, err = decodeDSSE(layerDt, layer.MediaType)
+ if err != nil {
+ return nil, "", errors.Wrap(err, "failed to decode DSSE envelope")
+ }
+ var stmt struct {
+ Predicate json.RawMessage `json:"predicate"`
+ PredicateType string `json:"predicateType"`
+ }
+ if err := json.Unmarshal(layerDt, &stmt); err != nil {
+ return nil, "", errors.Wrap(err, "failed to unmarshal in-toto statement")
+ }
+ // Prefer the in-toto Statement's predicateType over the annotation when
+ // both are set — the annotation is a hint; the payload is canonical.
+ if stmt.PredicateType != "" {
+ predType = stmt.PredicateType
+ }
+ return stmt.Predicate, predType, nil
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index dafa7fa817a7..9401816966be 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -206,6 +206,8 @@ github.com/compose-spec/compose-go/v2/validation
# github.com/containerd/console v1.0.5
## explicit; go 1.13
github.com/containerd/console
+# github.com/containerd/containerd v1.7.30
+## explicit; go 1.24.0
# github.com/containerd/containerd/api v1.10.0
## explicit; go 1.23.0
github.com/containerd/containerd/api/services/content/v1
]