diff --git a/cmd/build.go b/cmd/build.go
index ba948aa..c2ceb4d 100644
--- a/cmd/build.go
+++ b/cmd/build.go
@@ -185,6 +185,7 @@ func addBuildFlags(cmd *cobra.Command) {
 	cmd.Flags().StringToString("docker-build-options", nil, "Options passed to all 'docker build' commands")
 	cmd.Flags().Bool("slsa-cache-verification", false, "Enable SLSA verification for cached artifacts")
 	cmd.Flags().String("slsa-source-uri", "", "Expected source URI for SLSA verification (required when verification enabled)")
+	cmd.Flags().Bool("in-flight-checksums", false, "Enable checksumming of cache artifacts to prevent TOCTOU attacks")
 	cmd.Flags().String("report", "", "Generate a HTML report after the build has finished. (e.g. --report myreport.html)")
 	cmd.Flags().String("report-segment", os.Getenv("LEEWAY_SEGMENT_KEY"), "Report build events to segment using the segment key (defaults to $LEEWAY_SEGMENT_KEY)")
 	cmd.Flags().Bool("report-github", os.Getenv("GITHUB_OUTPUT") != "", "Report package build success/failure to GitHub Actions using the GITHUB_OUTPUT environment variable")
@@ -318,6 +319,17 @@ func getBuildOpts(cmd *cobra.Command) ([]leeway.BuildOption, cache.LocalCache) {
 		log.Fatal(err)
 	}
 
+	// Get in-flight checksums setting (env var as default, CLI flag overrides)
+	inFlightChecksumsDefault := os.Getenv(EnvvarEnableInFlightChecksums) == "true"
+	inFlightChecksums, err := cmd.Flags().GetBool("in-flight-checksums")
+	if err != nil {
+		log.Fatal(err)
+	}
+	// If the flag wasn't explicitly set, use the environment variable
+	if !cmd.Flags().Changed("in-flight-checksums") {
+		inFlightChecksums = inFlightChecksumsDefault
+	}
+
 	return []leeway.BuildOption{
 		leeway.WithLocalCache(localCache),
 		leeway.WithRemoteCache(remoteCache),
@@ -332,6 +344,7 @@ func getBuildOpts(cmd *cobra.Command) ([]leeway.BuildOption, cache.LocalCache) {
 		leeway.WithCompressionDisabled(dontCompress),
 		leeway.WithFixedBuildDir(fixedBuildDir),
 		leeway.WithDisableCoverage(disableCoverage),
+		leeway.WithInFlightChecksums(inFlightChecksums),
 	}, localCache
 }
 
@@ -351,6 +364,10 @@ func (c *pushOnlyRemoteCache) Upload(ctx context.Context, src cache.LocalCache,
 	return c.C.Upload(ctx, src, pkgs)
 }
 
+func (c *pushOnlyRemoteCache) UploadFile(ctx context.Context, filePath string, key string) error {
+	return c.C.UploadFile(ctx, filePath, key)
+}
+
 type pullOnlyRemoteCache struct {
 	C cache.RemoteCache
 }
@@ -367,6 +384,10 @@ func (c *pullOnlyRemoteCache) Upload(ctx context.Context, src cache.LocalCache,
 	return nil
 }
 
+func (c *pullOnlyRemoteCache) UploadFile(ctx context.Context, filePath string, key string) error {
+	return nil
+}
+
 func getRemoteCacheFromEnv() cache.RemoteCache {
 	return getRemoteCache(nil)
 }
diff --git a/cmd/build_test.go b/cmd/build_test.go
new file mode 100644
index 0000000..e8a419c
--- /dev/null
+++ b/cmd/build_test.go
@@ -0,0 +1,242 @@
+package cmd
+
+import (
+	"os"
+	"testing"
+
+	"github.com/spf13/cobra"
+)
+
+func TestBuildCommandFlags(t *testing.T) {
+	tests := []struct {
+		name     string
+		args     []string
+		wantFlag string
+		wantVal  interface{}
+	}{
+		{
+			name:     "in-flight-checksums flag default",
+			args:     []string{},
+			wantFlag: "in-flight-checksums",
+			wantVal:  false,
+		},
+		{
+			name:     "in-flight-checksums flag enabled",
+			args:     []string{"--in-flight-checksums"},
+			wantFlag: "in-flight-checksums",
+			wantVal:  true,
+		},
+		{
+			name:     "in-flight-checksums flag explicitly disabled",
+			args:     []string{"--in-flight-checksums=false"},
+			wantFlag: "in-flight-checksums",
+			wantVal:  false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Create a new build command for each test
+			cmd := &cobra.Command{
+				Use: "build",
+				Run: func(cmd *cobra.Command, args []string) {
+					// No-op for testing
+				},
+			}
+
+			// Add the build flags
+			addBuildFlags(cmd)
+
+			// Set the args and parse
+			cmd.SetArgs(tt.args)
+			err := cmd.Execute()
+			if err != nil {
+				t.Fatalf("failed to execute command: %v", err)
+			}
+
+			// Check if the flag exists
+			flag := cmd.Flags().Lookup(tt.wantFlag)
+			if flag == nil {
+				t.Fatalf("flag %s not found", tt.wantFlag)
+			}
+
+			// Get the flag value
+			val, err := cmd.Flags().GetBool(tt.wantFlag)
+			if err != nil {
+				t.Fatalf("failed to get flag value: %v", err)
+			}
+
+			if val != tt.wantVal {
+				t.Errorf("expected flag %s to be %v, got %v", tt.wantFlag, tt.wantVal, val)
+			}
+		})
+	}
+}
+
+func TestInFlightChecksumsEnvironmentVariable(t *testing.T) {
+	tests := []struct {
+		name      string
+		envValue  string
+		flagValue string
+		flagSet   bool
+		expected  bool
+	}{
+		{
+			name:     "env var enabled, no flag",
+			envValue: "true",
+			expected: true,
+		},
+		{
+			name:     "env var disabled, no flag",
+			envValue: "false",
+			expected: false,
+		},
+		{
+			name:     "no env var, no flag",
+			envValue: "",
+			expected: false,
+		},
+		{
+			name:      "env var enabled, flag explicitly disabled",
+			envValue:  "true",
+			flagValue: "false",
+			flagSet:   true,
+			expected:  false, // Flag should override
+		},
+		{
+			name:      "env var disabled, flag explicitly enabled",
+			envValue:  "false",
+			flagValue: "true",
+			flagSet:   true,
+			expected:  true, // Flag should override
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Set environment variable using t.Setenv for proper cleanup
+			if tt.envValue != "" {
+				t.Setenv("LEEWAY_ENABLE_IN_FLIGHT_CHECKSUMS", tt.envValue)
+			}
+
+			// Create test command
+			cmd := &cobra.Command{
+				Use: "build",
+				Run: func(cmd *cobra.Command, args []string) {},
+			}
+
+			addBuildFlags(cmd)
+
+			// Set flag if specified
+			if tt.flagSet {
+				err := cmd.Flags().Set("in-flight-checksums", tt.flagValue)
+				if err != nil {
+					t.Fatalf("failed to set flag: %v", err)
+				}
+			}
+
+			// Test the actual logic from getBuildOpts
+			inFlightChecksumsDefault := os.Getenv("LEEWAY_ENABLE_IN_FLIGHT_CHECKSUMS") == "true"
+			inFlightChecksums, err := cmd.Flags().GetBool("in-flight-checksums")
+			if err != nil {
+				t.Fatalf("failed to get flag: %v", err)
+			}
+			// If the flag wasn't explicitly set, use the environment variable
+			if !cmd.Flags().Changed("in-flight-checksums") {
+				inFlightChecksums = inFlightChecksumsDefault
+			}
+
+			if inFlightChecksums != tt.expected {
+				t.Errorf("expected in-flight checksums to be %v, got %v", tt.expected, inFlightChecksums)
+			}
+		})
+	}
+}
+
+func TestBuildCommandHelpText(t *testing.T) {
+	cmd := &cobra.Command{
+		Use: "build",
+		Run: func(cmd *cobra.Command, args []string) {
+			// No-op for testing
+		},
+	}
+
+	addBuildFlags(cmd)
+
+	// Check that the in-flight-checksums flag is documented
+	flag := cmd.Flags().Lookup("in-flight-checksums")
+	if flag == nil {
+		t.Fatal("in-flight-checksums flag not found")
+	}
+
+	expectedUsage := "Enable checksumming of cache artifacts to prevent TOCTOU attacks"
+	if flag.Usage != expectedUsage {
+		t.Errorf("expected flag usage to be %q, got %q", expectedUsage, flag.Usage)
+	}
+
+	// Verify it's a boolean flag
+	if flag.Value.Type() != "bool" {
+		t.Errorf("expected flag type to be bool, got %s", flag.Value.Type())
+	}
+
+	// Verify default value
+	if flag.DefValue != "false" {
+		t.Errorf("expected default value to be false, got %s", flag.DefValue)
+	}
+}
+
+func TestGetBuildOptsWithInFlightChecksums(t *testing.T) {
+	tests := []struct {
+		name                    string
+		inFlightChecksumsFlag   bool
+		expectInFlightChecksums bool
+	}{
+		{
+			name:                    "in-flight checksums disabled",
+			inFlightChecksumsFlag:   false,
+			expectInFlightChecksums: false,
+		},
+		{
+			name:                    "in-flight checksums enabled",
+			inFlightChecksumsFlag:   true,
+			expectInFlightChecksums: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			cmd := &cobra.Command{
+				Use: "build",
+				Run: func(cmd *cobra.Command, args []string) {
+					// No-op for testing
+				},
+			}
+
+			addBuildFlags(cmd)
+
+			// Set the flag value
+			err := cmd.Flags().Set("in-flight-checksums", "false")
+			if tt.inFlightChecksumsFlag {
+				err = cmd.Flags().Set("in-flight-checksums", "true")
+			}
+			if err != nil {
+				t.Fatalf("failed to set flag: %v", err)
+			}
+
+			// Test getBuildOpts function
+			opts, localCache := getBuildOpts(cmd)
+
+			// We can't directly test the WithInFlightChecksums option since it's internal,
+			// but we can verify the function doesn't error and returns options
+			if opts == nil {
+				t.Error("expected build options but got nil")
+			}
+			if localCache == nil {
+				t.Error("expected local cache but got nil")
+			}
+
+			// The actual verification of the in-flight checksums option would need
+			// to be done through integration tests or by exposing the option state
+		})
+	}
+}
diff --git a/cmd/root.go b/cmd/root.go
index 35d772b..4664781 100644
--- a/cmd/root.go
+++ b/cmd/root.go
@@ -30,6 +30,9 @@ const (
 
 	// EnvvarSLSASourceURI configures the expected source URI for SLSA verification
 	EnvvarSLSASourceURI = "LEEWAY_SLSA_SOURCE_URI"
+
+	// EnvvarEnableInFlightChecksums enables in-flight checksumming of cache artifacts
+	EnvvarEnableInFlightChecksums = "LEEWAY_ENABLE_IN_FLIGHT_CHECKSUMS"
 )
 
 const (
@@ -99,6 +102,7 @@ variables have an effect on leeway:
 LEEWAY_DEFAULT_CACHE_LEVEL          Sets the default cache level for builds. Defaults to "remote".
 LEEWAY_SLSA_CACHE_VERIFICATION      Enables SLSA verification for cached artifacts (true/false).
 LEEWAY_SLSA_SOURCE_URI              Expected source URI for SLSA verification (github.com/owner/repo).
+LEEWAY_ENABLE_IN_FLIGHT_CHECKSUMS   Enables checksumming of cache artifacts (true/false).
 LEEWAY_EXPERIMENTAL                 Enables experimental leeway features and commands.
 `),
 	PersistentPreRun: func(cmd *cobra.Command, args []string) {
diff --git a/cmd/sign-cache.go b/cmd/sign-cache.go
new file mode 100644
index 0000000..171ab81
--- /dev/null
+++ b/cmd/sign-cache.go
@@ -0,0 +1,291 @@
+package cmd
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strings"
+	"sync"
+
+	log "github.com/sirupsen/logrus"
+	"github.com/spf13/cobra"
+
+	"github.com/gitpod-io/leeway/pkg/leeway/cache"
+	"github.com/gitpod-io/leeway/pkg/leeway/signing"
+)
+
+// signCacheCmd represents the sign-cache command
+var signCacheCmd = &cobra.Command{
+	Use:   "sign-cache --from-manifest <file>",
+	Short: "Signs and uploads cache artifacts using a manifest (CI use only)",
+	Long: `Reads artifact paths from a manifest file, generates SLSA attestations,
+and uploads to the remote cache with write-only credentials.
+
+This command is designed for CI environments where build and signing are
+separated for security. The build job creates a manifest of artifacts to sign,
+and this command consumes that manifest to generate cryptographic attestations.
+
+Example:
+  leeway plumbing sign-cache --from-manifest artifacts-to-sign.txt
+  leeway plumbing sign-cache --from-manifest artifacts.txt --dry-run`,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		manifestPath, _ := cmd.Flags().GetString("from-manifest")
+		dryRun, _ := cmd.Flags().GetBool("dry-run")
+
+		if manifestPath == "" {
+			return fmt.Errorf("--from-manifest flag is required")
+		}
+
+		// Validate manifest file exists
+		if _, err := os.Stat(manifestPath); os.IsNotExist(err) {
+			return fmt.Errorf("manifest file does not exist: %s", manifestPath)
+		}
+
+		return runSignCache(cmd.Context(), cmd, manifestPath, dryRun)
+	},
+}
+
+func init() {
+	plumbingCmd.AddCommand(signCacheCmd)
+	signCacheCmd.Flags().String("from-manifest", "", "Path to newline-separated artifact paths file")
+	signCacheCmd.Flags().Bool("dry-run", false, "Log actions without signing or uploading")
+	_ = signCacheCmd.MarkFlagRequired("from-manifest")
+}
+
+// runSignCache implements the main signing logic
+func runSignCache(ctx context.Context, cmd *cobra.Command, manifestPath string, dryRun bool) error {
+	log.WithFields(log.Fields{
+		"manifest": manifestPath,
+		"dry_run":  dryRun,
+	}).Info("Starting cache artifact signing process")
+
+	if dryRun {
+		log.Info("DRY-RUN MODE: No actual signing or uploading will occur")
+	}
+
+	// Get workspace configuration using existing Leeway patterns
+	ws, err := getWorkspace()
+	if err != nil {
+		return fmt.Errorf("failed to get workspace: %w", err)
+	}
+
+	// Get remote cache using existing Leeway patterns
+	remoteCache := getRemoteCacheFromEnv()
+	if remoteCache == nil {
+		return fmt.Errorf("remote cache not configured - set LEEWAY_REMOTE_CACHE_BUCKET and LEEWAY_REMOTE_CACHE_STORAGE")
+	}
+
+	log.WithFields(log.Fields{
+		"workspace":  ws.Origin,
+		"cache_type": fmt.Sprintf("%T", remoteCache),
+	}).Info("Initialized workspace and remote cache")
+
+	// Validate GitHub context for CI environment
+	githubCtx := signing.GetGitHubContext()
+	if err := githubCtx.Validate(); err != nil {
+		return fmt.Errorf("invalid GitHub context - this command must run in GitHub Actions: %w", err)
+	}
+
+	shaDisplay := githubCtx.SHA
+	if len(shaDisplay) > 8 {
+		shaDisplay = shaDisplay[:8] + "..."
+	}
+
+	log.WithFields(log.Fields{
+		"repository": githubCtx.Repository,
+		"run_id":     githubCtx.RunID,
+		"sha":        shaDisplay,
+	}).Info("Validated GitHub Actions context")
+
+	// Parse and validate manifest
+	artifacts, err := parseManifest(manifestPath)
+	if err != nil {
+		return fmt.Errorf("failed to parse manifest: %w", err)
+	}
+
+	if len(artifacts) == 0 {
+		log.Warn("No artifacts found in manifest")
+		return nil
+	}
+
+	log.WithField("artifacts", len(artifacts)).Info("Found artifacts to sign")
+
+	// Process artifacts with bounded concurrency to avoid overwhelming Sigstore
+	const maxConcurrency = 5             // Reasonable limit for Sigstore API
+	const maxAcceptableFailureRate = 0.5 // Fail command if more than 50% of artifacts fail
+	semaphore := make(chan struct{}, maxConcurrency)
+
+	var successful []string
+	var failed []*signing.SigningError
+	var mu sync.Mutex
+	var wg sync.WaitGroup
+
+	// Track temporary files for cleanup
+	var tempFiles []string
+	defer func() {
+		// Clean up all temporary files
+		for _, tempFile := range tempFiles {
+			if err := os.Remove(tempFile); err != nil && !os.IsNotExist(err) {
+				log.WithError(err).WithField("file", tempFile).Warn("Failed to clean up temporary file")
+			}
+		}
+	}()
+
+	for _, artifact := range artifacts {
+		wg.Add(1)
+		go func(artifactPath string) {
+			defer wg.Done()
+
+			// Acquire semaphore
+			semaphore <- struct{}{}
+			defer func() { <-semaphore }()
+
+			log.WithField("artifact", artifactPath).Debug("Starting artifact processing")
+
+			if err := processArtifact(ctx, artifactPath, githubCtx, remoteCache, dryRun); err != nil {
+				signingErr := signing.CategorizeError(artifactPath, err)
+
+				mu.Lock()
+				failed = append(failed, signingErr)
+				mu.Unlock()
+
+				log.WithFields(log.Fields{
+					"artifact":   artifactPath,
+					"error_type": signingErr.Type,
+				}).WithError(err).Error("Failed to process artifact")
+			} else {
+				mu.Lock()
+				successful = append(successful, artifactPath)
+				mu.Unlock()
+
+				log.WithField("artifact", artifactPath).Debug("Successfully processed artifact")
+			}
+		}(artifact)
+	}
+
+	// Wait for all goroutines to complete
+	wg.Wait()
+
+	// Report final results
+	log.WithFields(log.Fields{
+		"successful": len(successful),
+		"failed":     len(failed),
+		"total":      len(artifacts),
+	}).Info("Artifact signing process completed")
+
+	// Determine exit strategy based on failure ratio
+	if len(failed) > 0 {
+		failureRate := float64(len(failed)) / float64(len(artifacts))
+
+		// Log detailed failure information
+		for _, failure := range failed {
+			log.WithFields(log.Fields{
+				"type":     failure.Type,
+				"artifact": failure.Artifact,
+			}).Error(failure.Message)
+		}
+
+		if failureRate > maxAcceptableFailureRate {
+			return fmt.Errorf("signing failed for %d/%d artifacts (%.1f%% failure rate)",
+				len(failed), len(artifacts), failureRate*100)
+		} else {
+			log.WithField("failure_rate", fmt.Sprintf("%.1f%%", failureRate*100)).
+				Warn("Partial signing failure - continuing with available artifacts")
+		}
+	}
+
+	log.Info("Cache artifact signing process completed")
+	return nil
+}
+
+// processArtifact handles signing and uploading of a single artifact using integrated SLSA signing
+func processArtifact(ctx context.Context, artifactPath string, githubCtx *signing.GitHubContext, remoteCache cache.RemoteCache, dryRun bool) error {
+	log.WithFields(log.Fields{
+		"artifact": artifactPath,
+		"dry_run":  dryRun,
+	}).Debug("Processing artifact with integrated SLSA signing")
+
+	if dryRun {
+		log.WithField("artifact", artifactPath).Info("DRY-RUN: Would generate signed SLSA attestation and upload")
+		return nil
+	}
+
+	// Single step: generate and sign SLSA attestation using integrated approach
+	signedAttestation, err := signing.GenerateSignedSLSAAttestation(ctx, artifactPath, githubCtx)
+	if err != nil {
+		return fmt.Errorf("failed to generate signed attestation: %w", err)
+	}
+
+	log.WithFields(log.Fields{
+		"artifact":         artifactPath,
+		"artifact_name":    signedAttestation.ArtifactName,
+		"checksum":         signedAttestation.Checksum[:16] + "...",
+		"attestation_size": len(signedAttestation.AttestationBytes),
+	}).Info("Successfully generated signed SLSA attestation")
+
+	// Upload artifact + .att file using existing RemoteCache patterns
+	uploader := signing.NewArtifactUploader(remoteCache)
+	if err := uploader.UploadArtifactWithAttestation(ctx, artifactPath, signedAttestation.AttestationBytes); err != nil {
+		return fmt.Errorf("failed to upload to remote cache: %w", err)
+	}
+
+	log.WithField("artifact", artifactPath).Info("Successfully uploaded signed artifact and attestation to remote cache")
+	return nil
+}
+
+// parseManifest reads and validates the manifest file
+func parseManifest(manifestPath string) ([]string, error) {
+	log.WithField("manifest", manifestPath).Debug("Parsing manifest file")
+
+	content, err := os.ReadFile(manifestPath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read manifest file: %w", err)
+	}
+
+	if len(content) == 0 {
+		return nil, fmt.Errorf("manifest file is empty")
+	}
+
+	// Split by newlines and filter empty lines
+	lines := strings.Split(string(content), "\n")
+	var artifacts []string
+	var validationErrors []string
+
+	for i, line := range lines {
+		line = strings.TrimSpace(line)
+		if line == "" {
+			continue // Skip empty lines
+		}
+
+		// Validate artifact path exists and is readable
+		if stat, err := os.Stat(line); os.IsNotExist(err) {
+			validationErrors = append(validationErrors, fmt.Sprintf("line %d: artifact not found: %s", i+1, line))
+			continue
+		} else if err != nil {
+			validationErrors = append(validationErrors, fmt.Sprintf("line %d: cannot access artifact: %s (%v)", i+1, line, err))
+			continue
+		} else if stat.IsDir() {
+			validationErrors = append(validationErrors, fmt.Sprintf("line %d: path is a directory, not a file: %s", i+1, line))
+			continue
+		}
+
+		// Validate it looks like a cache artifact (basic heuristic)
+		if !strings.HasSuffix(line, ".tar.gz") && !strings.HasSuffix(line, ".tar") {
+			log.WithField("artifact", line).Warn("Artifact does not have expected extension (.tar.gz or .tar)")
+		}
+
+		artifacts = append(artifacts, line)
+	}
+
+	// Report validation errors if any
+	if len(validationErrors) > 0 {
+		return nil, fmt.Errorf("manifest validation failed:\n%s", strings.Join(validationErrors, "\n"))
+	}
+
+	log.WithFields(log.Fields{
+		"total_lines": len(lines),
+		"artifacts":   len(artifacts),
+	}).Debug("Successfully parsed manifest")
+
+	return artifacts, nil
+}
diff --git a/go.mod b/go.mod
index 6708fe9..386be71 100644
--- a/go.mod
+++ b/go.mod
@@ -27,6 +27,7 @@ require (
 	github.com/opencontainers/runtime-spec v1.1.0
 	github.com/segmentio/analytics-go/v3 v3.3.0
 	github.com/segmentio/textio v1.2.0
+	github.com/sigstore/sigstore-go v1.1.2
 	github.com/sirupsen/logrus v1.9.3
 	github.com/slsa-framework/slsa-verifier/v2 v2.6.0
 	github.com/spf13/cobra v1.10.1
@@ -49,6 +50,7 @@ require (
 	cloud.google.com/go/kms v1.22.0 // indirect
 	cloud.google.com/go/longrunning v0.6.7 // indirect
 	cloud.google.com/go/monitoring v1.24.2 // indirect
+	cloud.google.com/go/spanner v1.84.1 // indirect
 	cloud.google.com/go/storage v1.56.1 // indirect
 	dario.cat/mergo v1.0.2 // indirect
 	filippo.io/edwards25519 v1.1.0 // indirect
@@ -63,6 +65,7 @@ require (
 	github.com/BurntSushi/toml v1.4.0 // indirect
 	github.com/CycloneDX/cyclonedx-go v0.9.2 // indirect
 	github.com/DataDog/zstd v1.5.5 // indirect
+	github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.3 // indirect
 	github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 // indirect
 	github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 // indirect
 	github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 // indirect
@@ -138,6 +141,7 @@ require (
 	github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
 	github.com/containerd/ttrpc v1.2.7 // indirect
 	github.com/containerd/typeurl/v2 v2.2.0 // indirect
+	github.com/coreos/go-oidc/v3 v3.14.1 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
 	github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect
 	github.com/cyphar/filepath-securejoin v0.4.1 // indirect
@@ -213,6 +217,7 @@ require (
 	github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
 	github.com/googleapis/gax-go/v2 v2.15.0 // indirect
 	github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect
 	github.com/hako/durafmt v0.0.0-20210608085754-5c1018a4e16b // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
@@ -225,6 +230,7 @@ require (
 	github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
 	github.com/hashicorp/go-sockaddr v1.0.5 // indirect
 	github.com/hashicorp/go-version v1.7.0 // indirect
+	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
 	github.com/hashicorp/hcl v1.0.1-vault-5 // indirect
 	github.com/hashicorp/hcl/v2 v2.23.0 // indirect
 	github.com/hashicorp/vault/api v1.16.0 // indirect
@@ -309,6 +315,7 @@ require (
 	github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 // indirect
 	github.com/secure-systems-lab/go-securesystemslib v0.9.1 // indirect
 	github.com/segmentio/backo-go v1.0.0 // indirect
+	github.com/segmentio/ksuid v1.0.4 // indirect
 	github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
 	github.com/shibumi/go-pathspec v1.3.0 // indirect
 	github.com/shopspring/decimal v1.4.0 // indirect
@@ -316,14 +323,15 @@ require (
 	github.com/sigstore/fulcio v1.4.5 // indirect
 	github.com/sigstore/protobuf-specs v0.5.0 // indirect
 	github.com/sigstore/rekor v1.4.2 // indirect
+	github.com/sigstore/rekor-tiles v0.1.10 // indirect
 	github.com/sigstore/sigstore v1.9.6-0.20250729224751-181c5d3339b3 // indirect
-	github.com/sigstore/sigstore-go v1.1.2 // indirect
 	github.com/sigstore/sigstore/pkg/signature/kms/aws v1.9.5 // indirect
 	github.com/sigstore/sigstore/pkg/signature/kms/azure v1.9.5 // indirect
 	github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.9.6-0.20250729224751-181c5d3339b3 // indirect
 	github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.9.5 // indirect
 	github.com/sigstore/timestamp-authority v1.2.8 // indirect
 	github.com/skeema/knownhosts v1.3.1 // indirect
+	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
 	github.com/slsa-framework/slsa-github-generator v1.9.0 // indirect
 	github.com/spdx/gordf v0.0.0-20201111095634-7098f93598fb // indirect
 	github.com/spdx/tools-golang v0.5.5 // indirect
@@ -343,7 +351,9 @@ require (
 	github.com/tink-crypto/tink-go-gcpkms/v2 v2.2.0 // indirect
 	github.com/tink-crypto/tink-go/v2 v2.4.0 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
+	github.com/transparency-dev/formats v0.0.0-20250421220931-bb8ad4d07c26 // indirect
 	github.com/transparency-dev/merkle v0.0.2 // indirect
+	github.com/transparency-dev/tessera v1.0.0-rc2 // indirect
 	github.com/ulikunitz/xz v0.5.14 // indirect
 	github.com/vbatts/go-mtree v0.5.4 // indirect
 	github.com/vbatts/tar-split v0.12.1 // indirect
@@ -362,12 +372,10 @@ require (
 	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
 	go.opentelemetry.io/otel v1.37.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 // indirect
 	go.opentelemetry.io/otel/metric v1.37.0 // indirect
 	go.opentelemetry.io/otel/sdk v1.37.0 // indirect
 	go.opentelemetry.io/otel/sdk/metric v1.37.0 // indirect
 	go.opentelemetry.io/otel/trace v1.37.0 // indirect
-	go.opentelemetry.io/proto/otlp v1.7.0 // indirect
 	go.step.sm/crypto v0.70.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
diff --git a/go.sum b/go.sum
index 24eb4f3..b6bf25b 100644
--- a/go.sum
+++ b/go.sum
@@ -532,6 +532,8 @@ cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+
 cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos=
 cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk=
 cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M=
+cloud.google.com/go/spanner v1.84.1 h1:ShH4Y3YeDtmHa55dFiSS3YtQ0dmCuP0okfAoHp/d68w=
+cloud.google.com/go/spanner v1.84.1/go.mod h1:3GMEIjOcXINJSvb42H3M6TdlGCDzaCFpiiNQpjHPlCM=
 cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM=
 cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ=
 cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0=
@@ -683,6 +685,8 @@ github.com/CycloneDX/cyclonedx-go v0.9.2/go.mod h1:vcK6pKgO1WanCdd61qx4bFnSsDJQ6
 github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
 github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ=
 github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
+github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.3 h1:2afWGsMzkIcN8Qm4mgPJKZWyroE5QBszMiDMYEBrnfw=
+github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.3/go.mod h1:dppbR7CwXD4pgtV9t3wD1812RaLDcBjtblcDF5f1vI0=
 github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 h1:UQUsRi8WTzhZntp5313l+CHIAT95ojUI2lpP/ExlZa4=
 github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0/go.mod h1:Cz6ft6Dkn3Et6l2v2a9/RpN7epQ1GtDlO6lj8bEcOvw=
 github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 h1:owcC2UnmsZycprQ5RfRgjydWhuoxg71LUfyiQdijZuM=
@@ -1212,6 +1216,8 @@ github.com/go-piv/piv-go v1.11.0 h1:5vAaCdRTFSIW4PeqMbnsDlUZ7odMYWnHBDGdmtU/Zhg=
 github.com/go-piv/piv-go v1.11.0/go.mod h1:NZ2zmjVkfFaL/CF8cVQ/pXdXtuj110zEKGdJM6fJZZM=
 github.com/go-restruct/restruct v1.2.0-alpha h1:2Lp474S/9660+SJjpVxoKuWX09JsXHSrdV7Nv3/gkvc=
 github.com/go-restruct/restruct v1.2.0-alpha/go.mod h1:KqrpKpn4M8OLznErihXTGLlsXFGeLxHUrLRRI/1YjGk=
+github.com/go-rod/rod v0.116.2 h1:A5t2Ky2A+5eD/ZJQr1EfsQSe5rms5Xof/qj296e+ZqA=
+github.com/go-rod/rod v0.116.2/go.mod h1:H+CMO9SCNc2TJ2WfrG+pKhITz57uGNYU43qYHh438Mg=
 github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU=
 github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
 github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
@@ -1389,7 +1395,6 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
 github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
@@ -1444,6 +1449,8 @@ github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
 github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM=
 github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM=
@@ -1479,6 +1486,14 @@ github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
+github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
+github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs=
+github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
+github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
+github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
 github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY=
@@ -1857,6 +1872,8 @@ github.com/sigstore/protobuf-specs v0.5.0 h1:F8YTI65xOHw70NrvPwJ5PhAzsvTnuJMGLkA
 github.com/sigstore/protobuf-specs v0.5.0/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc=
 github.com/sigstore/rekor v1.4.2 h1:Lx2xby7loviFYdg2C9pB1mESk2QU/LqcYSGsqqZwmg8=
 github.com/sigstore/rekor v1.4.2/go.mod h1:nX/OYaLqpTeCOuMEt7ELE0+5cVjZWFnFKM+cZ+3hQRA=
+github.com/sigstore/rekor-tiles v0.1.10 h1:10LVWV+isl43KpjmAID/DH/wT7LeYj3j0eW5pVu6SXE=
+github.com/sigstore/rekor-tiles v0.1.10/go.mod h1:SDtO+1nGYo6hEPTyshgd4EFDP3gZyZuVCUukBCqaqz0=
 github.com/sigstore/sigstore v1.9.6-0.20250729224751-181c5d3339b3 h1:IEhSeWfhTd0kaBpHUXniWU2Tl5K5OUACN69mi1WGd+8=
 github.com/sigstore/sigstore v1.9.6-0.20250729224751-181c5d3339b3/go.mod h1:JuqyPRJYnkNl6OTnQiG503EUnKih4P5EV6FUw+1B0iA=
 github.com/sigstore/sigstore-go v1.1.2 h1:VFfqVQvUm3m7uAiqJFK+hDSH3I2rCBYH4l3wIDy+Ozo=
@@ -1972,8 +1989,12 @@ github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C
 github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
 github.com/tjfoc/gmsm v1.4.1 h1:aMe1GlZb+0bLjn+cKTPEvvn9oUEBlJitaZiiBwsbgho=
 github.com/tjfoc/gmsm v1.4.1/go.mod h1:j4INPkHWMrhJb38G+J6W4Tw0AbuN8Thu3PbdVYhVcTE=
+github.com/transparency-dev/formats v0.0.0-20250421220931-bb8ad4d07c26 h1:YTbkeFbzcer+42bIgo6Za2194nKwhZPgaZKsP76QffE=
+github.com/transparency-dev/formats v0.0.0-20250421220931-bb8ad4d07c26/go.mod h1:ODywn0gGarHMMdSkWT56ULoK8Hk71luOyRseKek9COw=
 github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4=
 github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A=
+github.com/transparency-dev/tessera v1.0.0-rc2 h1:BKtDWr0nhL9dG66cS4DyKU9lpZFbUZrpHGh+BpqakcU=
+github.com/transparency-dev/tessera v1.0.0-rc2/go.mod h1:aaLlvG/sEPMzT96iIF4hua6Z9pLzkfDtkbaUAR4IL8I=
 github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
 github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
 github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
@@ -2011,6 +2032,16 @@ github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZ
 github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
 github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg=
 github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok=
+github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ=
+github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns=
+github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ=
+github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18=
+github.com/ysmood/got v0.40.0 h1:ZQk1B55zIvS7zflRrkGfPDrPG3d7+JOza1ZkNxcc74Q=
+github.com/ysmood/got v0.40.0/go.mod h1:W7DdpuX6skL3NszLmAsC5hT7JAhuLZhByVzHTq874Qg=
+github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE=
+github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg=
+github.com/ysmood/leakless v0.9.0 h1:qxCG5VirSBvmi3uynXFkcnLMzkphdh3xx5FtrORwDCU=
+github.com/ysmood/leakless v0.9.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ=
 github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
diff --git a/pkg/leeway/build.go b/pkg/leeway/build.go
index c1b38b8..69adf92 100644
--- a/pkg/leeway/build.go
+++ b/pkg/leeway/build.go
@@ -71,6 +71,11 @@ type buildContext struct {
 	pkgLockCond *sync.Cond
 	pkgLocks    map[string]struct{}
 	buildLimit  *semaphore.Weighted
+
+	// For in-flight checksumming
+	InFlightChecksums      bool              // Feature enabled flag
+	artifactChecksums      map[string]string // path -> sha256 hex
+	artifactChecksumsMutex sync.RWMutex      // Thread safety for parallel builds
 }
 
 const (
@@ -145,6 +150,14 @@ func newBuildContext(options buildOptions) (ctx *buildContext, err error) {
 		return nil, xerrors.Errorf("cannot compute hash of myself: %w", err)
 	}
 
+	// Initialize checksum storage based on feature flag
+	var checksumMap map[string]string
+	if options.InFlightChecksums {
+		checksumMap = make(map[string]string)
+	} else {
+		checksumMap = nil // Disable feature completely
+	}
+
 	ctx = &buildContext{
 		buildOptions: options,
 		buildDir:     buildDir,
@@ -154,6 +167,9 @@ func newBuildContext(options buildOptions) (ctx *buildContext, err error) {
 		pkgLocks:   make(map[string]struct{}),
 		buildLimit: buildLimit,
 		leewayHash: hex.EncodeToString(leewayHash.Sum(nil)),
+		// In-flight checksumming initialization
+		InFlightChecksums: options.InFlightChecksums,
+		artifactChecksums: checksumMap,
 	}
 
 	err = os.MkdirAll(buildDir, 0755)
@@ -251,6 +267,115 @@ func (c *buildContext) GetNewPackagesForCache() []*Package {
 	return res
 }
 
+func (ctx *buildContext) recordArtifactChecksum(path string) error {
+	if ctx.artifactChecksums == nil {
+		return nil
+	}
+
+	checksum, err := computeSHA256(path)
+	if err != nil {
+		return fmt.Errorf("failed to compute checksum for %s: %w", path, err)
+	}
+
+	// Thread-safe storage
+	ctx.artifactChecksumsMutex.Lock()
+	ctx.artifactChecksums[path] = checksum
+	ctx.artifactChecksumsMutex.Unlock()
+
+	log.WithFields(log.Fields{
+		"artifact": path,
+		"checksum": checksum[:16] + "...", // Log first 16 chars for debugging
+	}).Debug("Recorded cache artifact checksum")
+
+	return nil
+}
+
+// verifyArtifactChecksum checks a cache artifact against its recorded checksum
+func (ctx *buildContext) verifyArtifactChecksum(path string) error {
+	if ctx.artifactChecksums == nil {
+		return nil
+	}
+
+	// Get stored checksum
+	ctx.artifactChecksumsMutex.RLock()
+	expectedChecksum, exists := ctx.artifactChecksums[path]
+	ctx.artifactChecksumsMutex.RUnlock()
+
+	if !exists {
+		return nil // Not tracked, skip verification
+	}
+
+	// Compute current checksum
+	actualChecksum, err := computeSHA256(path)
+	if err != nil {
+		return fmt.Errorf("failed to verify checksum for %s: %w", path, err)
+	}
+
+	// Detect tampering
+	if expectedChecksum != actualChecksum {
+		return fmt.Errorf("cache artifact %s modified (expected: %s..., actual: %s...)",
+			path, expectedChecksum[:16], actualChecksum[:16])
+	}
+
+	return nil
+}
+
+// computeSHA256 computes the SHA256 hash of a file
+func computeSHA256(filePath string) (string, error) {
+	file, err := os.Open(filePath)
+	if err != nil {
+		return "", err
+	}
+	defer func() { _ = file.Close() }()
+
+	hash := sha256.New()
+	if _, err := io.Copy(hash, file); err != nil {
+		return "", err
+	}
+
+	return hex.EncodeToString(hash.Sum(nil)), nil
+}
+
+// verifyAllArtifactChecksums verifies all tracked cache artifacts before signing handoff
+func verifyAllArtifactChecksums(buildctx *buildContext) error {
+	if buildctx.artifactChecksums == nil {
+		return nil // Feature disabled
+	}
+
+	// Get snapshot of all artifacts to verify
+	buildctx.artifactChecksumsMutex.RLock()
+	checksumCount := len(buildctx.artifactChecksums)
+	artifactsToVerify := make([]string, 0, checksumCount)
+	for path := range buildctx.artifactChecksums {
+		artifactsToVerify = append(artifactsToVerify, path)
+	}
+	buildctx.artifactChecksumsMutex.RUnlock()
+
+	if checksumCount == 0 {
+		log.Debug("No cache artifacts to verify")
+		return nil
+	}
+
+	log.WithField("artifacts", checksumCount).Info("Verifying cache artifact integrity")
+
+	// Verify each artifact
+	var verificationErrors []string
+	for _, path := range artifactsToVerify {
+		if err := buildctx.verifyArtifactChecksum(path); err != nil {
+			verificationErrors = append(verificationErrors, err.Error())
+		}
+	}
+
+	// Report results
+	if len(verificationErrors) > 0 {
+		return fmt.Errorf("checksum verification failures:\n%s",
+			strings.Join(verificationErrors, "\n"))
+	}
+
+	log.WithField("artifacts", checksumCount).Info("All cache artifacts verified successfully")
+	return nil
+}
+
 type buildOptions struct {
 	LocalCache  cache.LocalCache
 	RemoteCache cache.RemoteCache
@@ -266,6 +391,7 @@ type buildOptions struct {
 	JailedExecution  bool
 	UseFixedBuildDir bool
 	DisableCoverage  bool
+	InFlightChecksums bool
 
 	context *buildContext
 }
@@ -381,6 +507,14 @@ func WithDisableCoverage(disableCoverage bool) BuildOption {
 	}
 }
 
+// WithInFlightChecksums enables checksumming of cache artifacts to prevent TOCTOU attacks
+func WithInFlightChecksums(enabled bool) BuildOption {
+	return func(opts *buildOptions) error {
+		opts.InFlightChecksums = enabled
+		return nil
+	}
+}
+
 func withBuildContext(ctx *buildContext) BuildOption {
 	return func(opts *buildOptions) error {
 		opts.context = ctx
@@ -564,6 +698,13 @@ func Build(pkg *Package, opts ...BuildOption) (err error) {
 		}
 	}
 
+	// Verify all cache artifact checksums before signing handoff
+	if ctx.InFlightChecksums {
+		if err := verifyAllArtifactChecksums(ctx); err != nil {
+			return fmt.Errorf("cache artifact integrity check failed - potential TOCTOU attack detected: %w", err)
+		}
+	}
+
 	return nil
 }
@@ -785,6 +926,14 @@ func (p *Package) build(buildctx *buildContext) (err error) {
 		}
 	}
 
+	// Record checksum immediately after cache artifact creation
+	if cacheArtifactPath, exists := buildctx.LocalCache.Location(p); exists {
+		if err := buildctx.recordArtifactChecksum(cacheArtifactPath); err != nil {
+			log.WithError(err).WithField("package", p.FullName()).Warn("Failed to record cache artifact checksum")
+			// Don't fail the build - this is defensive, not on the critical path
+		}
+	}
+
 	// Register newly built package
 	return buildctx.RegisterNewlyBuilt(p)
 }
diff --git a/pkg/leeway/build_checksum_test.go b/pkg/leeway/build_checksum_test.go
new file mode 100644
index 0000000..6b04a3a
--- /dev/null
+++ b/pkg/leeway/build_checksum_test.go
@@ -0,0 +1,142 @@
+package leeway
+
+import (
+	"fmt"
"os" + "path/filepath" + "strings" + "testing" +) + +func TestRecordArtifactChecksum(t *testing.T) { + // Test checksum recording works correctly + tmpDir := t.TempDir() + testArtifact := filepath.Join(tmpDir, "test.tar.gz") + err := os.WriteFile(testArtifact, []byte("test content"), 0644) + if err != nil { + t.Fatal(err) + } + + ctx := &buildContext{ + InFlightChecksums: true, + artifactChecksums: make(map[string]string), + } + + err = ctx.recordArtifactChecksum(testArtifact) + if err != nil { + t.Errorf("recordArtifactChecksum failed: %v", err) + } + + if len(ctx.artifactChecksums) != 1 { + t.Errorf("Expected 1 checksum, got %d", len(ctx.artifactChecksums)) + } +} + +func TestVerifyArtifactChecksum(t *testing.T) { + tmpDir := t.TempDir() + testArtifact := filepath.Join(tmpDir, "test.tar.gz") + err := os.WriteFile(testArtifact, []byte("test content"), 0644) + if err != nil { + t.Fatal(err) + } + + ctx := &buildContext{ + InFlightChecksums: true, + artifactChecksums: make(map[string]string), + } + + // Record initial checksum + err = ctx.recordArtifactChecksum(testArtifact) + if err != nil { + t.Fatal(err) + } + + // Verify unmodified file passes + err = ctx.verifyArtifactChecksum(testArtifact) + if err != nil { + t.Errorf("Verification should pass for unmodified file: %v", err) + } + + // Modify file to simulate TOCTU attack + err = os.WriteFile(testArtifact, []byte("tampered content"), 0644) + if err != nil { + t.Fatal(err) + } + + // Verify modified file fails with TOCTU message + err = ctx.verifyArtifactChecksum(testArtifact) + if err == nil { + t.Error("Verification should fail for tampered file") + } + if !strings.Contains(err.Error(), "cache artifact") || !strings.Contains(err.Error(), "modified") { + t.Errorf("Expected cache artifact modified error, got: %v", err) + } +} + +func TestInFlightChecksumsDisabled(t *testing.T) { + ctx := &buildContext{ + InFlightChecksums: false, + artifactChecksums: nil, + } + + // Both operations should be no-op + err := ctx.recordArtifactChecksum("nonexistent") + if err != nil { + t.Errorf("Disabled checksumming should be no-op: %v", err) + } + + err = ctx.verifyArtifactChecksum("nonexistent") + if err != nil { + t.Errorf("Disabled checksumming should be no-op: %v", err) + } +} + +func TestVerifyAllArtifactChecksums(t *testing.T) { + tmpDir := t.TempDir() + + // Create multiple test artifacts + artifacts := []string{ + filepath.Join(tmpDir, "pkg1.tar.gz"), + filepath.Join(tmpDir, "pkg2.tar.gz"), + } + + ctx := &buildContext{ + InFlightChecksums: true, + artifactChecksums: make(map[string]string), + } + + // Record checksums for all artifacts + for i, artifact := range artifacts { + content := fmt.Sprintf("package %d content", i) + err := os.WriteFile(artifact, []byte(content), 0644) + if err != nil { + t.Fatal(err) + } + + err = ctx.recordArtifactChecksum(artifact) + if err != nil { + t.Fatal(err) + } + } + + // Verify all pass initially + err := verifyAllArtifactChecksums(ctx) + if err != nil { + t.Errorf("All checksums should verify: %v", err) + } + + // Tamper with one artifact + err = os.WriteFile(artifacts[0], []byte("tampered!"), 0644) + if err != nil { + t.Fatal(err) + } + + // Verification should fail + err = verifyAllArtifactChecksums(ctx) + if err == nil { + t.Error("Verification should fail when artifact is tampered") + } + if !strings.Contains(err.Error(), "checksum verification failures") { + t.Errorf("Expected verification failure message, got: %v", err) + } +} \ No newline at end of file diff --git a/pkg/leeway/cache/remote/gsutil.go 
b/pkg/leeway/cache/remote/gsutil.go index 7e98a96..db8adb7 100644 --- a/pkg/leeway/cache/remote/gsutil.go +++ b/pkg/leeway/cache/remote/gsutil.go @@ -173,6 +173,25 @@ func (rs *GSUtilCache) Upload(ctx context.Context, src cache.LocalCache, pkgs [] return gsutilTransfer(fmt.Sprintf("gs://%s", rs.BucketName), files) } +// UploadFile uploads a single file to the remote cache with the given key +func (rs *GSUtilCache) UploadFile(ctx context.Context, filePath string, key string) error { + target := fmt.Sprintf("gs://%s/%s", rs.BucketName, key) + log.WithFields(log.Fields{ + "file": filePath, + "target": target, + }).Debug("Uploading file using gsutil") + + cmd := exec.CommandContext(ctx, "gsutil", "cp", filePath, target) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to upload file %s to %s: %w", filePath, target, err) + } + + return nil +} + func parseGSUtilStatOutput(reader io.Reader) map[string]struct{} { exists := make(map[string]struct{}) scanner := bufio.NewScanner(reader) diff --git a/pkg/leeway/cache/remote/no_cache.go b/pkg/leeway/cache/remote/no_cache.go index 8566ec3..a1187d2 100644 --- a/pkg/leeway/cache/remote/no_cache.go +++ b/pkg/leeway/cache/remote/no_cache.go @@ -28,3 +28,8 @@ func (NoRemoteCache) Download(ctx context.Context, dst cache.LocalCache, pkgs [] func (NoRemoteCache) Upload(ctx context.Context, src cache.LocalCache, pkgs []cache.Package) error { return nil } + +// UploadFile uploads a single file to the remote cache with the given key +func (NoRemoteCache) UploadFile(ctx context.Context, filePath string, key string) error { + return nil +} diff --git a/pkg/leeway/cache/remote/s3.go b/pkg/leeway/cache/remote/s3.go index c62f51d..5baebe7 100644 --- a/pkg/leeway/cache/remote/s3.go +++ b/pkg/leeway/cache/remote/s3.go @@ -58,7 +58,7 @@ type S3Cache struct { cfg *cache.RemoteConfig workerCount int slsaVerifier slsa.VerifierInterface - cleanupMu sync.Mutex // Protects concurrent file cleanup operations + cleanupMu sync.Mutex // Protects concurrent file cleanup operations rateLimiter *rate.Limiter // Rate limiter for S3 API calls semaphore chan struct{} // Semaphore for limiting concurrent operations } @@ -79,7 +79,7 @@ func NewS3Cache(cfg *cache.RemoteConfig) (*S3Cache, error) { } storage := NewS3Storage(cfg.BucketName, &awsCfg) - + // Initialize SLSA verifier if enabled var slsaVerifier slsa.VerifierInterface if cfg.SLSA != nil && cfg.SLSA.Verification && cfg.SLSA.SourceURI != "" { @@ -89,13 +89,13 @@ func NewS3Cache(cfg *cache.RemoteConfig) (*S3Cache, error) { "trustedRoots": len(cfg.SLSA.TrustedRoots), }).Debug("SLSA verification enabled for cache") } - + // Initialize rate limiter with default limits rateLimiter := rate.NewLimiter(rate.Limit(defaultRateLimit), defaultBurstLimit) - + // Initialize semaphore for goroutine limiting semaphore := make(chan struct{}, maxConcurrentOperations) - + return &S3Cache{ storage: storage, cfg: cfg, @@ -208,7 +208,7 @@ func (s *S3Cache) ExistingPackages(ctx context.Context, pkgs []cache.Package) (m log.WithError(err).Debug("Rate limiter error during .tar.gz check") // Continue to .tar check even if rate limited } - + timeoutCtx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() exists, err := s.storage.HasObject(timeoutCtx, gzKey) @@ -236,7 +236,7 @@ func (s *S3Cache) ExistingPackages(ctx context.Context, pkgs []cache.Package) (m if err := s.waitForRateLimit(ctx); err != nil { log.WithError(err).Debug("Rate limiter error during .tar check") } - + 
 		timeoutCtx2, cancel2 := context.WithTimeout(ctx, 30*time.Second)
 		defer cancel2()
 		exists, err = s.storage.HasObject(timeoutCtx2, tarKey)
@@ -363,7 +363,7 @@ func (s *S3Cache) downloadOriginal(ctx context.Context, p cache.Package, version
 		if err := s.waitForRateLimit(ctx); err != nil {
 			return err
 		}
-		
+
 		timeoutCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
 		defer cancel()
 		_, err := s.storage.GetObject(timeoutCtx, gzKey, localPath)
@@ -400,7 +400,7 @@ func (s *S3Cache) downloadOriginal(ctx context.Context, p cache.Package, version
 		if err := s.waitForRateLimit(ctx); err != nil {
 			return err
 		}
-		
+
 		timeoutCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
 		defer cancel()
 		_, err := s.storage.GetObject(timeoutCtx, tarKey, localPath)
@@ -534,7 +534,7 @@ func (s *S3Cache) downloadWithSLSAVerification(ctx context.Context, p cache.Pack
 				"attestation": attestationKey,
 				"duration":    verifyDuration,
 			}).Warn("SLSA verification failed, artifact rejected")
-			
+
 			s.cleanupTempFiles(tmpArtifactPath, tmpAttestationPath)
 			continue
 		}
@@ -551,11 +551,11 @@ func (s *S3Cache) downloadWithSLSAVerification(ctx context.Context, p cache.Pack
 	totalDuration := time.Since(downloadStart)
 	log.WithFields(log.Fields{
-		"package":      p.FullName(),
-		"key":          artifactKey,
-		"path":         localPath,
-		"verified":     true,
-		"downloadTime": totalDuration,
+		"package":          p.FullName(),
+		"key":              artifactKey,
+		"path":             localPath,
+		"verified":         true,
+		"downloadTime":     totalDuration,
 		"verificationTime": verifyDuration,
 	}).Info("Successfully downloaded and verified package with SLSA attestation")
 
@@ -589,7 +589,7 @@ func (s *S3Cache) checkBothExist(ctx context.Context, artifactKey, attestationKe
 	// Check artifact existence with timeout protection
 	go func() {
 		defer wg.Done()
-		
+
 		// Acquire semaphore slot
 		if err := s.acquireSemaphore(ctx); err != nil {
 			select {
@@ -599,7 +599,7 @@ func (s *S3Cache) checkBothExist(ctx context.Context, artifactKey, attestationKe
 			return
 		}
 		defer s.releaseSemaphore()
-		
+
 		// Wait for rate limiter permission
 		if err := s.waitForRateLimit(ctx); err != nil {
 			select {
@@ -608,11 +608,11 @@ func (s *S3Cache) checkBothExist(ctx context.Context, artifactKey, attestationKe
 			}
 			return
 		}
-		
+
 		// Create timeout context for storage operation
 		timeoutCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
 		defer cancel()
-		
+
 		exists, err := s.storage.HasObject(timeoutCtx, artifactKey)
 		select {
 		case results <- existResult{artifactKey, exists, err}:
@@ -623,7 +623,7 @@ func (s *S3Cache) checkBothExist(ctx context.Context, artifactKey, attestationKe
 	// Check attestation existence with timeout protection
 	go func() {
 		defer wg.Done()
-		
+
 		// Acquire semaphore slot
 		if err := s.acquireSemaphore(ctx); err != nil {
 			select {
@@ -633,7 +633,7 @@ func (s *S3Cache) checkBothExist(ctx context.Context, artifactKey, attestationKe
 			return
 		}
 		defer s.releaseSemaphore()
-		
+
 		// Wait for rate limiter permission
 		if err := s.waitForRateLimit(ctx); err != nil {
 			select {
@@ -642,11 +642,11 @@ func (s *S3Cache) checkBothExist(ctx context.Context, artifactKey, attestationKe
 			}
 			return
 		}
-		
+
 		// Create timeout context for storage operation
 		timeoutCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
 		defer cancel()
-		
+
 		exists, err := s.storage.HasObject(timeoutCtx, attestationKey)
 		select {
 		case results <- existResult{attestationKey, exists, err}:
@@ -670,7 +670,7 @@ func (s *S3Cache) checkBothExist(ctx context.Context, artifactKey, attestationKe
 
 	var artifactExists, attestationExists bool
 	var artifactErr, attestationErr error
-	
+
 	// Block until all results arrive - no default case to avoid race condition
 	for i := 0; i < 2; i++ {
 		select {
@@ -686,7 +686,7 @@ func (s *S3Cache) checkBothExist(ctx context.Context, artifactKey, attestationKe
 			return false, false, ctx.Err()
 		}
 	}
-	
+
 	// Log any errors but don't fail the check - let caller decide
 	if artifactErr != nil {
 		log.WithError(artifactErr).WithField("key", artifactKey).Debug("Failed to check artifact existence")
@@ -699,10 +699,10 @@ func (s *S3Cache) checkBothExist(ctx context.Context, artifactKey, attestationKe
 }
 
 // downloadFileAsync downloads a single file asynchronously with proper concurrency control
-func (s *S3Cache) downloadFileAsync(ctx context.Context, key, localPath, kind string, 
+func (s *S3Cache) downloadFileAsync(ctx context.Context, key, localPath, kind string,
 	wg *sync.WaitGroup, resultChan chan<- downloadResult) {
 	defer wg.Done()
-	
+
 	// Acquire semaphore for concurrency control
 	if err := s.acquireSemaphore(ctx); err != nil {
 		resultChan <- downloadResult{
@@ -712,7 +712,7 @@ func (s *S3Cache) downloadFileAsync(ctx context.Context, key, localPath, kind st
 		return
 	}
 	defer s.releaseSemaphore()
-	
+
 	// Download with retry logic
 	err := withRetry(3, func() error {
 		select {
@@ -723,7 +723,7 @@ func (s *S3Cache) downloadFileAsync(ctx context.Context, key, localPath, kind st
 		if err := s.waitForRateLimit(ctx); err != nil {
 			return err
 		}
-		
+
 		// Create timeout context for storage operation
 		timeoutCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
 		defer cancel()
@@ -731,7 +731,7 @@ func (s *S3Cache) downloadFileAsync(ctx context.Context, key, localPath, kind st
 			return err
 		}
 	})
-	
+
 	// NEW: Structured result reporting
 	if err != nil {
 		// Clean up on error with race protection
@@ -754,24 +754,24 @@ func (s *S3Cache) downloadBothParallel(ctx context.Context, artifactKey, attesta
 	resultChan := make(chan downloadResult, 2)
 	var wg sync.WaitGroup
-	
+
 	// Download artifact
 	wg.Add(1)
 	go s.downloadFileAsync(ctx, artifactKey, artifactPath, "artifact", &wg, resultChan)
-	
-	// Download attestation 
+
+	// Download attestation
 	wg.Add(1)
 	go s.downloadFileAsync(ctx, attestationKey, attestationPath, "attestation", &wg, resultChan)
-	
+
 	// Wait and close channel when done
 	go func() {
 		wg.Wait()
 		close(resultChan)
 	}()
-	
+
 	var artifactErr, attestationErr error
 	var resultsCollected int
-	
+
 	// Collect results with proper context handling
 	for resultsCollected < 2 {
 		select {
@@ -787,11 +787,11 @@ func (s *S3Cache) downloadBothParallel(ctx context.Context, artifactKey, attesta
 			case "attestation":
 				attestationErr = result.err
 			}
-			
+
 		case <-ctx.Done():
 			// Context cancelled - provide specific errors based on what we know
 			ctxErr := ctx.Err()
-			
+
 			// Set errors for operations that haven't completed
 			if resultsCollected < 2 {
 				if artifactErr == nil {
@@ -801,11 +801,11 @@ func (s *S3Cache) downloadBothParallel(ctx context.Context, artifactKey, attesta
 					attestationErr = fmt.Errorf("attestation download cancelled: %w", ctxErr)
 				}
 			}
-			
+
 			return artifactErr, attestationErr
 		}
 	}
-	
+
 	return artifactErr, attestationErr
 }
 
@@ -815,7 +815,7 @@ func (s *S3Cache) atomicMove(src, dst string) error {
 	if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
 		return fmt.Errorf("failed to create destination directory: %w", err)
 	}
-	
+
 	// On Windows, os.Rename fails if destination exists
 	if runtime.GOOS == "windows" {
 		if _, err := os.Stat(dst); err == nil {
@@ -827,7 +827,7 @@ func (s *S3Cache) atomicMove(src, dst string) error {
 			}
 		}
 	}
-	
+
 	return os.Rename(src, dst)
 }
 
@@ -835,7 +835,7 @@ func (s *S3Cache) atomicMove(src, dst string) error {
 func (s *S3Cache) cleanupTempFiles(paths ...string) {
 	s.cleanupMu.Lock()
 	defer s.cleanupMu.Unlock()
-	
+
 	for _, path := range paths {
 		if removeErr := os.Remove(path); removeErr != nil && !os.IsNotExist(removeErr) {
 			log.WithError(removeErr).WithField("path", path).Debug("Failed to cleanup temporary file")
@@ -846,13 +846,13 @@ func (s *S3Cache) cleanupTempFiles(paths ...string) {
 // downloadUnverified handles backward compatibility for missing attestations
 func (s *S3Cache) downloadUnverified(ctx context.Context, p cache.Package, version, localPath, extension string) error {
 	key := fmt.Sprintf("%s%s", version, extension)
-	
+
 	err := withRetry(3, func() error {
 		// Wait for rate limiter permission
 		if err := s.waitForRateLimit(ctx); err != nil {
 			return err
 		}
-		
+
 		timeoutCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
 		defer cancel()
 		_, err := s.storage.GetObject(timeoutCtx, key, localPath)
@@ -906,7 +906,7 @@ func (s *S3Cache) Upload(ctx context.Context, src cache.LocalCache, pkgs []cache
 				uploadErrors = append(uploadErrors, fmt.Errorf("package %s: rate limit error: %w", p.FullName(), err))
 				return nil // Don't fail the entire operation
 			}
-			
+
 			timeoutCtx, cancel := context.WithTimeout(ctx, 120*time.Second)
 			defer cancel()
 			if err := s.storage.UploadObject(timeoutCtx, key, localPath); err != nil {
@@ -937,6 +937,29 @@ func (s *S3Cache) Upload(ctx context.Context, src cache.LocalCache, pkgs []cache
 	return nil // Always return nil to allow the build to continue
 }
 
+// UploadFile uploads a single file to the remote cache with the given key
+func (s *S3Cache) UploadFile(ctx context.Context, filePath string, key string) error {
+	// Wait for rate limiter permission
+	if err := s.waitForRateLimit(ctx); err != nil {
+		return fmt.Errorf("rate limiter error: %w", err)
+	}
+
+	// Use timeout for the upload operation
+	timeoutCtx, cancel := context.WithTimeout(ctx, 120*time.Second)
+	defer cancel()
+
+	if err := s.storage.UploadObject(timeoutCtx, key, filePath); err != nil {
+		return fmt.Errorf("failed to upload file %s with key %s: %w", filePath, key, err)
+	}
+
+	log.WithFields(log.Fields{
+		"file": filePath,
+		"key":  key,
+	}).Debug("successfully uploaded file to remote cache")
+
+	return nil
+}
+
 // s3ClientAPI is a subset of the S3 client interface we need
 type s3ClientAPI interface {
 	HeadObject(ctx context.Context, params *s3.HeadObjectInput, optFns ...func(*s3.Options)) (*s3.HeadObjectOutput, error)
diff --git a/pkg/leeway/cache/types.go b/pkg/leeway/cache/types.go
index d73695b..0e0c184 100644
--- a/pkg/leeway/cache/types.go
+++ b/pkg/leeway/cache/types.go
@@ -48,6 +48,10 @@ type RemoteCache interface {
 
 	// Upload makes a best effort to upload the build artifacts to a remote cache
 	Upload(ctx context.Context, src LocalCache, pkgs []Package) error
+
+	// UploadFile uploads a single file to the remote cache with the given key
+	// This is useful for uploading individual files like attestations without Package abstraction
+	UploadFile(ctx context.Context, filePath string, key string) error
 }
 
 // ObjectStorage represents a generic object storage interface
diff --git a/pkg/leeway/signing/attestation.go b/pkg/leeway/signing/attestation.go
new file mode 100644
index 0000000..14b40d0
--- /dev/null
+++ b/pkg/leeway/signing/attestation.go
@@ -0,0 +1,358 @@
+package signing
+
+import (
+	"context"
+	"crypto/sha256"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"time"
+
"github.com/in-toto/in-toto-golang/in_toto" + "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" + slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" + "github.com/sigstore/sigstore-go/pkg/root" + "github.com/sigstore/sigstore-go/pkg/sign" + + log "github.com/sirupsen/logrus" +) + +// GitHubContext contains GitHub Actions environment information +type GitHubContext struct { + RunID string // GITHUB_RUN_ID + RunNumber string // GITHUB_RUN_NUMBER + Actor string // GITHUB_ACTOR + Repository string // GITHUB_REPOSITORY + Ref string // GITHUB_REF + SHA string // GITHUB_SHA + ServerURL string // GITHUB_SERVER_URL + WorkflowRef string // GITHUB_WORKFLOW_REF +} + +// Validate ensures all required GitHub context fields are present +func (ctx *GitHubContext) Validate() error { + if ctx.RunID == "" { + return fmt.Errorf("GITHUB_RUN_ID is required") + } + if ctx.Repository == "" { + return fmt.Errorf("GITHUB_REPOSITORY is required") + } + if ctx.SHA == "" { + return fmt.Errorf("GITHUB_SHA is required") + } + if ctx.ServerURL == "" { + return fmt.Errorf("GITHUB_SERVER_URL is required") + } + if ctx.WorkflowRef == "" { + return fmt.Errorf("GITHUB_WORKFLOW_REF is required") + } + return nil +} + +// GetGitHubContext extracts GitHub Actions context from environment variables +func GetGitHubContext() *GitHubContext { + return &GitHubContext{ + RunID: os.Getenv("GITHUB_RUN_ID"), + RunNumber: os.Getenv("GITHUB_RUN_NUMBER"), + Actor: os.Getenv("GITHUB_ACTOR"), + Repository: os.Getenv("GITHUB_REPOSITORY"), + Ref: os.Getenv("GITHUB_REF"), + SHA: os.Getenv("GITHUB_SHA"), + ServerURL: os.Getenv("GITHUB_SERVER_URL"), + WorkflowRef: os.Getenv("GITHUB_WORKFLOW_REF"), + } +} + +// SignedAttestationResult contains the signed SLSA attestation ready for upload +type SignedAttestationResult struct { + AttestationBytes []byte `json:"attestation_bytes"` // Complete .att file content + Checksum string `json:"checksum"` // SHA256 of the artifact + ArtifactName string `json:"artifact_name"` // Name of the artifact +} + + + +// GenerateSignedSLSAAttestation generates and signs SLSA provenance in one integrated step +func GenerateSignedSLSAAttestation(ctx context.Context, artifactPath string, githubCtx *GitHubContext) (*SignedAttestationResult, error) { + // Calculate artifact checksum + checksum, err := computeSHA256(artifactPath) + if err != nil { + return nil, fmt.Errorf("checksum calculation failed: %w", err) + } + + // Validate GitHub context completeness + if err := githubCtx.Validate(); err != nil { + return nil, fmt.Errorf("incomplete GitHub context: %w", err) + } + + sourceURI := fmt.Sprintf("%s/%s", githubCtx.ServerURL, githubCtx.Repository) + builderID := fmt.Sprintf("%s/%s", githubCtx.ServerURL, githubCtx.WorkflowRef) + + log.WithFields(log.Fields{ + "artifact": filepath.Base(artifactPath), + "checksum": checksum[:16] + "...", + "source_uri": sourceURI, + "builder_id": builderID, + }).Debug("Generating SLSA attestation") + + // Create SLSA statement directly using in-toto libraries + stmt := &in_toto.Statement{ + StatementHeader: in_toto.StatementHeader{ + Type: in_toto.StatementInTotoV01, + PredicateType: slsa.PredicateSLSAProvenance, + Subject: []in_toto.Subject{{ + Name: filepath.Base(artifactPath), + Digest: common.DigestSet{ + "sha256": checksum, + }, + }}, + }, + } + + // Create SLSA predicate directly + pred := slsa.ProvenancePredicate{ + Builder: common.ProvenanceBuilder{ + ID: builderID, + }, + BuildType: "https://leeway.build/cache-signing/v1", + Invocation: 
slsa.ProvenanceInvocation{ + ConfigSource: slsa.ConfigSource{ + URI: sourceURI, + }, + Parameters: map[string]interface{}{ + "workflow": githubCtx.WorkflowRef, + }, + }, + } + + // Set metadata + // Note: BuildStartedOn and BuildFinishedOn are set to nil because sign-cache runs + // in a separate job after the build completes, and we don't have access to the + // actual build times. Using signing time or artifact mtime would be misleading. + pred.Metadata = &slsa.ProvenanceMetadata{ + BuildInvocationID: githubCtx.RunID, + BuildStartedOn: nil, + BuildFinishedOn: nil, + Completeness: slsa.ProvenanceComplete{ + Parameters: true, + Environment: false, + Materials: false, + }, + Reproducible: false, + } + + // Set the predicate + stmt.Predicate = pred + + log.WithFields(log.Fields{ + "artifact": filepath.Base(artifactPath), + "checksum": checksum[:16] + "...", + "source_uri": sourceURI, + "builder_id": builderID, + }).Debug("Generated SLSA provenance, proceeding with integrated signing") + + // Generate and sign the SLSA provenance using Sigstore + signedAttestation, err := signProvenanceWithSigstore(ctx, stmt) + if err != nil { + return nil, fmt.Errorf("failed to sign SLSA provenance: %w", err) + } + + return &SignedAttestationResult{ + AttestationBytes: signedAttestation, + Checksum: checksum, + ArtifactName: filepath.Base(artifactPath), + }, nil +} + +// computeSHA256 calculates the SHA256 hash of a file +func computeSHA256(filePath string) (string, error) { + file, err := os.Open(filePath) + if err != nil { + return "", fmt.Errorf("failed to open file: %w", err) + } + defer file.Close() + + hash := sha256.New() + if _, err := io.Copy(hash, file); err != nil { + return "", fmt.Errorf("failed to calculate hash: %w", err) + } + + return fmt.Sprintf("%x", hash.Sum(nil)), nil +} + +// signProvenanceWithSigstore signs SLSA provenance using Sigstore keyless signing +func signProvenanceWithSigstore(ctx context.Context, statement *in_toto.Statement) ([]byte, error) { + // Validate GitHub OIDC environment + if err := validateSigstoreEnvironment(); err != nil { + return nil, fmt.Errorf("sigstore environment validation failed: %w", err) + } + + // Marshal the statement to JSON for signing + payload, err := json.Marshal(statement) + if err != nil { + return nil, fmt.Errorf("failed to marshal statement: %w", err) + } + + log.WithFields(log.Fields{ + "payload_size": len(payload), + "subject": statement.Subject[0].Name, + }).Debug("Starting Sigstore keyless signing") + + // Create ephemeral keypair for signing + keypair, err := sign.NewEphemeralKeypair(nil) + if err != nil { + return nil, &SigningError{ + Type: ErrorTypeSigstore, + Artifact: statement.Subject[0].Name, + Message: fmt.Sprintf("failed to create ephemeral keypair: %v", err), + Cause: err, + } + } + + // Create DSSE content for SLSA attestation (in-toto format) + content := &sign.DSSEData{ + Data: payload, + PayloadType: "application/vnd.in-toto+json", + } + + // Get trusted root from Sigstore TUF + trustedRoot, err := root.FetchTrustedRoot() + if err != nil { + return nil, &SigningError{ + Type: ErrorTypeSigstore, + Artifact: statement.Subject[0].Name, + Message: fmt.Sprintf("failed to fetch trusted root: %v", err), + Cause: err, + } + } + + // Get signing config from Sigstore TUF + signingConfig, err := root.FetchSigningConfig() + if err != nil { + return nil, &SigningError{ + Type: ErrorTypeSigstore, + Artifact: statement.Subject[0].Name, + Message: fmt.Sprintf("failed to fetch signing config: %v", err), + Cause: err, + } + } + + // Create 
bundle options + bundleOpts := sign.BundleOptions{ + TrustedRoot: trustedRoot, + Context: ctx, + } + + // Configure Fulcio for GitHub OIDC if we have a token + if os.Getenv("ACTIONS_ID_TOKEN_REQUEST_TOKEN") != "" { + // Select Fulcio service from signing config + fulcioService, err := root.SelectService(signingConfig.FulcioCertificateAuthorityURLs(), sign.FulcioAPIVersions, time.Now()) + if err != nil { + return nil, &SigningError{ + Type: ErrorTypeSigstore, + Artifact: statement.Subject[0].Name, + Message: fmt.Sprintf("failed to select Fulcio service: %v", err), + Cause: err, + } + } + + fulcioOpts := &sign.FulcioOptions{ + BaseURL: fulcioService.URL, + Timeout: 30 * time.Second, + Retries: 1, + } + bundleOpts.CertificateProvider = sign.NewFulcio(fulcioOpts) + bundleOpts.CertificateProviderOptions = &sign.CertificateProviderOptions{ + // Let sigstore-go automatically handle GitHub OIDC + // It will use ACTIONS_ID_TOKEN_REQUEST_TOKEN/URL automatically + } + + // Configure Rekor transparency log + rekorServices, err := root.SelectServices(signingConfig.RekorLogURLs(), + signingConfig.RekorLogURLsConfig(), sign.RekorAPIVersions, time.Now()) + if err != nil { + return nil, &SigningError{ + Type: ErrorTypeSigstore, + Artifact: statement.Subject[0].Name, + Message: fmt.Sprintf("failed to select Rekor services: %v", err), + Cause: err, + } + } + + for _, rekorService := range rekorServices { + rekorOpts := &sign.RekorOptions{ + BaseURL: rekorService.URL, + Timeout: 90 * time.Second, + Retries: 1, + Version: rekorService.MajorAPIVersion, + } + bundleOpts.TransparencyLogs = append(bundleOpts.TransparencyLogs, sign.NewRekor(rekorOpts)) + } + } + + // Sign and create bundle + signedBundle, err := sign.Bundle(content, keypair, bundleOpts) + if err != nil { + return nil, &SigningError{ + Type: ErrorTypeSigstore, + Artifact: statement.Subject[0].Name, + Message: fmt.Sprintf("failed to sign with Sigstore: %v", err), + Cause: err, + } + } + + // Convert to bytes for .att file format + bundleBytes, err := json.Marshal(signedBundle) + if err != nil { + return nil, &SigningError{ + Type: ErrorTypeSigstore, + Artifact: statement.Subject[0].Name, + Message: fmt.Sprintf("failed to marshal signed bundle: %v", err), + Cause: err, + } + } + + log.WithFields(log.Fields{ + "artifact": statement.Subject[0].Name, + "bundle_size": len(bundleBytes), + }).Info("Successfully signed SLSA attestation with Sigstore") + + return bundleBytes, nil +} + +// validateSigstoreEnvironment checks if the environment is properly configured for keyless signing +func validateSigstoreEnvironment() error { + // Check for GitHub OIDC token (this is the key requirement) + if os.Getenv("ACTIONS_ID_TOKEN_REQUEST_TOKEN") == "" { + return &SigningError{ + Type: ErrorTypeValidation, + Artifact: "", + Message: "ACTIONS_ID_TOKEN_REQUEST_TOKEN not found - ensure 'permissions: id-token: write' is set in GitHub Actions", + Cause: nil, + } + } + + if os.Getenv("ACTIONS_ID_TOKEN_REQUEST_URL") == "" { + return &SigningError{ + Type: ErrorTypeValidation, + Artifact: "", + Message: "ACTIONS_ID_TOKEN_REQUEST_URL not found - ensure running in GitHub Actions", + Cause: nil, + } + } + + // Verify we're in GitHub Actions environment + if os.Getenv("GITHUB_ACTIONS") != "true" { + return &SigningError{ + Type: ErrorTypeValidation, + Artifact: "", + Message: "not running in GitHub Actions environment", + Cause: nil, + } + } + + log.Debug("Sigstore environment validation passed") + return nil +} \ No newline at end of file diff --git a/pkg/leeway/signing/errors.go 
b/pkg/leeway/signing/errors.go new file mode 100644 index 0000000..d4c1211 --- /dev/null +++ b/pkg/leeway/signing/errors.go @@ -0,0 +1,159 @@ +package signing + +import ( + "fmt" + "strings" + "time" + + log "github.com/sirupsen/logrus" +) + +// SigningError represents a categorized error during the signing process +type SigningError struct { + Type SigningErrorType `json:"type"` + Artifact string `json:"artifact"` + Message string `json:"message"` + Cause error `json:"-"` +} + +// SigningErrorType categorizes different types of signing errors +type SigningErrorType string + +const ( + ErrorTypeNetwork SigningErrorType = "network" + ErrorTypeSigstore SigningErrorType = "sigstore" + ErrorTypePermission SigningErrorType = "permission" + ErrorTypeValidation SigningErrorType = "validation" + ErrorTypeFileSystem SigningErrorType = "filesystem" +) + +// Error implements the error interface +func (e *SigningError) Error() string { + return fmt.Sprintf("[%s] %s: %s", e.Type, e.Artifact, e.Message) +} + +// Unwrap returns the underlying cause for error wrapping +func (e *SigningError) Unwrap() error { + return e.Cause +} + +// NewSigningError creates a new categorized signing error +func NewSigningError(errorType SigningErrorType, artifact, message string, cause error) *SigningError { + return &SigningError{ + Type: errorType, + Artifact: artifact, + Message: message, + Cause: cause, + } +} + +// IsRetryable determines if an error type should be retried +func (e *SigningError) IsRetryable() bool { + switch e.Type { + case ErrorTypeNetwork, ErrorTypeSigstore: + return true + case ErrorTypePermission, ErrorTypeValidation, ErrorTypeFileSystem: + return false + default: + return false + } +} + +// WithRetry executes an operation with exponential backoff retry logic +func WithRetry(maxAttempts int, operation func() error) error { + var lastErr error + backoff := time.Second + + for attempt := 1; attempt <= maxAttempts; attempt++ { + if err := operation(); err != nil { + lastErr = err + + // Check if this is a retryable error + if signingErr, ok := err.(*SigningError); ok && !signingErr.IsRetryable() { + log.WithFields(log.Fields{ + "error_type": signingErr.Type, + "artifact": signingErr.Artifact, + }).Debug("Non-retryable error encountered") + return err + } + + if attempt < maxAttempts { + log.WithFields(log.Fields{ + "attempt": attempt, + "max_attempts": maxAttempts, + "backoff": backoff, + }).WithError(err).Warn("Operation failed, retrying") + + time.Sleep(backoff) + backoff *= 2 // Exponential backoff + if backoff > 30*time.Second { + backoff = 30 * time.Second // Cap at 30 seconds + } + continue + } + } else { + return nil // Success + } + } + + return fmt.Errorf("operation failed after %d attempts: %w", maxAttempts, lastErr) +} + +// CategorizeError attempts to categorize a generic error into a SigningError +func CategorizeError(artifact string, err error) *SigningError { + if signingErr, ok := err.(*SigningError); ok { + return signingErr + } + + // Try to categorize based on error message patterns + errMsg := err.Error() + + // Network-related errors + if containsAny(errMsg, []string{"connection", "timeout", "network", "dial", "dns"}) { + return &SigningError{ + Type: ErrorTypeNetwork, + Artifact: artifact, + Message: errMsg, + Cause: err, + } + } + + // Permission-related errors + if containsAny(errMsg, []string{"permission", "access denied", "forbidden", "unauthorized"}) { + return &SigningError{ + Type: ErrorTypePermission, + Artifact: artifact, + Message: errMsg, + Cause: err, + } + } + + // 
File system errors + if containsAny(errMsg, []string{"no such file", "not found", "is a directory", "read-only"}) { + return &SigningError{ + Type: ErrorTypeFileSystem, + Artifact: artifact, + Message: errMsg, + Cause: err, + } + } + + // Default to network error for unknown errors (most likely to be retryable) + return &SigningError{ + Type: ErrorTypeNetwork, + Artifact: artifact, + Message: errMsg, + Cause: err, + } +} + +// containsAny checks if a string contains any of the given substrings (case-insensitive) +func containsAny(s string, substrings []string) bool { + s = strings.ToLower(s) + for _, substr := range substrings { + if strings.Contains(s, strings.ToLower(substr)) { + return true + } + } + return false +} \ No newline at end of file diff --git a/pkg/leeway/signing/upload.go b/pkg/leeway/signing/upload.go new file mode 100644 index 0000000..7727364 --- /dev/null +++ b/pkg/leeway/signing/upload.go @@ -0,0 +1,77 @@ +package signing + +import ( + "context" + "fmt" + "os" + "path/filepath" + "time" + + "github.com/gitpod-io/leeway/pkg/leeway/cache" + log "github.com/sirupsen/logrus" +) + +// ArtifactUploader handles uploading signed artifacts and attestations to remote cache +type ArtifactUploader struct { + remoteCache cache.RemoteCache +} + +// NewArtifactUploader creates a new artifact uploader +func NewArtifactUploader(remoteCache cache.RemoteCache) *ArtifactUploader { + return &ArtifactUploader{ + remoteCache: remoteCache, + } +} + +// UploadArtifactWithAttestation uploads both the artifact and its .att file to remote cache +func (u *ArtifactUploader) UploadArtifactWithAttestation(ctx context.Context, artifactPath string, attestationBytes []byte) error { + // Extract artifact name for key generation + artifactName := filepath.Base(artifactPath) + + // Generate cache keys following existing patterns + artifactKey := artifactName + attestationKey := artifactName + ".att" + + log.WithFields(log.Fields{ + "artifact": artifactPath, + "artifact_key": artifactKey, + "att_key": attestationKey, + }).Debug("Preparing to upload signed artifact and attestation") + + // Create temporary file for the .att file + tmpDir := os.TempDir() + attestationPath := filepath.Join(tmpDir, fmt.Sprintf("attestation-%d.att", time.Now().UnixNano())) + defer func() { + if err := os.Remove(attestationPath); err != nil && !os.IsNotExist(err) { + log.WithError(err).WithField("file", attestationPath).Warn("Failed to clean up temporary attestation file") + } + }() + + // Write .att file to temporary location + if err := os.WriteFile(attestationPath, attestationBytes, 0644); err != nil { + return &SigningError{ + Type: ErrorTypeFileSystem, + Artifact: artifactPath, + Message: fmt.Sprintf("failed to write .att file: %v", err), + Cause: err, + } + } + + // Upload artifact first using the new UploadFile method + if err := u.remoteCache.UploadFile(ctx, artifactPath, artifactKey); err != nil { + return fmt.Errorf("failed to upload artifact: %w", err) + } + + // Upload .att file using the new UploadFile method + if err := u.remoteCache.UploadFile(ctx, attestationPath, attestationKey); err != nil { + return fmt.Errorf("failed to upload .att file: %w", err) + } + + log.WithFields(log.Fields{ + "artifact": artifactPath, + "artifact_key": artifactKey, + "att_key": attestationKey, + }).Info("Successfully uploaded artifact and .att file") + + return nil +}
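To make the intended flow concrete, here is a minimal sketch of how the new pieces could compose in a sign-cache step. It is an editor's illustration, not part of the patch: the artifact path taken from os.Args is an assumption, and only APIs introduced above (GetGitHubContext, GenerateSignedSLSAAttestation, WithRetry, CategorizeError) are used. In the real flow the attestation bytes would be handed to ArtifactUploader.UploadArtifactWithAttestation; writing them next to the artifact here just keeps the sketch self-contained.

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/gitpod-io/leeway/pkg/leeway/signing"
)

func main() {
	ctx := context.Background()

	// Reads GITHUB_* variables; GenerateSignedSLSAAttestation validates them.
	githubCtx := signing.GetGitHubContext()

	// Hypothetical invocation: the cached artifact to sign is passed as an argument.
	artifactPath := os.Args[1]

	var result *signing.SignedAttestationResult
	// Retry transient failures; CategorizeError marks network/Sigstore errors
	// as retryable so WithRetry backs off and tries again.
	err := signing.WithRetry(3, func() error {
		r, err := signing.GenerateSignedSLSAAttestation(ctx, artifactPath, githubCtx)
		if err != nil {
			return signing.CategorizeError(artifactPath, err)
		}
		result = r
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, "signing failed:", err)
		os.Exit(1)
	}

	// In the sign-cache job these bytes would go to
	// ArtifactUploader.UploadArtifactWithAttestation.
	if err := os.WriteFile(artifactPath+".att", result.AttestationBytes, 0644); err != nil {
		fmt.Fprintln(os.Stderr, "writing attestation failed:", err)
		os.Exit(1)
	}
	fmt.Println("signed", result.ArtifactName, "sha256:", result.Checksum)
}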
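The keyword matching in CategorizeError is easy to pin down with a table-driven test. The sketch below is not part of the patch; the error strings are made up, chosen only to hit the keyword lists defined above, and it exercises both the category assignment and the IsRetryable rules.

package signing

import (
	"errors"
	"testing"
)

func TestCategorizeError(t *testing.T) {
	tests := []struct {
		name      string
		err       error
		wantType  SigningErrorType
		retryable bool
	}{
		// "dial" matches the network keyword list
		{"network timeout", errors.New("dial tcp: i/o timeout"), ErrorTypeNetwork, true},
		// "access denied" matches the permission keyword list
		{"permission denied", errors.New("access denied for bucket"), ErrorTypePermission, false},
		// "no such file" matches the filesystem keyword list
		{"missing file", errors.New("open foo: no such file or directory"), ErrorTypeFileSystem, false},
		// anything unrecognized falls back to network (retryable)
		{"unknown defaults to network", errors.New("something odd"), ErrorTypeNetwork, true},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := CategorizeError("artifact.tar.gz", tt.err)
			if got.Type != tt.wantType {
				t.Errorf("type = %s, want %s", got.Type, tt.wantType)
			}
			if got.IsRetryable() != tt.retryable {
				t.Errorf("retryable = %v, want %v", got.IsRetryable(), tt.retryable)
			}
		})
	}
}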
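On the consumer side, the sha256 digest recorded in the attestation's subject lets a download be re-checked after the fact. The sketch below is an assumption about how a verifier might use that digest: sha256Hex mirrors the unexported computeSHA256 helper above, the command-line arguments are hypothetical, and the "<artifact key>.att" naming follows the convention established by UploadArtifactWithAttestation.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

// sha256Hex mirrors the unexported computeSHA256 helper in pkg/leeway/signing.
func sha256Hex(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	artifact := os.Args[1]
	want := os.Args[2] // digest taken from the attestation's subject

	got, err := sha256Hex(artifact)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if got != want {
		fmt.Fprintf(os.Stderr, "checksum mismatch: got %s, want %s\n", got, want)
		os.Exit(1)
	}
	// The matching attestation lives under the "<artifact key>.att" key,
	// per UploadArtifactWithAttestation's key derivation.
	fmt.Println("artifact verified; attestation key:", artifact+".att")
}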