Skip to content

Commit

Permalink
enable multi exporters
Browse files Browse the repository at this point in the history
Signed-off-by: fahed dorgaa <fahed.dorgaa@gmail.com>
Signed-off-by: Dmitri Shelenin <deemok@gmail.com>
  • Loading branch information
fahedouch authored and a-palchikov committed Mar 30, 2022
1 parent a6bbde0 commit 97804ac
Show file tree
Hide file tree
Showing 17 changed files with 1,205 additions and 289 deletions.
1,118 changes: 921 additions & 197 deletions api/services/control/control.pb.go

Large diffs are not rendered by default.

29 changes: 25 additions & 4 deletions api/services/control/control.proto
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ message PruneRequest {
}

message DiskUsageRequest {
repeated string filter = 1;
repeated string filter = 1;
}

message DiskUsageResponse {
Expand All @@ -51,17 +51,26 @@ message UsageRecord {
repeated string Parents = 12;
}

// Exporter describes the output exporter
message Exporter {
  // Name identifies the exporter; it matches the client-side exporter
  // type constants (e.g. "image", "local", "tar")
  string Name = 1;
  // Attrs specifies exporter configuration as exporter-specific
  // key/value options (e.g. "name" for the image exporter)
  map<string, string> Attrs = 2;
}

message SolveRequest {
  string Ref = 1;
  pb.Definition Definition = 2;
  // Deprecated: use Exporters instead.
  string ExporterDeprecated = 3;
  // Deprecated: use Exporters instead.
  map<string, string> ExporterAttrsDeprecated = 4;
  string Session = 5;
  string Frontend = 6;
  map<string, string> FrontendAttrs = 7;
  CacheOptions Cache = 8 [(gogoproto.nullable) = false];
  repeated string Entitlements = 9 [(gogoproto.customtype) = "github.com/moby/buildkit/util/entitlements.Entitlement" ];
  map<string, pb.Definition> FrontendInputs = 10;
  // Exporters configures all requested output exporters for this solve
  repeated Exporter Exporters = 11;
}

message CacheOptions {
Expand Down Expand Up @@ -92,8 +101,20 @@ message CacheOptionsEntry {
map<string, string> Attrs = 2;
}

// ExporterResponse holds the metadata returned by a single exporter
message ExporterResponse {
  // Response carries the exporter metadata as key/value pairs
  map<string, string> Response = 1;
  // Name identifies the exporter. It is empty
  // for the common exporter metadata (e.g. cache exporter)
  string Name = 2;
}

message SolveResponse {
  // ExporterResponse is the metadata aggregated from
  // alternative sources like cache exporter and frontend
  ExporterResponse ExporterResponse = 1;
  // ExportersResponse groups metadata from individual
  // output exporters
  repeated ExporterResponse ExportersResponse = 2;
}

message StatusRequest {
Expand Down
85 changes: 85 additions & 0 deletions client/client_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -134,6 +134,8 @@ func TestIntegration(t *testing.T) {
testTarExporterWithSocketCopy,
testTarExporterSymlink,
testMultipleRegistryCacheImportExport,
testMultipleExporters,
testPreventsMultipleExportWithSameExporter,
testSourceMap,
testSourceMapFromRef,
testLazyImagePush,
Expand Down Expand Up @@ -1892,6 +1894,81 @@ func testUser(t *testing.T, sb integration.Sandbox) {
checkAllReleasable(t, c, sb, true)
}

// testPreventsMultipleExportWithSameExporter checks that a single solve
// rejects two export entries of the same exporter type (two ExporterLocal
// entries here) instead of silently accepting them.
func testPreventsMultipleExportWithSameExporter(t *testing.T, sb integration.Sandbox) {
	integration.SkipIfDockerd(t, sb, "multiple exporters")
	requiresLinux(t)

	c, err := New(sb.Context(), sb.Address())
	require.NoError(t, err)
	defer c.Close()

	def, err := llb.Image("busybox").Marshal(context.TODO())
	require.NoError(t, err)

	destDir1, destDir2 := t.TempDir(), t.TempDir()

	_, err = c.Solve(sb.Context(), def, SolveOpt{
		Exports: []ExportEntry{
			{
				Type:      ExporterLocal,
				OutputDir: destDir1,
			},
			{
				Type:      ExporterLocal,
				OutputDir: destDir2,
			},
		},
	}, nil)
	// require.Errorf(t, err, msg) only customizes the failure message; it
	// does not assert the error text. Check the message explicitly instead.
	require.Error(t, err)
	require.Contains(t, err.Error(), "using multiple ExporterLocal is not supported")
}

// testMultipleExporters verifies that one solve can drive several output
// exporters at once (a local exporter plus two image exporters) and that
// each exporter contributes its own entry to ExportersResponse.
func testMultipleExporters(t *testing.T, sb integration.Sandbox) {
	integration.SkipIfDockerd(t, sb, "multiple exporters")
	requiresLinux(t)

	c, err := New(sb.Context(), sb.Address())
	require.NoError(t, err)
	defer c.Close()

	registry, err := sb.NewRegistry()
	if errors.Is(err, integration.ErrRequirements) {
		t.Skip(err.Error())
	}
	require.NoError(t, err)

	def, err := llb.Image("busybox").Marshal(context.TODO())
	require.NoError(t, err)

	destDir := t.TempDir()
	target1 := registry + "/buildkit/build/exporter:image"
	target2 := registry + "/buildkit/build/alternative:image"

	res, err := c.Solve(sb.Context(), def, SolveOpt{
		Exports: []ExportEntry{
			{
				Type:      ExporterLocal,
				OutputDir: destDir,
			},
			{
				Type:  ExporterImage,
				Attrs: map[string]string{"name": target1},
			},
			{
				Type:  ExporterImage,
				Attrs: map[string]string{"name": target2},
			},
		},
	}, nil)
	require.NoError(t, err)

	// Both image exporters must report their target name in the metadata.
	validateMetadataContains(t, res.ExportersResponse, "image.name", []string{target1, target2})
}

func testOCIExporter(t *testing.T, sb integration.Sandbox) {
integration.SkipIfDockerd(t, sb, "oci exporter")
requiresLinux(t)
Expand Down Expand Up @@ -5765,6 +5842,14 @@ func makeSSHAgentSock(agent agent.Agent) (p string, cleanup func() error, err er
}, nil
}

// validateMetadataContains asserts that the values stored under key across
// all given metadata maps match expectedValues, ignoring order. A map
// missing the key contributes an empty string.
func validateMetadataContains(t *testing.T, mds []map[string]string, key string, expectedValues []string) {
	found := make([]string, 0, len(mds))
	for i := range mds {
		found = append(found, mds[i][key])
	}
	require.ElementsMatch(t, found, expectedValues)
}

type server struct {
l net.Listener
}
Expand Down
1 change: 1 addition & 0 deletions client/exporters.go
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
package client

const (
// Keep these in sync with the corresponding exporter Name constants
ExporterImage = "image"
ExporterLocal = "local"
ExporterTar = "tar"
Expand Down
2 changes: 2 additions & 0 deletions client/graph.go
Original file line number Diff line number Diff line change
Expand Up @@ -56,4 +56,6 @@ type SolveStatus struct {
// SolveResponse carries the metadata returned from a completed build.
type SolveResponse struct {
	// ExporterResponse is also used for CacheExporter
	ExporterResponse map[string]string
	// ExportersResponse lists metadata from output exporters,
	// one map per configured exporter
	ExportersResponse []map[string]string
}
94 changes: 55 additions & 39 deletions client/solve.go
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ func (c *Client) Solve(ctx context.Context, def *llb.Definition, opt SolveOpt, s

type runGatewayCB func(ref string, s *session.Session, opts map[string]string) error

func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runGatewayCB, opt SolveOpt, statusChan chan *SolveStatus) (*SolveResponse, error) {
func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runGatewayCB, opt SolveOpt, statusChan chan *SolveStatus) (result *SolveResponse, err error) {
if def != nil && runGateway != nil {
return nil, errors.New("invalid with def and cb")
}
Expand Down Expand Up @@ -114,14 +114,6 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
return nil, err
}

var ex ExportEntry
if len(opt.Exports) > 1 {
return nil, errors.New("currently only single Exports can be specified")
}
if len(opt.Exports) == 1 {
ex = opt.Exports[0]
}

if !opt.SessionPreInitialized {
if len(syncedDirs) > 0 {
s.Allow(filesync.NewFSSyncProvider(syncedDirs))
Expand All @@ -131,29 +123,40 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
s.Allow(a)
}

switch ex.Type {
case ExporterLocal:
if ex.Output != nil {
return nil, errors.New("output file writer is not supported by local exporter")
}
if ex.OutputDir == "" {
return nil, errors.New("output directory is required for local exporter")
}
s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir))
case ExporterOCI, ExporterDocker, ExporterTar:
if ex.OutputDir != "" {
return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type)
}
if ex.Output == nil {
return nil, errors.Errorf("output file writer is required for %s exporter", ex.Type)
}
s.Allow(filesync.NewFSSyncTarget(ex.Output))
default:
if ex.Output != nil {
return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type)
}
if ex.OutputDir != "" {
return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type)
exporters := map[string]struct{}{}
for _, exp := range opt.Exports {
switch exp.Type {
case ExporterLocal:
if _, ok := exporters[exp.Type]; ok {
return nil, errors.New("using multiple ExporterLocal is not supported")
}
if exp.Output != nil {
return nil, errors.New("output file writer is not supported by local exporter")
}
if exp.OutputDir == "" {
return nil, errors.New("output directory is required for local exporter")
}
exporters[exp.Type] = struct{}{}
s.Allow(filesync.NewFSSyncTargetDir(exp.OutputDir))
case ExporterOCI, ExporterDocker, ExporterTar:
if _, ok := exporters[exp.Type]; ok {
return nil, errors.Errorf("using multiple %s is not supported", exp.Type)
}
if exp.OutputDir != "" {
return nil, errors.Errorf("output directory %s is not supported by %s exporter", exp.OutputDir, exp.Type)
}
if exp.Output == nil {
return nil, errors.Errorf("output file writer is required for %s exporter", exp.Type)
}
exporters[exp.Type] = struct{}{}
s.Allow(filesync.NewFSSyncTarget(exp.Output))
default:
if exp.Output != nil {
return nil, errors.Errorf("output file writer is not supported by %s exporter", exp.Type)
}
if exp.OutputDir != "" {
return nil, errors.Errorf("output directory %s is not supported by %s exporter", exp.OutputDir, exp.Type)
}
}
}

Expand All @@ -178,7 +181,6 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
}

solveCtx, cancelSolve := context.WithCancel(ctx)
var res *SolveResponse
eg.Go(func() error {
ctx := solveCtx
defer cancelSolve()
Expand All @@ -205,11 +207,18 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
frontendInputs[key] = def.ToPB()
}

exporters := make([]*controlapi.Exporter, 0, len(opt.Exports))
for _, exp := range opt.Exports {
exporters = append(exporters, &controlapi.Exporter{
Name: exp.Type,
Attrs: exp.Attrs,
})
}

resp, err := c.controlClient().Solve(ctx, &controlapi.SolveRequest{
Ref: ref,
Definition: pbd,
Exporter: ex.Type,
ExporterAttrs: ex.Attrs,
Exporters: exporters,
Session: s.ID(),
Frontend: opt.Frontend,
FrontendAttrs: opt.FrontendAttrs,
Expand All @@ -220,8 +229,15 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
if err != nil {
return errors.Wrap(err, "failed to solve")
}
res = &SolveResponse{
ExporterResponse: resp.ExporterResponse,

exportersResponse := make([]map[string]string, 0, len(resp.ExportersResponse))
for _, resp := range resp.ExportersResponse {
exportersResponse = append(exportersResponse, resp.Response)
}

result = &SolveResponse{
ExporterResponse: resp.ExporterResponse.GetResponse(),
ExportersResponse: exportersResponse,
}
return nil
})
Expand Down Expand Up @@ -318,7 +334,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
}
// Update index.json of exported cache content store
// FIXME(AkihiroSuda): dedupe const definition of cache/remotecache.ExporterResponseManifestDesc = "cache.manifest"
if manifestDescJSON := res.ExporterResponse["cache.manifest"]; manifestDescJSON != "" {
if manifestDescJSON := result.ExporterResponse["cache.manifest"]; manifestDescJSON != "" {
var manifestDesc ocispecs.Descriptor
if err = json.Unmarshal([]byte(manifestDescJSON), &manifestDesc); err != nil {
return nil, err
Expand All @@ -329,7 +345,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
}
}
}
return res, nil
return result, nil
}

func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]filesync.SyncedDir, error) {
Expand Down
43 changes: 31 additions & 12 deletions cmd/buildctl/build.go
Original file line number Diff line number Diff line change
Expand Up @@ -293,13 +293,15 @@ func buildAction(clicontext *cli.Context) error {
if err != nil {
return err
}
for k, v := range resp.ExporterResponse {
logrus.Debugf("exporter response: %s=%s", k, v)
for _, resp := range resp.ExportersResponse {
for k, v := range resp {
logrus.Debugf("exporter response: %s=%s", k, v)
}
}

metadataFile := clicontext.String("metadata-file")
if metadataFile != "" && resp.ExporterResponse != nil {
if err := writeMetadataFile(metadataFile, resp.ExporterResponse); err != nil {
if metadataFile != "" && (resp.ExporterResponse != nil || len(resp.ExportersResponse) != 0) {
if err := writeMetadataFile(metadataFile, resp); err != nil {
return err
}
}
Expand All @@ -315,10 +317,31 @@ func buildAction(clicontext *cli.Context) error {
return eg.Wait()
}

func writeMetadataFile(filename string, exporterResponse map[string]string) error {
var err error
// writeMetadataFile atomically writes the build metadata to filename as
// indented JSON. When no per-exporter metadata is present the legacy
// single-object format is preserved; otherwise a JSON array is written
// whose first element is the aggregated exporter response followed by one
// object per output exporter.
func writeMetadataFile(filename string, resp *client.SolveResponse) error {
	var v interface{}
	if len(resp.ExportersResponse) == 0 {
		// Keep the old metadata file format
		v = marshalExporterMetadata(resp.ExporterResponse)
	} else {
		result := make([]map[string]interface{}, 0, len(resp.ExportersResponse)+1)
		result = append(result, marshalExporterMetadata(resp.ExporterResponse))
		for _, md := range resp.ExportersResponse {
			result = append(result, marshalExporterMetadata(md))
		}
		v = result
	}
	// Marshal and write exactly once; both formats share the same encoding
	// and atomic-write path.
	b, err := json.MarshalIndent(v, "", " ")
	if err != nil {
		return err
	}
	return continuity.AtomicWriteFile(filename, b, 0666)
}

func marshalExporterMetadata(exp map[string]string) map[string]interface{} {
out := make(map[string]interface{})
for k, v := range exporterResponse {
for k, v := range exp {
dt, err := base64.StdEncoding.DecodeString(v)
if err != nil {
out[k] = v
Expand All @@ -331,9 +354,5 @@ func writeMetadataFile(filename string, exporterResponse map[string]string) erro
}
out[k] = json.RawMessage(dt)
}
b, err := json.MarshalIndent(out, "", " ")
if err != nil {
return err
}
return continuity.AtomicWriteFile(filename, b, 0666)
return out
}
Loading

0 comments on commit 97804ac

Please sign in to comment.