diff --git a/pkg/builder/BUILD.bazel b/pkg/builder/BUILD.bazel index 99c1dc9b..bbe05264 100644 --- a/pkg/builder/BUILD.bazel +++ b/pkg/builder/BUILD.bazel @@ -16,6 +16,7 @@ go_library( "logging_build_executor.go", "metrics_build_executor.go", "naive_build_directory.go", + "output_hierarchy.go", "root_build_directory_creator.go", "shared_build_directory_creator.go", "storage_flushing_build_executor.go", @@ -65,6 +66,7 @@ go_test( "in_memory_build_queue_test.go", "local_build_executor_test.go", "naive_build_directory_test.go", + "output_hierarchy_test.go", "root_build_directory_creator_test.go", "shared_build_directory_creator_test.go", "storage_flushing_build_executor_test.go", diff --git a/pkg/builder/local_build_executor.go b/pkg/builder/local_build_executor.go index bdfc621e..3382a784 100644 --- a/pkg/builder/local_build_executor.go +++ b/pkg/builder/local_build_executor.go @@ -4,7 +4,6 @@ import ( "context" "os" "path" - "strings" "time" remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" @@ -15,9 +14,7 @@ import ( "github.com/buildbarn/bb-storage/pkg/cas" "github.com/buildbarn/bb-storage/pkg/clock" "github.com/buildbarn/bb-storage/pkg/digest" - "github.com/buildbarn/bb-storage/pkg/filesystem" "github.com/buildbarn/bb-storage/pkg/util" - "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "github.com/golang/protobuf/ptypes/empty" @@ -49,113 +46,6 @@ func NewLocalBuildExecutor(contentAddressableStorage cas.ContentAddressableStora } } -func (be *localBuildExecutor) uploadDirectory(ctx context.Context, outputDirectory filesystem.Directory, parentDigest digest.Digest, children map[string]*remoteexecution.Directory, components []string) (*remoteexecution.Directory, error) { - files, err := outputDirectory.ReadDir() - if err != nil { - return nil, util.StatusWrapf(err, "Failed to read output directory %#v", path.Join(components...)) - } - - var directory remoteexecution.Directory - for _, file := range files { - name := file.Name() - childComponents := append(components, name) - // The elided default case of the below switch statement would - // represent a UNIX socket, FIFO or device node. These files cannot - // be represented in a Directory message. They are simply skipped. - // Returning an error would make the overall user experience worse. - switch fileType := file.Type(); fileType { - case filesystem.FileTypeRegularFile, filesystem.FileTypeExecutableFile: - childDigest, err := be.contentAddressableStorage.PutFile(ctx, outputDirectory, name, parentDigest) - if err != nil { - return nil, util.StatusWrapf(err, "Failed to store output file %#v", path.Join(childComponents...)) - } - directory.Files = append(directory.Files, &remoteexecution.FileNode{ - Name: name, - Digest: childDigest.GetPartialDigest(), - IsExecutable: fileType == filesystem.FileTypeExecutableFile, - }) - case filesystem.FileTypeDirectory: - childDirectory, err := outputDirectory.EnterDirectory(name) - if err != nil { - return nil, util.StatusWrapf(err, "Failed to enter output directory %#v", path.Join(childComponents...)) - } - child, err := be.uploadDirectory(ctx, childDirectory, parentDigest, children, childComponents) - childDirectory.Close() - if err != nil { - return nil, err - } - - // Compute digest of the child directory. This requires serializing it. 
- data, err := proto.Marshal(child) - if err != nil { - return nil, util.StatusWrapf(err, "Failed to marshal output directory %#v", path.Join(childComponents...)) - } - digestGenerator := parentDigest.NewGenerator() - if _, err := digestGenerator.Write(data); err != nil { - return nil, util.StatusWrapf(err, "Failed to compute digest of output directory %#v", path.Join(childComponents...)) - } - childDigest := digestGenerator.Sum() - - children[childDigest.GetKey(digest.KeyWithoutInstance)] = child - directory.Directories = append(directory.Directories, &remoteexecution.DirectoryNode{ - Name: name, - Digest: childDigest.GetPartialDigest(), - }) - case filesystem.FileTypeSymlink: - target, err := outputDirectory.Readlink(name) - if err != nil { - return nil, util.StatusWrapf(err, "Failed to read output symlink %#v", path.Join(childComponents...)) - } - directory.Symlinks = append(directory.Symlinks, &remoteexecution.SymlinkNode{ - Name: name, - Target: target, - }) - } - } - return &directory, nil -} - -func (be *localBuildExecutor) uploadTree(ctx context.Context, outputDirectory filesystem.Directory, parentDigest digest.Digest, components []string) (digest.Digest, error) { - // Gather all individual directory objects and turn them into a tree. - children := map[string]*remoteexecution.Directory{} - root, err := be.uploadDirectory(ctx, outputDirectory, parentDigest, children, components) - if err != nil { - return digest.BadDigest, err - } - tree := &remoteexecution.Tree{ - Root: root, - } - for _, child := range children { - tree.Children = append(tree.Children, child) - } - treeDigest, err := be.contentAddressableStorage.PutTree(ctx, tree, parentDigest) - if err != nil { - return digest.BadDigest, util.StatusWrapf(err, "Failed to store output directory %#v", path.Join(components...)) - } - return treeDigest, err -} - -func (be *localBuildExecutor) createOutputParentDirectory(inputRootDirectory filesystem.Directory, outputParentPath string) (filesystem.DirectoryCloser, error) { - // Create and enter successive components, closing the former. - components := strings.FieldsFunc(outputParentPath, func(r rune) bool { return r == '/' }) - d := filesystem.NopDirectoryCloser(inputRootDirectory) - for n, component := range components { - if component != "." { - if err := d.Mkdir(component, 0777); err != nil && !os.IsExist(err) { - d.Close() - return nil, util.StatusWrapf(err, "Failed to create output directory %#v", path.Join(components[:n+1]...)) - } - d2, err := d.EnterDirectory(component) - d.Close() - if err != nil { - return nil, util.StatusWrapf(err, "Failed to enter output directory %#v", path.Join(components[:n+1]...)) - } - d = d2 - } - } - return d, nil -} - func (be *localBuildExecutor) createCharacterDevices(inputRootDirectory BuildDirectory) error { if err := inputRootDirectory.Mkdir("dev", 0777); err != nil && !os.IsExist(err) { return util.StatusWrap(err, "Unable to create /dev directory in input root") @@ -276,47 +166,21 @@ func (be *localBuildExecutor) Execute(ctx context.Context, filePool re_filesyste } } - // Create and open parent directories of where we expect to see output. - // Build rules generally expect the parent directories to already be - // there. We later use the directory handles to extract output files. - outputParentDirectories := map[string]filesystem.Directory{} + // Create parent directories of output files and directories. + // These are not declared in the input root explicitly. 
+ outputHierarchy := NewOutputHierarchy(command.WorkingDirectory) for _, outputDirectory := range command.OutputDirectories { - // Although REv2 explicitly document that only parents - // of output directories are created (i.e., not the - // output directory itself), Bazel recently changed its - // behaviour to do so after all for local execution. See - // these issues for details: - // - // https://github.com/bazelbuild/bazel/issues/6262 - // https://github.com/bazelbuild/bazel/issues/6393 - // - // For now, be consistent with Bazel. What the intended - // protocol behaviour is should be clarified at some - // point. What is especially confusing is that creating - // these directories up front somewhat rules out the - // possibility of omitting output directories and using - // OutputDirectorySymlinks. - if _, ok := outputParentDirectories[outputDirectory]; !ok { - dir, err := be.createOutputParentDirectory(inputRootDirectory, path.Join(command.WorkingDirectory, outputDirectory)) - if err != nil { - attachErrorToExecuteResponse(response, err) - return response - } - outputParentDirectories[outputDirectory] = dir - defer dir.Close() - } + outputHierarchy.AddDirectory(outputDirectory) } for _, outputFile := range command.OutputFiles { - dirPath := path.Dir(outputFile) - if _, ok := outputParentDirectories[dirPath]; !ok { - dir, err := be.createOutputParentDirectory(inputRootDirectory, path.Join(command.WorkingDirectory, dirPath)) - if err != nil { - attachErrorToExecuteResponse(response, err) - return response - } - outputParentDirectories[dirPath] = dir - defer dir.Close() - } + outputHierarchy.AddFile(outputFile) + } + for _, outputPath := range command.OutputPaths { + outputHierarchy.AddPath(outputPath) + } + if err := outputHierarchy.CreateParentDirectories(inputRootDirectory); err != nil { + attachErrorToExecuteResponse(response, err) + return response } // Create a directory inside the build directory that build @@ -370,9 +234,9 @@ func (be *localBuildExecutor) Execute(ctx context.Context, filePool re_filesyste }, } - // Upload command output. In the common case, the files are - // empty. If that's the case, don't bother setting the digest to - // keep the ActionResult small. + // Upload command output. In the common case, the stdout and + // stderr files are empty. If that's the case, don't bother + // setting the digest to keep the ActionResult small. if stdoutDigest, err := be.contentAddressableStorage.PutFile(ctx, buildDirectory, "stdout", actionDigest); err != nil { attachErrorToExecuteResponse(response, util.StatusWrap(err, "Failed to store stdout")) } else if stdoutDigest.GetSizeBytes() > 0 { @@ -383,58 +247,8 @@ func (be *localBuildExecutor) Execute(ctx context.Context, filePool re_filesyste } else if stderrDigest.GetSizeBytes() > 0 { response.Result.StderrDigest = stderrDigest.GetPartialDigest() } - - // Upload output files. 
- for _, outputFile := range command.OutputFiles { - outputParentDirectory := outputParentDirectories[path.Dir(outputFile)] - outputBaseName := path.Base(outputFile) - if fileInfo, err := outputParentDirectory.Lstat(outputBaseName); err == nil { - switch fileType := fileInfo.Type(); fileType { - case filesystem.FileTypeRegularFile, filesystem.FileTypeExecutableFile: - if digest, err := be.contentAddressableStorage.PutFile(ctx, outputParentDirectory, outputBaseName, actionDigest); err == nil { - response.Result.OutputFiles = append(response.Result.OutputFiles, &remoteexecution.OutputFile{ - Path: outputFile, - Digest: digest.GetPartialDigest(), - IsExecutable: fileType == filesystem.FileTypeExecutableFile, - }) - } else { - attachErrorToExecuteResponse( - response, - util.StatusWrapf(err, "Failed to store output file %#v", outputFile)) - } - case filesystem.FileTypeSymlink: - if target, err := outputParentDirectory.Readlink(outputBaseName); err == nil { - response.Result.OutputFileSymlinks = append(response.Result.OutputFileSymlinks, &remoteexecution.OutputSymlink{ - Path: outputFile, - Target: target, - }) - } else { - attachErrorToExecuteResponse( - response, - util.StatusWrapf(err, "Failed to read output symlink %#v", outputFile)) - } - default: - attachErrorToExecuteResponse( - response, - status.Errorf(codes.Internal, "Output file %#v is not a regular file or symlink", outputFile)) - } - } else if !os.IsNotExist(err) { - attachErrorToExecuteResponse( - response, - util.StatusWrapf(err, "Failed to read attributes of output file %#v", outputFile)) - } - } - - // Upload output directories. - for _, outputDirectory := range command.OutputDirectories { - if digest, err := be.uploadTree(ctx, outputParentDirectories[outputDirectory], actionDigest, []string{outputDirectory}); err == nil { - response.Result.OutputDirectories = append(response.Result.OutputDirectories, &remoteexecution.OutputDirectory{ - Path: outputDirectory, - TreeDigest: digest.GetPartialDigest(), - }) - } else { - attachErrorToExecuteResponse(response, err) - } + if err := outputHierarchy.UploadOutputs(ctx, inputRootDirectory, be.contentAddressableStorage, actionDigest, response.Result); err != nil { + attachErrorToExecuteResponse(response, err) } return response diff --git a/pkg/builder/local_build_executor_test.go b/pkg/builder/local_build_executor_test.go index 22e66263..7105bc5f 100644 --- a/pkg/builder/local_build_executor_test.go +++ b/pkg/builder/local_build_executor_test.go @@ -3,7 +3,6 @@ package builder_test import ( "context" "os" - "syscall" "testing" "time" @@ -29,6 +28,7 @@ import ( func TestLocalBuildExecutorInvalidActionDigest(t *testing.T) { ctrl, ctx := gomock.WithContext(context.Background(), t) defer ctrl.Finish() + contentAddressableStorage := mock.NewMockContentAddressableStorage(ctrl) buildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) runner := mock.NewMockRunner(ctrl) @@ -72,6 +72,7 @@ func TestLocalBuildExecutorInvalidActionDigest(t *testing.T) { func TestLocalBuildExecutorMissingAction(t *testing.T) { ctrl, ctx := gomock.WithContext(context.Background(), t) defer ctrl.Finish() + contentAddressableStorage := mock.NewMockContentAddressableStorage(ctrl) buildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) runner := mock.NewMockRunner(ctrl) @@ -109,6 +110,7 @@ func TestLocalBuildExecutorMissingAction(t *testing.T) { func TestLocalBuildExecutorMissingCommand(t *testing.T) { ctrl, ctx := gomock.WithContext(context.Background(), t) defer ctrl.Finish() + contentAddressableStorage 
:= mock.NewMockContentAddressableStorage(ctrl) buildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) runner := mock.NewMockRunner(ctrl) @@ -145,6 +147,7 @@ func TestLocalBuildExecutorMissingCommand(t *testing.T) { func TestLocalBuildExecutorBuildDirectoryCreatorFailedFailed(t *testing.T) { ctrl, ctx := gomock.WithContext(context.Background(), t) defer ctrl.Finish() + contentAddressableStorage := mock.NewMockContentAddressableStorage(ctrl) buildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) buildDirectoryCreator.EXPECT().GetBuildDirectory( @@ -192,6 +195,7 @@ func TestLocalBuildExecutorBuildDirectoryCreatorFailedFailed(t *testing.T) { func TestLocalBuildExecutorInputRootPopulationFailed(t *testing.T) { ctrl, ctx := gomock.WithContext(context.Background(), t) defer ctrl.Finish() + contentAddressableStorage := mock.NewMockContentAddressableStorage(ctrl) buildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) buildDirectory := mock.NewMockBuildDirectory(ctrl) @@ -250,6 +254,7 @@ func TestLocalBuildExecutorInputRootPopulationFailed(t *testing.T) { func TestLocalBuildExecutorOutputDirectoryCreationFailure(t *testing.T) { ctrl, ctx := gomock.WithContext(context.Background(), t) defer ctrl.Finish() + contentAddressableStorage := mock.NewMockContentAddressableStorage(ctrl) buildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) buildDirectory := mock.NewMockBuildDirectory(ctrl) @@ -302,13 +307,14 @@ func TestLocalBuildExecutorOutputDirectoryCreationFailure(t *testing.T) { Result: &remoteexecution.ActionResult{ ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{}, }, - Status: status.New(codes.Internal, "Failed to create output directory \"foo\": Out of disk space").Proto(), + Status: status.New(codes.Internal, "Failed to create output parent directory \"foo\": Out of disk space").Proto(), }, executeResponse) } func TestLocalBuildExecutorOutputSymlinkReadingFailure(t *testing.T) { ctrl, ctx := gomock.WithContext(context.Background(), t) defer ctrl.Finish() + contentAddressableStorage := mock.NewMockContentAddressableStorage(ctrl) buildDirectory := mock.NewMockBuildDirectory(ctrl) contentAddressableStorage.EXPECT().PutFile(ctx, buildDirectory, "stdout", gomock.Any()).Return( @@ -317,6 +323,13 @@ func TestLocalBuildExecutorOutputSymlinkReadingFailure(t *testing.T) { contentAddressableStorage.EXPECT().PutFile(ctx, buildDirectory, "stderr", gomock.Any()).Return( digest.MustNewDigest("nintendo64", "0000000000000000000000000000000000000000000000000000000000000006", 678), nil) + contentAddressableStorage.EXPECT().PutTree(ctx, + &remoteexecution.Tree{ + Root: &remoteexecution.Directory{}, + }, gomock.Any()).Return( + digest.MustNewDigest("nintendo64", "0000000000000000000000000000000000000000000000000000000000000007", 902), + nil) + buildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) buildDirectoryCreator.EXPECT().GetBuildDirectory( digest.MustNewDigest("nintendo64", "5555555555555555555555555555555555555555555555555555555555555555", 7), @@ -346,6 +359,7 @@ func TestLocalBuildExecutorOutputSymlinkReadingFailure(t *testing.T) { ExitCode: 0, }, nil) fooDirectory := mock.NewMockDirectoryCloser(ctrl) + inputRootDirectory.EXPECT().Lstat("foo").Return(filesystem.NewFileInfo("foo", filesystem.FileTypeDirectory), nil) inputRootDirectory.EXPECT().EnterDirectory("foo").Return(fooDirectory, nil) fooDirectory.EXPECT().ReadDir().Return([]filesystem.FileInfo{ filesystem.NewFileInfo("bar", filesystem.FileTypeSymlink), @@ -387,6 +401,15 @@ func 
TestLocalBuildExecutorOutputSymlinkReadingFailure(t *testing.T) { metadata) require.Equal(t, &remoteexecution.ExecuteResponse{ Result: &remoteexecution.ActionResult{ + OutputDirectories: []*remoteexecution.OutputDirectory{ + { + Path: "foo", + TreeDigest: &remoteexecution.Digest{ + Hash: "0000000000000000000000000000000000000000000000000000000000000007", + SizeBytes: 902, + }, + }, + }, StdoutDigest: &remoteexecution.Digest{ Hash: "0000000000000000000000000000000000000000000000000000000000000005", SizeBytes: 567, @@ -428,11 +451,25 @@ func TestLocalBuildExecutorSuccess(t *testing.T) { binDirectory.EXPECT().EnterDirectory("_objs").Return(objsDirectory, nil) objsDirectory.EXPECT().Close() objsDirectory.EXPECT().Mkdir("hello", os.FileMode(0777)).Return(nil) + + // Uploading of files in bazel-out/k8-fastbuild/bin/_objs/hello. + bazelOutDirectory = mock.NewMockDirectoryCloser(ctrl) + inputRootDirectory.EXPECT().EnterDirectory("bazel-out").Return(bazelOutDirectory, nil) + bazelOutDirectory.EXPECT().Close() + k8FastbuildDirectory = mock.NewMockBuildDirectory(ctrl) + bazelOutDirectory.EXPECT().EnterDirectory("k8-fastbuild").Return(k8FastbuildDirectory, nil) + k8FastbuildDirectory.EXPECT().Close() + binDirectory = mock.NewMockDirectoryCloser(ctrl) + k8FastbuildDirectory.EXPECT().EnterDirectory("bin").Return(binDirectory, nil) + binDirectory.EXPECT().Close() + objsDirectory = mock.NewMockDirectoryCloser(ctrl) + binDirectory.EXPECT().EnterDirectory("_objs").Return(objsDirectory, nil) + objsDirectory.EXPECT().Close() helloDirectory := mock.NewMockDirectoryCloser(ctrl) objsDirectory.EXPECT().EnterDirectory("hello").Return(helloDirectory, nil) - helloDirectory.EXPECT().Close() helloDirectory.EXPECT().Lstat("hello.pic.d").Return(filesystem.NewFileInfo("hello.pic.d", filesystem.FileTypeRegularFile), nil) helloDirectory.EXPECT().Lstat("hello.pic.o").Return(filesystem.NewFileInfo("hello.pic.o", filesystem.FileTypeExecutableFile), nil) + helloDirectory.EXPECT().Close() // Read operations against the Content Addressable Storage. contentAddressableStorage := mock.NewMockContentAddressableStorage(ctrl) @@ -590,237 +627,6 @@ func TestLocalBuildExecutorSuccess(t *testing.T) { }, executeResponse) } -// TestLocalBuildExecutorSuccess tests a full invocation of a simple -// build step, equivalent to compiling a simple C++ file. -// This adds a working directory to the commands, with output files -// produced relative to that working directory. -func TestLocalBuildExecutorWithWorkingDirectorySuccess(t *testing.T) { - ctrl, ctx := gomock.WithContext(context.Background(), t) - defer ctrl.Finish() - - // File system operations that should occur against the build directory. 
- - // Creation of output directory's parent and the directory itself - // This will be the same directory used by the first output file - inputRootDirectory := mock.NewMockBuildDirectory(ctrl) - inputRootDirectory.EXPECT().Mkdir("outputParent", os.FileMode(0777)).Return(nil) - outputParentDirectory0 := mock.NewMockDirectoryCloser(ctrl) - inputRootDirectory.EXPECT().EnterDirectory("outputParent").Return(outputParentDirectory0, nil) - outputParentDirectory0.EXPECT().Close() - outputParentDirectory0.EXPECT().Mkdir("output", os.FileMode(0777)).Return(nil) - outputDirectory0 := mock.NewMockDirectoryCloser(ctrl) - outputParentDirectory0.EXPECT().EnterDirectory("output").Return(outputDirectory0, nil) - outputDirectory0.EXPECT().Close() - - // Creation of the parent directory of second output file - inputRootDirectory.EXPECT().Mkdir("outputParent", os.FileMode(0777)).Return(syscall.EEXIST) - outputParentDirectory1 := mock.NewMockDirectoryCloser(ctrl) - inputRootDirectory.EXPECT().EnterDirectory("outputParent").Return(outputParentDirectory1, nil) - outputParentDirectory1.EXPECT().Mkdir("output", os.FileMode(0777)).Return(syscall.EEXIST) - outputDirectory1 := mock.NewMockDirectoryCloser(ctrl) - outputParentDirectory1.EXPECT().EnterDirectory("output").Return(outputDirectory1, nil) - outputParentDirectory1.EXPECT().Close() - outputDirectory1.EXPECT().Close() - outputDirectory1.EXPECT().Mkdir("foo", os.FileMode(0777)).Return(nil) - fooDirectory := mock.NewMockDirectoryCloser(ctrl) - outputDirectory1.EXPECT().EnterDirectory("foo").Return(fooDirectory, nil) - fooDirectory.EXPECT().Close() - fooDirectory.EXPECT().Mkdir("objects", os.FileMode(0777)).Return(nil) - objectsDirectory := mock.NewMockDirectoryCloser(ctrl) - fooDirectory.EXPECT().EnterDirectory("objects").Return(objectsDirectory, nil) - objectsDirectory.EXPECT().Close() - - outputDirectory0.EXPECT().ReadDir().Return( - []filesystem.FileInfo{ - filesystem.NewFileInfo("output.file", filesystem.FileTypeRegularFile), - }, nil) - outputDirectory0.EXPECT().Lstat("hello.pic.d").Return( - filesystem.NewFileInfo("hello.pic.d", filesystem.FileTypeRegularFile), - nil) - objectsDirectory.EXPECT().Lstat("hello.pic.o").Return( - filesystem.NewFileInfo("hello.pic.o", filesystem.FileTypeExecutableFile), - nil) - - // Read operations against the Content Addressable Storage. - contentAddressableStorage := mock.NewMockContentAddressableStorage(ctrl) - - // Write operations against the Content Addressable Storage. 
- buildDirectory := mock.NewMockBuildDirectory(ctrl) - contentAddressableStorage.EXPECT().PutFile(ctx, buildDirectory, "stdout", gomock.Any()).Return( - digest.MustNewDigest("ubuntu1804", "0000000000000000000000000000000000000000000000000000000000000005", 567), - nil) - contentAddressableStorage.EXPECT().PutFile(ctx, buildDirectory, "stderr", gomock.Any()).Return( - digest.MustNewDigest("ubuntu1804", "0000000000000000000000000000000000000000000000000000000000000006", 678), - nil) - contentAddressableStorage.EXPECT().PutFile(ctx, outputDirectory0, "hello.pic.d", gomock.Any()).Return( - digest.MustNewDigest("ubuntu1804", "0000000000000000000000000000000000000000000000000000000000000007", 789), - nil) - contentAddressableStorage.EXPECT().PutFile(ctx, objectsDirectory, "hello.pic.o", gomock.Any()).Return( - digest.MustNewDigest("ubuntu1804", "0000000000000000000000000000000000000000000000000000000000000008", 890), - nil) - contentAddressableStorage.EXPECT().PutFile(ctx, outputDirectory0, "output.file", gomock.Any()).Return( - digest.MustNewDigest("ubuntu1804", "0000000000000000000000000000000000000000000000000000000000000009", 901), - nil) - contentAddressableStorage.EXPECT().PutTree(ctx, - &remoteexecution.Tree{ - Root: &remoteexecution.Directory{ - Files: []*remoteexecution.FileNode{ - { - Name: "output.file", - Digest: &remoteexecution.Digest{ - Hash: "0000000000000000000000000000000000000000000000000000000000000009", - SizeBytes: 901, - }, - }, - }, - }, - }, gomock.Any()).Return( - digest.MustNewDigest("ubuntu1804", "0000000000000000000000000000000000000000000000000000000000000010", 902), - nil) - - // Command execution. - buildDirectoryCreator := mock.NewMockBuildDirectoryCreator(ctrl) - buildDirectoryCreator.EXPECT().GetBuildDirectory( - digest.MustNewDigest("ubuntu1804", "0000000000000000000000000000000000000000000000000000000000000001", 123), - false, - ).Return(buildDirectory, ".", nil) - filePool := mock.NewMockFilePool(ctrl) - buildDirectory.EXPECT().InstallHooks(filePool) - buildDirectory.EXPECT().Mkdir("root", os.FileMode(0777)) - buildDirectory.EXPECT().EnterBuildDirectory("root").Return(inputRootDirectory, nil) - inputRootDirectory.EXPECT().MergeDirectoryContents( - ctx, - digest.MustNewDigest("ubuntu1804", "0000000000000000000000000000000000000000000000000000000000000003", 345), - ).Return(nil) - buildDirectory.EXPECT().Mkdir("tmp", os.FileMode(0777)) - resourceUsage, err := ptypes.MarshalAny(&empty.Empty{}) - require.NoError(t, err) - runner := mock.NewMockRunner(ctrl) - runner.EXPECT().Run(gomock.Any(), &runner_pb.RunRequest{ - Arguments: []string{ - "/usr/local/bin/clang", - "-MD", - "-MF", - "../hello.pic.d", - "-c", - "hello.cc", - "-o", - "objects/hello.pic.o", - }, - EnvironmentVariables: map[string]string{ - "PATH": "/bin:/usr/bin", - "PWD": "/proc/self/cwd", - }, - WorkingDirectory: "outputParent/output/foo", - StdoutPath: "stdout", - StderrPath: "stderr", - InputRootDirectory: "root", - TemporaryDirectory: "tmp", - }).Return(&runner_pb.RunResponse{ - ExitCode: 0, - ResourceUsage: []*any.Any{resourceUsage}, - }, nil) - inputRootDirectory.EXPECT().Close() - buildDirectory.EXPECT().Close() - clock := mock.NewMockClock(ctrl) - clock.EXPECT().NewContextWithTimeout(gomock.Any(), time.Hour).DoAndReturn(func(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) { - return context.WithCancel(parent) - }) - localBuildExecutor := builder.NewLocalBuildExecutor(contentAddressableStorage, buildDirectoryCreator, runner, clock, time.Hour, time.Hour, 
nil) - - metadata := make(chan *remoteworker.CurrentState_Executing, 10) - executeResponse := localBuildExecutor.Execute( - ctx, - filePool, - "ubuntu1804", - &remoteworker.DesiredState_Executing{ - ActionDigest: &remoteexecution.Digest{ - Hash: "0000000000000000000000000000000000000000000000000000000000000001", - SizeBytes: 123, - }, - Action: &remoteexecution.Action{ - InputRootDigest: &remoteexecution.Digest{ - Hash: "0000000000000000000000000000000000000000000000000000000000000003", - SizeBytes: 345, - }, - }, - Command: &remoteexecution.Command{ - Arguments: []string{ - "/usr/local/bin/clang", - "-MD", - "-MF", - "../hello.pic.d", - "-c", - "hello.cc", - "-o", - "objects/hello.pic.o", - }, - EnvironmentVariables: []*remoteexecution.Command_EnvironmentVariable{ - {Name: "PATH", Value: "/bin:/usr/bin"}, - {Name: "PWD", Value: "/proc/self/cwd"}, - }, - OutputFiles: []string{ - "../hello.pic.d", - "objects/hello.pic.o", - }, - OutputDirectories: []string{ - "..", - }, - Platform: &remoteexecution.Platform{ - Properties: []*remoteexecution.Platform_Property{ - { - Name: "container-image", - Value: "docker://gcr.io/cloud-marketplace/google/rbe-debian8@sha256:4893599fb00089edc8351d9c26b31d3f600774cb5addefb00c70fdb6ca797abf", - }, - }, - }, - WorkingDirectory: "outputParent/output/foo", - }, - }, - metadata) - require.Equal(t, &remoteexecution.ExecuteResponse{ - Result: &remoteexecution.ActionResult{ - OutputFiles: []*remoteexecution.OutputFile{ - { - Path: "../hello.pic.d", - Digest: &remoteexecution.Digest{ - Hash: "0000000000000000000000000000000000000000000000000000000000000007", - SizeBytes: 789, - }, - }, - { - Path: "objects/hello.pic.o", - Digest: &remoteexecution.Digest{ - Hash: "0000000000000000000000000000000000000000000000000000000000000008", - SizeBytes: 890, - }, - IsExecutable: true, - }, - }, - OutputDirectories: []*remoteexecution.OutputDirectory{ - { - Path: "..", - TreeDigest: &remoteexecution.Digest{ - Hash: "0000000000000000000000000000000000000000000000000000000000000010", - SizeBytes: 902, - }, - }, - }, - StdoutDigest: &remoteexecution.Digest{ - Hash: "0000000000000000000000000000000000000000000000000000000000000005", - SizeBytes: 567, - }, - StderrDigest: &remoteexecution.Digest{ - Hash: "0000000000000000000000000000000000000000000000000000000000000006", - SizeBytes: 678, - }, - ExecutionMetadata: &remoteexecution.ExecutedActionMetadata{ - AuxiliaryMetadata: []*any.Any{resourceUsage}, - }, - }, - }, executeResponse) -} - func TestLocalBuildExecutorCachingInvalidTimeout(t *testing.T) { ctrl, ctx := gomock.WithContext(context.Background(), t) defer ctrl.Finish() diff --git a/pkg/builder/output_hierarchy.go b/pkg/builder/output_hierarchy.go new file mode 100644 index 00000000..707a4cd8 --- /dev/null +++ b/pkg/builder/output_hierarchy.go @@ -0,0 +1,496 @@ +package builder + +import ( + "context" + "os" + "path" + "sort" + "strings" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-storage/pkg/cas" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/buildbarn/bb-storage/pkg/util" + "github.com/golang/protobuf/proto" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// PathBuffer is a helper type for generating pathname strings. It is +// used to by OutputHierarchy to generate error messages that contain +// pathnames. 
+type pathBuffer struct {
+	components []string
+}
+
+func newPathBuffer() pathBuffer {
+	return pathBuffer{
+		components: []string{"."},
+	}
+}
+
+// Enter a directory. Create one more slot for storing a filename.
+func (pb *pathBuffer) enter() {
+	pb.components = append(pb.components, "")
+}
+
+// SetLastComponent updates the last component of the pathname.
+func (pb *pathBuffer) setLastComponent(component string) {
+	pb.components[len(pb.components)-1] = component
+}
+
+// Leave a directory. Pop the last filename off the pathname.
+func (pb *pathBuffer) leave() {
+	pb.components = pb.components[:len(pb.components)-1]
+}
+
+// Join all components of the pathname together and return its string
+// representation.
+func (pb *pathBuffer) join() string {
+	return path.Join(pb.components...)
+}
+
+// OutputNode is a node in a directory hierarchy that contains one or
+// more locations where output directories and files are expected.
+type outputNode struct {
+	directoriesToUpload map[string][]string
+	filesToUpload       map[string][]string
+	pathsToUpload       map[string][]string
+	subdirectories      map[string]*outputNode
+}
+
+func (on *outputNode) getSubdirectoryNames() []string {
+	l := make([]string, 0, len(on.subdirectories))
+	for k := range on.subdirectories {
+		l = append(l, k)
+	}
+	sort.Strings(l)
+	return l
+}
+
+func sortToUpload(m map[string][]string) []string {
+	l := make([]string, 0, len(m))
+	for k := range m {
+		l = append(l, k)
+	}
+	sort.Strings(l)
+	return l
+}
+
+// NewOutputDirectory creates a new outputNode that is in the initial
+// state. It contains no locations where output directories or files are
+// expected.
+func newOutputDirectory() *outputNode {
+	return &outputNode{
+		directoriesToUpload: map[string][]string{},
+		filesToUpload:       map[string][]string{},
+		pathsToUpload:       map[string][]string{},
+		subdirectories:      map[string]*outputNode{},
+	}
+}
+
+// CreateParentDirectories is recursively invoked by
+// OutputHierarchy.CreateParentDirectories() to create parent
+// directories of locations where output directories and files are
+// expected.
+func (on *outputNode) createParentDirectories(d filesystem.Directory, dPath *pathBuffer) error {
+	dPath.enter()
+	defer dPath.leave()
+
+	for _, name := range on.getSubdirectoryNames() {
+		dPath.setLastComponent(name)
+		if err := d.Mkdir(name, 0777); err != nil && !os.IsExist(err) {
+			return util.StatusWrapf(err, "Failed to create output parent directory %#v", dPath.join())
+		}
+
+		// Recurse if we need to create one or more directories within.
+		if child := on.subdirectories[name]; len(child.subdirectories) > 0 || len(child.directoriesToUpload) > 0 {
+			childDirectory, err := d.EnterDirectory(name)
+			if err != nil {
+				return util.StatusWrapf(err, "Failed to enter output parent directory %#v", dPath.join())
+			}
+			err = child.createParentDirectories(childDirectory, dPath)
+			childDirectory.Close()
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	// Although REv2 explicitly documents that only parents of
+	// output directories are created (i.e., not the output
+	// directory itself), Bazel changed its behaviour and now
+	// creates output directories when using local execution. See
+	// these issues for details:
+	//
+	// https://github.com/bazelbuild/bazel/issues/6262
+	// https://github.com/bazelbuild/bazel/issues/6393
+	//
+	// Considering that the 'output_directories' field is deprecated
+	// in REv2.1 anyway, be consistent with Bazel's local execution.
+ // Once Bazel switches to REv2.1, it will be forced to solve + // this matter in a protocol conforming way. + for _, name := range sortToUpload(on.directoriesToUpload) { + if _, ok := on.subdirectories[name]; !ok { + dPath.setLastComponent(name) + if err := d.Mkdir(name, 0777); err != nil && !os.IsExist(err) { + return util.StatusWrapf(err, "Failed to create output directory %#v", dPath.join()) + } + } + } + return nil +} + +// UploadOutputs is recursively invoked by +// OutputHierarchy.UploadOutputs() to upload output directories and +// files from the locations where they are expected. +func (on *outputNode) uploadOutputs(s *uploadOutputsState, d filesystem.Directory) { + s.dPath.enter() + defer s.dPath.leave() + + // Upload REv2.0 output directories that are expected to be + // present in this directory. + for _, component := range sortToUpload(on.directoriesToUpload) { + s.dPath.setLastComponent(component) + paths := on.directoriesToUpload[component] + if fileInfo, err := d.Lstat(component); err == nil { + switch fileType := fileInfo.Type(); fileType { + case filesystem.FileTypeDirectory: + s.uploadOutputDirectory(d, component, paths) + case filesystem.FileTypeSymlink: + s.uploadOutputSymlink(d, component, &s.actionResult.OutputDirectorySymlinks, paths) + default: + s.saveError(status.Errorf(codes.InvalidArgument, "Output directory %#v is not a directory or symlink", s.dPath.join())) + } + } else if !os.IsNotExist(err) { + s.saveError(util.StatusWrapf(err, "Failed to read attributes of output directory %#v", s.dPath.join())) + } + } + + // Upload REv2.0 output files that are expected to be present in + // this directory. + for _, component := range sortToUpload(on.filesToUpload) { + s.dPath.setLastComponent(component) + paths := on.filesToUpload[component] + if fileInfo, err := d.Lstat(component); err == nil { + switch fileType := fileInfo.Type(); fileType { + case filesystem.FileTypeRegularFile, filesystem.FileTypeExecutableFile: + s.uploadOutputFile(d, component, fileType, paths) + case filesystem.FileTypeSymlink: + s.uploadOutputSymlink(d, component, &s.actionResult.OutputFileSymlinks, paths) + default: + s.saveError(status.Errorf(codes.InvalidArgument, "Output file %#v is not a regular file or symlink", s.dPath.join())) + } + } else if !os.IsNotExist(err) { + s.saveError(util.StatusWrapf(err, "Failed to read attributes of output file %#v", s.dPath.join())) + } + } + + // Upload REv2.1 output paths that are expected to be present in + // this directory. + for _, component := range sortToUpload(on.pathsToUpload) { + s.dPath.setLastComponent(component) + paths := on.pathsToUpload[component] + if fileInfo, err := d.Lstat(component); err == nil { + switch fileType := fileInfo.Type(); fileType { + case filesystem.FileTypeDirectory: + s.uploadOutputDirectory(d, component, paths) + case filesystem.FileTypeRegularFile, filesystem.FileTypeExecutableFile: + s.uploadOutputFile(d, component, fileType, paths) + case filesystem.FileTypeSymlink: + s.uploadOutputSymlink(d, component, &s.actionResult.OutputSymlinks, paths) + default: + s.saveError(status.Errorf(codes.InvalidArgument, "Output path %#v is not a directory, regular file or symlink", s.dPath.join())) + } + } else if !os.IsNotExist(err) { + s.saveError(util.StatusWrapf(err, "Failed to read attributes of output path %#v", s.dPath.join())) + } + } + + // Traverse into subdirectories. 
+ for _, component := range on.getSubdirectoryNames() { + s.dPath.setLastComponent(component) + childNode := on.subdirectories[component] + if childDirectory, err := d.EnterDirectory(component); err == nil { + childNode.uploadOutputs(s, childDirectory) + childDirectory.Close() + } else if !os.IsNotExist(err) { + s.saveError(util.StatusWrapf(err, "Failed to enter output parent directory %#v", s.dPath.join())) + } + } +} + +// UploadOutputsState is used by OutputHierarchy.UploadOutputs() to +// track common parameters during recursion. +type uploadOutputsState struct { + context context.Context + contentAddressableStorage cas.ContentAddressableStorage + parentDigest digest.Digest + actionResult *remoteexecution.ActionResult + + dPath pathBuffer + firstError error +} + +// SaveError preserves errors that occur during uploading. Even when +// errors occur, the remainder of the output files is still uploaded. +// This makes debugging easier. +func (s *uploadOutputsState) saveError(err error) { + if s.firstError == nil { + s.firstError = err + } +} + +// UploadDirectory is called to upload a single directory. Elements in +// the directory are stored in a remoteexecution.Directory, so that they +// can be placed in a remoteexecution.Tree. +func (s *uploadOutputsState) uploadDirectory(d filesystem.Directory, children map[digest.Digest]*remoteexecution.Directory) *remoteexecution.Directory { + files, err := d.ReadDir() + if err != nil { + s.saveError(util.StatusWrapf(err, "Failed to read output directory %#v", s.dPath.join())) + return &remoteexecution.Directory{} + } + + s.dPath.enter() + defer s.dPath.leave() + + var directory remoteexecution.Directory + for _, file := range files { + name := file.Name() + s.dPath.setLastComponent(name) + switch fileType := file.Type(); fileType { + case filesystem.FileTypeRegularFile, filesystem.FileTypeExecutableFile: + if childDigest, err := s.contentAddressableStorage.PutFile(s.context, d, name, s.parentDigest); err == nil { + directory.Files = append(directory.Files, &remoteexecution.FileNode{ + Name: name, + Digest: childDigest.GetPartialDigest(), + IsExecutable: fileType == filesystem.FileTypeExecutableFile, + }) + } else { + s.saveError(util.StatusWrapf(err, "Failed to store output file %#v", s.dPath.join())) + } + case filesystem.FileTypeDirectory: + if childDirectory, err := d.EnterDirectory(name); err == nil { + child := s.uploadDirectory(childDirectory, children) + childDirectory.Close() + + // Compute digest of the child directory. This requires serializing it. 
+				if data, err := proto.Marshal(child); err == nil {
+					digestGenerator := s.parentDigest.NewGenerator()
+					if _, err := digestGenerator.Write(data); err == nil {
+						childDigest := digestGenerator.Sum()
+						children[childDigest] = child
+						directory.Directories = append(directory.Directories, &remoteexecution.DirectoryNode{
+							Name:   name,
+							Digest: childDigest.GetPartialDigest(),
+						})
+					} else {
+						s.saveError(util.StatusWrapf(err, "Failed to compute digest of output directory %#v", s.dPath.join()))
+					}
+				} else {
+					s.saveError(util.StatusWrapf(err, "Failed to marshal output directory %#v", s.dPath.join()))
+				}
+			} else {
+				s.saveError(util.StatusWrapf(err, "Failed to enter output directory %#v", s.dPath.join()))
+			}
+		case filesystem.FileTypeSymlink:
+			if target, err := d.Readlink(name); err == nil {
+				directory.Symlinks = append(directory.Symlinks, &remoteexecution.SymlinkNode{
+					Name:   name,
+					Target: target,
+				})
+			} else {
+				s.saveError(util.StatusWrapf(err, "Failed to read output symlink %#v", s.dPath.join()))
+			}
+		}
+	}
+	return &directory
+}
+
+// UploadOutputDirectoryEntered is called to upload a single output
+// directory as a remoteexecution.Tree. The root directory is assumed to
+// already be opened.
+func (s *uploadOutputsState) uploadOutputDirectoryEntered(d filesystem.Directory, paths []string) {
+	children := map[digest.Digest]*remoteexecution.Directory{}
+	tree := &remoteexecution.Tree{
+		Root: s.uploadDirectory(d, children),
+	}
+
+	childDigests := digest.NewSetBuilder()
+	for childDigest := range children {
+		childDigests.Add(childDigest)
+	}
+	for _, childDigest := range childDigests.Build().Items() {
+		tree.Children = append(tree.Children, children[childDigest])
+	}
+
+	if treeDigest, err := s.contentAddressableStorage.PutTree(s.context, tree, s.parentDigest); err == nil {
+		for _, path := range paths {
+			s.actionResult.OutputDirectories = append(
+				s.actionResult.OutputDirectories,
+				&remoteexecution.OutputDirectory{
+					Path:       path,
+					TreeDigest: treeDigest.GetPartialDigest(),
+				})
+		}
+	} else {
+		s.saveError(util.StatusWrapf(err, "Failed to store output directory %#v", s.dPath.join()))
+	}
+}
+
+// UploadOutputDirectory is called to upload a single output directory
+// as a remoteexecution.Tree. The root directory is opened by this
+// function.
+func (s *uploadOutputsState) uploadOutputDirectory(d filesystem.Directory, name string, paths []string) {
+	if childDirectory, err := d.EnterDirectory(name); err == nil {
+		s.uploadOutputDirectoryEntered(childDirectory, paths)
+		childDirectory.Close()
+	} else {
+		s.saveError(util.StatusWrapf(err, "Failed to enter output directory %#v", s.dPath.join()))
+	}
+}
+
+// UploadOutputFile is called to upload a single output file.
+func (s *uploadOutputsState) uploadOutputFile(d filesystem.Directory, name string, fileType filesystem.FileType, paths []string) {
+	if digest, err := s.contentAddressableStorage.PutFile(s.context, d, name, s.parentDigest); err == nil {
+		for _, path := range paths {
+			s.actionResult.OutputFiles = append(
+				s.actionResult.OutputFiles,
+				&remoteexecution.OutputFile{
+					Path:         path,
+					Digest:       digest.GetPartialDigest(),
+					IsExecutable: fileType == filesystem.FileTypeExecutableFile,
+				})
+		}
+	} else {
+		s.saveError(util.StatusWrapf(err, "Failed to store output file %#v", s.dPath.join()))
+	}
+}
+
+// UploadOutputSymlink is called to read the attributes of a single
+// output symlink.
+func (s *uploadOutputsState) uploadOutputSymlink(d filesystem.Directory, name string, outputSymlinks *[]*remoteexecution.OutputSymlink, paths []string) { + if target, err := d.Readlink(name); err == nil { + for _, path := range paths { + *outputSymlinks = append( + *outputSymlinks, + &remoteexecution.OutputSymlink{ + Path: path, + Target: target, + }) + } + } else { + s.saveError(util.StatusWrapf(err, "Failed to read output symlink %#v", s.dPath.join())) + } +} + +// OutputHierarchy is used by LocalBuildExecutor to track output +// directories and files that are expected to be generated by the build +// action. OutputHierarchy can be used to create parent directories of +// outputs prior to execution, and to upload outputs into the CAS after +// execution. +type OutputHierarchy struct { + workingDirectory string + root outputNode + rootsToUpload []string +} + +// NewOutputHierarchy creates a new OutputHierarchy that is in the +// initial state. It contains no output directories or files. +func NewOutputHierarchy(workingDirectory string) *OutputHierarchy { + return &OutputHierarchy{ + workingDirectory: workingDirectory, + root: *newOutputDirectory(), + } +} + +func (oh *OutputHierarchy) pathToComponents(p string) ([]string, string, bool) { + // Join path with working directory and obtain pathname components. + rawComponents := strings.FieldsFunc( + path.Join(oh.workingDirectory, p), + func(r rune) bool { return r == '/' }) + components := make([]string, 0, len(rawComponents)) + for _, component := range rawComponents { + if component != "." { + components = append(components, component) + } + } + if len(components) == 0 { + // Pathname expands to the root directory. + return nil, "", false + } + // Pathname expands to a location underneath the root directory. + return components[:len(components)-1], components[len(components)-1], true +} + +func (oh *OutputHierarchy) lookup(components []string) *outputNode { + on := &oh.root + for _, component := range components { + child, ok := on.subdirectories[component] + if !ok { + child = newOutputDirectory() + on.subdirectories[component] = child + } + on = child + } + return on +} + +// AddDirectory is called to indicate that the build action is expected +// to create an REv2.0 output directory. +func (oh *OutputHierarchy) AddDirectory(path string) { + if dirName, fileName, notRoot := oh.pathToComponents(path); notRoot { + on := oh.lookup(dirName) + on.directoriesToUpload[fileName] = append(on.directoriesToUpload[fileName], path) + } else { + oh.rootsToUpload = append(oh.rootsToUpload, path) + } +} + +// AddFile is called to indicate that the build action is expected to +// create an REv2.0 output file. +func (oh *OutputHierarchy) AddFile(path string) { + if dirName, fileName, notRoot := oh.pathToComponents(path); notRoot { + on := oh.lookup(dirName) + on.filesToUpload[fileName] = append(on.filesToUpload[fileName], path) + } +} + +// AddPath is called to indicate that the build action is expected to +// create an REv2.1 output path. +func (oh *OutputHierarchy) AddPath(path string) { + if dirName, fileName, notRoot := oh.pathToComponents(path); notRoot { + on := oh.lookup(dirName) + on.pathsToUpload[fileName] = append(on.pathsToUpload[fileName], path) + } else { + oh.rootsToUpload = append(oh.rootsToUpload, path) + } +} + +// CreateParentDirectories creates parent directories of outputs. This +// function is called prior to executing the build action. 
+func (oh *OutputHierarchy) CreateParentDirectories(d filesystem.Directory) error { + dPath := newPathBuffer() + return oh.root.createParentDirectories(d, &dPath) +} + +// UploadOutputs uploads outputs of the build action into the CAS. This +// function is called after executing the build action. +func (oh *OutputHierarchy) UploadOutputs(ctx context.Context, d filesystem.Directory, contentAddressableStorage cas.ContentAddressableStorage, parentDigest digest.Digest, actionResult *remoteexecution.ActionResult) error { + s := uploadOutputsState{ + context: ctx, + contentAddressableStorage: contentAddressableStorage, + parentDigest: parentDigest, + actionResult: actionResult, + + dPath: newPathBuffer(), + } + + if len(oh.rootsToUpload) > 0 { + s.uploadOutputDirectoryEntered(d, oh.rootsToUpload) + } + oh.root.uploadOutputs(&s, d) + return s.firstError +} diff --git a/pkg/builder/output_hierarchy_test.go b/pkg/builder/output_hierarchy_test.go new file mode 100644 index 00000000..5516f721 --- /dev/null +++ b/pkg/builder/output_hierarchy_test.go @@ -0,0 +1,535 @@ +package builder_test + +import ( + "context" + "os" + "syscall" + "testing" + + remoteexecution "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2" + "github.com/buildbarn/bb-remote-execution/internal/mock" + "github.com/buildbarn/bb-remote-execution/pkg/builder" + "github.com/buildbarn/bb-storage/pkg/digest" + "github.com/buildbarn/bb-storage/pkg/filesystem" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestOutputHierarchyCreateParentDirectories(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + root := mock.NewMockDirectory(ctrl) + + t.Run("Noop", func(t *testing.T) { + // No parent directories should be created. + oh := builder.NewOutputHierarchy(".") + require.NoError(t, oh.CreateParentDirectories(root)) + }) + + t.Run("WorkingDirectory", func(t *testing.T) { + // REv2 explicitly states that the working directory + // must be a directory that exists in the input root. + // Using a subdirectory as a working directory should + // not cause any Mkdir() calls. + oh := builder.NewOutputHierarchy("foo/bar") + require.NoError(t, oh.CreateParentDirectories(root)) + }) + + t.Run("StillNoop", func(t *testing.T) { + // All of the provided paths expand to (locations under) + // the root directory. There is thus no need to create + // any output directories. + oh := builder.NewOutputHierarchy("foo") + oh.AddDirectory("..") + oh.AddFile("../file") + oh.AddPath("../path") + require.NoError(t, oh.CreateParentDirectories(root)) + }) + + t.Run("Success", func(t *testing.T) { + // Create /foo/bar/baz. + root.EXPECT().Mkdir("foo", os.FileMode(0777)) + foo := mock.NewMockDirectoryCloser(ctrl) + root.EXPECT().EnterDirectory("foo").Return(foo, nil) + foo.EXPECT().Mkdir("bar", os.FileMode(0777)) + bar := mock.NewMockDirectoryCloser(ctrl) + foo.EXPECT().EnterDirectory("bar").Return(bar, nil) + bar.EXPECT().Mkdir("baz", os.FileMode(0777)) + bar.EXPECT().Close() + // Create /foo/qux. + foo.EXPECT().Mkdir("qux", os.FileMode(0777)) + foo.EXPECT().Close() + // Create /alice. 
+ root.EXPECT().Mkdir("alice", os.FileMode(0777)) + + oh := builder.NewOutputHierarchy("foo") + oh.AddDirectory("bar/baz") + oh.AddFile("../foo/qux/xyzzy") + oh.AddPath("../alice/bob") + require.NoError(t, oh.CreateParentDirectories(root)) + }) + + t.Run("MkdirFailureParent", func(t *testing.T) { + // Failure to create the parent directory of a location + // where an output file is expected. + root.EXPECT().Mkdir("foo", os.FileMode(0777)) + foo := mock.NewMockDirectoryCloser(ctrl) + root.EXPECT().EnterDirectory("foo").Return(foo, nil) + foo.EXPECT().Mkdir("bar", os.FileMode(0777)).Return(status.Error(codes.Internal, "I/O error")) + foo.EXPECT().Close() + + oh := builder.NewOutputHierarchy("foo") + oh.AddFile("bar/baz") + require.Equal( + t, + status.Error(codes.Internal, "Failed to create output parent directory \"foo/bar\": I/O error"), + oh.CreateParentDirectories(root)) + }) + + t.Run("MkdirFailureParentExists", func(t *testing.T) { + // This test is identical to the previous, except that + // the error is EEXIST. This should not cause a hard + // failure. + root.EXPECT().Mkdir("foo", os.FileMode(0777)) + foo := mock.NewMockDirectoryCloser(ctrl) + root.EXPECT().EnterDirectory("foo").Return(foo, nil) + foo.EXPECT().Mkdir("bar", os.FileMode(0777)).Return(syscall.EEXIST) + foo.EXPECT().Close() + + oh := builder.NewOutputHierarchy("foo") + oh.AddFile("bar/baz") + require.NoError(t, oh.CreateParentDirectories(root)) + }) + + t.Run("MkdirFailureOutput", func(t *testing.T) { + // Failure to create a location where an output + // directory is expected. + root.EXPECT().Mkdir("foo", os.FileMode(0777)) + foo := mock.NewMockDirectoryCloser(ctrl) + root.EXPECT().EnterDirectory("foo").Return(foo, nil) + foo.EXPECT().Mkdir("bar", os.FileMode(0777)).Return(status.Error(codes.Internal, "I/O error")) + foo.EXPECT().Close() + + oh := builder.NewOutputHierarchy("foo") + oh.AddDirectory("bar") + require.Equal( + t, + status.Error(codes.Internal, "Failed to create output directory \"foo/bar\": I/O error"), + oh.CreateParentDirectories(root)) + }) + + t.Run("MkdirFailureOutputExists", func(t *testing.T) { + // This test is identical to the previous, except that + // the error is EEXIST. This should not cause a hard + // failure. 
+ root.EXPECT().Mkdir("foo", os.FileMode(0777)) + foo := mock.NewMockDirectoryCloser(ctrl) + root.EXPECT().EnterDirectory("foo").Return(foo, nil) + foo.EXPECT().Mkdir("bar", os.FileMode(0777)).Return(syscall.EEXIST) + foo.EXPECT().Close() + + oh := builder.NewOutputHierarchy("foo") + oh.AddDirectory("bar") + require.NoError(t, oh.CreateParentDirectories(root)) + }) + + t.Run("EnterFailure", func(t *testing.T) { + root.EXPECT().Mkdir("foo", os.FileMode(0777)) + foo := mock.NewMockDirectoryCloser(ctrl) + root.EXPECT().EnterDirectory("foo").Return(foo, nil) + foo.EXPECT().Mkdir("bar", os.FileMode(0777)) + foo.EXPECT().EnterDirectory("bar").Return(nil, status.Error(codes.Internal, "I/O error")) + foo.EXPECT().Close() + + oh := builder.NewOutputHierarchy("foo") + oh.AddDirectory("bar/baz") + require.Equal( + t, + status.Error(codes.Internal, "Failed to enter output parent directory \"foo/bar\": I/O error"), + oh.CreateParentDirectories(root)) + }) +} + +func TestOutputHierarchyUploadOutputs(t *testing.T) { + ctrl, ctx := gomock.WithContext(context.Background(), t) + defer ctrl.Finish() + + root := mock.NewMockDirectory(ctrl) + contentAddressableStorage := mock.NewMockContentAddressableStorage(ctrl) + parentDigest := digest.MustNewDigest("example", "8b1a9953c4611296a827abf8c47804d7", 5) + + t.Run("Noop", func(t *testing.T) { + // Uploading of a build action with no declared outputs + // should not trigger any I/O. + oh := builder.NewOutputHierarchy(".") + var actionResult remoteexecution.ActionResult + require.NoError(t, oh.UploadOutputs(ctx, root, contentAddressableStorage, parentDigest, &actionResult)) + require.Equal(t, remoteexecution.ActionResult{}, actionResult) + }) + + t.Run("Success", func(t *testing.T) { + // Declare output directories, files and paths. For each + // of these output types, let them match one of the + // valid file types. + foo := mock.NewMockDirectoryCloser(ctrl) + root.EXPECT().EnterDirectory("foo").Return(foo, nil) + + // Calls triggered to obtain the file type of the outputs. + foo.EXPECT().Lstat("directory-directory").Return(filesystem.NewFileInfo("directory-directory", filesystem.FileTypeDirectory), nil) + foo.EXPECT().Lstat("directory-symlink").Return(filesystem.NewFileInfo("directory-symlink", filesystem.FileTypeSymlink), nil) + foo.EXPECT().Lstat("directory-enoent").Return(filesystem.FileInfo{}, syscall.ENOENT) + foo.EXPECT().Lstat("file-regular").Return(filesystem.NewFileInfo("file-regular", filesystem.FileTypeRegularFile), nil) + foo.EXPECT().Lstat("file-executable").Return(filesystem.NewFileInfo("file-executable", filesystem.FileTypeExecutableFile), nil) + foo.EXPECT().Lstat("file-symlink").Return(filesystem.NewFileInfo("file-symlink", filesystem.FileTypeSymlink), nil) + foo.EXPECT().Lstat("file-enoent").Return(filesystem.FileInfo{}, syscall.ENOENT) + foo.EXPECT().Lstat("path-regular").Return(filesystem.NewFileInfo("path-regular", filesystem.FileTypeRegularFile), nil) + foo.EXPECT().Lstat("path-executable").Return(filesystem.NewFileInfo("path-executable", filesystem.FileTypeExecutableFile), nil) + foo.EXPECT().Lstat("path-directory").Return(filesystem.NewFileInfo("path-directory", filesystem.FileTypeDirectory), nil) + foo.EXPECT().Lstat("path-symlink").Return(filesystem.NewFileInfo("path-symlink", filesystem.FileTypeSymlink), nil) + foo.EXPECT().Lstat("path-enoent").Return(filesystem.FileInfo{}, syscall.ENOENT) + + // Inspection/uploading of all non-directory outputs. 
+ foo.EXPECT().Readlink("directory-symlink").Return("directory-symlink-target", nil) + contentAddressableStorage.EXPECT().PutFile(ctx, foo, "file-regular", parentDigest). + Return(digest.MustNewDigest("example", "a58c2f2281011ca2e631b39baa1ab657", 12), nil) + contentAddressableStorage.EXPECT().PutFile(ctx, foo, "file-executable", parentDigest). + Return(digest.MustNewDigest("example", "7590e1b46240ecb5ea65a80db7ee6fae", 15), nil) + foo.EXPECT().Readlink("file-symlink").Return("file-symlink-target", nil) + contentAddressableStorage.EXPECT().PutFile(ctx, foo, "path-regular", parentDigest). + Return(digest.MustNewDigest("example", "44206648b7bb2f3b0d2ed0c52ad2e269", 12), nil) + contentAddressableStorage.EXPECT().PutFile(ctx, foo, "path-executable", parentDigest). + Return(digest.MustNewDigest("example", "87729325cd08d300fb0e238a3a8da443", 15), nil) + foo.EXPECT().Readlink("path-symlink").Return("path-symlink-target", nil) + + // Uploading of /foo/directory-directory. Files with an + // unknown type (UNIX sockets, FIFOs) should be ignored. + // Returning a hard error makes debugging harder (e.g., + // in case the full input root is declared as an output). + directoryDirectory := mock.NewMockDirectoryCloser(ctrl) + foo.EXPECT().EnterDirectory("directory-directory").Return(directoryDirectory, nil) + directoryDirectory.EXPECT().ReadDir().Return([]filesystem.FileInfo{ + filesystem.NewFileInfo("directory", filesystem.FileTypeDirectory), + filesystem.NewFileInfo("executable", filesystem.FileTypeExecutableFile), + filesystem.NewFileInfo("other", filesystem.FileTypeOther), + filesystem.NewFileInfo("regular", filesystem.FileTypeRegularFile), + filesystem.NewFileInfo("symlink", filesystem.FileTypeSymlink), + }, nil) + directoryDirectoryDirectory := mock.NewMockDirectoryCloser(ctrl) + directoryDirectory.EXPECT().EnterDirectory("directory").Return(directoryDirectoryDirectory, nil) + directoryDirectoryDirectory.EXPECT().ReadDir().Return(nil, nil) + directoryDirectoryDirectory.EXPECT().Close() + contentAddressableStorage.EXPECT().PutFile(ctx, directoryDirectory, "executable", parentDigest). + Return(digest.MustNewDigest("example", "ee7004c7949d83f130592f15d98ca343", 10), nil) + contentAddressableStorage.EXPECT().PutFile(ctx, directoryDirectory, "regular", parentDigest). + Return(digest.MustNewDigest("example", "af37d08ae228a87dc6b265fd1019c97d", 7), nil) + directoryDirectory.EXPECT().Readlink("symlink").Return("symlink-target", nil) + directoryDirectory.EXPECT().Close() + contentAddressableStorage.EXPECT().PutTree(ctx, &remoteexecution.Tree{ + Root: &remoteexecution.Directory{ + Files: []*remoteexecution.FileNode{ + { + Name: "executable", + Digest: &remoteexecution.Digest{ + Hash: "ee7004c7949d83f130592f15d98ca343", + SizeBytes: 10, + }, + IsExecutable: true, + }, + { + Name: "regular", + Digest: &remoteexecution.Digest{ + Hash: "af37d08ae228a87dc6b265fd1019c97d", + SizeBytes: 7, + }, + }, + }, + Directories: []*remoteexecution.DirectoryNode{ + { + Name: "directory", + Digest: &remoteexecution.Digest{ + Hash: "d41d8cd98f00b204e9800998ecf8427e", + SizeBytes: 0, + }, + }, + }, + Symlinks: []*remoteexecution.SymlinkNode{ + { + Name: "symlink", + Target: "symlink-target", + }, + }, + }, + Children: []*remoteexecution.Directory{ + {}, + }, + }, parentDigest).Return(digest.MustNewDigest("example", "ea2eb40245e6ccf064d9617e2ec4bb42", 19), nil) + + // Uploading of /foo/path-directory. 
+		pathDirectory := mock.NewMockDirectoryCloser(ctrl)
+		foo.EXPECT().EnterDirectory("path-directory").Return(pathDirectory, nil)
+		pathDirectory.EXPECT().ReadDir().Return(nil, nil)
+		pathDirectory.EXPECT().Close()
+		contentAddressableStorage.EXPECT().PutTree(ctx, &remoteexecution.Tree{
+			Root: &remoteexecution.Directory{},
+		}, parentDigest).Return(digest.MustNewDigest("example", "0bc0ad9d0556269ea1c0bc3d2d31dcc0", 14), nil)
+
+		foo.EXPECT().Close()
+
+		oh := builder.NewOutputHierarchy("foo")
+		oh.AddDirectory("directory-directory")
+		oh.AddDirectory("../foo/directory-directory")
+		oh.AddDirectory("directory-symlink")
+		oh.AddDirectory("../foo/directory-symlink")
+		oh.AddDirectory("directory-enoent")
+		oh.AddDirectory("../foo/directory-enoent")
+		oh.AddFile("file-regular")
+		oh.AddFile("../foo/file-regular")
+		oh.AddFile("file-executable")
+		oh.AddFile("../foo/file-executable")
+		oh.AddFile("file-symlink")
+		oh.AddFile("../foo/file-symlink")
+		oh.AddFile("file-enoent")
+		oh.AddFile("../foo/file-enoent")
+		oh.AddPath("path-regular")
+		oh.AddPath("../foo/path-regular")
+		oh.AddPath("path-executable")
+		oh.AddPath("../foo/path-executable")
+		oh.AddPath("path-directory")
+		oh.AddPath("../foo/path-directory")
+		oh.AddPath("path-symlink")
+		oh.AddPath("../foo/path-symlink")
+		oh.AddPath("path-enoent")
+		oh.AddPath("../foo/path-enoent")
+		var actionResult remoteexecution.ActionResult
+		require.NoError(t, oh.UploadOutputs(ctx, root, contentAddressableStorage, parentDigest, &actionResult))
+		require.Equal(t, remoteexecution.ActionResult{
+			OutputDirectories: []*remoteexecution.OutputDirectory{
+				{
+					Path: "directory-directory",
+					TreeDigest: &remoteexecution.Digest{
+						Hash:      "ea2eb40245e6ccf064d9617e2ec4bb42",
+						SizeBytes: 19,
+					},
+				},
+				{
+					Path: "../foo/directory-directory",
+					TreeDigest: &remoteexecution.Digest{
+						Hash:      "ea2eb40245e6ccf064d9617e2ec4bb42",
+						SizeBytes: 19,
+					},
+				},
+				{
+					Path: "path-directory",
+					TreeDigest: &remoteexecution.Digest{
+						Hash:      "0bc0ad9d0556269ea1c0bc3d2d31dcc0",
+						SizeBytes: 14,
+					},
+				},
+				{
+					Path: "../foo/path-directory",
+					TreeDigest: &remoteexecution.Digest{
+						Hash:      "0bc0ad9d0556269ea1c0bc3d2d31dcc0",
+						SizeBytes: 14,
+					},
+				},
+			},
+			OutputDirectorySymlinks: []*remoteexecution.OutputSymlink{
+				{
+					Path:   "directory-symlink",
+					Target: "directory-symlink-target",
+				},
+				{
+					Path:   "../foo/directory-symlink",
+					Target: "directory-symlink-target",
+				},
+			},
+			OutputFiles: []*remoteexecution.OutputFile{
+				{
+					Path: "file-executable",
+					Digest: &remoteexecution.Digest{
+						Hash:      "7590e1b46240ecb5ea65a80db7ee6fae",
+						SizeBytes: 15,
+					},
+					IsExecutable: true,
+				},
+				{
+					Path: "../foo/file-executable",
+					Digest: &remoteexecution.Digest{
+						Hash:      "7590e1b46240ecb5ea65a80db7ee6fae",
+						SizeBytes: 15,
+					},
+					IsExecutable: true,
+				},
+				{
+					Path: "file-regular",
+					Digest: &remoteexecution.Digest{
+						Hash:      "a58c2f2281011ca2e631b39baa1ab657",
+						SizeBytes: 12,
+					},
+				},
+				{
+					Path: "../foo/file-regular",
+					Digest: &remoteexecution.Digest{
+						Hash:      "a58c2f2281011ca2e631b39baa1ab657",
+						SizeBytes: 12,
+					},
+				},
+				{
+					Path: "path-executable",
+					Digest: &remoteexecution.Digest{
+						Hash:      "87729325cd08d300fb0e238a3a8da443",
+						SizeBytes: 15,
+					},
+					IsExecutable: true,
+				},
+				{
+					Path: "../foo/path-executable",
+					Digest: &remoteexecution.Digest{
+						Hash:      "87729325cd08d300fb0e238a3a8da443",
+						SizeBytes: 15,
+					},
+					IsExecutable: true,
+				},
+				{
+					Path: "path-regular",
+					Digest: &remoteexecution.Digest{
+						Hash:      "44206648b7bb2f3b0d2ed0c52ad2e269",
+						SizeBytes: 12,
+					},
+				},
+				{
+					Path: "../foo/path-regular",
+					Digest: &remoteexecution.Digest{
+						Hash:      "44206648b7bb2f3b0d2ed0c52ad2e269",
+						SizeBytes: 12,
+					},
+				},
+			},
+			OutputFileSymlinks: []*remoteexecution.OutputSymlink{
+				{
+					Path:   "file-symlink",
+					Target: "file-symlink-target",
+				},
+				{
+					Path:   "../foo/file-symlink",
+					Target: "file-symlink-target",
+				},
+			},
+			OutputSymlinks: []*remoteexecution.OutputSymlink{
+				{
+					Path:   "path-symlink",
+					Target: "path-symlink-target",
+				},
+				{
+					Path:   "../foo/path-symlink",
+					Target: "path-symlink-target",
+				},
+			},
+		}, actionResult)
+	})
+
+	t.Run("RootDirectory", func(t *testing.T) {
+		// Special case: it is also permitted to add the root
+		// directory as an REv2.0 output directory. This
+		// shouldn't cause any Lstat() calls, as the root
+		// directory always exists. It is also impossible to
+		// call Lstat() on it, as that would require us to
+		// traverse upwards.
+		root.EXPECT().ReadDir().Return(nil, nil)
+		contentAddressableStorage.EXPECT().PutTree(ctx, &remoteexecution.Tree{
+			Root: &remoteexecution.Directory{},
+		}, parentDigest).Return(digest.MustNewDigest("example", "63a9f0ea7bb98050796b649e85481845", 4), nil)
+
+		oh := builder.NewOutputHierarchy("foo")
+		oh.AddDirectory("..")
+		var actionResult remoteexecution.ActionResult
+		require.NoError(t, oh.UploadOutputs(ctx, root, contentAddressableStorage, parentDigest, &actionResult))
+		require.Equal(t, remoteexecution.ActionResult{
+			OutputDirectories: []*remoteexecution.OutputDirectory{
+				{
+					Path: "..",
+					TreeDigest: &remoteexecution.Digest{
+						Hash:      "63a9f0ea7bb98050796b649e85481845",
+						SizeBytes: 4,
+					},
+				},
+			},
+		}, actionResult)
+	})
+
+	t.Run("RootPath", func(t *testing.T) {
+		// Similar to the previous test, it is also permitted to
+		// add the root directory as an REv2.1 output path.
+		root.EXPECT().ReadDir().Return(nil, nil)
+		contentAddressableStorage.EXPECT().PutTree(ctx, &remoteexecution.Tree{
+			Root: &remoteexecution.Directory{},
+		}, parentDigest).Return(digest.MustNewDigest("example", "63a9f0ea7bb98050796b649e85481845", 4), nil)
+
+		oh := builder.NewOutputHierarchy("foo")
+		oh.AddPath("..")
+		var actionResult remoteexecution.ActionResult
+		require.NoError(t, oh.UploadOutputs(ctx, root, contentAddressableStorage, parentDigest, &actionResult))
+		require.Equal(t, remoteexecution.ActionResult{
+			OutputDirectories: []*remoteexecution.OutputDirectory{
+				{
+					Path: "..",
+					TreeDigest: &remoteexecution.Digest{
+						Hash:      "63a9f0ea7bb98050796b649e85481845",
+						SizeBytes: 4,
+					},
+				},
+			},
+		}, actionResult)
+	})
+
+	t.Run("LstatFailureDirectory", func(t *testing.T) {
+		// Failure to Lstat() an output directory should cause
+		// UploadOutputs() to fail, rather than the output
+		// being skipped silently.
+		root.EXPECT().Lstat("foo").Return(filesystem.FileInfo{}, status.Error(codes.Internal, "I/O error"))
+
+		oh := builder.NewOutputHierarchy("")
+		oh.AddDirectory("foo")
+		var actionResult remoteexecution.ActionResult
+		require.Equal(
+			t,
+			status.Error(codes.Internal, "Failed to read attributes of output directory \"foo\": I/O error"),
+			oh.UploadOutputs(ctx, root, contentAddressableStorage, parentDigest, &actionResult))
+		require.Equal(t, remoteexecution.ActionResult{}, actionResult)
+	})
+
+	t.Run("LstatFailureFile", func(t *testing.T) {
+		// Failure to Lstat() an output file should cause
+		// UploadOutputs() to fail, rather than the output
+		// being skipped silently.
+		root.EXPECT().Lstat("foo").Return(filesystem.FileInfo{}, status.Error(codes.Internal, "I/O error"))
+
+		oh := builder.NewOutputHierarchy("")
+		oh.AddFile("foo")
+		var actionResult remoteexecution.ActionResult
+		require.Equal(
+			t,
+			status.Error(codes.Internal, "Failed to read attributes of output file \"foo\": I/O error"),
+			oh.UploadOutputs(ctx, root, contentAddressableStorage, parentDigest, &actionResult))
+		require.Equal(t, remoteexecution.ActionResult{}, actionResult)
+	})
+
+	t.Run("LstatFailurePath", func(t *testing.T) {
+		// Failure to Lstat() an output path should cause
+		// UploadOutputs() to fail, rather than the output
+		// being skipped silently.
+		root.EXPECT().Lstat("foo").Return(filesystem.FileInfo{}, status.Error(codes.Internal, "I/O error"))
+
+		oh := builder.NewOutputHierarchy("")
+		oh.AddPath("foo")
+		var actionResult remoteexecution.ActionResult
+		require.Equal(
+			t,
+			status.Error(codes.Internal, "Failed to read attributes of output path \"foo\": I/O error"),
+			oh.UploadOutputs(ctx, root, contentAddressableStorage, parentDigest, &actionResult))
+		require.Equal(t, remoteexecution.ActionResult{}, actionResult)
+	})
+
+	// TODO: Are there other cases we'd like to unit test?
+}
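
For context, the sketch below shows one way a caller might drive the OutputHierarchy API that these tests exercise, using only the constructor and methods visible above (NewOutputHierarchy, AddDirectory, AddFile, AddPath, CreateParentDirectories, UploadOutputs). It is not part of this change; the surrounding names (command, inputRootDirectory, contentAddressableStorage, parentDigest) are assumptions made for illustration.

	// Sketch only: wiring inferred from the tests above. The REv2 Command
	// fields and the surrounding variables are assumed to be supplied by
	// the caller.
	oh := builder.NewOutputHierarchy(command.WorkingDirectory)
	for _, outputDirectory := range command.OutputDirectories { // REv2.0 output directories
		oh.AddDirectory(outputDirectory)
	}
	for _, outputFile := range command.OutputFiles { // REv2.0 output files
		oh.AddFile(outputFile)
	}
	for _, outputPath := range command.OutputPaths { // REv2.1 output paths
		oh.AddPath(outputPath)
	}

	// Ensure the parent directories of all declared outputs exist in the
	// input root before the action runs.
	if err := oh.CreateParentDirectories(inputRootDirectory); err != nil {
		return err
	}

	// ... run the action ...

	// Afterwards, collect whatever outputs the action produced into the
	// ActionResult, uploading files and directory trees to the CAS.
	var actionResult remoteexecution.ActionResult
	if err := oh.UploadOutputs(ctx, inputRootDirectory, contentAddressableStorage, parentDigest, &actionResult); err != nil {
		return err
	}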