diff --git a/atc/api/accessor/accessor_test.go b/atc/api/accessor/accessor_test.go index 978d68b1503..1a17317aec3 100644 --- a/atc/api/accessor/accessor_test.go +++ b/atc/api/accessor/accessor_test.go @@ -684,6 +684,11 @@ var _ = Describe("Accessor", func() { Entry("pipeline-operator :: "+atc.PausePipeline, atc.PausePipeline, accessor.OperatorRole, true), Entry("viewer :: "+atc.PausePipeline, atc.PausePipeline, accessor.ViewerRole, false), + Entry("owner :: "+atc.ArchivePipeline, atc.ArchivePipeline, accessor.OwnerRole, true), + Entry("member :: "+atc.ArchivePipeline, atc.ArchivePipeline, accessor.MemberRole, false), + Entry("pipeline-operator :: "+atc.ArchivePipeline, atc.ArchivePipeline, accessor.OperatorRole, false), + Entry("viewer :: "+atc.ArchivePipeline, atc.ArchivePipeline, accessor.ViewerRole, false), + Entry("owner :: "+atc.UnpausePipeline, atc.UnpausePipeline, accessor.OwnerRole, true), Entry("member :: "+atc.UnpausePipeline, atc.UnpausePipeline, accessor.MemberRole, true), Entry("pipeline-operator :: "+atc.UnpausePipeline, atc.UnpausePipeline, accessor.OperatorRole, true), diff --git a/atc/api/accessor/role_action_map.go b/atc/api/accessor/role_action_map.go index 930ee10970f..933dff8b718 100644 --- a/atc/api/accessor/role_action_map.go +++ b/atc/api/accessor/role_action_map.go @@ -69,6 +69,7 @@ var requiredRoles = map[string]string{ atc.DeletePipeline: MemberRole, atc.OrderPipelines: MemberRole, atc.PausePipeline: OperatorRole, + atc.ArchivePipeline: OwnerRole, atc.UnpausePipeline: OperatorRole, atc.ExposePipeline: MemberRole, atc.HidePipeline: MemberRole, diff --git a/atc/api/handler.go b/atc/api/handler.go index 27a2a5e0c86..b9dc3d3ebf0 100644 --- a/atc/api/handler.go +++ b/atc/api/handler.go @@ -144,6 +144,7 @@ func NewHandler( atc.DeletePipeline: pipelineHandlerFactory.HandlerFor(pipelineServer.DeletePipeline), atc.OrderPipelines: http.HandlerFunc(pipelineServer.OrderPipelines), atc.PausePipeline: pipelineHandlerFactory.HandlerFor(pipelineServer.PausePipeline), + atc.ArchivePipeline: pipelineHandlerFactory.HandlerFor(pipelineServer.ArchivePipeline), atc.UnpausePipeline: pipelineHandlerFactory.HandlerFor(pipelineServer.UnpausePipeline), atc.ExposePipeline: pipelineHandlerFactory.HandlerFor(pipelineServer.ExposePipeline), atc.HidePipeline: pipelineHandlerFactory.HandlerFor(pipelineServer.HidePipeline), diff --git a/atc/api/pipelines_test.go b/atc/api/pipelines_test.go index 656242b61e9..40e21534415 100644 --- a/atc/api/pipelines_test.go +++ b/atc/api/pipelines_test.go @@ -61,6 +61,7 @@ var _ = Describe("Pipelines API", func() { privatePipeline.IDReturns(3) privatePipeline.PausedReturns(false) privatePipeline.PublicReturns(false) + privatePipeline.ArchivedReturns(true) privatePipeline.TeamNameReturns("main") privatePipeline.NameReturns("private-pipeline") privatePipeline.GroupsReturns(atc.GroupConfigs{ @@ -273,7 +274,7 @@ var _ = Describe("Pipelines API", func() { "name": "private-pipeline", "paused": false, "public": false, - "archived": false, + "archived": true, "team_name": "main", "last_updated": 1, "groups": [ @@ -935,6 +936,42 @@ var _ = Describe("Pipelines API", func() { }) }) + Describe("PUT /api/v1/teams/:team_name/pipelines/:pipeline_name/archive", func() { + var response *http.Response + + BeforeEach(func() { + fakeaccess.IsAuthenticatedReturns(true) + fakeaccess.IsAuthorizedReturns(true) + dbTeamFactory.FindTeamReturns(fakeTeam, true, nil) + fakeTeam.PipelineReturns(dbPipeline, true, nil) + }) + + JustBeforeEach(func() { + request, _ := http.NewRequest("PUT", 
server.URL+"/api/v1/teams/a-team/pipelines/a-pipeline/archive", nil) + var err error + response, err = client.Do(request) + Expect(err).NotTo(HaveOccurred()) + }) + + It("returns 200", func() { + Expect(response.StatusCode).To(Equal(http.StatusOK)) + }) + + It("archives the pipeline", func() { + Expect(dbPipeline.ArchiveCallCount()).To(Equal(1), "Archive() called the wrong number of times") + }) + + Context("when archiving the pipeline fails due to the DB", func() { + BeforeEach(func() { + dbPipeline.ArchiveReturns(errors.New("pq: a db error")) + }) + + It("gives a server error", func() { + Expect(response.StatusCode).To(Equal(http.StatusInternalServerError)) + }) + }) + }) + Describe("PUT /api/v1/teams/:team_name/pipelines/:pipeline_name/unpause", func() { var response *http.Response diff --git a/atc/api/pipelineserver/archive.go b/atc/api/pipelineserver/archive.go new file mode 100644 index 00000000000..8f089fc9920 --- /dev/null +++ b/atc/api/pipelineserver/archive.go @@ -0,0 +1,18 @@ +package pipelineserver + +import ( + "net/http" + + "github.com/concourse/concourse/atc/db" +) + +func (s *Server) ArchivePipeline(pipelineDB db.Pipeline) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + s.logger.Debug("archive-pipeline") + err := pipelineDB.Archive() + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + s.logger.Error("archive-pipeline", err) + } + }) +} diff --git a/atc/api/pipelineserver/archive_test.go b/atc/api/pipelineserver/archive_test.go new file mode 100644 index 00000000000..eb8b24de81c --- /dev/null +++ b/atc/api/pipelineserver/archive_test.go @@ -0,0 +1,68 @@ +package pipelineserver_test + +import ( + "errors" + "net/http" + "net/http/httptest" + + "github.com/concourse/concourse/atc/api/pipelineserver" + "github.com/concourse/concourse/atc/api/pipelineserver/pipelineserverfakes" + "github.com/concourse/concourse/atc/db/dbfakes" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +//go:generate counterfeiter code.cloudfoundry.org/lager.Logger + +var _ = Describe("Archive Handler", func() { + var ( + fakeLogger *pipelineserverfakes.FakeLogger + server *pipelineserver.Server + dbPipeline *dbfakes.FakePipeline + handler http.Handler + recorder *httptest.ResponseRecorder + request *http.Request + ) + + BeforeEach(func() { + fakeLogger = new(pipelineserverfakes.FakeLogger) + server = pipelineserver.NewServer( + fakeLogger, + new(dbfakes.FakeTeamFactory), + new(dbfakes.FakePipelineFactory), + "", + ) + dbPipeline = new(dbfakes.FakePipeline) + handler = server.ArchivePipeline(dbPipeline) + recorder = httptest.NewRecorder() + request = httptest.NewRequest("PUT", "http://example.com", nil) + }) + + It("logs database errors", func() { + expectedError := errors.New("db error") + dbPipeline.ArchiveReturns(expectedError) + + handler.ServeHTTP(recorder, request) + + Expect(fakeLogger.ErrorCallCount()).To(Equal(1)) + action, actualError, _ := fakeLogger.ErrorArgsForCall(0) + Expect(action).To(Equal("archive-pipeline"), "wrong action name") + Expect(actualError).To(Equal(expectedError)) + }) + + It("write a debug log on every request", func() { + handler.ServeHTTP(recorder, request) + + Expect(fakeLogger.DebugCallCount()).To(Equal(1)) + action, _ := fakeLogger.DebugArgsForCall(0) + Expect(action).To(Equal("archive-pipeline"), "wrong action name") + }) + + It("logs no errors if everything works", func() { + dbPipeline.ArchiveReturns(nil) + + handler.ServeHTTP(recorder, request) + + Expect(fakeLogger.ErrorCallCount()).To(Equal(0)) + }) +}) diff --git a/atc/api/pipelineserver/pipelineserverfakes/fake_logger.go b/atc/api/pipelineserver/pipelineserverfakes/fake_logger.go new file mode 100644 index 00000000000..e462173356d --- /dev/null +++ b/atc/api/pipelineserver/pipelineserverfakes/fake_logger.go @@ -0,0 +1,451 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package pipelineserverfakes + +import ( + "sync" + + "code.cloudfoundry.org/lager" +) + +type FakeLogger struct { + DebugStub func(string, ...lager.Data) + debugMutex sync.RWMutex + debugArgsForCall []struct { + arg1 string + arg2 []lager.Data + } + ErrorStub func(string, error, ...lager.Data) + errorMutex sync.RWMutex + errorArgsForCall []struct { + arg1 string + arg2 error + arg3 []lager.Data + } + FatalStub func(string, error, ...lager.Data) + fatalMutex sync.RWMutex + fatalArgsForCall []struct { + arg1 string + arg2 error + arg3 []lager.Data + } + InfoStub func(string, ...lager.Data) + infoMutex sync.RWMutex + infoArgsForCall []struct { + arg1 string + arg2 []lager.Data + } + RegisterSinkStub func(lager.Sink) + registerSinkMutex sync.RWMutex + registerSinkArgsForCall []struct { + arg1 lager.Sink + } + SessionStub func(string, ...lager.Data) lager.Logger + sessionMutex sync.RWMutex + sessionArgsForCall []struct { + arg1 string + arg2 []lager.Data + } + sessionReturns struct { + result1 lager.Logger + } + sessionReturnsOnCall map[int]struct { + result1 lager.Logger + } + SessionNameStub func() string + sessionNameMutex sync.RWMutex + sessionNameArgsForCall []struct { + } + sessionNameReturns struct { + result1 string + } + sessionNameReturnsOnCall map[int]struct { + result1 string + } + WithDataStub func(lager.Data) lager.Logger + withDataMutex sync.RWMutex + withDataArgsForCall []struct { + arg1 lager.Data + } + withDataReturns struct { + result1 lager.Logger + } + withDataReturnsOnCall map[int]struct { + result1 lager.Logger + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *FakeLogger) Debug(arg1 string, arg2 ...lager.Data) { + fake.debugMutex.Lock() + fake.debugArgsForCall = append(fake.debugArgsForCall, struct { + arg1 string + arg2 []lager.Data + }{arg1, arg2}) + fake.recordInvocation("Debug", []interface{}{arg1, arg2}) + fake.debugMutex.Unlock() + if fake.DebugStub != nil { + fake.DebugStub(arg1, arg2...) + } +} + +func (fake *FakeLogger) DebugCallCount() int { + fake.debugMutex.RLock() + defer fake.debugMutex.RUnlock() + return len(fake.debugArgsForCall) +} + +func (fake *FakeLogger) DebugCalls(stub func(string, ...lager.Data)) { + fake.debugMutex.Lock() + defer fake.debugMutex.Unlock() + fake.DebugStub = stub +} + +func (fake *FakeLogger) DebugArgsForCall(i int) (string, []lager.Data) { + fake.debugMutex.RLock() + defer fake.debugMutex.RUnlock() + argsForCall := fake.debugArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeLogger) Error(arg1 string, arg2 error, arg3 ...lager.Data) { + fake.errorMutex.Lock() + fake.errorArgsForCall = append(fake.errorArgsForCall, struct { + arg1 string + arg2 error + arg3 []lager.Data + }{arg1, arg2, arg3}) + fake.recordInvocation("Error", []interface{}{arg1, arg2, arg3}) + fake.errorMutex.Unlock() + if fake.ErrorStub != nil { + fake.ErrorStub(arg1, arg2, arg3...) 
+ } +} + +func (fake *FakeLogger) ErrorCallCount() int { + fake.errorMutex.RLock() + defer fake.errorMutex.RUnlock() + return len(fake.errorArgsForCall) +} + +func (fake *FakeLogger) ErrorCalls(stub func(string, error, ...lager.Data)) { + fake.errorMutex.Lock() + defer fake.errorMutex.Unlock() + fake.ErrorStub = stub +} + +func (fake *FakeLogger) ErrorArgsForCall(i int) (string, error, []lager.Data) { + fake.errorMutex.RLock() + defer fake.errorMutex.RUnlock() + argsForCall := fake.errorArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeLogger) Fatal(arg1 string, arg2 error, arg3 ...lager.Data) { + fake.fatalMutex.Lock() + fake.fatalArgsForCall = append(fake.fatalArgsForCall, struct { + arg1 string + arg2 error + arg3 []lager.Data + }{arg1, arg2, arg3}) + fake.recordInvocation("Fatal", []interface{}{arg1, arg2, arg3}) + fake.fatalMutex.Unlock() + if fake.FatalStub != nil { + fake.FatalStub(arg1, arg2, arg3...) + } +} + +func (fake *FakeLogger) FatalCallCount() int { + fake.fatalMutex.RLock() + defer fake.fatalMutex.RUnlock() + return len(fake.fatalArgsForCall) +} + +func (fake *FakeLogger) FatalCalls(stub func(string, error, ...lager.Data)) { + fake.fatalMutex.Lock() + defer fake.fatalMutex.Unlock() + fake.FatalStub = stub +} + +func (fake *FakeLogger) FatalArgsForCall(i int) (string, error, []lager.Data) { + fake.fatalMutex.RLock() + defer fake.fatalMutex.RUnlock() + argsForCall := fake.fatalArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3 +} + +func (fake *FakeLogger) Info(arg1 string, arg2 ...lager.Data) { + fake.infoMutex.Lock() + fake.infoArgsForCall = append(fake.infoArgsForCall, struct { + arg1 string + arg2 []lager.Data + }{arg1, arg2}) + fake.recordInvocation("Info", []interface{}{arg1, arg2}) + fake.infoMutex.Unlock() + if fake.InfoStub != nil { + fake.InfoStub(arg1, arg2...) 
+ } +} + +func (fake *FakeLogger) InfoCallCount() int { + fake.infoMutex.RLock() + defer fake.infoMutex.RUnlock() + return len(fake.infoArgsForCall) +} + +func (fake *FakeLogger) InfoCalls(stub func(string, ...lager.Data)) { + fake.infoMutex.Lock() + defer fake.infoMutex.Unlock() + fake.InfoStub = stub +} + +func (fake *FakeLogger) InfoArgsForCall(i int) (string, []lager.Data) { + fake.infoMutex.RLock() + defer fake.infoMutex.RUnlock() + argsForCall := fake.infoArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeLogger) RegisterSink(arg1 lager.Sink) { + fake.registerSinkMutex.Lock() + fake.registerSinkArgsForCall = append(fake.registerSinkArgsForCall, struct { + arg1 lager.Sink + }{arg1}) + fake.recordInvocation("RegisterSink", []interface{}{arg1}) + fake.registerSinkMutex.Unlock() + if fake.RegisterSinkStub != nil { + fake.RegisterSinkStub(arg1) + } +} + +func (fake *FakeLogger) RegisterSinkCallCount() int { + fake.registerSinkMutex.RLock() + defer fake.registerSinkMutex.RUnlock() + return len(fake.registerSinkArgsForCall) +} + +func (fake *FakeLogger) RegisterSinkCalls(stub func(lager.Sink)) { + fake.registerSinkMutex.Lock() + defer fake.registerSinkMutex.Unlock() + fake.RegisterSinkStub = stub +} + +func (fake *FakeLogger) RegisterSinkArgsForCall(i int) lager.Sink { + fake.registerSinkMutex.RLock() + defer fake.registerSinkMutex.RUnlock() + argsForCall := fake.registerSinkArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeLogger) Session(arg1 string, arg2 ...lager.Data) lager.Logger { + fake.sessionMutex.Lock() + ret, specificReturn := fake.sessionReturnsOnCall[len(fake.sessionArgsForCall)] + fake.sessionArgsForCall = append(fake.sessionArgsForCall, struct { + arg1 string + arg2 []lager.Data + }{arg1, arg2}) + fake.recordInvocation("Session", []interface{}{arg1, arg2}) + fake.sessionMutex.Unlock() + if fake.SessionStub != nil { + return fake.SessionStub(arg1, arg2...) 
+ } + if specificReturn { + return ret.result1 + } + fakeReturns := fake.sessionReturns + return fakeReturns.result1 +} + +func (fake *FakeLogger) SessionCallCount() int { + fake.sessionMutex.RLock() + defer fake.sessionMutex.RUnlock() + return len(fake.sessionArgsForCall) +} + +func (fake *FakeLogger) SessionCalls(stub func(string, ...lager.Data) lager.Logger) { + fake.sessionMutex.Lock() + defer fake.sessionMutex.Unlock() + fake.SessionStub = stub +} + +func (fake *FakeLogger) SessionArgsForCall(i int) (string, []lager.Data) { + fake.sessionMutex.RLock() + defer fake.sessionMutex.RUnlock() + argsForCall := fake.sessionArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeLogger) SessionReturns(result1 lager.Logger) { + fake.sessionMutex.Lock() + defer fake.sessionMutex.Unlock() + fake.SessionStub = nil + fake.sessionReturns = struct { + result1 lager.Logger + }{result1} +} + +func (fake *FakeLogger) SessionReturnsOnCall(i int, result1 lager.Logger) { + fake.sessionMutex.Lock() + defer fake.sessionMutex.Unlock() + fake.SessionStub = nil + if fake.sessionReturnsOnCall == nil { + fake.sessionReturnsOnCall = make(map[int]struct { + result1 lager.Logger + }) + } + fake.sessionReturnsOnCall[i] = struct { + result1 lager.Logger + }{result1} +} + +func (fake *FakeLogger) SessionName() string { + fake.sessionNameMutex.Lock() + ret, specificReturn := fake.sessionNameReturnsOnCall[len(fake.sessionNameArgsForCall)] + fake.sessionNameArgsForCall = append(fake.sessionNameArgsForCall, struct { + }{}) + fake.recordInvocation("SessionName", []interface{}{}) + fake.sessionNameMutex.Unlock() + if fake.SessionNameStub != nil { + return fake.SessionNameStub() + } + if specificReturn { + return ret.result1 + } + fakeReturns := fake.sessionNameReturns + return fakeReturns.result1 +} + +func (fake *FakeLogger) SessionNameCallCount() int { + fake.sessionNameMutex.RLock() + defer fake.sessionNameMutex.RUnlock() + return len(fake.sessionNameArgsForCall) +} + +func (fake *FakeLogger) SessionNameCalls(stub func() string) { + fake.sessionNameMutex.Lock() + defer fake.sessionNameMutex.Unlock() + fake.SessionNameStub = stub +} + +func (fake *FakeLogger) SessionNameReturns(result1 string) { + fake.sessionNameMutex.Lock() + defer fake.sessionNameMutex.Unlock() + fake.SessionNameStub = nil + fake.sessionNameReturns = struct { + result1 string + }{result1} +} + +func (fake *FakeLogger) SessionNameReturnsOnCall(i int, result1 string) { + fake.sessionNameMutex.Lock() + defer fake.sessionNameMutex.Unlock() + fake.SessionNameStub = nil + if fake.sessionNameReturnsOnCall == nil { + fake.sessionNameReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.sessionNameReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *FakeLogger) WithData(arg1 lager.Data) lager.Logger { + fake.withDataMutex.Lock() + ret, specificReturn := fake.withDataReturnsOnCall[len(fake.withDataArgsForCall)] + fake.withDataArgsForCall = append(fake.withDataArgsForCall, struct { + arg1 lager.Data + }{arg1}) + fake.recordInvocation("WithData", []interface{}{arg1}) + fake.withDataMutex.Unlock() + if fake.WithDataStub != nil { + return fake.WithDataStub(arg1) + } + if specificReturn { + return ret.result1 + } + fakeReturns := fake.withDataReturns + return fakeReturns.result1 +} + +func (fake *FakeLogger) WithDataCallCount() int { + fake.withDataMutex.RLock() + defer fake.withDataMutex.RUnlock() + return len(fake.withDataArgsForCall) +} + +func (fake *FakeLogger) WithDataCalls(stub func(lager.Data) 
lager.Logger) { + fake.withDataMutex.Lock() + defer fake.withDataMutex.Unlock() + fake.WithDataStub = stub +} + +func (fake *FakeLogger) WithDataArgsForCall(i int) lager.Data { + fake.withDataMutex.RLock() + defer fake.withDataMutex.RUnlock() + argsForCall := fake.withDataArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *FakeLogger) WithDataReturns(result1 lager.Logger) { + fake.withDataMutex.Lock() + defer fake.withDataMutex.Unlock() + fake.WithDataStub = nil + fake.withDataReturns = struct { + result1 lager.Logger + }{result1} +} + +func (fake *FakeLogger) WithDataReturnsOnCall(i int, result1 lager.Logger) { + fake.withDataMutex.Lock() + defer fake.withDataMutex.Unlock() + fake.WithDataStub = nil + if fake.withDataReturnsOnCall == nil { + fake.withDataReturnsOnCall = make(map[int]struct { + result1 lager.Logger + }) + } + fake.withDataReturnsOnCall[i] = struct { + result1 lager.Logger + }{result1} +} + +func (fake *FakeLogger) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.debugMutex.RLock() + defer fake.debugMutex.RUnlock() + fake.errorMutex.RLock() + defer fake.errorMutex.RUnlock() + fake.fatalMutex.RLock() + defer fake.fatalMutex.RUnlock() + fake.infoMutex.RLock() + defer fake.infoMutex.RUnlock() + fake.registerSinkMutex.RLock() + defer fake.registerSinkMutex.RUnlock() + fake.sessionMutex.RLock() + defer fake.sessionMutex.RUnlock() + fake.sessionNameMutex.RLock() + defer fake.sessionNameMutex.RUnlock() + fake.withDataMutex.RLock() + defer fake.withDataMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *FakeLogger) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ lager.Logger = new(FakeLogger) diff --git a/atc/api/present/pipeline.go b/atc/api/present/pipeline.go index 6e52f40012d..efdb4ad7aca 100644 --- a/atc/api/present/pipeline.go +++ b/atc/api/present/pipeline.go @@ -12,6 +12,7 @@ func Pipeline(savedPipeline db.Pipeline) atc.Pipeline { TeamName: savedPipeline.TeamName(), Paused: savedPipeline.Paused(), Public: savedPipeline.Public(), + Archived: savedPipeline.Archived(), Groups: savedPipeline.Groups(), LastUpdated: savedPipeline.LastUpdated().Unix(), } diff --git a/atc/auditor/auditor.go b/atc/auditor/auditor.go index ec697d9f3e1..d560dde8194 100644 --- a/atc/auditor/auditor.go +++ b/atc/auditor/auditor.go @@ -95,6 +95,7 @@ func (a *auditor) ValidateAction(action string) bool { atc.DeletePipeline, atc.OrderPipelines, atc.PausePipeline, + atc.ArchivePipeline, atc.UnpausePipeline, atc.ExposePipeline, atc.HidePipeline, diff --git a/atc/db/dbfakes/fake_pipeline.go b/atc/db/dbfakes/fake_pipeline.go index 198844cfda0..c4da27ed421 100644 --- a/atc/db/dbfakes/fake_pipeline.go +++ b/atc/db/dbfakes/fake_pipeline.go @@ -13,6 +13,26 @@ import ( ) type FakePipeline struct { + ArchiveStub func() error + archiveMutex sync.RWMutex + archiveArgsForCall []struct { + } + archiveReturns struct { + result1 error + } + archiveReturnsOnCall map[int]struct { + result1 error + } + ArchivedStub func() bool + archivedMutex sync.RWMutex + archivedArgsForCall []struct { + } + 
archivedReturns struct { + result1 bool + } + archivedReturnsOnCall map[int]struct { + result1 bool + } BuildsStub func(db.Page) ([]db.Build, db.Pagination, error) buildsMutex sync.RWMutex buildsArgsForCall []struct { @@ -486,6 +506,110 @@ type FakePipeline struct { invocationsMutex sync.RWMutex } +func (fake *FakePipeline) Archive() error { + fake.archiveMutex.Lock() + ret, specificReturn := fake.archiveReturnsOnCall[len(fake.archiveArgsForCall)] + fake.archiveArgsForCall = append(fake.archiveArgsForCall, struct { + }{}) + fake.recordInvocation("Archive", []interface{}{}) + fake.archiveMutex.Unlock() + if fake.ArchiveStub != nil { + return fake.ArchiveStub() + } + if specificReturn { + return ret.result1 + } + fakeReturns := fake.archiveReturns + return fakeReturns.result1 +} + +func (fake *FakePipeline) ArchiveCallCount() int { + fake.archiveMutex.RLock() + defer fake.archiveMutex.RUnlock() + return len(fake.archiveArgsForCall) +} + +func (fake *FakePipeline) ArchiveCalls(stub func() error) { + fake.archiveMutex.Lock() + defer fake.archiveMutex.Unlock() + fake.ArchiveStub = stub +} + +func (fake *FakePipeline) ArchiveReturns(result1 error) { + fake.archiveMutex.Lock() + defer fake.archiveMutex.Unlock() + fake.ArchiveStub = nil + fake.archiveReturns = struct { + result1 error + }{result1} +} + +func (fake *FakePipeline) ArchiveReturnsOnCall(i int, result1 error) { + fake.archiveMutex.Lock() + defer fake.archiveMutex.Unlock() + fake.ArchiveStub = nil + if fake.archiveReturnsOnCall == nil { + fake.archiveReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.archiveReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *FakePipeline) Archived() bool { + fake.archivedMutex.Lock() + ret, specificReturn := fake.archivedReturnsOnCall[len(fake.archivedArgsForCall)] + fake.archivedArgsForCall = append(fake.archivedArgsForCall, struct { + }{}) + fake.recordInvocation("Archived", []interface{}{}) + fake.archivedMutex.Unlock() + if fake.ArchivedStub != nil { + return fake.ArchivedStub() + } + if specificReturn { + return ret.result1 + } + fakeReturns := fake.archivedReturns + return fakeReturns.result1 +} + +func (fake *FakePipeline) ArchivedCallCount() int { + fake.archivedMutex.RLock() + defer fake.archivedMutex.RUnlock() + return len(fake.archivedArgsForCall) +} + +func (fake *FakePipeline) ArchivedCalls(stub func() bool) { + fake.archivedMutex.Lock() + defer fake.archivedMutex.Unlock() + fake.ArchivedStub = stub +} + +func (fake *FakePipeline) ArchivedReturns(result1 bool) { + fake.archivedMutex.Lock() + defer fake.archivedMutex.Unlock() + fake.ArchivedStub = nil + fake.archivedReturns = struct { + result1 bool + }{result1} +} + +func (fake *FakePipeline) ArchivedReturnsOnCall(i int, result1 bool) { + fake.archivedMutex.Lock() + defer fake.archivedMutex.Unlock() + fake.ArchivedStub = nil + if fake.archivedReturnsOnCall == nil { + fake.archivedReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.archivedReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + func (fake *FakePipeline) Builds(arg1 db.Page) ([]db.Build, db.Pagination, error) { fake.buildsMutex.Lock() ret, specificReturn := fake.buildsReturnsOnCall[len(fake.buildsArgsForCall)] @@ -2736,6 +2860,10 @@ func (fake *FakePipeline) VariablesReturnsOnCall(i int, result1 vars.Variables, func (fake *FakePipeline) Invocations() map[string][][]interface{} { fake.invocationsMutex.RLock() defer fake.invocationsMutex.RUnlock() + fake.archiveMutex.RLock() + defer fake.archiveMutex.RUnlock() + 
fake.archivedMutex.RLock() + defer fake.archivedMutex.RUnlock() fake.buildsMutex.RLock() defer fake.buildsMutex.RUnlock() fake.buildsWithTimeMutex.RLock() diff --git a/atc/db/migration/mig b/atc/db/migration/mig new file mode 100755 index 00000000000..6f9e16946a6 Binary files /dev/null and b/atc/db/migration/mig differ diff --git a/atc/db/migration/migrations/1584564140_add_archived_to_pipelines.down.sql b/atc/db/migration/migrations/1584564140_add_archived_to_pipelines.down.sql new file mode 100644 index 00000000000..ef2f19b6a11 --- /dev/null +++ b/atc/db/migration/migrations/1584564140_add_archived_to_pipelines.down.sql @@ -0,0 +1,4 @@ +BEGIN; + ALTER TABLE pipelines + DROP COLUMN IF EXISTS "archived"; +COMMIT; diff --git a/atc/db/migration/migrations/1584564140_add_archived_to_pipelines.up.sql b/atc/db/migration/migrations/1584564140_add_archived_to_pipelines.up.sql new file mode 100644 index 00000000000..8aa18acff89 --- /dev/null +++ b/atc/db/migration/migrations/1584564140_add_archived_to_pipelines.up.sql @@ -0,0 +1,4 @@ +BEGIN; + ALTER TABLE pipelines + ADD COLUMN IF NOT EXISTS "archived" boolean NOT NULL DEFAULT FALSE; +COMMIT; diff --git a/atc/db/pipeline.go b/atc/db/pipeline.go index b1dc8ff6392..3fd5df03302 100644 --- a/atc/db/pipeline.go +++ b/atc/db/pipeline.go @@ -46,6 +46,7 @@ type Pipeline interface { Config() (atc.Config, error) Public() bool Paused() bool + Archived() bool LastUpdated() time.Time CheckPaused() (bool, error) @@ -85,6 +86,8 @@ type Pipeline interface { Pause() error Unpause() error + Archive() error + Destroy() error Rename(string) error @@ -101,6 +104,7 @@ type pipeline struct { configVersion ConfigVersion paused bool public bool + archived bool lastUpdated time.Time conn Conn @@ -121,6 +125,7 @@ var pipelinesQuery = psql.Select(` t.name, p.paused, p.public, + p.archived, p.last_updated `). From("pipelines p"). @@ -143,6 +148,7 @@ func (p *pipeline) VarSources() atc.VarSourceConfigs { return p.varSources } func (p *pipeline) ConfigVersion() ConfigVersion { return p.configVersion } func (p *pipeline) Public() bool { return p.public } func (p *pipeline) Paused() bool { return p.paused } +func (p *pipeline) Archived() bool { return p.archived } func (p *pipeline) LastUpdated() time.Time { return p.lastUpdated } // IMPORTANT: This method is broken with the new resource config versions changes @@ -621,6 +627,18 @@ func (p *pipeline) Unpause() error { return tx.Commit() } +func (p *pipeline) Archive() error { + _, err := psql.Update("pipelines"). + Set("archived", true). + Where(sq.Eq{ + "id": p.id, + }). + RunWith(p.conn). + Exec() + + return err +} + func (p *pipeline) Hide() error { _, err := psql.Update("pipelines"). Set("public", false). 
diff --git a/atc/db/pipeline_test.go b/atc/db/pipeline_test.go
index a18a32e80f3..ee9da3257f7 100644
--- a/atc/db/pipeline_test.go
+++ b/atc/db/pipeline_test.go
@@ -217,6 +217,17 @@ var _ = Describe("Pipeline", func() {
 			})
 		})
 
+	Describe("Archive", func() {
+		JustBeforeEach(func() {
+			Expect(pipeline.Archive()).To(Succeed())
+		})
+
+		It("archives the pipeline", func() {
+			Expect(pipeline.Reload()).To(BeTrue())
+			Expect(pipeline.Archived()).To(BeTrue(), "pipeline was not archived")
+		})
+	})
+
 	Describe("Unpause", func() {
 		JustBeforeEach(func() {
 			Expect(pipeline.Unpause()).To(Succeed())
diff --git a/atc/db/team.go b/atc/db/team.go
index 81259a2c532..4ff41db9140 100644
--- a/atc/db/team.go
+++ b/atc/db/team.go
@@ -1098,7 +1098,7 @@ func scanPipeline(p *pipeline, scan scannable) error {
 		nonceStr    *string
 		lastUpdated pq.NullTime
 	)
-	err := scan.Scan(&p.id, &p.name, &groups, &varSources, &nonce, &p.configVersion, &p.teamID, &p.teamName, &p.paused, &p.public, &lastUpdated)
+	err := scan.Scan(&p.id, &p.name, &groups, &varSources, &nonce, &p.configVersion, &p.teamID, &p.teamName, &p.paused, &p.public, &p.archived, &lastUpdated)
 	if err != nil {
 		return err
 	}
diff --git a/atc/db/team_test.go b/atc/db/team_test.go
index 2c7705ed336..3594c501413 100644
--- a/atc/db/team_test.go
+++ b/atc/db/team_test.go
@@ -1674,6 +1674,17 @@ var _ = Describe("Team", func() {
 			Expect(pipeline.Paused()).To(BeFalse())
 		})
 
+		It("is not archived by default", func() {
+			_, _, err := team.SavePipeline(pipelineName, config, 0, true)
+			Expect(err).ToNot(HaveOccurred())
+
+			pipeline, found, err := team.Pipeline(pipelineName)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(found).To(BeTrue())
+
+			Expect(pipeline.Archived()).To(BeFalse())
+		})
+
 		It("requests schedule on the pipeline", func() {
 			requestedPipeline, _, err := team.SavePipeline(pipelineName, config, 0, false)
 			Expect(err).ToNot(HaveOccurred())
diff --git a/atc/integration/archiving_test.go b/atc/integration/archiving_test.go
new file mode 100644
index 00000000000..4b395a2ba4a
--- /dev/null
+++ b/atc/integration/archiving_test.go
@@ -0,0 +1,77 @@
+package integration_test
+
+import (
+	"fmt"
+	"net/http"
+	"os"
+	"time"
+
+	"github.com/concourse/concourse/atc"
+	concourse "github.com/concourse/concourse/go-concourse/concourse"
+	. "github.com/onsi/ginkgo"
+	.
"github.com/onsi/gomega" + "github.com/tedsuo/ifrit" +) + +var _ = Describe("ATC Integration Test", func() { + var ( + atcProcess ifrit.Process + atcURL string + ) + + BeforeEach(func() { + atcURL = fmt.Sprintf("http://localhost:%v", cmd.BindPort) + + runner, err := cmd.Runner([]string{}) + Expect(err).NotTo(HaveOccurred()) + + atcProcess = ifrit.Invoke(runner) + + Eventually(func() error { + _, err := http.Get(atcURL + "/api/v1/info") + return err + }, 20*time.Second).ShouldNot(HaveOccurred()) + }) + + AfterEach(func() { + atcProcess.Signal(os.Interrupt) + <-atcProcess.Wait() + }) + + It("can archive pipelines", func() { + atcURL := fmt.Sprintf("http://localhost:%v", cmd.BindPort) + client := login(atcURL, "test", "test") + givenAPipeline(client, "pipeline") + whenIArchiveIt(client, "pipeline") + pipeline := getPipeline(client, "pipeline") + Expect(pipeline.Archived).To(BeTrue(), "pipeline was not archived") + Expect(pipeline.Paused).To(BeTrue(), "pipeline was not paused") + }) +}) + +func givenAPipeline(client concourse.Client, pipelineName string) { + config := []byte(` +--- +jobs: +- name: simple +`) + _, _, _, err := client.Team("main").CreateOrUpdatePipelineConfig(pipelineName, "0", config, false) + Expect(err).NotTo(HaveOccurred()) +} + +func whenIArchiveIt(client concourse.Client, pipelineName string) { + httpClient := client.HTTPClient() + request, _ := http.NewRequest( + "PUT", + client.URL()+"/api/v1/teams/main/pipelines/"+pipelineName+"/archive", + nil, + ) + _, err := httpClient.Do(request) + Expect(err).ToNot(HaveOccurred()) +} + +func getPipeline(client concourse.Client, pipelineName string) atc.Pipeline { + pipeline, _, err := client.Team("main").Pipeline(pipelineName) + Expect(err).ToNot(HaveOccurred()) + return pipeline +} diff --git a/atc/routes.go b/atc/routes.go index 9a1d32c2487..97bfece4a97 100644 --- a/atc/routes.go +++ b/atc/routes.go @@ -61,6 +61,7 @@ const ( DeletePipeline = "DeletePipeline" OrderPipelines = "OrderPipelines" PausePipeline = "PausePipeline" + ArchivePipeline = "ArchivePipeline" UnpausePipeline = "UnpausePipeline" ExposePipeline = "ExposePipeline" HidePipeline = "HidePipeline" @@ -156,6 +157,7 @@ var Routes = rata.Routes([]rata.Route{ {Path: "/api/v1/teams/:team_name/pipelines/:pipeline_name", Method: "DELETE", Name: DeletePipeline}, {Path: "/api/v1/teams/:team_name/pipelines/ordering", Method: "PUT", Name: OrderPipelines}, {Path: "/api/v1/teams/:team_name/pipelines/:pipeline_name/pause", Method: "PUT", Name: PausePipeline}, + {Path: "/api/v1/teams/:team_name/pipelines/:pipeline_name/archive", Method: "PUT", Name: ArchivePipeline}, {Path: "/api/v1/teams/:team_name/pipelines/:pipeline_name/unpause", Method: "PUT", Name: UnpausePipeline}, {Path: "/api/v1/teams/:team_name/pipelines/:pipeline_name/expose", Method: "PUT", Name: ExposePipeline}, {Path: "/api/v1/teams/:team_name/pipelines/:pipeline_name/hide", Method: "PUT", Name: HidePipeline}, diff --git a/atc/wrappa/api_auth_wrappa.go b/atc/wrappa/api_auth_wrappa.go index 2a8fd57b36b..75064c9732b 100644 --- a/atc/wrappa/api_auth_wrappa.go +++ b/atc/wrappa/api_auth_wrappa.go @@ -146,6 +146,7 @@ func (wrappa *APIAuthWrappa) Wrap(handlers rata.Handlers) rata.Handlers { atc.ExposePipeline, atc.HidePipeline, atc.SaveConfig, + atc.ArchivePipeline, atc.ClearTaskCache, atc.CreateArtifact, atc.ScheduleJob, diff --git a/atc/wrappa/api_auth_wrappa_test.go b/atc/wrappa/api_auth_wrappa_test.go index 49f396edc27..171c84ea719 100644 --- a/atc/wrappa/api_auth_wrappa_test.go +++ b/atc/wrappa/api_auth_wrappa_test.go @@ 
-239,6 +239,7 @@ var _ = Describe("APIAuthWrappa", func() { atc.OrderPipelines: authorized(inputHandlers[atc.OrderPipelines]), atc.PauseJob: authorized(inputHandlers[atc.PauseJob]), atc.PausePipeline: authorized(inputHandlers[atc.PausePipeline]), + atc.ArchivePipeline: authorized(inputHandlers[atc.ArchivePipeline]), atc.RenamePipeline: authorized(inputHandlers[atc.RenamePipeline]), atc.SaveConfig: authorized(inputHandlers[atc.SaveConfig]), atc.UnpauseJob: authorized(inputHandlers[atc.UnpauseJob]),
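
A minimal sketch of exercising the new route by hand, outside the Ginkgo suites. This is not part of the diff; the ATC URL, team, pipeline name, and token below are placeholders, and it assumes a valid bearer token (for example, one obtained via fly login).

package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Placeholder values; substitute a real ATC URL, team, pipeline, and token.
	atcURL := "http://localhost:8080"
	team, pipeline := "main", "some-pipeline"
	token := "REPLACE_WITH_BEARER_TOKEN"

	// PUT /api/v1/teams/:team_name/pipelines/:pipeline_name/archive,
	// the route registered in atc/routes.go above.
	url := fmt.Sprintf("%s/api/v1/teams/%s/pipelines/%s/archive", atcURL, team, pipeline)
	req, err := http.NewRequest("PUT", url, nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Authorization", "Bearer "+token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The handler in atc/api/pipelineserver/archive.go responds 200 on success
	// and 500 if the database update fails.
	fmt.Println(resp.Status)
}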