Skip to content

Commit

Permalink
Merge branch 'master' of github.com:ovh/cds into upgrade_actix
Browse files Browse the repository at this point in the history
  • Loading branch information
bnjjj committed Jun 17, 2019
2 parents d73011f + 6977b79 commit 30c4694
Show file tree
Hide file tree
Showing 217 changed files with 5,470 additions and 5,780 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ debug.test
myOpenstackIntegration.yml
myAWSS3Integration.yml
test_results.yml
test_results.xml
*.dump
.tmp
contrib/**/dist
Expand Down
8 changes: 4 additions & 4 deletions cli/cdsctl/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -63,15 +63,15 @@ clean:

$(TARGET_BINARIES):
$(info *** building binary $@)
@$(MAKE) --no-print-directory gobuild GOOS=$(call get_os_from_binary_file,$@) GOARCH=$(call get_arch_from_binary_file,$@) OUTPUT=$@
@$(MAKE) --no-print-directory gobuild TAGS='' GOOS=$(call get_os_from_binary_file,$@) GOARCH=$(call get_arch_from_binary_file,$@) OUTPUT=$@

$(TARGET_BINARIES_VARIANT):
$(info *** building binary $@)
@$(MAKE) --no-print-directory gobuild GOOS=$(call get_os_from_binary_file,$(subst -nokeychain,,$@)) GOARCH=$(call get_arch_from_binary_file,$(subst -nokeychain,,$@)) OUTPUT=$@
$(info *** building binary variant $@)
@$(MAKE) --no-print-directory gobuild TAGS="--tags='nokeychain'" GOOS=$(call get_os_from_binary_file,$(subst -nokeychain,,$@)) GOARCH=$(call get_arch_from_binary_file,$(subst -nokeychain,,$@)) OUTPUT=$@

gobuild:
$(info ... OS:$(GOOS) ARCH:$(GOARCH) -> $(OUTPUT))
@GOOS=$(GOOS) GOARCH=$(GOARCH) $(GO_BUILD) $(TARGET_LDFLAGS) -o $(OUTPUT)
@GOOS=$(GOOS) GOARCH=$(GOARCH) $(GO_BUILD) $(TAGS) $(TARGET_LDFLAGS) -o $(OUTPUT)

build: $(TARGET_DIR) $(TARGET_BINARIES_VARIANT) $(TARGET_BINARIES)

Expand Down
2 changes: 1 addition & 1 deletion cli/cdsctl/configstore.go
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
// +build freebsd openbsd linux,386 linux,arm windows,386 windows,arm nokeychain
// +build nokeychain freebsd openbsd 386

package main

Expand Down
2 changes: 2 additions & 0 deletions cli/cdsctl/configstore_linux_amd64.go
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
// +build !nokeychain

package main

import (
Expand Down
2 changes: 1 addition & 1 deletion contrib/helm/cds/templates/api-deployment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ spec:
name: {{ template "cds.fullname" . }}
key: cds-api_secrets_key
command: ["/bin/sh"]
args: ["-c", "echo $CDS_CONFIG_FILE | base64 --decode > config.toml && /app/cds-engine-linux-amd64 start api --config config.toml"]
args: ["-c", "echo $CDS_CONFIG_FILE | base64 -d > config.toml && /app/cds-engine-linux-amd64 start api --config config.toml"]
ports:
- name: http
containerPort: 8081
Expand Down
2 changes: 1 addition & 1 deletion contrib/helm/cds/templates/elasticsearch-deployment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ spec:
- name: CDS_LOG_LEVEL
value: {{ default "" .Values.logLevel | quote }}
command: ["/bin/sh"]
args: ["-c", "echo $CDS_CONFIG_FILE | base64 --decode > config.toml && /app/cds-engine-linux-amd64 start elasticsearch --config config.toml"]
args: ["-c", "echo $CDS_CONFIG_FILE | base64 -d > config.toml && /app/cds-engine-linux-amd64 start elasticsearch --config config.toml"]
ports:
- name: http
containerPort: 8084
2 changes: 1 addition & 1 deletion contrib/helm/cds/templates/hooks-deployment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ spec:
- name: CDS_LOG_LEVEL
value: {{ default "" .Values.logLevel | quote }}
command: ["/bin/sh"]
args: ["-c", "echo $CDS_CONFIG_FILE | base64 --decode > config.toml && /app/cds-engine-linux-amd64 start hooks --config config.toml"]
args: ["-c", "echo $CDS_CONFIG_FILE | base64 -d > config.toml && /app/cds-engine-linux-amd64 start hooks --config config.toml"]
ports:
- name: http
containerPort: 8084
2 changes: 1 addition & 1 deletion contrib/helm/cds/templates/repositories-deployment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ spec:
- name: CDS_LOG_LEVEL
value: {{ default "" .Values.logLevel | quote }}
command: ["/bin/sh"]
args: ["-c", "echo $CDS_CONFIG_FILE | base64 --decode > config.toml && /app/cds-engine-linux-amd64 start repositories --config config.toml"]
args: ["-c", "echo $CDS_CONFIG_FILE | base64 -d > config.toml && /app/cds-engine-linux-amd64 start repositories --config config.toml"]
ports:
- name: http
containerPort: 8084
Expand Down
2 changes: 1 addition & 1 deletion contrib/helm/cds/templates/vcs-deployment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ spec:
- name: CDS_LOG_LEVEL
value: {{ default "" .Values.logLevel | quote }}
command: ["/bin/sh"]
args: ["-c", "echo $CDS_CONFIG_FILE | base64 --decode > config.toml && /app/cds-engine-linux-amd64 start vcs --config config.toml"]
args: ["-c", "echo $CDS_CONFIG_FILE | base64 -d > config.toml && /app/cds-engine-linux-amd64 start vcs --config config.toml"]
ports:
- name: http
containerPort: 8084
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,6 @@ You have to:
* link an application to a git repository
* add a Repository Webhook on the root pipeline, this pipeline has the application linked in the [context]({{< relref "/docs/concepts/workflow/pipeline-context.md" >}})

GitHub / Bitbucket / GitLab are supported by CDS.
GitHub / GitHub Enterprise / Bitbucket Cloud / Bitbucket Server / GitLab are supported by CDS.

> When you add a repository webhook, it will also automatically delete your runs which are linked to a deleted branch (24h after branch deletion).
2 changes: 1 addition & 1 deletion docs/content/docs/integrations/bitbucket.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ card:
name: repository-manager
---

The Bitbucket Integration have to be configured on your CDS by a CDS Administrator.
The Bitbucket Server Integration has to be configured on your CDS by a CDS Administrator.

This integration allows you to link a Git Repository hosted by your Bitbucket Server
to a CDS Application.
Expand Down
74 changes: 74 additions & 0 deletions docs/content/docs/integrations/bitbucketcloud.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
---
title: Bitbucket Cloud
main_menu: true
card:
name: repository-manager
---

The Bitbucket Cloud Integration has to be configured on your CDS by a CDS Administrator.

This integration allows you to link a Git Repository hosted by your Bitbucket Cloud
to a CDS Application.

This integration enables some features:

- [Git Repository Webhook]({{<relref "/docs/concepts/workflow/hooks/git-repo-webhook.md" >}})
- Easy to use action [CheckoutApplication]({{<relref "/docs/actions/builtin-checkoutapplication.md" >}}) and [GitClone]({{<relref "/docs/actions/builtin-gitclone.md">}}) for advanced usage
- Send [build notifications](https://confluence.atlassian.com/bitbucket/check-build-status-in-a-pull-request-945541505.html) on your Pull-Requests and Commits on Bitbucket Cloud

## How to configure Bitbucket Cloud integration

+ Go on [Bitbucket.org](https://bitbucket.org/dashboard/overview) and log in.
+ From your avatar in the bottom left, click ***Bitbucket settings***.
+ Click OAuth from the left navigation.
+ Click the Add consumer button.
+ Bitbucket requests some information: for the `name` you can simply write CDS, `description` is optional, `callback url` must be the URL of your CDS -> {CDS_UI_URL}/cdsapi/repositories_manager/oauth2/callback (if you are in development mode you have to omit /cdsapi and replace {CDS_UI_URL} with your API URL), `URL` is optional.
+ Click on Save and toggle the consumer name to see the generated `Key` and `Secret`. They correspond to `clientId` and `clientSecret` in the CDS config.toml file.

### Complete CDS Configuration File

Set value to `clientId` and `clientSecret`.

```yaml
[vcs.servers]
[vcs.servers.bitbucketcloud]

[vcs.servers.bitbucketcloud.bitbucketcloud]

# Bitbucket Cloud OAuth Key
clientId = "XXXX"

# Bitbucket Cloud OAuth Secret
clientSecret = "XXXX"

# Does webhooks are supported by VCS Server
disableWebHooks = false

# Does webhooks creation are supported by VCS Server
disableWebHooksCreation = false

#proxyWebhook = "https://myproxy.com/"

[vcs.servers.bitbucketcloud.bitbucketcloud.Status]

# Set to true if you don't want CDS to push statuses on the VCS server
disable = false

# Set to true if you don't want CDS to push CDS URL in statuses on the VCS server
showDetail = false
```

See how to generate **[Configuration File]({{<relref "/hosting/configuration.md" >}})**

## Start the vcs µService

```bash
$ engine start vcs

# you can also start CDS api and vcs in the same process:
$ engine start api vcs
```

## Vcs events

For now, CDS supports push events. CDS uses this push event to remove existing runs for deleted branches (24h after branch deletion).
4 changes: 2 additions & 2 deletions engine/api/admin.go
Original file line number Diff line number Diff line change
Expand Up @@ -134,7 +134,7 @@ func selectDeleteAdminServiceCallHandler(api *API, method string) service.Handle
}

query := r.FormValue("query")
btes, code, err := services.DoRequest(ctx, srvs, method, query, nil)
btes, _, code, err := services.DoRequest(ctx, srvs, method, query, nil)
if err != nil {
return sdk.NewError(sdk.Error{
Status: code,
Expand Down Expand Up @@ -165,7 +165,7 @@ func putPostAdminServiceCallHandler(api *API, method string) service.Handler {
}
defer r.Body.Close()

btes, code, err := services.DoRequest(ctx, srvs, method, query, body)
btes, _, code, err := services.DoRequest(ctx, srvs, method, query, body)
if err != nil {
return sdk.NewError(sdk.Error{
Status: code,
Expand Down
9 changes: 4 additions & 5 deletions engine/api/api.go
Original file line number Diff line number Diff line change
Expand Up @@ -776,17 +776,16 @@ func (a *API) Serve(ctx context.Context) error {
sdk.GoRoutine(ctx, "migrate.KeyMigration", func(ctx context.Context) {
migrate.KeyMigration(a.Cache, a.DBConnectionFactory.GetDBMap, &sdk.User{Admin: true})
}, a.PanicDump())

migrate.Add(sdk.Migration{Name: "WorkflowOldStruct", Release: "0.38.1", Mandatory: true, ExecFunc: func(ctx context.Context) error {
return migrate.WorkflowRunOldModel(ctx, a.DBConnectionFactory.GetDBMap)
}})
migrate.Add(sdk.Migration{Name: "WorkflowNotification", Release: "0.38.1", Mandatory: true, ExecFunc: func(ctx context.Context) error {
return migrate.WorkflowNotifications(a.Cache, a.DBConnectionFactory.GetDBMap)
}})
migrate.Add(sdk.Migration{Name: "CleanArtifactBuiltinActions", Release: "0.38.1", Mandatory: true, ExecFunc: func(ctx context.Context) error {
return migrate.CleanArtifactBuiltinActions(ctx, a.Cache, a.DBConnectionFactory.GetDBMap)
}})
// migrate.Add(sdk.Migration{Name: "GitClonePrivateKey", Release: "0.38.1", Mandatory: true, ExecFunc: func(ctx context.Context) error {
migrate.Add(sdk.Migration{Name: "StageConditions", Release: "0.39.3", Mandatory: true, ExecFunc: func(ctx context.Context) error {
return migrate.StageConditions(a.Cache, a.DBConnectionFactory.GetDBMap())
}})
// migrate.Add(sdk.Migration{Name: "GitClonePrivateKey", Release: "0.37.0", Mandatory: true, ExecFunc: func(ctx context.Context) error {
// return migrate.GitClonePrivateKey(a.mustDB, a.Cache)
// }})
migrate.Add(sdk.Migration{Name: "ActionModelRequirements", Release: "0.39.3", Mandatory: true, ExecFunc: func(ctx context.Context) error {
Expand Down
5 changes: 1 addition & 4 deletions engine/api/api_routes.go
Original file line number Diff line number Diff line change
Expand Up @@ -91,9 +91,6 @@ func (api *API) InitRouter() {
r.Handle("/group/{permGroupName}/token", r.GET(api.getGroupTokenListHandler), r.POST(api.generateTokenHandler))
r.Handle("/group/{permGroupName}/token/{tokenid}", r.DELETE(api.deleteTokenHandler))

// Hatchery
r.Handle("/hatchery/count/{workflowNodeRunID}", r.GET(api.hatcheryCountHandler))

// Hooks
r.Handle("/hook/{uuid}/workflow/{workflowID}/vcsevent/{vcsServer}", r.GET(api.getHookPollingVCSEvents))

Expand Down Expand Up @@ -178,6 +175,7 @@ func (api *API) InitRouter() {
r.Handle("/project/{permProjectKey}/pipeline/{pipelineKey}/rollback/{auditID}", r.POST(api.postPipelineRollbackHandler))
r.Handle("/project/{permProjectKey}/pipeline/{pipelineKey}/audits", r.GET(api.getPipelineAuditHandler))
r.Handle("/project/{permProjectKey}/pipeline/{pipelineKey}/stage", r.POST(api.addStageHandler))
r.Handle("/project/{permProjectKey}/pipeline/{pipelineKey}/stage/condition", r.GET(api.getStageConditionsHandler))
r.Handle("/project/{permProjectKey}/pipeline/{pipelineKey}/stage/move", r.POST(api.moveStageHandler))
r.Handle("/project/{permProjectKey}/pipeline/{pipelineKey}/stage/{stageID}", r.GET(api.getStageHandler), r.PUT(api.updateStageHandler), r.DELETE(api.deleteStageHandler))
r.Handle("/project/{permProjectKey}/pipeline/{pipelineKey}/stage/{stageID}/job", r.POST(api.addJobToStageHandler))
Expand Down Expand Up @@ -285,7 +283,6 @@ func (api *API) InitRouter() {
r.Handle("/queue/workflows/count", r.GET(api.countWorkflowJobQueueHandler, EnableTracing(), MaintenanceAware()))
r.Handle("/queue/workflows/{id}/take", r.POST(api.postTakeWorkflowJobHandler, NeedWorker(), EnableTracing(), MaintenanceAware()))
r.Handle("/queue/workflows/{id}/book", r.POST(api.postBookWorkflowJobHandler, NeedHatchery(), EnableTracing(), MaintenanceAware()), r.DELETE(api.deleteBookWorkflowJobHandler, NeedHatchery(), EnableTracing(), MaintenanceAware()))
r.Handle("/queue/workflows/{id}/attempt", r.POST(api.postIncWorkflowJobAttemptHandler, NeedHatchery(), EnableTracing(), MaintenanceAware()))
r.Handle("/queue/workflows/{id}/infos", r.GET(api.getWorkflowJobHandler, NeedWorker(), NeedHatchery(), EnableTracing(), MaintenanceAware()))
r.Handle("/queue/workflows/{permID}/vulnerability", r.POSTEXECUTE(api.postVulnerabilityReportHandler, NeedWorker(), EnableTracing(), MaintenanceAware()))
r.Handle("/queue/workflows/{id}/spawn/infos", r.POST(r.Asynchronous(api.postSpawnInfosWorkflowJobHandler, 1), NeedHatchery(), EnableTracing(), MaintenanceAware()))
Expand Down
2 changes: 1 addition & 1 deletion engine/api/application.go
Original file line number Diff line number Diff line change
Expand Up @@ -179,7 +179,7 @@ func (api *API) getApplicationVCSInfosHandler() service.Handler {
}

vcsServer := repositoriesmanager.GetProjectVCSServer(proj, app.VCSServer)
client, erra := repositoriesmanager.AuthorizedClient(ctx, api.mustDB(), api.Cache, vcsServer)
client, erra := repositoriesmanager.AuthorizedClient(ctx, api.mustDB(), api.Cache, projectKey, vcsServer)
if erra != nil {
return sdk.WrapError(sdk.ErrNoReposManagerClientAuth, "getApplicationVCSInfosHandler> Cannot get client got %s %s : %s", projectKey, app.VCSServer, erra)
}
Expand Down
6 changes: 3 additions & 3 deletions engine/api/application/dao.go
Original file line number Diff line number Diff line change
Expand Up @@ -106,9 +106,9 @@ func LoadByWorkflowID(db gorp.SqlExecutor, workflowID int64) ([]sdk.Application,
apps := []sdk.Application{}
query := fmt.Sprintf(`SELECT DISTINCT %s
FROM application
JOIN workflow_node_context ON workflow_node_context.application_id = application.id
JOIN workflow_node ON workflow_node.id = workflow_node_context.workflow_node_id
JOIN workflow ON workflow.id = workflow_node.workflow_id
JOIN w_node_context ON w_node_context.application_id = application.id
JOIN w_node ON w_node.id = w_node_context.node_id
JOIN workflow ON workflow.id = w_node.workflow_id
WHERE workflow.id = $1`, appRows)

if _, err := db.Select(&apps, query, workflowID); err != nil {
Expand Down
1 change: 0 additions & 1 deletion engine/api/application/dao_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -193,7 +193,6 @@ func TestLoadByWorkflowID(t *testing.T) {
}

test.NoError(t, workflow.RenameNode(db, &w))
(&w).RetroMigrate()

proj, _ = project.LoadByID(db, cache, proj.ID, u, project.LoadOptions.WithApplications, project.LoadOptions.WithPipelines, project.LoadOptions.WithEnvironments, project.LoadOptions.WithGroups)

Expand Down
4 changes: 2 additions & 2 deletions engine/api/ascode.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ func (api *API) postImportAsCodeHandler() service.Handler {
}

vcsServer := repositoriesmanager.GetProjectVCSServer(p, ope.VCSServer)
client, erra := repositoriesmanager.AuthorizedClient(ctx, api.mustDB(), api.Cache, vcsServer)
client, erra := repositoriesmanager.AuthorizedClient(ctx, api.mustDB(), api.Cache, p.Key, vcsServer)
if erra != nil {
return sdk.WrapError(sdk.ErrNoReposManagerClientAuth, "postImportAsCodeHandler> Cannot get client for %s %s : %s", key, ope.VCSServer, erra)
}
Expand Down Expand Up @@ -153,7 +153,7 @@ func (api *API) postPerformImportAsCodeHandler() service.Handler {
// Grant CDS as a repository collaborator
// TODO for this moment, this step is not mandatory. If it's failed, continue the ascode process
vcsServer := repositoriesmanager.GetProjectVCSServer(proj, ope.VCSServer)
client, erra := repositoriesmanager.AuthorizedClient(ctx, api.mustDB(), api.Cache, vcsServer)
client, erra := repositoriesmanager.AuthorizedClient(ctx, api.mustDB(), api.Cache, proj.Key, vcsServer)
if erra != nil {
log.Error("postPerformImportAsCodeHandler> Cannot get client for %s %s : %s", proj.Key, ope.VCSServer, erra)
} else {
Expand Down
2 changes: 1 addition & 1 deletion engine/api/ascode_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -212,7 +212,7 @@ func Test_postPerformImportAsCodeHandler(t *testing.T) {
t.Logf("RequestURI: %s", r.URL.Path)
switch r.URL.Path {
case "/task/bulk":
hooks := map[string]sdk.WorkflowNodeHook{}
hooks := map[string]sdk.NodeHook{}
if err := service.UnmarshalBody(r, &hooks); err != nil {
return nil, sdk.WithStack(err)
}
Expand Down
6 changes: 3 additions & 3 deletions engine/api/environment/environment.go
Original file line number Diff line number Diff line change
Expand Up @@ -129,9 +129,9 @@ func LoadEnvironmentByName(db gorp.SqlExecutor, projectKey, envName string) (*sd
func LoadByWorkflowID(db gorp.SqlExecutor, workflowID int64) ([]sdk.Environment, error) {
envs := []sdk.Environment{}
query := `SELECT DISTINCT environment.* FROM environment
JOIN workflow_node_context ON workflow_node_context.environment_id = environment.id
JOIN workflow_node ON workflow_node.id = workflow_node_context.workflow_node_id
JOIN workflow ON workflow.id = workflow_node.workflow_id
JOIN w_node_context ON w_node_context.environment_id = environment.id
JOIN w_node ON w_node.id = w_node_context.node_id
JOIN workflow ON workflow.id = w_node.workflow_id
WHERE workflow.id = $1`

if _, err := db.Select(&envs, query, workflowID); err != nil {
Expand Down
4 changes: 2 additions & 2 deletions engine/api/event/elasticsearch.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ func PushInElasticSearch(c context.Context, db gorp.SqlExecutor, store cache.Sto
continue
}
e.Payload = nil
code, errD := services.DoJSONRequest(context.Background(), esServices, "POST", "/events", e, nil)
_, code, errD := services.DoJSONRequest(context.Background(), esServices, "POST", "/events", e, nil)
if code >= 400 || errD != nil {
log.Error("PushInElasticSearch> Unable to send event %s to elasticsearch [%d]: %v", e.EventType, code, errD)
continue
Expand All @@ -58,7 +58,7 @@ func GetEvents(db gorp.SqlExecutor, store cache.Store, filters sdk.EventFilter)
}

var esEvents []elastic.SearchHit
if _, err := services.DoJSONRequest(context.Background(), srvs, "GET", "/events", filters, &esEvents); err != nil {
if _, _, err := services.DoJSONRequest(context.Background(), srvs, "GET", "/events", filters, &esEvents); err != nil {
return nil, sdk.WrapError(err, "Unable to get events")
}

Expand Down
45 changes: 17 additions & 28 deletions engine/api/event/publish_workflow_run.go
Original file line number Diff line number Diff line change
Expand Up @@ -84,36 +84,25 @@ func PublishWorkflowNodeRun(db gorp.SqlExecutor, nr sdk.WorkflowNodeRun, w sdk.W
var nodeName string
var app sdk.Application
var env sdk.Environment
n := w.GetNode(nr.WorkflowNodeID)
if n == nil {
// check on workflow data
wnode := w.WorkflowData.NodeByID(nr.WorkflowNodeID)
if wnode == nil {
log.Warning("PublishWorkflowNodeRun> Unable to publish event on node %d", nr.WorkflowNodeID)
return
}
nodeName = wnode.Name
if wnode.Context != nil && wnode.Context.PipelineID != 0 {
pipName = w.Pipelines[wnode.Context.PipelineID].Name
}

if wnode.Context != nil && wnode.Context.ApplicationID != 0 {
app = w.Applications[wnode.Context.ApplicationID]
}
if wnode.Context != nil && wnode.Context.EnvironmentID != 0 {
env = w.Environments[wnode.Context.EnvironmentID]
}
e.NodeType = wnode.Type
} else {
nodeName = n.Name
pipName = w.Pipelines[n.PipelineID].Name
if n.Context != nil && n.Context.Application != nil {
app = *n.Context.Application
}
if n.Context != nil && n.Context.Environment != nil {
env = *n.Context.Environment
}
// check on workflow data
wnode := w.WorkflowData.NodeByID(nr.WorkflowNodeID)
if wnode == nil {
log.Warning("PublishWorkflowNodeRun> Unable to publish event on node %d", nr.WorkflowNodeID)
return
}
nodeName = wnode.Name
if wnode.Context != nil && wnode.Context.PipelineID != 0 {
pipName = w.Pipelines[wnode.Context.PipelineID].Name
}

if wnode.Context != nil && wnode.Context.ApplicationID != 0 {
app = w.Applications[wnode.Context.ApplicationID]
}
if wnode.Context != nil && wnode.Context.EnvironmentID != 0 {
env = w.Environments[wnode.Context.EnvironmentID]
}
e.NodeType = wnode.Type

// Try to get gerrit variable
var project, changeID, branch, revision, url string
Expand Down
Loading

0 comments on commit 30c4694

Please sign in to comment.