Merge 2.1.1 to master #3803

Closed · wants to merge 18 commits

Commits
4598d68
#3632 change default sgcollect output dir (#3636)
bbrks Jun 7, 2018
4bc25b9
Try to look for gozip in the tools directory, before falling back to …
bbrks Jun 7, 2018
95b598b
Port #3637 to release/2.1.0
adamcfraser Jun 7, 2018
f95fc82
Merge pull request #3646 from couchbase/feature/port_3637
adamcfraser Jun 7, 2018
b215c94
Fix TLS support for DCP when certpath not specified (#3660)
adamcfraser Jun 25, 2018
ddff918
Ensure checkpoint data is loaded regardless of backfill status (#3666)
adamcfraser Jun 28, 2018
a036bd8
Avoid hardcoding xattr refs in views (Fixes #3678) (#3679) (#3681)
adamcfraser Jul 19, 2018
af5379b
Backport Jenkinsfile to release/2.1.1 (#3754)
bbrks Sep 20, 2018
30a11bf
Feature/issue 3570 gsi wait (#3594) (#3758)
bbrks Sep 25, 2018
0d62906
Feature/issue 3738 norev (#3763)
tleyden Sep 26, 2018
f4d396f
Ensure removal of obsolete rev from revtree (#3694) (#3768)
adamcfraser Sep 28, 2018
308786c
2.1.1 Backport #3724 Fix resync with xattrs warnings (#3730) (#3753)
bbrks Sep 28, 2018
b180df7
Avoid error when processing documents impacted by #3692 (#3769)
adamcfraser Sep 28, 2018
45b46dc
#3765 Remove documents from channel caches on _purge or _compact (#3770)
bbrks Sep 28, 2018
ce0e8e7
Seed DCP Feed from all nodes (Fixes #3756) (#3757) (#3772)
bbrks Oct 1, 2018
c494187
Optional old revision body backups during import (#3775)
adamcfraser Oct 2, 2018
f998748
Preserve low sequence across longpoll changes iterations (#3778)
adamcfraser Oct 4, 2018
fea9947
Fixes issue #3558 Initialize cache after DCP start (#3607) (#3780)
adamcfraser Oct 4, 2018
Jenkinsfile (116 additions, 0 deletions)
@@ -0,0 +1,116 @@
pipeline {
    // Build on this uberjenkins node, as it has the Go environment set up in a known-state
    // We could potentially change this to use a dockerfile agent instead so it can be portable.
    agent { label 'sync-gateway-ami-builder' }

    environment {
        GO_VERSION = 'go1.8.5'
        GVM = "/root/.gvm/bin/gvm"
        GO = "/root/.gvm/gos/${GO_VERSION}/bin"
        GOPATH = "${WORKSPACE}/godeps"
        BRANCH = "${BRANCH_NAME}"
        COVERALLS_TOKEN = credentials('SG_COVERALLS_TOKEN')
    }

    stages {
        stage('Cloning') {
            steps {
                sh "git rev-parse HEAD > .git/commit-id"
                script {
                    env.SG_COMMIT = readFile '.git/commit-id'
                    // Set BRANCH variable to target branch if this build is a PR
                    if (env.CHANGE_TARGET) {
                        env.BRANCH = env.CHANGE_TARGET
                    }
                }

                // Make a hidden directory to move scm
                // checkout into, we'll need a bit for later,
                // but mostly rely on bootstrap.sh to get our code.
                //
                // TODO: We could probably change the implicit checkout
                // to clone directly into a subdirectory instead?
                sh 'mkdir .scm-checkout'
                sh 'mv * .scm-checkout/'
            }
        }
        stage('Setup Tools') {
            steps {
                echo 'Setting up Go tools..'
                // Use gvm to install the required Go version, if not already
                sh "${GVM} install $GO_VERSION"
                withEnv(["PATH+=${GO}", "GOPATH=${GOPATH}"]) {
                    sh "go version"
                    sh 'go get -v -u github.com/AlekSi/gocoverutil'
                    sh 'go get -v -u golang.org/x/tools/cmd/cover'
                    sh 'go get -v -u github.com/mattn/goveralls'
                    // Jenkins coverage reporting tools
                    // sh 'go get -v -u github.com/axw/gocov/...'
                    // sh 'go get -v -u github.com/AlekSi/gocov-xml'
                }
            }
        }
        stage('Bootstrap') {
            steps {
                echo "Bootstrapping commit ${SG_COMMIT}"
                sh 'cp .scm-checkout/bootstrap.sh .'
                sh 'chmod +x bootstrap.sh'
                sh "./bootstrap.sh -p sg-accel -c ${SG_COMMIT}"
            }
        }
        stage('Build') {
            steps {
                echo 'Building..'
                withEnv(["PATH+=${GO}"]) {
                    sh './build.sh -v'
                }
            }
        }
        stage('Test with coverage') {
            steps {
                echo 'Testing with coverage..'
                withEnv(["PATH+=${GO}:${GOPATH}/bin"]) {
                    // gocoverutil is required until we upgrade to Go 1.10, and can use -coverprofile with ./...
                    sh 'gocoverutil -coverprofile=cover_sg.out test -covermode=atomic github.com/couchbase/sync_gateway/...'
                    sh 'gocoverutil -coverprofile=cover_sga.out test -covermode=atomic github.com/couchbaselabs/sync-gateway-accel/...'

                    sh 'gocoverutil -coverprofile=cover_merged.out merge cover_sg.out cover_sga.out'

                    // Publish combined HTML coverage report
                    sh 'mkdir reports'
                    sh 'go tool cover -html=cover_merged.out -o reports/coverage.html'
                    publishHTML([allowMissing: false, alwaysLinkToLastBuild: false, includes: 'coverage.html', keepAll: false, reportDir: 'reports', reportFiles: 'coverage.html', reportName: 'Code Coverage', reportTitles: ''])
                }

                // Travis-related variables are required as coveralls only officially supports a certain set of CI tools.
                withEnv(["PATH+=${GO}:${GOPATH}/bin", "TRAVIS_BRANCH=${env.BRANCH}", "TRAVIS_PULL_REQUEST=${env.CHANGE_ID}", "TRAVIS_JOB_ID=${env.BUILD_NUMBER}"]) {
                    // Replace count covermode values with set just for coveralls to reduce the variability in reports.
                    sh 'awk \'NR==1{print "mode: set";next} $NF>0{$NF=1} {print}\' cover_sg.out > cover_coveralls.out'

                    // Send just the SG coverage report to coveralls.io - **NOT** accel! It will expose the codebase!!!
                    sh "goveralls -coverprofile=cover_coveralls.out -service=uberjenkins -repotoken=${COVERALLS_TOKEN}"

                    // Generate Cobertura XML report that can be parsed by the Jenkins Cobertura Plugin
                    // TODO: Requires Cobertura Plugin to be installed on Jenkins first
                    // sh 'gocov convert cover_sg.out | gocov-xml > reports/coverage.xml'
                    // step([$class: 'CoberturaPublisher', coberturaReportFile: 'reports/coverage.xml'])
                }
            }
        }
        stage('Test Race') {
            steps {
                echo 'Testing with -race..'
                withEnv(["PATH+=${GO}:${GOPATH}/bin"]) {
                    sh './test.sh -race'
                }
            }
        }
    }

    post {
        always {
            // TODO: Might be better to clean the workspace before a job runs instead
            step([$class: 'WsCleanup'])
        }
    }
}
auth/auth.go (6 additions, 7 deletions)
@@ -14,7 +14,6 @@ import (

"github.com/coreos/go-oidc/jose"
"github.com/coreos/go-oidc/oidc"
"github.com/couchbase/go-couchbase"
"github.com/couchbase/sync_gateway/base"
ch "github.com/couchbase/sync_gateway/channels"
pkgerrors "github.com/pkg/errors"
@@ -108,7 +107,7 @@ func (auth *Authenticator) getPrincipal(docID string, factory func() Principal)
        // Be careful: this block can be invoked multiple times if there are races!
        if currentValue == nil {
            princ = nil
            return nil, nil, couchbase.UpdateCancel
            return nil, nil, base.ErrUpdateCancel
        }

        princ = factory()
@@ -143,11 +142,11 @@ func (auth *Authenticator) getPrincipal(docID string, factory func() Principal)
            return updatedBytes, nil, marshalErr
        } else {
            // Principal is valid, so stop the update
            return nil, nil, couchbase.UpdateCancel
            return nil, nil, base.ErrUpdateCancel
        }
    })

    if err != nil && err != couchbase.UpdateCancel {
    if err != nil && err != base.ErrUpdateCancel {
        return nil, err
    }
    return princ, nil
@@ -443,7 +442,7 @@ func (auth *Authenticator) updateVbucketSequences(docID string, factory func() P
    err := auth.bucket.Update(docID, 0, func(currentValue []byte) ([]byte, *uint32, error) {
        // Be careful: this block can be invoked multiple times if there are races!
        if currentValue == nil {
            return nil, nil, couchbase.UpdateCancel
            return nil, nil, base.ErrUpdateCancel
        }
        princ := factory()
        if err := json.Unmarshal(currentValue, princ); err != nil {
@@ -491,11 +490,11 @@ func (auth *Authenticator) updateVbucketSequences(docID string, factory func() P
            return updatedBytes, nil, marshalErr
        } else {
            // No entries found requiring update, so cancel update.
            return nil, nil, couchbase.UpdateCancel
            return nil, nil, base.ErrUpdateCancel
        }
    })

    if err != nil && err != couchbase.UpdateCancel {
    if err != nil && err != base.ErrUpdateCancel {
        return err
    }
    return nil
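The auth/auth.go changes swap the third-party couchbase.UpdateCancel sentinel for base.ErrUpdateCancel, but the surrounding pattern is unchanged: the callback passed to the bucket update returns the sentinel to abandon the write, and the caller treats that particular error as "nothing to do" rather than a failure. Below is a minimal, self-contained sketch of that pattern; the in-memory store, the key, and the updateDoc helper are invented for illustration and are not Sync Gateway APIs.

```go
package main

import (
    "errors"
    "fmt"
)

// errUpdateCancel is a stand-in for base.ErrUpdateCancel.
var errUpdateCancel = errors.New("update cancelled")

// updateDoc runs fn against the current value for key and persists the result,
// unless fn returns an error (including the cancel sentinel).
func updateDoc(store map[string][]byte, key string, fn func(current []byte) ([]byte, error)) error {
    updated, err := fn(store[key])
    if err != nil {
        return err
    }
    store[key] = updated
    return nil
}

func main() {
    store := map[string][]byte{}

    err := updateDoc(store, "_sync:user:alice", func(current []byte) ([]byte, error) {
        if current == nil {
            // Nothing stored for this key: cancel instead of writing an empty document.
            return nil, errUpdateCancel
        }
        return append([]byte(nil), current...), nil
    })

    // Mirror the diff: the cancel sentinel is expected and not treated as a failure.
    if err != nil && err != errUpdateCancel {
        panic(err)
    }
    fmt.Println("documents stored:", len(store)) // 0: the write was cancelled
}
```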
base/bucket.go (14 additions, 15 deletions)
@@ -213,14 +213,11 @@ func (b BucketSpec) TLSConnect(prot, dest string) (rv *memcached.Client, err err
        return nil, pkgerrors.Wrapf(err, "Error setting NoDelay on tcpConn during TLS Connect")
    }

    tlsConfig := &tls.Config{}
    if b.Certpath != "" && b.Keypath != "" {
        var configErr error
        tlsConfig, configErr = TLSConfigForX509(b.Certpath, b.Keypath, b.CACertPath)
        if configErr != nil {
            return nil, pkgerrors.Wrapf(configErr, "Error adding x509 to TLSConfig for DCP TLS connection")
        }
    tlsConfig, configErr := TLSConfigForX509(b.Certpath, b.Keypath, b.CACertPath)
    if configErr != nil {
        return nil, pkgerrors.Wrapf(configErr, "Error creating TLSConfig for DCP TLS connection")
    }

    tlsConfig.ServerName = host

    tlsConn := tls.Client(tcpConn, tlsConfig)
@@ -233,20 +230,19 @@ func (b BucketSpec) TLSConnect(prot, dest string) (rv *memcached.Client, err err

}

// Returns a TLSConfig based on the specified certificate paths. If none are provided, returns tlsConfig with
// InsecureSkipVerify:true.
func TLSConfigForX509(certpath, keypath, cacertpath string) (*tls.Config, error) {

    cacertpaths := []string{cacertpath}

    tlsConfig := &tls.Config{}

    if len(cacertpaths) > 0 {
    if cacertpath != "" {
        cacertpaths := []string{cacertpath}
        rootCerts := x509.NewCertPool()
        for _, path := range cacertpaths {
            cacert, err := ioutil.ReadFile(path)
            if err != nil {
                return nil, err
            }

            ok := rootCerts.AppendCertsFromPEM(cacert)
            if !ok {
                return nil, fmt.Errorf("can't append certs from PEM")
@@ -261,6 +257,7 @@ func TLSConfigForX509(certpath, keypath, cacertpath string) (*tls.Config, error)
        tlsConfig.InsecureSkipVerify = true
    }

    // If client cert and key are provided, add to config as x509 key pair
    if certpath != "" && keypath != "" {
        cert, err := tls.LoadX509KeyPair(certpath, keypath)
        if err != nil {
@@ -269,6 +266,7 @@ func TLSConfigForX509(certpath, keypath, cacertpath string) (*tls.Config, error)

        tlsConfig.Certificates = []tls.Certificate{cert}
    }

    return tlsConfig, nil
}
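The reworked TLSConfigForX509 always returns a usable config: a CA certificate path populates a root cert pool, no CA path falls back to InsecureSkipVerify, and a client certificate/key pair is attached only when both paths are given. That is what lets TLSConnect call it unconditionally even when certpath is not specified. The helper below is a hedged, standalone re-implementation of that decision logic using only the standard library; the helper name, file paths, and the demo in main are illustrative rather than Sync Gateway code.

```go
package main

import (
    "crypto/tls"
    "crypto/x509"
    "fmt"
    "io/ioutil"
)

// tlsConfigForX509 sketches the same branching as the diff: CA cert -> RootCAs,
// no CA cert -> InsecureSkipVerify, plus an optional client cert/key pair.
func tlsConfigForX509(certpath, keypath, cacertpath string) (*tls.Config, error) {
    tlsConfig := &tls.Config{}

    if cacertpath != "" {
        cacert, err := ioutil.ReadFile(cacertpath)
        if err != nil {
            return nil, err
        }
        rootCerts := x509.NewCertPool()
        if !rootCerts.AppendCertsFromPEM(cacert) {
            return nil, fmt.Errorf("can't append certs from PEM")
        }
        tlsConfig.RootCAs = rootCerts
    } else {
        // No CA provided: skip server verification, matching the fallback above.
        tlsConfig.InsecureSkipVerify = true
    }

    if certpath != "" && keypath != "" {
        cert, err := tls.LoadX509KeyPair(certpath, keypath)
        if err != nil {
            return nil, err
        }
        tlsConfig.Certificates = []tls.Certificate{cert}
    }

    return tlsConfig, nil
}

func main() {
    // With no paths at all, the config is still usable; this is the scenario the
    // "TLS support for DCP when certpath not specified" fix targets.
    cfg, err := tlsConfigForX509("", "", "")
    if err != nil {
        panic(err)
    }
    fmt.Println("insecure skip verify:", cfg.InsecureSkipVerify) // true
}
```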

@@ -495,16 +493,17 @@ func GetStatsVbSeqno(stats map[string]map[string]string, maxVbno uint16, useAbsH
            }

            highSeqno, err := strconv.ParseUint(serverMap[highSeqnoKey], 10, 64)
            if err == nil && highSeqno > 0 {
            // Each node will return seqnos for its active and replica vBuckets. Iterating over all nodes will give us
            // numReplicas*maxVbno results. Rather than filter by active/replica (which would require a separate STATS call)
            // simply pick the highest.
            if err == nil && highSeqno > highSeqnos[i] {
                highSeqnos[i] = highSeqno
                uuid, err := strconv.ParseUint(serverMap[uuidKey], 10, 64)
                if err == nil {
                    uuids[i] = uuid
                }
            }
        }
        // We're only using a single server, so can break after the first entry in the map.
        break
    }
    return

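The GetStatsVbSeqno change ties in with commit ce0e8e7 ("Seed DCP Feed from all nodes"), which stops breaking out after the first node's stats. As the added comment explains, each node reports seqnos for both its active and replica vBuckets, so rather than filtering by ownership the loop simply keeps the highest seqno seen per vBucket. A small sketch of that aggregation, with invented node data:

```go
package main

import "fmt"

func main() {
    // Per-node stats: vBucket number -> high seqno, as each node might report them.
    nodeStats := []map[uint16]uint64{
        {0: 42, 1: 7}, // node A: active copy of vb 0, replica of vb 1
        {0: 40, 1: 9}, // node B: replica of vb 0 (slightly behind), active copy of vb 1
    }

    highSeqnos := map[uint16]uint64{}
    for _, serverMap := range nodeStats { // no early break: consider every node
        for vbNo, seqno := range serverMap {
            if seqno > highSeqnos[vbNo] {
                highSeqnos[vbNo] = seqno
            }
        }
    }

    fmt.Println(highSeqnos) // expected: map[0:42 1:9]
}
```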