diff --git a/command/s3_put.go b/command/s3_put.go index 2ca573b677a..c5c7a5f327e 100644 --- a/command/s3_put.go +++ b/command/s3_put.go @@ -399,7 +399,7 @@ func (s3pc *s3put) createPailBucket(httpClient *http.Client) error { Credentials: pail.CreateAWSCredentials(s3pc.AwsKey, s3pc.AwsSecret, ""), Region: endpoints.UsEast1RegionID, Name: s3pc.Bucket, - Permission: s3pc.Permissions, + Permissions: pail.S3Permissions(s3pc.Permissions), ContentType: s3pc.ContentType, } bucket, err := pail.NewS3MultiPartBucketWithHTTPClient(httpClient, opts) diff --git a/config.go b/config.go index 50740b7c6ef..ef0a9e2fbf0 100644 --- a/config.go +++ b/config.go @@ -54,6 +54,7 @@ type Settings struct { Banner string `bson:"banner" json:"banner" yaml:"banner"` BannerTheme BannerTheme `bson:"banner_theme" json:"banner_theme" yaml:"banner_theme"` Bugsnag string `yaml:"bugsnag" bson:"bugsnag" json:"bugsnag"` + Backup BackupConfig `bson:"backup" json:"backup" yaml:"backup"` ClientBinariesDir string `yaml:"client_binaries_dir" bson:"client_binaries_dir" json:"client_binaries_dir"` CommitQueue CommitQueueConfig `yaml:"commit_queue" bson:"commit_queue" json:"commit_queue" id:"commit_queue"` ConfigDir string `yaml:"configdir" bson:"configdir" json:"configdir"` diff --git a/config_backup.go b/config_backup.go new file mode 100644 index 00000000000..1978338790f --- /dev/null +++ b/config_backup.go @@ -0,0 +1,64 @@ +package evergreen + +import ( + "github.com/pkg/errors" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +type BackupConfig struct { + BucketName string `bson:"bucket_name" json:"bucket_name" yaml:"bucket_name"` + Key string `bson:"key" json:"key" yaml:"key"` + Secret string `bson:"secret" json:"secret" yaml:"secret"` + Prefix string `bson:"prefix" json:"prefix" yaml:"prefix"` + Compress bool `bson:"compress" json:"compress" yaml:"compress"` +} + +func (c *BackupConfig) SectionId() string { return "backup" } +func (c *BackupConfig) ValidateAndDefault() error { return nil } + +func (c *BackupConfig) Populated() bool { + return c.BucketName != "" && c.Prefix != "" +} + +func (c *BackupConfig) Set() error { + env := GetEnvironment() + ctx, cancel := env.Context() + defer cancel() + coll := env.DB().Collection(ConfigCollection) + + _, err := coll.UpdateOne(ctx, byId(c.SectionId()), bson.M{ + "$set": bson.M{ + "bucket_name": c.BucketName, + "key": c.Key, + "secret": c.Secret, + "compress": c.Compress, + "prefix": c.Prefix, + }, + }, options.Update().SetUpsert(true)) + + return errors.Wrapf(err, "error updating section %s", c.SectionId()) +} + +func (c *BackupConfig) Get(env Environment) error { + ctx, cancel := env.Context() + defer cancel() + coll := env.DB().Collection(ConfigCollection) + + res := coll.FindOne(ctx, byId(c.SectionId())) + if err := res.Err(); err != nil { + return errors.Wrapf(err, "error retrieving section %s", c.SectionId()) + } + + if err := res.Decode(c); err != nil { + if err == mongo.ErrNoDocuments { + *c = BackupConfig{} + return nil + } + + return errors.Wrap(err, "problem decoding result") + } + + return nil +} diff --git a/config_db.go b/config_db.go index 2a235126b9b..3e038fdbc42 100644 --- a/config_db.go +++ b/config_db.go @@ -56,6 +56,7 @@ var ( containerPoolsKey = bsonutil.MustHaveTag(Settings{}, "ContainerPools") commitQueueKey = bsonutil.MustHaveTag(Settings{}, "CommitQueue") ldapRoleMapKey = bsonutil.MustHaveTag(Settings{}, "LDAPRoleMap") + backupConfig = bsonutil.MustHaveTag(Settings{}, "Backup") // 
degraded mode flags taskDispatchKey = bsonutil.MustHaveTag(ServiceFlags{}, "TaskDispatchDisabled") @@ -82,6 +83,7 @@ var ( commitQueueDisabledKey = bsonutil.MustHaveTag(ServiceFlags{}, "CommitQueueDisabled") plannerDisabledKey = bsonutil.MustHaveTag(ServiceFlags{}, "PlannerDisabled") hostAllocatorDisabledKey = bsonutil.MustHaveTag(ServiceFlags{}, "HostAllocatorDisabled") + drBackupDisabledKey = bsonutil.MustHaveTag(ServiceFlags{}, "DRBackupDisabled") // ContainerPoolsConfig keys poolsKey = bsonutil.MustHaveTag(ContainerPoolsConfig{}, "Pools") diff --git a/config_serviceflags.go b/config_serviceflags.go index e6ee6c26132..78d19f4f354 100644 --- a/config_serviceflags.go +++ b/config_serviceflags.go @@ -27,6 +27,7 @@ type ServiceFlags struct { CommitQueueDisabled bool `bson:"commit_queue_disabled" json:"commit_queue_disabled"` PlannerDisabled bool `bson:"planner_disabled" json:"planner_disabled"` HostAllocatorDisabled bool `bson:"host_allocator_disabled" json:"host_allocator_disabled"` + DRBackupDisabled bool `bson:"dr_backup_disabled" json:"dr_backup_disabled"` // Notification Flags EventProcessingDisabled bool `bson:"event_processing_disabled" json:"event_processing_disabled"` @@ -91,6 +92,7 @@ func (c *ServiceFlags) Set() error { commitQueueDisabledKey: c.CommitQueueDisabled, plannerDisabledKey: c.PlannerDisabled, hostAllocatorDisabledKey: c.HostAllocatorDisabled, + drBackupDisabledKey: c.DRBackupDisabled, }, }, options.Update().SetUpsert(true)) diff --git a/db/db_utils.go b/db/db_utils.go index a3f48fe1c63..4cfc185ea9d 100644 --- a/db/db_utils.go +++ b/db/db_utils.go @@ -309,7 +309,7 @@ func WriteGridFile(fsPrefix, name string, source io.Reader) error { defer cancel() bucket, err := pail.NewGridFSBucketWithClient(ctx, env.Client(), pail.GridFSOptions{ Database: env.DB().Name(), - Prefix: fsPrefix, + Name: fsPrefix, }) if err != nil { @@ -325,7 +325,7 @@ func GetGridFile(fsPrefix, name string) (io.ReadCloser, error) { defer cancel() bucket, err := pail.NewGridFSBucketWithClient(ctx, env.Client(), pail.GridFSOptions{ Database: env.DB().Name(), - Prefix: fsPrefix, + Name: fsPrefix, }) if err != nil { diff --git a/glide.lock b/glide.lock index 5522acbe0dc..235580b468c 100644 --- a/glide.lock +++ b/glide.lock @@ -126,7 +126,7 @@ imports: - name: github.com/evergreen-ci/shrub version: 32e668cd99410328bf6659e55671c11a6c727d9a - name: github.com/evergreen-ci/pail - version: 7b2f8e0b2d972ca621cad3777d68276a54da13d0 + version: 4a0b306b2db0e74641aa30b8b9e748054adbee79 - name: github.com/mongodb/jasper version: 61a695020101f18236583d806735d4ea1ccfd1fe - name: go.mongodb.org/mongo-driver @@ -163,8 +163,8 @@ imports: # anser and deps - name: github.com/evergreen-ci/birch - version: 5b054047680765b089c1e6f77d0896803d1fd73d + version: 3a26bb67719ad6a9e7daed059abd5d018586f3cd - name: github.com/mongodb/anser - version: ee4e72afa4fed132f32d481b87983ab413f8bece + version: cc2c8355390715b964f103949e92d09139819e2e - name: github.com/mongodb/ftdc version: 7e505d9a86240264dd35b1d900c2e88ce74f6066 diff --git a/model/stats/db.go b/model/stats/db.go index 557e36c9161..73441e9f1c3 100644 --- a/model/stats/db.go +++ b/model/stats/db.go @@ -80,10 +80,10 @@ import ( ) const ( - hourlyTestStatsCollection = "hourly_test_stats" - dailyTestStatsCollection = "daily_test_stats" + HourlyTestStatsCollection = "hourly_test_stats" + DailyTestStatsCollection = "daily_test_stats" DailyTaskStatsCollection = "daily_task_stats" - dailyStatsStatusCollection = "daily_stats_status" + DailyStatsStatusCollection = "daily_stats_status" 
bulkSize = 1000 nsInASecond = time.Second / time.Nanosecond ) @@ -186,7 +186,7 @@ func hourlyTestStatsForOldTasksPipeline(projectId string, requester string, star // And the merge the documents with the existing ones. mergePipeline := []bson.M{ {"$lookup": bson.M{ - "from": hourlyTestStatsCollection, + "from": HourlyTestStatsCollection, "localField": dbTestStatsIdKey, "foreignField": dbTestStatsIdKey, "as": "existing", @@ -1117,7 +1117,7 @@ func makeSum(condition bson.M) bson.M { func GetDailyTestDoc(id DbTestStatsId) (*dbTestStats, error) { doc := dbTestStats{} - err := db.FindOne(dailyTestStatsCollection, bson.M{"_id": id}, db.NoProjection, db.NoSort, &doc) + err := db.FindOne(DailyTestStatsCollection, bson.M{"_id": id}, db.NoProjection, db.NoSort, &doc) if adb.ResultsNotFound(err) { return nil, nil } @@ -1126,7 +1126,7 @@ func GetDailyTestDoc(id DbTestStatsId) (*dbTestStats, error) { func GetHourlyTestDoc(id DbTestStatsId) (*dbTestStats, error) { doc := dbTestStats{} - err := db.FindOne(hourlyTestStatsCollection, bson.M{"_id": id}, db.NoProjection, db.NoSort, &doc) + err := db.FindOne(HourlyTestStatsCollection, bson.M{"_id": id}, db.NoProjection, db.NoSort, &doc) if adb.ResultsNotFound(err) { return nil, nil } diff --git a/model/stats/query.go b/model/stats/query.go index 6ea4e445be0..94a24d1cc01 100644 --- a/model/stats/query.go +++ b/model/stats/query.go @@ -263,7 +263,7 @@ func GetTestStats(filter StatsFilter) ([]TestStats, error) { } var stats []TestStats pipeline := filter.testStatsQueryPipeline() - err = db.Aggregate(dailyTestStatsCollection, pipeline, &stats) + err = db.Aggregate(DailyTestStatsCollection, pipeline, &stats) if err != nil { return nil, errors.Wrap(err, "Failed to aggregate test statistics") } diff --git a/model/stats/query_test.go b/model/stats/query_test.go index da95ff020c7..fcf8f8d760c 100644 --- a/model/stats/query_test.go +++ b/model/stats/query_test.go @@ -28,7 +28,7 @@ func TestStatsQuerySuite(t *testing.T) { } func (s *statsQuerySuite) SetupTest() { - s.clearCollection(dailyTestStatsCollection) + s.clearCollection(DailyTestStatsCollection) s.clearCollection(DailyTaskStatsCollection) s.baseTestFilter = StatsFilter{ @@ -948,7 +948,7 @@ func (s *statsQuerySuite) clearCollection(name string) { func (s *statsQuerySuite) insertDailyTestStats(project string, requester string, testFile string, taskName string, variant string, distro string, date time.Time, numPass int, numFail int, avgDuration float64) { - err := db.Insert(dailyTestStatsCollection, bson.M{ + err := db.Insert(DailyTestStatsCollection, bson.M{ "_id": DbTestStatsId{ Project: project, Requester: requester, diff --git a/model/stats/stats.go b/model/stats/stats.go index ef5bb7e2a2a..c2d3ec0c9a5 100644 --- a/model/stats/stats.go +++ b/model/stats/stats.go @@ -51,7 +51,7 @@ func createDefaultStatsStatus(projectId string) StatsStatus { func GetStatsStatus(projectId string) (StatsStatus, error) { status := StatsStatus{} query := statsStatusQuery(projectId) - err := db.FindOne(dailyStatsStatusCollection, query, db.NoProjection, db.NoSort, &status) + err := db.FindOne(DailyStatsStatusCollection, query, db.NoProjection, db.NoSort, &status) if adb.ResultsNotFound(err) { return createDefaultStatsStatus(projectId), nil } @@ -69,7 +69,7 @@ func UpdateStatsStatus(projectId string, lastJobRun time.Time, processedTasksUnt ProcessedTasksUntil: processedTasksUntil, Runtime: runtime, } - _, err := db.Upsert(dailyStatsStatusCollection, bson.M{"_id": projectId}, status) + _, err := db.Upsert(DailyStatsStatusCollection, 
bson.M{"_id": projectId}, status) if err != nil { return errors.Wrap(err, "Failed to update test stats status") } @@ -104,7 +104,7 @@ func GenerateHourlyTestStats(ctx context.Context, opts GenerateOptions) error { end := start.Add(time.Hour) // Generate the stats based on tasks. pipeline := hourlyTestStatsPipeline(opts.ProjectID, opts.Requester, start, end, opts.Tasks, opts.Runtime) - err := aggregateIntoCollection(ctx, task.Collection, pipeline, hourlyTestStatsCollection) + err := aggregateIntoCollection(ctx, task.Collection, pipeline, HourlyTestStatsCollection) if err != nil { return errors.Wrap(err, "Failed to generate hourly stats") } @@ -119,7 +119,7 @@ func GenerateHourlyTestStats(ctx context.Context, opts GenerateOptions) error { }) // Generate/Update the stats for old tasks. pipeline = hourlyTestStatsForOldTasksPipeline(opts.ProjectID, opts.Requester, start, end, opts.Tasks, opts.Runtime) - err = aggregateIntoCollection(ctx, task.OldCollection, pipeline, hourlyTestStatsCollection) + err = aggregateIntoCollection(ctx, task.OldCollection, pipeline, HourlyTestStatsCollection) if err != nil { return errors.Wrap(err, "Failed to generate hourly stats for old tasks") } @@ -141,7 +141,7 @@ func GenerateDailyTestStatsFromHourly(ctx context.Context, opts GenerateOptions) start := util.GetUTCDay(opts.Window) end := start.Add(24 * time.Hour) pipeline := dailyTestStatsFromHourlyPipeline(opts.ProjectID, opts.Requester, start, end, opts.Tasks, opts.Runtime) - err := aggregateIntoCollection(ctx, hourlyTestStatsCollection, pipeline, dailyTestStatsCollection) + err := aggregateIntoCollection(ctx, HourlyTestStatsCollection, pipeline, DailyTestStatsCollection) if err != nil { return errors.Wrap(err, "Failed to aggregate hourly stats into daily stats") } diff --git a/model/stats/stats_test.go b/model/stats/stats_test.go index 8af5c4f66a1..3e998961394 100644 --- a/model/stats/stats_test.go +++ b/model/stats/stats_test.go @@ -38,9 +38,9 @@ func TestStatsSuite(t *testing.T) { func (s *statsSuite) SetupTest() { collectionsToClear := []string{ - hourlyTestStatsCollection, - dailyTestStatsCollection, - dailyStatsStatusCollection, + HourlyTestStatsCollection, + DailyTestStatsCollection, + DailyStatsStatusCollection, DailyTaskStatsCollection, task.Collection, task.OldCollection, @@ -615,7 +615,7 @@ func (s *statsSuite) initHourly() { func (s *statsSuite) insertHourlyTestStats(project string, requester string, testFile string, taskName string, variant string, distro string, date time.Time, numPass int, numFail int, avgDuration float64, lastID mgobson.ObjectId) { - err := db.Insert(hourlyTestStatsCollection, bson.M{ + err := db.Insert(HourlyTestStatsCollection, bson.M{ "_id": DbTestStatsId{ Project: project, Requester: requester, @@ -866,11 +866,11 @@ func (s *statsSuite) countDocs(collection string) int { } func (s *statsSuite) countDailyTestDocs() int { - return s.countDocs(dailyTestStatsCollection) + return s.countDocs(DailyTestStatsCollection) } func (s *statsSuite) countHourlyTestDocs() int { - return s.countDocs(hourlyTestStatsCollection) + return s.countDocs(HourlyTestStatsCollection) } func (s *statsSuite) countDailyTaskDocs() int { @@ -926,7 +926,7 @@ func (s *statsSuite) getLastHourlyTestStat(testStatsID DbTestStatsId) (*dbTestSt "$lt": end, }, } - err := db.FindAll(hourlyTestStatsCollection, qry, db.NoProjection, []string{"-last_id"}, db.NoSkip, 1, &testResults) + err := db.FindAll(HourlyTestStatsCollection, qry, db.NoProjection, []string{"-last_id"}, db.NoSkip, 1, &testResults) if 
adb.ResultsNotFound(err) { return nil, nil } diff --git a/operations/agent.go b/operations/agent.go index 4eb8d9dcc0a..3f91e7b9a43 100644 --- a/operations/agent.go +++ b/operations/agent.go @@ -107,7 +107,7 @@ func Agent() cli.Command { Credentials: pail.CreateAWSCredentials(os.Getenv("S3_KEY"), os.Getenv("S3_SECRET"), ""), Region: endpoints.UsEast1RegionID, Name: os.Getenv("S3_BUCKET"), - Permission: "public-read", + Permissions: pail.S3PermissionsPublicRead, ContentType: "text/plain", }, } diff --git a/rest/model/admin.go b/rest/model/admin.go index 01031e58ab7..4707b180aa9 100644 --- a/rest/model/admin.go +++ b/rest/model/admin.go @@ -18,6 +18,7 @@ func NewConfigModel() *APIAdminSettings { Amboy: &APIAmboyConfig{}, Api: &APIapiConfig{}, AuthConfig: &APIAuthConfig{}, + Backup: &APIBackupConfig{}, CommitQueue: &APICommitQueueConfig{}, ContainerPools: &APIContainerPoolsConfig{}, Credentials: map[string]string{}, @@ -52,6 +53,7 @@ type APIAdminSettings struct { AuthConfig *APIAuthConfig `json:"auth,omitempty"` Banner APIString `json:"banner,omitempty"` BannerTheme APIString `json:"banner_theme,omitempty"` + Backup *APIBackupConfig `json:"backup,omitempty"` ClientBinariesDir APIString `json:"client_binaries_dir,omitempty"` CommitQueue *APICommitQueueConfig `json:"commit_queue,omitempty"` ConfigDir APIString `json:"configdir,omitempty"` @@ -433,6 +435,42 @@ func (a *APIAuthConfig) ToService() (interface{}, error) { }, nil } +type APIBackupConfig struct { + BucketName APIString `bson:"bucket_name" json:"bucket_name" yaml:"bucket_name"` + Key APIString `bson:"key" json:"key" yaml:"key"` + Secret APIString `bson:"secret" json:"secret" yaml:"secret"` + Prefix APIString `bson:"prefix" json:"prefix" yaml:"prefix"` + Compress bool `bson:"compress" json:"compress" yaml:"compress"` +} + +func (a *APIBackupConfig) BuildFromService(c interface{}) error { + switch conf := c.(type) { + case evergreen.BackupConfig: + a.BucketName = ToAPIString(conf.BucketName) + a.Key = ToAPIString(conf.Key) + a.Secret = ToAPIString(conf.Secret) + a.Compress = conf.Compress + a.Prefix = ToAPIString(conf.Prefix) + + return nil + default: + return errors.Errorf("%T is not a supported type", c) + } +} +func (a *APIBackupConfig) ToService() (interface{}, error) { + if a == nil { + return nil, nil + } + + return evergreen.BackupConfig{ + BucketName: FromAPIString(a.BucketName), + Key: FromAPIString(a.Key), + Secret: FromAPIString(a.Secret), + Prefix: FromAPIString(a.Prefix), + Compress: a.Compress, + }, nil +} + type APILDAPConfig struct { URL APIString `json:"url"` Port APIString `json:"port"` @@ -1288,6 +1326,7 @@ type APIServiceFlags struct { CommitQueueDisabled bool `json:"commit_queue_disabled"` PlannerDisabled bool `json:"planner_disabled"` HostAllocatorDisabled bool `json:"host_allocator_disabled"` + DRBackupDisabled bool `json:"dr_backup_disabled"` // Notifications Flags EventProcessingDisabled bool `json:"event_processing_disabled"` @@ -1542,6 +1581,7 @@ func (as *APIServiceFlags) BuildFromService(h interface{}) error { as.CommitQueueDisabled = v.CommitQueueDisabled as.PlannerDisabled = v.PlannerDisabled as.HostAllocatorDisabled = v.HostAllocatorDisabled + as.DRBackupDisabled = v.DRBackupDisabled default: return errors.Errorf("%T is not a supported service flags type", h) } @@ -1575,6 +1615,7 @@ func (as *APIServiceFlags) ToService() (interface{}, error) { CommitQueueDisabled: as.CommitQueueDisabled, PlannerDisabled: as.PlannerDisabled, HostAllocatorDisabled: as.HostAllocatorDisabled, + DRBackupDisabled: 
as.DRBackupDisabled, }, nil } diff --git a/rest/route/admin_test.go b/rest/route/admin_test.go index 560d4ede34e..9c9bf148160 100644 --- a/rest/route/admin_test.go +++ b/rest/route/admin_test.go @@ -92,7 +92,9 @@ func (s *AdminRouteSuite) TestAdminRoute() { s.NoError(s.getHandler.Parse(ctx, nil)) resp = s.getHandler.Run(ctx) s.NotNil(resp) - settingsResp, err := resp.Data().(restModel.Model).ToService() + respm, ok := resp.Data().(restModel.Model) + s.Require().True(ok, "%+v", resp.Data()) + settingsResp, err := respm.ToService() s.NoError(err) settings, ok := settingsResp.(evergreen.Settings) s.True(ok) diff --git a/service/api_plugin_s3copy.go b/service/api_plugin_s3copy.go index ea1b4920da7..1f69868c3a6 100644 --- a/service/api_plugin_s3copy.go +++ b/service/api_plugin_s3copy.go @@ -89,7 +89,7 @@ func (as *APIServer) s3copyPlugin(w http.ResponseWriter, r *http.Request) { Credentials: pail.CreateAWSCredentials(s3CopyReq.AwsKey, s3CopyReq.AwsSecret, ""), Region: region, Name: s3CopyReq.S3SourceBucket, - Permission: s3CopyReq.S3Permissions, + Permissions: pail.S3Permissions(s3CopyReq.S3Permissions), } srcBucket, err := pail.NewS3MultiPartBucket(srcOpts) if err != nil { @@ -99,7 +99,7 @@ func (as *APIServer) s3copyPlugin(w http.ResponseWriter, r *http.Request) { Credentials: pail.CreateAWSCredentials(s3CopyReq.AwsKey, s3CopyReq.AwsSecret, ""), Region: region, Name: s3CopyReq.S3DestinationBucket, - Permission: s3CopyReq.S3Permissions, + Permissions: pail.S3Permissions(s3CopyReq.S3Permissions), } destBucket, err := pail.NewS3MultiPartBucket(destOpts) if err != nil { diff --git a/service/templates/admin.html b/service/templates/admin.html index 1cdc64bd3d8..6ffe57de8fa 100644 --- a/service/templates/admin.html +++ b/service/templates/admin.html @@ -84,10 +84,11 @@
[The service/templates/admin.html hunks were garbled in extraction: the HTML markup was stripped, leaving only visible text and stray +/- markers. The recoverable changes: the hunk at @@ -84,10 +84,11 @@ adds one line near the "Other" and "Non-Configuration" headings; a second hunk (@@ -99,181 +100,190 @@) reworks the "Service Degrading" table of Enabled/Disabled toggles (Dispatch tasks, Create and provision hosts, Monitor hosts and tasks, Alert for spawn host expiration, Start agents on hosts, Track GitHub repositories, Schedule tasks, Test GitHub pull requests, Update CLI, Collect background statistics, Persist task and test logs, Cache historical statistics, Cache historical statistics endpoint, Cache historical statistics old tasks, Process Commit Queue, Planner, Host Allocator) and adds a row for the new disaster recovery backup flag; a third hunk (@@ -1319,6 +1329,37 @@) adds a "Misc Settings" panel with a "Compress Backups" checkbox for the new backup settings.]
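For orientation, a brief aside that is not part of the patch: the new admin-page toggle described above drives the DRBackupDisabled service flag added in config_serviceflags.go. A minimal sketch of flipping the same kill switch in code, assuming only the evergreen.GetServiceFlags helper and the (*ServiceFlags).Set method visible earlier in this diff (the disableDRBackups wrapper is illustrative, not from the patch):

    package main

    import "github.com/evergreen-ci/evergreen"

    // disableDRBackups flips the kill switch that the new backup jobs
    // consult before doing any work; Set persists it with an upsert on
    // the admin settings document, as shown in config_serviceflags.go.
    func disableDRBackups() error {
        flags, err := evergreen.GetServiceFlags()
        if err != nil {
            return err
        }

        flags.DRBackupDisabled = true // backup jobs no-op while this is set

        return flags.Set()
    }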
diff --git a/units/backup.go b/units/backup.go
new file mode 100644
index 00000000000..330987364c3
--- /dev/null
+++ b/units/backup.go
@@ -0,0 +1,104 @@
+package units
+
+import (
+    "context"
+    "fmt"
+    "path"
+    "time"
+
+    "github.com/evergreen-ci/evergreen"
+    "github.com/evergreen-ci/evergreen/util"
+    "github.com/evergreen-ci/pail"
+    "github.com/mongodb/amboy"
+    "github.com/mongodb/amboy/dependency"
+    "github.com/mongodb/amboy/job"
+    "github.com/mongodb/amboy/registry"
+    "github.com/mongodb/anser/backup"
+    "github.com/mongodb/grip"
+    "github.com/mongodb/grip/message"
+    "github.com/mongodb/grip/sometimes"
+    "github.com/pkg/errors"
+)
+
+const backupMDBJobName = "backup-mdb-collection"
+
+func init() {
+    registry.AddJobType(backupMDBJobName, func() amboy.Job {
+        return makeBackupMDBCollectionJob()
+    })
+}
+
+type backupMDBCollectionJob struct {
+    Options  backup.Options `bson:"options" json:"options" yaml:"options"`
+    job.Base `bson:"metadata" json:"metadata" yaml:"metadata"`
+
+    env evergreen.Environment
+}
+
+func makeBackupMDBCollectionJob() *backupMDBCollectionJob {
+    j := &backupMDBCollectionJob{
+        Base: job.Base{
+            JobType: amboy.JobType{
+                Name:    backupMDBJobName,
+                Version: 0,
+            },
+        },
+    }
+
+    j.SetDependency(dependency.NewAlways())
+
+    return j
+}
+
+func NewBackupMDBCollectionJob(opts backup.Options, ts time.Time) amboy.Job {
+    j := makeBackupMDBCollectionJob()
+    j.Options = opts
+    j.SetID(fmt.Sprintf("%s.%s.%s", backupMDBJobName, opts.NS.String(), ts.Format(TSFormat)))
+    return j
+}
+
+func (j *backupMDBCollectionJob) Run(ctx context.Context) {
+    defer j.MarkComplete()
+    if j.env == nil {
+        j.env = evergreen.GetEnvironment()
+    }
+
+    flags, err := evergreen.GetServiceFlags()
+    if err != nil {
+        j.AddError(errors.Wrapf(err, "Can't get degraded mode flags"))
+        return
+    }
+
+    if flags.DRBackupDisabled {
+        grip.InfoWhen(sometimes.Percent(evergreen.DegradedLoggingPercent), message.Fields{
+            "job":     backupMDBJobName,
+            "message": "disaster recovery backup job disabled",
+        })
+        return
+    }
+    conf := j.env.Settings().Backup
+
+    client := util.GetHTTPClient()
+    client.Timeout = 60 * time.Minute
+    defer util.PutHTTPClient(client)
+
+    bucket, err := pail.NewS3MultiPartBucketWithHTTPClient(client, pail.S3Options{
+        Credentials: pail.CreateAWSCredentials(conf.Key, conf.Secret, ""),
+        Permissions: pail.S3PermissionsPrivate,
+        Name:        conf.BucketName,
+        Compress:    conf.Compress,
+        Prefix:      path.Join(conf.Prefix, j.TimeInfo().Created.Format(TSFormat), "dump"),
+    })
+    if err != nil {
+        j.AddError(err)
+        return
+    }
+
+    if err := bucket.Check(ctx); err != nil {
+        j.AddError(err)
+        return
+    }
+
+    j.Options.Target = bucket.Writer
+    j.AddError(backup.Collection(ctx, j.env.Client(), j.Options))
+}
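Before the dispatcher in units/backup_spec.go below, a sketch of enqueuing one of these jobs by hand; the namespace, queue, and helper name here are illustrative assumptions, and the real fan-out over all collections is AddBackupJobs in the next file:

    package main

    import (
        "context"
        "time"

        "github.com/evergreen-ci/evergreen/units"
        "github.com/mongodb/amboy"
        "github.com/mongodb/anser/backup"
        amodel "github.com/mongodb/anser/model"
    )

    // enqueueOneBackup schedules a dump of a single collection using the
    // job defined above; the "evergreen"/"tasks" namespace is a placeholder.
    func enqueueOneBackup(ctx context.Context, q amboy.Queue) error {
        opts := backup.Options{
            NS:          amodel.Namespace{DB: "evergreen", Collection: "tasks"},
            IndexesOnly: false, // dump documents as well as index definitions
        }

        // The job ID embeds the namespace and timestamp, so enqueuing the
        // same namespace twice in one window dedupes in the queue.
        j := units.NewBackupMDBCollectionJob(opts, time.Now().Truncate(time.Hour))

        return q.Put(ctx, j)
    }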
"github.com/evergreen-ci/evergreen/model/patch" + "github.com/evergreen-ci/evergreen/model/stats" + "github.com/evergreen-ci/evergreen/model/task" + "github.com/evergreen-ci/evergreen/model/testresult" + "github.com/evergreen-ci/evergreen/model/user" + "github.com/evergreen-ci/evergreen/util" + "github.com/mongodb/anser/backup" + amodel "github.com/mongodb/anser/model" + "github.com/mongodb/grip" + "github.com/mongodb/grip/message" + "github.com/mongodb/grip/sometimes" + "github.com/pkg/errors" +) + +func AddBackupJobs(ctx context.Context, env evergreen.Environment, ts time.Time) error { + flags, err := evergreen.GetServiceFlags() + if err != nil { + return errors.WithStack(err) + } + + settings := env.Settings() + + if flags.DRBackupDisabled || !settings.Backup.Populated() { + grip.InfoWhen(sometimes.Percent(evergreen.DegradedLoggingPercent), message.Fields{ + "message": "disaster recovery backups disabled or not configured", + "impact": "backup jobs not dispatched", + "mode": "disabled", + "populated": settings.Backup.Populated(), + }) + return nil + } + + util.RoundPartOfDay(6) + queue, err := env.RemoteQueueGroup().Get(ctx, "backup_collector") + if err != nil { + return errors.WithStack(err) + } + + collections := appendAmboyCollections(settings.Amboy, []backup.Options{}) + collections = appendFullBackupCollections(settings.Database.DB, collections) + collections = appendIndexOnlyBackupCollections(settings.Database.DB, collections) + + catcher := grip.NewBasicCatcher() + for _, opt := range collections { + catcher.Add(queue.Put(ctx, NewBackupMDBCollectionJob(opt, ts))) + } + + return catcher.Resolve() +} + +func appendIndexOnlyBackupCollections(dbName string, in []backup.Options) []backup.Options { + for _, coll := range []string{ + task.OldCollection, + stats.HourlyTestStatsCollection, + stats.DailyTaskStatsCollection, + stats.DailyTestStatsCollection, + stats.DailyStatsStatusCollection, + model.TaskAliasQueuesCollection, + model.TaskQueuesCollection, + model.TestLogCollection, + testresult.Collection, + model.NotifyTimesCollection, + model.NotifyHistoryCollection, + event.AllLogCollection, + event.SubscriptionsCollection, + patch.IntentCollection, + } { + in = append(in, backup.Options{ + NS: amodel.Namespace{ + DB: dbName, + Collection: coll, + }, + IndexesOnly: true, + }) + } + + return in +} + +func appendFullBackupCollections(dbName string, in []backup.Options) []backup.Options { + for _, coll := range []string{ + evergreen.ConfigCollection, + evergreen.CredentialsCollection, // grpc CA + evergreen.ScopeCollection, // acl data + model.GithubHooksCollection, + model.KeyValCollection, + model.ProjectAliasCollection, + model.ProjectRefCollection, + model.ProjectVarsCollection, + model.PushlogCollection, // s3copy pushes + model.RepositoriesCollection, // last seen hash + user.Collection, + db.GlobalsCollection, // revision_orderNumber + distro.Collection, + commitqueue.Collection, + manifest.Collection, + } { + in = append(in, backup.Options{ + NS: amodel.Namespace{ + DB: dbName, + Collection: coll, + }, + IndexesOnly: false, + }) + } + return in +} + +func appendAmboyCollections(conf evergreen.AmboyConfig, in []backup.Options) []backup.Options { + return append(in, + backup.Options{ + NS: amodel.Namespace{ + DB: conf.DB, + Collection: conf.Name + ".jobs", + }, + IndexesOnly: true, + }, + backup.Options{ + NS: amodel.Namespace{ + DB: conf.DB, + Collection: conf.Name + ".group", + }, + IndexesOnly: true, + }, + ) +} diff --git a/units/crons.go b/units/crons.go index 
e285466759b..f7d6c53f2cc 100644 --- a/units/crons.go +++ b/units/crons.go @@ -1131,7 +1131,7 @@ func PopulateLocalQueueJobs(env evergreen.Environment) amboy.QueueOperation { if flags.BackgroundStatsDisabled { grip.InfoWhen(sometimes.Percent(evergreen.DegradedLoggingPercent), message.Fields{ - "message": "system stats ", + "message": "system stats", "impact": "memory, cpu, runtime stats", "mode": "degraded", }) diff --git a/units/crons_remote_hour.go b/units/crons_remote_hour.go index 7873cffe2ff..629ff230b49 100644 --- a/units/crons_remote_hour.go +++ b/units/crons_remote_hour.go @@ -63,6 +63,8 @@ func (j *cronsRemoteHourJob) Run(ctx context.Context) { catcher.Add(op(ctx, queue)) } + + catcher.Add(AddBackupJobs(ctx, j.env, util.RoundPartOfHour(0))) j.ErrorCount = catcher.Len() grip.Debug(message.Fields{ diff --git a/vendor/github.com/evergreen-ci/birch/.golangci.yml b/vendor/github.com/evergreen-ci/birch/.golangci.yml new file mode 100644 index 00000000000..2c7376b7c29 --- /dev/null +++ b/vendor/github.com/evergreen-ci/birch/.golangci.yml @@ -0,0 +1,52 @@ +linters: + disable-all: true + enable: + - deadcode + - errcheck + - goconst + - gocritic + - gofmt + - goimports + - golint + - gosimple + - govet + - ineffassign + - interfacer + - maligned + - misspell + - staticcheck + - structcheck + - typecheck + - unconvert + - varcheck + - stylecheck + - wsl + # - godox + - prealloc + - nakedret + +run: + skip-dirs: + - build + max-same-issues: 100 + max-issues-per-linter: 100 + exclude-use-default: false + timeout: 10m + +linters-settings: + maligned: + suggest-new: true + +issues: + exclude-rules: + - linters: + - goconst + # Ignore GOOS warnings. + text: "string `windows`" + - path: _test\.go + linters: + - gocognit + - goconst + - linters: + - golint + text: ".*should have name of the form ErrFoo" diff --git a/vendor/github.com/evergreen-ci/birch/LICENSE b/vendor/github.com/evergreen-ci/birch/LICENSE new file mode 100644 index 00000000000..5df6e7cbf17 --- /dev/null +++ b/vendor/github.com/evergreen-ci/birch/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2019 MongoDB, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/evergreen-ci/birch/array.go b/vendor/github.com/evergreen-ci/birch/array.go index dde8e0c8cd5..50fd912eac5 100644 --- a/vendor/github.com/evergreen-ci/birch/array.go +++ b/vendor/github.com/evergreen-ci/birch/array.go @@ -58,7 +58,8 @@ func (a *Array) Reset() { // Validate ensures that the array's underlying BSON is valid. It returns the the number of bytes // in the underlying BSON if it is valid or an error if it isn't. func (a *Array) Validate() (uint32, error) { - var size uint32 = 4 + 1 + size := uint32(4 + 1) + for i, elem := range a.doc.elems { n, err := elem.value.validate(false) if err != nil { @@ -77,16 +78,12 @@ func (a *Array) Validate() (uint32, error) { } // Lookup returns the value in the array at the given index or an error if it cannot be found. -// -// TODO: We should fix this to align with the semantics of the *Document type, -// e.g. have Lookup return just a *Value or panic if it's out of bounds and have -// a LookupOK that returns a bool. 
Although if we want to align with the -semantics of how Go arrays and slices work, we would not provide a LookupOK -and force users to use the Len method before hand to avoid panics. func (a *Array) Lookup(index uint) *Value { return a.doc.ElementAt(index).value } +// LookupErr returns the value at the specified index, returning an +// OutOfBounds error if that element doesn't exist. func (a *Array) LookupErr(index uint) (*Value, error) { v, ok := a.doc.ElementAtOK(index) if !ok { @@ -96,6 +93,8 @@ func (a *Array) LookupErr(index uint) (*Value, error) { return v.value, nil } +// LookupElementErr returns the element at the specified index, +// returning an OutOfBounds error if that element doesn't exist. func (a *Array) LookupElementErr(index uint) (*Element, error) { v, ok := a.doc.ElementAtOK(index) if !ok { @@ -105,8 +104,15 @@ func (a *Array) LookupElementErr(index uint) (*Element, error) { return v, nil } +// LookupElement returns the element at the specified index, panicking +// if that index does not exist. func (a *Array) LookupElement(index uint) *Element { - return a.doc.ElementAt(index) + v, ok := a.doc.ElementAtOK(index) + if !ok { + panic(bsonerr.OutOfBounds) + } + + return v } func (a *Array) lookupTraverse(index uint, keys ...string) (*Value, error) { @@ -159,7 +165,9 @@ func (a *Array) AppendInterfaceErr(elem interface{}) error { if err != nil { return errors.WithStack(err) } + a.doc.Append(e) + return nil } @@ -190,7 +198,12 @@ func (a *Array) Set(index uint, value *Value) *Array { return a } -func (a *Array) Extend(ar2 *Array) *Array { a.doc.Append(ar2.doc.elems...); return a } +// Extend adds the values from the second array to the first array, +// returning the original array for chaining. +func (a *Array) Extend(ar2 *Array) *Array { a.doc.Append(ar2.doc.elems...); return a } + +// ExtendFromDocument adds the values from the elements in the +// document, returning the array for chaining. func (a *Array) ExtendFromDocument(doc *Document) *Array { a.doc.Append(doc.elems...); return a } // Delete removes the value at the given index from the array. @@ -208,13 +221,17 @@ func (a *Array) Delete(index uint) *Value { // String implements the fmt.Stringer interface. func (a *Array) String() string { var buf bytes.Buffer + buf.Write([]byte("bson.Array[")) + for idx, elem := range a.doc.elems { if idx > 0 { buf.Write([]byte(", ")) } + fmt.Fprintf(&buf, "%s", elem.value.Interface()) } + buf.WriteByte(']') return buf.String() @@ -224,14 +241,17 @@ // at the given start position.
func (a *Array) writeByteSlice(start uint, size uint32, b []byte) (int64, error) { var total int64 - var pos = start + + pos := start if len(b) < int(start)+int(size) { return 0, newErrTooSmall() } + n, err := elements.Int32.Encode(start, b, int32(size)) total += int64(n) pos += uint(n) + if err != nil { return total, err } @@ -250,6 +270,7 @@ func (a *Array) writeByteSlice(start uint, size uint32, b []byte) (int64, error) n, err := elem.writeElement(false, pos, b) total += n pos += uint(n) + if err != nil { return total, err } @@ -257,9 +278,11 @@ func (a *Array) writeByteSlice(start uint, size uint32, b []byte) (int64, error) n, err = elements.Byte.Encode(pos, b, '\x00') total += int64(n) + if err != nil { return total, err } + return total, nil } @@ -269,11 +292,13 @@ func (a *Array) MarshalBSON() ([]byte, error) { if err != nil { return nil, err } + b := make([]byte, size) - _, err = a.writeByteSlice(0, size, b) - if err != nil { + + if _, err = a.writeByteSlice(0, size, b); err != nil { return nil, err } + return b, nil } diff --git a/vendor/github.com/evergreen-ci/birch/array_test.go b/vendor/github.com/evergreen-ci/birch/array_test.go index 21be57f7df9..56a6258253e 100644 --- a/vendor/github.com/evergreen-ci/birch/array_test.go +++ b/vendor/github.com/evergreen-ci/birch/array_test.go @@ -388,7 +388,6 @@ func TestArray(t *testing.T) { }) }) - } type testArrayPrependAppendGenerator struct{} @@ -518,16 +517,19 @@ func ExampleArray() { ), VC.String("go1.9.2"), ) + if appName != "" { arr.Append(VC.DocumentFromElements(EC.String("name", appName))) } return arr } + buf, err := f("hello-world").MarshalBSON() if err != nil { fmt.Println(err) } + fmt.Println(buf) // Output: [154 0 0 0 3 48 0 52 0 0 0 2 110 97 109 101 0 16 0 0 0 109 111 110 103 111 45 103 111 45 100 114 105 118 101 114 0 2 118 101 114 115 105 111 110 0 8 0 0 0 49 50 51 52 53 54 55 0 0 3 49 0 46 0 0 0 2 116 121 112 101 0 7 0 0 0 100 97 114 119 105 110 0 2 97 114 99 104 105 116 101 99 116 117 114 101 0 6 0 0 0 97 109 100 54 52 0 0 2 50 0 8 0 0 0 103 111 49 46 57 46 50 0 3 51 0 27 0 0 0 2 110 97 109 101 0 12 0 0 0 104 101 108 108 111 45 119 111 114 108 100 0 0 0] diff --git a/vendor/github.com/evergreen-ci/birch/bsonerr/document.go b/vendor/github.com/evergreen-ci/birch/bsonerr/document.go index f3420980f22..8eac7563a37 100644 --- a/vendor/github.com/evergreen-ci/birch/bsonerr/document.go +++ b/vendor/github.com/evergreen-ci/birch/bsonerr/document.go @@ -34,9 +34,8 @@ var InvalidDocumentType = errors.New("invalid document type") // InvalidDepthTraversal indicates that a provided path of keys to a nested value in a document // does not exist. // -// TODO(skriptble): This error message is pretty awful. // Please fix. -var InvalidDepthTraversal = errors.New("invalid depth traversal") +var InvalidDepthTraversal = errors.New("invalid depth traversal for key path") // ElementNotFound indicates that an Element matching a certain condition does not exist. var ElementNotFound = errors.New("element not found") diff --git a/vendor/github.com/evergreen-ci/birch/constructor.go b/vendor/github.com/evergreen-ci/birch/constructor.go index 97c88b8319d..97e04fb1439 100644 --- a/vendor/github.com/evergreen-ci/birch/constructor.go +++ b/vendor/github.com/evergreen-ci/birch/constructor.go @@ -25,7 +25,7 @@ var VC ValueConstructor // ElementConstructor is used as a namespace for document element constructor functions. type ElementConstructor struct{} -// ValueConstructor is used as a namespace for document element constructor functions. 
+// ValueConstructor is used as a namespace for value constructor functions. type ValueConstructor struct{} // Interface will attempt to turn the provided key and value into an Element. @@ -111,12 +111,10 @@ func (ElementConstructor) Interface(key string, value interface{}) *Element { elem = EC.SubDocument(key, DC.MapTime(t)) case map[string]time.Duration: elem = EC.SubDocument(key, DC.MapDuration(t)) + case map[string]DocumentMarshaler: + elem = EC.SubDocument(key, DC.MapDocumentMarshaler(t)) case map[string]Marshaler: - var doc *Document - doc, err = DC.MapMarshalerErr(t) - if err == nil { - elem = EC.SubDocument(key, doc) - } + elem = EC.SubDocument(key, DC.MapMarshaler(t)) case map[string][]string: elem = EC.SubDocument(key, DC.MapSliceString(t)) case map[string][]interface{}: @@ -135,12 +133,10 @@ func (ElementConstructor) Interface(key string, value interface{}) *Element { elem = EC.SubDocument(key, DC.MapSliceTime(t)) case map[string][]time.Duration: elem = EC.SubDocument(key, DC.MapSliceDuration(t)) + case map[string][]DocumentMarshaler: + elem = EC.SubDocument(key, DC.MapSliceDocumentMarshaler(t)) case map[string][]Marshaler: - var doc *Document - doc, err = DC.MapSliceMarshalerErr(t) - if err == nil { - elem = EC.SubDocument(key, doc) - } + elem = EC.SubDocument(key, DC.MapSliceMarshaler(t)) case map[interface{}]interface{}: elem = EC.SubDocument(key, DC.Interface(t)) case []interface{}: @@ -161,6 +157,8 @@ func (ElementConstructor) Interface(key string, value interface{}) *Element { elem = EC.SliceTime(key, t) case []time.Duration: elem = EC.SliceDuration(key, t) + case []DocumentMarshaler: + elem, err = EC.SliceDocumentMarshalerErr(key, t) case []Marshaler: elem, err = EC.SliceMarshalerErr(key, t) case []*Element: @@ -175,10 +173,13 @@ func (ElementConstructor) Interface(key string, value interface{}) *Element { } case Reader: var doc *Document + doc, err = DC.ReaderErr(t) if err == nil { elem = EC.SubDocument(key, doc) } + case DocumentMarshaler: + elem, err = EC.DocumentMarshalerErr(key, t) case Marshaler: elem, err = EC.MarshalerErr(key, t) default: @@ -214,34 +215,24 @@ func (ElementConstructor) InterfaceErr(key string, value interface{}) (*Element, default: return EC.Int64(key, int64(t)), nil } - case bool, int8, int16, int32, int, int64, uint8, uint16, uint32, string, float32, float64, - *Element, *Document, Reader, types.Timestamp, - time.Time: - + case bool, int8, int16, int32, int, int64, uint8, uint16, uint32, string, float32, float64, *Element, *Document, Reader, types.Timestamp, time.Time: return EC.Interface(key, t), nil - - case map[string]string, map[string]float32, map[string]float64, - map[string]int32, map[string]int64, map[string]int, - map[string]time.Time, map[string]time.Duration: - + case map[string]string, map[string]float32, map[string]float64, map[string]int32, map[string]int64, map[string]int, map[string]time.Time, map[string]time.Duration: return EC.Interface(key, t), nil - - case map[string]interface{}, map[interface{}]interface{}, map[string]Marshaler: - - return EC.InterfaceErr(key, t) - - case map[string][]string, map[string][]int32, map[string][]int64, map[string][]int, - map[string][]time.Time, map[string][]time.Duration, map[string][]float32, map[string][]float64: - + case map[string][]string, map[string][]int32, map[string][]int64, map[string][]int, map[string][]time.Time, map[string][]time.Duration, map[string][]float32, map[string][]float64: return EC.Interface(key, value), nil case []string, []int32, []int64, []int, []time.Time, 
[]time.Duration, []float64, []float32: return EC.Interface(key, value), nil - case map[string][]interface{}, map[string][]Marshaler: + case map[string]interface{}, map[interface{}]interface{}, map[string]Marshaler, map[string]DocumentMarshaler: + return EC.InterfaceErr(key, t) + case map[string][]interface{}, map[string][]Marshaler, map[string][]DocumentMarshaler: return EC.InterfaceErr(key, value) - case []interface{}, []Marshaler: + case []interface{}, []Marshaler, []DocumentMarshaler: return EC.InterfaceErr(key, value) case *Value: return EC.ValueErr(key, t) + case DocumentMarshaler: + return EC.DocumentMarshalerErr(key, t) case Marshaler: return EC.MarshalerErr(key, t) default: @@ -253,12 +244,13 @@ func (ElementConstructor) InterfaceErr(key string, value interface{}) (*Element, func (ElementConstructor) Double(key string, f float64) *Element { b := make([]byte, 1+len(key)+1+8) elem := newElement(0, 1+uint32(len(key))+1) - _, err := elements.Double.Element(0, b, key, f) - if err != nil { + + if _, err := elements.Double.Element(0, b, key, f); err != nil { panic(err) } elem.value.data = b + return elem } @@ -267,11 +259,13 @@ func (ElementConstructor) String(key string, val string) *Element { size := uint32(1 + len(key) + 1 + 4 + len(val) + 1) b := make([]byte, size) elem := newElement(0, 1+uint32(len(key))+1) - _, err := elements.String.Element(0, b, key, val) - if err != nil { + + if _, err := elements.String.Element(0, b, key, val); err != nil { panic(err) } + elem.value.data = b + return elem } @@ -280,16 +274,18 @@ func (ElementConstructor) SubDocument(key string, d *Document) *Element { size := uint32(1 + len(key) + 1) b := make([]byte, size) elem := newElement(0, size) - _, err := elements.Byte.Encode(0, b, '\x03') - if err != nil { + + if _, err := elements.Byte.Encode(0, b, '\x03'); err != nil { panic(err) } - _, err = elements.CString.Encode(1, b, key) - if err != nil { + + if _, err := elements.CString.Encode(1, b, key); err != nil { panic(err) } + elem.value.data = b elem.value.d = d + return elem } @@ -298,18 +294,20 @@ func (ElementConstructor) SubDocumentFromReader(key string, r Reader) *Element { size := uint32(1 + len(key) + 1 + len(r)) b := make([]byte, size) elem := newElement(0, uint32(1+len(key)+1)) - _, err := elements.Byte.Encode(0, b, '\x03') - if err != nil { + + if _, err := elements.Byte.Encode(0, b, '\x03'); err != nil { panic(err) } - _, err = elements.CString.Encode(1, b, key) - if err != nil { + + if _, err := elements.CString.Encode(1, b, key); err != nil { panic(err) } + // NOTE: We don't validate the Reader here since we don't validate the // Document when provided to SubDocument. 
copy(b[1+len(key)+1:], r) elem.value.data = b + return elem } @@ -324,16 +322,18 @@ func (ElementConstructor) Array(key string, a *Array) *Element { size := uint32(1 + len(key) + 1) b := make([]byte, size) elem := newElement(0, size) - _, err := elements.Byte.Encode(0, b, '\x04') - if err != nil { + + if _, err := elements.Byte.Encode(0, b, '\x04'); err != nil { panic(err) } - _, err = elements.CString.Encode(1, b, key) - if err != nil { + + if _, err := elements.CString.Encode(1, b, key); err != nil { panic(err) } + elem.value.data = b elem.value.d = a.doc + return elem } @@ -358,12 +358,13 @@ func (ElementConstructor) BinaryWithSubtype(key string, b []byte, btype byte) *E buf := make([]byte, size) elem := newElement(0, 1+uint32(len(key))+1) - _, err := elements.Binary.Element(0, buf, key, b, btype) - if err != nil { + + if _, err := elements.Binary.Element(0, buf, key, b, btype); err != nil { panic(err) } elem.value.data = buf + return elem } @@ -372,15 +373,17 @@ func (ElementConstructor) Undefined(key string) *Element { size := 1 + uint32(len(key)) + 1 b := make([]byte, size) elem := newElement(0, size) - _, err := elements.Byte.Encode(0, b, '\x06') - if err != nil { + + if _, err := elements.Byte.Encode(0, b, '\x06'); err != nil { panic(err) } - _, err = elements.CString.Encode(1, b, key) - if err != nil { + + if _, err := elements.CString.Encode(1, b, key); err != nil { panic(err) } + elem.value.data = b + return elem } @@ -390,8 +393,7 @@ func (ElementConstructor) ObjectID(key string, oid types.ObjectID) *Element { elem := newElement(0, 1+uint32(len(key))+1) elem.value.data = make([]byte, size) - _, err := elements.ObjectID.Element(0, elem.value.data, key, oid) - if err != nil { + if _, err := elements.ObjectID.Element(0, elem.value.data, key, oid); err != nil { panic(err) } @@ -404,8 +406,7 @@ func (ElementConstructor) Boolean(key string, b bool) *Element { elem := newElement(0, 1+uint32(len(key))+1) elem.value.data = make([]byte, size) - _, err := elements.Boolean.Element(0, elem.value.data, key, b) - if err != nil { + if _, err := elements.Boolean.Element(0, elem.value.data, key, b); err != nil { panic(err) } @@ -419,8 +420,7 @@ func (ElementConstructor) DateTime(key string, dt int64) *Element { elem := newElement(0, 1+uint32(len(key))+1) elem.value.data = make([]byte, size) - _, err := elements.DateTime.Element(0, elem.value.data, key, dt) - if err != nil { + if _, err := elements.DateTime.Element(0, elem.value.data, key, dt); err != nil { panic(err) } @@ -438,15 +438,17 @@ func (ElementConstructor) Null(key string) *Element { size := uint32(1 + len(key) + 1) b := make([]byte, size) elem := newElement(0, uint32(1+len(key)+1)) - _, err := elements.Byte.Encode(0, b, '\x0A') - if err != nil { + + if _, err := elements.Byte.Encode(0, b, '\x0A'); err != nil { panic(err) } - _, err = elements.CString.Encode(1, b, key) - if err != nil { + + if _, err := elements.CString.Encode(1, b, key); err != nil { panic(err) } + elem.value.data = b + return elem } @@ -635,6 +637,8 @@ func (ElementConstructor) Value(key string, value *Value) *Element { return convertValueToElem(key, value) } +// ValueErr constructs an element using the specified value, but +// returns an error if the value is nil or otherwise invalid. 
func (ElementConstructor) ValueErr(key string, value *Value) (*Element, error) { elem := EC.Value(key, value) if elem == nil { diff --git a/vendor/github.com/evergreen-ci/birch/constructor_test.go b/vendor/github.com/evergreen-ci/birch/constructor_test.go index 5617e49582e..45c22612216 100644 --- a/vendor/github.com/evergreen-ci/birch/constructor_test.go +++ b/vendor/github.com/evergreen-ci/birch/constructor_test.go @@ -1169,8 +1169,8 @@ func TestDocumentConstructor(t *testing.T) { Type: bsontype.EmbeddedDocument, Size: 2, Input: map[string][]interface{}{ - "a": []interface{}{"1", 2, "3"}, - "b": []interface{}{false, true, "1", 2, "3"}, + "a": {"1", 2, "3"}, + "b": {false, true, "1", 2, "3"}, }, }, { @@ -1205,8 +1205,8 @@ func TestDocumentConstructor(t *testing.T) { Size: 2, Type: bsontype.EmbeddedDocument, Input: map[string][]Marshaler{ - "one": []Marshaler{NewDocument(), NewDocument()}, - "two": []Marshaler{NewArray()}, + "one": {NewDocument(), NewDocument()}, + "two": {NewArray()}, }, }, { @@ -1220,9 +1220,9 @@ func TestDocumentConstructor(t *testing.T) { Size: 3, Type: bsontype.EmbeddedDocument, Input: map[string][]string{ - "hi": []string{"hello", "world"}, - "results": []string{}, - "other": []string{"one"}, + "hi": {"hello", "world"}, + "results": {}, + "other": {"one"}, }, }, { @@ -1285,5 +1285,4 @@ func TestDocumentConstructor(t *testing.T) { }) } }) - } diff --git a/vendor/github.com/evergreen-ci/birch/decimal/decimal.go b/vendor/github.com/evergreen-ci/birch/decimal/decimal.go index 56e1331b11f..f186f02edd0 100644 --- a/vendor/github.com/evergreen-ci/birch/decimal/decimal.go +++ b/vendor/github.com/evergreen-ci/birch/decimal/decimal.go @@ -34,9 +34,12 @@ func (d Decimal128) GetBytes() (uint64, uint64) { // String returns a string representation of the decimal value. func (d Decimal128) String() string { - var pos int // positive sign - var e int // exponent - var h, l uint64 // significand high/low + var ( + pos int // positive sign + e int // exponent + h uint64 // significand high + l uint64 // significand low + ) if d.h>>63&1 == 0 { pos = 1 @@ -50,6 +53,7 @@ func (d Decimal128) String() string { } l = d.l + if d.h>>61&3 == 3 { // Bits: 1*sign 2*ignored 14*exponent 111*significand. // Implicit 0b100 prefix in significand. @@ -68,11 +72,15 @@ func (d Decimal128) String() string { return "-0"[pos:] } - var repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero. - var last = len(repr) - var i = len(repr) - var dot = len(repr) + e - var rem uint32 + var ( + rem uint32 + repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero. + ) + + last := len(repr) + i := len(repr) + dot := len(repr) + e + Loop: for d9 := 0; d9 < 5; d9++ { h, l, rem = divmod(h, l, 1e9) @@ -103,15 +111,18 @@ Loop: } } } + repr[last-1] = '-' last-- if e > 0 { return string(repr[last+pos:]) + "E+" + strconv.Itoa(e) } + if e < 0 { return string(repr[last+pos:]) + "E" + strconv.Itoa(e) } + return string(repr[last+pos:]) } @@ -129,6 +140,7 @@ func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) { d := cr<<32 + l&(1<<32-1) dq := d / div64 dr := d % div64 + return (aq<<32 | bq), (cq<<32 | dq), uint32(dr) } @@ -144,9 +156,11 @@ func dErr(s string) (Decimal128, error) { // Decimal128 value. 
func ParseDecimal128(s string) (Decimal128, error) { orig := s + if s == "" { return dErr(orig) } + neg := s[0] == '-' if neg || s[0] == '+' { s = s[1:] @@ -156,100 +170,135 @@ func ParseDecimal128(s string) (Decimal128, error) { if s == "NaN" || s == "nan" || strings.EqualFold(s, "nan") { return dNaN, nil } + if s == "Inf" || s == "inf" || strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") { if neg { return dNegInf, nil } + return dPosInf, nil } + return dErr(orig) } - var h, l uint64 - var e int + var ( + h uint64 + l uint64 + e int + add uint32 + ovr uint32 + ) + + mul := uint32(1) + dot := -1 + digits := 0 + i := 0 - var add, ovr uint32 - var mul uint32 = 1 - var dot = -1 - var digits = 0 - var i = 0 for i < len(s) { c := s[i] + if mul == 1e9 { h, l, ovr = muladd(h, l, mul, add) mul, add = 1, 0 + if ovr > 0 || h&((1<<15-1)<<49) > 0 { return dErr(orig) } } + if c >= '0' && c <= '9' { i++ + if c > '0' || digits > 0 { digits++ } + if digits > 34 { if c == '0' { // Exact rounding. e++ continue } + return dErr(orig) } + mul *= 10 add *= 10 add += uint32(c - '0') + continue } + if c == '.' { i++ + if dot >= 0 || i == 1 && len(s) == 1 { return dErr(orig) } + if i == len(s) { break } + if s[i] < '0' || s[i] > '9' || e > 0 { return dErr(orig) } + dot = i + continue } + break } + if i == 0 { return dErr(orig) } + if mul > 1 { h, l, ovr = muladd(h, l, mul, add) if ovr > 0 || h&((1<<15-1)<<49) > 0 { return dErr(orig) } } + if dot >= 0 { e += dot - i } + if i+1 < len(s) && (s[i] == 'E' || s[i] == 'e') { i++ eneg := s[i] == '-' + if eneg || s[i] == '+' { i++ + if i == len(s) { return dErr(orig) } } + n := 0 + for i < len(s) && n < 1e4 { c := s[i] i++ + if c < '0' || c > '9' { return dErr(orig) } + n *= 10 n += int(c - '0') } + if eneg { n = -n } + e += n for e < -6176 { // Subnormal. @@ -258,12 +307,15 @@ func ParseDecimal128(s string) (Decimal128, error) { div *= 10 e++ } + var rem uint32 h, l, rem = divmod(h, l, div) + if rem > 0 { return dErr(orig) } } + for e > 6111 { // Clamped. var mul uint32 = 1 @@ -271,11 +323,14 @@ func ParseDecimal128(s string) (Decimal128, error) { mul *= 10 e-- } + h, l, ovr = muladd(h, l, mul, 0) + if ovr > 0 || h&((1<<15-1)<<49) > 0 { return dErr(orig) } } + if e < -6176 || e > 6111 { return dErr(orig) } @@ -289,6 +344,7 @@ func ParseDecimal128(s string) (Decimal128, error) { if neg { h |= 1 << 63 } + return Decimal128{h, l}, nil } diff --git a/vendor/github.com/evergreen-ci/birch/document.go b/vendor/github.com/evergreen-ci/birch/document.go index 86ed4cd4753..c38850e01b8 100644 --- a/vendor/github.com/evergreen-ci/birch/document.go +++ b/vendor/github.com/evergreen-ci/birch/document.go @@ -35,10 +35,11 @@ func NewDocument(elems ...*Element) *Document { return DC.Elements(elems...) } // slice of bytes is not a valid BSON document, this method will return an error. 
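// For example (illustrative; bsonBytes is a hypothetical variable
// holding a marshaled document):
//
//	doc, err := ReadDocument(bsonBytes)
//	if err != nil {
//		// the slice was not a valid BSON document
//	}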
func ReadDocument(b []byte) (*Document, error) { var doc = new(Document) - err := doc.UnmarshalBSON(b) - if err != nil { + + if err := doc.UnmarshalBSON(b); err != nil { return nil, err } + return doc, nil } @@ -87,9 +88,12 @@ func (d *Document) recursiveKeys(recursive bool, prefix ...string) (Keys, error) } ks := make(Keys, 0, len(d.elems)) + for _, elem := range d.elems { key := elem.Key() + ks = append(ks, Key{Prefix: prefix, Name: key}) + if !recursive { continue } @@ -99,19 +103,24 @@ func (d *Document) recursiveKeys(recursive bool, prefix ...string) (Keys, error) case '\x03': subprefix := append(prefix, key) subkeys, err := elem.value.MutableDocument().recursiveKeys(recursive, subprefix...) + if err != nil { return nil, err } + ks = append(ks, subkeys...) case '\x04': subprefix := append(prefix, key) subkeys, err := elem.value.MutableArray().doc.recursiveKeys(recursive, subprefix...) + if err != nil { return nil, err } + ks = append(ks, subkeys...) } } + return ks, nil } @@ -135,11 +144,13 @@ func (d *Document) Append(elems ...*Element) *Document { // instead of panicking here. panic(bsonerr.NilElement) } + d.elems = append(d.elems, elem) i := sort.Search(len(d.index), func(i int) bool { return bytes.Compare( d.keyFromIndex(i), elem.value.data[elem.value.start+1:elem.value.offset]) >= 0 }) + if i < len(d.index) { d.index = append(d.index, 0) copy(d.index[i+1:], d.index[i:]) @@ -148,6 +159,7 @@ func (d *Document) Append(elems ...*Element) *Document { d.index = append(d.index, uint32(len(d.elems)-1)) } } + return d } @@ -168,6 +180,7 @@ func (d *Document) Prepend(elems ...*Element) *Document { copy(d.elems[len(elems):], d.elems) remaining := len(elems) + for idx, elem := range elems { if elem == nil { if d.IgnoreNilInsert { @@ -175,6 +188,7 @@ func (d *Document) Prepend(elems ...*Element) *Document { copy(d.elems[idx:], d.elems[idx+1:]) d.elems[len(d.elems)-1] = nil d.elems = d.elems[:len(d.elems)-1] + continue } // Not very efficient, but we're about to blow up so ¯\_(ツ)_/¯ @@ -186,10 +200,13 @@ func (d *Document) Prepend(elems ...*Element) *Document { panic(bsonerr.NilElement) } remaining-- + d.elems[idx] = elem + for idx := range d.index { d.index[idx]++ } + i := sort.Search(len(d.index), func(i int) bool { return bytes.Compare( d.keyFromIndex(i), elem.value.data[elem.value.start+1:elem.value.offset]) >= 0 @@ -202,6 +219,7 @@ func (d *Document) Prepend(elems ...*Element) *Document { d.index = append(d.index, 0) } } + return d } @@ -219,11 +237,13 @@ func (d *Document) Set(elem *Element) *Document { if d.IgnoreNilInsert { return d } + panic(bsonerr.NilElement) } key := elem.Key() + "\x00" i := sort.Search(len(d.index), func(i int) bool { return bytes.Compare(d.keyFromIndex(i), []byte(key)) >= 0 }) + if i < len(d.index) && bytes.Equal(d.keyFromIndex(i), []byte(key)) { d.elems[d.index[i]] = elem return d @@ -231,6 +251,7 @@ func (d *Document) Set(elem *Element) *Document { d.elems = append(d.elems, elem) position := uint32(len(d.elems) - 1) + if i < len(d.index) { d.index = append(d.index, 0) copy(d.index[i+1:], d.index[i:]) @@ -289,15 +310,22 @@ func (d *Document) RecursiveLookupElementErr(key ...string) (*Element, error) { if len(key) == 0 { return nil, bsonerr.EmptyKey } - var elem *Element - var err error + + var ( + elem *Element + err error + ) + first := []byte(key[0] + "\x00") i := sort.Search(len(d.index), func(i int) bool { return bytes.Compare(d.keyFromIndex(i), first) >= 0 }) + if i < len(d.index) && bytes.Equal(d.keyFromIndex(i), first) { elem = d.elems[d.index[i]] + if 
len(key) == 1 { return elem, nil } + switch elem.value.Type() { case '\x03': elem, err = elem.value.MutableDocument().RecursiveLookupElementErr(key[1:]...) @@ -321,14 +349,17 @@ func (d *Document) RecursiveLookupElementErr(key ...string) (*Element, error) { err = bsonerr.InvalidDepthTraversal } } + if err != nil { return nil, err } + if elem == nil { // TODO(skriptble): This should also be a clearer error message. // Preferably we should track the depth at which the key was not found. return nil, bsonerr.ElementNotFound } + return elem, nil } @@ -347,21 +378,27 @@ func (d *Document) Delete(key ...string) *Element { // Do a binary search through the index, delete the element from // the index and delete the element from the elems array. var elem *Element + first := []byte(key[0] + "\x00") i := sort.Search(len(d.index), func(i int) bool { return bytes.Compare(d.keyFromIndex(i), first) >= 0 }) + if i < len(d.index) && bytes.Equal(d.keyFromIndex(i), first) { keyIndex := d.index[i] elem = d.elems[keyIndex] + if len(key) == 1 { d.index = append(d.index[:i], d.index[i+1:]...) d.elems = append(d.elems[:keyIndex], d.elems[keyIndex+1:]...) + for j := range d.index { if d.index[j] > keyIndex { d.index[j]-- } } + return elem } + switch elem.value.Type() { case '\x03': elem = elem.value.MutableDocument().Delete(key[1:]...) @@ -371,6 +408,7 @@ func (d *Document) Delete(key ...string) *Element { elem = nil } } + return elem } @@ -409,9 +447,22 @@ func (d *Document) Iterator() Iterator { return newIterator(d) } -func (d *Document) Extend(d2 *Document) *Document { d.Append(d2.elems...); return d } +// Extend merges a second document into the document. It may produce a +// document with duplicate keys. +func (d *Document) Extend(d2 *Document) *Document { d.Append(d2.elems...); return d } + +// ExtendReader merges the contents of a document in the form of a +// reader (byte slice) into the document. May result in a document +// with duplicate keys. func (d *Document) ExtendReader(r Reader) *Document { d.Append(DC.Reader(r).elems...); return d } +// ExtendInterface constructs a document using the interface +// constructor method. +func (d *Document) ExtendInterface(in interface{}) *Document { + d.Append(DC.Interface(in).elems...) + return d +} + // Reset clears a document so it can be reused. This method clears references // to the underlying pointers to elements so they can be garbage collected.
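// A typical (assumed) reuse pattern, rather than allocating a fresh
// document per iteration:
//
//	doc := NewDocument()
//	for _, name := range names {
//		doc.Reset()
//		doc.Append(EC.String("name", name))
//		// ... serialize doc before the next iteration ...
//	}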
func (d *Document) Reset() { @@ -422,6 +473,7 @@ func (d *Document) Reset() { for idx := range d.elems { d.elems[idx] = nil } + d.elems = d.elems[:0] d.index = d.index[:0] } @@ -434,13 +486,16 @@ func (d *Document) Validate() (uint32, error) { // Header and Footer var size uint32 = 4 + 1 + for _, elem := range d.elems { n, err := elem.Validate() if err != nil { return 0, err } + size += n } + return size, nil } @@ -457,7 +512,9 @@ func (d *Document) WriteTo(w io.Writer) (int64, error) { if err != nil { return 0, err } + n, err := w.Write(b) + return int64(n), err } @@ -469,21 +526,24 @@ func (d *Document) WriteDocument(start uint, writer interface{}) (int64, error) } var total int64 - var pos = start + size, err := d.Validate() if err != nil { return total, err } + switch w := writer.(type) { case []byte: - n, err := d.writeByteSlice(pos, size, w) + n, err := d.writeByteSlice(start, size, w) total += n + if err != nil { return total, err } default: return 0, bsonerr.InvalidWriter } + return total, nil } @@ -495,20 +555,26 @@ func (d *Document) writeByteSlice(start uint, size uint32, b []byte) (int64, err } var total int64 - var pos = start + + pos := start + if len(b) < int(start)+int(size) { return 0, newErrTooSmall() } + n, err := elements.Int32.Encode(start, b, int32(size)) total += int64(n) pos += uint(n) + if err != nil { return total, err } + for _, elem := range d.elems { n, err := elem.writeElement(true, pos, b) total += n pos += uint(n) + if err != nil { return total, err } @@ -516,9 +582,11 @@ func (d *Document) writeByteSlice(start uint, size uint32, b []byte) (int64, err n, err = elements.Byte.Encode(pos, b, '\x00') total += int64(n) + if err != nil { return total, err } + return total, nil } @@ -533,11 +601,14 @@ func (d *Document) MarshalBSON() ([]byte, error) { if err != nil { return nil, err } + b := make([]byte, size) _, err = d.writeByteSlice(0, size, b) + if err != nil { return nil, err } + return b, nil } @@ -567,6 +638,7 @@ func (d *Document) UnmarshalBSON(b []byte) error { } return nil }) + return err } @@ -577,22 +649,26 @@ func (d *Document) ReadFrom(r io.Reader) (int64, error) { } var total int64 + sizeBuf := make([]byte, 4) n, err := io.ReadFull(r, sizeBuf) total += int64(n) + if err != nil { return total, err } + givenLength := readi32(sizeBuf) b := make([]byte, givenLength) copy(b[0:4], sizeBuf) n, err = io.ReadFull(r, b[4:]) total += int64(n) + if err != nil { return total, err } - err = d.UnmarshalBSON(b) - return total, err + + return total, d.UnmarshalBSON(b) } // keyFromIndex returns the key for the element. 
The idx parameter is the @@ -604,6 +680,7 @@ func (d *Document) keyFromIndex(idx int) []byte { } haystack := d.elems[d.index[idx]] + return haystack.value.data[haystack.value.start+1 : haystack.value.offset] } @@ -614,13 +691,17 @@ func (d *Document) String() string { } var buf bytes.Buffer + buf.Write([]byte("bson.Document{")) + for idx, elem := range d.elems { if idx > 0 { buf.Write([]byte(", ")) } + fmt.Fprintf(&buf, "%s", elem) } + buf.WriteByte('}') return buf.String() diff --git a/vendor/github.com/evergreen-ci/birch/document_test.go b/vendor/github.com/evergreen-ci/birch/document_test.go index b0a16db721d..00cd9a743cd 100644 --- a/vendor/github.com/evergreen-ci/birch/document_test.go +++ b/vendor/github.com/evergreen-ci/birch/document_test.go @@ -14,8 +14,8 @@ import ( "reflect" "testing" - "github.com/google/go-cmp/cmp" "github.com/evergreen-ci/birch/bsonerr" + "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -858,7 +858,6 @@ func TestDocument(t *testing.T) { }) }) - } var tpag testPrependAppendGenerator @@ -949,16 +948,19 @@ func ExampleDocument() { ), EC.String("platform", "go1.9.2"), ) + if appName != "" { doc.Append(EC.SubDocumentFromElements("application", EC.String("name", appName))) } return doc } + buf, err := f("hello-world").MarshalBSON() if err != nil { fmt.Println(err) } + fmt.Println(buf) // Output: [177 0 0 0 3 100 114 105 118 101 114 0 52 0 0 0 2 110 97 109 101 0 16 0 0 0 109 111 110 103 111 45 103 111 45 100 114 105 118 101 114 0 2 118 101 114 115 105 111 110 0 8 0 0 0 49 50 51 52 53 54 55 0 0 3 111 115 0 46 0 0 0 2 116 121 112 101 0 7 0 0 0 100 97 114 119 105 110 0 2 97 114 99 104 105 116 101 99 116 117 114 101 0 6 0 0 0 97 109 100 54 52 0 0 2 112 108 97 116 102 111 114 109 0 8 0 0 0 103 111 49 46 57 46 50 0 3 97 112 112 108 105 99 97 116 105 111 110 0 27 0 0 0 2 110 97 109 101 0 12 0 0 0 104 101 108 108 111 45 119 111 114 108 100 0 0 0] @@ -966,7 +968,9 @@ func ExampleDocument() { func BenchmarkDocument(b *testing.B) { b.ReportAllocs() + internalVersion := "1234567" + for i := 0; i < b.N; i++ { doc := NewDocument( EC.SubDocumentFromElements("driver", @@ -999,6 +1003,7 @@ func valueEqual(v1, v2 *Value) bool { if v1.offset != v2.offset { return false } + return true } @@ -1006,6 +1011,7 @@ func elementEqual(e1, e2 *Element) bool { if e1 == nil && e2 == nil { return true } + if e1 == nil || e2 == nil { return false } @@ -1017,11 +1023,13 @@ func documentComparer(d1, d2 *Document) bool { if (len(d1.elems) != len(d2.elems)) || (len(d1.index) != len(d2.index)) { return false } + for index := range d1.elems { b1, err := d1.elems[index].MarshalBSON() if err != nil { return false } + b2, err := d2.elems[index].MarshalBSON() if err != nil { return false @@ -1035,5 +1043,6 @@ func documentComparer(d1, d2 *Document) bool { return false } } + return true } diff --git a/vendor/github.com/evergreen-ci/birch/element.go b/vendor/github.com/evergreen-ci/birch/element.go index 953d6a59c74..a081d2164a0 100644 --- a/vendor/github.com/evergreen-ci/birch/element.go +++ b/vendor/github.com/evergreen-ci/birch/element.go @@ -32,7 +32,8 @@ func newElement(start uint32, offset uint32) *Element { return &Element{&Value{start: start, offset: offset}} } -// Clone creates a of the element/ +// Copy creates a new Element which has a copy of the content from +// the original value, but is otherwise entirely independent.
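+//
+// For example (illustrative):
+//
+//	dup := elem.Copy()
+//	dup.SetValue(VC.Int32(42)) // elem is unchanged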
func (e *Element) Copy() *Element { return &Element{e.value.Copy()} } @@ -47,21 +48,27 @@ func (e *Element) Validate() (uint32, error) { if e == nil { return 0, bsonerr.NilElement } + if e.value == nil { return 0, bsonerr.UninitializedElement } var total uint32 = 1 + n, err := e.validateKey() total += n + if err != nil { return total, err } + n, err = e.value.validate(false) total += n + if err != nil { return total, err } + return total, nil } @@ -70,18 +77,24 @@ func (e *Element) validateKey() (uint32, error) { return 0, bsonerr.UninitializedElement } - pos, end := e.value.start+1, e.value.offset + pos := e.value.start + 1 + end := e.value.offset + var total uint32 + if end > uint32(len(e.value.data)) { end = uint32(len(e.value.data)) } + for ; pos < end && e.value.data[pos] != '\x00'; pos++ { total++ } + if pos == end || e.value.data[pos] != '\x00' { return total, bsonerr.InvalidKey } total++ + return total, nil } @@ -92,9 +105,12 @@ func (e *Element) Key() string { if !ok { panic(bsonerr.UninitializedElement) } + return key } +// KeyOK returns the key of the element, returning a false OK value if +// the element is uninitialized. func (e *Element) KeyOK() (string, bool) { if e == nil || e.value == nil || e.value.offset == 0 || e.value.data == nil { return "", false @@ -119,28 +135,33 @@ func (e *Element) writeElement(key bool, start uint, writer interface{}) (int64, // TODO(skriptble): Figure out if we want to use uint or uint32 and // standardize across all packages. var total int64 + size, err := e.Validate() if err != nil { return 0, err } + switch w := writer.(type) { case []byte: n, err := e.writeByteSlice(key, start, size, w) if err != nil { return 0, newErrTooSmall() } + total += n case io.Writer: return e.WriteTo(w) default: return 0, bsonerr.InvalidWriter } + return total, nil } // writeByteSlice handles writing this element to a slice of bytes.
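// The key parameter controls whether the element header (the type byte
// and the C-string key) is written ahead of the value; when it is
// false, only the value bytes are serialized.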
func (e *Element) writeByteSlice(key bool, start uint, size uint32, b []byte) (int64, error) { var startToWrite uint + needed := start + uint(size) if key { @@ -157,6 +178,7 @@ func (e *Element) writeByteSlice(key bool, start uint, size uint32, b []byte) (i } var n int + switch e.Value().Type() { case bsontype.EmbeddedDocument: if e.value.d == nil { @@ -166,6 +188,7 @@ func (e *Element) writeByteSlice(key bool, start uint, size uint32, b []byte) (i header := e.value.offset - e.value.start size -= header + if key { n += copy(b[start:], e.value.data[startToWrite:e.value.offset]) start += uint(n) @@ -173,6 +196,7 @@ func (e *Element) writeByteSlice(key bool, start uint, size uint32, b []byte) (i nn, err := e.value.d.writeByteSlice(start, size, b) n += int(nn) + if err != nil { return int64(n), err } @@ -184,6 +208,7 @@ func (e *Element) writeByteSlice(key bool, start uint, size uint32, b []byte) (i header := e.value.offset - e.value.start size -= header + if key { n += copy(b[start:], e.value.data[startToWrite:e.value.offset]) start += uint(n) @@ -193,6 +218,7 @@ func (e *Element) writeByteSlice(key bool, start uint, size uint32, b []byte) (i nn, err := arr.writeByteSlice(start, size, b) n += int(nn) + if err != nil { return int64(n), err } @@ -210,8 +236,8 @@ func (e *Element) writeByteSlice(key bool, start uint, size uint32, b []byte) (i } codeWithScopeLength := lengthWithoutScope + int32(scopeLength) - _, err = elements.Int32.Encode(uint(e.value.offset), e.value.data, codeWithScopeLength) - if err != nil { + + if _, err = elements.Int32.Encode(uint(e.value.offset), e.value.data, codeWithScopeLength); err != nil { return int64(n), err } @@ -223,6 +249,7 @@ func (e *Element) writeByteSlice(key bool, start uint, size uint32, b []byte) (i nn, err := e.value.d.writeByteSlice(start, scopeLength, b) n += int(nn) + if err != nil { return int64(n), err } @@ -239,8 +266,8 @@ func (e *Element) writeByteSlice(key bool, start uint, size uint32, b []byte) (i // Set the length of the value codeWithScopeLength := codeWithScopeEnd - int32(e.value.offset) - _, err := elements.Int32.Encode(uint(e.value.offset), e.value.data, codeWithScopeLength) - if err != nil { + + if _, err := elements.Int32.Encode(uint(e.value.offset), e.value.data, codeWithScopeLength); err != nil { return 0, err } @@ -258,11 +285,13 @@ func (e *Element) MarshalBSON() ([]byte, error) { if err != nil { return nil, err } + b := make([]byte, size) - _, err = e.writeByteSlice(true, 0, size, b) - if err != nil { + + if _, err = e.writeByteSlice(true, 0, size, b); err != nil { return nil, err } + return b, nil } @@ -272,6 +301,7 @@ func (e *Element) String() string { if s, ok := val.(string); ok && e.Value().Type() == bsontype.String { val = strconv.Quote(s) } + return fmt.Sprintf(`bson.Element{[%s]"%s": %v}`, e.Value().Type(), e.Key(), val) } @@ -280,6 +310,7 @@ func (e *Element) Equal(e2 *Element) bool { if e == nil && e2 == nil { return true } + if e == nil || e2 == nil { return false } @@ -287,6 +318,7 @@ func (e *Element) Equal(e2 *Element) bool { if e.Key() != e2.Key() { return false } + return e.value.Equal(e2.value) } @@ -322,6 +354,7 @@ func convertValueToElem(key string, v *Value) *Element { elem := newElement(0, uint32(keyLen+2)) elem.value.data = d elem.value.d = nil + if v.d != nil { elem.value.d = v.d.Copy() } diff --git a/vendor/github.com/evergreen-ci/birch/element_test.go b/vendor/github.com/evergreen-ci/birch/element_test.go index d915ab39333..17c11bf0bcf 100644 --- a/vendor/github.com/evergreen-ci/birch/element_test.go +++ 
b/vendor/github.com/evergreen-ci/birch/element_test.go @@ -842,7 +842,7 @@ func TestElement(t *testing.T) { }, {"Not double", &Element{&Value{start: 0, offset: 2, data: []byte{0x02, 0x00}}}, 0, - bsonerr.ElementType{"compact.Element.double", bsontype.Type(0x02)}, + bsonerr.NewElementTypeError("compact.Element.double", bsontype.Type(0x02)), }, {"Success", &Element{&Value{ @@ -885,7 +885,7 @@ func TestElement(t *testing.T) { }, {"Not String", &Element{&Value{start: 0, offset: 2, data: []byte{0x01, 0x00}}}, "", - bsonerr.ElementType{"compact.Element.String", bsontype.Type(0x01)}, + bsonerr.NewElementTypeError("compact.Element.String", bsontype.Type(0x01)), }, {"Success", &Element{&Value{ @@ -928,7 +928,7 @@ func TestElement(t *testing.T) { }, {"Not Document", &Element{&Value{start: 0, offset: 2, data: []byte{0x01, 0x00}}}, nil, - bsonerr.ElementType{"compact.Element.Document", bsontype.Type(0x01)}, + bsonerr.NewElementTypeError("compact.Element.Document", bsontype.Type(0x01)), }, {"Success", &Element{&Value{ @@ -971,7 +971,7 @@ func TestElement(t *testing.T) { }, {"Not Array", &Element{&Value{start: 0, offset: 2, data: []byte{0x01, 0x00}}}, nil, - bsonerr.ElementType{"compact.Element.Array", bsontype.Type(0x01)}, + bsonerr.NewElementTypeError("compact.Element.Array", bsontype.Type(0x01)), }, {"Success", &Element{&Value{ @@ -1015,7 +1015,7 @@ func TestElement(t *testing.T) { }, {"Not binary", &Element{&Value{start: 0, offset: 2, data: []byte{0x01, 0x00}}}, 0x00, nil, - bsonerr.ElementType{"compact.Element.binary", bsontype.Type(0x01)}, + bsonerr.NewElementTypeError("compact.Element.binary", bsontype.Type(0x01)), }, {"Success", &Element{&Value{ @@ -1062,7 +1062,7 @@ func TestElement(t *testing.T) { }, {"Not objectID", &Element{&Value{start: 0, offset: 2, data: []byte{0x01, 0x00}}}, empty, - bsonerr.ElementType{"compact.Element.ObejctID", bsontype.Type(0x01)}, + bsonerr.NewElementTypeError("compact.Element.ObejctID", bsontype.Type(0x01)), }, {"Success", &Element{&Value{ @@ -1111,7 +1111,7 @@ func TestElement(t *testing.T) { }, {"Not Boolean", &Element{&Value{start: 0, offset: 2, data: []byte{0x01, 0x00}}}, false, - bsonerr.ElementType{"compact.Element.Boolean", bsontype.Type(0x01)}, + bsonerr.NewElementTypeError("compact.Element.Boolean", bsontype.Type(0x01)), }, {"Success", &Element{&Value{ @@ -1155,7 +1155,7 @@ func TestElement(t *testing.T) { }, {"Not UTC dateTime", &Element{&Value{start: 0, offset: 2, data: []byte{0x01, 0x00}}}, empty, - bsonerr.ElementType{"compact.Element.dateTime", bsontype.Type(0x01)}, + bsonerr.NewElementTypeError("compact.Element.dateTime", bsontype.Type(0x01)), }, {"Success", &Element{&Value{ @@ -1199,7 +1199,7 @@ func TestElement(t *testing.T) { }, {"Not UTC dateTime", &Element{&Value{start: 0, offset: 2, data: []byte{0x01, 0x00}}}, empty, - bsonerr.ElementType{"compact.Element.dateTime", bsontype.Type(0x01)}, + bsonerr.NewElementTypeError("compact.Element.dateTime", bsontype.Type(0x01)), }, {"Success", &Element{&Value{ @@ -1243,7 +1243,7 @@ func TestElement(t *testing.T) { }, {"Not regex", &Element{&Value{start: 0, offset: 2, data: []byte{0x01, 0x00}}}, "", "", - bsonerr.ElementType{"compact.Element.regex", bsontype.Type(0x01)}, + bsonerr.NewElementTypeError("compact.Element.regex", bsontype.Type(0x01)), }, {"Success", &Element{&Value{ @@ -1291,7 +1291,7 @@ func TestElement(t *testing.T) { }, {"Not dbPointer", &Element{&Value{start: 0, offset: 2, data: []byte{0x01, 0x00}}}, "", empty, - bsonerr.ElementType{"compact.Element.dbPointer", bsontype.Type(0x01)}, + 
bsonerr.NewElementTypeError("compact.Element.dbPointer", bsontype.Type(0x01)), }, {"Success", &Element{&Value{ @@ -1345,7 +1345,7 @@ func TestElement(t *testing.T) { }, {"Not JavaScript", &Element{&Value{start: 0, offset: 2, data: []byte{0x01, 0x00}}}, "", - bsonerr.ElementType{"compact.Element.JavaScript", bsontype.Type(0x01)}, + bsonerr.NewElementTypeError("compact.Element.JavaScript", bsontype.Type(0x01)), }, {"Success", &Element{&Value{ @@ -1388,7 +1388,7 @@ func TestElement(t *testing.T) { }, {"Not JavaScript", &Element{&Value{start: 0, offset: 2, data: []byte{0x01, 0x00}}}, "", - bsonerr.ElementType{"compact.Element.symbol", bsontype.Type(0x01)}, + bsonerr.NewElementTypeError("compact.Element.symbol", bsontype.Type(0x01)), }, {"Success", &Element{&Value{ @@ -1432,7 +1432,7 @@ func TestElement(t *testing.T) { }, {"Not JavascriptWithScope", &Element{&Value{start: 0, offset: 2, data: []byte{0x01, 0x00}}}, "", nil, - bsonerr.ElementType{"compact.Element.JavaScriptWithScope", bsontype.Type(0x01)}, + bsonerr.NewElementTypeError("compact.Element.JavaScriptWithScope", bsontype.Type(0x01)), }, {"Success", &Element{&Value{ @@ -1482,7 +1482,7 @@ func TestElement(t *testing.T) { }, {"Not int32", &Element{&Value{start: 0, offset: 2, data: []byte{0x02, 0x00}}}, 0, - bsonerr.ElementType{"compact.Element.int32", bsontype.Type(0x02)}, + bsonerr.NewElementTypeError("compact.Element.int32", bsontype.Type(0x02)), }, {"Success", &Element{&Value{ @@ -1526,7 +1526,7 @@ func TestElement(t *testing.T) { }, {"Not timestamp", &Element{&Value{start: 0, offset: 2, data: []byte{0x02, 0x00}}}, 0, 0, - bsonerr.ElementType{"compact.Element.timestamp", bsontype.Type(0x02)}, + bsonerr.NewElementTypeError("compact.Element.timestamp", bsontype.Type(0x02)), }, {"Success", &Element{&Value{ @@ -1570,7 +1570,7 @@ func TestElement(t *testing.T) { }, {"Not int64Type", &Element{&Value{start: 0, offset: 2, data: []byte{0x02, 0x00}}}, 0, - bsonerr.ElementType{"compact.Element.int64Type", bsontype.Type(0x02)}, + bsonerr.NewElementTypeError("compact.Element.int64Type", bsontype.Type(0x02)), }, {"Success", &Element{&Value{ @@ -1614,7 +1614,7 @@ func TestElement(t *testing.T) { }, {"Not int64Type", &Element{&Value{start: 0, offset: 2, data: []byte{0x02, 0x00}}}, empty, - bsonerr.ElementType{"compact.Element.Decimal128", bsontype.Type(0x02)}, + bsonerr.NewElementTypeError("compact.Element.Decimal128", bsontype.Type(0x02)), }, {"Success", &Element{&Value{ @@ -1728,7 +1728,6 @@ func TestElement(t *testing.T) { elem.SetValue(VC.Int32(42)) assert.Equal(t, 42, elem.Value().Int()) }) - } func testConvertValueToElem(t *testing.T) { diff --git a/vendor/github.com/evergreen-ci/birch/elements/elements.go b/vendor/github.com/evergreen-ci/birch/elements/elements.go index e82f7293326..b458f684f70 100644 --- a/vendor/github.com/evergreen-ci/birch/elements/elements.go +++ b/vendor/github.com/evergreen-ci/birch/elements/elements.go @@ -128,6 +128,7 @@ func (DoubleNS) Element(start uint, writer []byte, key string, f float64) (int, n, err := Byte.Encode(start, writer, '\x01') start += uint(n) total += n + if err != nil { return total, err } @@ -135,12 +136,14 @@ func (DoubleNS) Element(start uint, writer []byte, key string, f float64) (int, n, err = CString.Encode(start, writer, key) start += uint(n) total += n + if err != nil { return total, err } n, err = Double.Encode(start, writer, f) total += n + if err != nil { return total, err } @@ -155,11 +158,16 @@ func (StringNS) Encode(start uint, writer []byte, s string) (int, error) { written, err := 
Int32.Encode(start, writer, int32(len(s))+1) total += written + if err != nil { return total, err } written, err = CString.Encode(start+uint(total), writer, s) + if err != nil { + return total, err + } + total += written return total, nil @@ -173,6 +181,7 @@ func (StringNS) Element(start uint, writer []byte, key string, s string) (int, e n, err := Byte.Encode(start, writer, '\x02') start += uint(n) total += n + if err != nil { return total, err } @@ -180,12 +189,14 @@ func (StringNS) Element(start uint, writer []byte, key string, s string) (int, e n, err = CString.Encode(start, writer, key) start += uint(n) total += n + if err != nil { return total, err } n, err = String.Encode(start, writer, s) total += n + if err != nil { return total, err } @@ -207,6 +218,7 @@ func (DocumentNS) Element(start uint, writer []byte, key string, doc []byte) (in n, err := Byte.Encode(start, writer, '\x03') start += uint(n) total += n + if err != nil { return total, err } @@ -214,12 +226,14 @@ func (DocumentNS) Element(start uint, writer []byte, key string, doc []byte) (in n, err = CString.Encode(start, writer, key) start += uint(n) total += n + if err != nil { return total, err } n, err = Document.Encode(start, writer, doc) total += n + if err != nil { return total, err } @@ -241,6 +255,7 @@ func (ArrayNS) Element(start uint, writer []byte, key string, arr []byte) (int, n, err := Byte.Encode(start, writer, '\x04') start += uint(n) total += n + if err != nil { return total, err } @@ -248,12 +263,14 @@ func (ArrayNS) Element(start uint, writer []byte, key string, arr []byte) (int, n, err = CString.Encode(start, writer, key) start += uint(n) total += n + if err != nil { return total, err } n, err = Array.Encode(start, writer, arr) total += n + if err != nil { return total, err } @@ -278,6 +295,7 @@ func (BinNS) Encode(start uint, writer []byte, b []byte, btype byte) (int, error n, err := Int32.Encode(start, writer, int32(len(b))) start += uint(n) total += n + if err != nil { return total, err } @@ -302,6 +320,7 @@ func (BinNS) encodeSubtype2(start uint, writer []byte, b []byte) (int, error) { n, err := Int32.Encode(start, writer, int32(len(b))+4) start += uint(n) total += n + if err != nil { return total, err } @@ -313,6 +332,7 @@ func (BinNS) encodeSubtype2(start uint, writer []byte, b []byte) (int, error) { n, err = Int32.Encode(start, writer, int32(len(b))) start += uint(n) total += n + if err != nil { return total, err } @@ -330,6 +350,7 @@ func (BinNS) Element(start uint, writer []byte, key string, b []byte, btype byte n, err := Byte.Encode(start, writer, '\x05') start += uint(n) total += n + if err != nil { return total, err } @@ -337,12 +358,14 @@ func (BinNS) Element(start uint, writer []byte, key string, b []byte, btype byte n, err = CString.Encode(start, writer, key) start += uint(n) total += n + if err != nil { return total, err } n, err = Binary.Encode(start, writer, b, btype) total += n + if err != nil { return total, err } @@ -364,6 +387,7 @@ func (ObjectIDNS) Element(start uint, writer []byte, key string, oid [12]byte) ( n, err := Byte.Encode(start, writer, '\x07') start += uint(n) total += n + if err != nil { return total, err } @@ -371,13 +395,14 @@ func (ObjectIDNS) Element(start uint, writer []byte, key string, oid [12]byte) ( n, err = CString.Encode(start, writer, key) start += uint(n) total += n + if err != nil { return total, err } n, err = ObjectID.Encode(start, writer, oid) - start += uint(n) total += n + if err != nil { return total, err } @@ -409,6 +434,7 @@ func (BooleanNS) 
Element(start uint, writer []byte, key string, b bool) (int, er n, err := Byte.Encode(start, writer, '\x08') start += uint(n) total += n + if err != nil { return total, err } @@ -416,13 +442,14 @@ func (BooleanNS) Element(start uint, writer []byte, key string, b bool) (int, er n, err = CString.Encode(start, writer, key) start += uint(n) total += n + if err != nil { return total, err } n, err = Boolean.Encode(start, writer, b) - start += uint(n) total += n + if err != nil { return total, err } @@ -444,6 +471,7 @@ func (DatetimeNS) Element(start uint, writer []byte, key string, dt int64) (int, n, err := Byte.Encode(start, writer, '\x09') start += uint(n) total += n + if err != nil { return total, err } @@ -451,13 +479,14 @@ func (DatetimeNS) Element(start uint, writer []byte, key string, dt int64) (int, n, err = CString.Encode(start, writer, key) start += uint(n) total += n + if err != nil { return total, err } n, err = DateTime.Encode(start, writer, dt) - start += uint(n) total += n + if err != nil { return total, err } @@ -472,6 +501,7 @@ func (RegexNS) Encode(start uint, writer []byte, pattern, options string) (int, written, err := CString.Encode(start, writer, pattern) total += written + if err != nil { return total, err } @@ -490,6 +520,7 @@ func (RegexNS) Element(start uint, writer []byte, key string, pattern, options s n, err := Byte.Encode(start, writer, '\x0B') start += uint(n) total += n + if err != nil { return total, err } @@ -497,6 +528,7 @@ func (RegexNS) Element(start uint, writer []byte, key string, pattern, options s n, err = CString.Encode(start, writer, key) start += uint(n) total += n + if err != nil { return total, err } @@ -504,13 +536,14 @@ func (RegexNS) Element(start uint, writer []byte, key string, pattern, options s n, err = CString.Encode(start, writer, pattern) start += uint(n) total += n + if err != nil { return total, err } n, err = CString.Encode(start, writer, options) - start += uint(n) total += n + if err != nil { return total, err } @@ -525,6 +558,7 @@ func (DBPointerNS) Encode(start uint, writer []byte, ns string, oid [12]byte) (i written, err := String.Encode(start, writer, ns) total += written + if err != nil { return total, err } @@ -543,6 +577,7 @@ func (DBPointerNS) Element(start uint, writer []byte, key string, ns string, oid n, err := Byte.Encode(start, writer, '\x0C') start += uint(n) total += n + if err != nil { return total, err } @@ -550,19 +585,19 @@ func (DBPointerNS) Element(start uint, writer []byte, key string, ns string, oid n, err = CString.Encode(start, writer, key) start += uint(n) total += n + if err != nil { return total, err } n, err = DBPointer.Encode(start, writer, ns, oid) - start += uint(n) total += n + if err != nil { return total, err } return total, nil - } // Encode encodes a JavaScript string into a BSON JavaScript element and serializes the bytes to the @@ -579,6 +614,7 @@ func (JavaScriptNS) Element(start uint, writer []byte, key string, code string) n, err := Byte.Encode(start, writer, '\x0D') start += uint(n) total += n + if err != nil { return total, err } @@ -586,13 +622,14 @@ func (JavaScriptNS) Element(start uint, writer []byte, key string, code string) n, err = CString.Encode(start, writer, key) start += uint(n) total += n + if err != nil { return total, err } n, err = JavaScript.Encode(start, writer, code) - start += uint(n) total += n + if err != nil { return total, err } @@ -614,6 +651,7 @@ func (SymbolNS) Element(start uint, writer []byte, key string, symbol string) (i n, err := Byte.Encode(start, writer, 
'\x0E') start += uint(n) total += n + if err != nil { return total, err } @@ -621,13 +659,14 @@ func (SymbolNS) Element(start uint, writer []byte, key string, symbol string) (i n, err = CString.Encode(start, writer, key) start += uint(n) total += n + if err != nil { return total, err } n, err = Symbol.Encode(start, writer, symbol) - start += uint(n) total += n + if err != nil { return total, err } @@ -644,6 +683,7 @@ func (CodeWithScopeNS) Encode(start uint, writer []byte, code string, doc []byte n, err := Int32.Encode(start, writer, 9+int32(len(code))+int32(len(doc))) start += uint(n) total += n + if err != nil { return total, err } @@ -651,6 +691,7 @@ func (CodeWithScopeNS) Encode(start uint, writer []byte, code string, doc []byte n, err = String.Encode(start, writer, code) start += uint(n) total += n + if err != nil { return total, err } @@ -669,6 +710,7 @@ func (CodeWithScopeNS) Element(start uint, writer []byte, key string, code strin n, err := Byte.Encode(start, writer, '\x0F') start += uint(n) total += n + if err != nil { return total, err } @@ -676,13 +718,14 @@ func (CodeWithScopeNS) Element(start uint, writer []byte, key string, code strin n, err = CString.Encode(start, writer, key) start += uint(n) total += n + if err != nil { return total, err } n, err = CodeWithScope.Encode(start, writer, code, scope) - start += uint(n) total += n + if err != nil { return total, err } @@ -700,7 +743,6 @@ func (Int32NS) Encode(start uint, writer []byte, i int32) (int, error) { binary.LittleEndian.PutUint32(writer[start:start+4], signed32ToUnsigned(i)) return 4, nil - } // Element encodes an int32 and a key into a BSON int32 element and serializes the bytes to the @@ -711,6 +753,7 @@ func (Int32NS) Element(start uint, writer []byte, key string, i int32) (int, err n, err := Byte.Encode(start, writer, '\x10') start += uint(n) total += n + if err != nil { return total, err } @@ -718,12 +761,14 @@ func (Int32NS) Element(start uint, writer []byte, key string, i int32) (int, err n, err = CString.Encode(start, writer, key) start += uint(n) total += n + if err != nil { return total, err } n, err = Int32.Encode(start, writer, i) total += n + if err != nil { return total, err } @@ -739,12 +784,12 @@ func (TimestampNS) Encode(start uint, writer []byte, t uint32, i uint32) (int, e n, err := encodeUint32(start, writer, i) start += uint(n) total += n + if err != nil { return total, err } n, err = encodeUint32(start, writer, t) - start += uint(n) total += n return total, err @@ -758,6 +803,7 @@ func (TimestampNS) Element(start uint, writer []byte, key string, t uint32, i ui n, err := Byte.Encode(start, writer, '\x11') start += uint(n) total += n + if err != nil { return total, err } @@ -765,12 +811,14 @@ func (TimestampNS) Element(start uint, writer []byte, key string, t uint32, i ui n, err = CString.Encode(start, writer, key) start += uint(n) total += n + if err != nil { return total, err } n, err = Timestamp.Encode(start, writer, t, i) total += n + if err != nil { return total, err } @@ -794,6 +842,7 @@ func (Int64NS) Element(start uint, writer []byte, key string, i int64) (int, err n, err := Byte.Encode(start, writer, '\x12') start += uint(n) total += n + if err != nil { return total, err } @@ -801,12 +850,14 @@ func (Int64NS) Element(start uint, writer []byte, key string, i int64) (int, err n, err = CString.Encode(start, writer, key) start += uint(n) total += n + if err != nil { return total, err } n, err = Int64.Encode(start, writer, i) total += n + if err != nil { return total, err } @@ -818,10 +869,12 
@@ func (Int64NS) Element(start uint, writer []byte, key string, i int64) (int, err // provided writer. func (Decimal128NS) Encode(start uint, writer []byte, d decimal.Decimal128) (int, error) { var total int + high, low := d.GetBytes() written, err := encodeUint64(start, writer, low) total += written + if err != nil { return total, err } @@ -840,6 +893,7 @@ func (Decimal128NS) Element(start uint, writer []byte, key string, d decimal.Dec n, err := Byte.Encode(start, writer, '\x13') start += uint(n) total += n + if err != nil { return total, err } @@ -847,12 +901,14 @@ func (Decimal128NS) Element(start uint, writer []byte, key string, d decimal.Dec n, err = CString.Encode(start, writer, key) start += uint(n) total += n + if err != nil { return total, err } n, err = Decimal128.Encode(start, writer, d) total += n + if err != nil { return total, err } @@ -904,7 +960,6 @@ func encodeUint32(start uint, writer []byte, u uint32) (int, error) { binary.LittleEndian.PutUint32(writer[start:], u) return 4, nil - } func encodeUint64(start uint, writer []byte, u uint64) (int, error) { @@ -915,7 +970,6 @@ func encodeUint64(start uint, writer []byte, u uint64) (int, error) { binary.LittleEndian.PutUint64(writer[start:], u) return 8, nil - } func signed32ToUnsigned(i int32) uint32 { diff --git a/vendor/github.com/evergreen-ci/birch/iterator.go b/vendor/github.com/evergreen-ci/birch/iterator.go index 9db3e3fa6d5..7a11d8706e1 100644 --- a/vendor/github.com/evergreen-ci/birch/iterator.go +++ b/vendor/github.com/evergreen-ci/birch/iterator.go @@ -68,9 +68,11 @@ type readerIterator struct { // newReaderIterator constructors a new readerIterator over a given Reader. func newReaderIterator(r Reader) (*readerIterator, error) { itr := new(readerIterator) + if len(r) < 5 { return nil, newErrTooSmall() } + givenLength := readi32(r[0:4]) if len(r) < int(givenLength) { return nil, bsonerr.InvalidLength @@ -92,13 +94,16 @@ func (itr *readerIterator) Next() bool { itr.err = bsonerr.InvalidReadOnlyDocument return false } + if itr.r[itr.pos] == '\x00' { return false } + elemStart := itr.pos itr.pos++ n, err := itr.r.validateKey(itr.pos, itr.end) itr.pos += n + if err != nil { itr.err = err return false @@ -111,10 +116,12 @@ func (itr *readerIterator) Next() bool { n, err = itr.elem.value.validate(false) itr.pos += n + if err != nil { itr.err = err return false } + return true } diff --git a/vendor/github.com/evergreen-ci/birch/jsonx/constructor.go b/vendor/github.com/evergreen-ci/birch/jsonx/constructor.go new file mode 100644 index 00000000000..547840726c0 --- /dev/null +++ b/vendor/github.com/evergreen-ci/birch/jsonx/constructor.go @@ -0,0 +1,229 @@ +package jsonx + +import ( + "io" + "io/ioutil" + + "github.com/pkg/errors" +) + +type DocumentConstructor struct{} + +var DC = DocumentConstructor{} + +func (DocumentConstructor) New() *Document { return DC.Make(0) } +func (DocumentConstructor) Make(n int) *Document { return &Document{elems: make([]*Element, 0, n)} } +func (DocumentConstructor) Bytes(in []byte) *Document { return docConstructorOrPanic(DC.BytesErr(in)) } + +func (DocumentConstructor) Reader(in io.Reader) *Document { + return docConstructorOrPanic(DC.ReaderErr(in)) +} + +func (DocumentConstructor) Elements(elems ...*Element) *Document { + return DC.Make(len(elems)).Append(elems...) 
+} + +func (DocumentConstructor) BytesErr(in []byte) (*Document, error) { + d := DC.New() + + if err := d.UnmarshalJSON(in); err != nil { + return nil, err + } + + return d, nil +} + +func (DocumentConstructor) ReaderErr(in io.Reader) (*Document, error) { + buf, err := ioutil.ReadAll(in) + if err != nil { + return nil, errors.WithStack(err) + } + + return DC.BytesErr(buf) +} + +type ArrayConstructor struct{} + +var AC = ArrayConstructor{} + +func (ArrayConstructor) New() *Array { return AC.Make(0) } +func (ArrayConstructor) Make(n int) *Array { return &Array{elems: make([]*Value, 0, n)} } +func (ArrayConstructor) Elements(elems ...*Value) *Array { return AC.Make(len(elems)).Append(elems...) } +func (ArrayConstructor) Bytes(in []byte) *Array { return arrayConstructorOrPanic(AC.BytesErr(in)) } +func (ArrayConstructor) Reader(in io.Reader) *Array { return arrayConstructorOrPanic(AC.ReaderErr(in)) } + +func (ArrayConstructor) BytesErr(in []byte) (*Array, error) { + a := AC.New() + if err := a.UnmarshalJSON(in); err != nil { + return nil, errors.WithStack(err) + } + + return a, nil +} + +func (ArrayConstructor) ReaderErr(in io.Reader) (*Array, error) { + buf, err := ioutil.ReadAll(in) + if err != nil { + return nil, errors.WithStack(err) + } + + return AC.BytesErr(buf) +} + +type ElementConstructor struct{} + +var EC = ElementConstructor{} + +func (ElementConstructor) Value(key string, val *Value) *Element { + return &Element{key: key, value: val} +} + +func (ElementConstructor) String(key string, value string) *Element { + return EC.Value(key, VC.String(value)) +} + +func (ElementConstructor) Boolean(key string, val bool) *Element { + return EC.Value(key, VC.Boolean(val)) +} + +func (ElementConstructor) Int(key string, n int) *Element { + return EC.Value(key, VC.Int(n)) +} + +func (ElementConstructor) Int32(key string, n int32) *Element { + return EC.Value(key, VC.Int32(n)) +} + +func (ElementConstructor) Int64(key string, n int64) *Element { + return EC.Value(key, VC.Int64(n)) +} + +func (ElementConstructor) Float64(key string, n float64) *Element { + return EC.Value(key, VC.Float64(n)) +} + +func (ElementConstructor) Float32(key string, n float32) *Element { + return EC.Value(key, VC.Float32(n)) +} + +func (ElementConstructor) Nil(key string) *Element { + return EC.Value(key, VC.Nil()) +} + +func (ElementConstructor) Object(key string, doc *Document) *Element { + return EC.Value(key, VC.Object(doc)) +} + +func (ElementConstructor) ObjectFromElements(key string, elems ...*Element) *Element { + return EC.Value(key, VC.ObjectFromElements(elems...)) +} + +func (ElementConstructor) Array(key string, a *Array) *Element { + return EC.Value(key, VC.Array(a)) +} + +func (ElementConstructor) ArrayFromElements(key string, elems ...*Value) *Element { + return EC.Value(key, VC.ArrayFromElements(elems...)) +} + +type ValueConstructor struct{} + +var VC = ValueConstructor{} + +func (ValueConstructor) Bytes(in []byte) *Value { return valueConstructorOrPanic(VC.BytesErr(in)) } +func (ValueConstructor) BytesErr(in []byte) (*Value, error) { + val := &Value{} + + if err := val.UnmarshalJSON(in); err != nil { + return nil, errors.WithStack(err) + } + return val, nil +} + +func (ValueConstructor) Reader(in io.Reader) *Value { return valueConstructorOrPanic(VC.ReaderErr(in)) } +func (ValueConstructor) ReaderErr(in io.Reader) (*Value, error) { + buf, err := ioutil.ReadAll(in) + if err != nil { + return nil, errors.WithStack(err) + } + + return VC.BytesErr(buf) +} + +func (ValueConstructor) String(s string) *Value { 
+ return &Value{ + t: String, + value: s, + } +} + +func (ValueConstructor) Int(n int) *Value { + return &Value{ + t: NumberInteger, + value: n, + } +} + +func (ValueConstructor) Int32(n int32) *Value { + return &Value{ + t: NumberInteger, + value: n, + } +} + +func (ValueConstructor) Int64(n int64) *Value { + return &Value{ + t: NumberInteger, + value: n, + } +} + +func (ValueConstructor) Float64(n float64) *Value { + return &Value{ + t: NumberDouble, + value: n, + } +} + +func (ValueConstructor) Float32(n float32) *Value { + return &Value{ + t: NumberDouble, + value: n, + } +} + +func (ValueConstructor) Nil() *Value { + return &Value{ + t: Null, + value: nil, + } +} + +func (ValueConstructor) Boolean(b bool) *Value { + return &Value{ + t: Bool, + value: b, + } +} + +func (ValueConstructor) Object(doc *Document) *Value { + return &Value{ + t: ObjectValue, + value: doc, + } +} + +func (ValueConstructor) ObjectFromElements(elems ...*Element) *Value { + return VC.Object(DC.Elements(elems...)) +} + +func (ValueConstructor) Array(a *Array) *Value { + return &Value{ + t: ArrayValue, + value: a, + } +} + +func (ValueConstructor) ArrayFromElements(elems ...*Value) *Value { + return VC.Array(AC.Elements(elems...)) +} diff --git a/vendor/github.com/evergreen-ci/birch/jsonx/constructor_util.go b/vendor/github.com/evergreen-ci/birch/jsonx/constructor_util.go new file mode 100644 index 00000000000..e1c3f85840d --- /dev/null +++ b/vendor/github.com/evergreen-ci/birch/jsonx/constructor_util.go @@ -0,0 +1,11 @@ +package jsonx + +func errOrPanic(err error) { + if err != nil { + panic(err) + } +} + +func docConstructorOrPanic(doc *Document, err error) *Document { errOrPanic(err); return doc } +func arrayConstructorOrPanic(a *Array, err error) *Array { errOrPanic(err); return a } +func valueConstructorOrPanic(v *Value, err error) *Value { errOrPanic(err); return v } diff --git a/vendor/github.com/evergreen-ci/birch/jsonx/internal/LICENSE b/vendor/github.com/evergreen-ci/birch/jsonx/internal/LICENSE new file mode 100644 index 00000000000..180a8d75d77 --- /dev/null +++ b/vendor/github.com/evergreen-ci/birch/jsonx/internal/LICENSE @@ -0,0 +1,22 @@ +For [gjson.go gjson_test.go] +---- +The MIT License (MIT) + +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
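// Before the vendored gjson parser below, a minimal sketch of how the
// jsonx constructors above compose (illustrative only, not part of the
// change):
//
//	doc := jsonx.DC.Elements(
//		jsonx.EC.String("name", "evergreen"),
//		jsonx.EC.Array("tags", jsonx.AC.Elements(
//			jsonx.VC.String("ci"),
//			jsonx.VC.Boolean(true),
//		)),
//	)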
diff --git a/vendor/github.com/evergreen-ci/birch/jsonx/internal/gjson.go b/vendor/github.com/evergreen-ci/birch/jsonx/internal/gjson.go new file mode 100644 index 00000000000..49a91dacc46 --- /dev/null +++ b/vendor/github.com/evergreen-ci/birch/jsonx/internal/gjson.go @@ -0,0 +1,1342 @@ +package internal + +import ( + "encoding/json" + "errors" + "strconv" + "time" + "unicode/utf16" + "unicode/utf8" +) + +// Type is Result type +type Type int + +const ( + // Null is a null json value + Null Type = iota + // False is a json false boolean + False + // Number is json number + Number + // String is a json string + String + // True is a json true boolean + True + // JSON is a raw block of JSON + JSON +) + +// String returns a string representation of the type. +func (t Type) String() string { + switch t { + default: + return "" + case Null: + return "Null" + case False: + return "False" + case Number: + return "Number" + case String: + return "String" + case True: + return "True" + case JSON: + return "JSON" + } +} + +// Result represents a json value that is returned from Parse(). +type Result struct { + // Type is the json type + Type Type + // Raw is the raw json + Raw string + // Str is the json string + Str string + // Num is the json number + Num float64 + // Index of raw value in original json, zero means index unknown + Index int +} + +// String returns a string representation of the value. +func (t Result) String() string { + switch t.Type { + default: + return "" + case False: + return "false" + case Number: + if len(t.Raw) == 0 { + // calculated result + return strconv.FormatFloat(t.Num, 'f', -1, 64) + } + var i int + if t.Raw[0] == '-' { + i++ + } + for ; i < len(t.Raw); i++ { + if t.Raw[i] < '0' || t.Raw[i] > '9' { + return strconv.FormatFloat(t.Num, 'f', -1, 64) + } + } + return t.Raw + case String: + return t.Str + case JSON: + return t.Raw + case True: + return "true" + } +} + +// Bool returns a boolean representation. +func (t Result) Bool() bool { + switch t.Type { + default: + return false + case True: + return true + case String: + return t.Str != "" && t.Str != "0" && t.Str != "false" + case Number: + return t.Num != 0 + } +} + +// Int returns an integer representation. +func (t Result) Int() int64 { + switch t.Type { + default: + return 0 + case True: + return 1 + case String: + n, _ := parseInt(t.Str) + return n + case Number: + // try to directly convert the float64 to int64 + n, ok := floatToInt(t.Num) + if !ok { + // now try to parse the raw string + n, ok = parseInt(t.Raw) + if !ok { + // fallback to a standard conversion + return int64(t.Num) + } + } + return n + } +} + +// Uint returns an unsigned integer representation. +func (t Result) Uint() uint64 { + switch t.Type { + default: + return 0 + case True: + return 1 + case String: + n, _ := parseUint(t.Str) + return n + case Number: + // try to directly convert the float64 to uint64 + n, ok := floatToUint(t.Num) + if !ok { + // now try to parse the raw string + n, ok = parseUint(t.Raw) + if !ok { + // fallback to a standard conversion + return uint64(t.Num) + } + } + return n + } +} + +// Float returns a float64 representation. +func (t Result) Float() float64 { + switch t.Type { + default: + return 0 + case True: + return 1 + case String: + n, _ := strconv.ParseFloat(t.Str, 64) + return n + case Number: + return t.Num + } +} + +// Time returns a time.Time representation.
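+//
+// For example (illustrative):
+//
+//	res, _ := Parse(`{"at": "2006-01-02T15:04:05Z"}`)
+//	when := res.Lookup("at").Time() // parsed as time.RFC3339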
+func (t Result) Time() time.Time { + res, _ := time.Parse(time.RFC3339, t.String()) + return res +} + +// Array returns back an array of values. +// If the result represents a non-existent value, then an empty array will be +// returned. If the result is not a JSON array, the return value will be an +// array containing one result. +func (t Result) Array() []Result { + if t.Type == Null { + return []Result{} + } + if t.Type != JSON { + return []Result{t} + } + r := t.arrayOrMap('[', false) + return r.a +} + +// IsObject returns true if the result value is a JSON object. +func (t Result) IsObject() bool { + return t.Type == JSON && len(t.Raw) > 0 && t.Raw[0] == '{' +} + +// IsArray returns true if the result value is a JSON array. +func (t Result) IsArray() bool { + return t.Type == JSON && len(t.Raw) > 0 && t.Raw[0] == '[' +} + +// Exists returns true if value exists. +// +// if gjson.Get(json, "name.last").Exists(){ +// println("value exists") +// } +func (t Result) Exists() bool { + return t.Type != Null || len(t.Raw) != 0 +} + +// Lookup iterates through the document looking for a matching key. +func (t Result) Lookup(key string) Result { + var res Result + t.ForEach(func(k, v Result) bool { + if k.Str == key { + res = v + return false + } + return true + }) + + return res +} + +func (t Result) FirstKey(key string) bool { + var seen bool + t.ForEach(func(k, v Result) bool { + seen = (k.Str == key) + return false + }) + + return seen +} + +// ForEach iterates through values. +// If the result represents a non-existent value, then no values will be +// iterated. If the result is an Object, the iterator will pass the key and +// value of each item. If the result is an Array, the iterator will only pass +// the value of each item. If the result is not a JSON array or object, the +// iterator will pass back one value equal to the result. +func (t Result) ForEach(iterator func(key, value Result) bool) { + if !t.Exists() { + return + } + if t.Type != JSON { + iterator(Result{}, t) + return + } + json := t.Raw + var keys bool + var i int + var key, value Result + for ; i < len(json); i++ { + if json[i] == '{' { + i++ + key.Type = String + keys = true + break + } else if json[i] == '[' { + i++ + break + } + if json[i] > ' ' { + return + } + } + var str string + var vesc bool + var ok bool + for ; i < len(json); i++ { + if keys { + if json[i] != '"' { + continue + } + s := i + i, str, vesc, ok = parseString(json, i+1) + if !ok { + return + } + if vesc { + key.Str = unescape(str[1 : len(str)-1]) + } else { + key.Str = str[1 : len(str)-1] + } + key.Raw = str + key.Index = s + } + for ; i < len(json); i++ { + if json[i] <= ' ' || json[i] == ',' || json[i] == ':' { + continue + } + break + } + s := i + i, value, ok = parseAny(json, i, true) + if !ok { + return + } + value.Index = s + if !iterator(key, value) { + return + } + } +} + +// Map returns back a map of values. The result should be a JSON object.
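+//
+// For example (illustrative):
+//
+//	res, _ := Parse(`{"a": 1, "b": true}`)
+//	m := res.Map() // m["a"].Num == 1, m["b"].Bool() == true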
+func (t Result) Map() map[string]Result { + if t.Type != JSON { + return map[string]Result{} + } + r := t.arrayOrMap('{', false) + return r.o +} + +type arrayOrMapResult struct { + a []Result + ai []interface{} + o map[string]Result + oi map[string]interface{} + vc byte +} + +func (t Result) arrayOrMap(vc byte, valueize bool) (r arrayOrMapResult) { + var json = t.Raw + var i int + var value Result + var count int + var key Result + if vc == 0 { + for ; i < len(json); i++ { + if json[i] == '{' || json[i] == '[' { + r.vc = json[i] + i++ + break + } + if json[i] > ' ' { + goto end + } + } + } else { + for ; i < len(json); i++ { + if json[i] == vc { + i++ + break + } + if json[i] > ' ' { + goto end + } + } + r.vc = vc + } + if r.vc == '{' { + if valueize { + r.oi = make(map[string]interface{}) + } else { + r.o = make(map[string]Result) + } + } else { + if valueize { + r.ai = make([]interface{}, 0) + } else { + r.a = make([]Result, 0) + } + } + for ; i < len(json); i++ { + if json[i] <= ' ' { + continue + } + // get next value + if json[i] == ']' || json[i] == '}' { + break + } + switch json[i] { + default: + if (json[i] >= '0' && json[i] <= '9') || json[i] == '-' { + value.Type = Number + value.Raw, value.Num = tonum(json[i:]) + value.Str = "" + } else { + continue + } + case '{', '[': + value.Type = JSON + value.Raw = squash(json[i:]) + value.Str, value.Num = "", 0 + case 'n': + value.Type = Null + value.Raw = tolit(json[i:]) + value.Str, value.Num = "", 0 + case 't': + value.Type = True + value.Raw = tolit(json[i:]) + value.Str, value.Num = "", 0 + case 'f': + value.Type = False + value.Raw = tolit(json[i:]) + value.Str, value.Num = "", 0 + case '"': + value.Type = String + value.Raw, value.Str = tostr(json[i:]) + value.Num = 0 + } + i += len(value.Raw) - 1 + + if r.vc == '{' { + if count%2 == 0 { + key = value + } else { + if valueize { + if _, ok := r.oi[key.Str]; !ok { + r.oi[key.Str] = value.Value() + } + } else { + if _, ok := r.o[key.Str]; !ok { + r.o[key.Str] = value + } + } + } + count++ + } else { + if valueize { + r.ai = append(r.ai, value.Value()) + } else { + r.a = append(r.a, value) + } + } + } +end: + return +} + +// Parse parses the json and returns a result. +func Parse(json string) (Result, error) { + var value Result + + if !validateJSON([]byte(json)) { + return value, errors.New("invalid json") + } + + for i := 0; i < len(json); i++ { + if json[i] == '{' || json[i] == '[' { + value.Type = JSON + value.Raw = json[i:] // just take the entire raw + break + } + if json[i] <= ' ' { + continue + } + switch json[i] { + default: + if (json[i] >= '0' && json[i] <= '9') || json[i] == '-' { + value.Type = Number + value.Raw, value.Num = tonum(json[i:]) + } else { + return Result{}, nil + } + case 'n': + value.Type = Null + value.Raw = tolit(json[i:]) + case 't': + value.Type = True + value.Raw = tolit(json[i:]) + case 'f': + value.Type = False + value.Raw = tolit(json[i:]) + case '"': + value.Type = String + value.Raw, value.Str = tostr(json[i:]) + } + break + } + return value, nil +} + +// ParseBytes parses the json and returns a result. +// If working with bytes, this method is preferred over Parse(string(data)). +func ParseBytes(json []byte) (Result, error) { + return Parse(string(json)) +} + +func squash(json string) string { + // expects that the lead character is a '[' or '{' or '(' or '"' + // squash the value, ignoring all nested arrays and objects.
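+ // For example (illustrative): squash(`{"a": {"b": 1}} tail`) returns
+ // `{"a": {"b": 1}}`.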
+ var i, depth int + if json[0] != '"' { + i, depth = 1, 1 + } + for ; i < len(json); i++ { + if json[i] >= '"' && json[i] <= '}' { + switch json[i] { + case '"': + i++ + s2 := i + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > s2-1; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + break + } + } + if depth == 0 { + return json[:i+1] + } + case '{', '[', '(': + depth++ + case '}', ']', ')': + depth-- + if depth == 0 { + return json[:i+1] + } + } + } + } + return json +} + +func tonum(json string) (raw string, num float64) { + for i := 1; i < len(json); i++ { + // less than dash might have valid characters + if json[i] <= '-' { + if json[i] <= ' ' || json[i] == ',' { + // break on whitespace and comma + raw = json[:i] + num, _ = strconv.ParseFloat(raw, 64) + return + } + // could be a '+' or '-'. let's assume so. + continue + } + if json[i] < ']' { + // probably a valid number + continue + } + if json[i] == 'e' || json[i] == 'E' { + // allow for exponential numbers + continue + } + // likely a ']' or '}' + raw = json[:i] + num, _ = strconv.ParseFloat(raw, 64) + return + } + raw = json + num, _ = strconv.ParseFloat(raw, 64) + return +} + +func tolit(json string) (raw string) { + for i := 1; i < len(json); i++ { + if json[i] < 'a' || json[i] > 'z' { + return json[:i] + } + } + return json +} + +func tostr(json string) (raw string, str string) { + // expects that the lead character is a '"' + for i := 1; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + return json[:i+1], json[1:i] + } + if json[i] == '\\' { + i++ + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > 0; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + break + } + } + var ret string + if i+1 < len(json) { + ret = json[:i+1] + } else { + ret = json[:i] + } + return ret, unescape(json[1:i]) + } + } + return json, json[1:] +} + +// Value returns one of these types: +// +// bool, for JSON booleans +// float64, for JSON numbers +// Number, for JSON numbers +// string, for JSON string literals +// nil, for JSON null +// map[string]interface{}, for JSON objects +// []interface{}, for JSON arrays +// +func (t Result) Value() interface{} { + if t.Type == String { + return t.Str + } + switch t.Type { + default: + return nil + case False: + return false + case Number: + return t.Num + case JSON: + r := t.arrayOrMap(0, true) + if r.vc == '{' { + return r.oi + } else if r.vc == '[' { + return r.ai + } + return nil + case True: + return true + } +} + +func parseString(json string, i int) (int, string, bool, bool) { + var s = i + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + return i + 1, json[s-1 : i+1], false, true + } + if json[i] == '\\' { + i++ + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > 0; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + return i + 1, json[s-1 : i+1], true, true + } + } + break + } + } + return i, json[s-1:], false, false +} + +func parseNumber(json string, i int) (int, string) { + var s = i + i++ + for ; i < len(json); i++ { + if json[i] <= ' ' || json[i] == ',' 
|| json[i] == ']' || + json[i] == '}' { + return i, json[s:i] + } + } + return i, json[s:] +} + +func parseLiteral(json string, i int) (int, string) { + var s = i + i++ + for ; i < len(json); i++ { + if json[i] < 'a' || json[i] > 'z' { + return i, json[s:i] + } + } + return i, json[s:] +} + +type arrayPathResult struct { + part string + path string + pipe string + piped bool + more bool + alogok bool + arrch bool + alogkey string + query struct { + on bool + path string + op string + value string + all bool + } +} + +func trim(s string) string { +left: + if len(s) > 0 && s[0] <= ' ' { + s = s[1:] + goto left + } +right: + if len(s) > 0 && s[len(s)-1] <= ' ' { + s = s[:len(s)-1] + goto right + } + return s +} + +// ParseLines iterates through lines of JSON as specified by the JSON Lines +// format (http://jsonlines.org/). +// Each line is returned as a GJSON Result. +func ParseLines(json string) []Result { + res := []Result{} + forEachLine(json, func(line Result) bool { + res = append(res, line) + return true + }) + return res +} + +func forEachLine(json string, iterator func(line Result) bool) { + var res Result + var i int + for { + i, res, _ = parseAny(json, i, true) + if !res.Exists() { + break + } + if !iterator(res) { + return + } + } +} + +type subSelector struct { + name string + path string +} + +// nameOfLast returns the name of the last component +func nameOfLast(path string) string { + for i := len(path) - 1; i >= 0; i-- { + if path[i] == '|' || path[i] == '.' { + if i > 0 { + if path[i-1] == '\\' { + continue + } + } + return path[i+1:] + } + } + return path +} + +func isSimpleName(component string) bool { + for i := 0; i < len(component); i++ { + if component[i] < ' ' { + return false + } + switch component[i] { + case '[', ']', '{', '}', '(', ')', '#', '|': + return false + } + } + return true +} + +func appendJSONString(dst []byte, s string) []byte { + for i := 0; i < len(s); i++ { + if s[i] < ' ' || s[i] == '\\' || s[i] == '"' || s[i] > 126 { + d, _ := json.Marshal(s) + return append(dst, string(d)...) + } + } + dst = append(dst, '"') + dst = append(dst, s...) 
+	dst = append(dst, '"')
+	return dst
+}
+
+type parseContext struct {
+	json  string
+	value Result
+	pipe  string
+	piped bool
+	calcd bool
+	lines bool
+}
+
+// runeit returns the rune from the \uXXXX escape sequence.
+func runeit(json string) rune {
+	n, _ := strconv.ParseUint(json[:4], 16, 64)
+	return rune(n)
+}
+
+// unescape unescapes a string
+func unescape(json string) string {
+	var str = make([]byte, 0, len(json))
+	for i := 0; i < len(json); i++ {
+		switch {
+		default:
+			str = append(str, json[i])
+		case json[i] < ' ':
+			return string(str)
+		case json[i] == '\\':
+			i++
+			if i >= len(json) {
+				return string(str)
+			}
+			switch json[i] {
+			default:
+				return string(str)
+			case '\\':
+				str = append(str, '\\')
+			case '/':
+				str = append(str, '/')
+			case 'b':
+				str = append(str, '\b')
+			case 'f':
+				str = append(str, '\f')
+			case 'n':
+				str = append(str, '\n')
+			case 'r':
+				str = append(str, '\r')
+			case 't':
+				str = append(str, '\t')
+			case '"':
+				str = append(str, '"')
+			case 'u':
+				if i+5 > len(json) {
+					return string(str)
+				}
+				r := runeit(json[i+1:])
+				i += 5
+				if utf16.IsSurrogate(r) {
+					// need another code
+					if len(json[i:]) >= 6 && json[i] == '\\' &&
+						json[i+1] == 'u' {
+						// we expect it to be correct so just consume it
+						r = utf16.DecodeRune(r, runeit(json[i+2:]))
+						i += 6
+					}
+				}
+				// provide enough space to encode the largest utf8 possible
+				str = append(str, 0, 0, 0, 0, 0, 0, 0, 0)
+				n := utf8.EncodeRune(str[len(str)-8:], r)
+				str = str[:len(str)-8+n]
+				i-- // backtrack index by one
+			}
+		}
+	}
+	return string(str)
+}
+
+// parseAny parses the next value from a json string.
+// A Result is returned when the hit param is set.
+// The return values are (i int, res Result, ok bool)
+func parseAny(json string, i int, hit bool) (int, Result, bool) {
+	var res Result
+	var val string
+	for ; i < len(json); i++ {
+		if json[i] == '{' || json[i] == '[' {
+			i, val = parseSquash(json, i)
+			if hit {
+				res.Raw = val
+				res.Type = JSON
+			}
+			return i, res, true
+		}
+		if json[i] <= ' ' {
+			continue
+		}
+		switch json[i] {
+		case '"':
+			i++
+			var vesc bool
+			var ok bool
+			i, val, vesc, ok = parseString(json, i)
+			if !ok {
+				return i, res, false
+			}
+			if hit {
+				res.Type = String
+				res.Raw = val
+				if vesc {
+					res.Str = unescape(val[1 : len(val)-1])
+				} else {
+					res.Str = val[1 : len(val)-1]
+				}
+			}
+			return i, res, true
+		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			i, val = parseNumber(json, i)
+			if hit {
+				res.Raw = val
+				res.Type = Number
+				res.Num, _ = strconv.ParseFloat(val, 64)
+			}
+			return i, res, true
+		case 't', 'f', 'n':
+			vc := json[i]
+			i, val = parseLiteral(json, i)
+			if hit {
+				res.Raw = val
+				switch vc {
+				case 't':
+					res.Type = True
+				case 'f':
+					res.Type = False
+				}
+			}
+			// return unconditionally: the literal has been consumed even
+			// when hit is unset (the zero Type is Null, covering 'n').
+			return i, res, true
+		}
+	}
+	return i, res, false
+}
+
+func parseSquash(json string, i int) (int, string) {
+	// expects that the lead character is a '[' or '{' or '('
+	// squash the value, ignoring all nested arrays and objects.
+ // the first '[' or '{' or '(' has already been read + s := i + i++ + depth := 1 + for ; i < len(json); i++ { + if json[i] >= '"' && json[i] <= '}' { + switch json[i] { + case '"': + i++ + s2 := i + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > s2-1; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + break + } + } + case '{', '[', '(': + depth++ + case '}', ']', ')': + depth-- + if depth == 0 { + i++ + return i, json[s:i] + } + } + } + } + return i, json[s:] +} + +var ( // used for testing + testWatchForFallback bool +) + +var validate uintptr = 1 + +func validpayload(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + i, ok = validany(data, i) + if !ok { + return i, false + } + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + } + } + return i, true + case ' ', '\t', '\n', '\r': + continue + } + } + return i, false +} +func validany(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case '{': + return validobject(data, i+1) + case '[': + return validarray(data, i+1) + case '"': + return validstring(data, i+1) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return validnumber(data, i+1) + case 't': + return validtrue(data, i+1) + case 'f': + return validfalse(data, i+1) + case 'n': + return validnull(data, i+1) + } + } + return i, false +} +func validobject(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case '}': + return i + 1, true + case '"': + key: + if i, ok = validstring(data, i+1); !ok { + return i, false + } + if i, ok = validcolon(data, i); !ok { + return i, false + } + if i, ok = validany(data, i); !ok { + return i, false + } + if i, ok = validcomma(data, i, '}'); !ok { + return i, false + } + if data[i] == '}' { + return i + 1, true + } + i++ + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case '"': + goto key + } + } + return i, false + } + } + return i, false +} +func validcolon(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case ':': + return i + 1, true + } + } + return i, false +} +func validcomma(data []byte, i int, end byte) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case ',': + return i, true + case end: + return i, true + } + } + return i, false +} +func validarray(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + for ; i < len(data); i++ { + if i, ok = validany(data, i); !ok { + return i, false + } + if i, ok = validcomma(data, i, ']'); !ok { + return i, false + } + if data[i] == ']' { + return i + 1, true + } + } + case ' ', '\t', '\n', '\r': + continue + case ']': + return i + 1, true + } + } + return i, false +} +func validstring(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + if data[i] < ' ' { + return i, false + } else if data[i] == '\\' { + i++ + if i == len(data) { + return i, false + } 
+			switch data[i] {
+			default:
+				return i, false
+			case '"', '\\', '/', 'b', 'f', 'n', 'r', 't':
+			case 'u':
+				for j := 0; j < 4; j++ {
+					i++
+					if i >= len(data) {
+						return i, false
+					}
+					if !((data[i] >= '0' && data[i] <= '9') ||
+						(data[i] >= 'a' && data[i] <= 'f') ||
+						(data[i] >= 'A' && data[i] <= 'F')) {
+						return i, false
+					}
+				}
+			}
+		} else if data[i] == '"' {
+			return i + 1, true
+		}
+	}
+	return i, false
+}
+func validnumber(data []byte, i int) (outi int, ok bool) {
+	i--
+	// sign
+	if data[i] == '-' {
+		i++
+	}
+	// int
+	if i == len(data) {
+		return i, false
+	}
+	if data[i] == '0' {
+		i++
+	} else {
+		for ; i < len(data); i++ {
+			if data[i] >= '0' && data[i] <= '9' {
+				continue
+			}
+			break
+		}
+	}
+	// frac
+	if i == len(data) {
+		return i, true
+	}
+	if data[i] == '.' {
+		i++
+		if i == len(data) {
+			return i, false
+		}
+		if data[i] < '0' || data[i] > '9' {
+			return i, false
+		}
+		i++
+		for ; i < len(data); i++ {
+			if data[i] >= '0' && data[i] <= '9' {
+				continue
+			}
+			break
+		}
+	}
+	// exp
+	if i == len(data) {
+		return i, true
+	}
+	if data[i] == 'e' || data[i] == 'E' {
+		i++
+		if i == len(data) {
+			return i, false
+		}
+		if data[i] == '+' || data[i] == '-' {
+			i++
+		}
+		if i == len(data) {
+			return i, false
+		}
+		if data[i] < '0' || data[i] > '9' {
+			return i, false
+		}
+		i++
+		for ; i < len(data); i++ {
+			if data[i] >= '0' && data[i] <= '9' {
+				continue
+			}
+			break
+		}
+	}
+	return i, true
+}
+
+func validtrue(data []byte, i int) (outi int, ok bool) {
+	if i+3 <= len(data) && data[i] == 'r' && data[i+1] == 'u' &&
+		data[i+2] == 'e' {
+		return i + 3, true
+	}
+	return i, false
+}
+func validfalse(data []byte, i int) (outi int, ok bool) {
+	if i+4 <= len(data) && data[i] == 'a' && data[i+1] == 'l' &&
+		data[i+2] == 's' && data[i+3] == 'e' {
+		return i + 4, true
+	}
+	return i, false
+}
+func validnull(data []byte, i int) (outi int, ok bool) {
+	if i+3 <= len(data) && data[i] == 'u' && data[i+1] == 'l' &&
+		data[i+2] == 'l' {
+		return i + 3, true
+	}
+	return i, false
+}
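+
+// A validation sketch (illustrative only): validpayload drives the valid*
+// state machine above and reports whether a complete JSON document, plus
+// optional surrounding whitespace, was consumed.
+//
+//	_, ok := validpayload([]byte(` {"a": [1, 2, null]} `), 0)
+//	// ok == true
+//	_, ok = validpayload([]byte(`{"a":}`), 0)
+//	// ok == false: a value must follow the colon
+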
+// validateJSON returns true if the input is valid json; Parse uses it to
+// reject malformed input before scanning:
+//
+//	if !validateJSON(data) {
+//		return errors.New("invalid json")
+//	}
+//
+func validateJSON(json []byte) bool {
+	_, ok := validpayload(json, 0)
+	return ok
+}
+
+func parseUint(s string) (n uint64, ok bool) {
+	var i int
+	if i == len(s) {
+		return 0, false
+	}
+	for ; i < len(s); i++ {
+		if s[i] >= '0' && s[i] <= '9' {
+			n = n*10 + uint64(s[i]-'0')
+		} else {
+			return 0, false
+		}
+	}
+	return n, true
+}
+
+func parseInt(s string) (n int64, ok bool) {
+	var i int
+	var sign bool
+	if len(s) > 0 && s[0] == '-' {
+		sign = true
+		i++
+	}
+	if i == len(s) {
+		return 0, false
+	}
+	for ; i < len(s); i++ {
+		if s[i] >= '0' && s[i] <= '9' {
+			n = n*10 + int64(s[i]-'0')
+		} else {
+			return 0, false
+		}
+	}
+	if sign {
+		return n * -1, true
+	}
+	return n, true
+}
+
+const minUint53 = 0
+const maxUint53 = 4503599627370495
+const minInt53 = -2251799813685248
+const maxInt53 = 2251799813685247
+
+func floatToUint(f float64) (n uint64, ok bool) {
+	n = uint64(f)
+	if float64(n) == f && n >= minUint53 && n <= maxUint53 {
+		return n, true
+	}
+	return 0, false
+}
+
+func floatToInt(f float64) (n int64, ok bool) {
+	n = int64(f)
+	if float64(n) == f && n >= minInt53 && n <= maxInt53 {
+		return n, true
+	}
+	return 0, false
+}
diff --git a/vendor/github.com/evergreen-ci/birch/jsonx/internal/gjson_test.go b/vendor/github.com/evergreen-ci/birch/jsonx/internal/gjson_test.go
new file mode 100644
index 00000000000..55ebf536a34
--- /dev/null
+++ b/vendor/github.com/evergreen-ci/birch/jsonx/internal/gjson_test.go
@@ -0,0 +1,538 @@
+package internal
+
+import (
+	"bytes"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"math/rand"
+	"strconv"
+	"testing"
+	"time"
+)
+
+// TestRandomData is a fuzzing test that throws random data at the Parse
+// function looking for panics.
+func TestRandomData(t *testing.T) {
+	var lstr string
+	defer func() {
+		if v := recover(); v != nil {
+			println("'" + hex.EncodeToString([]byte(lstr)) + "'")
+			println("'" + lstr + "'")
+			panic(v)
+		}
+	}()
+	rand.Seed(time.Now().UnixNano())
+	b := make([]byte, 200)
+	for i := 0; i < 2000000; i++ {
+		n, err := rand.Read(b[:rand.Int()%len(b)])
+		if err != nil {
+			t.Fatal(err)
+		}
+		lstr = string(b[:n])
+		Parse(lstr)
+	}
+}
+
+// basicJSON, defined below, is poorly formed on purpose.
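+//
+// By contrast, a well-formed document parses cleanly; a usage sketch
+// (illustrative only):
+//
+//	res := must(Parse(`{"name":{"first":"tom"},"tags":[1,2,3]}`))
+//	first := res.Lookup("name").Lookup("first").Str // "tom"
+//	for _, v := range res.Lookup("tags").Array() {
+//		_ = v.Int() // 1, 2, 3
+//	}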
+var basicJSON = `{"age":100, "name":{"here":"B\\\"R"}, + "noop":{"what is a wren?":"a bird"}, + "happy":true,"immortal":false, + "items":[1,2,3,{"tags":[1,2,3],"points":[[1,2],[3,4]]},4,5,6,7], + "arr":["1",2,"3",{"hello":"world"},"4",5], + "vals":[1,2,3,{"sadf":sdf"asdf"}],"name":{"first":"tom","last":null}, + "created":"2014-05-16T08:28:06.989Z", + "loggy":{ + "programmers": [ + { + "firstName": "Brett", + "lastName": "McLaughlin", + "email": "aaaa", + "tag": "good" + }, + { + "firstName": "Jason", + "lastName": "Hunter", + "email": "bbbb", + "tag": "bad" + }, + { + "firstName": "Elliotte", + "lastName": "Harold", + "email": "cccc", + "tag":, "good" + }, + { + "firstName": 1002.3, + "age": 101 + } + ] + }, + "lastly":{"yay":"final"} +}` +var basicJSONB = []byte(basicJSON) + +func must(res Result, err error) Result { + if err != nil { + panic(err) + } + + return res +} + +func TestParseAny(t *testing.T) { + assert(t, must(Parse("100")).Float() == 100) + assert(t, must(Parse("true")).Bool()) + assert(t, must(Parse("false")).Bool() == false) +} + +func TestTypes(t *testing.T) { + assert(t, (Result{Type: String}).Type.String() == "String") + assert(t, (Result{Type: Number}).Type.String() == "Number") + assert(t, (Result{Type: Null}).Type.String() == "Null") + assert(t, (Result{Type: False}).Type.String() == "False") + assert(t, (Result{Type: True}).Type.String() == "True") + assert(t, (Result{Type: JSON}).Type.String() == "JSON") + assert(t, (Result{Type: 100}).Type.String() == "") + // bool + assert(t, (Result{Type: String, Str: "true"}).Bool()) + assert(t, (Result{Type: True}).Bool()) + assert(t, (Result{Type: False}).Bool() == false) + assert(t, (Result{Type: Number, Num: 1}).Bool()) + // int + assert(t, (Result{Type: String, Str: "1"}).Int() == 1) + assert(t, (Result{Type: True}).Int() == 1) + assert(t, (Result{Type: False}).Int() == 0) + assert(t, (Result{Type: Number, Num: 1}).Int() == 1) + // uint + assert(t, (Result{Type: String, Str: "1"}).Uint() == 1) + assert(t, (Result{Type: True}).Uint() == 1) + assert(t, (Result{Type: False}).Uint() == 0) + assert(t, (Result{Type: Number, Num: 1}).Uint() == 1) + // float + assert(t, (Result{Type: String, Str: "1"}).Float() == 1) + assert(t, (Result{Type: True}).Float() == 1) + assert(t, (Result{Type: False}).Float() == 0) + assert(t, (Result{Type: Number, Num: 1}).Float() == 1) +} +func TestForEach(t *testing.T) { + Result{}.ForEach(nil) + Result{Type: String, Str: "Hello"}.ForEach(func(_, value Result) bool { + assert(t, value.String() == "Hello") + return false + }) + Result{Type: JSON, Raw: "*invalid*"}.ForEach(nil) + + json := ` {"name": {"first": "Janet","last": "Prichard"}, + "asd\nf":"\ud83d\udd13","age": 47}` + var count int + must(ParseBytes([]byte(json))).ForEach(func(key, value Result) bool { + count++ + return true + }) + assert(t, count == 3) +} +func TestMap(t *testing.T) { + assert(t, len(must(ParseBytes([]byte(`"asdf"`))).Map()) == 0) + assert(t, len(Result{Type: JSON, Raw: "**invalid**"}.Map()) == 0) + assert(t, Result{Type: JSON, Raw: "**invalid**"}.Value() == nil) + assert(t, Result{Type: JSON, Raw: "{"}.Map() != nil) +} +func TestUnescape(t *testing.T) { + unescape(string([]byte{'\\', '\\', 0})) + unescape(string([]byte{'\\', '/', '\\', 'b', '\\', 'f'})) +} +func assert(t testing.TB, cond bool) { + if !cond { + panic("assert failed") + } +} + +var exampleJSON = `{ + "widget": { + "debug": "on", + "window": { + "title": "Sample Konfabulator Widget", + "name": "main_window", + "width": 500, + "height": 500 + }, + "image": { + 
"src": "Images/Sun.png", + "hOffset": 250, + "vOffset": 250, + "alignment": "center" + }, + "text": { + "data": "Click Here", + "size": 36, + "style": "bold", + "vOffset": 100, + "alignment": "center", + "onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;" + } + } +}` + +func TestNewParse(t *testing.T) { + //fmt.Printf("%v\n", parse2(exampleJSON, "widget").String()) +} + +func TestUnmarshalMap(t *testing.T) { + var m1 = must(Parse(exampleJSON)).Value().(map[string]interface{}) + var m2 map[string]interface{} + if err := json.Unmarshal([]byte(exampleJSON), &m2); err != nil { + t.Fatal(err) + } + b1, err := json.Marshal(m1) + if err != nil { + t.Fatal(err) + } + b2, err := json.Marshal(m2) + if err != nil { + t.Fatal(err) + } + if bytes.Compare(b1, b2) != 0 { + t.Fatal("b1 != b2") + } +} + +var manyJSON = ` { + "a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{ + "a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{ + "a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{ + "a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{ + "a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{ + "a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{ + "a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"hello":"world" + }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}} + "position":{"type":"Point","coordinates":[-115.24,33.09]}, + "loves":["world peace"], + "name":{"last":"Anderson","first":"Nancy"}, + "age":31 + "":{"a":"emptya","b":"emptyb"}, + "name.last":"Yellow", + "name.first":"Cat", +}` + +func combine(results []Result) string { + return fmt.Sprintf("%v", results) +} + +type ComplicatedType struct { + unsettable int + Tagged string `json:"tagged"` + NotTagged bool + Nested struct { + Yellow string `json:"yellow"` + } + NestedTagged struct { + Green string + Map map[string]interface{} + Ints struct { + Int int `json:"int"` + Int8 int8 + Int16 int16 + Int32 int32 + Int64 int64 `json:"int64"` + } + Uints struct { + Uint uint + Uint8 uint8 + Uint16 uint16 + Uint32 uint32 + Uint64 uint64 + } + Floats struct { + Float64 float64 + Float32 float32 + } + Byte byte + Bool bool + } `json:"nestedTagged"` + LeftOut string `json:"-"` + SelfPtr *ComplicatedType + SelfSlice []ComplicatedType + SelfSlicePtr []*ComplicatedType + SelfPtrSlice *[]ComplicatedType + Interface interface{} `json:"interface"` + Array [3]int + Time time.Time `json:"time"` + Binary []byte + NonBinary []byte +} + +var complicatedJSON = ` +{ + "tagged": "OK", + "Tagged": "KO", + "NotTagged": true, + "unsettable": 101, + "Nested": { + "Yellow": "Green", + "yellow": "yellow" + }, + "nestedTagged": { + "Green": "Green", + "Map": { + "this": "that", + "and": "the other thing" + }, + "Ints": { + "Uint": 99, + "Uint16": 16, + "Uint32": 32, + "Uint64": 65 + }, + "Uints": { + "int": -99, + "Int": -98, + "Int16": -16, + "Int32": -32, + "int64": -64, + "Int64": -65 + }, + "Uints": { + "Float32": 32.32, + "Float64": 64.64 + }, + "Byte": 254, + "Bool": true + }, + "LeftOut": "you shouldn't be here", + "SelfPtr": {"tagged":"OK","nestedTagged":{"Ints":{"Uint32":32}}}, + "SelfSlice": [{"tagged":"OK","nestedTagged":{"Ints":{"Uint32":32}}}], + "SelfSlicePtr": [{"tagged":"OK","nestedTagged":{"Ints":{"Uint32":32}}}], + "SelfPtrSlice": [{"tagged":"OK","nestedTagged":{"Ints":{"Uint32":32}}}], + "interface": "Tile38 Rocks!", + "Interface": "Please Download", + "Array": [0,2,3,4,5], + "time": "2017-05-07T13:24:43-07:00", + "Binary": "R0lGODlhPQBEAPeo", + "NonBinary": [9,3,100,115] +} +` + +func testvalid(t *testing.T, json string, expect bool) { + t.Helper() 
+ _, ok := validpayload([]byte(json), 0) + if ok != expect { + t.Fatal("mismatch") + } +} + +func TestValidBasic(t *testing.T) { + testvalid(t, "0", true) + testvalid(t, "00", false) + testvalid(t, "-00", false) + testvalid(t, "-.", false) + testvalid(t, "0.0", true) + testvalid(t, "10.0", true) + testvalid(t, "10e1", true) + testvalid(t, "10EE", false) + testvalid(t, "10E-", false) + testvalid(t, "10E+", false) + testvalid(t, "10E123", true) + testvalid(t, "10E-123", true) + testvalid(t, "10E-0123", true) + testvalid(t, "", false) + testvalid(t, " ", false) + testvalid(t, "{}", true) + testvalid(t, "{", false) + testvalid(t, "-", false) + testvalid(t, "-1", true) + testvalid(t, "-1.", false) + testvalid(t, "-1.0", true) + testvalid(t, " -1.0", true) + testvalid(t, " -1.0 ", true) + testvalid(t, "-1.0 ", true) + testvalid(t, "-1.0 i", false) + testvalid(t, "-1.0 i", false) + testvalid(t, "true", true) + testvalid(t, " true", true) + testvalid(t, " true ", true) + testvalid(t, " True ", false) + testvalid(t, " tru", false) + testvalid(t, "false", true) + testvalid(t, " false", true) + testvalid(t, " false ", true) + testvalid(t, " False ", false) + testvalid(t, " fals", false) + testvalid(t, "null", true) + testvalid(t, " null", true) + testvalid(t, " null ", true) + testvalid(t, " Null ", false) + testvalid(t, " nul", false) + testvalid(t, " []", true) + testvalid(t, " [true]", true) + testvalid(t, " [ true, null ]", true) + testvalid(t, " [ true,]", false) + testvalid(t, `{"hello":"world"}`, true) + testvalid(t, `{ "hello": "world" }`, true) + testvalid(t, `{ "hello": "world", }`, false) + testvalid(t, `{"a":"b",}`, false) + testvalid(t, `{"a":"b","a"}`, false) + testvalid(t, `{"a":"b","a":}`, false) + testvalid(t, `{"a":"b","a":1}`, true) + testvalid(t, `{"a":"b",2"1":2}`, false) + testvalid(t, `{"a":"b","a": 1, "c":{"hi":"there"} }`, true) + testvalid(t, `{"a":"b","a": 1, "c":{"hi":"there", "easy":["going",`+ + `{"mixed":"bag"}]} }`, true) + testvalid(t, `""`, true) + testvalid(t, `"`, false) + testvalid(t, `"\n"`, true) + testvalid(t, `"\"`, false) + testvalid(t, `"\\"`, true) + testvalid(t, `"a\\b"`, true) + testvalid(t, `"a\\b\\\"a"`, true) + testvalid(t, `"a\\b\\\uFFAAa"`, true) + testvalid(t, `"a\\b\\\uFFAZa"`, false) + testvalid(t, `"a\\b\\\uFFA"`, false) + testvalid(t, string(complicatedJSON), true) + testvalid(t, string(exampleJSON), true) +} + +var jsonchars = []string{"{", "[", ",", ":", "}", "]", "1", "0", "true", + "false", "null", `""`, `"\""`, `"a"`} + +func makeRandomJSONChars(b []byte) { + var bb []byte + for len(bb) < len(b) { + bb = append(bb, jsonchars[rand.Int()%len(jsonchars)]...) 
+ } + copy(b, bb[:len(b)]) +} + +func TestValidRandom(t *testing.T) { + rand.Seed(time.Now().UnixNano()) + b := make([]byte, 100000) + start := time.Now() + for time.Since(start) < time.Second*3 { + n := rand.Int() % len(b) + rand.Read(b[:n]) + validpayload(b[:n], 0) + } + + start = time.Now() + for time.Since(start) < time.Second*3 { + n := rand.Int() % len(b) + makeRandomJSONChars(b[:n]) + validpayload(b[:n], 0) + } +} + +func TestResultRawForLiteral(t *testing.T) { + for _, lit := range []string{"null", "true", "false"} { + result := must(Parse(lit)) + if result.Raw != lit { + t.Fatalf("expected '%v', got '%v'", lit, result.Raw) + } + } +} + +func randomString() string { + var key string + N := 1 + rand.Int()%16 + for i := 0; i < N; i++ { + r := rand.Int() % 62 + if r < 10 { + key += string(byte('0' + r)) + } else if r-10 < 26 { + key += string(byte('a' + r - 10)) + } else { + key += string(byte('A' + r - 10 - 26)) + } + } + return `"` + key + `"` +} +func randomBool() string { + switch rand.Int() % 2 { + default: + return "false" + case 1: + return "true" + } +} +func randomNumber() string { + return strconv.FormatInt(int64(rand.Int()%1000000), 10) +} + +func randomObjectOrArray(keys []string, prefix string, array bool, depth int) ( + string, []string) { + N := 5 + rand.Int()%5 + var json string + if array { + json = "[" + } else { + json = "{" + } + for i := 0; i < N; i++ { + if i > 0 { + json += "," + } + var pkey string + if array { + pkey = prefix + "." + strconv.FormatInt(int64(i), 10) + } else { + key := randomString() + pkey = prefix + "." + key[1:len(key)-1] + json += key + `:` + } + keys = append(keys, pkey[1:]) + var kind int + if depth == 5 { + kind = rand.Int() % 4 + } else { + kind = rand.Int() % 6 + } + switch kind { + case 0: + json += randomString() + case 1: + json += randomBool() + case 2: + json += "null" + case 3: + json += randomNumber() + case 4: + var njson string + njson, keys = randomObjectOrArray(keys, pkey, true, depth+1) + json += njson + case 5: + var njson string + njson, keys = randomObjectOrArray(keys, pkey, false, depth+1) + json += njson + } + + } + if array { + json += "]" + } else { + json += "}" + } + return json, keys +} + +func randomJSON() (json string, keys []string) { + return randomObjectOrArray(nil, "", false, 0) +} + +func BenchmarkValid(b *testing.B) { + for i := 0; i < b.N; i++ { + validateJSON([]byte(complicatedJSON)) + } +} + +func BenchmarkValidBytes(b *testing.B) { + complicatedJSON := []byte(complicatedJSON) + for i := 0; i < b.N; i++ { + validateJSON(complicatedJSON) + } +} + +func BenchmarkGoStdlibValidBytes(b *testing.B) { + complicatedJSON := []byte(complicatedJSON) + for i := 0; i < b.N; i++ { + json.Valid(complicatedJSON) + } +} diff --git a/vendor/github.com/evergreen-ci/birch/jsonx/iterator.go b/vendor/github.com/evergreen-ci/birch/jsonx/iterator.go new file mode 100644 index 00000000000..f4522deecc4 --- /dev/null +++ b/vendor/github.com/evergreen-ci/birch/jsonx/iterator.go @@ -0,0 +1,50 @@ +package jsonx + +type Iterator interface { + Err() error + Next() bool + Element() *Element + Value() *Value +} + +type documentIterImpl struct { + idx int + doc *Document + current *Element + err error +} + +func (iter *documentIterImpl) Next() bool { + if iter.idx+1 > iter.doc.Len() { + return false + } + + iter.current = iter.doc.elems[iter.idx].Copy() + iter.idx++ + return true +} + +func (iter *documentIterImpl) Element() *Element { return iter.current } +func (iter *documentIterImpl) Value() *Value { return iter.current.value } +func 
(iter *documentIterImpl) Err() error { return nil }
+
+type arrayIterImpl struct {
+	idx     int
+	array   *Array
+	current *Value
+	err     error
+}
+
+func (iter *arrayIterImpl) Next() bool {
+	if iter.idx+1 > iter.array.Len() {
+		return false
+	}
+
+	iter.current = iter.array.elems[iter.idx].Copy()
+	iter.idx++
+	return true
+}
+
+func (iter *arrayIterImpl) Element() *Element { return &Element{value: iter.current} }
+func (iter *arrayIterImpl) Value() *Value     { return iter.current }
+func (iter *arrayIterImpl) Err() error        { return nil }
diff --git a/vendor/github.com/evergreen-ci/birch/jsonx/juniper.go b/vendor/github.com/evergreen-ci/birch/jsonx/juniper.go
new file mode 100644
index 00000000000..6cc29d4a09c
--- /dev/null
+++ b/vendor/github.com/evergreen-ci/birch/jsonx/juniper.go
@@ -0,0 +1,86 @@
+package jsonx
+
+// Document is an ordered collection of key/value elements representing a
+// JSON object.
+type Document struct {
+	elems []*Element
+}
+
+func (d *Document) Append(elems ...*Element) *Document { d.elems = append(d.elems, elems...); return d }
+func (d *Document) Len() int                           { return len(d.elems) }
+func (d *Document) Iterator() Iterator                 { return &documentIterImpl{doc: d} }
+func (d *Document) Copy() *Document {
+	nd := DC.Make(d.Len())
+	for _, elem := range d.elems {
+		nd.Append(elem.Copy())
+	}
+	return nd
+}
+
+// KeyAtIndex returns the key of the element at idx, or the empty string if
+// idx is out of range.
+func (d *Document) KeyAtIndex(idx int) string {
+	elem := d.ElementAtIndex(idx)
+	if elem == nil {
+		return ""
+	}
+
+	return elem.Key()
+}
+
+// ElementAtIndex returns the element at idx, or nil if idx is out of range.
+func (d *Document) ElementAtIndex(idx int) *Element {
+	if idx+1 > d.Len() {
+		return nil
+	}
+
+	return d.elems[idx]
+}
+
+// Array is an ordered sequence of values representing a JSON array.
+type Array struct {
+	elems []*Value
+}
+
+func (a *Array) Append(vals ...*Value) *Array { a.elems = append(a.elems, vals...); return a }
+func (a *Array) Len() int                     { return len(a.elems) }
+func (a *Array) Iterator() Iterator           { return &arrayIterImpl{array: a} }
+
+func (a *Array) Copy() *Array {
+	na := AC.Make(a.Len())
+	for _, elem := range a.elems {
+		na.Append(elem.Copy())
+	}
+	return na
+}
+
+// Element is a single key/value member of a Document.
+type Element struct {
+	key   string
+	value *Value
+}
+
+func (e *Element) Key() string             { return e.key }
+func (e *Element) Value() *Value           { return e.value }
+func (e *Element) ValueOK() (*Value, bool) { return e.value, e.value != nil }
+func (e *Element) Copy() *Element          { return EC.Value(e.key, e.value.Copy()) }
+
+// Value holds a single typed JSON value.
+type Value struct {
+	t     Type
+	value interface{}
+}
+
+func (v *Value) Type() Type                    { return v.t }
+func (v *Value) Interface() interface{}        { return v.value }
+func (v *Value) StringValue() string           { return v.value.(string) }
+func (v *Value) Array() *Array                 { return v.value.(*Array) }
+func (v *Value) Document() *Document           { return v.value.(*Document) }
+func (v *Value) Boolean() bool                 { return v.value.(bool) }
+func (v *Value) Int() int                      { return v.value.(int) }
+func (v *Value) Float64() float64              { return v.value.(float64) }
+func (v *Value) StringValueOK() (string, bool) { out, ok := v.value.(string); return out, ok }
+func (v *Value) ArrayOK() (*Array, bool)       { out, ok := v.value.(*Array); return out, ok }
+func (v *Value) DocumentOK() (*Document, bool) { out, ok := v.value.(*Document); return out, ok }
+func (v *Value) BooleanOK() (bool, bool)       { out, ok := v.value.(bool); return out, ok }
+func (v *Value) IntOK() (int, bool)            { out, ok := v.value.(int); return out, ok }
+func (v *Value) Float64OK() (float64, bool)    { out, ok := v.value.(float64); return out, ok }
+
+func (v *Value) Copy() *Value {
+	return &Value{
+		t:     v.t,
+		value: v.value,
+	}
+}
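+
+// A construction/iteration sketch (illustrative only; DC, EC, AC, and VC
+// are the package's constructor helpers, defined elsewhere in jsonx):
+//
+//	doc := DC.New().Append(
+//		EC.Value("name", VC.String("tom")),
+//		EC.Value("age", VC.Int(47)),
+//	)
+//	iter := doc.Iterator()
+//	for iter.Next() {
+//		elem := iter.Element() // a copy; mutating it leaves doc intact
+//		_, _ = elem.Key(), elem.Value().Interface()
+//	}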
diff --git a/vendor/github.com/evergreen-ci/birch/jsonx/marhsal.go b/vendor/github.com/evergreen-ci/birch/jsonx/marhsal.go
new file mode 100644
index 00000000000..51322e29d5a
--- /dev/null
+++ b/vendor/github.com/evergreen-ci/birch/jsonx/marhsal.go
@@ -0,0 +1,103 @@
+package jsonx
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/pkg/errors"
+)
+
+// MarshalJSON implements json.Marshaler for Document.
+func (d *Document) MarshalJSON() ([]byte, error) {
+	if d == nil {
+		return nil, errors.New("cannot marshal nil document")
+	}
+
+	out := []byte{'{'}
+
+	first := true
+	for _, elem := range d.elems {
+		if !first {
+			out = append(out, ',')
+		}
+
+		out = append(out, []byte(fmt.Sprintf(`"%s":`, elem.key))...)
+
+		val, err := elem.value.MarshalJSON()
+		if err != nil {
+			return nil, errors.Wrapf(err, "problem marshaling value for key %s", elem.key)
+		}
+
+		out = append(out, val...)
+
+		first = false
+	}
+
+	return append(out, '}'), nil
+}
+
+// MarshalJSON implements json.Marshaler for Array.
+func (a *Array) MarshalJSON() ([]byte, error) {
+	if a == nil {
+		return nil, errors.New("cannot marshal nil array")
+	}
+
+	out := []byte{'['}
+
+	first := true
+	for idx, elem := range a.elems {
+		if !first {
+			out = append(out, ',')
+		}
+
+		val, err := elem.MarshalJSON()
+		if err != nil {
+			return nil, errors.Wrapf(err, "problem marshaling array value for index %d", idx)
+		}
+
+		out = append(out, val...)
+
+		first = false
+	}
+
+	return append(out, ']'), nil
+}
+
+// MarshalJSON implements json.Marshaler for Value.
+func (v *Value) MarshalJSON() ([]byte, error) {
+	if v == nil {
+		return nil, errors.New("cannot marshal nil value")
+	}
+
+	switch v.t {
+	case String:
+		return writeJSONString([]byte(fmt.Sprintf(`%s`, v.value))), nil
+	case NumberDouble, NumberInteger, Number:
+		switch v.value.(type) {
+		case int64, int32, int:
+			return []byte(fmt.Sprintf(`%d`, v.value)), nil
+		case float64, float32:
+			return []byte(fmt.Sprintf(`%f`, v.value)), nil
+		default:
+			return nil, errors.Errorf("unsupported number type %T", v.value)
+		}
+	case Null:
+		return []byte("null"), nil
+	case Bool:
+		switch bv := v.value.(type) {
+		case bool:
+			if bv {
+				return []byte("true"), nil
+			}
+			return []byte("false"), nil
+		default:
+			return nil, errors.Errorf("unsupported bool type %T", bv)
+		}
+	case ArrayValue, ObjectValue:
+		switch obj := v.value.(type) {
+		case json.Marshaler:
+			return obj.MarshalJSON()
+		default:
+			return nil, errors.Errorf("unsupported object value type %T", obj)
+		}
+	default:
+		return nil, errors.Errorf("unknown type=%s", v.t)
+	}
+}
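+
+// A marshalling sketch (illustrative only): Document, Array, and Value all
+// satisfy json.Marshaler, so they compose with encoding/json. Note that
+// floats render via %f, so 0.5 becomes 0.500000.
+//
+//	doc := DC.New().Append(
+//		EC.Value("ok", VC.Boolean(true)),
+//		EC.Value("score", VC.Float64(0.5)),
+//	)
+//	out, err := json.Marshal(doc)
+//	// out == []byte(`{"ok":true,"score":0.500000}`), err == nil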
diff --git a/vendor/github.com/evergreen-ci/birch/jsonx/types.go b/vendor/github.com/evergreen-ci/birch/jsonx/types.go
new file mode 100644
index 00000000000..27c53f33587
--- /dev/null
+++ b/vendor/github.com/evergreen-ci/birch/jsonx/types.go
@@ -0,0 +1,56 @@
+package jsonx
+
+// Type describes the type of a JSON value.
+type Type int
+
+const (
+	// String is a json string
+	String Type = iota
+
+	// Number is a json number.
+	Number
+
+	// Bool is a boolean value that is either true or false.
+	Bool
+
+	// Null is a null json value.
+	Null
+
+	// NumberInteger refers to integer values. This translates to a
+	// JSON number.
+	NumberInteger
+
+	// NumberDouble refers to a float/double value. This translates to a
+	// JSON number.
+	NumberDouble
+
+	// ObjectValues are json objects.
+	ObjectValue
+
+	// ArrayValues are json arrays.
+	ArrayValue
+)
+
+// String returns a string representation of the type.
+func (t Type) String() string {
+	switch t {
+	case Null:
+		return "null"
+	case Bool:
+		return "bool"
+	case Number:
+		return "number"
+	case String:
+		return "string"
+	case ObjectValue:
+		return "object"
+	case ArrayValue:
+		return "array"
+	case NumberInteger:
+		return "integer"
+	case NumberDouble:
+		return "double"
+	default:
+		return ""
+	}
+}
diff --git a/vendor/github.com/evergreen-ci/birch/jsonx/unmarshal.go b/vendor/github.com/evergreen-ci/birch/jsonx/unmarshal.go
new file mode 100644
index 00000000000..b2f1dcf49f7
--- /dev/null
+++ b/vendor/github.com/evergreen-ci/birch/jsonx/unmarshal.go
@@ -0,0 +1,131 @@
+package jsonx
+
+import (
+	"encoding/json"
+
+	"github.com/evergreen-ci/birch/jsonx/internal"
+	"github.com/pkg/errors"
+)
+
+// UnmarshalJSON implements json.Unmarshaler for Document.
+func (d *Document) UnmarshalJSON(in []byte) error {
+	res, err := internal.ParseBytes(in)
+	if err != nil {
+		return errors.Wrap(err, "problem parsing raw json")
+	}
+
+	if !res.IsObject() {
+		return errors.New("cannot unmarshal values or arrays into Documents")
+	}
+
+	res.ForEach(func(key, value internal.Result) bool {
+		var val *Value
+		val, err = getValueForResult(value)
+		if err != nil {
+			return false
+		}
+
+		d.Append(EC.Value(key.Str, val))
+		return true
+	})
+	if err != nil {
+		return errors.WithStack(err)
+	}
+
+	return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler for Array.
+func (a *Array) UnmarshalJSON(in []byte) error {
+	res, err := internal.ParseBytes(in)
+	if err != nil {
+		return errors.Wrap(err, "problem parsing raw json")
+	}
+
+	if !res.IsArray() {
+		return errors.New("cannot unmarshal a non-array into an Array")
+	}
+
+	for _, item := range res.Array() {
+		val, err := getValueForResult(item)
+		if err != nil {
+			return errors.WithStack(err)
+		}
+
+		a.Append(val)
+	}
+
+	return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler for Value.
+func (v *Value) UnmarshalJSON(in []byte) error {
+	res, err := internal.ParseBytes(in)
+	if err != nil {
+		return errors.Wrap(err, "problem parsing raw json")
+	}
+
+	out, err := getValueForResult(res)
+	if err != nil {
+		return errors.WithStack(err)
+	}
+
+	v.value = out.value
+	v.t = out.t
+	return nil
+}
+
+///////////////////////////////////
+//
+// Internal
+
+func getValueForResult(value internal.Result) (*Value, error) {
+	switch {
+	case value.Type == internal.String:
+		return VC.String(value.Str), nil
+	case value.Type == internal.Null:
+		return VC.Nil(), nil
+	case value.Type == internal.True:
+		return VC.Boolean(true), nil
+	case value.Type == internal.False:
+		return VC.Boolean(false), nil
+	case value.Type == internal.Number:
+		num := json.Number(value.String())
+		if igr, err := num.Int64(); err == nil {
+			return VC.Int(int(igr)), nil
+		} else if df, err := num.Float64(); err == nil {
+			return VC.Float64(df), nil
+		}
+
+		return nil, errors.Errorf("number value [%s] is invalid [%+v]", value.Raw, value)
+	case value.IsArray():
+		source := value.Array()
+		array := AC.Make(len(source))
+		for _, elem := range source {
+			val, err := getValueForResult(elem)
+			if err != nil {
+				return nil, errors.WithStack(err)
+			}
+
+			array.Append(val)
+		}
+
+		return VC.Array(array), nil
+	case value.IsObject():
+		var err error
+		doc := DC.New()
+		value.ForEach(func(key, value internal.Result) bool {
+			// assign to the outer err: declaring it with := here would
+			// shadow it and silently swallow failures from nested values.
+			var val *Value
+			val, err = getValueForResult(value)
+			if err != nil {
+				err = errors.Wrapf(err, "problem with subdocument at key %s", key.Str)
+				return false
+			}
+			doc.Append(EC.Value(key.Str, val))
+			return true
+		})
+		if err != nil {
+			return nil, errors.WithStack(err)
+		}
+
+		return VC.Object(doc), nil
+	default:
+		return nil, errors.Errorf("unknown json value type '%s'", value.Type)
+	}
}
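+
+// An unmarshalling sketch (illustrative only):
+//
+//	doc := DC.New()
+//	if err := doc.UnmarshalJSON([]byte(`{"a":1,"b":[true,null]}`)); err != nil {
+//		return err // malformed input, or a non-object at the top level
+//	}
+//	// doc now holds a=1 as an int and b as an Array of Bool and Null values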
diff --git a/vendor/github.com/evergreen-ci/birch/jsonx/write.go b/vendor/github.com/evergreen-ci/birch/jsonx/write.go
new file mode 100644
index 00000000000..65864686e3b
--- /dev/null
+++ b/vendor/github.com/evergreen-ci/birch/jsonx/write.go
@@ -0,0 +1,171 @@
+package jsonx
+
+// code in this file from: https://github.com/json-iterator/go/blob/master/stream_str.go
+
+import "unicode/utf8"
+
+var safeSet = [utf8.RuneSelf]bool{
+	' ':      true,
+	'!':      true,
+	'"':      false,
+	'#':      true,
+	'$':      true,
+	'%':      true,
+	'&':      true,
+	'\'':     true,
+	'(':      true,
+	')':      true,
+	'*':      true,
+	'+':      true,
+	',':      true,
+	'-':      true,
+	'.':      true,
+	'/':      true,
+	'0':      true,
+	'1':      true,
+	'2':      true,
+	'3':      true,
+	'4':      true,
+	'5':      true,
+	'6':      true,
+	'7':      true,
+	'8':      true,
+	'9':      true,
+	':':      true,
+	';':      true,
+	'<':      true,
+	'=':      true,
+	'>':      true,
+	'?':      true,
+	'@':      true,
+	'A':      true,
+	'B':      true,
+	'C':      true,
+	'D':      true,
+	'E':      true,
+	'F':      true,
+	'G':      true,
+	'H':      true,
+	'I':      true,
+	'J':      true,
+	'K':      true,
+	'L':      true,
+	'M':      true,
+	'N':      true,
+	'O':      true,
+	'P':      true,
+	'Q':      true,
+	'R':      true,
+	'S':      true,
+	'T':      true,
+	'U':      true,
+	'V':      true,
+	'W':      true,
+	'X':      true,
+	'Y':      true,
+	'Z':      true,
+	'[':      true,
+	'\\':     false,
+	']':      true,
+	'^':      true,
+	'_':      true,
+	'`':      true,
+	'a':      true,
+	'b':      true,
+	'c':      true,
+	'd':      true,
+	'e':      true,
+	'f':      true,
+	'g':      true,
+	'h':      true,
+	'i':      true,
+	'j':      true,
+	'k':      true,
+	'l':      true,
+	'm':      true,
+	'n':      true,
+	'o':      true,
+	'p':      true,
+	'q':      true,
+	'r':      true,
+	's':      true,
+	't':      true,
+	'u':      true,
+	'v':      true,
+	'w':      true,
+	'x':      true,
+	'y':      true,
+	'z':      true,
+	'{':      true,
+	'|':      true,
+	'}':      true,
+	'~':      true,
+	'\u007f': true,
+}
+
+const hex = "0123456789abcdef"
+
+func writeJSONString(s []byte) []byte {
+	out := make([]byte, 0, len(s))
+
+	valLen := len(s)
+	out = append(out, '"')
+
+	// write string, the fast path, without utf8 and escape support
+	i := 0
+	for ; i < valLen; i++ {
+		c := s[i]
+		if c > 31 && c != '"' && c != '\\' {
+			out = append(out, c)
+		} else {
+			break
+		}
+	}
+	if i == valLen {
+		return append(out, '"')
+	}
+
+	// Slow path...
+
+	start := i
+	// for the remaining parts, we process them char by char
+	for i < valLen {
+		if b := s[i]; b < utf8.RuneSelf {
+			if safeSet[b] {
+				i++
+				continue
+			}
+			if start < i {
+				out = append(out, s[start:i]...)
+			}
+			switch b {
+			case '\\', '"':
+				out = append(out, '\\', b)
+			case '\n':
+				out = append(out, '\\', 'n')
+			case '\r':
+				out = append(out, '\\', 'r')
+			case '\t':
+				out = append(out, '\\', 't')
+			default:
+				// This encodes bytes < 0x20 except for \t, \n and \r
+				// as \u00XX escapes.
+				out = append(out, `\u00`...)
+				out = append(out, hex[b>>4], hex[b&0xF])
+			}
+			i++
+			start = i
+			continue
+		}
+		// bytes >= utf8.RuneSelf stay in the pending window and are
+		// copied through verbatim when the window is flushed below.
+		i++
+	}
+	if start < len(s) {
+		out = append(out, s[start:]...)
+	}
+	return append(out, '"')
+}
diff --git a/vendor/github.com/evergreen-ci/birch/parser/ast/ast.go b/vendor/github.com/evergreen-ci/birch/parser/ast/ast.go
deleted file mode 100644
index b4479098106..00000000000
--- a/vendor/github.com/evergreen-ci/birch/parser/ast/ast.go
+++ /dev/null
@@ -1,245 +0,0 @@
-// Copyright (C) MongoDB, Inc. 2017-present.
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may
-// not use this file except in compliance with the License.
You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package ast - -import ( - "fmt" - - "github.com/evergreen-ci/birch/decimal" - "github.com/evergreen-ci/birch/objectid" -) - -// Document represents a BSON Document. -type Document struct { - Length int32 - EList []Element -} - -// Element represents an individual element of a BSON document. All element -// types implement the Element interface. -type Element interface { - elementNode() -} - -func (*FloatElement) elementNode() {} -func (*StringElement) elementNode() {} -func (*DocumentElement) elementNode() {} -func (*ArrayElement) elementNode() {} -func (*BinaryElement) elementNode() {} -func (*UndefinedElement) elementNode() {} -func (*ObjectIDElement) elementNode() {} -func (*BoolElement) elementNode() {} -func (*DateTimeElement) elementNode() {} -func (*NullElement) elementNode() {} -func (*RegexElement) elementNode() {} -func (*DBPointerElement) elementNode() {} -func (*JavaScriptElement) elementNode() {} -func (*SymbolElement) elementNode() {} -func (*CodeWithScopeElement) elementNode() {} -func (*Int32Element) elementNode() {} -func (*TimestampElement) elementNode() {} -func (*Int64Element) elementNode() {} -func (*DecimalElement) elementNode() {} -func (*MinKeyElement) elementNode() {} -func (*MaxKeyElement) elementNode() {} - -// FloatElement represents a BSON double element. -type FloatElement struct { - Name *ElementKeyName - Double float64 -} - -// StringElement represents a BSON string element. -type StringElement struct { - Name *ElementKeyName - String string -} - -// DocumentElement represents a BSON subdocument element. -type DocumentElement struct { - Name *ElementKeyName - Document *Document -} - -// ArrayElement represents a BSON array element. -type ArrayElement struct { - Name *ElementKeyName - Array *Document -} - -// BinaryElement represents a BSON binary element. -type BinaryElement struct { - Name *ElementKeyName - Binary *Binary -} - -// UndefinedElement represents a BSON undefined element. -type UndefinedElement struct { - Name *ElementKeyName -} - -// ObjectIDElement represents a BSON objectID element. -type ObjectIDElement struct { - Name *ElementKeyName - ID objectid.ObjectID -} - -// BoolElement represents a BSON boolean element. -type BoolElement struct { - Name *ElementKeyName - Bool bool -} - -// DateTimeElement represents a BSON datetime element. -type DateTimeElement struct { - Name *ElementKeyName - // TODO(skriptble): This should be an actual time.Time value - DateTime int64 -} - -// NullElement represents a BSON null element. -type NullElement struct { - Name *ElementKeyName -} - -// RegexElement represents a BSON regex element. -type RegexElement struct { - Name *ElementKeyName - RegexPattern *CString - RegexOptions *CString -} - -// DBPointerElement represents a BSON db pointer element. -type DBPointerElement struct { - Name *ElementKeyName - String string - Pointer objectid.ObjectID -} - -// JavaScriptElement represents a BSON JavaScript element. -type JavaScriptElement struct { - Name *ElementKeyName - String string -} - -// SymbolElement represents a BSON symbol element. -type SymbolElement struct { - Name *ElementKeyName - String string -} - -// CodeWithScopeElement represents a BSON JavaScript with scope element. -type CodeWithScopeElement struct { - Name *ElementKeyName - CodeWithScope *CodeWithScope -} - -// Int32Element represents a BSON int32 element. 
-type Int32Element struct { - Name *ElementKeyName - Int32 int32 -} - -// TimestampElement represents a BSON timestamp element. -type TimestampElement struct { - Name *ElementKeyName - Timestamp uint64 -} - -// Int64Element represents a BSON int64 element. -type Int64Element struct { - Name *ElementKeyName - Int64 int64 -} - -// DecimalElement represents a BSON Decimal128 element. -// -// TODO(skriptble): Borrowing the Decimal128 implementation from mgo/bson -// for now until we write a new implementation, preferably using the math/big -// package and providing a way to return a big.Float. -type DecimalElement struct { - Name *ElementKeyName - Decimal128 decimal.Decimal128 -} - -// MinKeyElement represents a BSON min key element. -type MinKeyElement struct { - Name *ElementKeyName -} - -// MaxKeyElement represents a BSON max key element. -type MaxKeyElement struct { - Name *ElementKeyName -} - -// ElementKeyName represents the key for a BSON Document element. -type ElementKeyName struct { - Key string -} - -// String implements the fmt.Stringer interface. -func (ekn *ElementKeyName) String() string { - if ekn == nil { - return "" - } - return fmt.Sprintf("&ElementKeyName{Key:%s}", ekn.Key) -} - -// GoString implements the fmt.GoStringer interface. -func (ekn *ElementKeyName) GoString() string { - if ekn == nil { - return "" - } - return fmt.Sprintf("&ElementKeyName{Key:%s}", ekn.Key) -} - -// CString represents a BSON cstring. -type CString struct { - String string -} - -// Binary represents a BSON binary node. -type Binary struct { - Subtype BinarySubtype - Data []byte -} - -// String implements the fmt.Stringer interface. -func (b *Binary) String() string { - if b == nil { - return "" - } - return fmt.Sprintf("&Binary{Subtype:%d, Data:%#v}", b.Subtype, b.Data) -} - -// GoString implements the fmt.GoStringer interface. -func (b *Binary) GoString() string { - if b == nil { - return "" - } - return fmt.Sprintf("&Binary{Subtype:%d, Data:%#v}", b.Subtype, b.Data) -} - -// BinarySubtype describes the subtype of a Binary node. -type BinarySubtype byte - -// The possible Binary Subtypes. -const ( - SubtypeGeneric BinarySubtype = '\x00' // Generic, default - SubtypeFunction BinarySubtype = '\x01' // Function - SubtypeBinaryOld BinarySubtype = '\x02' // Old Binary, prefixed with length - SubtypeUUIDOld BinarySubtype = '\x03' // Old UUID - SubtypeUUID BinarySubtype = '\x04' // UUID - SubtypeMD5 BinarySubtype = '\x05' // MD5 - SubtypeUserDefined BinarySubtype = '\x80' // User defined types, anything greater than this -) - -// CodeWithScope represents a BSON JavaScript with scope node. -type CodeWithScope struct { - String string - Document *Document -} diff --git a/vendor/github.com/evergreen-ci/birch/parser/parser.go b/vendor/github.com/evergreen-ci/birch/parser/parser.go deleted file mode 100644 index 0187747bf79..00000000000 --- a/vendor/github.com/evergreen-ci/birch/parser/parser.go +++ /dev/null @@ -1,509 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package parser - -import ( - "bufio" - "encoding/binary" - "io" - "math" - - "github.com/evergreen-ci/birch/decimal" - "github.com/evergreen-ci/birch/parser/ast" - "github.com/pkg/errors" -) - -// ErrCorruptDocument is returned when the parser reaches a corrupt point -// within a BSON document. 
-var ErrCorruptDocument = errors.New("bson/parser: corrupted document") - -// ErrUnknownSubtype is returned when the subtype of a binary node is undefined. -var ErrUnknownSubtype = errors.New("bson/parser: unknown binary subtype") - -// ErrNilReader is returned when a nil reader is passed to NewBSONParser. -var ErrNilReader = errors.New("bson/parser: nil or invalid reader provided") - -// Parser is a BSON parser. -type Parser struct { - r *bufio.Reader -} - -// NewBSONParser instantiates a new BSON Parser with the given reader. -func NewBSONParser(r io.Reader) (*Parser, error) { - if r == nil { - return nil, ErrNilReader - } - return &Parser{r: bufio.NewReader(r)}, nil -} - -// readInt32 reads a single int32 from the parser's reader. -func (p *Parser) readInt32() (int32, error) { - var i int32 - err := binary.Read(p.r, binary.LittleEndian, &i) - return i, err -} - -// readInt64 reads a single int64 from the parser's reader. -func (p *Parser) readInt64() (int64, error) { - var i int64 - err := binary.Read(p.r, binary.LittleEndian, &i) - return i, err -} - -// readUint64 reads a single uint64 from the parser's reader. -func (p *Parser) readUint64() (uint64, error) { - var u uint64 - err := binary.Read(p.r, binary.LittleEndian, &u) - return u, err -} - -// readObjectID reads a single objectID from the parser's reader. -func (p *Parser) readObjectID() ([12]byte, error) { - var id [12]byte - b := make([]byte, 12) - _, err := io.ReadFull(p.r, b) - if err != nil { - return id, err - } - copy(id[:], b) - return id, nil -} - -// readBoolean reads a single boolean from the parser's reader. -func (p *Parser) readBoolean() (bool, error) { - var bv bool - b, err := p.r.ReadByte() - if err != nil { - return false, err - } - switch b { - case '\x00': - bv = false - case '\x01': - bv = true - default: - return false, ErrCorruptDocument - } - return bv, nil -} - -// ParseDocument parses an entire document from the parser's reader. -func (p *Parser) ParseDocument() (*ast.Document, error) { - doc := new(ast.Document) - // Lex document length - l, err := p.readInt32() - if err != nil { - return nil, err - } - doc.Length = l - // Lex and parse each item of the list - elist, err := p.ParseEList() - if err != nil { - return nil, err - } - doc.EList = elist - - // ensure the document ends with \x00 - eol, err := p.r.ReadByte() - if err != nil { - return nil, err - } - if eol != '\x00' { - return nil, ErrCorruptDocument - } - - return doc, nil -} - -// ParseEList parses an entire element list from the parser's reader. -func (p *Parser) ParseEList() ([]ast.Element, error) { - var element ast.Element - var err error - var idents []byte - list := make([]ast.Element, 0) - - for { - idents, err = p.r.Peek(1) - if err != nil { - return list, err - } - - if idents[0] == '\x00' { - break - } - - element, err = p.ParseElement() - if err != nil { - return list, err - } - list = append(list, element) - } - return list, nil -} - -// ParseElement parses an element from the parser's reader. 
-func (p *Parser) ParseElement() (ast.Element, error) { - var ident byte - var err error - var key *ast.ElementKeyName - var el ast.Element - - ident, err = p.r.ReadByte() - if err != nil { - return nil, err - } - if ident == '\x00' { - return nil, nil - } - - key, err = p.ParseEName() - if err != nil { - return nil, err - } - - switch ident { - case '\x01': - f, err := p.ParseDouble() - if err != nil { - return nil, err - } - el = &ast.FloatElement{ - Name: key, - Double: f, - } - case '\x02': - str, err := p.ParseString() - if err != nil { - return nil, err - } - el = &ast.StringElement{ - Name: key, - String: str, - } - case '\x03': - doc, err := p.ParseDocument() - if err != nil { - return nil, err - } - el = &ast.DocumentElement{ - Name: key, - Document: doc, - } - case '\x04': - doc, err := p.ParseDocument() - if err != nil { - return nil, err - } - el = &ast.ArrayElement{ - Name: key, - Array: doc, - } - case '\x05': - bin, err := p.ParseBinary() - if err != nil { - return nil, err - } - el = &ast.BinaryElement{ - Name: key, - Binary: bin, - } - case '\x06': - el = &ast.UndefinedElement{ - Name: key, - } - case '\x07': - id, err := p.readObjectID() - if err != nil { - return nil, err - } - el = &ast.ObjectIDElement{ - Name: key, - ID: id, - } - case '\x08': - bl, err := p.readBoolean() - if err != nil { - return nil, err - } - el = &ast.BoolElement{ - Name: key, - Bool: bl, - } - case '\x09': - i64, err := p.readInt64() - if err != nil { - return nil, err - } - el = &ast.DateTimeElement{ - Name: key, - DateTime: i64, - } - case '\x0A': - el = &ast.NullElement{ - Name: key, - } - case '\x0B': - pattern, err := p.ParseCString() - if err != nil { - return nil, err - } - options, err := p.ParseCString() - if err != nil { - return nil, err - } - el = &ast.RegexElement{ - Name: key, - RegexPattern: &ast.CString{String: pattern}, - RegexOptions: &ast.CString{String: options}, - } - case '\x0C': - str, err := p.ParseString() - if err != nil { - return nil, err - } - pointer, err := p.readObjectID() - if err != nil { - return nil, err - } - el = &ast.DBPointerElement{ - Name: key, - String: str, - Pointer: pointer, - } - case '\x0D': - str, err := p.ParseString() - if err != nil { - return nil, err - } - el = &ast.JavaScriptElement{ - Name: key, - String: str, - } - case '\x0E': - str, err := p.ParseString() - if err != nil { - return nil, err - } - el = &ast.SymbolElement{ - Name: key, - String: str, - } - case '\x0F': - cws, err := p.ParseCodeWithScope() - if err != nil { - return nil, err - } - el = &ast.CodeWithScopeElement{ - Name: key, - CodeWithScope: cws, - } - case '\x10': - i, err := p.readInt32() - if err != nil { - return nil, err - } - el = &ast.Int32Element{ - Name: key, - Int32: i, - } - case '\x11': - u, err := p.readUint64() - if err != nil { - return nil, err - } - el = &ast.TimestampElement{ - Name: key, - Timestamp: u, - } - case '\x12': - i, err := p.readInt64() - if err != nil { - return nil, err - } - el = &ast.Int64Element{ - Name: key, - Int64: i, - } - case '\x13': - l, err := p.readUint64() - if err != nil { - return nil, err - } - h, err := p.readUint64() - if err != nil { - return nil, err - } - d := decimal.NewDecimal128(h, l) - el = &ast.DecimalElement{ - Name: key, - Decimal128: d, - } - case '\xFF': - el = &ast.MinKeyElement{ - Name: key, - } - case '\x7F': - el = &ast.MaxKeyElement{ - Name: key, - } - } - - return el, nil -} - -// ParseEName parses an element's key from the parser's reader. 
-func (p *Parser) ParseEName() (*ast.ElementKeyName, error) { - str, err := p.ParseCString() - if err != nil { - return nil, err - } - ekn := &ast.ElementKeyName{ - Key: str, - } - return ekn, nil -} - -// ParseString parses a string from the parser's reader. -// -// TODO(skriptble): Should this be a read* method since it's returning a Go -// primitive? That would fit with the rest of the read* methods. -func (p *Parser) ParseString() (string, error) { - l, err := p.readInt32() - if err != nil { - return "", err - } - - if l > 0 { - l-- - } - - b := make([]byte, l) - _, err = io.ReadFull(p.r, b) - if err != nil { - return "", err - } - eol, err := p.r.ReadByte() - if err != nil { - return "", err - } - if eol != '\x00' { - return "", ErrCorruptDocument - } - - return string(b), nil -} - -// ParseCString parses a c-style string from the parser's reader. -// -// TODO(skriptble): Should this be a read* method since it's returning a Go -// primitive? That would fit with the rest of the read* methods. -func (p *Parser) ParseCString() (string, error) { - b, err := p.r.ReadBytes('\x00') - if err != nil { - return "", err - } - return string(b[:len(b)-1]), nil -} - -// ParseBinary parses a binary node from the parser's reader. -func (p *Parser) ParseBinary() (*ast.Binary, error) { - l, err := p.readInt32() - if err != nil { - return nil, err - } - - bst, err := p.ParseSubtype() - if err != nil { - return nil, err - } - - b := make([]byte, l) - _, err = io.ReadFull(p.r, b) - if err != nil { - return nil, err - } - - if bst == ast.SubtypeBinaryOld { - if len(b) < 4 { - // TODO(skriptble): Return a more informative error - return nil, ErrCorruptDocument - } - b = b[4:] - } - - bin := &ast.Binary{ - Subtype: bst, - Data: b, - } - - return bin, err -} - -// ParseSubtype parses the subtype for a binary node from the parser's reader. -func (p *Parser) ParseSubtype() (ast.BinarySubtype, error) { - r, err := p.r.ReadByte() - if err != nil { - return 0, err - } - var bst ast.BinarySubtype - - switch r { - case '\x00': - bst = ast.SubtypeGeneric - case '\x01': - bst = ast.SubtypeFunction - case '\x02': - bst = ast.SubtypeBinaryOld - case '\x03': - bst = ast.SubtypeUUIDOld - case '\x04': - bst = ast.SubtypeUUID - case '\x05': - bst = ast.SubtypeMD5 - default: - if r >= '\x80' { - bst = ast.SubtypeUserDefined - } else { - return 0, ErrUnknownSubtype - } - } - return bst, nil -} - -// ParseDouble parses a float64 from the parser's reader. -// -// TODO(skriptble): This should be a read* method. -func (p *Parser) ParseDouble() (float64, error) { - var bits uint64 - if err := binary.Read(p.r, binary.LittleEndian, &bits); err != nil { - return 0, err - } - return math.Float64frombits(bits), nil -} - -// ParseCodeWithScope parses a JavaScript Code with Scope node from the -// parser's reader. 
-func (p *Parser) ParseCodeWithScope() (*ast.CodeWithScope, error) { - // TODO(skriptble): We should probably keep track of this length - _, err := p.readInt32() - if err != nil { - return nil, err - } - str, err := p.ParseString() - if err != nil { - return nil, err - } - doc, err := p.ParseDocument() - if err != nil { - return nil, err - } - cws := &ast.CodeWithScope{ - String: str, - Document: doc, - } - return cws, nil -} diff --git a/vendor/github.com/evergreen-ci/birch/parser/parser_test.go b/vendor/github.com/evergreen-ci/birch/parser/parser_test.go deleted file mode 100644 index 4141840e693..00000000000 --- a/vendor/github.com/evergreen-ci/birch/parser/parser_test.go +++ /dev/null @@ -1,967 +0,0 @@ -// Copyright (C) MongoDB, Inc. 2017-present. -// -// Licensed under the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -package parser - -import ( - "bufio" - "bytes" - "crypto/rand" - "encoding/binary" - "math" - "reflect" - "testing" - - "github.com/evergreen-ci/birch/decimal" - "github.com/evergreen-ci/birch/parser/ast" - "github.com/pkg/errors" -) - -func TestBSONParser(t *testing.T) { - t.Run("read-int-32", func(t *testing.T) { - var want int32 = 123 - buf := make([]byte, 8) - binary.LittleEndian.PutUint32(buf, uint32(want)) - rdr := bytes.NewReader(buf) - p := &Parser{r: bufio.NewReader(rdr)} - got, err := p.readInt32() - if err != nil { - t.Errorf("Unexpected error while reading int32: %s", err) - } - if got != want { - t.Errorf("Unexpected result. got %d; want %d", got, want) - } - t.Run("read-error", func(t *testing.T) { - want := errors.New("read-int-32-error") - p := &Parser{r: bufio.NewReader(&errReader{err: want})} - _, got := p.readInt32() - if got != want { - t.Errorf("Expected error. got %s; want %s", got, want) - } - }) - }) - - t.Run("read-int-64", func(t *testing.T) { - var want int64 = 123 - buf := make([]byte, 8) - binary.LittleEndian.PutUint64(buf, uint64(want)) - rdr := bytes.NewReader(buf) - p := &Parser{r: bufio.NewReader(rdr)} - got, err := p.readInt64() - if err != nil { - t.Errorf("Unexpected error while reading int64: %s", err) - } - if got != want { - t.Errorf("Unexpected result. got %d; want %d", got, want) - } - t.Run("read-error", func(t *testing.T) { - want := errors.New("read-int-64-error") - p := &Parser{r: bufio.NewReader(&errReader{err: want})} - _, got := p.readInt64() - if got != want { - t.Errorf("Expected error. got %s; want %s", got, want) - } - }) - }) - - t.Run("read-uint-64", func(t *testing.T) { - var want uint64 = 123 - buf := make([]byte, 8) - binary.LittleEndian.PutUint64(buf, want) - rdr := bytes.NewReader(buf) - p := &Parser{r: bufio.NewReader(rdr)} - got, err := p.readUint64() - if err != nil { - t.Errorf("Unexpected error while reading uint64: %s", err) - } - if got != want { - t.Errorf("Unexpected result. got %d; want %d", got, want) - } - t.Run("read-error", func(t *testing.T) { - want := errors.New("read-uint-64-error") - p := &Parser{r: bufio.NewReader(&errReader{err: want})} - _, got := p.readInt64() - if got != want { - t.Errorf("Expected error. 
got %s; want %s", got, want) - } - }) - }) - t.Run("read-object-id", func(t *testing.T) { - var want [12]byte - _, err := rand.Read(want[:]) - if err != nil { - t.Errorf("Error while creating random object id: %s", err) - } - rdr := bytes.NewReader(want[:]) - p := &Parser{r: bufio.NewReader(rdr)} - got, err := p.readObjectID() - if err != nil { - t.Errorf("Unexpected error while reading objectID: %s", err) - } - if !bytes.Equal(got[:], want[:]) { - t.Errorf("Unexpected result. got %d; want %d", got, want) - } - t.Run("read-error", func(t *testing.T) { - want := errors.New("read-object-id-error") - p := &Parser{r: bufio.NewReader(&errReader{err: want})} - _, got := p.readObjectID() - if got != want { - t.Errorf("Expected error. got %s; want %s", got, want) - } - }) - }) - - t.Run("read-bool", func(t *testing.T) { - t.Run("false", func(t *testing.T) { - want := false - rdr := bytes.NewReader([]byte{'\x00'}) - p := &Parser{r: bufio.NewReader(rdr)} - got, err := p.readBoolean() - if err != nil { - t.Errorf("Unexpected error while reading objectID: %s", err) - } - if got != want { - t.Errorf("Unexpected result. got %t; want %t", got, want) - } - }) - t.Run("true", func(t *testing.T) { - want := true - rdr := bytes.NewReader([]byte{'\x01'}) - p := &Parser{r: bufio.NewReader(rdr)} - got, err := p.readBoolean() - if err != nil { - t.Errorf("Unexpected error while reading objectID: %s", err) - } - if got != want { - t.Errorf("Unexpected result. got %t; want %t", got, want) - } - }) - t.Run("corrupt-document", func(t *testing.T) { - want := ErrCorruptDocument - rdr := bytes.NewReader([]byte{'\x03'}) - p := &Parser{r: bufio.NewReader(rdr)} - _, got := p.readBoolean() - if got != want { - t.Errorf("Unexpected result. got %s; want %s", got, want) - } - }) - t.Run("read-error", func(t *testing.T) { - want := errors.New("read-bool-error") - p := &Parser{r: bufio.NewReader(&errReader{err: want})} - _, got := p.readBoolean() - if got != want { - t.Errorf("Expected error. got %s; want %s", got, want) - } - }) - }) - - t.Run("parse-document", func(t *testing.T) { - want := &ast.Document{ - Length: 5, - EList: []ast.Element{}, - } - b := make([]byte, 5) - binary.LittleEndian.PutUint32(b[:4], 5) - b[4] = '\x00' - r := bytes.NewReader(b) - p := &Parser{r: bufio.NewReader(r)} - got, err := p.ParseDocument() - if err != nil { - t.Errorf("Unexpected error while parsing document: %s", err) - } - if !reflect.DeepEqual(got, want) { - t.Errorf("Unexpected result. got %v; want %v", got, want) - } - - t.Run("read-int-error", func(t *testing.T) { - want := errors.New("parse-document-int-32-error") - p := &Parser{r: bufio.NewReader(&errReader{err: want})} - _, got := p.ParseDocument() - if got != want { - t.Errorf("Expected error. got %s; want %s", got, want) - } - }) - t.Run("parse-elist-error", func(t *testing.T) { - want := errors.New("parse-document-parse-elist-error") - b := make([]byte, 4) - binary.LittleEndian.PutUint32(b, 5) - p := &Parser{r: bufio.NewReader(&errReader{b: b, err: want})} - _, got := p.ParseDocument() - if got != want { - t.Errorf("Expected error. got %s; want %s", got, want) - } - }) - }) - - t.Run("parse-elist", func(t *testing.T) { - t.Run("peek-error", func(t *testing.T) { - want := errors.New("parse-elist-peek-error") - p := &Parser{r: bufio.NewReader(&errReader{err: want})} - _, got := p.ParseEList() - if got != want { - t.Errorf("Expected error. 
got %s; want %s", got, want) - } - }) - t.Run("peek-null", func(t *testing.T) { - want := []ast.Element{} - r := bytes.NewReader([]byte{'\x00'}) - p := &Parser{r: bufio.NewReader(r)} - got, err := p.ParseEList() - if err != nil { - t.Errorf("Unexpected error: %s", err) - } - if !reflect.DeepEqual(got, want) { - t.Errorf("Unexpected result. got %v; want %v", got, want) - } - }) - t.Run("parse-element-error", func(t *testing.T) { - want := errors.New("parse-element-error") - b := []byte{'\x01'} - p := &Parser{r: bufio.NewReader(&errReader{b: b, err: want})} - _, got := p.ParseEList() - if got != want { - t.Errorf("Expected error. got %s; want %s", got, want) - } - }) - - want := []ast.Element{&ast.NullElement{Name: &ast.ElementKeyName{Key: "foo"}}} - r := bytes.NewReader([]byte{'\x0A', 'f', 'o', 'o', '\x00', '\x00'}) - p := &Parser{r: bufio.NewReader(r)} - got, err := p.ParseEList() - if err != nil { - t.Errorf("Unexpected error while parsing elist: %s", err) - } - for _, elem := range got { - ne, ok := elem.(*ast.NullElement) - if !ok { - t.Errorf("Unexpected result. got %T; want %T", ne, want[0]) - } - if ne.Name.Key != want[0].(*ast.NullElement).Name.Key { - t.Errorf("Unexpected result. got %v; want %v", ne.Name, want[0].(*ast.NullElement).Name) - } - } - }) - - t.Run("parse-element", parseElementTest) - - t.Run("parse-ename", func(t *testing.T) { - t.Run("parse-cstring-error", func(t *testing.T) { - want := errors.New("parse-cstring-error") - p := &Parser{r: bufio.NewReader(&errReader{err: want})} - _, got := p.ParseEName() - if got != want { - t.Errorf("Expected error. got %s; want %s", got, want) - } - }) - want := &ast.ElementKeyName{Key: "foo"} - r := bytes.NewReader([]byte{'f', 'o', 'o', '\x00'}) - p := &Parser{r: bufio.NewReader(r)} - got, err := p.ParseEName() - if err != nil { - t.Errorf("Unexpected error while parsing elist: %s", err) - } - if got.Key != want.Key { - t.Errorf("Unexpected result. got %s; want %s", got.Key, want.Key) - } - }) - - t.Run("parse-string", func(t *testing.T) { - t.Run("read-int32-error", func(t *testing.T) { - want := errors.New("read-int32-error") - p := &Parser{r: bufio.NewReader(&errReader{err: want})} - _, got := p.ParseString() - if got != want { - t.Errorf("Expected error. got %s; want %s", got, want) - } - }) - t.Run("readfull-error", func(t *testing.T) { - b := make([]byte, 4) - binary.LittleEndian.PutUint32(b, 10) - want := errors.New("readfull-error") - p := &Parser{r: bufio.NewReader(&errReader{b: b, err: want})} - _, got := p.ParseString() - if got != want { - t.Errorf("Expected error. got %s; want %s", got, want) - } - }) - t.Run("readbyte-error", func(t *testing.T) { - b := make([]byte, 7) - binary.LittleEndian.PutUint32(b[:4], 3) - b[4], b[5], b[6] = 'f', 'o', 'o' - want := ErrCorruptDocument - p := &Parser{r: bufio.NewReader(&errReader{b: b, err: want})} - _, got := p.ParseString() - if got != want { - t.Errorf("Expected error. got %s; want %s", got, want) - } - }) - t.Run("corrupt-document", func(t *testing.T) { - b := make([]byte, 8) - binary.LittleEndian.PutUint32(b[:4], 3) - b[4], b[5], b[6], b[7] = 'f', 'o', 'o', '\x01' - want := ErrCorruptDocument - p := &Parser{r: bufio.NewReader(&errReader{b: b, err: want})} - _, got := p.ParseString() - if got != want { - t.Errorf("Expected error. 
got %s; want %s", got, want) - } - }) - b := make([]byte, 8) - binary.LittleEndian.PutUint32(b[:4], 4) - b[4], b[5], b[6], b[7] = 'f', 'o', 'o', '\x00' - want := "foo" - r := bytes.NewReader(b) - p := &Parser{r: bufio.NewReader(r)} - got, err := p.ParseString() - if err != nil { - t.Errorf("Unexpected error: %s", err) - } - if got != want { - t.Errorf("Unexpected result. got %s; want %s", got, want) - } - }) - - t.Run("parse-cstring", func(t *testing.T) { - t.Run("read-bytes-error", func(t *testing.T) { - want := errors.New("read-bytes-error") - p := &Parser{r: bufio.NewReader(&errReader{err: want})} - _, got := p.ParseCString() - if got != want { - t.Errorf("Expected error. got %s; want %s", got, want) - } - }) - b := []byte{'f', 'o', 'o', '\x00'} - want := "foo" - r := bytes.NewReader(b) - p := &Parser{r: bufio.NewReader(r)} - got, err := p.ParseCString() - if err != nil { - t.Errorf("Unexpected error: %s", err) - } - if got != want { - t.Errorf("Unexpected result. got %s; want %s", got, want) - } - - }) - - t.Run("parse-binary", func(t *testing.T) { - t.Run("read-int-32-error", func(t *testing.T) { - want := errors.New("read-int-32-error") - p := &Parser{r: bufio.NewReader(&errReader{err: want})} - _, got := p.ParseBinary() - if got != want { - t.Errorf("Expected error. got %s; want %s", got, want) - } - }) - t.Run("parse-subtype-error", func(t *testing.T) { - b := make([]byte, 4) - binary.LittleEndian.PutUint32(b, 5) - want := errors.New("parse-subtype-error") - p := &Parser{r: bufio.NewReader(&errReader{b: b, err: want})} - _, got := p.ParseBinary() - if got != want { - t.Errorf("Expected error. got %s; want %s", got, want) - } - }) - t.Run("read-full-error", func(t *testing.T) { - b := make([]byte, 5) - binary.LittleEndian.PutUint32(b[:4], 5) - b[4] = '\x00' - want := errors.New("read-full-error") - p := &Parser{r: bufio.NewReader(&errReader{b: b, err: want})} - _, got := p.ParseBinary() - if got != want { - t.Errorf("Expected error. got %s; want %s", got, want) - } - }) - t.Run("old-binary-corrupt-document-error", func(t *testing.T) { - b := make([]byte, 7) - binary.LittleEndian.PutUint32(b[:4], 2) - b[4], b[5], b[6] = '\x02', 'f', 'o' - want := ErrCorruptDocument - p := &Parser{r: bufio.NewReader(&errReader{b: b, err: want})} - _, got := p.ParseBinary() - if got != want { - t.Errorf("Expected error. got %s; want %s", got, want) - } - }) - t.Run("old-binary-success", func(t *testing.T) { - b := make([]byte, 10) - binary.LittleEndian.PutUint32(b[:4], 5) - b[4] = '\x02' - binary.LittleEndian.PutUint32(b[5:9], 1) - b[9] = 'f' - want := &ast.Binary{Subtype: ast.SubtypeBinaryOld, Data: []byte{'f'}} - r := bytes.NewReader(b) - p := &Parser{r: bufio.NewReader(r)} - got, err := p.ParseBinary() - if err != nil { - t.Errorf("Unexpected error: %s", err) - } - if !reflect.DeepEqual(&got, &want) { - t.Errorf("Unexpected result. got %v; want %v", got, want) - } - }) - b := make([]byte, 8) - binary.LittleEndian.PutUint32(b[:4], 3) - b[4], b[5], b[6], b[7] = '\x00', 'f', 'o', 'o' - want := &ast.Binary{Subtype: ast.SubtypeGeneric, Data: []byte{'f', 'o', 'o'}} - r := bytes.NewReader(b) - p := &Parser{r: bufio.NewReader(r)} - got, err := p.ParseBinary() - if err != nil { - t.Errorf("Unexpected error: %s", err) - } - if !reflect.DeepEqual(&got, &want) { - t.Errorf("Unexpected result. 
got %v; want %v", got, want) - } - }) - - t.Run("parse-subtype", func(t *testing.T) { - t.Run("read-byte-error", func(t *testing.T) { - want := errors.New("read-byte-error") - p := &Parser{r: bufio.NewReader(&errReader{err: want})} - _, got := p.ParseSubtype() - if got != want { - t.Errorf("Expected error. got %s; want %s", got, want) - } - }) - t.Run("unknown-subtype-error", func(t *testing.T) { - b := []byte{'\x7F'} - want := ErrUnknownSubtype - r := bytes.NewReader(b) - p := &Parser{r: bufio.NewReader(r)} - _, got := p.ParseSubtype() - if got != want { - t.Errorf("Expected error. got %s; want %s", got, want) - } - }) - testCases := []struct { - name string - b []byte - want ast.BinarySubtype - }{ - {"generic", []byte{'\x00'}, ast.SubtypeGeneric}, - {"function", []byte{'\x01'}, ast.SubtypeFunction}, - {"binary-old", []byte{'\x02'}, ast.SubtypeBinaryOld}, - {"UUID-old", []byte{'\x03'}, ast.SubtypeUUIDOld}, - {"UUID", []byte{'\x04'}, ast.SubtypeUUID}, - {"MD5", []byte{'\x05'}, ast.SubtypeMD5}, - {"user-defined", []byte{'\x80'}, ast.SubtypeUserDefined}, - {"user-defined-max", []byte{'\xFF'}, ast.SubtypeUserDefined}, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - r := bytes.NewReader(tc.b) - p := &Parser{r: bufio.NewReader(r)} - got, err := p.ParseSubtype() - if err != nil { - t.Errorf("Unexpected error: %s", err) - } - if got != tc.want { - t.Errorf("Unexpected result. got %v; want %v", got, tc.want) - } - }) - } - }) - - t.Run("parse-double", func(t *testing.T) { - t.Run("binary-read-error", func(t *testing.T) { - want := errors.New("binary-read-error") - p := &Parser{r: bufio.NewReader(&errReader{err: want})} - _, got := p.ParseDouble() - if got != want { - t.Errorf("Expected error. got %s; want %s", got, want) - } - }) - want := 3.14159 - b := make([]byte, 8) - binary.LittleEndian.PutUint64(b, math.Float64bits(want)) - r := bytes.NewReader(b) - p := &Parser{r: bufio.NewReader(r)} - got, err := p.ParseDouble() - if err != nil { - t.Errorf("Unexpected error: %s", err) - } - if got != want { - t.Errorf("Unexpected result. got %f; want %f", got, want) - } - }) - - t.Run("parse-code-with-scope", func(t *testing.T) { - t.Run("read-int-32-error", func(t *testing.T) { - want := errors.New("read-int-32-error") - p := &Parser{r: bufio.NewReader(&errReader{err: want})} - _, got := p.ParseCodeWithScope() - if got != want { - t.Errorf("Expected error. got %s; want %s", got, want) - } - }) - t.Run("parse-string-error", func(t *testing.T) { - b := make([]byte, 4) - binary.LittleEndian.PutUint32(b, 4) - want := errors.New("parse-string-error") - p := &Parser{r: bufio.NewReader(&errReader{b: b, err: want})} - _, got := p.ParseCodeWithScope() - if got != want { - t.Errorf("Expected error. got %s; want %s", got, want) - } - }) - t.Run("parse-document-error", func(t *testing.T) { - b := make([]byte, 9) - binary.LittleEndian.PutUint32(b[:4], 4) - binary.LittleEndian.PutUint32(b[4:8], 0) - b[8] = '\x00' - want := errors.New("parse-document-error") - p := &Parser{r: bufio.NewReader(&errReader{b: b, err: want})} - _, got := p.ParseCodeWithScope() - if got != want { - t.Errorf("Expected error. got %s; want %s", got, want) - } - }) - want := ast.CodeWithScope{ - String: "var a = 10;", - Document: &ast.Document{Length: 5}, - } - b := make([]byte, 8) - binary.LittleEndian.PutUint32(b[:4], 0) - binary.LittleEndian.PutUint32(b[4:8], uint32(len(want.String)+1)) - b = append(b, []byte(want.String)...) 
- b = append(b, '\x00') - doclen := make([]byte, 4) - binary.LittleEndian.PutUint32(doclen, 5) - b = append(b, doclen...) - b = append(b, '\x00') - r := bytes.NewReader(b) - p := &Parser{r: bufio.NewReader(r)} - got, err := p.ParseCodeWithScope() - if err != nil { - t.Errorf("Unexpected error: %s", err) - } - if got.String != want.String { - t.Errorf("String contents do not match. got %s; want %s", got.String, want.String) - } - if len(got.Document.EList) != len(want.Document.EList) { - t.Errorf("Number of elements in document does not match. got %d; want %d", - len(got.Document.EList), len(want.Document.EList)) - } - if got.Document.Length != want.Document.Length { - t.Errorf("Length of documents does not match. got %d; want %d", - got.Document.Length, want.Document.Length) - } - }) -} - -func parseElementTest(t *testing.T) { - t.Run("read-byte-error", func(t *testing.T) { - want := errors.New("read-byte-error") - p := &Parser{r: bufio.NewReader(&errReader{err: want})} - _, got := p.ParseElement() - if got != want { - t.Errorf("Expected error. got %s; want %s", got, want) - } - }) - t.Run("null-byte", func(t *testing.T) { - b := []byte{'\x00'} - r := bytes.NewReader(b) - p := &Parser{r: bufio.NewReader(r)} - got, gotErr := p.ParseElement() - if gotErr != nil { - t.Errorf("Unexpected error. got %s; want %v", gotErr, nil) - } - if got != nil { - t.Errorf("Unexpected element. got %v; want %v", got, nil) - } - }) - t.Run("parse-ename-error", func(t *testing.T) { - b := []byte{'\x01'} - want := errors.New("parse-ename-error") - p := &Parser{r: bufio.NewReader(&errReader{b: b, err: want})} - _, got := p.ParseElement() - if got != want { - t.Errorf("Expected error. got %s; want %s", got, want) - } - }) - - doubleBytes := func() []byte { - key := "foobar" - b := make([]byte, 1+6+1+8) - b[0] = '\x01' - copy(b[1:7], []byte(key)) - b[7] = '\x00' - binary.LittleEndian.PutUint64(b[8:], math.Float64bits(3.14159)) - return b - } - stringBytes := func() []byte { - key := "foobar" - val := "bazqux" - b := make([]byte, 1+len(key)+1+4+len(val)+1) - b[0] = '\x02' - copy(b[1:7], []byte(key)) - b[7] = '\x00' - binary.LittleEndian.PutUint32(b[8:12], uint32(len(val)+1)) - copy(b[12:18], []byte(val)) - b[18] = '\x00' - return b - } - documentBytes := func() []byte { - key := "foobar" - b := make([]byte, 1+len(key)+1+5) - b[0] = '\x03' - copy(b[1:7], []byte(key)) - b[7] = '\x00' - binary.LittleEndian.PutUint32(b[8:12], 5) - b[12] = '\x00' - return b - } - arrayBytes := func() []byte { - key := "foobar" - b := make([]byte, 1+len(key)+1+5) - b[0] = '\x04' - copy(b[1:7], []byte(key)) - b[7] = '\x00' - binary.LittleEndian.PutUint32(b[8:12], 5) - b[12] = '\x00' - return b - } - binaryBytes := func() []byte { - key := "foobar" - bin := []byte{'\x00', '\x01', '\x02'} - b := make([]byte, 1+len(key)+1+4+1+len(bin)) - b[0] = '\x05' - copy(b[1:7], []byte(key)) - b[7] = '\x00' - binary.LittleEndian.PutUint32(b[8:12], uint32(len(bin))) - b[12] = '\x00' - copy(b[13:], bin) - return b - } - undefinedBytes := func() []byte { - key := "foobar" - b := make([]byte, 1+len(key)+1) - b[0] = '\x06' - copy(b[1:7], []byte(key)) - b[7] = '\x00' - return b - } - objectIDBytes := func() []byte { - key := "foobar" - id := [12]byte{ - '\x01', '\x02', '\x03', '\x04', - '\x05', '\x06', '\x07', '\x08', - '\x09', '\x10', '\x11', '\x12', - } - b := make([]byte, 1+len(key)+1+12) - b[0] = '\x07' - copy(b[1:7], []byte(key)) - b[7] = '\x00' - copy(b[8:], id[:]) - return b - } - boolBytes := func() []byte { - key := "foobar" - boolean := '\x01' - b := 
make([]byte, 1+len(key)+1+1) - b[0] = '\x08' - copy(b[1:7], []byte(key)) - b[7] = '\x00' - b[8] = byte(boolean) - return b - } - datetimeBytes := func() []byte { - key := "foobar" - datetime := 1234567890 - b := make([]byte, 1+len(key)+1+8) - b[0] = '\x09' - copy(b[1:7], []byte(key)) - b[7] = '\x00' - binary.LittleEndian.PutUint64(b[8:], uint64(datetime)) - return b - } - nullBytes := func() []byte { - key := "foobar" - b := make([]byte, 1+len(key)+1) - b[0] = '\x0A' - copy(b[1:7], []byte(key)) - b[7] = '\x00' - return b - } - regexBytes := func() []byte { - key := "foobar" - pattern := "hello\x00" - options := "world\x00" - b := make([]byte, 1+len(key)+1) - b[0] = '\x0B' - copy(b[1:7], []byte(key)) - b[7] = '\x00' - b = append(b, pattern...) - b = append(b, options...) - return b - } - dbpointerBytes := func() []byte { - key := "foobar" - str := "hello" - id := [12]byte{ - '\x01', '\x02', '\x03', '\x04', - '\x05', '\x06', '\x07', '\x08', - '\x09', '\x10', '\x11', '\x12', - } - b := make([]byte, 1+len(key)+1+4+len(str)+1+12) - b[0] = '\x0C' - copy(b[1:7], []byte(key)) - b[7] = '\x00' - binary.LittleEndian.PutUint32(b[8:12], uint32(len(str)+1)) - copy(b[12:17], []byte(str)) - b[17] = '\x00' - copy(b[18:], id[:]) - return b - } - javascriptBytes := func() []byte { - key := "foobar" - js := `var hello = "world";` - b := make([]byte, 1+len(key)+1+4+len(js)+1) - b[0] = '\x0D' - copy(b[1:7], []byte(key)) - b[7] = '\x00' - binary.LittleEndian.PutUint32(b[8:12], uint32(len(js)+1)) - copy(b[12:32], []byte(js)) - b[32] = byte('\x00') - return b - } - symbolBytes := func() []byte { - key := "foobar" - js := `12345` - b := make([]byte, 1+len(key)+1+4+len(js)+1) - b[0] = '\x0E' - copy(b[1:7], []byte(key)) - b[7] = '\x00' - binary.LittleEndian.PutUint32(b[8:12], uint32(len(js)+1)) - copy(b[12:17], []byte(js)) - b[17] = byte('\x00') - return b - } - codewithscopeBytes := func() []byte { - key := "foobar" - js := `var hello = "world";` - b := make([]byte, 1+len(key)+1+4+4+len(js)+1+5) - b[0] = '\x0F' - copy(b[1:7], []byte(key)) - b[7] = '\x00' - binary.LittleEndian.PutUint32(b[8:12], uint32(4+len(js)+1+5)) - binary.LittleEndian.PutUint32(b[12:16], uint32(len(js)+1)) - copy(b[16:36], []byte(js)) - b[36] = byte('\x00') - binary.LittleEndian.PutUint32(b[37:41], 5) - b[41] = byte('\x00') - return b - } - int32Bytes := func() []byte { - key := "foobar" - b := make([]byte, 1+6+1+4) - b[0] = '\x10' - copy(b[1:7], []byte(key)) - b[7] = '\x00' - binary.LittleEndian.PutUint32(b[8:], 12345) - return b - } - timestampBytes := func() []byte { - key := "foobar" - b := make([]byte, 1+6+1+8) - b[0] = '\x11' - copy(b[1:7], []byte(key)) - b[7] = '\x00' - binary.LittleEndian.PutUint64(b[8:], 123456) - return b - } - int64Bytes := func() []byte { - key := "foobar" - b := make([]byte, 1+6+1+8) - b[0] = '\x12' - copy(b[1:7], []byte(key)) - b[7] = '\x00' - binary.LittleEndian.PutUint64(b[8:], 1234567890) - return b - } - decimalBytes := func() []byte { - key := "foobar" - b := make([]byte, 1+6+1+8+8) - b[0] = '\x13' - copy(b[1:7], []byte(key)) - b[7] = '\x00' - binary.LittleEndian.PutUint64(b[8:16], 12345) - binary.LittleEndian.PutUint64(b[16:], 0) - return b - } - minKeyBytes := func() []byte { - key := "foobar" - b := make([]byte, 1+len(key)+1) - b[0] = '\xFF' - copy(b[1:7], []byte(key)) - b[7] = '\x00' - return b - } - maxKeyBytes := func() []byte { - key := "foobar" - b := make([]byte, 1+len(key)+1) - b[0] = '\x7F' - copy(b[1:7], []byte(key)) - b[7] = '\x00' - return b - } - - testCases := []struct { - name string - want 
ast.Element - b []byte - }{ - {"double", &ast.FloatElement{ - Name: &ast.ElementKeyName{Key: "foobar"}, Double: 3.14159}, - doubleBytes(), - }, - {"string", &ast.StringElement{ - Name: &ast.ElementKeyName{Key: "foobar"}, String: "bazqux"}, - stringBytes(), - }, - {"document", &ast.DocumentElement{ - Name: &ast.ElementKeyName{Key: "foobar"}, - Document: &ast.Document{Length: 5, EList: []ast.Element{}}}, - documentBytes(), - }, - {"array", &ast.ArrayElement{ - Name: &ast.ElementKeyName{Key: "foobar"}, - Array: &ast.Document{Length: 5, EList: []ast.Element{}}}, - arrayBytes(), - }, - {"binary", &ast.BinaryElement{ - Name: &ast.ElementKeyName{Key: "foobar"}, - Binary: &ast.Binary{ - Subtype: ast.SubtypeGeneric, Data: []byte{'\x00', '\x01', '\x02'}, - }}, - binaryBytes(), - }, - {"undefined", &ast.UndefinedElement{ - Name: &ast.ElementKeyName{Key: "foobar"}}, - undefinedBytes(), - }, - {"object-ID", &ast.ObjectIDElement{ - Name: &ast.ElementKeyName{Key: "foobar"}, - ID: [12]byte{ - '\x01', '\x02', '\x03', '\x04', - '\x05', '\x06', '\x07', '\x08', - '\x09', '\x10', '\x11', '\x12', - }}, - objectIDBytes(), - }, - {"boolean", &ast.BoolElement{ - Name: &ast.ElementKeyName{Key: "foobar"}, - Bool: true}, - boolBytes(), - }, - {"date-time", &ast.DateTimeElement{ - Name: &ast.ElementKeyName{Key: "foobar"}, - DateTime: 1234567890}, - datetimeBytes(), - }, - {"null", &ast.NullElement{ - Name: &ast.ElementKeyName{Key: "foobar"}}, - nullBytes(), - }, - {"regex", &ast.RegexElement{ - Name: &ast.ElementKeyName{Key: "foobar"}, - RegexPattern: &ast.CString{String: "hello"}, - RegexOptions: &ast.CString{String: "world"}}, - regexBytes(), - }, - {"db-pointer", &ast.DBPointerElement{ - Name: &ast.ElementKeyName{Key: "foobar"}, - String: "hello", - Pointer: [12]byte{ - '\x01', '\x02', '\x03', '\x04', - '\x05', '\x06', '\x07', '\x08', - '\x09', '\x10', '\x11', '\x12', - }}, - dbpointerBytes(), - }, - {"javascript", &ast.JavaScriptElement{ - Name: &ast.ElementKeyName{Key: "foobar"}, - String: `var hello = "world";`}, - javascriptBytes(), - }, - {"symbol", &ast.SymbolElement{ - Name: &ast.ElementKeyName{Key: "foobar"}, - String: `12345`}, - symbolBytes(), - }, - {"code-with-scope", &ast.CodeWithScopeElement{ - Name: &ast.ElementKeyName{Key: "foobar"}, - CodeWithScope: &ast.CodeWithScope{ - String: `var hello = "world";`, - Document: &ast.Document{ - Length: 5, - EList: []ast.Element{}, - }, - }}, - codewithscopeBytes(), - }, - {"int32", &ast.Int32Element{ - Name: &ast.ElementKeyName{Key: "foobar"}, - Int32: 12345}, - int32Bytes(), - }, - {"timestamp", &ast.TimestampElement{ - Name: &ast.ElementKeyName{Key: "foobar"}, - Timestamp: 123456}, - timestampBytes(), - }, - {"int64", &ast.Int64Element{ - Name: &ast.ElementKeyName{Key: "foobar"}, - Int64: 1234567890}, - int64Bytes(), - }, - {"decimal128", &ast.DecimalElement{ - Name: &ast.ElementKeyName{Key: "foobar"}, - Decimal128: decimal.NewDecimal128(0, 12345)}, - decimalBytes(), - }, - {"min-key", &ast.MinKeyElement{ - Name: &ast.ElementKeyName{Key: "foobar"}}, - minKeyBytes(), - }, - {"max-key", &ast.MaxKeyElement{ - Name: &ast.ElementKeyName{Key: "foobar"}}, - maxKeyBytes(), - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - r := bytes.NewReader(tc.b) - p := &Parser{r: bufio.NewReader(r)} - got, err := p.ParseElement() - if err != nil { - t.Errorf("Unexpected error: %s", err) - } - if !reflect.DeepEqual(got, tc.want) { - t.Errorf("Results don't match. 
got %#v; want %#v", got, tc.want) - } - }) - } -} - -type errReader struct { - b []byte - err error -} - -func (er *errReader) Read(b []byte) (int, error) { - if len(er.b) > 0 { - total := copy(b, er.b) - er.b = er.b[total:] - return total, nil - } - return 0, er.err -} diff --git a/vendor/github.com/evergreen-ci/birch/reader.go b/vendor/github.com/evergreen-ci/birch/reader.go index 324d2e58458..b797555ee18 100644 --- a/vendor/github.com/evergreen-ci/birch/reader.go +++ b/vendor/github.com/evergreen-ci/birch/reader.go @@ -47,6 +47,7 @@ func NewFromIOReader(r io.Reader) (Reader, error) { if length < 0 { return nil, bsonerr.InvalidLength } + reader := make([]byte, length) copy(reader, lengthBytes[:]) @@ -83,13 +84,16 @@ func (r Reader) Validate() (size uint32, err error) { func (r Reader) validateKey(pos, end uint32) (uint32, error) { // Read a CString, return the length, including the '\x00' var total uint32 + for ; pos < end && r[pos] != '\x00'; pos++ { total++ } + if pos == end || r[pos] != '\x00' { return total, bsonerr.InvalidKey } total++ + return total, nil } @@ -108,6 +112,7 @@ func (r Reader) RecursiveLookup(key ...string) (*Element, error) { } var elem *Element + _, err := r.readElements(func(e *Element) error { if key[0] == e.Key() { if len(key) > 1 { @@ -147,8 +152,11 @@ func (r Reader) RecursiveLookup(key ...string) (*Element, error) { // method will validate all the elements up to and including the element at // the given index. func (r Reader) ElementAt(index uint) (*Element, error) { - var current uint - var elem *Element + var ( + current uint + elem *Element + ) + _, err := r.readElements(func(e *Element) error { if current != index { current++ @@ -160,9 +168,11 @@ func (r Reader) ElementAt(index uint) (*Element, error) { if err != nil { return nil, err } + if elem == nil { return nil, bsonerr.OutOfBounds } + return elem, nil } @@ -183,8 +193,11 @@ func (r Reader) Keys(recursive bool) (Keys, error) { // String implements the fmt.Stringer interface. func (r Reader) String() string { var buf bytes.Buffer + buf.Write([]byte("bson.Reader{")) + idx := 0 + _, _ = r.readElements(func(elem *Element) error { if idx > 0 { buf.Write([]byte(", ")) @@ -193,6 +206,7 @@ func (r Reader) String() string { idx++ return nil }) + buf.WriteByte('}') return buf.String() @@ -202,10 +216,10 @@ func (r Reader) String() string { // // This method does not copy the bytes from r. func (r Reader) MarshalBSON() ([]byte, error) { - _, err := r.Validate() - if err != nil { + if _, err := r.Validate(); err != nil { return nil, err } + return r, nil } @@ -236,9 +250,11 @@ func (r Reader) recursiveKeys(recursive bool, prefix ...string) (Keys, error) { } return nil }) + if err != nil { return nil, err } + return ks, nil } @@ -259,40 +275,53 @@ func (r Reader) readElements(f func(e *Element) error) (uint32, error) { if len(r) < int(givenLength) || givenLength < 0 { return 0, bsonerr.InvalidLength } - var pos uint32 = 4 - var elemStart, elemValStart uint32 - var elem *Element + + pos := uint32(4) end := uint32(givenLength) + + var ( + elemStart uint32 + elemValStart uint32 + elem *Element + ) + for { if pos >= end { // We've gone off the end of the buffer and we're missing // a null terminator. 
				return pos, bsonerr.InvalidReadOnlyDocument
 		}
+
 		if r[pos] == '\x00' {
 			break
 		}
+
 		elemStart = pos
 		pos++
 		n, err := r.validateKey(pos, end)
 		pos += n
+
 		if err != nil {
 			return pos, err
 		}
+
 		elemValStart = pos
 		elem = newElement(elemStart, elemValStart)
 		elem.value.data = r
 		n, err = elem.value.validate(true)
 		pos += n
+
 		if err != nil {
 			return pos, err
 		}
+
 		if f != nil {
 			err = f(elem)
 			if err != nil {
 				if err == errValidateDone {
 					break
 				}
+
 				return pos, err
 			}
 		}
@@ -301,7 +330,6 @@ func (r Reader) readElements(f func(e *Element) error) (uint32, error) {
 	// The size is always 1 larger than the position, since position is 0
 	// indexed.
 	return pos + 1, nil
-
 }

 // Keys represents the keys of a BSON document.
@@ -320,6 +348,7 @@ func (k Key) String() string {
 	if str != "" {
 		return str + "." + k.Name
 	}
+
 	return k.Name
 }
diff --git a/vendor/github.com/evergreen-ci/birch/reader_test.go b/vendor/github.com/evergreen-ci/birch/reader_test.go
index bb67fa849ee..18ea752c177 100644
--- a/vendor/github.com/evergreen-ci/birch/reader_test.go
+++ b/vendor/github.com/evergreen-ci/birch/reader_test.go
@@ -14,8 +14,8 @@ import (
 	"reflect"
 	"testing"

-	"github.com/google/go-cmp/cmp"
 	"github.com/evergreen-ci/birch/bsonerr"
+	"github.com/google/go-cmp/cmp"
 	"github.com/stretchr/testify/require"
 )

@@ -34,7 +34,6 @@ func BenchmarkReaderValidate(b *testing.B) {
 		rdr[250], rdr[251], rdr[252], rdr[253], rdr[254] = '\x05', '\x00', '\x00', '\x00', '\x00'
 		_, _ = rdr[250:].Validate()
 	}
-
 }

 func TestReader(t *testing.T) {
@@ -541,9 +540,11 @@ func readerElementEqual(e1, e2 *Element) bool {
 	if e1.value.start != e2.value.start {
 		return false
 	}
+
 	if e1.value.offset != e2.value.offset {
 		return false
 	}
+
 	return true
 }

@@ -552,10 +553,12 @@ func readerElementComparer(e1, e2 *Element) bool {
 	if err != nil {
 		return false
 	}
+
 	b2, err := e2.MarshalBSON()
 	if err != nil {
 		return false
 	}
+
 	if !bytes.Equal(b1, b2) {
 		return false
 	}
@@ -564,5 +567,5 @@ func readerElementComparer(e1, e2 *Element) bool {
 }

 func fromElement(e *Element) *Element {
-	return (*Element)(e)
+	return e
 }
diff --git a/vendor/github.com/evergreen-ci/birch/types/objectid.go b/vendor/github.com/evergreen-ci/birch/types/objectid.go
index 8eb20873f7c..093126c3888 100644
--- a/vendor/github.com/evergreen-ci/birch/types/objectid.go
+++ b/vendor/github.com/evergreen-ci/birch/types/objectid.go
@@ -60,9 +60,9 @@ func (id ObjectID) IsZero() bool {
 	return bytes.Equal(id[:], NilObjectID[:])
 }

-// FromHex creates a new ObjectID from a hex string. It returns an error if the hex string is not a
+// ObjectIDFromHex creates a new ObjectID from a hex string. It returns an error if the hex string is not a
 // valid ObjectID.
-func FromHex(s string) (ObjectID, error) {
+func ObjectIDFromHex(s string) (ObjectID, error) {
 	b, err := hex.DecodeString(s)
 	if err != nil {
 		return NilObjectID, err
@@ -73,11 +73,23 @@ func FromHex(s string) (ObjectID, error) {
 	}

 	var oid [12]byte
-	copy(oid[:], b[:])
+
+	copy(oid[:], b)

 	return oid, nil
 }

+// MustObjectIDFromHex builds an ObjectID, panicking if the hex string
+// isn't valid.
+func MustObjectIDFromHex(s string) ObjectID {
+	oid, err := ObjectIDFromHex(s)
+	if err != nil {
+		panic(err)
+	}
+
+	return oid
+}
+
 // MarshalJSON returns the ObjectID as a string
 func (id ObjectID) MarshalJSON() ([]byte, error) {
 	return json.Marshal(id.Hex())
@@ -89,6 +101,7 @@ func (id ObjectID) MarshalJSON() ([]byte, error) {
 // return an error.
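For callers migrating off the old FromHex name, the rename above yields an error-returning form plus a panicking Must variant. A minimal usage sketch follows; the hex literal and the main wrapper are illustrative only, while Hex and IsZero appear in the surrounding context:

package main

import (
	"fmt"

	"github.com/evergreen-ci/birch/types"
)

func main() {
	// The error-returning form suits untrusted input.
	id, err := types.ObjectIDFromHex("5a934e000102030405060708")
	if err != nil {
		fmt.Println("bad object id:", err)
		return
	}
	fmt.Println(id.Hex())

	// The Must form panics on malformed input, which keeps static
	// fixtures (for example, in tests) free of error plumbing.
	fixture := types.MustObjectIDFromHex("5a934e000102030405060708")
	fmt.Println(fixture.IsZero()) // false
}

The Must-style constructor is a common Go pattern for values that are hard-coded at compile time, where a malformed literal is a programmer error rather than a runtime condition.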
func (id *ObjectID) UnmarshalJSON(b []byte) error {
 	var err error
+
 	switch len(b) {
 	case 12:
 		copy(id[:], b)
@@ -96,19 +109,23 @@ func (id *ObjectID) UnmarshalJSON(b []byte) error {
 		// Extended JSON
 		var res interface{}
 		err := json.Unmarshal(b, &res)
+
 		if err != nil {
 			return err
 		}
+
 		str, ok := res.(string)
 		if !ok {
 			m, ok := res.(map[string]interface{})
 			if !ok {
 				return errors.New("not an extended JSON ObjectID")
 			}
+
 			oid, ok := m["$oid"]
 			if !ok {
 				return errors.New("not an extended JSON ObjectID")
 			}
+
 			str, ok = oid.(string)
 			if !ok {
 				return errors.New("not an extended JSON ObjectID")
@@ -130,8 +147,8 @@ func (id *ObjectID) UnmarshalJSON(b []byte) error {

 func processUniqueBytes() [5]byte {
 	var b [5]byte
-	_, err := io.ReadFull(rand.Reader, b[:])
-	if err != nil {
+
+	if _, err := io.ReadFull(rand.Reader, b[:]); err != nil {
 		panic(errors.Errorf("cannot initialize objectid package with crypto.rand.Reader: %v", err))
 	}

@@ -140,8 +157,8 @@ func processUniqueBytes() [5]byte {

 func readRandomUint32() uint32 {
 	var b [4]byte
-	_, err := io.ReadFull(rand.Reader, b[:])
-	if err != nil {
+
+	if _, err := io.ReadFull(rand.Reader, b[:]); err != nil {
 		panic(errors.Errorf("cannot initialize objectid package with crypto.rand.Reader: %v", err))
 	}

diff --git a/vendor/github.com/evergreen-ci/birch/types/objectid_test.go b/vendor/github.com/evergreen-ci/birch/types/objectid_test.go
index 35dc0d0a50f..900db0cb202 100644
--- a/vendor/github.com/evergreen-ci/birch/types/objectid_test.go
+++ b/vendor/github.com/evergreen-ci/birch/types/objectid_test.go
@@ -69,15 +69,17 @@ func TestTimeStamp(t *testing.T) {
 	for _, testcase := range testCases {
-		id, err := FromHex(testcase.Hex)
+		id, err := ObjectIDFromHex(testcase.Hex)
 		require.NoError(t, err)
+
 		secs := int64(binary.BigEndian.Uint32(id[0:4]))
 		timestamp := time.Unix(secs, 0).UTC()
 		require.Equal(t, testcase.Expected, timestamp.String())
 	}
-
 }

 func TestCounterOverflow(t *testing.T) {
 	objectIDCounter = 0xFFFFFFFF
-	NewObjectID()
+
+	_ = NewObjectID()
+
 	require.Equal(t, uint32(0), objectIDCounter)
 }
diff --git a/vendor/github.com/evergreen-ci/birch/value.go b/vendor/github.com/evergreen-ci/birch/value.go
index 5791eab60e4..9b26fffe5eb 100644
--- a/vendor/github.com/evergreen-ci/birch/value.go
+++ b/vendor/github.com/evergreen-ci/birch/value.go
@@ -40,6 +40,8 @@ type Value struct {
 	d *Document
 }

+// Copy constructs an entirely new value object with the same data as
+// the original.
 func (v *Value) Copy() *Value {
 	return &Value{
 		start: v.start,
@@ -49,6 +51,15 @@
 	}
 }

+// Set changes the internal representation of a value to match the
+// internal representation of a second value.
+func (v *Value) Set(v2 *Value) {
+	v.start = v2.start
+	v.offset = v2.offset
+	v.data = v2.data
+	v.d = v2.d
+}
+
 // Interface returns the Go value of this Value as an empty interface.
// // For embedded documents and arrays, Interface will convert the @@ -96,6 +107,7 @@ func (v *Value) Interface() interface{} { case bsontype.CodeWithScope: code, scope := v.MutableJavaScriptWithScope() val, _ := scope.MarshalBSON() + return types.CodeWithScope{Code: code, Scope: val} case bsontype.Int32: return v.Int32() @@ -134,13 +146,16 @@ func (v *Value) validate(sizeOnly bool) (uint32, error) { if int(v.offset+8) > len(v.data) { return total, newErrTooSmall() } + total += 8 case '\x02', '\x0D', '\x0E': if int(v.offset+4) > len(v.data) { return total, newErrTooSmall() } + l := readi32(v.data[v.offset : v.offset+4]) total += 4 + if int32(v.offset)+4+l > int32(len(v.data)) { return total, newErrTooSmall() } @@ -151,90 +166,115 @@ func (v *Value) validate(sizeOnly bool) (uint32, error) { if !sizeOnly && v.data[v.offset+4+uint32(l)-1] != 0x00 { return total, bsonerr.InvalidString } + total += uint32(l) case '\x03': if v.d != nil { n, err := v.d.Validate() total += n + if err != nil { return total, err } + break } if int(v.offset+4) > len(v.data) { return total, newErrTooSmall() } + l := readi32(v.data[v.offset : v.offset+4]) total += 4 + if l < 5 { return total, bsonerr.InvalidReadOnlyDocument } + if int32(v.offset)+l > int32(len(v.data)) { return total, newErrTooSmall() } + if !sizeOnly { n, err := Reader(v.data[v.offset : v.offset+uint32(l)]).Validate() total += n - 4 + if err != nil { return total, err } + break } + total += uint32(l) - 4 case '\x04': if v.d != nil { n, err := (&Array{v.d}).Validate() total += n + if err != nil { return total, err } + break } if int(v.offset+4) > len(v.data) { return total, newErrTooSmall() } + l := readi32(v.data[v.offset : v.offset+4]) total += 4 + if l < 5 { return total, bsonerr.InvalidReadOnlyDocument } + if int32(v.offset)+l > int32(len(v.data)) { return total, newErrTooSmall() } + if !sizeOnly { n, err := Reader(v.data[v.offset : v.offset+uint32(l)]).Validate() total += n - 4 + if err != nil { return total, err } + break } + total += uint32(l) - 4 case '\x05': if int(v.offset+5) > len(v.data) { return total, newErrTooSmall() } + l := readi32(v.data[v.offset : v.offset+4]) total += 5 + if v.data[v.offset+4] > '\x05' && v.data[v.offset+4] < '\x80' { return total, bsonerr.InvalidBinarySubtype } + if int32(v.offset)+5+l > int32(len(v.data)) { return total, newErrTooSmall() } + total += uint32(l) case '\x07': if int(v.offset+12) > len(v.data) { return total, newErrTooSmall() } + total += 12 case '\x08': if int(v.offset+1) > len(v.data) { return total, newErrTooSmall() } total++ + if v.data[v.offset] != '\x00' && v.data[v.offset] != '\x01' { return total, bsonerr.InvalidBooleanType } @@ -242,20 +282,24 @@ func (v *Value) validate(sizeOnly bool) (uint32, error) { if int(v.offset+8) > len(v.data) { return total, newErrTooSmall() } + total += 8 case '\x0B': i := v.offset for ; int(i) < len(v.data) && v.data[i] != '\x00'; i++ { total++ } + if int(i) == len(v.data) || v.data[i] != '\x00' { return total, bsonerr.InvalidString } i++ total++ + for ; int(i) < len(v.data) && v.data[i] != '\x00'; i++ { total++ } + if int(i) == len(v.data) || v.data[i] != '\x00' { return total, bsonerr.InvalidString } @@ -264,11 +308,14 @@ func (v *Value) validate(sizeOnly bool) (uint32, error) { if int(v.offset+4) > len(v.data) { return total, newErrTooSmall() } + l := readi32(v.data[v.offset : v.offset+4]) total += 4 + if int32(v.offset)+4+l+12 > int32(len(v.data)) { return total, newErrTooSmall() } + total += uint32(l) + 12 case '\x0F': if v.d != nil { @@ -280,31 +327,41 @@ 
func (v *Value) validate(sizeOnly bool) (uint32, error) { if int(v.offset+8) > len(v.data) { return total, newErrTooSmall() } + total += 8 sLength := readi32(v.data[v.offset+4 : v.offset+8]) + if int(sLength) > len(v.data)+8 { return total, newErrTooSmall() } + total += uint32(sLength) + if !sizeOnly && v.data[v.offset+8+uint32(sLength)-1] != 0x00 { return total, bsonerr.InvalidString } n, err := v.d.Validate() total += n + if err != nil { return total, err } + break } + if int(v.offset+4) > len(v.data) { return total, newErrTooSmall() } + l := readi32(v.data[v.offset : v.offset+4]) total += 4 + if int32(v.offset)+l > int32(len(v.data)) { return total, newErrTooSmall() } + if !sizeOnly { sLength := readi32(v.data[v.offset+4 : v.offset+8]) total += 4 @@ -324,31 +381,37 @@ func (v *Value) validate(sizeOnly bool) (uint32, error) { if v.data[v.offset+8+uint32(sLength)-1] != 0x00 { return total, bsonerr.InvalidString } + total += uint32(sLength) n, err := Reader(v.data[v.offset+8+uint32(sLength) : v.offset+uint32(l)]).Validate() total += n + if err != nil { return total, err } + break } + total += uint32(l) - 4 case '\x10': if int(v.offset+4) > len(v.data) { return total, newErrTooSmall() } + total += 4 case '\x11', '\x12': if int(v.offset+8) > len(v.data) { return total, newErrTooSmall() } + total += 8 case '\x13': if int(v.offset+16) > len(v.data) { return total, newErrTooSmall() } - total += 16 + total += 16 default: return total, bsonerr.InvalidElement } @@ -367,6 +430,7 @@ func (v *Value) Type() bsontype.Type { if v == nil || v.offset == 0 || v.data == nil { panic(bsonerr.UninitializedElement) } + return bsontype.Type(v.data[v.start]) } @@ -376,9 +440,11 @@ func (v *Value) Double() float64 { if v == nil || v.offset == 0 || v.data == nil { panic(bsonerr.UninitializedElement) } + if v.data[v.start] != '\x01' { - panic(bsonerr.ElementType{"compact.Element.double", bsontype.Type(v.data[v.start])}) + panic(bsonerr.NewElementTypeError("compact.Element.double", bsontype.Type(v.data[v.start]))) } + return math.Float64frombits(v.getUint64()) } @@ -387,6 +453,7 @@ func (v *Value) DoubleOK() (float64, bool) { if v == nil || v.offset == 0 || v.data == nil || bsontype.Type(v.data[v.start]) != bsontype.Double { return 0, false } + return v.Double(), true } @@ -399,10 +466,13 @@ func (v *Value) StringValue() string { if v == nil || v.offset == 0 || v.data == nil { panic(bsonerr.UninitializedElement) } + if v.data[v.start] != '\x02' { - panic(bsonerr.ElementType{"compact.Element.String", bsontype.Type(v.data[v.start])}) + panic(bsonerr.NewElementTypeError("compact.Element.String", bsontype.Type(v.data[v.start]))) } + l := readi32(v.data[v.offset : v.offset+4]) + return string(v.data[v.offset+4 : int32(v.offset)+4+l-1]) } @@ -412,6 +482,7 @@ func (v *Value) StringValueOK() (string, bool) { if v == nil || v.offset == 0 || v.data == nil || bsontype.Type(v.data[v.start]) != bsontype.String { return "", false } + return v.StringValue(), true } @@ -423,7 +494,7 @@ func (v *Value) ReaderDocument() Reader { } if v.data[v.start] != '\x03' { - panic(bsonerr.ElementType{"compact.Element.Document", bsontype.Type(v.data[v.start])}) + panic(bsonerr.NewElementTypeError("compact.Element.Document", bsontype.Type(v.data[v.start]))) } return v.getReader() @@ -442,6 +513,7 @@ func (v *Value) Reader() Reader { func (v *Value) getReader() Reader { var r Reader + if v.d == nil { l := readi32(v.data[v.offset : v.offset+4]) r = Reader(v.data[v.offset : v.offset+uint32(l)]) @@ -463,6 +535,7 @@ func (v *Value) ReaderDocumentOK() 
(Reader, bool) { if v == nil || v.offset == 0 || v.data == nil || bsontype.Type(v.data[v.start]) != bsontype.EmbeddedDocument { return nil, false } + return v.ReaderDocument(), true } @@ -471,17 +544,22 @@ func (v *Value) MutableDocument() *Document { if v == nil || v.offset == 0 || v.data == nil { panic(bsonerr.UninitializedElement) } + if v.data[v.start] != '\x03' { - panic(bsonerr.ElementType{"compact.Element.Document", bsontype.Type(v.data[v.start])}) + panic(bsonerr.NewElementTypeError("compact.Element.Document", bsontype.Type(v.data[v.start]))) } + if v.d == nil { var err error + l := int32(binary.LittleEndian.Uint32(v.data[v.offset : v.offset+4])) + v.d, err = ReadDocument(v.data[v.offset : v.offset+uint32(l)]) if err != nil { panic(err) } } + return v.d } @@ -491,6 +569,7 @@ func (v *Value) MutableDocumentOK() (*Document, bool) { if v == nil || v.offset == 0 || v.data == nil || bsontype.Type(v.data[v.start]) != bsontype.EmbeddedDocument { return nil, false } + return v.MutableDocument(), true } @@ -502,7 +581,7 @@ func (v *Value) ReaderArray() Reader { } if v.data[v.start] != '\x04' { - panic(bsonerr.ElementType{"compact.Element.Array", bsontype.Type(v.data[v.start])}) + panic(bsonerr.NewElementTypeError("compact.Element.Array", bsontype.Type(v.data[v.start]))) } return v.getReader() @@ -514,6 +593,7 @@ func (v *Value) ReaderArrayOK() (Reader, bool) { if v == nil || v.offset == 0 || v.data == nil || bsontype.Type(v.data[v.start]) != bsontype.Array { return nil, false } + return v.ReaderArray(), true } @@ -522,17 +602,22 @@ func (v *Value) MutableArray() *Array { if v == nil || v.offset == 0 || v.data == nil { panic(bsonerr.UninitializedElement) } + if v.data[v.start] != '\x04' { - panic(bsonerr.ElementType{"compact.Element.Array", bsontype.Type(v.data[v.start])}) + panic(bsonerr.NewElementTypeError("compact.Element.Array", bsontype.Type(v.data[v.start]))) } + if v.d == nil { var err error + l := int32(binary.LittleEndian.Uint32(v.data[v.offset : v.offset+4])) v.d, err = ReadDocument(v.data[v.offset : v.offset+uint32(l)]) + if err != nil { panic(err) } } + return &Array{v.d} } @@ -542,6 +627,7 @@ func (v *Value) MutableArrayOK() (*Array, bool) { if v == nil || v.offset == 0 || v.data == nil || bsontype.Type(v.data[v.start]) != bsontype.Array { return nil, false } + return v.MutableArray(), true } @@ -551,18 +637,23 @@ func (v *Value) Binary() (subtype byte, data []byte) { if v == nil || v.offset == 0 || v.data == nil { panic(bsonerr.UninitializedElement) } + if v.data[v.start] != '\x05' { - panic(bsonerr.ElementType{"compact.Element.binary", bsontype.Type(v.data[v.start])}) + panic(bsonerr.NewElementTypeError("compact.Element.binary", bsontype.Type(v.data[v.start]))) } + l := readi32(v.data[v.offset : v.offset+4]) st := v.data[v.offset+4] offset := uint32(5) + if st == 0x02 { offset += 4 l = readi32(v.data[v.offset+5 : v.offset+9]) } + b := make([]byte, l) copy(b, v.data[v.offset+offset:int32(v.offset)+int32(offset)+l]) + return st, b } @@ -572,7 +663,9 @@ func (v *Value) BinaryOK() (subtype byte, data []byte, ok bool) { if v == nil || v.offset == 0 || v.data == nil || bsontype.Type(v.data[v.start]) != bsontype.Binary { return 0x00, nil, false } + st, b := v.Binary() + return st, b, true } @@ -582,11 +675,15 @@ func (v *Value) ObjectID() types.ObjectID { if v == nil || v.offset == 0 || v.data == nil { panic(bsonerr.UninitializedElement) } + if v.data[v.start] != '\x07' { - panic(bsonerr.ElementType{"compact.Element.ObejctID", bsontype.Type(v.data[v.start])}) + 
panic(bsonerr.NewElementTypeError("compact.Element.ObejctID", bsontype.Type(v.data[v.start]))) } + var arr [12]byte + copy(arr[:], v.data[v.offset:v.offset+12]) + return arr } @@ -594,9 +691,11 @@ func (v *Value) ObjectID() types.ObjectID { // panicking. func (v *Value) ObjectIDOK() (types.ObjectID, bool) { var empty types.ObjectID + if v == nil || v.offset == 0 || v.data == nil || bsontype.Type(v.data[v.start]) != bsontype.ObjectID { return empty, false } + return v.ObjectID(), true } @@ -606,9 +705,11 @@ func (v *Value) Boolean() bool { if v == nil || v.offset == 0 || v.data == nil { panic(bsonerr.UninitializedElement) } + if v.data[v.start] != '\x08' { - panic(bsonerr.ElementType{"compact.Element.Boolean", bsontype.Type(v.data[v.start])}) + panic(bsonerr.NewElementTypeError("compact.Element.Boolean", bsontype.Type(v.data[v.start]))) } + return v.data[v.offset] == '\x01' } @@ -618,6 +719,7 @@ func (v *Value) BooleanOK() (bool, bool) { if v == nil || v.offset == 0 || v.data == nil || bsontype.Type(v.data[v.start]) != bsontype.Boolean { return false, false } + return v.Boolean(), true } @@ -627,9 +729,11 @@ func (v *Value) DateTime() int64 { if v == nil || v.offset == 0 || v.data == nil { panic(bsonerr.UninitializedElement) } + if v.data[v.start] != '\x09' { - panic(bsonerr.ElementType{"compact.Element.dateTime", bsontype.Type(v.data[v.start])}) + panic(bsonerr.NewElementTypeError("compact.Element.dateTime", bsontype.Type(v.data[v.start]))) } + return int64(v.getUint64()) } @@ -646,6 +750,7 @@ func (v *Value) TimeOK() (time.Time, bool) { if v == nil || v.offset == 0 || v.data == nil || bsontype.Type(v.data[v.start]) != bsontype.DateTime { return time.Time{}, false } + return v.Time(), true } @@ -655,20 +760,31 @@ func (v *Value) Regex() (pattern, options string) { if v == nil || v.offset == 0 || v.data == nil { panic(bsonerr.UninitializedElement) } + if v.data[v.start] != '\x0B' { - panic(bsonerr.ElementType{"compact.Element.regex", bsontype.Type(v.data[v.start])}) + panic(bsonerr.NewElementTypeError("compact.Element.regex", bsontype.Type(v.data[v.start]))) } // TODO(skriptble): Use the elements package here. - var pstart, pend, ostart, oend uint32 + var ( + pstart uint32 + pend uint32 + ostart uint32 + oend uint32 + ) + i := v.offset pstart = i + for ; v.data[i] != '\x00'; i++ { } + pend = i i++ ostart = i + for ; v.data[i] != '\x00'; i++ { } + oend = i return string(v.data[pstart:pend]), string(v.data[ostart:oend]) @@ -680,6 +796,7 @@ func (v *Value) DateTimeOK() (int64, bool) { if v == nil || v.offset == 0 || v.data == nil || bsontype.Type(v.data[v.start]) != bsontype.DateTime { return 0, false } + return v.DateTime(), true } @@ -689,12 +806,17 @@ func (v *Value) DBPointer() (string, types.ObjectID) { if v == nil || v.offset == 0 || v.data == nil { panic(bsonerr.UninitializedElement) } + if v.data[v.start] != '\x0C' { - panic(bsonerr.ElementType{"compact.Element.dbPointer", bsontype.Type(v.data[v.start])}) + panic(bsonerr.NewElementTypeError("compact.Element.dbPointer", bsontype.Type(v.data[v.start]))) } + l := readi32(v.data[v.offset : v.offset+4]) + var p [12]byte + copy(p[:], v.data[v.offset+4+uint32(l):v.offset+4+uint32(l)+12]) + return string(v.data[v.offset+4 : int32(v.offset)+4+l-1]), p } @@ -702,10 +824,13 @@ func (v *Value) DBPointer() (string, types.ObjectID) { // instead of panicking. 
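These mechanical conversions preserve birch's dual accessor convention: each typed getter panics with an element-type error on a mismatch, while its OK variant reports failure through a second boolean return. A minimal sketch of the difference, assuming birch's EC.Boolean element constructor and Element.Value accessor (neither is shown in this diff):

package main

import (
	"fmt"

	"github.com/evergreen-ci/birch"
)

func main() {
	// Assumed API: EC.Boolean builds a boolean element and Value
	// exposes its *birch.Value.
	v := birch.EC.Boolean("verbose", true).Value()

	// The OK variant reports a type mismatch instead of panicking.
	if b, ok := v.BooleanOK(); ok {
		fmt.Println("verbose:", b)
	}

	// IntOK fails cleanly because the underlying value is a boolean,
	// not an int32 or int64...
	if _, ok := v.IntOK(); !ok {
		fmt.Println("not an integer value")
	}

	// ...whereas v.Int() would panic with the element-type error
	// constructed above.
}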
func (v *Value) DBPointerOK() (string, types.ObjectID, bool) { var empty types.ObjectID + if v == nil || v.offset == 0 || v.data == nil || bsontype.Type(v.data[v.start]) != bsontype.DBPointer { return "", empty, false } + s, o := v.DBPointer() + return s, o, true } @@ -715,10 +840,13 @@ func (v *Value) JavaScript() string { if v == nil || v.offset == 0 || v.data == nil { panic(bsonerr.UninitializedElement) } + if v.data[v.start] != '\x0D' { - panic(bsonerr.ElementType{"compact.Element.JavaScript", bsontype.Type(v.data[v.start])}) + panic(bsonerr.NewElementTypeError("compact.Element.JavaScript", bsontype.Type(v.data[v.start]))) } + l := readi32(v.data[v.offset : v.offset+4]) + return string(v.data[v.offset+4 : int32(v.offset)+4+l-1]) } @@ -728,6 +856,7 @@ func (v *Value) JavaScriptOK() (string, bool) { if v == nil || v.offset == 0 || v.data == nil || bsontype.Type(v.data[v.start]) != bsontype.JavaScript { return "", false } + return v.JavaScript(), true } @@ -737,10 +866,13 @@ func (v *Value) Symbol() string { if v == nil || v.offset == 0 || v.data == nil { panic(bsonerr.UninitializedElement) } + if v.data[v.start] != '\x0E' { - panic(bsonerr.ElementType{"compact.Element.symbol", bsontype.Type(v.data[v.start])}) + panic(bsonerr.NewElementTypeError("compact.Element.symbol", bsontype.Type(v.data[v.start]))) } + l := readi32(v.data[v.offset : v.offset+4]) + return string(v.data[v.offset+4 : int32(v.offset)+4+l-1]) } @@ -753,7 +885,7 @@ func (v *Value) ReaderJavaScriptWithScope() (string, Reader) { } if v.data[v.start] != '\x0F' { - panic(bsonerr.ElementType{"compact.Element.JavaScriptWithScope", bsontype.Type(v.data[v.start])}) + panic(bsonerr.NewElementTypeError("compact.Element.JavaScriptWithScope", bsontype.Type(v.data[v.start]))) } sLength := readi32(v.data[v.offset+4 : v.offset+8]) @@ -763,6 +895,7 @@ func (v *Value) ReaderJavaScriptWithScope() (string, Reader) { str := string(v.data[v.offset+8 : v.offset+8+uint32(sLength)-1]) var r Reader + if v.d == nil { l := readi32(v.data[v.offset : v.offset+4]) r = Reader(v.data[v.offset+8+uint32(sLength) : v.offset+uint32(l)]) @@ -784,7 +917,9 @@ func (v *Value) ReaderJavaScriptWithScopeOK() (string, Reader, bool) { if v == nil || v.offset == 0 || v.data == nil || bsontype.Type(v.data[v.start]) != bsontype.CodeWithScope { return "", nil, false } + s, r := v.ReaderJavaScriptWithScope() + return s, r, true } @@ -794,8 +929,9 @@ func (v *Value) MutableJavaScriptWithScope() (code string, d *Document) { if v == nil || v.offset == 0 { panic(bsonerr.UninitializedElement) } + if v.data[v.start] != '\x0F' { - panic(bsonerr.ElementType{"compact.Element.JavaScriptWithScope", bsontype.Type(v.data[v.start])}) + panic(bsonerr.NewElementTypeError("compact.Element.JavaScriptWithScope", bsontype.Type(v.data[v.start]))) } // TODO(skriptble): This is wrong and could cause a panic. l := int32(binary.LittleEndian.Uint32(v.data[v.offset : v.offset+4])) @@ -805,13 +941,16 @@ func (v *Value) MutableJavaScriptWithScope() (code string, d *Document) { // field minus the int32 for length, 5 bytes for a minimum document // size, and an int32 for the string length the value is invalid. 
str := string(v.data[v.offset+4+4 : v.offset+4+4+uint32(sLength)-1]) // offset + total length + string length + bytes - null byte + if v.d == nil { var err error + v.d, err = ReadDocument(v.data[v.offset+4+4+uint32(sLength) : v.offset+uint32(l)]) if err != nil { panic(err) } } + return str, v.d } @@ -821,7 +960,9 @@ func (v *Value) MutableJavaScriptWithScopeOK() (string, *Document, bool) { if v == nil || v.offset == 0 || v.data == nil || bsontype.Type(v.data[v.start]) != bsontype.CodeWithScope { return "", nil, false } + s, d := v.MutableJavaScriptWithScope() + return s, d, true } @@ -831,9 +972,11 @@ func (v *Value) Int32() int32 { if v == nil || v.offset == 0 || v.data == nil { panic(bsonerr.UninitializedElement) } + if v.data[v.start] != '\x10' { - panic(bsonerr.ElementType{"compact.Element.int32", bsontype.Type(v.data[v.start])}) + panic(bsonerr.NewElementTypeError("compact.Element.int32", bsontype.Type(v.data[v.start]))) } + return readi32(v.data[v.offset : v.offset+4]) } @@ -843,6 +986,7 @@ func (v *Value) Int32OK() (int32, bool) { if v == nil || v.offset == 0 || v.data == nil || bsontype.Type(v.data[v.start]) != bsontype.Int32 { return 0, false } + return v.Int32(), true } @@ -852,9 +996,11 @@ func (v *Value) Timestamp() (uint32, uint32) { if v == nil || v.offset == 0 || v.data == nil { panic(bsonerr.UninitializedElement) } + if bsontype.Type(v.data[v.start]) != bsontype.Timestamp { - panic(bsonerr.ElementType{"compact.Element.timestamp", bsontype.Type(v.data[v.start])}) + panic(bsonerr.NewElementTypeError("compact.Element.timestamp", bsontype.Type(v.data[v.start]))) } + return binary.LittleEndian.Uint32(v.data[v.offset+4 : v.offset+8]), binary.LittleEndian.Uint32(v.data[v.offset : v.offset+4]) } @@ -864,7 +1010,9 @@ func (v *Value) TimestampOK() (uint32, uint32, bool) { if v == nil || v.offset == 0 || v.data == nil || bsontype.Type(v.data[v.start]) != bsontype.Timestamp { return 0, 0, false } + t, i := v.Timestamp() + return t, i, true } @@ -874,9 +1022,11 @@ func (v *Value) Int64() int64 { if v == nil || v.offset == 0 || v.data == nil { panic(bsonerr.UninitializedElement) } + if v.data[v.start] != '\x12' { - panic(bsonerr.ElementType{"compact.Element.int64Type", bsontype.Type(v.data[v.start])}) + panic(bsonerr.NewElementTypeError("compact.Element.int64Type", bsontype.Type(v.data[v.start]))) } + return int64(v.getUint64()) } @@ -884,6 +1034,9 @@ func (v *Value) getUint64() uint64 { return binary.LittleEndian.Uint64(v.data[v.offset : v.offset+8]) } +// Int returns a flexible integer value, from an underlying bson value +// that is either an int32 or an int64. Int() panics if the value is +// a different type. func (v *Value) Int() int { if val, ok := v.Int32OK(); ok { return int(val) @@ -893,9 +1046,13 @@ func (v *Value) Int() int { return int(val) } - panic(bsonerr.ElementType{"int", bsontype.Type(v.data[v.start])}) + panic(bsonerr.NewElementTypeError("int", bsontype.Type(v.data[v.start]))) } +// IntOK returns a flexible integer value from an underlying bson +// value that is either an int32 or int64. The second value is false +// when the underlying type is a different type, or the value is +// invalid. 
func (v *Value) IntOK() (int, bool) { if v == nil || v.offset == 0 || v.data == nil { return 0, false @@ -914,6 +1071,7 @@ func (v *Value) Int64OK() (int64, bool) { if v == nil || v.offset == 0 || v.data == nil || bsontype.Type(v.data[v.start]) != bsontype.Int64 { return 0, false } + return v.Int64(), true } @@ -923,12 +1081,14 @@ func (v *Value) Decimal128() decimal.Decimal128 { if v == nil || v.offset == 0 || v.data == nil { panic(bsonerr.UninitializedElement) } + if v.data[v.start] != '\x13' { - panic(bsonerr.ElementType{"compact.Element.Decimal128", bsontype.Type(v.data[v.start])}) + panic(bsonerr.NewElementTypeError("compact.Element.Decimal128", bsontype.Type(v.data[v.start]))) } - l := binary.LittleEndian.Uint64(v.data[v.offset : v.offset+8]) - h := binary.LittleEndian.Uint64(v.data[v.offset+8 : v.offset+16]) - return decimal.NewDecimal128(h, l) + + return decimal.NewDecimal128( + binary.LittleEndian.Uint64(v.data[v.offset:v.offset+8]), + binary.LittleEndian.Uint64(v.data[v.offset+8:v.offset+16])) } // Decimal128OK is the same as Decimal128, except that it returns a boolean @@ -937,6 +1097,7 @@ func (v *Value) Decimal128OK() (decimal.Decimal128, bool) { if v == nil || v.offset == 0 || v.data == nil || bsontype.Type(v.data[v.start]) != bsontype.Decimal128 { return decimal.NewDecimal128(0, 0), false } + return v.Decimal128(), true } @@ -964,6 +1125,7 @@ func (v *Value) Equal(v2 *Value) bool { if err != nil { return false } + data2, err := v2.docToBytes(t2) if err != nil { return false @@ -987,10 +1149,12 @@ func (v *Value) docToBytes(t bsontype.Type) ([]byte, error) { if err != nil { return nil, err } + code, _, ok := readJavaScriptValue(v.data[v.offset+4:]) if !ok { return nil, errors.New("invalid code component") } + return appendCodeWithScope(nil, code, scope), nil default: return v.data[v.offset:], nil diff --git a/vendor/github.com/evergreen-ci/birch/x_array.go b/vendor/github.com/evergreen-ci/birch/x_array.go index f9727a93228..0b29544b28c 100644 --- a/vendor/github.com/evergreen-ci/birch/x_array.go +++ b/vendor/github.com/evergreen-ci/birch/x_array.go @@ -10,5 +10,6 @@ func (a *Array) Interface() []interface{} { for iter.Next() { out = append(out, iter.Value().Interface()) } + return out } diff --git a/vendor/github.com/evergreen-ci/birch/x_constructor.go b/vendor/github.com/evergreen-ci/birch/x_constructor.go index 6d7dada599e..80b390620b4 100644 --- a/vendor/github.com/evergreen-ci/birch/x_constructor.go +++ b/vendor/github.com/evergreen-ci/birch/x_constructor.go @@ -1,21 +1,24 @@ package birch import ( + "io" "math" "time" - "github.com/mongodb/grip" "github.com/pkg/errors" ) +// DC is a convenience variable provided for access to the DocumentConstructor methods. var DC DocumentConstructor +// DocumentConstructor is used as a namespace for document constructor functions. type DocumentConstructor struct{} +// New returns an empty document. func (DocumentConstructor) New() *Document { return DC.Make(0) } // Make returns a document with the underlying storage -// allocated as specified. Provides some efficency when building +// allocated as specified. Provides some efficiency when building // larger documents iteratively. func (DocumentConstructor) Make(n int) *Document { return &Document{ @@ -24,10 +27,15 @@ func (DocumentConstructor) Make(n int) *Document { } } +// Elements returns a document initialized with the elements passed as +// arguments. func (DocumentConstructor) Elements(elems ...*Element) *Document { return DC.Make(len(elems)).Append(elems...) 
}
 
+// Reader constructs a document from a bson reader, which is a wrapper
+// around a byte slice representation of a bson document. Reader
+// panics if there is a problem reading the document.
 func (DocumentConstructor) Reader(r Reader) *Document {
 	doc, err := DC.ReaderErr(r)
 	if err != nil {
@@ -37,10 +45,46 @@ func (DocumentConstructor) Reader(r Reader) *Document {
 	return doc
 }
 
+// ReaderErr constructs a document from a bson reader, which is a wrapper
+// around a byte slice representation of a bson document. ReaderErr
+// returns an error if there is a problem reading the document.
 func (DocumentConstructor) ReaderErr(r Reader) (*Document, error) {
 	return ReadDocument(r)
 }
 
+// ReadFrom builds a document reading a byte sequence from an
+// io.Reader, panicking if there's a problem reading from the reader.
+func (DocumentConstructor) ReadFrom(in io.Reader) *Document {
+	doc, err := DC.ReadFromErr(in)
+	if err == io.EOF {
+		return nil
+	}
+
+	if err != nil {
+		panic(err)
+	}
+
+	return doc
+}
+
+// ReadFromErr builds a document reading a byte sequence from an
+// io.Reader, returning an error if there's a problem reading from the
+// reader.
+func (DocumentConstructor) ReadFromErr(in io.Reader) (*Document, error) {
+	doc := DC.New()
+
+	_, err := doc.ReadFrom(in)
+	if err == io.EOF {
+		return nil, err
+	}
+
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+
+	return doc, nil
+}
+
 func (DocumentConstructor) Marshaler(in Marshaler) *Document {
 	doc, err := DC.MarshalerErr(in)
 	if err != nil {
@@ -73,22 +117,23 @@ func (DocumentConstructor) MapInterface(in map[string]interface{}) *Document {
 	for k, v := range in {
 		elems = append(elems, EC.Interface(k, v))
 	}
+
 	return DC.Elements(elems...)
 }
 
 func (DocumentConstructor) MapInterfaceErr(in map[string]interface{}) (*Document, error) {
-	catcher := grip.NewBasicCatcher()
 	elems := make([]*Element, 0, len(in))
+
 	for k, v := range in {
 		elem, err := EC.InterfaceErr(k, v)
-		catcher.Add(err)
+		if err != nil {
+			return nil, errors.WithStack(err)
+		}
+
 		if elem != nil {
 			elems = append(elems, elem)
 		}
 	}
-	if catcher.HasErrors() {
-		return nil, catcher.Resolve()
-	}
 	return DC.Elements(elems...), nil
 }
 
@@ -167,19 +212,18 @@ func (DocumentConstructor) MapMarshaler(in map[string]Marshaler) *Document {
 func (DocumentConstructor) MapMarshalerErr(in map[string]Marshaler) (*Document, error) {
 	elems := make([]*Element, 0, len(in))
-	catcher := grip.NewBasicCatcher()
+
 	for k, v := range in {
 		elem, err := EC.MarshalerErr(k, v)
-		catcher.Add(err)
+		if err != nil {
+			return nil, errors.WithStack(err)
+		}
+
 		if elem != nil {
 			elems = append(elems, elem)
 		}
 	}
-	if catcher.HasErrors() {
-		return nil, catcher.Resolve()
-	}
-
 	return DC.Elements(elems...), nil
 }
 
@@ -194,18 +238,68 @@ func (DocumentConstructor) MapSliceMarshaler(in map[string][]Marshaler) *Documen
 func (DocumentConstructor) MapSliceMarshalerErr(in map[string][]Marshaler) (*Document, error) {
 	elems := make([]*Element, 0, len(in))
-	catcher := grip.NewBasicCatcher()
 
 	for k, v := range in {
 		elem, err := EC.SliceMarshalerErr(k, v)
-		catcher.Add(err)
+		if err != nil {
+			return nil, errors.WithStack(err)
+		}
+
 		if elem != nil {
 			elems = append(elems, elem)
 		}
 	}
 
-	if catcher.HasErrors() {
-		return nil, catcher.Resolve()
+	return DC.Elements(elems...), nil
+}
+
+func (DocumentConstructor) MapDocumentMarshaler(in map[string]DocumentMarshaler) *Document {
+	elems := make([]*Element, 0, len(in))
+	for k, v := range in {
+		elems = append(elems, EC.DocumentMarshaler(k, v))
+	}
+
+	return DC.Elements(elems...)
+} + +func (DocumentConstructor) MapDocumentMarshalerErr(in map[string]DocumentMarshaler) (*Document, error) { + elems := make([]*Element, 0, len(in)) + + for k, v := range in { + elem, err := EC.DocumentMarshalerErr(k, v) + if err != nil { + return nil, errors.WithStack(err) + } + + if elem != nil { + elems = append(elems, elem) + } + } + + return DC.Elements(elems...), nil +} + +func (DocumentConstructor) MapSliceDocumentMarshaler(in map[string][]DocumentMarshaler) *Document { + elems := make([]*Element, 0, len(in)) + for k, v := range in { + elems = append(elems, EC.SliceDocumentMarshaler(k, v)) + } + + return DC.Elements(elems...) +} + +func (DocumentConstructor) MapSliceDocumentMarshalerErr(in map[string][]DocumentMarshaler) (*Document, error) { + elems := make([]*Element, 0, len(in)) + + for k, v := range in { + elem, err := EC.SliceDocumentMarshalerErr(k, v) + if err != nil { + return nil, errors.WithStack(err) + } + + if elem != nil { + elems = append(elems, elem) + } } return DC.Elements(elems...), nil @@ -230,21 +324,19 @@ func (DocumentConstructor) MapSliceInterface(in map[string][]interface{}) *Docum } func (DocumentConstructor) MapSliceInterfaceErr(in map[string][]interface{}) (*Document, error) { - catcher := grip.NewBasicCatcher() elems := make([]*Element, 0, len(in)) for k, v := range in { elem, err := EC.SliceInterfaceErr(k, v) - catcher.Add(err) + if err != nil { + return nil, errors.WithStack(err) + } + if elem != nil { elems = append(elems, elem) } } - if catcher.HasErrors() { - return nil, catcher.Resolve() - } - return DC.Elements(elems...), nil } @@ -326,6 +418,10 @@ func (DocumentConstructor) Interface(value interface{}) *Document { doc = DC.MapInterface(t) case map[string][]interface{}: doc = DC.MapSliceInterface(t) + case map[string]DocumentMarshaler: + doc, err = DC.MapDocumentMarshalerErr(t) + case map[string][]DocumentMarshaler: + doc, err = DC.MapSliceDocumentMarshalerErr(t) case map[string]Marshaler: doc, err = DC.MapMarshalerErr(t) case map[string][]Marshaler: @@ -363,6 +459,8 @@ func (DocumentConstructor) Interface(value interface{}) *Document { doc = t case Reader: doc, err = DC.ReaderErr(t) + case DocumentMarshaler: + doc, err = t.MarshalDocument() case Marshaler: doc, err = DC.MarshalerErr(t) case []*Element: @@ -378,18 +476,16 @@ func (DocumentConstructor) Interface(value interface{}) *Document { func (DocumentConstructor) InterfaceErr(value interface{}) (*Document, error) { switch t := value.(type) { - case map[string]string, map[string][]string, - map[string]int64, map[string][]int64, - map[string]int32, map[string][]int32, map[string]int, map[string][]int, - map[string]time.Time, map[string][]time.Time, map[string]time.Duration, - map[string][]time.Duration, map[interface{}]interface{}: - + case map[string]string, map[string][]string, map[string]int64, map[string][]int64, map[string]int32, map[string][]int32, map[string]int, map[string][]int, map[string]time.Time, map[string][]time.Time, map[string]time.Duration, map[string][]time.Duration, map[interface{}]interface{}: return DC.Interface(t), nil - case map[string]Marshaler: return DC.MapMarshalerErr(t) case map[string][]Marshaler: return DC.MapSliceMarshalerErr(t) + case map[string]DocumentMarshaler: + return DC.MapDocumentMarshalerErr(t) + case map[string][]DocumentMarshaler: + return DC.MapSliceDocumentMarshalerErr(t) case map[string]interface{}: return DC.MapInterfaceErr(t) case map[string][]interface{}: @@ -402,6 +498,8 @@ func (DocumentConstructor) InterfaceErr(value interface{}) (*Document, 
error) { return DC.Elements(t...), nil case *Document: return t, nil + case DocumentMarshaler: + return t.MarshalDocument() case Marshaler: return DC.MarshalerErr(t) default: @@ -423,13 +521,33 @@ func (ElementConstructor) MarshalerErr(key string, val Marshaler) (*Element, err if err != nil { return nil, errors.WithStack(err) } + return EC.SubDocumentFromReader(key, doc), nil } +func (ElementConstructor) DocumentMarshaler(key string, val DocumentMarshaler) *Element { + doc, err := val.MarshalDocument() + if err != nil { + panic(err) + } + + return EC.SubDocument(key, doc) +} + +func (ElementConstructor) DocumentMarshalerErr(key string, val DocumentMarshaler) (*Element, error) { + doc, err := val.MarshalDocument() + if err != nil { + return nil, errors.WithStack(err) + } + + return EC.SubDocument(key, doc), nil +} + func (ElementConstructor) Int(key string, i int) *Element { if i < math.MaxInt32 { return EC.Int32(key, int32(i)) } + return EC.Int64(key, int64(i)) } @@ -454,21 +572,19 @@ func (ElementConstructor) SliceInterface(key string, in []interface{}) *Element } func (ElementConstructor) SliceInterfaceErr(key string, in []interface{}) (*Element, error) { - catcher := grip.NewBasicCatcher() vals := make([]*Value, 0, len(in)) for idx := range in { elem, err := VC.InterfaceErr(in[idx]) - catcher.Add(err) + if err != nil { + return nil, errors.WithStack(err) + } + if elem != nil { vals = append(vals, elem) } } - if catcher.HasErrors() { - return nil, catcher.Resolve() - } - return EC.Array(key, NewArray(vals...)), nil } @@ -554,18 +670,43 @@ func (ElementConstructor) SliceMarshaler(key string, in []Marshaler) *Element { func (ElementConstructor) SliceMarshalerErr(key string, in []Marshaler) (*Element, error) { vals := make([]*Value, 0, len(in)) - catcher := grip.NewBasicCatcher() for idx := range in { val, err := VC.MarshalerErr(in[idx]) - catcher.Add(err) + if err != nil { + return nil, errors.WithStack(err) + } + if val != nil { vals = append(vals, val) } } - if catcher.HasErrors() { - return nil, catcher.Resolve() + return EC.Array(key, NewArray(vals...)), nil +} + +func (ElementConstructor) SliceDocumentMarshaler(key string, in []DocumentMarshaler) *Element { + vals := make([]*Value, len(in)) + + for idx := range in { + vals[idx] = VC.DocumentMarshaler(in[idx]) + } + + return EC.Array(key, NewArray(vals...)) +} + +func (ElementConstructor) SliceDocumentMarshalerErr(key string, in []DocumentMarshaler) (*Element, error) { + vals := make([]*Value, 0, len(in)) + + for idx := range in { + val, err := VC.DocumentMarshalerErr(in[idx]) + if err != nil { + return nil, errors.WithStack(err) + } + + if val != nil { + vals = append(vals, val) + } } return EC.Array(key, NewArray(vals...)), nil @@ -588,6 +729,7 @@ func (ValueConstructor) InterfaceErr(in interface{}) (*Value, error) { if err != nil { return nil, errors.WithStack(err) } + return elem.value, nil } @@ -604,6 +746,19 @@ func (ValueConstructor) MarshalerErr(in Marshaler) (*Value, error) { return elem.value, nil } +func (ValueConstructor) DocumentMarshaler(in DocumentMarshaler) *Value { + return EC.DocumentMarshaler("", in).value +} + +func (ValueConstructor) DocumentMarshalerErr(in DocumentMarshaler) (*Value, error) { + elem, err := EC.DocumentMarshalerErr("", in) + if err != nil { + return nil, errors.WithStack(err) + } + + return elem.value, nil +} + func (ValueConstructor) Duration(t time.Duration) *Value { return VC.Int64(int64(t)) } @@ -745,8 +900,8 @@ func (ValueConstructor) SliceMarshalerErr(in []Marshaler) (*Value, error) { if err 
!= nil { return nil, errors.WithStack(err) } - return elem.value, nil + return elem.value, nil } func (ValueConstructor) SliceInterfaceErr(in []interface{}) (*Value, error) { diff --git a/vendor/github.com/evergreen-ci/birch/x_core.go b/vendor/github.com/evergreen-ci/birch/x_core.go index ca0f6a5e9e5..0d8e76afa8b 100644 --- a/vendor/github.com/evergreen-ci/birch/x_core.go +++ b/vendor/github.com/evergreen-ci/birch/x_core.go @@ -7,8 +7,9 @@ import ( ) func readValue(src []byte, t bsontype.Type) ([]byte, []byte, bool) { - var length int32 + length := int32(0) ok := true + switch t { case bsontype.Array, bsontype.EmbeddedDocument, bsontype.CodeWithScope: length, _, ok = readLength(src) @@ -39,11 +40,13 @@ func readValue(src []byte, t bsontype.Type) ([]byte, []byte, bool) { ok = false break } + pattern := bytes.IndexByte(src[regex+1:], 0x00) if pattern < 0 { ok = false break } + length = int32(int64(regex) + 1 + int64(pattern) + 1) default: ok = false diff --git a/vendor/github.com/evergreen-ci/birch/x_document.go b/vendor/github.com/evergreen-ci/birch/x_document.go index d5f75d596dd..0084dbe2085 100644 --- a/vendor/github.com/evergreen-ci/birch/x_document.go +++ b/vendor/github.com/evergreen-ci/birch/x_document.go @@ -7,6 +7,23 @@ import ( "github.com/evergreen-ci/birch/bsontype" ) +// MarshalDocument satisfies the DocumentMarshaler interface, and +// returns the document itself. +func (d *Document) MarshalDocument() (*Document, error) { return d, nil } + +// UnmarshalDocument satisfies the DocumentUnmarshaler interface and +// appends the elements of the input document to the underlying +// document. If the document is populated this could result in a +// document that has multiple identical keys. +func (d *Document) UnmarshalDocument(in *Document) error { + iter := in.Iterator() + for iter.Next() { + d.Append(iter.Element()) + } + + return nil +} + // ExportMap converts the values of the document to a map of strings // to interfaces, recursively, using the Value.Interface() method. func (d *Document) ExportMap() map[string]interface{} { @@ -21,6 +38,8 @@ func (d *Document) ExportMap() map[string]interface{} { return out } +// Elements is a representation of a slice of elements, and implements +// the sort.Interface to support ordering the keys of a document. type Elements []*Element func (c Elements) Len() int { return len(c) } @@ -28,6 +47,7 @@ func (c Elements) Swap(i, j int) { c[i], c[j] = c[j], c[i] } func (c Elements) Less(i, j int) bool { ik := c[i].Key() jk := c[j].Key() + if ik != jk { return ik < jk } @@ -56,33 +76,49 @@ func (c Elements) Less(i, j int) bool { return false } } + +// Copy returns a new Elements slice with the same underlying +// Elements. The copy is "shallow." func (c Elements) Copy() Elements { out := make(Elements, len(c)) for idx := range c { out[idx] = c[idx] } + return out } +// Elements provides access to a slice of the Elements in the +// document. Mutating this list will mutate the content of the +// document. func (d *Document) Elements() Elements { return d.elems } +// Sorted returns a new document containing a (shallow copy) of the +// elements from the source document ordered according to their value. func (d *Document) Sorted() *Document { elems := d.Elements().Copy() sort.Stable(elems) + return DC.Elements(elems...) } +// LookupElement iterates through the elements in a document looking +// for one with the correct key and returns that element. It is NOT +// recursive. When the element is not defined, the return value +// is nil. 
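+//
+// A minimal usage sketch (doc is a hypothetical *Document):
+//
+//	if elem := doc.LookupElement("status"); elem != nil {
+//		// elem.Value() holds the value stored at "status"
+//	}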
 func (d *Document) LookupElement(key string) *Element {
 	iter := d.Iterator()
 	for iter.Next() {
 		elem := iter.Element()
 		elemKey, ok := elem.KeyOK()
+
 		if !ok {
 			continue
 		}
+
 		if elemKey == key {
 			return elem
 		}
@@ -91,14 +127,23 @@ func (d *Document) LookupElement(key string) *Element {
 	return nil
 }
 
+// Lookup iterates through the elements in a document looking
+// for one with the correct key and returns the value for that key. It
+// is NOT recursive. When the element is not defined, the return value
+// is nil.
 func (d *Document) Lookup(key string) *Value {
 	elem := d.LookupElement(key)
 	if elem == nil {
 		return nil
 	}
+
 	return elem.value
 }
 
+// LookupElementErr iterates through the elements in a document looking
+// for one with the correct key and returns the Element for that key. It
+// is NOT recursive. When the element is not defined, it returns an
+// ElementNotFound error.
 func (d *Document) LookupElementErr(key string) (*Element, error) {
 	elem := d.LookupElement(key)
 	if elem == nil {
@@ -108,6 +153,10 @@ func (d *Document) LookupElementErr(key string) (*Element, error) {
 	return elem, nil
 }
 
+// LookupErr iterates through the elements in a document looking
+// for one with the correct key and returns the value for that key. It
+// is NOT recursive. When the element is not defined, it returns an
+// ElementNotFound error.
 func (d *Document) LookupErr(key string) (*Value, error) {
 	elem := d.LookupElement(key)
 	if elem == nil {
diff --git a/vendor/github.com/evergreen-ci/birch/x_element.go b/vendor/github.com/evergreen-ci/birch/x_element.go
index acfdc3b07cc..aaa8ed2cde1 100644
--- a/vendor/github.com/evergreen-ci/birch/x_element.go
+++ b/vendor/github.com/evergreen-ci/birch/x_element.go
@@ -1,3 +1,4 @@
 package birch
 
+// SetValue makes it possible to modify the value of an element in place.
 func (e *Element) SetValue(v *Value) { e.value = v }
diff --git a/vendor/github.com/evergreen-ci/birch/x_json.go b/vendor/github.com/evergreen-ci/birch/x_json.go
deleted file mode 100644
index 1cbf978a79a..00000000000
--- a/vendor/github.com/evergreen-ci/birch/x_json.go
+++ /dev/null
@@ -1 +0,0 @@
-package birch
diff --git a/vendor/github.com/evergreen-ci/birch/x_json_marshal.go b/vendor/github.com/evergreen-ci/birch/x_json_marshal.go
new file mode 100644
index 00000000000..77d3eb86264
--- /dev/null
+++ b/vendor/github.com/evergreen-ci/birch/x_json_marshal.go
@@ -0,0 +1,131 @@
+package birch
+
+import (
+	"time"
+
+	"github.com/evergreen-ci/birch/bsontype"
+	"github.com/evergreen-ci/birch/jsonx"
+)
+
+// MarshalJSON produces a JSON representation of the Document,
+// preserving the order of the keys, and type information for types
+// that have no JSON equivalent using MongoDB's extended JSON format
+// where needed.
+func (d *Document) MarshalJSON() ([]byte, error) { return d.toJSON().MarshalJSON() }
+
+func (d *Document) toJSON() *jsonx.Document {
+	iter := d.Iterator()
+	out := jsonx.DC.Make(d.Len())
+	for iter.Next() {
+		elem := iter.Element()
+		out.Append(jsonx.EC.Value(elem.Key(), elem.Value().toJSON()))
+	}
+	if iter.Err() != nil {
+		return nil
+	}
+
+	return out
+}
+
+// MarshalJSON produces a JSON representation of an Array preserving
+// the type information for the types that have no JSON equivalent
+// using MongoDB's extended JSON format where needed.
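+//
+// A minimal usage sketch, assuming the constructors defined in this
+// package:
+//
+//	out, err := NewArray(VC.Boolean(true), VC.String("x")).MarshalJSON()
+//	// on success, out is []byte(`[true,"x"]`)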
+func (a *Array) MarshalJSON() ([]byte, error) { return a.toJSON().MarshalJSON() } + +func (a *Array) toJSON() *jsonx.Array { + iter := a.Iterator() + out := jsonx.AC.Make(a.Len()) + for iter.Next() { + out.Append(iter.Value().toJSON()) + } + if iter.Err() != nil { + panic(iter.Err()) + return nil + } + + return out +} + +func (v *Value) MarshalJSON() ([]byte, error) { return v.toJSON().MarshalJSON() } + +func (v *Value) toJSON() *jsonx.Value { + switch v.Type() { + case bsontype.Double: + return jsonx.VC.Float64(v.Double()) + case bsontype.String: + return jsonx.VC.String(v.StringValue()) + case bsontype.EmbeddedDocument: + return jsonx.VC.Object(v.MutableDocument().toJSON()) + case bsontype.Array: + return jsonx.VC.Array(v.MutableArray().toJSON()) + case bsontype.Binary: + t, d := v.Binary() + + return jsonx.VC.ObjectFromElements( + jsonx.EC.ObjectFromElements("$binary", + jsonx.EC.String("base64", string(t)), + jsonx.EC.String("subType", string(d)), + ), + ) + case bsontype.Undefined: + return jsonx.VC.ObjectFromElements(jsonx.EC.Boolean("$undefined", true)) + case bsontype.ObjectID: + return jsonx.VC.ObjectFromElements(jsonx.EC.String("$oid", v.ObjectID().Hex())) + case bsontype.Boolean: + return jsonx.VC.Boolean(v.Boolean()) + case bsontype.DateTime: + return jsonx.VC.ObjectFromElements(jsonx.EC.String("$date", v.Time().Format(time.RFC3339))) + case bsontype.Null: + return jsonx.VC.Nil() + case bsontype.Regex: + pattern, opts := v.Regex() + + return jsonx.VC.ObjectFromElements( + jsonx.EC.ObjectFromElements("$regularExpression", + jsonx.EC.String("pattern", pattern), + jsonx.EC.String("options", opts), + ), + ) + case bsontype.DBPointer: + ns, oid := v.DBPointer() + + return jsonx.VC.ObjectFromElements( + jsonx.EC.ObjectFromElements("$dbPointer", + jsonx.EC.String("$ref", ns), + jsonx.EC.String("$id", oid.Hex()), + ), + ) + case bsontype.JavaScript: + return jsonx.VC.ObjectFromElements(jsonx.EC.String("$code", v.JavaScript())) + case bsontype.Symbol: + return jsonx.VC.ObjectFromElements(jsonx.EC.String("$symbol", v.Symbol())) + case bsontype.CodeWithScope: + code, scope := v.MutableJavaScriptWithScope() + + return jsonx.VC.ObjectFromElements( + jsonx.EC.String("$code", code), + jsonx.EC.Object("$scope", scope.toJSON()), + ) + case bsontype.Int32: + return jsonx.VC.Int32(v.Int32()) + case bsontype.Timestamp: + t, i := v.Timestamp() + + return jsonx.VC.ObjectFromElements( + jsonx.EC.ObjectFromElements("$timestamp", + jsonx.EC.Int64("t", int64(t)), + jsonx.EC.Int64("i", int64(i)), + ), + ) + case bsontype.Int64: + return jsonx.VC.Int64(v.Int64()) + case bsontype.Decimal128: + return jsonx.VC.ObjectFromElements(jsonx.EC.String("$numberDecimal", v.Decimal128().String())) + case bsontype.MinKey: + return jsonx.VC.ObjectFromElements(jsonx.EC.Int("$minKey", 1)) + case bsontype.MaxKey: + return jsonx.VC.ObjectFromElements(jsonx.EC.Int("$maxKey", 1)) + default: + return nil + } +} diff --git a/vendor/github.com/evergreen-ci/birch/x_json_test.go b/vendor/github.com/evergreen-ci/birch/x_json_test.go new file mode 100644 index 00000000000..8c5533fa18a --- /dev/null +++ b/vendor/github.com/evergreen-ci/birch/x_json_test.go @@ -0,0 +1,417 @@ +package birch + +import ( + "fmt" + "math" + "testing" + "time" + + "github.com/evergreen-ci/birch/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type jsonDocumentTestCase struct { + Name string + ShouldSkip bool + Doc *Document + Expected string +} + +func makeDocumentTestCases(depth int) []jsonDocumentTestCase { + 
depth++ + now := time.Now().Round(time.Hour) + + base := []jsonDocumentTestCase{ + { + Name: "SimpleString", + Doc: DC.Elements(EC.String("hello", "world")), + Expected: `{"hello":"world"}`, + }, + { + Name: "MultiString", + Doc: DC.Elements(EC.String("hello", "world"), EC.String("this", "that")), + Expected: `{"hello":"world","this":"that"}`, + }, + { + Name: "SimpleTrue", + Doc: DC.Elements(EC.Boolean("hello", true)), + Expected: `{"hello":true}`, + }, + { + Name: "SimpleFalse", + Doc: DC.Elements(EC.Boolean("hello", false)), + Expected: `{"hello":false}`, + }, + { + Name: "ObjectID", + Doc: DC.Elements(EC.ObjectID("_id", types.MustObjectIDFromHex("5df67fa01cbe64e51b598f18"))), + Expected: `{"_id":{"$oid":"5df67fa01cbe64e51b598f18"}}`, + }, + { + Name: "RegularExpression", + Doc: DC.Elements(EC.Regex("rex", ".*", "i")), + Expected: `{"rex":{"$regularExpression":{"pattern":".*","options":"i"}}}`, + }, + { + Name: "SimpleInt", + Doc: DC.Elements(EC.Int("hello", 42)), + Expected: `{"hello":42}`, + }, + { + Name: "MaxKey", + Doc: DC.Elements(EC.MaxKey("most")), + Expected: `{"most":{"$maxKey":1}}`, + }, + { + Name: "MinKey", + Doc: DC.Elements(EC.MinKey("most")), + Expected: `{"most":{"$minKey":1}}`, + }, + { + Name: "Undefined", + Doc: DC.Elements(EC.Undefined("kip")), + Expected: `{"kip":{"$undefined":true}}`, + }, + { + Name: "Code", + Doc: DC.Elements(EC.JavaScript("js", "let out = map(function(k, v){})")), + Expected: `{"js":{"$code":"let out = map(function(k, v){})"}}`, + }, + { + Name: "CodeWithScope", + Doc: DC.Elements(EC.CodeWithScope("js", "let out = map(function(k, v){v+a})", DC.Elements(EC.Int("a", 1)))), + Expected: `{"js":{"$code":"let out = map(function(k, v){v+a})","$scope":{"a":1}}}`, + }, + { + Name: "Symbol", + Doc: DC.Elements(EC.Symbol("signified", "signifier")), + Expected: `{"signified":{"$symbol":"signifier"}}`, + }, + { + + Name: "MDBTimeStamp", + Doc: DC.Elements(EC.Timestamp("mdbts", uint32(now.Unix()), 1)), + Expected: fmt.Sprintf(`{"mdbts":{"$timestamp":{"t":%d,"i":1}}}`, now.Unix()), + }, + { + Name: "SimpleInt64", + Doc: DC.Elements(EC.Int("hello", math.MaxInt32+2)), + Expected: `{"hello":2147483649}`, + }, + { + Name: "SimpleTimestamp", + Doc: DC.Elements(EC.Time("nowish", now)), + Expected: fmt.Sprintf(`{"nowish":{"$date":"%s"}}`, now.Format(time.RFC3339)), + }, + { + Name: "Mixed", + Doc: DC.Elements( + EC.Int("first", 42), + EC.String("second", "stringval"), + EC.SubDocumentFromElements("third", + EC.Boolean("exists", true), + EC.Null("does_not"), + ), + ), + Expected: `{"first":42,"second":"stringval","third":{"exists":true,"does_not":null}}`, + }, + } + + if depth > 2 { + return base + } + + for _, at := range makeArrayTestCases(depth) { + base = append(base, jsonDocumentTestCase{ + Name: "Array/" + at.Name, + ShouldSkip: at.ShouldSkip, + Doc: DC.Elements( + EC.Boolean("isArray", true), + EC.Array("array", at.Array), + ), + Expected: fmt.Sprintf(`{"isArray":true,"array":%s}`, at.Expected), + }) + } + + for _, vt := range makeValueTestCases(depth) { + base = append(base, jsonDocumentTestCase{ + Name: "Value/" + vt.Name, + ShouldSkip: vt.ShouldSkip, + Doc: DC.Elements( + EC.Boolean("isValue", true), + EC.Value("value", vt.Val), + ), + Expected: fmt.Sprintf(`{"isValue":true,"value":%s}`, vt.Expected), + }) + } + + for _, dt := range makeDocumentTestCases(depth) { + base = append(base, jsonDocumentTestCase{ + Name: "SubDocument/" + dt.Name, + ShouldSkip: dt.ShouldSkip, + Doc: DC.Elements( + EC.Boolean("isSubDoc", true), + EC.SubDocument("first", dt.Doc), + 
EC.SubDocument("second", dt.Doc), + ), + Expected: fmt.Sprintf(`{"isSubDoc":true,"first":%s,"second":%s}`, dt.Expected, dt.Expected), + }) + } + return base +} + +type jsonArrayTestCase struct { + Name string + ShouldSkip bool + Array *Array + Expected string +} + +func makeArrayTestCases(depth int) []jsonArrayTestCase { + depth++ + base := []jsonArrayTestCase{ + { + Name: "Empty", + Array: NewArray(), + Expected: "[]", + }, + { + Name: "Bools", + Array: NewArray(VC.Boolean(true), VC.Boolean(true), VC.Boolean(false), VC.Boolean(false)), + Expected: "[true,true,false,false]", + }, + { + Name: "Strings", + Array: NewArray(VC.String("one"), VC.String("two"), VC.String("three")), + Expected: `["one","two","three"]`, + }, + { + Name: "SingleSubDocument", + Array: NewArray( + VC.Document(DC.Elements(EC.Int("a", 1))), + ), + Expected: `[{"a":1}]`, + }, + { + Name: "SingleKeyDocuments", + Array: NewArray( + VC.Document(DC.Elements(EC.Int("a", 1))), + VC.Document(DC.Elements(EC.Int("a", 1))), + VC.Document(DC.Elements(EC.Int("a", 1))), + ), + Expected: `[{"a":1},{"a":1},{"a":1}]`, + }, + } + + if depth > 2 { + return base + } + + for _, dt := range makeDocumentTestCases(depth) { + base = append(base, jsonArrayTestCase{ + Name: "Document/" + dt.Name, + ShouldSkip: dt.ShouldSkip, + Array: NewArray(VC.Document(dt.Doc), VC.Document(dt.Doc), VC.Document(dt.Doc)), + Expected: fmt.Sprintf(`[%s,%s,%s]`, dt.Expected, dt.Expected, dt.Expected), + }) + } + + for _, vt := range makeValueTestCases(depth) { + base = append(base, jsonArrayTestCase{ + Name: "Value/" + vt.Name, + ShouldSkip: vt.ShouldSkip, + Array: NewArray(vt.Val, vt.Val, vt.Val), + Expected: fmt.Sprintf(`[%s,%s,%s]`, vt.Expected, vt.Expected, vt.Expected), + }) + } + + for _, at := range makeArrayTestCases(depth + 1) { + base = append(base, jsonArrayTestCase{ + Name: "DoubleArray/" + at.Name, + ShouldSkip: at.ShouldSkip, + Array: NewArray(VC.Array(at.Array), VC.Array(at.Array), VC.Array(at.Array)), + Expected: fmt.Sprintf(`[%s,%s,%s]`, at.Expected, at.Expected, at.Expected), + }) + } + + return base +} + +type jsonValueTestCase struct { + Name string + ShouldSkip bool + Val *Value + Expected string +} + +func makeValueTestCases(depth int) []jsonValueTestCase { + depth++ + base := []jsonValueTestCase{ + { + Name: "True", + Val: VC.Boolean(true), + Expected: "true", + }, + { + Name: "False", + Val: VC.Boolean(false), + Expected: "false", + }, + { + Name: "Null", + Val: VC.Null(), + Expected: "null", + }, + { + Name: "String", + Val: VC.String("helloWorld!"), + Expected: `"helloWorld!"`, + }, + { + Name: "ObjectID", + Val: VC.ObjectID(types.MustObjectIDFromHex("5df67fa01cbe64e51b598f18")), + Expected: `{"$oid":"5df67fa01cbe64e51b598f18"}`, + }, + { + Name: "SingleKeyDoc", + Val: VC.Document(DC.Elements(EC.Int("a", 1))), + Expected: `{"a":1}`, + }, + } + + if depth > 2 { + return base + } + + for _, docTests := range makeDocumentTestCases(depth) { + base = append(base, jsonValueTestCase{ + Name: "Document/" + docTests.Name, + Val: VC.Document(docTests.Doc), + Expected: docTests.Expected, + ShouldSkip: docTests.ShouldSkip, + }) + } + + for _, arrayTests := range makeArrayTestCases(depth) { + base = append(base, jsonValueTestCase{ + Name: "Array/" + arrayTests.Name, + Val: VC.Array(arrayTests.Array), + Expected: arrayTests.Expected, + ShouldSkip: arrayTests.ShouldSkip, + }) + } + + return base +} + +func TestJSON(t *testing.T) { + t.Run("Marshal", func(t *testing.T) { + t.Run("Document", func(t *testing.T) { + for _, test := range 
makeDocumentTestCases(0) {
+				if test.ShouldSkip {
+					continue
+				}
+
+				t.Run(test.Name, func(t *testing.T) {
+					out, err := test.Doc.MarshalJSON()
+
+					require.NoError(t, err)
+					require.Equal(t, test.Expected, string(out))
+				})
+			}
+		})
+		t.Run("Array", func(t *testing.T) {
+			for _, test := range makeArrayTestCases(0) {
+				if test.ShouldSkip {
+					continue
+				}
+				t.Run(test.Name, func(t *testing.T) {
+					out, err := test.Array.MarshalJSON()
+
+					require.NoError(t, err)
+					require.Equal(t, test.Expected, string(out))
+				})
+			}
+		})
+		t.Run("Value", func(t *testing.T) {
+			for _, test := range makeValueTestCases(0) {
+				if test.ShouldSkip {
+					continue
+				}
+
+				t.Run(test.Name, func(t *testing.T) {
+					out, err := test.Val.MarshalJSON()
+
+					require.NoError(t, err)
+					require.Equal(t, test.Expected, string(out))
+				})
+			}
+		})
+	})
+	t.Run("Unmarshal", func(t *testing.T) {
+		t.Run("Document", func(t *testing.T) {
+			for _, test := range makeDocumentTestCases(0) {
+				if test.ShouldSkip {
+					continue
+				}
+
+				t.Run(test.Name, func(t *testing.T) {
+					doc := DC.New()
+					err := doc.UnmarshalJSON([]byte(test.Expected))
+					require.NoError(t, err)
+					iter := doc.Iterator()
+					for iter.Next() {
+						elem := iter.Element()
+						expected, err := test.Doc.LookupErr(elem.Key())
+						require.NoError(t, err)
+						assert.True(t, elem.Value().Equal(expected), "[%s] %s != %s",
+							test.Expected,
+							expected.Interface(), elem.Value().Interface())
+					}
+					require.NoError(t, iter.Err())
+				})
+			}
+		})
+		t.Run("Array", func(t *testing.T) {
+			for _, test := range makeArrayTestCases(0) {
+				if test.ShouldSkip {
+					continue
+				}
+
+				t.Run(test.Name, func(t *testing.T) {
+					array := NewArray()
+					err := array.UnmarshalJSON([]byte(test.Expected))
+					require.NoError(t, err)
+
+					iter := array.Iterator()
+					idx := uint(0)
+					for iter.Next() {
+						elem := iter.Value()
+						expected, err := test.Array.LookupErr(idx)
+						require.NoError(t, err)
+						assert.True(t, elem.Equal(expected))
+						idx++
+					}
+					require.NoError(t, iter.Err())
+				})
+			}
+		})
+		t.Run("Value", func(t *testing.T) {
+			for _, test := range makeValueTestCases(0) {
+				if test.ShouldSkip {
+					continue
+				}
+
+				t.Run(test.Name, func(t *testing.T) {
+					value := &Value{}
+					err := value.UnmarshalJSON([]byte(test.Expected))
+					require.NoError(t, err)
+
+					assert.True(t, value.Equal(test.Val))
+				})
+			}
+		})
+
+	})
+}
diff --git a/vendor/github.com/evergreen-ci/birch/x_json_unmarshal.go b/vendor/github.com/evergreen-ci/birch/x_json_unmarshal.go
new file mode 100644
index 00000000000..9a4ad2b4dcd
--- /dev/null
+++ b/vendor/github.com/evergreen-ci/birch/x_json_unmarshal.go
@@ -0,0 +1,287 @@
+package birch
+
+import (
+	"time"
+
+	"github.com/evergreen-ci/birch/decimal"
+	"github.com/evergreen-ci/birch/jsonx"
+	"github.com/evergreen-ci/birch/types"
+	"github.com/pkg/errors"
+)
+
+// UnmarshalJSON parses the provided JSON into the document,
+// recursively, preserving the order of keys and recovering the rich
+// bson types encoded with MongoDB's extended JSON format for BSON
+// types that have no equivalent in JSON.
+//
+// The underlying document is not emptied before this operation, which
+// for non-empty documents could result in duplicate keys.
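+//
+// A minimal sketch of decoding extended JSON (the input literal is
+// illustrative only):
+//
+//	doc := DC.New()
+//	if err := doc.UnmarshalJSON([]byte(`{"_id":{"$oid":"5df67fa01cbe64e51b598f18"}}`)); err != nil {
+//		// handle malformed input
+//	}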
+func (d *Document) UnmarshalJSON(in []byte) error { + jdoc, err := jsonx.DC.BytesErr(in) + if err != nil { + return errors.WithStack(err) + } + iter := jdoc.Iterator() + for iter.Next() { + elem, err := convertJSONElements(iter.Element()) + if err != nil { + return errors.WithStack(err) + } + + d.Append(elem) + } + + return nil +} + +func (a *Array) UnmarshalJSON(in []byte) error { + ja, err := jsonx.AC.BytesErr(in) + if err != nil { + return errors.WithStack(err) + } + iter := ja.Iterator() + for iter.Next() { + elem, err := convertJSONElements(iter.Element()) + if err != nil { + return errors.WithStack(err) + } + + a.Append(elem.value) + } + + return nil +} + +func (v *Value) UnmarshalJSON(in []byte) error { + va, err := jsonx.VC.BytesErr(in) + if err != nil { + return errors.WithStack(err) + } + + elem, err := convertJSONElements(jsonx.EC.Value("", va)) + if err != nil { + return errors.WithStack(err) + } + v.Set(elem.Value()) + return nil +} + +func convertJSONElements(in *jsonx.Element) (*Element, error) { + inv := in.Value() + switch inv.Type() { + case jsonx.String: + val, ok := inv.StringValueOK() + if !ok { + return nil, errors.New("mismatched json type") + } + return EC.String(in.Key(), val), nil + case jsonx.Bool: + val, ok := inv.BooleanOK() + if !ok { + return nil, errors.New("mismatched json type") + } + return EC.Boolean(in.Key(), val), nil + case jsonx.Null: + return EC.Null(in.Key()), nil + case jsonx.NumberInteger: + val, ok := inv.IntOK() + if !ok { + return nil, errors.New("mismatched json type") + } + return EC.Int(in.Key(), val), nil + case jsonx.NumberDouble: + val, ok := inv.Float64OK() + if !ok { + return nil, errors.New("mismatched json type") + } + return EC.Double(in.Key(), val), nil + case jsonx.Number: + return EC.Interface(in.Key(), inv.Interface()), nil + case jsonx.ObjectValue: + indoc := in.Value().Document() + switch indoc.KeyAtIndex(0) { + case "$minKey": + return EC.MinKey(in.Key()), nil + case "$maxKey": + return EC.MaxKey(in.Key()), nil + case "$numberDecimal": + val, err := decimal.ParseDecimal128(indoc.ElementAtIndex(0).Value().StringValue()) + if err != nil { + return nil, errors.WithStack(err) + } + + return EC.Decimal128(in.Key(), val), nil + case "$timestamp": + var ( + t int64 + i int64 + val int + ok bool + ) + + tsDoc := indoc.ElementAtIndex(0).Value().Document() + iter := tsDoc.Iterator() + count := 0 + for iter.Next() { + if count >= 3 { + break + } + elem := iter.Element() + + switch elem.Key() { + case "t": + val, ok = elem.Value().IntOK() + if !ok { + return nil, errors.Errorf("problem decoding number for timestamp at %s [%T]", in.Key(), elem.Value().Interface()) + } + t = int64(val) + case "i": + val, ok = elem.Value().IntOK() + if !ok { + return nil, errors.Errorf("problem decoding number for timestamp at %s [%T]", in.Key(), elem.Value().Interface()) + } + i = int64(val) + } + count++ + } + + return EC.Timestamp(in.Key(), uint32(t), uint32(i)), nil + case "$symbol": + return EC.Symbol(in.Key(), indoc.ElementAtIndex(0).Value().StringValue()), nil + case "$code": + js, ok := indoc.ElementAtIndex(0).Value().StringValueOK() + if !ok { + return nil, errors.New("invalid code document") + } + + if second := indoc.KeyAtIndex(1); second == "" { + return EC.JavaScript(in.Key(), js), nil + } else if second == "$scope" { + scope, err := convertJSONElements(indoc.ElementAtIndex(1)) + if err != nil { + return nil, errors.WithStack(err) + } + + return EC.CodeWithScope(in.Key(), js, scope.Value().MutableDocument()), nil + } else { + return nil, 
errors.Errorf("invalid key '%s' in code with scope for %s", second, in.Key()) + } + case "$dbPointer": + var ( + ns string + oid string + ok bool + ) + debref := indoc.ElementAtIndex(0).Value().Document() + iter := debref.Iterator() + count := 0 + for iter.Next() { + if count >= 2 { + break + } + elem := iter.Element() + + switch elem.Key() { + case "$ref": + ns, ok = elem.Value().StringValueOK() + if !ok { + return nil, errors.Errorf("problem decoding ns for dbref in %s", in.Key()) + } + case "$id": + oid, ok = elem.Value().StringValueOK() + if !ok { + return nil, errors.Errorf("problem decoding ns for oid in %s", in.Key()) + } + } + count++ + } + if ns == "" || oid == "" { + return nil, errors.New("values for dbref are not defined") + } + + oidp, err := types.ObjectIDFromHex(oid) + if err != nil { + return nil, errors.Wrapf(err, "problem parsing oid from dbref at %s", in.Key()) + } + + return EC.DBPointer(in.Key(), ns, oidp), nil + case "$regularExpression": + var ( + pattern string + options string + ok bool + ) + rex := indoc.ElementAtIndex(0).Value().Document() + iter := rex.Iterator() + count := 0 + for iter.Next() { + if count >= 2 { + break + } + elem := iter.Element() + + switch elem.Key() { + case "pattern": + pattern, ok = elem.Value().StringValueOK() + if !ok { + return nil, errors.Errorf("problem decoding ns for dbref in %s", in.Key()) + } + case "options": + options, ok = elem.Value().StringValueOK() + if !ok { + return nil, errors.Errorf("problem decoding ns for oid in %s", in.Key()) + } + } + count++ + } + + return EC.Regex(in.Key(), pattern, options), nil + case "$date": + date, err := time.Parse(time.RFC3339, indoc.ElementAtIndex(0).Value().StringValue()) + if err != nil { + return nil, errors.WithStack(err) + } + return EC.Time(in.Key(), date), nil + case "$oid": + oid, err := types.ObjectIDFromHex(indoc.ElementAtIndex(0).Value().StringValue()) + if err != nil { + return nil, errors.WithStack(err) + } + + return EC.ObjectID(in.Key(), oid), nil + case "$undefined": + return EC.Undefined(in.Key()), nil + case "$binary": + return EC.Binary(in.Key(), []byte(indoc.ElementAtIndex(0).Value().StringValue())), nil + default: + iter := indoc.Iterator() + + doc := DC.Make(indoc.Len()) + for iter.Next() { + elem, err := convertJSONElements(iter.Element()) + if err != nil { + return nil, errors.WithStack(err) + } + + doc.Append(elem) + } + return EC.SubDocument(in.Key(), doc), nil + } + case jsonx.ArrayValue: + ina := inv.Array() + iter := ina.Iterator() + + array := MakeArray(ina.Len()) + for iter.Next() { + elem, err := convertJSONElements(iter.Element()) + if err != nil { + return nil, errors.WithStack(err) + } + + array.Append(elem.value) + } + return EC.Array(in.Key(), array), nil + default: + return nil, errors.Errorf("unknown value type '%s' [%v]", inv.Type(), inv.Interface()) + } +} diff --git a/vendor/github.com/evergreen-ci/birch/x_marshaler.go b/vendor/github.com/evergreen-ci/birch/x_marshaler.go index a0898fae9fa..b1c19ee020b 100644 --- a/vendor/github.com/evergreen-ci/birch/x_marshaler.go +++ b/vendor/github.com/evergreen-ci/birch/x_marshaler.go @@ -1,5 +1,7 @@ package birch +import "github.com/pkg/errors" + // Marshaler describes types that know how to marshal a document // representation of themselves into bson. Do not use this interface // for types that would marshal themselves into values. 
@@ -12,3 +14,31 @@ type Unmarshaler interface {
 	UnmarshalBSON([]byte) error
 }
+
+// DocumentMarshaler describes types that are able to produce Document
+// representations of themselves.
+type DocumentMarshaler interface {
+	MarshalDocument() (*Document, error)
+}
+
+// DocumentUnmarshaler describes a type that can populate itself from
+// a document.
+type DocumentUnmarshaler interface {
+	UnmarshalDocument(*Document) error
+}
+
+// MarshalDocumentBSON provides a convenience function to convert
+// document marshalers directly to bson.
+func MarshalDocumentBSON(dm DocumentMarshaler) ([]byte, error) {
+	doc, err := dm.MarshalDocument()
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+
+	out, err := doc.MarshalBSON()
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+
+	return out, nil
+}
diff --git a/vendor/github.com/evergreen-ci/birch/x_value.go b/vendor/github.com/evergreen-ci/birch/x_value.go
index 07516d4b0de..f89514d1e2c 100644
--- a/vendor/github.com/evergreen-ci/birch/x_value.go
+++ b/vendor/github.com/evergreen-ci/birch/x_value.go
@@ -11,14 +11,17 @@ func checkEqualVal(t1, t2 bsontype.Type, v1, v2 []byte) bool {
 	if t1 != t2 {
 		return false
 	}
+
 	v1, _, ok := readValue(v1, t1)
 	if !ok {
 		return false
 	}
+
 	v2, _, ok = readValue(v2, t2)
 	if !ok {
 		return false
 	}
+
 	return bytes.Equal(v1, v2)
 }
 
@@ -27,6 +30,7 @@ func readstring(src []byte) (string, []byte, bool) {
 	if !ok {
 		return "", src, false
 	}
+
 	if len(src[4:]) < int(l) {
 		return "", src, false
 	}
@@ -50,6 +54,7 @@ func appendstring(dst []byte, s string) []byte {
 	l := int32(len(s) + 1)
 	dst = appendi32(dst, l)
 	dst = append(dst, s...)
+
 	return append(dst, 0x00)
 }
diff --git a/vendor/github.com/evergreen-ci/pail/LICENSE b/vendor/github.com/evergreen-ci/pail/LICENSE
new file mode 100644
index 00000000000..5df6e7cbf17
--- /dev/null
+++ b/vendor/github.com/evergreen-ci/pail/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2019 MongoDB, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/evergreen-ci/pail/bucket_test.go b/vendor/github.com/evergreen-ci/pail/bucket_test.go index b4970941d2d..d790a089944 100644 --- a/vendor/github.com/evergreen-ci/pail/bucket_test.go +++ b/vendor/github.com/evergreen-ci/pail/bucket_test.go @@ -5,7 +5,6 @@ import ( "context" "crypto/rand" "encoding/hex" - "fmt" "io/ioutil" "os" "path/filepath" @@ -14,10 +13,6 @@ import ( "testing" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/s3" - homedir "github.com/mitchellh/go-homedir" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -32,58 +27,9 @@ func newUUID() string { return hex.EncodeToString(b) } -func createS3Client(region string) (*s3.S3, error) { - sess, err := session.NewSession(&aws.Config{Region: aws.String(region)}) - if err != nil { - return nil, errors.Wrap(err, "problem connecting to AWS") - } - svc := s3.New(sess) - return svc, nil -} - -func cleanUpS3Bucket(name, prefix, region string) error { - svc, err := createS3Client(region) - if err != nil { - return errors.Wrap(err, "clean up failed") - } - deleteObjectsInput := &s3.DeleteObjectsInput{ - Bucket: aws.String(name), - Delete: &s3.Delete{}, - } - listInput := &s3.ListObjectsInput{ - Bucket: aws.String(name), - Prefix: aws.String(prefix), - } - var result *s3.ListObjectsOutput - - for { - result, err = svc.ListObjects(listInput) - if err != nil { - return errors.Wrap(err, "clean up failed") - } - - for _, object := range result.Contents { - deleteObjectsInput.Delete.Objects = append(deleteObjectsInput.Delete.Objects, &s3.ObjectIdentifier{ - Key: object.Key, - }) - } - - if deleteObjectsInput.Delete.Objects != nil { - _, err = svc.DeleteObjects(deleteObjectsInput) - if err != nil { - return errors.Wrap(err, "failed to delete S3 bucket") - } - deleteObjectsInput.Delete = &s3.Delete{} - } - - if *result.IsTruncated { - listInput.Marker = result.Contents[len(result.Contents)-1].Key - } else { - break - } - } - - return nil +type bucketTestCase struct { + id string + test func(*testing.T, Bucket) } func TestBucket(t *testing.T) { @@ -114,11 +60,6 @@ func TestBucket(t *testing.T) { defer cancel() require.NoError(t, client.Connect(connctx)) - type bucketTestCase struct { - id string - test func(*testing.T, Bucket) - } - for _, impl := range []struct { name string constructor func(*testing.T) Bucket @@ -127,9 +68,9 @@ func TestBucket(t *testing.T) { { name: "Local", constructor: func(t *testing.T) Bucket { - path := filepath.Join(tempdir, uuid, newUUID()) + path := filepath.Join(tempdir, uuid) require.NoError(t, os.MkdirAll(path, 0777)) - return &localFileSystem{path: path} + return &localFileSystem{path: path, prefix: newUUID()} }, tests: []bucketTestCase{ { @@ -227,7 +168,9 @@ func TestBucket(t *testing.T) { cancel() bucket := b.(*localFileSystem) bucket.path = "" - err := b.Pull(tctx, "", filepath.Dir(file)) + bucket.prefix = "" + opts := SyncOptions{Remote: filepath.Dir(file)} + err := b.Pull(tctx, opts) assert.Error(t, err) }, }, @@ -236,7 +179,8 @@ func TestBucket(t *testing.T) { test: func(t *testing.T, b Bucket) { tctx, cancel := context.WithCancel(ctx) cancel() - err := b.Push(tctx, filepath.Dir(file), "") + opts := SyncOptions{Local: filepath.Dir(file)} + err := b.Push(tctx, opts) assert.Error(t, err) }, }, @@ -247,6 +191,7 @@ func TestBucket(t *testing.T) { constructor: func(t *testing.T) Bucket { require.NoError(t, client.Database(uuid).Drop(ctx)) b, err := 
NewGridFSBucketWithClient(ctx, client, GridFSOptions{ + Name: newUUID(), Prefix: newUUID(), Database: uuid, }) @@ -259,6 +204,7 @@ func TestBucket(t *testing.T) { constructor: func(t *testing.T) Bucket { require.NoError(t, client.Database(uuid).Drop(ctx)) b, err := NewLegacyGridFSBucketWithSession(ses.Clone(), GridFSOptions{ + Name: newUUID(), Prefix: newUUID(), Database: uuid, }) @@ -301,223 +247,48 @@ func TestBucket(t *testing.T) { require.NoError(t, err) return b }, - tests: []bucketTestCase{ - { - id: "VerifyBucketType", - test: func(t *testing.T, b Bucket) { - bucket, ok := b.(*s3BucketSmall) - require.True(t, ok) - assert.NotNil(t, bucket) - }, - }, - { - id: "TestCredentialsOverrideDefaults", - test: func(t *testing.T, b Bucket) { - input := &s3.GetBucketLocationInput{ - Bucket: aws.String(s3BucketName), - } - - rawBucket := b.(*s3BucketSmall) - _, err := rawBucket.svc.GetBucketLocationWithContext(ctx, input) - assert.NoError(t, err) - - badOptions := S3Options{ - Credentials: CreateAWSCredentials("asdf", "asdf", "asdf"), - Region: s3Region, - Name: s3BucketName, - } - badBucket, err := NewS3Bucket(badOptions) - require.NoError(t, err) - rawBucket = badBucket.(*s3BucketSmall) - _, err = rawBucket.svc.GetBucketLocationWithContext(ctx, input) - assert.Error(t, err) - }, - }, - { - id: "TestCheckPassesWhenDoNotHaveAccess", - test: func(t *testing.T, b Bucket) { - rawBucket := b.(*s3BucketSmall) - rawBucket.name = "mciuploads" - assert.NoError(t, rawBucket.Check(ctx)) - }, - }, - { - id: "TestCheckFailsWhenBucketDNE", - test: func(t *testing.T, b Bucket) { - rawBucket := b.(*s3BucketSmall) - rawBucket.name = newUUID() - assert.Error(t, rawBucket.Check(ctx)) - }, - }, - { - id: "TestSharedCredentialsOption", - test: func(t *testing.T, b Bucket) { - require.NoError(t, b.Check(ctx)) - - newFile, err := os.Create(filepath.Join(tempdir, "creds")) - require.NoError(t, err) - defer newFile.Close() - _, err = newFile.WriteString("[my_profile]\n") - require.NoError(t, err) - awsKey := fmt.Sprintf("aws_access_key_id = %s\n", os.Getenv("AWS_KEY")) - _, err = newFile.WriteString(awsKey) - require.NoError(t, err) - awsSecret := fmt.Sprintf("aws_secret_access_key = %s\n", os.Getenv("AWS_SECRET")) - _, err = newFile.WriteString(awsSecret) - require.NoError(t, err) - - sharedCredsOptions := S3Options{ - SharedCredentialsFilepath: filepath.Join(tempdir, "creds"), - SharedCredentialsProfile: "my_profile", - Region: s3Region, - Name: s3BucketName, - } - sharedCredsBucket, err := NewS3Bucket(sharedCredsOptions) - require.NoError(t, err) - assert.NoError(t, sharedCredsBucket.Check(ctx)) - }, - }, - { - id: "TestSharedCredentialsUsesCorrectDefaultFile", - test: func(t *testing.T, b Bucket) { - require.NoError(t, b.Check(ctx)) - - sharedCredsOptions := S3Options{ - SharedCredentialsProfile: "default", - Region: s3Region, - Name: s3BucketName, - } - sharedCredsBucket, err := NewS3Bucket(sharedCredsOptions) - require.NoError(t, err) - homeDir, err := homedir.Dir() - require.NoError(t, err) - fileName := filepath.Join(homeDir, ".aws", "credentials") - _, err = os.Stat(fileName) - if err == nil { - assert.NoError(t, sharedCredsBucket.Check(ctx)) - } else { - assert.True(t, os.IsNotExist(err)) - } - }, - }, - { - id: "TestSharedCredentialsFailsWhenProfileDNE", - test: func(t *testing.T, b Bucket) { - require.NoError(t, b.Check(ctx)) - - sharedCredsOptions := S3Options{ - SharedCredentialsProfile: "DNE", - Region: s3Region, - Name: s3BucketName, - } - _, err := NewS3Bucket(sharedCredsOptions) - assert.Error(t, 
err) - }, - }, + tests: getS3SmallBucketTests(ctx, tempdir, s3BucketName, s3Prefix, s3Region), + }, + { + name: "S3BucketChecksums", + constructor: func(t *testing.T) Bucket { + s3Options := S3Options{ + Region: s3Region, + Name: s3BucketName, + Prefix: s3Prefix + newUUID(), + MaxRetries: 20, + UseSingleFileChecksums: true, + } + b, err := NewS3Bucket(s3Options) + require.NoError(t, err) + return b + }, + tests: getS3SmallBucketTests(ctx, tempdir, s3BucketName, s3Prefix, s3Region), + }, + { + name: "ParallelLocal", + constructor: func(t *testing.T) Bucket { + t.Skip() + path := filepath.Join(tempdir, uuid, newUUID()) + require.NoError(t, os.MkdirAll(path, 0777)) + bucket := &localFileSystem{path: path} - { - id: "TestPermissions", - test: func(t *testing.T, b Bucket) { - // default permissions - key1 := newUUID() - writer, err := b.Writer(ctx, key1) - require.NoError(t, err) - _, err = writer.Write([]byte("hello world")) - require.NoError(t, err) - require.NoError(t, writer.Close()) - rawBucket := b.(*s3BucketSmall) - objectACLInput := &s3.GetObjectAclInput{ - Bucket: aws.String(s3BucketName), - Key: aws.String(rawBucket.normalizeKey(key1)), - } - objectACLOutput, err := rawBucket.svc.GetObjectAcl(objectACLInput) - require.NoError(t, err) - require.Equal(t, 1, len(objectACLOutput.Grants)) - assert.Equal(t, "FULL_CONTROL", *objectACLOutput.Grants[0].Permission) - - // explicitly set permissions - openOptions := S3Options{ - Region: s3Region, - Name: s3BucketName, - Prefix: s3Prefix + newUUID(), - Permission: "public-read", - } - openBucket, err := NewS3Bucket(openOptions) - require.NoError(t, err) - key2 := newUUID() - writer, err = openBucket.Writer(ctx, key2) - require.NoError(t, err) - _, err = writer.Write([]byte("hello world")) - require.NoError(t, err) - require.NoError(t, writer.Close()) - rawBucket = openBucket.(*s3BucketSmall) - objectACLInput = &s3.GetObjectAclInput{ - Bucket: aws.String(s3BucketName), - Key: aws.String(rawBucket.normalizeKey(key2)), - } - objectACLOutput, err = rawBucket.svc.GetObjectAcl(objectACLInput) - require.NoError(t, err) - require.Equal(t, 2, len(objectACLOutput.Grants)) - assert.Equal(t, "READ", *objectACLOutput.Grants[1].Permission) - - // copy with permissions - destKey := newUUID() - copyOpts := CopyOptions{ - SourceKey: key1, - DestinationKey: destKey, - DestinationBucket: openBucket, - } - require.NoError(t, b.Copy(ctx, copyOpts)) - require.NoError(t, err) - require.Equal(t, 2, len(objectACLOutput.Grants)) - assert.Equal(t, "READ", *objectACLOutput.Grants[1].Permission) - }, - }, - { - id: "TestContentType", - test: func(t *testing.T, b Bucket) { - // default content type - key := newUUID() - writer, err := b.Writer(ctx, key) - require.NoError(t, err) - _, err = writer.Write([]byte("hello world")) - require.NoError(t, err) - require.NoError(t, writer.Close()) - rawBucket := b.(*s3BucketSmall) - getObjectInput := &s3.GetObjectInput{ - Bucket: aws.String(s3BucketName), - Key: aws.String(rawBucket.normalizeKey(key)), - } - getObjectOutput, err := rawBucket.svc.GetObject(getObjectInput) - require.NoError(t, err) - assert.Nil(t, getObjectOutput.ContentType) - - // explicitly set content type - htmlOptions := S3Options{ - Region: s3Region, - Name: s3BucketName, - Prefix: s3Prefix + newUUID(), - ContentType: "html/text", - } - htmlBucket, err := NewS3Bucket(htmlOptions) - require.NoError(t, err) - key = newUUID() - writer, err = htmlBucket.Writer(ctx, key) - require.NoError(t, err) - _, err = writer.Write([]byte("hello world")) - require.NoError(t, err) - 
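The S3 cases deleted here are not dropped: they move into shared helpers (`getS3SmallBucketTests` and `getS3LargeBucketTests`, defined in a test file that is not part of this diff) so the new checksum-enabled variants can run the identical table. A plausible shape for the small-bucket helper, inside package pail, reusing the `bucketTestCase` type introduced at the top of the file; the body shown is a guess based on the removed cases:

```go
// Hypothetical sketch; the real helper body is not included in this diff.
func getS3SmallBucketTests(ctx context.Context, tempdir, bucketName, prefix, region string) []bucketTestCase {
	return []bucketTestCase{
		{
			id: "VerifyBucketType",
			test: func(t *testing.T, b Bucket) {
				bucket, ok := b.(*s3BucketSmall)
				require.True(t, ok)
				assert.NotNil(t, bucket)
			},
		},
		// ... presumably followed by the credentials, permissions, and
		// content-type cases removed from this file ...
	}
}
```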
require.NoError(t, writer.Close()) - rawBucket = htmlBucket.(*s3BucketSmall) - getObjectInput = &s3.GetObjectInput{ - Bucket: aws.String(s3BucketName), - Key: aws.String(rawBucket.normalizeKey(key)), - } - getObjectOutput, err = rawBucket.svc.GetObject(getObjectInput) - require.NoError(t, err) - require.NotNil(t, getObjectOutput.ContentType) - assert.Equal(t, "html/text", *getObjectOutput.ContentType) - }, - }, + return NewParallelSyncBucket(ParallelBucketOptions{Workers: runtime.NumCPU()}, bucket) + }, + }, + { + name: "ParallelS3Bucket", + constructor: func(t *testing.T) Bucket { + s3Options := S3Options{ + Region: s3Region, + Name: s3BucketName, + Prefix: s3Prefix + newUUID(), + MaxRetries: 20, + UseSingleFileChecksums: true, + } + b, err := NewS3Bucket(s3Options) + require.NoError(t, err) + return NewParallelSyncBucket(ParallelBucketOptions{Workers: runtime.NumCPU()}, b) }, }, { @@ -533,127 +304,23 @@ func TestBucket(t *testing.T) { require.NoError(t, err) return b }, - tests: []bucketTestCase{ - { - id: "VerifyBucketType", - test: func(t *testing.T, b Bucket) { - bucket, ok := b.(*s3BucketLarge) - require.True(t, ok) - assert.NotNil(t, bucket) - }, - }, - { - id: "TestPermissions", - test: func(t *testing.T, b Bucket) { - // default permissions - key := newUUID() - writer, err := b.Writer(ctx, key) - require.NoError(t, err) - _, err = writer.Write([]byte("hello world")) - require.NoError(t, err) - require.NoError(t, writer.Close()) - rawBucket := b.(*s3BucketLarge) - objectACLInput := &s3.GetObjectAclInput{ - Bucket: aws.String(s3BucketName), - Key: aws.String(rawBucket.normalizeKey(key)), - } - objectACLOutput, err := rawBucket.svc.GetObjectAcl(objectACLInput) - require.NoError(t, err) - require.Equal(t, 1, len(objectACLOutput.Grants)) - assert.Equal(t, "FULL_CONTROL", *objectACLOutput.Grants[0].Permission) - - // explicitly set permissions - openOptions := S3Options{ - Region: s3Region, - Name: s3BucketName, - Prefix: s3Prefix + newUUID(), - Permission: "public-read", - } - openBucket, err := NewS3MultiPartBucket(openOptions) - require.NoError(t, err) - key = newUUID() - writer, err = openBucket.Writer(ctx, key) - require.NoError(t, err) - _, err = writer.Write([]byte("hello world")) - require.NoError(t, err) - require.NoError(t, writer.Close()) - rawBucket = openBucket.(*s3BucketLarge) - objectACLInput = &s3.GetObjectAclInput{ - Bucket: aws.String(s3BucketName), - Key: aws.String(rawBucket.normalizeKey(key)), - } - objectACLOutput, err = rawBucket.svc.GetObjectAcl(objectACLInput) - require.NoError(t, err) - require.Equal(t, 2, len(objectACLOutput.Grants)) - assert.Equal(t, "READ", *objectACLOutput.Grants[1].Permission) - }, - }, - { - id: "TestContentType", - test: func(t *testing.T, b Bucket) { - // default content type - key := newUUID() - writer, err := b.Writer(ctx, key) - require.NoError(t, err) - _, err = writer.Write([]byte("hello world")) - require.NoError(t, err) - require.NoError(t, writer.Close()) - rawBucket := b.(*s3BucketLarge) - getObjectInput := &s3.GetObjectInput{ - Bucket: aws.String(s3BucketName), - Key: aws.String(rawBucket.normalizeKey(key)), - } - getObjectOutput, err := rawBucket.svc.GetObject(getObjectInput) - require.NoError(t, err) - assert.Nil(t, getObjectOutput.ContentType) - - // explicitly set content type - htmlOptions := S3Options{ - Region: s3Region, - Name: s3BucketName, - Prefix: s3Prefix + newUUID(), - ContentType: "html/text", - } - htmlBucket, err := NewS3MultiPartBucket(htmlOptions) - require.NoError(t, err) - key = newUUID() - writer, err = 
htmlBucket.Writer(ctx, key) - require.NoError(t, err) - _, err = writer.Write([]byte("hello world")) - require.NoError(t, err) - require.NoError(t, writer.Close()) - rawBucket = htmlBucket.(*s3BucketLarge) - getObjectInput = &s3.GetObjectInput{ - Bucket: aws.String(s3BucketName), - Key: aws.String(rawBucket.normalizeKey(key)), - } - getObjectOutput, err = rawBucket.svc.GetObject(getObjectInput) - require.NoError(t, err) - require.NotNil(t, getObjectOutput.ContentType) - assert.Equal(t, "html/text", *getObjectOutput.ContentType) - }, - }, - { - id: "TestLargeFileRoundTrip", - test: func(t *testing.T, b Bucket) { - size := int64(10000000) - key := newUUID() - bigBuff := make([]byte, size) - path := filepath.Join(tempdir, "bigfile.test0") - - // upload large empty file - require.NoError(t, ioutil.WriteFile(path, bigBuff, 0666)) - require.NoError(t, b.Upload(ctx, key, path)) - - // check size of empty file - path = filepath.Join(tempdir, "bigfile.test1") - require.NoError(t, b.Download(ctx, key, path)) - fi, err := os.Stat(path) - require.NoError(t, err) - assert.Equal(t, size, fi.Size()) - }, - }, + tests: getS3LargeBucketTests(ctx, tempdir, s3BucketName, s3Prefix, s3Region), + }, + { + name: "S3MultiPartBucketChecksum", + constructor: func(t *testing.T) Bucket { + s3Options := S3Options{ + Region: s3Region, + Name: s3BucketName, + Prefix: s3Prefix + newUUID(), + MaxRetries: 20, + UseSingleFileChecksums: true, + } + b, err := NewS3MultiPartBucket(s3Options) + require.NoError(t, err) + return b }, + tests: getS3LargeBucketTests(ctx, tempdir, s3BucketName, s3Prefix, s3Region), }, } { t.Run(impl.name, func(t *testing.T) { @@ -1013,27 +680,27 @@ func TestBucket(t *testing.T) { assert.NoError(t, writeDataToFile(ctx, bucket, key, contents)) _, err := os.Stat(path) - assert.True(t, os.IsNotExist(err)) - assert.NoError(t, bucket.Download(ctx, key, path)) + require.True(t, os.IsNotExist(err)) + require.NoError(t, bucket.Download(ctx, key, path)) _, err = os.Stat(path) - assert.False(t, os.IsNotExist(err)) + require.False(t, os.IsNotExist(err)) data, err := ioutil.ReadFile(path) require.NoError(t, err) - assert.Equal(t, contents, string(data)) + require.Equal(t, contents, string(data)) // writes file to disk with dry run bucket setDryRun(bucket, true) path = filepath.Join(tempdir, uuid, newUUID()) _, err = os.Stat(path) - assert.True(t, os.IsNotExist(err)) - assert.NoError(t, bucket.Download(ctx, key, path)) + require.True(t, os.IsNotExist(err)) + require.NoError(t, bucket.Download(ctx, key, path)) _, err = os.Stat(path) - assert.False(t, os.IsNotExist(err)) + require.False(t, os.IsNotExist(err)) data, err = ioutil.ReadFile(path) require.NoError(t, err) - assert.Equal(t, contents, string(data)) + require.Equal(t, contents, string(data)) }) t.Run("ListRespectsPrefixes", func(t *testing.T) { bucket := impl.constructor(t) @@ -1059,7 +726,7 @@ func TestBucket(t *testing.T) { }) t.Run("RoundTripManyFiles", func(t *testing.T) { data := map[string]string{} - for i := 0; i < 300; i++ { + for i := 0; i < 3; i++ { data[newUUID()] = strings.Join([]string{newUUID(), newUUID(), newUUID()}, "\n") } @@ -1094,7 +761,7 @@ func TestBucket(t *testing.T) { }) t.Run("PullFromBucket", func(t *testing.T) { data := map[string]string{} - for i := 0; i < 100; i++ { + for i := 0; i < 50; i++ { data[newUUID()] = strings.Join([]string{newUUID(), newUUID(), newUUID()}, "\n") } @@ -1106,125 +773,266 @@ func TestBucket(t *testing.T) { t.Run("BasicPull", func(t *testing.T) { mirror := filepath.Join(tempdir, "pull-one", newUUID()) 
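Both new Parallel variants wrap an existing implementation in `NewParallelSyncBucket`, which fans `Push` and `Pull` transfers out across a worker pool and delegates everything else to the wrapped `Bucket`. A sketch of standalone use, with an illustrative region and bucket name:

```go
package example

import (
	"runtime"

	"github.com/evergreen-ci/pail"
)

// parallelS3 wraps an S3 bucket so sync operations run concurrently.
func parallelS3() (pail.Bucket, error) {
	base, err := pail.NewS3Bucket(pail.S3Options{
		Region:                 "us-east-1", // illustrative
		Name:                   "my-bucket", // illustrative
		MaxRetries:             20,
		UseSingleFileChecksums: true, // lets sync skip unchanged files
	})
	if err != nil {
		return nil, err
	}

	// One worker per CPU, matching the choice in the tests above.
	return pail.NewParallelSyncBucket(pail.ParallelBucketOptions{Workers: runtime.NumCPU()}, base), nil
}
```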
require.NoError(t, os.MkdirAll(mirror, 0700)) - for i := 0; i < 3; i++ { - assert.NoError(t, bucket.Pull(ctx, mirror, "")) - files, err := walkLocalTree(ctx, mirror) - require.NoError(t, err) - assert.Len(t, files, 100) + opts := SyncOptions{Local: mirror} + assert.NoError(t, bucket.Pull(ctx, opts)) + files, err := walkLocalTree(ctx, mirror) + require.NoError(t, err) + require.Len(t, files, 50) - if !strings.Contains(impl.name, "GridFS") { - for _, fn := range files { - _, ok := data[filepath.Base(fn)] - require.True(t, ok) - } + if !strings.Contains(impl.name, "GridFS") { + for _, fn := range files { + _, ok := data[filepath.Base(fn)] + require.True(t, ok) } } }) t.Run("DryRunBucketPulls", func(t *testing.T) { setDryRun(bucket, true) - mirror := filepath.Join(tempdir, "pull-one", newUUID()) + mirror := filepath.Join(tempdir, "pull-one", newUUID(), "") require.NoError(t, os.MkdirAll(mirror, 0700)) - for i := 0; i < 3; i++ { - assert.NoError(t, bucket.Pull(ctx, mirror, "")) - files, err := walkLocalTree(ctx, mirror) - require.NoError(t, err) - assert.Len(t, files, 100) + opts := SyncOptions{Local: mirror} + assert.NoError(t, bucket.Pull(ctx, opts)) + files, err := walkLocalTree(ctx, mirror) + require.NoError(t, err) + require.Len(t, files, 50) - if !strings.Contains(impl.name, "GridFS") { - for _, fn := range files { - _, ok := data[filepath.Base(fn)] - require.True(t, ok) - } + if !strings.Contains(impl.name, "GridFS") { + for _, fn := range files { + _, ok := data[filepath.Base(fn)] + require.True(t, ok) } } setDryRun(bucket, false) }) + t.Run("PullWithExcludes", func(t *testing.T) { + require.NoError(t, writeDataToFile(ctx, bucket, "python.py", "exclude")) + require.NoError(t, writeDataToFile(ctx, bucket, "python2.py", "exclude2")) + + mirror := filepath.Join(tempdir, "not_excludes", newUUID()) + require.NoError(t, os.MkdirAll(mirror, 0700)) + opts := SyncOptions{Local: mirror} + assert.NoError(t, bucket.Pull(ctx, opts)) + files, err := walkLocalTree(ctx, mirror) + require.NoError(t, err) + require.Len(t, files, 52) + + if !strings.Contains(impl.name, "GridFS") { + for _, fn := range files { + _, ok := data[filepath.Base(fn)] + if !ok { + ok = filepath.Base(fn) == "python.py" || filepath.Base(fn) == "python2.py" + } + require.True(t, ok) + } + } + + mirror = filepath.Join(tempdir, "excludes", newUUID()) + require.NoError(t, os.MkdirAll(mirror, 0700)) + opts.Local = mirror + opts.Exclude = ".*\\.py" + assert.NoError(t, bucket.Pull(ctx, opts)) + files, err = walkLocalTree(ctx, mirror) + require.NoError(t, err) + require.Len(t, files, 50) + + if !strings.Contains(impl.name, "GridFS") { + for _, fn := range files { + _, ok := data[filepath.Base(fn)] + require.True(t, ok) + } + } + + require.NoError(t, bucket.Remove(ctx, "python.py")) + require.NoError(t, bucket.Remove(ctx, "python2.py")) + }) t.Run("DeleteOnSync", func(t *testing.T) { setDeleteOnSync(bucket, true) // dry run bucket does not delete mirror := filepath.Join(tempdir, "pull-one", newUUID()) require.NoError(t, os.MkdirAll(mirror, 0700)) + require.NoError(t, writeDataToDisk(mirror, "delete1", "should be deleted")) + require.NoError(t, writeDataToDisk(mirror, "delete2", "this should also be deleted")) setDryRun(bucket, true) - require.NoError(t, bucket.Pull(ctx, mirror, "")) + opts := SyncOptions{Local: mirror} + require.NoError(t, bucket.Pull(ctx, opts)) files, err := walkLocalTree(ctx, mirror) require.NoError(t, err) - require.Len(t, files, 100) - - iter, err := bucket.List(ctx, "") - require.NoError(t, err) - count := 0 - for 
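The `PullWithExcludes` case above pins down the `Exclude` contract: it is a regular expression matched against each candidate key, and matching keys are simply skipped. Every implementation vendored below repeats the same compile-once/skip-on-match logic, which in isolation (inside package pail) reduces to:

```go
// compileExclude mirrors the inline pattern each Push/Pull below repeats;
// the helper name is illustrative, not part of pail.
func compileExclude(exclude string) (*regexp.Regexp, error) {
	if exclude == "" {
		return nil, nil // nil means "exclude nothing"
	}
	re, err := regexp.Compile(exclude)
	return re, errors.Wrap(err, "problem compiling exclude regex")
}

// Usage inside a sync loop:
//
//	for _, path := range localPaths {
//		if re != nil && re.MatchString(path) {
//			continue // excluded from this sync
//		}
//		// ... transfer path ...
//	}
```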
iter.Next(ctx) { - require.NotNil(t, iter.Item()) - count++ - } - assert.NoError(t, iter.Err()) - assert.Equal(t, 100, count) + require.Len(t, files, 52) setDryRun(bucket, false) require.NoError(t, os.RemoveAll(mirror)) // without dry run set mirror = filepath.Join(tempdir, "pull-one", newUUID()) require.NoError(t, os.MkdirAll(mirror, 0700)) - assert.NoError(t, bucket.Pull(ctx, mirror, "")) + require.NoError(t, writeDataToDisk(mirror, "delete1", "should be deleted")) + require.NoError(t, writeDataToDisk(mirror, "delete2", "this should also be deleted")) + opts.Local = mirror + assert.NoError(t, bucket.Pull(ctx, opts)) files, err = walkLocalTree(ctx, mirror) require.NoError(t, err) - assert.Len(t, files, 100) + assert.Len(t, files, 50) + setDeleteOnSync(bucket, false) + }) + t.Run("LargePull", func(t *testing.T) { + prefix := newUUID() + largeData := map[string]string{} + for i := 0; i < 1050; i++ { + largeData[newUUID()] = strings.Join([]string{newUUID(), newUUID(), newUUID()}, "\n") + } + for k, v := range largeData { + require.NoError(t, writeDataToFile(ctx, bucket, prefix+"/"+k, v)) + } - iter, err = bucket.List(ctx, "") + mirror := filepath.Join(tempdir, "pull-one", newUUID(), "") + require.NoError(t, os.MkdirAll(mirror, 0700)) + + opts := SyncOptions{Local: mirror, Remote: prefix} + assert.NoError(t, bucket.Pull(ctx, opts)) + files, err := walkLocalTree(ctx, mirror) require.NoError(t, err) - assert.False(t, iter.Next(ctx)) - assert.Nil(t, iter.Item()) - assert.NoError(t, iter.Err()) + assert.Len(t, files, len(largeData)) - setDeleteOnSync(bucket, false) + if !strings.Contains(impl.name, "GridFS") { + for _, fn := range files { + _, ok := largeData[fn] + require.True(t, ok) + } + } }) }) t.Run("PushToBucket", func(t *testing.T) { prefix := filepath.Join(tempdir, newUUID()) - for i := 0; i < 100; i++ { + filenames := map[string]bool{} + for i := 0; i < 50; i++ { + fn := newUUID() + filenames[fn] = true require.NoError(t, writeDataToDisk(prefix, - newUUID(), strings.Join([]string{newUUID(), newUUID(), newUUID()}, "\n"))) + fn, strings.Join([]string{newUUID(), newUUID(), newUUID()}, "\n"))) } bucket := impl.constructor(t) t.Run("NoPrefix", func(t *testing.T) { - assert.NoError(t, bucket.Push(ctx, prefix, "")) - assert.NoError(t, bucket.Push(ctx, prefix, "")) + opts := SyncOptions{Local: prefix} + assert.NoError(t, bucket.Push(ctx, opts)) + + iter, err := bucket.List(ctx, "") + require.NoError(t, err) + counter := 0 + for iter.Next(ctx) { + require.True(t, filenames[iter.Item().Name()]) + counter++ + } + assert.NoError(t, iter.Err()) + assert.Equal(t, 50, counter) }) t.Run("ShortPrefix", func(t *testing.T) { - assert.NoError(t, bucket.Push(ctx, prefix, "foo")) - assert.NoError(t, bucket.Push(ctx, prefix, "foo")) + remotePrefix := "foo" + opts := SyncOptions{Local: prefix, Remote: remotePrefix} + assert.NoError(t, bucket.Push(ctx, opts)) + + iter, err := bucket.List(ctx, remotePrefix) + require.NoError(t, err) + counter := 0 + for iter.Next(ctx) { + fn, err := filepath.Rel(remotePrefix, iter.Item().Name()) + require.NoError(t, err) + require.True(t, filenames[fn]) + counter++ + } + assert.NoError(t, iter.Err()) + assert.Equal(t, 50, counter) }) t.Run("DryRunBucketDoesNotPush", func(t *testing.T) { + remotePrefix := "bar" setDryRun(bucket, true) - assert.NoError(t, bucket.Push(ctx, prefix, "bar")) + opts := SyncOptions{Local: prefix, Remote: remotePrefix} + assert.NoError(t, bucket.Push(ctx, opts)) + + iter, err := bucket.List(ctx, remotePrefix) + require.NoError(t, err) + counter := 0 + 
for iter.Next(ctx) { + counter++ + } + assert.NoError(t, iter.Err()) + assert.Equal(t, 0, counter) + setDryRun(bucket, false) }) - t.Run("BucketContents", func(t *testing.T) { - iter, err := bucket.List(ctx, "") + t.Run("PushWithExcludes", func(t *testing.T) { + require.NoError(t, writeDataToDisk(prefix, "python.py", "exclude")) + require.NoError(t, writeDataToDisk(prefix, "python2.py", "exclude2")) + + remotePrefix := "not_excludes" + opts := SyncOptions{Local: prefix, Remote: remotePrefix} + assert.NoError(t, bucket.Push(ctx, opts)) + iter, err := bucket.List(ctx, remotePrefix) require.NoError(t, err) counter := 0 for iter.Next(ctx) { + fn, err := filepath.Rel(remotePrefix, iter.Item().Name()) + require.NoError(t, err) + ok := filenames[fn] + if !ok { + ok = fn == "python.py" || fn == "python2.py" + } + require.True(t, ok) counter++ } assert.NoError(t, iter.Err()) - assert.Equal(t, 200, counter) + assert.Equal(t, 52, counter) + + remotePrefix = "excludes" + opts.Remote = remotePrefix + opts.Exclude = ".*\\.py" + assert.NoError(t, bucket.Push(ctx, opts)) + iter, err = bucket.List(ctx, remotePrefix) + require.NoError(t, err) + counter = 0 + for iter.Next(ctx) { + fn, err := filepath.Rel(remotePrefix, iter.Item().Name()) + require.NoError(t, err) + require.True(t, filenames[fn]) + counter++ + } + assert.NoError(t, iter.Err()) + assert.Equal(t, 50, counter) + + require.NoError(t, os.RemoveAll(filepath.Join(prefix, "python.py"))) + require.NoError(t, os.RemoveAll(filepath.Join(prefix, "python2.py"))) }) t.Run("DeleteOnSync", func(t *testing.T) { setDeleteOnSync(bucket, true) - // dry run bucket does not delete + contents := []byte("should be deleted") + require.NoError(t, bucket.Put(ctx, filepath.Join("baz", "delete1"), bytes.NewBuffer(contents))) + contents = []byte("this should also be deleted") + require.NoError(t, bucket.Put(ctx, filepath.Join("baz", "delete2"), bytes.NewBuffer(contents))) + + // dry run bucket does not push or delete setDryRun(bucket, true) - assert.NoError(t, bucket.Push(ctx, prefix, "baz")) - files, err := walkLocalTree(ctx, prefix) - require.NoError(t, err) - assert.Equal(t, 100, len(files)) + opts := SyncOptions{Local: prefix, Remote: "baz"} + assert.NoError(t, bucket.Push(ctx, opts)) setDryRun(bucket, false) + iter, err := bucket.List(ctx, "baz") + require.NoError(t, err) + count := 0 + for iter.Next(ctx) { + require.NotNil(t, iter.Item()) + count++ + } + assert.Equal(t, 2, count) - assert.NoError(t, bucket.Push(ctx, prefix, "baz")) - _, err = os.Stat(prefix) - assert.True(t, os.IsNotExist(err)) + assert.NoError(t, bucket.Push(ctx, opts)) + iter, err = bucket.List(ctx, "baz") + require.NoError(t, err) + count = 0 + for iter.Next(ctx) { + require.NotNil(t, iter.Item()) + count++ + } + assert.Equal(t, 50, count) setDeleteOnSync(bucket, false) }) @@ -1233,7 +1041,6 @@ func TestBucket(t *testing.T) { bucket := impl.constructor(t) err := bucket.Upload(ctx, "key", "foo\x00bar") require.Error(t, err) - assert.Contains(t, err.Error(), "problem opening file") }) t.Run("DownloadWithBadFileName", func(t *testing.T) { bucket := impl.constructor(t) @@ -1248,7 +1055,6 @@ func TestBucket(t *testing.T) { err = bucket.Download(ctx, "key", "location-\x00/key-name") require.Error(t, err) - assert.Contains(t, err.Error(), "problem creating enclosing directory") }) t.Run("DownloadToBadFileName", func(t *testing.T) { bucket := impl.constructor(t) @@ -1258,7 +1064,6 @@ func TestBucket(t *testing.T) { err = bucket.Download(ctx, "key", "location-\x00-key-name") require.Error(t, err) - 
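The reworked `DeleteOnSync` expectations above and below encode a behavior change: instead of `os.RemoveAll` on the local tree after a push (or `RemoveMany` on the bucket after a pull), a sync now deletes from the destination whatever the source no longer contains, via `deleteOnPush`/`deleteOnPull` helpers whose definitions are not part of this diff. A hypothetical reconstruction of `deleteOnPush` from the call sites and the interface methods shown here:

```go
// Hypothetical sketch only; the real deleteOnPush lives in a pail source
// file outside this diff. It removes remote keys with no local counterpart.
func deleteOnPush(ctx context.Context, localPaths []string, remote string, b Bucket) error {
	pushed := make(map[string]bool, len(localPaths))
	for _, p := range localPaths {
		pushed[consistentJoin(remote, p)] = true
	}

	iter, err := b.List(ctx, remote)
	if err != nil {
		return errors.WithStack(err)
	}

	var stale []string
	for iter.Next(ctx) {
		if !pushed[iter.Item().Name()] {
			stale = append(stale, iter.Item().Name())
		}
	}
	if err = iter.Err(); err != nil {
		return errors.WithStack(err)
	}

	return errors.Wrap(b.RemoveMany(ctx, stale...), "problem removing stale keys")
}
```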
assert.Contains(t, err.Error(), "problem creating file") }) }) } @@ -1280,7 +1085,6 @@ func writeDataToFile(ctx context.Context, bucket Bucket, key, data string) error if err != nil { return errors.WithStack(err) } - _, err = writer.Write([]byte(data)) if err != nil { return errors.WithStack(err) @@ -1328,6 +1132,9 @@ func setDryRun(b Bucket, set bool) { i.dryRun = set case *gridfsBucket: i.opts.DryRun = set + case *parallelBucketImpl: + i.dryRun = set + setDryRun(i.Bucket, set) } } @@ -1343,5 +1150,8 @@ func setDeleteOnSync(b Bucket, set bool) { i.deleteOnSync = set case *gridfsBucket: i.opts.DeleteOnSync = set + case *parallelBucketImpl: + i.deleteOnSync = set + setDeleteOnSync(i.Bucket, set) } } diff --git a/vendor/github.com/evergreen-ci/pail/evergreen.yaml b/vendor/github.com/evergreen-ci/pail/evergreen.yaml index a2cdcd05b17..bc1fc10a0dd 100644 --- a/vendor/github.com/evergreen-ci/pail/evergreen.yaml +++ b/vendor/github.com/evergreen-ci/pail/evergreen.yaml @@ -21,8 +21,8 @@ variables: type: system params: optional: true - aws_key: ${aws_key} - aws_secret: ${aws_secret} + aws_key: ${aws_s3_key} + aws_secret: ${aws_s3_secret} local_file: gopath/src/github.com/evergreen-ci/pail/build/${task_name}.tar.gz remote_file: pail/${build_id}-${build_variant}/pail-${task_name}-${revision}.tar.gz bucket: mciuploads @@ -55,6 +55,7 @@ functions: silent: true working_dir: gopath/src/github.com/evergreen-ci/pail env: + WORK_DIR: ${workdir} AWS_KEY: ${aws_key} AWS_SECRET: ${aws_secret} command: bash scripts/setup-credentials.sh @@ -70,8 +71,10 @@ functions: DISABLE_COVERAGE: ${disable_coverage} GOROOT: ${goroot} GOPATH: ${workdir}/gopath + USERPROFILE: ${workdir} AWS_KEY: ${aws_key} AWS_SECRET: ${aws_secret} + RACE_DETECTOR: ${race_detector} set-up-mongodb: - command: subprocess.exec type: setup @@ -102,8 +105,9 @@ post: params: files: - "gopath/src/github.com/evergreen-ci/pail/build/output.*" - - "gopath/src/github.com/evergreen-ci/pail/build/test.*.out" - - "gopath/src/github.com/evergreen-ci/pail/build/race.*.out" + - "gopath/src/github.com/evergreen-ci/pail/build/test.out" + - "gopath/src/github.com/evergreen-ci/pail/build/race.out" + - "gopath/src/github.com/evergreen-ci/pail/build/cover*" - command: subprocess.exec type: setup params: @@ -111,9 +115,9 @@ post: - command: s3.put type: system params: - aws_key: ${aws_key} - aws_secret: ${aws_secret} - local_files_include_filter: ["gopath/src/github.com/evergreen-ci/pail/build/output.*.coverage.html"] + aws_key: ${aws_s3_key} + aws_secret: ${aws_s3_secret} + local_files_include_filter: ["gopath/src/github.com/evergreen-ci/pail/build/cover.html"] remote_file: pail/${task_id}/ bucket: mciuploads content_type: text/html @@ -122,9 +126,9 @@ post: - command: s3.put type: system params: - aws_key: ${aws_key} - aws_secret: ${aws_secret} - local_files_include_filter: ["gopath/src/github.com/evergreen-ci/pail/build/output.*.coverage"] + aws_key: ${aws_s3_key} + aws_secret: ${aws_s3_secret} + local_files_include_filter: ["gopath/src/github.com/evergreen-ci/pail/build/cover.out"] remote_file: pail/${task_id}/ bucket: mciuploads content_type: text/plain @@ -172,34 +176,48 @@ buildvariants: - name: race-detector display_name: Race Detector (Arch Linux) expansions: + disable_coverage: true + gobin: /opt/golang/go1.13/bin/go + goroot: /opt/golang/go1.13 + mongodb_url: http://fastdl.mongodb.org/linux/mongodb-linux-x86_64-4.0.1.tgz + race_detector: true run_on: - archlinux-test tasks: - name: ".race" + + - name: coverage + display_name: Coverage (Arch Linux) + 
expansions: + gobin: /opt/golang/go1.13/bin/go + goroot: /opt/golang/go1.13 + mongodb_url: http://fastdl.mongodb.org/linux/mongodb-linux-x86_64-4.0.1.tgz + run_on: + - archlinux-test + tasks: - name: ".report" - name: ubuntu1604 display_name: Ubuntu 16.04 expansions: - mongodb_url: http://fastdl.mongodb.org/linux/mongodb-linux-x86_64-4.0.1.tgz - goroot: /opt/go1.8/go - gobin: /opt/go1.8/go/bin/go disable_coverage: true + gobin: /opt/golang/go1.9/bin/go + goroot: /opt/golang/go1.9 + mongodb_url: http://fastdl.mongodb.org/linux/mongodb-linux-x86_64-4.0.1.tgz run_on: - ubuntu1604-test tasks: - name: ".test" - name: macos - display_name: macOS 10.12 + display_name: macOS 10.14 expansions: - mongodb_url: https://fastdl.mongodb.org/osx/mongodb-osx-ssl-x86_64-4.0.1.tgz - goroot: /usr/local/go1.8/go - gobin: /usr/local/go1.8/go/bin/go disable_coverage: true + gobin: /opt/golang/go1.9/bin/go + goroot: /opt/golang/go1.9 + mongodb_url: https://fastdl.mongodb.org/osx/mongodb-osx-ssl-x86_64-4.0.1.tgz run_on: - - macos-1012 + - macos-1014 tasks: - name: ".test" @@ -208,11 +226,9 @@ run_on: - windows-64-vs2015-small expansions: - mongodb_url: https://fastdl.mongodb.org/win32/mongodb-win32-x86_64-2008plus-ssl-4.0.1.zip - goroot: c:/go1.8/go - gobin: /cygdrive/c/go1.8/go/bin/go disable_coverage: true - extension: ".exe" - archiveExt: ".zip" + gobin: /cygdrive/c/golang/go1.9/bin/go + goroot: c:/golang/go1.9 + mongodb_url: https://fastdl.mongodb.org/win32/mongodb-win32-x86_64-2008plus-ssl-4.0.1.zip tasks: - name: ".test" diff --git a/vendor/github.com/evergreen-ci/pail/gridfs.go b/vendor/github.com/evergreen-ci/pail/gridfs.go index 8bda263b295..f5f91d8d3e1 100644 --- a/vendor/github.com/evergreen-ci/pail/gridfs.go +++ b/vendor/github.com/evergreen-ci/pail/gridfs.go @@ -9,8 +9,11 @@ import ( "regexp" "time" + "github.com/mongodb/grip" + "github.com/mongodb/grip/message" "github.com/pkg/errors" "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/gridfs" "go.mongodb.org/mongo-driver/mongo/options" @@ -19,11 +22,13 @@ import ( // GridFSOptions support the use and creation of GridFS backed // buckets. type GridFSOptions struct { + Name string Prefix string Database string MongoDBURI string DryRun bool DeleteOnSync bool + Verbose bool } type gridfsBucket struct { @@ -31,6 +36,20 @@ type gridfsBucket struct { client *mongo.Client } +func (b *gridfsBucket) normalizeKey(key string) string { + if key == "" { + return b.opts.Prefix + } + return consistentJoin(b.opts.Prefix, key) +} + +func (b *gridfsBucket) denormalizeKey(key string) string { + if b.opts.Prefix != "" && len(key) > len(b.opts.Prefix)+1 { + key = key[len(b.opts.Prefix)+1:] + } + return key +} + // NewGridFSBucketWithClient constructs a Bucket implementation using // GridFS and the new MongoDB driver. If client is nil, then this // method falls back to the behavior of NewGridFSBucket. Use the @@ -43,7 +62,7 @@ func NewGridFSBucketWithClient(ctx context.Context, client *mongo.Client, opts G return &gridfsBucket{opts: opts, client: client}, nil } -// NewGridFSBucket creates a Bucket instance backed by the new MongoDB +// driver, creating a new client and connecting to the URI. // Use the Check method to verify that this bucket is operational. 
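With `Name` now selecting the underlying GridFS bucket and `Prefix` reduced to a pure key namespace, the `normalizeKey`/`denormalizeKey` pair above prepends the prefix on the way in and strips it from listings on the way out; the new `Verbose` flag gates the `grip.DebugWhen` logging added to every operation below. A quick round-trip, inside package pail, with illustrative values:

```go
func exampleKeyNormalization() {
	b := &gridfsBucket{opts: GridFSOptions{Name: "fs", Prefix: "builds/123"}}

	key := b.normalizeKey("dist/artifact.tgz")
	// key == "builds/123/dist/artifact.tgz" (joined via consistentJoin)

	orig := b.denormalizeKey(key)
	// orig == "dist/artifact.tgz": the prefix and its separator are sliced off

	_, _ = key, orig
}
```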
func NewGridFSBucket(ctx context.Context, opts GridFSOptions) (Bucket, error) { @@ -76,7 +95,7 @@ func (b *gridfsBucket) bucket(ctx context.Context) (*gridfs.Bucket, error) { return nil, errors.Wrap(err, "cannot fetch bucket with canceled context") } - gfs, err := gridfs.NewBucket(b.client.Database(b.opts.Database), options.GridFSBucket().SetName(b.opts.Prefix)) + gfs, err := gridfs.NewBucket(b.client.Database(b.opts.Database), options.GridFSBucket().SetName(b.opts.Name)) if err != nil { return nil, errors.WithStack(err) } @@ -91,6 +110,15 @@ func (b *gridfsBucket) bucket(ctx context.Context) (*gridfs.Bucket, error) { } func (b *gridfsBucket) Writer(ctx context.Context, name string) (io.WriteCloser, error) { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "gridfs", + "dry_run": b.opts.DryRun, + "operation": "writer", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "key": name, + }) + grid, err := b.bucket(ctx) if err != nil { return nil, errors.Wrap(err, "problem resolving bucket") @@ -100,7 +128,7 @@ func (b *gridfsBucket) Writer(ctx context.Context, name string) (io.WriteCloser, return &mockWriteCloser{}, nil } - writer, err := grid.OpenUploadStream(name) + writer, err := grid.OpenUploadStream(b.normalizeKey(name)) if err != nil { return nil, errors.Wrap(err, "problem opening stream") } @@ -109,12 +137,20 @@ func (b *gridfsBucket) Writer(ctx context.Context, name string) (io.WriteCloser, } func (b *gridfsBucket) Reader(ctx context.Context, name string) (io.ReadCloser, error) { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "gridfs", + "operation": "reader", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "key": name, + }) + grid, err := b.bucket(ctx) if err != nil { return nil, errors.Wrap(err, "problem resolving bucket") } - reader, err := grid.OpenDownloadStreamByName(name) + reader, err := grid.OpenDownloadStreamByName(b.normalizeKey(name)) if err != nil { return nil, errors.Wrap(err, "problem opening stream") } @@ -123,6 +159,15 @@ func (b *gridfsBucket) Reader(ctx context.Context, name string) (io.ReadCloser, } func (b *gridfsBucket) Put(ctx context.Context, name string, input io.Reader) error { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "gridfs", + "dry_run": b.opts.DryRun, + "operation": "put", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "key": name, + }) + grid, err := b.bucket(ctx) if err != nil { return errors.Wrap(err, "problem resolving bucket") @@ -132,7 +177,7 @@ func (b *gridfsBucket) Put(ctx context.Context, name string, input io.Reader) er return nil } - if _, err = grid.UploadFromStream(name, input); err != nil { + if _, err = grid.UploadFromStream(b.normalizeKey(name), input); err != nil { return errors.Wrap(err, "problem uploading file") } @@ -140,10 +185,28 @@ func (b *gridfsBucket) Put(ctx context.Context, name string, input io.Reader) er } func (b *gridfsBucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "gridfs", + "operation": "get", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "key": name, + }) + return b.Reader(ctx, name) } func (b *gridfsBucket) Upload(ctx context.Context, name, path string) error { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "gridfs", + "dry_run": b.opts.DryRun, + "operation": "upload", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "key": name, + "path": path, + }) + f, err := os.Open(path) if err != nil { return errors.Wrapf(err, "problem 
opening file %s", name) } @@ -154,6 +217,15 @@ func (b *gridfsBucket) Download(ctx context.Context, name, path string) error { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "gridfs", + "operation": "download", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "key": name, + "path": path, + }) + reader, err := b.Reader(ctx, name) if err != nil { return errors.WithStack(err) @@ -176,29 +248,72 @@ func (b *gridfsBucket) Download(ctx context.Context, name, path string) error { return errors.WithStack(f.Close()) } -func (b *gridfsBucket) Push(ctx context.Context, local, remote string) error { - localPaths, err := walkLocalTree(ctx, local) +func (b *gridfsBucket) Push(ctx context.Context, opts SyncOptions) error { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "gridfs", + "dry_run": b.opts.DryRun, + "operation": "push", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "remote": opts.Remote, + "local": opts.Local, + "exclude": opts.Exclude, + }) + + var re *regexp.Regexp + var err error + if opts.Exclude != "" { + re, err = regexp.Compile(opts.Exclude) + if err != nil { + return errors.Wrap(err, "problem compiling exclude regex") + } + } + + localPaths, err := walkLocalTree(ctx, opts.Local) if err != nil { return errors.Wrap(err, "problem finding local paths") } for _, path := range localPaths { - target := filepath.Join(remote, path) + if re != nil && re.MatchString(path) { + continue + } + + target := consistentJoin(opts.Remote, path) _ = b.Remove(ctx, target) - if err = b.Upload(ctx, target, filepath.Join(local, path)); err != nil { + if err = b.Upload(ctx, target, filepath.Join(opts.Local, path)); err != nil { return errors.Wrapf(err, "problem uploading '%s' to '%s'", path, target) } } if b.opts.DeleteOnSync && !b.opts.DryRun { - return errors.Wrapf(os.RemoveAll(local), "problem removing '%s' after push", local) + return errors.Wrap(deleteOnPush(ctx, localPaths, opts.Remote, b), "problem with delete on sync after push") } return nil } -func (b *gridfsBucket) Pull(ctx context.Context, local, remote string) error { - iter, err := b.List(ctx, remote) +func (b *gridfsBucket) Pull(ctx context.Context, opts SyncOptions) error { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "gridfs", + "operation": "pull", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "remote": opts.Remote, + "local": opts.Local, + "exclude": opts.Exclude, + }) + + var re *regexp.Regexp + var err error + if opts.Exclude != "" { + re, err = regexp.Compile(opts.Exclude) + if err != nil { + return errors.Wrap(err, "problem compiling exclude regex") + } + } + + iter, err := b.List(ctx, opts.Remote) if err != nil { return errors.WithStack(err) } @@ -207,8 +322,13 @@ func (b *gridfsBucket) Pull(ctx context.Context, local, remote string) error { for iter.Next(ctx) { item := iter.Item() - name := filepath.Join(local, item.Name()[len(remote)+1:]) - keys = append(keys, item.Name()) + if re != nil && re.MatchString(item.Name()) { + continue + } + + fn := item.Name()[len(opts.Remote)+1:] + name := filepath.Join(opts.Local, fn) + keys = append(keys, fn) if err = b.Download(ctx, item.Name(), name); err != nil { return errors.WithStack(err) @@ -220,13 +340,22 @@ func (b *gridfsBucket) Pull(ctx context.Context, local, remote string) error { } if b.opts.DeleteOnSync && !b.opts.DryRun { - return errors.Wrapf(b.RemoveMany(ctx, keys...), "problem removing '%s' after pull", remote) + return 
errors.Wrap(deleteOnPull(ctx, keys, opts.Local), "problem with delete on sync after pull") } return nil } func (b *gridfsBucket) Copy(ctx context.Context, opts CopyOptions) error { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "gridfs", + "operation": "copy", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "source_key": opts.SourceKey, + "dest_key": opts.DestinationKey, + }) + from, err := b.Reader(ctx, opts.SourceKey) if err != nil { return errors.Wrap(err, "problem getting reader for source") @@ -245,12 +374,21 @@ func (b *gridfsBucket) Copy(ctx context.Context, opts CopyOptions) error { } func (b *gridfsBucket) Remove(ctx context.Context, key string) error { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "gridfs", + "dry_run": b.opts.DryRun, + "operation": "remove", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "key": key, + }) + grid, err := b.bucket(ctx) if err != nil { return errors.Wrap(err, "problem resolving bucket") } - cursor, err := grid.Find(bson.M{"filename": key}) + cursor, err := grid.Find(bson.M{"filename": b.normalizeKey(key)}) if err == mongo.ErrNoDocuments { return nil } else if err != nil { @@ -291,12 +429,26 @@ func (b *gridfsBucket) Remove(ctx context.Context, key string) error { } func (b *gridfsBucket) RemoveMany(ctx context.Context, keys ...string) error { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "gridfs", + "dry_run": b.opts.DryRun, + "operation": "remove many", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "keys": keys, + }) + grid, err := b.bucket(ctx) if err != nil { return errors.Wrap(err, "problem resolving bucket") } - cursor, err := grid.Find(bson.M{"filename": bson.M{"$in": keys}}) + normalizedKeys := make([]string, len(keys)) + for i, key := range keys { + normalizedKeys[i] = b.normalizeKey(key) + } + + cursor, err := grid.Find(bson.M{"filename": bson.M{"$in": normalizedKeys}}) if err != nil { return errors.Wrap(err, "problem finding file") } @@ -336,22 +488,43 @@ func (b *gridfsBucket) RemoveMany(ctx context.Context, keys ...string) error { } func (b *gridfsBucket) RemovePrefix(ctx context.Context, prefix string) error { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "gridfs", + "dry_run": b.opts.DryRun, + "operation": "remove prefix", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "prefix": prefix, + }) + return removePrefix(ctx, prefix, b) } func (b *gridfsBucket) RemoveMatching(ctx context.Context, expr string) error { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "gridfs", + "dry_run": b.opts.DryRun, + "operation": "remove matching", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "expression": expr, + }) + return removeMatching(ctx, expr, b) } func (b *gridfsBucket) List(ctx context.Context, prefix string) (BucketIterator, error) { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "gridfs", + "operation": "list", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "prefix": prefix, + }) + filter := bson.M{} if prefix != "" { - pat, err := regexp.Compile(fmt.Sprintf("^%s.*", prefix)) - if err != nil { - return nil, errors.Wrap(err, "problem with filename matching") - } - - filter = bson.M{"filename": pat} + filter = bson.M{"filename": primitive.Regex{Pattern: fmt.Sprintf("^%s.*", b.normalizeKey(prefix))}} } grid, err := b.bucket(ctx) @@ -396,7 +569,7 @@ func (iter *gridfsIterator) Next(ctx context.Context) bool { iter.item = &bucketItemImpl{ bucket: iter.bucket.opts.Prefix, b: iter.bucket, - key: 
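`List` now builds its filter server-side as an anchored `primitive.Regex` over the normalized prefix, and the iterator below hands names back denormalized. Note the prefix is interpolated into the pattern unescaped, so regexp metacharacters in a key prefix would be interpreted as pattern syntax. Typical iteration, inside package pail, with an illustrative prefix:

```go
func listUnderPrefix(ctx context.Context, bucket Bucket) error {
	// With a bucket Prefix of "job", this produces the server-side filter
	// {"filename": {"$regex": "^job/builds/123.*"}}.
	iter, err := bucket.List(ctx, "builds/123")
	if err != nil {
		return errors.WithStack(err)
	}
	for iter.Next(ctx) {
		fmt.Println(iter.Item().Name()) // names come back with the prefix stripped
	}
	return errors.WithStack(iter.Err())
}
```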
document.Filename, + key: iter.bucket.denormalizeKey(document.Filename), } return true } diff --git a/vendor/github.com/evergreen-ci/pail/gridfs_legacy.go b/vendor/github.com/evergreen-ci/pail/gridfs_legacy.go index d3a1a72ff1e..9b2c8a64f44 100644 --- a/vendor/github.com/evergreen-ci/pail/gridfs_legacy.go +++ b/vendor/github.com/evergreen-ci/pail/gridfs_legacy.go @@ -6,9 +6,11 @@ import ( "io" "os" "path/filepath" + "regexp" "time" "github.com/mongodb/grip" + "github.com/mongodb/grip/message" "github.com/pkg/errors" mgo "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" @@ -19,6 +21,20 @@ type gridfsLegacyBucket struct { session *mgo.Session } +func (b *gridfsLegacyBucket) normalizeKey(key string) string { + if key == "" { + return b.opts.Prefix + } + return consistentJoin(b.opts.Prefix, key) +} + +func (b *gridfsLegacyBucket) denormalizeKey(key string) string { + if b.opts.Prefix != "" && len(key) > len(b.opts.Prefix)+1 { + key = key[len(b.opts.Prefix)+1:] + } + return key +} + // NewLegacyGridFSBucket creates a Bucket implementation backed by // GridFS as implemented by the legacy "mgo" MongoDB driver. This // constructor creates a new connection and mgo session. @@ -68,7 +84,7 @@ func (b *gridfsLegacyBucket) Check(_ context.Context) error { } func (b *gridfsLegacyBucket) gridFS() *mgo.GridFS { - return b.session.DB(b.opts.Database).GridFS(b.opts.Prefix) + return b.session.DB(b.opts.Database).GridFS(b.opts.Name) } func (b *gridfsLegacyBucket) openFile(ctx context.Context, name string, create bool) (io.ReadWriteCloser, error) { @@ -77,6 +93,7 @@ func (b *gridfsLegacyBucket) openFile(ctx context.Context, name string, create b ctx, out.cancel = context.WithCancel(ctx) gridfs := b.gridFS() + normalizedName := b.normalizeKey(name) var ( err error @@ -84,13 +101,13 @@ func (b *gridfsLegacyBucket) openFile(ctx context.Context, name string, create b ) if create { - file, err = gridfs.Create(name) + file, err = gridfs.Create(normalizedName) } else { - file, err = gridfs.Open(name) + file, err = gridfs.Open(normalizedName) } if err != nil { ses.Close() - return nil, errors.Wrapf(err, "couldn't open %s/%s", b.opts.Prefix, name) + return nil, errors.Wrapf(err, "couldn't open %s/%s", b.opts.Name, normalizedName) } out.GridFile = file @@ -110,6 +127,15 @@ type legacyGridFSFile struct { func (f *legacyGridFSFile) Close() error { f.cancel(); return errors.WithStack(f.GridFile.Close()) } func (b *gridfsLegacyBucket) Writer(ctx context.Context, name string) (io.WriteCloser, error) { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "legacy_gridfs", + "dry_run": b.opts.DryRun, + "operation": "writer", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "key": name, + }) + if b.opts.DryRun { return &mockWriteCloser{}, nil } @@ -117,10 +143,27 @@ func (b *gridfsLegacyBucket) Writer(ctx context.Context, name string) (io.WriteC } func (b *gridfsLegacyBucket) Reader(ctx context.Context, name string) (io.ReadCloser, error) { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "legacy_gridfs", + "operation": "reader", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "key": name, + }) + return b.openFile(ctx, name, false) } func (b *gridfsLegacyBucket) Put(ctx context.Context, name string, input io.Reader) error { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "legacy_gridfs", + "dry_run": b.opts.DryRun, + "operation": "put", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "key": name, + }) + var file io.WriteCloser var err error if b.opts.DryRun { @@ -141,10 
+184,28 @@ func (b *gridfsLegacyBucket) Put(ctx context.Context, name string, input io.Read } func (b *gridfsLegacyBucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "legacy_gridfs", + "operation": "get", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "key": name, + }) + return b.Reader(ctx, name) } func (b *gridfsLegacyBucket) Upload(ctx context.Context, name, path string) error { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "legacy_gridfs", + "dry_run": b.opts.DryRun, + "operation": "upload", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "key": name, + "path": path, + }) + f, err := os.Open(path) if err != nil { return errors.Wrapf(err, "problem opening file %s", name) @@ -155,6 +216,15 @@ func (b *gridfsLegacyBucket) Upload(ctx context.Context, name, path string) erro } func (b *gridfsLegacyBucket) Download(ctx context.Context, name, path string) error { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "legacy_gridfs", + "operation": "download", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "key": name, + "path": path, + }) + reader, err := b.Reader(ctx, name) if err != nil { return errors.WithStack(err) @@ -177,18 +247,42 @@ func (b *gridfsLegacyBucket) Download(ctx context.Context, name, path string) er return errors.WithStack(f.Close()) } -func (b *gridfsLegacyBucket) Push(ctx context.Context, local, remote string) error { - localPaths, err := walkLocalTree(ctx, local) +func (b *gridfsLegacyBucket) Push(ctx context.Context, opts SyncOptions) error { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "legacy_gridfs", + "dry_run": b.opts.DryRun, + "operation": "push", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "remote": opts.Remote, + "local": opts.Local, + "exclude": opts.Exclude, + }) + + var re *regexp.Regexp + var err error + if opts.Exclude != "" { + re, err = regexp.Compile(opts.Exclude) + if err != nil { + return errors.Wrap(err, "problem compiling exclude regex") + } + } + + localPaths, err := walkLocalTree(ctx, opts.Local) if err != nil { return errors.Wrap(err, "problem finding local paths") } gridfs := b.gridFS() for _, path := range localPaths { - target := filepath.Join(remote, path) - file, err := gridfs.Open(target) + if re != nil && re.MatchString(path) { + continue + } + + target := consistentJoin(opts.Remote, path) + file, err := gridfs.Open(b.normalizeKey(target)) if err == mgo.ErrNotFound { - if err = b.Upload(ctx, target, filepath.Join(local, path)); err != nil { + if err = b.Upload(ctx, target, filepath.Join(opts.Local, path)); err != nil { return errors.Wrapf(err, "problem uploading '%s' to '%s'", path, target) } continue @@ -196,26 +290,45 @@ func (b *gridfsLegacyBucket) Push(ctx context.Context, local, remote string) err return errors.Wrapf(err, "problem finding '%s'", target) } - localmd5, err := md5sum(filepath.Join(local, path)) + localmd5, err := md5sum(filepath.Join(opts.Local, path)) if err != nil { return errors.Wrapf(err, "problem checksumming '%s'", path) } if file.MD5() != localmd5 { - if err = b.Upload(ctx, target, filepath.Join(local, path)); err != nil { + if err = b.Upload(ctx, target, filepath.Join(opts.Local, path)); err != nil { return errors.Wrapf(err, "problem uploading '%s' to '%s'", path, target) } } } if b.opts.DeleteOnSync && !b.opts.DryRun { - return errors.Wrapf(os.RemoveAll(local), "problem removing '%s' after push", local) + return errors.Wrap(deleteOnPush(ctx, 
localPaths, opts.Remote, b), "problem with delete on sync after push") } return nil } -func (b *gridfsLegacyBucket) Pull(ctx context.Context, local, remote string) error { - iter, err := b.List(ctx, remote) +func (b *gridfsLegacyBucket) Pull(ctx context.Context, opts SyncOptions) error { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "legacy_gridfs", + "operation": "pull", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "remote": opts.Remote, + "local": opts.Local, + "exclude": opts.Exclude, + }) + + var re *regexp.Regexp + var err error + if opts.Exclude != "" { + re, err = regexp.Compile(opts.Exclude) + if err != nil { + return errors.Wrap(err, "problem compiling exclude regex") + } + } + + iter, err := b.List(ctx, opts.Remote) if err != nil { return errors.WithStack(err) } @@ -230,11 +343,17 @@ func (b *gridfsLegacyBucket) Pull(ctx context.Context, local, remote string) err var checksum string keys := []string{} for gridfs.OpenNext(iterimpl.iter, &f) { - name := filepath.Join(local, f.Name()[len(remote)+1:]) - keys = append(keys, f.Name()) + if re != nil && re.MatchString(f.Name()) { + continue + } + + denormalizedName := b.denormalizeKey(f.Name()) + fn := denormalizedName[len(opts.Remote)+1:] + name := filepath.Join(opts.Local, fn) + keys = append(keys, fn) checksum, err = md5sum(name) if os.IsNotExist(errors.Cause(err)) { - if err = b.Download(ctx, f.Name(), name); err != nil { + if err = b.Download(ctx, denormalizedName, name); err != nil { return errors.WithStack(err) } continue @@ -245,7 +364,7 @@ func (b *gridfsLegacyBucket) Pull(ctx context.Context, local, remote string) err // NOTE: it doesn't seem like the md5 sums are being // populated, so this always happens if f.MD5() != checksum { - if err = b.Download(ctx, f.Name(), name); err != nil { + if err = b.Download(ctx, denormalizedName, name); err != nil { return errors.WithStack(err) } } @@ -256,12 +375,21 @@ func (b *gridfsLegacyBucket) Pull(ctx context.Context, local, remote string) err } if b.opts.DeleteOnSync && !b.opts.DryRun { - return errors.Wrapf(b.RemoveMany(ctx, keys...), "problem removing '%s' after pull", remote) + return errors.Wrap(deleteOnPull(ctx, keys, opts.Local), "problem with delete on sync after pull") } return nil } func (b *gridfsLegacyBucket) Copy(ctx context.Context, options CopyOptions) error { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "legacy_gridfs", + "operation": "copy", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "source_key": options.SourceKey, + "dest_key": options.DestinationKey, + }) + from, err := b.Reader(ctx, options.SourceKey) if err != nil { return errors.Wrap(err, "problem getting reader for source") @@ -280,13 +408,31 @@ func (b *gridfsLegacyBucket) Copy(ctx context.Context, options CopyOptions) erro } func (b *gridfsLegacyBucket) Remove(ctx context.Context, key string) error { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "legacy_gridfs", + "dry_run": b.opts.DryRun, + "operation": "remove", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "key": key, + }) + if b.opts.DryRun { return nil } - return errors.Wrapf(b.gridFS().Remove(key), "problem removing file %s", key) + return errors.Wrapf(b.gridFS().Remove(b.normalizeKey(key)), "problem removing file %s", key) } func (b *gridfsLegacyBucket) RemoveMany(ctx context.Context, keys ...string) error { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "legacy_gridfs", + "dry_run": b.opts.DryRun, + "operation": "remove many", + "bucket": b.opts.Name, + 
"bucket_prefix": b.opts.Prefix, + "keys": keys, + }) + catcher := grip.NewBasicCatcher() for _, key := range keys { catcher.Add(b.Remove(ctx, key)) @@ -295,14 +441,40 @@ func (b *gridfsLegacyBucket) RemoveMany(ctx context.Context, keys ...string) err } func (b *gridfsLegacyBucket) RemovePrefix(ctx context.Context, prefix string) error { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "legacy_gridfs", + "dry_run": b.opts.DryRun, + "operation": "remove prefix", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "prefix": prefix, + }) + return removePrefix(ctx, prefix, b) } func (b *gridfsLegacyBucket) RemoveMatching(ctx context.Context, expression string) error { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "legacy_gridfs", + "dry_run": b.opts.DryRun, + "operation": "remove matching", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "expression": expression, + }) + return removeMatching(ctx, expression, b) } func (b *gridfsLegacyBucket) List(ctx context.Context, prefix string) (BucketIterator, error) { + grip.DebugWhen(b.opts.Verbose, message.Fields{ + "type": "legacy_gridfs", + "operation": "list", + "bucket": b.opts.Name, + "bucket_prefix": b.opts.Prefix, + "prefix": prefix, + }) + if ctx.Err() != nil { return nil, errors.New("operation canceled") } @@ -317,7 +489,7 @@ func (b *gridfsLegacyBucket) List(ctx context.Context, prefix string) (BucketIte return &legacyGridFSIterator{ ctx: ctx, - iter: b.gridFS().Find(bson.M{"filename": bson.RegEx{Pattern: fmt.Sprintf("^%s.*", prefix)}}).Iter(), + iter: b.gridFS().Find(bson.M{"filename": bson.RegEx{Pattern: fmt.Sprintf("^%s.*", b.normalizeKey(prefix))}}).Iter(), bucket: b, }, nil } @@ -351,7 +523,7 @@ func (iter *legacyGridFSIterator) Next(ctx context.Context) bool { iter.item = &bucketItemImpl{ bucket: iter.bucket.opts.Prefix, - key: f.Name(), + key: iter.bucket.denormalizeKey(f.Name()), b: iter.bucket, } diff --git a/vendor/github.com/evergreen-ci/pail/interface.go b/vendor/github.com/evergreen-ci/pail/interface.go index 683791bca36..937c9b93a3c 100644 --- a/vendor/github.com/evergreen-ci/pail/interface.go +++ b/vendor/github.com/evergreen-ci/pail/interface.go @@ -60,8 +60,8 @@ type Bucket interface { // Sync methods: these methods are the recursive, efficient // copy methods of files from s3 to the local file // system. - Push(context.Context, string, string) error - Pull(context.Context, string, string) error + Push(context.Context, SyncOptions) error + Pull(context.Context, SyncOptions) error // Copy does a special copy operation that does not require // downloading a file. Note that CopyOptions.DestinationBucket must @@ -88,6 +88,14 @@ type Bucket interface { List(context.Context, string) (BucketIterator, error) } +// SyncOptions describes the arguments to the sync operations (Push and Pull). +// Note that exclude is a regular expression. +type SyncOptions struct { + Local string + Remote string + Exclude string +} + // CopyOptions describes the arguments to the Copy method for moving // objects between Buckets. 
type CopyOptions struct { diff --git a/vendor/github.com/evergreen-ci/pail/local.go b/vendor/github.com/evergreen-ci/pail/local.go index 9f0dad27245..9a96deef12a 100644 --- a/vendor/github.com/evergreen-ci/pail/local.go +++ b/vendor/github.com/evergreen-ci/pail/local.go @@ -6,22 +6,35 @@ import ( "io/ioutil" "os" "path/filepath" + "regexp" "github.com/mongodb/grip" + "github.com/mongodb/grip/message" "github.com/pkg/errors" ) type localFileSystem struct { path string + prefix string dryRun bool deleteOnSync bool + verbose bool } // LocalOptions describes the configuration of a local Bucket. type LocalOptions struct { Path string + Prefix string DryRun bool DeleteOnSync bool + Verbose bool +} + +func (b *localFileSystem) normalizeKey(key string) string { + if key == "" { + return b.prefix + } + return filepath.Join(b.prefix, key) } // NewLocalBucket returns an implementation of the Bucket interface @@ -30,12 +43,12 @@ type LocalOptions struct { func NewLocalBucket(opts LocalOptions) (Bucket, error) { b := &localFileSystem{ path: opts.Path, + prefix: opts.Prefix, dryRun: opts.DryRun, deleteOnSync: opts.DeleteOnSync, } if err := b.Check(context.TODO()); err != nil { return nil, errors.WithStack(err) - } return b, nil } @@ -51,7 +64,7 @@ func NewLocalTemporaryBucket(opts LocalOptions) (Bucket, error) { return nil, errors.Wrap(err, "problem creating temporary directory") } - return &localFileSystem{path: dir, dryRun: opts.DryRun, deleteOnSync: opts.DeleteOnSync}, nil + return &localFileSystem{path: dir, prefix: opts.Prefix, dryRun: opts.DryRun, deleteOnSync: opts.DeleteOnSync}, nil } func (b *localFileSystem) Check(_ context.Context) error { @@ -63,11 +76,20 @@ func (b *localFileSystem) Check(_ context.Context) error { } func (b *localFileSystem) Writer(_ context.Context, name string) (io.WriteCloser, error) { + grip.DebugWhen(b.verbose, message.Fields{ + "type": "local", + "dry_run": b.dryRun, + "operation": "writer", + "bucket": b.path, + "bucket_prefix": b.prefix, + "key": name, + }) + if b.dryRun { return &mockWriteCloser{}, nil } - path := filepath.Join(b.path, name) + path := filepath.Join(b.path, b.normalizeKey(name)) if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil { return nil, errors.Wrap(err, "problem creating base directories") } @@ -81,7 +103,15 @@ func (b *localFileSystem) Writer(_ context.Context, name string) (io.WriteCloser } func (b *localFileSystem) Reader(_ context.Context, name string) (io.ReadCloser, error) { - path := filepath.Join(b.path, name) + grip.DebugWhen(b.verbose, message.Fields{ + "type": "local", + "operation": "reader", + "bucket": b.path, + "bucket_prefix": b.prefix, + "key": name, + }) + + path := filepath.Join(b.path, b.normalizeKey(name)) f, err := os.Open(path) if err != nil { return nil, errors.Wrapf(err, "problem opening file '%s'", path) @@ -91,6 +121,15 @@ func (b *localFileSystem) Reader(_ context.Context, name string) (io.ReadCloser, } func (b *localFileSystem) Put(ctx context.Context, name string, input io.Reader) error { + grip.DebugWhen(b.verbose, message.Fields{ + "type": "local", + "dry_run": b.dryRun, + "operation": "put", + "bucket": b.path, + "bucket_prefix": b.prefix, + "key": name, + }) + f, err := b.Writer(ctx, name) if err != nil { return errors.WithStack(err) @@ -105,10 +144,28 @@ func (b *localFileSystem) Put(ctx context.Context, name string, input io.Reader) } func (b *localFileSystem) Get(ctx context.Context, name string) (io.ReadCloser, error) { + grip.DebugWhen(b.verbose, message.Fields{ + "type": "local", + "operation": 
"get", + "bucket": b.path, + "bucket_prefix": b.prefix, + "key": name, + }) + return b.Reader(ctx, name) } func (b *localFileSystem) Upload(ctx context.Context, name, path string) error { + grip.DebugWhen(b.verbose, message.Fields{ + "type": "local", + "dry_run": b.dryRun, + "operation": "upload", + "bucket": b.path, + "bucket_prefix": b.prefix, + "key": name, + "path": path, + }) + f, err := os.Open(path) if err != nil { return errors.Wrapf(err, "problem opening file %s", name) @@ -119,12 +176,18 @@ func (b *localFileSystem) Upload(ctx context.Context, name, path string) error { } func (b *localFileSystem) Download(ctx context.Context, name, path string) error { - reader, err := b.Reader(ctx, name) - if err != nil { - return errors.WithStack(err) - } + grip.DebugWhen(b.verbose, message.Fields{ + "type": "local", + "operation": "download", + "bucket": b.path, + "bucket_prefix": b.prefix, + "key": name, + "path": path, + }) + + catcher := grip.NewBasicCatcher() - if err = os.MkdirAll(filepath.Dir(path), 0600); err != nil { + if err := os.MkdirAll(filepath.Dir(path), 0600); err != nil { return errors.Wrapf(err, "problem creating enclosing directory for '%s'", path) } @@ -132,16 +195,36 @@ func (b *localFileSystem) Download(ctx context.Context, name, path string) error if err != nil { return errors.Wrapf(err, "problem creating file '%s'", path) } + + reader, err := b.Reader(ctx, name) + if err != nil { + _ = f.Close() + return errors.WithStack(err) + } + _, err = io.Copy(f, reader) if err != nil { _ = f.Close() + _ = reader.Close() return errors.Wrap(err, "problem copying data") } - return errors.WithStack(f.Close()) + catcher.Add(reader.Close()) + catcher.Add(f.Close()) + return errors.WithStack(catcher.Resolve()) } func (b *localFileSystem) Copy(ctx context.Context, options CopyOptions) error { + grip.DebugWhen(b.verbose, message.Fields{ + "type": "local", + "dry_run": b.dryRun, + "operation": "copy", + "bucket": b.path, + "bucket_prefix": b.prefix, + "source_key": options.SourceKey, + "dest_key": options.DestinationKey, + }) + from, err := b.Reader(ctx, options.SourceKey) if err != nil { return errors.Wrap(err, "problem getting reader for source") @@ -161,16 +244,34 @@ func (b *localFileSystem) Copy(ctx context.Context, options CopyOptions) error { } func (b *localFileSystem) Remove(ctx context.Context, key string) error { + grip.DebugWhen(b.verbose, message.Fields{ + "type": "local", + "dry_run": b.dryRun, + "operation": "remove", + "bucket": b.path, + "bucket_prefix": b.prefix, + "key": key, + }) + if b.dryRun { return nil } - path := filepath.Join(b.path, key) + path := filepath.Join(b.path, b.normalizeKey(key)) return errors.Wrapf(os.Remove(path), "problem removing path %s", path) } func (b *localFileSystem) RemoveMany(ctx context.Context, keys ...string) error { + grip.DebugWhen(b.verbose, message.Fields{ + "type": "local", + "dry_run": b.dryRun, + "operation": "remove many", + "bucket": b.path, + "bucket_prefix": b.prefix, + "keys": keys, + }) + catcher := grip.NewBasicCatcher() for _, key := range keys { catcher.Add(b.Remove(ctx, key)) @@ -179,24 +280,66 @@ func (b *localFileSystem) RemoveMany(ctx context.Context, keys ...string) error } func (b *localFileSystem) RemovePrefix(ctx context.Context, prefix string) error { + grip.DebugWhen(b.verbose, message.Fields{ + "type": "local", + "dry_run": b.dryRun, + "operation": "remove prefix", + "bucket": b.path, + "bucket_prefix": b.prefix, + "prefix": prefix, + }) + return removePrefix(ctx, prefix, b) } func (b *localFileSystem) 
RemoveMatching(ctx context.Context, expression string) error { + grip.DebugWhen(b.verbose, message.Fields{ + "type": "local", + "dry_run": b.dryRun, + "operation": "remove matching", + "bucket": b.path, + "bucket_prefix": b.prefix, + "expression": expression, + }) + return removeMatching(ctx, expression, b) } -func (b *localFileSystem) Push(ctx context.Context, local, remote string) error { - files, err := walkLocalTree(ctx, local) +func (b *localFileSystem) Push(ctx context.Context, opts SyncOptions) error { + grip.DebugWhen(b.verbose, message.Fields{ + "type": "local", + "dry_run": b.dryRun, + "operation": "push", + "bucket": b.path, + "bucket_prefix": b.prefix, + "remote": opts.Remote, + "local": opts.Local, + "exclude": opts.Exclude, + }) + + var re *regexp.Regexp + var err error + if opts.Exclude != "" { + re, err = regexp.Compile(opts.Exclude) + if err != nil { + return errors.Wrap(err, "problem compiling exclude regex") + } + } + + files, err := walkLocalTree(ctx, opts.Local) if err != nil { return errors.WithStack(err) } for _, fn := range files { - target := filepath.Join(b.path, remote, fn) - file := filepath.Join(local, fn) + if re != nil && re.MatchString(fn) { + continue + } + + target := filepath.Join(b.path, b.normalizeKey(filepath.Join(opts.Remote, fn))) + file := filepath.Join(opts.Local, fn) if _, err := os.Stat(target); os.IsNotExist(err) { - if err := b.Upload(ctx, target, file); err != nil { + if err := b.Upload(ctx, filepath.Join(opts.Remote, fn), file); err != nil { return errors.WithStack(err) } @@ -213,20 +356,39 @@ func (b *localFileSystem) Push(ctx context.Context, local, remote string) error } if lsum != rsum { - if err := b.Upload(ctx, target, file); err != nil { + if err := b.Upload(ctx, filepath.Join(opts.Remote, fn), file); err != nil { return errors.WithStack(err) } } } if b.deleteOnSync && !b.dryRun { - return errors.Wrapf(os.RemoveAll(local), "problem removing '%s' after push", local) + return errors.Wrap(deleteOnPush(ctx, files, opts.Remote, b), "problem with delete on sync after push") } return nil } -func (b *localFileSystem) Pull(ctx context.Context, local, remote string) error { - prefix := filepath.Join(b.path, remote) +func (b *localFileSystem) Pull(ctx context.Context, opts SyncOptions) error { + grip.DebugWhen(b.verbose, message.Fields{ + "type": "local", + "operation": "pull", + "bucket": b.path, + "bucket_prefix": b.prefix, + "remote": opts.Remote, + "local": opts.Local, + "exclude": opts.Exclude, + }) + + var re *regexp.Regexp + var err error + if opts.Exclude != "" { + re, err = regexp.Compile(opts.Exclude) + if err != nil { + return errors.Wrap(err, "problem compiling exclude regex") + } + } + + prefix := filepath.Join(b.path, b.normalizeKey(opts.Remote)) files, err := walkLocalTree(ctx, prefix) if err != nil { return errors.WithStack(err) @@ -234,8 +396,13 @@ func (b *localFileSystem) Pull(ctx context.Context, local, remote string) error keys := []string{} for _, fn := range files { - path := filepath.Join(local, fn) + if re != nil && re.MatchString(fn) { + continue + } + keys = append(keys, fn) + path := filepath.Join(opts.Local, fn) + fn = filepath.Join(opts.Remote, fn) if _, err := os.Stat(path); os.IsNotExist(err) { if err := b.Download(ctx, fn, path); err != nil { return errors.WithStack(err) @@ -261,13 +428,20 @@ func (b *localFileSystem) Pull(ctx context.Context, local, remote string) error } if b.deleteOnSync && !b.dryRun { - return errors.Wrapf(b.RemoveMany(ctx, keys...), "problem removing '%s' after pull", remote) + return
errors.Wrap(deleteOnPull(ctx, keys, opts.Local), "problem with delete on sync after pull") } return nil } func (b *localFileSystem) List(ctx context.Context, prefix string) (BucketIterator, error) { - files, err := walkLocalTree(ctx, filepath.Join(b.path, prefix)) + grip.DebugWhen(b.verbose, message.Fields{ + "operation": "list", + "bucket": b.path, + "bucket_prefix": b.prefix, + "prefix": prefix, + }) + + files, err := walkLocalTree(ctx, filepath.Join(b.path, b.normalizeKey(prefix))) if err != nil { return nil, errors.WithStack(err) } @@ -276,6 +450,7 @@ func (b *localFileSystem) List(ctx context.Context, prefix string) (BucketIterat files: files, idx: -1, bucket: b, + prefix: prefix, }, nil } @@ -285,6 +460,7 @@ type localFileSystemIterator struct { idx int item *bucketItemImpl bucket *localFileSystem + prefix string } func (iter *localFileSystemIterator) Err() error { return iter.err } @@ -297,7 +473,7 @@ func (iter *localFileSystemIterator) Next(_ context.Context) bool { iter.item = &bucketItemImpl{ bucket: iter.bucket.path, - key: iter.files[iter.idx], + key: filepath.Join(iter.prefix, iter.files[iter.idx]), b: iter.bucket, } return true diff --git a/vendor/github.com/evergreen-ci/pail/makefile b/vendor/github.com/evergreen-ci/pail/makefile index 3f9e004c437..61015927716 100644 --- a/vendor/github.com/evergreen-ci/pail/makefile +++ b/vendor/github.com/evergreen-ci/pail/makefile @@ -1,16 +1,23 @@ buildDir := build srcFiles := $(shell find . -name "*.go" -not -path "./$(buildDir)/*" -not -name "*_test.go" -not -path "*\#*") testFiles := $(shell find . -name "*.go" -not -path "./$(buildDir)/*" -not -path "*\#*") +projectPath := github.com/evergreen-ci/pail packages := pail -# -# override the go binary path if set -ifneq (,$(GO_BIN_PATH)) -gobin := $(GO_BIN_PATH) + +# start environment setup +ifneq (,${GO_BIN_PATH}) +gobin := ${GO_BIN_PATH} else gobin := go endif - +gopath := $(GOPATH) +ifeq ($(OS),Windows_NT) +gopath := $(shell cygpath -m $(gopath)) +userProfile := $(shell cygpath -m $(USERPROFILE)) +endif +goEnv := GOPATH=$(gopath) $(if ${GO_BIN_PATH},PATH="$(shell dirname ${GO_BIN_PATH}):${PATH}") +# end environment setup # start linting configuration # package, testing, and linter dependencies specified @@ -41,16 +48,12 @@ lintArgs += --exclude="error return value not checked .defer.*" # start dependency installation tools # implementation details for being able to lazily install dependencies -gopath := $(GOPATH) -ifeq ($(OS),Windows_NT) -gopath := $(shell cygpath -m $(gopath)) -endif lintDeps := $(addprefix $(gopath)/src/,$(lintDeps)) $(gopath)/src/%: @-[ ! 
-d $(gopath) ] && mkdir -p $(gopath) || true - $(gobin) get $(subst $(gopath)/src/,,$@) + $(goEnv) $(gobin) get $(subst $(gopath)/src/,,$@) $(buildDir)/run-linter:cmd/run-linter/run-linter.go $(buildDir)/.lintSetup - $(gobin) build -o $@ $< + $(goEnv) $(gobin) build -o $@ $< $(buildDir)/.lintSetup:$(lintDeps) @-$(gopath)/bin/gometalinter --install >/dev/null && touch $@ # end dependency installation tools @@ -61,6 +64,7 @@ ifneq (,$(RUN_TEST)) testArgs += -run='$(RUN_TEST)' endif ifneq (,$(RUN_COUNT)) + WORK_DIR: ${workdir} testArgs += -count='$(RUN_COUNT)' endif ifneq (,$(SKIP_LONG)) @@ -72,20 +76,29 @@ endif ifneq (,$(RACE_DETECTOR)) testArgs += -race endif +ifneq (,$(TEST_TIMEOUT)) +testArgs += -timeout=$(TEST_TIMEOUT) +else +testArgs += -timeout=30m +endif + # test execution and output handlers $(buildDir)/: mkdir -p $@ $(buildDir)/output.%.test:$(buildDir)/ .FORCE - GOPATH=$(gopath) $(gobin) test $(testArgs) ./$(if $(subst $(name),,$*),$*,) | tee $@ + export USERPROFILE=$(userProfile) + $(goEnv) $(gobin) test $(testArgs) ./$(if $(subst $(name),,$*),$*,) | tee $@ @! grep -s -q -e "^FAIL" $@ && ! grep -s -q "^WARNING: DATA RACE" $@ $(buildDir)/output.test:$(buildDir)/ .FORCE - GOPATH=$(gopath) $(gobin) test $(testArgs) ./... | tee $@ + export USERPROFILE=$(userProfile) + $(goEnv) $(gobin) test $(testArgs) ./... | tee $@ @! grep -s -q -e "^FAIL" $@ && ! grep -s -q "^WARNING: DATA RACE" $@ $(buildDir)/output.%.coverage:$(buildDir)/ .FORCE - GOPATH=$(gopath) $(gobin) test $(testArgs) ./$(if $(subst $(name),,$*),$*,) -covermode=count -coverprofile $@ | tee $(buildDir)/output.$*.test - @-[ -f $@ ] && $(gobin) tool cover -func=$@ | sed 's%$(projectPath)/%%' | column -t + export USERPROFILE=$(userProfile) + $(goEnv) $(gobin) test $(testArgs) ./$(if $(subst $(name),,$*),$*,) -covermode=count -coverprofile $@ | tee $(buildDir)/output.$*.test + @-[ -f $@ ] && $(goEnv) $(gobin) tool cover -func=$@ | sed 's%$(projectPath)/%%' | column -t $(buildDir)/output.%.coverage.html:$(buildDir)/output.%.coverage - GOPATH=$(gopath) $(gobin) tool cover -html=$< -o $@ + $(goEnv) $(gobin) tool cover -html=$< -o $@ # targets to generate gotest output from the linter. $(buildDir)/output.%.lint:$(buildDir)/run-linter $(buildDir)/ .FORCE @./$< --output=$@ --lintArgs='$(lintArgs)' --packages='$*' @@ -97,18 +110,19 @@ $(buildDir)/output.lint:$(buildDir)/run-linter $(buildDir)/ .FORCE # userfacing targets for basic build and development operations compile: - GOPATH=$(gopath) $(gobin) build ./ + $(goEnv) $(gobin) build ./ test:$(buildDir)/test.out $(buildDir)/test.out:.FORCE + export USERPROFILE=$(userProfile) @mkdir -p $(buildDir) - GOPATH=$(gopath) $(gobin) test $(testArgs) ./ | tee $@ + $(goEnv) $(gobin) test $(testArgs) ./ | tee $@ @grep -s -q -e "^PASS" $@ coverage:$(buildDir)/cover.out - GOPATH=$(gopath) @$(gobin) tool cover -func=$< | sed -E 's%github.com/.*/ftdc/%%' | column -t + @$(goEnv) $(gobin) tool cover -func=$< | sed -E 's%$(projectPath)/%%' | column -t coverage-html:$(buildDir)/cover.html benchmark: - $(gobin) test -v -benchmem -bench=. -run="Benchmark.*" -timeout=20m + $(goEnv) $(gobin) test -v -benchmem -bench=. 
-run="Benchmark.*" -timeout=20m lint:$(foreach target,$(packages),$(buildDir)/output.$(target).lint) phony += lint lint-deps build build-race race test coverage coverage-html @@ -120,9 +134,10 @@ phony += lint lint-deps build build-race race test coverage coverage-html $(buildDir):$(srcFiles) compile @mkdir -p $@ $(buildDir)/cover.out:$(buildDir) $(testFiles) .FORCE - $(gobin) test $(testArgs) -covermode=count -coverprofile $@ -cover ./ + export USERPROFILE=$(userProfile) + $(goEnv) $(gobin) test $(testArgs) -covermode=count -coverprofile $@ -cover ./ $(buildDir)/cover.html:$(buildDir)/cover.out - $(gobin) tool cover -html=$< -o $@ + $(goEnv) $(gobin) tool cover -html=$< -o $@ vendor-clean: diff --git a/vendor/github.com/evergreen-ci/pail/parallel.go b/vendor/github.com/evergreen-ci/pail/parallel.go new file mode 100644 index 00000000000..14974788453 --- /dev/null +++ b/vendor/github.com/evergreen-ci/pail/parallel.go @@ -0,0 +1,210 @@ +package pail + +import ( + "context" + "path/filepath" + "regexp" + "strings" + "sync" + + "github.com/mongodb/grip" + "github.com/mongodb/grip/message" + "github.com/pkg/errors" ) + +type parallelBucketImpl struct { + Bucket + size int + deleteOnSync bool + dryRun bool +} + +// ParallelBucketOptions support the use and creation of parallel sync buckets. +type ParallelBucketOptions struct { + // Workers sets the number of worker threads. + Workers int + // DryRun enables running in a mode that will not execute any + // operations that modify the bucket. + DryRun bool + // DeleteOnSync will delete all objects from the target that do not + // exist in the source after the completion of a sync operation + // (Push/Pull). + DeleteOnSync bool +} + +// NewParallelSyncBucket returns a layered bucket implementation that supports +// parallel sync operations.
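Since parallel.go is new in this change, a brief usage sketch may help review. It assumes the SyncOptions type (defined elsewhere in this change) exposes the Local, Remote, and Exclude fields that the new Push/Pull methods read; the paths and the exclude pattern below are placeholders:

    package main

    import (
        "context"
        "log"

        "github.com/evergreen-ci/pail"
    )

    func main() {
        // Any Bucket can serve as the base; a local bucket is the simplest.
        base, err := pail.NewLocalBucket(pail.LocalOptions{Path: "/srv/bucket"})
        if err != nil {
            log.Fatal(err)
        }

        // Wrap it so Push/Pull fan work out to four workers.
        bucket := pail.NewParallelSyncBucket(pail.ParallelBucketOptions{Workers: 4}, base)

        // Push ./artifacts under the "builds" prefix, skipping *.tmp files.
        err = bucket.Push(context.Background(), pail.SyncOptions{
            Local:   "./artifacts",
            Remote:  "builds",
            Exclude: `\.tmp$`,
        })
        if err != nil {
            log.Fatal(err)
        }
    }

Note that only Push and Pull are overridden in this file; every other operation passes through to the wrapped Bucket via embedding.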
+func NewParallelSyncBucket(opts ParallelBucketOptions, b Bucket) Bucket { + return &parallelBucketImpl{ + size: opts.Workers, + deleteOnSync: opts.DeleteOnSync, + dryRun: opts.DryRun, + Bucket: b, + } +} + +func (b *parallelBucketImpl) Push(ctx context.Context, opts SyncOptions) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + var re *regexp.Regexp + var err error + if opts.Exclude != "" { + re, err = regexp.Compile(opts.Exclude) + if err != nil { + return errors.Wrap(err, "problem compiling exclude regex") + } + } + + files, err := walkLocalTree(ctx, opts.Local) + if err != nil { + return errors.WithStack(err) + } + + in := make(chan string, len(files)) + for i := range files { + if re != nil && re.MatchString(files[i]) { + continue + } + in <- files[i] + } + close(in) + wg := &sync.WaitGroup{} + catcher := grip.NewBasicCatcher() + for i := 0; i < b.size; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for fn := range in { + select { + case <-ctx.Done(): + return + default: + } + + if b.dryRun { + continue + } + + err = b.Bucket.Upload(ctx, filepath.Join(opts.Remote, fn), filepath.Join(opts.Local, fn)) + if err != nil { + catcher.Add(err) + cancel() + } + } + }() + } + wg.Wait() + + if ctx.Err() == nil && b.deleteOnSync && !b.dryRun { + catcher.Add(errors.Wrap(deleteOnPush(ctx, files, opts.Remote, b), "problem with delete on sync after push")) + } + + return catcher.Resolve() + +} +func (b *parallelBucketImpl) Pull(ctx context.Context, opts SyncOptions) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + var re *regexp.Regexp + var err error + if opts.Exclude != "" { + re, err = regexp.Compile(opts.Exclude) + if err != nil { + return errors.Wrap(err, "problem compiling exclude regex") + } + } + + iter, err := b.List(ctx, opts.Remote) + if err != nil { + return errors.WithStack(err) + } + + catcher := grip.NewBasicCatcher() + items := make(chan BucketItem) + toDelete := make(chan string) + + go func() { + defer close(items) + + for iter.Next(ctx) { + if iter.Err() != nil { + cancel() + catcher.Add(errors.Wrap(iter.Err(), "problem iterating bucket")) + } + + if re != nil && re.MatchString(iter.Item().Name()) { + continue + } + + select { + case <-ctx.Done(): + catcher.Add(ctx.Err()) + return + case items <- iter.Item(): + } + } + }() + + wg := &sync.WaitGroup{} + for i := 0; i < b.size; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for item := range items { + name, err := filepath.Rel(opts.Remote, item.Name()) + if err != nil { + catcher.Add(errors.Wrap(err, "problem getting relative filepath")) + cancel() + } + localName := filepath.Join(opts.Local, name) + if err := b.Download(ctx, item.Name(), localName); err != nil { + catcher.Add(err) + cancel() + } + + fn := strings.TrimPrefix(item.Name(), opts.Remote) + fn = strings.TrimPrefix(fn, "/") + fn = strings.TrimPrefix(fn, "\\") // strip the Windows path separator too
+ + select { + case <-ctx.Done(): + catcher.Add(ctx.Err()) + return + case toDelete <- fn: + } + } + }() + } + go func() { + wg.Wait() + close(toDelete) + }() + + deleteSignal := make(chan struct{}) + go func() { + defer close(deleteSignal) + + keys := []string{} + for key := range toDelete { + keys = append(keys, key) + } + + if b.deleteOnSync && b.dryRun { + grip.Debug(message.Fields{ + "dry_run": true, + "message": "would delete after pull", + }) + } else if ctx.Err() == nil && b.deleteOnSync { + catcher.Add(errors.Wrap(deleteOnPull(ctx, keys, opts.Local), "problem with delete on sync after pull")) + } + }() + + select { + case <-ctx.Done(): + case <-deleteSignal: + } + + return catcher.Resolve() +} diff --git a/vendor/github.com/evergreen-ci/pail/s3_bucket.go b/vendor/github.com/evergreen-ci/pail/s3_bucket.go index e14d63db46e..c0ae2890f16 100644 --- a/vendor/github.com/evergreen-ci/pail/s3_bucket.go +++ b/vendor/github.com/evergreen-ci/pail/s3_bucket.go @@ -1,11 +1,13 @@ package pail import ( + "compress/gzip" "context" "io" "net/http" "os" "path/filepath" + "regexp" "strings" "github.com/aws/aws-sdk-go/aws" @@ -13,11 +15,41 @@ import ( "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" - homedir "github.com/mitchellh/go-homedir" "github.com/mongodb/grip" + "github.com/mongodb/grip/message" "github.com/pkg/errors" ) +const compressionEncoding = "gzip" + +// S3Permissions is a type that describes the object canned ACL from S3. +type S3Permissions string + +// Valid S3 permissions. +const ( + S3PermissionsPrivate S3Permissions = s3.ObjectCannedACLPrivate + S3PermissionsPublicRead S3Permissions = s3.ObjectCannedACLPublicRead + S3PermissionsPublicReadWrite S3Permissions = s3.ObjectCannedACLPublicReadWrite + S3PermissionsAuthenticatedRead S3Permissions = s3.ObjectCannedACLAuthenticatedRead + S3PermissionsAWSExecRead S3Permissions = s3.ObjectCannedACLAwsExecRead + S3PermissionsBucketOwnerRead S3Permissions = s3.ObjectCannedACLBucketOwnerRead + S3PermissionsBucketOwnerFullControl S3Permissions = s3.ObjectCannedACLBucketOwnerFullControl +) + +// Validate s3 permissions. +func (p S3Permissions) Validate() error { + switch p { + case S3PermissionsPublicRead, S3PermissionsPublicReadWrite: + return nil + case S3PermissionsPrivate, S3PermissionsAuthenticatedRead, S3PermissionsAWSExecRead: + return nil + case S3PermissionsBucketOwnerRead, S3PermissionsBucketOwnerFullControl: + return nil + default: + return errors.New("invalid S3 permissions type specified") + } +} + type s3BucketSmall struct { s3Bucket } @@ -28,30 +60,65 @@ type s3BucketLarge struct { } type s3Bucket struct { - dryRun bool - deleteOnSync bool - batchSize int - sess *session.Session - svc *s3.S3 - name string - prefix string - permission string - contentType string + dryRun bool + deleteOnSync bool + singleFileChecksums bool + compress bool + verbose bool + batchSize int + sess *session.Session + svc *s3.S3 + name string + prefix string + permissions S3Permissions + contentType string } // S3Options support the use and creation of S3 backed buckets. type S3Options struct { - DryRun bool - DeleteOnSync bool - MaxRetries int - Credentials *credentials.Credentials + // DryRun enables running in a mode that will not execute any + // operations that modify the bucket. + DryRun bool + // DeleteOnSync will delete all objects from the target that do not + // exist in the source after the completion of a sync operation + // (Push/Pull).
+ DeleteOnSync bool + // Compress enables gzipping of uploaded objects. + Compress bool + // UseSingleFileChecksums forces the bucket to checksum files before + // running upload and download operations (rather than doing these + // operations independently). Useful for large files, particularly in + // coordination with the parallel sync bucket implementations. + UseSingleFileChecksums bool + // Verbose sets the logging mode to "debug". + Verbose bool + // MaxRetries sets the number of retry attempts for s3 operations. + MaxRetries int + // Credentials allows the passing in of explicit AWS credentials. These + // will override the default credentials chain. (Optional) + Credentials *credentials.Credentials + // SharedCredentialsFilepath, when not empty, will override the default + // credentials chain and the Credentials value (see above). (Optional) SharedCredentialsFilepath string + // SharedCredentialsProfile, when not empty, will temporarily set the + // AWS_PROFILE environment variable to its value. (Optional) SharedCredentialsProfile string + // Region specifies the AWS region. + Region string + // Name specifies the name of the bucket. + Name string + // Prefix specifies the prefix to use. (Optional) + Prefix string + // Permissions sets the S3 permissions to use for each object. Defaults + // to FULL_CONTROL. See + // `https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html` + // for more information. + Permissions S3Permissions + // ContentType sets the standard MIME type of the object data. Defaults + // to nil. See + // `https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17` + // for more information. + ContentType string } // CreateAWSCredentials is a wrapper for creating AWS credentials.
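Taken together, the expanded S3Options surface reads well in a single constructor call. A hedged sketch, with placeholder region, bucket, and prefix values (the field names and the S3PermissionsPrivate constant come from this file):

    package main

    import "github.com/evergreen-ci/pail"

    // newArchiveBucket shows the new knobs: Permissions is now a typed,
    // validated value, Compress gzips uploads and sets Content-Encoding,
    // and UseSingleFileChecksums compares MD5s before re-transferring.
    func newArchiveBucket() (pail.Bucket, error) {
        return pail.NewS3Bucket(pail.S3Options{
            Region:                 "us-east-1",      // placeholder
            Name:                   "example-bucket", // placeholder
            Prefix:                 "archives",       // placeholder
            Permissions:            pail.S3PermissionsPrivate,
            Compress:               true,
            UseSingleFileChecksums: true,
            Verbose:                true, // route the new grip debug logging
            MaxRetries:             10,
        })
    }

An invalid Permissions value fails fast: newS3BucketBase calls Validate before building the session, so a typo surfaces at construction rather than on the first write.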
@@ -63,41 +130,42 @@ func (s *s3Bucket) normalizeKey(key string) string { if key == "" { return s.prefix } - if s.prefix != "" { - return filepath.Join(s.prefix, key) - } - return key + return consistentJoin(s.prefix, key) } func (s *s3Bucket) denormalizeKey(key string) string { - if s.prefix != "" { - denormalizedKey, err := filepath.Rel(s.prefix, key) - if err != nil { - return key - } - return denormalizedKey + if s.prefix != "" && len(key) > len(s.prefix)+1 { + key = key[len(s.prefix)+1:] } return key } func newS3BucketBase(client *http.Client, options S3Options) (*s3Bucket, error) { + if options.Permissions != "" { + if err := options.Permissions.Validate(); err != nil { + return nil, errors.WithStack(err) + } + } + config := &aws.Config{ Region: aws.String(options.Region), HTTPClient: client, MaxRetries: aws.Int(options.MaxRetries), } - // if options.SharedCredentialsProfile is set, will override any credentials passed in + if options.SharedCredentialsProfile != "" { - fp := options.SharedCredentialsFilepath - if fp == "" { - // if options.SharedCredentialsFilepath is not set, use default filepath - homeDir, err := homedir.Dir() - if err != nil { - return nil, errors.Wrap(err, "failed to detect home directory when getting default credentials file") - } - fp = filepath.Join(homeDir, ".aws", "credentials") + prev := os.Getenv("AWS_PROFILE") + if err := os.Setenv("AWS_PROFILE", options.SharedCredentialsProfile); err != nil { + return nil, errors.Wrap(err, "problem setting AWS_PROFILE env var") } - sharedCredentials := credentials.NewSharedCredentials(fp, options.SharedCredentialsProfile) + defer func() { + if err := os.Setenv("AWS_PROFILE", prev); err != nil { + grip.Error(errors.Wrap(err, "problem setting back AWS_PROFILE env var")) + } + }() + } + if options.SharedCredentialsFilepath != "" { + sharedCredentials := credentials.NewSharedCredentials(options.SharedCredentialsFilepath, options.SharedCredentialsProfile) _, err := sharedCredentials.Get() if err != nil { return nil, errors.Wrapf(err, "invalid credentials from profile '%s'", options.SharedCredentialsProfile) @@ -110,28 +178,31 @@ func newS3BucketBase(client *http.Client, options S3Options) (*s3Bucket, error) } config.Credentials = options.Credentials } + sess, err := session.NewSession(config) if err != nil { return nil, errors.Wrap(err, "problem connecting to AWS") } svc := s3.New(sess) return &s3Bucket{ - name: options.Name, - prefix: options.Prefix, - sess: sess, - svc: svc, - permission: options.Permission, - contentType: options.ContentType, - dryRun: options.DryRun, - batchSize: 1000, - deleteOnSync: options.DeleteOnSync, + name: options.Name, + prefix: options.Prefix, + compress: options.Compress, + singleFileChecksums: options.UseSingleFileChecksums, + verbose: options.Verbose, + sess: sess, + svc: svc, + permissions: options.Permissions, + contentType: options.ContentType, + dryRun: options.DryRun, + batchSize: 1000, + deleteOnSync: options.DeleteOnSync, }, nil } // NewS3Bucket returns a Bucket implementation backed by S3. This -// implementation does not support multipart uploads, if you would -// like to add objects larger than 5 gigabytes see -// `NewS3MultiPartBucket`. +// implementation does not support multipart uploads, if you would like to add +// objects larger than 5 gigabytes see `NewS3MultiPartBucket`. 
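The comment above draws the line between the two constructors at 5 gigabytes. A short sketch of picking one, using only names from this file; the caller-side size flag is an assumption for illustration:

    package main

    import "github.com/evergreen-ci/pail"

    // chooseS3Bucket selects a constructor by expected object size: the
    // plain bucket issues single PutObject calls (capped at 5 GB), while
    // the multipart variant streams large objects in parts.
    func chooseS3Bucket(opts pail.S3Options, expectHugeObjects bool) (pail.Bucket, error) {
        if expectHugeObjects {
            return pail.NewS3MultiPartBucket(opts)
        }
        return pail.NewS3Bucket(opts)
    }
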
func NewS3Bucket(options S3Options) (Bucket, error) { bucket, err := newS3BucketBase(nil, options) if err != nil { @@ -200,19 +271,23 @@ func (s *s3Bucket) Check(ctx context.Context) error { type smallWriteCloser struct { isClosed bool dryRun bool + compress bool + verbose bool svc *s3.S3 buffer []byte name string ctx context.Context key string - permission string + permissions S3Permissions contentType string } type largeWriteCloser struct { isCreated bool isClosed bool + compress bool dryRun bool + verbose bool partNumber int64 maxSize int svc *s3.S3 @@ -221,19 +296,30 @@ type largeWriteCloser struct { completedParts []*s3.CompletedPart name string key string - permission string + permissions S3Permissions contentType string uploadID string } func (w *largeWriteCloser) create() error { + grip.DebugWhen(w.verbose, message.Fields{ + "type": "s3", + "dry_run": w.dryRun, + "operation": "large writer create", + "bucket": w.name, + "key": w.key, + }) + if !w.dryRun { input := &s3.CreateMultipartUploadInput{ Bucket: aws.String(w.name), Key: aws.String(w.key), - ACL: aws.String(w.permission), + ACL: aws.String(string(w.permissions)), ContentType: aws.String(w.contentType), } + if w.compress { + input.ContentEncoding = aws.String(compressionEncoding) + } result, err := w.svc.CreateMultipartUploadWithContext(w.ctx, input) if err != nil { @@ -247,6 +333,14 @@ func (w *largeWriteCloser) create() error { } func (w *largeWriteCloser) complete() error { + grip.DebugWhen(w.verbose, message.Fields{ + "type": "s3", + "dry_run": w.dryRun, + "operation": "large writer complete", + "bucket": w.name, + "key": w.key, + }) + if !w.dryRun { input := &s3.CompleteMultipartUploadInput{ Bucket: aws.String(w.name), @@ -270,6 +364,14 @@ func (w *largeWriteCloser) complete() error { } func (w *largeWriteCloser) abort() error { + grip.DebugWhen(w.verbose, message.Fields{ + "type": "s3", + "dry_run": w.dryRun, + "operation": "large writer abort", + "bucket": w.name, + "key": w.key, + }) + input := &s3.AbortMultipartUploadInput{ Bucket: aws.String(w.name), Key: aws.String(w.key), @@ -281,6 +383,14 @@ func (w *largeWriteCloser) abort() error { } func (w *largeWriteCloser) flush() error { + grip.DebugWhen(w.verbose, message.Fields{ + "type": "s3", + "dry_run": w.dryRun, + "operation": "large writer flush", + "bucket": w.name, + "key": w.key, + }) + if !w.isCreated { err := w.create() if err != nil { @@ -308,12 +418,21 @@ func (w *largeWriteCloser) flush() error { PartNumber: aws.Int64(w.partNumber), }) } + w.buffer = []byte{} w.partNumber++ return nil } func (w *smallWriteCloser) Write(p []byte) (int, error) { + grip.DebugWhen(w.verbose, message.Fields{ + "type": "s3", + "dry_run": w.dryRun, + "operation": "small writer write", + "bucket": w.name, + "key": w.key, + }) + if w.isClosed { return 0, errors.New("writer already closed") } @@ -322,6 +441,14 @@ func (w *smallWriteCloser) Write(p []byte) (int, error) { } func (w *largeWriteCloser) Write(p []byte) (int, error) { + grip.DebugWhen(w.verbose, message.Fields{ + "type": "s3", + "dry_run": w.dryRun, + "operation": "large writer write", + "bucket": w.name, + "key": w.key, + }) + if w.isClosed { return 0, errors.New("writer already closed") } @@ -336,6 +463,14 @@ func (w *largeWriteCloser) Write(p []byte) (int, error) { } func (w *smallWriteCloser) Close() error { + grip.DebugWhen(w.verbose, message.Fields{ + "type": "s3", + "dry_run": w.dryRun, + "operation": "small writer close", + "bucket": w.name, + "key": w.key, + }) + if w.isClosed { return errors.New("writer already 
closed") } @@ -347,9 +482,12 @@ func (w *smallWriteCloser) Close() error { Body: aws.ReadSeekCloser(strings.NewReader(string(w.buffer))), // nolint:staticcheck Bucket: aws.String(w.name), Key: aws.String(w.key), - ACL: aws.String(w.permission), + ACL: aws.String(string(w.permissions)), ContentType: aws.String(w.contentType), } + if w.compress { + input.ContentEncoding = aws.String(compressionEncoding) + } _, err := w.svc.PutObjectWithContext(w.ctx, input) return errors.Wrap(err, "problem copying data to file") @@ -357,6 +495,14 @@ func (w *smallWriteCloser) Close() error { } func (w *largeWriteCloser) Close() error { + grip.DebugWhen(w.verbose, message.Fields{ + "type": "s3", + "dry_run": w.dryRun, + "operation": "large writer close", + "bucket": w.name, + "key": w.key, + }) + if w.isClosed { return errors.New("writer already closed") } @@ -370,32 +516,93 @@ func (w *largeWriteCloser) Close() error { return err } +type compressingWriteCloser struct { + gzipWriter io.WriteCloser + s3Writer io.WriteCloser +} + +func (w *compressingWriteCloser) Write(p []byte) (int, error) { + return w.gzipWriter.Write(p) +} + +func (w *compressingWriteCloser) Close() error { + catcher := grip.NewBasicCatcher() + + catcher.Add(w.gzipWriter.Close()) + catcher.Add(w.s3Writer.Close()) + + return catcher.Resolve() +} + func (s *s3BucketSmall) Writer(ctx context.Context, key string) (io.WriteCloser, error) { - return &smallWriteCloser{ + grip.DebugWhen(s.verbose, message.Fields{ + "type": "s3", + "dry_run": s.dryRun, + "operation": "small writer", + "bucket": s.name, + "bucket_prefix": s.prefix, + "key": key, + }) + + writer := &smallWriteCloser{ name: s.name, svc: s.svc, ctx: ctx, key: s.normalizeKey(key), - permission: s.permission, + permissions: s.permissions, contentType: s.contentType, dryRun: s.dryRun, - }, nil + compress: s.compress, + } + if s.compress { + return &compressingWriteCloser{ + gzipWriter: gzip.NewWriter(writer), + s3Writer: writer, + }, nil + } + return writer, nil } func (s *s3BucketLarge) Writer(ctx context.Context, key string) (io.WriteCloser, error) { - return &largeWriteCloser{ + grip.DebugWhen(s.verbose, message.Fields{ + "type": "s3", + "dry_run": s.dryRun, + "operation": "large writer", + "bucket": s.name, + "bucket_prefix": s.prefix, + "key": key, + }) + + writer := &largeWriteCloser{ maxSize: s.minPartSize, name: s.name, svc: s.svc, ctx: ctx, key: s.normalizeKey(key), - permission: s.permission, + permissions: s.permissions, contentType: s.contentType, dryRun: s.dryRun, - }, nil + compress: s.compress, + verbose: s.verbose, + } + if s.compress { + return &compressingWriteCloser{ + gzipWriter: gzip.NewWriter(writer), + s3Writer: writer, + }, nil + } + return writer, nil } func (s *s3Bucket) Reader(ctx context.Context, key string) (io.ReadCloser, error) { + grip.DebugWhen(s.verbose, message.Fields{ + "type": "s3", + "operation": "reader", + "bucket": s.name, + "bucket_prefix": s.prefix, + "key": key, + }) + input := &s3.GetObjectInput{ Bucket: aws.String(s.name), Key: aws.String(s.normalizeKey(key)), @@ -422,18 +629,64 @@ func putHelper(ctx context.Context, b Bucket, key string, r io.Reader) error { } func (s *s3BucketSmall) Put(ctx context.Context, key string, r io.Reader) error { + grip.DebugWhen(s.verbose, message.Fields{ + "type": "s3", + "dry_run": s.dryRun, + "operation": "put", + "bucket": s.name, + "bucket_prefix": s.prefix, + "key": key, + }) + return putHelper(ctx, s, key, r) } func (s *s3BucketLarge) Put(ctx context.Context, key string, r io.Reader) error { + 
grip.DebugWhen(s.verbose, message.Fields{ + "type": "s3", + "dry_run": s.dryRun, + "operation": "put", + "bucket": s.name, + "bucket_prefix": s.prefix, + "key": key, + }) + return putHelper(ctx, s, key, r) } func (s *s3Bucket) Get(ctx context.Context, key string) (io.ReadCloser, error) { + grip.DebugWhen(s.verbose, message.Fields{ + "type": "s3", + "operation": "get", + "bucket": s.name, + "bucket_prefix": s.prefix, + "key": key, + }) + return s.Reader(ctx, key) } -func uploadHelper(ctx context.Context, b Bucket, key, path string) error { +func (s *s3Bucket) s3WithUploadChecksumHelper(ctx context.Context, target, file string) (bool, error) { + localmd5, err := md5sum(file) + if err != nil { + return false, errors.Wrapf(err, "problem checksumming '%s'", file) + } + input := &s3.HeadObjectInput{ + Bucket: aws.String(s.name), + Key: aws.String(target), + IfMatch: aws.String(localmd5), + } + _, err = s.svc.HeadObjectWithContext(ctx, input) + if aerr, ok := err.(awserr.Error); ok { + if aerr.Code() == "PreconditionFailed" || aerr.Code() == "NotFound" { + return true, nil + } + } + + return false, errors.Wrapf(err, "problem with checksum for '%s'", target) +} + +func doUpload(ctx context.Context, b Bucket, key, path string) error { f, err := os.Open(path) if err != nil { return errors.Wrapf(err, "problem opening file %s", path) @@ -443,16 +696,40 @@ func uploadHelper(ctx context.Context, b Bucket, key, path string) error { return errors.WithStack(b.Put(ctx, key, f)) } -func (s *s3BucketSmall) Upload(ctx context.Context, key, path string) error { - return uploadHelper(ctx, s, key, path) +func (s *s3Bucket) uploadHelper(ctx context.Context, b Bucket, key, path string) error { + grip.DebugWhen(s.verbose, message.Fields{ + "type": "s3", + "dry_run": s.dryRun, + "operation": "upload", + "bucket": s.name, + "bucket_prefix": s.prefix, + "key": key, + "path": path, + }) + + if s.singleFileChecksums { + shouldUpload, err := s.s3WithUploadChecksumHelper(ctx, key, path) + if err != nil { + return errors.WithStack(err) + } + if !shouldUpload { + return nil + } + } + + return errors.WithStack(doUpload(ctx, b, key, path)) } func (s *s3BucketLarge) Upload(ctx context.Context, key, path string) error { - return uploadHelper(ctx, s, key, path) + return s.uploadHelper(ctx, s, key, path) +} + +func (s *s3BucketSmall) Upload(ctx context.Context, key, path string) error { + return s.uploadHelper(ctx, s, key, path) } -func (s *s3Bucket) Download(ctx context.Context, key, path string) error { - reader, err := s.Reader(ctx, key) +func doDownload(ctx context.Context, b Bucket, key, path string) error { + reader, err := b.Reader(ctx, key) if err != nil { return errors.WithStack(err) } @@ -474,53 +751,135 @@ func (s *s3Bucket) Download(ctx context.Context, key, path string) error { return errors.WithStack(f.Close()) } -func (s *s3Bucket) push(ctx context.Context, local, remote string, b Bucket) error { - files, err := walkLocalTree(ctx, local) +func s3DownloadWithChecksum(ctx context.Context, b Bucket, item BucketItem, local string) error { + localmd5, err := md5sum(local) + if os.IsNotExist(errors.Cause(err)) { + if err = doDownload(ctx, b, item.Name(), local); err != nil { + return errors.WithStack(err) + } + } else if err != nil { + return errors.WithStack(err) + } + if localmd5 != item.Hash() { + if err = doDownload(ctx, b, item.Name(), local); err != nil { + return errors.WithStack(err) + } + } + + return nil +} + +func (s *s3Bucket) downloadHelper(ctx context.Context, b Bucket, key, path string) error { + 
grip.DebugWhen(s.verbose, message.Fields{ + "type": "s3", + "operation": "download", + "bucket": s.name, + "bucket_prefix": s.prefix, + "key": key, + "path": path, + }) + + if s.singleFileChecksums { + iter, err := s.listHelper(ctx, b, s.normalizeKey(key)) + if err != nil { + return errors.WithStack(err) + } + if !iter.Next(ctx) { + return errors.New("no results found") + } + return s3DownloadWithChecksum(ctx, b, iter.Item(), path) + } + + return doDownload(ctx, b, key, path) +} + +func (s *s3BucketSmall) Download(ctx context.Context, key, path string) error { + return s.downloadHelper(ctx, s, key, path) +} + +func (s *s3BucketLarge) Download(ctx context.Context, key, path string) error { + return s.downloadHelper(ctx, s, key, path) +} + +func (s *s3Bucket) pushHelper(ctx context.Context, b Bucket, opts SyncOptions) error { + grip.DebugWhen(s.verbose, message.Fields{ + "type": "s3", + "dry_run": s.dryRun, + "operation": "push", + "bucket": s.name, + "bucket_prefix": s.prefix, + "remote": opts.Remote, + "local": opts.Local, + "exclude": opts.Exclude, + }) + + var re *regexp.Regexp + var err error + if opts.Exclude != "" { + re, err = regexp.Compile(opts.Exclude) + if err != nil { + return errors.Wrap(err, "problem compiling exclude regex") + } + } + + files, err := walkLocalTree(ctx, opts.Local) if err != nil { return errors.WithStack(err) } for _, fn := range files { - target := filepath.Join(remote, fn) - file := filepath.Join(local, fn) - localmd5, err := md5sum(file) + if re != nil && re.MatchString(fn) { + continue + } + + target := consistentJoin(opts.Remote, fn) + file := filepath.Join(opts.Local, fn) + shouldUpload, err := s.s3WithUploadChecksumHelper(ctx, target, file) if err != nil { - return errors.Wrapf(err, "problem checksumming '%s'", file) - } - input := &s3.HeadObjectInput{ - Bucket: aws.String(s.name), - Key: aws.String(target), - IfMatch: aws.String(localmd5), - } - _, err = s.svc.HeadObjectWithContext(ctx, input) - if aerr, ok := err.(awserr.Error); ok { - if aerr.Code() == "PreconditionFailed" || aerr.Code() == "NotFound" { - if err = b.Upload(ctx, target, file); err != nil { - return errors.Wrapf(err, "problem uploading '%s' to '%s'", - file, target) - } - } - } else if err != nil { - return errors.Wrapf(err, "problem finding '%s'", target) + return errors.WithStack(err) + } + if !shouldUpload { + continue + } + if err = doUpload(ctx, b, target, file); err != nil { + return errors.WithStack(err) + } } if s.deleteOnSync && !s.dryRun { - return errors.Wrapf(os.RemoveAll(local), "problem removing '%s' after push", local) + return errors.Wrap(deleteOnPush(ctx, files, opts.Remote, b), "problem with delete on sync after push") } return nil } -func (s *s3BucketSmall) Push(ctx context.Context, local, remote string) error { - return s.push(ctx, local, s.normalizeKey(remote), s) +func (s *s3BucketSmall) Push(ctx context.Context, opts SyncOptions) error { - return s.pushHelper(ctx, s, opts) } - -func (s *s3BucketLarge) Push(ctx context.Context, local, remote string) error { - return s.push(ctx, local, s.normalizeKey(remote), s) +func (s *s3BucketLarge) Push(ctx context.Context, opts SyncOptions) error { + return s.pushHelper(ctx, s, opts) } -func (s *s3Bucket) pull(ctx context.Context, local, remote string, b Bucket) error { - iter, err := b.List(ctx, remote) +func (s *s3Bucket) pullHelper(ctx context.Context, b Bucket, opts SyncOptions) error { + grip.DebugWhen(s.verbose, message.Fields{ + "type": "s3", + "operation": "pull", + "bucket": s.name, + "bucket_prefix": s.prefix, +
"remote": opts.Remote, + "local": opts.Local, + "exclude": opts.Exclude, + }) + + var re *regexp.Regexp + var err error + if opts.Exclude != "" { + re, err = regexp.Compile(opts.Exclude) + if err != nil { + return errors.Wrap(err, "problem compiling exclude regex") + } + } + + iter, err := b.List(ctx, opts.Remote) if err != nil { return errors.WithStack(err) } @@ -530,54 +889,58 @@ func (s *s3Bucket) pull(ctx context.Context, local, remote string, b Bucket) err if iter.Err() != nil { return errors.Wrap(err, "problem iterating bucket") } - name, err := filepath.Rel(remote, iter.Item().Name()) + + if re != nil && re.MatchString(iter.Item().Name()) { + continue + } + + name, err := filepath.Rel(opts.Remote, iter.Item().Name()) if err != nil { return errors.Wrap(err, "problem getting relative filepath") } - localName := filepath.Join(local, name) - localmd5, err := md5sum(localName) - if os.IsNotExist(errors.Cause(err)) { - if err = b.Download(ctx, iter.Item().Name(), localName); err != nil { - return errors.WithStack(err) - } - } else if err != nil { + localName := filepath.Join(opts.Local, name) + if err := s3DownloadWithChecksum(ctx, b, iter.Item(), localName); err != nil { return errors.WithStack(err) } - if localmd5 != iter.Item().Hash() { - if err = b.Download(ctx, iter.Item().Name(), localName); err != nil { - return errors.WithStack(err) - } - } - - keys = append(keys, iter.Item().Name()) + keys = append(keys, name) } if s.deleteOnSync && !s.dryRun { - return errors.Wrapf(b.RemoveMany(ctx, keys...), "problem removing '%s' after pull", remote) + return errors.Wrap(deleteOnPull(ctx, keys, opts.Local), "problem with delete on sync after pull") } return nil } -func (s *s3BucketSmall) Pull(ctx context.Context, local, remote string) error { - return s.pull(ctx, local, remote, s) +func (s *s3BucketSmall) Pull(ctx context.Context, opts SyncOptions) error { + return s.pullHelper(ctx, s, opts) } -func (s *s3BucketLarge) Pull(ctx context.Context, local, remote string) error { - return s.pull(ctx, local, remote, s) +func (s *s3BucketLarge) Pull(ctx context.Context, opts SyncOptions) error { + return s.pullHelper(ctx, s, opts) } func (s *s3Bucket) Copy(ctx context.Context, options CopyOptions) error { if !options.IsDestination { options.IsDestination = true - options.SourceKey = filepath.Join(s.name, s.normalizeKey(options.SourceKey)) + options.SourceKey = consistentJoin(s.name, s.normalizeKey(options.SourceKey)) return options.DestinationBucket.Copy(ctx, options) } + grip.DebugWhen(s.verbose, message.Fields{ + "type": "s3", + "dry_run": s.dryRun, + "operation": "copy", + "bucket": s.name, + "bucket_prefix": s.prefix, + "source_key": options.SourceKey, + "dest_key": options.DestinationKey, + }) + input := &s3.CopyObjectInput{ Bucket: aws.String(s.name), CopySource: aws.String(options.SourceKey), Key: aws.String(s.normalizeKey(options.DestinationKey)), - ACL: aws.String(s.permission), + ACL: aws.String(string(s.permissions)), } if !s.dryRun { @@ -590,6 +953,15 @@ func (s *s3Bucket) Copy(ctx context.Context, options CopyOptions) error { } func (s *s3Bucket) Remove(ctx context.Context, key string) error { + grip.DebugWhen(s.verbose, message.Fields{ + "type": "s3", + "dry_run": s.dryRun, + "operation": "remove", + "bucket": s.name, + "bucket_prefix": s.prefix, + "key": key, + }) + if !s.dryRun { input := &s3.DeleteObjectInput{ Bucket: aws.String(s.name), @@ -619,6 +991,15 @@ func (s *s3Bucket) deleteObjectsWrapper(ctx context.Context, toDelete *s3.Delete } func (s *s3Bucket) RemoveMany(ctx 
context.Context, keys ...string) error { + grip.DebugWhen(s.verbose, message.Fields{ + "type": "s3", + "dry_run": s.dryRun, + "operation": "remove", + "bucket": s.name, + "bucket_prefix": s.prefix, + "keys": keys, + }) + catcher := grip.NewBasicCatcher() if !s.dryRun { count := 0 @@ -642,23 +1023,67 @@ func (s *s3Bucket) RemoveMany(ctx context.Context, keys ...string) error { } func (s *s3BucketSmall) RemovePrefix(ctx context.Context, prefix string) error { + grip.DebugWhen(s.verbose, message.Fields{ + "type": "s3", + "dry_run": s.dryRun, + "operation": "remove prefix", + "bucket": s.name, + "bucket_prefix": s.prefix, + "prefix": prefix, + }) + return removePrefix(ctx, prefix, s) } func (s *s3BucketLarge) RemovePrefix(ctx context.Context, prefix string) error { + grip.DebugWhen(s.verbose, message.Fields{ + "type": "s3", + "dry_run": s.dryRun, + "operation": "remove prefix", + "bucket": s.name, + "bucket_prefix": s.prefix, + "prefix": prefix, + }) + return removePrefix(ctx, prefix, s) } func (s *s3BucketSmall) RemoveMatching(ctx context.Context, expression string) error { + grip.DebugWhen(s.verbose, message.Fields{ + "type": "s3", + "dry_run": s.dryRun, + "operation": "remove matching", + "bucket": s.name, + "bucket_prefix": s.prefix, + "expression": expression, + }) + return removeMatching(ctx, expression, s) } func (s *s3BucketLarge) RemoveMatching(ctx context.Context, expression string) error { + grip.DebugWhen(s.verbose, message.Fields{ + "type": "s3", + "dry_run": s.dryRun, + "operation": "remove matching", + "bucket": s.name, + "bucket_prefix": s.prefix, + "expression": expression, + }) + return removeMatching(ctx, expression, s) } func (s *s3Bucket) listHelper(ctx context.Context, b Bucket, prefix string) (BucketIterator, error) { - contents, isTruncated, err := getObjectsWrapper(ctx, s, prefix) + grip.DebugWhen(s.verbose, message.Fields{ + "type": "s3", + "operation": "list", + "bucket": s.name, + "bucket_prefix": s.prefix, + "prefix": prefix, + }) + + contents, isTruncated, err := getObjectsWrapper(ctx, s, prefix, "") if err != nil { return nil, err } @@ -668,6 +1093,7 @@ func (s *s3Bucket) listHelper(ctx context.Context, b Bucket, prefix string) (Buc isTruncated: isTruncated, s: s, b: b, + prefix: prefix, }, nil } @@ -679,10 +1105,11 @@ func (s *s3BucketLarge) List(ctx context.Context, prefix string) (BucketIterator return s.listHelper(ctx, s, s.normalizeKey(prefix)) } -func getObjectsWrapper(ctx context.Context, s *s3Bucket, prefix string) ([]*s3.Object, bool, error) { +func getObjectsWrapper(ctx context.Context, s *s3Bucket, prefix, marker string) ([]*s3.Object, bool, error) { input := &s3.ListObjectsInput{ Bucket: aws.String(s.name), Prefix: aws.String(prefix), + Marker: aws.String(marker), } result, err := s.svc.ListObjectsWithContext(ctx, input) @@ -700,6 +1127,7 @@ type s3BucketIterator struct { item *bucketItemImpl s *s3Bucket b Bucket + prefix string } func (iter *s3BucketIterator) Err() error { return iter.err } @@ -710,8 +1138,12 @@ func (iter *s3BucketIterator) Next(ctx context.Context) bool { iter.idx++ if iter.idx > len(iter.contents)-1 { if iter.isTruncated { - contents, isTruncated, err := getObjectsWrapper(ctx, iter.s, - *iter.contents[iter.idx-1].Key) + contents, isTruncated, err := getObjectsWrapper( + ctx, + iter.s, + iter.prefix, + *iter.contents[iter.idx-1].Key, + ) if err != nil { iter.err = err return false diff --git a/vendor/github.com/evergreen-ci/pail/s3_bucket_util_test.go b/vendor/github.com/evergreen-ci/pail/s3_bucket_util_test.go new file mode 100644 
index 00000000000..d1fe99854d0 --- /dev/null +++ b/vendor/github.com/evergreen-ci/pail/s3_bucket_util_test.go @@ -0,0 +1,643 @@ +package pail + +import ( + "bytes" + "compress/gzip" + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + homedir "github.com/mitchellh/go-homedir" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func getS3SmallBucketTests(ctx context.Context, tempdir, s3BucketName, s3Prefix, s3Region string) []bucketTestCase { + return []bucketTestCase{ + { + id: "VerifyBucketType", + test: func(t *testing.T, b Bucket) { + bucket, ok := b.(*s3BucketSmall) + require.True(t, ok) + assert.NotNil(t, bucket) + }, + }, + { + id: "TestCredentialsOverrideDefaults", + test: func(t *testing.T, b Bucket) { + input := &s3.GetBucketLocationInput{ + Bucket: aws.String(s3BucketName), + } + + rawBucket := b.(*s3BucketSmall) + _, err := rawBucket.svc.GetBucketLocationWithContext(ctx, input) + assert.NoError(t, err) + + badOptions := S3Options{ + Credentials: CreateAWSCredentials("asdf", "asdf", "asdf"), + Region: s3Region, + Name: s3BucketName, + } + badBucket, err := NewS3Bucket(badOptions) + require.NoError(t, err) + rawBucket = badBucket.(*s3BucketSmall) + _, err = rawBucket.svc.GetBucketLocationWithContext(ctx, input) + assert.Error(t, err) + }, + }, + { + id: "TestCheckPassesWhenDoNotHaveAccess", + test: func(t *testing.T, b Bucket) { + rawBucket := b.(*s3BucketSmall) + rawBucket.name = "mciuploads" + assert.NoError(t, rawBucket.Check(ctx)) + }, + }, + { + id: "TestCheckFailsWhenBucketDNE", + test: func(t *testing.T, b Bucket) { + rawBucket := b.(*s3BucketSmall) + rawBucket.name = newUUID() + assert.Error(t, rawBucket.Check(ctx)) + }, + }, + { + id: "TestSharedCredentialsOption", + test: func(t *testing.T, b Bucket) { + require.NoError(t, b.Check(ctx)) + + newFile, err := os.Create(filepath.Join(tempdir, "creds")) + require.NoError(t, err) + defer newFile.Close() + _, err = newFile.WriteString("[my_profile]\n") + require.NoError(t, err) + awsKey := fmt.Sprintf("aws_access_key_id = %s\n", os.Getenv("AWS_KEY")) + _, err = newFile.WriteString(awsKey) + require.NoError(t, err) + awsSecret := fmt.Sprintf("aws_secret_access_key = %s\n", os.Getenv("AWS_SECRET")) + _, err = newFile.WriteString(awsSecret) + require.NoError(t, err) + + sharedCredsOptions := S3Options{ + SharedCredentialsFilepath: filepath.Join(tempdir, "creds"), + SharedCredentialsProfile: "my_profile", + Region: s3Region, + Name: s3BucketName, + } + sharedCredsBucket, err := NewS3Bucket(sharedCredsOptions) + require.NoError(t, err) + assert.NoError(t, sharedCredsBucket.Check(ctx)) + }, + }, + { + id: "TestSharedCredentialsUsesCorrectDefaultFile", + test: func(t *testing.T, b Bucket) { + require.NoError(t, b.Check(ctx)) + + sharedCredsOptions := S3Options{ + SharedCredentialsProfile: "default", + Region: s3Region, + Name: s3BucketName, + } + sharedCredsBucket, err := NewS3Bucket(sharedCredsOptions) + require.NoError(t, err) + homeDir, err := homedir.Dir() + require.NoError(t, err) + fileName := filepath.Join(homeDir, ".aws", "credentials") + _, err = os.Stat(fileName) + if err == nil { + assert.NoError(t, sharedCredsBucket.Check(ctx)) + } else { + assert.True(t, os.IsNotExist(err)) + } + }, + }, + { + id: "TestSharedCredentialsFailsWhenProfileDNE", + test: func(t *testing.T, b Bucket) { + require.NoError(t, b.Check(ctx)) + + 
sharedCredsOptions := S3Options{ + SharedCredentialsProfile: "DNE", + Region: s3Region, + Name: s3BucketName, + } + sharedCredsBucket, err := NewS3Bucket(sharedCredsOptions) + assert.NoError(t, err) + _, err = sharedCredsBucket.List(ctx, "") + assert.Error(t, err) + }, + }, + + { + id: "TestPermissions", + test: func(t *testing.T, b Bucket) { + // default permissions + key1 := newUUID() + writer, err := b.Writer(ctx, key1) + require.NoError(t, err) + _, err = writer.Write([]byte("hello world")) + require.NoError(t, err) + require.NoError(t, writer.Close()) + rawBucket := b.(*s3BucketSmall) + objectACLInput := &s3.GetObjectAclInput{ + Bucket: aws.String(s3BucketName), + Key: aws.String(rawBucket.normalizeKey(key1)), + } + objectACLOutput, err := rawBucket.svc.GetObjectAcl(objectACLInput) + require.NoError(t, err) + require.Equal(t, 1, len(objectACLOutput.Grants)) + assert.Equal(t, "FULL_CONTROL", *objectACLOutput.Grants[0].Permission) + + // explicitly set permissions + openOptions := S3Options{ + Region: s3Region, + Name: s3BucketName, + Prefix: s3Prefix + newUUID(), + Permissions: S3PermissionsPublicRead, + } + openBucket, err := NewS3Bucket(openOptions) + require.NoError(t, err) + key2 := newUUID() + writer, err = openBucket.Writer(ctx, key2) + require.NoError(t, err) + _, err = writer.Write([]byte("hello world")) + require.NoError(t, err) + require.NoError(t, writer.Close()) + rawBucket = openBucket.(*s3BucketSmall) + objectACLInput = &s3.GetObjectAclInput{ + Bucket: aws.String(s3BucketName), + Key: aws.String(rawBucket.normalizeKey(key2)), + } + objectACLOutput, err = rawBucket.svc.GetObjectAcl(objectACLInput) + require.NoError(t, err) + require.Equal(t, 2, len(objectACLOutput.Grants)) + assert.Equal(t, "READ", *objectACLOutput.Grants[1].Permission) + + // copy with permissions + destKey := newUUID() + copyOpts := CopyOptions{ + SourceKey: key1, + DestinationKey: destKey, + DestinationBucket: openBucket, + } + require.NoError(t, b.Copy(ctx, copyOpts)) + require.NoError(t, err) + require.Equal(t, 2, len(objectACLOutput.Grants)) + assert.Equal(t, "READ", *objectACLOutput.Grants[1].Permission) + }, + }, + { + id: "TestContentType", + test: func(t *testing.T, b Bucket) { + // default content type + key := newUUID() + writer, err := b.Writer(ctx, key) + require.NoError(t, err) + _, err = writer.Write([]byte("hello world")) + require.NoError(t, err) + require.NoError(t, writer.Close()) + rawBucket := b.(*s3BucketSmall) + getObjectInput := &s3.GetObjectInput{ + Bucket: aws.String(s3BucketName), + Key: aws.String(rawBucket.normalizeKey(key)), + } + getObjectOutput, err := rawBucket.svc.GetObject(getObjectInput) + require.NoError(t, err) + assert.Nil(t, getObjectOutput.ContentType) + + // explicitly set content type + htmlOptions := S3Options{ + Region: s3Region, + Name: s3BucketName, + Prefix: s3Prefix + newUUID(), + ContentType: "html/text", + } + htmlBucket, err := NewS3Bucket(htmlOptions) + require.NoError(t, err) + key = newUUID() + writer, err = htmlBucket.Writer(ctx, key) + require.NoError(t, err) + _, err = writer.Write([]byte("hello world")) + require.NoError(t, err) + require.NoError(t, writer.Close()) + rawBucket = htmlBucket.(*s3BucketSmall) + getObjectInput = &s3.GetObjectInput{ + Bucket: aws.String(s3BucketName), + Key: aws.String(rawBucket.normalizeKey(key)), + } + getObjectOutput, err = rawBucket.svc.GetObject(getObjectInput) + require.NoError(t, err) + require.NotNil(t, getObjectOutput.ContentType) + assert.Equal(t, "html/text", *getObjectOutput.ContentType) + }, + }, + + { + 
id: "TestCompressingWriter", + test: func(t *testing.T, b Bucket) { + rawBucket := b.(*s3BucketSmall) + s3Options := S3Options{ + Region: s3Region, + Name: s3BucketName, + Prefix: rawBucket.prefix, + MaxRetries: 20, + Compress: true, + } + cb, err := NewS3Bucket(s3Options) + require.NoError(t, err) + + data := []byte{} + for i := 0; i < 300; i++ { + data = append(data, []byte(newUUID())...) + } + + uncompressedKey := newUUID() + w, err := b.Writer(ctx, uncompressedKey) + require.NoError(t, err) + n, err := w.Write(data) + require.NoError(t, err) + require.NoError(t, w.Close()) + assert.Equal(t, len(data), n) + + compressedKey := newUUID() + cw, err := cb.Writer(ctx, compressedKey) + require.NoError(t, err) + n, err = cw.Write(data) + require.NoError(t, err) + require.NoError(t, cw.Close()) + assert.Equal(t, len(data), n) + compressedData := cw.(*compressingWriteCloser).s3Writer.(*smallWriteCloser).buffer + + reader, err := gzip.NewReader(bytes.NewReader(compressedData)) + require.NoError(t, err) + decompressedData, err := ioutil.ReadAll(reader) + require.NoError(t, err) + assert.Equal(t, data, decompressedData) + + cr, err := cb.Get(ctx, compressedKey) + require.NoError(t, err) + s3CompressedData, err := ioutil.ReadAll(cr) + require.NoError(t, err) + assert.Equal(t, data, s3CompressedData) + r, err := cb.Get(ctx, uncompressedKey) + require.NoError(t, err) + s3UncompressedData, err := ioutil.ReadAll(r) + require.NoError(t, err) + assert.Equal(t, data, s3UncompressedData) + }, + }, + } +} + +func getS3LargeBucketTests(ctx context.Context, tempdir, s3BucketName, s3Prefix, s3Region string) []bucketTestCase { + return []bucketTestCase{ + { + id: "VerifyBucketType", + test: func(t *testing.T, b Bucket) { + bucket, ok := b.(*s3BucketLarge) + require.True(t, ok) + assert.NotNil(t, bucket) + }, + }, + { + id: "TestCredentialsOverrideDefaults", + test: func(t *testing.T, b Bucket) { + input := &s3.GetBucketLocationInput{ + Bucket: aws.String(s3BucketName), + } + + rawBucket := b.(*s3BucketLarge) + _, err := rawBucket.svc.GetBucketLocationWithContext(ctx, input) + assert.NoError(t, err) + + badOptions := S3Options{ + Credentials: CreateAWSCredentials("asdf", "asdf", "asdf"), + Region: s3Region, + Name: s3BucketName, + } + badBucket, err := NewS3MultiPartBucket(badOptions) + require.NoError(t, err) + rawBucket = badBucket.(*s3BucketLarge) + _, err = rawBucket.svc.GetBucketLocationWithContext(ctx, input) + assert.Error(t, err) + }, + }, + { + id: "TestCheckPassesWhenDoNotHaveAccess", + test: func(t *testing.T, b Bucket) { + rawBucket := b.(*s3BucketLarge) + rawBucket.name = "mciuploads" + assert.NoError(t, rawBucket.Check(ctx)) + }, + }, + { + id: "TestCheckFailsWhenBucketDNE", + test: func(t *testing.T, b Bucket) { + rawBucket := b.(*s3BucketLarge) + rawBucket.name = newUUID() + assert.Error(t, rawBucket.Check(ctx)) + }, + }, + { + id: "TestSharedCredentialsOption", + test: func(t *testing.T, b Bucket) { + require.NoError(t, b.Check(ctx)) + + newFile, err := os.Create(filepath.Join(tempdir, "creds")) + require.NoError(t, err) + defer newFile.Close() + _, err = newFile.WriteString("[my_profile]\n") + require.NoError(t, err) + awsKey := fmt.Sprintf("aws_access_key_id = %s\n", os.Getenv("AWS_KEY")) + _, err = newFile.WriteString(awsKey) + require.NoError(t, err) + awsSecret := fmt.Sprintf("aws_secret_access_key = %s\n", os.Getenv("AWS_SECRET")) + _, err = newFile.WriteString(awsSecret) + require.NoError(t, err) + + sharedCredsOptions := S3Options{ + SharedCredentialsFilepath: filepath.Join(tempdir, 
"creds"), + SharedCredentialsProfile: "my_profile", + Region: s3Region, + Name: s3BucketName, + } + sharedCredsBucket, err := NewS3MultiPartBucket(sharedCredsOptions) + require.NoError(t, err) + assert.NoError(t, sharedCredsBucket.Check(ctx)) + }, + }, + { + id: "TestSharedCredentialsUsesCorrectDefaultFile", + test: func(t *testing.T, b Bucket) { + require.NoError(t, b.Check(ctx)) + + sharedCredsOptions := S3Options{ + SharedCredentialsProfile: "default", + Region: s3Region, + Name: s3BucketName, + } + sharedCredsBucket, err := NewS3MultiPartBucket(sharedCredsOptions) + require.NoError(t, err) + homeDir, err := homedir.Dir() + require.NoError(t, err) + fileName := filepath.Join(homeDir, ".aws", "credentials") + _, err = os.Stat(fileName) + if err == nil { + assert.NoError(t, sharedCredsBucket.Check(ctx)) + } else { + assert.True(t, os.IsNotExist(err)) + } + }, + }, + { + id: "TestSharedCredentialsFailsWhenProfileDNE", + test: func(t *testing.T, b Bucket) { + require.NoError(t, b.Check(ctx)) + + sharedCredsOptions := S3Options{ + SharedCredentialsProfile: "DNE", + Region: s3Region, + Name: s3BucketName, + } + sharedCredsBucket, err := NewS3MultiPartBucket(sharedCredsOptions) + assert.NoError(t, err) + _, err = sharedCredsBucket.List(ctx, "") + assert.Error(t, err) + }, + }, + { + id: "TestPermissions", + test: func(t *testing.T, b Bucket) { + // default permissions + key1 := newUUID() + writer, err := b.Writer(ctx, key1) + require.NoError(t, err) + _, err = writer.Write([]byte("hello world")) + require.NoError(t, err) + require.NoError(t, writer.Close()) + rawBucket := b.(*s3BucketLarge) + objectACLInput := &s3.GetObjectAclInput{ + Bucket: aws.String(s3BucketName), + Key: aws.String(rawBucket.normalizeKey(key1)), + } + objectACLOutput, err := rawBucket.svc.GetObjectAcl(objectACLInput) + require.NoError(t, err) + require.Equal(t, 1, len(objectACLOutput.Grants)) + assert.Equal(t, "FULL_CONTROL", *objectACLOutput.Grants[0].Permission) + + // explicitly set permissions + openOptions := S3Options{ + Region: s3Region, + Name: s3BucketName, + Prefix: s3Prefix + newUUID(), + Permissions: S3PermissionsPublicRead, + } + openBucket, err := NewS3MultiPartBucket(openOptions) + require.NoError(t, err) + key2 := newUUID() + writer, err = openBucket.Writer(ctx, key2) + require.NoError(t, err) + _, err = writer.Write([]byte("hello world")) + require.NoError(t, err) + require.NoError(t, writer.Close()) + rawBucket = openBucket.(*s3BucketLarge) + objectACLInput = &s3.GetObjectAclInput{ + Bucket: aws.String(s3BucketName), + Key: aws.String(rawBucket.normalizeKey(key2)), + } + objectACLOutput, err = rawBucket.svc.GetObjectAcl(objectACLInput) + require.NoError(t, err) + require.Equal(t, 2, len(objectACLOutput.Grants)) + assert.Equal(t, "READ", *objectACLOutput.Grants[1].Permission) + + // copy with permissions + destKey := newUUID() + copyOpts := CopyOptions{ + SourceKey: key1, + DestinationKey: destKey, + DestinationBucket: openBucket, + } + require.NoError(t, b.Copy(ctx, copyOpts)) + require.NoError(t, err) + require.Equal(t, 2, len(objectACLOutput.Grants)) + assert.Equal(t, "READ", *objectACLOutput.Grants[1].Permission) + }, + }, + { + id: "TestLargeFileRoundTrip", + test: func(t *testing.T, b Bucket) { + size := int64(10000000) + key := newUUID() + bigBuff := make([]byte, size) + path := filepath.Join(tempdir, "bigfile.test0") + + // upload large empty file + require.NoError(t, ioutil.WriteFile(path, bigBuff, 0666)) + require.NoError(t, b.Upload(ctx, key, path)) + + // check size of empty file + path = 
+				require.NoError(t, b.Download(ctx, key, path))
+				fi, err := os.Stat(path)
+				require.NoError(t, err)
+				assert.Equal(t, size, fi.Size())
+			},
+		},
+
+		{
+			id: "TestContentType",
+			test: func(t *testing.T, b Bucket) {
+				// default content type
+				key := newUUID()
+				writer, err := b.Writer(ctx, key)
+				require.NoError(t, err)
+				_, err = writer.Write([]byte("hello world"))
+				require.NoError(t, err)
+				require.NoError(t, writer.Close())
+				rawBucket := b.(*s3BucketLarge)
+				getObjectInput := &s3.GetObjectInput{
+					Bucket: aws.String(s3BucketName),
+					Key:    aws.String(rawBucket.normalizeKey(key)),
+				}
+				getObjectOutput, err := rawBucket.svc.GetObject(getObjectInput)
+				require.NoError(t, err)
+				assert.Nil(t, getObjectOutput.ContentType)
+
+				// explicitly set content type
+				htmlOptions := S3Options{
+					Region:      s3Region,
+					Name:        s3BucketName,
+					Prefix:      s3Prefix + newUUID(),
+					ContentType: "text/html",
+				}
+				htmlBucket, err := NewS3MultiPartBucket(htmlOptions)
+				require.NoError(t, err)
+				key = newUUID()
+				writer, err = htmlBucket.Writer(ctx, key)
+				require.NoError(t, err)
+				_, err = writer.Write([]byte("hello world"))
+				require.NoError(t, err)
+				require.NoError(t, writer.Close())
+				rawBucket = htmlBucket.(*s3BucketLarge)
+				getObjectInput = &s3.GetObjectInput{
+					Bucket: aws.String(s3BucketName),
+					Key:    aws.String(rawBucket.normalizeKey(key)),
+				}
+				getObjectOutput, err = rawBucket.svc.GetObject(getObjectInput)
+				require.NoError(t, err)
+				require.NotNil(t, getObjectOutput.ContentType)
+				assert.Equal(t, "text/html", *getObjectOutput.ContentType)
+			},
+		},
+		{
+			id: "TestCompressingWriter",
+			test: func(t *testing.T, b Bucket) {
+				rawBucket := b.(*s3BucketLarge)
+				s3Options := S3Options{
+					Region:     s3Region,
+					Name:       s3BucketName,
+					Prefix:     rawBucket.prefix,
+					MaxRetries: 20,
+					Compress:   true,
+				}
+				cb, err := NewS3MultiPartBucket(s3Options)
+				require.NoError(t, err)
+
+				data := []byte{}
+				for i := 0; i < 300; i++ {
+					data = append(data, []byte(newUUID())...)
+				}
+
+				uncompressedKey := newUUID()
+				w, err := b.Writer(ctx, uncompressedKey)
+				require.NoError(t, err)
+				n, err := w.Write(data)
+				require.NoError(t, err)
+				require.NoError(t, w.Close())
+				assert.Equal(t, len(data), n)
+
+				compressedKey := newUUID()
+				cw, err := cb.Writer(ctx, compressedKey)
+				require.NoError(t, err)
+				n, err = cw.Write(data)
+				require.NoError(t, err)
+				require.NoError(t, cw.Close())
+				assert.Equal(t, len(data), n)
+				_, ok := cw.(*compressingWriteCloser).s3Writer.(*largeWriteCloser)
+				assert.True(t, ok)
+
+				cr, err := cb.Get(ctx, compressedKey)
+				require.NoError(t, err)
+				s3CompressedData, err := ioutil.ReadAll(cr)
+				require.NoError(t, err)
+				assert.Equal(t, data, s3CompressedData)
+				r, err := cb.Get(ctx, uncompressedKey)
+				require.NoError(t, err)
+				s3UncompressedData, err := ioutil.ReadAll(r)
+				require.NoError(t, err)
+				assert.Equal(t, data, s3UncompressedData)
+			},
+		},
+	}
+}
+
+func cleanUpS3Bucket(name, prefix, region string) error {
+	svc, err := createS3Client(region)
+	if err != nil {
+		return errors.Wrap(err, "clean up failed")
+	}
+	deleteObjectsInput := &s3.DeleteObjectsInput{
+		Bucket: aws.String(name),
+		Delete: &s3.Delete{},
+	}
+	listInput := &s3.ListObjectsInput{
+		Bucket: aws.String(name),
+		Prefix: aws.String(prefix),
+	}
+	var result *s3.ListObjectsOutput
+
+	for {
+		result, err = svc.ListObjects(listInput)
+		if err != nil {
+			return errors.Wrap(err, "clean up failed")
+		}
+
+		for _, object := range result.Contents {
+			deleteObjectsInput.Delete.Objects = append(deleteObjectsInput.Delete.Objects, &s3.ObjectIdentifier{
+				Key: object.Key,
+			})
+		}
+
+		if deleteObjectsInput.Delete.Objects != nil {
+			_, err = svc.DeleteObjects(deleteObjectsInput)
+			if err != nil {
+				return errors.Wrap(err, "failed to delete S3 objects")
+			}
+			deleteObjectsInput.Delete = &s3.Delete{}
+		}
+
+		if *result.IsTruncated {
+			listInput.Marker = result.Contents[len(result.Contents)-1].Key
+		} else {
+			break
+		}
+	}
+
+	return nil
+}
+
+func createS3Client(region string) (*s3.S3, error) {
+	sess, err := session.NewSession(&aws.Config{Region: aws.String(region)})
+	if err != nil {
+		return nil, errors.Wrap(err, "problem connecting to AWS")
+	}
+	svc := s3.New(sess)
+	return svc, nil
+}
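For reference, the options these tests exercise compose in ordinary client code. A minimal usage sketch, assuming AWS credentials are available from the environment; the bucket name, prefix, and object key below are placeholders, not values from this change:

```go
package main

import (
	"context"
	"log"

	"github.com/evergreen-ci/pail"
)

func main() {
	ctx := context.Background()

	// Combine the typed permissions, explicit content type, and
	// transparent gzip compression verified by the tests above.
	opts := pail.S3Options{
		Region:      "us-east-1",
		Name:        "example-bucket", // hypothetical bucket
		Prefix:      "example-prefix",
		Permissions: pail.S3PermissionsPublicRead,
		ContentType: "text/html",
		Compress:    true, // payloads are gzipped before upload
	}

	bucket, err := pail.NewS3Bucket(opts)
	if err != nil {
		log.Fatal(err)
	}

	w, err := bucket.Writer(ctx, "index.html")
	if err != nil {
		log.Fatal(err)
	}
	if _, err = w.Write([]byte("<html>hello world</html>")); err != nil {
		log.Fatal(err)
	}
	// Close flushes the (compressed) payload to S3.
	if err = w.Close(); err != nil {
		log.Fatal(err)
	}
}
```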
diff --git a/vendor/github.com/evergreen-ci/pail/scripts/setup-credentials.sh b/vendor/github.com/evergreen-ci/pail/scripts/setup-credentials.sh
index f29bf9392ed..4beb2c63ec9 100644
--- a/vendor/github.com/evergreen-ci/pail/scripts/setup-credentials.sh
+++ b/vendor/github.com/evergreen-ci/pail/scripts/setup-credentials.sh
@@ -4,13 +4,18 @@ set -o errexit
 
 echo "building aws creds file!"
 
-rm -rf ~/.aws
-mkdir ~/.aws
-cat <<EOF > ~/.aws/config
+if [ "Windows_NT" == "$OS" ]; then
+    export AWS_DIR=$WORK_DIR/.aws
+else
+    export AWS_DIR=$HOME/.aws
+fi
+rm -rf $AWS_DIR
+mkdir $AWS_DIR
+cat <<EOF > $AWS_DIR/config
 [default]
 region = us-east-1
 EOF
-cat <<EOF > ~/.aws/credentials
+cat <<EOF > $AWS_DIR/credentials
 [default]
 aws_access_key_id = "$AWS_KEY"
 aws_secret_access_key = "$AWS_SECRET"
diff --git a/vendor/github.com/evergreen-ci/pail/util.go b/vendor/github.com/evergreen-ci/pail/util.go
index 78c97cfa027..0de37ee61cd 100644
--- a/vendor/github.com/evergreen-ci/pail/util.go
+++ b/vendor/github.com/evergreen-ci/pail/util.go
@@ -10,7 +10,9 @@ import (
 	"os"
 	"path/filepath"
 	"regexp"
+	"strings"
 
+	"github.com/mongodb/grip"
 	"github.com/pkg/errors"
 )
 
@@ -54,14 +56,32 @@ func walkLocalTree(ctx context.Context, prefix string) ([]string, error) {
 			return errors.New("operation canceled")
 		}
 
-		if info.IsDir() {
-			return nil
-		}
-
 		rel, err := filepath.Rel(prefix, path)
 		if err != nil {
 			return errors.Wrap(err, "problem getting relative path")
 		}
+
+		if info.Mode()&os.ModeSymlink != 0 {
+			symPath, err := filepath.EvalSymlinks(path)
+			if err != nil {
+				return errors.Wrap(err, "problem getting symlink path")
+			}
+			symTree, err := walkLocalTree(ctx, symPath)
+			if err != nil {
+				return errors.Wrap(err, "problem getting symlink tree")
+			}
+			for i := range symTree {
+				symTree[i] = filepath.Join(rel, symTree[i])
+			}
+			out = append(out, symTree...)
+
+			return nil
+		}
+
+		if info.IsDir() {
+			return nil
+		}
+
 		out = append(out, rel)
 		return nil
 	})
@@ -69,6 +89,9 @@ func walkLocalTree(ctx context.Context, prefix string) ([]string, error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "problem finding files")
 	}
+	if ctx.Err() != nil {
+		return nil, errors.New("operation canceled")
+	}
 
 	return out, nil
 }
@@ -105,3 +128,56 @@ func removeMatching(ctx context.Context, expression string, b Bucket) error {
 	}
 	return errors.Wrapf(b.RemoveMany(ctx, keys...), "failed to delete some objects matching '%s'", expression)
 }
+
+func consistentJoin(prefix, key string) string {
+	if prefix != "" {
+		return prefix + "/" + key
+	}
+	return key
+}
+
+func deleteOnPush(ctx context.Context, sourceFiles []string, remote string, bucket Bucket) error {
+	sourceFilesMap := map[string]bool{}
+	for _, fn := range sourceFiles {
+		sourceFilesMap[fn] = true
+	}
+
+	iter, err := bucket.List(ctx, remote)
+	if err != nil {
+		return err
+	}
+
+	toDelete := []string{}
+	for iter.Next(ctx) {
+		fn := strings.TrimPrefix(iter.Item().Name(), remote)
+		fn = strings.TrimPrefix(fn, "/")
+		fn = strings.TrimPrefix(fn, "\\") // because of Windows path separators
+
+		if !sourceFilesMap[fn] {
+			toDelete = append(toDelete, iter.Item().Name())
+		}
+	}
+
+	return bucket.RemoveMany(ctx, toDelete...)
+}
+
+func deleteOnPull(ctx context.Context, sourceFiles []string, local string) error {
+	sourceFilesMap := map[string]bool{}
+	for _, fn := range sourceFiles {
+		sourceFilesMap[fn] = true
+	}
+
+	destinationFiles, err := walkLocalTree(ctx, local)
+	if err != nil {
+		return errors.WithStack(err)
+	}
+
+	catcher := grip.NewBasicCatcher()
+	for _, fn := range destinationFiles {
+		if !sourceFilesMap[fn] {
+			catcher.Add(os.RemoveAll(filepath.Join(local, fn)))
+		}
+	}
+
+	return catcher.Resolve()
+}
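Both new helpers apply the same mirror rule: anything present at the destination but absent from the source list is deleted, remote keys for a push and local paths for a pull. A standalone sketch of that set-difference rule, with hypothetical file lists:

```go
package main

import "fmt"

// mirrorDeletes returns the destination entries absent from the source
// set -- the rule deleteOnPush applies to remote keys and deleteOnPull
// applies to local paths.
func mirrorDeletes(source, destination []string) []string {
	sourceSet := map[string]bool{}
	for _, fn := range source {
		sourceSet[fn] = true
	}

	toDelete := []string{}
	for _, fn := range destination {
		if !sourceSet[fn] {
			toDelete = append(toDelete, fn)
		}
	}
	return toDelete
}

func main() {
	local := []string{"a.txt", "b.txt"}
	remote := []string{"a.txt", "b.txt", "stale.txt"}

	// After a push with delete enabled, "stale.txt" would be removed remotely.
	fmt.Println(mirrorDeletes(local, remote)) // [stale.txt]
}
```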
diff --git a/vendor/github.com/evergreen-ci/pail/util_test.go b/vendor/github.com/evergreen-ci/pail/util_test.go
index f72f5bb73c0..4d0dfa76796 100644
--- a/vendor/github.com/evergreen-ci/pail/util_test.go
+++ b/vendor/github.com/evergreen-ci/pail/util_test.go
@@ -9,6 +9,7 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 func TestChecksum(t *testing.T) {
@@ -85,4 +86,24 @@ func TestWalkTree(t *testing.T) {
 		assert.NoError(t, err)
 		assert.NotNil(t, out)
 	})
+	t.Run("SymLink", func(t *testing.T) {
+		if runtime.GOOS == "windows" {
+			t.Skip("git symlinks do not work on windows")
+		}
+		vendor, err := walkLocalTree(ctx, "vendor")
+		require.NoError(t, err)
+
+		out, err := walkLocalTree(ctx, "testdata")
+		require.NoError(t, err)
+
+		fnMap := map[string]bool{}
+		for _, fn := range out {
+			fnMap[fn] = true
+		}
+		assert.True(t, fnMap["a_file.txt"])
+		assert.True(t, fnMap["z_file.txt"])
+		for _, fn := range vendor {
+			require.True(t, fnMap[filepath.Join("vendor", fn)])
+		}
+	})
 }
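The SymLink test depends on walkLocalTree resolving a link, walking the resolved tree recursively, and re-rooting the results under the link's own relative path. A self-contained sketch of the same technique using only the standard library; the testdata layout in main is hypothetical:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

// walkTree lists files under prefix, relative to prefix. Because
// filepath.Walk lstats entries and does not descend into symlinks,
// links are handled explicitly: resolve the target, walk it, and
// re-root its files under the link's relative path.
func walkTree(prefix string) ([]string, error) {
	out := []string{}
	err := filepath.Walk(prefix, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		rel, err := filepath.Rel(prefix, path)
		if err != nil {
			return err
		}

		if info.Mode()&os.ModeSymlink != 0 {
			resolved, err := filepath.EvalSymlinks(path)
			if err != nil {
				return err
			}
			subTree, err := walkTree(resolved)
			if err != nil {
				return err
			}
			for _, fn := range subTree {
				out = append(out, filepath.Join(rel, fn))
			}
			return nil
		}

		if info.IsDir() {
			return nil
		}

		out = append(out, rel)
		return nil
	})
	return out, err
}

func main() {
	// Hypothetical layout: ./testdata holds a_file.txt plus a symlink
	// "vendor" pointing elsewhere; the linked files are reported as if
	// they lived under testdata/vendor.
	files, err := walkTree("testdata")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(files)
}
```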
diff --git a/vendor/github.com/mongodb/anser/backup/backup.go b/vendor/github.com/mongodb/anser/backup/backup.go
new file mode 100644
index 00000000000..6da772fb32e
--- /dev/null
+++ b/vendor/github.com/mongodb/anser/backup/backup.go
@@ -0,0 +1,155 @@
+package backup
+
+import (
+	"context"
+	"io"
+	"path/filepath"
+
+	"github.com/evergreen-ci/birch"
+	"github.com/mongodb/anser/model"
+	"github.com/mongodb/grip"
+	"github.com/pkg/errors"
+	"go.mongodb.org/mongo-driver/mongo"
+	"go.mongodb.org/mongo-driver/mongo/options"
+)
+
+// WriterCreator provides a way to create writers (e.g., for files or
+// similar) to support writing backup payloads without requiring this
+// implementation to manage files or file interfaces.
+type WriterCreator func(context.Context, string) (io.WriteCloser, error)
+
+// Options describes the configuration of the backup of a single
+// collection. Query, Sort, and Limit are optional, but allow you to
+// constrain the backup.
+type Options struct {
+	NS          model.Namespace `bson:"ns" json:"ns" yaml:"ns"`
+	Target      WriterCreator   `bson:"-" json:"-" yaml:"-"`
+	Query       interface{}     `bson:"query" json:"query" yaml:"query"`
+	Sort        interface{}     `bson:"sort" json:"sort" yaml:"sort"`
+	Limit       int64           `bson:"limit" json:"limit" yaml:"limit"`
+	IndexesOnly bool            `bson:"indexes_only" json:"indexes_only" yaml:"indexes_only"`
+}
+
+// Collection creates a backup of a collection using the options to
+// describe how to filter or constrain the backup. The options' Target
+// value allows you to produce a writer where the backup will be collected.
+func Collection(ctx context.Context, client *mongo.Client, opts Options) error {
+	if err := opts.flushData(ctx, client); err != nil {
+		return errors.WithStack(err)
+	}
+
+	idxes, err := opts.getIndexData(ctx, client)
+	if err != nil {
+		return errors.WithStack(err)
+	}
+
+	if err := opts.writeIndexData(ctx, idxes); err != nil {
+		return errors.WithStack(err)
+	}
+
+	return nil
+}
+
+func (opts *Options) getQueryOpts() *options.FindOptions {
+	qopts := options.Find()
+	if opts.Sort != nil {
+		qopts.SetSort(opts.Sort)
+	}
+	if opts.Limit > 0 {
+		qopts.SetLimit(opts.Limit)
+	}
+	if opts.Query == nil {
+		opts.Query = struct{}{}
+	}
+	return qopts
+}
+
+func (opts *Options) getCursor(ctx context.Context, client *mongo.Client) (*mongo.Cursor, error) {
+	cursor, err := client.Database(opts.NS.DB).Collection(opts.NS.Collection).Find(ctx, opts.Query, opts.getQueryOpts())
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+
+	return cursor, nil
+}
+
+func (opts *Options) flushData(ctx context.Context, client *mongo.Client) error {
+	if opts.IndexesOnly {
+		return nil
+	}
+
+	catcher := grip.NewCatcher()
+
+	cursor, err := opts.getCursor(ctx, client)
+	if err != nil {
+		return errors.WithStack(err)
+	}
+	defer func() { catcher.Add(cursor.Close(ctx)) }()
+
+	target, err := opts.Target(ctx, filepath.Join(opts.NS.DB, opts.NS.Collection)+".bson")
+	if err != nil {
+		return errors.WithStack(err)
+	}
+	defer func() { catcher.Add(target.Close()) }()
+
+	for cursor.Next(ctx) {
+		_, err := target.Write(cursor.Current)
+		if err != nil {
+			catcher.Add(err)
+			break
+		}
+	}
+
+	catcher.Add(cursor.Err())
+	return catcher.Resolve()
+}
+
+func (opts *Options) getIndexData(ctx context.Context, client *mongo.Client) (*birch.Array, error) {
+	catcher := grip.NewCatcher()
+
+	cursor, err := client.Database(opts.NS.DB).Collection(opts.NS.Collection).Indexes().List(ctx)
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+	defer func() { catcher.Add(cursor.Close(ctx)) }()
+
+	indexes := birch.NewArray()
+	for cursor.Next(ctx) {
+		doc, err := birch.DC.ReaderErr(birch.Reader(cursor.Current))
+		if err != nil {
+			catcher.Add(err)
+			break
+		}
+		indexes.Append(birch.VC.Document(doc))
+	}
+
+	catcher.Add(cursor.Err())
+
+	return indexes, catcher.Resolve()
+}
+
+func (opts *Options) writeIndexData(ctx context.Context, indexes *birch.Array) error {
+	out, err := birch.DC.Elements(
+		birch.EC.SubDocument("options", birch.DC.New()),
+		birch.EC.Array("indexes", indexes),
+		birch.EC.String("uuid", ""),
+	).MarshalJSON()
+	if err != nil {
+		return errors.WithStack(err)
+	}
+
+	target, err := opts.Target(ctx, filepath.Join(opts.NS.DB, opts.NS.Collection)+".metadata.json")
+	if err != nil {
+		return errors.WithStack(err)
+	}
+
+	catcher := grip.NewCatcher()
+	defer func() { catcher.Add(target.Close()) }()
+	if _, err = target.Write(out); err != nil {
+		catcher.Add(err)
+	}
+
+	return catcher.Resolve()
+}
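Because Options.Target is just a WriterCreator, callers decide where backup payloads land. A minimal usage sketch that dumps a collection to local files, assuming a mongod on localhost; fileTarget and the dump directory are illustrative, not part of the package:

```go
package main

import (
	"context"
	"io"
	"log"
	"os"
	"path/filepath"

	"github.com/mongodb/anser/backup"
	"github.com/mongodb/anser/model"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// fileTarget is a WriterCreator that writes each backup payload to a
// file under dir, creating parent directories as needed.
func fileTarget(dir string) backup.WriterCreator {
	return func(_ context.Context, name string) (io.WriteCloser, error) {
		path := filepath.Join(dir, name)
		if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
			return nil, err
		}
		return os.Create(path)
	}
}

func main() {
	ctx := context.Background()

	client, err := mongo.Connect(ctx, options.Client().ApplyURI("mongodb://localhost:27017"))
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = client.Disconnect(ctx) }()

	// Produces dump/foo/bar.bson and dump/foo/bar.metadata.json.
	err = backup.Collection(ctx, client, backup.Options{
		NS:     model.Namespace{DB: "foo", Collection: "bar"},
		Target: fileTarget("dump"),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```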
"go.mongodb.org/mongo-driver/mongo/options" +) + +type closableBuffer struct { + bytes.Buffer +} + +func (cb *closableBuffer) Close() error { return nil } + +type fileCache map[string]*closableBuffer + +func (mf fileCache) Target(ctx context.Context, name string) (io.WriteCloser, error) { + if buf, ok := mf[name]; ok { + return buf, nil + } + + mf[name] = &closableBuffer{Buffer: bytes.Buffer{}} + + return mf[name], nil +} + +func (mf fileCache) TargetErrors(context.Context, string) (io.WriteCloser, error) { + return nil, errors.New("always") +} + +func newDocument(doc *birch.Document, numKeys, otherNum int) *birch.Document { + if doc == nil { + doc = birch.DC.Make(numKeys * 3) + } + + for i := 0; i < numKeys; i++ { + doc.Append(birch.EC.Int64(fmt.Sprintln(numKeys, otherNum), rand.Int63n(int64(numKeys)*1))) + doc.Append(birch.EC.Double(fmt.Sprintln("float", numKeys, otherNum), rand.Float64())) + + if otherNum%5 == 0 { + ar := birch.NewArray() + for ii := int64(0); i < otherNum; i++ { + ar.Append(birch.VC.Int64(rand.Int63n(1 + ii*int64(numKeys)))) + } + doc.Append(birch.EC.Array(fmt.Sprintln("first", numKeys, otherNum), ar)) + } + + if otherNum%3 == 0 { + doc.Append(birch.EC.SubDocument(fmt.Sprintln("second", numKeys, otherNum), newDocument(nil, otherNum, otherNum))) + } + + if otherNum%12 == 0 { + doc.Append(birch.EC.SubDocument(fmt.Sprintln("third", numKeys, otherNum), newDocument(nil, otherNum, 2*otherNum))) + } + } + + return doc +} + +func produceDocuments(doc *birch.Document, num int) []interface{} { + out := make([]interface{}, num) + + if doc == nil { + doc = birch.DC.New() + } + + for idx := range out { + out[idx] = newDocument(doc.Copy(), num, 10*num) + } + + return out +} + +func TestBackup(t *testing.T) { + files := fileCache{} + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + client, err := mongo.NewClient(options.Client().ApplyURI("mongodb://localhost:27017").SetConnectTimeout(time.Second)) + require.NoError(t, err) + + err = client.Connect(ctx) + require.NoError(t, err) + + defer func() { + client.Disconnect(ctx) + }() + + t.Run("SimpleRoundTrip", func(t *testing.T) { + defer func() { require.NoError(t, client.Database("foo").Collection("bar").Drop(ctx)) }() + + res, err := client.Database("foo").Collection("bar").InsertMany(ctx, produceDocuments(nil, 10)) + require.NoError(t, err) + require.Len(t, res.InsertedIDs, 10) + + err = Collection(ctx, client, Options{ + NS: model.Namespace{DB: "foo", Collection: "bar"}, + Target: files.Target, + }) + require.NoError(t, err) + require.Contains(t, files, "foo/bar.bson") + require.Contains(t, files, "foo/bar.metadata.json") + + buf := files["foo/bar.bson"] + count := 0 + for { + doc, err := birch.DC.ReadFromErr(buf) + if err == io.EOF { + break + } + assert.NoError(t, err) + if doc == nil { + break + } + count++ + } + assert.Equal(t, 10, count) + }) + t.Run("Filter", func(t *testing.T) { + defer func() { require.NoError(t, client.Database("foo").Collection("baz").Drop(ctx)) }() + + res, err := client.Database("foo").Collection("baz").InsertMany(ctx, produceDocuments(birch.DC.Elements(birch.EC.Int("a", 1)), 10)) + require.NoError(t, err) + require.Len(t, res.InsertedIDs, 10) + + res, err = client.Database("foo").Collection("baz").InsertMany(ctx, produceDocuments(birch.DC.Elements(birch.EC.Int("b", 1)), 10)) + require.NoError(t, err) + require.Len(t, res.InsertedIDs, 10) + + err = Collection(ctx, client, Options{ + NS: model.Namespace{DB: "foo", Collection: "baz"}, + Target: files.Target, + Query: 
+		})
+		require.NoError(t, err)
+		require.Contains(t, files, "foo/baz.bson")
+		require.Contains(t, files, "foo/baz.metadata.json")
+
+		count, err := client.Database("foo").Collection("baz").EstimatedDocumentCount(ctx)
+		require.NoError(t, err)
+		assert.EqualValues(t, 20, count)
+
+		buf := files["foo/baz.bson"]
+		count = 0
+		for {
+			doc, err := birch.DC.ReadFromErr(buf)
+			if err == io.EOF {
+				break
+			}
+			assert.NoError(t, err)
+			if doc == nil {
+				break
+			}
+			count++
+		}
+		assert.EqualValues(t, 10, count)
+	})
+	t.Run("Empty", func(t *testing.T) {
+		defer func() { require.NoError(t, client.Database("foo").Collection("fuz").Drop(ctx)) }()
+
+		err = Collection(ctx, client, Options{
+			NS:     model.Namespace{DB: "foo", Collection: "fuz"},
+			Target: files.Target,
+		})
+		require.NoError(t, err)
+		require.Contains(t, files, "foo/fuz.bson")
+		require.Contains(t, files, "foo/fuz.metadata.json")
+
+		buf, ok := files["foo/fuz.bson"]
+		require.True(t, ok)
+		count := 0
+		for {
+			doc, err := birch.DC.ReadFromErr(buf)
+			if err == io.EOF {
+				break
+			}
+			assert.NoError(t, err)
+			if doc == nil {
+				break
+			}
+			count++
+		}
+		assert.EqualValues(t, 0, count)
+	})
+	t.Run("IndexesOnly", func(t *testing.T) {
+		defer func() { require.NoError(t, client.Database("foo").Collection("bat").Drop(ctx)) }()
+
+		res, err := client.Database("foo").Collection("bat").InsertMany(ctx, produceDocuments(nil, 10))
+		require.NoError(t, err)
+		require.Len(t, res.InsertedIDs, 10)
+
+		err = Collection(ctx, client, Options{
+			NS:          model.Namespace{DB: "foo", Collection: "bat"},
+			Target:      files.Target,
+			IndexesOnly: true,
+		})
+		require.NoError(t, err)
+		require.NotContains(t, files, "foo/bat.bson")
+		require.Contains(t, files, "foo/bat.metadata.json")
+
+		doc := birch.DC.New()
+		err = json.Unmarshal(files["foo/bat.metadata.json"].Buffer.Bytes(), doc)
+		require.NoError(t, err)
+
+		require.Equal(t, 0, doc.Lookup("options").MutableDocument().Len())
+		require.Equal(t, 1, doc.Lookup("indexes").MutableArray().Len())
+		require.Zero(t, doc.Lookup("uuid").StringValue())
+	})
+	t.Run("ProblemGettingTarget", func(t *testing.T) {
+		err = Collection(ctx, client, Options{
+			NS:     model.Namespace{DB: "foo", Collection: "noop"},
+			Target: files.TargetErrors,
+		})
+		require.Error(t, err)
+		require.NotContains(t, files, "foo/noop.bson")
+		require.NotContains(t, files, "foo/noop.metadata.json")
+	})
+	t.Run("QueryOptions", func(t *testing.T) {
+		opts := &Options{
+			Sort:  birch.DC.Elements(birch.EC.Int("a", 1)),
+			Limit: 200,
+		}
+		assert.Nil(t, opts.Query)
+		qopts := opts.getQueryOpts()
+		assert.NotNil(t, opts.Query)
+		assert.Equal(t, struct{}{}, opts.Query)
+		assert.EqualValues(t, 200, *qopts.Limit)
+		assert.EqualValues(t, opts.Sort, qopts.Sort)
+	})
+}
diff --git a/vendor/github.com/mongodb/anser/glide.lock b/vendor/github.com/mongodb/anser/glide.lock
index a8779a0bd98..8c88f271d2c 100644
--- a/vendor/github.com/mongodb/anser/glide.lock
+++ b/vendor/github.com/mongodb/anser/glide.lock
@@ -2,7 +2,7 @@ hash: ""
 updated: 2019-11-07T14:42:47.264471-05:00
 imports:
 - name: github.com/evergreen-ci/birch
-  version: 84e679301eb0478dae95bdebc1c6debdff1f76e2
+  version: 3a26bb67719ad6a9e7daed059abd5d018586f3cd
 - name: github.com/mongodb/amboy
   version: e5548953650b5d4cc7b6d7aba4cb1b2771e35342
 - name: github.com/mongodb/ftdc
diff --git a/vendor/github.com/mongodb/anser/makefile b/vendor/github.com/mongodb/anser/makefile
index 22310f58735..fbbc0bdf056 100644
--- a/vendor/github.com/mongodb/anser/makefile
+++ b/vendor/github.com/mongodb/anser/makefile
@@ -1,7 +1,7 @@
 # start project configuration
 name := anser
 buildDir := build
-packages := $(name) mock model db bsonutil client apm
+packages := $(name) mock model db bsonutil client apm backup
 orgPath := github.com/mongodb
 projectPath := $(orgPath)/$(name)
 # end project configuration
[Admin settings UI template: a service-flags table of Enabled/Disabled toggles covering Dispatch tasks, Create and provision hosts, Monitor hosts and tasks, Alert for spawn host expiration, Start agents on hosts, Track GitHub repositories, Schedule tasks, Test GitHub pull requests, Update CLI, Collect background statistics, Persist task and test logs, Cache historical statistics, Cache historical statistics endpoint, Cache historical statistics old tasks, Process Commit Queue, Planner, Host Allocator, and Disaster Recovery Backup.]