-
Notifications
You must be signed in to change notification settings - Fork 29
/
workspace.go
771 lines (684 loc) · 24.4 KB
/
workspace.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
// Package workspace contains a pipeline for processing a Go workspace.
package workspace
import (
"context"
"fmt"
"go/build"
"log"
"sync"
"github.com/bradfitz/iter"
"github.com/shurcooL/Go-Package-Store"
"github.com/shurcooL/Go-Package-Store/presenter"
"github.com/shurcooL/gostatus/status"
"github.com/shurcooL/vcsstate"
"golang.org/x/tools/go/vcs"
)
// GoPackageList is a list of Go packages.
// It's implemented as a slice and map that are kept in sync, with a mutex.
type GoPackageList struct {
	// TODO: Merge the List and OrderedList into a single struct to better communicate that it's a single data structure.

	// Mutex guards OrderedList and List, which must be kept in sync.
	sync.Mutex
	OrderedList []*RepoPresentation          // OrderedList has the same contents as List, but gives it a stable order.
	List        map[string]*RepoPresentation // Map key is repoRoot.
}
// RepoPresentation represents a repository update presentation.
type RepoPresentation struct {
	Repo         *gps.Repo               // Repo is the underlying repository, with its local and remote state.
	Presentation *presenter.Presentation // Presentation holds the display details for the update.

	// UpdateState tracks the progress of applying this update.
	UpdateState UpdateState
}

// UpdateState represents the state of an update.
//
// TODO: Dedup.
type UpdateState uint8

const (
	// Available represents an available update.
	Available UpdateState = iota

	// Updating represents an update in progress.
	Updating

	// Updated represents a completed update.
	Updated
)
// Pipeline for processing a Go workspace, where each repo has local and remote components.
type Pipeline struct {
	wd string // Working directory. Used to resolve relative import paths.

	// presenters are presenters registered with RegisterPresenter.
	presenters []presenter.Presenter

	// Stage 1 input channels, one per way of adding Go packages to the pipeline.
	importPaths         chan string
	importPathRevisions chan importPathRevision
	rootRevisionLatests chan rootRevisionLatest
	repositories        chan LocalRepo
	subrepos            chan Subrepo

	// unique is the output of finding unique repositories from diverse possible inputs.
	unique chan *gps.Repo
	// processedFiltered is the output of processed repos (complete with local and remote revisions),
	// with just enough information to decide if an update should be displayed.
	processedFiltered chan *gps.Repo
	// presented is the output of processed and presented repos (complete with presenter.Presentation).
	presented chan *RepoPresentation

	// reposMu guards repos, which is accessed by multiple stage 1 workers concurrently.
	reposMu sync.Mutex
	repos   map[string]*gps.Repo // Map key is the import path corresponding to the root of the repository.

	// newObserver carries requests for new observer channels; served by the run goroutine.
	newObserver chan observerRequest
	// observers is owned exclusively by the run goroutine.
	observers map[chan *RepoPresentation]struct{}

	// GoPackageList accumulates all presented repositories.
	GoPackageList *GoPackageList
}

// observerRequest is a request for a new observer channel;
// run replies on Response with a channel of repo presentations.
type observerRequest struct {
	Response chan chan *RepoPresentation
}
// NewPipeline creates a Pipeline with working directory wd.
// Working directory is used to resolve relative import paths.
//
// First, available presenters should be registered via RegisterPresenter.
// Then Go packages can be added via various means. Call Done once done adding.
// Processing begins as soon as Go packages are added to the pipeline.
// Results can be accessed via RepoPresentations at any time, as often as needed.
func NewPipeline(wd string) *Pipeline {
	p := &Pipeline{
		wd:                  wd,
		importPaths:         make(chan string, 64),
		importPathRevisions: make(chan importPathRevision, 64),
		rootRevisionLatests: make(chan rootRevisionLatest, 64),
		repositories:        make(chan LocalRepo, 64),
		subrepos:            make(chan Subrepo, 64),
		unique:              make(chan *gps.Repo, 64),
		processedFiltered:   make(chan *gps.Repo, 64),
		presented:           make(chan *RepoPresentation, 64),
		repos:               make(map[string]*gps.Repo),
		newObserver:         make(chan observerRequest),
		observers:           make(map[chan *RepoPresentation]struct{}),
		GoPackageList:       &GoPackageList{List: make(map[string]*RepoPresentation)},
	}

	// It is a lot of work to
	// find all Go packages in one's GOPATH workspace (or vendor.json file),
	// then group them by VCS repository,
	// and determine their local state (current revision, etc.),
	// then determine their remote state (latest remote revision, etc.),
	// then hit an API like GitHub or Gitiles to fetch descriptions of all commits
	// between the current local revision and latest remote revision for display purposes.
	//
	// That work is heavily blocked on local disk IO and network IO,
	// and also consists of dependencies. E.g., we can't ask for commit descriptions
	// until we know both the local and remote revisions, and we can't figure out local
	// revisions before we know which repository a Go package belongs to.
	//
	// Luckily, Go is great at concurrency, ʕ◔ϖ◔ʔ
	// which also makes parallelism easy!
	// (See https://blog.golang.org/concurrency-is-not-parallelism.)
	//
	// Let's make gophers do all this work for us in multiple interconnected stages,
	// and parallelize each stage with many worker goroutines.

	// Stage 1, grouping all inputs into a set of unique repositories.
	//
	// We populate the workspace from any of the 5 sources:
	//
	// 	- via AddImportPath - import paths of Go packages from the GOPATH workspace.
	// 	- via AddRevision - import paths of Go packages and their revisions from vendor.json or Godeps.json.
	// 	- via AddRevisionLatest - roots of Go packages, their revisions and latest versions via dep.
	// 	- via AddRepository - by directly adding local VCS repositories.
	// 	- via AddSubrepo - by directly adding remote subrepos.
	//
	// The goal of processing in stage 1 is to take in diverse possible inputs
	// and convert them into a unique set of repositories for further processing by next stages.
	// When finished, all unique repositories are sent to p.unique channel
	// and the channel is closed.
	{
		// A single WaitGroup covers all five kinds of stage 1 workers;
		// p.unique can be closed as soon as every one of them has finished.
		var wg sync.WaitGroup
		for range iter.N(8) {
			wg.Add(1)
			go p.importPathWorker(&wg)
		}
		for range iter.N(8) {
			wg.Add(1)
			go p.importPathRevisionWorker(&wg)
		}
		for range iter.N(8) {
			wg.Add(1)
			go p.rootRevisionLatestWorker(&wg)
		}
		for range iter.N(8) {
			wg.Add(1)
			go p.repositoriesWorker(&wg)
		}
		for range iter.N(8) {
			wg.Add(1)
			go p.subreposWorker(&wg)
		}
		go func() {
			wg.Wait()
			close(p.unique)
		}()
	}

	// Stage 2, figuring out which repositories have updates available.
	//
	// We compute repository remote revision (and local if needed)
	// in order to figure out if repositories should be presented,
	// or filtered out (for example, because there are no updates available).
	// When finished, all non-filtered-out repositories are sent to p.processedFiltered channel
	// and the channel is closed.
	{
		var wg sync.WaitGroup
		for range iter.N(8) {
			wg.Add(1)
			go p.processFilterWorker(&wg)
		}
		go func() {
			wg.Wait()
			close(p.processedFiltered)
		}()
	}

	// Stage 3, filling in the update presentation information.
	//
	// We talk to remote APIs to fill in the missing presentation details
	// that are not available from VCS (unless we fetch commits, but we choose not to that).
	// Primarily, we get the commit messages for all the new commits that are available.
	// When finished, all repositories complete with full presentation information
	// are sent to p.presented channel and the channel is closed.
	{
		var wg sync.WaitGroup
		for range iter.N(8) {
			wg.Add(1)
			go p.presentWorker(&wg)
		}
		go func() {
			wg.Wait()
			close(p.presented)
		}()
	}

	// run consumes p.presented, maintains p.GoPackageList, and serves observers.
	go p.run()

	return p
}
// RegisterPresenter registers a presenter.
// Presenters are consulted in the same order that they were registered.
// It should be called before Go packages are added to the pipeline.
func (p *Pipeline) RegisterPresenter(pr presenter.Presenter) {
	p.presenters = append(p.presenters, pr)
}

// AddImportPath adds a package with specified import path for processing.
func (p *Pipeline) AddImportPath(importPath string) {
	p.importPaths <- importPath
}
// AddRevision adds a package with specified import path and revision for processing.
func (p *Pipeline) AddRevision(importPath string, revision string) {
	ipr := importPathRevision{
		importPath: importPath,
		revision:   revision,
	}
	p.importPathRevisions <- ipr
}
// importPathRevision is a stage 1 input: an import path
// pinned to a specific revision (e.g., from vendor.json or Godeps.json).
type importPathRevision struct {
	importPath string
	revision   string
}
// AddRevisionLatest adds a package with specified root, revision and latest.
func (p *Pipeline) AddRevisionLatest(root, revision, latest string) {
	rrl := rootRevisionLatest{
		root:     root,
		revision: revision,
		latest:   latest,
	}
	p.rootRevisionLatests <- rrl
}
// rootRevisionLatest is a stage 1 input: a repository root with its
// current revision and its latest available version (e.g., via dep).
type rootRevisionLatest struct {
	root     string
	revision string
	latest   string
}

// LocalRepo represents a local repository on disk.
type LocalRepo struct {
	Path string // Full path to repository on disk.
	Root string // Import path corresponding to the root of the repository.
	VCS  *vcs.Cmd // VCS is the version control system command for this repository.
}
// AddRepository adds the specified repository for processing.
func (p *Pipeline) AddRepository(r LocalRepo) {
	p.repositories <- r
}

// Subrepo represents a "virtual" sub-repository inside a larger actual VCS repository.
type Subrepo struct {
	Root      string             // Root is the import path corresponding to the root of the subrepo.
	RemoteVCS vcsstate.RemoteVCS // RemoteVCS allows getting the remote state of the VCS.
	RemoteURL string             // RemoteURL is the remote URL, including scheme.
	Revision  string             // Revision is the current local revision of the subrepo.
}

// AddSubrepo adds the specified Subrepo for processing.
func (p *Pipeline) AddSubrepo(s Subrepo) {
	p.subrepos <- s
}
// Done should be called after the workspace is finished being populated.
// It closes all stage 1 input channels, allowing the pipeline
// workers to drain their inputs and shut down.
func (p *Pipeline) Done() {
	close(p.importPaths)
	close(p.importPathRevisions)
	close(p.rootRevisionLatests)
	close(p.repositories)
	close(p.subrepos)
}

// AddPresented adds a RepoPresentation to the pipeline.
// It enables mocks to directly add presented repos.
func (p *Pipeline) AddPresented(r *RepoPresentation) {
	p.presented <- r
}
// RepoPresentations returns a channel of all repo presentations.
// Repo presentations that are ready will be sent immediately.
// The remaining repo presentations will be sent onto the channel
// as they become available. Once all repo presentations have been
// sent, the channel will be closed. Therefore, iterating over
// the channel may block until all processing is done, but it
// will effectively return all repo presentations as soon as possible.
//
// It's safe to call RepoPresentations at any time and concurrently
// to get multiple such channels.
//
// The request is served by the run goroutine, which replies with
// a channel pre-filled with all currently available presentations.
func (p *Pipeline) RepoPresentations() <-chan *RepoPresentation {
	response := make(chan chan *RepoPresentation)
	p.newObserver <- observerRequest{Response: response}
	return <-response
}
// run is the pipeline's coordinating goroutine. It consumes p.presented,
// maintains p.GoPackageList, and serves observer requests from p.newObserver.
// It is the sole accessor of p.observers, so that map needs no locking.
func (p *Pipeline) run() {
Outer:
	for {
		select {
		// New repoPresentation available.
		case repoPresentation, ok := <-p.presented:
			// We're done streaming.
			if !ok {
				break Outer
			}

			// Append repoPresentation to current list.
			p.GoPackageList.Lock()
			p.GoPackageList.OrderedList = append(p.GoPackageList.OrderedList, repoPresentation)
			moveUp(p.GoPackageList.OrderedList, repoPresentation)
			p.GoPackageList.List[repoPresentation.Repo.Root] = repoPresentation
			p.GoPackageList.Unlock()

			// Send new repoPresentation to all existing observers.
			for ch := range p.observers {
				// TODO: If an observer isn't listening, this will block. Should we defend against that here?
				ch <- repoPresentation
			}
		// New observer request.
		case req := <-p.newObserver:
			// Buffer the observer channel so every currently available
			// presentation can be sent without blocking.
			p.GoPackageList.Lock()
			ch := make(chan *RepoPresentation, len(p.GoPackageList.OrderedList))
			for _, repoPresentation := range p.GoPackageList.OrderedList {
				ch <- repoPresentation
			}
			p.GoPackageList.Unlock()

			p.observers[ch] = struct{}{}

			req.Response <- ch
		}
	}

	// At this point, streaming has finished, so finish up existing observers.
	for ch := range p.observers {
		close(ch)
	}
	p.observers = nil

	// Respond to new observer requests directly.
	// Each late observer gets a closed channel pre-filled with all presentations.
	for req := range p.newObserver {
		p.GoPackageList.Lock()
		ch := make(chan *RepoPresentation, len(p.GoPackageList.OrderedList))
		for _, repoPresentation := range p.GoPackageList.OrderedList {
			ch <- repoPresentation
		}
		p.GoPackageList.Unlock()
		close(ch)
		req.Response <- ch
	}
}
// moveUp moves the last entry of orderedList up above all trailing entries
// whose state is Updated, so non-updated entries stay grouped before updated
// ones. It does nothing if rp itself is already updated.
func moveUp(orderedList []*RepoPresentation, rp *RepoPresentation) {
	if rp.UpdateState == Updated {
		return
	}
	// Bubble the last element toward the front while its predecessor is updated.
	i := len(orderedList) - 1
	for i > 0 {
		if orderedList[i-1].UpdateState != Updated {
			break
		}
		orderedList[i-1], orderedList[i] = orderedList[i], orderedList[i-1]
		i--
	}
}
// importPathWorker sends unique repositories to phase 2.
// It takes import paths off p.importPaths, resolves each to its containing
// VCS repository on disk, and sends repositories not already tracked in
// p.repos to p.unique (first add wins; duplicates are dropped).
func (p *Pipeline) importPathWorker(wg *sync.WaitGroup) {
	defer wg.Done()
	for importPath := range p.importPaths {
		// Determine repo root.
		// This is potentially somewhat slow.
		bpkg, err := build.Import(importPath, p.wd, build.FindOnly|build.IgnoreVendor) // THINK: This (build.FindOnly) may find repos even when importPath has no actual package... Is that okay?
		if err != nil {
			log.Println("build.Import:", err)
			continue
		}
		if bpkg.Goroot {
			// Go-Package-Store has no support for updating packages in GOROOT, so skip those.
			continue
		}
		vcsCmd, root, err := vcs.FromDir(bpkg.Dir, bpkg.SrcRoot)
		if err != nil {
			// Go package not under VCS.
			continue
		}
		// Named vs rather than vcs to avoid shadowing the vcs package.
		vs, err := vcsstate.NewVCS(vcsCmd)
		if err != nil {
			log.Printf("repo %v not supported by vcsstate: %v", root, err)
			continue
		}
		// repo is non-nil only if this repository hasn't been seen before.
		var repo *gps.Repo
		p.reposMu.Lock()
		if _, ok := p.repos[root]; !ok {
			repo = &gps.Repo{
				Root: root,
				// This is a local repository inside GOPATH. Set all of its fields.
				VCS:  vs,
				Path: bpkg.Dir,
				Cmd:  vcsCmd,
				// TODO: Maybe keep track of import paths inside, etc.
			}
			p.repos[root] = repo
			//} else {
			// TODO: Maybe keep track of import paths inside, etc.
		}
		p.reposMu.Unlock()
		// If new repo, send off to phase 2 channel.
		if repo != nil {
			p.unique <- repo
		}
	}
}
// importPathRevisionWorker sends unique repositories to phase 2.
// It takes import path + revision pairs off p.importPathRevisions,
// resolves each import path to its remote repository, and sends repositories
// not already tracked in p.repos to p.unique (first add wins).
func (p *Pipeline) importPathRevisionWorker(wg *sync.WaitGroup) {
	defer wg.Done()
	for ipr := range p.importPathRevisions {
		// Determine repo root.
		// This is potentially somewhat slow.
		rr, err := vcs.RepoRootForImportPath(ipr.importPath, false)
		if err != nil {
			log.Printf("failed to dynamically determine repo root for %v: %v\n", ipr.importPath, err)
			continue
		}
		remoteVCS, err := vcsstate.NewRemoteVCS(rr.VCS)
		if err != nil {
			log.Printf("repo %v not supported by vcsstate: %v\n", rr.Root, err)
			continue
		}
		// repo is non-nil only if this repository hasn't been seen before.
		var repo *gps.Repo
		p.reposMu.Lock()
		if _, ok := p.repos[rr.Root]; !ok {
			repo = &gps.Repo{
				Root: rr.Root,
				// This is a remote repository only. Set all of its fields.
				RemoteVCS: remoteVCS,
				RemoteURL: rr.Repo,
			}
			repo.Local.Revision = ipr.revision
			repo.Remote.RepoURL = rr.Repo
			p.repos[rr.Root] = repo
		}
		p.reposMu.Unlock()
		// If new repo, send off to phase 2 channel.
		if repo != nil {
			p.unique <- repo
		}
	}
}
// rootRevisionLatestWorker sends unique repositories to phase 2.
// It consumes root/revision/latest triples from p.rootRevisionLatests
// and forwards repositories that haven't been seen before to p.unique.
func (p *Pipeline) rootRevisionLatestWorker(wg *sync.WaitGroup) {
	defer wg.Done()
	for rrl := range p.rootRevisionLatests {
		// Determine repo root.
		// This is potentially somewhat slow.
		rr, err := vcs.RepoRootForImportPath(rrl.root, false)
		if err != nil {
			log.Printf("failed to dynamically determine repo root for %v: %v\n", rrl.root, err)
			continue
		}
		if rr.Root != rrl.root {
			log.Printf("dynamically determined repo root (%q) doesn't match input root (%q)\n", rr.Root, rrl.root)
			continue
		}
		// repo stays nil when this root is already tracked.
		var repo *gps.Repo
		p.reposMu.Lock()
		if _, ok := p.repos[rr.Root]; !ok {
			repo = &gps.Repo{Root: rr.Root}
			repo.Local.Revision = rrl.revision
			repo.Remote.Revision = rrl.latest
			repo.Remote.RepoURL = rr.Repo
			p.repos[rr.Root] = repo
		}
		p.reposMu.Unlock()
		// Only newly discovered repositories go to phase 2.
		if repo != nil {
			p.unique <- repo
		}
	}
}
// repositoriesWorker sends unique repositories to phase 2.
// It takes local repositories off p.repositories and sends repositories
// not already tracked in p.repos to p.unique (first add wins).
func (p *Pipeline) repositoriesWorker(wg *sync.WaitGroup) {
	defer wg.Done()
	for r := range p.repositories {
		vcsCmd, root := r.VCS, r.Root
		// Named vs rather than vcs to avoid shadowing the vcs package.
		vs, err := vcsstate.NewVCS(vcsCmd)
		if err != nil {
			log.Printf("repo %v not supported by vcsstate: %v", root, err)
			continue
		}
		// repo is non-nil only if this repository hasn't been seen before.
		var repo *gps.Repo
		p.reposMu.Lock()
		if _, ok := p.repos[root]; !ok {
			repo = &gps.Repo{
				Root: root,
				// This is a local repository inside GOPATH. Set all of its fields.
				VCS:  vs,
				Path: r.Path,
				Cmd:  vcsCmd,
			}
			p.repos[root] = repo
		}
		p.reposMu.Unlock()
		// If new repo, send off to phase 2 channel.
		if repo != nil {
			p.unique <- repo
		}
	}
}
// subreposWorker sends unique subrepos to phase 2.
// It takes Subrepo values off p.subrepos, resolves each root to its remote
// repository, and sends repositories not already tracked in p.repos to p.unique.
func (p *Pipeline) subreposWorker(wg *sync.WaitGroup) {
	defer wg.Done()
	for r := range p.subrepos {
		// Determine repo root.
		// This is potentially somewhat slow.
		rr, err := vcs.RepoRootForImportPath(r.Root, false)
		if err != nil {
			log.Printf("failed to dynamically determine repo root for %v: %v\n", r.Root, err)
			continue
		}
		// repo is non-nil only if this repository hasn't been seen before.
		var repo *gps.Repo
		p.reposMu.Lock()
		if _, ok := p.repos[r.Root]; !ok {
			repo = &gps.Repo{
				Root: r.Root,
				// This is a remote repository only. Set all of its fields.
				RemoteVCS: r.RemoteVCS,
				RemoteURL: r.RemoteURL,
			}
			repo.Local.RemoteURL = r.RemoteURL // TODO: Consider having r.RemoteURL take precedence over rr.Repo. But need to make that play nicely with the updaters; see TODO at bottom of gps.Repo struct.
			repo.Local.Revision = r.Revision
			repo.Remote.RepoURL = rr.Repo
			p.repos[r.Root] = repo
		}
		p.reposMu.Unlock()
		// If new repo, send off to phase 2 channel.
		if repo != nil {
			p.unique <- repo
		}
	}
}
// processFilterWorker computes repository remote revision (and local if needed)
// in order to figure out if repositories should be presented.
// Repositories that pass shouldPresentUpdate are sent to p.processedFiltered;
// the rest are dropped (with a logged reason when one is worth mentioning).
func (p *Pipeline) processFilterWorker(wg *sync.WaitGroup) {
	defer wg.Done()
	for r := range p.unique {
		// Determine remote revision.
		// This is slow because it requires a network operation.
		switch {
		case r.VCS != nil:
			// Local repository: query both remote and local VCS state.
			var err error
			r.Remote.Branch, r.Remote.Revision, err = r.VCS.RemoteBranchAndRevision(r.Path)
			if err != nil {
				log.Printf("skipping %q because of remote error:\n\t%v\n", r.Root, err)
				continue
			}
			// Fill in local revision only if it wasn't already provided.
			if r.Local.Revision == "" {
				if rev, err := r.VCS.LocalRevision(r.Path, r.Remote.Branch); err == nil {
					r.Local.Revision = rev
				}
			}
			if ru, err := r.VCS.RemoteURL(r.Path); err == nil {
				r.Local.RemoteURL = ru
			}
			if rr, err := vcs.RepoRootForImportPath(r.Root, false); err == nil {
				r.Remote.RepoURL = rr.Repo
			} else {
				log.Printf("failed to dynamically determine repo root for %v: %v\n", r.Root, err)
			}
		case r.RemoteVCS != nil:
			// Remote-only repository: query just the remote state.
			var err error
			r.Remote.Branch, r.Remote.Revision, err = r.RemoteVCS.RemoteBranchAndRevision(r.RemoteURL)
			if err != nil {
				log.Printf("skipping %q because of remote error:\n\t%v\n", r.Root, err)
				continue
			}
		default:
			// Do nothing. If both r.VCS and r.RemoteVCS are nil, then we expect
			// the Local and Remote structs to already be populated.
		}

		if ok, reason := shouldPresentUpdate(r); !ok {
			// An empty reason means the repo is simply not interesting (e.g., up to date).
			if reason != "" {
				log.Printf("skipping %q because:\n\t%v\n", r.Root, reason)
			}
			continue
		}
		p.processedFiltered <- r
	}
}
// shouldPresentUpdate reports if the given goPackage should be presented as an available update.
// It checks that the Go package is on default branch, does not have a dirty working tree, and does not have the remote revision.
// It returns a non-empty reason for why an update should be skipped, or empty string if it's not interesting (e.g., repository is up to date).
//
// The checks are deliberately ordered so that the most useful reason is
// reported first; do not reorder them casually.
func shouldPresentUpdate(repo *gps.Repo) (ok bool, reason string) {
	// Ensure sufficient remote information is available, otherwise we can't present updates.
	if repo.Remote.RepoURL == "" {
		return false, "repository URL (as determined dynamically from the import path) is empty"
	}
	if (repo.VCS != nil || repo.RemoteVCS != nil) && repo.Remote.Branch == "" {
		return false, "remote branch is empty"
	}
	if repo.Remote.Revision == "" {
		return false, "remote revision is empty"
	}

	// Check repository state before presenting updates, and report most useful
	// reasons first.
	switch {
	case repo.VCS != nil:
		// Local remote URL should match Repo URL derived from import path.
		// This is the very first thing to verify, because it affects default branch.
		if !status.EqualRepoURLs(repo.Local.RemoteURL, repo.Remote.RepoURL) {
			return false, "remote URL doesn't match repo URL inferred from import path:" +
				fmt.Sprintf("\n	  (actual) %s", repo.Local.RemoteURL) +
				fmt.Sprintf("\n	(expected) %s", status.FormatRepoURL(repo.Local.RemoteURL, repo.Remote.RepoURL))
		}

		// Local branch should match remote branch.
		localBranch, err := repo.VCS.Branch(repo.Path)
		if err != nil {
			return false, "error determining local branch:\n" + err.Error()
		}
		if localBranch != repo.Remote.Branch {
			return false, fmt.Sprintf("local branch %q doesn't match remote branch %q", localBranch, repo.Remote.Branch)
		}
	case repo.RemoteVCS != nil:
		// TODO: Consider taking care of this difference in remote URLs earlier, inside, e.g., subreposWorker. But need to make that play nicely with the updaters; see TODO at bottom of gps.Repo struct.
		//
		// Local remote URL, if set, should match Repo URL derived from import path.
		if repo.Local.RemoteURL != "" && !status.EqualRepoURLs(repo.Local.RemoteURL, repo.Remote.RepoURL) {
			return false, "remote URL doesn't match repo URL inferred from import path:" +
				fmt.Sprintf("\n	  (actual) %s", repo.Local.RemoteURL) +
				fmt.Sprintf("\n	(expected) %s", status.FormatRepoURL(repo.Local.RemoteURL, repo.Remote.RepoURL))
		}
	}

	if repo.Local.Revision == "" {
		return false, "local revision is empty"
	}

	// Check if repo is already up to date.
	if repo.Local.Revision == repo.Remote.Revision {
		// No reason provided because it's not worth mentioning.
		return false, ""
	}

	// Check rest of local repository state before presenting updates,
	// and report most useful reasons first.
	if repo.VCS != nil {
		// Local default branch shouldn't contain remote commit.
		// Otherwise, it means local revision is different because it's
		// ahead of remote revision, rather than because there's an update.
		localContainsRemoteRevision, err := repo.VCS.Contains(repo.Path, repo.Remote.Revision, repo.Remote.Branch)
		if err != nil {
			return false, "error determining if local default branch contains remote revision:\n" + err.Error()
		}
		if localContainsRemoteRevision {
			// Local revision is ahead of remote revision, and there's no update.
			// This isn't worth reporting in detail, since there's no update anyway.
			return false, ""
		}

		// Remote default branch should contain local commit.
		// Otherwise, it means there's an update, but it won't be able to apply
		// cleanly because the local revision is ahead of remote revision.
		remoteContainsLocalRevision, err := repo.VCS.RemoteContains(repo.Path, repo.Local.Revision, repo.Remote.Branch)
		if err != nil {
			return false, "error determining if remote default branch contains local revision:\n" + err.Error()
		}
		if !remoteContainsLocalRevision {
			return false, fmt.Sprintf("local revision %q is ahead of remote revision %q", repo.Local.Revision, repo.Remote.Revision)
		}

		// There shouldn't be a dirty working tree.
		treeStatus, err := repo.VCS.Status(repo.Path)
		if err != nil {
			return false, "error determining if working tree is dirty:\n" + err.Error()
		}
		if treeStatus != "" {
			return false, "working tree is dirty:\n" + treeStatus
		}
	}

	// If we got this far, there's an update available and everything looks normal. Present it.
	return true, ""
}
// presentWorker works with repos that should be displayed, creating a presentation for each.
// It consumes p.processedFiltered and sends completed RepoPresentations to p.presented.
func (p *Pipeline) presentWorker(wg *sync.WaitGroup) {
	defer wg.Done()
	for repo := range p.processedFiltered {
		// This part might take a while.
		presentation := p.present(presenter.Repo{
			Root:           repo.Root,
			RepoURL:        repo.Remote.RepoURL,
			LocalRevision:  repo.Local.Revision,
			RemoteRevision: repo.Remote.Revision,
		})

		p.presented <- &RepoPresentation{
			Repo:         repo,
			Presentation: presentation,
		}
	}
}
// present takes a repository containing 1 or more Go packages, and returns a presentation for it.
// It tries to find the best presenter for the given repository out of the registered ones,
// but falls back to a generic presentation if there's nothing better.
func (p *Pipeline) present(repo presenter.Repo) *presenter.Presentation {
	// Loop variable named pr (not presenter) to avoid shadowing the presenter package.
	for _, pr := range p.presenters {
		if presentation := pr(context.Background(), repo); presentation != nil {
			return presentation
		}
	}

	// Generic presentation is the fallback if no presenters matched.
	return &presenter.Presentation{
		HomeURL:  "https://" + repo.Root,
		ImageURL: "https://github.com/images/gravatars/gravatar-user-420.png",
		Changes:  nil,
		Error:    nil,
	}
}