Skip to content

Commit

Permalink
Enable static checks
Browse files — browse the repository at this point in the history
Signed-off-by: Derek McGowan <derek@mcgstyle.net>
  • Loading branch information
dmcgowan committed Aug 6, 2018
1 parent 32e2260 commit db0a4ec
Show file tree
Hide file tree
Showing 33 changed files with 116 additions and 96 deletions.
2 changes: 2 additions & 0 deletions .gometalinter.json
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@
"EnableGC": true,
"Enable": [
"structcheck",
"staticcheck",
"unconvert",

"gofmt",
"golint",
Expand Down
6 changes: 3 additions & 3 deletions digestset/set_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,23 +41,23 @@ func TestLookup(t *testing.T) {
}
assertEqualDigests(t, dgst, digests[3])

dgst, err = dset.Lookup("1234")
_, err = dset.Lookup("1234")
if err == nil {
t.Fatal("Expected ambiguous error looking up: 1234")
}
if err != ErrDigestAmbiguous {
t.Fatal(err)
}

dgst, err = dset.Lookup("9876")
_, err = dset.Lookup("9876")
if err == nil {
t.Fatal("Expected not found error looking up: 9876")
}
if err != ErrDigestNotFound {
t.Fatal(err)
}

dgst, err = dset.Lookup("sha256:1234")
_, err = dset.Lookup("sha256:1234")
if err == nil {
t.Fatal("Expected ambiguous error looking up: sha256:1234")
}
Expand Down
4 changes: 2 additions & 2 deletions health/health.go
Original file line number Diff line number Diff line change
Expand Up @@ -215,7 +215,7 @@ func RegisterFunc(name string, check func() error) {
// RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker
// from an arbitrary func() error.
func (registry *Registry) RegisterPeriodicFunc(name string, period time.Duration, check CheckFunc) {
registry.Register(name, PeriodicChecker(CheckFunc(check), period))
registry.Register(name, PeriodicChecker(check, period))
}

// RegisterPeriodicFunc allows the convenience of registering a PeriodicChecker
Expand All @@ -227,7 +227,7 @@ func RegisterPeriodicFunc(name string, period time.Duration, check CheckFunc) {
// RegisterPeriodicThresholdFunc allows the convenience of registering a
// PeriodicChecker from an arbitrary func() error.
func (registry *Registry) RegisterPeriodicThresholdFunc(name string, period time.Duration, threshold int, check CheckFunc) {
registry.Register(name, PeriodicThresholdChecker(CheckFunc(check), period, threshold))
registry.Register(name, PeriodicThresholdChecker(check, period, threshold))
}

// RegisterPeriodicThresholdFunc allows the convenience of registering a
Expand Down
2 changes: 1 addition & 1 deletion manifest/schema1/config_builder_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ func TestEmptyTar(t *testing.T) {
if err != nil {
t.Fatalf("NewReader returned error: %v", err)
}
n, err := gzipReader.Read(decompressed[:])
n, _ := gzipReader.Read(decompressed[:])
if n != 1024 {
t.Fatalf("read returned %d bytes; expected 1024", n)
}
Expand Down
2 changes: 1 addition & 1 deletion manifest/schema2/manifest_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ func TestManifest(t *testing.T) {
t.Fatalf("error creating DeserializedManifest: %v", err)
}

mediaType, canonical, err := deserialized.Payload()
mediaType, canonical, _ := deserialized.Payload()

if mediaType != MediaTypeManifest {
t.Fatalf("unexpected media type: %s", mediaType)
Expand Down
15 changes: 12 additions & 3 deletions notifications/sinks_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ func TestBroadcaster(t *testing.T) {
wg.Add(1)
go func(block ...Event) {
if err := b.Write(block...); err != nil {
t.Fatalf("error writing block of length %d: %v", len(block), err)
t.Errorf("error writing block of length %d: %v", len(block), err)
}
wg.Done()
}(block...)
Expand All @@ -41,6 +41,9 @@ func TestBroadcaster(t *testing.T) {
}

wg.Wait() // Wait until writes complete
if t.Failed() {
t.FailNow()
}
checkClose(t, b)

// Iterate through the sinks and check that they all have the expected length.
Expand Down Expand Up @@ -79,7 +82,7 @@ func TestEventQueue(t *testing.T) {
wg.Add(1)
go func(block ...Event) {
if err := eq.Write(block...); err != nil {
t.Fatalf("error writing event block: %v", err)
t.Errorf("error writing event block: %v", err)
}
wg.Done()
}(block...)
Expand All @@ -89,6 +92,9 @@ func TestEventQueue(t *testing.T) {
}

wg.Wait()
if t.Failed() {
t.FailNow()
}
checkClose(t, eq)

ts.mu.Lock()
Expand Down Expand Up @@ -177,7 +183,7 @@ func TestRetryingSink(t *testing.T) {
go func(block ...Event) {
defer wg.Done()
if err := s.Write(block...); err != nil {
t.Fatalf("error writing event block: %v", err)
t.Errorf("error writing event block: %v", err)
}
}(block...)

Expand All @@ -186,6 +192,9 @@ func TestRetryingSink(t *testing.T) {
}

wg.Wait()
if t.Failed() {
t.FailNow()
}
checkClose(t, s)

ts.mu.Lock()
Expand Down
2 changes: 1 addition & 1 deletion registry/auth/htpasswd/htpasswd.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ func (htpasswd *htpasswd) authenticateUser(username string, password string) err
return auth.ErrAuthenticationFailure
}

err := bcrypt.CompareHashAndPassword([]byte(credentials), []byte(password))
err := bcrypt.CompareHashAndPassword(credentials, []byte(password))
if err != nil {
return auth.ErrAuthenticationFailure
}
Expand Down
9 changes: 4 additions & 5 deletions registry/client/transport/http_reader.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@ import (
"fmt"
"io"
"net/http"
"os"
"regexp"
"strconv"
)
Expand Down Expand Up @@ -97,7 +96,7 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {

lastReaderOffset := hrs.readerOffset

if whence == os.SEEK_SET && hrs.rc == nil {
if whence == io.SeekStart && hrs.rc == nil {
// If no request has been made yet, and we are seeking to an
// absolute position, set the read offset as well to avoid an
// unnecessary request.
Expand All @@ -113,14 +112,14 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
newOffset := hrs.seekOffset

switch whence {
case os.SEEK_CUR:
case io.SeekCurrent:
newOffset += offset
case os.SEEK_END:
case io.SeekEnd:
if hrs.size < 0 {
return 0, errors.New("content length not known")
}
newOffset = hrs.size + offset
case os.SEEK_SET:
case io.SeekStart:
newOffset = offset
}

Expand Down
10 changes: 5 additions & 5 deletions registry/handlers/api_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -512,8 +512,8 @@ func testBlobAPI(t *testing.T, env *testEnv, args blobArgs) *testEnv {

// ------------------------------------------
// Now, actually do successful upload.
layerLength, _ := layerFile.Seek(0, os.SEEK_END)
layerFile.Seek(0, os.SEEK_SET)
layerLength, _ := layerFile.Seek(0, io.SeekEnd)
layerFile.Seek(0, io.SeekStart)

uploadURLBase, _ = startPushLayer(t, env, imageName)
pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile)
Expand Down Expand Up @@ -674,12 +674,12 @@ func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) {

// ----------------
// Reupload previously deleted blob
layerFile.Seek(0, os.SEEK_SET)
layerFile.Seek(0, io.SeekStart)

uploadURLBase, _ := startPushLayer(t, env, imageName)
pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile)

layerFile.Seek(0, os.SEEK_SET)
layerFile.Seek(0, io.SeekStart)
canonicalDigester := digest.Canonical.Digester()
if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil {
t.Fatalf("error copying to digest: %v", err)
Expand All @@ -693,7 +693,7 @@ func testBlobDelete(t *testing.T, env *testEnv, args blobArgs) {
t.Fatalf("unexpected error checking head on existing layer: %v", err)
}

layerLength, _ := layerFile.Seek(0, os.SEEK_END)
layerLength, _ := layerFile.Seek(0, io.SeekEnd)
checkResponse(t, "checking head on reuploaded layer", resp, http.StatusOK)
checkHeaders(t, resp, http.Header{
"Content-Length": []string{fmt.Sprint(layerLength)},
Expand Down
2 changes: 1 addition & 1 deletion registry/handlers/mail.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ func (mail *mailer) sendMail(subject, message string) error {
auth,
mail.From,
mail.To,
[]byte(msg),
msg,
)
if err != nil {
return err
Expand Down
2 changes: 1 addition & 1 deletion registry/proxy/proxyblobstore.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ import (
)

// todo(richardscothern): from cache control header or config file
const blobTTL = time.Duration(24 * 7 * time.Hour)
const blobTTL = 24 * 7 * time.Hour

type proxyBlobStore struct {
localStore distribution.BlobStore
Expand Down
13 changes: 9 additions & 4 deletions registry/proxy/proxyblobstore_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -350,24 +350,30 @@ func testProxyStoreServe(t *testing.T, te *testEnv, numClients int) {
w := httptest.NewRecorder()
r, err := http.NewRequest("GET", "", nil)
if err != nil {
t.Fatal(err)
t.Error(err)
return
}

err = te.store.ServeBlob(te.ctx, w, r, remoteBlob.Digest)
if err != nil {
t.Fatalf(err.Error())
t.Errorf(err.Error())
return
}

bodyBytes := w.Body.Bytes()
localDigest := digest.FromBytes(bodyBytes)
if localDigest != remoteBlob.Digest {
t.Fatalf("Mismatching blob fetch from proxy")
t.Errorf("Mismatching blob fetch from proxy")
return
}
}
}()
}

wg.Wait()
if t.Failed() {
t.FailNow()
}

remoteBlobCount := len(te.inRemote)
sbsMu.Lock()
Expand Down Expand Up @@ -404,7 +410,6 @@ func testProxyStoreServe(t *testing.T, te *testEnv, numClients int) {
}
}

localStats = te.LocalStats()
remoteStats = te.RemoteStats()

// Ensure remote unchanged
Expand Down
2 changes: 1 addition & 1 deletion registry/proxy/proxymanifeststore.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ import (
)

// todo(richardscothern): from cache control header or config
const repositoryTTL = time.Duration(24 * 7 * time.Hour)
const repositoryTTL = 24 * 7 * time.Hour

type proxyManifestStore struct {
ctx context.Context
Expand Down
21 changes: 10 additions & 11 deletions registry/storage/blob_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,6 @@ import (
"fmt"
"io"
"io/ioutil"
"os"
"path"
"reflect"
"testing"
Expand Down Expand Up @@ -96,7 +95,7 @@ func TestSimpleBlobUpload(t *testing.T) {
}

// Do a resume, get unknown upload
blobUpload, err = bs.Resume(ctx, blobUpload.ID())
_, err = bs.Resume(ctx, blobUpload.ID())
if err != distribution.ErrBlobUploadUnknown {
t.Fatalf("unexpected error resuming upload, should be unknown: %v", err)
}
Expand Down Expand Up @@ -278,7 +277,7 @@ func TestSimpleBlobRead(t *testing.T) {
t.Fatalf("expected not found error when testing for existence: %v", err)
}

rc, err := bs.Open(ctx, dgst)
_, err = bs.Open(ctx, dgst)
if err != distribution.ErrBlobUnknown {
t.Fatalf("expected not found error when opening non-existent blob: %v", err)
}
Expand All @@ -300,7 +299,7 @@ func TestSimpleBlobRead(t *testing.T) {
t.Fatalf("committed blob has incorrect length: %v != %v", desc.Size, randomLayerSize)
}

rc, err = bs.Open(ctx, desc.Digest) // note that we are opening with original digest.
rc, err := bs.Open(ctx, desc.Digest) // note that we are opening with original digest.
if err != nil {
t.Fatalf("error opening blob with %v: %v", dgst, err)
}
Expand All @@ -323,7 +322,7 @@ func TestSimpleBlobRead(t *testing.T) {
}

// Now seek back the blob, read the whole thing and check against randomLayerData
offset, err := rc.Seek(0, os.SEEK_SET)
offset, err := rc.Seek(0, io.SeekStart)
if err != nil {
t.Fatalf("error seeking blob: %v", err)
}
Expand All @@ -342,7 +341,7 @@ func TestSimpleBlobRead(t *testing.T) {
}

// Reset the randomLayerReader and read back the buffer
_, err = randomLayerReader.Seek(0, os.SEEK_SET)
_, err = randomLayerReader.Seek(0, io.SeekStart)
if err != nil {
t.Fatalf("error resetting layer reader: %v", err)
}
Expand Down Expand Up @@ -397,7 +396,7 @@ func TestBlobMount(t *testing.T) {
t.Fatalf("error getting seeker size of random data: %v", err)
}

nn, err := io.Copy(blobUpload, randomDataReader)
_, err = io.Copy(blobUpload, randomDataReader)
if err != nil {
t.Fatalf("unexpected error uploading layer data: %v", err)
}
Expand Down Expand Up @@ -460,7 +459,7 @@ func TestBlobMount(t *testing.T) {
defer rc.Close()

h := sha256.New()
nn, err = io.Copy(h, rc)
nn, err := io.Copy(h, rc)
if err != nil {
t.Fatalf("error reading layer: %v", err)
}
Expand Down Expand Up @@ -573,17 +572,17 @@ func simpleUpload(t *testing.T, bs distribution.BlobIngester, blob []byte, expec
// the original state, returning the size. The state of the seeker should be
// treated as unknown if an error is returned.
func seekerSize(seeker io.ReadSeeker) (int64, error) {
current, err := seeker.Seek(0, os.SEEK_CUR)
current, err := seeker.Seek(0, io.SeekCurrent)
if err != nil {
return 0, err
}

end, err := seeker.Seek(0, os.SEEK_END)
end, err := seeker.Seek(0, io.SeekEnd)
if err != nil {
return 0, err
}

resumed, err := seeker.Seek(current, os.SEEK_SET)
resumed, err := seeker.Seek(current, io.SeekStart)
if err != nil {
return 0, err
}
Expand Down
6 changes: 3 additions & 3 deletions registry/storage/blobwriter_resumable.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ func (bw *blobWriter) resumeDigest(ctx context.Context) error {
return errResumableDigestNotAvailable
}
offset := bw.fileWriter.Size()
if offset == int64(h.Len()) {
if offset == h.Len() {
// State of digester is already at the requested offset.
return nil
}
Expand Down Expand Up @@ -65,7 +65,7 @@ func (bw *blobWriter) resumeDigest(ctx context.Context) error {
}

// Mind the gap.
if gapLen := offset - int64(h.Len()); gapLen > 0 {
if gapLen := offset - h.Len(); gapLen > 0 {
return errResumableDigestNotAvailable
}

Expand Down Expand Up @@ -129,7 +129,7 @@ func (bw *blobWriter) storeHashState(ctx context.Context) error {
name: bw.blobStore.repository.Named().String(),
id: bw.id,
alg: bw.digester.Digest().Algorithm(),
offset: int64(h.Len()),
offset: h.Len(),
})

if err != nil {
Expand Down
Loading

0 comments on commit db0a4ec

Please sign in to comment.