/
storage.go
140 lines (125 loc) · 5.24 KB
/
storage.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
package storage
import (
"context"
//RA Summary: gosec - G501 - Weak cryptographic hash
//RA: This line was flagged because of the use of MD5 hashing
//RA: This line of code hashes the AWS object to be able to verify data integrity
//RA: Purpose of this hash is to protect against environmental risks, it does not
//RA: hash any sensitive user provided information such as passwords.
//RA: AWS S3 API requires use of MD5 to validate data integrity.
//RA Developer Status: Mitigated
//RA Validator Status: Mitigated
//RA Modified Severity: CAT III
// #nosec G501
"crypto/md5"
"encoding/base64"
"io"
"path"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/gabriel-vasile/mimetype"
"github.com/pkg/errors"
"github.com/spf13/afero"
"github.com/spf13/viper"
"go.uber.org/zap"
"github.com/transcom/mymove/pkg/cli"
)
// StoreResult represents the result of a call to Store(). It is currently an
// empty struct; callers receive it only as a success indicator.
type StoreResult struct{}
// FileStorer is the set of methods needed to store and retrieve objects.
//
//go:generate mockery --name FileStorer
type FileStorer interface {
	// Store writes the contents of the ReadSeeker under the given key.
	// NOTE(review): the trailing string/*string parameters appear to be a
	// checksum and an optional content-type hint — confirm against the
	// S3/memory/filesystem implementations.
	Store(string, io.ReadSeeker, string, *string) (*StoreResult, error)
	// Fetch returns a reader for the object stored under the given key.
	// The caller is responsible for closing it.
	Fetch(string) (io.ReadCloser, error)
	// Delete removes the object stored under the given key.
	Delete(string) error
	// PresignedURL returns a URL granting temporary access to the object.
	// NOTE(review): second parameter is presumably the content type — verify
	// against the implementations.
	PresignedURL(string, string) (string, error)
	// FileSystem returns the afero filesystem backing the store.
	FileSystem() *afero.Afero
	// TempFileSystem returns an afero filesystem for temporary files.
	TempFileSystem() *afero.Afero
	// Tags returns the key/value tags associated with the stored object.
	Tags(string) (map[string]string, error)
}
// ComputeChecksum calculates the base64-encoded MD5 checksum of the full
// contents of the provided data. The reader is rewound to its beginning both
// before and after hashing, so the caller's view of the stream is unchanged.
//
// Returns an error if the reader cannot be seeked or read.
func ComputeChecksum(data io.ReadSeeker) (string, error) {
	// Seek to the start first so the checksum always covers the entire
	// content regardless of the reader's current position. This mirrors the
	// defensive initial seek in DetectContentType; previously this function
	// silently hashed from wherever the reader happened to be.
	if _, err := data.Seek(0, io.SeekStart); err != nil {
		return "", errors.Wrap(err, "could not seek to beginning of file")
	}
	//RA Summary: gosec - G401 - Weak cryptographic hash
	//RA: This line was flagged because of the use of MD5 hashing
	//RA: This line of code hashes the AWS object to be able to verify data integrity
	//RA: Purpose of this hash is to protect against environmental risks, it does not
	//RA: hash any sensitive user provided information such as passwords
	//RA: AWS S3 API requires use of MD5 to validate data integrity.
	//RA Developer Status: Mitigated
	//RA Validator Status: Mitigated
	//RA Validator: jneuner@mitre.org
	//RA Modified Severity: CAT III
	// #nosec G401
	hash := md5.New()
	if _, err := io.Copy(hash, data); err != nil {
		return "", errors.Wrap(err, "could not read file")
	}
	// Rewind so subsequent readers (e.g. the upload itself) see the whole file.
	if _, err := data.Seek(0, io.SeekStart); err != nil {
		return "", errors.Wrap(err, "could not seek to beginning of file")
	}
	return base64.StdEncoding.EncodeToString(hash.Sum(nil)), nil
}
// DetectContentType identifies the MIME type of the provided data. The reader
// is rewound to its beginning both before and after detection, so the caller's
// view of the stream is unchanged.
//
// When the MIME type cannot be determined, mimetype falls back to
// application/octet-stream rather than returning an error.
func DetectContentType(data io.ReadSeeker) (string, error) {
	if _, seekErr := data.Seek(0, io.SeekStart); seekErr != nil { // seek back to beginning of file
		return "", errors.Wrap(seekErr, "could not seek to beginning of file")
	}

	detected, detectErr := mimetype.DetectReader(data)
	if detectErr != nil {
		return "", errors.Wrap(detectErr, "encountered error reading file content type")
	}

	// Rewind so subsequent readers start from the top of the stream.
	if _, seekErr := data.Seek(0, io.SeekStart); seekErr != nil {
		return "", errors.Wrap(seekErr, "could not seek to beginning of file")
	}
	return detected.String(), nil
}
// InitStorage initializes the storage backend selected by the
// cli.StorageBackendFlag setting: "s3", "memory", or (default) the local
// filesystem. Calls logger.Fatal (process exit) when required s3 settings are
// missing or the AWS config cannot be loaded.
func InitStorage(v *viper.Viper, logger *zap.Logger) FileStorer {
	backend := v.GetString(cli.StorageBackendFlag)
	root := v.GetString(cli.LocalStorageRootFlag)
	webRoot := v.GetString(cli.LocalStorageWebRootFlag)

	switch backend {
	case "s3":
		bucket := v.GetString(cli.AWSS3BucketNameFlag)
		region := v.GetString(cli.AWSS3RegionFlag)
		keyNamespace := v.GetString(cli.AWSS3KeyNamespaceFlag)
		logger.Info("Using s3 storage backend",
			zap.String("bucket", bucket),
			zap.String("region", region),
			zap.String("key", keyNamespace))
		// All three settings are required for the s3 backend.
		if len(bucket) == 0 {
			logger.Fatal("must provide aws-s3-bucket-name parameter, exiting")
		}
		if len(region) == 0 {
			logger.Fatal("Must provide aws-s3-region parameter, exiting")
		}
		if len(keyNamespace) == 0 {
			logger.Fatal("Must provide aws_s3_key_namespace parameter, exiting")
		}
		cfg, err := config.LoadDefaultConfig(context.Background(),
			config.WithRegion(region),
		)
		if err != nil {
			logger.Fatal("error loading S3 aws config", zap.Error(err))
		}
		return NewS3(bucket, keyNamespace, cfg)
	case "memory":
		logger.Info("Using memory storage backend",
			zap.String(cli.LocalStorageRootFlag, path.Join(root, webRoot)),
			zap.String(cli.LocalStorageWebRootFlag, webRoot))
		return NewMemory(NewMemoryParams(root, webRoot))
	default:
		// Any other value falls through to local filesystem storage.
		logger.Info("Using local storage backend",
			zap.String(cli.LocalStorageRootFlag, path.Join(root, webRoot)),
			zap.String(cli.LocalStorageWebRootFlag, webRoot))
		return NewFilesystem(NewFilesystemParams(root, webRoot))
	}
}