// Forked from github.com/bep/s3deploy.
// Copyright © 2018 Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package lib
import (
"errors"
"flag"
"log"
"os"
"path/filepath"
"strings"
)
// Config configures a deployment.
type Config struct {
	// conf holds settings read from ConfigFile — presumably populated elsewhere; TODO confirm against the loader.
	conf fileConfig
	// AccessKey is the AWS access key ID (-key flag).
	AccessKey string
	// SecretKey is the AWS secret access key (-secret flag).
	SecretKey string
	// SourcePath is the local path of files to upload (-source flag, defaults to ".").
	SourcePath string
	// BucketName is the destination S3 bucket (-bucket flag, required).
	BucketName string
	// To have multiple sites in one bucket.
	BucketPath string
	// RegionName is the AWS region of the bucket (-region flag).
	RegionName string
	// When set, will invalidate the CDN cache for the updated files.
	CDNDistributionID string
	// Optional configFile
	ConfigFile string
	// NumberOfWorkers is the upload concurrency (-workers flag; -1 means unset/default).
	NumberOfWorkers int
	// MaxDelete caps how many remote files one deploy may delete (-max-delete flag).
	MaxDelete int
	// ACL is the canned ACL applied to uploaded objects (-acl flag; empty means provider default).
	ACL string
	// PublicReadACL is deprecated; use ACL "public-read" instead.
	PublicReadACL bool
	Verbose bool
	Silent bool
	// Force uploads files even when their etags match the remote copies.
	Force bool
	// Try enables a trial run with no remote updates.
	Try bool
	// CLI state
	PrintVersion bool
	Help bool
	// Mostly useful for testing.
	baseStore remoteStore
}
// FlagsToConfig binds the command-line flags from os.Args[1:] to a new
// Config using the default flag.CommandLine set.
// Note that flag.Parse is not called.
func FlagsToConfig() (*Config, error) {
	cfg, err := flagsToConfig(flag.CommandLine)
	return cfg, err
}
// flagsToConfig registers every supported command-line flag on f and
// binds each one to a field of a freshly allocated Config. It never
// calls Parse; the caller decides when the flags are read.
func flagsToConfig(f *flag.FlagSet) (*Config, error) {
	c := &Config{}

	// AWS credentials and destination.
	f.StringVar(&c.AccessKey, "key", "", "access key ID for AWS")
	f.StringVar(&c.SecretKey, "secret", "", "secret access key for AWS")
	f.StringVar(&c.RegionName, "region", "", "name of AWS region")
	f.StringVar(&c.BucketName, "bucket", "", "destination bucket name on AWS")
	f.StringVar(&c.BucketPath, "path", "", "optional bucket sub path")

	// Source and optional extras.
	f.StringVar(&c.SourcePath, "source", ".", "path of files to upload")
	f.StringVar(&c.CDNDistributionID, "distribution-id", "", "optional CDN distribution ID for cache invalidation")
	f.StringVar(&c.ConfigFile, "config", ".s3deploy.yml", "optional config file")

	// Deploy behavior.
	f.IntVar(&c.MaxDelete, "max-delete", 256, "maximum number of files to delete per deploy")
	f.BoolVar(&c.PublicReadACL, "public-access", false, "DEPRECATED: please set -acl='public-read'")
	f.StringVar(&c.ACL, "acl", "", "provide an ACL for uploaded objects. to make objects public, set to 'public-read'. all possible values are listed here: https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl (default \"private\")")
	f.BoolVar(&c.Force, "force", false, "upload even if the etags match")
	f.BoolVar(&c.Try, "try", false, "trial run, no remote updates")
	f.IntVar(&c.NumberOfWorkers, "workers", -1, "number of workers to upload files")

	// Output and CLI state.
	f.BoolVar(&c.Verbose, "v", false, "enable verbose logging")
	f.BoolVar(&c.Silent, "quiet", false, "enable silent mode")
	f.BoolVar(&c.PrintVersion, "V", false, "print version and exit")
	f.BoolVar(&c.Help, "h", false, "help")

	return c, nil
}
// check validates the configuration and normalizes SourcePath.
// It returns an error when the bucket name is missing, when the source
// path resolves to a filesystem root, or when the deprecated
// public-access flag is combined with an explicit ACL.
func (cfg *Config) check() error {
	if cfg.BucketName == "" {
		return errors.New("AWS bucket is required")
	}

	cfg.SourcePath = filepath.Clean(cfg.SourcePath)

	// Sanity check to prevent people from uploading their entire disk.
	// The returned path from filepath.Clean ends in a slash only if it represents
	// a root directory, such as "/" on Unix or `C:\` on Windows.
	if strings.HasSuffix(cfg.SourcePath, string(os.PathSeparator)) {
		return errors.New("invalid source path: Cannot deploy from root")
	}

	// Reject the conflicting flag combination before emitting the
	// deprecation warning; previously both flags set would log the
	// warning and then error anyway, which was misleading.
	if cfg.PublicReadACL && cfg.ACL != "" {
		return errors.New("you passed a value for the flags public-access and acl, which is not supported. the public-access flag is deprecated. please use the acl flag moving forward")
	}

	if cfg.PublicReadACL {
		log.Print("WARNING: the 'public-access' flag is deprecated. Please use -acl='public-read' instead.")
	}

	return nil
}