Merge branch 'master' into disableRange
harshavardhana committed Jun 26, 2020
2 parents 0d69cd5 + f7f12b8 commit 3264756
Showing 69 changed files with 19,390 additions and 68 deletions.
7 changes: 2 additions & 5 deletions cmd/config-current.go
@@ -482,12 +482,12 @@ func lookupConfigs(s config.Config) {
}
}

globalConfigTargetList, err = notify.GetNotificationTargets(s, GlobalContext.Done(), NewGatewayHTTPTransport())
globalConfigTargetList, err = notify.GetNotificationTargets(s, GlobalContext.Done(), NewGatewayHTTPTransport(), false)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err))
}

globalEnvTargetList, err = notify.GetNotificationTargets(newServerConfig(), GlobalContext.Done(), NewGatewayHTTPTransport())
globalEnvTargetList, err = notify.GetNotificationTargets(newServerConfig(), GlobalContext.Done(), NewGatewayHTTPTransport(), true)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err))
}
@@ -579,9 +579,6 @@ func newSrvConfig(objAPI ObjectLayer) error {
// Initialize server config.
srvCfg := newServerConfig()

// Override any values from ENVs.
lookupConfigs(srvCfg)

// hold the mutex lock before a new config is assigned.
globalServerConfigMu.Lock()
globalServerConfig = srvCfg
5 changes: 1 addition & 4 deletions cmd/config-encrypted.go
@@ -31,10 +31,7 @@ import (
"github.com/minio/minio/pkg/madmin"
)

func handleEncryptedConfigBackend(objAPI ObjectLayer, server bool) error {
if !server {
return nil
}
func handleEncryptedConfigBackend(objAPI ObjectLayer) error {

encrypted, err := checkBackendEncrypted(objAPI)
if err != nil {
18 changes: 0 additions & 18 deletions cmd/config.go
@@ -23,7 +23,6 @@ import (
"path"
"sort"
"strings"
"time"

jsoniter "github.com/json-iterator/go"
"github.com/minio/minio/cmd/config"
@@ -184,23 +183,6 @@ func (sys *ConfigSys) Load(objAPI ObjectLayer) error {
return sys.Init(objAPI)
}

// WatchConfigNASDisk - watches nas disk on periodic basis.
func (sys *ConfigSys) WatchConfigNASDisk(ctx context.Context, objAPI ObjectLayer) {
configInterval := globalRefreshIAMInterval
watchDisk := func() {
for {
select {
case <-ctx.Done():
return
case <-time.After(configInterval):
loadConfig(objAPI)
}
}
}
// Refresh configSys in background for NAS gateway.
go watchDisk()
}

// Init - initializes config system from config.json.
func (sys *ConfigSys) Init(objAPI ObjectLayer) error {
if objAPI == nil {
4 changes: 1 addition & 3 deletions cmd/config/notify/parse.go
@@ -60,8 +60,7 @@ func TestNotificationTargets(cfg config.Config, doneCh <-chan struct{}, transpor

// GetNotificationTargets registers and initializes all notification
// targets, returns error if any.
func GetNotificationTargets(cfg config.Config, doneCh <-chan struct{}, transport *http.Transport) (*event.TargetList, error) {
test := false
func GetNotificationTargets(cfg config.Config, doneCh <-chan struct{}, transport *http.Transport, test bool) (*event.TargetList, error) {
returnOnTargetError := false
return RegisterNotificationTargets(cfg, doneCh, transport, nil, test, returnOnTargetError)
}
@@ -72,7 +71,6 @@ func GetNotificationTargets(cfg config.Config, doneCh <-chan struct{}, transport
// * Add newly added target configuration to serverConfig.Notify.<TARGET_NAME>.
// * Handle the configuration in this function to create/add into TargetList.
func RegisterNotificationTargets(cfg config.Config, doneCh <-chan struct{}, transport *http.Transport, targetIDs []event.TargetID, test bool, returnOnTargetError bool) (*event.TargetList, error) {

targetList, err := FetchRegisteredTargets(cfg, doneCh, transport, test, returnOnTargetError)
if err != nil {
return targetList, err
8 changes: 4 additions & 4 deletions cmd/erasure-decode.go
@@ -145,9 +145,9 @@ func (p *parallelReader) Read(dst [][]byte) ([][]byte, error) {
wg.Add(1)
go func(i int) {
defer wg.Done()
disk := p.readers[i]
if disk == nil {
// Since disk is nil, trigger another read.
rr := p.readers[i]
if rr == nil {
// Since reader is nil, trigger another read.
readTriggerCh <- true
return
}
@@ -160,7 +160,7 @@ func (p *parallelReader) Read(dst [][]byte) ([][]byte, error) {
// For the last shard, the shardsize might be less than previous shard sizes.
// Hence the following statement ensures that the buffer size is reset to the right size.
p.buf[bufIdx] = p.buf[bufIdx][:p.shardSize]
_, err := disk.ReadAt(p.buf[bufIdx], p.offset)
_, err := rr.ReadAt(p.buf[bufIdx], p.offset)
if err != nil {
if _, ok := err.(*errHashMismatch); ok {
atomic.StoreInt32(&healRequired, 1)
44 changes: 40 additions & 4 deletions cmd/erasure-zones.go
@@ -928,9 +928,18 @@ func (z *erasureZones) listObjects(ctx context.Context, bucket, prefix, marker,
// N times until this boolean is 'false'.
func lexicallySortedEntryZone(zoneEntryChs [][]FileInfoCh, zoneEntries [][]FileInfo, zoneEntriesValid [][]bool) (FileInfo, int, int, bool) {
for i, entryChs := range zoneEntryChs {
i := i
var wg sync.WaitGroup
for j := range entryChs {
zoneEntries[i][j], zoneEntriesValid[i][j] = entryChs[j].Pop()
j := j
wg.Add(1)
// Pop() entries in parallel for large drive setups.
go func() {
defer wg.Done()
zoneEntries[i][j], zoneEntriesValid[i][j] = entryChs[j].Pop()
}()
}
wg.Wait()
}

var isTruncated = false
@@ -1008,9 +1017,18 @@ func lexicallySortedEntryZone(zoneEntryChs [][]FileInfoCh, zoneEntries [][]FileI
// N times until this boolean is 'false'.
func lexicallySortedEntryZoneVersions(zoneEntryChs [][]FileInfoVersionsCh, zoneEntries [][]FileInfoVersions, zoneEntriesValid [][]bool) (FileInfoVersions, int, int, bool) {
for i, entryChs := range zoneEntryChs {
i := i
var wg sync.WaitGroup
for j := range entryChs {
zoneEntries[i][j], zoneEntriesValid[i][j] = entryChs[j].Pop()
j := j
wg.Add(1)
// Pop() entries in parallel for large drive setups.
go func() {
defer wg.Done()
zoneEntries[i][j], zoneEntriesValid[i][j] = entryChs[j].Pop()
}()
}
wg.Wait()
}

var isTruncated = false
@@ -1141,9 +1159,18 @@ func mergeZonesEntriesCh(zonesEntryChs [][]FileInfoCh, maxKeys int, ndisks int)

func isTruncatedZones(zoneEntryChs [][]FileInfoCh, zoneEntries [][]FileInfo, zoneEntriesValid [][]bool) bool {
for i, entryChs := range zoneEntryChs {
i := i
var wg sync.WaitGroup
for j := range entryChs {
zoneEntries[i][j], zoneEntriesValid[i][j] = entryChs[j].Pop()
j := j
wg.Add(1)
// Pop() entries in parallel for large drive setups.
go func() {
defer wg.Done()
zoneEntries[i][j], zoneEntriesValid[i][j] = entryChs[j].Pop()
}()
}
wg.Wait()
}

var isTruncated = false
@@ -1170,9 +1197,18 @@ func isTruncatedZones(zoneEntryChs [][]FileInfoCh, zoneEntries [][]FileInfo, zon

func isTruncatedZonesVersions(zoneEntryChs [][]FileInfoVersionsCh, zoneEntries [][]FileInfoVersions, zoneEntriesValid [][]bool) bool {
for i, entryChs := range zoneEntryChs {
i := i
var wg sync.WaitGroup
for j := range entryChs {
zoneEntries[i][j], zoneEntriesValid[i][j] = entryChs[j].Pop()
j := j
wg.Add(1)
// Pop() entries in parallel for large drive setups.
go func() {
defer wg.Done()
zoneEntries[i][j], zoneEntriesValid[i][j] = entryChs[j].Pop()
}()
}
wg.Wait()
}

var isTruncated = false
18 changes: 2 additions & 16 deletions cmd/gateway-main.go
@@ -152,7 +152,7 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
// Set when gateway is enabled
globalIsGateway = true

enableConfigOps := gatewayName == "nas"
enableConfigOps := false

// TODO: We need to move this code with globalConfigSys.Init()
// for now keep it here such that "s3" gateway layer initializes
@@ -242,29 +242,15 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
globalObjectAPI = newObject
globalObjLayerMutex.Unlock()

// Migrate all backend configs to encrypted backend, also handles rotation as well.
// For "nas" gateway we need to specially handle the backend migration as well.
// Internally code handles migrating etcd if enabled automatically.
logger.FatalIf(handleEncryptedConfigBackend(newObject, enableConfigOps),
"Unable to handle encrypted backend for config, iam and policies")

// Calls all New() for all sub-systems.
newAllSubsystems()

// **** WARNING ****
// Migrating to encrypted backend should happen before initialization of any
// sub-systems, make sure that we do not move the above codeblock elsewhere.
if enableConfigOps {
logger.FatalIf(globalConfigSys.Init(newObject), "Unable to initialize config system")
if gatewayName == "nas" {
buckets, err := newObject.ListBuckets(GlobalContext)
if err != nil {
logger.Fatal(err, "Unable to list buckets")
}

logger.FatalIf(globalNotificationSys.Init(buckets, newObject), "Unable to initialize notification system")
// Start watching disk for reloading config, this
// is only enabled for "NAS" gateway.
globalConfigSys.WatchConfigNASDisk(GlobalContext, newObject)
}

if globalEtcdClient != nil {
2 changes: 1 addition & 1 deletion cmd/object-api-listobjects_test.go
@@ -573,7 +573,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler) {

for i, testCase := range testCases {
testCase := testCase
t.Run(fmt.Sprintf("Test%d-%s", i+1, instanceType), func(t *testing.T) {
t.Run(fmt.Sprintf("%s-Test%d", instanceType, i+1), func(t *testing.T) {
result, err := obj.ListObjects(context.Background(), testCase.bucketName,
testCase.prefix, testCase.marker, testCase.delimeter, int(testCase.maxKeys))
if err != nil && testCase.shouldPass {
4 changes: 4 additions & 0 deletions cmd/peer-rest-server.go
@@ -606,6 +606,10 @@ func (s *peerRESTServer) LoadBucketMetadataHandler(w http.ResponseWriter, r *htt
}

globalBucketMetadataSys.Set(bucketName, meta)

if meta.notificationConfig != nil {
globalNotificationSys.AddRulesMap(bucketName, meta.notificationConfig.ToRulesMap())
}
}

// ReloadFormatHandler - Reload Format.
2 changes: 1 addition & 1 deletion cmd/server-main.go
@@ -235,7 +235,7 @@ func initSafeMode(ctx context.Context, newObject ObjectLayer) (err error) {
// Migrate all backend configs to encrypted backend configs, optionally
// handles rotating keys for encryption, if there is any retriable failure
// that shall be retried if there is an error.
if err = handleEncryptedConfigBackend(newObject, true); err == nil {
if err = handleEncryptedConfigBackend(newObject); err == nil {
// Upon success migrating the config, initialize all sub-systems
// if all sub-systems initialized successfully return right away
if err = initAllSubsystems(retryCtx, newObject); err == nil {
5 changes: 3 additions & 2 deletions cmd/storage-rest-client.go
@@ -289,8 +289,9 @@ func (client *storageRESTClient) DeleteVersion(volume, path string, fi FileInfo)
values.Set(storageRESTFilePath, path)

var buffer bytes.Buffer
encoder := gob.NewEncoder(&buffer)
encoder.Encode(&fi)
if err := gob.NewEncoder(&buffer).Encode(fi); err != nil {
return err
}

respBody, err := client.call(storageRESTMethodDeleteVersion, values, &buffer, -1)
defer http.DrainBody(respBody)
1 change: 0 additions & 1 deletion cmd/storage-rest-common.go
@@ -61,7 +61,6 @@ const (
storageRESTFilePath = "file-path"
storageRESTVersionID = "version-id"
storageRESTTotalVersions = "total-versions"
storageRESTDeleteMarker = "delete-marker"
storageRESTSrcVolume = "source-volume"
storageRESTSrcPath = "source-path"
storageRESTDataDir = "data-dir"
12 changes: 8 additions & 4 deletions cmd/storage-rest-server.go
@@ -287,9 +287,13 @@ func (s *storageRESTServer) DeleteVersionHandler(w http.ResponseWriter, r *http.
volume := vars[storageRESTVolume]
filePath := vars[storageRESTFilePath]

if r.ContentLength < 0 {
s.writeErrorResponse(w, errInvalidArgument)
return
}

var fi FileInfo
decoder := gob.NewDecoder(r.Body)
if err := decoder.Decode(&fi); err != nil {
if err := gob.NewDecoder(r.Body).Decode(&fi); err != nil {
s.writeErrorResponse(w, err)
return
}
@@ -300,7 +304,7 @@ func (s *storageRESTServer) DeleteVersionHandler(w http.ResponseWriter, r *http.
}
}

// ReadVersion delete updated metadata.
// ReadVersion read metadata of versionID
func (s *storageRESTServer) ReadVersionHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
@@ -858,7 +862,7 @@ func registerStorageRESTHandlers(router *mux.Router, endpointZones EndpointZones
subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodWriteMetadata).HandlerFunc(httpTraceHdrs(server.WriteMetadataHandler)).
Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodDeleteVersion).HandlerFunc(httpTraceHdrs(server.DeleteVersionHandler)).
Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTVersionID, storageRESTDeleteMarker)...)
Queries(restQueries(storageRESTVolume, storageRESTFilePath)...)
subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodReadVersion).HandlerFunc(httpTraceHdrs(server.ReadVersionHandler)).
Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTVersionID)...)
subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodRenameData).HandlerFunc(httpTraceHdrs(server.RenameDataHandler)).
2 changes: 1 addition & 1 deletion cmd/web-handlers.go
@@ -340,7 +340,7 @@ func (web *webAPIHandlers) ListBuckets(r *http.Request, args *WebGenericArgs, re
for _, bucket := range buckets {
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: claims.AccessKey,
Action: iampolicy.ListAllMyBucketsAction,
Action: iampolicy.ListBucketAction,
BucketName: bucket.Name,
ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()),
IsOwner: owner,
30 changes: 30 additions & 0 deletions docs/gateway/nas.md
@@ -49,6 +49,36 @@ mc ls mynas
[2017-02-26 22:10:11 PST] 0B test-bucket1/
```

## Breaking changes

There will be a breaking change after release version 'RELEASE.2020-06-22T03-12-50Z'.

### The file-based config settings are deprecated in NAS

Support for the admin config APIs will be removed. This includes getters and setters such as `mc admin config get` and `mc admin config set`, and any other `mc admin config` options. The reason for this change is to avoid unnecessary reloads of the config from the disk, and to align with the environment-variable based settings used by the other gateways.

### Migration guide

Users who have been using the older config approach should migrate to the equivalent ENV settings by setting the corresponding environment variables.

For example, consider the following webhook target config:

```
notify_webhook:1 endpoint=http://localhost:8080/ auth_token= queue_limit=0 queue_dir=/tmp/webhk client_cert= client_key=
```

The corresponding environment variable settings would be:

```
export MINIO_NOTIFY_WEBHOOK_ENABLE_1=on
export MINIO_NOTIFY_WEBHOOK_ENDPOINT_1=http://localhost:8080/
export MINIO_NOTIFY_WEBHOOK_QUEUE_DIR_1=/tmp/webhk
```

> NOTE: Please check the docs for the corresponding ENV settings of each target. Alternatively, the ENV names for a given subsystem can be obtained in the form `mc admin config set alias/ <sub-sys> --env`.
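
For instance, assuming the `mynas` alias used earlier in this guide, the ENV keys for the webhook target above could be listed as shown below. This is only a sketch of the command form mentioned in the note; the exact output depends on your `mc` version.

```
mc admin config set mynas notify_webhook --env
```
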
## Explore Further
- [`mc` command-line interface](https://docs.min.io/docs/minio-client-quickstart-guide)
- [`aws` command-line interface](https://docs.min.io/docs/aws-cli-with-minio)
3 changes: 3 additions & 0 deletions docs/sts/keycloak.md
@@ -57,6 +57,7 @@ Set `identity_openid` config with `config_url`, `client_id` and restart MinIO
```
~ mc admin config set myminio identity_openid config_url="http://localhost:8080/auth/realms/demo/.well-known/openid-configuration" client_id="account"
```
> Note: You can configure the `scopes` parameter to restrict the OpenID scopes requested by MinIO from the IdP, for example `"openid,policy_role_attribute"`, where `policy_role_attribute` is a client scope / client mapper that maps a role attribute called `policy` to a `policy` claim returned by Keycloak.
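
For example, the command above could be extended with the `scopes` parameter as follows. This is a sketch; `policy_role_attribute` is only an illustrative client scope name and should match whatever you configured in Keycloak.

```
~ mc admin config set myminio identity_openid config_url="http://localhost:8080/auth/realms/demo/.well-known/openid-configuration" client_id="account" scopes="openid,policy_role_attribute"
```
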
Once successfully set, restart the MinIO instance.
```
@@ -87,6 +88,8 @@ This will open the login page of keycloak, upon successful login, STS credential
}
```

> Note: You can use the `-cscopes` parameter to restrict the requested scopes, for example to `"openid,policy_role_attribute"`, where `policy_role_attribute` is a client scope / client mapper that maps a role attribute called `policy` to a `policy` claim returned by Keycloak.
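
For example, the example client from the previous step could pass the restricted scope list via `-cscopes`. This is only a sketch: the other flags are elided because they depend on your Keycloak setup, and the `docs/sts/web-identity.go` path is assumed from the MinIO repository.

```
go run docs/sts/web-identity.go ... -cscopes "openid,policy_role_attribute"
```
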
These credentials can now be used to perform MinIO API operations.

## 5. Using MinIO Browser
