feat: Single drive XL implementation
The main motivation is to move towards a common backend format
for all the different modes in MinIO, allowing for simpler code
and predictable behavior across all features.

This PR also brings features such as versioning, replication,
and transitioning to single drive setups.
harshavardhana authored and minio-trusted committed May 26, 2022
1 parent 62cd643 commit fb509f9
Showing 47 changed files with 4,177 additions and 145 deletions.
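As a rough illustration of the user-visible effect of this commit: a server started on a single drive (for example `minio server /mnt/data`) is expected to accept bucket-versioning calls that previously returned NotImplemented. The sketch below assumes the minio-go v7 SDK and uses placeholder endpoint, credentials, and bucket name; it is not part of this change set.

```go
package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint/credentials for a local single-drive server.
	client, err := minio.New("localhost:9000", &minio.Options{
		Creds: credentials.NewStaticV4("minioadmin", "minioadmin", ""),
	})
	if err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()
	if err := client.MakeBucket(ctx, "test-bucket", minio.MakeBucketOptions{}); err != nil {
		log.Fatal(err)
	}
	// Enabling versioning on a single-drive setup used to fail with
	// NotImplemented; after this commit it is expected to succeed.
	if err := client.EnableVersioning(ctx, "test-bucket"); err != nil {
		log.Fatal(err)
	}
}
```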
8 changes: 4 additions & 4 deletions Makefile
@@ -53,7 +53,7 @@ test-iam: build ## verify IAM (external IDP, etcd backends)
@echo "Running tests for IAM (external IDP, etcd backends)"
@CGO_ENABLED=0 go test -tags kqueue -v -run TestIAM* ./cmd
@echo "Running tests for IAM (external IDP, etcd backends) with -race"
@CGO_ENABLED=1 go test -race -tags kqueue -v -run TestIAM* ./cmd
@GORACE=history_size=7 CGO_ENABLED=1 go test -race -tags kqueue -v -run TestIAM* ./cmd

test-replication: install ## verify multi site replication
@echo "Running tests for replicating three sites"
@@ -73,18 +73,18 @@ test-site-replication-minio: install ## verify automatic site replication

verify: ## verify minio various setups
@echo "Verifying build with race"
@CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/verify-build.sh)

verify-healing: ## verify healing and replacing disks with minio binary
@echo "Verify healing build with race"
@CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/verify-healing.sh)
@(env bash $(PWD)/buildscripts/unaligned-healing.sh)

verify-healing-inconsistent-versions: ## verify resolving inconsistent versions
@echo "Verify resolving inconsistent versions build with race"
@CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/resolve-right-versions.sh)

build: checks ## builds minio to $(PWD)
6 changes: 0 additions & 6 deletions README.md
@@ -196,12 +196,6 @@ iptables -A INPUT -p tcp --dport 9000:9010 -j ACCEPT
service iptables restart
```

## Pre-existing data

When deployed on a single drive, MinIO server lets clients access any pre-existing data in the data directory. For example, if MinIO is started with the command `minio server /mnt/data`, any pre-existing data in the `/mnt/data` directory would be accessible to the clients.

The above statement is also valid for all gateway backends.

## Test MinIO Connectivity

### Test using MinIO Console
1 change: 1 addition & 0 deletions buildscripts/race.sh
@@ -2,6 +2,7 @@

set -e

export GORACE="history_size=7"
## TODO remove `dsync` from race detector once this is merged and released https://go-review.googlesource.com/c/go/+/333529/
for d in $(go list ./... | grep -v dsync); do
CGO_ENABLED=1 go test -v -race --timeout 100m "$d"
8 changes: 4 additions & 4 deletions cmd/admin-handlers-users-race_test.go
@@ -47,10 +47,10 @@ func TestIAMInternalIDPConcurrencyServerSuite(t *testing.T) {
}

baseTestCases := []TestSuiteCommon{
// Init and run test on FS backend with signature v4.
{serverType: "FS", signer: signerV4},
// Init and run test on FS backend, with tls enabled.
{serverType: "FS", signer: signerV4, secure: true},
// Init and run test on ErasureSD backend with signature v4.
{serverType: "ErasureSD", signer: signerV4},
// Init and run test on ErasureSD backend, with tls enabled.
{serverType: "ErasureSD", signer: signerV4, secure: true},
// Init and run test on Erasure backend.
{serverType: "Erasure", signer: signerV4},
// Init and run test on ErasureSet backend.
8 changes: 4 additions & 4 deletions cmd/admin-handlers-users_test.go
@@ -102,10 +102,10 @@ func (s *TestSuiteIAM) iamSetup(c *check) {
// common to tests.
var iamTestSuites = func() []*TestSuiteIAM {
baseTestCases := []TestSuiteCommon{
// Init and run test on FS backend with signature v4.
{serverType: "FS", signer: signerV4},
// Init and run test on FS backend, with tls enabled.
{serverType: "FS", signer: signerV4, secure: true},
// Init and run test on ErasureSD backend with signature v4.
{serverType: "ErasureSD", signer: signerV4},
// Init and run test on ErasureSD backend, with tls enabled.
{serverType: "ErasureSD", signer: signerV4, secure: true},
// Init and run test on Erasure backend.
{serverType: "Erasure", signer: signerV4},
// Init and run test on ErasureSet backend.
2 changes: 1 addition & 1 deletion cmd/bucket-handlers.go
@@ -1364,7 +1364,7 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
if !globalIsErasure {
if globalIsGateway {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
9 changes: 0 additions & 9 deletions cmd/bucket-metadata-sys.go
@@ -127,20 +127,11 @@ func (sys *BucketMetadataSys) Update(ctx context.Context, bucket string, configF
meta.QuotaConfigJSON = configData
meta.QuotaConfigUpdatedAt = UTCNow()
case objectLockConfig:
if !globalIsErasure && !globalIsDistErasure {
return NotImplemented{}
}
meta.ObjectLockConfigXML = configData
meta.ObjectLockConfigUpdatedAt = UTCNow()
case bucketVersioningConfig:
if !globalIsErasure && !globalIsDistErasure {
return NotImplemented{}
}
meta.VersioningConfigXML = configData
case bucketReplicationConfig:
if !globalIsErasure && !globalIsDistErasure {
return NotImplemented{}
}
meta.ReplicationConfigXML = configData
meta.ReplicationConfigUpdatedAt = UTCNow()
case bucketTargetsFile:
2 changes: 1 addition & 1 deletion cmd/endpoint.go
@@ -582,7 +582,7 @@ func CreateEndpoints(serverAddr string, foundLocal bool, args ...[]string) (Endp
return endpoints, setupType, config.ErrInvalidFSEndpoint(nil).Msg("use path style endpoint for FS setup")
}
endpoints = append(endpoints, endpoint)
setupType = FSSetupType
setupType = ErasureSDSetupType

// Check for cross device mounts if any.
if err = checkCrossDeviceMounts(endpoints); err != nil {
6 changes: 3 additions & 3 deletions cmd/endpoint_test.go
@@ -231,10 +231,10 @@ func TestCreateEndpoints(t *testing.T) {
}{
{"localhost", [][]string{}, "", Endpoints{}, -1, fmt.Errorf("address localhost: missing port in address")},

// FS Setup
// Erasure Single Drive
{"localhost:9000", [][]string{{"http://localhost/d1"}}, "", Endpoints{}, -1, fmt.Errorf("use path style endpoint for FS setup")},
{":443", [][]string{{"/d1"}}, ":443", Endpoints{Endpoint{URL: &url.URL{Path: mustAbs("/d1")}, IsLocal: true}}, FSSetupType, nil},
{"localhost:10000", [][]string{{"/d1"}}, "localhost:10000", Endpoints{Endpoint{URL: &url.URL{Path: mustAbs("/d1")}, IsLocal: true}}, FSSetupType, nil},
{":443", [][]string{{"/d1"}}, ":443", Endpoints{Endpoint{URL: &url.URL{Path: mustAbs("/d1")}, IsLocal: true}}, ErasureSDSetupType, nil},
{"localhost:10000", [][]string{{"/d1"}}, "localhost:10000", Endpoints{Endpoint{URL: &url.URL{Path: mustAbs("/d1")}, IsLocal: true}}, ErasureSDSetupType, nil},
{"localhost:9000", [][]string{{"https://127.0.0.1:9000/d1", "https://localhost:9001/d1", "https://example.com/d1", "https://example.com/d2"}}, "", Endpoints{}, -1, fmt.Errorf("path '/d1' can not be served by different port on same address")},

// Erasure Setup with PathEndpointType
2 changes: 1 addition & 1 deletion cmd/erasure-coding.go
@@ -41,7 +41,7 @@ type Erasure struct {
// NewErasure creates a new ErasureStorage.
func NewErasure(ctx context.Context, dataBlocks, parityBlocks int, blockSize int64) (e Erasure, err error) {
// Check the parameters for sanity now.
if dataBlocks <= 0 || parityBlocks <= 0 {
if dataBlocks <= 0 || parityBlocks < 0 {
return e, reedsolomon.ErrInvShardNum
}

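The relaxed sanity check above is what lets a single-drive layout (one data shard, zero parity shards) construct an Erasure instance. A minimal sketch of the new bounds, with illustrative names rather than the real constructor:

```go
package sketch

import "errors"

var errInvalidShardCount = errors.New("invalid number of data/parity shards")

// validateShards mirrors the relaxed sanity check: data shards must stay
// positive, but zero parity shards are now accepted (single drive, no parity).
func validateShards(dataBlocks, parityBlocks int) error {
	if dataBlocks <= 0 || parityBlocks < 0 {
		return errInvalidShardCount
	}
	return nil
}

// validateShards(1, 0) == nil  -> single-drive layout now passes
// validateShards(8, 4) == nil  -> regular erasure set still passes
// validateShards(0, 4) != nil  -> still rejected
```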
8 changes: 6 additions & 2 deletions cmd/erasure-metadata.go
@@ -99,7 +99,7 @@ func (fi FileInfo) IsValid() bool {
fi.Erasure.Index <= dataBlocks+parityBlocks &&
len(fi.Erasure.Distribution) == (dataBlocks+parityBlocks))
return ((dataBlocks >= parityBlocks) &&
(dataBlocks != 0) && (parityBlocks != 0) &&
(dataBlocks > 0) && (parityBlocks >= 0) &&
correctIndexes)
}

@@ -284,7 +284,7 @@ func (fi FileInfo) ObjectToPartOffset(ctx context.Context, offset int64) (partIn

func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time, quorum int) (FileInfo, error) {
// with less quorum return error.
if quorum < 2 {
if quorum < 1 {
return FileInfo{}, errErasureReadQuorum
}
metaHashes := make([]string, len(metaArr))
@@ -398,6 +398,10 @@ func writeUniqueFileInfo(ctx context.Context, disks []StorageAPI, bucket, prefix
// readQuorum is the min required disks to read data.
// writeQuorum is the min required disks to write data.
func objectQuorumFromMeta(ctx context.Context, partsMetaData []FileInfo, errs []error, defaultParityCount int) (objectReadQuorum, objectWriteQuorum int, err error) {
if defaultParityCount == 0 {
return 1, 1, nil
}

// get the latest updated Metadata and a count of all the latest updated FileInfo(s)
latestFileInfo, err := getLatestFileInfo(ctx, partsMetaData, errs)
if err != nil {
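The new early return above encodes the single-drive case: with zero parity there is only one copy of the metadata, so both quorums collapse to 1. The sketch below spells out the arithmetic; it assumes the usual MinIO convention that read quorum equals the data-shard count and write quorum gains one drive when data equals parity, and it is illustrative rather than the real objectQuorumFromMeta.

```go
package sketch

// quorumsFor is an illustrative helper, not MinIO's objectQuorumFromMeta.
// parity == 0 (single drive): a single copy is authoritative, so both
// quorums are 1. Otherwise read quorum is the data-shard count, and write
// quorum is bumped by one when data == parity so writes cannot tie.
func quorumsFor(dataBlocks, parityBlocks int) (readQuorum, writeQuorum int) {
	if parityBlocks == 0 {
		return 1, 1
	}
	readQuorum = dataBlocks
	writeQuorum = dataBlocks
	if dataBlocks == parityBlocks {
		writeQuorum++
	}
	return readQuorum, writeQuorum
}

// quorumsFor(1, 0) -> 1, 1  (single drive, this commit's fast path)
// quorumsFor(8, 4) -> 8, 8  (12-drive set with EC:4)
// quorumsFor(6, 6) -> 6, 7  (write quorum bumped to break the tie)
```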
5 changes: 5 additions & 0 deletions cmd/erasure-object.go
@@ -1327,12 +1327,17 @@ func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objec
func (er erasureObjects) deletePrefix(ctx context.Context, bucket, prefix string) error {
disks := er.getDisks()
g := errgroup.WithNErrs(len(disks))
dirPrefix := encodeDirObject(prefix)
for index := range disks {
index := index
g.Go(func() error {
if disks[index] == nil {
return nil
}
// Deletes
// - The prefix and its children
// - The prefix__XLDIR__
defer disks[index].Delete(ctx, bucket, dirPrefix, true)
return disks[index].Delete(ctx, bucket, prefix, true)
}, index)
}
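The deferred delete above covers the directory-marker spelling of the prefix that the in-code comment calls `prefix__XLDIR__`. A minimal sketch of that encoding, assuming only the trailing-slash-to-`__XLDIR__` convention implied by the diff comment; the real encodeDirObject lives elsewhere in the codebase:

```go
package sketch

import "strings"

// dirSuffix is the directory-marker suffix referenced in the diff comment.
const dirSuffix = "__XLDIR__"

// encodeDirObjectSketch illustrates why deletePrefix now issues two deletes:
// a key ending in "/" is stored under the __XLDIR__ spelling, so removing a
// prefix has to remove both forms. Illustrative, not MinIO's exact helper.
func encodeDirObjectSketch(object string) string {
	if strings.HasSuffix(object, "/") {
		return strings.TrimSuffix(object, "/") + dirSuffix
	}
	return object
}

// encodeDirObjectSketch("photos/") -> "photos__XLDIR__"
// encodeDirObjectSketch("photos")  -> "photos"
```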
22 changes: 19 additions & 3 deletions cmd/erasure-server-pool.go
@@ -61,6 +61,22 @@ func (z *erasureServerPools) SinglePool() bool {

// Initialize new pool of erasure sets.
func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServerPools) (ObjectLayer, error) {
if endpointServerPools.NEndpoints() == 1 {
ep := endpointServerPools[0]
storageDisks, format, err := waitForFormatErasure(true, ep.Endpoints, 1, ep.SetCount, ep.DrivesPerSet, "", "")
if err != nil {
return nil, err
}

objLayer, err := newErasureSingle(ctx, storageDisks[0], format)
if err != nil {
return nil, err
}

globalLocalDrives = storageDisks
return objLayer, nil
}

var (
deploymentID string
distributionAlgo string
@@ -320,7 +336,7 @@ func (z *erasureServerPools) getServerPoolsAvailableSpace(ctx context.Context, b
nSets[index] = pool.setCount
g.Go(func() error {
// Get the set where it would be placed.
storageInfos[index] = getDiskInfos(ctx, pool.getHashedSet(object).getDisks())
storageInfos[index] = getDiskInfos(ctx, pool.getHashedSet(object).getDisks()...)
return nil
}, index)
}
@@ -933,7 +949,7 @@ func (z *erasureServerPools) PutObject(ctx context.Context, bucket string, objec
object = encodeDirObject(object)

if z.SinglePool() {
if !isMinioMetaBucketName(bucket) && !hasSpaceFor(getDiskInfos(ctx, z.serverPools[0].getHashedSet(object).getDisks()), data.Size()) {
if !isMinioMetaBucketName(bucket) && !hasSpaceFor(getDiskInfos(ctx, z.serverPools[0].getHashedSet(object).getDisks()...), data.Size()) {
return ObjectInfo{}, toObjectErr(errDiskFull)
}
return z.serverPools[0].PutObject(ctx, bucket, object, data, opts)
@@ -1325,7 +1341,7 @@ func (z *erasureServerPools) NewMultipartUpload(ctx context.Context, bucket, obj
}

if z.SinglePool() {
if !isMinioMetaBucketName(bucket) && !hasSpaceFor(getDiskInfos(ctx, z.serverPools[0].getHashedSet(object).getDisks()), -1) {
if !isMinioMetaBucketName(bucket) && !hasSpaceFor(getDiskInfos(ctx, z.serverPools[0].getHashedSet(object).getDisks()...), -1) {
return "", toObjectErr(errDiskFull)
}
return z.serverPools[0].NewMultipartUpload(ctx, bucket, object, opts)
2 changes: 1 addition & 1 deletion cmd/erasure-sets.go
@@ -1252,7 +1252,7 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H

defer func(storageDisks []StorageAPI) {
if err != nil {
closeStorageDisks(storageDisks)
closeStorageDisks(storageDisks...)
}
}(storageDisks)

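Several helpers touched here (getDiskInfos, closeStorageDisks) now take variadic disk arguments, which is why call sites spread slices with `...`. A tiny sketch of the pattern with placeholder types, not MinIO's actual StorageAPI:

```go
package sketch

// closer stands in for a storage disk; only the shutdown side matters here.
type closer interface{ Close() error }

// closeStorageDisksSketch shows the variadic shape the call sites rely on:
// a slice is spread with "...", individual disks can be passed directly,
// and nil entries (offline disks) are skipped.
func closeStorageDisksSketch(disks ...closer) {
	for _, d := range disks {
		if d == nil {
			continue
		}
		_ = d.Close()
	}
}

// Usage: closeStorageDisksSketch(storageDisks...) or closeStorageDisksSketch(d1, d2).
```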