29 changes: 21 additions & 8 deletions cmd/erasure-server-pool-decom.go
@@ -245,6 +245,7 @@ func (p *poolMeta) QueueBuckets(idx int, buckets []decomBucketInfo) {
 var (
 	errDecommissionAlreadyRunning = errors.New("decommission is already in progress")
 	errDecommissionComplete       = errors.New("decommission is complete, please remove the servers from command-line")
+	errDecommissionNotStarted     = errors.New("decommission is not in progress")
 )

 func (p *poolMeta) Decommission(idx int, pi poolSpaceInfo) error {
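The new sentinel follows the two `errors.New` values above it, so callers can match it with `errors.Is`. A minimal, self-contained sketch of that caller-side pattern (the `cancelDecommission` wrapper is hypothetical, not MinIO API):

```go
package main

import (
	"errors"
	"fmt"
)

// Same sentinel pattern as the var block above.
var errDecommissionNotStarted = errors.New("decommission is not in progress")

// cancelDecommission stands in for DecommissionCancel; illustrative only.
func cancelDecommission(started bool) error {
	if !started {
		return errDecommissionNotStarted
	}
	return nil
}

func main() {
	if err := cancelDecommission(false); errors.Is(err, errDecommissionNotStarted) {
		fmt.Println("nothing to cancel:", err)
	}
}
```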
@@ -1183,7 +1184,11 @@ func (z *erasureServerPools) Status(ctx context.Context, idx int) (PoolStatus, e
 	poolInfo := z.poolMeta.Pools[idx]
 	if poolInfo.Decommission != nil {
 		poolInfo.Decommission.TotalSize = pi.Total
-		poolInfo.Decommission.CurrentSize = poolInfo.Decommission.StartSize + poolInfo.Decommission.BytesDone
+		if poolInfo.Decommission.Failed || poolInfo.Decommission.Canceled {
+			poolInfo.Decommission.CurrentSize = pi.Free
+		} else {
+			poolInfo.Decommission.CurrentSize = poolInfo.Decommission.StartSize + poolInfo.Decommission.BytesDone
+		}
 	} else {
 		poolInfo.Decommission = &PoolDecommissionInfo{
 			TotalSize: pi.Total,
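The new branch changes what `CurrentSize` means: while the drain is running it reports start size plus bytes moved, but once the decommission fails or is canceled the pool serves data again, so free space is the honest figure. A standalone sketch of the rule (stand-in struct, not the real `PoolDecommissionInfo`):

```go
package main

import "fmt"

// decom mirrors only the fields the branch above reads.
type decom struct {
	Failed, Canceled     bool
	StartSize, BytesDone int64
}

// currentSize reproduces the reporting rule from Status.
func currentSize(d decom, free int64) int64 {
	if d.Failed || d.Canceled {
		return free // pool is back in service; report its free space
	}
	return d.StartSize + d.BytesDone // drain still running; report progress
}

func main() {
	fmt.Println(currentSize(decom{StartSize: 100, BytesDone: 40}, 900)) // 140
	fmt.Println(currentSize(decom{Canceled: true}, 900))                // 900
}
```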
Expand Down Expand Up @@ -1219,15 +1224,21 @@ func (z *erasureServerPools) DecommissionCancel(ctx context.Context, idx int) (e
z.poolMetaMutex.Lock()
defer z.poolMetaMutex.Unlock()

fn := z.decommissionCancelers[idx]
if fn == nil {
// canceling a decommission before it started return an error.
return errDecommissionNotStarted
}

defer fn() // cancel any active thread.

if z.poolMeta.DecommissionCancel(idx) {
if fn := z.decommissionCancelers[idx]; fn != nil {
defer fn() // cancel any active thread.
}
if err = z.poolMeta.save(ctx, z.serverPools); err != nil {
return err
}
globalNotificationSys.ReloadPoolMeta(ctx)
}

return nil
}
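Hoisting the canceler lookup out of the `DecommissionCancel(idx)` branch has two effects: a nil canceler now yields `errDecommissionNotStarted` instead of silently succeeding, and the `defer fn()` fires even when the metadata update is skipped. A self-contained sketch of that bookkeeping with `context.CancelFunc` (names are illustrative, not MinIO's):

```go
package main

import (
	"context"
	"fmt"
)

// pools keeps one cancel function per pool, set when a decommission starts.
type pools struct {
	cancelers []context.CancelFunc
}

func (p *pools) start(idx int) context.Context {
	ctx, cancel := context.WithCancel(context.Background())
	p.cancelers[idx] = cancel
	return ctx
}

func (p *pools) cancel(idx int) error {
	fn := p.cancelers[idx]
	if fn == nil {
		return fmt.Errorf("decommission is not in progress")
	}
	defer fn() // stop the background drain on the way out
	return nil
}

func main() {
	p := &pools{cancelers: make([]context.CancelFunc, 2)}
	fmt.Println(p.cancel(0)) // never started: error

	ctx := p.start(1)
	err := p.cancel(1)
	fmt.Println(err, ctx.Err()) // <nil> context.Canceled
}
```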

@@ -1245,8 +1256,9 @@ func (z *erasureServerPools) DecommissionFailed(ctx context.Context, idx int) (e

 	if z.poolMeta.DecommissionFailed(idx) {
 		if fn := z.decommissionCancelers[idx]; fn != nil {
-			defer fn() // cancel any active thread.
-		}
+			defer fn()
+		} // cancel any active thread.
+
 		if err = z.poolMeta.save(ctx, z.serverPools); err != nil {
 			return err
 		}
@@ -1269,8 +1281,9 @@ func (z *erasureServerPools) CompleteDecommission(ctx context.Context, idx int)

 	if z.poolMeta.DecommissionComplete(idx) {
 		if fn := z.decommissionCancelers[idx]; fn != nil {
-			defer fn() // cancel any active thread.
-		}
+			defer fn()
+		} // cancel any active thread.
+
 		if err = z.poolMeta.save(ctx, z.serverPools); err != nil {
 			return err
 		}
4 changes: 2 additions & 2 deletions docs/bucket/replication/setup_2site_existing_replication.sh
@@ -84,7 +84,7 @@ remote_arn=$(./mc replicate ls sitea/bucket --json | jq -r .rule.Destination.Buc
 sleep 1

 ./mc replicate resync start sitea/bucket/ --remote-bucket "${remote_arn}"
-sleep 20s ## sleep for 20s idea is that we give 200ms per object.
+sleep 30s ## sleep for 30s idea is that we give 300ms per object.

 count=$(./mc replicate resync status sitea/bucket --remote-bucket "${remote_arn}" --json | jq .resyncInfo.target[].replicationCount)

@@ -99,7 +99,7 @@ if [ $ret -ne 0 ]; then
 fi

 if [ $count -ne 12 ]; then
-	echo "resync not complete after 10s unexpected failure"
+	echo "resync not complete after 30s - unexpected failure"
 	./mc diff sitea/bucket siteb/bucket
 	exit 1
 fi
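The fixed sleep is still a timing guess; polling the same `--json` status until the count settles would make the test less flaky. A sketch reusing the exact command and jq path from the script (`remote_arn` assumed in scope):

```sh
# Poll up to 30 times, one second apart, instead of sleeping blindly.
for _ in $(seq 1 30); do
	count=$(./mc replicate resync status sitea/bucket --remote-bucket "${remote_arn}" --json | jq .resyncInfo.target[].replicationCount)
	[ "${count}" = "12" ] && break
	sleep 1
done
```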
2 changes: 1 addition & 1 deletion docs/distributed/decom-compressed-sse-s3.sh
@@ -33,7 +33,7 @@ sleep 2
 ./mc admin policy create myminio/ lake ./docs/distributed/rw.json

 ./mc admin policy attach myminio/ rw --user=minio123
-./mc admin policy attach myminio/ lake,rw --user=minio12345
+./mc admin policy attach myminio/ lake --user=minio12345

 ./mc mb -l myminio/versioned

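The same `lake,rw` → `lake` fix repeats in each decom script below, so minio12345 ends up with only the `lake` policy attached here. A quick way to confirm the attachment (sketch; the `.policyName` JSON field is an assumption about mc's output shape):

```sh
# Inspect what is attached to the user after the change.
./mc admin user info myminio minio12345
./mc admin user info myminio minio12345 --json | jq -r .policyName
```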
2 changes: 1 addition & 1 deletion docs/distributed/decom-encrypted-sse-s3.sh
@@ -28,7 +28,7 @@ sleep 2
 ./mc admin policy create myminio/ lake ./docs/distributed/rw.json

 ./mc admin policy attach myminio/ rw --user=minio123
-./mc admin policy attach myminio/ lake,rw --user=minio12345
+./mc admin policy attach myminio/ lake --user=minio12345

 ./mc mb -l myminio/versioned

2 changes: 1 addition & 1 deletion docs/distributed/decom-encrypted.sh
@@ -30,7 +30,7 @@ export MC_HOST_myminio="http://minioadmin:minioadmin@localhost:9000/"
 ./mc admin policy create myminio/ lake ./docs/distributed/rw.json

 ./mc admin policy attach myminio/ rw --user=minio123
-./mc admin policy attach myminio/ lake,rw --user=minio12345
+./mc admin policy attach myminio/ lake --user=minio12345

 ./mc mb -l myminio/versioned

4 changes: 2 additions & 2 deletions docs/distributed/decom.sh
@@ -15,7 +15,7 @@ fi

 export CI=true

-(minio server /tmp/xl/{1...10}/disk{0...1} 2>&1 >/dev/null) &
+(minio server /tmp/xl/{1...10}/disk{0...1} 2>&1 >/tmp/decom.log) &
 pid=$!

 sleep 2
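Keeping the server output in /tmp/decom.log makes failures debuggable, though note the redirection order: `2>&1 >/tmp/decom.log` points stderr at the original stdout before stdout moves to the log, so stderr still reaches the console. A sketch of the conventional order for capturing both streams, plus a failure dump (the trap is an addition, not in the script):

```sh
# Capture stdout and stderr, then surface the log tail if a later step fails.
(minio server /tmp/xl/{1...10}/disk{0...1} >/tmp/decom.log 2>&1) &
pid=$!

trap 'echo "=== last server output ==="; tail -n 50 /tmp/decom.log' ERR
```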
@@ -29,7 +29,7 @@ export MC_HOST_myminio="http://minioadmin:minioadmin@localhost:9000/"
 ./mc admin policy create myminio/ lake ./docs/distributed/rw.json

 ./mc admin policy attach myminio/ rw --user=minio123
-./mc admin policy attach myminio/ lake,rw --user=minio12345
+./mc admin policy attach myminio/ lake --user=minio12345

 ./mc mb -l myminio/versioned
