Skip to content

Commit

Permalink
HDDS-5159. Make periodic disk check interval configurable.
Browse files Browse the repository at this point in the history
  • Loading branch information
markgui committed May 5, 2021
1 parent 142a2dd commit f5615f2
Show file tree
Hide file tree
Showing 3 changed files with 52 additions and 3 deletions.
Expand Up @@ -41,9 +41,13 @@ public class DatanodeConfiguration {
"hdds.datanode.replication.streams.limit";
static final String CONTAINER_DELETE_THREADS_MAX_KEY =
"hdds.datanode.container.delete.threads.max";
static final String PERIODIC_DISK_CHECK_INTERVAL_MINUTES_KEY =
"hdds.datanode.periodic.disk.check.interval.minutes";

static final int REPLICATION_MAX_STREAMS_DEFAULT = 10;

static final long PERIODIC_DISK_CHECK_INTERVAL_MINUTES_DEFAULT = 15;

/**
* The maximum number of replication commands a single datanode can execute
* simultaneously.
Expand Down Expand Up @@ -110,6 +114,15 @@ public void setBlockDeletionLimit(int limit) {
this.blockLimitPerInterval = limit;
}

// Interval, in minutes, between scheduled background disk checks on the
// datanode. Values below 1 are rejected by validate() and replaced with
// PERIODIC_DISK_CHECK_INTERVAL_MINUTES_DEFAULT (15).
@Config(key = "periodic.disk.check.interval.minutes",
defaultValue = "15",
type = ConfigType.LONG,
tags = { DATANODE },
description = "Periodic disk check run interval in minutes."
)
private long periodicDiskCheckIntervalMinutes =
PERIODIC_DISK_CHECK_INTERVAL_MINUTES_DEFAULT;

@PostConstruct
public void validate() {
if (replicationMaxStreams < 1) {
Expand All @@ -125,6 +138,15 @@ public void validate() {
containerDeleteThreads, CONTAINER_DELETE_THREADS_DEFAULT);
containerDeleteThreads = CONTAINER_DELETE_THREADS_DEFAULT;
}

if (periodicDiskCheckIntervalMinutes < 1) {
LOG.warn(PERIODIC_DISK_CHECK_INTERVAL_MINUTES_KEY +
" must be greater than zero and was set to {}. Defaulting to {}",
periodicDiskCheckIntervalMinutes,
PERIODIC_DISK_CHECK_INTERVAL_MINUTES_DEFAULT);
periodicDiskCheckIntervalMinutes =
PERIODIC_DISK_CHECK_INTERVAL_MINUTES_DEFAULT;
}
}

public void setReplicationMaxStreams(int replicationMaxStreams) {
Expand All @@ -143,4 +165,12 @@ public int getContainerDeleteThreads() {
return containerDeleteThreads;
}

/**
 * Returns the configured interval, in minutes, between periodic
 * background disk checks.
 *
 * @return the periodic disk check interval in minutes
 */
public long getPeriodicDiskCheckIntervalMinutes() {
  return this.periodicDiskCheckIntervalMinutes;
}

/**
 * Sets the interval, in minutes, between periodic background disk checks.
 * Note: this setter does not re-run validation; values below 1 are only
 * normalized when the object is built through the config framework.
 *
 * @param intervalMinutes the new disk check interval in minutes
 */
public void setPeriodicDiskCheckIntervalMinutes(long intervalMinutes) {
  this.periodicDiskCheckIntervalMinutes = intervalMinutes;
}
}
Expand Up @@ -39,6 +39,7 @@
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume.VolumeState;
import org.apache.hadoop.util.DiskChecker;
Expand Down Expand Up @@ -88,8 +89,6 @@ public class MutableVolumeSet implements VolumeSet {
private final ScheduledFuture<?> periodicDiskChecker;
private final SpaceUsageCheckFactory usageCheckFactory;

private static final long DISK_CHECK_INTERVAL_MINUTES = 15;

/**
* A Reentrant Read Write Lock to synchronize volume operations in VolumeSet.
* Any update to {@link #volumeMap}, {@link #failedVolumeMap}, or
Expand Down Expand Up @@ -123,14 +122,19 @@ public MutableVolumeSet(String dnUuid, String clusterID,
t.setDaemon(true);
return t;
});

DatanodeConfiguration dnConf =
conf.getObject(DatanodeConfiguration.class);
long periodicDiskCheckIntervalMinutes =
dnConf.getPeriodicDiskCheckIntervalMinutes();
this.periodicDiskChecker =
diskCheckerservice.scheduleWithFixedDelay(() -> {
try {
checkAllVolumes();
} catch (IOException e) {
LOG.warn("Exception while checking disks", e);
}
}, DISK_CHECK_INTERVAL_MINUTES, DISK_CHECK_INTERVAL_MINUTES,
}, periodicDiskCheckIntervalMinutes, periodicDiskCheckIntervalMinutes,
TimeUnit.MINUTES);

usageCheckFactory = SpaceUsageCheckFactory.create(conf);
Expand Down
Expand Up @@ -24,6 +24,9 @@
import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.CONTAINER_DELETE_THREADS_MAX_KEY;
import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.REPLICATION_MAX_STREAMS_DEFAULT;
import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.REPLICATION_STREAMS_LIMIT_KEY;
import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.PERIODIC_DISK_CHECK_INTERVAL_MINUTES_KEY;
import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.PERIODIC_DISK_CHECK_INTERVAL_MINUTES_DEFAULT;

import static org.junit.Assert.assertEquals;

/**
Expand All @@ -36,26 +39,34 @@ public void acceptsValidValues() {
// GIVEN
int validReplicationLimit = 123;
int validDeleteThreads = 42;
long validDiskCheckIntervalMinutes = 60;
OzoneConfiguration conf = new OzoneConfiguration();
conf.setInt(REPLICATION_STREAMS_LIMIT_KEY, validReplicationLimit);
conf.setInt(CONTAINER_DELETE_THREADS_MAX_KEY, validDeleteThreads);
conf.setLong(PERIODIC_DISK_CHECK_INTERVAL_MINUTES_KEY,
validDiskCheckIntervalMinutes);

// WHEN
DatanodeConfiguration subject = conf.getObject(DatanodeConfiguration.class);

// THEN
assertEquals(validReplicationLimit, subject.getReplicationMaxStreams());
assertEquals(validDeleteThreads, subject.getContainerDeleteThreads());
assertEquals(validDiskCheckIntervalMinutes,
subject.getPeriodicDiskCheckIntervalMinutes());
}

@Test
public void overridesInvalidValues() {
// GIVEN
int invalidReplicationLimit = -5;
int invalidDeleteThreads = 0;
long invalidDiskCheckIntervalMinutes = -1;
OzoneConfiguration conf = new OzoneConfiguration();
conf.setInt(REPLICATION_STREAMS_LIMIT_KEY, invalidReplicationLimit);
conf.setInt(CONTAINER_DELETE_THREADS_MAX_KEY, invalidDeleteThreads);
conf.setLong(PERIODIC_DISK_CHECK_INTERVAL_MINUTES_KEY,
invalidDiskCheckIntervalMinutes);

// WHEN
DatanodeConfiguration subject = conf.getObject(DatanodeConfiguration.class);
Expand All @@ -65,6 +76,8 @@ public void overridesInvalidValues() {
subject.getReplicationMaxStreams());
assertEquals(CONTAINER_DELETE_THREADS_DEFAULT,
subject.getContainerDeleteThreads());
assertEquals(PERIODIC_DISK_CHECK_INTERVAL_MINUTES_DEFAULT,
subject.getPeriodicDiskCheckIntervalMinutes());
}

@Test
Expand All @@ -80,6 +93,8 @@ public void isCreatedWitDefaultValues() {
subject.getReplicationMaxStreams());
assertEquals(CONTAINER_DELETE_THREADS_DEFAULT,
subject.getContainerDeleteThreads());
assertEquals(PERIODIC_DISK_CHECK_INTERVAL_MINUTES_DEFAULT,
subject.getPeriodicDiskCheckIntervalMinutes());
}

}

0 comments on commit f5615f2

Please sign in to comment.