diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java
index da4636b2c0fbe..45f9311fc5a02 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DF.java
@@ -39,6 +39,9 @@
 @InterfaceStability.Evolving
 public class DF extends Shell {
 
+  /** Default DF refresh interval. */
+  public static final long DF_INTERVAL_DEFAULT = 3 * 1000;
+
   private final String dirPath;
   private final File dirFile;
   private String filesystem;
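For reference, DF_INTERVAL_DEFAULT is the refresh interval handed to the existing org.apache.hadoop.fs.DF shell wrapper, which the Linux plugin below uses to map each local directory onto the filesystem backing it. A minimal sketch of that lookup, with an invented directory path:

```java
import java.io.File;
import java.io.IOException;

import org.apache.hadoop.fs.DF;

public class MountLookupSketch {
  public static void main(String[] args) throws IOException {
    // Resolve the filesystem (device) behind a local dir; DF caches the
    // result and re-runs 'df' at most once per DF_INTERVAL_DEFAULT ms.
    DF df = new DF(new File("/data/1/yarn/local"), DF.DF_INTERVAL_DEFAULT);
    System.out.println(df.getFilesystem()); // e.g. /dev/sda1
  }
}
```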
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IoTimeTracker.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IoTimeTracker.java
new file mode 100644
index 0000000000000..a063f795e1d8d
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IoTimeTracker.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.math.BigInteger;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class IoTimeTracker {
+  public static final int UNAVAILABLE = -1;
+  final long MINIMUM_UPDATE_INTERVAL;
+
+  // IO used time since system is on (ms)
+  BigInteger cumulativeIoTime = BigInteger.ZERO;
+
+  // IO used time read last time (ms)
+  BigInteger lastCumulativeIoTime = BigInteger.ZERO;
+
+  // Unix timestamp while reading the IO time (ms)
+  long sampleTime;
+  long lastSampleTime;
+  float ioUsage;
+  BigInteger jiffyLengthInMillis;
+
+  private ReadWriteLock rwLock = new ReentrantReadWriteLock();
+  private Lock readLock = rwLock.readLock();
+  public Lock writeLock = rwLock.writeLock();
+
+  public IoTimeTracker(long jiffyLengthInMillis) {
+    this.jiffyLengthInMillis = BigInteger.valueOf(jiffyLengthInMillis);
+    this.ioUsage = UNAVAILABLE;
+    this.sampleTime = UNAVAILABLE;
+    this.lastSampleTime = UNAVAILABLE;
+    MINIMUM_UPDATE_INTERVAL = 10 * jiffyLengthInMillis;
+  }
+
+  /**
+   * Return the percentage of IO time spent over the time since the last
+   * update. IO time spent is based on elapsed jiffies multiplied by the
+   * amount of time for one disk, so fully using two disks between updates
+   * would return 200%.
+   *
+   * @return percentage of IO usage since the last update, {@link
+   *         IoTimeTracker#UNAVAILABLE} if there have not been two updates
+   *         more than {@link IoTimeTracker#MINIMUM_UPDATE_INTERVAL} apart
+   */
+  public float getIoTrackerUsagePercent() {
+    writeLock.lock();
+    try {
+      if (lastSampleTime == UNAVAILABLE || lastSampleTime > sampleTime) {
+        // lastSampleTime > sampleTime may happen when the system time is changed
+        lastSampleTime = sampleTime;
+        lastCumulativeIoTime = cumulativeIoTime;
+        return ioUsage;
+      }
+      // When lastSampleTime is sufficiently old, update ioUsage.
+      // Also take a sample of the current time and cumulative IO time for the
+      // use of the next calculation.
+      if (sampleTime > lastSampleTime + MINIMUM_UPDATE_INTERVAL) {
+        ioUsage = ((cumulativeIoTime.subtract(lastCumulativeIoTime)).floatValue())
+            / ((float) (sampleTime - lastSampleTime));
+        lastSampleTime = sampleTime;
+        lastCumulativeIoTime = cumulativeIoTime;
+      }
+    } finally {
+      writeLock.unlock();
+    }
+    return ioUsage;
+  }
+
+  public void updateElapsedJiffies(BigInteger totalIoPerDisk, long sampleTime) {
+    this.cumulativeIoTime = this.cumulativeIoTime.add(totalIoPerDisk);
+    this.sampleTime = sampleTime;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("SampleTime " + this.sampleTime);
+    sb.append(" CumulativeIoTime " + this.cumulativeIoTime);
+    sb.append(" LastSampleTime " + this.lastSampleTime);
+    sb.append(" LastCumulativeIoTime " + this.lastCumulativeIoTime);
+    sb.append(" IoUsage " + this.ioUsage);
+    sb.append(" JiffyLengthMillisec " + this.jiffyLengthInMillis);
+    return sb.toString();
+  }
+}
\ No newline at end of file
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeResource.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeResource.java
new file mode 100644
index 0000000000000..65655bb266817
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeResource.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/** Snapshot of the CPU, IO and memory utilization of a node. */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class NodeResource {
+
+  float cpuUsage;
+  float ioUsage;
+  float memoryUsage;
+
+  public NodeResource(float cpuUsage, float ioUsage, float memoryUsage) {
+    this.cpuUsage = cpuUsage;
+    this.ioUsage = ioUsage;
+    this.memoryUsage = memoryUsage;
+  }
+
+  public float getCpuUsage() {
+    return this.cpuUsage;
+  }
+
+  public float getIoUsage() {
+    return this.ioUsage;
+  }
+
+  public float getMemoryUsage() {
+    return this.memoryUsage;
+  }
+
+  public void setCpuUsage(float cpuUsage) {
+    this.cpuUsage = cpuUsage;
+  }
+
+  public void setIoUsage(float ioUsage) {
+    this.ioUsage = ioUsage;
+  }
+
+  public void setMemoryUsage(float memoryUsage) {
+    this.memoryUsage = memoryUsage;
+  }
+}
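The tracker mirrors the existing CpuTimeTracker: it stores cumulative IO time and the sample timestamp, and reports the busy fraction between two sufficiently spaced samples. A minimal sketch of how it is driven, with illustrative numbers that are not taken from the patch:

```java
import java.math.BigInteger;

import org.apache.hadoop.util.IoTimeTracker;

public class IoTimeTrackerSketch {
  public static void main(String[] args) {
    // 10 ms per jiffy, as on a typical HZ=100 Linux kernel.
    IoTimeTracker tracker = new IoTimeTracker(10);

    // First sample: 1,000 ms of cumulative IO time at t = 100,000 ms.
    tracker.updateElapsedJiffies(BigInteger.valueOf(1000), 100_000L);
    tracker.getIoTrackerUsagePercent(); // primes the tracker, returns -1

    // Second sample 1,000 ms later, during which disks were busy for 500 ms.
    tracker.updateElapsedJiffies(BigInteger.valueOf(500), 101_000L);
    float usage = tracker.getIoTrackerUsagePercent();
    // (1500 - 1000) ms of IO over 1,000 ms elapsed = 0.5; two fully busy
    // disks over the same interval would read 2.0.
    System.out.println(usage);
  }
}
```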
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfo.java
index e8a571489e9ec..2db0f825d157f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfo.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfo.java
@@ -20,6 +20,7 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
 
 /**
  * Plugin to calculate resource information on the system.
@@ -108,6 +109,21 @@ public static SysInfo newInstance() {
    */
   public abstract float getCpuUsagePercentage();
 
+  /**
+   * Obtain the IO usage % of the machine. Return -1 if it is unavailable.
+   *
+   * @return IO usage in %
+   */
+  public abstract float getIoUsagePercentage(String[] paths);
+
+  /**
+   * Obtain the node resource of the machine. Return null if it is unavailable.
+   *
+   * @return CPU, IO and memory usage in %
+   */
+  public abstract NodeResource getNodeResourceLastPeriod(
+      String[] localDirs, long millis);
+
   /**
    * Obtain the number of VCores used. Return -1 if it is unavailable
    *
@@ -132,13 +148,13 @@ public static SysInfo newInstance() {
    *
    * @return total number of bytes read.
    */
-  public abstract long getStorageBytesRead();
+  public abstract long getStorageBytesRead(String[] paths);
 
   /**
    * Obtain the aggregated number of bytes written to disks.
    *
    * @return total number of bytes written.
    */
-  public abstract long getStorageBytesWritten();
+  public abstract long getStorageBytesWritten(String[] paths);
 
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
index 38777d8f66465..f5fa7c8214620 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java
@@ -19,14 +19,23 @@
 package org.apache.hadoop.util;
 
 import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileInputStream;
 import java.io.InputStreamReader;
 import java.io.IOException;
 import java.math.BigInteger;
 import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -34,10 +43,14 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.fs.DF.DF_INTERVAL_DEFAULT;
+
 /**
  * Plugin to calculate resource information on Linux systems.
  */
@@ -111,6 +124,8 @@ public class SysInfoLinux extends SysInfo {
           "[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)" +
           "[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)" +
           "[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)");
+  private IoTimeTracker ioTimeTracker;
+
   /**
    * Pattern for parsing /sys/block/partition_name/queue/hw_sector_size.
    */
@@ -124,6 +139,10 @@ public class SysInfoLinux extends SysInfo {
   private String procfsDisksFile;
   private long jiffyLengthInMillis;
 
+  // cache of the mapping from the configured paths to their filesystems
+  private String[] lastPaths;
+  private List<String> mountList = new ArrayList<>();
+
   private long ramSize = 0;
   private long swapSize = 0;
   private long ramSizeFree = 0;  // free ram space on the machine (kB)
@@ -139,6 +158,8 @@ public class SysInfoLinux extends SysInfo {
   private int numProcessors = 0;
   /* number of physical cores on the system. */
   private int numCores = 0;
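A sketch of what a caller of the widened SysInfo interface could look like; the directory names and the 60-second window are invented for illustration, and on Linux the values come from the /proc parsing shown further below:

```java
import org.apache.hadoop.util.NodeResource;
import org.apache.hadoop.util.SysInfo;

public class NodeLoadProbeSketch {
  public static void main(String[] args) {
    SysInfo info = SysInfo.newInstance();
    String[] localDirs = {"/data/1/yarn/local", "/data/2/yarn/local"};

    // Per-disk IO busy fraction for the disks behind localDirs.
    float io = info.getIoUsagePercentage(localDirs);

    // CPU, IO and memory usage averaged over the samples collected
    // during the last 60 seconds.
    NodeResource last = info.getNodeResourceLastPeriod(localDirs, 60_000L);
    System.out.println(io + " " + last.getCpuUsage() + " "
        + last.getIoUsage() + " " + last.getMemoryUsage());
  }
}
```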
+  /* number of disks on the system. */
+  private int numDisks = 0;
   private long cpuFrequency = 0L; // CPU frequency on the system (kHz)
   private long numNetBytesRead = 0L; // aggregated bytes read from network
   private long numNetBytesWritten = 0L; // aggregated bytes written to network
@@ -155,6 +176,9 @@ public class SysInfoLinux extends SysInfo {
   public static final long JIFFY_LENGTH_IN_MILLIS =
       Math.max(Math.round(1000D / getConf("CLK_TCK")), -1);
 
+  /* map from sample time (ms) to the node load measured at that time */
+  Map<Long, NodeResource> nodeResourceSampleMap = new ConcurrentHashMap<>();
+
   private static long getConf(String attr) {
     if(Shell.LINUX) {
       try {
@@ -207,6 +231,7 @@ public SysInfoLinux(String procfsMemFile,
     this.jiffyLengthInMillis = jiffyLengthInMillis;
     this.cpuTimeTracker = new CpuTimeTracker(jiffyLengthInMillis);
     this.perDiskSectorSize = new HashMap<String, Integer>();
+    this.ioTimeTracker = new IoTimeTracker(jiffyLengthInMillis);
   }
 
   /**
@@ -480,28 +505,39 @@ private void readProcNetInfoFile() {
    * Read /proc/diskstats file, parse and calculate amount
    * of bytes read and written from/to disks.
    */
-  private void readProcDisksInfoFile() {
+  private void readProcDisksInfoFile(String[] paths) {
     numDisksBytesRead = 0L;
     numDisksBytesWritten = 0L;
 
     // Read "/proc/diskstats" file
-    BufferedReader in;
-    try {
-      in = new BufferedReader(new InputStreamReader(
-          Files.newInputStream(Paths.get(procfsDisksFile)),
-          Charset.forName("UTF-8")));
-    } catch (IOException f) {
-      return;
-    }
     Matcher mat;
-    try {
+    try (BufferedReader in = new BufferedReader(new InputStreamReader(
+        new FileInputStream(procfsDisksFile), StandardCharsets.UTF_8))) {
+      numDisks = 0;
+      ioTimeTracker.cumulativeIoTime = BigInteger.ZERO;
+      // refresh the path -> filesystem mapping when the configured paths
+      // change or when some of them could not be resolved last time
+      if (paths != null && (!Arrays.equals(lastPaths, paths)
+          || mountList.size() != paths.length)) {
+        mountList = new ArrayList<>(paths.length);
+        for (String path : paths) {
+          try {
+            DF df = new DF(new File(path), DF_INTERVAL_DEFAULT);
+            mountList.add(df.getFilesystem());
+          } catch (Throwable t) {
+            // path may not exist for unhealthy disks etc.
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Read disk path " + path + " error.", t);
+            }
+          }
+        }
+        lastPaths = paths;
+      }
+      String str = in.readLine();
+      long totalIoPerDisk = 0;
       while (str != null) {
         mat = PROCFS_DISKSFILE_FORMAT.matcher(str);
         if (mat.find()) {
           String diskName = mat.group(4);
           assert diskName != null;
           // ignore loop or ram partitions
           if (diskName.contains("loop") || diskName.contains("ram")) {
@@ -509,6 +545,9 @@ private void readProcDisksInfoFile() {
             continue;
           }
 
+          numDisks++;
+          totalIoPerDisk += Long.parseLong(mat.group(14)); // ms spent doing I/O
+
           Integer sectorSize;
           synchronized (perDiskSectorSize) {
             sectorSize = perDiskSectorSize.get(diskName);
@@ -530,15 +569,12 @@ private void readProcDisksInfoFile() {
         }
         str = in.readLine();
       }
+
+      ioTimeTracker.updateElapsedJiffies(
+          BigInteger.valueOf(totalIoPerDisk),
+          getCurrentTime());
     } catch (IOException e) {
       LOG.warn("Error reading the stream " + procfsDisksFile, e);
-    } finally {
-      // Close the streams
-      try {
-        in.close();
-      } catch (IOException e) {
-        LOG.warn("Error closing the stream " + procfsDisksFile, e);
-      }
     }
   }
 
@@ -660,6 +696,59 @@ public float getCpuUsagePercentage() {
     return overallCpuUsage;
   }
 
+  /** {@inheritDoc} */
+  @Override
+  public float getIoUsagePercentage(String[] paths) {
+    readProcDisksInfoFile(paths);
+    float overallIoUsage = ioTimeTracker.getIoTrackerUsagePercent();
+    if (overallIoUsage != IoTimeTracker.UNAVAILABLE && numDisks != 0) {
+      overallIoUsage = overallIoUsage / numDisks;
+    }
+    return overallIoUsage;
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public NodeResource getNodeResourceLastPeriod(String[] localDirs, long millis) {
+
+    // trigger a new collection and obtain the latest node resource sample
+    NodeResource nowNodeResource = new NodeResource(
+        getCpuUsagePercentage(),
+        getIoUsagePercentage(localDirs),
+        1 - (float) getAvailablePhysicalMemorySize() / getPhysicalMemorySize());
+
+    int nums = 0;
+    NodeResource totalResource = new NodeResource(0F, 0F, 0F);
+    Iterator<Map.Entry<Long, NodeResource>> it =
+        nodeResourceSampleMap.entrySet().iterator();
+    while (it.hasNext()) {
+      Map.Entry<Long, NodeResource> entry = it.next();
+      long delta = System.currentTimeMillis() - millis;
+      NodeResource oldNodeResource = entry.getValue();
+      // aggregate samples taken within the last <millis> ms
+      if (delta <= entry.getKey()) {
+        nums++;
+        totalResource.setCpuUsage(totalResource.cpuUsage + oldNodeResource.cpuUsage);
+        totalResource.setIoUsage(totalResource.ioUsage + oldNodeResource.ioUsage);
+        totalResource.setMemoryUsage(totalResource.memoryUsage + oldNodeResource.memoryUsage);
+      } else {
+        // remove outdated records
+        it.remove();
+      }
+    }
+
+    if (nums > 1) {
+      totalResource.setCpuUsage(totalResource.cpuUsage / nums);
+      totalResource.setIoUsage(totalResource.ioUsage / nums);
+      totalResource.setMemoryUsage(totalResource.memoryUsage / nums);
+    }
+
+    if (nowNodeResource.cpuUsage >= 0 && nowNodeResource.ioUsage >= 0
+        && nowNodeResource.memoryUsage >= 0) {
+      nodeResourceSampleMap.put(ioTimeTracker.sampleTime, nowNodeResource);
+    }
+    return totalResource;
+  }
+
   /** {@inheritDoc} */
   @Override
   public float getNumVCoresUsed() {
@@ -686,14 +775,14 @@ public long getNetworkBytesWritten() {
   }
 
   @Override
-  public long getStorageBytesRead() {
-    readProcDisksInfoFile();
+  public long getStorageBytesRead(String[] paths) {
+    readProcDisksInfoFile(paths);
     return numDisksBytesRead;
   }
 
   @Override
-  public long getStorageBytesWritten() {
-    readProcDisksInfoFile();
+  public long getStorageBytesWritten(String[] paths) {
+    readProcDisksInfoFile(paths);
     return numDisksBytesWritten;
   }
 
@@ -721,9 +810,9 @@ public static void main(String[] args) {
     System.out.println("Total network written (bytes) : "
         + plugin.getNetworkBytesWritten());
     System.out.println("Total storage read (bytes) : "
-        + plugin.getStorageBytesRead());
+        + plugin.getStorageBytesRead(null));
     System.out.println("Total storage written (bytes) : "
-        + plugin.getStorageBytesWritten());
+        + plugin.getStorageBytesWritten(null));
     try {
       // Sleep so we can compute the CPU usage
       Thread.sleep(500L);
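To sanity-check the capture-group indices used in readProcDisksInfoFile, here is a standalone sketch that applies the same regular expression to a sample /proc/diskstats line. The leading part of the pattern is copied from the existing SysInfoLinux source, since only its tail is visible in the hunk context above, and the sample line and its numbers are invented:

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class DiskstatsGroupSketch {
  // Same shape as SysInfoLinux.PROCFS_DISKSFILE_FORMAT.
  private static final Pattern DISKSTATS = Pattern.compile(
      "^[ \t]*([0-9]+)[ \t]*([0-9 ]+)" +
      "(?!([0-9]+))([a-zA-Z]+[-0-9]*)" +
      "[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)" +
      "[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)" +
      "[ \t]*([0-9]+)[ \t]*([0-9]+)[ \t]*([0-9]+)");

  public static void main(String[] args) {
    // major minor name, then 11 counters; the 10th counter after the name
    // is the cumulative milliseconds spent doing I/Os.
    String line =
        "   8       0 sda 4920 212 343222 2244 32412 11250 361592 10298 0 7550 12540";
    Matcher mat = DISKSTATS.matcher(line);
    if (mat.find()) {
      System.out.println("device       = " + mat.group(4));  // sda
      System.out.println("ms doing I/O = " + mat.group(14)); // 7550
      System.out.println("in progress  = " + mat.group(13)); // 0
    }
  }
}
```

Group 14 (milliseconds spent doing I/O) is what IoTimeTracker should accumulate; group 13 is only the number of I/Os currently in flight, and group 3 sits inside a negative lookahead and is never the device name, which is why the hunk above keeps mat.group(4) for the name and reads mat.group(14) for the IO time.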
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
index 91ebc4c50bb19..9ef7cdc8152fb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
@@ -206,6 +206,20 @@ public synchronized float getCpuUsagePercentage() {
     return ret;
   }
 
+  /** {@inheritDoc} */
+  @Override
+  public float getIoUsagePercentage(String[] paths) {
+    return IoTimeTracker.UNAVAILABLE;
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public NodeResource getNodeResourceLastPeriod(
+      String[] localDirs, long millis) {
+    // TODO: add support for Windows
+    return null;
+  }
+
   /** {@inheritDoc} */
   @Override
   public synchronized float getNumVCoresUsed() {
@@ -232,14 +246,14 @@ public long getNetworkBytesWritten() {
   }
 
   @Override
-  public long getStorageBytesRead() {
+  public long getStorageBytesRead(String[] paths) {
     refreshIfNeeded();
     return storageBytesRead;
   }
 
   @Override
-  public long getStorageBytesWritten() {
+  public long getStorageBytesWritten(String[] paths) {
     refreshIfNeeded();
     return storageBytesWritten;
   }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java
index f8ef7f2aa3ba5..dc11a0e4f3644 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoLinux.java
@@ -573,8 +573,8 @@ public void parsingProcDisksFile() throws IOException {
     // use non-default sector size
     int diskSectorSize = FakeLinuxResourceCalculatorPlugin.SECTORSIZE;
     assertEquals(expectedNumSectorsRead * diskSectorSize,
-        plugin.getStorageBytesRead());
+        plugin.getStorageBytesRead(null));
     assertEquals(expectedNumSectorsWritten * diskSectorSize,
-        plugin.getStorageBytesWritten());
+        plugin.getStorageBytesWritten(null));
   }
 }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoWindows.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoWindows.java
index fc99aeb976f91..56f05aa7079df 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoWindows.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoWindows.java
@@ -58,7 +58,7 @@ public void parseSystemInfoString() {
     assertEquals(1, tester.getNumCores());
     assertEquals(2805000L, tester.getCpuFrequency());
     assertEquals(6261812L, tester.getCumulativeCpuTime());
-    assertEquals(1234567L, tester.getStorageBytesRead());
-    assertEquals(2345678L, tester.getStorageBytesWritten());
+    assertEquals(1234567L, tester.getStorageBytesRead(null));
+    assertEquals(2345678L, tester.getStorageBytesWritten(null));
     assertEquals(3456789L, tester.getNetworkBytesRead());
     assertEquals(4567890L, tester.getNetworkBytesWritten());
diff --git a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/DummyResourceCalculatorPlugin.java b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/DummyResourceCalculatorPlugin.java
index 528202fd7f402..797a183814fd7 100644
--- a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/DummyResourceCalculatorPlugin.java
+++ b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/DummyResourceCalculatorPlugin.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.mapred.gridmix;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.util.NodeResource;
 import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
 
 /**
@@ -52,6 +53,10 @@ public class DummyResourceCalculatorPlugin extends ResourceCalculatorPlugin {
       "mapred.tasktracker.cumulativecputime.testing";
   /** CPU usage percentage for testing */
   public static final String CPU_USAGE = "mapred.tasktracker.cpuusage.testing";
+  /** Memory usage percentage for testing */
+  public static final String MEMORY_USAGE = "mapred.tasktracker.memoryusage.testing";
+  /** Disk IO usage percentage for testing */
+  public static final String IO_USAGE = "mapred.tasktracker.iousage.testing";
   /** cumulative number of bytes read over the network */
   public static final String NETWORK_BYTES_READ =
       "mapred.tasktracker.networkread.testing";
@@ -128,6 +133,21 @@ public float getCpuUsagePercentage() {
     return getConf().getFloat(CPU_USAGE, -1);
   }
 
+  /** {@inheritDoc} */
+  @Override
+  public float getIoUsagePercentage(String[] paths) {
+    return getConf().getFloat(IO_USAGE, -1);
+  }
+
+  /** {@inheritDoc} */
+  @Override
+  public NodeResource getNodeResourceLastPeriod(String[] localDirs, long millis) {
+    return new NodeResource(getConf().getFloat(CPU_USAGE, -1),
+        getConf().getFloat(IO_USAGE, -1),
+        getConf().getFloat(MEMORY_USAGE, -1));
+  }
+
+
   /** {@inheritDoc} */
   @Override
   public long getNetworkBytesRead() {
@@ -142,13 +162,13 @@ public long getNetworkBytesWritten() {
 
   /** {@inheritDoc} */
   @Override
-  public long getStorageBytesRead() {
+  public long getStorageBytesRead(String[] paths) {
     return getConf().getLong(STORAGE_BYTES_READ, -1);
   }
 
   /** {@inheritDoc} */
   @Override
-  public long getStorageBytesWritten() {
+  public long getStorageBytesWritten(String[] paths) {
     return getConf().getLong(STORAGE_BYTES_WRITTEN, -1);
   }
 }
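A short sketch of how a test could drive the new dummy knobs, assuming the usual Configured/Configurable wiring that the existing CPU_USAGE knob already relies on; the values are arbitrary:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.gridmix.DummyResourceCalculatorPlugin;
import org.apache.hadoop.util.NodeResource;

public class DummyPluginSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setFloat(DummyResourceCalculatorPlugin.CPU_USAGE, 0.75f);
    conf.setFloat(DummyResourceCalculatorPlugin.IO_USAGE, 0.30f);
    conf.setFloat(DummyResourceCalculatorPlugin.MEMORY_USAGE, 0.50f);

    DummyResourceCalculatorPlugin plugin = new DummyResourceCalculatorPlugin();
    plugin.setConf(conf);

    // The dummy plugin simply echoes the configured values back, in the
    // NodeResource(cpu, io, memory) order used by the constructor.
    NodeResource res = plugin.getNodeResourceLastPeriod(null, 0L);
    System.out.println(plugin.getIoUsagePercentage(null)); // 0.30
    System.out.println(res.getCpuUsage() + " " + res.getIoUsage()
        + " " + res.getMemoryUsage());                     // 0.75 0.30 0.50
  }
}
```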
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
index 32567db666ef3..9fb98083ada09 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
@@ -39,6 +39,7 @@
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceUtilization;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
+import org.apache.hadoop.yarn.server.api.records.NodeStatus;
 import org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
@@ -116,6 +117,10 @@ public String getHealthReport() {
       return healthReport;
     }
 
+    public NodeStatus getNodeStatus() {
+      return null;
+    }
+
     public long getLastHealthReportTime() {
       return 0;
     }
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
index 26d35ac897235..4a6047e0f6715 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
@@ -30,6 +30,7 @@
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceUtilization;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
+import org.apache.hadoop.yarn.server.api.records.NodeStatus;
 import org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
@@ -89,6 +90,11 @@ public String getHealthReport() {
     return node.getHealthReport();
   }
 
+  @Override
+  public NodeStatus getNodeStatus() {
+    return node.getNodeStatus();
+  }
+
   @Override
   public long getLastHealthReportTime() {
     return node.getLastHealthReportTime();