Commit 8fdadcb: Merge branch 'trunk' into YARN-11250
slfan1989 committed Aug 15, 2022
2 parents 4e98d5e + 622ca0d
Showing 54 changed files with 3,016 additions and 353 deletions.
AlreadyClosedException.java (new file, package org.apache.hadoop.io.compress)
@@ -0,0 +1,33 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.io.compress;

import java.io.IOException;

/**
* An exception class for when a closed compressor/decompressor is being used
* {@link org.apache.hadoop.io.compress.Compressor}
* {@link org.apache.hadoop.io.compress.Decompressor}
*/
public class AlreadyClosedException extends IOException {

public AlreadyClosedException(String message) {
super(message);
}
}
CodecPool.java (org.apache.hadoop.io.compress)
@@ -205,6 +205,7 @@ public static void returnCompressor(Compressor compressor) {
}
// if the compressor can't be reused, don't pool it.
if (compressor.getClass().isAnnotationPresent(DoNotPool.class)) {
compressor.end();
return;
}
compressor.reset();
@@ -225,6 +226,7 @@ public static void returnDecompressor(Decompressor decompressor) {
}
// if the decompressor can't be reused, don't pool it.
if (decompressor.getClass().isAnnotationPresent(DoNotPool.class)) {
decompressor.end();
return;
}
decompressor.reset();
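
For context, a minimal sketch (not part of the commit) of the caller-visible behaviour these two hunks give: a compressor whose class carries @DoNotPool is now end()ed when it is returned, so any later use fails fast. The wrapper class name and buffer sizes below are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.AlreadyClosedException;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.zlib.BuiltInGzipCompressor;

public class DoNotPoolReturnSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // BuiltInGzipCompressor carries the @DoNotPool annotation.
    Compressor compressor = new BuiltInGzipCompressor(conf);

    // Before this change the compressor was silently dropped; now end() is called,
    // releasing the underlying Deflater and moving its state machine to ENDED.
    CodecPool.returnCompressor(compressor);

    try {
      compressor.compress(new byte[8], 0, 8);   // any use after returning it
    } catch (AlreadyClosedException expected) {
      System.out.println("caught: " + expected.getMessage());
    }
  }
}
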
BuiltInGzipCompressor.java (org.apache.hadoop.io.compress.zlib)
@@ -24,6 +24,7 @@
import java.util.zip.GZIPOutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.AlreadyClosedException;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.DoNotPool;
import org.apache.hadoop.util.DataChecksum;
@@ -83,6 +84,10 @@ public int compress(byte[] b, int off, int len) throws IOException {
throw new IOException("compress called on finished compressor");
}

if (state == BuiltInGzipDecompressor.GzipStateLabel.ENDED) {
throw new AlreadyClosedException("compress called on closed compressor");
}

int compressedBytesWritten = 0;

// If we are not within uncompressed data yet, output the header.
@@ -139,6 +144,8 @@ public long getBytesWritten() {
@Override
public void end() {
deflater.end();

state = BuiltInGzipDecompressor.GzipStateLabel.ENDED;
}

@Override
BuiltInGzipDecompressor.java (org.apache.hadoop.io.compress.zlib)
@@ -23,6 +23,7 @@
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;

import org.apache.hadoop.io.compress.AlreadyClosedException;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.DoNotPool;
import org.apache.hadoop.util.DataChecksum;
@@ -109,7 +110,11 @@ public enum GzipStateLabel {
* Immediately after the trailer (and potentially prior to the next gzip
* member/substream header), without reset() having been called.
*/
FINISHED;
FINISHED,
/**
* Immediately after end() has been called.
*/
ENDED;
}

/**
@@ -186,6 +191,10 @@ public synchronized int decompress(byte[] b, int off, int len)
throws IOException {
int numAvailBytes = 0;

if (state == GzipStateLabel.ENDED) {
throw new AlreadyClosedException("decompress called on closed decompressor");
}

if (state != GzipStateLabel.DEFLATE_STREAM) {
executeHeaderState();

@@ -476,6 +485,8 @@ public synchronized void reset() {
@Override
public synchronized void end() {
inflater.end();

state = GzipStateLabel.ENDED;
}

/**
Shell.java (org.apache.hadoop.util)
@@ -60,7 +60,7 @@ public abstract class Shell {
* {@value}
*/
private static final String WINDOWS_PROBLEMS =
"https://wiki.apache.org/hadoop/WindowsProblems";
"https://cwiki.apache.org/confluence/display/HADOOP2/WindowsProblems";

/**
* Name of the windows utils binary: {@value}.
TestCodecPool.java (org.apache.hadoop.io.compress)
@@ -19,13 +19,20 @@

import static org.junit.Assert.assertEquals;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.OutputStream;
import java.util.Random;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.zlib.BuiltInGzipCompressor;
import org.apache.hadoop.io.compress.zlib.BuiltInGzipDecompressor;
import org.apache.hadoop.test.LambdaTestUtils;
import org.junit.Before;
import org.junit.Test;

@@ -189,4 +196,56 @@ public void testDecompressorNotReturnSameInstance() {
CodecPool.returnDecompressor(decompressor);
}
}

@Test(timeout = 10000)
public void testDoNotPoolCompressorNotUseableAfterReturn() throws Exception {

final GzipCodec gzipCodec = new GzipCodec();
gzipCodec.setConf(new Configuration());

// BuiltInGzipCompressor is an explicit example of a Compressor with the @DoNotPool annotation
final Compressor compressor = new BuiltInGzipCompressor(new Configuration());
CodecPool.returnCompressor(compressor);

final CompressionOutputStream outputStream =
gzipCodec.createOutputStream(new ByteArrayOutputStream(), compressor);
LambdaTestUtils.intercept(
AlreadyClosedException.class,
"compress called on closed compressor",
"Compressor from Codec with @DoNotPool should not be " +
"useable after returning to CodecPool",
() -> outputStream.write(1));
}

@Test(timeout = 10000)
public void testDoNotPoolDecompressorNotUseableAfterReturn() throws Exception {

final GzipCodec gzipCodec = new GzipCodec();
gzipCodec.setConf(new Configuration());

final Random random = new Random();
final byte[] bytes = new byte[1024];
random.nextBytes(bytes);

ByteArrayOutputStream baos = new ByteArrayOutputStream();
try (OutputStream outputStream = gzipCodec.createOutputStream(baos)) {
outputStream.write(bytes);
}

final byte[] gzipBytes = baos.toByteArray();
final ByteArrayInputStream bais = new ByteArrayInputStream(gzipBytes);

// BuiltInGzipDecompressor is an explicit example of a Decompressor
// with the @DoNotPool annotation
final Decompressor decompressor = new BuiltInGzipDecompressor();
CodecPool.returnDecompressor(decompressor);

final CompressionInputStream inputStream = gzipCodec.createInputStream(bais, decompressor);
LambdaTestUtils.intercept(
AlreadyClosedException.class,
"decompress called on closed decompressor",
"Decompressor from Codec with @DoNotPool should not be " +
"useable after returning to CodecPool",
() -> inputStream.read());
}
}
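
For contrast with the two new tests above, a minimal sketch (not part of the commit) of the unchanged path for poolable codecs: compressors whose class has no @DoNotPool annotation are still reset() and cached by returnCompressor(). DefaultCodec is used here on the assumption that its zlib-backed compressor is poolable.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.DefaultCodec;

public class PooledCompressorSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    DefaultCodec codec = new DefaultCodec();
    codec.setConf(conf);

    // Not annotated with @DoNotPool: returnCompressor() calls reset() and pools it.
    Compressor first = CodecPool.getCompressor(codec);
    CodecPool.returnCompressor(first);

    // A later lease may hand back the very same (reset) instance.
    Compressor second = CodecPool.getCompressor(codec);
    System.out.println("reused same instance: " + (first == second));
    CodecPool.returnCompressor(second);
  }
}
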
RBFMetrics.java (HDFS Router-Based Federation metrics)
@@ -50,6 +50,7 @@
import javax.management.ObjectName;
import javax.management.StandardMBean;

import org.apache.commons.math3.stat.descriptive.moment.StandardDeviation;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -113,6 +114,8 @@ public class RBFMetrics implements RouterMBean, FederationMBean {
/** Prevent holding the page from loading too long. */
private final long timeOut;

/** Enable/Disable getNodeUsage. **/
private boolean enableGetDNUsage;

/** Router interface. */
private final Router router;
@@ -175,6 +178,8 @@ public RBFMetrics(Router router) throws IOException {
Configuration conf = router.getConfig();
this.timeOut = conf.getTimeDuration(RBFConfigKeys.DN_REPORT_TIME_OUT,
RBFConfigKeys.DN_REPORT_TIME_OUT_MS_DEFAULT, TimeUnit.MILLISECONDS);
this.enableGetDNUsage = conf.getBoolean(RBFConfigKeys.DFS_ROUTER_ENABLE_GET_DN_USAGE_KEY,
RBFConfigKeys.DFS_ROUTER_ENABLE_GET_DN_USAGE_DEFAULT);
this.topTokenRealOwners = conf.getInt(
RBFConfigKeys.DFS_ROUTER_METRICS_TOP_NUM_TOKEN_OWNERS_KEY,
RBFConfigKeys.DFS_ROUTER_METRICS_TOP_NUM_TOKEN_OWNERS_KEY_DEFAULT);
@@ -184,6 +189,11 @@ public RBFMetrics(Router router) throws IOException {
ms.register(RBFMetrics.class.getName(), "RBFActivity Metrics", this);
}

@VisibleForTesting
public void setEnableGetDNUsage(boolean enableGetDNUsage) {
this.enableGetDNUsage = enableGetDNUsage;
}

/**
* Unregister the JMX beans.
*/
@@ -537,35 +547,34 @@ public int getNumEnteringMaintenanceDataNodes() {

@Override // NameNodeMXBean
public String getNodeUsage() {
float median = 0;
float max = 0;
float min = 0;
float dev = 0;
double median = 0;
double max = 0;
double min = 0;
double dev = 0;

final Map<String, Map<String, Object>> info = new HashMap<>();
try {
RouterRpcServer rpcServer = this.router.getRpcServer();
DatanodeInfo[] live = rpcServer.getDatanodeReport(
DatanodeReportType.LIVE, false, timeOut);
DatanodeInfo[] live = null;
if (this.enableGetDNUsage) {
RouterRpcServer rpcServer = this.router.getRpcServer();
live = rpcServer.getDatanodeReport(DatanodeReportType.LIVE, false, timeOut);
} else {
LOG.debug("Getting node usage is disabled.");
}

if (live.length > 0) {
float totalDfsUsed = 0;
float[] usages = new float[live.length];
if (live != null && live.length > 0) {
double[] usages = new double[live.length];
int i = 0;
for (DatanodeInfo dn : live) {
usages[i++] = dn.getDfsUsedPercent();
totalDfsUsed += dn.getDfsUsedPercent();
}
totalDfsUsed /= live.length;
Arrays.sort(usages);
median = usages[usages.length / 2];
max = usages[usages.length - 1];
min = usages[0];

for (i = 0; i < usages.length; i++) {
dev += (usages[i] - totalDfsUsed) * (usages[i] - totalDfsUsed);
}
dev = (float) Math.sqrt(dev / usages.length);
StandardDeviation deviation = new StandardDeviation();
dev = deviation.evaluate(usages);
}
} catch (IOException e) {
LOG.error("Cannot get the live nodes: {}", e.getMessage());
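
A side note on the new dev computation: the removed hand-rolled loop divided the squared deviations by n (population standard deviation), while commons-math3's StandardDeviation is bias-corrected by default and divides by n - 1, so reported values shift slightly on small clusters. A standalone sketch (not part of the commit, values made up) showing both forms:

import org.apache.commons.math3.stat.descriptive.moment.StandardDeviation;

public class NodeUsageDevSketch {
  public static void main(String[] args) {
    double[] usages = {10.0, 20.0, 60.0};   // illustrative DFS-used percentages

    // Default constructor: sample (bias-corrected) standard deviation, divides by n - 1.
    double sample = new StandardDeviation().evaluate(usages);

    // Population standard deviation, divides by n; this matches the removed loop.
    double population = new StandardDeviation(false).evaluate(usages);

    System.out.printf("sample=%.4f population=%.4f%n", sample, population);
  }
}
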