
Commit

modify MiniCluster, add ZooKeeper start and stop operations
modify TestCreation test case
add file lock for write operations
modify test case testFileCreationNonRecursive
fix: throw an exception when closing a file with no block allocated
modify test case testFileCreationNonRecursive
optimize default configuration in zoo.cfg: maxClientCnxns=300
adjust FSNamesystem.delete
remove two exceptions from MiniCluster
modify TestCreation
add the old markBlockAsCorrupt(Block blk, DatanodeInfo dn) method (empty implementation) for tests
fix wrong comment on getBlocks(dn, size): it is called by the balancer, not by the secondary namenode
fix: BlockEntry.getTotalLength(...) should not add a block to the total length when its size is -1
fix: FSNamesystem.getListing(...) should return null when src does not exist
add setLeasePeriod() to StateManager
jiwan committed Jun 6, 2012
1 parent fe1e56f commit 3d60d01
Showing 9 changed files with 777 additions and 375 deletions.
3 changes: 2 additions & 1 deletion adfs-hdfs-project/adfs-hdfs/src/main/conf/zoo.cfg
@@ -3,4 +3,5 @@ initLimit=10
 syncLimit=5
 dataDir=zkdata
 dataLogDir=zklogs
-clientPort=2181
+clientPort=2181
+maxClientCnxns=300
@@ -157,7 +157,7 @@ public static org.apache.hadoop.hdfs.protocol.Block[] getHadoopBlockArray(List<B
   public static long getTotalLength(List<BlockEntry> blockEntryList) {
     long totalLength = 0;
     for (BlockEntry blockEntry : blockEntryList) {
-      totalLength += blockEntry.getLength();
+      if (blockEntry.getLength() > 0) totalLength += blockEntry.getLength();
     }
     return totalLength;
   }
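The added guard keeps a block whose recorded length is still -1 (allocated in the state store but not yet written) from corrupting the computed file length. A minimal, self-contained sketch of the same idea; the Entry class below is a hypothetical stand-in for BlockEntry, whose constructor is not shown in this diff:

import java.util.Arrays;
import java.util.List;

public class TotalLengthSketch {
  // Hypothetical stand-in for BlockEntry: only getLength() matters here.
  static class Entry {
    private final long length;
    Entry(long length) { this.length = length; }
    long getLength() { return length; }
  }

  // Mirrors the patched getTotalLength: lengths of -1 (unwritten blocks) are skipped.
  static long getTotalLength(List<Entry> entries) {
    long total = 0;
    for (Entry e : entries) {
      if (e.getLength() > 0) total += e.getLength();
    }
    return total;
  }

  public static void main(String[] args) {
    List<Entry> entries = Arrays.asList(new Entry(64), new Entry(-1), new Entry(128));
    // Before the fix this would have printed 191; with the guard it prints 192.
    System.out.println(getTotalLength(entries));
  }
}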
@@ -62,7 +62,14 @@ public class StateManager {
   BlockRepository blockRepository = null;
   DatanodeRepository datanodeRepository = null;
   LeaseRepository leaseRepository = null;
+  private static long softLimit = FSConstants.LEASE_SOFTLIMIT_PERIOD;
+  private static long hardLimit = FSConstants.LEASE_HARDLIMIT_PERIOD;
+
+  public void setLeasePeriod(long softLimit, long hardLimit) {
+    this.softLimit = softLimit;
+    this.hardLimit = hardLimit;
+  }
 
   public StateManager(FileRepository fileRepository, BlockRepository blockRepository,
       DatanodeRepository datanodeRepository, LeaseRepository leaseRepository) {
     this.fileRepository = fileRepository;
@@ -819,16 +826,16 @@ public Lease deleteLeaseByLease(Lease lease) throws IOException {
   }
 
   /** @return true if the Soft Limit Timer has expired */
-  static public boolean expiredSoftLimit(long leaseTime) {
-    return System.currentTimeMillis() - leaseTime > FSConstants.LEASE_SOFTLIMIT_PERIOD;
+  public static boolean expiredSoftLimit(long leaseTime) {
+    return System.currentTimeMillis() - leaseTime > softLimit;
   }
 
   static public class LeaseMonitor implements Runnable {
     final String name = getClass().getSimpleName();
 
     public void run() {
       try {
-        long expiredHardLimitTime = System.currentTimeMillis() - FSConstants.LEASE_HARDLIMIT_PERIOD;
+        long expiredHardLimitTime = System.currentTimeMillis() - hardLimit;
         List<Lease> leaseList = FSNamesystem.getFSStateManager().findLeaseByTimeLessThan(expiredHardLimitTime);
         for (Lease lease : leaseList) {
           List<File> fileList = FSNamesystem.getFSStateManager().findFileByLeaseHolder(lease.holder);
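The new setLeasePeriod(...) mainly exists so tests can shorten the lease limits; because softLimit and hardLimit are static fields, the static expiredSoftLimit(...) check and the LeaseMonitor thread pick up the new values immediately. A minimal sketch, not taken from this commit, of how a test might use it (obtaining the StateManager through FSNamesystem.getFSStateManager() mirrors the call visible in LeaseMonitor above; the method name and the concrete limits are illustrative):

// Sketch only: shrink lease limits so expiry paths run in seconds instead of the
// stock HDFS defaults (LEASE_SOFTLIMIT_PERIOD = 60 s, LEASE_HARDLIMIT_PERIOD = 1 h).
void shortenLeasePeriodsForTest() {
  StateManager stateManager = FSNamesystem.getFSStateManager();
  stateManager.setLeasePeriod(1000L, 5000L); // soft = 1 s, hard = 5 s, both in ms
  // softLimit/hardLimit are static, so every caller of expiredSoftLimit(...) and
  // the LeaseMonitor thread now compare against the shortened limits.
}

The flip side of the static fields is that the setter is JVM-wide rather than scoped to one StateManager instance, which suits the single-namesystem test setup but is worth keeping in mind.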
@@ -0,0 +1,97 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package com.taobao.adfs.util;

import java.io.UnsupportedEncodingException;
import java.util.Arrays;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.WritableComparator;

/**
 * This class encapsulates a byte array and overrides hashCode and equals so
 * that its identity is based on the data rather than the array instance.
 */
public class HashedBytes {
  private static final Log LOG = LogFactory.getLog(HashedBytes.class);

  private final byte[] bytes;

  private final int hashCode;

  public HashedBytes(byte[] bytes) {
    this.bytes = bytes;
    hashCode = WritableComparator.hashBytes(bytes, bytes.length);
  }

  public byte[] getBytes() {
    return bytes;
  }

  @Override
  public int hashCode() {
    return hashCode;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) return true;
    if (obj == null || getClass() != obj.getClass()) return false;
    HashedBytes other = (HashedBytes) obj;
    return Arrays.equals(bytes, other.bytes);
  }

  @Override
  public String toString() {
    if (bytes == null) { return "null"; }
    return toStringBinary(bytes, 0, bytes.length);
  }

  /**
   * Write a printable representation of a byte array. Non-printable
   * characters are hex escaped in the format \\x%02X, e.g. \x00 \x05 etc.
   *
   * @param b array to write out
   * @param off offset to start at
   * @param len length to write
   * @return string output
   */
  public static String toStringBinary(final byte[] b, int off, int len) {
    StringBuilder result = new StringBuilder();
    try {
      String first = new String(b, off, len, "ISO-8859-1");
      for (int i = 0; i < first.length(); ++i) {
        int ch = first.charAt(i) & 0xFF;
        if ((ch >= '0' && ch <= '9') || (ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z')
            || " `~!@#$%^&*()-_=+[]{}\\|;:'\",.<>/?".indexOf(ch) >= 0) {
          result.append(first.charAt(i));
        } else {
          result.append(String.format("\\x%02X", ch));
        }
      }
    } catch (UnsupportedEncodingException e) {
      LOG.error("ISO-8859-1 not supported?", e);
    }
    return result.toString();
  }
}
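HashedBytes lets a byte[] be used as a hash-map key: equality is by content (Arrays.equals) rather than array identity, and the hash is computed once in the constructor. A minimal sketch, not part of this commit, of the kind of per-path lock table the "add file lock for write operation" message suggests; the class and field names below are hypothetical:

import java.nio.charset.Charset;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantLock;

import com.taobao.adfs.util.HashedBytes;

public class PathLockSketch {
  // Hypothetical lock table: two different byte[] instances with the same content
  // map to the same ReentrantLock because HashedBytes compares by content.
  private final ConcurrentHashMap<HashedBytes, ReentrantLock> locks =
      new ConcurrentHashMap<HashedBytes, ReentrantLock>();

  public ReentrantLock lockFor(String path) {
    HashedBytes key = new HashedBytes(path.getBytes(Charset.forName("UTF-8")));
    ReentrantLock lock = locks.get(key);
    if (lock == null) {
      ReentrantLock created = new ReentrantLock();
      ReentrantLock existing = locks.putIfAbsent(key, created);
      lock = (existing != null) ? existing : created;
    }
    return lock;
  }
}

Precomputing the hash in the constructor means WritableComparator.hashBytes runs once per key, no matter how many times the key is probed.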
