org.apache.hadoop.hdfs.protocol.DatanodeInfo - Java examples

Here are examples of the Java API org.apache.hadoop.hdfs.protocol.DatanodeInfo, taken from open source projects.

155 Examples

Example: BlockPlacementPolicyAlwaysSatisfied.java (Apache License 2.0, Author: apache)
@Override
public BlockPlacementStatus verifyBlockPlacement(DatanodeInfo[] locs, int numberOfReplicas) {
    return SATISFIED;
}

Example: BlockMovementAttemptFinished.java (Apache License 2.0, Author: apache)
/**
 * This class represents the status of a block movement task. It carries
 * information about whether the task succeeded or failed due to errors.
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class BlockMovementAttemptFinished {

    private final Block block;

    private final DatanodeInfo src;

    private final DatanodeInfo target;

    private final StorageType targetType;

    private final BlockMovementStatus status;

    /**
     * Construct movement attempt finished info.
     *
     * @param block
     *          block
     * @param src
     *          src datanode
     * @param target
     *          target datanode
     * @param targetType
     *          target storage type
     * @param status
     *          movement status
     */
    public BlockMovementAttemptFinished(Block block, DatanodeInfo src, DatanodeInfo target, StorageType targetType, BlockMovementStatus status) {
        this.block = block;
        this.src = src;
        this.target = target;
        this.targetType = targetType;
        this.status = status;
    }

    /**
     * @return details of the block that was attempted to be moved from the
     *         src to the target node.
     */
    public Block getBlock() {
        return block;
    }

    /**
     * @return the target datanode to which the block was moved.
     */
    public DatanodeInfo getTargetDatanode() {
        return target;
    }

    /**
     * @return target storage type.
     */
    public StorageType getTargetType() {
        return targetType;
    }

    /**
     * @return block movement status code.
     */
    public BlockMovementStatus getStatus() {
        return status;
    }

    @Override
    public String toString() {
        return new StringBuilder()
                .append("Block movement attempt finished(\n  ")
                .append(" block : ").append(block)
                .append(" src node: ").append(src)
                .append(" target node: ").append(target)
                .append(" target type: ").append(targetType)
                .append(" movement status: ").append(status)
                .append(")")
                .toString();
    }
}
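
A minimal construction sketch for the class above, reusing the DFSTestUtil.getLocalDatanodeInfo test helper shown elsewhere on this page; the block values are arbitrary, and the status constant name is an assumption about the BlockMovementStatus enum in your source tree.

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

Block block = new Block(42L, 1024L, 1001L);
DatanodeInfo src = DFSTestUtil.getLocalDatanodeInfo(9866);
DatanodeInfo target = DFSTestUtil.getLocalDatanodeInfo(9867);
// Assumed enum constant; check BlockMovementStatus in your version of Hadoop.
BlockMovementAttemptFinished finished = new BlockMovementAttemptFinished(
        block, src, target, StorageType.ARCHIVE,
        BlockMovementStatus.DN_BLK_STORAGE_MOVEMENT_SUCCESS);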

Example: DeadNodeDetector.java (Apache License 2.0, Author: apache)
/**
 * Add datanode to suspectNodes.
 */
private boolean addSuspectNodeToDetect(DatanodeInfo datanodeInfo) {
    return suspectNodesProbeQueue.offer(datanodeInfo);
}

Example: BlockPlacementPolicy.java (Apache License 2.0, Author: apache)
/**
 * Get rack string from a data node
 * @return rack of data node
 */
protected String getRack(final DatanodeInfo datanode) {
    return datanode.getNetworkLocation();
}

Example: TestModTime.java (Apache License 2.0, Author: apache)
private void printDatanodeReport(DatanodeInfo[] info) {
    System.out.println("-------------------------------------------------");
    for (int i = 0; i < info.length; i++) {
        System.out.println(info[i].getDatanodeReport());
        System.out.println();
    }
}
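
For context, a hedged sketch of how such a report array is typically obtained; it assumes a namenode reachable at the given address and mirrors the DFSClient usage in the testModTime example further down this page.

InetSocketAddress addr = new InetSocketAddress("localhost", 8020); // assumed RPC address
DFSClient client = new DFSClient(addr, new HdfsConfiguration());
DatanodeInfo[] live = client.datanodeReport(DatanodeReportType.LIVE);
printDatanodeReport(live);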

Example: BlockPlacementPolicyRackFaultTolerant.java (Apache License 2.0, Author: apache)
@Override
public BlockPlacementStatus verifyBlockPlacement(DatanodeInfo[] locs, int numberOfReplicas) {
    if (locs == null)
        locs = DatanodeDescriptor.EMPTY_ARRAY;
    if (!clusterMap.hasClusterEverBeenMultiRack()) {
        // only one rack
        return new BlockPlacementStatusDefault(1, 1, 1);
    }
    // 1. Check that all locations are different.
    // 2. Count locations on different racks.
    Set<String> racks = new TreeSet<>();
    for (DatanodeInfo dn : locs) {
        racks.add(dn.getNetworkLocation());
    }
    return new BlockPlacementStatusDefault(racks.size(), numberOfReplicas, clusterMap.getNumOfRacks());
}

Example: ClientContext.java (Apache License 2.0, Author: apache)
public int getNetworkDistance(DatanodeInfo datanodeInfo) throws IOException {
    // If applications disable the feature or the client machine can't
    // resolve its network location, clientNode will be set to null.
    if (clientNode == null) {
        return DFSUtilClient.isLocalAddress(NetUtils.createSocketAddr(datanodeInfo.getXferAddr())) ? 0 : Integer.MAX_VALUE;
    }
    NodeBase node = new NodeBase(datanodeInfo.getHostName(), datanodeInfo.getNetworkLocation());
    return NetworkTopology.getDistanceByPath(clientNode, node);
}
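
A hedged illustration of how this distance can be used to rank replicas; nearestReplica is a hypothetical helper, not HDFS API, and ctx stands for a ClientContext instance.

static DatanodeInfo nearestReplica(ClientContext ctx, DatanodeInfo[] locs) throws IOException {
    DatanodeInfo nearest = null;
    int best = Integer.MAX_VALUE;
    for (DatanodeInfo dn : locs) {
        int d = ctx.getNetworkDistance(dn);
        if (d < best) {  // smaller distance means topologically closer
            best = d;
            nearest = dn;
        }
    }
    return nearest;
}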

Example: TestBlockPlacementPolicyRackFaultTolerant.java (Apache License 2.0, Author: apache)
private void doTestLocatedBlock(int replication, LocatedBlock locatedBlock) {
    assertEquals(replication, locatedBlock.getLocations().length);
    HashMap<String, Integer> racksCount = new HashMap<String, Integer>();
    for (DatanodeInfo node : locatedBlock.getLocations()) {
        addToRacksCount(node.getNetworkLocation(), racksCount);
    }
    int minCount = Integer.MAX_VALUE;
    int maxCount = Integer.MIN_VALUE;
    for (Integer rackCount : racksCount.values()) {
        minCount = Math.min(minCount, rackCount);
        maxCount = Math.max(maxCount, rackCount);
    }
    assertTrue(maxCount - minCount <= 1);
}

Example: StripedWriter.java (Apache License 2.0, Author: apache)
/**
 * Manages striped writers that write reconstructed data to targets.
 */
@InterfaceAudience.Private
class StripedWriter {

    private static final Logger LOG = DataNode.LOG;

    private final static int WRITE_PACKET_SIZE = 64 * 1024;

    private final StripedReconstructor reconstructor;

    private final DataNode datanode;

    private final Configuration conf;

    private final int dataBlkNum;

    private final int parityBlkNum;

    private boolean[] targetsStatus;

    // targets
    private final DatanodeInfo[] targets;

    private final short[] targetIndices;

    private boolean hasValidTargets;

    private final StorageType[] targetStorageTypes;

    private final String[] targetStorageIds;

    private StripedBlockWriter[] writers;

    private int maxChunksPerPacket;

    private byte[] packetBuf;

    private byte[] checksumBuf;

    private int bytesPerChecksum;

    private int checksumSize;

    StripedWriter(StripedReconstructor reconstructor, DataNode datanode, Configuration conf, StripedReconstructionInfo stripedReconInfo) {
        this.reconstructor = reconstructor;
        this.datanode = datanode;
        this.conf = conf;
        dataBlkNum = stripedReconInfo.getEcPolicy().getNumDataUnits();
        parityBlkNum = stripedReconInfo.getEcPolicy().getNumParityUnits();
        this.targets = stripedReconInfo.getTargets();
        assert targets != null;
        this.targetStorageTypes = stripedReconInfo.getTargetStorageTypes();
        assert targetStorageTypes != null;
        this.targetStorageIds = stripedReconInfo.getTargetStorageIds();
        assert targetStorageIds != null;
        writers = new StripedBlockWriter[targets.length];
        targetIndices = new short[targets.length];
        Preconditions.checkArgument(targetIndices.length <= parityBlkNum, "Too much missed striped blocks.");
        initTargetIndices();
        long maxTargetLength = 0L;
        for (short targetIndex : targetIndices) {
            maxTargetLength = Math.max(maxTargetLength, reconstructor.getBlockLen(targetIndex));
        }
        reconstructor.setMaxTargetLength(maxTargetLength);
        // targetsStatus records whether each target has succeeded. Once a
        // target fails (invalid DN or transfer failure), it is marked and no
        // more data is transferred to it.
        targetsStatus = new boolean[targets.length];
    }

    void init() throws IOException {
        DataChecksum checksum = reconstructor.getChecksum();
        checksumSize = checksum.getChecksumSize();
        bytesPerChecksum = checksum.getBytesPerChecksum();
        int chunkSize = bytesPerChecksum + checksumSize;
        maxChunksPerPacket = Math.max((WRITE_PACKET_SIZE - PacketHeader.PKT_MAX_HEADER_LEN) / chunkSize, 1);
        int maxPacketSize = chunkSize * maxChunksPerPacket + PacketHeader.PKT_MAX_HEADER_LEN;
        packetBuf = new byte[maxPacketSize];
        int tmpLen = checksumSize * (reconstructor.getBufferSize() / bytesPerChecksum);
        checksumBuf = new byte[tmpLen];
        if (initTargetStreams() == 0) {
            String error = "All targets are failed.";
            throw new IOException(error);
        }
    }

    private void initTargetIndices() {
        BitSet bitset = reconstructor.getLiveBitSet();
        int m = 0;
        hasValidTargets = false;
        for (int i = 0; i < dataBlkNum + parityBlkNum; i++) {
            if (!bitset.get(i)) {
                if (reconstructor.getBlockLen(i) > 0) {
                    if (m < targets.length) {
                        targetIndices[m++] = (short) i;
                        hasValidTargets = true;
                    }
                }
            }
        }
    }

    /**
     * Send reconstructed data to targets.
     */
    int transferData2Targets() {
        int nSuccess = 0;
        for (int i = 0; i < targets.length; i++) {
            if (targetsStatus[i]) {
                boolean success = false;
                try {
                    writers[i].transferData2Target(packetBuf);
                    nSuccess++;
                    success = true;
                } catch (IOException e) {
                    LOG.warn(e.getMessage());
                }
                targetsStatus[i] = success;
            }
        }
        return nSuccess;
    }

    /**
     * Send an empty packet to mark the end of the block.
     */
    void endTargetBlocks() {
        for (int i = 0; i < targets.length; i++) {
            if (targetsStatus[i]) {
                try {
                    writers[i].endTargetBlock(packetBuf);
                } catch (IOException e) {
                    LOG.warn(e.getMessage());
                }
            }
        }
    }

    /**
     * Initialize output/input streams for transferring data to targets
     * and send the create block request.
     */
    int initTargetStreams() {
        int nSuccess = 0;
        for (short i = 0; i < targets.length; i++) {
            try {
                writers[i] = createWriter(i);
                nSuccess++;
                targetsStatus[i] = true;
            } catch (Throwable e) {
                LOG.warn(e.getMessage());
            }
        }
        return nSuccess;
    }

    private StripedBlockWriter createWriter(short index) throws IOException {
        return new StripedBlockWriter(this, datanode, conf, reconstructor.getBlock(targetIndices[index]), targets[index], targetStorageTypes[index], targetStorageIds[index]);
    }

    ByteBuffer allocateWriteBuffer() {
        return reconstructor.allocateBuffer(reconstructor.getBufferSize());
    }

    int getTargets() {
        return targets.length;
    }

    private int getRealTargets() {
        int m = 0;
        for (int i = 0; i < targets.length; i++) {
            if (targetsStatus[i]) {
                m++;
            }
        }
        return m;
    }

    int[] getRealTargetIndices() {
        int realTargets = getRealTargets();
        int[] results = new int[realTargets];
        int m = 0;
        for (int i = 0; i < targets.length; i++) {
            if (targetsStatus[i]) {
                results[m++] = targetIndices[i];
            }
        }
        return results;
    }

    ByteBuffer[] getRealTargetBuffers(int toReconstructLen) {
        int numGood = getRealTargets();
        ByteBuffer[] outputs = new ByteBuffer[numGood];
        int m = 0;
        for (int i = 0; i < targets.length; i++) {
            if (targetsStatus[i]) {
                writers[i].getTargetBuffer().limit(toReconstructLen);
                outputs[m++] = writers[i].getTargetBuffer();
            }
        }
        return outputs;
    }

    void updateRealTargetBuffers(int toReconstructLen) {
        for (int i = 0; i < targets.length; i++) {
            if (targetsStatus[i]) {
                long blockLen = reconstructor.getBlockLen(targetIndices[i]);
                long remaining = blockLen - reconstructor.getPositionInBlock();
                if (remaining <= 0) {
                    writers[i].getTargetBuffer().limit(0);
                } else if (remaining < toReconstructLen) {
                    writers[i].getTargetBuffer().limit((int) remaining);
                }
            }
        }
    }

    byte[] getChecksumBuf() {
        return checksumBuf;
    }

    int getBytesPerChecksum() {
        return bytesPerChecksum;
    }

    int getChecksumSize() {
        return checksumSize;
    }

    DataChecksum getChecksum() {
        return reconstructor.getChecksum();
    }

    int getMaxChunksPerPacket() {
        return maxChunksPerPacket;
    }

    CachingStrategy getCachingStrategy() {
        return reconstructor.getCachingStrategy();
    }

    InetSocketAddress getSocketAddress4Transfer(DatanodeInfo target) {
        return reconstructor.getSocketAddress4Transfer(target);
    }

    StripedReconstructor getReconstructor() {
        return reconstructor;
    }

    boolean hasValidTargets() {
        return hasValidTargets;
    }

    /**
     * Clear all buffers.
     */
    void clearBuffers() {
        for (StripedBlockWriter writer : writers) {
            ByteBuffer targetBuffer = writer.getTargetBuffer();
            if (targetBuffer != null) {
                targetBuffer.clear();
            }
        }
    }

    void close() {
        for (StripedBlockWriter writer : writers) {
            ByteBuffer targetBuffer = writer.getTargetBuffer();
            if (targetBuffer != null) {
                reconstructor.freeBuffer(targetBuffer);
                writer.freeTargetBuffer();
            }
        }
        for (int i = 0; i < targets.length; i++) {
            writers[i].close();
        }
    }
}
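
A hedged sketch of the call order implied by the methods above; the construction inputs (reconstructor, stripedReconInfo) are assumed to come from the surrounding reconstruction task.

StripedWriter writer = new StripedWriter(reconstructor, datanode, conf, stripedReconInfo);
writer.init();                  // open streams to targets, fail if none succeed
// after each chunk of data has been reconstructed:
writer.transferData2Targets();  // push the reconstructed data
writer.endTargetBlocks();       // send an empty packet to mark end of block
writer.close();                 // free buffers and close the writers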

Example: DeadNodeDetector.java (Apache License 2.0, Author: apache)
/**
 * Remove suspect and dead node from suspectAndDeadNodes#dfsInputStream and
 *  local deadNodes.
 */
public synchronized void removeNodeFromDeadNodeDetector(DFSInputStream dfsInputStream, DatanodeInfo datanodeInfo) {
    Set<DatanodeInfo> datanodeInfos = suspectAndDeadNodes.get(dfsInputStream);
    if (datanodeInfos != null) {
        datanodeInfos.remove(datanodeInfo);
        dfsInputStream.removeFromLocalDeadNodes(datanodeInfo);
        if (datanodeInfos.isEmpty()) {
            suspectAndDeadNodes.remove(dfsInputStream);
        }
    }
}

Example: BlockPlacementPolicyWithUpgradeDomain.java (Apache License 2.0, Author: apache)
@Override
public boolean isMovable(Collection<DatanodeInfo> locs, DatanodeInfo source, DatanodeInfo target) {
    if (super.isMovable(locs, source, target)) {
        return isMovableBasedOnUpgradeDomain(locs, source, target);
    } else {
        return false;
    }
}

Example: InvalidateBlocks.java (Apache License 2.0, Author: apache)
private LightWeightHashSet<Block> getBlocksSet(final DatanodeInfo dn, final Block block) {
    if (blockIdManager.isStripedBlock(block)) {
        return getECBlocksSet(dn);
    } else {
        return getBlocksSet(dn);
    }
}

Example: DeadNodeDetector.java (Apache License 2.0, Author: apache)
/**
 * Add datanode to suspectNodes and suspectAndDeadNodes.
 */
public synchronized void addNodeToDetect(DFSInputStream dfsInputStream, DatanodeInfo datanodeInfo) {
    HashSet<DatanodeInfo> datanodeInfos = suspectAndDeadNodes.get(dfsInputStream);
    if (datanodeInfos == null) {
        datanodeInfos = new HashSet<DatanodeInfo>();
        datanodeInfos.add(datanodeInfo);
        suspectAndDeadNodes.putIfAbsent(dfsInputStream, datanodeInfos);
    } else {
        datanodeInfos.add(datanodeInfo);
    }
    addSuspectNodeToDetect(datanodeInfo);
}

Example: ReplaceDatanodeOnFailure.java (Apache License 2.0, Author: apache)
/**
 * Does it need a replacement according to the policy?
 */
public boolean satisfy(final short replication, final DatanodeInfo[] existings, final boolean isAppend, final boolean isHflushed) {
    final int n = existings == null ? 0 : existings.length;
    // regardless of policy, no replacement is needed when the pipeline is
    // empty or already has at least `replication` datanodes.
    return !(n == 0 || n >= replication) && policy.getCondition().satisfy(replication, existings, n, isAppend, isHflushed);
}
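
A hedged usage sketch for the check above; ReplaceDatanodeOnFailure.get(conf) is how DFSClient obtains the configured policy (treat the factory name as an assumption if your version differs), and the two-survivor array is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;

Configuration conf = new Configuration();
ReplaceDatanodeOnFailure rdof = ReplaceDatanodeOnFailure.get(conf);
DatanodeInfo[] existings = new DatanodeInfo[2]; // 2 survivors of a replication-3 pipeline
// true, true: the stream is appending and has been hflushed
boolean needReplacement = rdof.satisfy((short) 3, existings, true, true);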

Example: BlockPlacementPolicyWithUpgradeDomain.java (Apache License 2.0, Author: apache)
// If upgrade domain isn't specified, uses its XferAddr as upgrade domain.
// Such fallback is useful to test the scenario where upgrade domain isn't
// defined but the block placement is set to upgrade domain policy.
public String getUpgradeDomainWithDefaultValue(DatanodeInfo datanodeInfo) {
    String upgradeDomain = datanodeInfo.getUpgradeDomain();
    if (upgradeDomain == null) {
        LOG.warn("Upgrade domain isn't defined for " + datanodeInfo);
        upgradeDomain = datanodeInfo.getXferAddr();
    }
    return upgradeDomain;
}

Example: BlockPlacementPolicy.java (Apache License 2.0, Author: apache)
/**
 * Verify if the block's placement meets requirement of placement policy,
 * i.e. replicas are placed on no less than minRacks racks in the system.
 *
 * @param locs block with locations
 * @param numOfReplicas replica number of file to be verified
 * @return the result of verification
 */
public abstract BlockPlacementStatus verifyBlockPlacement(DatanodeInfo[] locs, int numOfReplicas);
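
A hedged caller-side sketch of this contract; policy and locs are assumed to be in scope, and isPlacementPolicySatisfied/getErrorDescription are the BlockPlacementStatus accessors HDFS tools such as fsck rely on.

BlockPlacementStatus status = policy.verifyBlockPlacement(locs, fileReplication);
if (!status.isPlacementPolicySatisfied()) {
    // e.g. flag the block as mis-replicated, as fsck does
    System.out.println("Placement not satisfied: " + status.getErrorDescription());
}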

Example: TestReadStripedFileWithMissingBlocks.java (Apache License 2.0, Author: apache)
private void restartDeadDataNodes() throws IOException {
    DatanodeInfo[] deadNodes = dfsClient.datanodeReport(DatanodeReportType.DEAD);
    for (DatanodeInfo dnInfo : deadNodes) {
        cluster.restartDataNode(dnInfo.getXferAddr());
    }
    cluster.triggerHeartbeats();
}

Example: StripedDataStreamer.java (Apache License 2.0, Author: apache)
@Override
protected void setupPipelineInternal(DatanodeInfo[] nodes, StorageType[] nodeStorageTypes, String[] nodeStorageIDs) throws IOException {
    boolean success = false;
    while (!success && !streamerClosed() && dfsClient.clientRunning) {
        if (!handleRestartingDatanode()) {
            return;
        }
        if (!handleBadDatanode()) {
            // for striped streamer if it is datanode error then close the stream
            // and return. no need to replace datanode
            return;
        }
        // get a new generation stamp and an access token
        final LocatedBlock lb = coordinator.getNewBlocks().take(index);
        long newGS = lb.getBlock().getGenerationStamp();
        setAccessToken(lb.getBlockToken());
        // set up the pipeline again with the remaining nodes. when a striped
        // data streamer comes here, it must be in external error state.
        assert getErrorState().hasExternalError();
        success = createBlockOutputStream(nodes, nodeStorageTypes, nodeStorageIDs, newGS, true);
        failPacket4Testing();
        getErrorState().checkRestartingNodeDeadline(nodes);
        // notify coordinator the result of createBlockOutputStream
        synchronized (coordinator) {
            if (!streamerClosed()) {
                coordinator.updateStreamer(this, success);
                coordinator.notify();
            } else {
                success = false;
            }
        }
        if (success) {
            // wait for results of other streamers
            success = coordinator.takeStreamerUpdateResult(index);
            if (success) {
                // if all succeeded, update its block using the new GS
                updateBlockGS(newGS);
            } else {
                // otherwise close the block stream and restart the recovery process
                closeStream();
            }
        } else {
            // if fail, close the stream. The internal error state and last
            // exception have already been set in createBlockOutputStream
            // TODO: wait for restarting DataNodes during RollingUpgrade
            closeStream();
            setStreamerAsClosed();
        }
    }
// while
}

Example: Dispatcher.java (Apache License 2.0, Author: apache)
private boolean shouldIgnore(DatanodeInfo dn) {
    // ignore out-of-service nodes
    final boolean outOfService = !dn.isInService();
    // ignore nodes in exclude list
    final boolean excluded = Util.isExcluded(excludedNodes, dn);
    // ignore nodes not in the include list (if include list is not empty)
    final boolean notIncluded = !Util.isIncluded(includedNodes, dn);
    if (outOfService || excluded || notIncluded) {
        if (LOG.isTraceEnabled()) {
            LOG.trace("Excluding datanode " + dn + ": outOfService=" + outOfService + ", excluded=" + excluded + ", notIncluded=" + notIncluded);
        }
        return true;
    }
    return false;
}

Example: BlockCommand.java (Apache License 2.0, Author: apache)
/**
 * *************************************************
 *  A BlockCommand is an instruction to a datanode
 *  regarding some blocks under its control.  It tells
 *  the DataNode to either invalidate a set of indicated
 *  blocks, or to copy a set of indicated blocks to
 *  another DataNode.
 *
 * **************************************************
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class BlockCommand extends DatanodeCommand {

    /**
     * This constant is used to indicate that the block deletion does not need
     * explicit ACK from the datanode. When a block is put into the list of blocks
     * to be deleted, its size is set to this constant. We assume that no block
     * would actually have this size. Otherwise, we would miss ACKs for blocks
     * with such a size. A positive number is used for compatibility reasons.
     */
    public static final long NO_ACK = Long.MAX_VALUE;

    final String poolId;

    final Block[] blocks;

    final DatanodeInfo[][] targets;

    final StorageType[][] targetStorageTypes;

    final String[][] targetStorageIDs;

    /**
     * Create BlockCommand for transferring blocks to another datanode
     * @param blocktargetlist    blocks to be transferred
     */
    public BlockCommand(int action, String poolId, List<BlockTargetPair> blocktargetlist) {
        super(action);
        this.poolId = poolId;
        blocks = new Block[blocktargetlist.size()];
        targets = new DatanodeInfo[blocks.length][];
        targetStorageTypes = new StorageType[blocks.length][];
        targetStorageIDs = new String[blocks.length][];
        for (int i = 0; i < blocks.length; i++) {
            BlockTargetPair p = blocktargetlist.get(i);
            blocks[i] = p.block;
            targets[i] = DatanodeStorageInfo.toDatanodeInfos(p.targets);
            targetStorageTypes[i] = DatanodeStorageInfo.toStorageTypes(p.targets);
            targetStorageIDs[i] = DatanodeStorageInfo.toStorageIDs(p.targets);
        }
    }

    private static final DatanodeInfo[][] EMPTY_TARGET_DATANODES = {};

    private static final StorageType[][] EMPTY_TARGET_STORAGE_TYPES = {};

    private static final String[][] EMPTY_TARGET_STORAGEIDS = {};

    /**
     * Create BlockCommand for the given action
     * @param blocks blocks related to the action
     */
    public BlockCommand(int action, String poolId, Block[] blocks) {
        this(action, poolId, blocks, EMPTY_TARGET_DATANODES, EMPTY_TARGET_STORAGE_TYPES, EMPTY_TARGET_STORAGEIDS);
    }

    /**
     * Create BlockCommand for the given action
     * @param blocks blocks related to the action
     */
    public BlockCommand(int action, String poolId, Block[] blocks, DatanodeInfo[][] targets, StorageType[][] targetStorageTypes, String[][] targetStorageIDs) {
        super(action);
        this.poolId = poolId;
        this.blocks = blocks;
        this.targets = targets;
        this.targetStorageTypes = targetStorageTypes;
        this.targetStorageIDs = targetStorageIDs;
    }

    public String getBlockPoolId() {
        return poolId;
    }

    public Block[] getBlocks() {
        return blocks;
    }

    public DatanodeInfo[][] getTargets() {
        return targets;
    }

    public StorageType[][] getTargetStorageTypes() {
        return targetStorageTypes;
    }

    public String[][] getTargetStorageIDs() {
        return targetStorageIDs;
    }
}
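
A hedged construction sketch for the deletion case the NO_ACK javadoc describes; DNA_INVALIDATE is the action constant defined on DatanodeProtocol, and the block id, pool id, and generation stamp are illustrative values.

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

// Block(blockId, numBytes, generationStamp); NO_ACK as the size marks a
// deletion that needs no explicit ACK, per the javadoc above.
Block[] toDelete = { new Block(42L, BlockCommand.NO_ACK, 1001L) };
BlockCommand cmd = new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, "test-pool", toDelete);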

Example: Dispatcher.java (Apache License 2.0, Author: apache)
public DDatanode newDatanode(DatanodeInfo datanode) {
    return new DDatanode(datanode, maxConcurrentMovesPerNode);
}

Example: DeadNodeDetector.java (Apache License 2.0, Author: apache)
private void removeFromDead(DatanodeInfo datanodeInfo) {
    deadNodes.remove(datanodeInfo.getDatanodeUuid());
}

Example: DeadNodeDetector.java (Apache License 2.0, Author: apache)
/**
 * Probe datanodes by probe type.
 */
private void scheduleProbe(ProbeType type) {
    LOG.debug("Schedule probe datanode for probe type: {}.", type);
    DatanodeInfo datanodeInfo = null;
    if (type == ProbeType.CHECK_DEAD) {
        while ((datanodeInfo = deadNodesProbeQueue.poll()) != null) {
            if (probeInProg.containsKey(datanodeInfo.getDatanodeUuid())) {
                LOG.debug("The datanode {} is already contained in probe queue, " + "skip to add it.", datanodeInfo);
                continue;
            }
            probeInProg.put(datanodeInfo.getDatanodeUuid(), datanodeInfo);
            Probe probe = new Probe(this, datanodeInfo, ProbeType.CHECK_DEAD);
            probeDeadNodesThreadPool.execute(probe);
        }
    } else if (type == ProbeType.CHECK_SUSPECT) {
        while ((datanodeInfo = suspectNodesProbeQueue.poll()) != null) {
            if (probeInProg.containsKey(datanodeInfo.getDatanodeUuid())) {
                continue;
            }
            probeInProg.put(datanodeInfo.getDatanodeUuid(), datanodeInfo);
            Probe probe = new Probe(this, datanodeInfo, ProbeType.CHECK_SUSPECT);
            probeSuspectNodesThreadPool.execute(probe);
        }
    }
}

Example: BlockReaderFactory.java (Apache License 2.0, Author: apache)
public BlockReaderFactory setDatanodeInfo(DatanodeInfo datanode) {
    this.datanode = datanode;
    return this;
}

Example: JsonUtilClient.java (Apache License 2.0, Author: apache)
/**
 * Convert an Object[] to a DatanodeInfo[].
 */
static DatanodeInfo[] toDatanodeInfoArray(final List<?> objects) throws IOException {
    if (objects == null) {
        return null;
    } else if (objects.isEmpty()) {
        return EMPTY_DATANODE_INFO_ARRAY;
    } else {
        final DatanodeInfo[] array = new DatanodeInfo[objects.size()];
        int i = 0;
        for (Object object : objects) {
            array[i++] = toDatanodeInfo((Map<?, ?>) object);
        }
        return array;
    }
}

Example: DeadNodeDetector.java (Apache License 2.0, Author: apache)
private void addToDead(DatanodeInfo datanodeInfo) {
    deadNodes.put(datanodeInfo.getDatanodeUuid(), datanodeInfo);
}

Example: NamenodeBeanMetrics.java (Apache License 2.0, Author: apache)
private long getLastContact(DatanodeInfo node) {
    return (now() - node.getLastUpdate()) / 1000;
}

Example: DeadNodeDetector.java (Apache License 2.0, Author: apache)
/**
 * Remove suspect and dead node from suspectAndDeadNodes#dfsInputStream and
 *  local deadNodes.
 */
private synchronized void removeNodeFromDeadNodeDetector(DatanodeInfo datanodeInfo) {
    for (Map.Entry<DFSInputStream, HashSet<DatanodeInfo>> entry : suspectAndDeadNodes.entrySet()) {
        Set<DatanodeInfo> datanodeInfos = entry.getValue();
        if (datanodeInfos.remove(datanodeInfo)) {
            DFSInputStream dfsInputStream = entry.getKey();
            dfsInputStream.removeFromLocalDeadNodes(datanodeInfo);
            if (datanodeInfos.isEmpty()) {
                suspectAndDeadNodes.remove(dfsInputStream);
            }
        }
    }
}

Example: DatanodeStorageInfo.java (Apache License 2.0, Author: apache)
static DatanodeInfo[] toDatanodeInfos(List<DatanodeStorageInfo> storages) {
    final DatanodeInfo[] datanodes = new DatanodeInfo[storages.size()];
    for (int i = 0; i < storages.size(); i++) {
        datanodes[i] = storages.get(i).getDatanodeDescriptor();
    }
    return datanodes;
}

Example: DfsClientShmManager.java (Apache License 2.0, Author: apache)
public Slot allocSlot(DatanodeInfo datanode, DomainPeer peer, MutableBoolean usedPeer, ExtendedBlockId blockId, String clientName) throws IOException {
    lock.lock();
    try {
        if (closed) {
            LOG.trace(this + ": the DfsClientShmManager is closed.");
            return null;
        }
        EndpointShmManager shmManager = datanodes.get(datanode);
        if (shmManager == null) {
            shmManager = new EndpointShmManager(datanode);
            datanodes.put(datanode, shmManager);
        }
        return shmManager.allocSlot(peer, usedPeer, clientName, blockId);
    } finally {
        lock.unlock();
    }
}

Example: StripedWriter.java (Apache License 2.0, Author: apache)
InetSocketAddress getSocketAddress4Transfer(DatanodeInfo target) {
    return reconstructor.getSocketAddress4Transfer(target);
}

Example: TestStripedBlockUtil.java (Apache License 2.0, Author: apache)
private LocatedStripedBlock createDummyLocatedBlock(long bgSize) {
    final long blockGroupID = -1048576;
    DatanodeInfo[] locs = new DatanodeInfo[groupSize];
    String[] storageIDs = new String[groupSize];
    StorageType[] storageTypes = new StorageType[groupSize];
    byte[] indices = new byte[groupSize];
    for (int i = 0; i < groupSize; i++) {
        indices[i] = (byte) ((i + 2) % dataBlocks);
        // The location port is always equal to the logical index of the
        // block, for easier verification.
        locs[i] = DFSTestUtil.getLocalDatanodeInfo(indices[i]);
        storageIDs[i] = locs[i].getDatanodeUuid();
        storageTypes[i] = StorageType.DISK;
    }
    return new LocatedStripedBlock(new ExtendedBlock("pool", blockGroupID, bgSize, 1001), locs, storageIDs, storageTypes, indices, 0, false, null);
}

Example: AdminStatesBaseTest.java (Apache License 2.0, Author: apache)
protected void putNodeInService(int nnIndex, String datanodeUuid) throws IOException {
    DatanodeInfo datanodeInfo = getDatanodeDesriptor(cluster.getNamesystem(nnIndex), datanodeUuid);
    putNodeInService(nnIndex, datanodeInfo);
}

Example: ReportBadBlockAction.java (Apache License 2.0, Author: apache)
@Override
public void reportTo(DatanodeProtocolClientSideTranslatorPB bpNamenode, DatanodeRegistration bpRegistration) throws BPServiceActorActionException {
    if (bpRegistration == null) {
        return;
    }
    DatanodeInfo[] dnArr = { new DatanodeInfoBuilder().setNodeID(bpRegistration).build() };
    String[] uuids = { storageUuid };
    StorageType[] types = { storageType };
    LocatedBlock[] locatedBlock = { new LocatedBlock(block, dnArr, uuids, types) };
    try {
        bpNamenode.reportBadBlocks(locatedBlock);
    } catch (RemoteException re) {
        DataNode.LOG.info("reportBadBlock encountered RemoteException for " + "block:  " + block, re);
    } catch (IOException e) {
        throw new BPServiceActorActionException("Failed to report bad block " + block + " to namenode.", e);
    }
}

Example: DeadNodeDetector.java (Apache License 2.0, Author: apache)
/**
 * Remove suspect and dead node from suspectAndDeadNodes#dfsInputStream and
 * deadNodes.
 */
private void removeDeadNode(DatanodeInfo datanodeInfo) {
    removeNodeFromDeadNodeDetector(datanodeInfo);
    removeFromDead(datanodeInfo);
}

Example: RouterNamenodeProtocol.java (Apache License 2.0, Author: apache)
@Override
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size, long minBlockSize) throws IOException {
    rpcServer.checkOperation(OperationCategory.READ);
    // Get the namespace where the datanode is located
    Map<String, DatanodeStorageReport[]> map = rpcServer.getDatanodeStorageReportMap(DatanodeReportType.ALL);
    String nsId = null;
    for (Entry<String, DatanodeStorageReport[]> entry : map.entrySet()) {
        DatanodeStorageReport[] dns = entry.getValue();
        for (DatanodeStorageReport dn : dns) {
            DatanodeInfo dnInfo = dn.getDatanodeInfo();
            if (dnInfo.getDatanodeUuid().equals(datanode.getDatanodeUuid())) {
                nsId = entry.getKey();
                break;
            }
        }
        // Break the loop if already found
        if (nsId != null) {
            break;
        }
    }
    // Forward to the proper namenode
    if (nsId != null) {
        RemoteMethod method = new RemoteMethod(NamenodeProtocol.class, "getBlocks", new Class<?>[] { DatanodeInfo.class, long.class, long.class }, datanode, size, minBlockSize);
        return rpcClient.invokeSingle(nsId, method, BlocksWithLocations.class);
    }
    return null;
}

Example: StripedReconstructor.java (Apache License 2.0, Author: apache)
InetSocketAddress getSocketAddress4Transfer(DatanodeInfo dnInfo) {
    return NetUtils.createSocketAddr(dnInfo.getXferAddr(datanode.getDnConf().getConnectToDnViaHostname()));
}

Example: DeadNodeDetector.java (Apache License 2.0, Author: apache)
/**
 * Remove dead nodes that are no longer used by any DFSInputStream from deadNodes.
 * @return new dead node shared by all DFSInputStreams.
 */
public synchronized Set<DatanodeInfo> clearAndGetDetectedDeadNodes() {
    // first remove the dead nodes that are not referenced by any input stream
    Set<DatanodeInfo> newDeadNodes = new HashSet<DatanodeInfo>();
    for (HashSet<DatanodeInfo> datanodeInfos : suspectAndDeadNodes.values()) {
        newDeadNodes.addAll(datanodeInfos);
    }
    for (DatanodeInfo datanodeInfo : deadNodes.values()) {
        if (!newDeadNodes.contains(datanodeInfo)) {
            deadNodes.remove(datanodeInfo.getDatanodeUuid());
        }
    }
    return new HashSet<>(deadNodes.values());
}

Example: BlockPlacementPolicy.java (Apache License 2.0, Author: apache)
/**
 * Check if the move is allowed. Used by balancer and other tools.
 *
 * @param candidates all replicas including source and target
 * @param source source replica of the move
 * @param target target replica of the move
 */
public abstract boolean isMovable(Collection<DatanodeInfo> candidates, DatanodeInfo source, DatanodeInfo target);

Example: StripedReader.java (Apache License 2.0, Author: apache)
InetSocketAddress getSocketAddress4Transfer(DatanodeInfo dnInfo) {
    return reconstructor.getSocketAddress4Transfer(dnInfo);
}

Example: TestSafeModeWithStripedFile.java (Apache License 2.0, Author: apache)
/**
 * This util writes a small block group whose size is given by the caller,
 * then writes another 2 full-stripe blocks, then shuts down all DNs and
 * restarts them one by one, verifying the safemode status at each step.
 *
 * @param smallSize file size of the small block group
 * @param minStorages minimum replicas needed by the block so it can be safe
 */
private void doTest(int smallSize, int minStorages) throws IOException {
    FileSystem fs = cluster.getFileSystem();
    // add 1 block
    byte[] data = StripedFileTestUtil.generateBytes(smallSize);
    Path smallFilePath = new Path("/testStripedFile_" + smallSize);
    DFSTestUtil.writeFile(fs, smallFilePath, data);
    // If we only have 1 block, NN won't enter safemode in the first place
    // because the threshold is 0 blocks.
    // So we need to add another 2 blocks.
    int bigSize = blockSize * dataBlocks * 2;
    Path bigFilePath = new Path("/testStripedFile_" + bigSize);
    data = StripedFileTestUtil.generateBytes(bigSize);
    DFSTestUtil.writeFile(fs, bigFilePath, data);
    // now we have 3 blocks; the NN needs 2 safe blocks to reach the
    // 0.9 threshold of the total 3 blocks.
    // stop all DNs
    List<MiniDFSCluster.DataNodeProperties> dnprops = Lists.newArrayList();
    LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(smallFilePath.toString(), 0, smallSize);
    DatanodeInfo[] locations = lbs.get(0).getLocations();
    for (DatanodeInfo loc : locations) {
        // keep the DNs that have smallFile in the head of dnprops
        dnprops.add(cluster.stopDataNode(loc.getName()));
    }
    for (int i = 0; i < numDNs - locations.length; i++) {
        dnprops.add(cluster.stopDataNode(0));
    }
    cluster.restartNameNode(0);
    NameNode nn = cluster.getNameNode();
    assertTrue(cluster.getNameNode().isInSafeMode());
    assertEquals(0, NameNodeAdapter.getSafeModeSafeBlocks(nn));
    // the block of smallFile doesn't reach minStorages,
    // so the safe blocks count doesn't increment.
    for (int i = 0; i < minStorages - 1; i++) {
        cluster.restartDataNode(dnprops.remove(0));
        cluster.waitActive();
        cluster.triggerBlockReports();
        assertEquals(0, NameNodeAdapter.getSafeModeSafeBlocks(nn));
    }
    // the block of smallFile reaches minStorages,
    // so the safe blocks count increments.
    cluster.restartDataNode(dnprops.remove(0));
    cluster.waitActive();
    cluster.triggerBlockReports();
    assertEquals(1, NameNodeAdapter.getSafeModeSafeBlocks(nn));
    // the 2 blocks of bigFile need DATA_BLK_NUM storages to be safe
    for (int i = minStorages; i < dataBlocks - 1; i++) {
        cluster.restartDataNode(dnprops.remove(0));
        cluster.waitActive();
        cluster.triggerBlockReports();
        assertTrue(nn.isInSafeMode());
    }
    cluster.restartDataNode(dnprops.remove(0));
    cluster.waitActive();
    cluster.triggerBlockReports();
    assertFalse(nn.isInSafeMode());
}

Example: BlockPlacementPolicyWithNodeGroup.java (Apache License 2.0, Author: apache)
/**
 * Check if there are any replica (other than source) on the same node group
 * with target. If true, then target is not a good candidate for placing
 * specific replica as we don't want 2 replicas under the same nodegroup.
 *
 * @return true if there are any replica (other than source) on the same node
 *         group with target
 */
@Override
public boolean isMovable(Collection<DatanodeInfo> locs, DatanodeInfo source, DatanodeInfo target) {
    for (DatanodeInfo dn : locs) {
        if (dn != source && dn != target && clusterMap.isOnSameNodeGroup(dn, target)) {
            return false;
        }
    }
    return true;
}

Example: TestModTime.java (Apache License 2.0, Author: apache)
/**
 * Tests modification time in DFS.
 */
@Test
public void testModTime() throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
    replacedertEquals("Number of Datanodes ", numDatanodes, info.length);
    FileSystem fileSys = cluster.getFileSystem();
    int replicas = numDatanodes - 1;
    assertTrue(fileSys instanceof DistributedFileSystem);
    try {
        // 
        // create file and record ctime and mtime of test file
        // 
        System.out.println("Creating testdir1 and testdir1/test1.dat.");
        Path dir1 = new Path("testdir1");
        Path file1 = new Path(dir1, "test1.dat");
        DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize, (short) replicas, seed);
        FileStatus stat = fileSys.getFileStatus(file1);
        long mtime1 = stat.getModificationTime();
        assertTrue(mtime1 != 0);
        // 
        // record dir times
        // 
        stat = fileSys.getFileStatus(dir1);
        long mdir1 = stat.getModificationTime();
        // 
        // create second test file
        // 
        System.out.println("Creating testdir1/test2.dat.");
        Path file2 = new Path(dir1, "test2.dat");
        DFSTestUtil.createFile(fileSys, file2, fileSize, fileSize, blockSize, (short) replicas, seed);
        stat = fileSys.getFileStatus(file2);
        // 
        // verify that the modification time of the directory
        // has been updated; it should be no earlier than before.
        // 
        stat = fileSys.getFileStatus(dir1);
        assertTrue(stat.getModificationTime() >= mdir1);
        mdir1 = stat.getModificationTime();
        // 
        // create another directory
        // 
        Path dir2 = fileSys.makeQualified(new Path("testdir2/"));
        System.out.println("Creating testdir2 " + dir2);
        assertTrue(fileSys.mkdirs(dir2));
        stat = fileSys.getFileStatus(dir2);
        long mdir2 = stat.getModificationTime();
        // 
        // rename file1 from testdir into testdir2
        // 
        Path newfile = new Path(dir2, "testnew.dat");
        System.out.println("Moving " + file1 + " to " + newfile);
        fileSys.rename(file1, newfile);
        // 
        // verify that modification time of file1 did not change.
        // 
        stat = fileSys.getFileStatus(newfile);
        assertTrue(stat.getModificationTime() == mtime1);
        // 
        // verify that the modification times of testdir1 and testdir2
        // have changed.
        // 
        stat = fileSys.getFileStatus(dir1);
        assertTrue(stat.getModificationTime() != mdir1);
        mdir1 = stat.getModificationTime();
        stat = fileSys.getFileStatus(dir2);
        assertTrue(stat.getModificationTime() != mdir2);
        mdir2 = stat.getModificationTime();
        // 
        // delete newfile
        // 
        System.out.println("Deleting testdir2/testnew.dat.");
        assertTrue(fileSys.delete(newfile, true));
        // 
        // verify that modification time of testdir1 has not changed.
        // 
        stat = fileSys.getFileStatus(dir1);
        assertTrue(stat.getModificationTime() == mdir1);
        // 
        // verify that modification time of testdir2 has changed.
        // 
        stat = fileSys.getFileStatus(dir2);
        assertTrue(stat.getModificationTime() != mdir2);
        mdir2 = stat.getModificationTime();
        cleanupFile(fileSys, file2);
        cleanupFile(fileSys, dir1);
        cleanupFile(fileSys, dir2);
    } catch (IOException e) {
        info = client.datanodeReport(DatanodeReportType.ALL);
        printDatanodeReport(info);
        throw e;
    } finally {
        fileSys.close();
        cluster.shutdown();
    }
}

Example: BlockPlacementPolicyWithUpgradeDomain.java (Apache License 2.0, Author: apache)
@Override
public BlockPlacementStatus verifyBlockPlacement(DatanodeInfo[] locs, int numberOfReplicas) {
    BlockPlacementStatus defaultStatus = super.verifyBlockPlacement(locs, numberOfReplicas);
    BlockPlacementStatusWithUpgradeDomain upgradeDomainStatus = new BlockPlacementStatusWithUpgradeDomain(defaultStatus, getUpgradeDomainsFromNodes(locs), numberOfReplicas, upgradeDomainFactor);
    return upgradeDomainStatus;
}

Example: ECTopologyVerifier.java (Apache License 2.0, Author: apache)
private static int getNumberOfRacks(DatanodeInfo[] report) {
    final Map<String, Integer> racks = new HashMap<>();
    for (DatanodeInfo dni : report) {
        Integer count = racks.get(dni.getNetworkLocation());
        if (count == null) {
            count = 0;
        }
        racks.put(dni.getNetworkLocation(), count + 1);
    }
    return racks.size();
}
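
The counting loop above can be written more compactly with Java 8's Map.merge; the behavior is identical, since only a per-rack count is accumulated.

for (DatanodeInfo dni : report) {
    racks.merge(dni.getNetworkLocation(), 1, Integer::sum);
}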

Example: ECTopologyVerifier.java (Apache License 2.0, Author: apache)
/**
 * Verifies whether the cluster setup can support the given EC policies.
 *
 * @param report list of data node descriptors for all data nodes
 * @param policies erasure coding policies to verify
 * @return the status of the verification
 */
public static ECTopologyVerifierResult getECTopologyVerifierResult(final DatanodeInfo[] report, final Collection<ErasureCodingPolicy> policies) {
    final int numOfRacks = getNumberOfRacks(report);
    return getECTopologyVerifierResult(numOfRacks, report.length, policies);
}

Example: TestBlockPlacementPolicyRackFaultTolerant.java (Apache License 2.0, Author: apache)
private void shuffle(DatanodeInfo[] locs, String[] storageIDs) {
    int length = locs.length;
    Object[][] pairs = new Object[length][];
    for (int i = 0; i < length; i++) {
        pairs[i] = new Object[] { locs[i], storageIDs[i] };
    }
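    // Arrays.asList returns a fixed-size view backed by the pairs array,
    // so shuffling the list shuffles the array in place.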
    Collections.shuffle(Arrays.asList(pairs));
    for (int i = 0; i < length; i++) {
        locs[i] = (DatanodeInfo) pairs[i][0];
        storageIDs[i] = (String) pairs[i][1];
    }
}

Example: DeadNodeDetector.java (Apache License 2.0, Author: apache)
/**
 * Check dead node periodically.
 */
private void checkDeadNodes() {
    long ts = Time.monotonicNow();
    if (ts - lastDetectDeadTS > deadNodeDetectInterval) {
        Set<DatanodeInfo> datanodeInfos = clearAndGetDetectedDeadNodes();
        for (DatanodeInfo datanodeInfo : datanodeInfos) {
            LOG.debug("Add dead node to check: {}.", datanodeInfo);
            if (!deadNodesProbeQueue.offer(datanodeInfo)) {
                LOG.debug("Skip to add dead node {} to check " + "since the probe queue is full.", datanodeInfo);
                break;
            }
        }
        lastDetectDeadTS = ts;
    }
    state = State.IDLE;
}

Example: DatanodeStorageReport.java (Apache License 2.0, Author: apache)
/**
 * Class that captures information about a datanode and its storages.
 */
public class DatanodeStorageReport {

    final DatanodeInfo datanodeInfo;

    final StorageReport[] storageReports;

    public DatanodeStorageReport(DatanodeInfo datanodeInfo, StorageReport[] storageReports) {
        this.datanodeInfo = datanodeInfo;
        this.storageReports = storageReports;
    }

    public DatanodeInfo getDatanodeInfo() {
        return datanodeInfo;
    }

    public StorageReport[] getStorageReports() {
        return storageReports;
    }
}

Example: DeadNodeDetector.java (Apache License 2.0, Author: apache)
public boolean isDeadNode(DatanodeInfo datanodeInfo) {
    return deadNodes.containsKey(datanodeInfo.getDatanodeUuid());
}
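
A hedged sketch of how a reader might consult this check before choosing a replica; detector and locs are assumed to come from the surrounding client context.

List<DatanodeInfo> candidates = new ArrayList<>();
for (DatanodeInfo dn : locs) {
    if (!detector.isDeadNode(dn)) {
        candidates.add(dn); // only consider nodes not already marked dead
    }
}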