org.apache.hadoop.hdfs.DistributedFileSystem - Java examples

Here are examples of the Java API org.apache.hadoop.hdfs.DistributedFileSystem taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.

155 Examples
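
Most of the snippets below obtain a DistributedFileSystem from a MiniDFSCluster inside tests. For orientation, here is a minimal sketch of obtaining one outside of tests; the NameNode URI and path are placeholders, and the cast only succeeds when the URI scheme is hdfs.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class DfsExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // FileSystem.get returns a DistributedFileSystem for an hdfs:// URI
        FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf);
        try {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            // plain FileSystem calls work as usual; HDFS-only calls need the subclass
            dfs.mkdirs(new Path("/tmp/dfs-example"));
            System.out.println("connected to " + dfs.getUri());
        } finally {
            fs.close();
        }
    }
}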

19 View Complete Implementation : TestMRCJCSocketFactory.java
Copyright Apache License 2.0
Author : apache
private void closeDfs(DistributedFileSystem dfs) {
    try {
        if (dfs != null)
            dfs.close();
    } catch (Exception ignored) {
        // nothing we can do
        ignored.printStackTrace();
    }
}
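
Since FileSystem is Closeable, the same quiet cleanup can also be written with try-with-resources. A sketch, assuming the caller creates the instance itself (the URI and path are placeholders); disabling the FileSystem cache keeps the close() here from affecting a cached instance shared elsewhere.

Configuration conf = new Configuration();
// avoid closing a cached instance that other code may still be using
conf.setBoolean("fs.hdfs.impl.disable.cache", true);
try (DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(URI.create("hdfs://namenode:8020"), conf)) {
    dfs.mkdirs(new Path("/tmp/example"));
} catch (IOException e) {
    // nothing more we can do than log it, as in the helper above
    e.printStackTrace();
}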

19 View Complete Implementation : TestCheckPointForSecurityTokens.java
Copyright Apache License 2.0
Author : apache
/**
 * Tests save namespace.
 */
@Test
public void testSaveNamespace() throws IOException {
    DistributedFileSystem fs = null;
    try {
        Configuration conf = new HdfsConfiguration();
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        FSNamesystem namesystem = cluster.getNamesystem();
        String renewer = UserGroupInformation.getLoginUser().getUserName();
        Token<DelegationTokenIdentifier> token1 = namesystem.getDelegationToken(new Text(renewer));
        Token<DelegationTokenIdentifier> token2 = namesystem.getDelegationToken(new Text(renewer));
        // Saving image without safe mode should fail
        DFSAdmin admin = new DFSAdmin(conf);
        String[] args = new String[] { "-saveNamespace" };
        // verify that the edits file is NOT empty
        NameNode nn = cluster.getNameNode();
        for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
            EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
            assertTrue(log.isInProgress());
            log.scanLog(Long.MAX_VALUE, true);
            long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
            assertEquals("In-progress log " + log + " should have 5 transactions", 5, numTransactions);
        }
        // Saving image in safe mode should succeed
        fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        try {
            admin.run(args);
        } catch (Exception e) {
            throw new IOException(e.getMessage());
        }
        // verify that the edits file is empty except for the START txn
        for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
            EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
            assertTrue(log.isInProgress());
            log.scanLog(Long.MAX_VALUE, true);
            long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
            assertEquals("In-progress log " + log + " should only have START txn", 1, numTransactions);
        }
        // restart cluster
        cluster.shutdown();
        cluster = null;
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
        cluster.waitActive();
        // Should be able to renew & cancel the delegation token after cluster restart
        try {
            renewToken(token1);
            renewToken(token2);
        } catch (IOException e) {
            fail("Could not renew or cancel the token");
        }
        namesystem = cluster.getNamesystem();
        Token<DelegationTokenIdentifier> token3 = namesystem.getDelegationToken(new Text(renewer));
        Token<DelegationTokenIdentifier> token4 = namesystem.getDelegationToken(new Text(renewer));
        // restart cluster again
        cluster.shutdown();
        cluster = null;
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
        cluster.waitActive();
        namesystem = cluster.getNamesystem();
        Token<DelegationTokenIdentifier> token5 = namesystem.getDelegationToken(new Text(renewer));
        try {
            renewToken(token1);
            renewToken(token2);
            renewToken(token3);
            renewToken(token4);
            renewToken(token5);
        } catch (IOException e) {
            fail("Could not renew or cancel the token");
        }
        // restart cluster again
        cluster.shutdown();
        cluster = null;
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
        cluster.waitActive();
        namesystem = cluster.getNamesystem();
        try {
            renewToken(token1);
            cancelToken(token1);
            renewToken(token2);
            cancelToken(token2);
            renewToken(token3);
            cancelToken(token3);
            renewToken(token4);
            cancelToken(token4);
            renewToken(token5);
            cancelToken(token5);
        } catch (IOException e) {
            fail("Could not renew or cancel the token");
        }
    } finally {
        if (fs != null)
            fs.close();
        if (cluster != null)
            cluster.shutdown();
    }
}
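
The -saveNamespace sequence that this test drives through DFSAdmin can also be issued directly on a DistributedFileSystem; a sketch, assuming an existing dfs handle with superuser privileges:

// Enter safe mode, checkpoint the namespace, then leave safe mode.
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
try {
    dfs.saveNamespace(); // rejected by the NameNode unless it is in safe mode
} finally {
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
}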

19 View Complete Implementation : TestViewFSStoragePolicyCommands.java
Copyright Apache License 2.0
Author : apache
@Before
public void clusterSetUp() throws IOException {
    conf = new HdfsConfiguration();
    String clusterName = "cluster";
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2)).numDataNodes(2).build();
    cluster.waitActive();
    DistributedFileSystem hdfs1 = cluster.getFileSystem(0);
    DistributedFileSystem hdfs2 = cluster.getFileSystem(1);
    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, FsConstants.VIEWFS_SCHEME + "://" + clusterName);
    Path base1 = new Path("/user1");
    Path base2 = new Path("/user2");
    hdfs1.delete(base1, true);
    hdfs2.delete(base2, true);
    hdfs1.mkdirs(base1);
    hdfs2.mkdirs(base2);
    ConfigUtil.addLink(conf, clusterName, "/foo", hdfs1.makeQualified(base1).toUri());
    ConfigUtil.addLink(conf, clusterName, "/hdfs2", hdfs2.makeQualified(base2).toUri());
    fs = FileSystem.get(conf);
}
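
After the two links above are added, paths under the viewfs mount resolve into the underlying namespaces. A short sketch of what that looks like, reusing the fs and hdfs1 variables from the setup (the data path is illustrative):

// /foo is mounted on hdfs1:/user1, so this directory lands on the first namespace.
fs.mkdirs(new Path("/foo/data"));
FileStatus st = hdfs1.getFileStatus(new Path("/user1/data"));
System.out.println("created on hdfs1: " + st.getPath());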

19 View Complete Implementation : HdfsUtils.java
Copyright Apache License 2.0
Author : apache
/**
 * Is the HDFS healthy?
 * HDFS is considered healthy if it is up and not in safemode.
 *
 * @param uri the HDFS URI.  Note that the URI path is ignored.
 * @return true if HDFS is healthy; false, otherwise.
 */
public static boolean isHealthy(URI uri) {
    // check scheme
    final String scheme = uri.getScheme();
    if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(scheme)) {
        throw new IllegalArgumentException("The scheme is not " + HdfsConstants.HDFS_URI_SCHEME + ", uri=" + uri);
    }
    final Configuration conf = new Configuration();
    // disable FileSystem cache
    conf.setBoolean(String.format("fs.%s.impl.disable.cache", scheme), true);
    // disable client retry for rpc connection and rpc calls
    conf.setBoolean(HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, false);
    conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
    DistributedFileSystem fs = null;
    try {
        fs = (DistributedFileSystem) FileSystem.get(uri, conf);
        final boolean safemode = fs.setSafeMode(SafeModeAction.SAFEMODE_GET);
        if (LOG.isDebugEnabled()) {
            LOG.debug("Is namenode in safemode? " + safemode + "; uri=" + uri);
        }
        fs.close();
        fs = null;
        return !safemode;
    } catch (IOException e) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Got an exception for uri=" + uri, e);
        }
        return false;
    } finally {
        IOUtils.closeQuietly(fs);
    }
}
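
A possible call site for the helper above; the NameNode URI is a placeholder.

URI nnUri = URI.create("hdfs://namenode:8020");
if (!HdfsUtils.isHealthy(nnUri)) {
    System.err.println("HDFS at " + nnUri + " is down or still in safe mode");
}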

19 View Complete Implementation : TestDistCpSystem.java
Copyright Apache License 2.0
Author : apache
private void createFiles(DistributedFileSystem fs, String topdir, FileEntry[] entries) throws IOException {
    createFiles(fs, topdir, entries, -1);
}

19 View Complete Implementation : TestBlockReaderRemote.java
Copyright Apache License 2.0
Author : apache
@Before
public void setup() throws Exception {
    util = new BlockReaderTestUtil(1, new HdfsConfiguration());
    blockData = getBlockData();
    DistributedFileSystem fs = util.getCluster().getFileSystem();
    Path testfile = new Path("/testfile");
    FSDataOutputStream fout = fs.create(testfile);
    fout.write(blockData);
    fout.close();
    LocatedBlock blk = util.getFileBlocks(testfile, blockData.length).get(0);
    reader = getBlockReader(blk);
}
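
Outside the test utilities, the block layout of the file written above can also be inspected through the public FileSystem API; a sketch reusing the fs and testfile variables from the setup:

FileStatus stat = fs.getFileStatus(testfile);
BlockLocation[] locations = fs.getFileBlockLocations(stat, 0, stat.getLen());
for (BlockLocation location : locations) {
    System.out.println("offset " + location.getOffset() + " on " + Arrays.toString(location.getHosts()));
}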

18 View Complete Implementation : TestErasureCodingCLI.java
Copyright Apache License 2.0
Author : apache
public class TestErasureCodingCLI extends CLITestHelper {

    private final int NUM_OF_DATANODES = 3;

    private MiniDFSCluster dfsCluster = null;

    private DistributedFileSystem fs = null;

    private String namenode = null;

    @Rule
    public Timeout globalTimeout = new Timeout(300000);

    @Before
    @Override
    public void setUp() throws Exception {
        super.setUp();
        dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
        dfsCluster.waitClusterUp();
        namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
        username = System.getProperty("user.name");
        fs = dfsCluster.getFileSystem();
        fs.enableErasureCodingPolicy("RS-6-3-1024k");
        fs.enableErasureCodingPolicy("RS-3-2-1024k");
        fs.enableErasureCodingPolicy("XOR-2-1-1024k");
    }

    @Override
    protected String getTestFile() {
        return "testErasureCodingConf.xml";
    }

    @After
    @Override
    public void tearDown() throws Exception {
        if (fs != null) {
            fs.close();
            fs = null;
        }
        if (dfsCluster != null) {
            dfsCluster.shutdown();
            dfsCluster = null;
        }
        Thread.sleep(2000);
        super.tearDown();
    }

    @Override
    protected String expandCommand(final String cmd) {
        String expCmd = cmd;
        expCmd = expCmd.replaceAll("NAMENODE", namenode);
        expCmd = expCmd.replaceAll("#LF#", System.getProperty("line.separator"));
        expCmd = super.expandCommand(expCmd);
        return expCmd;
    }

    @Override
    protected TestConfigFileParser getConfigParser() {
        return new TestErasureCodingAdmin();
    }

    private class TestErasureCodingAdmin extends CLITestHelper.TestConfigFileParser {

        @Override
        public void endElement(String uri, String localName, String qName) throws SAXException {
            if (qName.equals("ec-admin-command")) {
                if (testCommands != null) {
                    testCommands.add(new CLITestCmdErasureCoding(charString, new CLICommandErasureCodingCli()));
                } else if (cleanupCommands != null) {
                    cleanupCommands.add(new CLITestCmdErasureCoding(charString, new CLICommandErasureCodingCli()));
                }
            } else {
                super.endElement(uri, localName, qName);
            }
        }
    }

    @Override
    protected Result execute(CLICommand cmd) throws Exception {
        return cmd.getExecutor(namenode, conf).executeCommand(cmd.getCmd());
    }

    @Test
    @Override
    public void testAll() {
        super.testAll();
    }
}
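
Outside the CLI test harness, the same erasure-coding policies can be enabled and applied programmatically; a sketch, assuming an existing DistributedFileSystem handle named dfs (the directory name is illustrative):

// Enable a built-in policy, then apply it to a directory; new files under it are striped.
dfs.enableErasureCodingPolicy("RS-6-3-1024k");
Path ecDir = new Path("/ec-data");
dfs.mkdirs(ecDir);
dfs.setErasureCodingPolicy(ecDir, "RS-6-3-1024k");
ErasureCodingPolicy applied = dfs.getErasureCodingPolicy(ecDir);
System.out.println("policy on " + ecDir + ": " + applied.getName());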

18 View Complete Implementation : TestDataNodeTcpNoDelay.java
Copyright Apache License 2.0
Author : apache
private void createData(DistributedFileSystem dfs) throws Exception {
    Path dir = new Path("test-dir");
    for (int i = 0; i < 3; i++) {
        Path f = new Path(dir, "file" + i);
        DFSTestUtil.createFile(dfs, f, 10240, (short) 3, 0);
    }
}

18 View Complete Implementation : TestQuotaWithStripedBlocks.java
Copyright Apache License 2.0
Author : apache
/**
 * Make sure we correctly update the quota usage with the striped blocks.
 */
public class TestQuotaWithStripedBlocks {

    private int blockSize;

    private ErasureCodingPolicy ecPolicy;

    private int dataBlocks;

    private int parityBlocks;

    private int groupSize;

    private int cellSize;

    private Path ecDir;

    private long diskQuota;

    private MiniDFSCluster cluster;

    private FSDirectory dir;

    private DistributedFileSystem dfs;

    public ErasureCodingPolicy getEcPolicy() {
        return StripedFileTestUtil.getDefaultECPolicy();
    }

    @Rule
    public Timeout globalTimeout = new Timeout(300000);

    @Before
    public void setUp() throws IOException {
        blockSize = 1024 * 1024;
        ecPolicy = getEcPolicy();
        dataBlocks = ecPolicy.getNumDataUnits();
        parityBlocks = ecPolicy.getNumParityUnits();
        groupSize = dataBlocks + parityBlocks;
        cellSize = ecPolicy.getCellSize();
        ecDir = new Path("/ec");
        diskQuota = blockSize * (groupSize + 1);
        final Configuration conf = new Configuration();
        conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build();
        cluster.waitActive();
        dir = cluster.getNamesystem().getFSDirectory();
        dfs = cluster.getFileSystem();
        dfs.enableErasureCodingPolicy(ecPolicy.getName());
        dfs.mkdirs(ecDir);
        dfs.getClient().setErasureCodingPolicy(ecDir.toString(), ecPolicy.getName());
        dfs.setQuota(ecDir, Long.MAX_VALUE - 1, diskQuota);
        dfs.setQuotaByStorageType(ecDir, StorageType.DISK, diskQuota);
        dfs.setStoragePolicy(ecDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
    }

    @After
    public void tearDown() {
        if (cluster != null) {
            cluster.shutdown();
            cluster = null;
        }
    }

    @Test
    public void testUpdatingQuotaCount() throws Exception {
        final Path file = new Path(ecDir, "file");
        FSDataOutputStream out = null;
        try {
            out = dfs.create(file, (short) 1);
            INodeFile fileNode = dir.getINode4Write(file.toString()).asFile();
            ExtendedBlock previous = null;
            // Create striped blocks which have a cell in each block.
            Block newBlock = DFSTestUtil.addBlockToFile(true, cluster.getDataNodes(), dfs, cluster.getNamesystem(), file.toString(), fileNode, dfs.getClient().getClientName(), previous, 1, 0);
            previous = new ExtendedBlock(cluster.getNamesystem().getBlockPoolId(), newBlock);
            final INodeDirectory dirNode = dir.getINode4Write(ecDir.toString()).asDirectory();
            final long spaceUsed = dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed().getStorageSpace();
            final long diskUsed = dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed().getTypeSpaces().get(StorageType.DISK);
            // When we add a new block we update the quota using the full block size.
            Assert.assertEquals(blockSize * groupSize, spaceUsed);
            Assert.assertEquals(blockSize * groupSize, diskUsed);
            dfs.getClient().getNamenode().complete(file.toString(), dfs.getClient().getClientName(), previous, fileNode.getId());
            final long actualSpaceUsed = dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed().getStorageSpace();
            final long actualDiskUsed = dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed().getTypeSpaces().get(StorageType.DISK);
            // In this case the file's real size is cell size * block group size.
            Assert.assertEquals(cellSize * groupSize, actualSpaceUsed);
            Assert.assertEquals(cellSize * groupSize, actualDiskUsed);
        } finally {
            IOUtils.cleanup(null, out);
        }
    }
}
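
The quota calls used in setUp above have read counterparts; a sketch of setting and then inspecting a directory quota, assuming an existing dfs handle (the directory name and limits are arbitrary):

Path dir = new Path("/quota-dir");
dfs.mkdirs(dir);
// name quota (max files + dirs) and storage-space quota in bytes
dfs.setQuota(dir, 1000, 10L * 1024 * 1024 * 1024);
dfs.setQuotaByStorageType(dir, StorageType.DISK, 5L * 1024 * 1024 * 1024);
QuotaUsage usage = dfs.getQuotaUsage(dir);
System.out.println(usage.getSpaceConsumed() + " of " + usage.getSpaceQuota() + " bytes used");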

18 View Complete Implementation : TestMultiObserverNode.java
Copyright Apache License 2.0
Author : apache
/**
 * Tests multiple ObserverNodes.
 */
public class TestMultiObserverNode {

    private static Configuration conf;

    private static MiniQJMHACluster qjmhaCluster;

    private static MiniDFSCluster dfsCluster;

    private static DistributedFileSystem dfs;

    private final Path testPath = new Path("/TestMultiObserverNode");

    @BeforeClass
    public static void startUpCluster() throws Exception {
        conf = new Configuration();
        conf.setBoolean(DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY, true);
        qjmhaCluster = HATestUtil.setUpObserverCluster(conf, 2, 0, true);
        dfsCluster = qjmhaCluster.getDfsCluster();
        dfs = HATestUtil.configureObserverReadFs(dfsCluster, conf, ObserverReadProxyProvider.class, true);
    }

    @After
    public void cleanUp() throws IOException {
        dfs.delete(testPath, true);
    }

    @AfterClass
    public static void shutDownCluster() throws IOException {
        if (qjmhaCluster != null) {
            qjmhaCluster.shutdown();
        }
    }

    @Test
    public void testObserverFailover() throws Exception {
        dfs.mkdir(testPath, FsPermission.getDefault());
        dfsCluster.rollEditLogAndTail(0);
        dfs.getFileStatus(testPath);
        assertSentTo(2, 3);
        // Transition observer #2 to standby, request should go to the #3.
        dfsCluster.transitionToStandby(2);
        dfs.getFileStatus(testPath);
        assertSentTo(3);
        // Transition observer #3 to standby, request should go to active
        dfsCluster.transitionToStandby(3);
        dfs.getFileStatus(testPath);
        assertSentTo(0);
        // Transition #2 back to observer, request should go to #2
        dfsCluster.transitionToObserver(2);
        dfs.getFileStatus(testPath);
        assertSentTo(2);
        // Transition #3 back to observer, request should go to either #2 or #3
        dfsCluster.transitionToObserver(3);
        dfs.getFileStatus(testPath);
        assertSentTo(2, 3);
    }

    @Test
    public void testMultiObserver() throws Exception {
        Path testPath2 = new Path(testPath, "test2");
        Path testPath3 = new Path(testPath, "test3");
        dfs.mkdir(testPath, FsPermission.getDefault());
        assertSentTo(0);
        dfsCluster.rollEditLogAndTail(0);
        dfs.getFileStatus(testPath);
        assertSentTo(2, 3);
        dfs.mkdir(testPath2, FsPermission.getDefault());
        dfsCluster.rollEditLogAndTail(0);
        // Shutdown first observer, request should go to the second one
        dfsCluster.shutdownNameNode(2);
        dfs.listStatus(testPath2);
        assertSentTo(3);
        // Restart the first observer
        dfsCluster.restartNameNode(2);
        dfs.listStatus(testPath);
        assertSentTo(3);
        dfsCluster.transitionToObserver(2);
        dfs.listStatus(testPath);
        assertSentTo(2, 3);
        dfs.mkdir(testPath3, FsPermission.getDefault());
        dfsCluster.rollEditLogAndTail(0);
        // Now shutdown the second observer, request should go to the first one
        dfsCluster.shutdownNameNode(3);
        dfs.listStatus(testPath3);
        assertSentTo(2);
        // Shutdown both, request should go to active
        dfsCluster.shutdownNameNode(2);
        dfs.listStatus(testPath3);
        assertSentTo(0);
        dfsCluster.restartNameNode(2);
        dfsCluster.transitionToObserver(2);
        dfsCluster.restartNameNode(3);
        dfsCluster.transitionToObserver(3);
    }

    @Test
    public void testObserverFallBehind() throws Exception {
        dfs.mkdir(testPath, FsPermission.getDefault());
        assertSentTo(0);
        // Set large state Id on the client
        long realStateId = HATestUtil.setACStateId(dfs, 500000);
        dfs.getFileStatus(testPath);
        // Should end up on ANN
        assertSentTo(0);
        HATestUtil.setACStateId(dfs, realStateId);
    }

    private void assertSentTo(int... nnIndices) throws IOException {
        assertTrue("Request was not sent to any of the expected namenodes.", HATestUtil.isSentToAnyOfNameNodes(dfs, dfsCluster, nnIndices));
    }
}

18 View Complete Implementation : TestJobSplitWriterWithEC.java
Copyright Apache License 2.0
Author : apache
/**
 * Tests that maxBlockLocations default value is sufficient for RS-10-4.
 */
public class TestJobSplitWriterWithEC {

    // This will ensure 14 block locations
    private ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies.getByID(SystemErasureCodingPolicies.RS_10_4_POLICY_ID);

    private static final int BLOCKSIZE = 1024 * 1024 * 10;

    private MiniDFSCluster cluster;

    private DistributedFileSystem fs;

    private Configuration conf;

    private Path submitDir;

    private Path testFile;

    @Before
    public void setup() throws Exception {
        Configuration hdfsConf = new HdfsConfiguration();
        hdfsConf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
        String namenodeDir = new File(MiniDFSCluster.getBaseDirectory(), "name").getAbsolutePath();
        hdfsConf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeDir);
        hdfsConf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, namenodeDir);
        hdfsConf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
        cluster = new MiniDFSCluster.Builder(hdfsConf).numDataNodes(15).build();
        fs = cluster.getFileSystem();
        fs.enableErasureCodingPolicy(ecPolicy.getName());
        fs.setErasureCodingPolicy(new Path("/"), ecPolicy.getName());
        cluster.waitActive();
        conf = new Configuration();
        submitDir = new Path("/");
        testFile = new Path("/testfile");
        DFSTestUtil.writeFile(fs, testFile, StripedFileTestUtil.generateBytes(BLOCKSIZE));
        conf.set(FileInputFormat.INPUT_DIR, fs.getUri().toString() + testFile.toString());
    }

    @After
    public void after() {
        cluster.close();
    }

    @Test
    public void testMaxBlockLocationsNewSplitsWithErasureCoding() throws Exception {
        Job job = Job.getInstance(conf);
        final FileInputFormat<?, ?> fileInputFormat = new TextInputFormat();
        final List<InputSplit> splits = fileInputFormat.getSplits(job);
        JobSplitWriter.createSplitFiles(submitDir, conf, fs, splits);
        validateSplitMetaInfo();
    }

    @Test
    public void testMaxBlockLocationsOldSplitsWithErasureCoding() throws Exception {
        JobConf jobConf = new JobConf(conf);
        org.apache.hadoop.mapred.TextInputFormat fileInputFormat = new org.apache.hadoop.mapred.TextInputFormat();
        fileInputFormat.configure(jobConf);
        final org.apache.hadoop.mapred.InputSplit[] splits = fileInputFormat.getSplits(jobConf, 1);
        JobSplitWriter.createSplitFiles(submitDir, conf, fs, splits);
        validateSplitMetaInfo();
    }

    private void validateSplitMetaInfo() throws IOException {
        JobSplit.TaskSplitMetaInfo[] splitInfo = SplitMetaInfoReader.readSplitMetaInfo(new JobID(), fs, conf, submitDir);
        assertEquals("Number of splits", 1, splitInfo.length);
        assertEquals("Number of block locations", 14, splitInfo[0].getLocations().length);
    }
}

18 View Complete Implementation : DistCpSync.java
Copyright Apache License 2.0
Author : apache
private void deleteTargetTmpDir(DistributedFileSystem targetFs, Path tmpDir) {
    try {
        if (tmpDir != null) {
            targetFs.delete(tmpDir, true);
        }
    } catch (IOException e) {
        DistCp.LOG.error("Unable to cleanup tmp dir: " + tmpDir, e);
    }
}

18 View Complete Implementation : TestRedudantBlocks.java
Copyright Apache License 2.0
Author : apache
/**
 * Test RedudantBlocks.
 */
public class TestRedudantBlocks {

    private MiniDFSCluster cluster;

    private DistributedFileSystem fs;

    private final Path dirPath = new Path("/striped");

    private Path filePath = new Path(dirPath, "file");

    private final ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies.getPolicies().get(1);

    private final short dataBlocks = (short) ecPolicy.getNumDataUnits();

    private final short parityBlocks = (short) ecPolicy.getNumParityUnits();

    private final short groupSize = (short) (dataBlocks + parityBlocks);

    private final int cellSize = ecPolicy.getCellSize();

    private final int stripesPerBlock = 4;

    private final int blockSize = stripesPerBlock * cellSize;

    private final int numDNs = groupSize;

    @Before
    public void setup() throws IOException {
        Configuration conf = new Configuration();
        conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
        // disable block recovery
        conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
        conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
        SimulatedFSDataset.setFactory(conf);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        fs.enableErasureCodingPolicy(ecPolicy.getName());
        fs.mkdirs(dirPath);
        fs.getClient().setErasureCodingPolicy(dirPath.toString(), ecPolicy.getName());
    }

    @After
    public void tearDown() {
        if (cluster != null) {
            cluster.shutdown();
            cluster = null;
        }
    }

    @Test
    public void testProcessOverReplicatedAndRedudantBlock() throws Exception {
        long fileLen = dataBlocks * blockSize;
        DFSTestUtil.createStripedFile(cluster, filePath, null, 1, stripesPerBlock, false);
        LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
        LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
        long gs = bg.getBlock().getGenerationStamp();
        String bpid = bg.getBlock().getBlockPoolId();
        long groupId = bg.getBlock().getBlockId();
        Block blk = new Block(groupId, blockSize, gs);
        int i = 0;
        // one missing block
        for (; i < groupSize - 1; i++) {
            blk.setBlockId(groupId + i);
            cluster.injectBlocks(i, Arrays.asList(blk), bpid);
        }
        cluster.triggerBlockReports();
        // one redundant block
        blk.setBlockId(groupId + 2);
        cluster.injectBlocks(i, Arrays.asList(blk), bpid);
        BlockInfoStriped blockInfo = (BlockInfoStriped) cluster.getNamesystem().getBlockManager().getStoredBlock(new Block(groupId));
        // update blocksMap
        cluster.triggerBlockReports();
        // delete redundant block
        cluster.triggerHeartbeats();
        // wait for IBR
        GenericTestUtils.waitFor(() -> cluster.getNamesystem().getBlockManager().countNodes(blockInfo).liveReplicas() >= groupSize - 1, 500, 10000);
        // trigger reconstruction
        cluster.triggerHeartbeats();
        // wait for IBR
        GenericTestUtils.waitFor(() -> cluster.getNamesystem().getBlockManager().countNodes(blockInfo).liveReplicas() >= groupSize, 500, 10000);
        HashSet<Long> blockIdsSet = new HashSet<Long>();
        lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
        bg = (LocatedStripedBlock) (lbs.get(0));
        final LocatedBlock[] blocks = StripedBlockUtil.parseStripedBlockGroup(bg, cellSize, dataBlocks, parityBlocks);
        for (LocatedBlock dn : blocks) {
            if (dn != null) {
                blockIdsSet.add(dn.getBlock().getBlockId());
            }
        }
        assertEquals(groupSize, blockIdsSet.size());
    }
}

18 View Complete Implementation : HATestUtil.java
Copyright Apache License 2.0
Author : apache
public static boolean isSentToAnyOfNameNodes(DistributedFileSystem dfs, MiniDFSCluster cluster, int... nnIndices) throws IOException {
    ObserverReadProxyProvider<?> provider = (ObserverReadProxyProvider<?>) ((RetryInvocationHandler<?>) Proxy.getInvocationHandler(dfs.getClient().getNamenode())).getProxyProvider();
    FailoverProxyProvider.ProxyInfo<?> pi = provider.getLastProxy();
    for (int nnIdx : nnIndices) {
        if (pi.proxyInfo.equals(cluster.getNameNode(nnIdx).getNameNodeAddress().toString())) {
            return true;
        }
    }
    return false;
}

18 View Complete Implementation : TestXAttrConfigFlag.java
Copyright Apache License 2.0
Author : apache
/**
 * Tests that the configuration flag that controls support for XAttrs is off
 * and causes all attempted operations related to XAttrs to fail.  The
 * NameNode can still load XAttrs from fsimage or edits.
 */
public class TestXAttrConfigFlag {

    private static final Path PATH = new Path("/path");

    private MiniDFSCluster cluster;

    private DistributedFileSystem fs;

    @Rule
    public ExpectedException exception = ExpectedException.none();

    @After
    public void shutdown() throws Exception {
        IOUtils.cleanup(null, fs);
        if (cluster != null) {
            cluster.shutdown();
            cluster = null;
        }
    }

    @Test
    public void testSetXAttr() throws Exception {
        initCluster(true, false);
        fs.mkdirs(PATH);
        expectException();
        fs.setXAttr(PATH, "user.foo", null);
    }

    @Test
    public void testGetXAttrs() throws Exception {
        initCluster(true, false);
        fs.mkdirs(PATH);
        expectException();
        fs.getXAttrs(PATH);
    }

    @Test
    public void testRemoveXAttr() throws Exception {
        initCluster(true, false);
        fs.mkdirs(PATH);
        expectException();
        fs.removeXAttr(PATH, "user.foo");
    }

    @Test
    public void testEditLog() throws Exception {
        // With XAttrs enabled, set an XAttr.
        initCluster(true, true);
        fs.mkdirs(PATH);
        fs.setXAttr(PATH, "user.foo", null);
        // Restart with XAttrs disabled.  Expect successful restart.
        restart(false, false);
    }

    @Test
    public void testFsImage() throws Exception {
        // With XAttrs enabled, set an XAttr.
        initCluster(true, true);
        fs.mkdirs(PATH);
        fs.setXAttr(PATH, "user.foo", null);
        // Save a new checkpoint and restart with XAttrs still enabled.
        restart(true, true);
        // Restart with XAttrs disabled.  Expect successful restart.
        restart(false, false);
    }

    /**
     * We expect an IOException, and we want the exception text to state the
     * configuration key that controls XAttr support.
     */
    private void expectException() {
        exception.expect(IOException.class);
        exception.expectMessage(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY);
    }

    /**
     * Initialize the cluster, wait for it to become active, and get FileSystem.
     *
     * @param format if true, format the NameNode and DataNodes before starting up
     * @param xattrsEnabled if true, XAttr support is enabled
     * @throws Exception if any step fails
     */
    private void initCluster(boolean format, boolean xattrsEnabled) throws Exception {
        Configuration conf = new Configuration();
        // not explicitly setting to false, should be false by default
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, xattrsEnabled);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
    }

    /**
     * Restart the cluster, optionally saving a new checkpoint.
     *
     * @param checkpoint boolean true to save a new checkpoint
     * @param xattrsEnabled if true, XAttr support is enabled
     * @throws Exception if restart fails
     */
    private void restart(boolean checkpoint, boolean xattrsEnabled) throws Exception {
        NameNode nameNode = cluster.getNameNode();
        if (checkpoint) {
            NameNodeAdapter.enterSafeMode(nameNode, false);
            NameNodeAdapter.saveNamespace(nameNode);
        }
        shutdown();
        initCluster(false, xattrsEnabled);
    }
}
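
With dfs.namenode.xattrs.enabled left at its default of true, the XAttr operations exercised above behave like a small key/value store attached to a path. A sketch assuming an existing fs handle; the attribute name and value are illustrative:

Path p = new Path("/path");
fs.mkdirs(p);
fs.setXAttr(p, "user.owner-team", "search-infra".getBytes(StandardCharsets.UTF_8));
byte[] value = fs.getXAttr(p, "user.owner-team");
Map<String, byte[]> all = fs.getXAttrs(p);
fs.removeXAttr(p, "user.owner-team");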

18 View Complete Implementation : DistCpSync.java
Copyright Apache License 2.0
Author : apache
private Path createTargetTmpDir(DistributedFileSystem targetFs, Path targetDir) throws IOException {
    final Path tmp = new Path(targetDir, DistCpConstants.HDFS_DISTCP_DIFF_DIRECTORY_NAME + DistCp.rand.nextInt());
    if (!targetFs.mkdirs(tmp)) {
        throw new IOException("The tmp directory " + tmp + " already exists");
    }
    return tmp;
}

18 View Complete Implementation : TestExportsTable.java
Copyright Apache License 2.0
Author : apache
@Test
public void testViewFsRootExportPoint() throws IOException {
    NfsConfiguration config = new NfsConfiguration();
    MiniDFSCluster cluster = null;
    String clusterName = RandomStringUtils.randomAlphabetic(10);
    String exportPoint = "/";
    config.setStrings(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY, exportPoint);
    config.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, FsConstants.VIEWFS_SCHEME + "://" + clusterName);
    // Use an ephemeral port in case tests are running in parallel
    config.setInt("nfs3.mountd.port", 0);
    config.setInt("nfs3.server.port", 0);
    config.set("nfs.http.address", "0.0.0.0:0");
    try {
        cluster = new MiniDFSCluster.Builder(config).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2)).numDataNodes(2).build();
        cluster.waitActive();
        DistributedFileSystem hdfs1 = cluster.getFileSystem(0);
        DistributedFileSystem hdfs2 = cluster.getFileSystem(1);
        cluster.waitActive();
        Path base1 = new Path("/user1");
        Path base2 = new Path("/user2");
        hdfs1.delete(base1, true);
        hdfs2.delete(base2, true);
        hdfs1.mkdirs(base1);
        hdfs2.mkdirs(base2);
        ConfigUtil.addLink(config, clusterName, "/hdfs1", hdfs1.makeQualified(base1).toUri());
        ConfigUtil.addLink(config, clusterName, "/hdfs2", hdfs2.makeQualified(base2).toUri());
        exception.expect(FileSystemException.class);
        exception.expectMessage("Only HDFS is supported as underlyingFileSystem, " + "fs scheme:viewfs");
        // Start nfs
        final Nfs3 nfsServer = new Nfs3(config);
        nfsServer.startServiceInternal(false);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

18 View Complete Implementation : TestReplicaCachingGetSpaceUsed.java
Copyright Apache License 2.0
Author : apache
/**
 * Unit test for ReplicaCachingGetSpaceUsed class.
 */
public class TestReplicaCachingGetSpaceUsed {

    private Configuration conf = null;

    private MiniDFSCluster cluster;

    private DistributedFileSystem fs;

    private DataNode dataNode;

    @Before
    public void setUp() throws IOException, NoSuchMethodException, InterruptedException {
        conf = new Configuration();
        conf.setClass("fs.getspaceused.classname", ReplicaCachingGetSpaceUsed.class, CachingGetSpaceUsed.class);
        conf.setLong(FS_DU_INTERVAL_KEY, 1000);
        conf.setLong("fs.getspaceused.jitterMillis", 0);
        cluster = new MiniDFSCluster.Builder(conf).build();
        cluster.waitActive();
        dataNode = cluster.getDataNodes().get(0);
        fs = cluster.getFileSystem();
    }

    @After
    public void tearDown() throws IOException {
        if (cluster != null) {
            cluster.shutdown();
            cluster = null;
        }
    }

    @Test
    public void testReplicaCachingGetSpaceUsedByFINALIZEDReplica() throws Exception {
        FSDataOutputStream os = fs.create(new Path("/testReplicaCachingGetSpaceUsedByFINALIZEDReplica"));
        byte[] bytes = new byte[20480];
        InputStream is = new ByteArrayInputStream(bytes);
        IOUtils.copyBytes(is, os, bytes.length);
        os.hsync();
        os.close();
        DFSInputStream dfsInputStream = fs.getClient().open("/testReplicaCachingGetSpaceUsedByFINALIZEDReplica");
        long blockLength = 0;
        long metaLength = 0;
        List<LocatedBlock> locatedBlocks = dfsInputStream.getAllBlocks();
        for (LocatedBlock locatedBlock : locatedBlocks) {
            ExtendedBlock extendedBlock = locatedBlock.getBlock();
            blockLength += extendedBlock.getLocalBlock().getNumBytes();
            metaLength += dataNode.getFSDataset().getMetaDataInputStream(extendedBlock).getLength();
        }
        // Guarantee ReplicaCachingGetSpaceUsed#refresh() is called after replica
        // has been written to disk.
        Thread.sleep(2000);
        assertEquals(blockLength + metaLength, dataNode.getFSDataset().getDfsUsed());
        fs.delete(new Path("/testReplicaCachingGetSpaceUsedByFINALIZEDReplica"), true);
    }

    @Test
    public void testReplicaCachingGetSpaceUsedByRBWReplica() throws Exception {
        FSDataOutputStream os = fs.create(new Path("/testReplicaCachingGetSpaceUsedByRBWReplica"));
        byte[] bytes = new byte[20480];
        InputStream is = new ByteArrayInputStream(bytes);
        IOUtils.copyBytes(is, os, bytes.length);
        os.hsync();
        DFSInputStream dfsInputStream = fs.getClient().open("/testReplicaCachingGetSpaceUsedByRBWReplica");
        long blockLength = 0;
        long metaLength = 0;
        List<LocatedBlock> locatedBlocks = dfsInputStream.getAllBlocks();
        for (LocatedBlock locatedBlock : locatedBlocks) {
            ExtendedBlock extendedBlock = locatedBlock.getBlock();
            blockLength += extendedBlock.getLocalBlock().getNumBytes();
            metaLength += dataNode.getFSDataset().getMetaDataInputStream(extendedBlock).getLength();
        }
        // Guarantee ReplicaCachingGetSpaceUsed#refresh() is called after replica
        // has been written to disk.
        Thread.sleep(2000);
        assertEquals(blockLength + metaLength, dataNode.getFSDataset().getDfsUsed());
        os.close();
        // Guarantee ReplicaCachingGetSpaceUsed#refresh() is called, dfsspaceused is
        // recalculated
        Thread.sleep(2000);
        // After close operation, the replica state will be transformed from RBW to
        // finalized. But the space used of these replicas are all included and the
        // dfsUsed value should be same.
        assertEquals(blockLength + metaLength, dataNode.getFSDataset().getDfsUsed());
        fs.delete(new Path("/testReplicaCachingGetSpaceUsedByRBWReplica"), true);
    }

    @Test(timeout = 15000)
    public void testFsDatasetImplDeepCopyReplica() {
        FsDatasetSpi<?> fsDataset = dataNode.getFSDataset();
        ModifyThread modifyThread = new ModifyThread();
        modifyThread.start();
        String bpid = cluster.getNamesystem(0).getBlockPoolId();
        int retryTimes = 10;
        while (retryTimes > 0) {
            try {
                Set<? extends Replica> replicas = fsDataset.deepCopyReplica(bpid);
                if (replicas.size() > 0) {
                    retryTimes--;
                }
            } catch (IOException e) {
                modifyThread.setShouldRun(false);
                Assert.fail("Encounter IOException when deep copy replica.");
            }
        }
        modifyThread.setShouldRun(false);
    }

    private clreplaced ModifyThread extends Thread {

        private boolean shouldRun = true;

        @Override
        public void run() {
            FSDataOutputStream os = null;
            while (shouldRun) {
                try {
                    int id = RandomUtils.nextInt();
                    os = fs.create(new Path("/testFsDatasetImplDeepCopyReplica/" + id));
                    byte[] bytes = new byte[2048];
                    InputStream is = new ByteArrayInputStream(bytes);
                    IOUtils.copyBytes(is, os, bytes.length);
                    os.hsync();
                    os.close();
                } catch (IOException e) {
                }
            }
            try {
                fs.delete(new Path("/testFsDatasetImplDeepCopyReplica"), true);
            } catch (IOException e) {
            }
        }

        private void setShouldRun(boolean shouldRun) {
            this.shouldRun = shouldRun;
        }
    }
}
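
The DataNode-side dfsUsed value asserted above is not directly visible to clients, but a plain FileSystem handle exposes related aggregates; a sketch (these numbers cover the whole cluster or path, not a single DataNode):

// Cluster-wide usage as reported by the NameNode.
FsStatus status = fs.getStatus();
System.out.println("used " + status.getUsed() + " of " + status.getCapacity() + " bytes");
// Per-path usage, counting all replicas.
ContentSummary summary = fs.getContentSummary(new Path("/"));
System.out.println("space consumed under /: " + summary.getSpaceConsumed());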

18 View Complete Implementation : TestDataNodeTcpNoDelay.java
Copyright Apache License 2.0
Author : apache
/**
 * Tests the {@code DataNode#transferBlocks()} path by re-replicating an
 * existing block.
 */
private void transferBlock(DistributedFileSystem dfs) throws Exception {
    Path dir = new Path("test-block-transfer");
    Path f = new Path(dir, "testfile");
    DFSTestUtil.createFile(dfs, f, 10240, (short) 1, 0);
    // force a block transfer to another DN
    dfs.setReplication(f, (short) 2);
    DFSTestUtil.waitForReplication(dfs, f, (short) 2, 20000);
}
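
A small follow-up to the helper above: once DFSTestUtil.waitForReplication returns, the new factor is visible through getFileStatus (a sketch reusing the dfs and f variables):

short replication = dfs.getFileStatus(f).getReplication();
System.out.println("replication factor of " + f + " is now " + replication);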

18 View Complete Implementation : TestTracing.java
Copyright Apache License 2.0
Author : apache
public class TestTracing {

    private static MiniDFSCluster cluster;

    private static DistributedFileSystem dfs;

    private Tracer prevTracer;

    private final static Configuration TRACING_CONF;

    private final static Configuration NO_TRACING_CONF;

    static {
        NO_TRACING_CONF = new Configuration();
        NO_TRACING_CONF.setLong("dfs.blocksize", 100 * 1024);
        TRACING_CONF = new Configuration(NO_TRACING_CONF);
        TRACING_CONF.set(CommonConfigurationKeys.FS_CLIENT_HTRACE_PREFIX + Tracer.SPAN_RECEIVER_CLASSES_KEY, SetSpanReceiver.class.getName());
        TRACING_CONF.set(CommonConfigurationKeys.FS_CLIENT_HTRACE_PREFIX + Tracer.SAMPLER_CLASSES_KEY, "AlwaysSampler");
    }

    @Test
    public void testTracing() throws Exception {
        // write and read without tracing started
        String fileName = "testTracingDisabled.dat";
        writeTestFile(fileName);
        Assert.assertEquals(0, SetSpanReceiver.size());
        readTestFile(fileName);
        Assert.assertEquals(0, SetSpanReceiver.size());
        writeTestFile("testReadTraceHooks.dat");
        FsTracer.clear();
        Tracer tracer = FsTracer.get(TRACING_CONF);
        writeWithTracing(tracer);
        readWithTracing(tracer);
    }

    private void writeWithTracing(Tracer tracer) throws Exception {
        long startTime = System.currentTimeMillis();
        TraceScope ts = tracer.newScope("testWriteTraceHooks");
        writeTestFile("testWriteTraceHooks.dat");
        long endTime = System.currentTimeMillis();
        ts.close();
        String[] expectedSpanNames = { "testWriteTraceHooks", "ClientProtocol#create", "ClientNamenodeProtocol#create", "ClientProtocol#fsync", "ClientNamenodeProtocol#fsync", "ClientProtocol#complete", "ClientNamenodeProtocol#complete", "newStreamForCreate", "DFSOutputStream#write", "DFSOutputStream#close", "dataStreamer", "OpWriteBlockProto", "ClientProtocol#addBlock", "ClientNamenodeProtocol#addBlock" };
        SetSpanReceiver.assertSpanNamesFound(expectedSpanNames);
        // The trace should last about the same amount of time as the test
        Map<String, List<Span>> map = SetSpanReceiver.getMap();
        Span s = map.get("testWriteTraceHooks").get(0);
        Assert.assertNotNull(s);
        long spanStart = s.getStartTimeMillis();
        long spanEnd = s.getStopTimeMillis();
        // Spans homed in the top trace should have the same trace id.
        // Spans having multiple parents (e.g. "dataStreamer" added by HDFS-7054)
        // and children of them are exception.
        String[] spansInTopTrace = { "testWriteTraceHooks", "ClientProtocol#create", "ClientNamenodeProtocol#create", "ClientProtocol#fsync", "ClientNamenodeProtocol#fsync", "ClientProtocol#complete", "ClientNamenodeProtocol#complete", "newStreamForCreate", "DFSOutputStream#write", "DFSOutputStream#close" };
        for (String desc : spansInTopTrace) {
            for (Span span : map.get(desc)) {
                Assert.assertEquals(ts.getSpan().getSpanId().getHigh(), span.getSpanId().getHigh());
            }
        }
        // test for timeline annotation added by HADOOP-11242
        Assert.assertEquals("called", map.get("ClientProtocol#create").get(0).getTimelineAnnotations().get(0).getMessage());
        SetSpanReceiver.clear();
    }

    private void readWithTracing(Tracer tracer) throws Exception {
        long startTime = System.currentTimeMillis();
        TraceScope ts = tracer.newScope("testReadTraceHooks");
        readTestFile("testReadTraceHooks.dat");
        ts.close();
        long endTime = System.currentTimeMillis();
        String[] expectedSpanNames = { "testReadTraceHooks", "ClientProtocol#getBlockLocations", "ClientNamenodeProtocol#getBlockLocations", "OpReadBlockProto" };
        SetSpanReceiver.assertSpanNamesFound(expectedSpanNames);
        // The trace should last about the same amount of time as the test
        Map<String, List<Span>> map = SetSpanReceiver.getMap();
        Span s = map.get("testReadTraceHooks").get(0);
        Assert.assertNotNull(s);
        long spanStart = s.getStartTimeMillis();
        long spanEnd = s.getStopTimeMillis();
        Assert.assertTrue(spanStart - startTime < 100);
        Assert.assertTrue(spanEnd - endTime < 100);
        // There should only be one trace id as it should all be homed in the
        // top trace.
        for (Span span : SetSpanReceiver.getSpans()) {
            System.out.println(span.toJson());
        }
        for (Span span : SetSpanReceiver.getSpans()) {
            Assert.assertEquals(ts.getSpan().getSpanId().getHigh(), span.getSpanId().getHigh());
        }
        SetSpanReceiver.clear();
    }

    private void writeTestFile(String testFileName) throws Exception {
        Path filePath = new Path(testFileName);
        FSDataOutputStream stream = dfs.create(filePath);
        for (int i = 0; i < 10; i++) {
            byte[] data = RandomStringUtils.randomAlphabetic(102400).getBytes();
            stream.write(data);
        }
        stream.hsync();
        stream.close();
    }

    private void readTestFile(String testFileName) throws Exception {
        Path filePath = new Path(testFileName);
        FSDataInputStream istream = dfs.open(filePath, 10240);
        ByteBuffer buf = ByteBuffer.allocate(10240);
        int count = 0;
        try {
            while (istream.read(buf) > 0) {
                count += 1;
                buf.clear();
                istream.seek(istream.getPos() + 5);
            }
        } catch (IOException ioe) {
        // Ignore this it's probably a seek after eof.
        } finally {
            istream.close();
        }
    }

    @Before
    public void startCluster() throws IOException {
        cluster = new MiniDFSCluster.Builder(NO_TRACING_CONF).numDataNodes(3).build();
        cluster.waitActive();
        dfs = cluster.getFileSystem();
        SetSpanReceiver.clear();
    }

    @After
    public void shutDown() throws IOException {
        if (cluster != null) {
            cluster.shutdown();
            cluster = null;
        }
        FsTracer.clear();
    }
}

18 View Complete Implementation : TestSnapshotMetrics.java
Copyright Apache License 2.0
Author : apache
/**
 * Test the snapshot-related metrics
 */
public class TestSnapshotMetrics {

    private static final long seed = 0;

    private static final short REPLICATION = 3;

    private static final String NN_METRICS = "NameNodeActivity";

    private static final String NS_METRICS = "FSNamesystem";

    private final Path dir = new Path("/TestSnapshot");

    private final Path sub1 = new Path(dir, "sub1");

    private final Path file1 = new Path(sub1, "file1");

    private final Path file2 = new Path(sub1, "file2");

    private Configuration conf;

    private MiniDFSCluster cluster;

    private DistributedFileSystem hdfs;

    @Before
    public void setUp() throws Exception {
        conf = new Configuration();
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
        cluster.waitActive();
        hdfs = cluster.getFileSystem();
        DFSTestUtil.createFile(hdfs, file1, 1024, REPLICATION, seed);
        DFSTestUtil.createFile(hdfs, file2, 1024, REPLICATION, seed);
    }

    @After
    public void tearDown() throws Exception {
        if (cluster != null) {
            cluster.shutdown();
            cluster = null;
        }
    }

    /**
     * Test the metrics SnapshottableDirectories, AllowSnapshotOps,
     * DisallowSnapshotOps, and listSnapshottableDirOps
     */
    @Test
    public void testSnapshottableDirs() throws Exception {
        cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
        assertGauge("SnapshottableDirectories", 0, getMetrics(NS_METRICS));
        assertCounter("AllowSnapshotOps", 0L, getMetrics(NN_METRICS));
        assertCounter("DisallowSnapshotOps", 0L, getMetrics(NN_METRICS));
        // Allow snapshots for directories, and check the metrics
        hdfs.allowSnapshot(sub1);
        assertGauge("SnapshottableDirectories", 1, getMetrics(NS_METRICS));
        assertCounter("AllowSnapshotOps", 1L, getMetrics(NN_METRICS));
        Path sub2 = new Path(dir, "sub2");
        Path file = new Path(sub2, "file");
        DFSTestUtil.createFile(hdfs, file, 1024, REPLICATION, seed);
        hdfs.allowSnapshot(sub2);
        assertGauge("SnapshottableDirectories", 2, getMetrics(NS_METRICS));
        assertCounter("AllowSnapshotOps", 2L, getMetrics(NN_METRICS));
        Path subsub1 = new Path(sub1, "sub1sub1");
        Path subfile = new Path(subsub1, "file");
        DFSTestUtil.createFile(hdfs, subfile, 1024, REPLICATION, seed);
        hdfs.allowSnapshot(subsub1);
        assertGauge("SnapshottableDirectories", 3, getMetrics(NS_METRICS));
        assertCounter("AllowSnapshotOps", 3L, getMetrics(NN_METRICS));
        // Allowing snapshots on an already snapshottable directory should not
        // change the metrics
        hdfs.allowSnapshot(sub1);
        assertGauge("SnapshottableDirectories", 3, getMetrics(NS_METRICS));
        // But the number of allowSnapshot operations still increases
        assertCounter("AllowSnapshotOps", 4L, getMetrics(NN_METRICS));
        // Disallow the snapshot for snapshottable directories, then check the
        // metrics again
        hdfs.disallowSnapshot(sub1);
        assertGauge("SnapshottableDirectories", 2, getMetrics(NS_METRICS));
        assertCounter("DisallowSnapshotOps", 1L, getMetrics(NN_METRICS));
        // delete subsub1, snapshottable directories should be 1
        hdfs.delete(subsub1, true);
        assertGauge("SnapshottableDirectories", 1, getMetrics(NS_METRICS));
        // list all the snapshottable directories
        SnapshottableDirectoryStatus[] status = hdfs.getSnapshottableDirListing();
        assertEquals(1, status.length);
        assertCounter("ListSnapshottableDirOps", 1L, getMetrics(NN_METRICS));
    }

    /**
     * Test the metrics Snapshots, CreateSnapshotOps, DeleteSnapshotOps,
     * RenameSnapshotOps
     */
    @Test
    public void testSnapshots() throws Exception {
        cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
        assertGauge("Snapshots", 0, getMetrics(NS_METRICS));
        assertCounter("CreateSnapshotOps", 0L, getMetrics(NN_METRICS));
        // Creating a snapshot for a non-snapshottable directory should not
        // change the metrics
        try {
            hdfs.createSnapshot(sub1, "s1");
        } catch (Exception e) {
        }
        assertGauge("Snapshots", 0, getMetrics(NS_METRICS));
        assertCounter("CreateSnapshotOps", 1L, getMetrics(NN_METRICS));
        // Create snapshot for sub1
        hdfs.allowSnapshot(sub1);
        hdfs.createSnapshot(sub1, "s1");
        assertGauge("Snapshots", 1, getMetrics(NS_METRICS));
        assertCounter("CreateSnapshotOps", 2L, getMetrics(NN_METRICS));
        hdfs.createSnapshot(sub1, "s2");
        assertGauge("Snapshots", 2, getMetrics(NS_METRICS));
        assertCounter("CreateSnapshotOps", 3L, getMetrics(NN_METRICS));
        hdfs.getSnapshotDiffReport(sub1, "s1", "s2");
        assertCounter("SnapshotDiffReportOps", 1L, getMetrics(NN_METRICS));
        // Create snapshot for a directory under sub1
        Path subsub1 = new Path(sub1, "sub1sub1");
        Path subfile = new Path(subsub1, "file");
        DFSTestUtil.createFile(hdfs, subfile, 1024, REPLICATION, seed);
        hdfs.allowSnapshot(subsub1);
        hdfs.createSnapshot(subsub1, "s11");
        assertGauge("Snapshots", 3, getMetrics(NS_METRICS));
        assertCounter("CreateSnapshotOps", 4L, getMetrics(NN_METRICS));
        // delete snapshot
        hdfs.deleteSnapshot(sub1, "s2");
        assertGauge("Snapshots", 2, getMetrics(NS_METRICS));
        assertCounter("DeleteSnapshotOps", 1L, getMetrics(NN_METRICS));
        // rename snapshot
        hdfs.renameSnapshot(sub1, "s1", "NewS1");
        assertGauge("Snapshots", 2, getMetrics(NS_METRICS));
        assertCounter("RenameSnapshotOps", 1L, getMetrics(NN_METRICS));
    }
}
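
For reference, the snapshot calls whose metrics are asserted above, written as a plain sequence against a DistributedFileSystem; a sketch assuming an existing hdfs handle (path and snapshot names follow the test):

Path dir = new Path("/TestSnapshot/sub1");
hdfs.allowSnapshot(dir);
hdfs.createSnapshot(dir, "s1");
// ... modify files under dir ...
hdfs.createSnapshot(dir, "s2");
SnapshotDiffReport diff = hdfs.getSnapshotDiffReport(dir, "s1", "s2");
System.out.println(diff);
hdfs.deleteSnapshot(dir, "s2");
hdfs.renameSnapshot(dir, "s1", "baseline");
hdfs.deleteSnapshot(dir, "baseline");
// a directory can only be un-snapshotted once all of its snapshots are deleted
hdfs.disallowSnapshot(dir);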

18 View Complete Implementation : TestBlockUnderConstruction.java
Copyright Apache License 2.0
Author : apache
public class TestBlockUnderConstruction {

    static final String BASE_DIR = "/test/TestBlockUnderConstruction";

    // same as TestFileCreation.blocksize
    static final int BLOCK_SIZE = 8192;

    // number of blocks to write
    static final int NUM_BLOCKS = 5;

    private static MiniDFSCluster cluster;

    private static DistributedFileSystem hdfs;

    @BeforeClass
    public static void setUp() throws Exception {
        Configuration conf = new HdfsConfiguration();
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        hdfs = cluster.getFileSystem();
    }

    @AfterClass
    public static void tearDown() throws Exception {
        if (hdfs != null)
            hdfs.close();
        if (cluster != null)
            cluster.shutdown();
    }

    void writeFile(Path file, FSDataOutputStream stm, int size) throws IOException {
        long blocksBefore = stm.getPos() / BLOCK_SIZE;
        TestFileCreation.writeFile(stm, BLOCK_SIZE);
        // need to make sure the full block is completely flushed to the DataNodes
        // (see FSOutputSummer#flush)
        stm.flush();
        int blocksAfter = 0;
        // wait until the block is allocated by DataStreamer
        BlockLocation[] locatedBlocks;
        while (blocksAfter <= blocksBefore) {
            locatedBlocks = DFSClientAdapter.getDFSClient(hdfs).getBlockLocations(file.toString(), 0L, BLOCK_SIZE * NUM_BLOCKS);
            blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
        }
    }

    private void verifyFileBlocks(String file, boolean isFileOpen) throws IOException {
        FSNamesystem ns = cluster.getNamesystem();
        final INodeFile inode = INodeFile.valueOf(ns.dir.getINode(file), file);
        assertTrue("File " + inode.toString() + " isUnderConstruction = " + inode.isUnderConstruction() + " expected to be " + isFileOpen, inode.isUnderConstruction() == isFileOpen);
        BlockInfo[] blocks = inode.getBlocks();
        assertTrue("File does not have blocks: " + inode.toString(), blocks != null && blocks.length > 0);
        int idx = 0;
        BlockInfo curBlock;
        // all blocks but the last two should be regular blocks
        for (; idx < blocks.length - 2; idx++) {
            curBlock = blocks[idx];
            assertTrue("Block is not complete: " + curBlock, curBlock.isComplete());
            assertTrue("Block is not in BlocksMap: " + curBlock, ns.getBlockManager().getStoredBlock(curBlock) == curBlock);
        }
        // the penultimate block is either complete or
        // committed if the file is not closed
        if (idx > 0) {
            // penultimate block
            curBlock = blocks[idx - 1];
            assertTrue("Block " + curBlock + " isUnderConstruction = " + inode.isUnderConstruction() + " expected to be " + isFileOpen, (isFileOpen && curBlock.isComplete()) || (!isFileOpen && !curBlock.isComplete() == (curBlock.getBlockUCState() == BlockUCState.COMMITTED)));
            assertTrue("Block is not in BlocksMap: " + curBlock, ns.getBlockManager().getStoredBlock(curBlock) == curBlock);
        }
        // The last block is complete if the file is closed.
        // If the file is open, the last block may be complete or not.
        // last block
        curBlock = blocks[idx];
        if (!isFileOpen) {
            assertTrue("Block " + curBlock + ", isFileOpen = " + isFileOpen, curBlock.isComplete());
        }
        assertTrue("Block is not in BlocksMap: " + curBlock, ns.getBlockManager().getStoredBlock(curBlock) == curBlock);
    }

    @Test
    public void testBlockCreation() throws IOException {
        Path file1 = new Path(BASE_DIR, "file1.dat");
        FSDataOutputStream out = TestFileCreation.createFile(hdfs, file1, 3);
        for (int idx = 0; idx < NUM_BLOCKS; idx++) {
            // write one block
            writeFile(file1, out, BLOCK_SIZE);
            // verify consistency
            verifyFileBlocks(file1.toString(), true);
        }
        // close file
        out.close();
        // verify consistency
        verifyFileBlocks(file1.toString(), false);
    }

    /**
     * Test NameNode.getBlockLocations(..) on reading un-closed files.
     */
    @Test
    public void testGetBlockLocations() throws IOException {
        final NamenodeProtocols namenode = cluster.getNameNodeRpc();
        final BlockManager blockManager = cluster.getNamesystem().getBlockManager();
        final Path p = new Path(BASE_DIR, "file2.dat");
        final String src = p.toString();
        final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);
        // write a half block
        int len = BLOCK_SIZE >>> 1;
        writeFile(p, out, len);
        for (int i = 1; i < NUM_BLOCKS; ) {
            // verify consistency
            final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
            final List<LocatedBlock> blocks = lb.getLocatedBlocks();
            assertEquals(i, blocks.size());
            final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
            assertFalse(blockManager.getStoredBlock(b).isComplete());
            if (++i < NUM_BLOCKS) {
                // write one more block
                writeFile(p, out, BLOCK_SIZE);
                len += BLOCK_SIZE;
            }
        }
        // close file
        out.close();
    }

    /**
     * A storage ID can be invalid if the storage failed or the node
     * reregisters. When the node heart-beats, the storage report in it
     * causes storage volumes to be added back. An invalid storage ID
     * should not cause an NPE.
     */
    @Test
    public void testEmptyExpectedLocations() throws Exception {
        final NamenodeProtocols namenode = cluster.getNameNodeRpc();
        final FSNamesystem fsn = cluster.getNamesystem();
        final BlockManager bm = fsn.getBlockManager();
        final Path p = new Path(BASE_DIR, "file2.dat");
        final String src = p.toString();
        final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 1);
        writeFile(p, out, 256);
        out.hflush();
        // make sure the block is readable
        LocatedBlocks lbs = namenode.getBlockLocations(src, 0, 256);
        LocatedBlock lastLB = lbs.getLocatedBlocks().get(0);
        final Block b = lastLB.getBlock().getLocalBlock();
        // fake a block recovery
        long blockRecoveryId = bm.nextGenerationStamp(false);
        BlockUnderConstructionFeature uc = bm.getStoredBlock(b).getUnderConstructionFeature();
        uc.initializeBlockRecovery(null, blockRecoveryId, false);
        try {
            String[] storages = { "invalid-storage-id1" };
            fsn.commitBlockSynchronization(lastLB.getBlock(), blockRecoveryId, 256L, true, false, lastLB.getLocations(), storages);
        } catch (java.lang.IllegalStateException ise) {
        // Although a failure is expected as of now, future commit policy
        // changes may make it not fail. This is not critical to the test.
        }
        // Invalid storage should not trigger an exception.
        lbs = namenode.getBlockLocations(src, 0, 256);
    }
}

18 View Complete Implementation : TestMover.java
Copyright Apache License 2.0
Author : apache
private void waitForLocatedBlockWithDiskStorageType(final DistributedFileSystem dfs, final String file, int expectedDiskCount) throws Exception {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            LocatedBlock lb = null;
            try {
                lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
            } catch (IOException e) {
                LOG.error("Exception while getting located blocks", e);
                return false;
            }
            int diskCount = 0;
            for (StorageType storageType : lb.getStorageTypes()) {
                if (StorageType.DISK == storageType) {
                    diskCount++;
                }
            }
            LOG.info("Disk replica count, expected={} and actual={}", expectedDiskCount, diskCount);
            return expectedDiskCount == diskCount;
        }
    }, 100, 3000);
}
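
The helper above polls until the first block of a file reports the expected number of DISK replicas. As a rough sketch, the same check can be done once, without the retry loop, using only the client calls already shown above (the class and method names here are illustrative, not part of the original test):

import java.io.IOException;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

public class DiskReplicaCountSketch {

    // Count how many replicas of the file's first block currently sit on DISK storage.
    static int countDiskReplicas(DistributedFileSystem dfs, String file) throws IOException {
        LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
        int diskCount = 0;
        for (StorageType storageType : lb.getStorageTypes()) {
            if (StorageType.DISK == storageType) {
                diskCount++;
            }
        }
        return diskCount;
    }
}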

18 View Complete Implementation : TestExportsTable.java
Copyright Apache License 2.0
Author : apache
@Test
public void testViewFsInternalExportPoint() throws IOException {
    NfsConfiguration config = new NfsConfiguration();
    MiniDFSCluster cluster = null;
    String clusterName = RandomStringUtils.randomAlphabetic(10);
    String exportPoint = "/hdfs1/subpath";
    config.setStrings(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY, exportPoint);
    config.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, FsConstants.VIEWFS_SCHEME + "://" + clusterName);
    // Use ephemeral port in case tests are running in parallel
    config.setInt("nfs3.mountd.port", 0);
    config.setInt("nfs3.server.port", 0);
    config.set("nfs.http.address", "0.0.0.0:0");
    try {
        cluster = new MiniDFSCluster.Builder(config).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2)).numDataNodes(2).build();
        cluster.waitActive();
        DistributedFileSystem hdfs1 = cluster.getFileSystem(0);
        DistributedFileSystem hdfs2 = cluster.getFileSystem(1);
        cluster.waitActive();
        Path base1 = new Path("/user1");
        Path base2 = new Path("/user2");
        hdfs1.delete(base1, true);
        hdfs2.delete(base2, true);
        hdfs1.mkdirs(base1);
        hdfs2.mkdirs(base2);
        ConfigUtil.addLink(config, clusterName, "/hdfs1", hdfs1.makeQualified(base1).toUri());
        ConfigUtil.addLink(config, clusterName, "/hdfs2", hdfs2.makeQualified(base2).toUri());
        Path subPath = new Path(base1, "subpath");
        hdfs1.delete(subPath, true);
        hdfs1.mkdirs(subPath);
        // Start nfs
        final Nfs3 nfsServer = new Nfs3(config);
        nfsServer.startServiceInternal(false);
        Mountd mountd = nfsServer.getMountd();
        RpcProgramMountd rpcMount = (RpcProgramMountd) mountd.getRpcProgram();
        assertTrue(rpcMount.getExports().size() == 1);
        String exportInMountd = rpcMount.getExports().get(0);
        assertTrue(exportInMountd.equals(exportPoint));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

18 View Complete Implementation : TestTracingShortCircuitLocalRead.java
Copyright Apache License 2.0
Author : apache
public class TestTracingShortCircuitLocalRead {

    private static Configuration conf;

    private static MiniDFSCluster cluster;

    private static DistributedFileSystem dfs;

    private static TemporarySocketDirectory sockDir;

    static final Path TEST_PATH = new Path("testShortCircuitTraceHooks");

    static final int TEST_LENGTH = 1234;

    @BeforeClass
    public static void init() {
        sockDir = new TemporarySocketDirectory();
        DomainSocket.disableBindPathValidation();
    }

    @AfterClass
    public static void shutdown() throws IOException {
        sockDir.close();
    }

    @Test
    public void testShortCircuitTraceHooks() throws IOException {
        assumeTrue(NativeCodeLoader.isNativeCodeLoaded());
        assumeNotWindows();
        conf = new Configuration();
        conf.set(TraceUtils.DEFAULT_HADOOP_TRACE_PREFIX + Tracer.SPAN_RECEIVER_CLASSES_KEY, SetSpanReceiver.class.getName());
        conf.set(TraceUtils.DEFAULT_HADOOP_TRACE_PREFIX + Tracer.SAMPLER_CLASSES_KEY, "AlwaysSampler");
        conf.setLong("dfs.blocksize", 100 * 1024);
        conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
        conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY, false);
        conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY, new File(sockDir.getDir(), "testShortCircuitTraceHooks._PORT.sock").getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "CRC32C");
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        dfs = cluster.getFileSystem();
        try {
            DFSTestUtil.createFile(dfs, TEST_PATH, TEST_LENGTH, (short) 1, 5678L);
            TraceScope ts = FsTracer.get(conf).newScope("testShortCircuitTraceHooks");
            FSDataInputStream stream = dfs.open(TEST_PATH);
            byte[] buf = new byte[TEST_LENGTH];
            IOUtils.readFully(stream, buf, 0, TEST_LENGTH);
            stream.close();
            ts.close();
            String[] expectedSpanNames = { "OpRequestShortCircuitAccessProto", "ShortCircuitShmRequestProto" };
            SetSpanReceiver.assertSpanNamesFound(expectedSpanNames);
        } finally {
            dfs.close();
            cluster.shutdown();
        }
    }
}

18 View Complete Implementation : TestAddStripedBlockInFBR.java
Copyright Apache License 2.0
Author : apache
public class TestAddStripedBlockInFBR {

    private final ErasureCodingPolicy ecPolicy = StripedFileTestUtil.getDefaultECPolicy();

    private final int cellSize = ecPolicy.getCellSize();

    private final short dataBlocks = (short) ecPolicy.getNumDataUnits();

    private final short parityBlocks = (short) ecPolicy.getNumParityUnits();

    private final short groupSize = (short) (dataBlocks + parityBlocks);

    private MiniDFSCluster cluster;

    private DistributedFileSystem dfs;

    @Rule
    public Timeout globalTimeout = new Timeout(300000);

    @Before
    public void setup() throws IOException {
        Configuration conf = new HdfsConfiguration();
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build();
        cluster.waitActive();
        dfs = cluster.getFileSystem();
        dfs.enableErasureCodingPolicy(StripedFileTestUtil.getDefaultECPolicy().getName());
    }

    @After
    public void tearDown() {
        if (cluster != null) {
            cluster.shutdown();
            cluster = null;
        }
    }

    @Test
    public void testAddBlockInFullBlockReport() throws Exception {
        BlockManager spy = Mockito.spy(cluster.getNamesystem().getBlockManager());
        // let NN ignore one DataNode's IBR
        final DataNode dn = cluster.getDataNodes().get(0);
        final DatanodeID datanodeID = dn.getDatanodeId();
        Mockito.doNothing().when(spy).processIncrementalBlockReport(Mockito.eq(datanodeID), Mockito.any());
        Whitebox.setInternalState(cluster.getNamesystem(), "blockManager", spy);
        final Path ecDir = new Path("/ec");
        final Path repDir = new Path("/rep");
        dfs.mkdirs(ecDir);
        dfs.mkdirs(repDir);
        dfs.getClient().setErasureCodingPolicy(ecDir.toString(), StripedFileTestUtil.getDefaultECPolicy().getName());
        // create several non-EC files and one EC file
        final Path[] repFiles = new Path[groupSize];
        for (int i = 0; i < groupSize; i++) {
            repFiles[i] = new Path(repDir, "f" + i);
            DFSTestUtil.createFile(dfs, repFiles[i], 1L, (short) 3, 0L);
        }
        final Path ecFile = new Path(ecDir, "f");
        DFSTestUtil.createFile(dfs, ecFile, cellSize * dataBlocks, (short) 1, 0L);
        GenericTestUtils.waitFor(new Supplier<Boolean>() {

            @Override
            public Boolean get() {
                try {
                    // trigger dn's FBR. The FBR will add block-dn mapping.
                    cluster.triggerBlockReports();
                    // make sure NN has correct block-dn mapping
                    BlockInfoStriped blockInfo = (BlockInfoStriped) cluster.getNamesystem().getFSDirectory().getINode(ecFile.toString()).asFile().getLastBlock();
                    NumberReplicas nr = spy.countNodes(blockInfo);
                    return nr.excessReplicas() == 0 && nr.liveReplicas() == groupSize;
                } catch (Exception ignored) {
                // Ignore the exception
                }
                return false;
            }
        }, 3000, 60000);
    }
}

18 View Complete Implementation : HATestUtil.java
Copyright Apache License 2.0
Author : apache
/**
 * Customize stateId of the client AlignmentContext for testing.
 */
public static long setACStateId(DistributedFileSystem dfs, long stateId) throws Exception {
    ObserverReadProxyProvider<?> provider = (ObserverReadProxyProvider<?>) ((RetryInvocationHandler<?>) Proxy.getInvocationHandler(dfs.getClient().getNamenode())).getProxyProvider();
    ClientGSIContext ac = (ClientGSIContext) (provider.getAlignmentContext());
    Field f = ac.getClass().getDeclaredField("lastSeenStateId");
    f.setAccessible(true);
    LongAccumulator lastSeenStateId = (LongAccumulator) f.get(ac);
    long currentStateId = lastSeenStateId.getThenReset();
    lastSeenStateId.accumulate(stateId);
    return currentStateId;
}
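
A hypothetical wrapper showing how the helper above might be used in a test: override the client's last-seen state id, run an action, and restore the previous value so later tests are unaffected (this wrapper and its names are illustrative, not part of HATestUtil):

import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;

public class StateIdSketch {

    // Sketch only: run an action while the client reports the injected state id.
    static void withInjectedStateId(DistributedFileSystem dfs, long stateId, Runnable action) throws Exception {
        long previous = HATestUtil.setACStateId(dfs, stateId);
        try {
            action.run();
        } finally {
            HATestUtil.setACStateId(dfs, previous);
        }
    }
}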

18 View Complete Implementation : TestSnapshotStatsMXBean.java
Copyright Apache License 2.0
Author : apache
/**
 * Test getting SnapshotStatsMXBean information
 */
@Test
public void testSnapshotStatsMXBeanInfo() throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    String pathName = "/snapshot";
    Path path = new Path(pathName);
    try {
        cluster = new MiniDFSCluster.Builder(conf).build();
        cluster.waitActive();
        SnapshotManager sm = cluster.getNamesystem().getSnapshotManager();
        DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
        dfs.mkdirs(path);
        dfs.allowSnapshot(path);
        dfs.createSnapshot(path);
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName mxbeanName = new ObjectName("Hadoop:service=NameNode,name=SnapshotInfo");
        CompositeData[] directories = (CompositeData[]) mbs.getAttribute(mxbeanName, "SnapshottableDirectories");
        int numDirectories = Array.getLength(directories);
        assertEquals(sm.getNumSnapshottableDirs(), numDirectories);
        CompositeData[] snapshots = (CompositeData[]) mbs.getAttribute(mxbeanName, "Snapshots");
        int numSnapshots = Array.getLength(snapshots);
        assertEquals(sm.getNumSnapshots(), numSnapshots);
        CompositeData d = (CompositeData) Array.get(directories, 0);
        CompositeData s = (CompositeData) Array.get(snapshots, 0);
        assertTrue(((String) d.get("path")).contains(pathName));
        assertTrue(((String) s.get("snapshotDirectory")).contains(pathName));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

18 View Complete Implementation : TestNNMetricFilesInGetListingOps.java
Copyright Apache License 2.0
Author : apache
/**
 * Test case for FilesInGetListingOps metric in Namenode
 */
public class TestNNMetricFilesInGetListingOps {

    private static final Configuration CONF = new HdfsConfiguration();

    private static final String NN_METRICS = "NameNodeActivity";

    static {
        CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100);
        CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
        CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
        CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
    }

    private MiniDFSCluster cluster;

    private DistributedFileSystem fs;

    private final Random rand = new Random();

    @Before
    public void setUp() throws Exception {
        cluster = new MiniDFSCluster.Builder(CONF).build();
        cluster.waitActive();
        cluster.getNameNode();
        fs = cluster.getFileSystem();
    }

    @After
    public void tearDown() throws Exception {
        if (cluster != null) {
            cluster.shutdown();
            cluster = null;
        }
    }

    /**
     * create a file with a length of <code>fileLen</code>
     */
    private void createFile(String fileName, long fileLen, short replicas) throws IOException {
        Path filePath = new Path(fileName);
        DFSTestUtil.createFile(fs, filePath, fileLen, replicas, rand.nextLong());
    }

    @Test
    public void testFilesInGetListingOps() throws Exception {
        createFile("/tmp1/t1", 3200, (short) 3);
        createFile("/tmp1/t2", 3200, (short) 3);
        createFile("/tmp2/t1", 3200, (short) 3);
        createFile("/tmp2/t2", 3200, (short) 3);
        cluster.getNameNodeRpc().getListing("/tmp1", HdfsFileStatus.EMPTY_NAME, false);
        assertCounter("FilesInGetListingOps", 2L, getMetrics(NN_METRICS));
        cluster.getNameNodeRpc().getListing("/tmp2", HdfsFileStatus.EMPTY_NAME, false);
        assertCounter("FilesInGetListingOps", 4L, getMetrics(NN_METRICS));
    }
}

18 View Complete Implementation : TestStoragePolicySatisfyAdminCommands.java
Copyright Apache License 2.0
Author : apache
/**
 * Test StoragePolicySatisfy admin commands.
 */
public class TestStoragePolicySatisfyAdminCommands {

    private static final short REPL = 1;

    private static final int SIZE = 128;

    private Configuration conf = null;

    private MiniDFSCluster cluster = null;

    private DistributedFileSystem dfs = null;

    private StoragePolicySatisfier externalSps = null;

    @Before
    public void clusterSetUp() throws IOException, URISyntaxException {
        conf = new HdfsConfiguration();
        conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY, StoragePolicySatisfierMode.EXTERNAL.toString());
        // Reduced refresh cycle to update latest datanodes.
        conf.setLong(DFSConfigKeys.DFS_SPS_DATANODE_CACHE_REFRESH_INTERVAL_MS, 1000);
        StorageType[][] newtypes = new StorageType[][] { { StorageType.ARCHIVE, StorageType.DISK } };
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL).storageTypes(newtypes).build();
        cluster.waitActive();
        dfs = cluster.getFileSystem();
        NameNodeConnector nnc = DFSTestUtil.getNameNodeConnector(conf, HdfsServerConstants.MOVER_ID_PATH, 1, false);
        externalSps = new StoragePolicySatisfier(conf);
        Context externalCtxt = new ExternalSPSContext(externalSps, nnc);
        externalSps.init(externalCtxt);
        externalSps.start(StoragePolicySatisfierMode.EXTERNAL);
    }

    @After
    public void clusterShutdown() throws IOException {
        if (dfs != null) {
            dfs.close();
            dfs = null;
        }
        if (cluster != null) {
            cluster.shutdown();
            cluster = null;
        }
        if (externalSps != null) {
            externalSps.stopGracefully();
        }
    }

    @Test(timeout = 30000)
    public void testStoragePolicySatisfierCommand() throws Exception {
        final String file = "/testStoragePolicySatisfierCommand";
        DFSTestUtil.createFile(dfs, new Path(file), SIZE, REPL, 0);
        final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
        DFSTestUtil.toolRun(admin, "-getStoragePolicy -path " + file, 0, "The storage policy of " + file + " is unspecified");
        DFSTestUtil.toolRun(admin, "-setStoragePolicy -path " + file + " -policy COLD", 0, "Set storage policy COLD on " + file.toString());
        DFSTestUtil.toolRun(admin, "-satisfyStoragePolicy -path " + file, 0, "Scheduled blocks to move based on the current storage policy on " + file.toString());
        DFSTestUtil.waitExpectedStorageType(file, StorageType.ARCHIVE, 1, 30000, dfs);
    }

    @Test(timeout = 30000)
    public void testStoragePolicySatisfierCommandWithURI() throws Exception {
        final String file = "/testStoragePolicySatisfierCommandURI";
        DFSTestUtil.createFile(dfs, new Path(file), SIZE, REPL, 0);
        final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
        DFSTestUtil.toolRun(admin, "-getStoragePolicy -path " + file, 0, "The storage policy of " + file + " is unspecified");
        DFSTestUtil.toolRun(admin, "-setStoragePolicy -path " + file + " -policy COLD", 0, "Set storage policy COLD on " + file.toString());
        DFSTestUtil.toolRun(admin, "-satisfyStoragePolicy -path " + dfs.getUri() + file, 0, "Scheduled blocks to move based on the current storage policy on " + dfs.getUri() + file.toString());
        DFSTestUtil.waitExpectedStorageType(file, StorageType.ARCHIVE, 1, 30000, dfs);
    }
}
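
The tests above drive storage-policy satisfaction through the StoragePolicyAdmin CLI. As a rough sketch of the equivalent programmatic calls on DistributedFileSystem (the path and policy name are illustrative, and satisfyStoragePolicy needs an SPS service, internal or external, to be running for the moves to happen):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SatisfyStoragePolicySketch {

    // Sketch only: programmatic equivalent of "-setStoragePolicy ... -policy COLD"
    // followed by "-satisfyStoragePolicy".
    static void moveToCold(DistributedFileSystem dfs, Path file) throws Exception {
        dfs.setStoragePolicy(file, "COLD");
        dfs.satisfyStoragePolicy(file);
    }
}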

18 View Complete Implementation : TestMover.java
Copyright Apache License 2.0
Author : apache
private void waitForLocatedBlockWithArchiveStorageType(final DistributedFileSystem dfs, final String file, int expectedArchiveCount) throws Exception {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            LocatedBlock lb = null;
            try {
                lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
            } catch (IOException e) {
                LOG.error("Exception while getting located blocks", e);
                return false;
            }
            int archiveCount = 0;
            for (StorageType storageType : lb.getStorageTypes()) {
                if (StorageType.ARCHIVE == storageType) {
                    archiveCount++;
                }
            }
            LOG.info("Archive replica count, expected={} and actual={}", expectedArchiveCount, archiveCount);
            return expectedArchiveCount == archiveCount;
        }
    }, 100, 3000);
}

18 View Complete Implementation : TestFileContextSnapshot.java
Copyright Apache License 2.0
Author : apache
public class TestFileContextSnapshot {

    private static final short REPLICATION = 3;

    private static final int BLOCKSIZE = 1024;

    private static final long SEED = 0;

    private Configuration conf;

    private MiniDFSCluster cluster;

    private FileContext fileContext;

    private DistributedFileSystem dfs;

    private final String snapshotRoot = "/snapshot";

    private final Path filePath = new Path(snapshotRoot, "file1");

    private Path snapRootPath;

    @Before
    public void setUp() throws Exception {
        conf = new Configuration();
        conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
        cluster.waitActive();
        fileContext = FileContext.getFileContext(conf);
        dfs = (DistributedFileSystem) cluster.getFileSystem();
        snapRootPath = new Path(snapshotRoot);
        dfs.mkdirs(snapRootPath);
    }

    @After
    public void tearDown() throws Exception {
        if (cluster != null) {
            cluster.shutdown();
            cluster = null;
        }
    }

    @Test(timeout = 60000)
    public void testCreateAndDeleteSnapshot() throws Exception {
        DFSTestUtil.createFile(dfs, filePath, BLOCKSIZE, REPLICATION, SEED);
        // disallow snapshot on dir
        dfs.disallowSnapshot(snapRootPath);
        try {
            fileContext.createSnapshot(snapRootPath, "s1");
        } catch (SnapshotException e) {
            GenericTestUtils.assertExceptionContains("Directory is not a snapshottable directory: " + snapRootPath, e);
        }
        // allow snapshot on dir
        dfs.allowSnapshot(snapRootPath);
        Path ssPath = fileContext.createSnapshot(snapRootPath, "s1");
        assertTrue("Failed to create snapshot", dfs.exists(ssPath));
        fileContext.deleteSnapshot(snapRootPath, "s1");
        assertFalse("Failed to delete snapshot", dfs.exists(ssPath));
    }

    /**
     * Test FileStatus of snapshot file before/after rename
     */
    @Test(timeout = 60000)
    public void testRenameSnapshot() throws Exception {
        DFSTestUtil.createFile(dfs, filePath, BLOCKSIZE, REPLICATION, SEED);
        dfs.allowSnapshot(snapRootPath);
        // Create snapshot for sub1
        Path snapPath1 = fileContext.createSnapshot(snapRootPath, "s1");
        Path ssPath = new Path(snapPath1, filePath.getName());
        assertTrue("Failed to create snapshot", dfs.exists(ssPath));
        FileStatus statusBeforeRename = dfs.getFileStatus(ssPath);
        // Rename the snapshot
        fileContext.renameSnapshot(snapRootPath, "s1", "s2");
        // <sub1>/.snapshot/s1/file1 should no longer exist
        assertFalse("Old snapshot still exists after rename!", dfs.exists(ssPath));
        Path snapshotRoot = SnapshotTestHelper.getSnapshotRoot(snapRootPath, "s2");
        ssPath = new Path(snapshotRoot, filePath.getName());
        // Instead, <sub1>/.snapshot/s2/file1 should exist
        assertTrue("Snapshot doesn't exist!", dfs.exists(ssPath));
        FileStatus statusAfterRename = dfs.getFileStatus(ssPath);
        // FileStatus of the snapshot should not change except the path
        assertFalse("Filestatus of the snapshot matches", statusBeforeRename.equals(statusAfterRename));
        statusBeforeRename.setPath(statusAfterRename.getPath());
        assertEquals("FileStatus of the snapshot mismatches!", statusBeforeRename.toString(), statusAfterRename.toString());
    }
}

18 View Complete Implementation : TestFileTruncate.java
Copyright Apache License 2.0
Author : apache
public static void checkBlockRecovery(Path p, DistributedFileSystem dfs) throws IOException {
    checkBlockRecovery(p, dfs, SUCCESS_ATTEMPTS, SLEEP);
}

18 View Complete Implementation : TestDebugAdmin.java
Copyright Apache License 2.0
Author : apache
public class TestDebugAdmin {

    static private final String TEST_ROOT_DIR = new File(System.getProperty("test.build.data", "/tmp"), TestDebugAdmin.class.getSimpleName()).getAbsolutePath();

    private MiniDFSCluster cluster;

    private DistributedFileSystem fs;

    private DebugAdmin admin;

    private DataNode datanode;

    @Before
    public void setUp() throws Exception {
        final File testRoot = new File(TEST_ROOT_DIR);
        testRoot.delete();
        testRoot.mkdirs();
        Configuration conf = new Configuration();
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        admin = new DebugAdmin(conf);
        datanode = cluster.getDataNodes().get(0);
    }

    @After
    public void tearDown() throws Exception {
        if (cluster != null) {
            cluster.shutdown();
            cluster = null;
        }
    }

    private String runCmd(String[] cmd) throws Exception {
        final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        final PrintStream out = new PrintStream(bytes);
        final PrintStream oldErr = System.err;
        final PrintStream oldOut = System.out;
        System.setErr(out);
        System.setOut(out);
        int ret;
        try {
            ret = admin.run(cmd);
        } finally {
            System.setErr(oldErr);
            System.setOut(oldOut);
            IOUtils.closeStream(out);
        }
        return "ret: " + ret + ", " + bytes.toString().replaceAll(System.lineSeparator(), "");
    }

    @Test(timeout = 60000)
    public void testRecoverLease() throws Exception {
        assertEquals("ret: 1, You must supply a -path argument to recoverLease.", runCmd(new String[] { "recoverLease", "-retries", "1" }));
        FSDataOutputStream out = fs.create(new Path("/foo"));
        out.write(123);
        out.close();
        assertEquals("ret: 0, recoverLease SUCCEEDED on /foo", runCmd(new String[] { "recoverLease", "-path", "/foo" }));
    }

    @Test(timeout = 60000)
    public void testVerifyMetaCommand() throws Exception {
        DFSTestUtil.createFile(fs, new Path("/bar"), 1234, (short) 1, 0xdeadbeef);
        FsDatasetSpi<?> fsd = datanode.getFSDataset();
        ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, new Path("/bar"));
        File blockFile = getBlockFile(fsd, block.getBlockPoolId(), block.getLocalBlock());
        assertEquals("ret: 1, You must specify a meta file with -meta", runCmd(new String[] { "verifyMeta", "-block", blockFile.getAbsolutePath() }));
        File metaFile = getMetaFile(fsd, block.getBlockPoolId(), block.getLocalBlock());
        assertEquals("ret: 0, Checksum type: " + "DataChecksum(type=CRC32C, chunkSize=512)", runCmd(new String[] { "verifyMeta", "-meta", metaFile.getAbsolutePath() }));
        assertEquals("ret: 0, Checksum type: " + "DataChecksum(type=CRC32C, chunkSize=512)" + "Checksum verification succeeded on block file " + blockFile.getAbsolutePath(), runCmd(new String[] { "verifyMeta", "-meta", metaFile.getAbsolutePath(), "-block", blockFile.getAbsolutePath() }));
    }

    @Test(timeout = 60000)
    public void testComputeMetaCommand() throws Exception {
        DFSTestUtil.createFile(fs, new Path("/bar"), 1234, (short) 1, 0xdeadbeef);
        FsDatasetSpi<?> fsd = datanode.getFSDataset();
        ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, new Path("/bar"));
        File blockFile = getBlockFile(fsd, block.getBlockPoolId(), block.getLocalBlock());
        assertEquals("ret: 1, computeMeta -block <block-file> -out " + "<output-metadata-file>  Compute HDFS metadata from the specified" + " block file, and save it to  the specified output metadata file." + "**NOTE: Use at your own risk! If the block file is corrupt" + " and you overwrite it's meta file,  it will show up" + " as good in HDFS, but you can't read the data." + " Only use as a last measure, and when you are 100% certain" + " the block file is good.", runCmd(new String[] { "computeMeta" }));
        assertEquals("ret: 2, You must specify a block file with -block", runCmd(new String[] { "computeMeta", "-whatever" }));
        assertEquals("ret: 3, Block file <bla> does not exist or is not a file", runCmd(new String[] { "computeMeta", "-block", "bla" }));
        assertEquals("ret: 4, You must specify a output file with -out", runCmd(new String[] { "computeMeta", "-block", blockFile.getAbsolutePath() }));
        assertEquals("ret: 5, output file already exists!", runCmd(new String[] { "computeMeta", "-block", blockFile.getAbsolutePath(), "-out", blockFile.getAbsolutePath() }));
        File outFile = new File(TEST_ROOT_DIR, "out.meta");
        outFile.delete();
        assertEquals("ret: 0, Checksum calculation succeeded on block file " + blockFile.getAbsolutePath() + " saved metadata to meta file " + outFile.getAbsolutePath(), runCmd(new String[] { "computeMeta", "-block", blockFile.getAbsolutePath(), "-out", outFile.getAbsolutePath() }));
        assertTrue(outFile.exists());
        assertTrue(outFile.length() > 0);
    }

    @Test(timeout = 60000)
    public void testRecoverLeaseforFileNotFound() throws Exception {
        assertTrue(runCmd(new String[] { "recoverLease", "-path", "/foo", "-retries", "2" }).contains("Giving up on recoverLease for /foo after 1 try"));
    }
}

18 View Complete Implementation : TestPersistentStoragePolicySatisfier.java
Copyright Apache License 2.0
Author : apache
/**
 * Setup test files for testing.
 * @param dfs
 * @param replication
 * @throws Exception
 */
private void createTestFiles(DistributedFileSystem dfs, short replication) throws Exception {
    DFSTestUtil.createFile(dfs, testFile, 1024L, replication, 0L);
    DFSTestUtil.createFile(dfs, parentFile, 1024L, replication, 0L);
    DFSTestUtil.createFile(dfs, childFile, 1024L, replication, 0L);
    DFSTestUtil.waitReplication(dfs, testFile, replication);
    DFSTestUtil.waitReplication(dfs, parentFile, replication);
    DFSTestUtil.waitReplication(dfs, childFile, replication);
}

18 View Complete Implementation : DistCpSync.java
Copyright Apache License 2.0
Author : apache
private void syncDiff(DiffInfo[] diffs, DistributedFileSystem targetFs, Path tmpDir) throws IOException {
    moveToTmpDir(diffs, targetFs, tmpDir);
    moveToTarget(diffs, targetFs);
}

18 View Complete Implementation : TestOfflineImageViewerWithStripedBlocks.java
Copyright Apache License 2.0
Author : apache
public class TestOfflineImageViewerWithStripedBlocks {

    private final ErasureCodingPolicy ecPolicy = StripedFileTestUtil.getDefaultECPolicy();

    private int dataBlocks = ecPolicy.getNumDataUnits();

    private int parityBlocks = ecPolicy.getNumParityUnits();

    private static MiniDFSCluster cluster;

    private static DistributedFileSystem fs;

    private final int cellSize = ecPolicy.getCellSize();

    private final int stripesPerBlock = 3;

    private final int blockSize = cellSize * stripesPerBlock;

    @Before
    public void setup() throws IOException {
        int numDNs = dataBlocks + parityBlocks + 2;
        Configuration conf = new Configuration();
        conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
        cluster.waitActive();
        cluster.getFileSystem().getClient().setErasureCodingPolicy("/", StripedFileTestUtil.getDefaultECPolicy().getName());
        fs = cluster.getFileSystem();
        fs.enableErasureCodingPolicy(StripedFileTestUtil.getDefaultECPolicy().getName());
        Path eczone = new Path("/eczone");
        fs.mkdirs(eczone);
    }

    @After
    public void tearDown() {
        if (cluster != null) {
            cluster.shutdown();
        }
    }

    @Test(timeout = 60000)
    public void testFileEqualToOneStripe() throws Exception {
        int numBytes = cellSize;
        testFileSize(numBytes);
    }

    @Test(timeout = 60000)
    public void testFileLessThanOneStripe() throws Exception {
        int numBytes = cellSize - 100;
        testFileSize(numBytes);
    }

    @Test(timeout = 60000)
    public void testFileHavingMultipleBlocks() throws Exception {
        int numBytes = blockSize * 3;
        testFileSize(numBytes);
    }

    @Test(timeout = 60000)
    public void testFileLargerThanABlockGroup1() throws IOException {
        testFileSize(blockSize * dataBlocks + cellSize + 123);
    }

    @Test(timeout = 60000)
    public void testFileLargerThanABlockGroup2() throws IOException {
        testFileSize(blockSize * dataBlocks * 3 + cellSize * dataBlocks + cellSize + 123);
    }

    @Test(timeout = 60000)
    public void testFileFullBlockGroup() throws IOException {
        testFileSize(blockSize * dataBlocks);
    }

    @Test(timeout = 60000)
    public void testFileMoreThanOneStripe() throws Exception {
        int numBytes = blockSize + blockSize / 2;
        testFileSize(numBytes);
    }

    private void testFileSize(int numBytes) throws IOException, UnresolvedLinkException, SnapshotAccessControlException {
        fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        File orgFsimage = null;
        Path file = new Path("/eczone/striped");
        FSDataOutputStream out = fs.create(file, true);
        byte[] bytes = DFSTestUtil.generateSequentialBytes(0, numBytes);
        out.write(bytes);
        out.close();
        // Write results to the fsimage file
        fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
        fs.saveNamespace();
        // Determine location of fsimage file
        orgFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil.getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
        if (orgFsimage == null) {
            throw new RuntimeException("Didn't generate or can't find fsimage");
        }
        FSImageLoader loader = FSImageLoader.load(orgFsimage.getAbsolutePath());
        String fileStatus = loader.getFileStatus("/eczone/striped");
        long expectedFileSize = bytes.length;
        // Verify space consumed present in BlockInfoStriped
        FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
        INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
        assertEquals(StripedFileTestUtil.getDefaultECPolicy().getId(), fileNode.getErasureCodingPolicyID());
        assertTrue("Invalid block size", fileNode.getBlocks().length > 0);
        long actualFileSize = 0;
        for (BlockInfo blockInfo : fileNode.getBlocks()) {
            assertTrue("Didn't find block striped information", blockInfo instanceof BlockInfoStriped);
            actualFileSize += blockInfo.getNumBytes();
        }
        assertEquals("Wrongly computed file size contains striped blocks", expectedFileSize, actualFileSize);
        // Verify space consumed present in filestatus
        String EXPECTED_FILE_SIZE = "\"length\":" + String.valueOf(expectedFileSize);
        assertTrue("Wrongly computed file size contains striped blocks, file status:" + fileStatus + ". Expected file size is : " + EXPECTED_FILE_SIZE, fileStatus.contains(EXPECTED_FILE_SIZE));
    }
}

18 View Complete Implementation : SnapshotDiff.java
Copyright Apache License 2.0
Author : apache
@Override
public int run(String[] argv) throws Exception {
    String description = "hdfs snapshotDiff <snapshotDir> <from> <to>:\n" + "\tGet the difference between two snapshots, \n" + "\tor between a snapshot and the current tree of a directory.\n" + "\tFor <from>/<to>, users can use \".\" to present the current status,\n" + "\tand use \".snapshot/snapshot_name\" to present a snapshot,\n" + "\twhere \".snapshot/\" can be omitted\n";
    if (argv.length != 3) {
        System.err.println("Usage: \n" + description);
        return 1;
    }
    FileSystem fs = FileSystem.get(new Path(argv[0]).toUri(), getConf());
    if (!(fs instanceof DistributedFileSystem)) {
        System.err.println("SnapshotDiff can only be used in DistributedFileSystem");
        return 1;
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    Path snapshotRoot = new Path(argv[0]);
    String fromSnapshot = getSnapshotName(argv[1]);
    String toSnapshot = getSnapshotName(argv[2]);
    try {
        SnapshotDiffReport diffReport = dfs.getSnapshotDiffReport(snapshotRoot, fromSnapshot, toSnapshot);
        System.out.println(diffReport.toString());
    } catch (IOException e) {
        String[] content = e.getLocalizedMessage().split("\n");
        System.err.println("snapshotDiff: " + content[0]);
        e.printStackTrace(System.err);
        return 1;
    }
    return 0;
}
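
The same diff can also be obtained programmatically, without going through the tool's argument parsing. A minimal sketch, assuming the path points at a snapshottable directory that already has two snapshots named s1 and s2 (the path and snapshot names are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;

public class SnapshotDiffSketch {

    public static void main(String[] args) throws Exception {
        Path snapshotRoot = new Path("/data");
        FileSystem fs = FileSystem.get(snapshotRoot.toUri(), new Configuration());
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        // report lists entries created, deleted, modified, or renamed between the two snapshots
        SnapshotDiffReport report = dfs.getSnapshotDiffReport(snapshotRoot, "s1", "s2");
        System.out.println(report.toString());
    }
}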

18 View Complete Implementation : TestGetContentSummaryWithPermission.java
Copyright Apache License 2.0
Author : apache
/**
 * This class tests get content summary with permission settings. A minimal standalone usage sketch follows this listing.
 */
public class TestGetContentSummaryWithPermission {

    protected static final short REPLICATION = 3;

    protected static final long BLOCKSIZE = 1024;

    private Configuration conf;

    private MiniDFSCluster cluster;

    private DistributedFileSystem dfs;

    @Before
    public void setUp() throws Exception {
        conf = new Configuration();
        conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
        cluster.waitActive();
        dfs = cluster.getFileSystem();
    }

    @After
    public void tearDown() throws Exception {
        if (cluster != null) {
            cluster.shutdown();
            cluster = null;
        }
    }

    /**
     * Test getContentSummary for super user. For super user, whatever
     * permission the directories are with, always allowed to access
     *
     * @throws Exception
     */
    @Test
    public void testGetContentSummarySuperUser() throws Exception {
        final Path foo = new Path("/fooSuper");
        final Path bar = new Path(foo, "barSuper");
        final Path baz = new Path(bar, "bazSuper");
        dfs.mkdirs(bar);
        DFSTestUtil.createFile(dfs, baz, 10, REPLICATION, 0L);
        ContentSummary summary;
        summary = cluster.getNameNodeRpc().getContentSummary(foo.toString());
        verifySummary(summary, 2, 1, 10);
        dfs.setPermission(foo, new FsPermission((short) 0));
        summary = cluster.getNameNodeRpc().getContentSummary(foo.toString());
        verifySummary(summary, 2, 1, 10);
        dfs.setPermission(bar, new FsPermission((short) 0));
        summary = cluster.getNameNodeRpc().getContentSummary(foo.toString());
        verifySummary(summary, 2, 1, 10);
        dfs.setPermission(baz, new FsPermission((short) 0));
        summary = cluster.getNameNodeRpc().getContentSummary(foo.toString());
        verifySummary(summary, 2, 1, 10);
    }

    /**
     * Test getContentSummary for non-super, non-owner. Such users are restricted
     * by permission of subdirectories. Namely if there is any subdirectory that
     * does not have READ_EXECUTE access, AccessControlException will be thrown.
     *
     * @throws Exception
     */
    @Test
    public void testGetContentSummaryNonSuperUser() throws Exception {
        final Path foo = new Path("/fooNoneSuper");
        final Path bar = new Path(foo, "barNoneSuper");
        final Path baz = new Path(bar, "bazNoneSuper");
        // run as some random non-superuser, non-owner user.
        final UserGroupInformation userUgi = UserGroupInformation.createUserForTesting("randomUser", new String[] { "randomGroup" });
        dfs.mkdirs(bar);
        DFSTestUtil.createFile(dfs, baz, 10, REPLICATION, 0L);
        // by default, permission is rwxr-xr-x, as long as READ and EXECUTE are set,
        // content summary should be accessible
        FileStatus fileStatus;
        fileStatus = dfs.getFileStatus(foo);
        assertEquals((short) 755, fileStatus.getPermission().toOctal());
        fileStatus = dfs.getFileStatus(bar);
        assertEquals((short) 755, fileStatus.getPermission().toOctal());
        // file has no EXECUTE, it is rw-r--r-- default
        fileStatus = dfs.getFileStatus(baz);
        assertEquals((short) 644, fileStatus.getPermission().toOctal());
        // by default, can get content summary
        ContentSummary summary = userUgi.doAs((PrivilegedExceptionAction<ContentSummary>) () -> cluster.getNameNodeRpc().getContentSummary(foo.toString()));
        verifySummary(summary, 2, 1, 10);
        // set empty access on root dir, should disallow content summary
        dfs.setPermission(foo, new FsPermission((short) 0));
        try {
            userUgi.doAs((PrivilegedExceptionAction<ContentSummary>) () -> cluster.getNameNodeRpc().getContentSummary(foo.toString()));
            fail("Should've failed due to access control exception.");
        } catch (AccessControlException e) {
            assertTrue(e.getMessage().contains("Permission denied"));
        }
        // restore foo's permission to allow READ_EXECUTE
        dfs.setPermission(foo, new FsPermission(READ_EXECUTE, READ_EXECUTE, READ_EXECUTE));
        // set empty access on subdir, should disallow content summary from root dir
        dfs.setPermission(bar, new FsPermission((short) 0));
        try {
            userUgi.doAs((PrivilegedExceptionAction<ContentSummary>) () -> cluster.getNameNodeRpc().getContentSummary(foo.toString()));
            fail("Should've failed due to access control exception.");
        } catch (AccessControlException e) {
            assertTrue(e.getMessage().contains("Permission denied"));
        }
        // restore the permission of subdir to READ_EXECUTE. enable
        // getContentSummary again for root
        dfs.setPermission(bar, new FsPermission(READ_EXECUTE, READ_EXECUTE, READ_EXECUTE));
        summary = userUgi.doAs((PrivilegedExceptionAction<ContentSummary>) () -> cluster.getNameNodeRpc().getContentSummary(foo.toString()));
        verifySummary(summary, 2, 1, 10);
        // permission of files under the directory does not affect
        // getContentSummary
        dfs.setPermission(baz, new FsPermission((short) 0));
        summary = userUgi.doAs((PrivilegedExceptionAction<ContentSummary>) () -> cluster.getNameNodeRpc().getContentSummary(foo.toString()));
        verifySummary(summary, 2, 1, 10);
    }

    private void verifySummary(ContentSummary summary, int dirCount, int fileCount, int length) {
        assertEquals(dirCount, summary.getDirectoryCount());
        assertEquals(fileCount, summary.getFileCount());
        assertEquals(length, summary.getLength());
    }
}
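
As referenced in the class comment above, here is a minimal standalone sketch of the getContentSummary call these tests exercise (the path is illustrative, and the call is subject to the permission rules described above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class ContentSummarySketch {

    public static void main(String[] args) throws Exception {
        // assumes fs.defaultFS points at an HDFS cluster
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(new Configuration());
        ContentSummary summary = dfs.getContentSummary(new Path("/fooSuper"));
        System.out.println("dirs=" + summary.getDirectoryCount() + " files=" + summary.getFileCount() + " bytes=" + summary.getLength());
    }
}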

18 View Complete Implementation : TestHDFSFileContextMainOperations.java
Copyright Apache License 2.0
Author : apache
@Test
public void testTruncate() throws Exception {
    final short repl = 3;
    final int blockSize = 1024;
    final int numOfBlocks = 2;
    DistributedFileSystem fs = cluster.getFileSystem();
    Path dir = getTestRootPath(fc, "test/hadoop");
    Path file = getTestRootPath(fc, "test/hadoop/file");
    final byte[] data = FileSystemTestHelper.getFileData(numOfBlocks, blockSize);
    FileSystemTestHelper.createFile(fs, file, data, blockSize, repl);
    final int newLength = blockSize;
    boolean isReady = fc.truncate(file, newLength);
    Assert.assertTrue("Recovery is not expected.", isReady);
    FileStatus fileStatus = fc.getFileStatus(file);
    Assert.assertEquals(fileStatus.getLen(), newLength);
    AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());
    ContentSummary cs = fs.getContentSummary(dir);
    Assert.assertEquals("Bad disk space usage", cs.getSpaceConsumed(), newLength * repl);
    Assert.assertTrue(fs.delete(dir, true));
}
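
The test above truncates through FileContext; the same operation is available directly on DistributedFileSystem. A rough sketch (the file path and new length are illustrative): truncate returns true when the file is immediately usable at the new length, and false when the last block still needs recovery.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class TruncateSketch {

    public static void main(String[] args) throws Exception {
        // assumes fs.defaultFS points at an HDFS cluster
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(new Configuration());
        Path file = new Path("/test/hadoop/file");
        boolean isReady = dfs.truncate(file, 1024L);
        if (!isReady) {
            // the new length becomes visible once block recovery completes
            System.out.println("Truncate scheduled; waiting on block recovery");
        }
    }
}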

18 View Complete Implementation : TestCommitBlockWithInvalidGenStamp.java
Copyright Apache License 2.0
Author : apache
public class TestCommitBlockWithInvalidGenStamp {

    private static final int BLOCK_SIZE = 1024;

    private MiniDFSCluster cluster;

    private FSDirectory dir;

    private DistributedFileSystem dfs;

    @Before
    public void setUp() throws IOException {
        final Configuration conf = new Configuration();
        conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
        cluster = new MiniDFSCluster.Builder(conf).build();
        cluster.waitActive();
        dir = cluster.getNamesystem().getFSDirectory();
        dfs = cluster.getFileSystem();
    }

    @After
    public void tearDown() {
        if (cluster != null) {
            cluster.shutdown();
            cluster = null;
        }
    }

    @Test
    public void testCommitWithInvalidGenStamp() throws Exception {
        final Path file = new Path("/file");
        FSDataOutputStream out = null;
        try {
            out = dfs.create(file, (short) 1);
            INodeFile fileNode = dir.getINode4Write(file.toString()).asFile();
            ExtendedBlock previous = null;
            Block newBlock = DFSTestUtil.addBlockToFile(false, cluster.getDataNodes(), dfs, cluster.getNamesystem(), file.toString(), fileNode, dfs.getClient().getClientName(), previous, 0, 100);
            Block newBlockClone = new Block(newBlock);
            previous = new ExtendedBlock(cluster.getNamesystem().getBlockPoolId(), newBlockClone);
            previous.setGenerationStamp(123);
            try {
                dfs.getClient().getNamenode().complete(file.toString(), dfs.getClient().getClientName(), previous, fileNode.getId());
                Assert.fail("should throw exception because invalid genStamp");
            } catch (IOException e) {
                Assert.assertTrue(e.toString().contains("Commit block with mismatching GS. NN has " + newBlock + ", client submits " + newBlockClone));
            }
            previous = new ExtendedBlock(cluster.getNamesystem().getBlockPoolId(), newBlock);
            boolean complete = dfs.getClient().getNamenode().complete(file.toString(), dfs.getClient().getClientName(), previous, fileNode.getId());
            Assert.assertTrue("should complete successfully", complete);
        } finally {
            IOUtils.cleanup(null, out);
        }
    }
}

18 View Complete Implementation : TestSnapshotNameWithInvalidCharacters.java
Copyright Apache License 2.0
Author : apache
public class TestSnapshotNameWithInvalidCharacters {

    private static final long SEED = 0;

    private static final short REPLICATION = 1;

    private static final int BLOCKSIZE = 1024;

    private static final Configuration conf = new Configuration();

    private static MiniDFSCluster cluster;

    private static DistributedFileSystem hdfs;

    private final Path dir1 = new Path("/");

    private final String file1Name = "file1";

    private final String snapshot1 = "a:b:c";

    private final String snapshot2 = "a/b/c";

    @Before
    public void setUp() throws Exception {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
        cluster.waitActive();
        hdfs = cluster.getFileSystem();
    }

    @After
    public void tearDown() throws Exception {
        if (cluster != null) {
            cluster.shutdown();
            cluster = null;
        }
    }

    @Test(timeout = 600000)
    public void TestSnapshotWithInvalidName() throws Exception {
        Path file1 = new Path(dir1, file1Name);
        DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, SEED);
        hdfs.allowSnapshot(dir1);
        try {
            hdfs.createSnapshot(dir1, snapshot1);
        } catch (RemoteException e) {
        }
    }

    @Test(timeout = 60000)
    public void TestSnapshotWithInvalidName1() throws Exception {
        Path file1 = new Path(dir1, file1Name);
        DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, SEED);
        hdfs.allowSnapshot(dir1);
        try {
            hdfs.createSnapshot(dir1, snapshot2);
        } catch (RemoteException e) {
        }
    }
}

18 View Complete Implementation : TestAclConfigFlag.java
Copyright Apache License 2.0
Author : apache
/**
 * Tests that the configuration flag that controls support for ACLs is off by
 * default and causes all attempted operations related to ACLs to fail.  The
 * NameNode can still load ACLs from fsimage or edits.
 */
public class TestAclConfigFlag {

    private static final Path PATH = new Path("/path");

    private MiniDFSCluster cluster;

    private DistributedFileSystem fs;

    @Rule
    public ExpectedException exception = ExpectedException.none();

    @After
    public void shutdown() throws Exception {
        IOUtils.cleanup(null, fs);
        if (cluster != null) {
            cluster.shutdown();
            cluster = null;
        }
    }

    @Test
    public void testModifyAclEntries() throws Exception {
        initCluster(true, false);
        fs.mkdirs(PATH);
        expectException();
        fs.modifyAclEntries(PATH, Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
    }

    @Test
    public void testRemoveAclEntries() throws Exception {
        initCluster(true, false);
        fs.mkdirs(PATH);
        expectException();
        fs.removeAclEntries(PATH, Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
    }

    @Test
    public void testRemoveDefaultAcl() throws Exception {
        initCluster(true, false);
        fs.mkdirs(PATH);
        expectException();
        fs.removeDefaultAcl(PATH);
    }

    @Test
    public void testRemoveAcl() throws Exception {
        initCluster(true, false);
        fs.mkdirs(PATH);
        expectException();
        fs.removeAcl(PATH);
    }

    @Test
    public void testSetAcl() throws Exception {
        initCluster(true, false);
        fs.mkdirs(PATH);
        expectException();
        fs.setAcl(PATH, Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
    }

    @Test
    public void testGetAclStatus() throws Exception {
        initCluster(true, false);
        fs.mkdirs(PATH);
        expectException();
        fs.getAclStatus(PATH);
    }

    @Test
    public void testEditLog() throws Exception {
        // With ACLs enabled, set an ACL.
        initCluster(true, true);
        fs.mkdirs(PATH);
        fs.setAcl(PATH, Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
        // Restart with ACLs disabled.  Expect successful restart.
        restart(false, false);
    }

    @Test
    public void testFsImage() throws Exception {
        // With ACLs enabled, set an ACL.
        initCluster(true, true);
        fs.mkdirs(PATH);
        fs.setAcl(PATH, Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
        // Save a new checkpoint and restart with ACLs still enabled.
        restart(true, true);
        // Restart with ACLs disabled.  Expect successful restart.
        restart(false, false);
    }

    /**
     * We expect an AclException, and we want the exception text to state the
     * configuration key that controls ACL support.
     */
    private void expectException() {
        exception.expect(AclException.class);
        exception.expectMessage(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY);
    }

    /**
     * Initialize the cluster, wait for it to become active, and get FileSystem.
     *
     * @param format if true, format the NameNode and DataNodes before starting up
     * @param aclsEnabled if true, ACL support is enabled
     * @throws Exception if any step fails
     */
    private void initCluster(boolean format, boolean aclsEnabled) throws Exception {
        Configuration conf = new Configuration();
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, aclsEnabled);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
    }

    /**
     * Restart the cluster, optionally saving a new checkpoint.
     *
     * @param checkpoint boolean true to save a new checkpoint
     * @param aclsEnabled if true, ACL support is enabled
     * @throws Exception if restart fails
     */
    private void restart(boolean checkpoint, boolean aclsEnabled) throws Exception {
        NameNode nameNode = cluster.getNameNode();
        if (checkpoint) {
            NameNodeAdapter.enterSafeMode(nameNode, false);
            NameNodeAdapter.saveNamespace(nameNode);
        }
        shutdown();
        initCluster(false, aclsEnabled);
    }
}
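
The tests above verify that ACL operations fail while the feature flag is off. For reference, a minimal sketch of setting and reading an ACL on a cluster where dfs.namenode.acls.enabled is true (the path, user name, and permissions are illustrative):

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class AclSketch {

    public static void main(String[] args) throws Exception {
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(new Configuration());
        Path path = new Path("/path");
        // grant user "foo" read/write through an access-scope ACL entry
        AclEntry entry = new AclEntry.Builder().setScope(AclEntryScope.ACCESS).setType(AclEntryType.USER).setName("foo").setPermission(FsAction.READ_WRITE).build();
        dfs.modifyAclEntries(path, Arrays.asList(entry));
        AclStatus status = dfs.getAclStatus(path);
        System.out.println(status.getEntries());
    }
}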

18 View Complete Implementation : TestExportsTable.java
Copyright Apache License 2.0
Author : apache
@Test
public void testViewFsMultipleExportPoint() throws IOException {
    NfsConfiguration config = new NfsConfiguration();
    MiniDFSCluster cluster = null;
    String clusterName = RandomStringUtils.randomAlphabetic(10);
    String exportPoint = "/hdfs1,/hdfs2";
    config.setStrings(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY, exportPoint);
    config.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, FsConstants.VIEWFS_SCHEME + "://" + clusterName);
    // Use ephemeral port in case tests are running in parallel
    config.setInt("nfs3.mountd.port", 0);
    config.setInt("nfs3.server.port", 0);
    config.set("nfs.http.address", "0.0.0.0:0");
    try {
        cluster = new MiniDFSCluster.Builder(config).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2)).numDataNodes(2).build();
        cluster.waitActive();
        DistributedFileSystem hdfs1 = cluster.getFileSystem(0);
        DistributedFileSystem hdfs2 = cluster.getFileSystem(1);
        cluster.waitActive();
        Path base1 = new Path("/user1");
        Path base2 = new Path("/user2");
        hdfs1.delete(base1, true);
        hdfs2.delete(base2, true);
        hdfs1.mkdirs(base1);
        hdfs2.mkdirs(base2);
        ConfigUtil.addLink(config, clusterName, "/hdfs1", hdfs1.makeQualified(base1).toUri());
        ConfigUtil.addLink(config, clusterName, "/hdfs2", hdfs2.makeQualified(base2).toUri());
        // Start nfs
        final Nfs3 nfsServer = new Nfs3(config);
        nfsServer.startServiceInternal(false);
        Mountd mountd = nfsServer.getMountd();
        RpcProgramMountd rpcMount = (RpcProgramMountd) mountd.getRpcProgram();
        assertTrue(rpcMount.getExports().size() == 2);
        String exportInMountd1 = rpcMount.getExports().get(0);
        assertTrue(exportInMountd1.equals("/hdfs1"));
        String exportInMountd2 = rpcMount.getExports().get(1);
        assertTrue(exportInMountd2.equals("/hdfs2"));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

18 View Complete Implementation : LsSnapshottableDir.java
Copyright Apache License 2.0
Author : apache
@Override
public int run(String[] argv) throws Exception {
    String description = "hdfs lsSnapshottableDir: \n" + "\tGet the list of snapshottable directories that are owned by the current user.\n" + "\tReturn all the snapshottable directories if the current user is a super user.\n";
    if (argv.length != 0) {
        System.err.println("Usage: \n" + description);
        return 1;
    }
    FileSystem fs = FileSystem.get(getConf());
    if (!(fs instanceof DistributedFileSystem)) {
        System.err.println("LsSnapshottableDir can only be used in DistributedFileSystem");
        return 1;
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    try {
        SnapshottableDirectoryStatus[] stats = dfs.getSnapshottableDirListing();
        SnapshottableDirectoryStatus.print(stats, System.out);
    } catch (IOException e) {
        String[] content = e.getLocalizedMessage().split("\n");
        System.err.println("lsSnapshottableDir: " + content[0]);
        return 1;
    }
    return 0;
}
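
The shell tool above is a thin wrapper over the snapshot APIs exposed by DistributedFileSystem. As a hedged, self-contained sketch of doing the same listing programmatically against a MiniDFSCluster (the directory and snapshot names are made up for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;

public class SnapshottableDirListingSketch {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
            cluster.waitActive();
            DistributedFileSystem dfs = cluster.getFileSystem();
            Path dir = new Path("/snapDemo");
            dfs.mkdirs(dir);
            // Mark the directory snapshottable, then take one snapshot.
            dfs.allowSnapshot(dir);
            dfs.createSnapshot(dir, "s0");
            // Same call the lsSnapshottableDir command issues under the hood.
            SnapshottableDirectoryStatus[] stats = dfs.getSnapshottableDirListing();
            SnapshottableDirectoryStatus.print(stats, System.out);
        } finally {
            cluster.shutdown();
        }
    }
}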

18 View Complete Implementation : TestHDFSFileContextMainOperations.java
Copyright Apache License 2.0
Author : apache
private void oldRename(Path src, Path dst, boolean renameSucceeds, boolean exception) throws Exception {
    DistributedFileSystem fs = cluster.getFileSystem();
    try {
        Assert.assertEquals(renameSucceeds, fs.rename(src, dst));
    } catch (Exception ex) {
        Assert.assertTrue(exception);
    }
    Assert.assertEquals(renameSucceeds, !exists(fc, src));
    Assert.assertEquals(renameSucceeds, exists(fc, dst));
}

18 View Complete Implementation : TestExportsTable.java
Copyright Apache License 2.0
Author : apache
@Test
public void testHdfsInternalExportPoint() throws IOException {
    NfsConfiguration config = new NfsConfiguration();
    MiniDFSCluster cluster = null;
    String exportPoint = "/myexport1";
    config.setStrings(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY, exportPoint);
    // Use ephemeral port in case tests are running in parallel
    config.setInt("nfs3.mountd.port", 0);
    config.setInt("nfs3.server.port", 0);
    config.set("nfs.http.address", "0.0.0.0:0");
    Path base = new Path(exportPoint);
    try {
        cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
        cluster.waitActive();
        DistributedFileSystem hdfs = cluster.getFileSystem(0);
        hdfs.delete(base, true);
        hdfs.mkdirs(base);
        // Start nfs
        final Nfs3 nfsServer = new Nfs3(config);
        nfsServer.startServiceInternal(false);
        Mountd mountd = nfsServer.getMountd();
        RpcProgramMountd rpcMount = (RpcProgramMountd) mountd.getRpcProgram();
        assertTrue(rpcMount.getExports().size() == 1);
        String exportInMountd = rpcMount.getExports().get(0);
        assertTrue(exportInMountd.equals(exportPoint));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

18 View Complete Implementation : TestClientAccessPrivilege.java
Copyright Apache License 2.0
Author : apache
public class TestClientAccessPrivilege {

    static MiniDFSCluster cluster = null;

    static NfsConfiguration config = new NfsConfiguration();

    static DistributedFileSystem hdfs;

    static NameNode nn;

    static String testdir = "/tmp";

    static SecurityHandler securityHandler;

    @BeforeClass
    public static void setup() throws Exception {
        String currentUser = System.getProperty("user.name");
        config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(currentUser), "*");
        config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(currentUser), "*");
        ProxyUsers.refreshSuperUserGroupsConfiguration(config);
        cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
        cluster.waitActive();
        hdfs = cluster.getFileSystem();
        nn = cluster.getNameNode();
        // Use ephemeral port in case tests are running in parallel
        config.setInt("nfs3.mountd.port", 0);
        config.setInt("nfs3.server.port", 0);
        securityHandler = Mockito.mock(SecurityHandler.clreplaced);
        Mockito.when(securityHandler.getUser()).thenReturn(System.getProperty("user.name"));
    }

    @AfterClass
    public static void shutdown() throws Exception {
        if (cluster != null) {
            cluster.shutdown();
        }
    }

    @Before
    public void createFiles() throws IllegalArgumentException, IOException {
        hdfs.delete(new Path(testdir), true);
        hdfs.mkdirs(new Path(testdir));
        DFSTestUtil.createFile(hdfs, new Path(testdir + "/f1"), 0, (short) 1, 0);
    }

    @Test(timeout = 60000)
    public void testClientAccessPrivilegeForRemove() throws Exception {
        // Configure ro access for nfs1 service
        config.set("dfs.nfs.exports.allowed.hosts", "* ro");
        // Start nfs
        Nfs3 nfs = new Nfs3(config);
        nfs.startServiceInternal(false);
        RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs.getRpcProgram();
        // Create a remove request
        HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
        long dirId = status.getFileId();
        int namenodeId = Nfs3Utils.getNamenodeId(config);
        XDR xdr_req = new XDR();
        FileHandle handle = new FileHandle(dirId, namenodeId);
        handle.serialize(xdr_req);
        xdr_req.writeString("f1");
        // Remove operation
        REMOVE3Response response = nfsd.remove(xdr_req.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
        // Assert on return code
        assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES, response.getStatus());
    }
}
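
The key piece in the test above is the dfs.nfs.exports.allowed.hosts value: a semicolon-separated list of entries, each a host name or pattern followed by an access privilege (rw or ro, defaulting to read-only). A small sketch of building such a configuration follows; the subnet and the exact value format are taken from the Hadoop NFS gateway documentation rather than this test, so treat them as an assumption.

import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;

public class NfsExportAccessSketch {

    // Returns a config granting read-write to one subnet and read-only to everyone else.
    static NfsConfiguration mixedAccessConfig() {
        NfsConfiguration config = new NfsConfiguration();
        // Entries are separated by ';'; each is "<host or pattern> <rw|ro>".
        // Assumed example value: hosts in 192.168.0.0/22 get rw, all other hosts ro.
        config.set("dfs.nfs.exports.allowed.hosts", "192.168.0.0/22 rw ; * ro");
        return config;
    }
}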

18 View Complete Implementation : TestStoragePolicySatisfierWithHA.java
Copyright Apache License 2.0
Author : apache
/**
 * Tests that StoragePolicySatisfier is able to work with HA enabled.
 */
public class TestStoragePolicySatisfierWithHA {

    private MiniDFSCluster cluster = null;

    private final Configuration config = new HdfsConfiguration();

    private static final int DEFAULT_BLOCK_SIZE = 1024;

    private DistributedFileSystem dfs = null;

    private StorageType[][] allDiskTypes = new StorageType[][] { { StorageType.DISK, StorageType.DISK }, { StorageType.DISK, StorageType.DISK }, { StorageType.DISK, StorageType.DISK } };

    private int numOfDatanodes = 3;

    private int storagesPerDatanode = 2;

    private long capacity = 2 * 256 * 1024 * 1024;

    private int nnIndex = 0;

    private void createCluster() throws IOException {
        config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
        config.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY, StoragePolicySatisfierMode.EXTERNAL.toString());
        // Reduced refresh cycle to update latest datanodes.
        config.setLong(DFSConfigKeys.DFS_SPS_DATANODE_CACHE_REFRESH_INTERVAL_MS, 1000);
        startCluster(config, allDiskTypes, numOfDatanodes, storagesPerDatanode, capacity);
        dfs = cluster.getFileSystem(nnIndex);
    }

    private void startCluster(final Configuration conf, StorageType[][] storageTypes, int numberOfDatanodes, int storagesPerDn, long nodeCapacity) throws IOException {
        long[][] capacities = new long[numberOfDatanodes][storagesPerDn];
        for (int i = 0; i < numberOfDatanodes; i++) {
            for (int j = 0; j < storagesPerDn; j++) {
                capacities[i][j] = nodeCapacity;
            }
        }
        cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(numberOfDatanodes).storagesPerDatanode(storagesPerDn).storageTypes(storageTypes).storageCapacities(capacities).build();
        cluster.waitActive();
        cluster.transitionToActive(0);
    }

    /**
     * Tests to verify that SPS should run/stop automatically when NN state
     * changes between Standby and Active.
     */
    @Test(timeout = 90000)
    public void testWhenNNHAStateChanges() throws IOException {
        try {
            createCluster();
            // NN transits from Active to Standby
            cluster.transitionToStandby(0);
            cluster.waitActive();
            try {
                cluster.getNameNode(0).reconfigurePropertyImpl(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY, StoragePolicySatisfierMode.NONE.toString());
                replacedert.fail("It's not allowed to enable or disable" + " StoragePolicySatisfier on Standby NameNode");
            } catch (ReconfigurationException e) {
                GenericTestUtils.replacedertExceptionContains("Could not change property " + DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " from 'EXTERNAL' to 'NONE'", e);
                GenericTestUtils.replacedertExceptionContains("Enabling or disabling storage policy satisfier service on " + "standby NameNode is not allowed", e.getCause());
            }
        } finally {
            cluster.shutdown();
        }
    }
}
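
For reference, the client half of the feature exercised above comes down to two DistributedFileSystem calls: set a storage policy on a path, then ask the satisfier to move the existing blocks to match it. A hedged sketch follows, assuming an SPS-enabled cluster such as the one built by createCluster(); the path and policy name are illustrative.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SatisfyStoragePolicySketch {

    // dfs is assumed to point at a cluster with the storage policy satisfier enabled.
    static void moveToCold(DistributedFileSystem dfs) throws Exception {
        Path file = new Path("/spsDemo/file1");
        // Change the target policy; COLD expects ARCHIVE storage to be available.
        dfs.setStoragePolicy(file, "COLD");
        // Queue the existing blocks for movement; the moves are asynchronous,
        // so tests usually poll block locations until the storage types match.
        dfs.satisfyStoragePolicy(file);
    }
}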