org.apache.hadoop.hdfs.DFSTestUtil.createFile() - Java examples

Here are examples of the Java API org.apache.hadoop.hdfs.DFSTestUtil.createFile(), taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.

155 Examples
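
Most of these examples call one of two overloads of createFile: a five-argument form (file system, path, file length, replication factor, random seed) and a seven-argument form that additionally fixes the write buffer length and the block size. Below is a minimal sketch of typical usage inside a MiniDFSCluster-based test; the signatures are paraphrased from the call sites in the examples, so treat the exact parameter names as assumptions and check DFSTestUtil in your Hadoop version for the authoritative declarations.

// Overloads as inferred from the call sites in the examples (paraphrased,
// not authoritative for any particular Hadoop release):
//   DFSTestUtil.createFile(FileSystem fs, Path file, long fileLen,
//       short replFactor, long seed)
//   DFSTestUtil.createFile(FileSystem fs, Path file, int bufferLen,
//       long fileLen, long blockSize, short replFactor, long seed)
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    // Write a 1 KB file with replication factor 1; the seed makes the
    // pseudo-random file contents reproducible across runs.
    DFSTestUtil.createFile(fs, new Path("/demo"), 1024L, (short) 1, 0L);
} finally {
    cluster.shutdown();
}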

Example: TestSnapshotNameWithInvalidCharacters.java (19 votes, Apache License 2.0, author: apache)
@Test(timeout = 600000)
public void TestSnapshotWithInvalidName() throws Exception {
    Path file1 = new Path(dir1, file1Name);
    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, SEED);
    hdfs.allowSnapshot(dir1);
    try {
        hdfs.createSnapshot(dir1, snapshot1);
    } catch (RemoteException e) {
        // expected: the snapshot name is invalid, so createSnapshot fails
    }
}

Example: TestSequentialBlockId.java (19 votes, Apache License 2.0, author: apache)
/**
 * Test that block IDs are generated sequentially.
 *
 * @throws IOException
 */
@Test
public void testBlockIdGeneration() throws IOException {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        // Create a file that is 10 blocks long.
        Path path = new Path("testBlockIdGeneration.dat");
        DFSTestUtil.createFile(fs, path, IO_SIZE, BLOCK_SIZE * 10, BLOCK_SIZE, REPLICATION, SEED);
        List<LocatedBlock> blocks = DFSTestUtil.getAllBlocks(fs, path);
        LOG.info("Block0 id is " + blocks.get(0).getBlock().getBlockId());
        long nextBlockExpectedId = blocks.get(0).getBlock().getBlockId() + 1;
        // Ensure that the block IDs are sequentially increasing.
        for (int i = 1; i < blocks.size(); ++i) {
            long nextBlockId = blocks.get(i).getBlock().getBlockId();
            LOG.info("Block" + i + " id is " + nextBlockId);
            assertThat(nextBlockId, is(nextBlockExpectedId));
            ++nextBlockExpectedId;
        }
    } finally {
        cluster.shutdown();
    }
}

Example: TestDistCpUtilsWithCombineMode.java (19 votes, Apache License 2.0, author: apache)
private void compareSameContentButDiffBlockSizes() throws IOException {
    String base = "/tmp/verify-checksum-" + testName.getMethodName() + "/";
    long seed = System.currentTimeMillis();
    short rf = 2;
    FileSystem fs = FileSystem.get(config);
    Path basePath = new Path(base);
    fs.mkdirs(basePath);
    // create 2 files of same content but different block-sizes
    Path src = new Path(base + "src");
    Path dst = new Path(base + "dst");
    DFSTestUtil.createFile(fs, src, 256, 1024, 512, rf, seed);
    DFSTestUtil.createFile(fs, dst, 256, 1024, 1024, rf, seed);
    // then compare
    DistCpUtils.compareFileLengthsAndChecksums(1024, fs, src, null, fs, dst, false, 1024);
}

Example: TestStoragePolicySatisfyAdminCommands.java (19 votes, Apache License 2.0, author: apache)
@Test(timeout = 30000)
public void testStoragePolicySatisfierCommandWithURI() throws Exception {
    final String file = "/testStoragePolicySatisfierCommandURI";
    DFSTestUtil.createFile(dfs, new Path(file), SIZE, REPL, 0);
    final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
    DFSTestUtil.toolRun(admin, "-getStoragePolicy -path " + file, 0, "The storage policy of " + file + " is unspecified");
    DFSTestUtil.toolRun(admin, "-setStoragePolicy -path " + file + " -policy COLD", 0, "Set storage policy COLD on " + file.toString());
    DFSTestUtil.toolRun(admin, "-satisfyStoragePolicy -path " + dfs.getUri() + file, 0, "Scheduled blocks to move based on the current storage policy on " + dfs.getUri() + file.toString());
    DFSTestUtil.waitExpectedStorageType(file, StorageType.ARCHIVE, 1, 30000, dfs);
}

Example: TestSnapshotRename.java (19 votes, Apache License 2.0, author: apache)
/**
 * Rename snapshot(s), and check the correctness of the snapshot list within
 * {@link INodeDirectorySnapshottable}
 */
@Test(timeout = 60000)
public void testSnapshotList() throws Exception {
    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
    // Create three snapshots for sub1
    SnapshotTestHelper.createSnapshot(hdfs, sub1, "s1");
    SnapshotTestHelper.createSnapshot(hdfs, sub1, "s2");
    SnapshotTestHelper.createSnapshot(hdfs, sub1, "s3");
    // Rename s3 to s22
    hdfs.renameSnapshot(sub1, "s3", "s22");
    // Check the snapshots list
    INodeDirectory srcRoot = fsdir.getINode(sub1.toString()).asDirectory();
    checkSnapshotList(srcRoot, new String[] { "s1", "s2", "s22" }, new String[] { "s1", "s2", "s22" });
    // Rename s1 to s4
    hdfs.renameSnapshot(sub1, "s1", "s4");
    checkSnapshotList(srcRoot, new String[] { "s2", "s22", "s4" }, new String[] { "s4", "s2", "s22" });
    // Rename s22 to s0
    hdfs.renameSnapshot(sub1, "s22", "s0");
    checkSnapshotList(srcRoot, new String[] { "s0", "s2", "s4" }, new String[] { "s4", "s2", "s0" });
}

Example: TestStoragePolicyCommands.java (19 votes, Apache License 2.0, author: apache)
@Test
public void testSetAndUnsetStoragePolicy() throws Exception {
    final Path foo = new Path("/foo");
    final Path bar = new Path(foo, "bar");
    final Path wow = new Path(bar, "wow");
    DFSTestUtil.createFile(fs, wow, SIZE, REPL, 0);
    /*
     * test: set storage policy
     */
    final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
    DFSTestUtil.toolRun(admin, "-setStoragePolicy -path " + fs.getUri() + "/foo -policy WARM", 0, "Set storage policy WARM on " + fs.getUri() + "/foo");
    DFSTestUtil.toolRun(admin, "-setStoragePolicy -path /foo/bar -policy COLD", 0, "Set storage policy COLD on " + bar.toString());
    DFSTestUtil.toolRun(admin, "-setStoragePolicy -path /foo/bar/wow -policy HOT", 0, "Set storage policy HOT on " + wow.toString());
    DFSTestUtil.toolRun(admin, "-setStoragePolicy -path /fooz -policy WARM", 2, "File/Directory does not exist: /fooz");
    /*
     * test: get storage policy after set
     */
    final BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
    final BlockStoragePolicy warm = suite.getPolicy("WARM");
    final BlockStoragePolicy cold = suite.getPolicy("COLD");
    final BlockStoragePolicy hot = suite.getPolicy("HOT");
    DFSTestUtil.toolRun(admin, "-getStoragePolicy -path " + fs.getUri() + "/foo", 0, "The storage policy of " + fs.getUri() + "/foo:\n" + warm);
    DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo/bar", 0, "The storage policy of " + bar.toString() + ":\n" + cold);
    DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo/bar/wow", 0, "The storage policy of " + wow.toString() + ":\n" + hot);
    DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /fooz", 2, "File/Directory does not exist: /fooz");
    /*
     * test: unset storage policy
     */
    DFSTestUtil.toolRun(admin, "-unsetStoragePolicy -path " + fs.getUri() + "/foo", 0, "Unset storage policy from " + fs.getUri() + "/foo");
    DFSTestUtil.toolRun(admin, "-unsetStoragePolicy -path /foo/bar", 0, "Unset storage policy from " + bar.toString());
    DFSTestUtil.toolRun(admin, "-unsetStoragePolicy -path /foo/bar/wow", 0, "Unset storage policy from " + wow.toString());
    DFSTestUtil.toolRun(admin, "-unsetStoragePolicy -path /fooz", 2, "File/Directory does not exist: /fooz");
    /*
     * test: get storage policy after unset
     */
    DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo", 0, "The storage policy of " + foo.toString() + " is unspecified");
    DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo/bar", 0, "The storage policy of " + bar.toString() + " is unspecified");
    DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo/bar/wow", 0, "The storage policy of " + wow.toString() + " is unspecified");
    DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /fooz", 2, "File/Directory does not exist: /fooz");
}

Example: TestSequentialBlockId.java (19 votes, Apache License 2.0, author: apache)
/**
 * Test that collisions in the block ID space are handled gracefully.
 *
 * @throws IOException
 */
@Test
public void testTriggerBlockIdCollision() throws IOException {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        FSNamesystem fsn = cluster.getNamesystem();
        final int blockCount = 10;
        // Create a file with a few blocks to rev up the global block ID
        // counter.
        Path path1 = new Path("testBlockIdCollisionDetection_file1.dat");
        DFSTestUtil.createFile(fs, path1, IO_SIZE, BLOCK_SIZE * blockCount, BLOCK_SIZE, REPLICATION, SEED);
        List<LocatedBlock> blocks1 = DFSTestUtil.getAllBlocks(fs, path1);
        // Rewind the block ID counter in the name system object. This will result
        // in block ID collisions when we try to allocate new blocks.
        SequentialBlockIdGenerator blockIdGenerator = fsn.getBlockManager().getBlockIdManager().getBlockIdGenerator();
        blockIdGenerator.setCurrentValue(blockIdGenerator.getCurrentValue() - 5);
        // Trigger collisions by creating a new file.
        Path path2 = new Path("testBlockIdCollisionDetection_file2.dat");
        DFSTestUtil.createFile(fs, path2, IO_SIZE, BLOCK_SIZE * blockCount, BLOCK_SIZE, REPLICATION, SEED);
        List<LocatedBlock> blocks2 = DFSTestUtil.getAllBlocks(fs, path2);
        assertThat(blocks2.size(), is(blockCount));
        // Make sure that file2 block IDs start immediately after file1
        assertThat(blocks2.get(0).getBlock().getBlockId(), is(blocks1.get(9).getBlock().getBlockId() + 1));
    } finally {
        cluster.shutdown();
    }
}

Example: TestStoragePolicySatisfyAdminCommands.java (19 votes, Apache License 2.0, author: apache)
@Test(timeout = 30000)
public void testStoragePolicySatisfierCommand() throws Exception {
    final String file = "/testStoragePolicySatisfierCommand";
    DFSTestUtil.createFile(dfs, new Path(file), SIZE, REPL, 0);
    final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
    DFSTestUtil.toolRun(admin, "-getStoragePolicy -path " + file, 0, "The storage policy of " + file + " is unspecified");
    DFSTestUtil.toolRun(admin, "-setStoragePolicy -path " + file + " -policy COLD", 0, "Set storage policy COLD on " + file.toString());
    DFSTestUtil.toolRun(admin, "-satisfyStoragePolicy -path " + file, 0, "Scheduled blocks to move based on the current storage policy on " + file.toString());
    DFSTestUtil.waitExpectedStorageType(file, StorageType.ARCHIVE, 1, 30000, dfs);
}

Example: TestGetContentSummaryWithPermission.java (19 votes, Apache License 2.0, author: apache)
/**
 * Test getContentSummary for the superuser. Whatever permissions the
 * directories carry, the superuser is always allowed access.
 *
 * @throws Exception
 */
@Test
public void testGetContentSummarySuperUser() throws Exception {
    final Path foo = new Path("/fooSuper");
    final Path bar = new Path(foo, "barSuper");
    final Path baz = new Path(bar, "bazSuper");
    dfs.mkdirs(bar);
    DFSTestUtil.createFile(dfs, baz, 10, REPLICATION, 0L);
    ContentSummary summary;
    summary = cluster.getNameNodeRpc().getContentSummary(foo.toString());
    verifySummary(summary, 2, 1, 10);
    dfs.setPermission(foo, new FsPermission((short) 0));
    summary = cluster.getNameNodeRpc().getContentSummary(foo.toString());
    verifySummary(summary, 2, 1, 10);
    dfs.setPermission(bar, new FsPermission((short) 0));
    summary = cluster.getNameNodeRpc().getContentSummary(foo.toString());
    verifySummary(summary, 2, 1, 10);
    dfs.setPermission(baz, new FsPermission((short) 0));
    summary = cluster.getNameNodeRpc().getContentSummary(foo.toString());
    verifySummary(summary, 2, 1, 10);
}

Example: TestWebHdfsWithRestCsrfPreventionFilter.java (19 votes, Apache License 2.0, author: apache)
@Test
public void testTruncate() throws Exception {
    DFSTestUtil.createFile(fs, FILE, 1024, (short) 1, 0L);
    // truncate is an HTTP POST that executes solely within the NameNode as a
    // metadata operation, so we expect CSRF prevention configured on the
    // NameNode to block an unconfigured client.
    if (nnRestCsrf && !clientRestCsrf) {
        expectException();
    }
    assertTrue(webhdfs.truncate(FILE, 0L));
}

Example: TestDataNodeTcpNoDelay.java (19 votes, Apache License 2.0, author: apache)
/**
 * Tests the {@code DataNode#transferBlocks()} path by re-replicating an
 * existing block.
 */
private void transferBlock(DistributedFileSystem dfs) throws Exception {
    Path dir = new Path("test-block-transfer");
    Path f = new Path(dir, "testfile");
    DFSTestUtil.createFile(dfs, f, 10240, (short) 1, 0);
    // force a block transfer to another DN
    dfs.setReplication(f, (short) 2);
    DFSTestUtil.waitForReplication(dfs, f, (short) 2, 20000);
}

Example: TestSnapshotRename.java (19 votes, Apache License 2.0, author: apache)
/**
 * Test renaming a snapshot with an illegal name
 */
@Test
public void testRenameWithIllegalName() throws Exception {
    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
    // Create snapshots for sub1
    SnapshotTestHelper.createSnapshot(hdfs, sub1, "s1");
    final String name1 = HdfsConstants.DOT_SNAPSHOT_DIR;
    try {
        hdfs.renameSnapshot(sub1, "s1", name1);
        fail("Exception expected when an illegal name is given for rename");
    } catch (RemoteException e) {
        String errorMsg = "\"" + HdfsConstants.DOT_SNAPSHOT_DIR + "\" is a reserved name.";
        GenericTestUtils.assertExceptionContains(errorMsg, e);
    }
    String errorMsg = "Snapshot name cannot contain \"" + Path.SEPARATOR + "\"";
    final String[] badNames = new String[] { "foo" + Path.SEPARATOR, Path.SEPARATOR + "foo", Path.SEPARATOR, "foo" + Path.SEPARATOR + "bar" };
    for (String badName : badNames) {
        try {
            hdfs.renameSnapshot(sub1, "s1", badName);
            fail("Exception expected when an illegal name is given");
        } catch (RemoteException e) {
            GenericTestUtils.assertExceptionContains(errorMsg, e);
        }
    }
}

Example: TestFileContextSnapshot.java (19 votes, Apache License 2.0, author: apache)
/**
 * Test FileStatus of snapshot file before/after rename
 */
@Test(timeout = 60000)
public void testRenameSnapshot() throws Exception {
    DFSTestUtil.createFile(dfs, filePath, BLOCKSIZE, REPLICATION, SEED);
    dfs.allowSnapshot(snapRootPath);
    // Create snapshot for sub1
    Path snapPath1 = fileContext.createSnapshot(snapRootPath, "s1");
    Path ssPath = new Path(snapPath1, filePath.getName());
    assertTrue("Failed to create snapshot", dfs.exists(ssPath));
    FileStatus statusBeforeRename = dfs.getFileStatus(ssPath);
    // Rename the snapshot
    fileContext.renameSnapshot(snapRootPath, "s1", "s2");
    // <sub1>/.snapshot/s1/file1 should no longer exist
    assertFalse("Old snapshot still exists after rename!", dfs.exists(ssPath));
    Path snapshotRoot = SnapshotTestHelper.getSnapshotRoot(snapRootPath, "s2");
    ssPath = new Path(snapshotRoot, filePath.getName());
    // Instead, <sub1>/.snapshot/s2/file1 should exist
    assertTrue("Snapshot doesn't exist!", dfs.exists(ssPath));
    FileStatus statusAfterRename = dfs.getFileStatus(ssPath);
    // FileStatus of the snapshot should not change except the path
    assertFalse("Filestatus of the snapshot matches", statusBeforeRename.equals(statusAfterRename));
    statusBeforeRename.setPath(statusAfterRename.getPath());
    assertEquals("FileStatus of the snapshot mismatches!", statusBeforeRename.toString(), statusAfterRename.toString());
}

Example: TestSymlinkHdfsDisable.java (19 votes, Apache License 2.0, author: apache)
@Test(timeout = 60000)
public void testSymlinkHdfsDisable() throws Exception {
    Configuration conf = new HdfsConfiguration();
    // disable symlink resolution
    conf.setBoolean(CommonConfigurationKeys.FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY, false);
    // spin up minicluster, get dfs and filecontext
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    DistributedFileSystem dfs = cluster.getFileSystem();
    FileContext fc = FileContext.getFileContext(cluster.getURI(0), conf);
    // Create test files/links
    FileContextTestHelper helper = new FileContextTestHelper("/tmp/TestSymlinkHdfsDisable");
    Path root = helper.getTestRootPath(fc);
    Path target = new Path(root, "target");
    Path link = new Path(root, "link");
    DFSTestUtil.createFile(dfs, target, 4096, (short) 1, 0xDEADDEAD);
    fc.createSymlink(target, link, false);
    // Try to resolve links with FileSystem and FileContext
    try {
        fc.open(link);
        fail("Expected error when attempting to resolve link");
    } catch (IOException e) {
        GenericTestUtils.assertExceptionContains("resolution is disabled", e);
    }
    try {
        dfs.open(link);
        fail("Expected error when attempting to resolve link");
    } catch (IOException e) {
        GenericTestUtils.assertExceptionContains("resolution is disabled", e);
    }
}

Example: TestFileContextSnapshot.java (19 votes, Apache License 2.0, author: apache)
@Test(timeout = 60000)
public void testCreateAndDeleteSnapshot() throws Exception {
    DFSTestUtil.createFile(dfs, filePath, BLOCKSIZE, REPLICATION, SEED);
    // disallow snapshot on dir
    dfs.disallowSnapshot(snapRootPath);
    try {
        fileContext.createSnapshot(snapRootPath, "s1");
    } catch (SnapshotException e) {
        GenericTestUtils.assertExceptionContains("Directory is not a snapshottable directory: " + snapRootPath, e);
    }
    // allow snapshot on dir
    dfs.allowSnapshot(snapRootPath);
    Path ssPath = fileContext.createSnapshot(snapRootPath, "s1");
    assertTrue("Failed to create snapshot", dfs.exists(ssPath));
    fileContext.deleteSnapshot(snapRootPath, "s1");
    assertFalse("Failed to delete snapshot", dfs.exists(ssPath));
}

Example: TestFSDirectory.java (19 votes, Apache License 2.0, author: apache)
@Test
public void testSkipQuotaCheck() throws Exception {
    try {
        // set quota. nsQuota of 1 means no files can be created
        // under this directory.
        hdfs.setQuota(sub2, 1, Long.MAX_VALUE);
        // create a file
        try {
            // this should fail
            DFSTestUtil.createFile(hdfs, file6, 1024, REPLICATION, seed);
            throw new IOException("The create should have failed.");
        } catch (NSQuotaExceededException qe) {
        // ignored
        }
        // disable the quota check and retry. this should succeed.
        fsdir.disableQuotaChecks();
        DFSTestUtil.createFile(hdfs, file6, 1024, REPLICATION, seed);
        // trying again after re-enabling the check.
        // cleanup
        hdfs.delete(file6, false);
        fsdir.enableQuotaChecks();
        try {
            // this should fail
            DFSTestUtil.createFile(hdfs, file6, 1024, REPLICATION, seed);
            throw new IOException("The create should have failed.");
        } catch (NSQuotaExceededException qe) {
        // ignored
        }
    } finally {
        // cleanup, in case the test failed in the middle.
        hdfs.delete(file6, false);
        hdfs.setQuota(sub2, Long.MAX_VALUE, Long.MAX_VALUE);
    }
}

Example: TestViewFSStoragePolicyCommands.java (19 votes, Apache License 2.0, author: apache)
@Test
public void testStoragePolicyCommandPathWithSchema() throws Exception {
    Path base1 = new Path("/user1");
    final Path bar = new Path(base1, "bar");
    DFSTestUtil.createFile(cluster.getFileSystem(0), bar, 1024, (short) 1, 0);
    // Test with hdfs:// schema
    String pathHdfsSchema = "hdfs://" + cluster.getNameNode(0).getClientNamenodeAddress() + "/" + bar.toString();
    checkCommandsWithUriPath(pathHdfsSchema);
    // Test with webhdfs:// schema
    InetSocketAddress httpAddress = cluster.getNameNode(0).getHttpAddress();
    String pathWebhdfsSchema = "webhdfs://" + httpAddress.getHostName() + ":" + httpAddress.getPort() + "/" + bar.toString();
    checkCommandsWithUriPath(pathWebhdfsSchema);
}

Example: TestSnapshotNameWithInvalidCharacters.java (19 votes, Apache License 2.0, author: apache)
@Test(timeout = 60000)
public void TestSnapshotWithInvalidName1() throws Exception {
    Path file1 = new Path(dir1, file1Name);
    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, SEED);
    hdfs.allowSnapshot(dir1);
    try {
        hdfs.createSnapshot(dir1, snapshot2);
    } catch (RemoteException e) {
        // expected: the snapshot name is invalid, so createSnapshot fails
    }
}

Example: TestWebHdfsWithRestCsrfPreventionFilter.java (19 votes, Apache License 2.0, author: apache)
@Test
public void testDelete() throws Exception {
    DFSTestUtil.createFile(fs, FILE, 1024, (short) 1, 0L);
    // delete is an HTTP DELETE that executes solely within the NameNode as a
    // metadata operation, so we expect CSRF prevention configured on the
    // NameNode to block an unconfigured client.
    if (nnRestCsrf && !clientRestCsrf) {
        expectException();
    }
    assertTrue(webhdfs.delete(FILE, false));
}

Example: TestPersistentStoragePolicySatisfier.java (19 votes, Apache License 2.0, author: apache)
/**
 * Setup test files for testing.
 * @param dfs
 * @param replication
 * @throws Exception
 */
private void createTestFiles(DistributedFileSystem dfs, short replication) throws Exception {
    DFSTestUtil.createFile(dfs, testFile, 1024L, replication, 0L);
    DFSTestUtil.createFile(dfs, parentFile, 1024L, replication, 0L);
    DFSTestUtil.createFile(dfs, childFile, 1024L, replication, 0L);
    DFSTestUtil.waitReplication(dfs, testFile, replication);
    DFSTestUtil.waitReplication(dfs, parentFile, replication);
    DFSTestUtil.waitReplication(dfs, childFile, replication);
}

Example: TestNameNodeXAttr.java (19 votes, Apache License 2.0, author: apache)
@Test(timeout = 120000)
public void testXAttrSymlinks() throws Exception {
    fs.mkdirs(linkParent);
    fs.mkdirs(targetParent);
    DFSTestUtil.createFile(fs, target, 1024, (short) 3, 0xBEEFL);
    fs.createSymlink(target, link, false);
    fs.setXAttr(target, name1, value1);
    fs.setXAttr(target, name2, value2);
    Map<String, byte[]> xattrs = fs.getXAttrs(link);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(value2, xattrs.get(name2));
    fs.setXAttr(link, name3, null);
    xattrs = fs.getXAttrs(target);
    Assert.assertEquals(xattrs.size(), 3);
    Assert.assertArrayEquals(value1, xattrs.get(name1));
    Assert.assertArrayEquals(value2, xattrs.get(name2));
    Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
    fs.removeXAttr(link, name1);
    xattrs = fs.getXAttrs(target);
    Assert.assertEquals(xattrs.size(), 2);
    Assert.assertArrayEquals(value2, xattrs.get(name2));
    Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
    fs.removeXAttr(target, name3);
    xattrs = fs.getXAttrs(link);
    Assert.assertEquals(xattrs.size(), 1);
    Assert.assertArrayEquals(value2, xattrs.get(name2));
    fs.delete(linkParent, true);
    fs.delete(targetParent, true);
}

Example: TestStoragePolicyCommands.java (19 votes, Apache License 2.0, author: apache)
@Test
public void testSetAndGetStoragePolicy() throws Exception {
    final Path foo = new Path("/foo");
    final Path bar = new Path(foo, "bar");
    DFSTestUtil.createFile(fs, bar, SIZE, REPL, 0);
    final StoragePolicyAdmin admin = new StoragePolicyAdmin(conf);
    DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo", 0, "The storage policy of " + foo.toString() + " is unspecified");
    DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo/bar", 0, "The storage policy of " + bar.toString() + " is unspecified");
    DFSTestUtil.toolRun(admin, "-setStoragePolicy -path /foo -policy WARM", 0, "Set storage policy WARM on " + foo.toString());
    DFSTestUtil.toolRun(admin, "-setStoragePolicy -path /foo/bar -policy COLD", 0, "Set storage policy COLD on " + bar.toString());
    DFSTestUtil.toolRun(admin, "-setStoragePolicy -path /fooz -policy WARM", 2, "File/Directory does not exist: /fooz");
    final BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
    final BlockStoragePolicy warm = suite.getPolicy("WARM");
    final BlockStoragePolicy cold = suite.getPolicy("COLD");
    DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo", 0, "The storage policy of " + foo.toString() + ":\n" + warm);
    DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /foo/bar", 0, "The storage policy of " + bar.toString() + ":\n" + cold);
    DFSTestUtil.toolRun(admin, "-getStoragePolicy -path /fooz", 2, "File/Directory does not exist: /fooz");
}

Example: TestDistCpSyncReverseBase.java (18 votes, Apache License 2.0, author: apache)
/**
 * create some files and directories under the given directory.
 * the final subtree looks like this:
 *                     dir/
 *              foo/          bar/
 *           d1/    f1     d2/    f2
 *         f3            f4
 */
private void initData(Path dir) throws Exception {
    final Path foo = new Path(dir, "foo");
    final Path bar = new Path(dir, "bar");
    final Path d1 = new Path(foo, "d1");
    final Path f1 = new Path(foo, "f1");
    final Path d2 = new Path(bar, "d2");
    final Path f2 = new Path(bar, "f2");
    final Path f3 = new Path(d1, "f3");
    final Path f4 = new Path(d2, "f4");
    DFSTestUtil.createFile(dfs, f1, blockSize, dataNum, 0);
    DFSTestUtil.createFile(dfs, f2, blockSize, dataNum, 0);
    DFSTestUtil.createFile(dfs, f3, blockSize, dataNum, 0);
    DFSTestUtil.createFile(dfs, f4, blockSize, dataNum, 0);
}

Example: TestDistCpSync.java (18 votes, Apache License 2.0, author: apache)
private void initData8(Path dir) throws Exception {
    final Path foo = new Path(dir, "foo");
    final Path bar = new Path(dir, "bar");
    final Path d1 = new Path(dir, "d1");
    final Path foo_f1 = new Path(foo, "f1");
    final Path bar_f1 = new Path(bar, "f1");
    final Path d1_f1 = new Path(d1, "f1");
    DFSTestUtil.createFile(dfs, foo_f1, BLOCK_SIZE, DATA_NUM, 0L);
    DFSTestUtil.createFile(dfs, bar_f1, BLOCK_SIZE, DATA_NUM, 0L);
    DFSTestUtil.createFile(dfs, d1_f1, BLOCK_SIZE, DATA_NUM, 0L);
}

Example: TestDFSAdmin.java (18 votes, Apache License 2.0, author: apache)
@Test(timeout = 300000L)
public void testCheckNumOfBlocksInReportCommand() throws Exception {
    DistributedFileSystem dfs = cluster.getFileSystem();
    Path path = new Path("/tmp.txt");
    DatanodeInfo[] dn = dfs.getDataNodeStats();
    assertEquals(dn.length, NUM_DATANODES);
    // Block count should be 0, as no files are created
    int actualBlockCount = 0;
    for (DatanodeInfo d : dn) {
        actualBlockCount += d.getNumBlocks();
    }
    assertEquals(0, actualBlockCount);
    // Create a file with 2 blocks
    DFSTestUtil.createFile(dfs, path, 1024, (short) 1, 0);
    int expectedBlockCount = 2;
    // Wait for One Heartbeat
    Thread.sleep(3 * 1000);
    dn = dfs.getDataNodeStats();
    assertEquals(dn.length, NUM_DATANODES);
    // Block count should be 2, as file is created with block count 2
    actualBlockCount = 0;
    for (DatanodeInfo d : dn) {
        actualBlockCount += d.getNumBlocks();
    }
    assertEquals(expectedBlockCount, actualBlockCount);
}

Example: TestSnapshotReplication.java (18 votes, Apache License 2.0, author: apache)
/**
 * Test replication number calculation for a normal file without snapshots.
 */
@Test(timeout = 60000)
public void testReplicationWithoutSnapshot() throws Exception {
    // Create file1, set its replication to REPLICATION
    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
    // Check the replication of file1
    checkFileReplication(file1, REPLICATION, REPLICATION);
    // Change the replication factor of file1 from 3 to 2
    hdfs.setReplication(file1, (short) (REPLICATION - 1));
    // Check the replication again
    checkFileReplication(file1, (short) (REPLICATION - 1), (short) (REPLICATION - 1));
}

Example: TestSnapshotRename.java (18 votes, Apache License 2.0, author: apache)
/**
 * Test renaming a snapshot to another existing snapshot
 */
@Test(timeout = 60000)
public void testRenameToExistingSnapshot() throws Exception {
    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
    // Create snapshots for sub1
    SnapshotTestHelper.createSnapshot(hdfs, sub1, "s1");
    SnapshotTestHelper.createSnapshot(hdfs, sub1, "s2");
    exception.expect(SnapshotException.class);
    String error = "The snapshot s2 already exists for directory " + sub1.toString();
    exception.expectMessage(error);
    hdfs.renameSnapshot(sub1, "s1", "s2");
}

Example: TestHASafeMode.java (18 votes, Apache License 2.0, author: apache)
/**
 * Tests the case where, while a standby is down, more blocks are
 * added to the namespace but the edit log is not rolled, so when the
 * standby starts up it receives notification about the new blocks
 * during the safemode extension period.
 */
@Test
public void testBlocksAddedBeforeStandbyRestart() throws Exception {
    banner("Starting with NN0 active and NN1 standby, creating some blocks");
    DFSTestUtil.createFile(fs, new Path("/test"), 3 * BLOCK_SIZE, (short) 3, 1L);
    // Roll edit log so that, when the SBN restarts, it will load
    // the namespace during startup.
    nn0.getRpcServer().rollEditLog();
    banner("Creating some blocks that won't be in the edit log");
    DFSTestUtil.createFile(fs, new Path("/test2"), 5 * BLOCK_SIZE, (short) 3, 1L);
    banner("Restarting standby");
    restartStandby();
    // We expect it not to be stuck in safemode, since those blocks
    // that are already visible to the SBN should be processed
    // in the initial block reports.
    assertSafeMode(nn1, 3, 3, 3, 0);
    banner("Waiting for standby to catch up to active namespace");
    HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
    assertSafeMode(nn1, 8, 8, 3, 0);
}

Example: TestDiskspaceQuotaUpdate.java (18 votes, Apache License 2.0, author: apache)
/**
 * Test that the quota is correctly updated when a file is created
 */
@Test(timeout = 60000)
public void testQuotaUpdateWithFileCreate() throws Exception {
    final Path foo = new Path(getParent(GenericTestUtils.getMethodName()), "foo");
    Path createdFile = new Path(foo, "created_file.data");
    getDFS().mkdirs(foo);
    getDFS().setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
    long fileLen = BLOCKSIZE * 2 + BLOCKSIZE / 2;
    DFSTestUtil.createFile(getDFS(), createdFile, BLOCKSIZE / 16, fileLen, BLOCKSIZE, REPLICATION, seed);
    INode fnode = getFSDirectory().getINode4Write(foo.toString());
    assertTrue(fnode.isDirectory());
    assertTrue(fnode.isQuotaSet());
    QuotaCounts cnt = fnode.asDirectory().getDirectoryWithQuotaFeature().getSpaceConsumed();
    assertEquals(2, cnt.getNameSpace());
    assertEquals(fileLen * REPLICATION, cnt.getStorageSpace());
}

Example: TestDistCpSyncReverseBase.java (18 votes, Apache License 2.0, author: apache)
private void initData3(Path dir) throws Exception {
    final Path test = new Path(dir, "test");
    final Path foo = new Path(dir, "foo");
    final Path bar = new Path(dir, "bar");
    final Path f1 = new Path(test, "file");
    final Path f2 = new Path(foo, "file");
    final Path f3 = new Path(bar, "file");
    DFSTestUtil.createFile(dfs, f1, blockSize, dataNum, 0L);
    DFSTestUtil.createFile(dfs, f2, blockSize * 2, dataNum, 1L);
    DFSTestUtil.createFile(dfs, f3, blockSize * 3, dataNum, 2L);
}

Example: TestSnapshotRename.java (18 votes, Apache License 2.0, author: apache)
/**
 * Test renaming a non-existing snapshot to itself.
 */
@Test(timeout = 60000)
public void testRenameNonExistingSnapshotToItself() throws Exception {
    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
    // Create snapshot for sub1
    SnapshotTestHelper.createSnapshot(hdfs, sub1, "s1");
    exception.expect(SnapshotException.class);
    String error = "The snapshot wrongName does not exist for directory " + sub1.toString();
    exception.expectMessage(error);
    hdfs.renameSnapshot(sub1, "wrongName", "wrongName");
}

Example: TestDistCpSync.java (18 votes, Apache License 2.0, author: apache)
private void initData2(Path dir) throws Exception {
    final Path test = new Path(dir, "test");
    final Path foo = new Path(dir, "foo");
    final Path bar = new Path(dir, "bar");
    final Path f1 = new Path(test, "f1");
    final Path f2 = new Path(foo, "f2");
    final Path f3 = new Path(bar, "f3");
    DFSTestUtil.createFile(dfs, f1, BLOCK_SIZE, DATA_NUM, 0L);
    DFSTestUtil.createFile(dfs, f2, BLOCK_SIZE, DATA_NUM, 1L);
    DFSTestUtil.createFile(dfs, f3, BLOCK_SIZE, DATA_NUM, 2L);
}

Example: TestClientAccessPrivilege.java (18 votes, Apache License 2.0, author: apache)
@Before
public void createFiles() throws IllegalArgumentException, IOException {
    hdfs.delete(new Path(testdir), true);
    hdfs.mkdirs(new Path(testdir));
    DFSTestUtil.createFile(hdfs, new Path(testdir + "/f1"), 0, (short) 1, 0);
}

Example: TestRpcProgramNfs3.java (18 votes, Apache License 2.0, author: apache)
@Test(timeout = 120000)
public void testEncryptedReadWrite() throws Exception {
    final int len = 8192;
    final Path zone = new Path("/zone");
    hdfs.mkdirs(zone);
    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
    final byte[] buffer = new byte[len];
    for (int i = 0; i < len; i++) {
        buffer[i] = (byte) i;
    }
    final String encFile1 = "/zone/myfile";
    createFileUsingNfs(encFile1, buffer);
    commit(encFile1, len);
    assertArrayEquals("encFile1 not equal", getFileContentsUsingNfs(encFile1, len), getFileContentsUsingDfs(encFile1, len));
    /*
     * Same thing except this time create the encrypted file using DFS.
     */
    final String encFile2 = "/zone/myfile2";
    final Path encFile2Path = new Path(encFile2);
    DFSTestUtil.createFile(hdfs, encFile2Path, len, (short) 1, 0xFEED);
    assertArrayEquals("encFile2 not equal", getFileContentsUsingNfs(encFile2, len), getFileContentsUsingDfs(encFile2, len));
}

Example: TestDistCpSync.java (18 votes, Apache License 2.0, author: apache)
/**
 * create some files and directories under the given directory.
 * the final subtree looks like this:
 *                     dir/
 *              foo/          bar/
 *           d1/    f1     d2/    f2
 *         f3            f4
 */
private void initData(Path dir) throws Exception {
    final Path foo = new Path(dir, "foo");
    final Path bar = new Path(dir, "bar");
    final Path d1 = new Path(foo, "d1");
    final Path f1 = new Path(foo, "f1");
    final Path d2 = new Path(bar, "d2");
    final Path f2 = new Path(bar, "f2");
    final Path f3 = new Path(d1, "f3");
    final Path f4 = new Path(d2, "f4");
    DFSTestUtil.createFile(dfs, f1, BLOCK_SIZE, DATA_NUM, 0);
    DFSTestUtil.createFile(dfs, f2, BLOCK_SIZE, DATA_NUM, 0);
    DFSTestUtil.createFile(dfs, f3, BLOCK_SIZE, DATA_NUM, 0);
    DFSTestUtil.createFile(dfs, f4, BLOCK_SIZE, DATA_NUM, 0);
}

Example: TestIncrementalBrVariations.java (18 votes, Apache License 2.0, author: apache)
private LocatedBlocks createFileGetBlocks(String filenamePrefix) throws IOException {
    Path filePath = new Path("/" + filenamePrefix + ".dat");
    // Write out a file with a few blocks, get block locations.
    DFSTestUtil.createFile(fs, filePath, BLOCK_SIZE, BLOCK_SIZE * NUM_BLOCKS, BLOCK_SIZE, NUM_DATANODES, seed);
    // Get the block list for the file with the block locations.
    LocatedBlocks blocks = client.getLocatedBlocks(filePath.toString(), 0, BLOCK_SIZE * NUM_BLOCKS);
    assertThat(cluster.getNamesystem().getUnderReplicatedBlocks(), is(0L));
    return blocks;
}

Example: TestDistCpSync.java (18 votes, Apache License 2.0, author: apache)
private void initData11(Path dir) throws Exception {
    final Path staging = new Path(dir, "prod");
    final Path stagingF1 = new Path(staging, "f1");
    final Path data = new Path(dir, "data");
    final Path dataF1 = new Path(data, "f1");
    DFSTestUtil.createFile(dfs, stagingF1, BLOCK_SIZE, DATA_NUM, 0L);
    DFSTestUtil.createFile(dfs, dataF1, BLOCK_SIZE, DATA_NUM, 0L);
}

Example: TestDistCpSync.java (18 votes, Apache License 2.0, author: apache)
private void initData6(Path dir) throws Exception {
    final Path foo = new Path(dir, "foo");
    final Path bar = new Path(dir, "bar");
    final Path foo_f1 = new Path(foo, "f1");
    final Path bar_f1 = new Path(bar, "f1");
    DFSTestUtil.createFile(dfs, foo_f1, BLOCK_SIZE, DATA_NUM, 0L);
    DFSTestUtil.createFile(dfs, bar_f1, BLOCK_SIZE, DATA_NUM, 0L);
}

Example: BlockReportTestBase.java (18 votes, Apache License 2.0, author: apache)
private ArrayList<Block> prepareForRide(final Path filePath, final String METHOD_NAME, long fileSize) throws IOException {
    LOG.info("Running test " + METHOD_NAME);
    DFSTestUtil.createFile(fs, filePath, fileSize, REPL_FACTOR, rand.nextLong());
    return locatedToBlocks(cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), FILE_START, fileSize).getLocatedBlocks(), null);
}

Example: TestDeleteBlockPool.java (18 votes, Apache License 2.0, author: apache)
@Test
public void testDfsAdminDeleteBlockPool() throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
        conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId1,namesServerId2");
        cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(conf.get(DFSConfigKeys.DFS_NAMESERVICES))).numDataNodes(1).build();
        cluster.waitActive();
        FileSystem fs1 = cluster.getFileSystem(0);
        FileSystem fs2 = cluster.getFileSystem(1);
        DFSTestUtil.createFile(fs1, new Path("/alpha"), 1024, (short) 1, 54);
        DFSTestUtil.createFile(fs2, new Path("/beta"), 1024, (short) 1, 54);
        DataNode dn1 = cluster.getDataNodes().get(0);
        String bpid1 = cluster.getNamesystem(0).getBlockPoolId();
        String bpid2 = cluster.getNamesystem(1).getBlockPoolId();
        Configuration nn1Conf = cluster.getConfiguration(0);
        nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId1");
        dn1.refreshNamenodes(nn1Conf);
        assertEquals(1, dn1.getAllBpOs().size());
        DFSAdmin admin = new DFSAdmin(nn1Conf);
        String dn1Address = dn1.getDatanodeId().getIpAddr() + ":" + dn1.getIpcPort();
        String[] args = { "-deleteBlockPool", dn1Address, bpid2 };
        int ret = admin.run(args);
        assertFalse(0 == ret);
        cluster.getFsDatasetTestUtils(0).verifyBlockPoolExists(bpid2);
        String[] forceArgs = { "-deleteBlockPool", dn1Address, bpid2, "force" };
        ret = admin.run(forceArgs);
        assertEquals(0, ret);
        cluster.getFsDatasetTestUtils(0).verifyBlockPoolMissing(bpid2);
        // bpid1 remains good
        cluster.getFsDatasetTestUtils(0).verifyBlockPoolExists(bpid1);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

Example: TestReaddir.java (18 votes, Apache License 2.0, author: apache)
@Before
public void createFiles() throws IllegalArgumentException, IOException {
    hdfs.delete(new Path(testdir), true);
    hdfs.mkdirs(new Path(testdir));
    DFSTestUtil.createFile(hdfs, new Path(testdir + "/f1"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path(testdir + "/f2"), 0, (short) 1, 0);
    DFSTestUtil.createFile(hdfs, new Path(testdir + "/f3"), 0, (short) 1, 0);
}

Example: TestDistCpSyncReverseBase.java (18 votes, Apache License 2.0, author: apache)
private void initData8(Path dir) throws Exception {
    final Path foo = new Path(dir, "foo");
    final Path bar = new Path(dir, "bar");
    final Path d1 = new Path(dir, "d1");
    final Path foo_f1 = new Path(foo, "f1");
    final Path bar_f1 = new Path(bar, "f1");
    final Path d1_f1 = new Path(d1, "f1");
    DFSTestUtil.createFile(dfs, foo_f1, blockSize, dataNum, 0L);
    DFSTestUtil.createFile(dfs, bar_f1, blockSize, dataNum, 0L);
    DFSTestUtil.createFile(dfs, d1_f1, blockSize, dataNum, 0L);
}

Example: TestResolveHdfsSymlink.java (18 votes, Apache License 2.0, author: apache)
/**
 * Tests resolution of an hdfs symlink to the local file system.
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testFcResolveAfs() throws IOException, InterruptedException {
    Configuration conf = new Configuration();
    FileContext fcLocal = FileContext.getLocalFSFileContext();
    FileContext fcHdfs = FileContext.getFileContext(cluster.getFileSystem().getUri());
    final String localTestRoot = helper.getAbsoluteTestRootDir(fcLocal);
    Path alphaLocalPath = new Path(fcLocal.getDefaultFileSystem().getUri().toString(), new File(localTestRoot, "alpha").getPath());
    DFSTestUtil.createFile(FileSystem.getLocal(conf), alphaLocalPath, 16, (short) 1, 2);
    Path linkTarget = new Path(fcLocal.getDefaultFileSystem().getUri().toString(), localTestRoot);
    Path hdfsLink = new Path(fcHdfs.getDefaultFileSystem().getUri().toString(), "/tmp/link");
    fcHdfs.createSymlink(linkTarget, hdfsLink, true);
    Path alphaHdfsPathViaLink = new Path(fcHdfs.getDefaultFileSystem().getUri().toString() + "/tmp/link/alpha");
    Set<AbstractFileSystem> afsList = fcHdfs.resolveAbstractFileSystems(alphaHdfsPathViaLink);
    Assert.assertEquals(2, afsList.size());
    for (AbstractFileSystem afs : afsList) {
        if ((!afs.equals(fcHdfs.getDefaultFileSystem())) && (!afs.equals(fcLocal.getDefaultFileSystem()))) {
            Assert.fail("Failed to resolve AFS correctly");
        }
    }
}

Example: TestSnapshotRename.java (18 votes, Apache License 2.0, author: apache)
/**
 * Test rename a non-existing snapshot
 */
@Test(timeout = 60000)
public void testRenameNonExistingSnapshot() throws Exception {
    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
    // Create snapshot for sub1
    SnapshotTestHelper.createSnapshot(hdfs, sub1, "s1");
    exception.expect(SnapshotException.class);
    String error = "The snapshot wrongName does not exist for directory " + sub1.toString();
    exception.expectMessage(error);
    hdfs.renameSnapshot(sub1, "wrongName", "s2");
}

Example: TestDistCpSyncReverseBase.java (18 votes, Apache License 2.0, author: apache)
private void initData7(Path dir) throws Exception {
    final Path foo = new Path(dir, "foo");
    final Path bar = new Path(dir, "bar");
    final Path foo_f1 = new Path(foo, "f1");
    final Path bar_f1 = new Path(bar, "f1");
    DFSTestUtil.createFile(dfs, foo_f1, blockSize, dataNum, 0L);
    DFSTestUtil.createFile(dfs, bar_f1, blockSize, dataNum, 0L);
}

Example: TestDistCpSyncReverseBase.java (18 votes, Apache License 2.0, author: apache)
private void initData2(Path dir) throws Exception {
    final Path test = new Path(dir, "test");
    final Path foo = new Path(dir, "foo");
    final Path bar = new Path(dir, "bar");
    final Path f1 = new Path(test, "f1");
    final Path f2 = new Path(foo, "f2");
    final Path f3 = new Path(bar, "f3");
    DFSTestUtil.createFile(dfs, f1, blockSize, dataNum, 0L);
    DFSTestUtil.createFile(dfs, f2, blockSize, dataNum, 1L);
    DFSTestUtil.createFile(dfs, f3, blockSize, dataNum, 2L);
}

Example: TestDistCpSync.java (18 votes, Apache License 2.0, author: apache)
private void initData3(Path dir) throws Exception {
    final Path test = new Path(dir, "test");
    final Path foo = new Path(dir, "foo");
    final Path bar = new Path(dir, "bar");
    final Path f1 = new Path(test, "file");
    final Path f2 = new Path(foo, "file");
    final Path f3 = new Path(bar, "file");
    DFSTestUtil.createFile(dfs, f1, BLOCK_SIZE, DATA_NUM, 0L);
    DFSTestUtil.createFile(dfs, f2, BLOCK_SIZE * 2, DATA_NUM, 1L);
    DFSTestUtil.createFile(dfs, f3, BLOCK_SIZE * 3, DATA_NUM, 2L);
}

Example: TestDNFencingWithReplication.java (18 votes, Apache License 2.0, author: apache)
@Test
public void testFencingStress() throws Exception {
    HAStressTestHarness harness = new HAStressTestHarness();
    harness.setNumberOfNameNodes(3);
    harness.conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
    harness.conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
    harness.conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
    final MiniDFSCluster cluster = harness.startCluster();
    try {
        cluster.waitActive();
        cluster.transitionToActive(0);
        FileSystem fs = harness.getFailoverFs();
        TestContext togglers = new TestContext();
        for (int i = 0; i < NUM_THREADS; i++) {
            Path p = new Path("/test-" + i);
            DFSTestUtil.createFile(fs, p, BLOCK_SIZE * 10, (short) 3, (long) i);
            togglers.addThread(new ReplicationToggler(togglers, fs, p, cluster));
        }
        // Start a separate thread which will make sure that replication
        // happens quickly by triggering deletion reports and replication
        // work calculation frequently.
        harness.addReplicationTriggerThread(500);
        harness.addFailoverThread(5000);
        harness.startThreads();
        togglers.startThreads();
        togglers.waitFor(RUNTIME);
        togglers.stop();
        harness.stopThreads();
        // Check that the files can be read without throwing
        for (int i = 0; i < NUM_THREADS; i++) {
            Path p = new Path("/test-" + i);
            DFSTestUtil.readFile(fs, p);
        }
    } finally {
        System.err.println("===========================\n\n\n\n");
        harness.shutdown();
    }
}

Example: TestHASafeMode.java (18 votes, Apache License 2.0, author: apache)
/**
 * Similar to {@link #testBlocksAddedBeforeStandbyRestart()} except that
 * the new blocks are allocated after the SBN has restarted. So, the
 * blocks were not present in the original block reports at startup
 * but are reported separately by blockReceived calls.
 */
@Test
public void testBlocksAddedWhileInSafeMode() throws Exception {
    banner("Starting with NN0 active and NN1 standby, creating some blocks");
    DFSTestUtil.createFile(fs, new Path("/test"), 3 * BLOCK_SIZE, (short) 3, 1L);
    // Roll edit log so that, when the SBN restarts, it will load
    // the namespace during startup.
    nn0.getRpcServer().rollEditLog();
    banner("Restarting standby");
    restartStandby();
    assertSafeMode(nn1, 3, 3, 3, 0);
    // Create a few blocks which will send blockReceived calls to the
    // SBN.
    banner("Creating some blocks while SBN is in safe mode");
    DFSTestUtil.createFile(fs, new Path("/test2"), 5 * BLOCK_SIZE, (short) 3, 1L);
    banner("Waiting for standby to catch up to active namespace");
    HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
    assertSafeMode(nn1, 8, 8, 3, 0);
}

Example: TestSequentialBlockGroupId.java (18 votes, Apache License 2.0, author: apache)
/**
 * Test that collisions in the blockGroup ID space are handled gracefully.
 */
@Test(timeout = 60000)
public void testTriggerBlockGroupIdCollision() throws IOException {
    long blockGroupIdInitialValue = blockGrpIdGenerator.getCurrentValue();
    // Create a file with a few blocks to rev up the global block ID
    // counter.
    Path path1 = new Path(ecDir, "testBlockGrpIdCollisionDetection_file1.dat");
    DFSTestUtil.createFile(fs, path1, cellSize, fileLen, blockSize, REPLICATION, SEED);
    List<LocatedBlock> blocks1 = DFSTestUtil.getAllBlocks(fs, path1);
    assertThat("Wrong BlockGrps", blocks1.size(), is(blockGrpCount));
    // Rewind the block ID counter in the name system object. This will result
    // in block ID collisions when we try to allocate new blocks.
    blockGrpIdGenerator.setCurrentValue(blockGroupIdInitialValue);
    // Trigger collisions by creating a new file.
    Path path2 = new Path(ecDir, "testBlockGrpIdCollisionDetection_file2.dat");
    DFSTestUtil.createFile(fs, path2, cellSize, fileLen, blockSize, REPLICATION, SEED);
    List<LocatedBlock> blocks2 = DFSTestUtil.getAllBlocks(fs, path2);
    assertThat("Wrong BlockGrps", blocks2.size(), is(blockGrpCount));
    // Make sure that file1 and file2 block IDs are different
    for (LocatedBlock locBlock1 : blocks1) {
        long blockId1 = locBlock1.getBlock().getBlockId();
        for (LocatedBlock locBlock2 : blocks2) {
            long blockId2 = locBlock2.getBlock().getBlockId();
            assertThat("BlockGrpId mismatches!", blockId1, is(not(blockId2)));
        }
    }
}