org.apache.hadoop.hdfs.MiniDFSCluster.getFileSystem() - Java examples

Here are examples of the Java API org.apache.hadoop.hdfs.MiniDFSCluster.getFileSystem(), taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.

155 Examples
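
For orientation, the snippets below all follow the same basic lifecycle: build a MiniDFSCluster, call getFileSystem() to obtain a FileSystem bound to the in-process namenode, exercise it, and shut the cluster down. Here is a minimal, self-contained sketch of that pattern; it is illustrative only and not taken from any of the projects below (the path and configuration values are assumptions):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsGetFileSystemSketch {

    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Build a single-datanode, freshly formatted in-process HDFS cluster.
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
        try {
            cluster.waitActive();
            // getFileSystem() returns a DistributedFileSystem bound to the cluster's namenode.
            FileSystem fs = cluster.getFileSystem();
            Path file = new Path("/tmp/hello.txt");
            try (FSDataOutputStream out = fs.create(file)) {
                out.writeUTF("hello");
            }
            System.out.println("exists: " + fs.exists(file));
        } finally {
            cluster.shutdown();
        }
    }
}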

19 View Complete Implementation : ClusterMapReduceTestCase.java
Copyright Apache License 2.0
Author : aliyun-beta
/**
 * Returns a preconfigured Filesystem instance for test cases to read and
 * write files to it.
 * <p/>
 * TestCases should use this Filesystem instance.
 *
 * @return the filesystem used by Hadoop.
 * @throws IOException
 */
protected FileSystem getFileSystem() throws IOException {
    return dfsCluster.getFileSystem();
}

19 View Complete Implementation : TestDataJoin.java
Copyright Apache License 2.0
Author : aliyun-beta
public void testDataJoin() throws Exception {
    final int srcs = 4;
    JobConf job = new JobConf();
    job.setBoolean("mapreduce.fileoutputcommitter.marksuccessfuljobs", false);
    Path base = cluster.getFileSystem().makeQualified(new Path("/inner"));
    Path[] src = writeSimpleSrc(base, job, srcs);
    job.setInputFormat(SequenceFileInputFormat.class);
    Path outdir = new Path(base, "out");
    FileOutputFormat.setOutputPath(job, outdir);
    job.setMapperClass(SampleDataJoinMapper.class);
    job.setReducerClass(SampleDataJoinReducer.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(SampleTaggedMapOutput.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);
    job.setOutputFormat(TextOutputFormat.class);
    job.setNumMapTasks(1);
    job.setNumReduceTasks(1);
    FileInputFormat.setInputPaths(job, src);
    try {
        JobClient.runJob(job);
        confirmOutput(outdir, job, srcs);
    } finally {
        base.getFileSystem(job).delete(base, true);
    }
}

19 View Complete Implementation : FSXAttrBaseTest.java
Copyright Apache License 2.0
Author : aliyun-beta
/**
 * Creates a FileSystem for the super-user.
 *
 * @return FileSystem for super-user
 * @throws Exception if creation fails
 */
protected FileSystem createFileSystem() throws Exception {
    return dfsCluster.getFileSystem();
}

19 View Complete Implementation : TestDFSIO.java
Copyright Apache License 2.0
Author : aliyun-beta
public static void testWrite() throws Exception {
    FileSystem fs = cluster.getFileSystem();
    long tStart = System.currentTimeMillis();
    bench.writeTest(fs);
    long execTime = System.currentTimeMillis() - tStart;
    bench.analyzeResult(fs, TestType.TEST_TYPE_WRITE, execTime);
}

19 View Complete Implementation : TestDFSIO.java
Copyright Apache License 2.0
Author : aliyun-beta
@Test(timeout = 60000)
public void testTruncate() throws Exception {
    FileSystem fs = cluster.getFileSystem();
    bench.createControlFile(fs, DEFAULT_NR_BYTES / 2, DEFAULT_NR_FILES);
    long tStart = System.currentTimeMillis();
    bench.truncateTest(fs);
    long execTime = System.currentTimeMillis() - tStart;
    bench.analyzeResult(fs, TestType.TEST_TYPE_TRUNCATE, execTime);
}

19 View Complete Implementation : TestLargeDirectoryDelete.java
Copyright Apache License 2.0
Author : aliyun-beta
/**
 * Run multiple threads doing simultaneous operations on the namenode
 * while a large directory is being deleted.
 */
private void runThreads() throws Throwable {
    final TestThread[] threads = new TestThread[2];
    // Thread for creating files
    threads[0] = new TestThread() {

        @Override
        protected void execute() throws Throwable {
            while (live) {
                try {
                    int blockcount = getBlockCount();
                    if (blockcount < TOTAL_BLOCKS && blockcount > 0) {
                        String file = "/tmp" + createOps;
                        createFile(file, 1);
                        mc.getFileSystem().delete(new Path(file), true);
                        createOps++;
                    }
                } catch (IOException ex) {
                    LOG.info("createFile exception ", ex);
                    break;
                }
            }
        }
    };
    // Thread that periodically acquires the FSNamesystem lock
    threads[1] = new TestThread() {

        @Override
        protected void execute() throws Throwable {
            while (live) {
                try {
                    int blockcount = getBlockCount();
                    if (blockcount < TOTAL_BLOCKS && blockcount > 0) {
                        mc.getNamesystem().writeLock();
                        try {
                            lockOps++;
                        } finally {
                            mc.getNamesystem().writeUnlock();
                        }
                        Thread.sleep(1);
                    }
                } catch (InterruptedException ex) {
                    LOG.info("lockOperation exception ", ex);
                    break;
                }
            }
        }
    };
    threads[0].start();
    threads[1].start();
    final long start = Time.now();
    FSNamesystem.BLOCK_DELETION_INCREMENT = 1;
    // recursive delete
    mc.getFileSystem().delete(new Path("/root"), true);
    final long end = Time.now();
    threads[0].endThread();
    threads[1].endThread();
    LOG.info("Deletion took " + (end - start) + "msecs");
    LOG.info("createOperations " + createOps);
    LOG.info("lockOperations " + lockOps);
    Assert.assertTrue(lockOps + createOps > 0);
    threads[0].rethrow();
    threads[1].rethrow();
}

18 View Complete Implementation : TestJoinDatamerge.java
Copyright Apache License 2.0
Author : aliyun-beta
public void testEmptyJoin() throws Exception {
    Configuration conf = new Configuration();
    Path base = cluster.getFileSystem().makeQualified(new Path("/empty"));
    Path[] src = { new Path(base, "i0"), new Path("i1"), new Path("i2") };
    conf.set(CompositeInputFormat.JOIN_EXPR, CompositeInputFormat.compose("outer", MapReduceTestUtil.Fake_IF.class, src));
    MapReduceTestUtil.Fake_IF.setKeyClass(conf, MapReduceTestUtil.IncomparableKey.class);
    Job job = Job.getInstance(conf);
    job.setInputFormatClass(CompositeInputFormat.class);
    FileOutputFormat.setOutputPath(job, new Path(base, "out"));
    job.setMapperClass(Mapper.class);
    job.setReducerClass(Reducer.class);
    job.setOutputKeyClass(MapReduceTestUtil.IncomparableKey.class);
    job.setOutputValueClass(NullWritable.class);
    job.waitForCompletion(true);
    assertTrue(job.isSuccessful());
    base.getFileSystem(conf).delete(base, true);
}

18 View Complete Implementation : TestJoinDatamerge.java
Copyright Apache License 2.0
Author : aliyun-beta
private static void joinAs(String jointype, Class<? extends SimpleCheckerMapBase<?>> map, Class<? extends SimpleCheckerReduceBase> reduce) throws Exception {
    final int srcs = 4;
    Configuration conf = new Configuration();
    Path base = cluster.getFileSystem().makeQualified(new Path("/" + jointype));
    Path[] src = writeSimpleSrc(base, conf, srcs);
    conf.set(CompositeInputFormat.JOIN_EXPR, CompositeInputFormat.compose(jointype, SequenceFileInputFormat.class, src));
    conf.setInt("testdatamerge.sources", srcs);
    Job job = Job.getInstance(conf);
    job.setInputFormatClass(CompositeInputFormat.class);
    FileOutputFormat.setOutputPath(job, new Path(base, "out"));
    job.setMapperClass(map);
    job.setReducerClass(reduce);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);
    job.setOutputKeyClass(IntWritable.class);
    job.setOutputValueClass(IntWritable.class);
    job.waitForCompletion(true);
    assertTrue("Job failed", job.isSuccessful());
    if ("outer".equals(jointype)) {
        checkOuterConsistency(job, src);
    }
    base.getFileSystem(conf).delete(base, true);
}

18 View Complete Implementation : TestGlobbedCopyListing.java
Copyright Apache License 2.0
Author : aliyun-beta
@Test
public void testRun() throws Exception {
    final URI uri = cluster.getFileSystem().getUri();
    final String pathString = uri.toString();
    Path fileSystemPath = new Path(pathString);
    Path source = new Path(fileSystemPath.toString() + "/tmp/source");
    Path target = new Path(fileSystemPath.toString() + "/tmp/target");
    Path listingPath = new Path(fileSystemPath.toString() + "/tmp/META/fileList.seq");
    DistCpOptions options = new DistCpOptions(Arrays.asList(source), target);
    options.setTargetPathExists(false);
    new GlobbedCopyListing(new Configuration(), CREDENTIALS).buildListing(listingPath, options);
    verifyContents(listingPath);
}

18 View Complete Implementation : TestDFSIO.java
Copyright Apache License 2.0
Author : aliyun-beta
@BeforeClass
public static void beforeClass() throws Exception {
    bench = new TestDFSIO();
    bench.getConf().setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    cluster = new MiniDFSCluster.Builder(bench.getConf()).numDataNodes(2).format(true).build();
    FileSystem fs = cluster.getFileSystem();
    bench.createControlFile(fs, DEFAULT_NR_BYTES, DEFAULT_NR_FILES);
    /**
     * Check write here, as it is required for other tests
     */
    testWrite();
}

18 View Complete Implementation : GridmixTestUtils.java
Copyright Apache License 2.0
Author : aliyun-beta
/**
 * Methods to generate the home directory for dummy users.
 *
 * @param conf
 */
public static void createHomeAndStagingDirectory(String user, Configuration conf) {
    try {
        FileSystem fs = dfsCluster.getFileSystem();
        String path = "/user/" + user;
        Path homeDirectory = new Path(path);
        if (!fs.exists(homeDirectory)) {
            LOG.info("Creating Home directory : " + homeDirectory);
            fs.mkdirs(homeDirectory);
            changePermission(user, homeDirectory, fs);
        }
        changePermission(user, homeDirectory, fs);
        Path stagingArea = new Path(conf.get("mapreduce.jobtracker.staging.root.dir", "/tmp/hadoop/mapred/staging"));
        LOG.info("Creating Staging root directory : " + stagingArea);
        fs.mkdirs(stagingArea);
        fs.setPermission(stagingArea, new FsPermission((short) 0777));
    } catch (IOException ioe) {
        ioe.printStackTrace();
    }
}

18 View Complete Implementation : TestDatamerge.java
Copyright Apache License 2.0
Author : aliyun-beta
public void testEmptyJoin() throws Exception {
    JobConf job = new JobConf();
    Path base = cluster.getFileSystem().makeQualified(new Path("/empty"));
    Path[] src = { new Path(base, "i0"), new Path("i1"), new Path("i2") };
    job.set("mapreduce.join.expr", CompositeInputFormat.compose("outer", Fake_IF.clreplaced, src));
    job.setInputFormat(CompositeInputFormat.clreplaced);
    FileOutputFormat.setOutputPath(job, new Path(base, "out"));
    job.setMapperClreplaced(IdenreplacedyMapper.clreplaced);
    job.setReducerClreplaced(IdenreplacedyReducer.clreplaced);
    job.setOutputKeyClreplaced(IncomparableKey.clreplaced);
    job.setOutputValueClreplaced(NullWritable.clreplaced);
    JobClient.runJob(job);
    base.getFileSystem(job).delete(base, true);
}

18 View Complete Implementation : TestCopyMapper.java
Copyright Apache License 2.0
Author : aliyun-beta
private static void deleteState() throws IOException {
    pathList.clear();
    nFiles = 0;
    cluster.getFileSystem().delete(new Path(SOURCE_PATH), true);
    cluster.getFileSystem().delete(new Path(TARGET_PATH), true);
}

18 View Complete Implementation : TestReduceFetchFromPartialMem.java
Copyright Apache License 2.0
Author : aliyun-beta
public static Counters runJob(JobConf conf) throws Exception {
    conf.setMapperClass(MapMB.class);
    conf.setReducerClass(MBValidate.class);
    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(Text.class);
    conf.setNumReduceTasks(1);
    conf.setInputFormat(FakeIF.class);
    conf.setNumTasksToExecutePerJvm(1);
    conf.setInt(JobContext.MAP_MAX_ATTEMPTS, 0);
    conf.setInt(JobContext.REDUCE_MAX_ATTEMPTS, 0);
    FileInputFormat.setInputPaths(conf, new Path("/in"));
    final Path outp = new Path("/out");
    FileOutputFormat.setOutputPath(conf, outp);
    RunningJob job = null;
    try {
        job = JobClient.runJob(conf);
        assertTrue(job.isSuccessful());
    } finally {
        FileSystem fs = dfsCluster.getFileSystem();
        if (fs.exists(outp)) {
            fs.delete(outp, true);
        }
    }
    return job.getCounters();
}

18 View Complete Implementation : TestECAdmin.java
Copyright Apache License 2.0
Author : apache
@Test
public void testXOR21MinRacks() throws Exception {
    final String testPolicy = XOR_2_1;
    final int numDataNodes = 5;
    final int numRacks = 2;
    final int expectedNumRacks = 3;
    cluster = DFSTestUtil.setupCluster(conf, numDataNodes, numRacks, 0);
    cluster.getFileSystem().disableErasureCodingPolicy(RS_6_3);
    cluster.getFileSystem().enableErasureCodingPolicy(testPolicy);
    int ret = runCommandWithParams("-verifyClusterSetup");
    replacedertEquals("Return value of the command is not successful", 2, ret);
    replacedertNotEnoughRacksMessage(testPolicy, numRacks, expectedNumRacks);
}

18 View Complete Implementation : TestDFSIO.java
Copyright Apache License 2.0
Author : aliyun-beta
@Test(timeout = 3000)
public void testReadRandom() throws Exception {
    FileSystem fs = cluster.getFileSystem();
    long tStart = System.currentTimeMillis();
    bench.getConf().setLong("test.io.skip.size", 0);
    bench.randomReadTest(fs);
    long execTime = System.currentTimeMillis() - tStart;
    bench.analyzeResult(fs, TestType.TEST_TYPE_READ_RANDOM, execTime);
}

18 View Complete Implementation : TestCopyCommitter.java
Copyright Apache License 2.0
Author : aliyun-beta
@After
public void cleanupMetaFolder() {
    Path meta = new Path("/meta");
    try {
        if (cluster.getFileSystem().exists(meta)) {
            cluster.getFileSystem().delete(meta, true);
            replacedert.fail("Expected meta folder to be deleted");
        }
    } catch (IOException e) {
        LOG.error("Exception encountered while cleaning up folder", e);
        replacedert.fail("Unable to clean up meta folder");
    }
}

18 View Complete Implementation : TestJoinDatamerge.java
Copyright Apache License 2.0
Author : aliyun-beta
private static void checkOuterConsistency(Job job, Path[] src) throws IOException {
    Path outf = FileOutputFormat.getOutputPath(job);
    FileStatus[] outlist = cluster.getFileSystem().listStatus(outf, new Utils.OutputFileUtils.OutputFilesFilter());
    replacedertEquals("number of part files is more than 1. It is" + outlist.length, 1, outlist.length);
    replacedertTrue("output file with zero length" + outlist[0].getLen(), 0 < outlist[0].getLen());
    SequenceFile.Reader r = new SequenceFile.Reader(cluster.getFileSystem(), outlist[0].getPath(), job.getConfiguration());
    IntWritable k = new IntWritable();
    IntWritable v = new IntWritable();
    while (r.next(k, v)) {
        replacedertEquals("counts does not match", v.get(), countProduct(k, src, job.getConfiguration()));
    }
    r.close();
}

18 View Complete Implementation : TestDatamerge.java
Copyright Apache License 2.0
Author : aliyun-beta
private static void joinAs(String jointype, Class<? extends SimpleCheckerBase> c) throws Exception {
    final int srcs = 4;
    Configuration conf = new Configuration();
    JobConf job = new JobConf(conf, c);
    Path base = cluster.getFileSystem().makeQualified(new Path("/" + jointype));
    Path[] src = writeSimpleSrc(base, conf, srcs);
    job.set("mapreduce.join.expr", CompositeInputFormat.compose(jointype, SequenceFileInputFormat.clreplaced, src));
    job.setInt("testdatamerge.sources", srcs);
    job.setInputFormat(CompositeInputFormat.clreplaced);
    FileOutputFormat.setOutputPath(job, new Path(base, "out"));
    job.setMapperClreplaced(c);
    job.setReducerClreplaced(c);
    job.setOutputKeyClreplaced(IntWritable.clreplaced);
    job.setOutputValueClreplaced(IntWritable.clreplaced);
    JobClient.runJob(job);
    base.getFileSystem(job).delete(base, true);
}

18 View Complete Implementation : TestMRCredentials.java
Copyright Apache License 2.0
Author : aliyun-beta
@SuppressWarnings("deprecation")
@BeforeClass
public static void setUp() throws Exception {
    System.setProperty("hadoop.log.dir", "logs");
    Configuration conf = new Configuration();
    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(numSlaves).build();
    jConf = new JobConf(conf);
    FileSystem.setDefaultUri(conf, dfsCluster.getFileSystem().getUri().toString());
    mrCluster = MiniMRClientClusterFactory.create(TestMRCredentials.class, 1, jConf);
    createKeysAsJson("keys.json");
}

18 View Complete Implementation : TestDFSIO.java
Copyright Apache License 2.0
Author : aliyun-beta
@Test(timeout = 3000)
public void testRead() throws Exception {
    FileSystem fs = cluster.getFileSystem();
    long tStart = System.currentTimeMillis();
    bench.readTest(fs);
    long execTime = System.currentTimeMillis() - tStart;
    bench.analyzeResult(fs, TestType.TEST_TYPE_READ, execTime);
}

17 View Complete Implementation : TestJoinDatamerge.java
Copyright Apache License 2.0
Author : aliyun-beta
public void testNestedJoin() throws Exception {
    // outer(inner(S1,...,Sn),outer(S1,...Sn))
    final int SOURCES = 3;
    final int ITEMS = (SOURCES + 1) * (SOURCES + 1);
    Configuration conf = new Configuration();
    Path base = cluster.getFileSystem().makeQualified(new Path("/nested"));
    int[][] source = new int[SOURCES][];
    for (int i = 0; i < SOURCES; ++i) {
        source[i] = new int[ITEMS];
        for (int j = 0; j < ITEMS; ++j) {
            source[i][j] = (i + 2) * (j + 1);
        }
    }
    Path[] src = new Path[SOURCES];
    SequenceFile.Writer[] out = createWriters(base, conf, SOURCES, src);
    IntWritable k = new IntWritable();
    for (int i = 0; i < SOURCES; ++i) {
        IntWritable v = new IntWritable();
        v.set(i);
        for (int j = 0; j < ITEMS; ++j) {
            k.set(source[i][j]);
            out[i].append(k, v);
        }
        out[i].close();
    }
    out = null;
    StringBuilder sb = new StringBuilder();
    sb.append("outer(inner(");
    for (int i = 0; i < SOURCES; ++i) {
        sb.append(CompositeInputFormat.compose(SequenceFileInputFormat.class, src[i].toString()));
        if (i + 1 != SOURCES)
            sb.append(",");
    }
    sb.append("),outer(");
    sb.append(CompositeInputFormat.compose(MapReduceTestUtil.Fake_IF.class, "foobar"));
    sb.append(",");
    for (int i = 0; i < SOURCES; ++i) {
        sb.append(CompositeInputFormat.compose(SequenceFileInputFormat.class, src[i].toString()));
        sb.append(",");
    }
    sb.append(CompositeInputFormat.compose(MapReduceTestUtil.Fake_IF.class, "raboof") + "))");
    conf.set(CompositeInputFormat.JOIN_EXPR, sb.toString());
    MapReduceTestUtil.Fake_IF.setKeyClass(conf, IntWritable.class);
    MapReduceTestUtil.Fake_IF.setValClass(conf, IntWritable.class);
    Job job = Job.getInstance(conf);
    Path outf = new Path(base, "out");
    FileOutputFormat.setOutputPath(job, outf);
    job.setInputFormatClass(CompositeInputFormat.class);
    job.setMapperClass(Mapper.class);
    job.setReducerClass(Reducer.class);
    job.setNumReduceTasks(0);
    job.setOutputKeyClass(IntWritable.class);
    job.setOutputValueClass(TupleWritable.class);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);
    job.waitForCompletion(true);
    assertTrue("Job failed", job.isSuccessful());
    FileStatus[] outlist = cluster.getFileSystem().listStatus(outf, new Utils.OutputFileUtils.OutputFilesFilter());
    assertEquals(1, outlist.length);
    assertTrue(0 < outlist[0].getLen());
    SequenceFile.Reader r = new SequenceFile.Reader(cluster.getFileSystem(), outlist[0].getPath(), conf);
    TupleWritable v = new TupleWritable();
    while (r.next(k, v)) {
        assertFalse(((TupleWritable) v.get(1)).has(0));
        assertFalse(((TupleWritable) v.get(1)).has(SOURCES + 1));
        boolean chk = true;
        int ki = k.get();
        for (int i = 2; i < SOURCES + 2; ++i) {
            if ((ki % i) == 0 && ki <= i * ITEMS) {
                assertEquals(i - 2, ((IntWritable) ((TupleWritable) v.get(1)).get((i - 1))).get());
            } else
                chk = false;
        }
        if (chk) {
            // present in all sources; chk inner
            assertTrue(v.has(0));
            for (int i = 0; i < SOURCES; ++i) assertTrue(((TupleWritable) v.get(0)).has(i));
        } else {
            // should not be present in inner join
            assertFalse(v.has(0));
        }
    }
    r.close();
    base.getFileSystem(conf).delete(base, true);
}

17 View Complete Implementation : TestECAdmin.java
Copyright Apache License 2.0
Author : apache
@Test
public void testSuccessfulEnablePolicyMessage() throws Exception {
    final String testPolicy = RS_3_2;
    cluster = DFSTestUtil.setupCluster(conf, 5, 3, 0);
    cluster.getFileSystem().disableErasureCodingPolicy(RS_6_3);
    final int ret = runCommandWithParams("-enablePolicy", "-policy", testPolicy);
    replacedertEquals("Return value of the command is successful", 0, ret);
    replacedertTrue("Enabling policy should be logged", out.toString().contains("Erasure coding policy " + testPolicy + " is enabled"));
    replacedertFalse("Warning about cluster topology should not be printed", out.toString().contains("Warning: The cluster setup does not support"));
    replacedertTrue("Error output should be empty", err.toString().isEmpty());
}

17 View Complete Implementation : TestHistoryFileManager.java
Copyright Apache License 2.0
Author : aliyun-beta
@Test
public void testCreateDirsWithFileSystemInSafeMode() throws Exception {
    dfsCluster.getFileSystem().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
    Assert.assertTrue(dfsCluster.getFileSystem().isInSafeMode());
    testTryCreateHistoryDirs(dfsCluster.getConfiguration(0), false);
}

17 View Complete Implementation : TestDFSIO.java
Copyright Apache License 2.0
Author : aliyun-beta
@Test(timeout = 3000)
public void testReadBackward() throws Exception {
    FileSystem fs = cluster.getFileSystem();
    long tStart = System.currentTimeMillis();
    bench.getConf().setLong("test.io.skip.size", -DEFAULT_BUFFER_SIZE);
    bench.randomReadTest(fs);
    long execTime = System.currentTimeMillis() - tStart;
    bench.analyzeResult(fs, TestType.TEST_TYPE_READ_BACKWARD, execTime);
}

17 View Complete Implementation : TestGlobbedCopyListing.java
Copyright Apache License 2.0
Author : aliyun-beta
private static void recordInExpectedValues(String path) throws Exception {
    FileSystem fileSystem = cluster.getFileSystem();
    Path sourcePath = new Path(fileSystem.getUri().toString() + path);
    expectedValues.put(sourcePath.toString(), DistCpUtils.getRelativePath(new Path("/tmp/source"), sourcePath));
}

17 View Complete Implementation : TestECAdmin.java
Copyright Apache License 2.0
Author : apache
@Test
public void testUnsuccessfulEnablePolicyMessage() throws Exception {
    final String testPolicy = RS_3_2;
    final int numDataNodes = 5;
    final int numRacks = 2;
    cluster = DFSTestUtil.setupCluster(conf, numDataNodes, numRacks, 0);
    cluster.getFileSystem().disableErasureCodingPolicy(RS_6_3);
    final int ret = runCommandWithParams("-enablePolicy", "-policy", testPolicy);
    replacedertEquals("Return value of the command is successful", 0, ret);
    replacedertTrue("Enabling policy should be logged", out.toString().contains("Erasure coding policy " + testPolicy + " is enabled"));
    replacedertTrue("Warning about cluster topology should be printed", err.toString().contains("Warning: The cluster setup does not support " + "EC policy " + testPolicy + ". Reason:"));
    replacedertTrue("Warning about cluster topology should be printed", err.toString().contains(" racks are required for the erasure coding policies: " + testPolicy));
}

17 View Complete Implementation : TestLargeDirectoryDelete.java
Copyright Apache License 2.0
Author : aliyun-beta
/**
 * create a file with a length of <code>filelen</code>
 */
private void createFile(final String fileName, final long filelen) throws IOException {
    FileSystem fs = mc.getFileSystem();
    Path filePath = new Path(fileName);
    DFSTestUtil.createFile(fs, filePath, filelen, (short) 1, 0);
}

17 View Complete Implementation : TestDFSIO.java
Copyright Apache License 2.0
Author : apache
@BeforeClass
public static void beforeClass() throws Exception {
    bench = new TestDFSIO();
    bench.getConf().setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    cluster = new MiniDFSCluster.Builder(bench.getConf()).numDataNodes(2).format(true).build();
    FileSystem fs = cluster.getFileSystem();
    bench.createControlFile(fs, DEFAULT_NR_BYTES, DEFAULT_NR_FILES);
    /**
     * Check write here, as it is required for other tests
     */
    testWrite();
}

17 View Complete Implementation : TestJoinDatamerge.java
Copyright Apache License 2.0
Author : apache
@Test
public void testEmptyJoin() throws Exception {
    Configuration conf = new Configuration();
    Path base = cluster.getFileSystem().makeQualified(new Path("/empty"));
    Path[] src = { new Path(base, "i0"), new Path("i1"), new Path("i2") };
    conf.set(CompositeInputFormat.JOIN_EXPR, CompositeInputFormat.compose("outer", MapReduceTestUtil.Fake_IF.class, src));
    MapReduceTestUtil.Fake_IF.setKeyClass(conf, MapReduceTestUtil.IncomparableKey.class);
    Job job = Job.getInstance(conf);
    job.setInputFormatClass(CompositeInputFormat.class);
    FileOutputFormat.setOutputPath(job, new Path(base, "out"));
    job.setMapperClass(Mapper.class);
    job.setReducerClass(Reducer.class);
    job.setOutputKeyClass(MapReduceTestUtil.IncomparableKey.class);
    job.setOutputValueClass(NullWritable.class);
    job.waitForCompletion(true);
    assertTrue(job.isSuccessful());
    base.getFileSystem(conf).delete(base, true);
}

17 View Complete Implementation : TestDFSIO.java
Copyright Apache License 2.0
Author : aliyun-beta
@Test(timeout = 3000)
public void testReadSkip() throws Exception {
    FileSystem fs = cluster.getFileSystem();
    long tStart = System.currentTimeMillis();
    bench.getConf().setLong("test.io.skip.size", 1);
    bench.randomReadTest(fs);
    long execTime = System.currentTimeMillis() - tStart;
    bench.analyzeResult(fs, TestType.TEST_TYPE_READ_SKIP, execTime);
}

17 View Complete Implementation : TestHistoryFileManager.java
Copyright Apache License 2.0
Author : aliyun-beta
@Test
public void testCreateDirsWithFileSystemBecomingAvailBeforeTimeout() throws Exception {
    dfsCluster.getFileSystem().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
    Assert.assertTrue(dfsCluster.getFileSystem().isInSafeMode());
    new Thread() {

        @Override
        public void run() {
            try {
                Thread.sleep(500);
                dfsCluster.getFileSystem().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
                // Safe mode was just left, so the cluster should no longer report it.
                Assert.assertFalse(dfsCluster.getFileSystem().isInSafeMode());
            } catch (Exception ex) {
                Assert.fail(ex.toString());
            }
        }
    }.start();
    testCreateHistoryDirs(dfsCluster.getConfiguration(0), new SystemClock());
}

17 View Complete Implementation : HDFSContract.java
Copyright Apache License 2.0
Author : aliyun-beta
@Override
public FileSystem getTestFileSystem() throws IOException {
    // assumes cluster is not null
    Assert.assertNotNull("cluster not created", cluster);
    return cluster.getFileSystem();
}

17 View Complete Implementation : TestDFSIO.java
Copyright Apache License 2.0
Author : aliyun-beta
@Test(timeout = 6000)
public void testAppend() throws Exception {
    FileSystem fs = cluster.getFileSystem();
    long tStart = System.currentTimeMillis();
    bench.appendTest(fs);
    long execTime = System.currentTimeMillis() - tStart;
    bench.analyzeResult(fs, TestType.TEST_TYPE_APPEND, execTime);
}

17 View Complete Implementation : TestHistoryFileManager.java
Copyright Apache License 2.0
Author : aliyun-beta
@Test
public void testCreateDirsWithFileSystem() throws Exception {
    dfsCluster.getFileSystem().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
    Assert.assertFalse(dfsCluster.getFileSystem().isInSafeMode());
    testTryCreateHistoryDirs(dfsCluster.getConfiguration(0), true);
}

17 View Complete Implementation : TestDFSIO.java
Copyright Apache License 2.0
Author : aliyun-beta
@AfterClass
public static void afterClass() throws Exception {
    if (cluster == null)
        return;
    FileSystem fs = cluster.getFileSystem();
    bench.cleanup(fs);
    cluster.shutdown();
}

17 View Complete Implementation : TestDynamicInputFormat.java
Copyright Apache License 2.0
Author : aliyun-beta
@BeforeClass
public static void setup() throws Exception {
    cluster = new MiniDFSCluster.Builder(getConfigurationForCluster()).numDataNodes(1).format(true).build();
    for (int i = 0; i < N_FILES; ++i) createFile("/tmp/source/" + String.valueOf(i));
    FileSystem fileSystem = cluster.getFileSystem();
    expectedFilePaths.add(fileSystem.listStatus(new Path("/tmp/source/0"))[0].getPath().getParent().toString());
}

16 View Complete Implementation : TestCopyMapper.java
Copyright Apache License 2.0
Author : aliyun-beta
private static void appendSourceData() throws Exception {
    FileSystem fs = cluster.getFileSystem();
    for (Path source : pathList) {
        if (fs.getFileStatus(source).isFile()) {
            // append 2048 bytes per file
            appendFile(source, DEFAULT_FILE_SIZE * 2);
        }
    }
}

16 View Complete Implementation : TestPersistentStoragePolicySatisfier.java
Copyright Apache License 2.0
Author : apache
/**
 * Test SPS when satisfyStoragePolicy called on child file and
 * parent directory.
 * 1. Create one parent directory and child directory.
 * 2. Create some file in both the directory.
 * 3. Set storage policy for parent directory and call
 * satisfyStoragePolicy.
 * 4. Set storage policy for child directory and call
 * satisfyStoragePolicy.
 * 5. restart the namenode.
 * All the file blocks should satisfy the policy.
 */
@Test(timeout = 300000)
public void testSPSOnChildAndParentDirectory() throws Exception {
    try {
        clusterSetUp();
        fs.setStoragePolicy(parentDir, "COLD");
        fs.satisfyStoragePolicy(childDir);
        DFSTestUtil.waitExpectedStorageType(childFileName, StorageType.ARCHIVE, 3, 30000, cluster.getFileSystem());
        fs.satisfyStoragePolicy(parentDir);
        DFSTestUtil.waitExpectedStorageType(parentFileName, StorageType.ARCHIVE, 3, 30000, cluster.getFileSystem());
    } finally {
        clusterShutdown();
    }
}

16 View Complete Implementation : TestHDFSFileContextMainOperations.java
Copyright Apache License 2.0
Author : aliyun-beta
private void oldRename(Path src, Path dst, boolean renameSucceeds, boolean exception) throws Exception {
    DistributedFileSystem fs = cluster.getFileSystem();
    try {
        Assert.assertEquals(renameSucceeds, fs.rename(src, dst));
    } catch (Exception ex) {
        Assert.assertTrue(exception);
    }
    Assert.assertEquals(renameSucceeds, !exists(fc, src));
    Assert.assertEquals(renameSucceeds, exists(fc, dst));
}

16 View Complete Implementation : TestHistoryFileManager.java
Copyright Apache License 2.0
Author : apache
@Test
public void testCreateDirsWithFileSystemBecomingAvailBeforeTimeout() throws Exception {
    dfsCluster.getFileSystem().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
    Assert.assertTrue(dfsCluster.getFileSystem().isInSafeMode());
    new Thread() {

        @Override
        public void run() {
            try {
                Thread.sleep(500);
                dfsCluster.getFileSystem().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
                // Safe mode was just left, so the cluster should no longer report it.
                Assert.assertFalse(dfsCluster.getFileSystem().isInSafeMode());
            } catch (Exception ex) {
                Assert.fail(ex.toString());
            }
        }
    }.start();
    testCreateHistoryDirs(dfsCluster.getConfiguration(0), SystemClock.getInstance());
}

16 View Complete Implementation : TestNNHealthCheck.java
Copyright Apache License 2.0
Author : apache
@Test
public void testNNHealthCheckWithSafemodeAsUnhealthy() throws Exception {
    conf.setBoolean(DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE, true);
    // now bring up just the NameNode.
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).nnTopology(MiniDFSNNTopology.simpleHATopology()).build();
    cluster.waitActive();
    // manually set safemode.
    cluster.getFileSystem(0).setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
    NNHAServiceTarget haTarget = new NNHAServiceTarget(conf, DFSUtil.getNamenodeNameServiceId(conf), "nn1");
    final String expectedTargetString = haTarget.getAddress().toString();
    replacedertTrue("Expected haTarget " + haTarget + " containing " + expectedTargetString, haTarget.toString().contains(expectedTargetString));
    HAServiceProtocol rpc = haTarget.getHealthMonitorProxy(conf, 5000);
    LambdaTestUtils.intercept(RemoteException.class, "The NameNode is configured to report UNHEALTHY to ZKFC in Safemode.", () -> rpc.monitorHealth());
}
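
Note that the example above calls the indexed overload cluster.getFileSystem(0) rather than the no-argument getFileSystem(), because the cluster was built with an HA MiniDFSNNTopology and so has more than one namenode. A minimal sketch of that pattern, assuming a simple two-namenode HA topology (the directory name is an assumption):

Configuration conf = new Configuration();
// Two namenodes in a simple HA topology; getFileSystem(int) selects one by index.
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
try {
    cluster.waitActive();
    // Make the first namenode active before writing through it.
    cluster.transitionToActive(0);
    FileSystem fs0 = cluster.getFileSystem(0);
    fs0.mkdirs(new Path("/ha-test"));
} finally {
    cluster.shutdown();
}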

16 View Complete Implementation : TestBlockTokenWithDFSStriped.java
Copyright Apache License 2.0
Author : apache
@Test
@Override
public void testRead() throws Exception {
    conf = getConf();
    /*
     * prefer non-ephemeral port to avoid conflict with tests using
     * ephemeral ports on MiniDFSCluster#restartDataNode(true).
     */
    Configuration[] overlays = new Configuration[numDNs];
    for (int i = 0; i < overlays.length; i++) {
        int offset = i * 10;
        Configuration c = new Configuration();
        c.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:" + ServerSocketUtil.getPort(19866 + offset, 100));
        c.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:" + ServerSocketUtil.getPort(19867 + offset, 100));
        overlays[i] = c;
    }
    cluster = new MiniDFSCluster.Builder(conf).nameNodePort(ServerSocketUtil.getPort(18020, 100)).nameNodeHttpPort(ServerSocketUtil.getPort(19870, 100)).numDataNodes(numDNs).build();
    cluster.getFileSystem().enableErasureCodingPolicy(StripedFileTestUtil.getDefaultECPolicy().getName());
    cluster.getFileSystem().getClient().setErasureCodingPolicy("/", StripedFileTestUtil.getDefaultECPolicy().getName());
    try {
        cluster.waitActive();
        doTestRead(conf, cluster, true);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

16 View Complete Implementation : TestPersistentStoragePolicySatisfier.java
Copyright Apache License 2.0
Author : apache
/**
 * Test loading of SPS xAttrs from the edits log when satisfyStoragePolicy
 * called on child file and parent directory.
 * 1. Create one directory and create one child file.
 * 2. Set storage policy for child file and call
 * satisfyStoragePolicy.
 * 3. wait for SPS to remove xAttr for file child file.
 * 4. Set storage policy for parent directory and call
 * satisfyStoragePolicy.
 * 5. restart the namenode.
 * NameNode should be started successfully.
 */
@Test(timeout = 300000)
public void testNameNodeRestartWhenSPSCalledOnChildFileAndParentDir() throws Exception {
    try {
        clusterSetUp();
        fs.setStoragePolicy(childFile, "COLD");
        fs.satisfyStoragePolicy(childFile);
        DFSTestUtil.waitExpectedStorageType(childFile.toUri().getPath(), StorageType.ARCHIVE, 3, 30000, cluster.getFileSystem());
        // wait for SPS to remove Xattr from file
        Thread.sleep(30000);
        fs.setStoragePolicy(childDir, "COLD");
        fs.satisfyStoragePolicy(childDir);
        try {
            cluster.restartNameNodes();
        } catch (Exception e) {
            assertFalse(e.getMessage().contains("Cannot request to call satisfy storage policy"));
        }
    } finally {
        clusterShutdown();
    }
}

16 View Complete Implementation : TestResolveHdfsSymlink.java
Copyright Apache License 2.0
Author : aliyun-beta
/**
 * Tests delegation token APIs in FileContext for Hdfs; and renew and cancel
 * APIs in Hdfs.
 *
 * @throws UnsupportedFileSystemException
 * @throws IOException
 * @throws InterruptedException
 */
@SuppressWarnings({ "unchecked", "deprecation" })
@Test
public void testFcDelegationToken() throws UnsupportedFileSystemException, IOException, InterruptedException {
    FileContext fcHdfs = FileContext.getFileContext(cluster.getFileSystem().getUri());
    final AbstractFileSystem afs = fcHdfs.getDefaultFileSystem();
    final List<Token<?>> tokenList = afs.getDelegationTokens(UserGroupInformation.getCurrentUser().getUserName());
    ((Hdfs) afs).renewDelegationToken((Token<DelegationTokenIdentifier>) tokenList.get(0));
    ((Hdfs) afs).cancelDelegationToken((Token<? extends AbstractDelegationTokenIdentifier>) tokenList.get(0));
}

16 View Complete Implementation : TestMRCJCFileInputFormat.java
Copyright Apache License 2.0
Author : aliyun-beta
public void testMultiLevelInput() throws Exception {
    JobConf job = new JobConf(conf);
    job.setBoolean("dfs.replication.considerLoad", false);
    dfs = new MiniDFSCluster.Builder(job).racks(rack1).hosts(hosts1).build();
    dfs.waitActive();
    String namenode = (dfs.getFileSystem()).getUri().getHost() + ":" + (dfs.getFileSystem()).getUri().getPort();
    FileSystem fileSys = dfs.getFileSystem();
    if (!fileSys.mkdirs(dir1)) {
        throw new IOException("Mkdirs failed to create " + root.toString());
    }
    writeFile(job, file1, (short) 1, 1);
    writeFile(job, file2, (short) 1, 1);
    // split it using a CombinedFile input format
    DummyFileInputFormat inFormat = new DummyFileInputFormat();
    inFormat.setInputPaths(job, root);
    // By default, we don't allow multi-level/recursive inputs
    boolean exceptionThrown = false;
    try {
        InputSplit[] splits = inFormat.getSplits(job, 1);
    } catch (Exception e) {
        exceptionThrown = true;
    }
    replacedertTrue("Exception should be thrown by default for scanning a " + "directory with directories inside.", exceptionThrown);
    // Enable multi-level/recursive inputs
    job.setBoolean(FileInputFormat.INPUT_DIR_RECURSIVE, true);
    InputSplit[] splits = inFormat.getSplits(job, 1);
    assertEquals(splits.length, 2);
}

16 View Complete Implementation : TestDatamerge.java
Copyright Apache License 2.0
Author : aliyun-beta
public void testNestedJoin() throws Exception {
    // outer(inner(S1,...,Sn),outer(S1,...Sn))
    final int SOURCES = 3;
    final int ITEMS = (SOURCES + 1) * (SOURCES + 1);
    JobConf job = new JobConf();
    Path base = cluster.getFileSystem().makeQualified(new Path("/nested"));
    int[][] source = new int[SOURCES][];
    for (int i = 0; i < SOURCES; ++i) {
        source[i] = new int[ITEMS];
        for (int j = 0; j < ITEMS; ++j) {
            source[i][j] = (i + 2) * (j + 1);
        }
    }
    Path[] src = new Path[SOURCES];
    SequenceFile.Writer[] out = createWriters(base, job, SOURCES, src);
    IntWritable k = new IntWritable();
    for (int i = 0; i < SOURCES; ++i) {
        IntWritable v = new IntWritable();
        v.set(i);
        for (int j = 0; j < ITEMS; ++j) {
            k.set(source[i][j]);
            out[i].append(k, v);
        }
        out[i].close();
    }
    out = null;
    StringBuilder sb = new StringBuilder();
    sb.append("outer(inner(");
    for (int i = 0; i < SOURCES; ++i) {
        sb.append(CompositeInputFormat.compose(SequenceFileInputFormat.class, src[i].toString()));
        if (i + 1 != SOURCES)
            sb.append(",");
    }
    sb.append("),outer(");
    sb.append(CompositeInputFormat.compose(Fake_IF.class, "foobar"));
    sb.append(",");
    for (int i = 0; i < SOURCES; ++i) {
        sb.append(CompositeInputFormat.compose(SequenceFileInputFormat.class, src[i].toString()));
        sb.append(",");
    }
    sb.append(CompositeInputFormat.compose(Fake_IF.class, "raboof") + "))");
    job.set("mapreduce.join.expr", sb.toString());
    job.setInputFormat(CompositeInputFormat.class);
    Path outf = new Path(base, "out");
    FileOutputFormat.setOutputPath(job, outf);
    Fake_IF.setKeyClass(job, IntWritable.class);
    Fake_IF.setValClass(job, IntWritable.class);
    job.setMapperClass(IdentityMapper.class);
    job.setReducerClass(IdentityReducer.class);
    job.setNumReduceTasks(0);
    job.setOutputKeyClass(IntWritable.class);
    job.setOutputValueClass(TupleWritable.class);
    job.setOutputFormat(SequenceFileOutputFormat.class);
    JobClient.runJob(job);
    FileStatus[] outlist = cluster.getFileSystem().listStatus(outf, new Utils.OutputFileUtils.OutputFilesFilter());
    assertEquals(1, outlist.length);
    assertTrue(0 < outlist[0].getLen());
    SequenceFile.Reader r = new SequenceFile.Reader(cluster.getFileSystem(), outlist[0].getPath(), job);
    TupleWritable v = new TupleWritable();
    while (r.next(k, v)) {
        assertFalse(((TupleWritable) v.get(1)).has(0));
        assertFalse(((TupleWritable) v.get(1)).has(SOURCES + 1));
        boolean chk = true;
        int ki = k.get();
        for (int i = 2; i < SOURCES + 2; ++i) {
            if ((ki % i) == 0 && ki <= i * ITEMS) {
                assertEquals(i - 2, ((IntWritable) ((TupleWritable) v.get(1)).get((i - 1))).get());
            } else
                chk = false;
        }
        if (chk) {
            // present in all sources; chk inner
            assertTrue(v.has(0));
            for (int i = 0; i < SOURCES; ++i) assertTrue(((TupleWritable) v.get(0)).has(i));
        } else {
            // should not be present in inner join
            assertFalse(v.has(0));
        }
    }
    r.close();
    base.getFileSystem(job).delete(base, true);
}

16 View Complete Implementation : TestResolveHdfsSymlink.java
Copyright Apache License 2.0
Author : apache
/**
 * Tests delegation token APIs in FileContext for Hdfs; and renew and cancel
 * APIs in Hdfs.
 *
 * @throws UnsupportedFileSystemException
 * @throws IOException
 * @throws InterruptedException
 */
@SuppressWarnings({ "unchecked", "deprecation" })
@Test
public void testFcDelegationToken() throws UnsupportedFileSystemException, IOException, InterruptedException {
    FileContext fcHdfs = FileContext.getFileContext(cluster.getFileSystem().getUri());
    final AbstractFileSystem afs = fcHdfs.getDefaultFileSystem();
    final List<Token<?>> tokenList = afs.getDelegationTokens(UserGroupInformation.getCurrentUser().getUserName());
    ((Hdfs) afs).renewDelegationToken((Token<DelegationTokenIdentifier>) tokenList.get(0));
    ((Hdfs) afs).cancelDelegationToken((Token<? extends AbstractDelegationTokenIdentifier>) tokenList.get(0));
}

16 View Complete Implementation : TestCopyCommitter.java
Copyright Apache License 2.0
Author : aliyun-beta
@Before
public void createMetaFolder() {
    config.set(DistCpConstants.CONF_LABEL_META_FOLDER, "/meta");
    Path meta = new Path("/meta");
    try {
        cluster.getFileSystem().mkdirs(meta);
    } catch (IOException e) {
        LOG.error("Exception encountered while creating meta folder", e);
        replacedert.fail("Unable to create meta folder");
    }
}

16 View Complete Implementation : TestHistoryFileManager.java
Copyright Apache License 2.0
Author : aliyun-beta
@Test(expected = YarnRuntimeException.class)
public void testCreateDirsWithFileSystemNotBecomingAvailBeforeTimeout() throws Exception {
    dfsCluster.getFileSystem().setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
    Assert.assertTrue(dfsCluster.getFileSystem().isInSafeMode());
    final ControlledClock clock = new ControlledClock(new SystemClock());
    clock.setTime(1);
    new Thread() {

        @Override
        public void run() {
            try {
                Thread.sleep(500);
                clock.setTime(3000);
            } catch (Exception ex) {
                Assert.fail(ex.toString());
            }
        }
    }.start();
    testCreateHistoryDirs(dfsCluster.getConfiguration(0), clock);
}