org.apache.hadoop.fs.FileSystem.get() - Java examples

Here are examples of the Java API org.apache.hadoop.fs.FileSystem.get(), taken from open source projects. Together they cover the common overloads: get(Configuration), get(URI, Configuration), and get(URI, Configuration, String user).

155 Examples
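
For orientation, here is a minimal, self-contained sketch of the three FileSystem.get() overloads the examples below exercise. It is illustrative only: the HDFS URI and the user name are placeholders, not values taken from any example.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemGetDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Default filesystem, resolved from fs.defaultFS in core-site.xml.
        FileSystem defaultFs = FileSystem.get(conf);
        // Filesystem selected by an explicit URI (placeholder address).
        FileSystem hdfs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf);
        // Filesystem bound to a specific remote user name; this overload
        // also throws InterruptedException.
        FileSystem asAlice = FileSystem.get(URI.create("hdfs://namenode:8020"), conf, "alice");
        System.out.println(defaultFs.getUri());
        System.out.println(hdfs.exists(new Path("/")));
        System.out.println(asAlice.getUri());
    }
}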

Example: MultipleOutputs.java (Apache License 2.0; author: iVCE)
// By being synchronized, MultipleOutputTask can be used with a
// MultithreadedMapRunner.
private synchronized RecordWriter getRecordWriter(String namedOutput, String baseFileName, final Reporter reporter) throws IOException {
    RecordWriter writer = recordWriters.get(baseFileName);
    if (writer == null) {
        if (countersEnabled && reporter == null) {
            throw new IllegalArgumentException("Counters are enabled, Reporter cannot be NULL");
        }
        JobConf jobConf = new JobConf(conf);
        jobConf.set(InternalFileOutputFormat.CONFIG_NAMED_OUTPUT, namedOutput);
        FileSystem fs = FileSystem.get(conf);
        writer = outputFormat.getRecordWriter(fs, jobConf, baseFileName, reporter);
        if (countersEnabled) {
            if (reporter == null) {
                throw new IllegalArgumentException("Counters are enabled, Reporter cannot be NULL");
            }
            writer = new RecordWriterWithCounter(writer, baseFileName, reporter);
        }
        recordWriters.put(baseFileName, writer);
    }
    return writer;
}

Example: Grep.java (Apache License 2.0; author: facebookarchive)
public int run(String[] args) throws Exception {
    if (args.length < 3) {
        System.out.println("Grep <inDir> <outDir> <regex> [<group>]");
        ToolRunner.printGenericCommandUsage(System.out);
        return -1;
    }
    Path tempDir = new Path("grep-temp-" + Integer.toString(new Random().nextInt(Integer.MAX_VALUE)));
    JobConf grepJob = new JobConf(getConf(), Grep.class);
    try {
        grepJob.setJobName("grep-search");
        FileInputFormat.setInputPaths(grepJob, args[0]);
        grepJob.setMapperClass(RegexMapper.class);
        grepJob.set("mapred.mapper.regex", args[2]);
        if (args.length == 4)
            grepJob.set("mapred.mapper.regex.group", args[3]);
        grepJob.setCombinerClass(LongSumReducer.class);
        grepJob.setReducerClass(LongSumReducer.class);
        FileOutputFormat.setOutputPath(grepJob, tempDir);
        grepJob.setOutputFormat(SequenceFileOutputFormat.class);
        grepJob.setOutputKeyClass(Text.class);
        grepJob.setOutputValueClass(LongWritable.class);
        JobClient.runJob(grepJob);
        JobConf sortJob = new JobConf(Grep.class);
        sortJob.setJobName("grep-sort");
        FileInputFormat.setInputPaths(sortJob, tempDir);
        sortJob.setInputFormat(SequenceFileInputFormat.class);
        sortJob.setMapperClass(InverseMapper.class);
        // write a single file
        sortJob.setNumReduceTasks(1);
        FileOutputFormat.setOutputPath(sortJob, new Path(args[1]));
        // sort by decreasing freq
        sortJob.setOutputKeyComparatorClass(LongWritable.DecreasingComparator.class);
        JobClient.runJob(sortJob);
    } finally {
        FileSystem.get(grepJob).delete(tempDir, true);
    }
    return 0;
}
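
The finally block above works because JobConf extends Configuration, so FileSystem.get(grepJob) resolves the job's default filesystem before deleting the intermediate directory. A stripped-down sketch of the same cleanup idiom, with a placeholder path:

Configuration conf = new Configuration();
Path tempDir = new Path("grep-temp-example");  // placeholder intermediate dir
FileSystem fs = FileSystem.get(conf);
try {
    // ... run the job that writes under tempDir ...
} finally {
    fs.delete(tempDir, true);  // recursive delete; returns false if the path is absent
}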

Example: AdminHelper.java (Apache License 2.0; author: apache)
static DistributedFileSystem getDFS(URI uri, Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(uri, conf);
    if (!(fs instanceof DistributedFileSystem)) {
        throw new IllegalArgumentException("FileSystem " + fs.getUri() + " is not an HDFS file system");
    }
    return (DistributedFileSystem) fs;
}
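
Since FileSystem.get() returns whatever implementation the URI scheme maps to, the instanceof check above is how a safe downcast is guarded. A minimal sketch of the same pattern, inlined, with a placeholder URI:

Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf);
if (fs instanceof DistributedFileSystem) {
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    // HDFS-specific calls are now available, e.g. overall cluster capacity:
    System.out.println(dfs.getStatus().getCapacity());
}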

Example: TestChainMapReduce.java (Apache License 2.0; author: yncxcw)
private static boolean getFlag(JobConf conf, String flag) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    return fs.exists(new Path(getFlagDir(conf.getBoolean("localFS", true)), flag));
}

Example: TestMapOutputType.java (Apache License 2.0; author: yncxcw)
@Before
public void configure() throws Exception {
    Path testdir = new Path(TEST_DIR.getAbsolutePath());
    Path inDir = new Path(testdir, "in");
    Path outDir = new Path(testdir, "out");
    FileSystem fs = FileSystem.get(conf);
    fs.delete(testdir, true);
    conf.setInt(JobContext.IO_SORT_MB, 1);
    conf.setInputFormat(SequenceFileInputFormat.class);
    FileInputFormat.setInputPaths(conf, inDir);
    FileOutputFormat.setOutputPath(conf, outDir);
    conf.setMapperClass(TextGen.class);
    conf.setReducerClass(TextReduce.class);
    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(Text.class);
    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
    conf.setOutputFormat(SequenceFileOutputFormat.class);
    if (!fs.mkdirs(testdir)) {
        throw new IOException("Mkdirs failed to create " + testdir.toString());
    }
    if (!fs.mkdirs(inDir)) {
        throw new IOException("Mkdirs failed to create " + inDir.toString());
    }
    Path inFile = new Path(inDir, "part0");
    SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, inFile, Text.class, Text.class);
    writer.append(new Text("rec: 1"), new Text("Hello"));
    writer.close();
    jc = new JobClient(conf);
}
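
One aside on the writer: the SequenceFile.createWriter(fs, conf, ...) overload used here is deprecated in recent Hadoop releases in favor of the option-based factory. An equivalent call would look roughly like this:

SequenceFile.Writer writer = SequenceFile.createWriter(conf,
    SequenceFile.Writer.file(inFile),
    SequenceFile.Writer.keyClass(Text.class),
    SequenceFile.Writer.valueClass(Text.class));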

Example: AbfsDtFetcher.java (Apache License 2.0; author: apache)
/**
 *  Returns Token object via FileSystem, null if bad argument.
 *  @param conf - a Configuration object used with FileSystem.get()
 *  @param creds - a Credentials object to which token(s) will be added
 *  @param renewer  - the renewer to send with the token request
 *  @param url  - the URL to which the request is sent
 *  @return a Token, or null if fetch fails.
 */
public Token<?> addDelegationTokens(Configuration conf, Credentials creds, String renewer, String url) throws Exception {
    if (!url.startsWith(getServiceName().toString())) {
        url = getServiceName().toString() + "://" + url;
    }
    FileSystem fs = FileSystem.get(URI.create(url), conf);
    Token<?> token = fs.getDelegationToken(renewer);
    if (token == null) {
        throw new IOException(FETCH_FAILED + ": " + url);
    }
    creds.addToken(token.getService(), token);
    return token;
}
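
Outside a DtFetcher implementation, the same token fetch can be issued directly against any filesystem. A minimal sketch, assuming a placeholder URI and "yarn" as the renewer principal:

Configuration conf = new Configuration();
Credentials creds = new Credentials();
FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020"), conf);
Token<?> token = fs.getDelegationToken("yarn");  // may be null on insecure clusters
if (token != null) {
    creds.addToken(token.getService(), token);
}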

Example: ResourceMgrDelegate.java (Apache License 2.0; author: aliyun-beta)
public String getFilesystemName() throws IOException, InterruptedException {
    return FileSystem.get(conf).getUri().toString();
}

Example: ThreadedMapBenchmark.java (Apache License 2.0; author: madiator)
/**
 * Generate input data for the benchmark
 */
public static void generateInputData(int dataSizePerMap, int numSpillsPerMap, int numMapsPerHost, JobConf masterConf) throws Exception {
    JobConf job = new JobConf(masterConf, ThreadedMapBenchmark.class);
    job.setJobName("threaded-map-benchmark-random-writer");
    job.setJarByClass(ThreadedMapBenchmark.class);
    job.setInputFormat(UtilsForTests.RandomInputFormat.class);
    job.setOutputFormat(SequenceFileOutputFormat.class);
    job.setMapperClass(Map.class);
    job.setReducerClass(IdentityReducer.class);
    job.setOutputKeyClass(BytesWritable.class);
    job.setOutputValueClass(BytesWritable.class);
    JobClient client = new JobClient(job);
    ClusterStatus cluster = client.getClusterStatus();
    long totalDataSize = dataSizePerMap * numMapsPerHost * cluster.getTaskTrackers();
    job.set("test.tmb.bytes_per_map", String.valueOf(dataSizePerMap * 1024 * 1024));
    // none reduce
    job.setNumReduceTasks(0);
    job.setNumMapTasks(numMapsPerHost * cluster.getTaskTrackers());
    FileOutputFormat.setOutputPath(job, INPUT_DIR);
    FileSystem fs = FileSystem.get(job);
    fs.delete(BASE_DIR, true);
    LOG.info("Generating random input for the benchmark");
    LOG.info("Total data : " + totalDataSize + " mb");
    LOG.info("Data per map: " + dataSizePerMap + " mb");
    LOG.info("Number of spills : " + numSpillsPerMap);
    LOG.info("Number of maps per host : " + numMapsPerHost);
    LOG.info("Number of hosts : " + cluster.getTaskTrackers());
    // generates the input for the benchmark
    JobClient.runJob(job);
}

Example: TestChainMapReduce.java (Apache License 2.0; author: yncxcw)
private static void cleanFlags(JobConf conf) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    fs.delete(getFlagDir(conf.getBoolean("localFS", true)), true);
    fs.mkdirs(getFlagDir(conf.getBoolean("localFS", true)));
}

Example: TestComparators.java (Apache License 2.0; author: yncxcw)
@Before
public void configure() throws Exception {
    Path testdir = new Path(TEST_DIR.getAbsolutePath());
    Path inDir = new Path(testdir, "in");
    Path outDir = new Path(testdir, "out");
    FileSystem fs = FileSystem.get(conf);
    fs.delete(testdir, true);
    conf.setInputFormat(SequenceFileInputFormat.class);
    FileInputFormat.setInputPaths(conf, inDir);
    FileOutputFormat.setOutputPath(conf, outDir);
    conf.setOutputKeyClass(IntWritable.class);
    conf.setOutputValueClass(Text.class);
    conf.setMapOutputValueClass(IntWritable.class);
    // set up two map jobs, so we can test merge phase in Reduce also
    conf.setNumMapTasks(2);
    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
    conf.setOutputFormat(SequenceFileOutputFormat.class);
    if (!fs.mkdirs(testdir)) {
        throw new IOException("Mkdirs failed to create " + testdir.toString());
    }
    if (!fs.mkdirs(inDir)) {
        throw new IOException("Mkdirs failed to create " + inDir.toString());
    }
    // set up input data in 2 files
    Path inFile = new Path(inDir, "part0");
    SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, inFile, IntWritable.class, IntWritable.class);
    writer.append(new IntWritable(11), new IntWritable(999));
    writer.append(new IntWritable(23), new IntWritable(456));
    writer.append(new IntWritable(10), new IntWritable(780));
    writer.close();
    inFile = new Path(inDir, "part1");
    writer = SequenceFile.createWriter(fs, conf, inFile, IntWritable.class, IntWritable.class);
    writer.append(new IntWritable(45), new IntWritable(100));
    writer.append(new IntWritable(18), new IntWritable(200));
    writer.append(new IntWritable(27), new IntWritable(300));
    writer.close();
    jc = new JobClient(conf);
}

Example: AbstractFSContract.java (Apache License 2.0; author: apache)
/**
 * Get the FS from a URI. The default implementation just retrieves
 * it from the normal FileSystem factory/cache, with the local configuration
 * @param uri URI of FS
 * @return the filesystem
 * @throws IOException IO problems
 */
public FileSystem getFileSystem(URI uri) throws IOException {
    return FileSystem.get(uri, getConf());
}
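
As the comment notes, FileSystem.get() goes through a process-wide cache keyed by scheme, authority, and user, while FileSystem.newInstance() bypasses it. A small sketch of the difference, assuming the cache has not been disabled via fs.<scheme>.impl.disable.cache and using a placeholder URI:

Configuration conf = new Configuration();
URI uri = URI.create("hdfs://namenode:8020");
FileSystem shared1 = FileSystem.get(uri, conf);
FileSystem shared2 = FileSystem.get(uri, conf);
System.out.println(shared1 == shared2);  // true: the same cached instance
FileSystem privateFs = FileSystem.newInstance(uri, conf);
System.out.println(privateFs == shared1);  // false: a private instance
privateFs.close();  // closing it does not invalidate the shared one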

Example: TestStreaming.java (Apache License 2.0; author: yncxcw)
protected FileSystem getFileSystem() throws IOException {
    return FileSystem.get(getConf());
}

Example: DirectoryTraversal.java (Apache License 2.0; author: madiator)
public static DirectoryTraversal raidLeafDirectoryRetriever(final PolicyInfo info, List<Path> roots, Collection<PolicyInfo> allInfos, final Configuration conf, int numThreads, boolean doShuffle, boolean allowStandby) throws IOException {
    final RaidState.Checker checker = new RaidState.Checker(allInfos, conf);
    final FileSystem fs = FileSystem.get(conf);
    Filter filter = new Filter() {

        @Override
        public boolean check(FileStatus f) throws IOException {
            long now = RaidNode.now();
            if (!f.isDir()) {
                return false;
            }
            List<FileStatus> lfs = RaidNode.listDirectoryRaidFileStatus(conf, fs, f.getPath());
            RaidState state = checker.check(info, f, now, false, lfs);
            if (LOG.isDebugEnabled()) {
                LOG.debug(f.getPath() + " : " + state);
            }
            return state == RaidState.NOT_RAIDED_BUT_SHOULD;
        }
    };
    return new DirectoryTraversal("Raid File Retriever ", roots, fs, filter, numThreads, doShuffle, allowStandby, true);
}

Example: HdfsDtFetcher.java (Apache License 2.0; author: apache)
/**
 *  Returns Token object via FileSystem, null if bad argument.
 *  @param conf - a Configuration object used with FileSystem.get()
 *  @param creds - a Credentials object to which token(s) will be added
 *  @param renewer  - the renewer to send with the token request
 *  @param url  - the URL to which the request is sent
 *  @return a Token, or null if fetch fails.
 */
public Token<?> addDelegationTokens(Configuration conf, Credentials creds, String renewer, String url) throws Exception {
    if (!url.startsWith(getServiceName().toString())) {
        url = getServiceName().toString() + "://" + url;
    }
    FileSystem fs = FileSystem.get(URI.create(url), conf);
    Token<?> token = fs.getDelegationToken(renewer);
    if (token == null) {
        LOG.error(FETCH_FAILED);
        throw new IOException(FETCH_FAILED);
    }
    creds.addToken(token.getService(), token);
    return token;
}

Example: HadoopServer.java (Apache License 2.0; author: iVCE)
/*
   * Rewrite of the connecting and tunneling to the Hadoop location
   */
/**
 * Provides access to the default file system of this location.
 *
 * @return a {@link FileSystem}
 */
public FileSystem getDFS() throws IOException {
    return FileSystem.get(this.conf);
}

Example: TestNoJobSetupCleanup.java (Apache License 2.0; author: aliyun-beta)
private Job submitAndValidateJob(Configuration conf, int numMaps, int numReds) throws IOException, InterruptedException, ClassNotFoundException {
    Job job = MapReduceTestUtil.createJob(conf, inDir, outDir, numMaps, numReds);
    job.setJobSetupCleanupNeeded(false);
    job.setOutputFormatClass(MyOutputFormat.class);
    job.waitForCompletion(true);
    assertTrue(job.isSuccessful());
    assertTrue(job.getTaskReports(TaskType.JOB_SETUP).length == 0);
    assertTrue(job.getTaskReports(TaskType.JOB_CLEANUP).length == 0);
    assertTrue(job.getTaskReports(TaskType.MAP).length == numMaps);
    assertTrue(job.getTaskReports(TaskType.REDUCE).length == numReds);
    FileSystem fs = FileSystem.get(conf);
    assertTrue("Job output directory doesn't exist!", fs.exists(outDir));
    // job commit done only in cleanup
    // therefore output should still be in temp location
    String tempWorkingPathStr = outDir + Path.SEPARATOR + "_temporary" + Path.SEPARATOR + "0";
    Path tempWorkingPath = new Path(tempWorkingPathStr);
    FileStatus[] list = fs.listStatus(tempWorkingPath, new OutputFilter());
    int numPartFiles = numReds == 0 ? numMaps : numReds;
    assertTrue("Number of part-files is " + list.length + " and not " + numPartFiles, list.length == numPartFiles);
    return job;
}

Example: S3ADtFetcher.java (Apache License 2.0; author: apache)
/**
 *  Returns Token object via FileSystem, null if bad argument.
 *  @param conf - a Configuration object used with FileSystem.get()
 *  @param creds - a Credentials object to which token(s) will be added
 *  @param renewer  - the renewer to send with the token request
 *  @param url  - the URL to which the request is sent
 *  @return a Token, or null if fetch fails.
 */
public Token<?> addDelegationTokens(Configuration conf, Credentials creds, String renewer, String url) throws Exception {
    if (!url.startsWith(getServiceName().toString())) {
        url = getServiceName().toString() + "://" + url;
    }
    FileSystem fs = FileSystem.get(URI.create(url), conf);
    Token<?> token = fs.getDelegationToken(renewer);
    if (token == null) {
        throw new DelegationTokenIOException(FETCH_FAILED + ": " + url);
    }
    creds.addToken(token.getService(), token);
    return token;
}

Example: AdminHelper.java (Apache License 2.0; author: yncxcw)
static DistributedFileSystem getDFS(Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) {
        throw new IllegalArgumentException("FileSystem " + fs.getUri() + " is not an HDFS file system");
    }
    return (DistributedFileSystem) fs;
}

Example: AbstractS3GuardDynamoDBDiagnostic.java (Apache License 2.0; author: apache)
/**
 * Bind to the store from a CLI argument.
 * @param fsURI filesystem URI
 * @throws IOException failure
 */
protected void bindFromCLI(String fsURI) throws IOException {
    Configuration conf = getConfig();
    setUri(fsURI);
    FileSystem fs = FileSystem.get(getUri(), conf);
    require(fs instanceof S3AFileSystem, "Not an S3A Filesystem:  " + fsURI);
    filesystem = (S3AFileSystem) fs;
    bindStore(filesystem);
    setUri(fs.getUri());
}

Example: TestLeaseRecoveryStriped.java (Apache License 2.0; author: aliyun-beta)
private FileSystem getFSAsAnotherUser(final Configuration c) throws IOException, InterruptedException {
    return FileSystem.get(FileSystem.getDefaultUri(c), c, UserGroupInformation.createUserForTesting(fakeUsername, new String[] { fakeGroup }).getUserName());
}
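
The three-argument overload above runs the lookup as another user. The same effect can be sketched with UserGroupInformation.doAs, which proxies the whole call; the user and group names below are placeholders:

Configuration conf = new Configuration();
UserGroupInformation ugi = UserGroupInformation.createUserForTesting("fakeUser", new String[] { "fakeGroup" });
FileSystem fs = ugi.doAs((PrivilegedExceptionAction<FileSystem>) () -> FileSystem.get(FileSystem.getDefaultUri(conf), conf));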
