org.apache.hadoop.fs.FileSystem.getUri() - Java examples

Here are examples of the Java API org.apache.hadoop.fs.FileSystem.getUri() taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.

155 Examples
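Before the project examples, here is a minimal standalone sketch (not taken from any of the projects listed below) of how FileSystem.getUri() is typically used: obtain a FileSystem from a Configuration, read its URI, and qualify a relative Path against it. The class name, configuration, and paths are illustrative assumptions, not code from the projects below.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetUriSketch {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Resolve the default file system configured via fs.defaultFS
        // (falls back to the local file system when nothing is set).
        FileSystem fs = FileSystem.get(conf);

        // getUri() returns the URI identifying this FileSystem instance,
        // e.g. file:/// for the local FS or hdfs://namenode:8020 for HDFS.
        URI fsUri = fs.getUri();
        System.out.println("scheme    = " + fsUri.getScheme());
        System.out.println("authority = " + fsUri.getAuthority());

        // A pattern that recurs in the examples below: qualify a relative
        // path against the file system URI and its working directory.
        Path qualified = new Path("some/relative/path").makeQualified(fs.getUri(), fs.getWorkingDirectory());
        System.out.println("qualified = " + qualified);
    }
}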

19 View Complete Implementation : LocalJobRunner.java
Copyright Apache License 2.0
Author : apache
public String getFilesystemName() throws IOException {
    return fs.getUri().toString();
}

18 View Complete Implementation : CoreFileSystem.java
Copyright Apache License 2.0
Author : apache
@Override
public String toString() {
    final StringBuilder sb = new StringBuilder("CoreFileSystem{");
    sb.append("fileSystem=").append(fileSystem.getUri()).append('}');
    return sb.toString();
}

17 View Complete Implementation : ITestAbfsDelegationTokens.java
Copyright Apache License 2.0
Author : apache
protected String getDefaultServiceName(final FileSystem fs) {
    return SecurityUtil.buildDTServiceName(fs.getUri(), 0);
}

16 View Complete Implementation : TestMove.java
Copyright Apache License 2.0
Author : apache
@Test
public void testMoveTargetExistsWithoutExplicitRename() throws Exception {
    Path srcPath = new Path("mockfs:/file");
    Path targetPath = new Path("mockfs:/fold0");
    Path dupPath = new Path("mockfs:/fold0/file");
    Path srcPath2 = new Path("mockfs://user/file");
    Path targetPath2 = new Path("mockfs://user/fold0");
    Path dupPath2 = new Path("mockfs://user/fold0/file");
    InstrumentedRenameCommand cmd;
    String[] cmdargs = new String[] { "mockfs:/file", "mockfs:/fold0" };
    FileStatus src_fileStat, target_fileStat, dup_fileStat;
    URI myuri;
    src_fileStat = mock(FileStatus.class);
    target_fileStat = mock(FileStatus.class);
    dup_fileStat = mock(FileStatus.class);
    myuri = new URI("mockfs://user");
    when(src_fileStat.isDirectory()).thenReturn(false);
    when(target_fileStat.isDirectory()).thenReturn(true);
    when(dup_fileStat.isDirectory()).thenReturn(false);
    when(src_fileStat.getPath()).thenReturn(srcPath2);
    when(target_fileStat.getPath()).thenReturn(targetPath2);
    when(dup_fileStat.getPath()).thenReturn(dupPath2);
    when(mockFs.getFileStatus(eq(srcPath))).thenReturn(src_fileStat);
    when(mockFs.getFileStatus(eq(targetPath))).thenReturn(target_fileStat);
    when(mockFs.getFileStatus(eq(dupPath))).thenReturn(dup_fileStat);
    when(mockFs.getFileStatus(eq(srcPath2))).thenReturn(src_fileStat);
    when(mockFs.getFileStatus(eq(targetPath2))).thenReturn(target_fileStat);
    when(mockFs.getFileStatus(eq(dupPath2))).thenReturn(dup_fileStat);
    when(mockFs.getUri()).thenReturn(myuri);
    cmd = new InstrumentedRenameCommand();
    cmd.setConf(conf);
    cmd.setOverwrite(true);
    cmd.run(cmdargs);
    // make sure command failed with the proper exception
    replacedertTrue("Rename should have failed with path exists exception", cmd.error instanceof PathExistsException);
}

16 View Complete Implementation : AbstractFSContractTestBase.java
Copyright Apache License 2.0
Author : apache
/**
 * Setup: create the contract then init it.
 * @throws Exception on any failure
 */
@Before
public void setup() throws Exception {
    Thread.currentThread().setName("setup");
    LOG.debug("== Setup ==");
    contract = createContract(createConfiguration());
    contract.init();
    // skip tests if they aren't enabled
    assumeEnabled();
    // extract the test FS
    fileSystem = contract.getTestFileSystem();
    assertNotNull("null filesystem", fileSystem);
    URI fsURI = fileSystem.getUri();
    LOG.info("Test filesystem = {} implemented by {}", fsURI, fileSystem);
    // sanity check to make sure that the test FS picked up really matches
    // the scheme chosen. This is to avoid defaulting back to the localFS
    // which would be drastic for root FS tests
    replacedertEquals("wrong filesystem of " + fsURI, contract.getScheme(), fsURI.getScheme());
    // create the test path
    testPath = getContract().getTestPath();
    mkdirs(testPath);
    LOG.debug("== Setup complete ==");
}

16 View Complete Implementation : ITestAbfsDelegationTokens.java
Copyright Apache License 2.0
Author : apache
public void verifyCredentialsContainsToken(final Credentials credentials, FileSystem fs) throws IOException {
    verifyCredentialsContainsToken(credentials, fs.getCanonicalServiceName(), fs.getUri().toString());
}

16 View Complete Implementation : TestCopyFromLocal.java
Copyright Apache License 2.0
Author : apache
@BeforeClass
public static void init() throws Exception {
    conf = new Configuration(false);
    conf.set("fs.file.impl", LocalFileSystem.clreplaced.getName());
    fs = FileSystem.getLocal(conf);
    testDir = new FileSystemTestHelper().getTestRootPath(fs);
    // don't want scheme on the path, just an absolute path
    testDir = new Path(fs.makeQualified(testDir).toUri().getPath());
    FileSystem.setDefaultUri(conf, fs.getUri());
    fs.setWorkingDirectory(testDir);
}

16 View Complete Implementation : TokenCache.java
Copyright Apache License 2.0
Author : apache
static boolean isTokenRenewalExcluded(FileSystem fs, Configuration conf) {
    String[] nns = conf.getStrings(MRJobConfig.JOB_NAMENODES_TOKEN_RENEWAL_EXCLUDE);
    if (nns != null) {
        String host = fs.getUri().getHost();
        for (int i = 0; i < nns.length; i++) {
            if (nns[i].equals(host)) {
                return true;
            }
        }
    }
    return false;
}

16 View Complete Implementation : AbstractS3GuardDynamoDBDiagnostic.java
Copyright Apache License 2.0
Author : apache
/**
 * Bind to the store from a CLI argument.
 * @param fsURI filesystem URI
 * @throws IOException failure
 */
protected void bindFromCLI(String fsURI) throws IOException {
    Configuration conf = getConfig();
    setUri(fsURI);
    FileSystem fs = FileSystem.get(getUri(), conf);
    require(fs instanceof S3AFileSystem, "Not an S3A Filesystem:  " + fsURI);
    filesystem = (S3AFileSystem) fs;
    bindStore(filesystem);
    setUri(fs.getUri());
}

15 View Complete Implementation : TestDistCpViewFs.java
Copyright Apache License 2.0
Author : apache
@Test
public void testUpdateGlobTargetMissingSingleLevel() throws IOException {
    try {
        Path listFile = new Path("target/tmp1/listing").makeQualified(fs.getUri(), fs.getWorkingDirectory());
        addEntries(listFile, "*");
        createFiles("multifile/file3", "multifile/file4", "multifile/file5");
        createFiles("singledir/dir2/file6");
        runTest(listFile, target, false, true);
        checkResult(target, 4, "file3", "file4", "file5", "dir2/file6");
    } finally {
        TestDistCpUtils.delete(fs, root);
        TestDistCpUtils.delete(fs, "target/tmp1");
    }
}

15 View Complete Implementation : TestDistCpViewFs.java
Copyright Apache License 2.0
Author : apache
@Test
public void testUpdateGlobTargetMissingMultiLevel() throws IOException {
    try {
        Path listFile = new Path("target/tmp1/listing").makeQualified(fs.getUri(), fs.getWorkingDirectory());
        addEntries(listFile, "*/*");
        createFiles("multifile/file3", "multifile/file4", "multifile/file5");
        createFiles("singledir1/dir3/file7", "singledir1/dir3/file8", "singledir1/dir3/file9");
        runTest(listFile, target, false, true);
        checkResult(target, 6, "file3", "file4", "file5", "file7", "file8", "file9");
    } finally {
        TestDistCpUtils.delete(fs, root);
        TestDistCpUtils.delete(fs, "target/tmp1");
    }
}

15 View Complete Implementation : TestDistCpViewFs.java
Copyright Apache License 2.0
Author : apache
@Test
public void testGlobTargetMissingSingleLevel() throws IOException {
    try {
        Path listFile = new Path("target/tmp1/listing").makeQualified(fs.getUri(), fs.getWorkingDirectory());
        addEntries(listFile, "*");
        createFiles("multifile/file3", "multifile/file4", "multifile/file5");
        createFiles("singledir/dir2/file6");
        runTest(listFile, target, false, false);
        checkResult(target, 2, "multifile/file3", "multifile/file4", "multifile/file5", "singledir/dir2/file6");
    } finally {
        TestDistCpUtils.delete(fs, root);
        TestDistCpUtils.delete(fs, "target/tmp1");
    }
}

15 View Complete Implementation : TestDistCpViewFs.java
Copyright Apache License 2.0
Author : apache
@Test
public void testGlobTargetMissingMultiLevel() throws IOException {
    try {
        Path listFile = new Path("target/tmp1/listing").makeQualified(fs.getUri(), fs.getWorkingDirectory());
        addEntries(listFile, "*/*");
        createFiles("multifile/file3", "multifile/file4", "multifile/file5");
        createFiles("singledir1/dir3/file7", "singledir1/dir3/file8", "singledir1/dir3/file9");
        runTest(listFile, target, false, false);
        checkResult(target, 4, "file3", "file4", "file5", "dir3/file7", "dir3/file8", "dir3/file9");
    } finally {
        TestDistCpUtils.delete(fs, root);
        TestDistCpUtils.delete(fs, "target/tmp1");
    }
}

15 View Complete Implementation : TestChRootedFileSystem.java
Copyright Apache License 2.0
Author : apache
@Test
public void testURI() {
    URI uri = fSys.getUri();
    Assert.assertEquals(chrootedTo.toUri(), uri);
}

15 View Complete Implementation : TestPseudoLocalFs.java
Copyright Apache License 2.0
Author : apache
/**
 *  Test Pseudo Local File System methods like getFileStatus(), create(),
 *  open(), exists() for <li> valid file paths and <li> invalid file paths.
 * @throws IOException
 */
@Test
public void testPseudoLocalFsFileNames() throws IOException {
    PseudoLocalFs pfs = new PseudoLocalFs();
    Configuration conf = new Configuration();
    conf.setClass("fs.pseudo.impl", PseudoLocalFs.class, FileSystem.class);
    Path path = new Path("pseudo:///myPsedoFile.1234");
    FileSystem testFs = path.getFileSystem(conf);
    replacedertEquals("Failed to obtain a pseudo local file system object from path", pfs.getUri().getScheme(), testFs.getUri().getScheme());
    // Validate PseudoLocalFS operations on URI of some other file system
    path = new Path("file:///myPsedoFile.12345");
    validateGetFileStatus(pfs, path, false);
    validateCreate(pfs, path, false);
    validateOpen(pfs, path, false);
    validateExists(pfs, path, false);
    // .<fileSize> missing
    path = new Path("pseudo:///myPsedoFile");
    validateGetFileStatus(pfs, path, false);
    validateCreate(pfs, path, false);
    validateOpen(pfs, path, false);
    validateExists(pfs, path, false);
    // thing after final '.' is not a number
    path = new Path("pseudo:///myPsedoFile.txt");
    validateGetFileStatus(pfs, path, false);
    validateCreate(pfs, path, false);
    validateOpen(pfs, path, false);
    validateExists(pfs, path, false);
    // Generate valid file name(relative path) and validate operations on it
    long fileSize = 231456;
    path = PseudoLocalFs.generateFilePath("my.Psedo.File", fileSize);
    // Validate the above generateFilePath()
    replacedertEquals("generateFilePath() failed.", fileSize, pfs.validateFileNameFormat(path));
    validateGetFileStatus(pfs, path, true);
    validateCreate(pfs, path, true);
    validateOpen(pfs, path, true);
    validateExists(pfs, path, true);
    // Validate operations on valid qualified path
    path = new Path("myPsedoFile.1237");
    path = pfs.makeQualified(path);
    validateGetFileStatus(pfs, path, true);
    validateCreate(pfs, path, true);
    validateOpen(pfs, path, true);
    validateExists(pfs, path, true);
}

14 View Complete Implementation : LocalMetadataStore.java
Copyright Apache License 2.0
Author : apache
@Override
public void initialize(FileSystem fileSystem, ITtlTimeProvider ttlTp) throws IOException {
    Preconditions.checkNotNull(fileSystem);
    fs = fileSystem;
    URI fsURI = fs.getUri();
    uriHost = fsURI.getHost();
    if (uriHost != null && uriHost.equals("")) {
        uriHost = null;
    }
    initialize(fs.getConf(), ttlTp);
}

14 View Complete Implementation : TestIntegration.java
Copyright Apache License 2.0
Author : apache
@Test(timeout = 100000)
public void testUpdateGlobTargetMissingSingleLevel() {
    try {
        Path listFile = new Path("target/tmp1/listing").makeQualified(fs.getUri(), fs.getWorkingDirectory());
        addEntries(listFile, "*");
        createFiles("multifile/file3", "multifile/file4", "multifile/file5");
        createFiles("singledir/dir2/file6");
        runTest(listFile, target, false, true);
        checkResult(target, 4, "file3", "file4", "file5", "dir2/file6");
    } catch (IOException e) {
        LOG.error("Exception encountered while running distcp", e);
        Assert.fail("distcp failure");
    } finally {
        TestDistCpUtils.delete(fs, root);
        TestDistCpUtils.delete(fs, "target/tmp1");
    }
}

14 View Complete Implementation : TestBlockGen.java
Copyright Apache License 2.0
Author : apache
@Before
public void setup() throws Exception {
    Configuration conf = new Configuration();
    dfsCluster = new MiniDFSCluster.Builder(conf).build();
    dfsCluster.waitActive();
    LOG.info("Started MiniDFSCluster");
    fs = dfsCluster.getFileSystem();
    FileSystem.setDefaultUri(conf, fs.getUri());
    tmpPath = fs.makeQualified(new Path("/tmp"));
    fs.mkdirs(tmpPath);
    String fsImageFile = this.getClass().getClassLoader().getResource(FS_IMAGE_NAME).getPath();
    fs.copyFromLocalFile(new Path(fsImageFile), new Path(tmpPath, FS_IMAGE_NAME));
}

14 View Complete Implementation : TestIntegration.java
Copyright Apache License 2.0
Author : apache
@Test(timeout = 100000)
public void testUpdateGlobTargetMissingMultiLevel() {
    try {
        Path listFile = new Path("target/tmp1/listing").makeQualified(fs.getUri(), fs.getWorkingDirectory());
        addEntries(listFile, "*/*");
        createFiles("multifile/file3", "multifile/file4", "multifile/file5");
        createFiles("singledir1/dir3/file7", "singledir1/dir3/file8", "singledir1/dir3/file9");
        runTest(listFile, target, false, true);
        checkResult(target, 6, "file3", "file4", "file5", "file7", "file8", "file9");
    } catch (IOException e) {
        LOG.error("Exception encountered while running distcp", e);
        Assert.fail("distcp failure");
    } finally {
        TestDistCpUtils.delete(fs, root);
        TestDistCpUtils.delete(fs, "target/tmp1");
    }
}

14 View Complete Implementation : TestPathData.java
Copyright Apache License 2.0
Author : apache
@Before
public void initialize() throws Exception {
    conf = new Configuration();
    fs = FileSystem.getLocal(conf);
    testDir = new Path(TEST_ROOT_DIR);
    // don't want scheme on the path, just an absolute path
    testDir = new Path(fs.makeQualified(testDir).toUri().getPath());
    fs.mkdirs(testDir);
    FileSystem.setDefaultUri(conf, fs.getUri());
    fs.setWorkingDirectory(testDir);
    fs.mkdirs(new Path("d1"));
    fs.createNewFile(new Path("d1", "f1"));
    fs.createNewFile(new Path("d1", "f1.1"));
    fs.createNewFile(new Path("d1", "f2"));
    fs.mkdirs(new Path("d2"));
    fs.create(new Path("d2", "f3"));
}

14 View Complete Implementation : SimpleCopyListing.java
Copyright Apache License 2.0
Author : apache
private Path makeQualified(Path path) throws IOException {
    final FileSystem fs = path.getFileSystem(getConf());
    return path.makeQualified(fs.getUri(), fs.getWorkingDirectory());
}

14 View Complete Implementation : TestIntegration.java
Copyright Apache License 2.0
Author : apache
@Test(timeout = 100000)
public void testGlobTargetMissingSingleLevel() {
    try {
        Path listFile = new Path("target/tmp1/listing").makeQualified(fs.getUri(), fs.getWorkingDirectory());
        addEntries(listFile, "*");
        createFiles("multifile/file3", "multifile/file4", "multifile/file5");
        createFiles("singledir/dir2/file6");
        runTest(listFile, target, false, false);
        checkResult(target, 2, "multifile/file3", "multifile/file4", "multifile/file5", "singledir/dir2/file6");
    } catch (IOException e) {
        LOG.error("Exception encountered while testing distcp", e);
        Assert.fail("distcp failure");
    } finally {
        TestDistCpUtils.delete(fs, root);
        TestDistCpUtils.delete(fs, "target/tmp1");
    }
}

14 View Complete Implementation : TestIntegration.java
Copyright Apache License 2.0
Author : apache
@Test(timeout = 100000)
public void testGlobTargetMissingMultiLevel() {
    try {
        Path listFile = new Path("target/tmp1/listing").makeQualified(fs.getUri(), fs.getWorkingDirectory());
        addEntries(listFile, "*/*");
        createFiles("multifile/file3", "multifile/file4", "multifile/file5");
        createFiles("singledir1/dir3/file7", "singledir1/dir3/file8", "singledir1/dir3/file9");
        runTest(listFile, target, false, false);
        checkResult(target, 4, "file3", "file4", "file5", "dir3/file7", "dir3/file8", "dir3/file9");
    } catch (IOException e) {
        LOG.error("Exception encountered while running distcp", e);
        Assert.fail("distcp failure");
    } finally {
        TestDistCpUtils.delete(fs, root);
        TestDistCpUtils.delete(fs, "target/tmp1");
    }
}

13 View Complete Implementation : TestHadoopArchives.java
Copyright Apache License 2.0
Author : yncxcw
/*
   * Run the HadoopArchives tool to create an archive on the 
   * given file system.
   */
private String makeArchive(Path parentPath, String relGlob) throws Exception {
    final String parentPathStr = parentPath.toUri().getPath();
    final String relPathGlob = relGlob == null ? "*" : relGlob;
    System.out.println("parentPathStr = " + parentPathStr);
    final URI uri = fs.getUri();
    final String prefix = "har://hdfs-" + uri.getHost() + ":" + uri.getPort() + archivePath.toUri().getPath() + Path.SEPARATOR;
    final String harName = "foo.har";
    final String fullHarPathStr = prefix + harName;
    final String[] args = { "-archiveName", harName, "-p", parentPathStr, relPathGlob, archivePath.toString() };
    System.setProperty(HadoopArchives.TEST_HADOOP_ARCHIVES_JAR_PATH, HADOOP_ARCHIVES_JAR);
    final HadoopArchives har = new HadoopArchives(conf);
    assertEquals(0, ToolRunner.run(har, args));
    return fullHarPathStr;
}

13 View Complete Implementation : TestIntegration.java
Copyright Apache License 2.0
Author : yncxcw
@Test(timeout = 100000)
public void testGlobTargetMissingSingleLevel() {
    try {
        Path listFile = new Path("target/tmp1/listing").makeQualified(fs.getUri(), fs.getWorkingDirectory());
        addEntries(listFile, "*");
        createFiles("multifile/file3", "multifile/file4", "multifile/file5");
        createFiles("singledir/dir2/file6");
        runTest(listFile, target, false, false);
        checkResult(target, 2, "multifile/file3", "multifile/file4", "multifile/file5", "singledir/dir2/file6");
    } catch (IOException e) {
        LOG.error("Exception encountered while testing distcp", e);
        Assert.fail("distcp failure");
    } finally {
        TestDistCpUtils.delete(fs, root);
        TestDistCpUtils.delete(fs, "target/tmp1");
    }
}

13 View Complete Implementation : TestFileSystemCaching.java
Copyright Apache License 2.0
Author : yncxcw
@Test
public void testDefaultFsUris() throws Exception {
    final Configuration conf = new Configuration();
    conf.set("fs.defaultfs.impl", DefaultFs.clreplaced.getName());
    final URI defaultUri = URI.create("defaultfs://host");
    FileSystem.setDefaultUri(conf, defaultUri);
    FileSystem fs = null;
    // sanity check default fs
    final FileSystem defaultFs = FileSystem.get(conf);
    assertEquals(defaultUri, defaultFs.getUri());
    // has scheme, no auth
    fs = FileSystem.get(URI.create("defaultfs:/"), conf);
    assertSame(defaultFs, fs);
    fs = FileSystem.get(URI.create("defaultfs:///"), conf);
    assertSame(defaultFs, fs);
    // has scheme, same auth
    fs = FileSystem.get(URI.create("defaultfs://host"), conf);
    assertSame(defaultFs, fs);
    // has scheme, different auth
    fs = FileSystem.get(URI.create("defaultfs://host2"), conf);
    assertNotSame(defaultFs, fs);
    // no scheme, no auth
    fs = FileSystem.get(URI.create("/"), conf);
    assertSame(defaultFs, fs);
    // no scheme, same auth
    try {
        fs = FileSystem.get(URI.create("//host"), conf);
        fail("got fs with auth but no scheme");
    } catch (Exception e) {
        replacedertEquals("No FileSystem for scheme: null", e.getMessage());
    }
    // no scheme, different auth
    try {
        fs = FileSystem.get(URI.create("//host2"), conf);
        fail("got fs with auth but no scheme");
    } catch (Exception e) {
        replacedertEquals("No FileSystem for scheme: null", e.getMessage());
    }
}

13 View Complete Implementation : TestDistCpViewFs.java
Copyright Apache License 2.0
Author : yncxcw
@Test
public void testUpdateGlobTargetMissingSingleLevel() {
    try {
        Path listFile = new Path("target/tmp1/listing").makeQualified(fs.getUri(), fs.getWorkingDirectory());
        addEntries(listFile, "*");
        createFiles("multifile/file3", "multifile/file4", "multifile/file5");
        createFiles("singledir/dir2/file6");
        runTest(listFile, target, false, true);
        checkResult(target, 4, "file3", "file4", "file5", "dir2/file6");
    } catch (IOException e) {
        LOG.error("Exception encountered while running distcp", e);
        Assert.fail("distcp failure");
    } finally {
        TestDistCpUtils.delete(fs, root);
        TestDistCpUtils.delete(fs, "target/tmp1");
    }
}

13 View Complete Implementation : TestDistCpViewFs.java
Copyright Apache License 2.0
Author : yncxcw
@Test
public void testGlobTargetMissingMultiLevel() {
    try {
        Path listFile = new Path("target/tmp1/listing").makeQualified(fs.getUri(), fs.getWorkingDirectory());
        addEntries(listFile, "*/*");
        createFiles("multifile/file3", "multifile/file4", "multifile/file5");
        createFiles("singledir1/dir3/file7", "singledir1/dir3/file8", "singledir1/dir3/file9");
        runTest(listFile, target, false, false);
        checkResult(target, 4, "file3", "file4", "file5", "dir3/file7", "dir3/file8", "dir3/file9");
    } catch (IOException e) {
        LOG.error("Exception encountered while running distcp", e);
        Assert.fail("distcp failure");
    } finally {
        TestDistCpUtils.delete(fs, root);
        TestDistCpUtils.delete(fs, "target/tmp1");
    }
}

13 View Complete Implementation : TestIntegration.java
Copyright Apache License 2.0
Author : yncxcw
@Test(timeout = 100000)
public void testGlobTargetMissingMultiLevel() {
    try {
        Path listFile = new Path("target/tmp1/listing").makeQualified(fs.getUri(), fs.getWorkingDirectory());
        addEntries(listFile, "*/*");
        createFiles("multifile/file3", "multifile/file4", "multifile/file5");
        createFiles("singledir1/dir3/file7", "singledir1/dir3/file8", "singledir1/dir3/file9");
        runTest(listFile, target, false, false);
        checkResult(target, 4, "file3", "file4", "file5", "dir3/file7", "dir3/file8", "dir3/file9");
    } catch (IOException e) {
        LOG.error("Exception encountered while running distcp", e);
        Assert.fail("distcp failure");
    } finally {
        TestDistCpUtils.delete(fs, root);
        TestDistCpUtils.delete(fs, "target/tmp1");
    }
}

13 View Complete Implementation : TestHadoopArchives.java
Copyright Apache License 2.0
Author : apache
/*
   * Run the HadoopArchives tool to create an archive on the 
   * given file system.
   */
private String makeArchive(Path parentPath, String relGlob) throws Exception {
    final String parentPathStr = parentPath.toUri().getPath();
    final String relPathGlob = relGlob == null ? "*" : relGlob;
    System.out.println("parentPathStr = " + parentPathStr);
    final URI uri = fs.getUri();
    final String prefix = "har://hdfs-" + uri.getHost() + ":" + uri.getPort() + archivePath.toUri().getPath() + Path.SEPARATOR;
    final String harName = "foo.har";
    final String fullHarPathStr = prefix + harName;
    final String[] args = { "-archiveName", harName, "-p", parentPathStr, relPathGlob, archivePath.toString() };
    System.setProperty(HadoopArchives.TEST_HADOOP_ARCHIVES_JAR_PATH, HADOOP_ARCHIVES_JAR);
    final HadoopArchives har = new HadoopArchives(conf);
    assertEquals(0, ToolRunner.run(har, args));
    return fullHarPathStr;
}

12 View Complete Implementation : Nfs3Utils.java
Copyright Apache License 2.0
Author : apache
public static URI getResolvedURI(FileSystem fs, String exportPath) throws IOException {
    URI fsURI = fs.getUri();
    String scheme = fs.getScheme();
    if (scheme.equalsIgnoreCase(FsConstants.VIEWFS_SCHEME)) {
        ViewFileSystem viewFs = (ViewFileSystem) fs;
        ViewFileSystem.MountPoint[] mountPoints = viewFs.getMountPoints();
        for (ViewFileSystem.MountPoint mount : mountPoints) {
            String mountedPath = mount.getMountedOnPath().toString();
            if (exportPath.startsWith(mountedPath)) {
                String subpath = exportPath.substring(mountedPath.length());
                fsURI = mount.getTargetFileSystemURIs()[0].resolve(subpath);
                break;
            }
        }
    } else if (scheme.equalsIgnoreCase(HdfsConstants.HDFS_URI_SCHEME)) {
        fsURI = fsURI.resolve(exportPath);
    }
    if (!fsURI.getScheme().equalsIgnoreCase(HdfsConstants.HDFS_URI_SCHEME)) {
        throw new FileSystemException("Only HDFS is supported as underlying" + "FileSystem, fs scheme:" + scheme + " uri to be added" + fsURI);
    }
    return fsURI;
}

12 View Complete Implementation : TestHDFSTrash.java
Copyright Apache License 2.0
Author : apache
@Test
public void testNonDefaultFS() throws IOException {
    FileSystem fileSystem = cluster.getFileSystem();
    Configuration config = fileSystem.getConf();
    config.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, fileSystem.getUri().toString());
    TestTrash.trashNonDefaultFS(config);
}

12 View Complete Implementation : TestJobResourceUploaderWithSharedCache.java
Copyright Apache License 2.0
Author : apache
private JobConf createJobConf() {
    JobConf jobConf = new JobConf();
    jobConf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
    jobConf.setBoolean(YarnConfiguration.SHARED_CACHE_ENABLED, true);
    jobConf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, remoteFs.getUri().toString());
    return jobConf;
}

11 View Complete Implementation : TestDistCacheEmulation.java
Copyright Apache License 2.0
Author : yncxcw
/**
 * Validate setupGenerateDistCacheData by validating <li>permissions of the
 * distributed cache directory and <li>content of the generated sequence file.
 * This includes validation of dist cache file paths and their file sizes.
 */
private void doValidateSetupGenDC(RecordReader<LongWritable, BytesWritable> reader, FileSystem fs, long[] sortedFileSizes) throws IOException, InterruptedException {
    // Validate permissions of dist cache directory
    Path distCacheDir = dce.getDistributedCacheDir();
    replacedertEquals("Wrong permissions for distributed cache dir " + distCacheDir, fs.getFileStatus(distCacheDir).getPermission().getOtherAction().and(FsAction.EXECUTE), FsAction.EXECUTE);
    // Validate the content of the sequence file generated by
    // dce.setupGenerateDistCacheData().
    LongWritable key = new LongWritable();
    BytesWritable val = new BytesWritable();
    for (int i = 0; i < sortedFileSizes.length; i++) {
        replacedertTrue("Number of files written to the sequence file by " + "setupGenerateDistCacheData is less than the expected.", reader.nextKeyValue());
        key = reader.getCurrentKey();
        val = reader.getCurrentValue();
        long fileSize = key.get();
        String file = new String(val.getBytes(), 0, val.getLength());
        // Dist Cache files should be sorted based on file size.
        replacedertEquals("Dist cache file size is wrong.", sortedFileSizes[i], fileSize);
        // Validate dist cache file path.
        // parent dir of dist cache file
        Path parent = new Path(file).getParent().makeQualified(fs.getUri(), fs.getWorkingDirectory());
        // should exist in dist cache dir
        replacedertTrue("Public dist cache file path is wrong.", distCacheDir.equals(parent));
    }
}

11 View Complete Implementation : TestMRJobClient.java
Copyright Apache License 2.0
Author : yncxcw
/**
 * print job history from file
 */
private void testJobHistory(Configuration conf) throws Exception {
    CLI jc = createJobClient();
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    File f = new File("src/test/resources/job_1329348432655_0001-10.jhist");
    FileSystem localFs = FileSystem.getLocal(conf);
    String historyFileUri = new Path(f.getAbsolutePath()).makeQualified(localFs.getUri(), localFs.getWorkingDirectory()).toUri().toString();
    // bad command
    int exitCode = runTool(conf, jc, new String[] { "-history", "pul", historyFileUri }, out);
    replacedertEquals("Exit code", -1, exitCode);
    exitCode = runTool(conf, jc, new String[] { "-history", "all", historyFileUri }, out);
    replacedertEquals("Exit code", 0, exitCode);
    String line;
    BufferedReader br = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(out.toByteArray())));
    int counter = 0;
    while ((line = br.readLine()) != null) {
        LOG.info("line = " + line);
        if (line.startsWith("task_")) {
            counter++;
        }
    }
    assertEquals(23, counter);
}

11 View Complete Implementation : TestCopyFiles.java
Copyright Apache License 2.0
Author : yncxcw
/**
 * copy files from local file system to dfs file system
 */
public void testCopyFromLocalToDfs() throws Exception {
    MiniDFSCluster cluster = null;
    try {
        Configuration conf = new Configuration();
        cluster = new MiniDFSCluster.Builder(conf).build();
        final FileSystem hdfs = cluster.getFileSystem();
        final String namenode = hdfs.getUri().toString();
        if (namenode.startsWith("hdfs://")) {
            MyFile[] files = createFiles(LOCAL_FS, TEST_ROOT_DIR + "/srcdat");
            ToolRunner.run(new DistCpV1(conf), new String[] { "-log", namenode + "/logs", "file:///" + TEST_ROOT_DIR + "/srcdat", namenode + "/destdat" });
            replacedertTrue("Source and destination directories do not match.", checkFiles(cluster.getFileSystem(), "/destdat", files));
            replacedertTrue("Log directory does not exist.", hdfs.exists(new Path(namenode + "/logs")));
            deldir(hdfs, "/destdat");
            deldir(hdfs, "/logs");
            deldir(FileSystem.get(LOCAL_FS, conf), TEST_ROOT_DIR + "/srcdat");
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

11 View Complete Implementation : TestFileStatus.java
Copyright Apache License 2.0
Author : yncxcw
/**
 * Test the FileStatus obtained calling getFileStatus on a file
 */
@Test
public void testGetFileStatusOnFile() throws Exception {
    checkFile(fs, file1, 1);
    // test getFileStatus on a file
    FileStatus status = fs.getFileStatus(file1);
    assertFalse(file1 + " should be a file", status.isDirectory());
    assertEquals(blockSize, status.getBlockSize());
    assertEquals(1, status.getReplication());
    assertEquals(fileSize, status.getLen());
    assertEquals(file1.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString(), status.getPath().toString());
}

11 View Complete Implementation : TestChRootedFileSystem.java
Copyright Apache License 2.0
Author : yncxcw
@Test
public void testBasicPaths() {
    URI uri = fSys.getUri();
    Assert.assertEquals(chrootedTo.toUri(), uri);
    Assert.assertEquals(fSys.makeQualified(new Path(System.getProperty("user.home"))), fSys.getWorkingDirectory());
    Assert.assertEquals(fSys.makeQualified(new Path(System.getProperty("user.home"))), fSys.getHomeDirectory());
    /*
     * ChRootedFs as its uri like file:///chrootRoot.
     * This is questionable since path.makequalified(uri, path) ignores
     * the pathPart of a uri. So our notion of chrooted URI is questionable.
     * But if we were to fix Path#makeQualified() then  the next test should
     *  have been:

    Assert.assertEquals(
        new Path(chrootedTo + "/foo/bar").makeQualified(
            FsConstants.LOCAL_FS_URI, null),
        fSys.makeQualified(new Path( "/foo/bar")));
    */
    Assert.assertEquals(new Path("/foo/bar").makeQualified(FsConstants.LOCAL_FS_URI, null), fSys.makeQualified(new Path("/foo/bar")));
}

11 View Complete Implementation : TestFileStatus.java
Copyright Apache License 2.0
Author : yncxcw
/**
 * Test the FileStatus obtained calling listStatus on a file
 */
@Test
public void testListStatusOnFile() throws IOException {
    FileStatus[] stats = fs.listStatus(file1);
    assertEquals(1, stats.length);
    FileStatus status = stats[0];
    assertFalse(file1 + " should be a file", status.isDirectory());
    assertEquals(blockSize, status.getBlockSize());
    assertEquals(1, status.getReplication());
    assertEquals(fileSize, status.getLen());
    assertEquals(file1.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString(), status.getPath().toString());
    RemoteIterator<FileStatus> itor = fc.listStatus(file1);
    status = itor.next();
    assertEquals(stats[0], status);
    assertFalse(file1 + " should be a file", status.isDirectory());
}

11 View Complete Implementation : TestChRootedFileSystem.java
Copyright Apache License 2.0
Author : apache
@Test
public void testBasicPaths() {
    URI uri = fSys.getUri();
    Assert.assertEquals(chrootedTo.toUri(), uri);
    Assert.assertEquals(fSys.makeQualified(new Path(System.getProperty("user.home"))), fSys.getWorkingDirectory());
    Assert.assertEquals(fSys.makeQualified(new Path(System.getProperty("user.home"))), fSys.getHomeDirectory());
    /*
     * ChRootedFs as its uri like file:///chrootRoot.
     * This is questionable since path.makequalified(uri, path) ignores
     * the pathPart of a uri. So our notion of chrooted URI is questionable.
     * But if we were to fix Path#makeQualified() then  the next test should
     *  have been:

    Assert.assertEquals(
        new Path(chrootedTo + "/foo/bar").makeQualified(
            FsConstants.LOCAL_FS_URI, null),
        fSys.makeQualified(new Path( "/foo/bar")));
    */
    Assert.assertEquals(new Path("/foo/bar").makeQualified(FsConstants.LOCAL_FS_URI, null), fSys.makeQualified(new Path("/foo/bar")));
}

11 View Complete Implementation : TestChRootedFileSystem.java
Copyright Apache License 2.0
Author : aliyun-beta
@Test
public void testBasicPaths() {
    URI uri = fSys.getUri();
    Assert.assertEquals(chrootedTo.toUri(), uri);
    Assert.assertEquals(fSys.makeQualified(new Path(System.getProperty("user.home"))), fSys.getWorkingDirectory());
    Assert.assertEquals(fSys.makeQualified(new Path(System.getProperty("user.home"))), fSys.getHomeDirectory());
    /*
     * ChRootedFs as its uri like file:///chrootRoot.
     * This is questionable since path.makequalified(uri, path) ignores
     * the pathPart of a uri. So our notion of chrooted URI is questionable.
     * But if we were to fix Path#makeQualified() then  the next test should
     *  have been:

    Assert.assertEquals(
        new Path(chrootedTo + "/foo/bar").makeQualified(
            FsConstants.LOCAL_FS_URI, null),
        fSys.makeQualified(new Path( "/foo/bar")));
    */
    Assert.assertEquals(new Path("/foo/bar").makeQualified(FsConstants.LOCAL_FS_URI, null), fSys.makeQualified(new Path("/foo/bar")));
}

11 View Complete Implementation : TestDistCacheEmulation.java
Copyright Apache License 2.0
Author : aliyun-beta
/**
 * Validate setupGenerateDistCacheData by validating <li>permissions of the
 * distributed cache directory and <li>content of the generated sequence file.
 * This includes validation of dist cache file paths and their file sizes.
 */
private void doValidateSetupGenDC(RecordReader<LongWritable, BytesWritable> reader, FileSystem fs, long[] sortedFileSizes) throws IOException, InterruptedException {
    // Validate permissions of dist cache directory
    Path distCacheDir = dce.getDistributedCacheDir();
    replacedertEquals("Wrong permissions for distributed cache dir " + distCacheDir, fs.getFileStatus(distCacheDir).getPermission().getOtherAction().and(FsAction.EXECUTE), FsAction.EXECUTE);
    // Validate the content of the sequence file generated by
    // dce.setupGenerateDistCacheData().
    LongWritable key = new LongWritable();
    BytesWritable val = new BytesWritable();
    for (int i = 0; i < sortedFileSizes.length; i++) {
        replacedertTrue("Number of files written to the sequence file by " + "setupGenerateDistCacheData is less than the expected.", reader.nextKeyValue());
        key = reader.getCurrentKey();
        val = reader.getCurrentValue();
        long fileSize = key.get();
        String file = new String(val.getBytes(), 0, val.getLength());
        // Dist Cache files should be sorted based on file size.
        replacedertEquals("Dist cache file size is wrong.", sortedFileSizes[i], fileSize);
        // Validate dist cache file path.
        // parent dir of dist cache file
        Path parent = new Path(file).getParent().makeQualified(fs.getUri(), fs.getWorkingDirectory());
        // should exist in dist cache dir
        replacedertTrue("Public dist cache file path is wrong.", distCacheDir.equals(parent));
    }
}

11 View Complete Implementation : TestFileStatus.java
Copyright Apache License 2.0
Author : aliyun-beta
/**
 * Test the FileStatus obtained calling getFileStatus on a file
 */
@Test
public void testGetFileStatusOnFile() throws Exception {
    checkFile(fs, file1, 1);
    // test getFileStatus on a file
    FileStatus status = fs.getFileStatus(file1);
    assertFalse(file1 + " should be a file", status.isDirectory());
    assertEquals(blockSize, status.getBlockSize());
    assertEquals(1, status.getReplication());
    assertEquals(fileSize, status.getLen());
    assertEquals(file1.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString(), status.getPath().toString());
}

11 View Complete Implementation : TestMRJobClient.java
Copyright Apache License 2.0
Author : aliyun-beta
/**
 * print job history from file
 */
private void testJobHistory(Configuration conf) throws Exception {
    CLI jc = createJobClient();
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    File f = new File("src/test/resources/job_1329348432655_0001-10.jhist");
    FileSystem localFs = FileSystem.getLocal(conf);
    String historyFileUri = new Path(f.getAbsolutePath()).makeQualified(localFs.getUri(), localFs.getWorkingDirectory()).toUri().toString();
    // bad command
    int exitCode = runTool(conf, jc, new String[] { "-history", "pul", historyFileUri }, out);
    replacedertEquals("Exit code", -1, exitCode);
    exitCode = runTool(conf, jc, new String[] { "-history", "all", historyFileUri }, out);
    replacedertEquals("Exit code", 0, exitCode);
    String line;
    BufferedReader br = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(out.toByteArray())));
    int counter = 0;
    while ((line = br.readLine()) != null) {
        LOG.info("line = " + line);
        if (line.startsWith("task_")) {
            counter++;
        }
    }
    assertEquals(23, counter);
}

11 View Complete Implementation : TestTotalOrderPartitioner.java
Copyright Apache License 2.0
Author : aliyun-beta
private static <T> Path writePartitionFile(String testname, Configuration conf, T[] splits) throws IOException {
    final FileSystem fs = FileSystem.getLocal(conf);
    final Path testdir = new Path(System.getProperty("test.build.data", "/tmp")).makeQualified(fs.getUri(), fs.getWorkingDirectory());
    Path p = new Path(testdir, testname + "/_partition.lst");
    TotalOrderPartitioner.setPartitionFile(conf, p);
    conf.setInt(MRJobConfig.NUM_REDUCES, splits.length + 1);
    SequenceFile.Writer w = null;
    try {
        w = SequenceFile.createWriter(conf, SequenceFile.Writer.file(p), SequenceFile.Writer.keyClass(splits[0].getClass()), SequenceFile.Writer.valueClass(NullWritable.class), SequenceFile.Writer.compression(CompressionType.NONE));
        for (int i = 0; i < splits.length; ++i) {
            w.append(splits[i], NullWritable.get());
        }
    } finally {
        if (null != w)
            w.close();
    }
    return p;
}

11 View Complete Implementation : TestTotalOrderPartitioner.java
Copyright Apache License 2.0
Author : apache
private static <T> Path writePartitionFile(String testname, Configuration conf, T[] splits) throws IOException {
    final FileSystem fs = FileSystem.getLocal(conf);
    final Path testdir = new Path(System.getProperty("test.build.data", "/tmp")).makeQualified(fs.getUri(), fs.getWorkingDirectory());
    Path p = new Path(testdir, testname + "/_partition.lst");
    TotalOrderPartitioner.setPartitionFile(conf, p);
    conf.setInt(MRJobConfig.NUM_REDUCES, splits.length + 1);
    SequenceFile.Writer w = null;
    try {
        w = SequenceFile.createWriter(conf, SequenceFile.Writer.file(p), SequenceFile.Writer.keyClass(splits[0].getClass()), SequenceFile.Writer.valueClass(NullWritable.class), SequenceFile.Writer.compression(CompressionType.NONE));
        for (int i = 0; i < splits.length; ++i) {
            w.append(splits[i], NullWritable.get());
        }
    } finally {
        if (null != w)
            w.close();
    }
    return p;
}

11 View Complete Implementation : TestDistCacheEmulation.java
Copyright Apache License 2.0
Author : apache
/**
 * Validate setupGenerateDistCacheData by validating <li>permissions of the
 * distributed cache directory and <li>content of the generated sequence file.
 * This includes validation of dist cache file paths and their file sizes.
 */
private void doValidateSetupGenDC(RecordReader<LongWritable, BytesWritable> reader, FileSystem fs, long[] sortedFileSizes) throws IOException, InterruptedException {
    // Validate permissions of dist cache directory
    Path distCacheDir = dce.getDistributedCacheDir();
    replacedertEquals("Wrong permissions for distributed cache dir " + distCacheDir, fs.getFileStatus(distCacheDir).getPermission().getOtherAction().and(FsAction.EXECUTE), FsAction.EXECUTE);
    // Validate the content of the sequence file generated by
    // dce.setupGenerateDistCacheData().
    LongWritable key = new LongWritable();
    BytesWritable val = new BytesWritable();
    for (int i = 0; i < sortedFileSizes.length; i++) {
        replacedertTrue("Number of files written to the sequence file by " + "setupGenerateDistCacheData is less than the expected.", reader.nextKeyValue());
        key = reader.getCurrentKey();
        val = reader.getCurrentValue();
        long fileSize = key.get();
        String file = new String(val.getBytes(), 0, val.getLength());
        // Dist Cache files should be sorted based on file size.
        replacedertEquals("Dist cache file size is wrong.", sortedFileSizes[i], fileSize);
        // Validate dist cache file path.
        // parent dir of dist cache file
        Path parent = new Path(file).getParent().makeQualified(fs.getUri(), fs.getWorkingDirectory());
        // should exist in dist cache dir
        replacedertTrue("Public dist cache file path is wrong.", distCacheDir.equals(parent));
    }
}

11 View Complete Implementation : TestFileStatus.java
Copyright Apache License 2.0
Author : aliyun-beta
/**
 * Test the FileStatus obtained calling listStatus on a file
 */
@Test
public void testListStatusOnFile() throws IOException {
    FileStatus[] stats = fs.listStatus(file1);
    assertEquals(1, stats.length);
    FileStatus status = stats[0];
    assertFalse(file1 + " should be a file", status.isDirectory());
    assertEquals(blockSize, status.getBlockSize());
    assertEquals(1, status.getReplication());
    assertEquals(fileSize, status.getLen());
    assertEquals(file1.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString(), status.getPath().toString());
    RemoteIterator<FileStatus> itor = fc.listStatus(file1);
    status = itor.next();
    assertEquals(stats[0], status);
    assertFalse(file1 + " should be a file", status.isDirectory());
}

11 View Complete Implementation : ITestS3AEncryptionAlgorithmValidation.java
Copyright Apache License 2.0
Author : apache
@Test
public void testEncryptionAlgorithmSetToDES() throws Throwable {
    // skip tests if they aren't enabled
    assumeEnabled();
    intercept(IOException.class, "Unknown Server Side algorithm DES", () -> {
        Configuration conf = super.createConfiguration();
        // DES is an invalid encryption algorithm
        conf.set(Constants.SERVER_SIDE_ENCRYPTION_ALGORITHM, "DES");
        S3AContract contract = (S3AContract) createContract(conf);
        contract.init();
        // extract the test FS
        FileSystem fileSystem = contract.getTestFileSystem();
        assertNotNull("null filesystem", fileSystem);
        URI fsURI = fileSystem.getUri();
        LOG.info("Test filesystem = {} implemented by {}", fsURI, fileSystem);
        replacedertEquals("wrong filesystem of " + fsURI, contract.getScheme(), fsURI.getScheme());
        fileSystem.initialize(fsURI, conf);
        throw new Exception("Do not reach here");
    });
}

10 View Complete Implementation : TestFileStatus.java
Copyright Apache License 2.0
Author : apache
/**
 * Test the FileStatus obtained calling listStatus on a file
 */
@Test
public void testListStatusOnFile() throws IOException {
    FileStatus[] stats = fs.listStatus(file1);
    assertEquals(1, stats.length);
    FileStatus status = stats[0];
    assertFalse(file1 + " should be a file", status.isDirectory());
    assertEquals(blockSize, status.getBlockSize());
    assertEquals(1, status.getReplication());
    assertEquals(fileSize, status.getLen());
    ContractTestUtils.assertNotErasureCoded(fs, file1);
    assertEquals(file1.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString(), status.getPath().toString());
    RemoteIterator<FileStatus> itor = fc.listStatus(file1);
    status = itor.next();
    assertEquals(stats[0], status);
    assertFalse(file1 + " should be a file", status.isDirectory());
}