org.apache.hadoop.conf.Configuration.setBoolean() - Java examples

Here are examples of the Java API org.apache.hadoop.conf.Configuration.setBoolean(), taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
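
As a quick orientation before the examples, here is a minimal sketch of the call itself; the property name used is illustrative, while Configuration.setBoolean() and getBoolean() are the real Hadoop methods. setBoolean(name, value) stores the flag under the given property name, and getBoolean(name, defaultValue) reads it back, returning the default when the property is unset.

import org.apache.hadoop.conf.Configuration;

public class SetBooleanExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Unset keys fall back to the caller-supplied default.
        boolean before = conf.getBoolean("example.feature.enabled", true);
        // setBoolean stores the value as the string "true" or "false".
        conf.setBoolean("example.feature.enabled", false);
        boolean after = conf.getBoolean("example.feature.enabled", true);
        System.out.println(before + " -> " + after); // prints: true -> false
    }
}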

155 Examples

19 View Complete Implementation : SkipBadRecords.java
Copyright Apache License 2.0
Author : apache
/**
 * Set the flag which if set to true,
 * {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS} is incremented
 * by MapRunner after invoking the map function. This value must be set to
 * false for applications which process the records asynchronously
 * or buffer the input records. For example streaming.
 * In such cases applications should increment this counter on their own.
 * Default value is true.
 *
 * @param conf the configuration
 * @param autoIncr whether to auto increment
 *        {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS}.
 */
public static void setAutoIncrMapperProcCount(Configuration conf, boolean autoIncr) {
    conf.setBoolean(AUTO_INCR_MAP_PROC_COUNT, autoIncr);
}
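
A hedged usage sketch for the helper above: a driver whose mapper buffers records would switch the counter off before submitting the job. SkipBadRecords and JobConf are real Hadoop classes (JobConf extends Configuration); the driver context around the call is illustrative only.

// Sketch: a job that buffers input records disables auto-increment and
// must then bump COUNTER_MAP_PROCESSED_RECORDS itself.
JobConf jobConf = new JobConf();
SkipBadRecords.setAutoIncrMapperProcCount(jobConf, false);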

19 View Complete Implementation : ITestAbfsDelegationTokens.java
Copyright Apache License 2.0
Author : apache
@Override
public void setup() throws Exception {
    // create the FS
    Configuration conf = getRawConfiguration();
    cluster.bindConfToCluster(conf);
    conf.setBoolean(HADOOP_SECURITY_TOKEN_SERVICE_USE_IP, false);
    resetUGI();
    UserGroupInformation.setConfiguration(conf);
    aliceUser = cluster.createAliceUser();
    assertSecurityEnabled();
    // log in as alice so that filesystems belong to that user
    UserGroupInformation.setLoginUser(aliceUser);
    StubDelegationTokenManager.useStubDTManager(conf);
    FileSystem.closeAllForUGI(UserGroupInformation.getLoginUser());
    super.setup();
    assertNotNull("No StubDelegationTokenManager created in filesystem init", getStubDTManager());
}

19 View Complete Implementation : TestNNHealthCheck.java
Copyright Apache License 2.0
Author : apache
@Test
public void testNNHealthCheckWithSafemodeAsUnhealthy() throws Exception {
    conf.setBoolean(DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE, true);
    // now bring up just the NameNode.
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).nnTopology(MiniDFSNNTopology.simpleHATopology()).build();
    cluster.waitActive();
    // manually set safemode.
    cluster.getFileSystem(0).setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
    NNHAServiceTarget haTarget = new NNHAServiceTarget(conf, DFSUtil.getNamenodeNameServiceId(conf), "nn1");
    final String expectedTargetString = haTarget.getAddress().toString();
    assertTrue("Expected haTarget " + haTarget + " containing " + expectedTargetString, haTarget.toString().contains(expectedTargetString));
    HAServiceProtocol rpc = haTarget.getHealthMonitorProxy(conf, 5000);
    LambdaTestUtils.intercept(RemoteException.class, "The NameNode is configured to report UNHEALTHY to ZKFC in Safemode.", () -> rpc.monitorHealth());
}

19 View Complete Implementation : TestCGroupsHandlerImpl.java
Copyright Apache License 2.0
Author : apache
/**
 * Create configuration where the cgroups are premounted.
 * @param myHierarchy YARN cgroup
 * @return configuration object
 */
private Configuration createNoMountConfiguration(String myHierarchy) {
    Configuration confNoMount = new Configuration();
    confNoMount.set(YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_HIERARCHY, myHierarchy);
    confNoMount.setBoolean(YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_MOUNT, false);
    return confNoMount;
}
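
For context, the consuming side of such a flag typically reads it back with getBoolean() and a default; a sketch follows (the default of false mirrors YARN's shipped behaviour, but treat that as an assumption here):

// Sketch: how a resource handler might consume the flag set above.
boolean mountCGroups = confNoMount.getBoolean(
        YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_MOUNT, false);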

19 View Complete Implementation : TestCGroupsMemoryResourceHandlerImpl.java
Copyright Apache License 2.0
Author : apache
@Test
public void testPreStart() throws Exception {
    Configuration conf = new Configuration();
    conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
    conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);
    cGroupsMemoryResourceHandler.bootstrap(conf);
    String id = "container_01_01";
    String path = "test-path/" + id;
    ContainerId mockContainerId = mock(ContainerId.class);
    when(mockContainerId.toString()).thenReturn(id);
    Container mockContainer = mock(Container.class);
    when(mockContainer.getContainerId()).thenReturn(mockContainerId);
    when(mockCGroupsHandler.getPathForCGroupTasks(CGroupsHandler.CGroupController.MEMORY, id)).thenReturn(path);
    int memory = 1024;
    when(mockContainer.getResource()).thenReturn(Resource.newInstance(memory, 1));
    List<PrivilegedOperation> ret = cGroupsMemoryResourceHandler.preStart(mockContainer);
    verify(mockCGroupsHandler, times(1)).createCGroup(CGroupsHandler.CGroupController.MEMORY, id);
    verify(mockCGroupsHandler, times(1)).updateCGroupParam(CGroupsHandler.CGroupController.MEMORY, id, CGroupsHandler.CGROUP_PARAM_MEMORY_HARD_LIMIT_BYTES, String.valueOf(memory) + "M");
    verify(mockCGroupsHandler, times(1)).updateCGroupParam(CGroupsHandler.CGroupController.MEMORY, id, CGroupsHandler.CGROUP_PARAM_MEMORY_SOFT_LIMIT_BYTES, String.valueOf((int) (memory * 0.9)) + "M");
    verify(mockCGroupsHandler, times(1)).updateCGroupParam(CGroupsHandler.CGroupController.MEMORY, id, CGroupsHandler.CGROUP_PARAM_MEMORY_SWAPPINESS, String.valueOf(0));
    Assert.assertNotNull(ret);
    Assert.assertEquals(1, ret.size());
    PrivilegedOperation op = ret.get(0);
    Assert.assertEquals(PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP, op.getOperationType());
    List<String> args = op.getArguments();
    Assert.assertEquals(1, args.size());
    Assert.assertEquals(PrivilegedOperation.CGROUP_ARG_PREFIX + path, args.get(0));
}

19 View Complete Implementation : TestMapReduceChildJVM.java
Copyright Apache License 2.0
Author : apache
@Test(timeout = 30000)
public void testCommandLineWithLog4JConifg() throws Exception {
    MyMRApp app = new MyMRApp(1, 0, true, this.getClass().getName(), true);
    Configuration conf = new Configuration();
    conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
    String testLogPropertieFile = "test-log4j.properties";
    String testLogPropertiePath = "../" + "test-log4j.properties";
    conf.set(MRJobConfig.MAPREDUCE_JOB_LOG4J_PROPERTIES_FILE, testLogPropertiePath);
    Job job = app.submit(conf);
    app.waitForState(job, JobState.SUCCEEDED);
    app.verifyCompleted();
    Assert.assertEquals("[" + MRApps.crossPlatformify("JAVA_HOME") + "/bin/java" + " -Djava.net.preferIPv4Stack=true" + " -Dhadoop.metrics.log.level=WARN " + "  -Xmx820m -Djava.io.tmpdir=" + MRApps.crossPlatformify("PWD") + "/tmp" + " -Dlog4j.configuration=" + testLogPropertieFile + " -Dyarn.app.container.log.dir=<LOG_DIR>" + " -Dyarn.app.container.log.filesize=0" + " -Dhadoop.root.logger=INFO,CLA -Dhadoop.root.logfile=syslog" + " org.apache.hadoop.mapred.YarnChild 127.0.0.1" + " 54321" + " attempt_0_0000_m_000000_0" + " 0" + " 1><LOG_DIR>/stdout" + " 2><LOG_DIR>/stderr ]", app.launchCmdList.get(0));
}

19 View Complete Implementation : TestFuseDFS.java
Copyright Apache License 2.0
Author : apache
@BeforeClass
public static void startUp() throws IOException {
    Configuration conf = new HdfsConfiguration();
    r = Runtime.getRuntime();
    mountPoint = System.getProperty("build.test") + "/mnt";
    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitClusterUp();
    fs = cluster.getFileSystem();
    fuseProcess = establishMount(fs.getUri());
}

19 View Complete Implementation : TestResourceProfiles.java
Copyright Apache License 2.0
Author : apache
@Test
public void testProfilesEnabled() throws Exception {
    ResourceProfilesManager manager = new ResourceProfilesManagerImpl();
    Configuration conf = new Configuration();
    // by default, resource profiles should not be enabled
    manager.init(conf);
    try {
        manager.getResourceProfiles();
        Assert.fail("Exception should be thrown as resource profile is not enabled" + " and getResourceProfiles is invoked.");
    } catch (YarnException ie) {
    }
    conf.setBoolean(YarnConfiguration.RM_RESOURCE_PROFILES_ENABLED, true);
    try {
        manager.init(conf);
        Assert.fail("Exception should be thrown due to missing resource profiles file");
    } catch (IOException ie) {
    }
    conf.set(YarnConfiguration.RM_RESOURCE_PROFILES_SOURCE_FILE, "profiles/sample-profiles-1.json");
    manager.init(conf);
}

19 View Complete Implementation : TestMemoryRMStateStore.java
Copyright Apache License 2.0
Author : apache
@Test
public void testNotifyStoreOperationFailed() throws Exception {
    RMStateStore store = new MemoryRMStateStore() {

        @Override
        public synchronized void removeRMDelegationTokenState(RMDelegationTokenIdentifier rmDTIdentifier) throws Exception {
            throw new Exception("testNotifyStoreOperationFailed");
        }
    };
    Configuration conf = new Configuration();
    conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
    store.init(conf);
    ResourceManager mockRM = mock(ResourceManager.class);
    store.setResourceManager(mockRM);
    store.setRMDispatcher(new RMStateStoreTestBase.TestDispatcher());
    RMDelegationTokenIdentifier mockTokenId = mock(RMDelegationTokenIdentifier.class);
    store.removeRMDelegationToken(mockTokenId);
    assertTrue("RMStateStore should have been in fenced state", store.isFencedState());
    store = new MemoryRMStateStore() {

        @Override
        public synchronized void removeRMDelegationToken(RMDelegationTokenIdentifier rmDTIdentifier) {
            notifyStoreOperationFailed(new Exception("testNotifyStoreOperationFailed"));
        }
    };
    store.init(conf);
    store.setResourceManager(mockRM);
    store.setRMDispatcher(new RMStateStoreTestBase.TestDispatcher());
    store.removeRMDelegationToken(mockTokenId);
    assertTrue("RMStateStore should have been in fenced state", store.isFencedState());
}

19 View Complete Implementation : ITestRestrictedReadAccess.java
Copyright Apache License 2.0
Author : apache
@Override
public Configuration createConfiguration() {
    Configuration conf = super.createConfiguration();
    String bucketName = getTestBucketName(conf);
    // is s3guard enabled?
    boolean guardedTestRun = isS3GuardTestPropertySet(conf);
    // in a guarded test run, except for the special case of raw,
    // all DDB settings are left alone.
    removeBaseAndBucketOverrides(bucketName, conf, METADATASTORE_AUTHORITATIVE);
    removeBucketOverrides(bucketName, conf, S3_METADATA_STORE_IMPL);
    if (!s3guard) {
        removeBaseAndBucketOverrides(bucketName, conf, S3_METADATA_STORE_IMPL);
    }
    conf.setBoolean(METADATASTORE_AUTHORITATIVE, authMode);
    disableFilesystemCaching(conf);
    return conf;
}

19 View Complete Implementation : TestCGroupsMemoryResourceHandlerImpl.java
Copyright Apache License 2.0
Author : apache
@Test
public void testOpportunistic() throws Exception {
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
    conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);
    cGroupsMemoryResourceHandler.bootstrap(conf);
    ContainerTokenIdentifier tokenId = mock(ContainerTokenIdentifier.class);
    when(tokenId.getExecutionType()).thenReturn(ExecutionType.OPPORTUNISTIC);
    Container container = mock(Container.class);
    String id = "container_01_01";
    ContainerId mockContainerId = mock(ContainerId.class);
    when(mockContainerId.toString()).thenReturn(id);
    when(container.getContainerId()).thenReturn(mockContainerId);
    when(container.getContainerTokenIdentifier()).thenReturn(tokenId);
    when(container.getResource()).thenReturn(Resource.newInstance(1024, 2));
    cGroupsMemoryResourceHandler.preStart(container);
    verify(mockCGroupsHandler, times(1)).updateCGroupParam(CGroupsHandler.CGroupController.MEMORY, id, CGroupsHandler.CGROUP_PARAM_MEMORY_SOFT_LIMIT_BYTES, "0M");
    verify(mockCGroupsHandler, times(1)).updateCGroupParam(CGroupsHandler.CGroupController.MEMORY, id, CGroupsHandler.CGROUP_PARAM_MEMORY_SWAPPINESS, "100");
    verify(mockCGroupsHandler, times(1)).updateCGroupParam(CGroupsHandler.CGroupController.MEMORY, id, CGroupsHandler.CGROUP_PARAM_MEMORY_HARD_LIMIT_BYTES, "1024M");
}

19 View Complete Implementation : TestMetricsInvariantChecker.java
Copyright Apache License 2.0
Author : apache
@Test
public void testViolation() {
    // create a "wrong" condition in which the invariants are not respected
    QueueMetrics qm = QueueMetrics.forQueue(metricsSystem, "root", null, false, conf);
    qm.setAvailableResourcesToQueue(RMNodeLabelsManager.NO_LABEL, Resource.newInstance(-1, -1));
    // test with throwing exception turned on
    try {
        ic.editSchedule();
        fail();
    } catch (InvariantViolationException i) {
    // expected
    }
    // test log-only mode
    conf.setBoolean(MetricsInvariantChecker.THROW_ON_VIOLATION, false);
    ic.init(conf, null, null);
    ic.editSchedule();
}

19 View Complete Implementation : TestFileInputFormat.java
Copyright Apache License 2.0
Author : apache
public static List<Path> configureTestErrorOnNonExistantDir(Configuration conf, FileSystem localFs) throws IOException {
    Path base1 = new Path(TEST_ROOT_DIR, "input1");
    Path base2 = new Path(TEST_ROOT_DIR, "input2");
    conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR, localFs.makeQualified(base1) + "," + localFs.makeQualified(base2));
    conf.setBoolean(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR_RECURSIVE, true);
    localFs.mkdirs(base1);
    Path inFile1 = new Path(base1, "file1");
    Path inFile2 = new Path(base1, "file2");
    localFs.createNewFile(inFile1);
    localFs.createNewFile(inFile2);
    List<Path> expectedPaths = Lists.newArrayList();
    return expectedPaths;
}

19 View Complete Implementation : TestNumaResourceAllocator.java
Copyright Apache License 2.0
Author : apache
@Test
public void testReadNumaTopologyFromCmdOutput() throws Exception {
    conf.setBoolean(YarnConfiguration.NM_NUMA_AWARENESS_READ_TOPOLOGY, true);
    String cmdOutput = "available: 2 nodes (0-1)\n\t" + "node 0 cpus: 0 2 4 6\n\t" + "node 0 size: 73717 MB\n\t" + "node 0 free: 17272 MB\n\t" + "node 1 cpus: 1 3 5 7\n\t" + "node 1 size: 73727 MB\n\t" + "node 1 free: 10699 MB\n\t" + "node distances:\n\t" + "node 0 1\n\t" + "0: 10 20\n\t" + "1: 20 10";
    numaResourceAllocator = new NumaResourceAllocator(mock(Context.class)) {

        @Override
        String executeNGetCmdOutput(Configuration config) throws YarnRuntimeException {
            return cmdOutput;
        }
    };
    numaResourceAllocator.init(conf);
    Collection<NumaNodeResource> nodesList = numaResourceAllocator.getNumaNodesList();
    Collection<NumaNodeResource> expectedNodesList = getExpectedNumaNodesList();
    Assert.assertEquals(expectedNodesList, nodesList);
}

19 View Complete Implementation : TestViewFileSystemLinkMergeSlash.java
Copyright Apache License 2.0
Author : apache
@BeforeClass
public static void clusterSetupAtBeginning() throws IOException, LoginException, URISyntaxException {
    SupportsBlocks = true;
    CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    cluster = new MiniDFSCluster.Builder(CONF).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(NAME_SPACES_COUNT)).numDataNodes(DATA_NODES_COUNT).build();
    cluster.waitClusterUp();
    for (int i = 0; i < NAME_SPACES_COUNT; i++) {
        FS_HDFS[i] = cluster.getFileSystem(i);
    }
    fsDefault = FS_HDFS[FS_INDEX_DEFAULT];
}

19 View Complete Implementation : TestViewFileSystemLinkFallback.java
Copyright Apache License 2.0
Author : apache
@BeforeClass
public static void clusterSetupAtBeginning() throws IOException, LoginException, URISyntaxException {
    SupportsBlocks = true;
    CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    cluster = new MiniDFSCluster.Builder(CONF).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(NAME_SPACES_COUNT)).numDataNodes(DATA_NODES_COUNT).build();
    cluster.waitClusterUp();
    for (int i = 0; i < NAME_SPACES_COUNT; i++) {
        FS_HDFS[i] = cluster.getFileSystem(i);
    }
    fsDefault = FS_HDFS[FS_INDEX_DEFAULT];
}

19 View Complete Implementation : TestRMHAMetrics.java
Copyright Apache License 2.0
Author : apache
@Test(timeout = 300000)
public void testMetricsAfterTransitionToStandby() throws Exception {
    configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
    Configuration conf = new YarnConfiguration(configuration);
    MockRM rm = new MockRM(conf);
    rm.init(conf);
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName("Hadoop:service=ResourceManager,name=RMInfo");
    Assert.assertEquals("initializing", (String) mbs.getAttribute(mxbeanName, "State"));
    rm.start();
    Assert.assertEquals("standby", (String) mbs.getAttribute(mxbeanName, "State"));
    rm.transitionToActive();
    Assert.assertEquals("active", (String) mbs.getAttribute(mxbeanName, "State"));
    rm.transitionToStandby(true);
    Assert.assertEquals("standby", (String) mbs.getAttribute(mxbeanName, "State"));
    assertNotNull(DefaultMetricsSystem.instance().getSource("JvmMetrics"));
    assertNotNull(DefaultMetricsSystem.instance().getSource("UgiMetrics"));
    rm.stop();
}

19 View Complete Implementation : ITestBlobDataValidation.java
Copyright Apache License 2.0
Author : apache
/**
 * Test that we don't check block-level MD5 if we specify that in the
 * configuration.
 */
@Test
public void testDontCheckBlockMd5() throws Exception {
    Configuration conf = new Configuration();
    conf.setBoolean(KEY_CHECK_BLOCK_MD5, false);
    testAccount = AzureBlobStorageTestAccount.create(conf);
    testCheckBlockMd5(false);
}
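
The pattern under test is a boolean validation toggle: the store presumably reads the key back with a default of true and skips the per-block check when the flag is false. A sketch under that assumption, not the store's actual code:

// Sketch: validation gated on a boolean key that defaults to enabled.
boolean checkBlockMd5 = conf.getBoolean(KEY_CHECK_BLOCK_MD5, true);
if (checkBlockMd5) {
    // compute and compare the block-level MD5 here
}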

19 View Complete Implementation : TestMapReduceChildJVM.java
Copyright Apache License 2.0
Author : apache
private void testReduceCommandLine(Configuration conf) throws Exception {
    MyMRApp app = new MyMRApp(0, 1, true, this.getClass().getName(), true);
    conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
    Job job = app.submit(conf);
    app.waitForState(job, JobState.SUCCEEDED);
    app.verifyCompleted();
    final long shuffleLogSize = conf.getLong(MRJobConfig.SHUFFLE_LOG_KB, 0L) * 1024L;
    final int shuffleBackups = conf.getInt(MRJobConfig.SHUFFLE_LOG_BACKUPS, 0);
    final String appenderName = shuffleLogSize > 0L && shuffleBackups > 0 ? "shuffleCRLA" : "shuffleCLA";
    Assert.assertEquals("[" + MRApps.crossPlatformify("JAVA_HOME") + "/bin/java" + " -Djava.net.preferIPv4Stack=true" + " -Dhadoop.metrics.log.level=WARN " + "  -Xmx820m -Djava.io.tmpdir=" + MRApps.crossPlatformify("PWD") + "/tmp" + " -Dlog4j.configuration=container-log4j.properties" + " -Dyarn.app.container.log.dir=<LOG_DIR>" + " -Dyarn.app.container.log.filesize=0" + " -Dhadoop.root.logger=INFO,CLA -Dhadoop.root.logfile=syslog" + " -Dyarn.app.mapreduce.shuffle.logger=INFO," + appenderName + " -Dyarn.app.mapreduce.shuffle.logfile=syslog.shuffle" + " -Dyarn.app.mapreduce.shuffle.log.filesize=" + shuffleLogSize + " -Dyarn.app.mapreduce.shuffle.log.backups=" + shuffleBackups + " org.apache.hadoop.mapred.YarnChild 127.0.0.1" + " 54321" + " attempt_0_0000_r_000000_0" + " 0" + " 1><LOG_DIR>/stdout" + " 2><LOG_DIR>/stderr ]", app.launchCmdList.get(0));
    Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job", app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"));
    Assert.assertEquals("INFO,console", app.cmdEnvironment.get("HADOOP_ROOT_LOGGER"));
    Assert.assertTrue("HADOOP_CLIENT_OPTS not set for job", app.cmdEnvironment.containsKey("HADOOP_CLIENT_OPTS"));
    Assert.assertEquals("", app.cmdEnvironment.get("HADOOP_CLIENT_OPTS"));
}

19 View Complete Implementation : TestMapReduceChildJVM.java
Copyright Apache License 2.0
Author : apache
@Test(timeout = 30000)
public void testCommandLine() throws Exception {
    MyMRApp app = new MyMRApp(1, 0, true, this.getClass().getName(), true);
    Configuration conf = new Configuration();
    conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
    Job job = app.submit(conf);
    app.waitForState(job, JobState.SUCCEEDED);
    app.verifyCompleted();
    Assert.assertEquals("[" + MRApps.crossPlatformify("JAVA_HOME") + "/bin/java" + " -Djava.net.preferIPv4Stack=true" + " -Dhadoop.metrics.log.level=WARN " + "  -Xmx820m -Djava.io.tmpdir=" + MRApps.crossPlatformify("PWD") + "/tmp" + " -Dlog4j.configuration=container-log4j.properties" + " -Dyarn.app.container.log.dir=<LOG_DIR>" + " -Dyarn.app.container.log.filesize=0" + " -Dhadoop.root.logger=INFO,CLA -Dhadoop.root.logfile=syslog" + " org.apache.hadoop.mapred.YarnChild 127.0.0.1" + " 54321" + " attempt_0_0000_m_000000_0" + " 0" + " 1><LOG_DIR>/stdout" + " 2><LOG_DIR>/stderr ]", app.launchCmdList.get(0));
    Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job", app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"));
    Assert.assertEquals("INFO,console", app.cmdEnvironment.get("HADOOP_ROOT_LOGGER"));
    Assert.assertTrue("HADOOP_CLIENT_OPTS not set for job", app.cmdEnvironment.containsKey("HADOOP_CLIENT_OPTS"));
    Assert.assertEquals("", app.cmdEnvironment.get("HADOOP_CLIENT_OPTS"));
}

19 View Complete Implementation : ITestS3AContractRename.java
Copyright Apache License 2.0
Author : apache
/**
 * Create a configuration, possibly patching in S3Guard options.
 * @return a configuration
 */
@Override
protected Configuration createConfiguration() {
    Configuration conf = super.createConfiguration();
    // patch in S3Guard options
    maybeEnableS3Guard(conf);
    conf.setBoolean(METADATASTORE_AUTHORITATIVE, authoritative);
    return conf;
}

19 View Complete Implementation : TestTrafficController.java
Copyright Apache License 2.0
Author : apache
@Test
public void testBootstrapRecoveryEnabled() {
    conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
    TrafficController trafficController = new TrafficController(conf, privilegedOperationExecutorMock);
    try {
        // Return a default tc state when attempting to read state
        when(privilegedOperationExecutorMock.executePrivilegedOperation(any(PrivilegedOperation.class), eq(true))).thenReturn(DEFAULT_TC_STATE_EXAMPLE);
        trafficController.bootstrap(DEVICE, ROOT_BANDWIDTH_MBIT, YARN_BANDWIDTH_MBIT);
        ArgumentCaptor<PrivilegedOperation> readOpCaptor = ArgumentCaptor.forClass(PrivilegedOperation.class);
        // NM_RECOVERY_ENABLED - so we expect three privileged operation executions
        // 1) read tc state 2) wipe tc state 3) init tc state
        // First, verify read op
        verify(privilegedOperationExecutorMock, times(1)).executePrivilegedOperation(readOpCaptor.capture(), eq(true));
        List<PrivilegedOperation> readOps = readOpCaptor.getAllValues();
        verifyTrafficControlOperation(readOps.get(0), PrivilegedOperation.OperationType.TC_READ_STATE, Arrays.asList(READ_QDISC_CMD, READ_FILTER_CMD, READ_CLASS_CMD));
        ArgumentCaptor<PrivilegedOperation> writeOpCaptor = ArgumentCaptor.forClass(PrivilegedOperation.class);
        verify(privilegedOperationExecutorMock, times(2)).executePrivilegedOperation(writeOpCaptor.capture(), eq(false));
        // Now verify that the two write operations were correct
        List<PrivilegedOperation> writeOps = writeOpCaptor.getAllValues();
        verifyTrafficControlOperation(writeOps.get(0), PrivilegedOperation.OperationType.TC_MODIFY_STATE, Arrays.asList(WIPE_STATE_CMD));
        verifyTrafficControlOperation(writeOps.get(1), PrivilegedOperation.OperationType.TC_MODIFY_STATE, Arrays.asList(ADD_ROOT_QDISC_CMD, ADD_CGROUP_FILTER_CMD, ADD_ROOT_CLASS_CMD, ADD_DEFAULT_CLASS_CMD, ADD_YARN_CLASS_CMD));
    } catch (ResourceHandlerException | PrivilegedOperationException | IOException e) {
        LOG.error("Unexpected exception: " + e);
        Assert.fail("Caught unexpected exception: " + e.getClass().getSimpleName());
    }
}

19 View Complete Implementation : ITestAzureBlobFileSystemFinalize.java
Copyright Apache License 2.0
Author : apache
@Test
public void testFinalize() throws Exception {
    // Disable the cache for filesystem to make sure there is no reference.
    Configuration rawConfig = this.getRawConfiguration();
    rawConfig.setBoolean(DISABLE_ABFS_CACHE_KEY, true);
    rawConfig.setBoolean(DISABLE_ABFSS_CACHE_KEY, true);
    AzureBlobFileSystem fs = (AzureBlobFileSystem) FileSystem.get(rawConfig);
    WeakReference<Object> ref = new WeakReference<Object>(fs);
    fs = null;
    int i = 0;
    int maxTries = 1000;
    while (ref.get() != null && i < maxTries) {
        System.gc();
        System.runFinalization();
        i++;
    }
    Assert.assertTrue("testFinalizer didn't get cleaned up within maxTries", ref.get() == null);
}

19 View Complete Implementation : TestFSNamesystemLock.java
Copyright Apache License 2.0
Author : apache
@Test
public void testFsLockFairness() throws IOException, InterruptedException {
    Configuration conf = new Configuration();
    conf.setBoolean(DFS_NAMENODE_FSLOCK_FAIR_KEY, true);
    FSNamesystemLock fsnLock = new FSNamesystemLock(conf, null);
    assertTrue(fsnLock.coarseLock.isFair());
    conf.setBoolean(DFS_NAMENODE_FSLOCK_FAIR_KEY, false);
    fsnLock = new FSNamesystemLock(conf, null);
    assertFalse(fsnLock.coarseLock.isFair());
}
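
The flag maps directly onto read-write lock fairness; a minimal sketch of that wiring (a plausible reconstruction, not FSNamesystemLock's actual internals):

// Sketch: a boolean config key choosing a fair vs. non-fair lock.
boolean fair = conf.getBoolean(DFS_NAMENODE_FSLOCK_FAIR_KEY, true);
java.util.concurrent.locks.ReentrantReadWriteLock coarseLock =
        new java.util.concurrent.locks.ReentrantReadWriteLock(fair);
// coarseLock.isFair() now reflects the configured value, which is what
// the assertions above verify.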

19 View Complete Implementation : TestTrafficController.java
Copyright Apache License 2.0
Author : apache
@Test
public void testBootstrapRecoveryDisabled() {
    conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, false);
    TrafficController trafficController = new TrafficController(conf, privilegedOperationExecutorMock);
    try {
        trafficController.bootstrap(DEVICE, ROOT_BANDWIDTH_MBIT, YARN_BANDWIDTH_MBIT);
        ArgumentCaptor<PrivilegedOperation> opCaptor = ArgumentCaptor.forClass(PrivilegedOperation.class);
        // NM_RECOVERY_DISABLED - so we expect two privileged operation executions
        // one for wiping tc state - a second for initializing state
        verify(privilegedOperationExecutorMock, times(2)).executePrivilegedOperation(opCaptor.capture(), eq(false));
        // Now verify that the two operations were correct
        List<PrivilegedOperation> ops = opCaptor.getAllValues();
        verifyTrafficControlOperation(ops.get(0), PrivilegedOperation.OperationType.TC_MODIFY_STATE, Arrays.asList(WIPE_STATE_CMD));
        verifyTrafficControlOperation(ops.get(1), PrivilegedOperation.OperationType.TC_MODIFY_STATE, Arrays.asList(ADD_ROOT_QDISC_CMD, ADD_CGROUP_FILTER_CMD, ADD_ROOT_CLASS_CMD, ADD_DEFAULT_CLASS_CMD, ADD_YARN_CLASS_CMD));
    } catch (ResourceHandlerException | PrivilegedOperationException | IOException e) {
        LOG.error("Unexpected exception: " + e);
        Assert.fail("Caught unexpected exception: " + e.getClass().getSimpleName());
    }
}

19 View Complete Implementation : TestMapReduceChildJVM.java
Copyright Apache License 2.0
Author : apache
@Test
public void testEnvironmentVariables() throws Exception {
    MyMRApp app = new MyMRApp(1, 0, true, this.getClass().getName(), true);
    Configuration conf = new Configuration();
    conf.set(JobConf.MAPRED_MAP_TASK_ENV, "HADOOP_CLIENT_OPTS=test");
    conf.setStrings(MRJobConfig.MAP_LOG_LEVEL, "WARN");
    conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, false);
    Job job = app.submit(conf);
    app.waitForState(job, JobState.SUCCEEDED);
    app.verifyCompleted();
    Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job", app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"));
    Assert.assertEquals("WARN,console", app.cmdEnvironment.get("HADOOP_ROOT_LOGGER"));
    Assert.assertTrue("HADOOP_CLIENT_OPTS not set for job", app.cmdEnvironment.containsKey("HADOOP_CLIENT_OPTS"));
    Assert.assertEquals("test", app.cmdEnvironment.get("HADOOP_CLIENT_OPTS"));
    // Try one more.
    app = new MyMRApp(1, 0, true, this.getClass().getName(), true);
    conf = new Configuration();
    conf.set(JobConf.MAPRED_MAP_TASK_ENV, "HADOOP_ROOT_LOGGER=trace");
    job = app.submit(conf);
    app.waitForState(job, JobState.SUCCEEDED);
    app.verifyCompleted();
    Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job", app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"));
    Assert.assertEquals("trace", app.cmdEnvironment.get("HADOOP_ROOT_LOGGER"));
    // Try one using the mapreduce.task.env.var=value syntax
    app = new MyMRApp(1, 0, true, this.getClass().getName(), true);
    conf = new Configuration();
    conf.set(JobConf.MAPRED_MAP_TASK_ENV + ".HADOOP_ROOT_LOGGER", "DEBUG,console");
    job = app.submit(conf);
    app.waitForState(job, JobState.SUCCEEDED);
    app.verifyCompleted();
    Assert.assertTrue("HADOOP_ROOT_LOGGER not set for job", app.cmdEnvironment.containsKey("HADOOP_ROOT_LOGGER"));
    Assert.assertEquals("DEBUG,console", app.cmdEnvironment.get("HADOOP_ROOT_LOGGER"));
}

19 View Complete Implementation : TestFileInputFormat.java
Copyright Apache License 2.0
Author : apache
@Test
public void testListLocatedStatus() throws Exception {
    Configuration conf = getConfiguration();
    conf.setBoolean("fs.test.impl.disable.cache", false);
    conf.setInt(FileInputFormat.LIST_STATUS_NUM_THREADS, numThreads);
    conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR, "test:///a1/a2");
    MockFileSystem mockFs = (MockFileSystem) new Path("test:///").getFileSystem(conf);
    Assert.assertEquals("listLocatedStatus already called", 0, mockFs.numListLocatedStatusCalls);
    JobConf job = new JobConf(conf);
    TextInputFormat fileInputFormat = new TextInputFormat();
    fileInputFormat.configure(job);
    InputSplit[] splits = fileInputFormat.getSplits(job, 1);
    Assert.assertEquals("Input splits are not correct", 2, splits.length);
    Assert.assertEquals("listLocatedStatus calls", 1, mockFs.numListLocatedStatusCalls);
    FileSystem.closeAll();
}

19 View Complete Implementation : ITestAuthoritativePath.java
Copyright Apache License 2.0
Author : apache
private S3AFileSystem createFullyAuthFS() throws Exception {
    S3AFileSystem testFS = getFileSystem();
    Configuration config = new Configuration(testFS.getConf());
    URI uri = testFS.getUri();
    removeBaseAndBucketOverrides(uri.getHost(), config, METADATASTORE_AUTHORITATIVE);
    config.setBoolean(METADATASTORE_AUTHORITATIVE, true);
    final S3AFileSystem newFS = createFS(uri, config);
    // set back the same metadata store instance
    newFS.setMetadataStore(ms);
    return newFS;
}

19 View Complete Implementation : TestFSNamesystemLock.java
Copyright Apache License 2.0
Author : apache
@Test
public void testFSLockGetWaiterCount() throws InterruptedException {
    final int threadCount = 3;
    final CountDownLatch latch = new CountDownLatch(threadCount);
    final Configuration conf = new Configuration();
    conf.setBoolean(DFS_NAMENODE_FSLOCK_FAIR_KEY, true);
    final FSNamesystemLock rwLock = new FSNamesystemLock(conf, null);
    rwLock.writeLock();
    ExecutorService helper = Executors.newFixedThreadPool(threadCount);
    for (int x = 0; x < threadCount; x++) {
        helper.execute(new Runnable() {

            @Override
            public void run() {
                latch.countDown();
                rwLock.readLock();
            }
        });
    }
    latch.await();
    try {
        GenericTestUtils.waitFor(new Supplier<Boolean>() {

            @Override
            public Boolean get() {
                return (threadCount == rwLock.getQueueLength());
            }
        }, 10, 1000);
    } catch (TimeoutException e) {
        fail("Expected number of blocked thread not found");
    }
}

19 View Complete Implementation : TestLocalJobSubmission.java
Copyright Apache License 2.0
Author : apache
/**
 * Test the local job submission options of -jt local -libjars.
 *
 * @throws IOException thrown if there's an error creating the JAR file
 */
@Test
public void testLocalJobLibjarsOption() throws IOException {
    Configuration conf = new Configuration();
    testLocalJobLibjarsOption(conf);
    conf.setBoolean(Job.USE_WILDCARD_FOR_LIBJARS, false);
    testLocalJobLibjarsOption(conf);
}

19 View Complete Implementation : TestStandbyInProgressTail.java
Copyright Apache License 2.0
Author : apache
@Before
public void startUp() throws IOException {
    conf = new Configuration();
    // Set period of tail edits to a large value (20 mins) for test purposes
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 20 * 60);
    conf.setBoolean(DFSConfigKeys.DFS_HA_TAILEDITS_INPROGRESS_KEY, true);
    conf.setInt(DFSConfigKeys.DFS_QJOURNAL_SELECT_INPUT_STREAMS_TIMEOUT_KEY, 500);
    HAUtil.setAllowStandbyReads(conf, true);
    qjmhaCluster = new MiniQJMHACluster.Builder(conf).build();
    cluster = qjmhaCluster.getDfsCluster();
    // Get NameNodes from the cluster for future manual control
    nn0 = cluster.getNameNode(0);
    nn1 = cluster.getNameNode(1);
}

19 View Complete Implementation : TestHttpServerWithSpnego.java
Copyright Apache License 2.0
Author : apache
private Configuration getSpengoConf(Configuration conf) {
    conf = new Configuration();
    conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY, ProxyUserAuthenticationFilterInitializer.class.getName());
    conf.set(PREFIX + "type", "kerberos");
    conf.setBoolean(PREFIX + "simple.anonymous.allowed", false);
    conf.set(PREFIX + "signature.secret.file", secretFile.getAbsolutePath());
    conf.set(PREFIX + "kerberos.keytab", httpSpnegoKeytabFile.getAbsolutePath());
    conf.set(PREFIX + "kerberos.principal", httpSpnegoPrincipal);
    conf.set(PREFIX + "cookie.domain", realm);
    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
    return conf;
}

19 View Complete Implementation : TestCGroupsMemoryResourceHandlerImpl.java
Copyright Apache License 2.0
Author : apache
@Test
public void testPreStartNonEnforced() throws Exception {
    Configuration conf = new Configuration();
    conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
    conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);
    conf.setBoolean(YarnConfiguration.NM_MEMORY_RESOURCE_ENFORCED, false);
    cGroupsMemoryResourceHandler.bootstrap(conf);
    String id = "container_01_01";
    String path = "test-path/" + id;
    ContainerId mockContainerId = mock(ContainerId.class);
    when(mockContainerId.toString()).thenReturn(id);
    Container mockContainer = mock(Container.class);
    when(mockContainer.getContainerId()).thenReturn(mockContainerId);
    when(mockCGroupsHandler.getPathForCGroupTasks(CGroupsHandler.CGroupController.MEMORY, id)).thenReturn(path);
    int memory = 1024;
    when(mockContainer.getResource()).thenReturn(Resource.newInstance(memory, 1));
    List<PrivilegedOperation> ret = cGroupsMemoryResourceHandler.preStart(mockContainer);
    verify(mockCGroupsHandler, times(1)).createCGroup(CGroupsHandler.CGroupController.MEMORY, id);
    verify(mockCGroupsHandler, times(0)).updateCGroupParam(CGroupsHandler.CGroupController.MEMORY, id, CGroupsHandler.CGROUP_PARAM_MEMORY_HARD_LIMIT_BYTES, String.valueOf(memory) + "M");
    verify(mockCGroupsHandler, times(0)).updateCGroupParam(CGroupsHandler.CGroupController.MEMORY, id, CGroupsHandler.CGROUP_PARAM_MEMORY_SOFT_LIMIT_BYTES, String.valueOf((int) (memory * 0.9)) + "M");
    verify(mockCGroupsHandler, times(0)).updateCGroupParam(CGroupsHandler.CGroupController.MEMORY, id, CGroupsHandler.CGROUP_PARAM_MEMORY_SWAPPINESS, String.valueOf(0));
    Assert.assertNotNull(ret);
    Assert.assertEquals(1, ret.size());
    PrivilegedOperation op = ret.get(0);
    Assert.assertEquals(PrivilegedOperation.OperationType.ADD_PID_TO_CGROUP, op.getOperationType());
    List<String> args = op.getArguments();
    Assert.assertEquals(1, args.size());
    Assert.assertEquals(PrivilegedOperation.CGROUP_ARG_PREFIX + path, args.get(0));
}

19 View Complete Implementation : ITestS3AHugeMagicCommits.java
Copyright Apache License 2.0
Author : apache
/**
 * Create the scale IO conf with the committer enabled.
 * @return the configuration to use for the test FS.
 */
@Override
protected Configuration createScaleConfiguration() {
    Configuration conf = super.createScaleConfiguration();
    conf.setBoolean(MAGIC_COMMITTER_ENABLED, true);
    return conf;
}

19 View Complete Implementation : TestFileSystem.java
Copyright Apache License 2.0
Author : apache
@Test
public void testFsShutdownHook() throws Exception {
    final Set<FileSystem> closed = Collections.synchronizedSet(new HashSet<FileSystem>());
    Configuration conf = new Configuration();
    Configuration confNoAuto = new Configuration();
    conf.setClass("fs.test.impl", TestShutdownFileSystem.class, FileSystem.class);
    confNoAuto.setClass("fs.test.impl", TestShutdownFileSystem.class, FileSystem.class);
    confNoAuto.setBoolean("fs.automatic.close", false);
    TestShutdownFileSystem fsWithAuto = (TestShutdownFileSystem) (new Path("test://a/").getFileSystem(conf));
    TestShutdownFileSystem fsWithoutAuto = (TestShutdownFileSystem) (new Path("test://b/").getFileSystem(confNoAuto));
    fsWithAuto.setClosedSet(closed);
    fsWithoutAuto.setClosedSet(closed);
    // Different URIs should result in different FS instances
    assertNotSame(fsWithAuto, fsWithoutAuto);
    FileSystem.CACHE.closeAll(true);
    assertEquals(1, closed.size());
    assertTrue(closed.contains(fsWithAuto));
    closed.clear();
    FileSystem.closeAll();
    assertEquals(1, closed.size());
    assertTrue(closed.contains(fsWithoutAuto));
}

19 View Complete Implementation : ITestStagingCommitProtocol.java
Copyright Apache License 2.0
Author : apache
@Override
protected Configuration createConfiguration() {
    Configuration conf = super.createConfiguration();
    conf.setInt(FS_S3A_COMMITTER_THREADS, 1);
    // switch to the inconsistent filesystem
    conf.setClass(S3_CLIENT_FACTORY_IMPL, InconsistentS3ClientFactory.class, S3ClientFactory.class);
    // disable unique filenames so that the protocol tests of FileOutputFormat
    // and this test generate consistent names.
    conf.setBoolean(FS_S3A_COMMITTER_STAGING_UNIQUE_FILENAMES, false);
    return conf;
}

19 View Complete Implementation : TestAllowFormat.java
Copyright Apache License 2.0
Author : apache
/**
 * Start MiniDFSCluster and try formatting with different settings
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testAllowFormat() throws IOException {
    LOG.info("--starting mini cluster");
    // manage dirs parameter set to false
    NameNode nn;
    // 1. Create a new cluster and format DFS
    config.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
    cluster = new MiniDFSCluster.Builder(config).manageDataDfsDirs(false).manageNameDfsDirs(false).build();
    cluster.waitActive();
    assertNotNull(cluster);
    nn = cluster.getNameNode();
    assertNotNull(nn);
    LOG.info("Mini cluster created OK");
    // 2. Try formatting DFS with allowformat false.
    // NOTE: the cluster must be shut down for format to work.
    LOG.info("Verifying format will fail with allowformat false");
    config.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, false);
    try {
        cluster.shutdown();
        NameNode.format(config);
        fail("Format succeeded, when it should have failed");
    } catch (IOException e) {
        // expected to fail
        // Verify we got message we expected
        assertTrue("Exception was not about formatting Namenode", e.getMessage().startsWith("The option " + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY));
        LOG.info("Expected failure: " + StringUtils.stringifyException(e));
        LOG.info("Done verifying format will fail with allowformat false");
    }
    // 3. Try formatting DFS with allowformat true
    LOG.info("Verifying format will succeed with allowformat true");
    config.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
    NameNode.format(config);
    LOG.info("Done verifying format will succeed with allowformat true");
}

19 View Complete Implementation : AzureBlobStorageTestAccount.java
Copyright Apache License 2.0
Author : apache
public static AzureBlobStorageTestAccount createOutOfBandStore(int uploadBlockSize, int downloadBlockSize, boolean enableSecureMode) throws Exception {
    saveMetricsConfigFile();
    CloudBlobContainer container = null;
    Configuration conf = createTestConfiguration();
    CloudStorageAccount account = createTestAccount(conf);
    if (null == account) {
        return null;
    }
    String containerName = String.format("wasbtests-%s-%tQ", System.getProperty("user.name"), new Date());
    // Create the container.
    container = account.createCloudBlobClient().getContainerReference(containerName);
    container.create();
    String accountName = verifyWasbAccountNameInConfig(conf);
    // Ensure that custom throttling is disabled and tolerate concurrent
    // out-of-band appends.
    conf.setBoolean(KEY_DISABLE_THROTTLING, true);
    conf.setBoolean(KEY_READ_TOLERATE_CONCURRENT_APPEND, true);
    conf.setBoolean(KEY_USE_SECURE_MODE, enableSecureMode);
    configureSecureModeTestSettings(conf);
    // Set account URI and initialize Azure file system.
    URI accountUri = createAccountUri(accountName, containerName);
    // Set up instrumentation.
    // 
    AzureFileSystemMetricsSystem.fileSystemStarted();
    String sourceName = NativeAzureFileSystem.newMetricsSourceName();
    String sourceDesc = "Azure Storage Volume File System metrics";
    AzureFileSystemInstrumentation instrumentation = new AzureFileSystemInstrumentation(conf);
    AzureFileSystemMetricsSystem.registerSource(sourceName, sourceDesc, instrumentation);
    // Create a new AzureNativeFileSystemStore object.
    AzureNativeFileSystemStore testStorage = new AzureNativeFileSystemStore();
    // Initialize the store with the throttling feedback interfaces.
    testStorage.initialize(accountUri, conf, instrumentation);
    // Create test account initializing the appropriate member variables.
    // 
    AzureBlobStorageTestAccount testAcct = new AzureBlobStorageTestAccount(testStorage, account, container);
    return testAcct;
}

19 View Complete Implementation : TestRMWebServicesHttpStaticUserPermissions.java
Copyright Apache License 2.0
Author : apache
private static void setupAndStartRM() throws Exception {
    Configuration rmconf = new Configuration();
    rmconf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
    rmconf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    rmconf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
    rmconf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    rmconf.set(YarnConfiguration.RM_PRINCIPAL, spnegoPrincipal);
    rmconf.set(YarnConfiguration.RM_KEYTAB, spnegoKeytabFile.getAbsolutePath());
    rmconf.setBoolean("mockrm.webapp.enabled", true);
    UserGroupInformation.setConfiguration(rmconf);
    rm = new MockRM(rmconf);
    rm.start();
}

19 View Complete Implementation : TestFSQueueConverter.java
Copyright Apache License 2.0
Author : apache
@Before
public void setup() {
    config = new Configuration(false);
    config.set(FairSchedulerConfiguration.ALLOCATION_FILE, FAIR_SCHEDULER_XML);
    config.setBoolean(FairSchedulerConfiguration.MIGRATION_MODE, true);
    csConfig = new Configuration(false);
    dryRunResultHolder = new DryRunResultHolder();
    conversionOptions = new ConversionOptions(dryRunResultHolder, false);
    fs = createFairScheduler();
    createBuilder();
    rootQueue = fs.getQueueManager().getRootQueue();
}

19 View Complete Implementation : ITestS3GuardTtl.java
Copyright Apache License 2.0
Author : apache
/**
 * Patch the configuration - this test needs filesystem caching disabled.
 * These tests modify the fs instance, which would otherwise cause flaky tests.
 * @return a configuration
 */
@Override
protected Configuration createConfiguration() {
    Configuration configuration = super.createConfiguration();
    S3ATestUtils.disableFilesystemCaching(configuration);
    configuration = S3ATestUtils.prepareTestConfiguration(configuration);
    configuration.setBoolean(METADATASTORE_AUTHORITATIVE, authoritative);
    return configuration;
}
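
Disabling filesystem caching is itself a setBoolean call: Hadoop's FileSystem cache honours per-scheme fs.<scheme>.impl.disable.cache keys, so the S3ATestUtils helper amounts to something like the following sketch (assumed equivalent, not copied from its source):

// Sketch: force FileSystem.get() to hand out fresh, uncached s3a instances.
configuration.setBoolean("fs.s3a.impl.disable.cache", true);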

19 View Complete Implementation : TestSharedCacheUploader.java
Copyright Apache License 2.0
Author : apache
/**
 * If the localPath does not exist, getActualPath should go one level
 * down
 */
@Test
public void testGetActualPath() throws Exception {
    Configuration conf = new Configuration();
    conf.setBoolean(YarnConfiguration.SHARED_CACHE_ENABLED, true);
    LocalResource resource = mock(LocalResource.class);
    // give public visibility
    when(resource.getVisibility()).thenReturn(LocalResourceVisibility.PUBLIC);
    Path localPath = new Path("foo.jar");
    String user = "joe";
    SCMUploaderProtocol scmClient = mock(SCMUploaderProtocol.class);
    FileSystem fs = mock(FileSystem.class);
    FileSystem localFs = mock(FileSystem.class);
    // stub it to return a status that indicates a directory
    FileStatus status = mock(FileStatus.class);
    when(status.isDirectory()).thenReturn(true);
    when(localFs.getFileStatus(localPath)).thenReturn(status);
    SharedCacheUploader spied = createSpiedUploader(resource, localPath, user, conf, scmClient, fs, localFs);
    Path actualPath = spied.getActualPath();
    assertEquals(actualPath.getName(), localPath.getName());
    assertEquals(actualPath.getParent().getName(), localPath.getName());
}

19 View Complete Implementation : TestNameNodeRetryCacheMetrics.java
Copyright Apache License 2.0
Author : apache
/**
 * Start a cluster
 */
@Before
public void setup() throws Exception {
    conf = new HdfsConfiguration();
    conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3).build();
    cluster.waitActive();
    cluster.transitionToActive(namenodeId);
    HATestUtil.setFailoverConfigurations(cluster, conf);
    filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
    namesystem = cluster.getNamesystem(namenodeId);
    metrics = namesystem.getRetryCache().getMetricsForTests();
}

19 View Complete Implementation : TestHadoopArchiveLogsRunner.java
Copyright Apache License 2.0
Author : apache
@Before
public void setup() throws Exception {
    yarnCluster = new MiniYARNCluster(TestHadoopArchiveLogsRunner.class.getSimpleName(), 1, 2, 1, 1);
    conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
    conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
    yarnCluster.init(conf);
    yarnCluster.start();
    conf = yarnCluster.getConfig();
    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    conf = new JobConf(conf);
    app1 = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    fs = FileSystem.get(conf);
    remoteRootLogDir = new Path(conf.get(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));
    workingDir = new Path(remoteRootLogDir, "archive-logs-work");
    suffix = "logs";
    Path logDir = new Path(remoteRootLogDir, new Path(System.getProperty("user.name"), suffix));
    fs.mkdirs(logDir);
    app1Path = new Path(logDir, app1.toString());
    fs.mkdirs(app1Path);
    for (int i = 0; i < FILE_COUNT; i++) {
        createFile(fs, new Path(app1Path, "log" + (i + 1)), FILE_SIZES[i]);
    }
    FileStatus[] app1Files = fs.listStatus(app1Path);
    Assert.assertEquals(FILE_COUNT, app1Files.length);
}

19 View Complete Implementation : TestLeveldbTimelineStateStore.java
Copyright Apache License 2.0
Author : apache
@Before
public void setup() throws Exception {
    fsPath = new File("target", getClass().getSimpleName() + "-tmpDir").getAbsoluteFile();
    fsContext = FileContext.getLocalFSFileContext();
    fsContext.delete(new Path(fsPath.getAbsolutePath()), true);
    conf = new YarnConfiguration();
    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_RECOVERY_ENABLED, true);
    conf.setClass(YarnConfiguration.TIMELINE_SERVICE_STATE_STORE_CLASS, LeveldbTimelineStateStore.class, TimelineStateStore.class);
    conf.set(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_STATE_STORE_PATH, fsPath.getAbsolutePath());
}

19 View Complete Implementation : TestTrafficController.java
Copyright Apache License 2.0
Author : apache
@Test
public void testClassIdFileContentParsing() {
    conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, false);
    TrafficController trafficController = new TrafficController(conf, privilegedOperationExecutorMock);
    // Verify that classid file contents are parsed correctly
    // This call strips the QDISC prefix and returns the classid associated with
    // the container
    int parsedClassId = trafficController.getClassIdFromFileContents(TEST_CLASS_ID_DECIMAL_STR);
    Assert.assertEquals(TEST_CLASS_ID, parsedClassId);
}

19 View Complete Implementation : TestDSAppMaster.java
Copyright Apache License 2.0
Author : apache
private Configuration getTimelineServiceConf(boolean v1Enabled, boolean v2Enabled) {
    Configuration conf = new YarnConfiguration(new Configuration(false));
    Assert.assertFalse(YarnConfiguration.timelineServiceEnabled(conf));
    if (v1Enabled || v2Enabled) {
        conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
    }
    if (v1Enabled) {
        conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 1.0f);
    }
    if (v2Enabled) {
        conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
        conf.setClass(YarnConfiguration.TIMELINE_SERVICE_WRITER_CLASS, FileSystemTimelineWriterImpl.class, TimelineWriter.class);
    }
    if (v1Enabled && v2Enabled) {
        conf.set(YarnConfiguration.TIMELINE_SERVICE_VERSION, "1.0");
        conf.set(YarnConfiguration.TIMELINE_SERVICE_VERSIONS, "1.0,2.0f");
    }
    return conf;
}

19 View Complete Implementation : TestWorkPreservingRMRestartForNodeLabel.java
Copyright Apache License 2.0
Author : apache
@Before
public void setUp() throws Exception {
    conf = new YarnConfiguration();
    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
    conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
    conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true);
    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.clreplaced.getName());
    mgr = new NullRMNodeLabelsManager();
    mgr.init(conf);
}

19 View Complete Implementation : TestDFSZKFailoverController.java
Copyright Apache License 2.0
Author : apache
@Before
public void setup() throws Exception {
    conf = new Configuration();
    // Specify the quorum per-nameservice, to ensure that these configs
    // can be nameservice-scoped.
    conf.set(ZKFailoverController.ZK_QUORUM_KEY + ".ns1", hostPort);
    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, AlwaysSucceedFencer.class.getName());
    conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
    // Turn off IPC client caching, so that the suite can handle
    // the restart of the daemons between test cases.
    conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);
    // Get random port numbers in advance. Because ZKFCs and DFSHAAdmin
    // need the RPC port numbers of all ZKFCs, setting 0 does not work here.
    conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn1", ServerSocketUtil.getPort(10023, 100));
    conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2", ServerSocketUtil.getPort(10024, 100));
    // prefer non-ephemeral port to avoid port collision on restartNameNode
    MiniDFSNNTopology topology = new MiniDFSNNTopology().addNameservice(new MiniDFSNNTopology.NSConf("ns1").addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(ServerSocketUtil.getPort(10021, 100))).addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(ServerSocketUtil.getPort(10022, 100))));
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology).numDataNodes(0).build();
    cluster.waitActive();
    ctx = new TestContext();
    ctx.addThread(thr1 = new ZKFCThread(ctx, 0));
    assertEquals(0, thr1.zkfc.run(new String[] { "-formatZK" }));
    thr1.start();
    waitForHAState(0, HAServiceState.ACTIVE);
    ctx.addThread(thr2 = new ZKFCThread(ctx, 1));
    thr2.start();
    // Wait for the ZKFCs to fully start up
    ZKFCTestUtil.waitForHealthState(thr1.zkfc, HealthMonitor.State.SERVICE_HEALTHY, ctx);
    ZKFCTestUtil.waitForHealthState(thr2.zkfc, HealthMonitor.State.SERVICE_HEALTHY, ctx);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
}

19 View Complete Implementation : TestAddBlockTailing.java
Copyright Apache License 2.0
Author : apache
@BeforeClass
public static void startUpCluster() throws Exception {
    Configuration conf = new Configuration();
    conf.setBoolean(DFS_HA_TAILEDITS_INPROGRESS_KEY, true);
    MiniQJMHACluster.Builder qjmBuilder = new MiniQJMHACluster.Builder(conf).setNumNameNodes(2);
    qjmBuilder.getDfsBuilder().numDataNodes(1);
    qjmhaCluster = qjmBuilder.build();
    dfsCluster = qjmhaCluster.getDfsCluster();
    dfsCluster.waitActive();
    dfsCluster.transitionToActive(0);
    dfs = dfsCluster.getFileSystem(0);
    fsn0 = dfsCluster.getNameNode(0).getNamesystem();
    fsn1 = dfsCluster.getNameNode(1).getNamesystem();
    dfs.mkdirs(new Path(TEST_DIR), new FsPermission("755"));
    dn0 = dfsCluster.getDataNodes().get(0);
}