org.apache.flink.configuration.Configuration - Java examples

Here are examples of the Java API class org.apache.flink.configuration.Configuration, taken from open source projects.

155 Examples

Example: AbstractTableInputFormat.java (Apache License 2.0, Author: apache)
/**
 * Creates a {@link Scan} object and opens the {@link HTable} connection.
 *
 * <p>These are opened here because they are needed in createInputSplits(),
 * which is called before the openInputFormat() method.
 *
 * <p>The connection is opened in this method and closed in {@link #closeInputFormat()}.
 *
 * @param parameters The configuration that is to be used
 * @see Configuration
 */
public abstract void configure(Configuration parameters);

Example: PubSubSource.java (Apache License 2.0, Author: apache)
@Override
public void open(Configuration configuration) throws Exception {
    super.open(configuration);
    if (hasNoCheckpointingEnabled(getRuntimeContext())) {
        throw new IllegalArgumentException("The PubSubSource REQUIRES Checkpointing to be enabled and " + "the checkpointing frequency must be MUCH lower than the PubSub timeout for it to retry a message.");
    }
    getRuntimeContext().getMetricGroup().gauge("PubSubMessagesProcessedNotAcked", this::getOutstandingMessagesToAck);
    // convert per-subtask-limit to global rate limit, as FlinkConnectorRateLimiter::setRate expects a global rate limit.
    rateLimiter.setRate(messagePerSecondRateLimit * getRuntimeContext().getNumberOfParallelSubtasks());
    rateLimiter.open(getRuntimeContext());
    createAndSetPubSubSubscriber();
    this.isRunning = true;
}

Example: PythonScalarFunctionFlatMap.java (Apache License 2.0, Author: apache)
@Override
public void open(Configuration parameters) throws Exception {
    super.open(parameters);
    this.elementCount = 0;
    this.bundleStarted = new AtomicBoolean(false);
    this.maxBundleSize = config.getMaxBundleSize();
    if (this.maxBundleSize <= 0) {
        this.maxBundleSize = PythonOptions.MAX_BUNDLE_SIZE.defaultValue();
        LOG.error("Invalid value for the maximum bundle size. Using default value of " + this.maxBundleSize + '.');
    } else {
        LOG.info("The maximum bundle size is configured to {}.", this.maxBundleSize);
    }
    if (config.getMaxBundleTimeMills() != PythonOptions.MAX_BUNDLE_TIME_MILLS.defaultValue()) {
        LOG.info("Maximum bundle time takes no effect in old planner under batch mode. " + "Config maximum bundle size instead! " + "Under batch mode, bundle size should be enough to control both throughput and latency.");
    }
    forwardedInputQueue = new LinkedBlockingQueue<>();
    udfResultQueue = new LinkedBlockingQueue<>();
    udfInputType = new RowType(Arrays.stream(udfInputOffsets).mapToObj(i -> inputType.getFields().get(i)).collect(Collectors.toList()));
    udfOutputType = new RowType(outputType.getFields().subList(forwardedFields.length, outputType.getFieldCount()));
    RowTypeInfo forwardedInputTypeInfo = new RowTypeInfo(Arrays.stream(forwardedFields).mapToObj(i -> inputType.getFields().get(i)).map(RowType.RowField::getType).map(TypeConversions::fromLogicalToDataType).map(TypeConversions::fromDataTypeToLegacyInfo).toArray(TypeInformation[]::new));
    forwardedInputSerializer = forwardedInputTypeInfo.createSerializer(getRuntimeContext().getExecutionConfig());
    this.pythonFunctionRunner = createPythonFunctionRunner();
    this.pythonFunctionRunner.open();
}

Example: KubernetesSessionCli.java (Apache License 2.0, Author: apache)
public static void main(String[] args) {
    final Configuration configuration = GlobalConfiguration.loadConfiguration();
    int retCode;
    try {
        final KubernetesSessionCli cli = new KubernetesSessionCli(configuration);
        retCode = SecurityUtils.getInstalledContext().runSecured(() -> cli.run(args));
    } catch (CliArgsException e) {
        retCode = AbstractCustomCommandLine.handleCliArgsException(e, LOG);
    } catch (Exception e) {
        retCode = AbstractCustomCommandLine.handleError(e, LOG);
    }
    System.exit(retCode);
}

Example: FileOutputFormat.java (Apache License 2.0, Author: apache)
/**
 * Initialize defaults for output format. Needs to be a static method because it is configured for local
 * cluster execution.
 * @param configuration The configuration to load defaults from
 */
public static void initDefaultsFromConfiguration(Configuration configuration) {
    final boolean overwrite = configuration.getBoolean(CoreOptions.FILESYTEM_DEFAULT_OVERRIDE);
    DEFAULT_WRITE_MODE = overwrite ? WriteMode.OVERWRITE : WriteMode.NO_OVERWRITE;
    final boolean alwaysCreateDirectory = configuration.getBoolean(CoreOptions.FILESYSTEM_OUTPUT_ALWAYS_CREATE_DIRECTORY);
    DEFAULT_OUTPUT_DIRECTORY_MODE = alwaysCreateDirectory ? OutputDirectoryMode.ALWAYS : OutputDirectoryMode.PARONLY;
}
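
The two options read above can be set programmatically before initializing the defaults. A minimal sketch (the values are arbitrary):

Configuration configuration = new Configuration();
// overwrite existing files instead of failing with WriteMode.NO_OVERWRITE
configuration.setBoolean(CoreOptions.FILESYTEM_DEFAULT_OVERRIDE, true);
// always create an output directory, even for a parallelism of 1
configuration.setBoolean(CoreOptions.FILESYSTEM_OUTPUT_ALWAYS_CREATE_DIRECTORY, true);
FileOutputFormat.initDefaultsFromConfiguration(configuration);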

Example: SingleInputUdfOperator.java (Apache License 2.0, Author: apache)
// --------------------------------------------------------------------------------------------
// Fluent API methods
// --------------------------------------------------------------------------------------------
@Override
public O withParameters(Configuration parameters) {
    this.parameters = parameters;
    @SuppressWarnings("unchecked")
    O returnType = (O) this;
    return returnType;
}
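
For context, a minimal usage sketch (hypothetical job code: input is assumed to be a DataSet<String> and the key "my.custom.limit" is made up). A Configuration passed through withParameters becomes visible in the rich function's open(Configuration) method:

Configuration parameters = new Configuration();
parameters.setInteger("my.custom.limit", 16);

DataSet<String> truncated = input
    .map(new RichMapFunction<String, String>() {

        private int limit;

        @Override
        public void open(Configuration config) {
            // falls back to 0 if the key is absent
            limit = config.getInteger("my.custom.limit", 0);
        }

        @Override
        public String map(String value) {
            return value.length() > limit ? value.substring(0, limit) : value;
        }
    })
    .withParameters(parameters);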

Example: HBaseRowInputFormat.java (Apache License 2.0, Author: apache)
@Override
public void configure(Configuration parameters) {
    LOG.info("Initializing HBase configuration.");
    // prepare hbase read helper
    this.readHelper = new HBaseReadWriteHelper(schema);
    connectToTable();
    if (table != null) {
        scan = getScanner();
    }
}

Example: StreamNetworkPointToPointBenchmark.java (Apache License 2.0, Author: apache)
/**
 * Initializes the throughput benchmark with the given parameters.
 *
 * @param flushTimeout
 * 		output flushing interval of the
 * 		{@link org.apache.flink.runtime.io.network.api.writer.RecordWriter}'s output flusher thread
 */
public void setUp(long flushTimeout, Configuration config) throws Exception {
    environment = new StreamNetworkBenchmarkEnvironment<>();
    environment.setUp(1, 1, false, -1, -1, config);
    ResultPartitionWriter resultPartitionWriter = environment.createResultPartitionWriter(0);
    recordWriter = new RecordWriterBuilder().setTimeout(flushTimeout).build(resultPartitionWriter);
    receiver = environment.createReceiver();
}

Example: ZooKeeperUtils.java (Apache License 2.0, Author: apache)
/**
 * Creates a {@link ZooKeeperLeaderRetrievalService} instance.
 *
 * @param client        The {@link CuratorFramework} ZooKeeper client to use
 * @param configuration {@link Configuration} object containing the configuration values
 * @return {@link ZooKeeperLeaderRetrievalService} instance.
 */
public static ZooKeeperLeaderRetrievalService createLeaderRetrievalService(final CuratorFramework client, final Configuration configuration) {
    return createLeaderRetrievalService(client, configuration, "");
}

Example: FileOutputFormat.java (Apache License 2.0, Author: apache)
// ----------------------------------------------------------------
@Override
public void configure(Configuration parameters) {
    // get the output file path, if it was not yet set
    if (this.outputFilePath == null) {
        // get the file parameter
        String filePath = parameters.getString(FILE_PARAMETER_KEY, null);
        if (filePath == null) {
            throw new IllegalArgumentException("The output path has been specified neither via constructor/setters" + ", nor via the Configuration.");
        }
        try {
            this.outputFilePath = new Path(filePath);
        } catch (RuntimeException rex) {
            throw new RuntimeException("Could not create a valid URI from the given file path name: " + rex.getMessage());
        }
    }
    // check if the modes have not been set and use the defaults in that case
    if (this.writeMode == null) {
        this.writeMode = DEFAULT_WRITE_MODE;
    }
    if (this.outputDirectoryMode == null) {
        this.outputDirectoryMode = DEFAULT_OUTPUT_DIRECTORY_MODE;
    }
}

Example: FlinkDistributionOverlayTest.java (Apache License 2.0, Author: apache)
public void testBuilderFromEnvironmentBad(String obligatoryEnvironmentVariable) throws Exception {
    Configuration conf = new Configuration();
    // adjust the test environment for the purposes of this test
    Map<String, String> map = new HashMap<>(System.getenv());
    map.remove(obligatoryEnvironmentVariable);
    CommonTestUtils.setEnv(map);
    try {
        FlinkDistributionOverlay.Builder builder = FlinkDistributionOverlay.newBuilder().fromEnvironment(conf);
        fail();
    } catch (IllegalStateException e) {
    // expected
    }
}

Example: BlobUtils.java (Apache License 2.0, Author: apache)
/**
 * Creates a BlobStore based on the parameters set in the configuration.
 *
 * @param config
 * 		configuration to use
 *
 * @return a (distributed) blob store for high availability
 *
 * @throws IOException
 * 		thrown if the (distributed) file storage cannot be created
 */
public static BlobStoreService createBlobStoreFromConfig(Configuration config) throws IOException {
    if (HighAvailabilityMode.isHighAvailabilityModeActivated(config)) {
        return createFileSystemBlobStore(config);
    } else {
        return new VoidBlobStore();
    }
}
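
A minimal sketch of driving the HA branch above (assumed values; setting the HA mode to "zookeeper" activates high availability, so a file system blob store backed by the HA storage path is created):

static BlobStoreService haBlobStore() throws IOException {
    Configuration config = new Configuration();
    config.setString(HighAvailabilityOptions.HA_MODE, "zookeeper");
    config.setString(HighAvailabilityOptions.HA_STORAGE_PATH, "hdfs:///flink/ha");
    return BlobUtils.createBlobStoreFromConfig(config);
}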

Example: BlobCacheCorruptionTest.java (Apache License 2.0, Author: apache)
/**
 * Checks the GET operation fails when the downloaded file (from HA store)
 * is corrupt, i.e. its content's hash does not match the {@link BlobKey}'s hash, using a
 * permanent BLOB.
 *
 * @param jobId
 * 		job ID
 * @param config
 * 		blob server configuration (including HA settings like {@link HighAvailabilityOptions#HA_STORAGE_PATH}
 * 		and {@link HighAvailabilityOptions#HA_CLUSTER_ID}) used to set up <tt>blobStore</tt>
 * @param blobStore
 * 		shared HA blob store to use
 * @param expectedException
 * 		expected exception rule to use
 */
public static void testGetFailsFromCorruptFile(JobID jobId, Configuration config, BlobStore blobStore, ExpectedException expectedException) throws IOException {
    testGetFailsFromCorruptFile(jobId, PERMANENT_BLOB, true, config, blobStore, expectedException);
}

Example: CassandraPojoSink.java (Apache License 2.0, Author: apache)
@Override
public void open(Configuration configuration) {
    super.open(configuration);
    try {
        this.mappingManager = new MappingManager(session);
        this.mapper = mappingManager.mapper(clazz);
        if (options != null) {
            Mapper.Option[] optionsArray = options.getMapperOptions();
            if (optionsArray != null) {
                this.mapper.setDefaultSaveOptions(optionsArray);
            }
        }
    } catch (Exception e) {
        throw new RuntimeException("Cannot create CreplacedandraPojoSink with input: " + clazz.getSimpleName(), e);
    }
}

Example: StandaloneUtils.java (Apache License 2.0, Author: apache)
/**
 * Creates a {@link StandaloneLeaderRetrievalService} from the given configuration. The
 * host and port for the remote Akka URL are retrieved from the provided configuration.
 *
 * @param configuration Configuration instance containing the host and port information
 * @return StandaloneLeaderRetrievalService
 * @throws ConfigurationException
 * @throws UnknownHostException
 */
public static StandaloneLeaderRetrievalService createLeaderRetrievalService(Configuration configuration) throws ConfigurationException, UnknownHostException {
    return createLeaderRetrievalService(configuration, false, null);
}

Example: ZooKeeperUtils.java (Apache License 2.0, Author: apache)
/**
 * Creates a {@link FileSystemStateStorageHelper} instance.
 *
 * @param configuration {@link Configuration} object
 * @param prefix Prefix for the created files
 * @param <T> Type of the state objects
 * @return {@link FileSystemStateStorageHelper} instance
 * @throws IOException if file system state storage cannot be created
 */
public static <T extends Serializable> FileSystemStateStorageHelper<T> createFileSystemStateStorage(Configuration configuration, String prefix) throws IOException {
    return new FileSystemStateStorageHelper<>(HighAvailabilityServicesUtils.getClusterHighAvailableStoragePath(configuration), prefix);
}

Example: TableInputFormat.java (Apache License 2.0, Author: apache)
/**
 * Creates a {@link Scan} object and opens the {@link HTable} connection.
 * These are opened here because they are needed in createInputSplits(),
 * which is called before the openInputFormat() method.
 * The connection is therefore opened in {@link #configure(Configuration)} and closed in {@link #closeInputFormat()}.
 *
 * @param parameters The configuration that is to be used
 * @see Configuration
 */
@Override
public void configure(Configuration parameters) {
    table = createTable();
    if (table != null) {
        scan = getScanner();
    }
}

Example: FixedDelayRestartStrategy.java (Apache License 2.0, Author: apache)
/**
 * Creates a FixedDelayRestartStrategy from the given Configuration.
 *
 * @param configuration Configuration containing the parameter values for the restart strategy
 * @return Initialized instance of FixedDelayRestartStrategy
 * @throws Exception
 */
public static FixedDelayRestartStrategyFactory createFactory(Configuration configuration) throws Exception {
    int maxAttempts = configuration.getInteger(RestartStrategyOptions.RESTART_STRATEGY_FIXED_DELAY_ATTEMPTS);
    long delay = configuration.get(RestartStrategyOptions.RESTART_STRATEGY_FIXED_DELAY_DELAY).toMillis();
    return new FixedDelayRestartStrategyFactory(maxAttempts, delay);
}
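
As a usage sketch, the two options read by createFactory can be set on the Configuration first (the attempt count and delay are arbitrary; Duration is java.time.Duration):

static FixedDelayRestartStrategyFactory buildFactory() throws Exception {
    Configuration configuration = new Configuration();
    // restart up to 3 times, waiting 10 seconds between attempts
    configuration.setInteger(RestartStrategyOptions.RESTART_STRATEGY_FIXED_DELAY_ATTEMPTS, 3);
    configuration.set(RestartStrategyOptions.RESTART_STRATEGY_FIXED_DELAY_DELAY, Duration.ofSeconds(10));
    return FixedDelayRestartStrategy.createFactory(configuration);
}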

Example: RestartBackoffTimeStrategyFactoryLoader.java (Apache License 2.0, Author: apache)
/**
 * Creates {@link RestartBackoffTimeStrategy.Factory} from the given configuration.
 *
 * <p>The strategy factory is decided in order as follows:
 * <ol>
 *     <li>Strategy set within job graph, i.e. {@link RestartStrategies.RestartStrategyConfiguration},
 * unless the config is {@link RestartStrategies.FallbackRestartStrategyConfiguration}.</li>
 *     <li>Strategy set in the cluster (server-side) config (flink-conf.yaml),
 * if one is specified there.</li>
 *     <li>{@link FixedDelayRestartBackoffTimeStrategy.FixedDelayRestartBackoffTimeStrategyFactory} if
 * checkpointing is enabled. Otherwise {@link NoRestartBackoffTimeStrategy.NoRestartBackoffTimeStrategyFactory}</li>
 * </ol>
 *
 * @param jobRestartStrategyConfiguration restart configuration given within the job graph
 * @param clusterConfiguration cluster (server-side) configuration
 * @param isCheckpointingEnabled if checkpointing is enabled for the job
 * @return new version restart strategy factory
 */
public static RestartBackoffTimeStrategy.Factory createRestartBackoffTimeStrategyFactory(final RestartStrategies.RestartStrategyConfiguration jobRestartStrategyConfiguration, final Configuration clusterConfiguration, final boolean isCheckpointingEnabled) {
    checkNotNull(jobRestartStrategyConfiguration);
    checkNotNull(clusterConfiguration);
    return getJobRestartStrategyFactory(jobRestartStrategyConfiguration).orElse(getClusterRestartStrategyFactory(clusterConfiguration).orElse(getDefaultRestartStrategyFactory(isCheckpointingEnabled)));
}

Example: KinesisEventsGeneratorProducerThread.java (Apache License 2.0, Author: apache)
public static Thread create(final int totalEventCount, final int parallelism, final String awsAccessKey, final String awsSecretKey, final String awsRegion, final String kinesisStreamName, final AtomicReference<Throwable> errorHandler, final int flinkPort, final Configuration flinkConfig) {
    Runnable kinesisEventsGeneratorProducer = new Runnable() {

        @Override
        public void run() {
            try {
                StreamExecutionEnvironment see = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort, flinkConfig);
                see.setParallelism(parallelism);
                // start data generator
                DataStream<String> simpleStringStream = see.addSource(new KinesisEventsGeneratorProducerThread.EventsGenerator(totalEventCount)).setParallelism(1);
                Properties producerProps = new Properties();
                producerProps.setProperty(AWSConfigConstants.AWS_ACCESS_KEY_ID, awsAccessKey);
                producerProps.setProperty(AWSConfigConstants.AWS_SECRET_ACCESS_KEY, awsSecretKey);
                producerProps.setProperty(AWSConfigConstants.AWS_REGION, awsRegion);
                FlinkKinesisProducer<String> kinesis = new FlinkKinesisProducer<>(new SimpleStringSchema(), producerProps);
                kinesis.setFailOnError(true);
                kinesis.setDefaultStream(kinesisStreamName);
                kinesis.setDefaultPartition("0");
                simpleStringStream.addSink(kinesis);
                LOG.info("Starting producing topology");
                see.execute("Producing topology");
                LOG.info("Producing topo finished");
            } catch (Exception e) {
                LOG.warn("Error while running producing topology", e);
                errorHandler.set(e);
            }
        }
    };
    return new Thread(kinesisEventsGeneratorProducer);
}

Example: MemoryStateBackend.java (Apache License 2.0, Author: apache)
// ------------------------------------------------------------------------
// Reconfiguration
// ------------------------------------------------------------------------
/**
 * Creates a copy of this state backend that uses the values defined in the configuration
 * for fields that were not specified in this state backend.
 *
 * @param config The configuration
 * @param classLoader The class loader
 * @return The re-configured variant of the state backend
 */
@Override
public MemoryStateBackend configure(Configuration config, ClassLoader classLoader) {
    return new MemoryStateBackend(this, config, classLoader);
}

Example: SSLUtils.java (Apache License 2.0, Author: apache)
/**
 * Creates a SSLEngineFactory to be used by internal communication server endpoints.
 */
public static SSLHandlerFactory createInternalServerSSLEngineFactory(final Configuration config) throws Exception {
    SslContext sslContext = createInternalNettySSLContext(config, false);
    if (sslContext == null) {
        throw new IllegalConfigurationException("SSL is not enabled for internal communication.");
    }
    return new SSLHandlerFactory(sslContext, config.getInteger(SecurityOptions.SSL_INTERNAL_HANDSHAKE_TIMEOUT), config.getInteger(SecurityOptions.SSL_INTERNAL_CLOSE_NOTIFY_FLUSH_TIMEOUT));
}

Example: SSLUtils.java (Apache License 2.0, Author: apache)
/**
 * Creates a SSLEngineFactory to be used by internal communication client endpoints.
 */
public static SSLHandlerFactory createInternalClientSSLEngineFactory(final Configuration config) throws Exception {
    SslContext sslContext = createInternalNettySSLContext(config, true);
    if (sslContext == null) {
        throw new IllegalConfigurationException("SSL is not enabled for internal communication.");
    }
    return new SSLHandlerFactory(sslContext, config.getInteger(SecurityOptions.SSL_INTERNAL_HANDSHAKE_TIMEOUT), config.getInteger(SecurityOptions.SSL_INTERNAL_CLOSE_NOTIFY_FLUSH_TIMEOUT));
}

Example: AkkaRpcServiceUtils.java (Apache License 2.0, Author: apache)
// ------------------------------------------------------------------------
// RPC endpoint addressing
// ------------------------------------------------------------------------
/**
 * @param hostname The hostname or address where the target RPC service is listening.
 * @param port The port where the target RPC service is listening.
 * @param endpointName The name of the RPC endpoint.
 * @param addressResolution Whether to try address resolution of the given hostname or not.
 *                          This allows to fail fast in case that the hostname cannot be resolved.
 * @param config The configuration from which to deduce further settings.
 *
 * @return The RPC URL of the specified RPC endpoint.
 */
public static String getRpcUrl(String hostname, int port, String endpointName, HighAvailabilityServicesUtils.AddressResolution addressResolution, Configuration config) throws UnknownHostException {
    checkNotNull(config, "config is null");
    final boolean sslEnabled = config.getBoolean(AkkaOptions.SSL_ENABLED) && SSLUtils.isInternalSSLEnabled(config);
    return getRpcUrl(hostname, port, endpointName, addressResolution, sslEnabled ? AkkaProtocol.SSL_TCP : AkkaProtocol.TCP);
}
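
A hedged usage sketch (the host, port and endpoint name are made up; NO_ADDRESS_RESOLUTION is assumed to skip the fail-fast hostname lookup):

static String jobManagerRpcUrl(Configuration config) throws UnknownHostException {
    return AkkaRpcServiceUtils.getRpcUrl(
        "localhost", 6123, "jobmanager",
        HighAvailabilityServicesUtils.AddressResolution.NO_ADDRESS_RESOLUTION, config);
}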

Example: FileJobGraphRetriever.java (Apache License 2.0, Author: apache)
@Override
public JobGraph retrieveJobGraph(Configuration configuration) throws FlinkException {
    final File fp = new File(jobGraphFile);
    try (FileInputStream input = new FileInputStream(fp);
        ObjectInputStream obInput = new ObjectInputStream(input)) {
        final JobGraph jobGraph = (JobGraph) obInput.readObject();
        addUserClassPathsToJobGraph(jobGraph);
        return jobGraph;
    } catch (FileNotFoundException e) {
        throw new FlinkException("Could not find the JobGraph file.", e);
    } catch (ClassNotFoundException | IOException e) {
        throw new FlinkException("Could not load the JobGraph from file.", e);
    }
}

Example: ExactlyOnceValidatingConsumerThread.java (Apache License 2.0, Author: apache)
public static Thread create(final int totalEventCount, final int failAtRecordCount, final int parallelism, final int checkpointInterval, final long restartDelay, final String awsAccessKey, final String awsSecretKey, final String awsRegion, final String kinesisStreamName, final AtomicReference<Throwable> errorHandler, final int flinkPort, final Configuration flinkConfig) {
    Runnable exactlyOnceValidationConsumer = new Runnable() {

        @Override
        public void run() {
            try {
                StreamExecutionEnvironment see = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort, flinkConfig);
                see.setParallelism(parallelism);
                see.enableCheckpointing(checkpointInterval);
                // we restart two times
                see.setRestartStrategy(RestartStrategies.fixedDelayRestart(2, restartDelay));
                // consuming topology
                Properties consumerProps = new Properties();
                consumerProps.setProperty(ConsumerConfigConstants.AWS_ACCESS_KEY_ID, awsAccessKey);
                consumerProps.setProperty(ConsumerConfigConstants.AWS_SECRET_ACCESS_KEY, awsSecretKey);
                consumerProps.setProperty(ConsumerConfigConstants.AWS_REGION, awsRegion);
                // start reading from beginning
                consumerProps.setProperty(ConsumerConfigConstants.STREAM_INITIAL_POSITION, ConsumerConfigConstants.InitialPosition.TRIM_HORIZON.name());
                DataStream<String> consuming = see.addSource(new FlinkKinesisConsumer<>(kinesisStreamName, new SimpleStringSchema(), consumerProps));
                consuming.flatMap(new ArtificialFailOnceFlatMapper(failAtRecordCount)).flatMap(new ExactlyOnceValidatingMapper(totalEventCount)).setParallelism(1);
                LOG.info("Starting consuming topology");
                tryExecute(see, "Consuming topo");
                LOG.info("Consuming topo finished");
            } catch (Exception e) {
                LOG.warn("Error while running consuming topology", e);
                errorHandler.set(e);
            }
        }
    };
    return new Thread(exactlyOnceValidationConsumer);
}

Example: BootstrapTools.java (Apache License 2.0, Author: apache)
/**
 * Calculates the heap size after cut-off. The heap size after cut-off is used to set -Xms and -Xmx in the jobmanager
 * start command.
 */
public static int calculateHeapSize(int memory, Configuration conf) {
    final float memoryCutoffRatio = conf.getFloat(ResourceManagerOptions.CONTAINERIZED_HEAP_CUTOFF_RATIO);
    final int minCutoff = conf.getInteger(ResourceManagerOptions.CONTAINERIZED_HEAP_CUTOFF_MIN);
    if (memoryCutoffRatio > 1 || memoryCutoffRatio < 0) {
        throw new IllegalArgumentException("The configuration value '" + ResourceManagerOptions.CONTAINERIZED_HEAP_CUTOFF_RATIO.key() + "' must be between 0 and 1. Value given=" + memoryCutoffRatio);
    }
    if (minCutoff > memory) {
        throw new IllegalArgumentException("The configuration value '" + ResourceManagerOptions.CONTAINERIZED_HEAP_CUTOFF_MIN.key() + "' is higher (" + minCutoff + ") than the requested amount of memory " + memory);
    }
    int heapLimit = (int) ((float) memory * memoryCutoffRatio);
    if (heapLimit < minCutoff) {
        heapLimit = minCutoff;
    }
    return memory - heapLimit;
}
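
A worked example of the arithmetic (arbitrary values): with 4096 MB of memory, a cut-off ratio of 0.25 and a minimum cut-off of 600 MB, the cut-off is max(4096 * 0.25, 600) = 1024 MB, so the method returns 3072 MB:

Configuration conf = new Configuration();
conf.setFloat(ResourceManagerOptions.CONTAINERIZED_HEAP_CUTOFF_RATIO, 0.25f);
conf.setInteger(ResourceManagerOptions.CONTAINERIZED_HEAP_CUTOFF_MIN, 600);
int heapSizeMB = BootstrapTools.calculateHeapSize(4096, conf);
// heapSizeMB == 3072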

Example: TaskExecutorResourceUtils.java (Apache License 2.0, Author: apache)
public static TaskExecutorResourceSpec resourceSpecFromConfig(final Configuration config) {
    if (isTaskHeapMemorySizeExplicitlyConfigured(config) && isManagedMemorySizeExplicitlyConfigured(config)) {
        // both task heap memory and managed memory are configured, use these to derive total flink memory
        return deriveResourceSpecWithExplicitTaskAndManagedMemory(config);
    } else if (isTotalFlinkMemorySizeExplicitlyConfigured(config)) {
        // either of task heap memory and managed memory is not configured, total flink memory is configured,
        // derive from total flink memory
        return deriveResourceSpecWithTotalFlinkMemory(config);
    } else if (isTotalProcessMemorySizeExplicitlyConfigured(config)) {
        // total flink memory is not configured, total process memory is configured,
        // derive from total process memory
        return deriveResourceSpecWithTotalProcessMemory(config);
    } else {
        throw new IllegalConfigurationException(String.format("Either Task Heap Memory size (%s) and Managed Memory size (%s), or Total Flink" + " Memory size (%s), or Total Process Memory size (%s) need to be configured explicitly.", TaskManagerOptions.TASK_HEAP_MEMORY.key(), TaskManagerOptions.MANAGED_MEMORY_SIZE.key(), TaskManagerOptions.TOTAL_FLINK_MEMORY.key(), TaskManagerOptions.TOTAL_PROCESS_MEMORY.key()));
    }
}
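
A minimal sketch that takes the first branch by configuring both task heap memory and managed memory explicitly (the sizes are arbitrary):

Configuration config = new Configuration();
config.set(TaskManagerOptions.TASK_HEAP_MEMORY, MemorySize.parse("512m"));
config.set(TaskManagerOptions.MANAGED_MEMORY_SIZE, MemorySize.parse("512m"));
TaskExecutorResourceSpec spec = TaskExecutorResourceUtils.resourceSpecFromConfig(config);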

Example: ZooKeeperUtils.java (Apache License 2.0, Author: apache)
/**
 * Creates a {@link ZooKeeperLeaderElectionService} instance.
 *
 * @param client        The {@link CuratorFramework} ZooKeeper client to use
 * @param configuration {@link Configuration} object containing the configuration values
 * @return {@link ZooKeeperLeaderElectionService} instance.
 */
public static ZooKeeperLeaderElectionService createLeaderElectionService(CuratorFramework client, Configuration configuration) throws Exception {
    return createLeaderElectionService(client, configuration, "");
}

Example: BlobClient.java (Apache License 2.0, Author: apache)
/**
 * Uploads the JAR files to the {@link PermanentBlobService} of the {@link BlobServer} at the
 * given address with HA as configured.
 *
 * @param serverAddress
 * 		Server address of the {@link BlobServer}
 * @param clientConfig
 * 		Any additional configuration for the blob client
 * @param jobId
 * 		ID of the job this blob belongs to (or <tt>null</tt> if job-unrelated)
 * @param files
 * 		List of files to upload
 *
 * @throws IOException
 * 		if the upload fails
 */
public static List<PermanentBlobKey> uploadFiles(InetSocketAddress serverAddress, Configuration clientConfig, JobID jobId, List<Path> files) throws IOException {
    checkNotNull(jobId);
    if (files.isEmpty()) {
        return Collections.emptyList();
    } else {
        List<PermanentBlobKey> blobKeys = new ArrayList<>();
        try (BlobClient blobClient = new BlobClient(serverAddress, clientConfig)) {
            for (final Path file : files) {
                final PermanentBlobKey key = blobClient.uploadFile(jobId, file);
                blobKeys.add(key);
            }
        }
        return blobKeys;
    }
}

Example: FailoverStrategyFactoryLoader.java (Apache License 2.0, Author: apache)
/**
 * Loads a {@link FailoverStrategy.Factory} from the given configuration.
 *
 * @param config which specifies the failover strategy factory to load
 * @return failover strategy factory loaded
 */
public static FailoverStrategy.Factory loadFailoverStrategyFactory(final Configuration config) {
    checkNotNull(config);
    // the default NG failover strategy is the region failover strategy.
    // TODO: Remove the overridden default value when removing legacy scheduler
    // and change the default value of JobManagerOptions.EXECUTION_FAILOVER_STRATEGY
    // to be "region"
    final String strategyParam = config.getString(JobManagerOptions.EXECUTION_FAILOVER_STRATEGY, PIPELINED_REGION_RESTART_STRATEGY_NAME);
    switch(strategyParam.toLowerCase()) {
        case FULL_RESTART_STRATEGY_NAME:
            return new RestartAllFailoverStrategy.Factory();
        case PIPELINED_REGION_RESTART_STRATEGY_NAME:
            return new RestartPipelinedRegionFailoverStrategy.Factory();
        default:
            throw new IllegalConfigurationException("Unknown failover strategy: " + strategyParam);
    }
}
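
A short sketch selecting the full-restart strategy explicitly (the "full" and "region" values match the strategies shown above; with no value set, the loader falls back to the region strategy):

Configuration config = new Configuration();
config.setString(JobManagerOptions.EXECUTION_FAILOVER_STRATEGY, "full");
FailoverStrategy.Factory factory = FailoverStrategyFactoryLoader.loadFailoverStrategyFactory(config);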

Example: ContextEnvironmentFactory.java (Apache License 2.0, Author: apache)
/**
 * The factory that instantiates the environment to be used when running jobs that are
 * submitted through a pre-configured client connection.
 * This happens for example when a job is submitted from the command line.
 */
public class ContextEnvironmentFactory implements ExecutionEnvironmentFactory {

    private final PipelineExecutorServiceLoader executorServiceLoader;

    private final Configuration configuration;

    private final ClassLoader userCodeClassLoader;

    public ContextEnvironmentFactory(final PipelineExecutorServiceLoader executorServiceLoader, final Configuration configuration, final ClassLoader userCodeClassLoader) {
        this.executorServiceLoader = checkNotNull(executorServiceLoader);
        this.configuration = checkNotNull(configuration);
        this.userCodeClassLoader = checkNotNull(userCodeClassLoader);
    }

    @Override
    public ExecutionEnvironment createExecutionEnvironment() {
        return new ContextEnvironment(executorServiceLoader, configuration, userCodeClassLoader);
    }
}

Example: TwoInputUdfOperator.java (Apache License 2.0, Author: apache)
// --------------------------------------------------------------------------------------------
// Fluent API methods
// --------------------------------------------------------------------------------------------
@Override
public O withParameters(Configuration parameters) {
    this.parameters = parameters;
    @SuppressWarnings("unchecked")
    O returnType = (O) this;
    return returnType;
}

Example: AppendOnlyTopNFunction.java (Apache License 2.0, Author: apache)
public void open(Configuration parameters) throws Exception {
    super.open(parameters);
    int lruCacheSize = Math.max(1, (int) (cacheSize / getDefaultTopNSize()));
    kvSortedMap = new LRUMap<>(lruCacheSize);
    LOG.info("Top{} operator is using LRU caches key-size: {}", getDefaultTopNSize(), lruCacheSize);
    ListTypeInfo<BaseRow> valueTypeInfo = new ListTypeInfo<>(inputRowType);
    MapStateDescriptor<BaseRow, List<BaseRow>> mapStateDescriptor = new MapStateDescriptor<>("data-state-with-append", sortKeyType, valueTypeInfo);
    dataState = getRuntimeContext().getMapState(mapStateDescriptor);
    // metrics
    registerMetric(kvSortedMap.size() * getDefaultTopNSize());
}

Example: DelimitedInputFormat.java (Apache License 2.0, Author: apache)
// --------------------------------------------------------------------------------------------
// Pre-flight: Configuration, Splits, Sampling
// --------------------------------------------------------------------------------------------
/**
 * Configures this input format by reading the path to the file from the configuration and the string that
 * defines the record delimiter.
 *
 * @param parameters The configuration object to read the parameters from.
 */
@Override
public void configure(Configuration parameters) {
    super.configure(parameters);
    // the if() clauses are to prevent the configure() method from
    // overwriting the values set by the setters
    if (Arrays.equals(delimiter, new byte[] { '\n' })) {
        String delimString = parameters.getString(RECORD_DELIMITER, null);
        if (delimString != null) {
            setDelimiter(delimString);
        }
    }
    // set the number of samples
    if (numLineSamples == NUM_SAMPLES_UNDEFINED) {
        String samplesString = parameters.getString(NUM_STATISTICS_SAMPLES, null);
        if (samplesString != null) {
            try {
                setNumLineSamples(Integer.parseInt(samplesString));
            } catch (NumberFormatException e) {
                if (LOG.isWarnEnabled()) {
                    LOG.warn("Invalid value for number of samples to take: " + samplesString + ". Skipping sampling.");
                }
                setNumLineSamples(0);
            }
        }
    }
}

Example: HadoopOutputFormatBase.java (Apache License 2.0, Author: apache)
// --------------------------------------------------------------------------------------------
// OutputFormat
// --------------------------------------------------------------------------------------------
@Override
public void configure(Configuration parameters) {
    // enforce sequential configure() calls
    synchronized (CONFIGURE_MUTEX) {
        if (this.mapreduceOutputFormat instanceof Configurable) {
            ((Configurable) this.mapreduceOutputFormat).setConf(this.configuration);
        }
    }
}

Example: TaskManagerLoadingDynamicPropertiesITCase.java (Apache License 2.0, Author: apache)
private Configuration getJobManagerUpdatedConfiguration() {
    final Configuration updatedConfig = new Configuration();
    updatedConfig.setString(KEY_A, VALUE_A);
    updatedConfig.setString(KEY_B, VALUE_B);
    updatedConfig.setString(KEY_C, VALUE_C);
    updatedConfig.setString(KEY_D, VALUE_D);
    updatedConfig.setString(KEY_E, VALUE_E);
    updatedConfig.setString(KEY_F, VALUE_F);
    return updatedConfig;
}

Example: RocksDBStateBackendConfigTest.java (Apache License 2.0, Author: apache)
private void verifyIllegalArgument(ConfigOption<?> configOption, String configValue) {
    Configuration configuration = new Configuration();
    configuration.setString(configOption.key(), configValue);
    DefaultConfigurableOptionsFactory optionsFactory = new DefaultConfigurableOptionsFactory();
    try {
        optionsFactory.configure(configuration);
        fail("Not throwing expected IllegalArgumentException.");
    } catch (IllegalArgumentException e) {
    // ignored
    }
}

Example: PackagedProgramUtils.java (Apache License 2.0, Author: apache)
/**
 * Creates a {@link JobGraph} with a random {@link JobID}
 * from the given {@link PackagedProgram}.
 *
 * @param packagedProgram to extract the JobGraph from
 * @param configuration to use for the optimizer and job graph generator
 * @param defaultParallelism for the JobGraph
 * @param suppressOutput Whether to suppress stdout/stderr during interactive JobGraph creation.
 * @return JobGraph extracted from the PackagedProgram
 * @throws ProgramInvocationException if the JobGraph generation failed
 */
public static JobGraph createJobGraph(PackagedProgram packagedProgram, Configuration configuration, int defaultParallelism, boolean suppressOutput) throws ProgramInvocationException {
    return createJobGraph(packagedProgram, configuration, defaultParallelism, null, suppressOutput);
}

Example: RocksDBStateBackendFactory.java (Apache License 2.0, Author: apache)
@Override
public RocksDBStateBackend createFromConfig(Configuration config, ClassLoader classLoader) throws IllegalConfigurationException, IOException {
    // we need to explicitly read the checkpoint directory here, because that
    // is a required constructor parameter
    final String checkpointDirURI = config.getString(CheckpointingOptions.CHECKPOINTS_DIRECTORY);
    if (checkpointDirURI == null) {
        throw new IllegalConfigurationException("Cannot create the RocksDB state backend: The configuration does not specify the " + "checkpoint directory '" + CheckpointingOptions.CHECKPOINTS_DIRECTORY.key() + '\'');
    }
    return new RocksDBStateBackend(checkpointDirURI).configure(config, classLoader);
}
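
A usage sketch (the checkpoint directory URI is made up):

static RocksDBStateBackend backendFromConfig(Configuration config) throws IOException {
    config.setString(CheckpointingOptions.CHECKPOINTS_DIRECTORY, "file:///tmp/flink-checkpoints");
    return new RocksDBStateBackendFactory().createFromConfig(config, Thread.currentThread().getContextClassLoader());
}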

Example: StreamFaultToleranceTestBase.java (Apache License 2.0, Author: apache)
@Before
public void setup() throws Exception {
    Configuration configuration = new Configuration();
    switch(failoverStrategy) {
        case RestartPipelinedRegionFailoverStrategy:
            configuration.setString(JobManagerOptions.EXECUTION_FAILOVER_STRATEGY, "region");
            break;
        case RestartAllFailoverStrategy:
            configuration.setString(JobManagerOptions.EXECUTION_FAILOVER_STRATEGY, "full");
    }
    cluster = new MiniClusterWithClientResource(new MiniClusterResourceConfiguration.Builder().setConfiguration(configuration).setNumberTaskManagers(NUM_TASK_MANAGERS).setNumberSlotsPerTaskManager(NUM_TASK_SLOTS).build());
    cluster.before();
}

Example: FileInputFormat.java (Apache License 2.0, Author: apache)
/**
 * Initialize defaults for input format. Needs to be a static method because it is configured for local
 * cluster execution.
 * @param configuration The configuration to load defaults from
 */
private static void initDefaultsFromConfiguration(Configuration configuration) {
    final long to = configuration.getLong(ConfigConstants.FS_STREAM_OPENING_TIMEOUT_KEY, ConfigConstants.DEFAULT_FS_STREAM_OPENING_TIMEOUT);
    if (to < 0) {
        LOG.error("Invalid timeout value for filesystem stream opening: " + to + ". Using default value of " + ConfigConstants.DEFAULT_FS_STREAM_OPENING_TIMEOUT);
        DEFAULT_OPENING_TIMEOUT = ConfigConstants.DEFAULT_FS_STREAM_OPENING_TIMEOUT;
    } else if (to == 0) {
        // 5 minutes
        DEFAULT_OPENING_TIMEOUT = 300000;
    } else {
        DEFAULT_OPENING_TIMEOUT = to;
    }
}
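
The timeout read above is a plain long entry keyed by ConfigConstants.FS_STREAM_OPENING_TIMEOUT_KEY, so configuring it is a one-liner (30 seconds here; a value of 0 falls back to the 5 minute default, as shown above):

Configuration configuration = new Configuration();
configuration.setLong(ConfigConstants.FS_STREAM_OPENING_TIMEOUT_KEY, 30000L);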

Example: KubernetesResource.java (Apache License 2.0, Author: apache)
/**
 * Represents a Kubernetes resource.
 */
public abstract class KubernetesResource<T> {

    private T internalResource;

    private final Configuration flinkConfig;

    public KubernetesResource(Configuration flinkConfig, T internalResource) {
        this.flinkConfig = flinkConfig;
        this.internalResource = internalResource;
    }

    public Configuration getFlinkConfig() {
        return flinkConfig;
    }

    public T getInternalResource() {
        return internalResource;
    }

    public void setInternalResource(T resource) {
        this.internalResource = resource;
    }
}

Example: MiniCluster.java (Apache License 2.0, Author: apache)
// ------------------------------------------------------------------------
// factories - can be overridden by subclasses to alter behavior
// ------------------------------------------------------------------------
/**
 * Factory method to create the metric registry for the mini cluster.
 *
 * @param config The configuration of the mini cluster
 */
protected MetricRegistryImpl createMetricRegistry(Configuration config) {
    return new MetricRegistryImpl(MetricRegistryConfiguration.fromConfiguration(config), ReporterSetup.fromConfiguration(config));
}

Example: StreamNetworkBenchmarkEnvironment.java (Apache License 2.0, Author: apache)
/**
 * Sets up the environment including buffer pools and netty threads.
 *
 * @param writers
 * 		number of writers
 * @param channels
 * 		outgoing channels per writer
 * @param localMode
 * 		only local channels?
 * @param senderBufferPoolSize
 * 		buffer pool size for the sender (set to <tt>-1</tt> for default)
 * @param receiverBufferPoolSize
 * 		buffer pool size for the receiver (set to <tt>-1</tt> for default)
 */
public void setUp(int writers, int channels, boolean localMode, int senderBufferPoolSize, int receiverBufferPoolSize, Configuration config) throws Exception {
    this.localMode = localMode;
    this.channels = channels;
    this.partitionIds = new ResultPartitionID[writers];
    if (senderBufferPoolSize == -1) {
        senderBufferPoolSize = Math.max(2048, writers * channels * 4);
    }
    if (receiverBufferPoolSize == -1) {
        receiverBufferPoolSize = Math.max(2048, writers * channels * 4);
    }
    senderEnv = createShuffleEnvironment(senderBufferPoolSize, config);
    this.dataPort = senderEnv.start();
    if (localMode && senderBufferPoolSize == receiverBufferPoolSize) {
        receiverEnv = senderEnv;
    } else {
        receiverEnv = createShuffleEnvironment(receiverBufferPoolSize, config);
        receiverEnv.start();
    }
    gateFactory = new SingleInputGateFactory(location, receiverEnv.getConfiguration(), receiverEnv.getConnectionManager(), receiverEnv.getResultPartitionManager(), new TaskEventDispatcher(), receiverEnv.getNetworkBufferPool());
    generatePartitionIds();
}

Example: TaskExecutorResourceSpecBuilder.java (Apache License 2.0, Author: apache)
/**
 * Builder for {@link TaskExecutorResourceSpec}.
 */
public clreplaced TaskExecutorResourceSpecBuilder {

    private final Configuration configuration;

    private TaskExecutorResourceSpecBuilder(final Configuration configuration) {
        this.configuration = new Configuration(checkNotNull(configuration));
    }

    static TaskExecutorResourceSpecBuilder newBuilder(final Configuration configuration) {
        return new TaskExecutorResourceSpecBuilder(configuration);
    }

    public TaskExecutorResourceSpecBuilder withCpuCores(double cpuCores) {
        return withCpuCores(new CPUResource(cpuCores));
    }

    public TaskExecutorResourceSpecBuilder withCpuCores(CPUResource cpuCores) {
        configuration.setDouble(TaskManagerOptions.CPU_CORES, cpuCores.getValue().doubleValue());
        return this;
    }

    public TaskExecutorResourceSpecBuilder withTotalProcessMemory(MemorySize totalProcessMemory) {
        configuration.set(TaskManagerOptions.TOTAL_PROCESS_MEMORY, totalProcessMemory);
        return this;
    }

    public TaskExecutorResourceSpec build() {
        return TaskExecutorResourceUtils.resourceSpecFromConfig(configuration);
    }
}
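
A usage sketch (note that newBuilder is package-private, so this compiles only from the same package; the sizes are arbitrary):

TaskExecutorResourceSpec spec = TaskExecutorResourceSpecBuilder
    .newBuilder(new Configuration())
    .withCpuCores(2.0)
    .withTotalProcessMemory(MemorySize.parse("4g"))
    .build();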

Example: Slf4jReporterTest.java (Apache License 2.0, Author: apache)
@BeforeClass
public static void setUp() {
    TestUtils.addTestAppenderForRootLogger();
    Configuration configuration = new Configuration();
    configuration.setString(MetricOptions.SCOPE_NAMING_TASK, "<host>.<tm_id>.<job_name>");
    registry = new MetricRegistryImpl(MetricRegistryConfiguration.fromConfiguration(configuration), Collections.singletonList(ReporterSetup.forReporter("slf4j", new Slf4jReporter())));
    delimiter = registry.getDelimiter();
    taskMetricGroup = new TaskManagerMetricGroup(registry, HOST_NAME, TASK_MANAGER_ID).addTaskForJob(new JobID(), JOB_NAME, new JobVertexID(), new ExecutionAttemptID(), TASK_NAME, 0, 0);
    reporter = (Slf4jReporter) registry.getReporters().get(0);
}

Example: ParameterTool.java (Apache License 2.0, Author: apache)
// ------------------------- Export to different targets -------------------------
/**
 * Returns a {@link Configuration} object from this {@link ParameterTool}.
 *
 * @return A {@link Configuration}
 */
public Configuration getConfiguration() {
    final Configuration conf = new Configuration();
    for (Map.Entry<String, String> entry : data.entrySet()) {
        conf.setString(entry.getKey(), entry.getValue());
    }
    return conf;
}
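
A usage sketch (the "--bufferSize" argument is made up):

ParameterTool parameters = ParameterTool.fromArgs(new String[] { "--bufferSize", "1024" });
Configuration config = parameters.getConfiguration();
int bufferSize = config.getInteger("bufferSize", 0); // 1024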

Example: FsStateBackend.java (Apache License 2.0, Author: apache)
// ------------------------------------------------------------------------
// Reconfiguration
// ------------------------------------------------------------------------
/**
 * Creates a copy of this state backend that uses the values defined in the configuration
 * for fields that were not specified in this state backend.
 *
 * @param config the configuration
 * @param classLoader the class loader
 * @return The re-configured variant of the state backend
 */
@Override
public FsStateBackend configure(Configuration config, ClassLoader classLoader) {
    return new FsStateBackend(this, config, classLoader);
}

Example: CassandraSinkBase.java (Apache License 2.0, Author: apache)
@Override
public void open(Configuration configuration) {
    this.callback = new FutureCallback<V>() {

        @Override
        public void onSuccess(V ignored) {
            semaphore.release();
        }

        @Override
        public void onFailure(Throwable t) {
            throwable.compareAndSet(null, t);
            log.error("Error while sending value.", t);
            semaphore.release();
        }
    };
    this.cluster = builder.getCluster();
    this.session = createSession();
    throwable = new AtomicReference<>();
    semaphore = new Semaphore(config.getMaxConcurrentRequests());
}