org.influxdb.InfluxDB - Java examples

Here are examples of the Java API org.influxdb.InfluxDB, taken from open-source projects. By voting up, you can indicate which examples are most useful and appropriate.

120 Examples

Example: InfluxdbReporter.java (19 votes, Apache License 2.0, Author: apache)
/**
 * {@link MetricReporter} that exports {@link Metric Metrics} via InfluxDB.
 */
public class InfluxdbReporter extends AbstractReporter<MeasurementInfo> implements Scheduled {

    private String database;

    private String retentionPolicy;

    private InfluxDB.ConsistencyLevel consistency;

    private InfluxDB influxDB;

    public InfluxdbReporter() {
        super(new MeasurementInfoProvider());
    }

    @Override
    public void open(MetricConfig config) {
        String host = getString(config, HOST);
        int port = getInteger(config, PORT);
        if (!isValidHost(host) || !NetUtils.isValidClientPort(port)) {
            throw new IllegalArgumentException("Invalid host/port configuration. Host: " + host + " Port: " + port);
        }
        String database = getString(config, DB);
        if (database == null) {
            throw new IllegalArgumentException("'" + DB.key() + "' configuration option is not set");
        }
        String url = String.format("http://%s:%d", host, port);
        String username = getString(config, USERNAME);
        String password = getString(config, PASSWORD);
        this.database = database;
        this.retentionPolicy = getString(config, RETENTION_POLICY);
        this.consistency = getConsistencyLevel(config, CONSISTENCY);
        int connectTimeout = getInteger(config, CONNECT_TIMEOUT);
        int writeTimeout = getInteger(config, WRITE_TIMEOUT);
        OkHttpClient.Builder client = new OkHttpClient.Builder().connectTimeout(connectTimeout, TimeUnit.MILLISECONDS).writeTimeout(writeTimeout, TimeUnit.MILLISECONDS);
        if (username != null && password != null) {
            influxDB = InfluxDBFactory.connect(url, username, password, client);
        } else {
            influxDB = InfluxDBFactory.connect(url, client);
        }
        log.info("Configured InfluxDBReporter with {host:{}, port:{}, db:{}, retentionPolicy:{} and consistency:{}}", host, port, database, retentionPolicy, consistency.name());
    }

    @Override
    public void close() {
        if (influxDB != null) {
            influxDB.close();
            influxDB = null;
        }
    }

    @Override
    public void report() {
        BatchPoints report = buildReport();
        if (report != null) {
            influxDB.write(report);
        }
    }

    @Nullable
    private BatchPoints buildReport() {
        Instant timestamp = Instant.now();
        BatchPoints.Builder report = BatchPoints.database(database);
        report.retentionPolicy(retentionPolicy);
        report.consistency(consistency);
        try {
            for (Map.Entry<Gauge<?>, MeasurementInfo> entry : gauges.entrySet()) {
                report.point(MetricMapper.map(entry.getValue(), timestamp, entry.getKey()));
            }
            for (Map.Entry<Counter, MeasurementInfo> entry : counters.entrySet()) {
                report.point(MetricMapper.map(entry.getValue(), timestamp, entry.getKey()));
            }
            for (Map.Entry<Histogram, MeasurementInfo> entry : histograms.entrySet()) {
                report.point(MetricMapper.map(entry.getValue(), timestamp, entry.getKey()));
            }
            for (Map.Entry<Meter, MeasurementInfo> entry : meters.entrySet()) {
                report.point(MetricMapper.map(entry.getValue(), timestamp, entry.getKey()));
            }
        } catch (ConcurrentModificationException | NoSuchElementException e) {
            // ignore - may happen when metrics are concurrently added or removed
            // report next time
            return null;
        }
        return report.build();
    }

    private static boolean isValidHost(String host) {
        return host != null && !host.isEmpty();
    }
}
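
For quick reference, here is a minimal, self-contained sketch of the connect-and-batch-write pattern this reporter builds on (URL, credentials, database, and measurement names are placeholders, not values from the reporter above):

import java.util.concurrent.TimeUnit;
import org.influxdb.InfluxDB;
import org.influxdb.InfluxDBFactory;
import org.influxdb.dto.BatchPoints;
import org.influxdb.dto.Point;

public class ConnectAndWriteSketch {

    public static void main(String[] args) {
        // Placeholder connection details; adjust for your environment.
        InfluxDB influxDB = InfluxDBFactory.connect("http://localhost:8086", "user", "password");
        BatchPoints batch = BatchPoints.database("metrics")
                .retentionPolicy("autogen")
                .consistency(InfluxDB.ConsistencyLevel.ONE)
                .build();
        batch.point(Point.measurement("numRecordsIn")
                .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
                .addField("value", 42L)
                .build());
        // One HTTP request for the whole batch; server errors surface as runtime exceptions.
        influxDB.write(batch);
        influxDB.close();
    }
}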

Example: LegacyInfluxDBConnection.java (19 votes, MIT License, Author: Scrin)
public class LegacyInfluxDBConnection implements DBConnection {

    private final InfluxDB influxDB;

    public LegacyInfluxDBConnection() {
        influxDB = InfluxDBFactory.connect(Config.getInfluxUrl(), Config.getInfluxUser(), Config.getInfluxPassword());
        influxDB.setDatabase(Config.getInfluxDatabase());
        influxDB.enableGzip();
        // TODO: make these configurable
        influxDB.enableBatch(2000, 100, TimeUnit.MILLISECONDS);
    }

    @Override
    public void save(EnhancedRuuviMeasurement measurement) {
        BatchPoints points = InfluxDBConverter.toLegacyInflux(measurement);
        influxDB.write(points);
    }

    @Override
    public void close() {
        influxDB.close();
    }
}
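
Note that enableBatch(2000, 100, TimeUnit.MILLISECONDS) buffers writes and flushes once 2000 points accumulate or 100 ms pass, whichever comes first. A minimal sketch of the equivalent setup via BatchOptions, which newer influxdb-java releases favor (connection values are placeholders):

InfluxDB influxDB = InfluxDBFactory.connect("http://localhost:8086", "user", "password");
influxDB.setDatabase("ruuvi");
influxDB.enableGzip();
// Same flush policy: 2000 buffered points or 100 ms, whichever comes first.
influxDB.enableBatch(BatchOptions.DEFAULTS.actions(2000).flushDuration(100));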

Example: InfluxdbReporter.java (19 votes, Apache License 2.0, Author: ljygz)
/**
 * {@link MetricReporter} that exports {@link Metric Metrics} via InfluxDB.
 */
public class InfluxdbReporter extends AbstractReporter<MeasurementInfo> implements Scheduled {

    private String database;

    private InfluxDB influxDB;

    public InfluxdbReporter() {
        super(new MeasurementInfoProvider());
    }

    @Override
    public void open(MetricConfig config) {
        String host = getString(config, HOST);
        int port = getInteger(config, PORT);
        if (!isValidHost(host) || !isValidPort(port)) {
            throw new IllegalArgumentException("Invalid host/port configuration. Host: " + host + " Port: " + port);
        }
        String database = getString(config, DB);
        if (database == null) {
            throw new IllegalArgumentException("'" + DB.key() + "' configuration option is not set");
        }
        String url = String.format("http://%s:%d", host, port);
        String username = getString(config, USERNAME);
        String password = getString(config, PASSWORD);
        this.database = database;
        if (username != null && password != null) {
            influxDB = InfluxDBFactory.connect(url, username, password);
        } else {
            influxDB = InfluxDBFactory.connect(url);
        }
        log.info("Configured InfluxDBReporter with {host:{}, port:{}, db:{}}", host, port, database);
    }

    @Override
    public void close() {
        if (influxDB != null) {
            influxDB.close();
            influxDB = null;
        }
    }

    @Override
    public void report() {
        BatchPoints report = buildReport();
        if (report != null) {
            influxDB.write(report);
        }
    }

    @Nullable
    private BatchPoints buildReport() {
        Instant timestamp = Instant.now();
        BatchPoints.Builder report = BatchPoints.database(database);
        report.retentionPolicy("");
        try {
            for (Map.Entry<Gauge<?>, MeasurementInfo> entry : gauges.entrySet()) {
                report.point(MetricMapper.map(entry.getValue(), timestamp, entry.getKey()));
            }
            for (Map.Entry<Counter, MeasurementInfo> entry : counters.entrySet()) {
                report.point(MetricMapper.map(entry.getValue(), timestamp, entry.getKey()));
            }
            for (Map.Entry<Histogram, MeasurementInfo> entry : histograms.entrySet()) {
                report.point(MetricMapper.map(entry.getValue(), timestamp, entry.getKey()));
            }
            for (Map.Entry<Meter, MeasurementInfo> entry : meters.entrySet()) {
                report.point(MetricMapper.map(entry.getValue(), timestamp, entry.getKey()));
            }
        } catch (ConcurrentModificationException | NoSuchElementException e) {
            // ignore - may happen when metrics are concurrently added or removed
            // report next time
            return null;
        }
        return report.build();
    }

    private static boolean isValidHost(String host) {
        return host != null && !host.isEmpty();
    }

    private static boolean isValidPort(int port) {
        return 0 < port && port <= 65535;
    }
}

Example: DataLakeManagementV3.java (19 votes, Apache License 2.0, Author: apache)
private int getAggregationValue(String index, InfluxDB influxDB) throws ParseException {
    long timerange = getDateFromNewestRecordReOfTable(index, influxDB) - getDateFromOldestRecordReOfTable(index, influxDB);
    double v = timerange / NUM_OF_AUTO_AGGREGATION_VALUES;
    return Double.valueOf(v).intValue();
}
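
For a concrete sense of the numbers (illustrative, not from the source): if the newest and oldest records of an index lie one hour apart, timerange is 3,600,000 ms; with NUM_OF_AUTO_AGGREGATION_VALUES = 2000 this yields an aggregation interval of 1800 ms.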

Example: InfluxDBSink.java (19 votes, Apache License 2.0, Author: apache)
/**
 * Sink to save data into an InfluxDB cluster.
 */
public class InfluxDBSink extends RichSinkFunction<InfluxDBPoint> {

    private transient InfluxDB influxDBClient;

    private final InfluxDBConfig influxDBConfig;

    /**
     * Creates a new {@link InfluxDBSink} that connects to the InfluxDB server.
     *
     * @param influxDBConfig The configuration of {@link InfluxDBConfig}
     */
    public InfluxDBSink(InfluxDBConfig influxDBConfig) {
        this.influxDBConfig = Preconditions.checkNotNull(influxDBConfig, "InfluxDB client config should not be null");
    }

    /**
     * Initializes the connection to InfluxDB and creates the target database if configured to do so.
     */
    @Override
    public void open(Configuration parameters) throws Exception {
        super.open(parameters);
        influxDBClient = InfluxDBFactory.connect(influxDBConfig.getUrl(), influxDBConfig.getUsername(), influxDBConfig.getPassword());
        if (!influxDBClient.databaseExists(influxDBConfig.getDatabase())) {
            if (influxDBConfig.isCreateDatabase()) {
                influxDBClient.createDatabase(influxDBConfig.getDatabase());
            } else {
                throw new RuntimeException("This " + influxDBConfig.getDatabase() + " database does not exist!");
            }
        }
        influxDBClient.setDatabase(influxDBConfig.getDatabase());
        if (influxDBConfig.getBatchActions() > 0) {
            influxDBClient.enableBatch(influxDBConfig.getBatchActions(), influxDBConfig.getFlushDuration(), influxDBConfig.getFlushDurationTimeUnit());
        }
        if (influxDBConfig.isEnableGzip()) {
            influxDBClient.enableGzip();
        }
    }

    /**
     * Called when new data arrives to the sink, and forwards it to InfluxDB.
     *
     * @param dataPoint {@link InfluxDBPoint}
     */
    @Override
    public void invoke(InfluxDBPoint dataPoint, Context context) throws Exception {
        if (StringUtils.isNullOrWhitespaceOnly(dataPoint.getMeasurement())) {
            throw new RuntimeException("No measurement defined");
        }
        Point.Builder builder = Point.measurement(dataPoint.getMeasurement()).time(dataPoint.getTimestamp(), TimeUnit.MILLISECONDS);
        if (!CollectionUtil.isNullOrEmpty(dataPoint.getFields())) {
            builder.fields(dataPoint.getFields());
        }
        if (!CollectionUtil.isNullOrEmpty(dataPoint.getTags())) {
            builder.tag(dataPoint.getTags());
        }
        Point point = builder.build();
        influxDBClient.write(point);
    }

    @Override
    public void close() {
        if (influxDBClient.isBatchEnabled()) {
            influxDBClient.disableBatch();
        }
        influxDBClient.close();
    }
}
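
A sketch of wiring this sink into a Flink job. The InfluxDBConfig builder calls below follow the connector's documented style, but treat the method names as assumptions inferred from the getters used above:

InfluxDBConfig influxDBConfig = InfluxDBConfig.builder("http://localhost:8086", "user", "password", "flink_metrics")
        .batchActions(1000)
        .flushDuration(100, TimeUnit.MILLISECONDS)
        .enableGzip(true)
        .build();
// dataStream is an existing DataStream<InfluxDBPoint> produced upstream.
dataStream.addSink(new InfluxDBSink(influxDBConfig));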

Example: InfluxdbComponent.java (19 votes, Apache License 2.0, Author: archerfeel)
/**
 * Created by pixyonly on 03/09/2017.
 */
public class InfluxdbComponent implements Configurable, Releasable {

    private static final String CFG_URL = "url";

    private static final String CFG_USERNAME = "username";

    private static final String CFG_PASSWORD = "password";

    private static final String CFG_DATABASE = "database";

    private static final String CFG_RETENTION_POLICY = "retention_policy";

    private static final String CFG_CONSISTENCY_LEVEL = "consistency_level";

    private static final InfluxDB.ConsistencyLevel DEFAULT_CONSISTENCY_LEVEL = InfluxDB.ConsistencyLevel.ANY;

    private InfluxDB influx;

    private String database;

    private String retentionPolicy;

    private InfluxDB.ConsistencyLevel consistency;

    @Override
    public void init(Configuration configuration) throws InitializationException {
        database = configuration.getString(CFG_DATABASE);
        retentionPolicy = configuration.getString(CFG_RETENTION_POLICY, "");
        try {
            consistency = InfluxDB.ConsistencyLevel.valueOf(configuration.getString(CFG_CONSISTENCY_LEVEL));
        } catch (Exception e) {
            consistency = DEFAULT_CONSISTENCY_LEVEL;
        }
        influx = InfluxDBFactory.connect(configuration.getString(CFG_URL), configuration.getString(CFG_USERNAME), configuration.getString(CFG_PASSWORD));
    }

    public void write(List<String> batch) {
        influx.write(database, retentionPolicy, consistency, batch);
    }

    public void write(String single) {
        influx.write(database, retentionPolicy, consistency, single);
    }

    @Override
    public void release() {
        influx.close();
    }
}
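
The write(database, retentionPolicy, consistency, ...) overloads used here accept raw InfluxDB line protocol. A brief sketch of such records, assuming an initialized InfluxdbComponent named component and java.util.Arrays imported (measurement, tags, and fields are illustrative):

// Format: measurement,tag=value field=value [optional nanosecond timestamp]
component.write("cpu,host=server01,region=us-west usage_idle=92.5");
component.write(Arrays.asList(
        "cpu,host=server01 usage_idle=92.5",
        "mem,host=server01 used_percent=64.2"));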

Example: InfluxDBConnection.java (19 votes, MIT License, Author: Scrin)
public class InfluxDBConnection implements DBConnection {

    private final InfluxDB influxDB;

    public InfluxDBConnection() {
        this(Config.getInfluxUrl(), Config.getInfluxUser(), Config.getInfluxPassword(), Config.getInfluxDatabase(), Config.getInfluxRetentionPolicy(), Config.isInfluxGzip(), Config.isInfluxBatch(), Config.getInfluxBatchMaxSize(), Config.getInfluxBatchMaxTimeMs());
    }

    public InfluxDBConnection(String url, String user, String password, String database, String retentionPolicy, boolean gzip, boolean batch, int batchSize, int batchTime) {
        influxDB = InfluxDBFactory.connect(url, user, password).setDatabase(database).setRetentionPolicy(retentionPolicy);
        if (gzip) {
            influxDB.enableGzip();
        } else {
            influxDB.disableGzip();
        }
        if (batch) {
            influxDB.enableBatch(batchSize, batchTime, TimeUnit.MILLISECONDS);
        } else {
            influxDB.disableBatch();
        }
    }

    @Override
    public void save(EnhancedRuuviMeasurement measurement) {
        Point point = InfluxDBConverter.toInflux(measurement);
        influxDB.write(point);
    }

    @Override
    public void close() {
        influxDB.close();
    }
}

Example: InfluxDBPusher.java (19 votes, Apache License 2.0, Author: apache)
/**
 * Establishes a connection to InfluxDB and pushes {@link Point}s.
 *
 * @author Lorand Bendig
 */
public class InfluxDBPusher {

    private final InfluxDB influxDB;

    private final String database;

    private InfluxDBPusher(Builder builder) {
        this.influxDB = builder.influxDB;
        this.database = builder.database;
    }

    public static class Builder {

        private final InfluxDB influxDB;

        private final String database;

        public Builder(String url, String username, String password, String database, InfluxDBConnectionType connectionType) {
            this.influxDB = connectionType.createConnection(url, username, password);
            this.database = database;
        }

        /**
         * Set the connection timeout for InfluxDB
         * @param connectTimeout
         * @param timeUnit
         * @return
         */
        public Builder withConnectTimeout(long connectTimeout, TimeUnit timeUnit) {
            influxDB.setConnectTimeout(connectTimeout, timeUnit);
            return this;
        }

        /**
         * Set the writer timeout for the InfluxDB connection
         * @param writeTimeout
         * @param timeUnit
         * @return
         */
        public Builder withWriteTimeout(long writeTimeout, TimeUnit timeUnit) {
            influxDB.setWriteTimeout(writeTimeout, timeUnit);
            return this;
        }

        public InfluxDBPusher build() {
            return new InfluxDBPusher(this);
        }
    }

    /**
     * Push a single Point
     * @param point the {@link Point} to report
     */
    public void push(Point point) {
        BatchPoints.Builder batchPointsBuilder = BatchPoints.database(database);
        batchPointsBuilder.point(point);
        influxDB.write(batchPointsBuilder.build());
    }

    /**
     * Push multiple points at once.
     * @param points list of {@link Point}s to report
     */
    public void push(List<Point> points) {
        BatchPoints.Builder batchPointsBuilder = BatchPoints.database(database);
        for (Point point : points) {
            batchPointsBuilder.point(point);
        }
        influxDB.write(batchPointsBuilder.build());
    }
}
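
A usage sketch for the builder above (URL, credentials, database name, and the InfluxDBConnectionType constant are placeholders, not values from the source):

InfluxDBPusher pusher = new InfluxDBPusher.Builder(
        "http://localhost:8086", "user", "password", "gobblin_metrics",
        InfluxDBConnectionType.HTTP) // assumed enum constant
        .withConnectTimeout(5, TimeUnit.SECONDS)
        .withWriteTimeout(10, TimeUnit.SECONDS)
        .build();
pusher.push(Point.measurement("job_runtime")
        .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
        .addField("millis", 1234L)
        .build());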

Example: InfluxDBSink.java (19 votes, Apache License 2.0, Author: zhisheng17)
/**
 * Desc: InfluxDB sink
 * Created by zhisheng on 2019-05-01
 * blog: http://www.54tianzhisheng.cn/
 * WeChat official account: zhisheng
 */
public class InfluxDBSink extends RichSinkFunction<MetricEvent> {

    private transient InfluxDB influxDBClient;

    private final InfluxDBConfig influxDBConfig;

    public InfluxDBSink(InfluxDBConfig influxDBConfig) {
        this.influxDBConfig = Preconditions.checkNotNull(influxDBConfig, "InfluxDB client config should not be null");
    }

    @Override
    public void open(Configuration parameters) throws Exception {
        super.open(parameters);
        influxDBClient = InfluxDBFactory.connect(influxDBConfig.getUrl(), influxDBConfig.getUsername(), influxDBConfig.getPassword());
        if (!influxDBClient.databaseExists(influxDBConfig.getDatabase())) {
            if (influxDBConfig.isCreateDatabase()) {
                influxDBClient.createDatabase(influxDBConfig.getDatabase());
            } else {
                throw new RuntimeException("This " + influxDBConfig.getDatabase() + " database does not exist!");
            }
        }
        influxDBClient.setDatabase(influxDBConfig.getDatabase());
        if (influxDBConfig.getBatchActions() > 0) {
            influxDBClient.enableBatch(influxDBConfig.getBatchActions(), influxDBConfig.getFlushDuration(), influxDBConfig.getFlushDurationTimeUnit());
        }
        if (influxDBConfig.isEnableGzip()) {
            influxDBClient.enableGzip();
        }
    }

    @Override
    public void invoke(MetricEvent metricEvent, Context context) throws Exception {
        if (StringUtils.isNullOrWhitespaceOnly(metricEvent.getName())) {
            throw new RuntimeException("No measurement defined");
        }
        Point.Builder builder = Point.measurement(metricEvent.getName()).time(metricEvent.getTimestamp(), TimeUnit.MILLISECONDS);
        if (!CollectionUtil.isNullOrEmpty(metricEvent.getFields())) {
            builder.fields(metricEvent.getFields());
        }
        if (!CollectionUtil.isNullOrEmpty(metricEvent.getTags())) {
            builder.tag(metricEvent.getTags());
        }
        Point point = builder.build();
        influxDBClient.write(point);
    }

    @Override
    public void close() {
        if (influxDBClient.isBatchEnabled()) {
            influxDBClient.disableBatch();
        }
        influxDBClient.close();
    }
}

Example: InfluxDBQueries.java (19 votes, Eclipse Public License 1.0, Author: ControlSystemStudio)
public class InfluxDBQueries {

    private final InfluxDB influxdb;

    abstract public static class DBNameMap {

        public abstract String getDataDBName(final String channel_name) throws Exception;

        public abstract String getMetaDBName(final String channel_name) throws Exception;

        public abstract List<String> getAllDBNames();
    }

    private final DBNameMap dbnames;

    public static class DefaultDBNameMap extends DBNameMap {

        protected final String db_name;

        protected final String meta_db_name;

        protected final List<String> all_names;

        public DefaultDBNameMap() {
            db_name = InfluxDBArchivePreferences.getDBPrefix() + InfluxDBArchivePreferences.getDBName();
            meta_db_name = InfluxDBArchivePreferences.getDBPrefix() + InfluxDBArchivePreferences.getMetaDBName();
            all_names = new ArrayList<String>();
            all_names.add(db_name);
            all_names.add(meta_db_name);
        }

        @Override
        public String getDataDBName(String channel_name) {
            return db_name;
        }

        @Override
        public String getMetaDBName(String channel_name) {
            return meta_db_name;
        }

        @Override
        public List<String> getAllDBNames() {
            return all_names;
        }
    }

    public List<String> getAllDBNames() {
        return dbnames.getAllDBNames();
    }

    public void initDatabases(final InfluxDB influxdb) {
        for (String db : dbnames.getAllDBNames()) {
            influxdb.createDatabase(db);
        }
    }

    // TODO: test this
    /**
     * Create a retention policy.
     * @param rpname
     * @param dbname
     * @param duration
     */
    public void createRetentionPolicy(final String rpname, final String dbname, final String duration) {
        // Influxdb-java versions > 2.7 have retention policy APIs.
        // TODO: validate duration ?
        String command = String.format("CREATE RETENTION POLICY \"%s\" ON \"%s\" DURATION %s REPLICATION 1", rpname, dbname, duration);
        QueryResult result = influxdb.query(new Query(command, dbname));
        // TODO: test success?
        // seems like there would be an error in the QueryResult
        // if it fails (result.getError() != null), not sure
    }

    // Currently unused.
    private void createDownsampleContinuousQuery(final String channelName, final String retentionPolicy, final String decimateTime) {
        try {
            final String dbname = dbnames.getDataDBName(channelName);
            String statement = new StringBuilder("CREATE CONTINUOUS QUERY ").append('"').append(retentionPolicy).append('_').append(decimateTime).append('"').append(" ON \"").append(dbname).append('"').append(" BEGIN").append(" SELECT ").append("MODE(*) AS average, MEAN(*) AS average").append(" INTO \"").append(channelName).append('"').append(" FROM \"").append(retentionPolicy).append("\".\"").append(channelName).append('"').append(" GROUP BY time(").append(decimateTime).append(")").append(" END").toString();
            QueryResult result = influxdb.query(new Query(statement, dbname));
            // TODO: check result for errors
        } catch (Exception e) {
            // TODO: log something
        }
    }

    private List<String> getRetentionPoliciesForChannel(String channel_name) {
        // TODO: be more specific?
        try {
            // TODO: call once, then store in some variable?
            return getRetentionPoliciesForDB(dbnames.getDataDBName(channel_name));
        } catch (Exception e) {
            return Collections.emptyList();
        }
    }

    private List<String> getRetentionPoliciesForDB(String dbname) {
        final String stmt = "SHOW RETENTION POLICIES ON \"" + dbname + '"';
        final QueryResult results = makeQuery(influxdb, stmt, dbname);
        final List<String> rps = new ArrayList<String>();
        for (QueryResult.Series series : InfluxDBResults.getNonEmptySeries(results)) {
            final int iend = InfluxDBResults.getValueCount(series);
            for (int i = 0; i < iend; ++i) {
                if (InfluxDBResults.getValue(series, "default", i).equals(false)) {
                    rps.add((String) InfluxDBResults.getValue(series, "name", i));
                }
            }
        }
        return rps;
    }

    private String getFromClause(String channel_name, boolean isData) {
        StringBuilder sb = new StringBuilder("\"").append(channel_name).append('"');
        if (isData) {
            for (String rp : getRetentionPoliciesForChannel(channel_name)) {
                // if (rp != null && !rp.isEmpty())
                sb.append(", \"").append(rp).append("\".\"").append(channel_name).append("\"");
            }
        }
        return sb.toString();
    }

    public InfluxDBQueries(InfluxDB influxdb, final DBNameMap dbnames) {
        this.influxdb = influxdb;
        if (dbnames == null)
            this.dbnames = new DefaultDBNameMap();
        else
            this.dbnames = dbnames;
    }

    // TODO: timestamps come back with wrong values stored in Double... would be faster if it worked.
    // private final static boolean query_nanos = true;
    public static QueryResult makeQuery(final InfluxDB influxdb, final String stmt, final String dbName) {
        Activator.getLogger().log(Level.FINE, "InfluxDB query ({0}): {1}", new Object[] { dbName, stmt });
        // if (query_nanos)
        // return influxdb.query(new Query(stmt, dbName), TimeUnit.NANOSECONDS);
        return influxdb.query(new Query(stmt, dbName));
    }

    public static void makeChunkQuery(int chunkSize, Consumer<QueryResult> consumer, InfluxDB influxdb, String stmt, String dbName) throws Exception {
        Activator.getLogger().log(Level.FINE, "InfluxDB chunked ({2}) query ({0}): {1}", new Object[] { dbName, stmt, chunkSize });
        // if (query_nanos)
        // influxdb.query(new Query(stmt, dbName), TimeUnit.NANOSECONDS, chunkSize, consumer);
        // else
        influxdb.query(new Query(stmt, dbName), chunkSize, consumer);
    }

    private static String get_points(final StringBuilder sb, final List<String> where_clauses, String group_by_what, final Long limit) {
        if ((where_clauses != null) && (where_clauses.size() > 0)) {
            sb.append(" WHERE ");
            for (int idx = 0; idx < where_clauses.size(); idx++) {
                if (idx > 0)
                    sb.append(" AND ");
                sb.append(where_clauses.get(idx));
            }
        }
        if (group_by_what != null) {
            sb.append(" GROUP BY ");
            sb.append(group_by_what);
        }
        sb.append(" ORDER BY time ");
        if (limit != null) {
            if (limit > 0)
                sb.append(" LIMIT ").append(limit);
            else if (limit < 0)
                sb.append(" DESC LIMIT ").append(-limit);
        }
        return sb.toString();
    }

    private static List<String> getTimeClauses(final Instant starttime, final Instant endtime) {
        if ((starttime == null) && (endtime == null))
            return null;
        List<String> where_clauses = new ArrayList<String>();
        if (starttime != null) {
            where_clauses.add("time >= " + InfluxDBUtil.toNano(starttime).toString());
        }
        if (endtime != null) {
            where_clauses.add("time <= " + InfluxDBUtil.toNano(endtime).toString());
        }
        return where_clauses;
    }

    private static String getGroupByTimeClause(final Instant starttime, final Instant endtime, final long count) {
        // TODO: rounding/truncation problem?
        StringBuilder ret = new StringBuilder();
        ret.append("time(");
        ret.append(InfluxDBUtil.toMicro(Duration.between(starttime, endtime).dividedBy(count)).toString());
        // Fill options: fill(none) is best, because empty time intervals (a.k.a. buckets) are automatically excluded.
        // There is no need to try to sample data with metadata, because metadata exists independently of sample data.
        ret.append("u) fill(none)");
        return ret.toString();
    }

    public static String get_channel_points(final String select_what, final String from_what, final Instant starttime, final Instant endtime, String where_what, String group_by_what, final Long limit) {
        StringBuilder sb = new StringBuilder();
        sb.append("SELECT ").append(select_what).append(" FROM ").append(from_what);
        List<String> where_clauses = getTimeClauses(starttime, endtime);
        if (where_what != null)
            where_clauses.add(where_what);
        return get_points(sb, where_clauses, group_by_what, limit);
    }

    public static String get_series_points(final InfluxDBSeriesInfo series, final Instant starttime, final Instant endtime, final Long limit) {
        StringBuilder sb = new StringBuilder();
        sb.append("SELECT \"").append(series.field).append("\" FROM \"").append(series.measurement).append('\"');
        List<String> where_clauses = series.getTagClauses();
        final List<String> time_clauses = getTimeClauses(starttime, endtime);
        if (where_clauses == null)
            where_clauses = time_clauses;
        else if (time_clauses != null)
            where_clauses.addAll(time_clauses);
        return get_points(sb, where_clauses, null, limit);
    }

    public static String get_pattern_points(final String select_what, final String pattern, final Instant starttime, final Instant endtime, final Long limit) {
        StringBuilder sb = new StringBuilder();
        sb.append("SELECT ").append(select_what).append(" FROM /").append(pattern).append('/');
        return get_points(sb, getTimeClauses(starttime, endtime), null, limit);
    }

    // /////////////////////////// RAW DATA QUERIES
    public void chunk_get_series_samples(final int chunkSize, final InfluxDBSeriesInfo series, final Instant starttime, final Instant endtime, Long limit, Consumer<QueryResult> consumer) throws Exception {
        makeChunkQuery(chunkSize, consumer, influxdb, get_series_points(series, starttime, endtime, limit), dbnames.getDataDBName(series.getMeasurement()));
    }

    public QueryResult get_oldest_series_sample(final InfluxDBSeriesInfo series) throws Exception {
        return makeQuery(influxdb, get_series_points(series, null, null, 1L), dbnames.getDataDBName(series.getMeasurement()));
    }

    public QueryResult get_newest_series_samples(final InfluxDBSeriesInfo series, final Instant starttime, final Instant endtime, Long num) throws Exception {
        return makeQuery(influxdb, get_series_points(series, starttime, endtime, -num), dbnames.getDataDBName(series.getMeasurement()));
    }

    public QueryResult get_series_samples(final InfluxDBSeriesInfo series, final Instant starttime, final Instant endtime, Long num) throws Exception {
        return makeQuery(influxdb, get_series_points(series, starttime, endtime, num), dbnames.getDataDBName(series.getMeasurement()));
    }

    // /////////////////////////// DATA ARCHIVE QUERIES
    public QueryResult get_oldest_channel_sample(final String channel_name) throws Exception {
        return makeQuery(influxdb, get_channel_points("*", getFromClause(channel_name, true), null, null, null, null, 1L), dbnames.getDataDBName(channel_name));
    }

    public QueryResult get_newest_channel_samples(final String channel_name, final Instant starttime, final Instant endtime, Long num) throws Exception {
        return makeQuery(influxdb, get_channel_points("*", getFromClause(channel_name, true), starttime, endtime, null, null, -num), dbnames.getDataDBName(channel_name));
    }

    public QueryResult get_channel_samples(final String channel_name, final Instant starttime, final Instant endtime, Long num) throws Exception {
        return makeQuery(influxdb, get_channel_points("*", getFromClause(channel_name, true), starttime, endtime, null, null, num), dbnames.getDataDBName(channel_name));
    }

    public void chunk_get_channel_samples(final int chunkSize, final String channel_name, final Instant starttime, final Instant endtime, Long limit, Consumer<QueryResult> consumer) throws Exception {
        makeChunkQuery(chunkSize, consumer, influxdb, get_channel_points("*", getFromClause(channel_name, true), starttime, endtime, null, null, limit), dbnames.getDataDBName(channel_name));
    }

    public QueryResult get_newest_channel_datum_regex(final String pattern) throws Exception {
        return makeQuery(influxdb, get_pattern_points("*", pattern, null, null, -1L), dbnames.getDataDBName(pattern));
    }

    public QueryResult get_newest_channel_sample_count_in_intervals(final String channel_name, final Instant starttime, final Instant endtime, Long numIntervals, Long numResults) throws Exception {
        return makeQuery(influxdb, get_channel_points("COUNT(*)", getFromClause(channel_name, true), starttime, endtime, null, getGroupByTimeClause(starttime, endtime, numIntervals), -numResults), dbnames.getDataDBName(channel_name));
    }

    public QueryResult get_channel_sample_count_in_intervals(final String channel_name, final Instant starttime, final Instant endtime, Long numIntervals, Long numResults) throws Exception {
        return makeQuery(influxdb, get_channel_points("*", getFromClause(channel_name, true), starttime, endtime, null, getGroupByTimeClause(starttime, endtime, numIntervals), numResults), dbnames.getDataDBName(channel_name));
    }

    public void chunk_get_channel_sample_stats(final int chunkSize, final String channel_name, final Instant starttime, final Instant endtime, Long limit, boolean stdDev, Consumer<QueryResult> consumer) throws Exception {
        StringBuilder select_what = new StringBuilder("MEAN(*),MAX(*),MIN(*),COUNT(*)");
        if (stdDev)
            select_what.append(",STDDEV(*)");
        makeChunkQuery(chunkSize, consumer, influxdb, get_channel_points(select_what.toString(), getFromClause(channel_name, true), starttime, endtime, "status != 'NaN'", getGroupByTimeClause(starttime, endtime, limit), null), dbnames.getDataDBName(channel_name));
    }

    // /////////////////////////// META DATA ARCHIVE QUERIES
    public QueryResult get_newest_meta_data(final String channel_name, final Instant starttime, final Instant endtime, Long num) throws Exception {
        return makeQuery(influxdb, get_channel_points("*", getFromClause(channel_name, false), starttime, endtime, null, null, -num), dbnames.getMetaDBName(channel_name));
    }

    public QueryResult get_newest_meta_datum(final String channel_name) throws Exception {
        return makeQuery(influxdb, get_channel_points("*", getFromClause(channel_name, false), null, null, null, null, -1L), dbnames.getMetaDBName(channel_name));
    }

    public QueryResult get_newest_meta_datum_regex(final String pattern) throws Exception {
        return makeQuery(influxdb, get_pattern_points("*", pattern, null, null, -1L), dbnames.getMetaDBName(pattern));
    }

    public QueryResult get_all_meta_data(final String channel_name) throws Exception {
        return makeQuery(influxdb, get_channel_points("*", getFromClause(channel_name, false), null, null, null, null, null), dbnames.getMetaDBName(channel_name));
    }

    public void chunk_get_channel_metadata(final int chunkSize, final String channel_name, final Instant starttime, final Instant endtime, Long limit, Consumer<QueryResult> consumer) throws Exception {
        makeChunkQuery(chunkSize, consumer, influxdb, get_channel_points("*", getFromClause(channel_name, false), starttime, endtime, null, null, limit), dbnames.getMetaDBName(channel_name));
    }
}
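
To make the query builders above concrete: for a series with measurement "my_channel" and field "double", a call like get_series_points(series, starttime, endtime, -10L) assembles a statement of roughly this shape (timestamps illustrative, in nanoseconds; a negative limit selects the newest rows by ordering descending):

SELECT "double" FROM "my_channel" WHERE time >= 1485370052974000000 AND time <= 1485373652974000000 ORDER BY time DESC LIMIT 10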

Example: InfluxConnector.java (19 votes, The Unlicense, Author: games647)
public class InfluxConnector implements Closeable {

    private final String url;

    private final String username;

    private final String password;

    private final String database;

    private InfluxDB connection;

    public InfluxConnector(String url, String username, String password, String database) {
        this.url = url;
        this.username = username;
        this.password = password;
        this.database = database;
    }

    protected void init() {
        InfluxDB influxDB = InfluxDBFactory.connect(url, username, password);
        if (!influxDB.databaseExists(database)) {
            influxDB.createDatabase(database);
        }
        // Flush every 2000 Points, at least every 1s
        // Only one of these 2 calls should be enabled
        // influxDB.enableBatch(2_000, 2, TimeUnit.MINUTES);
        influxDB.enableBatch(BatchOptions.DEFAULTS.jitterDuration(500));
        influxDB.enableGzip();
        connection = influxDB;
    }

    public void send(Point measurement) {
        send(Collections.singletonList(measurement));
    }

    public void send(Iterable<Point> measurements) {
        BatchPoints batchPoints = BatchPoints.database(database).retentionPolicy("autogen").build();
        measurements.forEach(batchPoints::point);
        connection.write(batchPoints);
    }

    @Override
    public void close() {
        if (connection != null) {
            connection.close();
        }
    }
}
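
BatchOptions.DEFAULTS flushes after 1000 buffered points or 1000 ms; jitterDuration(500) adds up to 500 ms of random delay per flush so that many clients do not all write at the same instant. A sketch of a point one might hand to send(...) (measurement, tag, and field names are placeholders; connector is an initialized InfluxConnector):

Point point = Point.measurement("tps")
        .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
        .tag("server", "lobby")
        .addField("ticks", 19.8)
        .build();
connector.send(point);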

Example: InfluxDBJavaTest.java (19 votes, Eclipse Public License 1.0, Author: ControlSystemStudio)
public class InfluxDBJavaTest {

    String dbName = "aTimeSeries";

    InfluxDB influxDB;

    final private static int TEST_DURATION_SECS = 60;

    final private static long FLUSH_COUNT = 500;

    public void printInfo(InfluxDB influxdb) {
        List<String> dbs = influxdb.describeDatabases();
        String ver = influxdb.version();
        System.out.println("Connected to database version: " + ver);
        System.out.println("Contains " + dbs.size() + " databases: ");
        for (String db : dbs) {
            System.out.println("\t" + db);
        }
    }

    @Before
    public void connect() throws Exception {
        try {
            influxDB = InfluxDBFactory.connect("http://diane.ornl.gov:8086");
            printInfo(influxDB);
        } catch (Exception e1) {
            // TODO Auto-generated catch block
            e1.printStackTrace();
            return;
        }
        influxDB.createDatabase(dbName);
    }

    /**
     * Basic connection
     */
    @Test
    public void LongValueProblem() {
        long millis = System.currentTimeMillis();
        long innano = millis * 1000000 + 1;
        Instant instamp = Instant.ofEpochMilli(millis).plusNanos(1);
        long lval = 1485370052974000001L;
        System.out.println("Timestamp: " + innano + " = " + instamp);
        BatchPoints batchPoints = BatchPoints.database(dbName).tag("async", "true").retentionPolicy("autogen").consistency(ConsistencyLevel.ALL).build();
        Point point = Point.measurement("testProblems").time(innano, TimeUnit.NANOSECONDS).addField("double", 3.14).addField("long", lval).build();
        batchPoints.point(point);
        try {
            // Any failure response from the influx server will be thrown as an exception
            // with the exception error message set to the server response
            influxDB.write(batchPoints);
        } catch (Exception e) {
            System.err.println("Write Failed: " + e.getMessage());
            e.printStackTrace();
        }
        Query query = new Query("SELECT * FROM testProblems ORDER BY time DESC LIMIT 1", dbName);
        System.out.println("Testing query using rfc3339 time/date: ");
        QueryResult result = influxDB.query(query);
        System.out.println(InfluxDBResults.toString(result));
        Series series0 = result.getResults().get(0).getSeries().get(0);
        List<String> cols = series0.getColumns();
        List<Object> val0 = series0.getValues().get(0);
        String tsstr = (String) val0.get(cols.indexOf("time"));
        Instant outstamp = Instant.from(DateTimeFormatter.ISO_INSTANT.parse(tsstr));
        if (!outstamp.equals(instamp)) {
            System.err.println("Got bad timestamp value back as string [" + tsstr + "] -> " + outstamp + " != " + instamp);
        }
        Object outlval_obj = val0.get(cols.indexOf("long"));
        if (outlval_obj instanceof Double) {
            long outlval = ((Double) outlval_obj).longValue();
            if (outlval != lval) {
                System.err.println("Got bad lval back as double [" + (outlval_obj) + "] -> " + outlval + " != " + lval);
            }
        } else {
            System.err.println("Expected Double output for long value... got " + outlval_obj.getClreplaced().getName() + " = " + outlval_obj.toString());
        }
        System.out.println("Testing query using epoch=n time/date: ");
        result = influxDB.query(query, TimeUnit.NANOSECONDS);
        System.out.println(InfluxDBResults.toString(result));
        series0 = result.getResults().get(0).getSeries().get(0);
        cols = series0.getColumns();
        val0 = series0.getValues().get(0);
        Double tsnano = (Double) val0.get(cols.indexOf("time"));
        long outnano = tsnano.longValue();
        long outmillis = outnano / 1000000;
        outstamp = Instant.ofEpochMilli(outmillis).plusNanos(outnano - (outmillis * 1000000));
        if ((outnano != innano) || (!outstamp.equals(instamp))) {
            System.err.println("Got bad long nanos back as double [" + tsnano + "] -> " + outnano + " ?= " + innano);
            System.err.println("Got bad timestamp back as double [" + tsnano + "] -> " + outstamp + " ?= " + instamp);
        }
        outlval_obj = val0.get(cols.indexOf("long"));
        if (outlval_obj instanceof Double) {
            long outlval = ((Double) outlval_obj).longValue();
            if (outlval != lval) {
                System.err.println("Got bad lval back as double [" + outlval_obj + "] -> " + outlval + " != " + lval);
            }
        }
    }

    /**
     * Basic connection
     */
    @Test
    public void demoBasicConnect() throws Exception {
        long millis = System.currentTimeMillis();
        long tnano = millis * 1000000 + 1;
        Instant stamp = Instant.ofEpochMilli(millis).plusNanos(1);
        System.out.println("Timestamp: " + tnano + " = " + stamp + " = " + InfluxDBUtil.toInfluxDBTimeFormat(stamp));
        double tricky = -Double.MAX_VALUE;
        // double tricky = Double.NaN;
        byte[] trickybytes = InfluxDBUtil.toByteArray(tricky);
        System.out.println("Tricky: " + tricky + " = " + InfluxDBUtil.bytesToHex(trickybytes));
        BatchPoints batchPoints = BatchPoints.database(dbName).tag("async", "true").retentionPolicy("autogen").consistency(ConsistencyLevel.ALL).build();
        Point point1 = Point.measurement("cpu1").time(tnano, TimeUnit.NANOSECONDS).addField("idle", 90L).addField("user", tricky).addField("system", 1L).build();
        Point point2 = Point.measurement("disk").time(System.currentTimeMillis(), TimeUnit.MILLISECONDS).addField("used", 80L).addField("free", 1L).build();
        batchPoints.point(point1);
        batchPoints.point(point2);
        System.out.println("Line Protocol for points: " + batchPoints.lineProtocol());
        try {
            // Any failure response from the influx server will be thrown as an exception
            // with the exception error message set to the server response
            influxDB.write(batchPoints);
        } catch (Exception e) {
            System.err.println("Write Failed: " + e.getMessage());
            e.printStackTrace();
        }
        // Query query = new Query("SELECT idle FROM cpu", dbName);
        // Select the 2 most recent points
        Query query = new Query("SELECT * FROM cpu1 ORDER BY time DESC LIMIT 2", dbName);
        System.out.println("Sending query: " + query.getCommandWithUrlEncoded());
        QueryResult result = influxDB.query(query);
        System.out.println(InfluxDBResults.toString(result));
    }

    @Test
    public void testWriteSpeedDouble() throws Exception {
        final String channel_name = "testDouble";
        System.out.println("Write test: Adding samples to " + channel_name + " for " + TEST_DURATION_SECS + " secs");
        long count = 0;
        final long start = System.currentTimeMillis();
        final long to_end = start + TEST_DURATION_SECS * 1000L;
        long end;
        BatchPoints batchPoints = BatchPoints.database(dbName).retentionPolicy("autogen").consistency(ConsistencyLevel.ALL).build();
        do {
            ++count;
            Point point = Point.measurement(channel_name).time(System.currentTimeMillis(), TimeUnit.MILLISECONDS).tag("a", "isA").tag("b", "isB").addField("double", 3.1).build();
            batchPoints.point(point);
            if (count % FLUSH_COUNT == 0) {
                influxDB.write(batchPoints);
                batchPoints = BatchPoints.database(dbName).retentionPolicy("autogen").consistency(ConsistencyLevel.ALL).build();
            }
            end = System.currentTimeMillis();
        } while (end < to_end);
        double secs = (end - start) / 1000.0;
        System.out.println("Wrote " + count + " samples, i.e. " + (count / secs) + " samples/sec.");
        influxDB.write(batchPoints);
    }
}
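
The long-value problem the first test probes is a transport artifact: query results arrive as JSON numbers, i.e. IEEE-754 doubles with a 53-bit mantissa, so longs above 2^53 cannot round-trip exactly. A minimal demonstration, independent of InfluxDB:

public class LongPrecisionSketch {

    public static void main(String[] args) {
        long lval = 1485370052974000001L;        // > 2^53, as in the test above
        double asJsonNumber = (double) lval;     // what the value becomes in a JSON response
        System.out.println((long) asJsonNumber == lval); // prints false: low bits are lost
    }
}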

Example: InflusDbUtil.java (18 votes, Apache License 2.0, Author: ucarGroup)
/**
 * Created by wangjiulin on 2017/11/6.
 */
public class InflusDbUtil {

    protected static Logger LOGGER = LoggerFactory.getLogger(InflusDbUtil.class);

    private static InfluxDB influxDB;

    public static String HOST_PORT;

    public static String USER_NAME;

    public static String PWD;

    public static String DATABASE;

    public static Boolean FLAG;

    static {
        PropertiesConfiguration propertiesConfiguration = null;
        try {
            propertiesConfiguration = new PropertiesConfiguration("influxdb.properties");
        } catch (ConfigurationException e) {
            e.printStackTrace();
        }
        if (propertiesConfiguration != null) {
            HOST_PORT = propertiesConfiguration.getString("HOST_PORT");
            USER_NAME = propertiesConfiguration.getString("USER_NAME");
            PWD = propertiesConfiguration.getString("PWD");
            DATABASE = propertiesConfiguration.getString("DATABASE");
            FLAG = Boolean.valueOf(propertiesConfiguration.getString("FLAG"));
            try {
                influxDB = InfluxDBFactory.connect(HOST_PORT, USER_NAME, PWD);
                influxDB.enableBatch(2000, 100, TimeUnit.MILLISECONDS);
            } catch (Exception e) {
                LOGGER.error("influxdb connect error:", e);
            }
        }
    }

    public static InfluxDB getConnection() {
        if (influxDB == null) {
            synchronized (InflusDbUtil.class) {
                // Re-check inside the lock so only one thread creates the connection.
                if (influxDB == null) {
                    influxDB = InfluxDBFactory.connect(HOST_PORT, USER_NAME, PWD);
                    influxDB.enableBatch(2000, 100, TimeUnit.MILLISECONDS);
                }
            }
        }
        return influxDB;
    }

    public static QueryResult query(String q) {
        return getConnection().query(new Query(q, DATABASE));
    }

    public static Map<String, Object> query(QueryResult queryResult, Integer seriesIndex, Integer valRow, Integer indexVal) {
        Map<String, Object> map = new HashMap<String, Object>();
        String result = null;
        if (valRow != null && indexVal != null && queryResult != null) {
            result = queryResult.getResults().get(0).getSeries().get(seriesIndex).getValues().get(valRow).get(indexVal).toString();
            map.put("result", result);
        }
        return map;
    }
}
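
A usage sketch for the helper above (the query string and indices are illustrative; in the four-argument overload, value index 1 is the first column after "time"):

QueryResult qr = InflusDbUtil.query("SELECT count(*) FROM http_requests");
Map<String, Object> row = InflusDbUtil.query(qr, 0, 0, 1);
System.out.println(row.get("result"));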

Example: DataLakeManagementV3.java (18 votes, Apache License 2.0, Author: apache)
public DataResult getEventsAutoAggregation(String index, long startDate, long endDate) throws ParseException {
    InfluxDB influxDB = getInfluxDBClient();
    double numberOfRecords = getNumOfRecordsOfTable(index, influxDB, startDate, endDate);
    if (numberOfRecords == 0) {
        influxDB.close();
        return new DataResult();
    } else if (numberOfRecords <= NUM_OF_AUTO_AGGREGATION_VALUES) {
        influxDB.close();
        return getEvents(index, startDate, endDate);
    } else {
        int aggregationValue = getAggregationValue(index, influxDB);
        influxDB.close();
        return getEvents(index, startDate, endDate, "ms", aggregationValue);
    }
}

Example: DataLakeManagementV3.java (18 votes, Apache License 2.0, Author: apache)
public DataResult getEventsFromNowAutoAggregation(String index, String timeunit, int value) throws ParseException {
    InfluxDB influxDB = getInfluxDBClient();
    double numberOfRecords = getNumOfRecordsOfTableFromNow(index, influxDB, timeunit, value);
    if (numberOfRecords == 0) {
        influxDB.close();
        return new DataResult();
    } else if (numberOfRecords <= NUM_OF_AUTO_AGGREGATION_VALUES) {
        influxDB.close();
        return getEventsFromNow(index, timeunit, value);
    } else {
        int aggregationValue = getAggregationValue(index, influxDB);
        influxDB.close();
        return getEventsFromNow(index, timeunit, value, "ms", aggregationValue);
    }
}

Example: DataLakeManagementV3.java (18 votes, Apache License 2.0, Author: apache)
private double getNumOfRecordsOfTable(String index, InfluxDB influxDB) {
    double numOfRecords = 0;
    QueryResult.Result result = influxDB.query(new Query("SELECT count(*) FROM " + index, BackendConfig.INSTANCE.getInfluxDatabaseName())).getResults().get(0);
    if (result.getSeries() == null) {
        return numOfRecords;
    }
    for (Object item : result.getSeries().get(0).getValues().get(0)) {
        if (item instanceof Double && numOfRecords < Double.parseDouble(item.toString())) {
            numOfRecords = Double.parseDouble(item.toString());
        }
    }
    return numOfRecords;
}

Example: DataLakeManagementV3.java (18 votes, Apache License 2.0, Author: apache)
private double getNumOfRecordsOfTableFromNow(String index, InfluxDB influxDB, String timeunit, int value) {
    double numOfRecords = 0;
    QueryResult.Result result = influxDB.query(new Query("SELECT count(*) FROM " + index + " WHERE time > now() -" + value + timeunit, BackendConfig.INSTANCE.getInfluxDatabaseName())).getResults().get(0);
    if (result.getSeries() == null) {
        return numOfRecords;
    }
    for (Object item : result.getSeries().get(0).getValues().get(0)) {
        if (item instanceof Double) {
            numOfRecords = Double.parseDouble(item.toString());
        }
    }
    return numOfRecords;
}

Example: DataLakeManagementV3.java (18 votes, Apache License 2.0, Author: apache)
private long getDateFromOldestRecordReOfTable(String index, InfluxDB influxDB) throws ParseException {
    Query query = new Query("SELECT * FROM " + index + " ORDER BY asc LIMIT 1 ", BackendConfig.INSTANCE.getInfluxDatabaseName());
    return getDateFromRecordOfTable(query, influxDB);
}

Example: InfluxDBUtil.java (18 votes, Eclipse Public License 1.0, Author: ControlSystemStudio)
public static InfluxDB connect(final String url, final String user, final String password) throws Exception {
    Activator.getLogger().log(Level.FINE, "Connecting to {0}", url);
    InfluxDB influxdb;
    if (user == null || user.isEmpty() || password == null || password.isEmpty()) {
        influxdb = InfluxDBFactory.connect(url);
    } else {
        influxdb = InfluxDBFactory.connect(url, user, password);
    }
    try {
        // Have to do something like this because connect fails silently.
        influxdb.version();
    } catch (Exception e) {
        throw new Exception("Failed to connect to InfluxDB as user " + user + " at " + url, e);
    }
    return influxdb;
}
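
influxdb-java also offers ping() for this kind of liveness probe, and its documented pattern is to treat a reported version of "unknown" as a failed connection. A sketch that could stand in for the version() call above (Pong is the influxdb-java DTO):

Pong pong = influxdb.ping();
if (pong.getVersion().equalsIgnoreCase("unknown")) {
    throw new Exception("Failed to connect to InfluxDB as user " + user + " at " + url);
}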

Example: InfluxdbDao.java (18 votes, Apache License 2.0, Author: dapeng-soa)
/**
 * Author: struy.
 * Created: 2018/2/7 10:08
 * email: [email protected]
 */
public class InfluxdbDao {

    private static final Logger LOGGER = LoggerFactory.getLogger(InfluxdbDao.class);

    private final String INFLUXDB_URL = CounterServiceProperties.SOA_COUNTER_INFLUXDB_URL;

    private final String INFLUXDB_USER = CounterServiceProperties.SOA_COUNTER_INFLUXDB_USER;

    private final String INFLUXDB_PWD = CounterServiceProperties.SOA_COUNTER_INFLUXDB_PWD;

    private InfluxDB influxDB = getInfluxDBConnection();

    public void writePoint(DataPoint dataPoint) {
        if (null == influxDB) {
            influxDB = getInfluxDBConnection();
        }
        long now = System.currentTimeMillis();
        Point.Builder commit = Point.measurement(dataPoint.bizTag);
        dataPoint.values.forEach(commit::addField);
        dataPoint.tags.forEach(commit::tag);
        commit.time(dataPoint.getTimestamp() == 0 ? now : dataPoint.getTimestamp(), TimeUnit.MILLISECONDS);
        try {
            influxDB.write(dataPoint.database, "", commit.build());
        } finally {
            if (influxDB != null) {
                influxDB.close();
                // Reset so the next call re-creates the connection after close().
                influxDB = null;
            }
        }
    }

    public void writePoints(List<DataPoint> dataPoints) {
        LOGGER.info("counter writePoints {}", dataPoints);
        try {
            if (null == influxDB) {
                influxDB = getInfluxDBConnection();
            }
            long now = System.currentTimeMillis();
            AtomicLong increment = new AtomicLong(0);
            dataPoints.forEach(dataPoint -> {
                Point.Builder commit = Point.measurement(dataPoint.bizTag);
                dataPoint.values.forEach(commit::addField);
                dataPoint.tags.forEach(commit::tag);
                commit.time(dataPoint.getTimestamp() == 0 ? now + increment.incrementAndGet() : dataPoint.getTimestamp(), TimeUnit.MILLISECONDS);
                influxDB.write(dataPoint.database, "", commit.build());
            });
        } finally {
            if (influxDB != null) {
                influxDB.close();
                // Reset so the next call re-creates the connection after close().
                influxDB = null;
            }
        }
    /*if (dataPoints.size()!=0){
            BatchPoints batchPoints = BatchPoints
                    .database(dataPoints.get(0).getDatabase())
                    .retentionPolicy("default")
                    .consistency(InfluxDB.ConsistencyLevel.ALL)
                    .build();
            dataPoints.forEach((DataPoint dataPoint) -> {
                Point point = Point.measurement(dataPoint.bizTag)
                        .fields(dataPoint.values) // todo
                        .tag(dataPoint.tags)
                        .build();
                batchPoints.point(point);
            });
            InfluxDB influxDB =  getInfluxDBConnection();
            try {
                influxDB.write(batchPoints);
            }catch (Exception e){
                e.printStackTrace();
            }finally {
                influxDB.close();
            }
        }*/
    }

    private InfluxDB getInfluxDBConnection() {
        LOGGER.info("Connection InfluxDB on :{}", INFLUXDB_URL);
        return InfluxDBFactory.connect(INFLUXDB_URL, INFLUXDB_USER, INFLUXDB_PWD);
    }
}

Example: InfluxDbPublisher.java (18 votes, Apache License 2.0, Author: amient)
public class InfluxDbPublisher implements MeasurementPublisher {

    static private final Logger log = LoggerFactory.getLogger(InfluxDbPublisher.class);

    static final String COFNIG_INFLUXDB_DATABASE = "influxdb.database";

    static final String COFNIG_INFLUXDB_URL = "influxdb.url";

    static final String COFNIG_INFLUXDB_RETENTION_POLICY = "influxdb.retention.policy";

    static final String COFNIG_INFLUXDB_USERNAME = "influxdb.username";

    static final String COFNIG_INFLUXDB_PASSWORD = "influxdb.password";

    private static final int DEFAULT_BACK_OFF_MS = 15000;

    final private String dbName;

    final private String address;

    private final String username;

    private final String password;

    private final String retention;

    private InfluxDB influxDB = null;

    volatile private long failureTimestamp = 0;

    public InfluxDbPublisher(Properties config) {
        this.dbName = config.getProperty(CONFIG_INFLUXDB_DATABASE, "metrics");
        this.address = config.getProperty(CONFIG_INFLUXDB_URL, "http://localhost:8086");
        this.username = config.getProperty(CONFIG_INFLUXDB_USERNAME, "root");
        this.password = config.getProperty(CONFIG_INFLUXDB_PASSWORD, "root");
        this.retention = config.getProperty(CONFIG_INFLUXDB_RETENTION_POLICY, "default");
    }

    public void publish(MeasurementV1 m) {
        Long time = m.getTimestamp();
        if (failureTimestamp > 0) {
            // still within the back-off window: drop the measurement
            if (time < failureTimestamp + DEFAULT_BACK_OFF_MS)
                return;
            else
                failureTimestamp = 0;
        }
        try {
            tryPublish(m);
        } catch (Throwable e) {
            log.warn("Failed to publish measurement to InfluxDB, will retry...", e);
            influxDB = null;
            failureTimestamp = time;
        }
    }

    public void tryPublish(MeasurementV1 m) {
        if (influxDB == null) {
            influxDB = InfluxDBFactory.connect(address, username, password);
            influxDB.enableBatch(1000, 100, TimeUnit.MILLISECONDS);
        }
        Point.Builder builder = Point.measurement(m.getName().toString()).time(m.getTimestamp(), TimeUnit.MILLISECONDS);
        for (java.util.Map.Entry<String, String> tag : m.getTags().entrySet()) {
            builder.tag(tag.getKey(), tag.getValue());
        }
        for (java.util.Map.Entry<String, Double> field : m.getFields().entrySet()) {
            builder.field(field.getKey(), field.getValue());
        }
        influxDB.write(dbName, retention, builder.build());
    }

    public void close() {
    }
}
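
A minimal wiring sketch for this publisher, using only the property keys defined above; MeasurementV1 instances come from the surrounding metrics pipeline, so the publish call is left commented:

Properties config = new Properties();
config.setProperty("influxdb.url", "http://localhost:8086");   // placeholder endpoint
config.setProperty("influxdb.database", "metrics");
// username, password and retention policy fall back to "root" / "root" / "default"
InfluxDbPublisher publisher = new InfluxDbPublisher(config);
// publisher.publish(measurement); // MeasurementV1 supplied by the pipeline
publisher.close();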

18 View Complete Implementation : InfluxDBReporterTest.java
Copyright MIT License
Author : etsy
public class InfluxDBReporterTest extends BaseReporterTest<InfluxDBReporter> {

    @Mock
    private InfluxDB client;

    @Override
    protected InfluxDBReporter constructReporter() {
        Arguments arguments = MockArguments.createArgs("localhost", 8888, "influxdb.reporter.test", ImmutableMap.of("username", "user", "password", "password", "database", "database"));
        return new InfluxDBReporter(arguments);
    }

    @Override
    protected void testCase(Object[] args) {
        assertEquals(1, args.length);
        BatchPoints actual = (BatchPoints) args[0];
        Point expectedPoint = Point.measurement("fake").field(InfluxDBReporter.VALUE_COLUMN, 100L).tag(TagUtil.PREFIX_TAG, "influxdb.reporter.test").build();
        BatchPoints expected = BatchPoints.database("database").build();
        expected.point(expectedPoint);
        assertEquals(expected.getDatabase(), actual.getDatabase());
        assertEquals(expected.getPoints().size(), actual.getPoints().size());
        Point actualPoint = actual.getPoints().get(0);
        // All the fields on Point are private
        assertTrue(actualPoint.lineProtocol().startsWith("fake"));
        assertTrue(actualPoint.lineProtocol().contains("value=100"));
        assertTrue(actualPoint.lineProtocol().contains("prefix=influxdb.reporter.test"));
    }

    @Test
    public void testRecordGaugeValue() {
        Mockito.doAnswer(answer).when(client).write(Matchers.any(BatchPoints.class));
        reporter.recordGaugeValue("fake", 100L);
    }

    @Test
    public void testHttpsUrlResolution() {
        Arguments arguments = MockArguments.createArgs("localhost", 443, "influxdb.reporter.test", ImmutableMap.of("username", "user", "password", "password", "database", "database", "useHttps", "true"));
        InfluxDBReporter reporter = new InfluxDBReporter(arguments);
        assertEquals("https://localhost:443", reporter.resolveUrl("localhost", 443));
    }

    @Test
    public void testHttpUrlResolution() {
        Arguments arguments = MockArguments.createArgs("localhost", 8888, "influxdb.reporter.test", ImmutableMap.of("username", "user", "password", "password", "database", "database", "useHttps", "false"));
        InfluxDBReporter reporter = new InfluxDBReporter(arguments);
        assertEquals("http://localhost:8888", reporter.resolveUrl("localhost", 8888));
    }
}
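
An equivalent way to assert on the written batch, sketched with Mockito's ArgumentCaptor instead of the shared Answer hook; it assumes, as the tests above imply, that recordGaugeValue flushes synchronously:

@Test
public void testRecordGaugeValueWithCaptor() {
    ArgumentCaptor<BatchPoints> captor = ArgumentCaptor.forClass(BatchPoints.class);
    reporter.recordGaugeValue("fake", 100L);
    Mockito.verify(client).write(captor.capture());
    BatchPoints written = captor.getValue();
    assertEquals(1, written.getPoints().size());
    assertTrue(written.getPoints().get(0).lineProtocol().startsWith("fake"));
}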

18 View Complete Implementation : InfluxUtil.java
Copyright BSD 3-Clause "New" or "Revised" License
Author : groupon
@Getter
public class InfluxUtil {

    /**
     * Special tag name used to indicate monsoon range.
     */
    public static final String MONSOON_RANGE_TAG = "__monsoon_range__";

    /**
     * Name of the column holding the timestamp.
     */
    public static final String TIME_COLUMN = "time";

    private final InfluxDB influxDB;

    private final String database;

    protected InfluxUtil(@NonNull InfluxDB influxDB, @NonNull String database) {
        this.influxDB = influxDB;
        this.database = database;
    }

    protected static void throwOnResultError(QueryResult result) {
        if (result.hasError())
            throw new IllegalStateException("influx error: " + result);
    }

    protected static Stream<DateTime> extractTimestamps(QueryResult result) {
        return result.getResults().stream()
                .filter(resultEntry -> !resultEntry.hasError())
                .filter(resultEntry -> resultEntry.getSeries() != null)
                .flatMap(resultEntry -> resultEntry.getSeries().stream())
                .map(series -> getColumnFromSeries(series, TIME_COLUMN))
                .filter(Optional::isPresent)
                .flatMap(Optional::get)
                .map(Number.class::cast)
                .map(number -> new DateTime(number.longValue(), DateTimeZone.UTC));
    }

    protected static Optional<Integer> getColumnIndexFromSeries(QueryResult.Series series, String columnName) {
        return Optional.of(series.getColumns().indexOf(columnName)).filter(idx -> idx >= 0);
    }

    protected static Optional<Stream<Object>> getColumnFromSeries(QueryResult.Series series, String columnName) {
        return getColumnIndexFromSeries(series, columnName).map(idx -> {
            return series.getValues().stream().map(row -> row.get(idx));
        });
    }

    /**
     * Validation function, to check if an iterable is sorted with ascending
     * timestamps.
     *
     * @param tscIterable An iterable type.
     * @return True iff the iterable is sorted, false otherwise.
     */
    static boolean isSorted(Iterable<? extends TimeSeriesCollection> tscIterable) {
        final Iterator<? extends TimeSeriesCollection> iter = tscIterable.iterator();
        // Empty collection is ordered.
        if (!iter.hasNext())
            return true;
        DateTime timestamp = iter.next().getTimestamp();
        while (iter.hasNext()) {
            final DateTime nextTimestamp = iter.next().getTimestamp();
            if (!nextTimestamp.isAfter(timestamp))
                return false;
            timestamp = nextTimestamp;
        }
        return true;
    }
}
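
Because InfluxUtil carries Lombok's @Getter, subclasses can reach the wrapped client and database through the generated getters. A small sketch of a hypothetical subclass pulling timestamps out of a measurement; the class name, query, and measurement are illustrative only:

class MeasurementTimes extends InfluxUtil {

    MeasurementTimes(InfluxDB influxDB, String database) {
        super(influxDB, database);
    }

    Stream<DateTime> latestTimestamps(String measurement) {
        QueryResult result = getInfluxDB().query(
                new Query("SELECT * FROM \"" + measurement + "\" ORDER BY time DESC LIMIT 10", getDatabase()));
        throwOnResultError(result);
        return extractTimestamps(result);
    }
}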

18 View Complete Implementation : InfluxDBClientTest.java
Copyright Apache License 2.0
Author : sonyxperiadev
private InfluxDBClient createClient(Answer answer) {
    return InfluxDBClient.prepareForTest(mapWrap -> {
        InfluxDB mock = mock(InfluxDB.class);
        doAnswer(answer).when(mock).write(any(BatchPoints.class));
        return mock;
    });
}

18 View Complete Implementation : DataLakeManagementV3.java
Copyright Apache License 2.0
Author : apache
private double getNumOfRecordsOfTable(String index, InfluxDB influxDB, long startDate, long endDate) {
    double numOfRecords = 0;
    // InfluxQL compares time in nanoseconds, so the millisecond bounds are scaled by 1,000,000
    QueryResult.Result result = influxDB.query(new Query("SELECT count(*) FROM " + index + " WHERE time > " + startDate * 1000000 + " AND time < " + endDate * 1000000, BackendConfig.INSTANCE.getInfluxDatabaseName())).getResults().get(0);
    if (result.getSeries() == null) {
        return numOfRecords;
    }
    for (Object item : result.getSeries().get(0).getValues().get(0)) {
        if (item instanceof Double && numOfRecords < Double.parseDouble(item.toString())) {
            numOfRecords = Double.parseDouble(item.toString());
        }
    }
    return numOfRecords;
}
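
The * 1000000 factors above convert the millisecond bounds to InfluxDB's nanosecond epoch; the same arithmetic reads more clearly through TimeUnit, shown here only as a sketch:

long startNs = TimeUnit.MILLISECONDS.toNanos(startDate); // == startDate * 1_000_000
long endNs = TimeUnit.MILLISECONDS.toNanos(endDate);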

18 View Complete Implementation : DataLakeManagementV3.java
Copyright Apache License 2.0
Author : apache
public GroupedDataResult getEventsAutoAggregation(String index, long startDate, long endDate, String groupingTag) throws ParseException {
    InfluxDB influxDB = getInfluxDBClient();
    try {
        double numberOfRecords = getNumOfRecordsOfTable(index, influxDB, startDate, endDate);
        if (numberOfRecords == 0) {
            return new GroupedDataResult(0, new HashMap<>());
        } else if (numberOfRecords <= NUM_OF_AUTO_AGGREGATION_VALUES) {
            return getEvents(index, startDate, endDate, groupingTag);
        } else {
            int aggregationValue = getAggregationValue(index, influxDB);
            return getEvents(index, startDate, endDate, "ms", aggregationValue, groupingTag);
        }
    } finally {
        // close the connection exactly once, after the last query that needs it
        influxDB.close();
    }
}

18 View Complete Implementation : InfluxStorageAdapter.java
Copyright Apache License 2.0
Author : hawkular
/**
 * Pushes the data to Influx.
 *
 * @author Heiko Braun
 * @since 13/10/14
 */
public class InfluxStorageAdapter implements StorageAdapter {

    private InfluxDB influxDB;

    private String dbName;

    private Diagnostics diagnostics;

    private Configuration config;

    private DefaultKeyResolution keyResolution;

    @Override
    public void setConfiguration(Configuration config) {
        this.config = config;
        this.influxDB = InfluxDBFactory.connect(config.getStorageUrl(), config.getStorageUser(), config.getStoragePassword());
        this.dbName = config.getStorageDBName();
        this.keyResolution = new DefaultKeyResolution();
    }

    @Override
    public void setDiagnostics(Diagnostics diag) {
        this.diagnostics = diag;
    }

    @Override
    public void store(Set<DataPoint> datapoints) {
        try {
            Serie[] series = new Serie[datapoints.size()];
            int i = 0;
            for (DataPoint datapoint : datapoints) {
                Task task = datapoint.getTask();
                String key = keyResolution.resolve(task);
                Serie dataPoint = new Serie.Builder(key).columns("datapoint").values(datapoint.getValue()).build();
                series[i] = dataPoint;
                i++;
            }
            this.influxDB.write(this.dbName, TimeUnit.MILLISECONDS, series);
        } catch (Throwable t) {
            diagnostics.getStorageErrorRate().mark(1);
            t.printStackTrace();
        }
    }
}
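
The Serie type used in store() belongs to the pre-1.0 influxdb-java API. With the current client the same loop would build Point/BatchPoints instead; a rough sketch under that assumption, where Task, DataPoint, and keyResolution are this project's own types:

BatchPoints batch = BatchPoints.database(dbName).build();
for (DataPoint datapoint : datapoints) {
    String key = keyResolution.resolve(datapoint.getTask());
    batch.point(Point.measurement(key)
            .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
            .addField("datapoint", datapoint.getValue()) // assumes getValue() yields a numeric value
            .build());
}
this.influxDB.write(batch);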

18 View Complete Implementation : InfluxDBConnectionFactory.java
Copyright Apache License 2.0
Author : miwurster
public class InfluxDBConnectionFactory implements InitializingBean {

    private static Logger logger = LoggerFactory.getLogger(InfluxDBConnectionFactory.class);

    private InfluxDB connection;

    private InfluxDBProperties properties;

    public InfluxDBConnectionFactory() {
    }

    public InfluxDBConnectionFactory(final InfluxDBProperties properties) {
        this.properties = properties;
    }

    public InfluxDB getConnection() {
        Assert.notNull(getProperties(), "InfluxDBProperties are required");
        // lazy initialization; note that this getter is not synchronized
        if (connection == null) {
            final Builder client = new OkHttpClient.Builder()
                    .connectTimeout(properties.getConnectTimeout(), TimeUnit.SECONDS)
                    .writeTimeout(properties.getWriteTimeout(), TimeUnit.SECONDS)
                    .readTimeout(properties.getReadTimeout(), TimeUnit.SECONDS);
            connection = InfluxDBFactory.connect(properties.getUrl(), properties.getUsername(), properties.getPassword(), client);
            logger.debug("Using InfluxDB '{}' on '{}'", properties.getDatabase(), properties.getUrl());
            if (properties.isGzip()) {
                logger.debug("Enabled gzip compression for HTTP requests");
                connection.enableGzip();
            }
        }
        return connection;
    }

    /**
     * Returns the configuration properties.
     *
     * @return Returns the configuration properties
     */
    public InfluxDBProperties getProperties() {
        return properties;
    }

    /**
     * Sets the configuration properties.
     *
     * @param properties The configuration properties to set
     */
    public void setProperties(final InfluxDBProperties properties) {
        this.properties = properties;
    }

    @Override
    public void afterPropertiesSet() throws Exception {
        Assert.notNull(getProperties(), "InfluxDBProperties are required");
    }
}

18 View Complete Implementation : DataLakeManagementV3.java
Copyright Apache License 2.0
Author : apache
private long getDateFromNewestRecordReOfTable(String index, InfluxDB influxDB) throws ParseException {
    Query query = new Query("SELECT * FROM " + index + " ORDER BY desc LIMIT 1 ", BackendConfig.INSTANCE.getInfluxDatabaseName());
    return getDateFromRecordOfTable(query, influxDB);
}

18 View Complete Implementation : InfluxReporter.java
Copyright Apache License 2.0
Author : smartcat-labs
/**
 * An InfluxDB based {@link Reporter} implementation. Query reports are sent to influxdb.
 */
public class InfluxReporter extends Reporter {

    /**
     * Class logger.
     */
    private static final Logger logger = LoggerFactory.getLogger(InfluxReporter.class);

    private static final String ADDRESS_PROP = "influxDbAddress";

    private static final String USERNAME_PROP = "influxUsername";

    private static final String PASSWORD_PROP = "influxPassword";

    private static final String DB_NAME_PROP = "influxDbName";

    private static final String POINTS_IN_BATCH_PROP = "influxPointsInBatch";

    private static final int DEFAULT_POINTS_IN_BATCH = 1000;

    private static final String FLUSH_PERIOD_IN_SECONDS_PROP = "influxFlushPeriodInSeconds";

    private static final int DEFAULT_FLUSH_PERIOD_IN_SECONDS = 5;

    private static final String DEFAULT_DB_NAME = "cassandradb";

    private static final String RETENTION_POLICY_PROP = "influxRetentionPolicy";

    private static final String DEFAULT_RETENTION_POLICY = "autogen";

    private String dbAddress;

    private String username;

    private String password;

    private String dbName;

    private String retentionPolicy;

    private static InfluxDB influx;

    /**
     * Constructor.
     *
     * @param configuration        Reporter configuration
     * @param globalConfiguration  Global diagnostics configuration
     */
    public InfluxReporter(ReporterConfiguration configuration, GlobalConfiguration globalConfiguration) {
        super(configuration, globalConfiguration);
        if (!configuration.options.containsKey(ADDRESS_PROP)) {
            logger.warn("Not properly configured. Missing influx address. Aborting initialization.");
            return;
        }
        if (!configuration.options.containsKey(USERNAME_PROP)) {
            logger.warn("Not properly configured. Missing influx username. Aborting initialization.");
            return;
        }
        if (!configuration.options.containsKey(DB_NAME_PROP)) {
            logger.warn("Not properly configured. Missing influx db name. Aborting initialization.");
            return;
        }
        dbAddress = configuration.getOption(ADDRESS_PROP);
        username = configuration.getDefaultOption(USERNAME_PROP, "");
        password = configuration.getDefaultOption(PASSWORD_PROP, "");
        dbName = configuration.getDefaultOption(DB_NAME_PROP, DEFAULT_DB_NAME);
        retentionPolicy = configuration.getDefaultOption(RETENTION_POLICY_PROP, DEFAULT_RETENTION_POLICY);
        influx = InfluxDBFactory.connect(dbAddress, username, password);
        influx.createDatabase(dbName);
        final int pointsInBatch = configuration.getDefaultOption(POINTS_IN_BATCH_PROP, DEFAULT_POINTS_IN_BATCH);
        final int flushPeriodInSeconds = configuration.getDefaultOption(FLUSH_PERIOD_IN_SECONDS_PROP, DEFAULT_FLUSH_PERIOD_IN_SECONDS);
        influx.enableBatch(pointsInBatch, flushPeriodInSeconds, TimeUnit.SECONDS);
    }

    @Override
    public void report(Measurement measurement) {
        if (influx == null) {
            logger.warn("InfluxDB client is not initialized");
            return;
        }
        logger.debug("Sending Query: {}", measurement.toString());
        try {
            final Point.Builder builder = Point.measurement(measurement.name());
            builder.time(measurement.time(), measurement.timeUnit());
            builder.tag("type", measurement.type().toString());
            for (Map.Entry<String, String> tag : measurement.tags().entrySet()) {
                builder.tag(tag.getKey(), tag.getValue());
            }
            for (Map.Entry<String, String> field : measurement.fields().entrySet()) {
                builder.addField(field.getKey(), field.getValue());
            }
            if (measurement.isSimple()) {
                builder.addField("value", measurement.getValue());
            }
            influx.write(dbName, retentionPolicy, builder.build());
        } catch (Exception e) {
            logger.warn("Failed to send report to influx", e);
        }
    }
}

17 View Complete Implementation : AbstractITInfluxDB.java
Copyright Apache License 2.0
Author : apache
/**
 * Base integration test class for InfluxDB processors
 */
public class AbstractITInfluxDB {

    protected TestRunner runner;

    protected InfluxDB influxDB;

    protected String dbName = "test";

    protected String dbUrl = "http://localhost:8086";

    protected String user = "admin";

    protected String preplacedword = "admin";

    protected static final String DEFAULT_RETENTION_POLICY = "autogen";

    protected Type QueryResultListType = new TypeToken<List<QueryResult>>() {
    }.getType();

    protected void initInfluxDB() throws Exception {
        influxDB = InfluxDBFactory.connect(dbUrl, user, password);
        influxDB.createDatabase(dbName);
        int max = 10;
        // poll until the database shows up, for up to 10 attempts
        while (!influxDB.databaseExists(dbName) && (max-- > 0)) {
            Thread.sleep(5);
        }
        if (!influxDB.databaseExists(dbName)) {
            throw new Exception("unable to create database " + dbName);
        }
    }

    protected void cleanUpDatabase() throws InterruptedException {
        if (influxDB.databaseExists(dbName)) {
            QueryResult result = influxDB.query(new Query("DROP measurement water", dbName));
            checkError(result);
            result = influxDB.query(new Query("DROP measurement testm", dbName));
            checkError(result);
            result = influxDB.query(new Query("DROP measurement chunkedQueryTest", dbName));
            checkError(result);
            result = influxDB.query(new Query("DROP database " + dbName, dbName));
            Thread.sleep(1000);
        }
    }

    protected void checkError(QueryResult result) {
        if (result.hasError()) {
            throw new IllegalStateException("Error while dropping measurements " + result.getError());
        }
    }

    @After
    public void tearDown() throws Exception {
        runner = null;
        if (influxDB != null) {
            cleanUpDatabase();
            influxDB.close();
        }
    }

    protected void initializeRunner() {
        runner.setProperty(ExecuteInfluxDBQuery.DB_NAME, dbName);
        runner.setProperty(ExecuteInfluxDBQuery.USERNAME, user);
        runner.setProperty(ExecuteInfluxDBQuery.PASSWORD, password);
        runner.setProperty(ExecuteInfluxDBQuery.INFLUX_DB_URL, dbUrl);
        runner.setProperty(ExecuteInfluxDBQuery.CHARSET, "UTF-8");
        runner.assertValid();
    }
}
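
A concrete integration test built on this base class only needs to supply a TestRunner before calling the helpers; a minimal sketch assuming NiFi's mock framework and the ExecuteInfluxDBQuery processor referenced above:

public class ITExecuteInfluxDBQuery extends AbstractITInfluxDB {

    @Before
    public void setUp() throws Exception {
        runner = TestRunners.newTestRunner(ExecuteInfluxDBQuery.class);
        initializeRunner(); // wires DB name, credentials, URL and charset
        initInfluxDB();     // connects and creates the test database
    }
}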

17 View Complete Implementation : InfluxDbConnectionFactory.java
Copyright Apache License 2.0
Author : apereo
/**
 * This is {@link InfluxDbConnectionFactory}.
 *
 * @author Misagh Moayyed
 * @since 5.2.0
 */
@Slf4j
public class InfluxDbConnectionFactory implements AutoCloseable {

    /**
     * The Influx db.
     */
    private final InfluxDB influxDb;

    /**
     * The Influx db properties.
     */
    private InfluxDbProperties influxDbProperties;

    public InfluxDbConnectionFactory(final String url, final String uid, final String psw, final String dbName, final boolean dropDatabase) {
        if (StringUtils.isBlank(dbName) || StringUtils.isBlank(url)) {
            throw new IllegalArgumentException("Database name/url cannot be blank and must be specified");
        }
        val builder = new OkHttpClient.Builder();
        this.influxDb = InfluxDBFactory.connect(url, uid, psw, builder);
        this.influxDb.enableGzip();
        if (dropDatabase) {
            this.influxDb.deleteDatabase(dbName);
        }
        if (!this.influxDb.databaseExists(dbName)) {
            this.influxDb.createDatabase(dbName);
        }
        this.influxDb.setLogLevel(InfluxDB.LogLevel.NONE);
        if (LOGGER.isDebugEnabled()) {
            this.influxDb.setLogLevel(InfluxDB.LogLevel.FULL);
        } else if (LOGGER.isInfoEnabled()) {
            this.influxDb.setLogLevel(InfluxDB.LogLevel.BASIC);
        } else if (LOGGER.isWarnEnabled()) {
            this.influxDb.setLogLevel(InfluxDB.LogLevel.HEADERS);
        } else if (LOGGER.isErrorEnabled()) {
            this.influxDb.setLogLevel(InfluxDB.LogLevel.NONE);
        }
    }

    /**
     * Instantiates a new Influx db connection factory.
     *
     * @param props the props
     */
    public InfluxDbConnectionFactory(final InfluxDbProperties props) {
        this(props.getUrl(), props.getUsername(), props.getPassword(), props.getDatabase(), props.isDropDatabase());
        if (StringUtils.isNotBlank(props.getRetentionPolicy())) {
            this.influxDb.setRetentionPolicy(props.getRetentionPolicy());
        }
        influxDb.setConsistency(InfluxDB.ConsistencyLevel.valueOf(props.getConsistencyLevel().toUpperCase()));
        if (props.getPointsToFlush() > 0 && StringUtils.isNotBlank(props.getBatchInterval())) {
            val interval = (int) Beans.newDuration(props.getBatchInterval()).toMillis();
            this.influxDb.enableBatch(props.getPointsToFlush(), interval, TimeUnit.MILLISECONDS);
        }
        this.influxDbProperties = props;
    }

    /**
     * Write measurement point.
     *
     * @param point the point
     */
    public void write(final Point point) {
        this.influxDb.write(influxDbProperties.getDatabase(), influxDbProperties.getRetentionPolicy(), point);
    }

    /**
     * Write measurement point.
     *
     * @param point  the point
     * @param dbName the db name
     */
    public void write(final Point point, final String dbName) {
        this.influxDb.write(dbName, "autogen", point);
    }

    /**
     * Write synchronized batch.
     *
     * @param point the points to write immediately in sync fashion
     */
    public void writeBatch(final Point... point) {
        val batchPoints = BatchPoints.database(influxDbProperties.getDatabase())
            .retentionPolicy(influxDbProperties.getRetentionPolicy())
            .consistency(InfluxDB.ConsistencyLevel.valueOf(influxDbProperties.getConsistencyLevel()))
            .build();
        Arrays.stream(point).forEach(batchPoints::point);
        influxDb.write(batchPoints);
    }

    /**
     * Query all result.
     *
     * @param measurement the measurement
     * @return the query result
     */
    public QueryResult query(final String measurement) {
        return query("*", measurement);
    }

    /**
     * Query result.
     *
     * @param fields      the fields
     * @param measurement the measurement
     * @return the query result
     */
    public QueryResult query(final String fields, final String measurement) {
        return query(fields, measurement, influxDbProperties.getDatabase());
    }

    /**
     * Query result.
     *
     * @param fields      the fields
     * @param measurement the table
     * @param dbName      the db name
     * @return the query result
     */
    public QueryResult query(final String fields, final String measurement, final String dbName) {
        val filter = String.format("SELECT %s FROM %s", fields, measurement);
        val query = new Query(filter, dbName);
        return this.influxDb.query(query);
    }

    @Override
    public void close() {
        this.influxDb.close();
    }
}
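
A short usage sketch of this factory; the URL, credentials, and database name are placeholders. The db-name overloads are used because the five-argument constructor leaves influxDbProperties unset:

try (InfluxDbConnectionFactory factory = new InfluxDbConnectionFactory(
        "http://localhost:8086", "root", "root", "casEvents", false)) {
    factory.write(Point.measurement("events")
            .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
            .addField("value", 1L)
            .build(), "casEvents");
    QueryResult result = factory.query("*", "events", "casEvents");
}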

17 View Complete Implementation : ChronoSeries.java
Copyright Apache License 2.0
Author : BFergerson
/**
 * Represents a series of timestamps produced by a single source.
 * Series must contain at least two elements.
 *
 * @version 1.0
 * @since 1.0
 * @author <a href="mailto:[email protected]">Brandon Fergerson</a>
 */
public class ChronoSeries {

    private final static Logger logger = LoggerFactory.getLogger(ChronoSeries.class);

    private final Cache<ChronoRange, Integer> cachePatternCount = CacheBuilder.newBuilder().expireAfterAccess(5, TimeUnit.MINUTES).build();

    private final Cache<Integer, Instant> timestampCache = CacheBuilder.newBuilder().expireAfterAccess(5, TimeUnit.MINUTES).build();

    private final Cache<String, Instant[]> multiTimestampCache = CacheBuilder.newBuilder().expireAfterAccess(5, TimeUnit.MINUTES).build();

    private ChronoScale chronoScale;

    private Instant beginTimestamp;

    private Instant endTimestamp;

    private int size;

    private List<Instant> seriesList;

    private InfluxDB influxDB;

    private String database;

    private String table;

    private String column;

    private ChronoSeries() {
    // suppresses default constructor
    }

    /**
     * Returns ChronoScale used by ChronoSeries.
     *
     * @return ChronoSeries' underlying ChronoScale
     */
    @NotNull
    public ChronoScale getChronoScale() {
        return chronoScale;
    }

    /**
     * Returns earliest timestamp in ChronoSeries.
     *
     * @return earliest timestamp
     */
    @NotNull
    public Instant getBeginTimestamp() {
        return beginTimestamp;
    }

    /**
     * Returns latest timestamp in ChronoSeries.
     *
     * @return latest timestamp
     */
    @NotNull
    public Instant getEndTimestamp() {
        return endTimestamp;
    }

    /**
     * Returns earliest timestamp as LocalDateTime (UTC).
     *
     * @return earliest timestamp as LocalDateTime (UTC)
     */
    @NotNull
    public LocalDateTime getBeginLocalDateTime() {
        return beginTimestamp.atZone(ZoneOffset.UTC).toLocalDateTime();
    }

    /**
     * Returns latest timestamp as LocalDateTime (UTC).
     *
     * @return latest timestamp as LocalDateTime (UTC)
     */
    @NotNull
    public LocalDateTime getEndLocalDateTime() {
        return endTimestamp.atZone(ZoneOffset.UTC).toLocalDateTime();
    }

    /**
     * Counts the distinct amount of appearances of the given
     * ChronoUnit in this ChronoSeries using the given ChronoRange.
     *
     * @param chronoRange desired ChronoRange
     * @param chronoUnit desired ChronoUnit
     * @return distinct amount of times ChronoUnit appears in ChronoSeries with given ChronoRange
     */
    public int countDistinctChronoUnitAppearance(@NotNull ChronoRange chronoRange, @NotNull ChronoUnit chronoUnit) {
        final LocalDateTime startTime = requireNonNull(chronoRange).getPatternStartLocalDateTime().orElse(getBeginLocalDateTime());
        final LocalDateTime endTime = requireNonNull(chronoRange).getPatternEndLocalDateTime().orElse(getEndLocalDateTime());
        LocalDateTime patternStartTime = startTime;
        LocalDateTime patternEndTime = endTime;
        // truncate startTime to chronoUnit (start)
        try {
            patternStartTime = patternStartTime.truncatedTo(chronoUnit);
        } catch (UnsupportedTemporalTypeException ex) {
            if (chronoUnit == ChronoUnit.YEARS) {
                // truncate to beginning of year
                patternStartTime = patternStartTime.with(firstDayOfYear()).truncatedTo(ChronoUnit.DAYS);
            } else if (chronoUnit == ChronoUnit.MONTHS) {
                // truncate to beginning of month
                patternStartTime = patternStartTime.with(TemporalAdjusters.firstDayOfMonth()).truncatedTo(ChronoUnit.DAYS);
            } else {
            // throw new UnsupportedOperationException();
            }
        }
        // truncate endTime to chronoUnit (end)
        try {
            patternEndTime = patternEndTime.truncatedTo(chronoUnit);
        } catch (UnsupportedTemporalTypeException ex) {
            if (chronoUnit == ChronoUnit.YEARS) {
                // truncate to beginning of year
                patternEndTime = patternEndTime.with(firstDayOfYear()).truncatedTo(ChronoUnit.DAYS);
            } else if (chronoUnit == ChronoUnit.MONTHS) {
                // truncate to beginning of month
                patternEndTime = patternEndTime.with(TemporalAdjusters.firstDayOfMonth()).truncatedTo(ChronoUnit.DAYS);
            } else {
            // throw new UnsupportedOperationException();
            }
        }
        patternEndTime = patternEndTime.plus(1, chronoUnit);
        return (int) chronoUnit.between(patternStartTime, patternEndTime);
    }

    /**
     * Counts the number of time events that occur during the given ChronoRange.
     *
     * @param chronoRange desired ChronoRange
     * @return amount of time events that occur during the given ChronoRange
     */
    public int countEventsBetween(@NotNull ChronoRange chronoRange) {
        Integer cacheCount = cachePatternCount.getIfPresent(requireNonNull(chronoRange));
        if (cacheCount != null) {
            return cacheCount;
        } else if (chronoRange.getTimestampRanges().isEmpty()) {
            return 0;
        } else {
            logger.debug("Counting events between: " + chronoRange);
        }
        int count = 0;
        if (seriesList != null) {
            for (Instant timestamp : seriesList) {
                if (chronoRange.containsTime(timestamp)) {
                    count++;
                }
            }
        } else {
            List<Instant[]> timestampRanges = chronoRange.getTimestampRanges();
            StringBuilder whereClause = new StringBuilder();
            boolean first = true;
            for (Instant[] timestampRange : timestampRanges) {
                if (first) {
                    whereClause = whereClause.append("(");
                    first = false;
                } else {
                    whereClause = whereClause.append("OR (");
                }
                long startTime = timestampRange[0].getEpochSecond();
                // convert to nanoseconds
                startTime *= 1000000000L;
                startTime += timestampRange[0].getNano();
                long endTime = timestampRange[1].getEpochSecond();
                // convert to nanoseconds
                endTime *= 1000000000L;
                endTime += timestampRange[1].getNano();
                whereClause = whereClause.append("time >= ").append(startTime);
                whereClause = whereClause.append(" AND ");
                whereClause = whereClause.append("time <= ").append(endTime);
                whereClause = whereClause.append(") ");
            }
            QueryResult queryResult = influxDB.query(new Query(String.format("SELECT COUNT(%s) FROM \"%s\" WHERE %s", column, table, whereClause.toString()), database));
            for (QueryResult.Result result : queryResult.getResults()) {
                if (result.getSeries() != null) {
                    Double dbCount = (Double) result.getSeries().get(0).getValues().get(0).get(1);
                    count += dbCount.intValue();
                }
            }
        }
        cachePatternCount.put(chronoRange, count);
        return count;
    }

    /**
     * Returns the timestamp at the given series position.
     *
     * @param seriesPosition desired position
     * @return timestamp at given series position
     */
    @NotNull
    public Instant getTimestamp(int seriesPosition) {
        Instant cacheTimestamp = timestampCache.getIfPresent(seriesPosition);
        if (cacheTimestamp != null) {
            return cacheTimestamp;
        } else {
            logger.debug("Getting timestamp at position: " + seriesPosition);
        }
        Instant timestamp = null;
        if (seriesList != null) {
            timestamp = seriesList.get(seriesPosition);
        } else {
            QueryResult queryResult = influxDB.query(new Query(String.format("SELECT %s FROM \"%s\" LIMIT 1 OFFSET %d", column, table, seriesPosition), database));
            for (QueryResult.Result result : queryResult.getResults()) {
                String timeString = (String) result.getSeries().get(0).getValues().get(0).get(0);
                timestamp = Instant.parse(timeString);
            }
        }
        if (timestamp != null) {
            timestampCache.put(seriesPosition, timestamp);
            return timestamp;
        } else {
            throw new IllegalStateException("Unable to determine timestamp at series position: " + seriesPosition);
        }
    }

    /**
     * Returns one-to-many timestamp(s) at the given series position for the specified limit.
     *
     * @param seriesPosition desired position
     * @param limit desired limit
     * @return one-to-many timestamp(s) at given series position and limit
     */
    @NotNull
    public Instant[] getTimestamps(int seriesPosition, int limit) {
        Instant[] cacheTimestamps = multiTimestampCache.getIfPresent(seriesPosition + "/" + limit);
        if (cacheTimestamps != null) {
            return cacheTimestamps;
        } else {
            logger.debug("Getting multiple timestamps at position: " + seriesPosition + "; Limit: " + limit);
        }
        Instant[] longArr = new Instant[limit];
        if (seriesList != null) {
            for (int i = 0; i < limit; i++) {
                longArr[i] = seriesList.get(seriesPosition + i);
            }
        } else {
            QueryResult queryResult = influxDB.query(new Query(String.format("SELECT %s FROM \"%s\" LIMIT %d OFFSET %d", column, table, limit, seriesPosition), database));
            int i = 0;
            for (QueryResult.Result result : queryResult.getResults()) {
                for (List<Object> values : result.getSeries().get(0).getValues()) {
                    String timeString = (String) values.get(0);
                    longArr[i++] = Instant.parse(timeString);
                }
            }
        }
        multiTimestampCache.put(seriesPosition + "/" + limit, longArr);
        return longArr;
    }

    /**
     * Returns the size of this ChronoSeries.
     *
     * @return size of ChronoSeries
     */
    public int getSize() {
        return size;
    }

    /**
     * Returns the duration of this ChronoSeries.
     *
     * @return Duration of ChronoSeries
     */
    @NotNull
    public Duration getDuration() {
        LocalDateTime startDate = beginTimestamp.atZone(ZoneOffset.UTC).toLocalDateTime();
        LocalDateTime endDate = endTimestamp.atZone(ZoneOffset.UTC).toLocalDateTime();
        return Duration.between(startDate, endDate);
    }

    /**
     * Create a ChronoSeries from the given frequency information.
     *
     * @param exactFrequency desired exact frequency
     * @param frequencyUnit frequency unit
     * @param startInstant start timestamp
     * @param endInstant end timestamp
     * @return ChronoSeries from the given frequency information
     */
    @NotNull
    public static ChronoSeries fromFrequency(long exactFrequency, @NotNull ChronoUnit frequencyUnit, @NotNull Instant startInstant, @NotNull Instant endInstant) {
        return fromFrequency(exactFrequency, exactFrequency, requireNonNull(frequencyUnit), requireNonNull(startInstant), requireNonNull(endInstant));
    }

    /**
     * Create a ChronoSeries from the given frequency information.
     *
     * @param minimumFrequency minimum frequency
     * @param maximumFrequency maximum frequency
     * @param frequencyUnit frequency unit
     * @param startInstant start timestamp
     * @param endInstant end timestamp
     * @return ChronoSeries from the given frequency information
     */
    @NotNull
    public static ChronoSeries fromFrequency(long minimumFrequency, long maximumFrequency, @NotNull ChronoUnit frequencyUnit, @NotNull Instant startInstant, @NotNull Instant endInstant) {
        List<Instant> instants = new ArrayList<>();
        Instant itrTime = startInstant;
        while (itrTime.isBefore(endInstant) || itrTime.equals(endInstant)) {
            instants.add(itrTime);
            itrTime = itrTime.plus(RandomRegistry.getRandom().nextInt((int) (maximumFrequency - minimumFrequency) + 1) + minimumFrequency, frequencyUnit);
        }
        return of(instants.toArray(new Instant[0]));
    }

    /**
     * Create ChronoSeries from the given Dates.
     *
     * @param timestampSeries desired Dates
     * @return ChronoSeries with the given Dates
     */
    @NotNull
    public static ChronoSeries of(@NotNull Date... timestampSeries) {
        Instant[] instants = new Instant[timestampSeries.length];
        for (int i = 0; i < timestampSeries.length; i++) {
            instants[i] = requireNonNull(timestampSeries[i]).toInstant();
        }
        return of(instants);
    }

    /**
     * Create ChronoSeries from the given Instants.
     *
     * @param timestampSeries desired Instants
     * @return ChronoSeries with the given Instants
     */
    @NotNull
    public static ChronoSeries of(@NotNull Instant... timestampSeries) {
        return of(true, timestampSeries);
    }

    /**
     * Create ChronoSeries from the given Instants.
     * Allows specifying whether to disable default ChronoScaleUnits.
     *
     * @param disableScaleUnits whether to disable default ChronoScaleUnits
     * @param timestampSeries desired Instants
     * @return ChronoSeries with the given Instants
     */
    @NotNull
    public static ChronoSeries of(boolean disableScaleUnits, @NotNull Instant... timestampSeries) {
        if (timestampSeries.length < 2) {
            throw new IllegalArgumentException("ChronoSeries requires at least two elements to initiate");
        }
        ChronoSeries series = new ChronoSeries();
        series.chronoScale = new ChronoScale();
        series.seriesList = new ArrayList<>(timestampSeries.length);
        series.seriesList.addAll(Arrays.asList(timestampSeries));
        series.beginTimestamp = series.seriesList.get(0);
        series.endTimestamp = series.seriesList.get(series.seriesList.size() - 1);
        series.size = series.seriesList.size();
        // todo: calculate ChronoScale
        LocalDateTime startDate = series.beginTimestamp.atZone(ZoneOffset.UTC).toLocalDateTime();
        LocalDateTime endDate = series.endTimestamp.atZone(ZoneOffset.UTC).toLocalDateTime();
        if (disableScaleUnits) {
            disableUnnecessaryUnits(series, startDate, endDate);
        } else {
            // factuals
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.CENTURIES));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.DECADES));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.YEARS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MONTHS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.WEEKS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.DAYS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.HOURS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MINUTES));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.SECONDS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MILLIS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MICROS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.NANOS));
        }
        return series;
    }

    /**
     * Create ChronoSeries with the given InfluxDB information.
     *
     * @param influxDB InfluxDB instance
     * @param database database name
     * @param table table name
     * @param column column name
     * @return ChronoSeries from the given InfluxDB information
     */
    @NotNull
    public static ChronoSeries of(@NotNull InfluxDB influxDB, @NotNull String database, @NotNull String table, @NotNull String column) {
        ChronoSeries series = new ChronoSeries();
        series.chronoScale = new ChronoScale();
        series.influxDB = requireNonNull(influxDB);
        series.database = requireNonNull(database);
        series.table = requireNonNull(table);
        series.column = requireNonNull(column);
        influxDB.setDatabase(database);
        // general info
        QueryResult queryResult = series.influxDB.query(new Query(String.format("SELECT COUNT(\"%s\") FROM \"%s\"", column, table), database));
        for (QueryResult.Result result : queryResult.getResults()) {
            series.size = ((Double) result.getSeries().get(0).getValues().get(0).get(1)).intValue();
            if (series.size < 2) {
                throw new IllegalStateException("ChronoSeries requires at least two elements to initiate");
            }
        }
        queryResult = series.influxDB.query(new Query(String.format("SELECT FIRST(\"%s\") FROM \"%s\"", column, table), database));
        for (QueryResult.Result result : queryResult.getResults()) {
            String timeString = (String) result.getSeries().get(0).getValues().get(0).get(0);
            series.beginTimestamp = Instant.parse(timeString);
        }
        queryResult = series.influxDB.query(new Query(String.format("SELECT LAST(\"%s\") FROM \"%s\"", column, table), database));
        for (QueryResult.Result result : queryResult.getResults()) {
            String timeString = (String) result.getSeries().get(0).getValues().get(0).get(0);
            series.endTimestamp = Instant.parse(timeString);
        }
        // todo: calculate ChronoScale
        LocalDateTime startDate = series.beginTimestamp.atZone(ZoneOffset.UTC).toLocalDateTime();
        LocalDateTime endDate = series.endTimestamp.atZone(ZoneOffset.UTC).toLocalDateTime();
        disableUnnecessaryUnits(series, startDate, endDate);
        return series;
    }

    private static void disableUnnecessaryUnits(@NotNull ChronoSeries series, @NotNull LocalDateTime startDate, @NotNull LocalDateTime endDate) {
        if (ChronoUnit.NANOS.between(startDate, endDate) == 0) {
            throw new IllegalStateException("ChronoSeries contains duration of zero nanoseconds");
        } else if (ChronoUnit.MICROS.between(startDate, endDate) == 0) {
            disableBiggerThan(ChronoUnit.NANOS, series);
            // factuals
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.NANOS));
        } else if (ChronoUnit.MILLIS.between(startDate, endDate) == 0) {
            disableBiggerThan(ChronoUnit.MICROS, series);
            // factuals
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MICROS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.NANOS));
        } else if (ChronoUnit.SECONDS.between(startDate, endDate) == 0) {
            disableBiggerThan(ChronoUnit.MILLIS, series);
            // factuals
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MILLIS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MICROS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.NANOS));
        } else if (ChronoUnit.MINUTES.between(startDate, endDate) == 0) {
            disableBiggerThan(ChronoUnit.SECONDS, series);
            // factuals
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.SECONDS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MILLIS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MICROS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.NANOS));
        } else if (ChronoUnit.HOURS.between(startDate, endDate) == 0) {
            disableBiggerThan(ChronoUnit.MINUTES, series);
            // factuals
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MINUTES));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.SECONDS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MILLIS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MICROS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.NANOS));
        } else if (ChronoUnit.DAYS.between(startDate, endDate) == 0) {
            disableBiggerThan(ChronoUnit.HALF_DAYS, series);
            // factuals
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.HOURS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MINUTES));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.SECONDS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MILLIS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MICROS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.NANOS));
        } else if (ChronoUnit.WEEKS.between(startDate, endDate) == 0) {
            disableBiggerThan(ChronoUnit.DAYS, series);
            // factuals
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.DAYS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.HOURS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MINUTES));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.SECONDS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MILLIS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MICROS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.NANOS));
        } else if (ChronoUnit.MONTHS.between(startDate, endDate) == 0) {
            disableBiggerThan(ChronoUnit.WEEKS, series);
            // factuals
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.WEEKS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.DAYS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.HOURS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MINUTES));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.SECONDS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MILLIS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MICROS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.NANOS));
        } else if (ChronoUnit.YEARS.between(startDate, endDate) == 0) {
            disableBiggerThan(ChronoUnit.MONTHS, series);
            // factuals
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MONTHS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.WEEKS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.DAYS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.HOURS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MINUTES));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.SECONDS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MILLIS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MICROS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.NANOS));
        } else if (ChronoUnit.DECADES.between(startDate, endDate) == 0) {
            disableBiggerThan(ChronoUnit.YEARS, series);
            // factuals
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.YEARS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MONTHS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.WEEKS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.DAYS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.HOURS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MINUTES));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.SECONDS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MILLIS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MICROS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.NANOS));
        } else if (ChronoUnit.CENTURIES.between(startDate, endDate) == 0) {
            disableBiggerThan(ChronoUnit.DECADES, series);
            // factuals
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.DECADES));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.YEARS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MONTHS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.WEEKS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.DAYS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.HOURS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MINUTES));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.SECONDS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MILLIS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MICROS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.NANOS));
        } else if (ChronoUnit.MILLENNIA.between(startDate, endDate) == 0) {
            disableBiggerThan(ChronoUnit.CENTURIES, series);
            // factuals
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.CENTURIES));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.DECADES));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.YEARS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MONTHS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.WEEKS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.DAYS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.HOURS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MINUTES));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.SECONDS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MILLIS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.MICROS));
            series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asFactual(series, ChronoUnit.NANOS));
        } else {
            throw new UnsupportedOperationException("Unable to disable");
        }
    }

    private static void disableBiggerThan(@NotNull ChronoUnit chronoUnit, @NotNull ChronoSeries series) {
        // deliberate fall-through: disabling one unit also disables every larger unit
        switch(requireNonNull(chronoUnit)) {
            case NANOS:
                series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asDisabled(ChronoUnit.MICROS));
            case MICROS:
                series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asDisabled(ChronoUnit.MILLIS));
            case MILLIS:
                series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asDisabled(ChronoUnit.SECONDS));
            case SECONDS:
                series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asDisabled(ChronoUnit.MINUTES));
            case MINUTES:
                series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asDisabled(ChronoUnit.HOURS));
            case HOURS:
                series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asDisabled(ChronoUnit.HALF_DAYS));
            case HALF_DAYS:
                series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asDisabled(ChronoUnit.DAYS));
            case DAYS:
                series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asDisabled(ChronoUnit.WEEKS));
            case WEEKS:
                series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asDisabled(ChronoUnit.MONTHS));
            case MONTHS:
                series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asDisabled(ChronoUnit.YEARS));
            case YEARS:
                series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asDisabled(ChronoUnit.DECADES));
            case DECADES:
                series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asDisabled(ChronoUnit.CENTURIES));
            case CENTURIES:
                series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asDisabled(ChronoUnit.MILLENNIA));
            case MILLENNIA:
                series.chronoScale.updateChronoScaleUnit(ChronoScaleUnit.asDisabled(ChronoUnit.ERAS));
                break;
            default:
                throw new UnsupportedOperationException("Unable to disable bigger than unit: " + chronoUnit);
        }
    }

    @Override
    public String toString() {
        return String.format("ChronoSeries: { Start: %s - End: %s ; Size: %d }", getBeginLocalDateTime(), getEndLocalDateTime(), getSize());
    }
}
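
A minimal usage sketch of the factory methods above; the timestamps are arbitrary, and the InfluxDB-backed variant is shown commented with placeholder connection details:

Instant start = Instant.parse("2020-01-01T00:00:00Z");
ChronoSeries series = ChronoSeries.of(start, start.plusSeconds(60), start.plusSeconds(120));
System.out.println(series.getSize());     // 3
System.out.println(series.getDuration()); // PT2M

// Backed by an InfluxDB column instead of in-memory Instants:
// InfluxDB influxDB = InfluxDBFactory.connect("http://localhost:8086", "root", "root");
// ChronoSeries dbSeries = ChronoSeries.of(influxDB, "mydb", "events", "value");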

17 View Complete Implementation : InfluxService.java
Copyright Mozilla Public License 2.0
Author : dlmcpaul
@Service
@RequiredArgsConstructor
@Log4j2
@Profile("influxdb")
public class InfluxService implements InfluxExportInterface {

    private final InfluxDB destinationInfluxDB;

    public void sendMetrics(List<Metric> metrics, LocalDateTime readTime) {
        metrics.forEach(m -> destinationInfluxDB.write(Point.measurement(m.getName())
                .time(readTime.atZone(ZoneId.systemDefault()).toInstant().toEpochMilli(), TimeUnit.MILLISECONDS)
                .addField("value", m.getValue())
                .build()));
        log.debug("wrote {} measurements at {}", metrics.size(), readTime);
    }
}
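
The InfluxDB dependency here is constructor-injected via @RequiredArgsConstructor, so the application context must provide an InfluxDB bean; a plausible configuration sketch with placeholder connection details and database name:

@Configuration
@Profile("influxdb")
public class InfluxConfig {

    @Bean(destroyMethod = "close")
    public InfluxDB destinationInfluxDB() {
        InfluxDB influxDB = InfluxDBFactory.connect("http://localhost:8086", "user", "pass");
        influxDB.setDatabase("metrics"); // placeholder database name
        return influxDB;
    }
}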

17 View Complete Implementation : AbstractITInfluxDB.java
Copyright Apache License 2.0
Author : influxdata
/**
 * Base integration test class for InfluxDB processors
 */
public abstract class AbstractITInfluxDB {

    protected TestRunner runner;

    protected InfluxDB influxDB;

    protected String dbName = "test";

    protected String dbUrl = "http://localhost:8086";

    protected String user = "admin";

    protected String password = "admin";

    protected static final String DEFAULT_RETENTION_POLICY = "autogen";

    protected Type QueryResultListType = new TypeToken<List<QueryResult>>() {
    }.getType();

    protected void initInfluxDB() throws InterruptedException, Exception {
        influxDB = InfluxDBFactory.connect(dbUrl, user, password);
        influxDB.query(new Query("CREATE database " + dbName, dbName));
        int max = 10;
        while (!databaseExists(dbName) && max-- > 0) {
            Thread.sleep(5);
        }
        if (!databaseExists(dbName)) {
            throw new Exception("unable to create database " + dbName);
        }
    }

    protected void cleanUpDatabase() throws InterruptedException {
        if (databaseExists(dbName)) {
            QueryResult result = influxDB.query(new Query("DROP measurement water", dbName));
            checkError(result);
            result = influxDB.query(new Query("DROP measurement testm", dbName));
            checkError(result);
            result = influxDB.query(new Query("DROP measurement chunkedQueryTest", dbName));
            checkError(result);
            result = influxDB.query(new Query("DROP measurement testRecordMeasurement", dbName));
            checkError(result);
            result = influxDB.query(new Query("DROP database " + dbName, dbName));
            Thread.sleep(1000);
        }
    }

    protected void checkError(QueryResult result) {
        if (result.hasError()) {
            throw new IllegalStateException("Error while dropping measurements " + result.getError());
        }
    }

    @After
    public void tearDown() throws Exception {
        runner = null;
        if (influxDB != null) {
            cleanUpDatabase();
            influxDB.close();
        }
    }

    protected void initializeRunner() {
        runner.setProperty(ExecuteInfluxDatabaseQuery.DB_NAME, dbName);
        runner.setProperty(ExecuteInfluxDatabaseQuery.USERNAME, user);
        runner.setProperty(ExecuteInfluxDatabaseQuery.PASSWORD, password);
        runner.setProperty(ExecuteInfluxDatabaseQuery.INFLUX_DB_URL, dbUrl);
        runner.setProperty(ExecuteInfluxDatabaseQuery.CHARSET, "UTF-8");
        runner.assertValid();
    }

    private boolean databaseExists(String dbName) {
        QueryResult result = influxDB.query(new Query("SHOW databases", dbName));
        List<List<Object>> databaseNames = result.getResults().get(0).getSeries().get(0).getValues();
        return databaseNames.stream().anyMatch(name -> name.get(0).toString().equals(dbName));
    }
}

17 View Complete Implementation : TestStandardInfluxDatabaseService.java
Copyright Apache License 2.0
Author : influxdata
@Test
public void successConnect() throws IOException, GeneralSecurityException {
    answerConnect = new CallsRealMethods();
    testRunner.enableControllerService(service);
    InfluxDB influxDB = service.connect();
    Assert.assertNotNull(influxDB);
}

17 View Complete Implementation : DriverInfluxDB.java
Copyright Apache License 2.0
Author : mycontroller-org
/**
 * @author Jeeva Kandasamy (jkandasa)
 * @since 1.2.0
 */
@Slf4j
public class DriverInfluxDB extends DriverAbstract {

    private static final int FLUSH_POINTS = 200;

    private static final int FLUSH_DURATION = 2000;

    private ExternalServerConfigInfluxDB _config = null;

    private InfluxDB _client = null;

    public DriverInfluxDB(ExternalServerConfigInfluxDB _config) {
        super(_config);
        this._config = _config;
    }

    @Override
    public synchronized void write(SensorVariable sensorVariable) {
        if (_client != null) {
            StringBuilder data = new StringBuilder();
            data.append(getVariableKey(sensorVariable, _config.getKeyFormat())).append(",").append(getVariableKey(sensorVariable, _config.getTags())).append(" value=").append(getValue(sensorVariable)).append(" ").append(sensorVariable.getTimestamp()).append("000000");
            try {
                _client.write(data.toString());
                _logger.debug("data[{}] sent", data.toString());
            } catch (Exception ex) {
                _logger.error("Exception, {}", data.toString(), ex);
            }
        }
    }

    private String getValue(SensorVariable sensorVariable) {
        METRIC_TYPE mType = sensorVariable.getMetricType();
        if (mType == METRIC_TYPE.BINARY || mType == METRIC_TYPE.COUNTER || mType == METRIC_TYPE.DOUBLE) {
            return sensorVariable.getValue();
        } else {
            return "\"" + sensorVariable.getValue() + "\"";
        }
    }

    @Override
    public void connect() {
        if (_config.getUsername() != null && _config.getUsername().trim().length() > 0) {
            _client = InfluxDBFactory.connect(_config.getUrl(), _config.getUsername(), _config.getPassword());
        } else {
            _client = InfluxDBFactory.connect(_config.getUrl());
        }
        _client.setDatabase(_config.getDatabase());
        _client.enableBatch(BatchOptions.DEFAULTS.actions(FLUSH_POINTS).flushDuration(FLUSH_DURATION));
        _logger.debug("External server:{}, Influxdb client BatchSettings[flush, points:{}, duration:{} ms]", _config.getName(), FLUSH_POINTS, FLUSH_DURATION);
    }

    @Override
    public void disconnect() {
        if (_client != null) {
            _client.close();
            _logger.debug("Influxdb client connection closed.");
        }
    }
}
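
The write method above builds the InfluxDB line protocol by hand: the measurement/key segment, a comma-separated tag segment, a single value field, and the sensor timestamp with "000000" appended to turn milliseconds into the nanosecond precision the write endpoint assumes by default. For a hypothetical variable whose key resolves to temperature, whose tag segment is node=12, whose value is 23.5 and whose timestamp is 1589000000000 ms, the string handed to _client.write(...) would be:

temperature,node=12 value=23.5 1589000000000000000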

17 View Complete Implementation : FTInflux.java
Copyright Apache License 2.0
Author : smartcat-labs
public clreplaced FTInflux {

    private static Cluster cluster;

    private static Session session;

    private static InfluxDB influxdb;

    @BeforeClass
    public static void setUp() throws ConfigurationException, TTransportException, IOException, InterruptedException {
        cluster = Cluster.builder().addContactPoint(SystemPropertyUtil.get("cassandra.host")).withPort(Integer.parseInt(SystemPropertyUtil.get("cassandra.port"))).build();
        session = cluster.connect();
        influxdb = InfluxDBFactory.connect(SystemPropertyUtil.get("influxdb.url"), SystemPropertyUtil.get("influxdb.user"), SystemPropertyUtil.get("influxdb.password"));
    }

    @Test
    public void test() throws Exception {
        FileSystem fileSystem = FileSystems.getDefault();
        WatchService watcher = fileSystem.newWatchService();
        Path logFileDir = fileSystem.getPath(SystemPropertyUtil.get("project.build.directory"));
        logFileDir.register(watcher, StandardWatchEventKinds.ENTRY_MODIFY);
        session.execute("CREATE KEYSPACE IF NOT EXISTS test_keyspace " + "WITH REPLICATION = { 'clreplaced' : 'SimpleStrategy', 'replication_factor' : 1 };");
        session.execute("CREATE TABLE IF NOT EXISTS test_keyspace.test_table (uid uuid PRIMARY KEY);");
        session.execute("SELECT * FROM test_keyspace.test_table");
        QueryResult result = null;
        for (int i = 0; i < 10; i++) {
            result = influxdb.query(new Query("SHOW SERIES FROM \"queryReport\"", SystemPropertyUtil.get("influxdb.dbname")));
            if (!result.hasError()) {
                break;
            }
            Thread.sleep(500);
        }
        Assertions.assertThat(result.getResults().size()).isEqualTo(1);
        cluster.close();
    }
}

17 View Complete Implementation : FTRiemann.java
Copyright Apache License 2.0
Author : smartcat-labs
public clreplaced FTRiemann {

    private static final String INFLUXDB_NAME = "diagnostics-test";

    private static Cluster cluster;

    private static Session session;

    private static InfluxDB influxdb;

    @BeforeClass
    public static void setUp() throws ConfigurationException, TTransportException, IOException, InterruptedException {
        cluster = Cluster.builder().addContactPoint(SystemPropertyUtil.get("cassandra.host")).withPort(Integer.parseInt(SystemPropertyUtil.get("cassandra.port"))).build();
        session = cluster.connect();
        influxdb = InfluxDBFactory.connect(SystemPropertyUtil.get("influxdb.url"), SystemPropertyUtil.get("influxdb.user"), SystemPropertyUtil.get("influxdb.password"));
        influxdb.createDatabase(INFLUXDB_NAME);
    }

    @Test
    public void test() throws Exception {
        FileSystem fileSystem = FileSystems.getDefault();
        WatchService watcher = fileSystem.newWatchService();
        Path logFileDir = fileSystem.getPath(SystemPropertyUtil.get("project.build.directory"));
        logFileDir.register(watcher, StandardWatchEventKinds.ENTRY_MODIFY);
        session.execute("CREATE KEYSPACE IF NOT EXISTS test_keyspace " + "WITH REPLICATION = { 'clreplaced' : 'SimpleStrategy', 'replication_factor' : 1 };");
        session.execute("CREATE TABLE IF NOT EXISTS test_keyspace.test_table (uid uuid PRIMARY KEY);");
        session.execute("SELECT * FROM test_keyspace.test_table");
        QueryResult result = null;
        for (int i = 0; i < 10; i++) {
            result = influxdb.query(new Query("SHOW SERIES FROM \"queryReport\"", INFLUXDB_NAME));
            if (!result.hasError()) {
                break;
            }
            Thread.sleep(500);
        }
        Assertions.assertThat(result.getResults().size()).isEqualTo(1);
        cluster.close();
    }
}

17 View Complete Implementation : FTTelegraf.java
Copyright Apache License 2.0
Author : smartcat-labs
public clreplaced FTTelegraf {

    private static final String INFLUXDB_NAME = "diagnostics-test";

    private static Cluster cluster;

    private static Session session;

    private static InfluxDB influxdb;

    @BeforeClass
    public static void setUp() throws ConfigurationException, TTransportException, IOException, InterruptedException {
        System.out.println("Connecting to " + SystemPropertyUtil.get("cassandra.host") + ":" + SystemPropertyUtil.get("cassandra.port"));
        cluster = Cluster.builder().addContactPoint(SystemPropertyUtil.get("cassandra.host")).withPort(Integer.parseInt(SystemPropertyUtil.get("cassandra.port"))).build();
        session = cluster.connect();
        influxdb = InfluxDBFactory.connect(SystemPropertyUtil.get("influxdb.url"), SystemPropertyUtil.get("influxdb.user"), SystemPropertyUtil.get("influxdb.password"));
        influxdb.createDatabase(INFLUXDB_NAME);
    }

    @Test
    public void test() throws Exception {
        FileSystem fileSystem = FileSystems.getDefault();
        WatchService watcher = fileSystem.newWatchService();
        Path logFileDir = fileSystem.getPath(SystemPropertyUtil.get("project.build.directory"));
        logFileDir.register(watcher, StandardWatchEventKinds.ENTRY_MODIFY);
        session.execute("CREATE KEYSPACE IF NOT EXISTS test_keyspace " + "WITH REPLICATION = { 'clreplaced' : 'SimpleStrategy', 'replication_factor' : 1 };");
        session.execute("CREATE TABLE IF NOT EXISTS test_keyspace.test_table (uid uuid PRIMARY KEY);");
        session.execute("SELECT * FROM test_keyspace.test_table");
        QueryResult result = null;
        for (int i = 0; i < 10; i++) {
            result = influxdb.query(new Query("SHOW SERIES FROM \"queryReport\"", INFLUXDB_NAME));
            if (!result.hasError()) {
                break;
            }
            Thread.sleep(500);
        }
        Assertions.assertThat(result.getResults().size()).isEqualTo(1);
        cluster.close();
    }
}

17 View Complete Implementation : InfluxDBClient.java
Copyright Apache License 2.0
Author : sonyxperiadev
/**
 * Saves BatchPoints on IO thread
 */
private Observable<BatchPoints> save(BatchPoints batchPoints, InfluxDB db) {
    return ioJob(() -> {
        db.write(batchPoints);
        return batchPoints;
    });
}

17 View Complete Implementation : InfluxDBContainer.java
Copyright MIT License
Author : testcontainers
/**
 * @return an InfluxDB client
 */
public InfluxDB getNewInfluxDB() {
    InfluxDB influxDB = InfluxDBFactory.connect(getUrl(), username, password);
    influxDB.setDatabase(database);
    return influxDB;
}
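
A typical test would start the container and request a client from this factory method. A minimal sketch, assuming a testcontainers version where the no-argument constructor is still available (newer releases expect a DockerImageName) and using placeholder credentials:

try (InfluxDBContainer<?> container = new InfluxDBContainer<>()
        .withDatabase("test")
        .withUsername("admin")
        .withPassword("admin")) {
    container.start();
    InfluxDB client = container.getNewInfluxDB();
    // the client is already bound to the configured database
    System.out.println(client.ping().getVersion());
}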

17 View Complete Implementation : InfluxDBContainerTest.java
Copyright MIT License
Author : testcontainers
@Test
public void getNewInfluxDB() {
    InfluxDB actual = influxDBContainer.getNewInfluxDB();
    assertThat(actual, notNullValue());
    assertThat(actual.ping(), notNullValue());
}

17 View Complete Implementation : InfluxDBContainerWithUserTest.java
Copyright MIT License
Author : testcontainers
@Test
public void describeDatabases() {
    InfluxDB actual = influxDBContainer.getNewInfluxDB();
    assertThat(actual, notNullValue());
    assertThat(actual.describeDatabases(), hasItem(DATABASE));
}

16 View Complete Implementation : DataLakeManagementV3.java
Copyright Apache License 2.0
Author : apache
public DataResult getEventsFromNow(String index, String timeunit, int value, String aggregationUnit, int aggregationValue) throws ParseException {
    InfluxDB influxDB = getInfluxDBClient();
    Query query = new Query("SELECT mean(*) FROM " + index + " WHERE time > now() -" + value + timeunit + " GROUP BY time(" + aggregationValue + aggregationUnit + ") fill(none) ORDER BY time", BackendConfig.INSTANCE.getInfluxDatabaseName());
    QueryResult result = influxDB.query(query);
    DataResult dataResult = convertResult(result);
    return dataResult;
}
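
Since the InfluxQL statement is assembled by plain string concatenation, a concrete call makes the shape of the query obvious. For hypothetical arguments index = "flow", timeunit = "h", value = 2, aggregationUnit = "m" and aggregationValue = 10, the generated query text is:

SELECT mean(*) FROM flow WHERE time > now() -2h GROUP BY time(10m) fill(none) ORDER BY time

Note that none of the interpolated arguments are escaped, so callers must only pass trusted values.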

16 View Complete Implementation : InfluxDBAbstractSink.java
Copyright Apache License 2.0
Author : apache
/**
 * A simple abstract class for InfluxDB sink
 */
@Slf4j
public abstract class InfluxDBAbstractSink<T> implements Sink<T> {

    private InfluxDBSinkConfig influxDBSinkConfig;

    private InfluxDB influxDB;

    private InfluxDB.ConsistencyLevel consistencyLevel;

    private String influxDatabase;

    private String retentionPolicy;

    protected InfluxDBBuilder influxDBBuilder = new InfluxDBBuilderImpl();

    private long batchTimeMs;

    private int batchSize;

    private List<Record<T>> incomingList;

    private ScheduledExecutorService flushExecutor;

    @Override
    public void open(Map<String, Object> config, SinkContext sinkContext) throws Exception {
        influxDBSinkConfig = InfluxDBSinkConfig.load(config);
        influxDBSinkConfig.validate();
        try {
            consistencyLevel = InfluxDB.ConsistencyLevel.valueOf(influxDBSinkConfig.getConsistencyLevel().toUpperCase());
        } catch (IllegalArgumentException e) {
            throw new IllegalArgumentException("Illegal Consistency Level, valid values are: " + Arrays.asList(InfluxDB.ConsistencyLevel.values()));
        }
        influxDatabase = influxDBSinkConfig.getDatabase();
        retentionPolicy = influxDBSinkConfig.getRetentionPolicy();
        influxDB = influxDBBuilder.build(influxDBSinkConfig);
        // create the database if not exists
        List<String> databases = influxDB.describeDatabases();
        if (!databases.contains(influxDatabase)) {
            influxDB.createDatabase(influxDatabase);
        }
        batchTimeMs = influxDBSinkConfig.getBatchTimeMs();
        batchSize = influxDBSinkConfig.getBatchSize();
        incomingList = Lists.newArrayList();
        flushExecutor = Executors.newScheduledThreadPool(1);
        flushExecutor.scheduleAtFixedRate(() -> flush(), batchTimeMs, batchTimeMs, TimeUnit.MILLISECONDS);
    }

    @Override
    public void write(Record<T> record) throws Exception {
        int currentSize;
        synchronized (this) {
            if (null != record) {
                incomingList.add(record);
            }
            currentSize = incomingList.size();
        }
        if (currentSize == batchSize) {
            flushExecutor.submit(() -> flush());
        }
    }

    private void flush() {
        BatchPoints.Builder batchBuilder = BatchPoints.database(influxDatabase).retentionPolicy(retentionPolicy).consistency(consistencyLevel);
        List<Record<T>> toFlushList;
        synchronized (this) {
            if (incomingList.isEmpty()) {
                return;
            }
            toFlushList = incomingList;
            incomingList = Lists.newArrayList();
        }
        if (CollectionUtils.isNotEmpty(toFlushList)) {
            // use an explicit iterator so failed records can be removed without
            // triggering a ConcurrentModificationException
            Iterator<Record<T>> iterator = toFlushList.iterator();
            while (iterator.hasNext()) {
                Record<T> record = iterator.next();
                try {
                    buildBatch(record, batchBuilder);
                } catch (Exception e) {
                    record.fail();
                    iterator.remove();
                    log.warn("Failed to build batch point from record, skipping it", e);
                }
            }
        }
        BatchPoints batch = batchBuilder.build();
        try {
            if (CollectionUtils.isNotEmpty(batch.getPoints())) {
                influxDB.write(batch);
            }
            toFlushList.forEach(tRecord -> tRecord.ack());
            batch.getPoints().clear();
            toFlushList.clear();
        } catch (Exception e) {
            toFlushList.forEach(tRecord -> tRecord.fail());
            log.error("InfluxDB write batch data exception ", e);
        }
    }

    @Override
    public void close() throws Exception {
        if (null != influxDB) {
            influxDB.close();
        }
        if (null != flushExecutor) {
            flushExecutor.shutdown();
        }
    }

    // build Point in BatchPoints builder
    public abstract void buildBatch(Record<T> message, BatchPoints.Builder batchBuilder) throws Exception;
}
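
A concrete sink only needs to implement buildBatch, converting each incoming record into a Point on the shared builder. A minimal sketch for string payloads; the subclass name, measurement and field key are invented for the example:

public class SimpleStringSink extends InfluxDBAbstractSink<String> {

    @Override
    public void buildBatch(Record<String> message, BatchPoints.Builder batchBuilder) {
        // one point per record; measurement and field names are placeholders
        batchBuilder.point(Point.measurement("raw_events")
                .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
                .addField("value", message.getValue())
                .build());
    }
}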

16 View Complete Implementation : InfluxDBBuilderImpl.java
Copyright Apache License 2.0
Author : apache
@Override
public InfluxDB build(InfluxDBSinkConfig config) {
    InfluxDB influxDB;
    boolean enableAuth = !Strings.isNullOrEmpty(config.getUsername());
    if (enableAuth) {
        log.info("Authenticating to {} as {}", config.getInfluxdbUrl(), config.getUsername());
        influxDB = InfluxDBFactory.connect(config.getInfluxdbUrl(), config.getUsername(), config.getPassword());
    } else {
        log.info("Connecting to {}", config.getInfluxdbUrl());
        influxDB = InfluxDBFactory.connect(config.getInfluxdbUrl());
    }
    if (config.isGzipEnable()) {
        influxDB.enableGzip();
    }
    InfluxDB.LogLevel logLevel;
    try {
        logLevel = InfluxDB.LogLevel.valueOf(config.getLogLevel().toUpperCase());
    } catch (IllegalArgumentException e) {
        throw new IllegalArgumentException("Illegal Log Level, valid values are: " + Arrays.asList(InfluxDB.LogLevel.values()));
    }
    influxDB.setLogLevel(logLevel);
    return influxDB;
}