org.sfs.SfsVertx - Java examples

Here are examples of the Java API org.sfs.SfsVertx taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.

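The snippets below only ever touch a small part of SfsVertx: getOrCreateContext(), getIoPool(), getBackgroundPool(), setPeriodic(...), cancelTimer(...) and runOnContext(...). As a rough orientation, here is a minimal sketch of that surface inferred from those call sites; it is an assumption, not the actual interface declaration from the org.sfs project, and the ExecutorService return types for the pool accessors are guesses based on how the pools are passed to AsynchronousFileChannel.open and RxHelper.executeBlocking.

import java.util.concurrent.ExecutorService;

import io.vertx.core.Context;
import io.vertx.core.Handler;

// Hypothetical sketch of the SfsVertx surface as used in the examples on this page.
// Method names match the call sites below; return types are assumptions.
public interface SfsVertxSketch {

    Context getOrCreateContext();

    // pool handed to AsynchronousFileChannel.open in BlobFile.open / BlockFile.open
    ExecutorService getIoPool();

    // pool handed to RxHelper.executeBlocking for blocking work
    ExecutorService getBackgroundPool();

    long setPeriodic(long delayMs, Handler<Long> handler);

    boolean cancelTimer(long timerId);

    void runOnContext(Handler<Void> action);
}
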
111 Examples

19 View Complete Implementation : IndexBlockReader.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public class IndexBlockReader {

    public enum LockType {

        READ, WRITE, NONE
    }

    private final SfsVertx vertx;

    private long position = 0;

    private final long lastBlockPosition;

    private final int batchSize;

    private final int readSize;

    private IndexFile blockFile;

    private LockType lockType = NONE;

    private long lockWaitTimeout = -1;

    private boolean hasNext = true;

    public IndexBlockReader(SfsVertx vertx, IndexFile blockFile, int blockSize, int batchSize, long lastBlockPosition) {
        this.vertx = vertx;
        this.blockFile = blockFile;
        this.batchSize = batchSize;
        this.readSize = blockSize * batchSize;
        this.lastBlockPosition = lastBlockPosition;
    }

    public IndexBlockReader enableLocking(LockType lockType, long lockWaitTimeout) {
        this.lockType = lockType;
        this.lockWaitTimeout = lockWaitTimeout;
        return this;
    }

    public Observable<Iterable<ChecksummedPositional<XIndexBlock>>> toObservable() {
        return create(new OnSubscribeFromBlockReader<>(this));
    }

    protected boolean hasNext() {
        return hasNext;
    }

    protected Observable<? extends List<ChecksummedPositional<XIndexBlock>>> next() {
        if (READ.equals(lockType) || WRITE.equals(lockType)) {
            checkState(lockWaitTimeout > 0, "Invalid LockWaitTimeout value %s", lockWaitTimeout);
            return lockedObservable(vertx, () -> {
                if (WRITE.equals(lockType)) {
                    return blockFile.tryWriteLock(position, readSize);
                } else if (READ.equals(lockType)) {
                    return blockFile.tryReadLock(position, readSize);
                } else {
                    throw new IllegalStateException("Unsupported Locked Type " + lockType);
                }
            }, () -> blockFile.getBlocks(vertx, position, batchSize).map(Lists::newArrayList).doOnNext(checksummedPositionals -> hasNext = !checksummedPositionals.isEmpty()).doOnNext(checksummedPositionals -> position = position + readSize), lockWaitTimeout);
        } else {
            return blockFile.getBlocks(vertx, position, batchSize).map(Lists::newArrayList).doOnNext(checksummedPositionals -> hasNext = !checksummedPositionals.isEmpty()).doOnNext(checksummedPositionals -> position = position + readSize);
        }
    }

    protected static class OnSubscribeFromBlockReader<T extends Iterable<ChecksummedPositional<XIndexBlock>>> implements Observable.OnSubscribe<T> {

        final IndexBlockReader blockReader;

        public OnSubscribeFromBlockReader(IndexBlockReader iterable) {
            if (iterable == null) {
                throw new NullPointerException("iterable must not be null");
            }
            this.blockReader = iterable;
        }

        @Override
        public void call(Subscriber<? super T> o) {
            if (!blockReader.hasNext() && !o.isUnsubscribed()) {
                o.onCompleted();
            } else {
                o.setProducer(new BlockReaderProducer<>(o, blockReader));
            }
        }

        private static final class BlockReaderProducer<T extends Iterable<ChecksummedPositional<XIndexBlock>>> extends AtomicLong implements Producer {

            /**
             */
            private static final long serialVersionUID = -1L;

            private final Subscriber<? super T> o;

            private final IndexBlockReader blockReader;

            BlockReaderProducer(Subscriber<? super T> o, IndexBlockReader blockReader) {
                this.o = o;
                this.blockReader = blockReader;
            }

            @Override
            public void request(long n) {
                if (get() == MAX_VALUE) {
                    // already started with fast-path
                    return;
                }
                if (n == MAX_VALUE && compareAndSet(0, MAX_VALUE)) {
                    fastpath();
                } else if (n > 0 && getAndAddRequest(this, n) == 0L) {
                    slowpath(n, 0);
                }
            }

            void slowpath(long n, long numberEmitted) {
                if (o.isUnsubscribed()) {
                // don't recurse
                } else if (numberEmitted < n) {
                    if (blockReader.hasNext()) {
                        blockReader.next().subscribe(new Subscriber<Iterable<ChecksummedPositional<XIndexBlock>>>() {

                            @Override
                            public void onCompleted() {
                                o.onCompleted();
                            }

                            @Override
                            public void onError(Throwable e) {
                                o.onError(e);
                            }

                            @Override
                            public void onNext(Iterable<ChecksummedPositional<XIndexBlock>> checksummedPositionals) {
                                o.onNext((T) checksummedPositionals);
                                blockReader.vertx.runOnContext(event -> slowpath(n, numberEmitted + 1));
                            }
                        });
                    } else {
                        if (!o.isUnsubscribed()) {
                            o.onCompleted();
                        }
                    }
                } else if (numberEmitted >= n) {
                    if (!o.isUnsubscribed()) {
                        o.onCompleted();
                    }
                }
            }

            void fastpath() {
                // fast-path without backpressure
                final Subscriber<? super T> o = this.o;
                final IndexBlockReader it = this.blockReader;
                if (o.isUnsubscribed()) {
                    return;
                } else if (it.hasNext()) {
                    it.next().subscribe(new Subscriber<Iterable<ChecksummedPositional<XIndexBlock>>>() {

                        @Override
                        public void onCompleted() {
                            o.onCompleted();
                        }

                        @Override
                        public void onError(Throwable e) {
                            o.onError(e);
                        }

                        @Override
                        public void onNext(Iterable<ChecksummedPositional<XIndexBlock>> checksummedPositionals) {
                            o.onNext((T) checksummedPositionals);
                            blockReader.vertx.runOnContext(event -> fastpath());
                        }
                    });
                } else if (!o.isUnsubscribed()) {
                    o.onCompleted();
                    return;
                } else {
                    // is unsubscribed
                    return;
                }
            }
        }
    }
}
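
For orientation, here is a hedged sketch of how this reader might be driven. The constructor arguments and toObservable() come from the class above; the variable names, the 10-second lock wait timeout, and the process(...) and LOGGER calls are illustrative placeholders, not code from the project.

// Hypothetical usage of IndexBlockReader; setup values are illustrative.
IndexBlockReader reader =
        new IndexBlockReader(vertx, indexFile, blockSize, batchSize, lastBlockPosition)
                .enableLocking(IndexBlockReader.LockType.READ, 10000L);
reader.toObservable()
        .subscribe(
                batch -> {
                    for (ChecksummedPositional<XIndexBlock> positional : batch) {
                        // each element carries the decoded block, its file position and checksum state
                        if (positional.isChecksumValid()) {
                            process(positional.getValue());   // process(...) is a placeholder
                        }
                    }
                },
                error -> LOGGER.error("Index scan failed", error),
                () -> LOGGER.debug("Index scan complete"));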

19 View Complete Implementation : IndexScanner.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
protected void scanIndex0(SfsVertx vertx, LockType lockType, Func1<ChecksummedPositional<XIndexBlock>, Observable<Void>> transformer, ObservableFuture<Void> handler, long fileSize) {
    int bufferSize = batchSize * blockSize;
    if (isDebugEnabled) {
        LOGGER.debug("Reading " + batchSize + " blocks @ position " + position);
    }
    if (READ.equals(lockType) || WRITE.equals(lockType)) {
        checkState(lockWaitTimeout > 0, "Invalid LockWaitTimeout value %s", lockWaitTimeout);
        lockedObservable(vertx, () -> {
            if (WRITE.equals(lockType)) {
                return indexFile.tryWriteLock(position, bufferSize);
            } else if (READ.equals(lockType)) {
                return indexFile.tryReadLock(position, bufferSize);
            } else {
                throw new IllegalStateException("Unsupported Locked Type " + lockType);
            }
        }, () -> indexFile.getBlocks(vertx, position, batchSize), lockWaitTimeout).flatMap(checksummedPositionals -> iterate(vertx, checksummedPositionals, xIndexBlockChecksummedPositional -> transformer.call(xIndexBlockChecksummedPositional).doOnNext(aVoid -> blockCount++).map(aVoid -> TRUE))).doOnNext(aBoolean -> {
            if (isDebugEnabled) {
                LOGGER.debug("Scanned " + blockCount + " blocks");
            }
        }).subscribe(new Subscriber<Boolean>() {

            @Override
            public void onCompleted() {
                if (isDebugEnabled) {
                    LOGGER.debug("Scanned " + blockCount + " blocks");
                }
                position = checkedAdd(position, bufferSize);
                if (position >= fileSize) {
                    handler.complete(null);
                } else {
                    if (!isUnsubscribed()) {
                        vertx.runOnContext(event -> scanIndex0(vertx, lockType, transformer, handler, fileSize));
                    }
                }
            }

            @Override
            public void onError(Throwable e) {
                handler.fail(e);
            }

            @Override
            public void onNext(Boolean aBoolean) {
            }
        });
    } else {
        indexFile.getBlocks(vertx, position, batchSize).flatMap(checksummedPositionals -> iterate(vertx, checksummedPositionals, xIndexBlockChecksummedPositional -> transformer.call(xIndexBlockChecksummedPositional).doOnNext(aVoid -> blockCount++).map(aVoid -> TRUE))).doOnNext(aBoolean -> {
            if (isDebugEnabled) {
                LOGGER.debug("Scanned " + blockCount + " blocks");
            }
        }).subscribe(new Subscriber<Boolean>() {

            @Override
            public void onCompleted() {
                position = checkedAdd(position, bufferSize);
                if (position >= fileSize) {
                    handler.complete(null);
                } else {
                    vertx.runOnContext(event -> scanIndex0(vertx, lockType, transformer, handler, fileSize));
                }
            }

            @Override
            public void onError(Throwable e) {
                handler.fail(e);
            }

            @Override
            public void onNext(Boolean aBoolean) {
            }
        });
    }
}

19 View Complete Implementation : BlockFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Optional<ChecksummedPositional<byte[]>>> getBlock(SfsVertx vertx, final long position) {
    Context context = vertx.getOrCreateContext();
    return getBlock0(context, position).map(buffer -> {
        Optional<Block.Frame<byte[]>> oFrame = decodeFrame(buffer, false);
        if (oFrame.isPresent()) {
            Block.Frame<byte[]> frame = oFrame.get();
            return of(new ChecksummedPositional<byte[]>(position, frame.getData(), frame.getChecksum()) {

                @Override
                public boolean isChecksumValid() {
                    return frame.isChecksumValid();
                }
            });
        } else {
            return absent();
        }
    });
}

19 View Complete Implementation : JournalFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
private Observable<Super> getSuperBlock(SfsVertx vertx, BlobFile internalBlobFile) {
    return aVoid().flatMap(aVoid -> getSuperBlock0(vertx, internalBlobFile, SUPER_BLOCK_POSITION_0)).flatMap(superOptional -> {
        if (superOptional.isPresent()) {
            return just(superOptional);
        } else {
            return getSuperBlock0(vertx, internalBlobFile, SUPER_BLOCK_POSITION_1);
        }
    }).doOnNext(superOptional -> checkState(superOptional.isPresent(), "Corrupt Super Block")).map(Optional::get);
}

19 View Complete Implementation : JournalFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> disableWrites(SfsVertx vertx) {
    return blobFile.disableWrites(vertx);
}

19 View Complete Implementation : BlobFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> open(SfsVertx vertx, StandardOpenOption openOption, StandardOpenOption... openOptions) {
    this.vertx = vertx;
    this.executorService = vertx.getIoPool();
    return aVoid().doOnNext(aVoid -> checkState(status.compareAndSet(STOPPED, STARTING))).flatMap(aVoid -> {
        Context context = vertx.getOrCreateContext();
        return RxHelper.executeBlocking(context, vertx.getBackgroundPool(), () -> {
            try {
                createDirectories(file.getParent());
                Set<StandardOpenOption> options = new HashSet<>();
                options.add(openOption);
                addAll(options, openOptions);
                channel = AsynchronousFileChannel.open(file, options, executorService);
                return (Void) null;
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
    }).doOnNext(aVoid -> {
        periodics.add(vertx.setPeriodic(100, event -> cleanupOrphanedWriters()));
    }).doOnNext(aVoid -> checkState(status.compareAndSet(STARTING, STARTED))).onErrorResumeNext(throwable -> {
        checkState(status.compareAndSet(STARTING, START_FAILED));
        return error(throwable);
    });
}
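
Taken together with enableWrites, consume, force, disableWrites and close shown elsewhere on this page, the open call above fits into a write lifecycle roughly like the following hedged sketch. path, blockSize, writeStreamTimeout, vertx, UTF_8 and LOGGER are illustrative placeholders, and the chaining is an assumption about intended usage rather than code from the project.

// Hypothetical BlobFile write lifecycle composed from the methods shown on this page.
BlobFile blobFile = new BlobFile(path, blockSize, writeStreamTimeout);
blobFile.open(vertx, CREATE, READ, WRITE)
        .flatMap(aVoid -> blobFile.enableWrites(vertx))
        // write a small buffer at position 0, then force it to disk
        .flatMap(aVoid -> blobFile.consume(vertx, 0L, buffer("hello".getBytes(UTF_8))))
        .flatMap(aVoid -> blobFile.force(vertx, true))
        // stop accepting writes, wait for active writers to drain, then close the channel
        .flatMap(aVoid -> blobFile.disableWrites(vertx))
        .flatMap(aVoid -> blobFile.close(vertx))
        .subscribe(
                aVoid -> {
                },
                error -> LOGGER.error("BlobFile write failed", error),
                () -> LOGGER.debug("BlobFile write complete"));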

19 View Complete Implementation : IndexFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> disableWrites(SfsVertx vertx) {
    return blockFile.disableWrites(vertx);
}

19 View Complete Implementation : BlobFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> copy(SfsVertx vertx, long srcPosition, long srcLength, BlobFile dstBlobFile, long dstPosition, long dstLength) {
    return defer(() -> {
        checkOpen();
        dstBlobFile.checkOpen();
        dstBlobFile.checkCanWrite();
        ObservableFuture<Void> drainHandler = RxHelper.observableFuture();
        if (dstBlobFile.writeQueueSupport.writeQueueFull()) {
            dstBlobFile.writeQueueSupport.drainHandler(vertx.getOrCreateContext(), drainHandler::complete);
        } else {
            drainHandler.complete(null);
        }
        return drainHandler.flatMap(aVoid -> {
            LimitedWriteEndableWriteStream limitedWriteStream = new LimitedWriteEndableWriteStream(new BufferedEndableWriteStream(dstBlobFile.createWriteStream(vertx.getOrCreateContext(), dstPosition, true)), dstLength);
            return produce(vertx, srcPosition, srcLength, limitedWriteStream);
        });
    });
}

19 View Complete Implementation : WaitForActiveWriters.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public class WaitForActiveWriters implements Func1<Void, Observable<Void>> {

    private static final Logger LOGGER = getLogger(WaitForActiveWriters.class);

    private final SfsVertx vertx;

    private final Set<? extends BufferEndableWriteStream> writers;

    public WaitForActiveWriters(SfsVertx vertx, Set<? extends BufferEndableWriteStream> writers) {
        this.vertx = vertx;
        this.writers = writers;
    }

    @Override
    public Observable<Void> call(Void aVoid) {
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("Waiting for Active Writers " + on(", ").join(writers));
        }
        ObservableFuture<Void> handler = RxHelper.observableFuture();
        if (hasActive()) {
            vertx.setPeriodic(100, event -> {
                if (LOGGER.isDebugEnabled()) {
                    LOGGER.debug("Waiting for Active Writers " + on(", ").join(writers));
                }
                if (!hasActive()) {
                    vertx.cancelTimer(event);
                    handler.complete(null);
                }
            });
        } else {
            handler.complete(null);
        }
        return handler.map(aVoid1 -> {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("Done waiting for Active Writers " + on(", ").join(writers));
            }
            return null;
        });
    }

    protected boolean hasActive() {
        return !writers.isEmpty();
    }
}

19 View Complete Implementation : BlobFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> copy(SfsVertx vertx, BlobFile srcBlobFile, long srcPosition, long srcLength, long dstPosition, long dstLength) {
    return defer(() -> {
        Context context = vertx.getOrCreateContext();
        srcBlobFile.checkOpen();
        checkOpen();
        checkCanWrite();
        AsyncFileReader src = srcBlobFile.createReadStream(context, srcPosition, produceBufferSize, srcLength);
        LimitedReadStream value = new LimitedReadStream(src, srcLength);
        return consume(vertx, dstPosition, dstLength, value);
    });
}

19 View Complete Implementation : JournalFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> force(SfsVertx vertx, boolean metaData) {
    return blobFile.force(vertx, metaData);
}

19 View Complete Implementation : JournalFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> open(SfsVertx vertx) {
    return aVoid().flatMap(aVoid -> {
        // do some funky stuff here to read the existing super block so that
        // we have enough information to access the journal entries
        BlobFile internalBlobFile = new BlobFile(path, SUPER_BLOCK_SIZE, DEFAULT_WRITE_STREAM_TIMEOUT);
        return internalBlobFile.open(vertx, CREATE_NEW, READ, WRITE).flatMap(aVoid1 -> internalBlobFile.enableWrites(vertx)).flatMap(aVoid1 -> {
            Super superBlock = newBuilder().setBlockSize(DEFAULT_BLOCK_SIZE).build();
            return setSuperBlock(vertx, internalBlobFile, superBlock).map(aVoid11 -> superBlock);
        }).onErrorResumeNext(throwable -> {
            if (containsException(FileAlreadyExistsException.class, throwable)) {
                return internalBlobFile.close(vertx).flatMap(aVoid1 -> internalBlobFile.open(vertx, CREATE, READ, WRITE)).flatMap(aVoid1 -> internalBlobFile.enableWrites(vertx)).flatMap(aVoid1 -> getSuperBlock(vertx, internalBlobFile));
            } else {
                return error(throwable);
            }
        }).doOnNext(superBlock -> {
            blockSize = superBlock.getBlockSize();
            logStartPosition = up(SUPER_BLOCK_RESERVED, blockSize);
        }).map(new ToVoid<>()).flatMap(aVoid1 -> internalBlobFile.disableWrites(vertx)).flatMap(aVoid1 -> internalBlobFile.force(vertx, true)).onErrorResumeNext(throwable -> {
            if (!STOPPED.equals(blobFile.getStatus())) {
                return internalBlobFile.close(vertx).onErrorResumeNext(throwable1 -> {
                    return error(new CompositeException(throwable, throwable1));
                });
            } else {
                return error(throwable);
            }
        }).flatMap(aVoid1 -> internalBlobFile.close(vertx));
    }).flatMap(aVoid -> {
        blobFile = new BlobFile(path, blockSize, DEFAULT_WRITE_STREAM_TIMEOUT);
        return blobFile.open(vertx, CREATE, READ, WRITE);
    });
}

19 View Complete Implementation : BlobFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> consume(SfsVertx vertx, long position, Buffer src) {
    return consume(vertx, position, src, true);
}

19 View Complete Implementation : JournalFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
private Observable<Void> setSuperBlock(SfsVertx vertx, BlobFile internalBlobFile, Super superBlock) {
    Buffer buffer = buffer(superBlock.toByteArray());
    Block.Frame<Buffer> frame = encodeFrame(buffer);
    Buffer frameBuffer = frame.getData();
    int frameSize = frameBuffer.length();
    checkState(frameSize <= SUPER_BLOCK_SIZE, "Super block frame size was %s, which is greater than the super block size of %s", frameSize, SUPER_BLOCK_SIZE);
    // write the super block twice so that we can recover from a failed
    // write
    return aVoid().flatMap(aVoid -> internalBlobFile.consume(vertx, SUPER_BLOCK_POSITION_0, frameBuffer)).flatMap(aVoid -> internalBlobFile.force(vertx, true)).flatMap(aVoid -> internalBlobFile.consume(vertx, SUPER_BLOCK_POSITION_1, frameBuffer)).flatMap(aVoid -> internalBlobFile.force(vertx, true));
}

19 View Complete Implementation : AzureKms.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> stop(VertxContext<Server> vertxContext) {
    SfsVertx sfsVertx = vertxContext.vertx();
    return aVoid().filter(aVoid -> started.compareAndSet(true, false)).flatMap(aVoid -> {
        if (properties != null) {
            properties.clear();
            properties = null;
        }
        if (kms != null) {
            return RxHelper.executeBlocking(sfsVertx.getOrCreateContext(), sfsVertx.getBackgroundPool(), () -> {
                try {
                    kms.close();
                } catch (Throwable e) {
                    LOGGER.warn("Unhandled Exception", e);
                }
                return (Void) null;
            });
        }
        if (executorService != null) {
            try {
                executorService.shutdown();
            } catch (Throwable e) {
                LOGGER.warn("Unhandled Exception", e);
            } finally {
                executorService = null;
            }
        }
        return aVoid();
    }).singleOrDefault(null);
}

19 View Complete Implementation : IndexFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> enableWrites(SfsVertx vertx) {
    return blockFile.enableWrites(vertx);
}

19 View Complete Implementation : BlockFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> open(SfsVertx vertx, StandardOpenOption openOption, StandardOpenOption... openOptions) {
    executorService = vertx.getIoPool();
    return aVoid().doOnNext(aVoid -> checkState(status.compareAndSet(STOPPED, STARTING))).flatMap(aVoid -> {
        Context context = vertx.getOrCreateContext();
        return RxHelper.executeBlocking(context, vertx.getBackgroundPool(), () -> {
            try {
                createDirectories(file.getParent());
                Set<StandardOpenOption> options = new HashSet<>();
                options.add(openOption);
                addAll(options, openOptions);
                channel = AsynchronousFileChannel.open(file, options, executorService);
                return (Void) null;
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
    }).doOnNext(aVoid -> checkState(status.compareAndSet(STARTING, STARTED)));
}

19 View Complete Implementation : IndexFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> setBlock(SfsVertx vertx, long position, XIndexBlock data) {
    return blockFile.setBlock(vertx, position, buffer(data.toByteArray()));
}

19 View Complete Implementation : AwsKms.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
@Override
public Observable<Encrypted> reencrypt(VertxContext<Server> vertxContext, byte[] cipherBytes) {
    SfsVertx sfsVertx = vertxContext.vertx();
    return Observable.defer(() -> RxHelper.executeBlocking(sfsVertx.getOrCreateContext(), sfsVertx.getBackgroundPool(), () -> {
        ReEncryptRequest req = new ReEncryptRequest().withDestinationKeyId(keyId).withCiphertextBlob(ByteBuffer.wrap(cipherBytes.clone()));
        ByteBuffer buffer = kms.reEncrypt(req).getCiphertextBlob();
        byte[] b = new byte[buffer.remaining()];
        buffer.get(b);
        return new Encrypted(b, keyId);
    }));
}

19 View Complete Implementation : MetaFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
private Observable<Optional<XSuperBlock>> getBlock0(SfsVertx vertx, long position) {
    return blockFile.getBlock(vertx, position).filter(Optional::isPresent).map(Optional::get).map(this::parse).filter(Optional::isPresent).map(Optional::get).map(xSuperBlockChecksummedPositional -> of(xSuperBlockChecksummedPositional.getValue())).singleOrDefault(absent());
}

19 View Complete Implementation : BlockFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
private Observable<Void> setBlock0(SfsVertx vertx, final long position, Buffer data) {
    Context context = vertx.getOrCreateContext();
    long length = data.length();
    // this should never happen but in case things ever get crazy this will prevent corruption
    checkState(length <= blockSize, "Frame size was %s, expected %s", length, blockSize);
    ObservableFuture<Void> drainHandler = RxHelper.observableFuture();
    if (writeQueueSupport.writeQueueFull()) {
        writeQueueSupport.drainHandler(context, drainHandler::complete);
    } else {
        drainHandler.complete(null);
    }
    return drainHandler.flatMap(aVoid -> using(() -> {
        AsyncFileWriter writer = createWriteStream(context, position);
        activeWriters.add(writer);
        return writer;
    }, writer -> end(data, writer).doOnNext(aVoid1 -> activeWriters.remove(writer)), activeWriters::remove));
}

19 View Complete Implementation : JournalFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> append(SfsVertx vertx, Buffer metadata) {
    return append0(vertx, metadata, 0).map(new ToVoid<>());
}

19 View Complete Implementation : AwsKms.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
@Override
public Observable<byte[]> decrypt(VertxContext<Server> vertxContext, byte[] cipherBytes) {
    SfsVertx sfsVertx = vertxContext.vertx();
    return Observable.defer(() -> RxHelper.executeBlocking(sfsVertx.getOrCreateContext(), sfsVertx.getBackgroundPool(), () -> {
        DecryptRequest req = new DecryptRequest().withCiphertextBlob(ByteBuffer.wrap(cipherBytes.clone()));
        ByteBuffer buffer = kms.decrypt(req).getPlaintext();
        byte[] b = new byte[buffer.remaining()];
        buffer.get(b);
        return b;
    }));
}

19 View Complete Implementation : JournalFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> enableWrites(SfsVertx vertx) {
    return blobFile.enableWrites(vertx);
}

19 View Complete Implementation : BlobFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Long> size(SfsVertx vertx) {
    return aVoid().doOnNext(aVoid -> checkOpen()).flatMap(aVoid -> {
        Context context = vertx.getOrCreateContext();
        return RxHelper.executeBlocking(context, vertx.getBackgroundPool(), () -> {
            try {
                checkNotNull(channel, "Channel is null. Was everything initialized??");
                return channel.size();
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
    });
}

19 View Complete Implementation : JournalFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Optional<Entry>> getFirstEntry(SfsVertx vertx) {
    return getEntry(vertx, firstLogEntryPosition());
}

19 View Complete Implementation : JournalFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> close(SfsVertx vertx) {
    return blobFile.close(vertx);
}

19 View Complete Implementation : BlobFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> consume(SfsVertx vertx, long position, long length, EndableReadStream<Buffer> src, boolean assertAlignment) {
    return defer(() -> {
        checkOpen();
        checkCanWrite();
        ObservableFuture<Void> drainHandler = RxHelper.observableFuture();
        if (writeQueueSupport.writeQueueFull()) {
            writeQueueSupport.drainHandler(vertx.getOrCreateContext(), drainHandler::complete);
        } else {
            drainHandler.complete(null);
        }
        return drainHandler.flatMap(aVoid -> using(() -> {
            AsyncFileWriter dst = createWriteStream(vertx.getOrCreateContext(), position, assertAlignment);
            activeWriters.add(dst);
            return dst;
        }, sfsWriteStream -> {
            BufferedEndableWriteStream bufferedWriteStream = new BufferedEndableWriteStream(sfsWriteStream);
            LimitedWriteEndableWriteStream limitedWriteStream = new LimitedWriteEndableWriteStream(bufferedWriteStream, length);
            return pump(src, limitedWriteStream).doOnNext(aVoid1 -> {
                activeWriters.remove(sfsWriteStream);
            });
        }, activeWriters::remove));
    });
}

19 View Complete Implementation : JournalFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> scan(SfsVertx vertx, long position, Func1<Entry, Observable<Boolean>> func) {
    return aVoid().doOnNext(aVoid -> checkLogEntryPosition(position)).flatMap(aVoid -> {
        JournalScanner journalScanner = new JournalScanner(this, position);
        return journalScanner.scan(vertx, func);
    });
}

19 View Complete Implementation : AzureKms.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> start(VertxContext<Server> vertxContext, JsonObject config) {
    SfsVertx sfsVertx = vertxContext.vertx();
    return aVoid().filter(aVoid -> started.compareAndSet(false, true)).flatMap(aVoid -> {
        executorService = newCachedThreadPool();
        endpoint = ConfigHelper.getFieldOrEnv(config, "keystore.azure.kms.endpoint");
        checkArgument(endpoint != null, "keystore.azure.kms.endpoint is required");
        keyId = ConfigHelper.getFieldOrEnv(config, "keystore.azure.kms.key_id");
        checkArgument(keyId != null, "keystore.azure.kms.key_id is required");
        accessKeyId = ConfigHelper.getFieldOrEnv(config, "keystore.azure.kms.access_key_id");
        checkArgument(accessKeyId != null, "keystore.aws.kms.access_key_id is required");
        secretKey = ConfigHelper.getFieldOrEnv(config, "keystore.azure.kms.secret_key");
        checkArgument(secretKey != null, "keystore.azure.kms.secret_key is required");
        azureKeyIdentifier = format("%s/keys/%s", endpoint, keyId);
        return RxHelper.executeBlocking(sfsVertx.getOrCreateContext(), sfsVertx.getBackgroundPool(), () -> {
            try {
                kms = createKeyVaultClient(vertxContext);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
            return (Void) null;
        });
    }).singleOrDefault(null);
}

19 View Complete Implementation : JournalFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> scanFromFirst(SfsVertx vertx, Func1<Entry, Observable<Boolean>> func) {
    return aVoid().flatMap(aVoid -> {
        JournalScanner journalScanner = new JournalScanner(this, firstLogEntryPosition());
        return journalScanner.scan(vertx, func);
    });
}
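
A hedged sketch of how a caller might drive this scan follows; returning true from the callback continues the scan (as handled in JournalScanner further down this page), while the logging and variable names are illustrative placeholders.

// Hypothetical journal scan from the first entry; return false to stop early.
journalFile.scanFromFirst(vertx, entry -> {
    LOGGER.debug("Journal entry, next header @ " + entry.getNextHeaderPosition());
    return just(true);
}).subscribe(
        aVoid -> {
        },
        error -> LOGGER.error("Journal scan failed", error),
        () -> LOGGER.debug("Journal scan complete"));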

19 View Complete Implementation : BlobFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> consume(SfsVertx vertx, long position, Buffer src, boolean assertAlignment) {
    return defer(() -> {
        checkOpen();
        checkCanWrite();
        ObservableFuture<Void> drainHandler = RxHelper.observableFuture();
        if (writeQueueSupport.writeQueueFull()) {
            writeQueueSupport.drainHandler(vertx.getOrCreateContext(), drainHandler::complete);
        } else {
            drainHandler.complete(null);
        }
        return drainHandler.flatMap(aVoid -> using(() -> {
            AsyncFileWriter dst = createWriteStream(vertx.getOrCreateContext(), position, assertAlignment);
            activeWriters.add(dst);
            return dst;
        }, sfsWriteStream -> {
            LimitedWriteEndableWriteStream limitedWriteStream = new LimitedWriteEndableWriteStream(sfsWriteStream, src.length());
            return end(src, limitedWriteStream).doOnNext(aVoid1 -> activeWriters.remove(sfsWriteStream));
        }, activeWriters::remove));
    });
}

19 View Complete Implementation : JournalScanner.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
protected void scan0(SfsVertx vertx, Func1<Entry, Observable<Boolean>> transformer, ObservableFuture<Void> handler, long fileSize) {
    if (isDebugEnabled) {
        LOGGER.debug("Reading 1 blocks @ position " + position);
    }
    journalFile.getEntry(vertx, position).flatMap(entryOptional -> {
        if (entryOptional.isPresent()) {
            Entry entry = entryOptional.get();
            position = entry.getNextHeaderPosition();
            return transformer.call(entry).doOnNext(_continue -> {
                if (isDebugEnabled) {
                    LOGGER.debug("Scanned 1 block");
                }
            }).doOnNext(_continue -> position = entry.getNextHeaderPosition());
        } else {
            // scan forward until we find a readable block. There will
            // eventually be a header block that can be parsed
            // or the end of file will be reached
            if (isDebugEnabled) {
                LOGGER.debug("Skipped 1 block @ position " + position);
            }
            position += blockSize;
            return just(true);
        }
    }).subscribe(new Subscriber<Boolean>() {

        Boolean result;

        @Override
        public void onCompleted() {
            if (!TRUE.equals(result) || position >= fileSize) {
                handler.complete(null);
            } else {
                vertx.runOnContext(event -> scan0(vertx, transformer, handler, fileSize));
            }
        }

        @Override
        public void onError(Throwable e) {
            handler.fail(e);
        }

        @Override
        public void onNext(Boolean _continue) {
            result = _continue;
        }
    });
}

19 View Complete Implementation : AwsKms.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> start(VertxContext<Server> vertxContext, JsonObject config) {
    AwsKms _this = this;
    SfsVertx sfsVertx = vertxContext.vertx();
    return Defer.aVoid().filter(aVoid -> started.compareAndSet(false, true)).flatMap(aVoid -> {
        String keyStoreAwsKmsEndpoint = ConfigHelper.getFieldOrEnv(config, "keystore.aws.kms.endpoint");
        Preconditions.checkArgument(keyStoreAwsKmsEndpoint != null, "keystore.aws.kms.endpoint is required");
        _this.keyId = ConfigHelper.getFieldOrEnv(config, "keystore.aws.kms.key_id");
        Preconditions.checkArgument(_this.keyId != null, "keystore.aws.kms.key_id is required");
        _this.accessKeyId = ConfigHelper.getFieldOrEnv(config, "keystore.aws.kms.access_key_id");
        Preconditions.checkArgument(_this.accessKeyId != null, "keystore.aws.kms.access_key_id is required");
        _this.secretKey = ConfigHelper.getFieldOrEnv(config, "keystore.aws.kms.secret_key");
        Preconditions.checkArgument(_this.secretKey != null, "keystore.aws.kms.secret_key is required");
        return RxHelper.executeBlocking(sfsVertx.getOrCreateContext(), sfsVertx.getBackgroundPool(), () -> {
            kms = new AWSKMSClient(new AWSCredentials() {

                @Override
                public String getAWSAccessKeyId() {
                    return _this.accessKeyId;
                }

                @Override
                public String getAWSSecretKey() {
                    return _this.secretKey;
                }
            });
            kms.setEndpoint(keyStoreAwsKmsEndpoint);
            return (Void) null;
        });
    }).singleOrDefault(null);
}

19 View Complete Implementation : IndexFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> close(SfsVertx vertx) {
    return blockFile.close(vertx);
}

19 View Complete Implementation : BlockFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> setBlock(SfsVertx vertx, final long position, final Buffer data) {
    return defer(() -> {
        checkOpen();
        checkCanWrite();
        Block.Frame<Buffer> frame = encodeFrame(data);
        Buffer frameBuffer = frame.getData();
        return setBlock0(vertx, position, frameBuffer);
    });
}

19 View Complete Implementation : IndexFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Long> size(SfsVertx vertx) {
    return blockFile.size(vertx);
}

19 View Complete Implementation : BlobFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> consume(SfsVertx vertx, long position, long length, EndableReadStream<Buffer> src) {
    return consume(vertx, position, length, src, true);
}

19 View Complete Implementation : IndexFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Iterable<ChecksummedPositional<XIndexBlock>>> getBlocks(SfsVertx vertx, long position, int numberOfBlocks) {
    return blockFile.getBlocks(vertx, position, numberOfBlocks).map(checksummedPositionals -> from(checksummedPositionals).transform(input -> parse(input)).filter(Optional::isPresent).transform(Optional::get));
}

19 View Complete Implementation : BlockFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Iterable<ChecksummedPositional<byte[]>>> getBlocks(SfsVertx vertx, final long position, int numberOfBlocks) {
    return defer(() -> {
        checkOpen();
        long bufferSize = blockSize * numberOfBlocks;
        checkState(bufferSize <= MAX_VALUE, "Overflow multiplying %s and %s", blockSize, numberOfBlocks);
        AsyncFileReader src = createReadStream(vertx.getOrCreateContext(), position, (int) bufferSize, bufferSize);
        BufferWriteEndableWriteStream dst = new BufferWriteEndableWriteStream();
        return pump(src, dst).map(aVoid -> {
            Buffer buffer = dst.toBuffer();
            Positional<Buffer> bulk = new Positional<>(position, buffer);
            Iterable<Positional<Buffer>> buffers = partition(bulk, blockSize);
            return from(buffers).transform(positional -> {
                Optional<Block.Frame<byte[]>> oFrame = decodeFrame(positional.getValue(), false);
                if (oFrame.isPresent()) {
                    Block.Frame<byte[]> frame = oFrame.get();
                    return of(new ChecksummedPositional<byte[]>(positional.getPosition(), frame.getData(), frame.getChecksum()) {

                        @Override
                        public boolean isChecksumValid() {
                            return frame.isChecksumValid();
                        }
                    });
                } else {
                    return Optional.<ChecksummedPositional<byte[]>>absent();
                }
            }).filter(Optional::isPresent).transform(Optional::get);
        });
    });
}

19 View Complete Implementation : IndexFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> force(SfsVertx vertx, boolean metaData) {
    return blockFile.force(vertx, metaData);
}

19 View Complete Implementation : Elasticsearch.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> start(final VertxContext<Server> vertxContext, final JsonObject config, boolean isMasterNode) {
    this.isMasterNode = isMasterNode;
    SfsVertx sfsVertx = vertxContext.vertx();
    return Defer.aVoid().filter(aVoid -> status.compareAndSet(Status.STOPPED, Status.STARTING)).flatMap(aVoid -> RxHelper.executeBlocking(sfsVertx.getOrCreateContext(), sfsVertx.getBackgroundPool(), () -> {
        if (elasticSearchClient == null) {
            LOGGER.debug("Starting Elasticsearch");
            try {
                ESLoggerFactory.setDefaultFactory(new Slf4jESLoggerFactory());
                defaultScrollTimeout = Long.parseLong(ConfigHelper.getFieldOrEnv(config, "elasticsearch.defaultscrolltimeout", String.valueOf(TimeUnit.MINUTES.toMillis(2))));
                defaultIndexTimeout = Long.parseLong(ConfigHelper.getFieldOrEnv(config, "elasticsearch.defaultindextimeout", "500"));
                defaultGetTimeout = Long.parseLong(ConfigHelper.getFieldOrEnv(config, "elasticsearch.defaultgettimeout", "500"));
                defaultSearchTimeout = Long.parseLong(ConfigHelper.getFieldOrEnv(config, "elasticsearch.defaultsearchtimeout", String.valueOf(TimeUnit.SECONDS.toMillis(5))));
                defaultDeleteTimeout = Long.parseLong(ConfigHelper.getFieldOrEnv(config, "elasticsearch.defaultdeletetimeout", "500"));
                defaultAdminTimeout = Long.parseLong(ConfigHelper.getFieldOrEnv(config, "elasticsearch.defaultadmintimeout", String.valueOf(TimeUnit.SECONDS.toMillis(30))));
                shards = Integer.parseInt(ConfigHelper.getFieldOrEnv(config, "elasticsearch.shards", String.valueOf(1)));
                replicas = Integer.parseInt(ConfigHelper.getFieldOrEnv(config, "elasticsearch.replicas", String.valueOf(0)));
                Settings.Builder settings = Settings.settingsBuilder();
                settings.put("node.client", true);
                String clusterName = ConfigHelper.getFieldOrEnv(config, "elasticsearch.cluster.name");
                if (clusterName != null) {
                    settings.put("cluster.name", clusterName);
                }
                String nodeName = ConfigHelper.getFieldOrEnv(config, "elasticsearch.node.name");
                if (nodeName != null) {
                    settings.put("node.name", nodeName);
                }
                Iterable<String> unicastHosts = ConfigHelper.getArrayFieldOrEnv(config, "elasticsearch.discovery.zen.ping.unicast.hosts", new String[] {});
                settings.put("discovery.zen.ping.multicast.enabled", ConfigHelper.getFieldOrEnv(config, "elasticsearch.discovery.zen.ping.multicast.enabled", "true"));
                settings.put("discovery.zen.ping.unicast.enabled", ConfigHelper.getFieldOrEnv(config, "elasticsearch.discovery.zen.ping.unicast.enabled", "false"));
                settings.put("discovery.zen.ping.unicast.hosts", Joiner.on(',').join(unicastHosts));
                settings.put("client.transport.sniff", "true");
                Iterable<InetSocketTransportAddress> transports = FluentIterable.from(unicastHosts).filter(Predicates.notNull()).transform(HostAndPort::fromString).transform(input -> {
                    try {
                        return new InetSocketTransportAddress(InetAddress.getByName(input.getHostText()), input.getPortOrDefault(9300));
                    } catch (UnknownHostException e) {
                        throw new RuntimeException(e);
                    }
                });
                TransportClient transportClient = TransportClient.builder().settings(settings).build();
                for (InetSocketTransportAddress transportAddress : transports) {
                    transportClient.addTransportAddress(transportAddress);
                }
                elasticSearchClient = transportClient;
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
        return null;
    })).flatMap(aVoid -> waitForGreen(vertxContext)).flatMap(aVoid -> prepareCommonIndex(vertxContext, isMasterNode)).flatMap(aVoid -> waitForGreen(vertxContext)).doOnNext(aVoid -> Preconditions.checkState(status.compareAndSet(Status.STARTING, Status.STARTED))).doOnNext(aVoid -> LOGGER.debug("Started Elasticsearch"));
}

19 View Complete Implementation : MetaFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Optional<XSuperBlock>> getBlock(SfsVertx vertx) {
    // attempt to read position0; if position0 fails, attempt to read position1
    return getBlock0(vertx, position0).flatMap(xSuperBlockOptional -> {
        if (xSuperBlockOptional.isPresent()) {
            return just(xSuperBlockOptional);
        } else {
            return getBlock0(vertx, position1);
        }
    });
}

19 View Complete Implementation : BlockFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Boolean> replaceBlock(SfsVertx vertx, final long position, Buffer oldValue, Buffer newValue) {
    return defer(() -> {
        checkOpen();
        checkCanWrite();
        Context context = vertx.getOrCreateContext();
        return getBlock0(context, position).flatMap(buffer -> {
            Optional<Block.Frame<byte[]>> oExistingValue = decodeFrame(buffer, false);
            if (oExistingValue.isPresent()) {
                Block.Frame<byte[]> existingValue = oExistingValue.get();
                if (Arrays.equals(existingValue.getData(), oldValue.getBytes())) {
                    Block.Frame<Buffer> frame = encodeFrame(newValue);
                    Buffer frameBuffer = frame.getData();
                    return setBlock0(vertx, position, frameBuffer).map(aVoid -> true);
                } else {
                    return just(false);
                }
            } else {
                Block.Frame<Buffer> frame = encodeFrame(newValue);
                Buffer frameBuffer = frame.getData();
                return setBlock0(vertx, position, frameBuffer).map(aVoid -> true);
            }
        });
    });
}

19 View Complete Implementation : BlobFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> enableWrites(SfsVertx vertx) {
    return aVoid().doOnNext(aVoid -> checkOpen()).doOnNext(aVoid -> readOnly.compareAndSet(true, false));
}

19 View Complete Implementation : Elasticsearch.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> stop(VertxContext<Server> vertxContext) {
    SfsVertx vertx = vertxContext.vertx();
    return Defer.aVoid().filter(aVoid -> status.compareAndSet(Status.STARTED, Status.STOPPING) || status.compareAndSet(Status.STARTING, Status.STOPPING)).flatMap(aVoid -> {
        return RxHelper.executeBlocking(vertx.getOrCreateContext(), vertx.getBackgroundPool(), (() -> {
            LOGGER.debug("Stopping Elasticsearch");
            if (elasticSearchClient != null) {
                try {
                    elasticSearchClient.close();
                } catch (Throwable e) {
                    LOGGER.warn(e.getLocalizedMessage(), e);
                }
                elasticSearchClient = null;
            }
            LOGGER.debug("Stopped Elasticsearch");
            return (Void) null;
        })).doOnNext(aVoid1 -> Preconditions.checkState(status.compareAndSet(Status.STOPPING, Status.STOPPED)));
    });
}

19 View Complete Implementation : Elasticsearch.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
protected Observable<String> getMapping(VertxContext<Server> vertxContext, final String name) {
    SfsVertx sfsVertx = vertxContext.vertx();
    Context context = sfsVertx.getOrCreateContext();
    return RxHelper.executeBlocking(context, sfsVertx.getBackgroundPool(), () -> {
        try (Reader reader = new InputStreamReader(Thread.currentThread().getContextClassLoader().getResourceAsStream(name), Charsets.UTF_8)) {
            return CharStreams.toString(reader);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    });
}

19 View Complete Implementation : AwsKms.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> stop(VertxContext<Server> vertxContext) {
    SfsVertx sfsVertx = vertxContext.vertx();
    return Defer.aVoid().filter(aVoid -> started.compareAndSet(true, false)).flatMap(aVoid -> {
        if (properties != null) {
            properties.clear();
            properties = null;
        }
        if (kms != null) {
            return RxHelper.executeBlocking(sfsVertx.getOrCreateContext(), sfsVertx.getBackgroundPool(), () -> {
                try {
                    kms.shutdown();
                } catch (Throwable e) {
                    LOGGER.warn("Unhandled Exception", e);
                }
                return (Void) null;
            });
        }
        return Defer.aVoid();
    }).singleOrDefault(null);
}

19 View Complete Implementation : BlobFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Void> disableWrites(SfsVertx vertx) {
    return aVoid().doOnNext(aVoid -> checkOpen()).doOnNext(aVoid -> readOnly.compareAndSet(false, true)).flatMap(new WaitForActiveWriters(vertx, activeWriters)).flatMap(new WaitForEmptyWriteQueue(vertx, writeQueueSupport));
}

19 View Complete Implementation : JournalFile.java
Copyright Apache License 2.0
Author : pitchpoint-solutions
public Observable<Long> size(SfsVertx vertx) {
    return blobFile.size(vertx);
}