org.apache.htrace.TraceScope - Java examples

Here are examples of the Java API org.apache.htrace.TraceScope, taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.

83 Examples
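
Before the examples, here is a minimal sketch of the two TraceScope lifecycle idioms that recur throughout: try-with-resources, and an explicitly managed scope closed in a finally block. The class name, span names, and doWork method are illustrative placeholders, not taken from any of the listed projects.

import org.apache.htrace.Sampler;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;

public class TraceScopeIdioms {

    // Idiom 1: TraceScope is Closeable, so try-with-resources closes the
    // scope (and finishes its span) even if the traced work throws.
    static void tryWithResources() {
        try (TraceScope scope = Trace.startSpan("illustrativeWork", Sampler.ALWAYS)) {
            doWork();
        }
    }

    // Idiom 2: when the scope is created conditionally (or a helper may
    // return null), hold it in a local and null-check it in a finally block.
    static void explicitClose() {
        TraceScope scope = null;
        try {
            scope = Trace.startSpan("illustrativeWork", Sampler.ALWAYS);
            doWork();
        } finally {
            if (scope != null)
                scope.close();
        }
    }

    private static void doWork() {
        // placeholder for the traced work
    }
}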

19 View Complete Implementation : TraceRepo.java
Copyright Apache License 2.0
Author : apache
@Override
public Repo<T> call(long tid, T environment) throws Exception {
    try (TraceScope t = TraceUtil.trace(new TInfo(traceId, parentId), repo.getDescription())) {
        Repo<T> result = repo.call(tid, environment);
        if (result == null)
            return null;
        return new TraceRepo<>(result);
    }
}

19 View Complete Implementation : DataTransferProtoUtil.java
Copyright Apache License 2.0
Author : yncxcw
public static TraceScope continueTraceSpan(DataTransferTraceInfoProto proto, String description) {
    TraceScope scope = null;
    TraceInfo info = fromProto(proto);
    if (info != null) {
        scope = Trace.startSpan(description, info);
    }
    return scope;
}
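
Because continueTraceSpan returns null when the proto carries no trace info, callers must guard the close, as the Receiver example further below does. A minimal caller-side sketch, assuming a DataTransferTraceInfoProto named proto is in scope and using a placeholder description:

TraceScope traceScope = continueTraceSpan(proto, "illustrativeOp");
try {
    // ... perform the traced operation ...
} finally {
    if (traceScope != null)
        traceScope.close();
}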

19 View Complete Implementation : RemoveEntriesForMissingFiles.java
Copyright Apache License 2.0
Author : apache
public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    try (TraceScope clientSpan = opts.parseArgsAndTrace(RemoveEntriesForMissingFiles.class.getName(), args)) {
        checkAllTables(opts.getServerContext(), opts.fix);
    }
}

19 View Complete Implementation : TraceCommand.java
Copyright Apache License 2.0
Author : apache
public class TraceCommand extends Command {

    private TraceScope traceScope = null;

    @Override
    public int execute(final String fullCommand, final CommandLine cl, final Shell shellState) throws IOException {
        if (cl.getArgs().length == 1) {
            if (cl.getArgs()[0].equalsIgnoreCase("on")) {
                if (traceScope == null) {
                    traceScope = Trace.startSpan("shell:" + shellState.getAccumuloClient().whoami(), Sampler.ALWAYS);
                }
            } else if (cl.getArgs()[0].equalsIgnoreCase("off")) {
                if (traceScope != null) {
                    final long trace = traceScope.getSpan().getTraceId();
                    traceScope.close();
                    traceScope = null;
                    StringBuilder sb = new StringBuilder();
                    int traceCount = 0;
                    for (int i = 0; i < 30; i++) {
                        sb = new StringBuilder();
                        try {
                            final Map<String, String> properties = shellState.getAccumuloClient().instanceOperations().getSystemConfiguration();
                            final String table = properties.get(Property.TRACE_TABLE.getKey());
                            final String user = shellState.getAccumuloClient().whoami();
                            final Authorizations auths = shellState.getAccumuloClient().securityOperations().getUserAuthorizations(user);
                            final Scanner scanner = shellState.getAccumuloClient().createScanner(table, auths);
                            scanner.setRange(new Range(new Text(Long.toHexString(trace))));
                            final StringBuilder finalSB = sb;
                            traceCount = TraceDump.printTrace(scanner, line -> {
                                try {
                                    finalSB.append(line + "\n");
                                } catch (Exception ex) {
                                    throw new RuntimeException(ex);
                                }
                            });
                            if (traceCount > 0) {
                                shellState.getReader().print(sb.toString());
                                break;
                            }
                        } catch (Exception ex) {
                            shellState.printException(ex);
                        }
                        shellState.getReader().println("Waiting for trace information");
                        shellState.getReader().flush();
                        sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
                    }
                    if (traceCount < 0) {
                        // display the trace even though there are unrooted spans
                        shellState.getReader().print(sb.toString());
                    }
                } else {
                    shellState.getReader().println("Not tracing");
                }
            } else {
                throw new BadArgumentException("Argument must be 'on' or 'off'", fullCommand, fullCommand.indexOf(cl.getArgs()[0]));
            }
        } else if (cl.getArgs().length == 0) {
            shellState.getReader().println(Trace.isTracing() ? "on" : "off");
        } else {
            shellState.printException(new IllegalArgumentException("Expected 0 or 1 argument. There were " + cl.getArgs().length + "."));
            printHelp(shellState);
            return 1;
        }
        return 0;
    }

    @Override
    public String description() {
        return "turns tracing on or off";
    }

    @Override
    public String usage() {
        return getName() + " [ on | off ]";
    }

    @Override
    public int numArgs() {
        return Shell.NO_FIXED_ARG_LENGTH_CHECK;
    }
}

19 View Complete Implementation : TabletServerBatchWriter.java
Copyright Apache License 2.0
Author : apache
public synchronized void flush() throws MutationsRejectedException {
    if (closed)
        throw new IllegalStateException("Closed");
    try (TraceScope span = Trace.startSpan("flush")) {
        checkForFailures();
        if (flushing) {
            // some other thread is currently flushing, so wait
            waitRTE(() -> flushing && !somethingFailed);
            checkForFailures();
            return;
        }
        flushing = true;
        startProcessing();
        checkForFailures();
        waitRTE(() -> totalMemUsed > 0 && !somethingFailed);
        flushing = false;
        this.notifyAll();
        checkForFailures();
    }
}

19 View Complete Implementation : TraceUtil.java
Copyright Apache License 2.0
Author : apache
/**
 * To move trace data from client to server, the RPC call must be annotated to take a TInfo object
 * as its first argument. The user can simply pass null, so long as they wrap their Client and
 * Service objects with these functions.
 *
 * <pre>
 * Trace.on("remoteMethod");
 * Iface c = new Client();
 * c = TraceWrap.client(c);
 * c.remoteMethod(null, arg2, arg3);
 * Trace.off();
 * </pre>
 *
 * The wrapper will see the annotated method and send or re-establish the trace information.
 *
 * Note that the result of these calls is a Proxy object that conforms to the basic interfaces,
 * but is not your concrete instance.
 */
public static <T> T wrapClient(final T instance) {
    InvocationHandler handler = (obj, method, args) -> {
        if (args == null || args.length < 1 || args[0] != null) {
            return method.invoke(instance, args);
        }
        if (TInfo.class.isAssignableFrom(method.getParameterTypes()[0])) {
            args[0] = traceInfo();
        }
        try (TraceScope span = Trace.startSpan("client:" + method.getName())) {
            return method.invoke(instance, args);
        } catch (InvocationTargetException ex) {
            throw ex.getCause();
        }
    };
    return wrapRpc(handler, instance);
}

19 View Complete Implementation : TraceRepo.java
Copyright Apache License 2.0
Author : apache
@Override
public void undo(long tid, T environment) throws Exception {
    try (TraceScope t = TraceUtil.trace(new TInfo(traceId, parentId), repo.getDescription())) {
        repo.undo(tid, environment);
    }
}

19 View Complete Implementation : GarbageCollectionAlgorithm.java
Copyright Apache License 2.0
Author : apache
private void deleteConfirmed(GarbageCollectionEnvironment gce, SortedMap<String, String> candidateMap) throws IOException, TableNotFoundException {
    try (TraceScope deleteSpan = Trace.startSpan("deleteFiles")) {
        gce.delete(candidateMap);
    }
    cleanUpDeletedTableDirs(gce, candidateMap);
}

19 View Complete Implementation : TraceUtil.java
Copyright Apache License 2.0
Author : apache
public static <T> T wrapService(final T instance) {
    InvocationHandler handler = (obj, method, args) -> {
        try {
            if (args == null || args.length < 1 || args[0] == null || !(args[0] instanceof TInfo)) {
                return method.invoke(instance, args);
            }
            try (TraceScope span = trace((TInfo) args[0], method.getName())) {
                return method.invoke(instance, args);
            }
        } catch (InvocationTargetException ex) {
            throw ex.getCause();
        }
    };
    return wrapRpc(handler, instance);
}
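
A hedged sketch of how the two wrappers pair up, assuming a Thrift-style interface whose methods take TInfo as their first parameter; Iface, MyHandler, and Client are hypothetical names, not from the project:

// Server side: wrapService unwraps the TInfo argument and re-establishes
// the trace around each invocation (MyHandler is hypothetical).
Iface service = TraceUtil.wrapService(new MyHandler());

// Client side: wrapClient fills in the TInfo when the caller passes null.
Iface client = TraceUtil.wrapClient(new Client());
try (TraceScope scope = Trace.startSpan("remoteMethod", Sampler.ALWAYS)) {
    client.remoteMethod(null, arg2, arg3);
}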

19 View Complete Implementation : GarbageCollectionAlgorithm.java
Copyright Apache License 2.0
Author : apache
private boolean getCandidates(GarbageCollectionEnvironment gce, String lastCandidate, List<String> candidates) throws TableNotFoundException {
    try (TraceScope candidatesSpan = Trace.startSpan("getCandidates")) {
        return gce.getCandidates(lastCandidate, candidates);
    }
}

19 View Complete Implementation : GarbageCollectionAlgorithm.java
Copyright Apache License 2.0
Author : apache
private void confirmDeletesTrace(GarbageCollectionEnvironment gce, SortedMap<String, String> candidateMap) throws TableNotFoundException {
    try (TraceScope confirmDeletesSpan = Trace.startSpan("confirmDeletes")) {
        confirmDeletes(gce, candidateMap);
    }
}

18 View Complete Implementation : DatafileManager.java
Copyright Apache License 2.0
Author : apache
private TreeSet<FileRef> waitForScansToFinish(Set<FileRef> pathsToWaitFor, boolean blockNewScans, long maxWaitTime) {
    long startTime = System.currentTimeMillis();
    TreeSet<FileRef> inUse = new TreeSet<>();
    try (TraceScope waitForScans = Trace.startSpan("waitForScans")) {
        synchronized (tablet) {
            if (blockNewScans) {
                if (reservationsBlocked)
                    throw new IllegalStateException();
                reservationsBlocked = true;
            }
            for (FileRef path : pathsToWaitFor) {
                while (fileScanReferenceCounts.get(path) > 0 && System.currentTimeMillis() - startTime < maxWaitTime) {
                    try {
                        tablet.wait(100);
                    } catch (InterruptedException e) {
                        log.warn("{}", e.getMessage(), e);
                    }
                }
            }
            for (FileRef path : pathsToWaitFor) {
                if (fileScanReferenceCounts.get(path) > 0)
                    inUse.add(path);
            }
            if (blockNewScans) {
                reservationsBlocked = false;
                tablet.notifyAll();
            }
        }
    }
    return inUse;
}

18 View Complete Implementation : RecoverableZooKeeper.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * getChildren is an idempotent operation. Retry before throwing exception
 * @return List of children znodes
 */
public List<String> getChildren(String path, boolean watch) throws KeeperException, InterruptedException {
    TraceScope traceScope = null;
    try {
        traceScope = Trace.startSpan("RecoverableZookeeper.getChildren");
        RetryCounter retryCounter = retryCounterFactory.create();
        while (true) {
            try {
                return checkZk().getChildren(path, watch);
            } catch (KeeperException e) {
                switch(e.code()) {
                    case CONNECTIONLOSS:
                    case SESSIONEXPIRED:
                    case OPERATIONTIMEOUT:
                        retryOrThrow(retryCounter, e, "getChildren");
                        break;
                    default:
                        throw e;
                }
            }
            retryCounter.sleepUntilNextRetry();
        }
    } finally {
        if (traceScope != null)
            traceScope.close();
    }
}

18 View Complete Implementation : RecoverableZooKeeper.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * getChildren is an idempotent operation. Retry before throwing exception
 * @return List of children znodes
 */
public List<String> getChildren(String path, Watcher watcher) throws KeeperException, InterruptedException {
    TraceScope traceScope = null;
    try {
        traceScope = Trace.startSpan("RecoverableZookeeper.getChildren");
        RetryCounter retryCounter = retryCounterFactory.create();
        while (true) {
            try {
                return checkZk().getChildren(path, watcher);
            } catch (KeeperException e) {
                switch(e.code()) {
                    case CONNECTIONLOSS:
                    case SESSIONEXPIRED:
                    case OPERATIONTIMEOUT:
                        retryOrThrow(retryCounter, e, "getChildren");
                        break;
                    default:
                        throw e;
                }
            }
            retryCounter.sleepUntilNextRetry();
        }
    } finally {
        if (traceScope != null)
            traceScope.close();
    }
}

18 View Complete Implementation : RecoverableZooKeeper.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * exists is an idempotent operation. Retry before throwing exception
 * @return A Stat instance
 */
public Stat exists(String path, Watcher watcher) throws KeeperException, InterruptedException {
    TraceScope traceScope = null;
    try {
        traceScope = Trace.startSpan("RecoverableZookeeper.exists");
        RetryCounter retryCounter = retryCounterFactory.create();
        while (true) {
            try {
                return checkZk().exists(path, watcher);
            } catch (KeeperException e) {
                switch(e.code()) {
                    case CONNECTIONLOSS:
                    case SESSIONEXPIRED:
                    case OPERATIONTIMEOUT:
                        retryOrThrow(retryCounter, e, "exists");
                        break;
                    default:
                        throw e;
                }
            }
            retryCounter.sleepUntilNextRetry();
        }
    } finally {
        if (traceScope != null)
            traceScope.close();
    }
}

18 View Complete Implementation : RecoverableZooKeeper.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * Run multiple operations in a transactional manner. Retry before throwing exception
 */
public List<OpResult> multi(Iterable<Op> ops) throws KeeperException, InterruptedException {
    TraceScope traceScope = null;
    try {
        traceScope = Trace.startSpan("RecoverableZookeeper.multi");
        RetryCounter retryCounter = retryCounterFactory.create();
        Iterable<Op> multiOps = prepareZKMulti(ops);
        while (true) {
            try {
                return checkZk().multi(multiOps);
            } catch (KeeperException e) {
                switch(e.code()) {
                    case CONNECTIONLOSS:
                    case SESSIONEXPIRED:
                    case OPERATIONTIMEOUT:
                        retryOrThrow(retryCounter, e, "multi");
                        break;
                    default:
                        throw e;
                }
            }
            retryCounter.sleepUntilNextRetry();
        }
    } finally {
        if (traceScope != null)
            traceScope.close();
    }
}

18 View Complete Implementation : RecoverableZooKeeper.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * exists is an idempotent operation. Retry before throwing exception
 * @return A Stat instance
 */
public Stat exists(String path, boolean watch) throws KeeperException, InterruptedException {
    TraceScope traceScope = null;
    try {
        traceScope = Trace.startSpan("RecoverableZookeeper.exists");
        RetryCounter retryCounter = retryCounterFactory.create();
        while (true) {
            try {
                return checkZk().exists(path, watch);
            } catch (KeeperException e) {
                switch(e.code()) {
                    case CONNECTIONLOSS:
                    case SESSIONEXPIRED:
                    case OPERATIONTIMEOUT:
                        retryOrThrow(retryCounter, e, "exists");
                        break;
                    default:
                        throw e;
                }
            }
            retryCounter.sleepUntilNextRetry();
        }
    } finally {
        if (traceScope != null)
            traceScope.close();
    }
}

18 View Complete Implementation : RecoverableZooKeeper.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * <p>
 * NONSEQUENTIAL create is an idempotent operation.
 * Retry before throwing exceptions.
 * But this function will not throw the NodeExist exception back to the
 * application.
 * </p>
 * <p>
 * But SEQUENTIAL is NOT an idempotent operation. It is necessary to add
 * an identifier to the path to verify whether the previous attempt
 * succeeded.
 * </p>
 *
 * @return Path
 */
public String create(String path, byte[] data, List<ACL> acl, CreateMode createMode) throws KeeperException, InterruptedException {
    TraceScope traceScope = null;
    try {
        traceScope = Trace.startSpan("RecoverableZookeeper.create");
        byte[] newData = appendMetaData(data);
        switch(createMode) {
            case EPHEMERAL:
            case PERSISTENT:
                return createNonSequential(path, newData, acl, createMode);
            case EPHEMERAL_SEQUENTIAL:
            case PERSISTENT_SEQUENTIAL:
                return createSequential(path, newData, acl, createMode);
            default:
                throw new IllegalArgumentException("Unrecognized CreateMode: " + createMode);
        }
    } finally {
        if (traceScope != null)
            traceScope.close();
    }
}

18 View Complete Implementation : RecoverableZooKeeper.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * delete is an idempotent operation. Retry before throwing exception.
 * This function will not throw NoNodeException if the path does not
 * exist.
 */
public void delete(String path, int version) throws InterruptedException, KeeperException {
    TraceScope traceScope = null;
    try {
        traceScope = Trace.startSpan("RecoverableZookeeper.delete");
        RetryCounter retryCounter = retryCounterFactory.create();
        // False for first attempt, true for all retries.
        boolean isRetry = false;
        while (true) {
            try {
                checkZk().delete(path, version);
                return;
            } catch (KeeperException e) {
                switch(e.code()) {
                    case NONODE:
                        if (isRetry) {
                            LOG.debug("Node " + path + " already deleted. replaceduming a " + "previous attempt succeeded.");
                            return;
                        }
                        LOG.debug("Node " + path + " already deleted, retry=" + isRetry);
                        throw e;
                    case CONNECTIONLOSS:
                    case SESSIONEXPIRED:
                    case OPERATIONTIMEOUT:
                        retryOrThrow(retryCounter, e, "delete");
                        break;
                    default:
                        throw e;
                }
            }
            retryCounter.sleepUntilNextRetry();
            isRetry = true;
        }
    } finally {
        if (traceScope != null)
            traceScope.close();
    }
}

18 View Complete Implementation : TabletServerBatchWriter.java
Copyright Apache License 2.0
Author : apache
@Override
public synchronized void close() throws MutationsRejectedException {
    if (closed)
        return;
    try (TraceScope span = Trace.startSpan("close")) {
        closed = true;
        startProcessing();
        waitRTE(() -> totalMemUsed > 0 && !somethingFailed);
        logStats();
        checkForFailures();
    } finally {
        // make a best effort to release these resources
        writer.binningThreadPool.shutdownNow();
        writer.sendThreadPool.shutdownNow();
        jtimer.cancel();
    }
}

18 View Complete Implementation : RecoverableZooKeeper.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * setAcl is an idempotent operation. Retry before throwing exception
 * @return A Stat instance
 */
public Stat setAcl(String path, List<ACL> acls, int version) throws KeeperException, InterruptedException {
    TraceScope traceScope = null;
    try {
        traceScope = Trace.startSpan("RecoverableZookeeper.setAcl");
        RetryCounter retryCounter = retryCounterFactory.create();
        while (true) {
            try {
                return checkZk().setACL(path, acls, version);
            } catch (KeeperException e) {
                switch(e.code()) {
                    case CONNECTIONLOSS:
                    case SESSIONEXPIRED:
                    case OPERATIONTIMEOUT:
                        retryOrThrow(retryCounter, e, "setAcl");
                        break;
                    default:
                        throw e;
                }
            }
            retryCounter.sleepUntilNextRetry();
        }
    } finally {
        if (traceScope != null)
            traceScope.close();
    }
}

18 View Complete Implementation : IntegrationTestSendTraceRequests.java
Copyright Apache License 2.0
Author : fengchen8086
private void createTable() throws IOException {
    TraceScope createScope = null;
    try {
        createScope = Trace.startSpan("createTable", Sampler.ALWAYS);
        util.createTable(tableName, familyName);
    } finally {
        if (createScope != null)
            createScope.close();
    }
}

18 View Complete Implementation : IntegrationTestSendTraceRequests.java
Copyright Apache License 2.0
Author : fengchen8086
private void deleteTable() throws IOException {
    TraceScope deleteScope = null;
    try {
        if (admin.tableExists(tableName)) {
            deleteScope = Trace.startSpan("deleteTable", Sampler.ALWAYS);
            util.deleteTable(tableName);
        }
    } finally {
        if (deleteScope != null)
            deleteScope.close();
    }
}

18 View Complete Implementation : CheckForMetadataProblems.java
Copyright Apache License 2.0
Author : apache
public static void main(String[] args) throws Exception {
    ServerUtilOpts opts = new ServerUtilOpts();
    try (TraceScope clientSpan = opts.parseArgsAndTrace(CheckForMetadataProblems.class.getName(), args)) {
        checkMetadataAndRootTableEntries(RootTable.NAME, opts);
        checkMetadataAndRootTableEntries(MetadataTable.NAME, opts);
        if (sawProblems)
            throw new RuntimeException();
    }
}

18 View Complete Implementation : EventHandler.java
Copyright Apache License 2.0
Author : fengchen8086
@Override
public void run() {
    TraceScope chunk = Trace.startSpan(this.getClass().getSimpleName(), parent);
    try {
        if (getListener() != null)
            getListener().beforeProcess(this);
        process();
        if (getListener() != null)
            getListener().afterProcess(this);
    } catch (Throwable t) {
        handleException(t);
    } finally {
        chunk.close();
    }
}

18 View Complete Implementation : CacheDirectiveIterator.java
Copyright Apache License 2.0
Author : yncxcw
@Override
public BatchedEntries<CacheDirectiveEntry> makeRequest(Long prevKey) throws IOException {
    BatchedEntries<CacheDirectiveEntry> entries = null;
    TraceScope scope = Trace.startSpan("listCacheDirectives", traceSampler);
    try {
        entries = namenode.listCacheDirectives(prevKey, filter);
    } catch (IOException e) {
        if (e.getMessage().contains("Filtering by ID is unsupported")) {
            // Retry case for old servers, do the filtering client-side
            long id = filter.getId();
            filter = removeIdFromFilter(filter);
            // Using id - 1 as prevId should get us a window containing the id
            // This is somewhat brittle, since it depends on directives being
            // returned in order of ascending ID.
            entries = namenode.listCacheDirectives(id - 1, filter);
            for (int i = 0; i < entries.size(); i++) {
                CacheDirectiveEntry entry = entries.get(i);
                if (entry.getInfo().getId().equals((Long) id)) {
                    return new SingleEntry(entry);
                }
            }
            throw new RemoteException(InvalidRequestException.class.getName(), "Did not find requested id " + id);
        }
        throw e;
    } finally {
        scope.close();
    }
    Preconditions.checkNotNull(entries);
    return entries;
}

18 View Complete Implementation : TableDiskUsage.java
Copyright Apache License 2.0
Author : apache
public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    try (TraceScope clientSpan = opts.parseArgsAndTrace(TableDiskUsage.class.getName(), args)) {
        try (AccumuloClient client = Accumulo.newClient().from(opts.getClientProps()).build()) {
            VolumeManager fs = opts.getServerContext().getVolumeManager();
            org.apache.accumulo.server.util.TableDiskUsage.printDiskUsage(opts.tables, fs, client, false);
        }
    }
}

18 View Complete Implementation : CachePoolIterator.java
Copyright Apache License 2.0
Author : yncxcw
@Override
public BatchedEntries<CachePoolEntry> makeRequest(String prevKey) throws IOException {
    TraceScope scope = Trace.startSpan("listCachePools", traceSampler);
    try {
        return namenode.listCachePools(prevKey);
    } finally {
        scope.close();
    }
}

18 View Complete Implementation : EncryptionZoneIterator.java
Copyright Apache License 2.0
Author : yncxcw
@Override
public BatchedEntries<EncryptionZone> makeRequest(Long prevId) throws IOException {
    TraceScope scope = Trace.startSpan("listEncryptionZones", traceSampler);
    try {
        return namenode.listEncryptionZones(prevId);
    } finally {
        scope.close();
    }
}

18 View Complete Implementation : TraceRepo.java
Copyright Apache License 2.0
Author : apache
@Override
public long isReady(long tid, T environment) throws Exception {
    try (TraceScope t = TraceUtil.trace(new TInfo(traceId, parentId), repo.getDescription())) {
        return repo.isReady(tid, environment);
    }
}

18 View Complete Implementation : RemoteBlockReader.java
Copyright Apache License 2.0
Author : yncxcw
@Override
protected synchronized int readChunk(long pos, byte[] buf, int offset, int len, byte[] checksumBuf) throws IOException {
    TraceScope scope = Trace.startSpan("RemoteBlockReader#readChunk(" + blockId + ")", Sampler.NEVER);
    try {
        return readChunkImpl(pos, buf, offset, len, checksumBuf);
    } finally {
        scope.close();
    }
}

17 View Complete Implementation : MinorCompactionTask.java
Copyright Apache License 2.0
Author : apache
@Override
public void run() {
    tablet.minorCompactionStarted();
    ProbabilitySampler sampler = TraceUtil.probabilitySampler(tracePercent);
    try {
        try (TraceScope minorCompaction = Trace.startSpan("minorCompaction", sampler)) {
            FileRef newMapfileLocation = tablet.getNextMapFilename(mergeFile == null ? "F" : "M");
            FileRef tmpFileRef = new FileRef(newMapfileLocation.path() + "_tmp");
            try (TraceScope span = Trace.startSpan("waitForCommits")) {
                synchronized (tablet) {
                    commitSession.waitForCommitsToFinish();
                }
            }
            try (TraceScope span = Trace.startSpan("start")) {
                while (true) {
                    try {
                        /*
               * the purpose of the minor compaction start event is to keep track of the filename...
               * in the case where the metadata table write for the minor compaction finishes and
               * the process dies before writing the minor compaction finish event, then the start
               * event+filename in metadata table will prevent recovery of duplicate data... the
               * minor compaction start event could be written at any time before the metadata write
               * for the minor compaction
               */
                        tablet.getTabletServer().minorCompactionStarted(commitSession, commitSession.getWALogSeq() + 1, newMapfileLocation.path().toString());
                        break;
                    } catch (IOException e) {
                        log.warn("Failed to write to write ahead log {}", e.getMessage(), e);
                    }
                }
            }
            try (TraceScope span = Trace.startSpan("compact")) {
                this.stats = tablet.minorCompact(tablet.getTabletMemory().getMinCMemTable(), tmpFileRef, newMapfileLocation, mergeFile, true, queued, commitSession, flushId, mincReason);
            }
            if (minorCompaction.getSpan() != null) {
                minorCompaction.getSpan().addKVAnnotation("extent", tablet.getExtent().toString());
                minorCompaction.getSpan().addKVAnnotation("numEntries", Long.toString(this.stats.getNumEntries()));
                minorCompaction.getSpan().addKVAnnotation("size", Long.toString(this.stats.getSize()));
            }
        }
        if (tablet.needsSplit()) {
            tablet.getTabletServer().executeSplit(tablet);
        } else {
            tablet.initiateMajorCompaction(MajorCompactionReason.NORMAL);
        }
    } catch (Throwable t) {
        log.error("Unknown error during minor compaction for extent: " + tablet.getExtent(), t);
        throw new RuntimeException(t);
    } finally {
        tablet.minorCompactionComplete();
    }
}

17 View Complete Implementation : RecoverableZooKeeper.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * getData is an idempotent operation. Retry before throwing exception
 * @return Data
 */
public byte[] getData(String path, boolean watch, Stat stat) throws KeeperException, InterruptedException {
    TraceScope traceScope = null;
    try {
        traceScope = Trace.startSpan("RecoverableZookeeper.getData");
        RetryCounter retryCounter = retryCounterFactory.create();
        while (true) {
            try {
                byte[] revData = checkZk().getData(path, watch, stat);
                return this.removeMetaData(revData);
            } catch (KeeperException e) {
                switch(e.code()) {
                    case CONNECTIONLOSS:
                    case SESSIONEXPIRED:
                    case OPERATIONTIMEOUT:
                        retryOrThrow(retryCounter, e, "getData");
                        break;
                    default:
                        throw e;
                }
            }
            retryCounter.sleepUntilNextRetry();
        }
    } finally {
        if (traceScope != null)
            traceScope.close();
    }
}

17 View Complete Implementation : RecoverableZooKeeper.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * getAcl is an idempotent operation. Retry before throwing exception
 * @return list of ACLs
 */
public List<ACL> getAcl(String path, Stat stat) throws KeeperException, InterruptedException {
    TraceScope traceScope = null;
    try {
        traceScope = Trace.startSpan("RecoverableZookeeper.getAcl");
        RetryCounter retryCounter = retryCounterFactory.create();
        while (true) {
            try {
                return checkZk().getACL(path, stat);
            } catch (KeeperException e) {
                switch(e.code()) {
                    case CONNECTIONLOSS:
                    case SESSIONEXPIRED:
                    case OPERATIONTIMEOUT:
                        retryOrThrow(retryCounter, e, "getAcl");
                        break;
                    default:
                        throw e;
                }
            }
            retryCounter.sleepUntilNextRetry();
        }
    } finally {
        if (traceScope != null)
            traceScope.close();
    }
}

17 View Complete Implementation : RecoverableZooKeeper.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * setData is NOT an idempotent operation. A retry may cause a BadVersion
 * exception, so an identifier field is added to the data to check whether
 * the BadVersion was caused by a previous setData that actually succeeded.
 * @return Stat instance
 */
public Stat setData(String path, byte[] data, int version) throws KeeperException, InterruptedException {
    TraceScope traceScope = null;
    try {
        traceScope = Trace.startSpan("RecoverableZookeeper.setData");
        RetryCounter retryCounter = retryCounterFactory.create();
        byte[] newData = appendMetaData(data);
        boolean isRetry = false;
        while (true) {
            try {
                return checkZk().setData(path, newData, version);
            } catch (KeeperException e) {
                switch(e.code()) {
                    case CONNECTIONLOSS:
                    case SESSIONEXPIRED:
                    case OPERATIONTIMEOUT:
                        retryOrThrow(retryCounter, e, "setData");
                        break;
                    case BADVERSION:
                        if (isRetry) {
                            // try to verify whether the previous setData success or not
                            try {
                                Stat stat = new Stat();
                                byte[] revData = checkZk().getData(path, false, stat);
                                if (Bytes.compareTo(revData, newData) == 0) {
                                    // the bad version is caused by previous successful setData
                                    return stat;
                                }
                            } catch (KeeperException keeperException) {
                                // the ZK is not reliable at this moment. just throwing exception
                                throw keeperException;
                            }
                        }
                    // throw other exceptions and verified bad version exceptions
                    default:
                        throw e;
                }
            }
            retryCounter.sleepUntilNextRetry();
            isRetry = true;
        }
    } finally {
        if (traceScope != null)
            traceScope.close();
    }
}

17 View Complete Implementation : RecoverableZooKeeper.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * getData is an idempotent operation. Retry before throwing exception
 * @return Data
 */
public byte[] getData(String path, Watcher watcher, Stat stat) throws KeeperException, InterruptedException {
    TraceScope traceScope = null;
    try {
        traceScope = Trace.startSpan("RecoverableZookeeper.getData");
        RetryCounter retryCounter = retryCounterFactory.create();
        while (true) {
            try {
                byte[] revData = checkZk().getData(path, watcher, stat);
                return this.removeMetaData(revData);
            } catch (KeeperException e) {
                switch(e.code()) {
                    case CONNECTIONLOSS:
                    case SESSIONEXPIRED:
                    case OPERATIONTIMEOUT:
                        retryOrThrow(retryCounter, e, "getData");
                        break;
                    default:
                        throw e;
                }
            }
            retryCounter.sleepUntilNextRetry();
        }
    } finally {
        if (traceScope != null)
            traceScope.close();
    }
}

17 View Complete Implementation : MemStoreFlusher.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * Check if the regionserver's memstore memory usage is greater than the
 * limit. If so, flush regions with the biggest memstores until we're down
 * to the lower limit. This method blocks callers until we're down to a safe
 * amount of memstore consumption.
 */
public void reclaimMemStoreMemory() {
    TraceScope scope = Trace.startSpan("MemStoreFluser.reclaimMemStoreMemory");
    if (isAboveHighWaterMark()) {
        if (Trace.isTracing()) {
            scope.getSpan().addTimelineAnnotation("Force Flush. We're above high water mark.");
        }
        long start = EnvironmentEdgeManager.currentTime();
        synchronized (this.blockSignal) {
            boolean blocked = false;
            long startTime = 0;
            boolean interrupted = false;
            try {
                while (isAboveHighWaterMark() && !server.isStopped()) {
                    if (!blocked) {
                        startTime = EnvironmentEdgeManager.currentTime();
                        LOG.info("Blocking updates on " + server.toString() + ": the global memstore size " + TraditionalBinaryPrefix.long2String(server.getRegionServerAccounting().getGlobalMemstoreSize(), "", 1) + " is >= than blocking " + TraditionalBinaryPrefix.long2String(globalMemStoreLimit, "", 1) + " size");
                    }
                    blocked = true;
                    wakeupFlushThread();
                    try {
                        // we should be able to wait forever, but we've seen a bug where
                        // we miss a notify, so put a 5 second bound on it at least.
                        blockSignal.wait(5 * 1000);
                    } catch (InterruptedException ie) {
                        LOG.warn("Interrupted while waiting");
                        interrupted = true;
                    }
                    long took = EnvironmentEdgeManager.currentTime() - start;
                    LOG.warn("Memstore is above high water mark and block " + took + "ms");
                }
            } finally {
                if (interrupted) {
                    Thread.currentThread().interrupt();
                }
            }
            if (blocked) {
                final long totalTime = EnvironmentEdgeManager.currentTime() - startTime;
                if (totalTime > 0) {
                    this.updatesBlockedMsHighWater.add(totalTime);
                }
                LOG.info("Unblocking updates for server " + server.toString());
            }
        }
    } else if (isAboveLowWaterMark()) {
        wakeupFlushThread();
    }
    scope.close();
}

17 View Complete Implementation : BlockReaderLocalLegacy.java
Copyright Apache License 2.0
Author : yncxcw
/**
 * Reads bytes into a buffer until EOF or the buffer's limit is reached
 */
private int fillBuffer(FileInputStream stream, ByteBuffer buf) throws IOException {
    TraceScope scope = Trace.startSpan("BlockReaderLocalLegacy#fillBuffer(" + blockId + ")", Sampler.NEVER);
    try {
        int bytesRead = stream.getChannel().read(buf);
        if (bytesRead < 0) {
            // EOF
            return bytesRead;
        }
        while (buf.remaining() > 0) {
            int n = stream.getChannel().read(buf);
            if (n < 0) {
                // EOF
                return bytesRead;
            }
            bytesRead += n;
        }
        return bytesRead;
    } finally {
        scope.close();
    }
}

17 View Complete Implementation : DFSInotifyEventInputStream.java
Copyright Apache License 2.0
Author : yncxcw
/**
 * Returns the next batch of events in the stream, waiting indefinitely if
 * a new batch  is not immediately available.
 *
 * @throws IOException see {@link DFSInotifyEventInputStream#poll()}
 * @throws MissingEventsException see
 * {@link DFSInotifyEventInputStream#poll()}
 * @throws InterruptedException if the calling thread is interrupted
 */
public EventBatch take() throws IOException, InterruptedException, MissingEventsException {
    TraceScope scope = Trace.startSpan("inotifyTake", traceSampler);
    EventBatch next = null;
    try {
        int nextWaitMin = INITIAL_WAIT_MS;
        while ((next = poll()) == null) {
            // sleep for a random period between nextWaitMin and nextWaitMin * 2
            // to avoid stampedes at the NN if there are multiple clients
            int sleepTime = nextWaitMin + rng.nextInt(nextWaitMin);
            LOG.debug("take(): poll() returned null, sleeping for {} ms", sleepTime);
            Thread.sleep(sleepTime);
            // the maximum sleep is 2 minutes
            nextWaitMin = Math.min(60000, nextWaitMin * 2);
        }
    } finally {
        scope.close();
    }
    return next;
}

17 View Complete Implementation : Receiver.java
Copyright Apache License 2.0
Author : yncxcw
/**
 * Receive OP_COPY_BLOCK
 */
private void opCopyBlock(DataInputStream in) throws IOException {
    OpCopyBlockProto proto = OpCopyBlockProto.parseFrom(vintPrefixed(in));
    TraceScope traceScope = continueTraceSpan(proto.getHeader(), proto.getClass().getSimpleName());
    try {
        copyBlock(PBHelper.convert(proto.getHeader().getBlock()), PBHelper.convert(proto.getHeader().getToken()));
    } finally {
        if (traceScope != null)
            traceScope.close();
    }
}

17 View Complete Implementation : BlockSender.java
Copyright Apache License 2.0
Author : yncxcw
/**
 * sendBlock() is used to read block and its metadata and stream the data to
 * either a client or to another datanode.
 *
 * @param out  stream to which the block is written
 * @param baseStream optional. If non-null, <code>out</code> is assumed to
 *        be a wrapper over this stream. This enables optimizations for
 *        sending the data, e.g.
 *        {@link SocketOutputStream#transferToFully(FileChannel,
 *        long, int)}.
 * @param throttler for sending data.
 * @return total bytes read, including checksum data.
 */
long sendBlock(DataOutputStream out, OutputStream baseStream, DataTransferThrottler throttler) throws IOException {
    TraceScope scope = Trace.startSpan("sendBlock_" + block.getBlockId(), Sampler.NEVER);
    try {
        return doSendBlock(out, baseStream, throttler);
    } finally {
        scope.close();
    }
}

17 View Complete Implementation : TestTracing.java
Copyright Apache License 2.0
Author : yncxcw
public void writeWithTracing() throws Exception {
    long startTime = System.currentTimeMillis();
    TraceScope ts = Trace.startSpan("testWriteTraceHooks", Sampler.ALWAYS);
    writeTestFile("testWriteTraceHooks.dat");
    long endTime = System.currentTimeMillis();
    ts.close();
    String[] expectedSpanNames = { "testWriteTraceHooks", "org.apache.hadoop.hdfs.protocol.ClientProtocol.create", "ClientNamenodeProtocol#create", "org.apache.hadoop.hdfs.protocol.ClientProtocol.fsync", "ClientNamenodeProtocol#fsync", "org.apache.hadoop.hdfs.protocol.ClientProtocol.complete", "ClientNamenodeProtocol#complete", "newStreamForCreate", "DFSOutputStream#writeChunk", "DFSOutputStream#close", "dataStreamer", "OpWriteBlockProto", "org.apache.hadoop.hdfs.protocol.ClientProtocol.addBlock", "ClientNamenodeProtocol#addBlock" };
    assertSpanNamesFound(expectedSpanNames);
    // The trace should last about the same amount of time as the test
    Map<String, List<Span>> map = SetSpanReceiver.SetHolder.getMap();
    Span s = map.get("testWriteTraceHooks").get(0);
    Assert.assertNotNull(s);
    long spanStart = s.getStartTimeMillis();
    long spanEnd = s.getStopTimeMillis();
    // Spans homed in the top trace should have the same trace id.
    // Spans having multiple parents (e.g. "dataStreamer" added by HDFS-7054)
    // and children of them are exception.
    String[] spansInTopTrace = { "testWriteTraceHooks", "org.apache.hadoop.hdfs.protocol.ClientProtocol.create", "ClientNamenodeProtocol#create", "org.apache.hadoop.hdfs.protocol.ClientProtocol.fsync", "ClientNamenodeProtocol#fsync", "org.apache.hadoop.hdfs.protocol.ClientProtocol.complete", "ClientNamenodeProtocol#complete", "newStreamForCreate", "DFSOutputStream#writeChunk", "DFSOutputStream#close" };
    for (String desc : spansInTopTrace) {
        for (Span span : map.get(desc)) {
            Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
        }
    }
    SetSpanReceiver.SetHolder.spans.clear();
}

17 View Complete Implementation : TestTracing.java
Copyright Apache License 2.0
Author : yncxcw
public void readWithTracing() throws Exception {
    String fileName = "testReadTraceHooks.dat";
    writeTestFile(fileName);
    long startTime = System.currentTimeMillis();
    TraceScope ts = Trace.startSpan("testReadTraceHooks", Sampler.ALWAYS);
    readTestFile(fileName);
    ts.close();
    long endTime = System.currentTimeMillis();
    String[] expectedSpanNames = { "testReadTraceHooks", "org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations", "ClientNamenodeProtocol#getBlockLocations", "OpReadBlockProto" };
    assertSpanNamesFound(expectedSpanNames);
    // The trace should last about the same amount of time as the test
    Map<String, List<Span>> map = SetSpanReceiver.SetHolder.getMap();
    Span s = map.get("testReadTraceHooks").get(0);
    Assert.assertNotNull(s);
    long spanStart = s.getStartTimeMillis();
    long spanEnd = s.getStopTimeMillis();
    Assert.assertTrue(spanStart - startTime < 100);
    Assert.assertTrue(spanEnd - endTime < 100);
    // There should only be one trace id as it should all be homed in the
    // top trace.
    for (Span span : SetSpanReceiver.SetHolder.spans.values()) {
        Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
    }
    SetSpanReceiver.SetHolder.spans.clear();
}

16 View Complete Implementation : FindOfflineTablets.java
Copyright Apache License 2.0
Author : apache
public static void main(String[] args) throws Exception {
    ServerUtilOpts opts = new ServerUtilOpts();
    try (TraceScope clientSpan = opts.parseArgsAndTrace(FindOfflineTablets.class.getName(), args)) {
        ServerContext context = opts.getServerContext();
        findOffline(context, null);
    }
}

16 View Complete Implementation : RandomWriter.java
Copyright Apache License 2.0
Author : apache
public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    opts.principal = "root";
    try (TraceScope clientSpan = opts.parseArgsAndTrace(RandomWriter.class.getName(), args)) {
        long start = System.currentTimeMillis();
        Properties clientProps = opts.getClientProps();
        String principal = ClientProperty.AUTH_PRINCIPAL.getValue(clientProps);
        log.info("starting at {} for user {}", start, principal);
        try (AccumuloClient accumuloClient = Accumulo.newClient().from(clientProps).build();
            BatchWriter bw = accumuloClient.createBatchWriter(opts.tableName)) {
            log.info("Writing {} mutations...", opts.count);
            bw.addMutations(new RandomMutationGenerator(opts.count));
        } catch (Exception e) {
            log.error("{}", e.getMessage(), e);
            throw e;
        }
        long stop = System.currentTimeMillis();
        log.info("stopping at {}", stop);
        log.info("elapsed: {}", (((double) stop - (double) start) / 1000.0));
    }
}

16 View Complete Implementation : VerifyTabletAssignments.java
Copyright Apache License 2.0
Author : apache
public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    try (TraceScope clientSpan = opts.parseArgsAndTrace(VerifyTabletAssignments.class.getName(), args)) {
        try (AccumuloClient client = Accumulo.newClient().from(opts.getClientProps()).build()) {
            for (String table : client.tableOperations().list()) checkTable((ClientContext) client, opts, table, null);
        }
    }
}

16 View Complete Implementation : ReplicationDriver.java
Copyright Apache License 2.0
Author : apache
@Override
public void run() {
    ProbabilitySampler sampler = TraceUtil.probabilitySampler(conf.getFraction(Property.REPLICATION_TRACE_PERCENT));
    long millisToWait = conf.getTimeInMillis(Property.REPLICATION_DRIVER_DELAY);
    log.debug("Waiting {}ms before starting main replication loop", millisToWait);
    UtilWaitThread.sleep(millisToWait);
    log.debug("Starting replication loop");
    while (master.stillMaster()) {
        if (workMaker == null) {
            client = master.getContext();
            statusMaker = new StatusMaker(client, master.getFileSystem());
            workMaker = new WorkMaker(master.getContext(), client);
            finishedWorkUpdater = new FinishedWorkUpdater(client);
            rcrr = new RemoveCompleteReplicationRecords(client);
        }
        try (TraceScope replicationDriver = Trace.startSpan("masterReplicationDriver", sampler)) {
            // Make status markers from replication records in metadata, removing entries in
            // metadata which are no longer needed (closed records)
            // This will end up creating the replication table too
            try {
                statusMaker.run();
            } catch (Exception e) {
                log.error("Caught Exception trying to create Replication status records", e);
            }
            // Tell the work maker to make work
            try {
                workMaker.run();
            } catch (Exception e) {
                log.error("Caught Exception trying to create Replication work records", e);
            }
            // Update the status records from the work records
            try {
                finishedWorkUpdater.run();
            } catch (Exception e) {
                log.error("Caught Exception trying to update Replication records using finished work records", e);
            }
            // Clean up records we no longer need.
            // It must be running at the same time as the StatusMaker or WorkMaker
            // So it's important that we run these sequentially and not concurrently
            try {
                rcrr.run();
            } catch (Exception e) {
                log.error("Caught Exception trying to remove finished Replication records", e);
            }
        }
        // Sleep for a bit
        long sleepMillis = conf.getTimeInMillis(Property.MASTER_REPLICATION_SCAN_INTERVAL);
        log.debug("Sleeping for {}ms before re-running", sleepMillis);
        try {
            Thread.sleep(sleepMillis);
        } catch (InterruptedException e) {
            log.error("Interrupted while sleeping", e);
        }
    }
}

16 View Complete Implementation : MemcachedBlockCache.java
Copyright Apache License 2.0
Author : fengchen8086
@Override
public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, boolean updateCacheMetrics) {
    // Assume that nothing is in the block cache
    HFileBlock result = null;
    try (TraceScope traceScope = Trace.startSpan("MemcachedBlockCache.getBlock")) {
        result = client.get(cacheKey.toString(), tc);
    } catch (Exception e) {
        // Catch a pretty broad set of exceptions to limit any changes in the memcached client
        // and how it handles failures from leaking into the read path.
        if (LOG.isDebugEnabled()) {
            LOG.debug("Exception pulling from memcached [ " + cacheKey.toString() + " ]. Treating as a miss.", e);
        }
        result = null;
    } finally {
        // Update stats if this request doesn't have it turned off 100% of the time
        if (updateCacheMetrics) {
            if (result == null) {
                cacheStats.miss(caching, cacheKey.isPrimary());
            } else {
                cacheStats.hit(caching, cacheKey.isPrimary());
            }
        }
    }
    return result;
}

16 View Complete Implementation : IntegrationTestSendTraceRequests.java
Copyright Apache License 2.0
Author : fengchen8086
private LinkedBlockingQueue<Long> insertData() throws IOException, InterruptedException {
    LinkedBlockingQueue<Long> rowKeys = new LinkedBlockingQueue<Long>(25000);
    BufferedMutator ht = util.getConnection().getBufferedMutator(this.tableName);
    byte[] value = new byte[300];
    for (int x = 0; x < 5000; x++) {
        TraceScope traceScope = Trace.startSpan("insertData", Sampler.ALWAYS);
        try {
            for (int i = 0; i < 5; i++) {
                long rk = random.nextLong();
                rowKeys.add(rk);
                Put p = new Put(Bytes.toBytes(rk));
                for (int y = 0; y < 10; y++) {
                    random.nextBytes(value);
                    p.add(familyName, Bytes.toBytes(random.nextLong()), value);
                }
                ht.mutate(p);
            }
            if ((x % 1000) == 0) {
                admin.flush(tableName);
            }
        } finally {
            traceScope.close();
        }
    }
    admin.flush(tableName);
    return rowKeys;
}

16 View Complete Implementation : BlockReaderLocal.java
Copyright Apache License 2.0
Author : yncxcw
/**
 * Read from the block file into a buffer.
 *
 * This function overwrites checksumBuf.  It will increment dataPos.
 *
 * @param buf   The buffer to read into.  May be dataBuf.
 *              The position and limit of this buffer should be set to
 *              multiples of the checksum size.
 * @param canSkipChecksum  True if we can skip checksumming.
 *
 * @return      Total bytes read.  0 on EOF.
 */
private synchronized int fillBuffer(ByteBuffer buf, boolean canSkipChecksum) throws IOException {
    TraceScope scope = Trace.startSpan("BlockReaderLocal#fillBuffer(" + block.getBlockId() + ")", Sampler.NEVER);
    try {
        int total = 0;
        long startDataPos = dataPos;
        int startBufPos = buf.position();
        while (buf.hasRemaining()) {
            int nRead = dataIn.read(buf, dataPos);
            if (nRead < 0) {
                break;
            }
            dataPos += nRead;
            total += nRead;
        }
        if (canSkipChecksum) {
            freeChecksumBufIfExists();
            return total;
        }
        if (total > 0) {
            try {
                buf.limit(buf.position());
                buf.position(startBufPos);
                createChecksumBufIfNeeded();
                int checksumsNeeded = (total + bytesPerChecksum - 1) / bytesPerChecksum;
                checksumBuf.clear();
                checksumBuf.limit(checksumsNeeded * checksumSize);
                long checksumPos = BlockMetadataHeader.getHeaderSize() + ((startDataPos / bytesPerChecksum) * checksumSize);
                while (checksumBuf.hasRemaining()) {
                    int nRead = checksumIn.read(checksumBuf, checksumPos);
                    if (nRead < 0) {
                        throw new IOException("Got unexpected checksum file EOF at " + checksumPos + ", block file position " + startDataPos + " for " + "block " + block + " of file " + filename);
                    }
                    checksumPos += nRead;
                }
                checksumBuf.flip();
                checksum.verifyChunkedSums(buf, checksumBuf, filename, startDataPos);
            } finally {
                buf.position(buf.limit());
            }
        }
        return total;
    } finally {
        scope.close();
    }
}