org.apache.hadoop.hbase.regionserver.HRegion - Java examples

Here are examples of the Java API org.apache.hadoop.hbase.regionserver.HRegion taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.
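For orientation, below is a minimal, self-contained sketch of the HRegion calls these snippets rely on (create a region, put a row, flush the memstore, close). It assumes the 0.94-era HBase API used throughout this page; the table name, column family, and on-disk path are illustrative only and not taken from any of the projects below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;

public class HRegionUsageSketch {

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical table descriptor with a single column family "f".
        HTableDescriptor htd = new HTableDescriptor(Bytes.toBytes("demo"));
        htd.addFamily(new HColumnDescriptor("f"));
        HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
        // Create the region under an illustrative local directory.
        HRegion region = HRegion.createHRegion(info, new Path("/tmp/hregion-demo"), conf, htd);
        try {
            // Write one row directly against the region.
            Put put = new Put(Bytes.toBytes("row1"));
            put.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
            region.put(put);
            // Flush the memstore to a store file, as several tests below do.
            region.flushcache();
        } finally {
            // Release the region's resources.
            region.close();
        }
    }
}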

155 Examples

19 View Complete Implementation : IndexWALObserver.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
@Override
public boolean preWALWrite(ObserverContext<WALCoprocessorEnvironment> ctx, HRegionInfo info, HLogKey logKey, WALEdit logEdit) throws IOException {
    String tableNameStr = info.getTableNameAsString();
    if (IndexUtils.isCatalogTable(info.getTableName()) || IndexUtils.isIndexTable(tableNameStr)) {
        return true;
    }
    List<IndexSpecification> indices = indexManager.getIndicesForTable(tableNameStr);
    if (indices != null && !indices.isEmpty()) {
        LOG.trace("Entering preWALWrite for the table " + tableNameStr);
        String indexTableName = IndexUtils.getIndexTableName(tableNameStr);
        IndexEdits iEdits = IndexRegionObserver.threadLocal.get();
        WALEdit indexWALEdit = iEdits.getWALEdit();
        // This size will be 0 when none of the Mutations to the user table are to be indexed,
        // or when write to WAL is disabled for the Mutations.
        if (indexWALEdit.getKeyValues().size() == 0) {
            return true;
        }
        LOG.trace("Adding indexWALEdits into WAL for table " + tableNameStr);
        HRegion indexRegion = iEdits.getRegion();
        // TS in all KVs within WALEdit will be the same. So considering the 1st one.
        Long time = indexWALEdit.getKeyValues().get(0).getTimestamp();
        ctx.getEnvironment().getWAL().appendNoSync(indexRegion.getRegionInfo(), Bytes.toBytes(indexTableName), indexWALEdit, logKey.getClusterId(), time, indexRegion.getTableDesc());
        LOG.trace("Exiting preWALWrite for the table " + tableNameStr);
    }
    return true;
}

19 View Complete Implementation : BaseRegionObserver.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
@Override
public void postSplit(ObserverContext<RegionCoprocessorEnvironment> e, HRegion l, HRegion r) throws IOException {
}

19 View Complete Implementation : TestGlobalMemStoreSize.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
/**
 * Flush and log stats on flush
 * @param r
 * @param server
 * @throws IOException
 */
private void flush(final HRegion r, final HRegionServer server) throws IOException {
    LOG.info("Flush " + r.toString() + " on " + server.getServerName() + ", " + r.flushcache() + ", size=" + server.getRegionServerAccounting().getGlobalMemstoreSize());
}

19 View Complete Implementation : OpenRegionHandler.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
void cleanupFailedOpen(final HRegion region) throws IOException {
    if (region != null)
        region.close();
}

19 View Complete Implementation : OpenRegionHandler.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
/**
 * Update ZK, ROOT or META.  This can take a while if for example the
 * .META. is not available -- if server hosting .META. crashed and we are
 * waiting on it to come back -- so run in a thread and keep updating znode
 * state meantime so master doesn't timeout our region-in-transition.
 * Caller must cleanup region if this fails.
 */
boolean updateMeta(final HRegion r) {
    if (this.server.isStopped() || this.rsServices.isStopping()) {
        return false;
    }
    // Object we do wait/notify on.  Make it boolean.  If set, we're done.
    // Else, wait.
    final AtomicBoolean signaller = new AtomicBoolean(false);
    PostOpenDeployTasksThread t = new PostOpenDeployTasksThread(r, this.server, this.rsServices, signaller);
    t.start();
    int assignmentTimeout = this.server.getConfiguration().getInt("hbase.master.assignment.timeoutmonitor.period", 10000);
    // Total timeout for meta edit.  If we fail adding the edit then close out
    // the region and let it be assigned elsewhere.
    long timeout = assignmentTimeout * 10;
    long now = System.currentTimeMillis();
    long endTime = now + timeout;
    // Let our period at which we update OPENING state to be 1/3rd of the
    // regions-in-transition timeout period.
    long period = Math.max(1, assignmentTimeout / 3);
    long lastUpdate = now;
    boolean tickleOpening = true;
    while (!signaller.get() && t.isAlive() && !this.server.isStopped() && !this.rsServices.isStopping() && (endTime > now)) {
        long elapsed = now - lastUpdate;
        if (elapsed > period) {
            // Only tickle OPENING if postOpenDeployTasks is taking some time.
            lastUpdate = now;
            tickleOpening = tickleOpening("post_open_deploy");
        }
        synchronized (signaller) {
            try {
                signaller.wait(period);
            } catch (InterruptedException e) {
            // Go to the loop check.
            }
        }
        now = System.currentTimeMillis();
    }
    // Is thread still alive?  We may have left above loop because server is
    // stopping or we timed out the edit.  If so, interrupt it.
    if (t.isAlive()) {
        if (!signaller.get()) {
            // Thread still running; interrupt
            LOG.debug("Interrupting thread " + t);
            t.interrupt();
        }
        try {
            t.join();
        } catch (InterruptedException ie) {
            LOG.warn("Interrupted joining " + r.getRegionInfo().getRegionNamereplacedtring(), ie);
            Thread.currentThread().interrupt();
        }
    }
    // Was there an exception opening the region?  This should trigger on
    // InterruptedException too.  If so, we failed.  Even if tickle opening fails
    // then it is a failure.
    return ((!Thread.interrupted() && t.getException() == null) && tickleOpening);
}

19 View Complete Implementation : TestLogRolling.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
/**
 * Tests that logs are deleted
 * @throws IOException
 * @throws FailedLogCloseException
 */
@Test
public void testLogRolling() throws FailedLogCloseException, IOException {
    this.tableName = getName();
    startAndWriteData();
    LOG.info("after writing there are " + log.getNumLogFiles() + " log files");
    // flush all regions
    List<HRegion> regions = new ArrayList<HRegion>(server.getOnlineRegionsLocalContext());
    for (HRegion r : regions) {
        r.flushcache();
    }
    // Now roll the log
    log.rollWriter();
    int count = log.getNumLogFiles();
    LOG.info("after flushing all regions and rolling logs there are " + log.getNumLogFiles() + " log files");
    assertTrue(("actual count: " + count), count <= 2);
}

19 View Complete Implementation : HFileArchiveTestingUtil.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
/**
 * Helper method to get the archive directory for the specified region
 * @param conf {@link Configuration} to check for the name of the archive directory
 * @param region region that is being archived
 * @return {@link Path} to the archive directory for the given region
 */
public static Path getRegionArchiveDir(Configuration conf, HRegion region) {
    return HFileArchiveUtil.getRegionArchiveDir(conf, region.getTableDir(), region.getRegionDir());
}

19 View Complete Implementation : HFileArchiveTestingUtil.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
/**
 * Helper method to get the store archive directory for the specified region
 * @param conf {@link Configuration} to check for the name of the archive directory
 * @param region region that is being archived
 * @param store store that is archiving files
 * @return {@link Path} to the store archive directory for the given region
 */
public static Path getStoreArchivePath(Configuration conf, HRegion region, Store store) {
    return HFileArchiveUtil.getStoreArchivePath(conf, region, store.getFamily().getName());
}

19 View Complete Implementation : MiniHBaseCluster.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
/**
 * Call flushCache on all regions on all participating regionservers.
 * @throws IOException
 */
public void flushcache() throws IOException {
    for (JVMClusterUtil.RegionServerThread t : this.hbaseCluster.getRegionServers()) {
        for (HRegion r : t.getRegionServer().getOnlineRegionsLocalContext()) {
            r.flushcache();
        }
    }
}

19 View Complete Implementation : HBaseTestCase.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
/**
 * Add content to region <code>r</code> on the passed column
 * <code>column</code>.
 * Adds data of the form 'aaa', 'aab', etc., where key and value are the same.
 * @param r
 * @param columnFamily
 * @param column
 * @throws IOException
 * @return count of what we added.
 */
public static long addContent(final HRegion r, final byte[] columnFamily, final byte[] column) throws IOException {
    byte[] startKey = r.getRegionInfo().getStartKey();
    byte[] endKey = r.getRegionInfo().getEndKey();
    byte[] startKeyBytes = startKey;
    if (startKeyBytes == null || startKeyBytes.length == 0) {
        startKeyBytes = START_KEY_BYTES;
    }
    return addContent(new HRegionIncommon(r), Bytes.toString(columnFamily), Bytes.toString(column), startKeyBytes, endKey, -1);
}

19 View Complete Implementation : MetaUtils.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
/**
 * Scans a meta region. For every region found, calls the listener with
 * the HRegionInfo of the region.
 * TODO: Use Visitor rather than Listener pattern.  Allow multiple Visitors.
 * Use this everywhere we scan meta regions: e.g. in metascanners, in close
 * handling, etc.  Have it pass in the whole row, not just HRegionInfo.
 * <p>Use for reading meta only.  Does not close region when done.
 * Use {@link #getMetaRegion(HRegionInfo)} instead if writing.  Adds
 * meta region to list that will get a close on {@link #shutdown()}.
 *
 * @param metaRegionInfo HRegionInfo for meta region
 * @param listener method to be called for each meta region found
 * @throws IOException e
 */
public void scanMetaRegion(HRegionInfo metaRegionInfo, ScannerListener listener) throws IOException {
    // Open meta region so we can scan it
    HRegion metaRegion = openMetaRegion(metaRegionInfo);
    scanMetaRegion(metaRegion, listener);
}

19 View Complete Implementation : MiniHBaseCluster.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
/**
 * Get the location of the specified region
 * @param regionName Name of the region in bytes
 * @return Index into List of {@link MiniHBaseCluster#getRegionServerThreads()}
 * of HRS carrying .META.. Returns -1 if none found.
 */
public int getServerWith(byte[] regionName) {
    int index = -1;
    int count = 0;
    for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) {
        HRegionServer hrs = rst.getRegionServer();
        HRegion metaRegion = hrs.getOnlineRegion(regionName);
        if (metaRegion != null) {
            index = count;
            break;
        }
        count++;
    }
    return index;
}

19 View Complete Implementation : TestRegionObserverStacking.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
HRegion initHRegion(byte[] tableName, String callingMethod, Configuration conf, byte[]... families) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    for (byte[] family : families) {
        htd.addFamily(new HColumnDescriptor(family));
    }
    HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
    Path path = new Path(DIR + callingMethod);
    HRegion r = HRegion.createHRegion(info, path, conf, htd);
    // this following piece is a hack. currently a coprocessorHost
    // is secretly loaded at OpenRegionHandler. we don't really
    // start a region server here, so just manually create cphost
    // and set it to region.
    RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
    r.setCoprocessorHost(host);
    return r;
}

19 View Complete Implementation : MockRegionServerServices.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
@Override
public void addToOnlineRegions(HRegion r) {
    this.regions.put(r.getRegionInfo().getEncodedName(), r);
}

19 View Complete Implementation : BackwardSeekableRegionScanner.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
public class BackwardSeekableRegionScanner implements SeekAndReadRegionScanner {

    private ReInitializableRegionScanner delegator;

    private Scan scan;

    private HRegion hRegion;

    private byte[] startRow;

    private boolean closed = false;

    public BackwardSeekableRegionScanner(ReInitializableRegionScanner delegator, Scan scan, HRegion hRegion, byte[] startRow) {
        this.delegator = delegator;
        this.scan = scan;
        this.hRegion = hRegion;
        this.startRow = startRow;
    }

    Scan getScan() {
        return scan;
    }

    byte[] getStartRow() {
        return startRow;
    }

    // For testing.
    RegionScanner getDelegator() {
        return delegator;
    }

    @Override
    public HRegionInfo getRegionInfo() {
        return this.delegator.getRegionInfo();
    }

    @Override
    public boolean isFilterDone() {
        return this.delegator.isFilterDone();
    }

    @Override
    public synchronized void close() throws IOException {
        this.delegator.close();
        closed = true;
    }

    @Override
    public boolean isClosed() {
        return closed;
    }

    @Override
    public synchronized boolean next(List<KeyValue> results) throws IOException {
        return next(results, this.scan.getBatch());
    }

    @Override
    public boolean next(List<KeyValue> result, int limit) throws IOException {
        return false;
    }

    @Override
    public synchronized boolean reseek(byte[] row) throws IOException {
        return this.delegator.reseek(row);
    }

    @Override
    public void addSeekPoints(List<byte[]> seekPoints) {
        this.delegator.addSeekPoints(seekPoints);
    }

    @Override
    public boolean seekToNextPoint() throws IOException {
        return this.delegator.seekToNextPoint();
    }

    @Override
    public byte[] getLatestSeekpoint() {
        return this.delegator.getLatestSeekpoint();
    }

    @Override
    public boolean next(List<KeyValue> results, String metric) throws IOException {
        return next(results, this.scan.getBatch(), metric);
    }

    @Override
    public boolean next(List<KeyValue> result, int limit, String metric) throws IOException {
        boolean hasNext = false;
        try {
            if (this.delegator.isClosed())
                return false;
            hasNext = this.delegator.next(result, limit, metric);
        } catch (SeekUnderValueException e) {
            Scan newScan = new Scan(this.scan);
            // Start from the point where we got stopped because of seek backward
            newScan.setStartRow(getLatestSeekpoint());
            this.delegator.reInit(this.hRegion.getScanner(newScan));
            hasNext = next(result, limit, metric);
        }
        return hasNext;
    }

    @Override
    public long getMvccReadPoint() {
        return this.delegator.getMvccReadPoint();
    }

    @Override
    public boolean nextRaw(List<KeyValue> result, String metric) throws IOException {
        return next(result, metric);
    }

    @Override
    public boolean nextRaw(List<KeyValue> result, int limit, String metric) throws IOException {
        return next(result, limit, metric);
    }
}

19 View Complete Implementation : HBaseTestCase.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
/**
 * Add content to region <code>r</code> on the passed column
 * <code>column</code>.
 * Adds data of the form 'aaa', 'aab', etc., where key and value are the same.
 * @param r
 * @param columnFamily
 * @throws IOException
 * @return count of what we added.
 */
protected static long addContent(final HRegion r, final byte[] columnFamily) throws IOException {
    return addContent(r, columnFamily, null);
}

18 View Complete Implementation : OpenRegionHandler.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
/**
 * @return Instance of HRegion if successful open else null.
 */
HRegion openRegion() {
    HRegion region = null;
    try {
        // Instantiate the region.  This also periodically tickles our zk OPENING
        // state so master doesn't timeout this region in transition.
        region = HRegion.openHRegion(this.regionInfo, this.htd, this.rsServices.getWAL(this.regionInfo), this.server.getConfiguration(), this.rsServices, new CancelableProgressable() {

            public boolean progress() {
                // We may lose the znode ownership during the open.  Currently its
                // too hard interrupting ongoing region open.  Just let it complete
                // and check we still have the znode after region open.
                return tickleOpening("open_region_progress");
            }
        });
    } catch (Throwable t) {
        // We failed open. Our caller will see the 'null' return value
        // and transition the node back to FAILED_OPEN. If that fails,
        // we rely on the Timeout Monitor in the master to reassign.
        LOG.error("Failed open of region=" + this.regionInfo.getRegionNameAsString() + ", starting to roll back the global memstore size.", t);
        // Decrease the global memstore size.
        if (this.rsServices != null) {
            RegionServerAccounting rsAccounting = this.rsServices.getRegionServerAccounting();
            if (rsAccounting != null) {
                rsAccounting.rollbackRegionReplayEditsSize(this.regionInfo.getRegionName());
            }
        }
    }
    return region;
}

18 View Complete Implementation : IndexRegionObserver.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
@Override
public void postBatchMutate(final ObserverContext<RegionCoprocessorEnvironment> ctx, final List<Mutation> mutations, WALEdit walEdit) {
    HTableDescriptor userTableDesc = ctx.getEnvironment().getRegion().getTableDesc();
    String tableName = userTableDesc.getNameAsString();
    if (IndexUtils.isCatalogTable(userTableDesc.getName()) || IndexUtils.isIndexTable(tableName)) {
        return;
    }
    List<IndexSpecification> indices = indexManager.getIndicesForTable(tableName);
    if (indices == null || indices.isEmpty()) {
        LOG.trace("skipping postBatchMutate for the table " + tableName + " as there are no indices");
        return;
    }
    LOG.trace("Entering postBatchMutate for the table " + tableName);
    IndexEdits indexEdits = threadLocal.get();
    List<Pair<Mutation, Integer>> indexMutations = indexEdits.getIndexMutations();
    if (indexMutations.size() == 0) {
        return;
    }
    HRegion hr = indexEdits.getRegion();
    LOG.trace("Updating index table " + hr.getRegionInfo().getTableNamereplacedtring());
    try {
        hr.batchMutateForIndex(indexMutations.toArray(new Pair[indexMutations.size()]));
    } catch (IOException e) {
        // TODO Can this happen? If so we need to revert the actual put
        // and make the op fail.
        LOG.error("Error putting data into the index region", e);
    }
    LOG.trace("Exiting postBatchMutate for the table " + tableName);
}

18 View Complete Implementation : MockRegionServerServices.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
@Override
public void postOpenDeployTasks(HRegion r, CatalogTracker ct, boolean daughter) throws KeeperException, IOException {
    addToOnlineRegions(r);
}

18 View Complete Implementation : SimpleRegionObserver.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
@Override
public void postSplit(ObserverContext<RegionCoprocessorEnvironment> c, HRegion l, HRegion r) {
    hadPostSplit = true;
}

18 View Complete Implementation : CloseRegionHandler.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
/**
 * Transition ZK node to CLOSED
 * @param expectedVersion
 * @return If the state is set successfully
 */
private boolean setClosedState(final int expectedVersion, final HRegion region) {
    try {
        if (ZKAssign.transitionNodeClosed(server.getZooKeeper(), regionInfo, server.getServerName(), expectedVersion) == FAILED) {
            LOG.warn("Completed the CLOSE of a region but when transitioning from " + " CLOSING to CLOSED got a version mismatch, someone else clashed " + "so now unassigning");
            region.close();
            return false;
        }
    } catch (NullPointerException e) {
        // I've seen NPE when table was deleted while close was running in unit tests.
        LOG.warn("NPE during close -- catching and continuing...", e);
        return false;
    } catch (KeeperException e) {
        LOG.error("Failed transitioning node from CLOSING to CLOSED", e);
        return false;
    } catch (IOException e) {
        LOG.error("Failed to close region after failing to transition", e);
        return false;
    }
    return true;
}

18 View Complete Implementation : TTLStoreScanner.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
private boolean isUserTableRegionAvailable(String indexTableName, HRegion indexRegion, HRegionServer rs) {
    Collection<HRegion> userRegions = rs.getOnlineRegions(Bytes.toBytes(this.actualTableName));
    for (HRegion userRegion : userRegions) {
        // TODO is the start key check enough? Maybe we can check for the
        // possibility of N-1 mapping?
        if (Bytes.equals(userRegion.getStartKey(), indexRegion.getStartKey())) {
            return true;
        }
    }
    return false;
}

18 View Complete Implementation : IndexRegionObserver.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
private void acquireLockOnIndexRegion(String tableName, HRegion userRegion, HRegionServer rs) throws IOException {
    HRegion indexRegion = getIndexTableRegion(tableName, userRegion, rs);
    indexRegion.checkResources();
    indexRegion.startRegionOperation();
}

18 View Complete Implementation : HLogPerformanceEvaluation.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
private void closeRegion(final HRegion region) throws IOException {
    if (region != null) {
        region.close();
        HLog wal = region.getLog();
        if (wal != null)
            wal.close();
    }
}

18 View Complete Implementation : HBaseTestCase.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
protected HRegion openClosedRegion(final HRegion closedRegion) throws IOException {
    HRegion r = new HRegion(closedRegion);
    r.initialize();
    return r;
}

18 View Complete Implementation : IndexRegionObserver.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
private void prepareIndexMutations(List<IndexSpecification> indices, HRegion userRegion, Mutation mutation, String tableName, HRegion indexRegion) throws IOException {
    IndexEdits indexEdits = threadLocal.get();
    if (mutation instanceof Put) {
        for (IndexSpecification index : indices) {
            // Handle each of the index
            Mutation indexPut = IndexUtils.prepareIndexPut((Put) mutation, index, indexRegion);
            if (null != indexPut) {
                // This mutation can be null when the user table mutation does not
                // contain all of the indexed col values.
                indexEdits.add(indexPut);
            }
        }
    } else if (mutation instanceof Delete) {
        Collection<? extends Mutation> indexDeletes = prepareIndexDeletes((Delete) mutation, userRegion, indices, indexRegion);
        indexEdits.addAll(indexDeletes);
    } else {
    // TODO : Log or throw exception
    }
}

18 View Complete Implementation : HFileArchiveUtil.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
/**
 * Get the directory to archive a store directory
 * @param conf {@link Configuration} to read for the archive directory name
 * @param region parent region information under which the store currently
 *          lives
 * @param family name of the family in the store
 * @return {@link Path} to the directory to archive the given store or
 *         <tt>null</tt> if it should not be archived
 */
public static Path getStoreArchivePath(Configuration conf, HRegion region, byte[] family) {
    return getStoreArchivePath(conf, region.getRegionInfo(), region.getTableDir(), family);
}

18 View Complete Implementation : MiniHBaseCluster.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
/**
 * Call compactStores on all regions on all participating regionservers.
 * @throws IOException
 */
public void compact(boolean major) throws IOException {
    for (JVMClusterUtil.RegionServerThread t : this.hbaseCluster.getRegionServers()) {
        for (HRegion r : t.getRegionServer().getOnlineRegionsLocalContext()) {
            r.compactStores(major);
        }
    }
}

18 View Complete Implementation : TestHLogBench.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
private void go() throws IOException, InterruptedException {
    long start = System.currentTimeMillis();
    log("Running TestHLogBench with " + numThreads + " threads each doing " + numIterationsPerThread + " HLog appends " + (appendNoSync ? "nosync" : "sync") + " at rootDir " + regionRootDir);
    // Mock an HRegion
    byte[] tableName = Bytes.toBytes("table");
    byte[][] familyNames = new byte[][] { FAMILY };
    HTableDescriptor htd = new HTableDescriptor();
    htd.addFamily(new HColumnDescriptor(Bytes.toBytes("f1")));
    HRegion region = mockRegion(tableName, familyNames, regionRootDir);
    HLog hlog = region.getLog();
    // Spin up N threads to each perform M log operations
    LogWriter[] incrementors = new LogWriter[numThreads];
    for (int i = 0; i < numThreads; i++) {
        incrementors[i] = new LogWriter(region, tableName, hlog, i, numIterationsPerThread, appendNoSync);
        incrementors[i].start();
    }
    // Wait for threads to finish
    for (int i = 0; i < numThreads; i++) {
        // log("Waiting for #" + i + " to finish");
        incrementors[i].join();
    }
    // Output statistics
    long totalOps = numThreads * numIterationsPerThread;
    log("Operations per second " + ((totalOps * 1000L) / totalTime));
    log("Average latency in ms " + ((totalTime * 1000L) / totalOps));
}

18 View Complete Implementation : OpenRegionHandler.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
/**
 * @param r Region we're working on.
 * @return whether znode is successfully transitioned to OPENED state.
 * @throws IOException
 */
private boolean transitionToOpened(final HRegion r) throws IOException {
    boolean result = false;
    HRegionInfo hri = r.getRegionInfo();
    final String name = hri.getRegionNameAsString();
    // Finally, Transition ZK node to OPENED
    try {
        if (ZKAssign.transitionNodeOpened(this.server.getZooKeeper(), hri, this.server.getServerName(), this.version) == -1) {
            LOG.warn("Completed the OPEN of region " + name + " but when transitioning from " + " OPENING to OPENED got a version mismatch, someone else clashed " + "so now unassigning -- closing region on server: " + this.server.getServerName());
        } else {
            LOG.debug("region transitioned to opened in zookeeper: " + r.getRegionInfo() + ", server: " + this.server.getServerName());
            result = true;
        }
    } catch (KeeperException e) {
        LOG.error("Failed transitioning node " + name + " from OPENING to OPENED -- closing region", e);
    }
    return result;
}

18 View Complete Implementation : IndexRegionObserver.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
@Override
public void postClose(ObserverContext<RegionCoprocessorEnvironment> e, boolean abortRequested) {
    HRegion region = e.getEnvironment().getRegion();
    byte[] tableName = region.getRegionInfo().getTableName();
    if (IndexUtils.isCatalogTable(tableName) || IndexUtils.isIndexTable(tableName)) {
        return;
    }
    if (splitThreadLocal.get() == null) {
        this.indexManager.decrementRegionCount(Bytes.toString(tableName), true);
    } else {
        this.indexManager.decrementRegionCount(Bytes.toString(tableName), false);
    }
}

18 View Complete Implementation : HFileArchiveTestingUtil.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
public static Path getStoreArchivePath(HBaseTestingUtility util, String tableName, byte[] storeName) throws IOException {
    byte[] table = Bytes.toBytes(tableName);
    // get the RS and region serving our table
    List<HRegion> servingRegions = util.getHBaseCluster().getRegions(table);
    HRegion region = servingRegions.get(0);
    // check that we actually have some store files that were archived
    Store store = region.getStore(storeName);
    return HFileArchiveTestingUtil.getStoreArchivePath(util.getConfiguration(), region, store);
}

18 View Complete Implementation : TTLStoreScanner.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
@Override
public boolean next(List<KeyValue> result, int limit) throws IOException {
    boolean next = this.delegate.next(result, limit);
    // Ideally here we should get only one result (i.e. only one kv)
    for (Iterator<KeyValue> iterator = result.iterator(); iterator.hasNext(); ) {
        KeyValue kv = (KeyValue) iterator.next();
        byte[] indexNameInBytes = formIndexNameFromKV(kv);
        // From the indexname get the TTL
        IndexSpecification index = IndexManager.getInstance().getIndex(this.actualTableName, indexNameInBytes);
        HRegion hRegion = store.getHRegion();
        if (this.type == ScanType.MAJOR_COMPACT) {
            if (this.userRegionAvailable == null) {
                this.userRegionAvailable = isUserTableRegionAvailable(hRegion.getTableDesc().getNameAsString(), hRegion, this.rs);
            }
            // If index is null, the index has probably been dropped through drop index.
            // If the user region is not available, it may be because the user region has not yet
            // opened but the index region has opened.
            // It's better not to skip the kv here, and to write it during this current compaction.
            // A later compaction will skip it anyway. This may lead to false positives,
            // but that is better than data loss.
            if (null == index && userRegionAvailable) {
                // Remove the dropped index from the results
                LOG.info("The index has been removed for the kv " + kv);
                iterator.remove();
                continue;
            }
        }
        if (index != null) {
            boolean ttlExpired = this.ttlExpiryChecker.checkIfTTLExpired(index.getTTL(), kv.getTimestamp());
            if (ttlExpired) {
                result.clear();
                LOG.info("The ttl has expired for the kv " + kv);
                return false;
            }
        }
    }
    return next;
}

18 View Complete Implementation : TestClassLoading.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
@Test
public // HBASE-3516: Test CP Class loading from local file system
void testClassLoadingFromLocalFS() throws Exception {
    File jarFile = buildCoprocessorJar(cpName3);
    // create a table that references the jar
    HTableDescriptor htd = new HTableDescriptor(cpName3);
    htd.addFamily(new HColumnDescriptor("test"));
    htd.setValue("COPROCESSOR$1", getLocalPath(jarFile) + "|" + cpName3 + "|" + Coprocessor.PRIORITY_USER);
    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
    admin.createTable(htd);
    waitForTable(htd.getName());
    // verify that the coprocessor was loaded
    boolean found = false;
    MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
    for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
        if (region.getRegionNameAsString().startsWith(cpName3)) {
            found = (region.getCoprocessorHost().findCoprocessor(cpName3) != null);
        }
    }
    replacedertTrue("Clreplaced " + cpName3 + " was missing on a region", found);
}

17 View Complete Implementation : TestForceCacheImportantBlocks.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
private void writeTestData(HRegion region) throws IOException {
    for (int i = 0; i < NUM_ROWS; ++i) {
        Put put = new Put(Bytes.toBytes("row" + i));
        for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
            for (long ts = 1; ts < NUM_TIMESTAMPS_PER_COL; ++ts) {
                put.add(CF_BYTES, Bytes.toBytes("col" + j), ts, Bytes.toBytes("value" + i + "_" + j + "_" + ts));
            }
        }
        region.put(put);
        if ((i + 1) % ROWS_PER_HFILE == 0) {
            region.flushcache();
        }
    }
}

17 View Complete Implementation : TestDistributedLogSplitting.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
private void putData(HRegion region, byte[] startRow, int numRows, byte[] qf, byte[]... families) throws IOException {
    for (int i = 0; i < numRows; i++) {
        Put put = new Put(Bytes.add(startRow, Bytes.toBytes(i)));
        for (byte[] family : families) {
            put.add(family, qf, null);
        }
        region.put(put);
    }
}

17 View Complete Implementation : TestMergeTool.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
@Override
public void tearDown() throws Exception {
    super.tearDown();
    for (int i = 0; i < sourceRegions.length; i++) {
        HRegion r = regions[i];
        if (r != null) {
            r.close();
            r.getLog().closeAndDelete();
        }
    }
    TEST_UTIL.shutdownMiniCluster();
}

17 View Complete Implementation : MiniHBaseCluster.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
/**
 * Call flushCache on all regions of the specified table.
 * @throws IOException
 */
public void flushcache(byte[] tableName) throws IOException {
    for (JVMClusterUtil.RegionServerThread t : this.hbaseCluster.getRegionServers()) {
        for (HRegion r : t.getRegionServer().getOnlineRegionsLocalContext()) {
            if (Bytes.equals(r.getTableDesc().getName(), tableName)) {
                r.flushcache();
            }
        }
    }
}

17 View Complete Implementation : TestOpenedRegionHandler.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
private HRegion getRegionBeingServed(MiniHBaseCluster cluster, HRegionServer regionServer) {
    Collection<HRegion> onlineRegionsLocalContext = regionServer.getOnlineRegionsLocalContext();
    Iterator<HRegion> iterator = onlineRegionsLocalContext.iterator();
    HRegion region = null;
    while (iterator.hasNext()) {
        region = iterator.next();
        if (!region.getRegionInfo().isMetaTable()) {
            break;
        }
    }
    return region;
}

17 View Complete Implementation : OpenRegionHandler.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
@Override
public void process() throws IOException {
    boolean transitionToFailedOpen = false;
    boolean openSuccessful = false;
    try {
        final String name = regionInfo.getRegionNameAsString();
        if (this.server.isStopped() || this.rsServices.isStopping()) {
            return;
        }
        final String encodedName = regionInfo.getEncodedName();
        // Check that this region is not already online
        HRegion region = this.rsServices.getFromOnlineRegions(encodedName);
        // Open region.  After a successful open, failures in subsequent
        // processing needs to do a close as part of cleanup.
        region = openRegion();
        if (region == null) {
            tryTransitionToFailedOpen(regionInfo);
            transitionToFailedOpen = true;
            return;
        }
        boolean failed = true;
        if (tickleOpening("post_region_open")) {
            if (updateMeta(region)) {
                failed = false;
            }
        }
        if (failed || this.server.isStopped() || this.rsServices.isStopping()) {
            cleanupFailedOpen(region);
            tryTransitionToFailedOpen(regionInfo);
            transitionToFailedOpen = true;
            return;
        }
        if (!transitionToOpened(region)) {
            // If we fail to transition to opened, it's because of one of two cases:
            // (a) we lost our ZK lease
            // OR (b) someone else opened the region before us
            // In either case, we don't need to transition to FAILED_OPEN state.
            // In case (a), the Master will process us as a dead server. In case
            // (b) the region is already being handled elsewhere anyway.
            cleanupFailedOpen(region);
            transitionToFailedOpen = true;
            return;
        }
        // Successful region open, and add it to OnlineRegions
        this.rsServices.addToOnlineRegions(region);
        openSuccessful = true;
        // Done!  Successful region open
        LOG.debug("Opened " + name + " on server:" + this.server.getServerName());
    } finally {
        this.rsServices.removeFromRegionsInTransition(this.regionInfo);
        if (!openSuccessful && !transitionToFailedOpen) {
            tryTransitionToFailedOpen(regionInfo);
        }
    }
}

17 View Complete Implementation : MetaUtils.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
private HRegion openMetaRegion(HRegionInfo metaInfo) throws IOException {
    HRegion meta = HRegion.openHRegion(metaInfo, HTableDescriptor.META_TABLEDESC, getLog(), this.conf);
    meta.compactStores();
    return meta;
}

17 View Complete Implementation : SecureBulkLoadEndpoint.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
@Override
public boolean bulkLoadHFiles(final List<Pair<byte[], String>> familyPaths, final Token<?> userToken, final String bulkToken) throws IOException {
    User user = getActiveUser();
    final UserGroupInformation ugi = user.getUGI();
    if (userToken != null) {
        ugi.addToken(userToken);
    } else if (User.isSecurityEnabled()) {
        // we allow this to pass through in "simple" security mode
        // for mini cluster testing
        throw new DoNotRetryIOException("User token cannot be null");
    }
    HRegion region = env.getRegion();
    boolean bypass = false;
    if (region.getCoprocessorHost() != null) {
        bypass = region.getCoprocessorHost().preBulkLoadHFile(familyPaths);
    }
    boolean loaded = false;
    if (!bypass) {
        loaded = ugi.doAs(new PrivilegedAction<Boolean>() {

            @Override
            public Boolean run() {
                FileSystem fs = null;
                try {
                    Configuration conf = env.getConfiguration();
                    fs = FileSystem.get(conf);
                    for (Pair<byte[], String> el : familyPaths) {
                        Path p = new Path(el.getSecond());
                        LOG.debug("Setting permission for: " + p);
                        fs.setPermission(p, PERM_ALL_ACCESS);
                        Path stageFamily = new Path(bulkToken, Bytes.toString(el.getFirst()));
                        if (!fs.exists(stageFamily)) {
                            fs.mkdirs(stageFamily);
                            fs.setPermission(stageFamily, PERM_ALL_ACCESS);
                        }
                    }
                    // We call bulkLoadHFiles as requesting user
                    // To enable access prior to staging
                    return env.getRegion().bulkLoadHFiles(familyPaths, new SecureBulkLoadListener(fs, bulkToken));
                } catch (Exception e) {
                    LOG.error("Failed to complete bulk load", e);
                }
                return false;
            }
        });
    }
    if (region.getCoprocessorHost() != null) {
        loaded = region.getCoprocessorHost().postBulkLoadHFile(familyPaths, loaded);
    }
    return loaded;
}

17 View Complete Implementation : RestoreSnapshotHelper.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
/**
 * Clone region directory content from the snapshot info.
 *
 * Each region is encoded with the table name, so the cloned region will have
 * a different region name.
 *
 * Instead of copying the hfiles, an HFileLink is created.
 *
 * @param region {@link HRegion} cloned
 * @param snapshotRegionInfo
 */
private void cloneRegion(final HRegion region, final HRegionInfo snapshotRegionInfo) throws IOException {
    final Path snapshotRegionDir = new Path(snapshotDir, snapshotRegionInfo.getEncodedName());
    final Path regionDir = new Path(tableDir, region.getRegionInfo().getEncodedName());
    final String tableName = tableDesc.getNameAsString();
    SnapshotReferenceUtil.visitRegionStoreFiles(fs, snapshotRegionDir, new FSVisitor.StoreFileVisitor() {

        public void storeFile(final String region, final String family, final String hfile) throws IOException {
            LOG.info("Adding HFileLink " + hfile + " to table=" + tableName);
            Path familyDir = new Path(regionDir, family);
            restoreStoreFile(familyDir, snapshotRegionInfo, hfile);
        }
    });
}

17 View Complete Implementation : TestCoprocessorInterface.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
public void testSharedData() throws IOException {
    byte[] tableName = Bytes.toBytes("testtable");
    byte[][] families = { fam1, fam2, fam3 };
    Configuration hc = initSplit();
    HRegion region = initHRegion(tableName, getName(), hc, new Class<?>[] {}, families);
    for (int i = 0; i < 3; i++) {
        addContent(region, fam3);
        region.flushcache();
    }
    region.compactStores();
    byte[] splitRow = region.checkSplit();
    assertNotNull(splitRow);
    HRegion[] regions = split(region, splitRow);
    for (int i = 0; i < regions.length; i++) {
        regions[i] = reopenRegion(regions[i], CoprocessorImpl.class, CoprocessorII.class);
    }
    Coprocessor c = regions[0].getCoprocessorHost().findCoprocessor(CoprocessorImpl.class.getName());
    Coprocessor c2 = regions[0].getCoprocessorHost().findCoprocessor(CoprocessorII.class.getName());
    Object o = ((CoprocessorImpl) c).getSharedData().get("test1");
    Object o2 = ((CoprocessorII) c2).getSharedData().get("test2");
    assertNotNull(o);
    assertNotNull(o2);
    // two coprocessors get different sharedDatas
    assertFalse(((CoprocessorImpl) c).getSharedData() == ((CoprocessorII) c2).getSharedData());
    for (int i = 1; i < regions.length; i++) {
        c = regions[i].getCoprocessorHost().findCoprocessor(CoprocessorImpl.class.getName());
        c2 = regions[i].getCoprocessorHost().findCoprocessor(CoprocessorII.class.getName());
        // make sure that all coprocessors of a class have identical sharedDatas
        assertTrue(((CoprocessorImpl) c).getSharedData().get("test1") == o);
        assertTrue(((CoprocessorII) c2).getSharedData().get("test2") == o2);
    }
    // now have all Environments fail
    for (int i = 0; i < regions.length; i++) {
        try {
            Get g = new Get(regions[i].getStartKey());
            regions[i].get(g, null);
            fail();
        } catch (DoNotRetryIOException xc) {
        }
        assertNull(regions[i].getCoprocessorHost().findCoprocessor(CoprocessorII.class.getName()));
    }
    c = regions[0].getCoprocessorHost().findCoprocessor(CoprocessorImpl.class.getName());
    assertTrue(((CoprocessorImpl) c).getSharedData().get("test1") == o);
    c = c2 = null;
    // perform a GC
    System.gc();
    // reopen the region
    region = reopenRegion(regions[0], CoprocessorImpl.class, CoprocessorII.class);
    c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class.getName());
    // CPimpl is unaffected, still the same reference
    assertTrue(((CoprocessorImpl) c).getSharedData().get("test1") == o);
    c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class.getName());
    // new map and object created, hence the reference is different
    // hence the old entry was indeed removed by the GC and a new one has been created
    assertFalse(((CoprocessorII) c2).getSharedData().get("test2") == o2);
}

17 View Complete Implementation : AccessControlLists.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
/**
 * Returns {@code true} if the given region is part of the {@code _acl_}
 * metadata table.
 */
static boolean isAclRegion(HRegion region) {
    return Bytes.equals(ACL_TABLE_NAME, region.getTableDesc().getName());
}

17 View Complete Implementation : TestCoprocessorInterface.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
private HRegion[] split(final HRegion r, final byte[] splitRow) throws IOException {
    HRegion[] regions = new HRegion[2];
    SplitTransaction st = new SplitTransaction(r, splitRow);
    int i = 0;
    if (!st.prepare()) {
        // test fails.
        assertTrue(false);
    }
    try {
        Server mockServer = Mockito.mock(Server.class);
        when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());
        PairOfSameType<HRegion> daughters = st.execute(mockServer, null);
        for (HRegion each_daughter : daughters) {
            regions[i] = each_daughter;
            i++;
        }
    } catch (IOException ioe) {
        LOG.info("Split transaction of " + r.getRegionNamereplacedtring() + " failed:" + ioe.getMessage());
        replacedertTrue(false);
    } catch (RuntimeException e) {
        LOG.info("Failed rollback of failed split of " + r.getRegionNamereplacedtring() + e.getMessage());
    }
    assertTrue(i == 2);
    return regions;
}

17 View Complete Implementation : IndexUtils.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
public static Put prepareIndexPut(Put userPut, IndexSpecification index, HRegion indexRegion) throws IOException {
    byte[] indexRegionStartKey = indexRegion.getStartKey();
    return prepareIndexPut(userPut, index, indexRegionStartKey);
}

17 View Complete Implementation : TestClassLoading.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
@Test
public // HBASE-6308: Test CP classloader is the CoprocessorClassLoader
void testPrivateClassLoader() throws Exception {
    File jarFile = buildCoprocessorJar(cpName4);
    // create a table that references the jar
    HTableDescriptor htd = new HTableDescriptor(cpName4);
    htd.addFamily(new HColumnDescriptor("test"));
    htd.setValue("COPROCESSOR$1", getLocalPath(jarFile) + "|" + cpName4 + "|" + Coprocessor.PRIORITY_USER);
    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
    admin.createTable(htd);
    waitForTable(htd.getName());
    // verify that the coprocessor was loaded correctly
    boolean found = false;
    MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
    for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
        if (region.getRegionNameAsString().startsWith(cpName4)) {
            Coprocessor cp = region.getCoprocessorHost().findCoprocessor(cpName4);
            if (cp != null) {
                found = true;
                replacedertEquals("Clreplaced " + cpName4 + " was not loaded by CoprocessorClreplacedLoader", cp.getClreplaced().getClreplacedLoader().getClreplaced(), CoprocessorClreplacedLoader.clreplaced);
            }
        }
    }
    replacedertTrue("Clreplaced " + cpName4 + " was missing on a region", found);
}

17 View Complete Implementation : Merge.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
/*
   * Removes a region's meta information from the passed <code>meta</code>
   * region.
   *
   * @param meta META HRegion to be updated
   * @param regioninfo HRegionInfo of region to remove from <code>meta</code>
   *
   * @throws IOException
   */
private void removeRegionFromMeta(HRegion meta, HRegionInfo regioninfo) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Removing region: " + regioninfo + " from " + meta);
    }
    Delete delete = new Delete(regioninfo.getRegionName(), System.currentTimeMillis(), null);
    meta.delete(delete, null, true);
}

17 View Complete Implementation : MetaUtils.java
Copyright Apache License 2.0
Author : Huawei-Hadoop
/**
 * Closes catalog regions if open. Also closes and deletes the HLog. You
 * must call this method if you want to persist changes made during a
 * MetaUtils edit session.
 */
public void shutdown() {
    if (this.rootRegion != null) {
        try {
            this.rootRegion.close();
        } catch (IOException e) {
            LOG.error("closing root region", e);
        } finally {
            this.rootRegion = null;
        }
    }
    try {
        for (HRegion r : metaRegions.values()) {
            LOG.info("CLOSING META " + r.toString());
            r.close();
        }
    } catch (IOException e) {
        LOG.error("closing meta region", e);
    } finally {
        metaRegions.clear();
    }
    try {
        if (this.log != null) {
            this.log.rollWriter();
            this.log.closeAndDelete();
        }
    } catch (IOException e) {
        LOG.error("closing HLog", e);
    } finally {
        this.log = null;
    }
}