org.apache.hadoop.hbase.HColumnDescriptor - java examples

Here are examples of the Java API org.apache.hadoop.hbase.HColumnDescriptor taken from open source projects. By voting up the examples you can indicate which are most useful and appropriate.

155 Examples
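
Before the individual examples, here is a minimal sketch of how an HColumnDescriptor is typically built and attached to a table with the pre-2.0 client API used throughout the examples below; the "mytable" table and "cf" family names are placeholders.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.regionserver.BloomType;

public class CreateTableSketch {

    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Describe a column family: the examples on this page mostly tweak
            // exactly these kinds of per-family settings.
            HColumnDescriptor family = new HColumnDescriptor("cf");
            family.setMaxVersions(3);
            family.setBloomFilterType(BloomType.ROW);
            family.setBlockCacheEnabled(true);
            family.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);

            // Attach the family to a table descriptor and create the table.
            HTableDescriptor table = new HTableDescriptor(TableName.valueOf("mytable"));
            table.addFamily(family);
            admin.createTable(table);
        }
    }
}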

19 View Complete Implementation : TestReplicationAdminWithClusters.java
Copyright Apache License 2.0
Author : fengchen8086
@Test(timeout = 300000)
public void testDisableAndEnableReplication() throws Exception {
    adminExt.disableTableRep(tableName);
    HTableDescriptor table = admin1.getTableDescriptor(tableName);
    for (HColumnDescriptor fam : table.getColumnFamilies()) {
        assertEquals(fam.getScope(), HConstants.REPLICATION_SCOPE_LOCAL);
    }
    table = admin2.getTableDescriptor(tableName);
    for (HColumnDescriptor fam : table.getColumnFamilies()) {
        assertEquals(fam.getScope(), HConstants.REPLICATION_SCOPE_LOCAL);
    }
    adminExt.enableTableRep(tableName);
    table = admin1.getTableDescriptor(tableName);
    for (HColumnDescriptor fam : table.getColumnFamilies()) {
        assertEquals(fam.getScope(), HConstants.REPLICATION_SCOPE_GLOBAL);
    }
}

19 View Complete Implementation : WALPerformanceEvaluation.java
Copyright Apache License 2.0
Author : fengchen8086
private static HTableDescriptor createHTableDescriptor(final int regionNum, final int numFamilies) {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE_NAME + ":" + regionNum));
    for (int i = 0; i < numFamilies; ++i) {
        HColumnDescriptor colDef = new HColumnDescriptor(FAMILY_PREFIX + i);
        htd.addFamily(colDef);
    }
    return htd;
}

19 View Complete Implementation : TestBlocksRead.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * Callers must afterward call {@link HRegion#closeHRegion(HRegion)}
 * @param tableName
 * @param callingMethod
 * @param conf
 * @param family
 * @throws IOException
 * @return created and initialized region.
 */
private HRegion initHRegion(byte[] tableName, String callingMethod, HBaseConfiguration conf, String family) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    HColumnDescriptor familyDesc;
    for (int i = 0; i < BLOOM_TYPE.length; i++) {
        BloomType bloomType = BLOOM_TYPE[i];
        familyDesc = new HColumnDescriptor(family + "_" + bloomType).setBlocksize(1).setBloomFilterType(BLOOM_TYPE[i]);
        htd.addFamily(familyDesc);
    }
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    Path path = new Path(DIR + callingMethod);
    HRegion r = HRegion.createHRegion(info, path, conf, htd);
    blockCache = new CacheConfig(conf).getBlockCache();
    return r;
}

19 View Complete Implementation : TestFilterWrapper.java
Copyright Apache License 2.0
Author : fengchen8086
private static void createTable() {
    assertNotNull("HBaseAdmin is not initialized successfully.", admin);
    if (admin != null) {
        HTableDescriptor desc = new HTableDescriptor(name);
        HColumnDescriptor coldef = new HColumnDescriptor(Bytes.toBytes("f1"));
        desc.addFamily(coldef);
        try {
            admin.createTable(desc);
            assertTrue("Fail to create the table", admin.tableExists(name));
        } catch (IOException e) {
            assertNull("Exception found while creating table", e);
        }
    }
}

19 View Complete Implementation : TestDefaultMemStore.java
Copyright Apache License 2.0
Author : fengchen8086
// //////////////////////////////////
// Test for timestamps
// //////////////////////////////////
/**
 * Test to ensure correctness when using Memstore with multiple timestamps
 */
public void testMultipleTimestamps() throws Exception {
    long[] timestamps = new long[] { 20, 10, 5, 1 };
    Scan scan = new Scan();
    for (long timestamp : timestamps) addRows(memstore, timestamp);
    byte[] fam = Bytes.toBytes("fam");
    HColumnDescriptor hcd = mock(HColumnDescriptor.class);
    when(hcd.getName()).thenReturn(fam);
    Store store = mock(Store.class);
    when(store.getFamily()).thenReturn(hcd);
    scan.setColumnFamilyTimeRange(fam, 0, 2);
    assertTrue(memstore.shouldSeek(scan, store, Long.MIN_VALUE));
    scan.setColumnFamilyTimeRange(fam, 20, 82);
    assertTrue(memstore.shouldSeek(scan, store, Long.MIN_VALUE));
    scan.setColumnFamilyTimeRange(fam, 10, 20);
    assertTrue(memstore.shouldSeek(scan, store, Long.MIN_VALUE));
    scan.setColumnFamilyTimeRange(fam, 8, 12);
    assertTrue(memstore.shouldSeek(scan, store, Long.MIN_VALUE));
    scan.setColumnFamilyTimeRange(fam, 28, 42);
    assertTrue(!memstore.shouldSeek(scan, store, Long.MIN_VALUE));
}

19 View Complete Implementation : CCIndexAdmin.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * Add a column to an existing table. Asynchronous operation.
 *
 * @param tableName name of the table to add column to
 * @param column    column descriptor of column to be added
 * @throws IOException if a remote or network exception occurs
 */
public void addColumn(final String tableName, HColumnDescriptor column) throws IOException {
    addColumn(TableName.valueOf(tableName), column);
}
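
The overload above simply delegates to the TableName-based variant. For orientation, here is a hedged sketch of the equivalent operation against the stock HBase 1.x Admin API rather than CCIndexAdmin itself; the connection setup and the "mytable"/"f2" names are placeholders.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AddColumnSketch {

    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName tableName = TableName.valueOf("mytable");

            // Add a new family to an existing table (asynchronous on the server side).
            HColumnDescriptor newFamily = new HColumnDescriptor("f2");
            newFamily.setMaxVersions(1);
            admin.addColumn(tableName, newFamily);

            // Later, change an attribute of that family in place.
            newFamily.setMaxVersions(5);
            admin.modifyColumn(tableName, newFamily);
        }
    }
}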

19 View Complete Implementation : HFileOutputFormat2.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * Serialize column family to block size map to configuration.
 * Invoked while configuring the MR job for incremental load.
 * @param tableDescriptor to read the properties from
 * @param conf to persist serialized values into
 *
 * @throws IOException
 *           on failure to read column family descriptors
 */
@VisibleForTesting
static void configureBlockSize(HTableDescriptor tableDescriptor, Configuration conf) throws UnsupportedEncodingException {
    StringBuilder blockSizeConfigValue = new StringBuilder();
    if (tableDescriptor == null) {
        // could happen with mock table instance
        return;
    }
    Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
    int i = 0;
    for (HColumnDescriptor familyDescriptor : families) {
        if (i++ > 0) {
            blockSizeConfigValue.append('&');
        }
        blockSizeConfigValue.append(URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8"));
        blockSizeConfigValue.append('=');
        blockSizeConfigValue.append(URLEncoder.encode(String.valueOf(familyDescriptor.getBlocksize()), "UTF-8"));
    }
    // Get rid of the last ampersand
    conf.set(BLOCK_SIZE_FAMILIES_CONF_KEY, blockSizeConfigValue.toString());
}

19 View Complete Implementation : TestRegionMergeTransaction.java
Copyright Apache License 2.0
Author : fengchen8086
private HRegion createRegion(final Path testdir, final WALFactory wals, final byte[] startrow, final byte[] endrow) throws IOException {
    // Make a region with start and end keys.
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("table"));
    HColumnDescriptor hcd = new HColumnDescriptor(CF);
    htd.addFamily(hcd);
    HRegionInfo hri = new HRegionInfo(htd.getTableName(), startrow, endrow);
    HRegion a = HRegion.createHRegion(hri, testdir, TEST_UTIL.getConfiguration(), htd);
    HRegion.closeHRegion(a);
    return HRegion.openHRegion(testdir, hri, htd, wals.getWAL(hri.getEncodedNameAsBytes()), TEST_UTIL.getConfiguration());
}

19 View Complete Implementation : TestLoadIncrementalHFiles.java
Copyright Apache License 2.0
Author : fengchen8086
private HTableDescriptor buildHTD(TableName tableName, BloomType bloomType) {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);
    familyDesc.setBloomFilterType(bloomType);
    htd.addFamily(familyDesc);
    return htd;
}

19 View Complete Implementation : TableSchemaModel.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * @return a table descriptor
 */
@JsonIgnore
public HTableDescriptor getTableDescriptor() {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(getName()));
    for (Map.Entry<QName, Object> e : getAny().entrySet()) {
        htd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
    }
    for (ColumnSchemaModel column : getColumns()) {
        HColumnDescriptor hcd = new HColumnDescriptor(column.getName());
        for (Map.Entry<QName, Object> e : column.getAny().entrySet()) {
            hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString());
        }
        htd.addFamily(hcd);
    }
    return htd;
}

19 View Complete Implementation : TestSplitTransaction.java
Copyright Apache License 2.0
Author : fengchen8086
HRegion createRegion(final Path testdir, final WALFactory wals) throws IOException {
    // Make a region with start and end keys. Use 'aaa', to 'AAA'.  The load
    // region utility will add rows between 'aaa' and 'zzz'.
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("table"));
    HColumnDescriptor hcd = new HColumnDescriptor(CF);
    htd.addFamily(hcd);
    HRegionInfo hri = new HRegionInfo(htd.getTableName(), STARTROW, ENDROW);
    HRegion r = HRegion.createHRegion(hri, testdir, TEST_UTIL.getConfiguration(), htd);
    HRegion.closeHRegion(r);
    return HRegion.openHRegion(testdir, hri, htd, wals.getWAL(hri.getEncodedNameAsBytes()), TEST_UTIL.getConfiguration());
}

19 View Complete Implementation : HRegionFileSystem.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * Check whether region has Reference file
 *
 * @param htd table descriptor of the region
 * @return true if region has reference file
 * @throws IOException
 */
public boolean hasReferences(final HTableDescriptor htd) throws IOException {
    for (HColumnDescriptor family : htd.getFamilies()) {
        if (hasReferences(family.getNameAsString())) {
            return true;
        }
    }
    return false;
}

19 View Complete Implementation : IndexTableDescriptor.java
Copyright Apache License 2.0
Author : fengchen8086
public HTableDescriptor createIndexTableDescriptor(byte[] indexColumn) throws IndexNotExistedException {
    IndexSpecification indexSpec = this.getIndexSpecification(indexColumn);
    HTableDescriptor indexTableDescriptor = new HTableDescriptor(indexSpec.getIndexTableName());
    if (indexSpec.getIndexType() == IndexType.CCIndex) {
        for (HColumnDescriptor desc : this.descriptor.getFamilies()) {
            indexTableDescriptor.addFamily(desc);
        }
    } else if (indexSpec.getIndexType() == IndexType.UDGIndex) {
        Set<byte[]> family = indexSpec.getAdditionMap().keySet();
        if (family.size() != 0) {
            for (byte[] name : family) {
                indexTableDescriptor.addFamily(this.descriptor.getFamily(name));
            }
        } else {
            indexTableDescriptor.addFamily(this.descriptor.getFamily(indexSpec.getFamily()));
        }
    } else if (indexSpec.getIndexType() == IndexType.GSIndex) {
        indexTableDescriptor.addFamily(this.descriptor.getFamily(indexSpec.getFamily()));
    }
    indexTableDescriptor.setValue(IndexConstants.INDEX_TYPE, // record the index type
    Bytes.toBytes(indexSpec.getIndexType().toString()));
    return indexTableDescriptor;
}

19 View Complete Implementation : TestReplicationAdminWithClusters.java
Copyright Apache License 2.0
Author : fengchen8086
@Test(timeout = 300000)
public void testEnableReplicationWhenReplicationNotEnabled() throws Exception {
    HTableDescriptor table = admin1.getTableDescriptor(tableName);
    for (HColumnDescriptor fam : table.getColumnFamilies()) {
        fam.setScope(HConstants.REPLICATION_SCOPE_LOCAL);
    }
    admin1.disableTable(tableName);
    admin1.modifyTable(tableName, table);
    admin1.enableTable(tableName);
    admin2.disableTable(tableName);
    admin2.modifyTable(tableName, table);
    admin2.enableTable(tableName);
    adminExt.enableTableRep(tableName);
    table = admin1.getTableDescriptor(tableName);
    for (HColumnDescriptor fam : table.getColumnFamilies()) {
        assertEquals(fam.getScope(), HConstants.REPLICATION_SCOPE_GLOBAL);
    }
}

19 View Complete Implementation : TestEncryptionKeyRotation.java
Copyright Apache License 2.0
Author : fengchen8086
private void createTableAndFlush(HTableDescriptor htd) throws Exception {
    HColumnDescriptor hcd = htd.getFamilies().iterator().next();
    // Create the test table
    TEST_UTIL.getHBaseAdmin().createTable(htd);
    TEST_UTIL.waitTableAvailable(htd.getName(), 5000);
    // Create a store file
    Table table = new HTable(conf, htd.getTableName());
    try {
        table.put(new Put(Bytes.toBytes("testrow")).add(hcd.getName(), Bytes.toBytes("q"), Bytes.toBytes("value")));
    } finally {
        table.close();
    }
    TEST_UTIL.getHBaseAdmin().flush(htd.getTableName());
}

19 View Complete Implementation : IndexTableDescriptor.java
Copyright Apache License 2.0
Author : fengchen8086
protected HTableDescriptor createCCTTableDescriptor(byte[] indexColumn) throws IndexNotExistedException {
    IndexSpecification indexSpec = this.getIndexSpecification(indexColumn);
    HTableDescriptor indexTableDescriptor = new HTableDescriptor(IndexUtils.getCCTName(indexSpec.getTableName()));
    System.out.println("winter new cct table name: " + indexTableDescriptor.getTableName());
    if (indexSpec.getIndexType() == IndexType.CCIndex) {
        for (HColumnDescriptor desc : this.descriptor.getFamilies()) {
            // column is f, the only family
            indexTableDescriptor.addFamily(desc);
        }
    } else if (indexSpec.getIndexType() == IndexType.UDGIndex) {
        Set<byte[]> family = indexSpec.getAdditionMap().keySet();
        if (family.size() != 0) {
            for (byte[] name : family) {
                indexTableDescriptor.addFamily(this.descriptor.getFamily(name));
            }
        } else {
            indexTableDescriptor.addFamily(this.descriptor.getFamily(indexSpec.getFamily()));
        }
    } else if (indexSpec.getIndexType() == IndexType.GSIndex) {
        indexTableDescriptor.addFamily(this.descriptor.getFamily(indexSpec.getFamily()));
    }
    indexTableDescriptor.setValue(IndexConstants.INDEX_TYPE, // record the index type
    Bytes.toBytes(indexSpec.getIndexType().toString()));
    return indexTableDescriptor;
}

19 View Complete Implementation : SnapshotTestingUtils.java
Copyright Apache License 2.0
Author : fengchen8086
public static void createTable(final HBaseTestingUtility util, final TableName tableName, int regionReplication, final byte[]... families) throws IOException, InterruptedException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.setRegionReplication(regionReplication);
    for (byte[] family : families) {
        HColumnDescriptor hcd = new HColumnDescriptor(family);
        htd.addFamily(hcd);
    }
    byte[][] splitKeys = getSplitKeys();
    util.createTable(htd, splitKeys);
    assertEquals((splitKeys.length + 1) * regionReplication, util.getHBaseAdmin().getTableRegions(tableName).size());
}

19 View Complete Implementation : UnmodifyableHTableDescriptor.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * Does NOT add a column family. This object is immutable
 * @param family HColumnDescriptor of family to add.
 */
@Override
public HTableDescriptor addFamily(final HColumnDescriptor family) {
    throw new UnsupportedOperationException("HTableDescriptor is read-only");
}

19 View Complete Implementation : ReplicationAdmin.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * Set the table's replication switch if the table's replication switch is already not set.
 * @param tableName name of the table
 * @param isRepEnabled is replication switch enable or disable
 * @throws IOException if a remote or network exception occurs
 */
private void setTableRep(final TableName tableName, boolean isRepEnabled) throws IOException {
    Admin admin = null;
    try {
        admin = this.connection.getAdmin();
        HTableDescriptor htd = admin.getTableDescriptor(tableName);
        if (isTableRepEnabled(htd) ^ isRepEnabled) {
            boolean isOnlineSchemaUpdateEnabled = this.connection.getConfiguration().getBoolean("hbase.online.schema.update.enable", true);
            if (!isOnlineSchemaUpdateEnabled) {
                admin.disableTable(tableName);
            }
            for (HColumnDescriptor hcd : htd.getFamilies()) {
                hcd.setScope(isRepEnabled ? HConstants.REPLICATION_SCOPE_GLOBAL : HConstants.REPLICATION_SCOPE_LOCAL);
            }
            admin.modifyTable(tableName, htd);
            if (!isOnlineSchemaUpdateEnabled) {
                admin.enableTable(tableName);
            }
        }
    } finally {
        if (admin != null) {
            try {
                admin.close();
            } catch (IOException e) {
                LOG.warn("Failed to close admin connection.");
                LOG.debug("Details on failure to close admin connection.", e);
            }
        }
    }
}

19 View Complete Implementation : MasterProcedureTestingUtility.java
Copyright Apache License 2.0
Author : fengchen8086
public static void validateColumnFamilyModification(final HMaster master, final TableName tableName, final String family, HColumnDescriptor columnDescriptor) throws IOException {
    HTableDescriptor htd = master.getTableDescriptors().get(tableName);
    assertTrue(htd != null);
    HColumnDescriptor hcfd = htd.getFamily(family.getBytes());
    assertTrue(hcfd.equals(columnDescriptor));
}

19 View Complete Implementation : MasterFileSystem.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * Modify Column of a table
 * @param tableName
 * @param hcd HColumnDescriptor
 * @return Modified HTableDescriptor with the column modified.
 * @throws IOException
 */
public HTableDescriptor modifyColumn(TableName tableName, HColumnDescriptor hcd) throws IOException {
    LOG.info("AddModifyColumn. Table = " + tableName + " HCD = " + hcd.toString());
    HTableDescriptor htd = this.services.getTableDescriptors().get(tableName);
    byte[] familyName = hcd.getName();
    if (!htd.hasFamily(familyName)) {
        throw new InvalidFamilyOperationException("Family '" + Bytes.toString(familyName) + "' doesn't exists so cannot be modified");
    }
    htd.modifyFamily(hcd);
    this.services.getTableDescriptors().add(htd);
    return htd;
}

19 View Complete Implementation : HFileOutputFormat2.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * Serialize column family to data block encoding map to configuration.
 * Invoked while configuring the MR job for incremental load.
 *
 * @param table to read the properties from
 * @param conf to persist serialized values into
 * @throws IOException
 *           on failure to read column family descriptors
 */
@VisibleForTesting
static void configureDataBlockEncoding(HTableDescriptor tableDescriptor, Configuration conf) throws UnsupportedEncodingException {
    if (tableDescriptor == null) {
        // could happen with mock table instance
        return;
    }
    StringBuilder dataBlockEncodingConfigValue = new StringBuilder();
    Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
    int i = 0;
    for (HColumnDescriptor familyDescriptor : families) {
        if (i++ > 0) {
            dataBlockEncodingConfigValue.append('&');
        }
        dataBlockEncodingConfigValue.append(URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8"));
        dataBlockEncodingConfigValue.append('=');
        DataBlockEncoding encoding = familyDescriptor.getDataBlockEncoding();
        if (encoding == null) {
            encoding = DataBlockEncoding.NONE;
        }
        dataBlockEncodingConfigValue.append(URLEncoder.encode(encoding.toString(), "UTF-8"));
    }
    conf.set(DATABLOCK_ENCODING_FAMILIES_CONF_KEY, dataBlockEncodingConfigValue.toString());
}

19 View Complete Implementation : TestAtomicOperation.java
Copyright Apache License 2.0
Author : fengchen8086
private void initHRegion(byte[] tableName, String callingMethod, int[] maxVersions, byte[]... families) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    int i = 0;
    for (byte[] family : families) {
        HColumnDescriptor hcd = new HColumnDescriptor(family);
        hcd.setMaxVersions(maxVersions != null ? maxVersions[i++] : 1);
        htd.addFamily(hcd);
    }
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    region = TEST_UTIL.createLocalHRegion(info, htd);
}

19 View Complete Implementation : TestReplicationAdminWithClusters.java
Copyright Apache License 2.0
Author : fengchen8086
@Test(timeout = 300000)
public void testEnableReplicationWhenTableDescriptorIsNotSameInClusters() throws Exception {
    HTableDescriptor table = admin2.getTableDescriptor(tableName);
    HColumnDescriptor f = new HColumnDescriptor("newFamily");
    table.addFamily(f);
    admin2.disableTable(tableName);
    admin2.modifyTable(tableName, table);
    admin2.enableTable(tableName);
    try {
        adminExt.enableTableRep(tableName);
        fail("Exception should be thrown if table descriptors in the clusters are not same.");
    } catch (RuntimeException ignored) {
    }
    admin1.disableTable(tableName);
    admin1.modifyTable(tableName, table);
    admin1.enableTable(tableName);
    adminExt.enableTableRep(tableName);
    table = admin1.getTableDescriptor(tableName);
    for (HColumnDescriptor fam : table.getColumnFamilies()) {
        assertEquals(fam.getScope(), HConstants.REPLICATION_SCOPE_GLOBAL);
    }
}

19 View Complete Implementation : IntegrationTestMTTR.java
Copyright Apache License 2.0
Author : fengchen8086
private static void setupTables() throws IOException {
    // Get the table name.
    tableName = TableName.valueOf(util.getConfiguration().get("hbase.IntegrationTestMTTR.tableName", "IntegrationTestMTTR"));
    loadTableName = TableName.valueOf(util.getConfiguration().get("hbase.IntegrationTestMTTR.loadTableName", "IntegrationTestMTTRLoadTestTool"));
    if (util.getHBaseAdmin().tableExists(tableName)) {
        util.deleteTable(tableName);
    }
    if (util.getHBaseAdmin().tableExists(loadTableName)) {
        util.deleteTable(loadTableName);
    }
    // Create the table.  If this fails then fail everything.
    HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
    // Make the max file size huge so that splits don't happen during the test.
    tableDescriptor.setMaxFileSize(Long.MAX_VALUE);
    HColumnDescriptor descriptor = new HColumnDescriptor(FAMILY);
    descriptor.setMaxVersions(1);
    tableDescriptor.addFamily(descriptor);
    util.getHBaseAdmin().createTable(tableDescriptor);
    // Setup the table for LoadTestTool
    int ret = loadTool.run(new String[] { "-tn", loadTableName.getNameAsString(), "-init_only" });
    assertEquals("Failed to initialize LoadTestTool", 0, ret);
}

19 View Complete Implementation : TestDurability.java
Copyright Apache License 2.0
Author : fengchen8086
// lifted from TestAtomicOperation
private HRegion createHRegion(byte[] tableName, String callingMethod, WAL log, Durability durability) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    htd.setDurability(durability);
    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
    htd.addFamily(hcd);
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    Path path = new Path(DIR + callingMethod);
    if (FS.exists(path)) {
        if (!FS.delete(path, true)) {
            throw new IOException("Failed delete of " + path);
        }
    }
    return HRegion.createHRegion(info, path, CONF, htd, log);
}

19 View Complete Implementation : TestReplicationSourceManager.java
Copyright Apache License 2.0
Author : fengchen8086
@BeforeClass
public static void setUpBeforeClass() throws Exception {
    conf = HBaseConfiguration.create();
    conf.set("replication.replicationsource.implementation", ReplicationSourceDummy.class.getCanonicalName());
    conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
    conf.setLong("replication.sleep.before.failover", 2000);
    conf.setInt("replication.source.maxretriesmultiplier", 10);
    utility = new HBaseTestingUtility(conf);
    utility.startMiniZKCluster();
    zkw = new ZooKeeperWatcher(conf, "test", null);
    ZKUtil.createWithParents(zkw, "/hbase/replication");
    ZKUtil.createWithParents(zkw, "/hbase/replication/peers/1");
    ZKUtil.setData(zkw, "/hbase/replication/peers/1", Bytes.toBytes(conf.get(HConstants.ZOOKEEPER_QUORUM) + ":" + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT) + ":/1"));
    ZKUtil.createWithParents(zkw, "/hbase/replication/peers/1/peer-state");
    ZKUtil.setData(zkw, "/hbase/replication/peers/1/peer-state", ReplicationStateZKBase.ENABLED_ZNODE_BYTES);
    ZKUtil.createWithParents(zkw, "/hbase/replication/state");
    ZKUtil.setData(zkw, "/hbase/replication/state", ReplicationStateZKBase.ENABLED_ZNODE_BYTES);
    ZKClusterId.setClusterId(zkw, new ClusterId());
    FSUtils.setRootDir(utility.getConfiguration(), utility.getDataTestDir());
    fs = FileSystem.get(conf);
    oldLogDir = new Path(utility.getDataTestDir(), HConstants.HREGION_OLDLOGDIR_NAME);
    logDir = new Path(utility.getDataTestDir(), HConstants.HREGION_LOGDIR_NAME);
    replication = new Replication(new DummyServer(), fs, logDir, oldLogDir);
    manager = replication.getReplicationManager();
    manager.addSource(slaveId);
    htd = new HTableDescriptor(test);
    HColumnDescriptor col = new HColumnDescriptor("f1");
    col.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    htd.addFamily(col);
    col = new HColumnDescriptor("f2");
    col.setScope(HConstants.REPLICATION_SCOPE_LOCAL);
    htd.addFamily(col);
    hri = new HRegionInfo(htd.getTableName(), r1, r2);
}

19 View Complete Implementation : MasterFileSystem.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * Add column to a table
 * @param tableName
 * @param hcd
 * @return Modified HTableDescriptor with new column added.
 * @throws IOException
 */
public HTableDescriptor addColumn(TableName tableName, HColumnDescriptor hcd) throws IOException {
    LOG.info("AddColumn. Table = " + tableName + " HCD = " + hcd.toString());
    HTableDescriptor htd = this.services.getTableDescriptors().get(tableName);
    if (htd == null) {
        throw new InvalidFamilyOperationException("Family '" + hcd.getNameAsString() + "' cannot be modified as HTD is null");
    }
    htd.addFamily(hcd);
    this.services.getTableDescriptors().add(htd);
    return htd;
}

19 View Complete Implementation : HFileOutputFormat2.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * Serialize column family to compression algorithm map to configuration.
 * Invoked while configuring the MR job for incremental load.
 *
 * @param table to read the properties from
 * @param conf to persist serialized values into
 * @throws IOException
 *           on failure to read column family descriptors
 */
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE")
@VisibleForTesting
static void configureCompression(Configuration conf, HTableDescriptor tableDescriptor) throws UnsupportedEncodingException {
    StringBuilder compressionConfigValue = new StringBuilder();
    if (tableDescriptor == null) {
        // could happen with mock table instance
        return;
    }
    Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
    int i = 0;
    for (HColumnDescriptor familyDescriptor : families) {
        if (i++ > 0) {
            compressionConfigValue.append('&');
        }
        compressionConfigValue.append(URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8"));
        compressionConfigValue.append('=');
        compressionConfigValue.append(URLEncoder.encode(familyDescriptor.getCompression().getName(), "UTF-8"));
    }
    // Get rid of the last ampersand
    conf.set(COMPRESSION_FAMILIES_CONF_KEY, compressionConfigValue.toString());
}

19 View Complete Implementation : TestStripeCompactor.java
Copyright Apache License 2.0
Author : fengchen8086
private static StripeCompactor createCompactor(StoreFileWritersCapture writers, KeyValue[] input) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    final Scanner scanner = new Scanner(input);
    // Create store mock that is satisfactory for compactor.
    HColumnDescriptor col = new HColumnDescriptor(NAME_OF_THINGS);
    ScanInfo si = new ScanInfo(conf, col, Long.MAX_VALUE, 0, new KVComparator());
    Store store = mock(Store.class);
    when(store.getFamily()).thenReturn(col);
    when(store.getScanInfo()).thenReturn(si);
    when(store.areWritesEnabled()).thenReturn(true);
    when(store.getFileSystem()).thenReturn(mock(FileSystem.class));
    when(store.getRegionInfo()).thenReturn(new HRegionInfo(TABLE_NAME));
    when(store.createWriterInTmp(anyLong(), any(Compression.Algorithm.class), anyBoolean(), anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);
    when(store.getComparator()).thenReturn(new KVComparator());
    return new StripeCompactor(conf, store) {

        @Override
        protected InternalScanner createScanner(Store store, List<StoreFileScanner> scanners, long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException {
            return scanner;
        }

        @Override
        protected InternalScanner createScanner(Store store, List<StoreFileScanner> scanners, ScanType scanType, long smallestReadPoint, long earliestPutTs) throws IOException {
            return scanner;
        }
    };
}

19 View Complete Implementation : TestMasterReplication.java
Copyright Apache License 2.0
Author : fengchen8086
@Before
public void setUp() throws Exception {
    baseConfiguration = HBaseConfiguration.create();
    // smaller block size and capacity to trigger more operations
    // and test them
    baseConfiguration.setInt("hbase.regionserver.hlog.blocksize", 1024 * 20);
    baseConfiguration.setInt("replication.source.size.capacity", 1024);
    baseConfiguration.setLong("replication.source.sleepforretries", 100);
    baseConfiguration.setInt("hbase.regionserver.maxlogs", 10);
    baseConfiguration.setLong("hbase.master.logcleaner.ttl", 10);
    baseConfiguration.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
    baseConfiguration.setBoolean("dfs.support.append", true);
    baseConfiguration.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
    baseConfiguration.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, CoprocessorCounter.class.getName());
    table = new HTableDescriptor(tableName);
    HColumnDescriptor fam = new HColumnDescriptor(famName);
    fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    table.addFamily(fam);
    fam = new HColumnDescriptor(noRepfamName);
    table.addFamily(fam);
}

19 View Complete Implementation : HFileOutputFormat2.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * Serialize column family to bloom type map to configuration.
 * Invoked while configuring the MR job for incremental load.
 * @param tableDescriptor to read the properties from
 * @param conf to persist serialized values into
 *
 * @throws IOException
 *           on failure to read column family descriptors
 */
@VisibleForTesting
static void configureBloomType(HTableDescriptor tableDescriptor, Configuration conf) throws UnsupportedEncodingException {
    if (tableDescriptor == null) {
        // could happen with mock table instance
        return;
    }
    StringBuilder bloomTypeConfigValue = new StringBuilder();
    Collection<HColumnDescriptor> families = tableDescriptor.getFamilies();
    int i = 0;
    for (HColumnDescriptor familyDescriptor : families) {
        if (i++ > 0) {
            bloomTypeConfigValue.append('&');
        }
        bloomTypeConfigValue.append(URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8"));
        bloomTypeConfigValue.append('=');
        String bloomType = familyDescriptor.getBloomFilterType().toString();
        if (bloomType == null) {
            bloomType = HColumnDescriptor.DEFAULT_BLOOMFILTER;
        }
        bloomTypeConfigValue.append(URLEncoder.encode(bloomType, "UTF-8"));
    }
    conf.set(BLOOM_TYPE_FAMILIES_CONF_KEY, bloomTypeConfigValue.toString());
}

19 View Complete Implementation : TestAdmin2.java
Copyright Apache License 2.0
Author : fengchen8086
@Test(timeout = 300000)
public void testDisableCatalogTable() throws Exception {
    try {
        this.admin.disableTable(TableName.META_TABLE_NAME);
        fail("Expected to throw ConstraintException");
    } catch (ConstraintException e) {
    }
    // Before the fix for HBASE-6146, the below table creation was failing as the hbase:meta table
    // actually getting disabled by the disableTable() call.
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testDisableCatalogTable".getBytes()));
    HColumnDescriptor hcd = new HColumnDescriptor("cf1".getBytes());
    htd.addFamily(hcd);
    TEST_UTIL.getHBaseAdmin().createTable(htd);
}

19 View Complete Implementation : MasterFileSystem.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * Enable in memory caching for hbase:meta
 */
public static void setInfoFamilyCachingForMeta(final HTableDescriptor metaDescriptor, final boolean b) {
    for (HColumnDescriptor hcd : metaDescriptor.getColumnFamilies()) {
        if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
            hcd.setBlockCacheEnabled(b);
            hcd.setInMemory(b);
        }
    }
}

19 View Complete Implementation : TestBulkDeleteProtocol.java
Copyright Apache License 2.0
Author : fengchen8086
private Table createTable(TableName tableName) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY1);
    // Just setting 10 as I am not testing with more than 10 versions here
    hcd.setMaxVersions(10);
    htd.addFamily(hcd);
    TEST_UTIL.getHBaseAdmin().createTable(htd, Bytes.toBytes(0), Bytes.toBytes(120), 5);
    Table ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
    return ht;
}

19 View Complete Implementation : TestDependentColumnFilter.java
Copyright Apache License 2.0
Author : fengchen8086
@Before
public void setUp() throws Exception {
    testVals = makeTestVals();
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(this.getClass().getSimpleName()));
    HColumnDescriptor hcd0 = new HColumnDescriptor(FAMILIES[0]);
    hcd0.setMaxVersions(3);
    htd.addFamily(hcd0);
    HColumnDescriptor hcd1 = new HColumnDescriptor(FAMILIES[1]);
    hcd1.setMaxVersions(3);
    htd.addFamily(hcd1);
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    this.region = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
    addData();
}

19 View Complete Implementation : ReplicationAdmin.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * @param htd table descriptor details for the table to check
 * @return true if table's replication switch is enabled
 */
private boolean isTableRepEnabled(HTableDescriptor htd) {
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        if (hcd.getScope() != HConstants.REPLICATION_SCOPE_GLOBAL) {
            return false;
        }
    }
    return true;
}

19 View Complete Implementation : ThriftUtilities.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * This utility method creates a new Thrift ColumnDescriptor "struct" based on
 * an Hbase HColumnDescriptor object.
 *
 * @param in
 *          Hbase HColumnDescriptor object
 * @return Thrift ColumnDescriptor
 */
static public ColumnDescriptor colDescFromHbase(HColumnDescriptor in) {
    ColumnDescriptor col = new ColumnDescriptor();
    col.name = ByteBuffer.wrap(Bytes.add(in.getName(), KeyValue.COLUMN_FAMILY_DELIM_ARRAY));
    col.maxVersions = in.getMaxVersions();
    col.compression = in.getCompression().toString();
    col.inMemory = in.isInMemory();
    col.blockCacheEnabled = in.isBlockCacheEnabled();
    col.bloomFilterType = in.getBloomFilterType().toString();
    return col;
}
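
colDescFromHbase maps each HColumnDescriptor attribute onto the Thrift ColumnDescriptor struct. Below is a small usage sketch, assuming ThriftUtilities and the generated ColumnDescriptor type are visible on the classpath (in the HBase source tree they live in org.apache.hadoop.hbase.thrift and org.apache.hadoop.hbase.thrift.generated).

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.thrift.ThriftUtilities;
import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor;

public class ColDescConversionSketch {

    // Convert every family of a table descriptor into its Thrift "struct" form.
    static List<ColumnDescriptor> toThrift(HTableDescriptor table) {
        List<ColumnDescriptor> thriftColumns = new ArrayList<ColumnDescriptor>();
        for (HColumnDescriptor family : table.getColumnFamilies()) {
            thriftColumns.add(ThriftUtilities.colDescFromHbase(family));
        }
        return thriftColumns;
    }
}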

19 View Complete Implementation : VisibilityUtils.java
Copyright Apache License 2.0
Author : fengchen8086
public static Filter createVisibilityLabelFilter(Region region, Authorizations authorizations) throws IOException {
    Map<ByteRange, Integer> cfVsMaxVersions = new HashMap<ByteRange, Integer>();
    for (HColumnDescriptor hcd : region.getTableDesc().getFamilies()) {
        cfVsMaxVersions.put(new SimpleMutableByteRange(hcd.getName()), hcd.getMaxVersions());
    }
    VisibilityLabelService vls = VisibilityLabelServiceManager.getInstance().getVisibilityLabelService();
    Filter visibilityLabelFilter = new VisibilityLabelFilter(vls.getVisibilityExpEvaluator(authorizations), cfVsMaxVersions);
    return visibilityLabelFilter;
}

19 View Complete Implementation : TestWALObserver.java
Copyright Apache License 2.0
Author : fengchen8086
private HTableDescriptor createBasic3FamilyHTD(final String tableName) {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a"));
    htd.addFamily(a);
    HColumnDescriptor b = new HColumnDescriptor(Bytes.toBytes("b"));
    htd.addFamily(b);
    HColumnDescriptor c = new HColumnDescriptor(Bytes.toBytes("c"));
    htd.addFamily(c);
    return htd;
}

19 View Complete Implementation : UnmodifyableHTableDescriptor.java
Copyright Apache License 2.0
Author : fengchen8086
@Override
public HTableDescriptor modifyFamily(HColumnDescriptor family) {
    throw new UnsupportedOperationException("HTableDescriptor is read-only");
}

19 View Complete Implementation : ReplicationAdmin.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * Find all column families that are replicated from this cluster
 * @return the full list of the replicated column families of this cluster as:
 *        tableName, family name, replicationType
 *
 * Currently replicationType is Global. In the future, more replication
 * types may be extended here. For example
 *  1) the replication may only apply to selected peers instead of all peers
 *  2) the replicationType may indicate the host Cluster servers as Slave
 *     for the table:columnFam.
 */
public List<HashMap<String, String>> listReplicated() throws IOException {
    List<HashMap<String, String>> replicationColFams = new ArrayList<HashMap<String, String>>();
    Admin admin = connection.getAdmin();
    HTableDescriptor[] tables;
    try {
        tables = admin.listTables();
    } finally {
        if (admin != null)
            admin.close();
    }
    for (HTableDescriptor table : tables) {
        HColumnDescriptor[] columns = table.getColumnFamilies();
        String tableName = table.getNameAsString();
        for (HColumnDescriptor column : columns) {
            if (column.getScope() != HConstants.REPLICATION_SCOPE_LOCAL) {
                // At this moment, the column family is replicated to all peers
                HashMap<String, String> replicationEntry = new HashMap<String, String>();
                replicationEntry.put(TNAME, tableName);
                replicationEntry.put(CFNAME, column.getNameAsString());
                replicationEntry.put(REPLICATIONTYPE, REPLICATIONGLOBAL);
                replicationColFams.add(replicationEntry);
            }
        }
    }
    return replicationColFams;
}
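
A minimal sketch of calling listReplicated() from client code, assuming a running cluster and the HBase 1.x ReplicationAdmin constructor that takes a Configuration; it simply prints each returned map.

import java.io.IOException;
import java.util.HashMap;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;

public class ListReplicatedSketch {

    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        ReplicationAdmin replicationAdmin = new ReplicationAdmin(conf);
        try {
            // Each entry maps the table name, column family name and replication
            // type keys to their values, as built by listReplicated() above.
            List<HashMap<String, String>> replicated = replicationAdmin.listReplicated();
            for (HashMap<String, String> entry : replicated) {
                System.out.println(entry);
            }
        } finally {
            replicationAdmin.close();
        }
    }
}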

19 View Complete Implementation : TestAdmin2.java
Copyright Apache License 2.0
Author : fengchen8086
private void createTableWithDefaultConf(TableName TABLENAME) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TABLENAME);
    HColumnDescriptor hcd = new HColumnDescriptor("value");
    htd.addFamily(hcd);
    admin.createTable(htd, null);
}

19 View Complete Implementation : OfflineMetaRebuildTestCore.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * Setup a clean table before we start mucking with it.
 *
 * @throws IOException
 * @throws InterruptedException
 * @throws KeeperException
 */
private Table setupTable(TableName tablename) throws Exception {
    HTableDescriptor desc = new HTableDescriptor(tablename);
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM));
    // If a table has no CF's it doesn't get checked
    desc.addFamily(hcd);
    TEST_UTIL.getHBaseAdmin().createTable(desc, splits);
    return this.connection.getTable(tablename);
}

19 View Complete Implementation : TestScannerSelectionUsingKeyRange.java
Copyright Apache License 2.0
Author : fengchen8086
@Test
public void testScannerSelection() throws IOException {
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.setInt("hbase.hstore.compactionThreshold", 10000);
    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY_BYTES).setBlockCacheEnabled(true).setBloomFilterType(bloomType);
    HTableDescriptor htd = new HTableDescriptor(TABLE);
    htd.addFamily(hcd);
    HRegionInfo info = new HRegionInfo(TABLE);
    HRegion region = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(), conf, htd);
    for (int iFile = 0; iFile < NUM_FILES; ++iFile) {
        for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
            Put put = new Put(Bytes.toBytes("row" + iRow));
            for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
                put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol), Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
            }
            region.put(put);
        }
        region.flush(true);
    }
    Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
    CacheConfig.blockCacheDisabled = false;
    CacheConfig cacheConf = new CacheConfig(conf);
    LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
    cache.clearCache();
    InternalScanner scanner = region.getScanner(scan);
    List<Cell> results = new ArrayList<Cell>();
    while (scanner.next(results)) {
    }
    scanner.close();
    assertEquals(0, results.size());
    Set<String> accessedFiles = cache.getCachedFileNamesForTest();
    assertEquals(expectedCount, accessedFiles.size());
    region.close();
}

19 View Complete Implementation : TestWALObserver.java
Copyright Apache License 2.0
Author : fengchen8086
/*
   * Creates an HRI around an HTD that has <code>tableName</code> and three
   * column families named.
   * 
   * @param tableName Name of table to use when we create HTableDescriptor.
   */
private HRegionInfo createBasic3FamilyHRegionInfo(final String tableName) {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
    for (int i = 0; i < TEST_FAMILY.length; i++) {
        HColumnDescriptor a = new HColumnDescriptor(TEST_FAMILY[i]);
        htd.addFamily(a);
    }
    return new HRegionInfo(htd.getTableName(), null, null, false);
}

19 View Complete Implementation : TestStore.java
Copyright Apache License 2.0
Author : fengchen8086
@Test
public void testStoreUsesConfigurationFromHcdAndHtd() throws Exception {
    final String CONFIG_KEY = "hbase.regionserver.thread.compaction.throttle";
    long anyValue = 10;
    // We'll check that it uses correct config and propagates it appropriately by going thru
    // the simplest "real" path I can find - "throttleCompaction", which just checks whether
    // a number we pass in is higher than some config value, inside compactionPolicy.
    Configuration conf = HBaseConfiguration.create();
    conf.setLong(CONFIG_KEY, anyValue);
    init(name.getMethodName() + "-xml", conf);
    Assert.assertTrue(store.throttleCompaction(anyValue + 1));
    Assert.assertFalse(store.throttleCompaction(anyValue));
    // HTD overrides XML.
    --anyValue;
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    htd.setConfiguration(CONFIG_KEY, Long.toString(anyValue));
    init(name.getMethodName() + "-htd", conf, htd, hcd);
    Assert.assertTrue(store.throttleCompaction(anyValue + 1));
    Assert.assertFalse(store.throttleCompaction(anyValue));
    // HCD overrides them both.
    --anyValue;
    hcd.setConfiguration(CONFIG_KEY, Long.toString(anyValue));
    init(name.getMethodName() + "-hcd", conf, htd, hcd);
    Assert.assertTrue(store.throttleCompaction(anyValue + 1));
    Assert.assertFalse(store.throttleCompaction(anyValue));
}

19 View Complete Implementation : TestStoreFile.java
Copyright Apache License 2.0
Author : fengchen8086
public void testEmptyStoreFileRestrictKeyRanges() throws Exception {
    StoreFile.Reader reader = mock(StoreFile.Reader.class);
    Store store = mock(Store.class);
    HColumnDescriptor hcd = mock(HColumnDescriptor.class);
    byte[] cf = Bytes.toBytes("ty");
    when(hcd.getName()).thenReturn(cf);
    when(store.getFamily()).thenReturn(hcd);
    StoreFileScanner scanner = new StoreFileScanner(reader, mock(HFileScanner.class), false, false, 0);
    Scan scan = new Scan();
    scan.setColumnFamilyTimeRange(cf, 0, 1);
    assertFalse(scanner.shouldUseScanner(scan, store, 0));
}

19 View Complete Implementation : TestLoadIncrementalHFiles.java
Copyright Apache License 2.0
Author : fengchen8086
/**
 * Test loading into a column family that does not exist.
 */
@Test(timeout = 60000)
public void testNonexistentColumnFamilyLoad() throws Exception {
    String testName = "testNonexistentColumnFamilyLoad";
    byte[][][] hFileRanges = new byte[][][] { new byte[][] { Bytes.toBytes("aaa"), Bytes.toBytes("ccc") }, new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ooo") } };
    final byte[] TABLE = Bytes.toBytes("mytable_" + testName);
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
    // set real family name to upper case on purpose to simulate the case that
    // family name in HFiles is invalid
    HColumnDescriptor family = new HColumnDescriptor(Bytes.toBytes(new String(FAMILY).toUpperCase()));
    htd.addFamily(family);
    try {
        runTest(testName, htd, BloomType.NONE, true, SPLIT_KEYS, hFileRanges);
        assertTrue("Loading into table with non-existent family should have failed", false);
    } catch (Exception e) {
        assertTrue("IOException expected", e instanceof IOException);
        // further check whether the exception message is correct
        String errMsg = e.getMessage();
        assertTrue("Incorrect exception message, expected message: [" + EXPECTED_MSG_FOR_NON_EXISTING_FAMILY + "], current message: [" + errMsg + "]", errMsg.contains(EXPECTED_MSG_FOR_NON_EXISTING_FAMILY));
    }
}

19 View Complete Implementation : TestSnapshotManifest.java
Copyright Apache License 2.0
Author : fengchen8086
@Before
public void setup() throws Exception {
    TEST_UTIL = HBaseTestingUtility.createLocalHTU();
    rootDir = TEST_UTIL.getDataTestDir(TABLE_NAME_STR);
    fs = TEST_UTIL.getTestFileSystem();
    conf = TEST_UTIL.getConfiguration();
    SnapshotTestingUtils.SnapshotMock snapshotMock = new SnapshotTestingUtils.SnapshotMock(conf, fs, rootDir);
    SnapshotTestingUtils.SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV2("snapshot", TABLE_NAME_STR, 0);
    snapshotDir = builder.commit();
    snapshotDesc = builder.getSnapshotDescription();
    SnapshotDataManifest.Builder dataManifestBuilder = SnapshotDataManifest.newBuilder();
    byte[] startKey = null;
    byte[] stopKey = null;
    for (int i = 1; i <= TEST_NUM_REGIONS; i++) {
        stopKey = Bytes.toBytes(String.format("%016d", i));
        HRegionInfo regionInfo = new HRegionInfo(TABLE_NAME, startKey, stopKey, false);
        SnapshotRegionManifest.Builder dataRegionManifestBuilder = SnapshotRegionManifest.newBuilder();
        for (HColumnDescriptor hcd : builder.getTableDescriptor().getFamilies()) {
            SnapshotRegionManifest.FamilyFiles.Builder family = SnapshotRegionManifest.FamilyFiles.newBuilder();
            family.setFamilyName(ByteStringer.wrap(hcd.getName()));
            for (int j = 0; j < 100; ++j) {
                SnapshotRegionManifest.StoreFile.Builder sfManifest = SnapshotRegionManifest.StoreFile.newBuilder();
                sfManifest.setName(String.format("%032d", i));
                sfManifest.setFileSize((1 + i) * (1 + i) * 1024);
                family.addStoreFiles(sfManifest.build());
            }
            dataRegionManifestBuilder.addFamilyFiles(family.build());
        }
        dataRegionManifestBuilder.setRegionInfo(HRegionInfo.convert(regionInfo));
        dataManifestBuilder.addRegionManifests(dataRegionManifestBuilder.build());
        startKey = stopKey;
    }
    dataManifestBuilder.setTableSchema(builder.getTableDescriptor().convert());
    SnapshotDataManifest dataManifest = dataManifestBuilder.build();
    writeDataManifest(dataManifest);
}