diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java index 20884edf836e..ba494d0010b3 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java @@ -169,7 +169,7 @@ private List getLogFilesForNewBackup(Map olderTimestamps, LOG.debug("currentLogFile: " + log.getPath().toString()); if (AbstractFSWALProvider.isMetaFile(log.getPath())) { if (LOG.isDebugEnabled()) { - LOG.debug("Skip hbase:meta log file: " + log.getPath().getName()); + LOG.debug("Skip {} log file: {}", conn.getMetaTableName(), log.getPath().getName()); } continue; } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java index 6c021bf622a5..78f2f1ea36b8 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Put; @@ -131,7 +130,7 @@ public static void updateMetaWithFavoredNodesInfo( puts.add(put); } } - try (Table table = connection.getTable(TableName.META_TABLE_NAME)) { + try (Table table = connection.getTable(connection.getMetaTableName())) { table.put(puts); } LOG.info("Added " + puts.size() + " region favored nodes in META"); diff --git 
a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java index 02c18c73bfb5..2a2512b066a3 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java @@ -170,9 +170,10 @@ private void processMetaRecord(Result result) throws IOException { * Initialize the region assignment snapshot by scanning the hbase:meta table */ public void initialize() throws IOException { - LOG.info("Start to scan the hbase:meta for the current region assignment " + "snappshot"); + LOG.info("Start to scan {} for the current region assignment snapshot", + connection.getMetaTableName()); // Scan hbase:meta to pick up user regions - try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME); + try (Table metaTable = connection.getTable(connection.getMetaTableName()); ResultScanner scanner = metaTable.getScanner(HConstants.CATALOG_FAMILY)) { for (;;) { Result result = scanner.next(); @@ -187,7 +188,8 @@ public void initialize() throws IOException { } } } - LOG.info("Finished to scan the hbase:meta for the current region assignment" + "snapshot"); + LOG.info("Finished scanning {} for the current region assignment snapshot", + connection.getMetaTableName()); } private void addRegion(RegionInfo regionInfo) { diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingTableIsolationAndReplicaDistribution.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingTableIsolationAndReplicaDistribution.java index cccc0c55c2c5..d323f7ca73ae 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingTableIsolationAndReplicaDistribution.java +++ 
b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingTableIsolationAndReplicaDistribution.java @@ -119,6 +119,7 @@ public void testTableIsolationAndReplicaDistribution() { * Validates whether all meta table regions are isolated. */ private boolean isMetaTableIsolated(BalancerClusterState cluster) { + return isTableIsolated(cluster, TableName.META_TABLE_NAME, "Meta"); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java index 42bfd757e0d1..48a75871bbb2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java @@ -196,7 +196,7 @@ private static CompletableFuture>> getTableReg final AsyncTable metaTable, final TableName tableName, final boolean excludeOfflinedSplitParents) { CompletableFuture>> future = new CompletableFuture<>(); - if (TableName.META_TABLE_NAME.equals(tableName)) { + if (TableName.isMetaTableName(tableName)) { future.completeExceptionally(new IOException( "This method can't be used to locate meta regions;" + " use MetaTableLocator instead")); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java index 62c6951b4535..bbf8029d944e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil; import 
org.apache.hadoop.hbase.exceptions.MasterRegistryFetchException; import org.apache.hadoop.hbase.ipc.HBaseRpcController; @@ -55,6 +56,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetClusterIdResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetMetaRegionLocationsRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetMetaRegionLocationsResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetMetaTableNameRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetMetaTableNameResponse; /** * Base class for rpc based connection registry implementation. @@ -250,6 +253,19 @@ public CompletableFuture getActiveMaster() { getClass().getSimpleName() + ".getActiveMaster"); } + @Override + public CompletableFuture getMetaTableName() { + return tracedFuture(() -> this. call( + (c, s, d) -> s.getMetaTableName(c, GetMetaTableNameRequest.getDefaultInstance(), d), + GetMetaTableNameResponse::hasTableName, "getMetaTableName()").thenApply(resp -> { + if (resp.hasTableName() && !resp.getTableName().isEmpty()) { + return TableName.valueOf(resp.getTableName()); + } else { + return TableName.META_TABLE_NAME; + } + }), getClass().getSimpleName() + ".getMetaTableName"); + } + @Override public void close() { trace(() -> { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java index 6e96918d1d9a..64151fa6ad5c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java @@ -41,6 +41,17 @@ public interface AsyncConnection extends Closeable { */ Configuration getConfiguration(); + /** + * Returns the meta table name for this cluster. + *

+ * This value is fetched from the cluster during connection establishment and cached for the + * lifetime of this connection. For most clusters, this will be "hbase:meta". For read replica + * clusters or other specialized configurations, this may return a different table name. + *

+ * @return The meta table name for this cluster + */ + TableName getMetaTableName(); + /** * Retrieve a AsyncRegionLocator implementation to inspect region information on a table. The * returned AsyncRegionLocator is not thread-safe, so a new instance should be created for each @@ -104,6 +115,15 @@ default AsyncTable getTable(TableName tableName, ExecutorSer return getTableBuilder(tableName, pool).build(); } + /** + * Retrieve an {@link AsyncTable} implementation for accessing the meta table. This method returns + * the correct meta table for this connection + * @return An AsyncTable to use for interactions with the meta table + */ + default AsyncTable getMetaTable() { + return getTable(getMetaTableName()); + } + /** * Returns an {@link AsyncTableBuilder} for creating {@link AsyncTable}. *

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java index 3f0e3e0b370e..cbe84222d953 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java @@ -92,6 +92,8 @@ public class AsyncConnectionImpl implements AsyncConnection { final ConnectionRegistry registry; + private final TableName metaTableName; + protected final int rpcTimeout; protected final RpcClient rpcClient; @@ -128,14 +130,16 @@ public class AsyncConnectionImpl implements AsyncConnection { private volatile ConnectionOverAsyncConnection conn; public AsyncConnectionImpl(Configuration conf, ConnectionRegistry registry, String clusterId, - SocketAddress localAddress, User user) { - this(conf, registry, clusterId, localAddress, user, Collections.emptyMap()); + TableName metaTableName, SocketAddress localAddress, User user) { + this(conf, registry, clusterId, metaTableName, localAddress, user, Collections.emptyMap()); } public AsyncConnectionImpl(Configuration conf, ConnectionRegistry registry, String clusterId, - SocketAddress localAddress, User user, Map connectionAttributes) { + TableName metaTableName, SocketAddress localAddress, User user, + Map connectionAttributes) { this.conf = conf; this.user = user; + this.metaTableName = metaTableName; this.metricsScope = MetricsConnection.getScope(conf, clusterId, this); if (user.isLoginFromKeytab()) { @@ -219,6 +223,10 @@ public Configuration getConfiguration() { return conf; } + public TableName getMetaTableName() { + return metaTableName; + } + @Override public boolean isClosed() { return closed.get(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java index 
e26fb837b89d..eef82a3ebac6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java @@ -22,7 +22,6 @@ import static org.apache.hadoop.hbase.HConstants.NINES; import static org.apache.hadoop.hbase.HConstants.USE_META_REPLICAS; import static org.apache.hadoop.hbase.HConstants.ZEROES; -import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.createRegionLocations; import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.isGood; import static org.apache.hadoop.hbase.client.ConnectionConfiguration.HBASE_CLIENT_META_CACHE_INVALIDATE_INTERVAL; @@ -238,14 +237,15 @@ private boolean tryComplete(LocateRequest req, CompletableFuture { + .createSelector(replicaSelectorClass, conn.getMetaTableName(), conn, () -> { int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; try { RegionLocations metaLocations = conn.registry.getMetaRegionLocations() .get(conn.connConf.getMetaReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); numOfReplicas = metaLocations.size(); } catch (Exception e) { - LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); + LOG.error("Failed to get table {}'s region replication, ", conn.getMetaTableName(), + e); } return numOfReplicas; }); @@ -427,7 +427,7 @@ private void locateInMeta(TableName tableName, LocateRequest req) { // do nothing } - conn.getTable(META_TABLE_NAME).scan(scan, new AdvancedScanResultConsumer() { + conn.getTable(conn.getMetaTableName()).scan(scan, new AdvancedScanResultConsumer() { private boolean completeNormally = false; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java index 0e872a5b21da..5e640c45a354 100644 --- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.client; -import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.REGION_NAMES_KEY; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.SERVER_NAME_KEY; import static org.apache.hadoop.hbase.util.FutureUtils.addListener; @@ -217,7 +216,7 @@ void clearCache(TableName tableName) { new TableSpanBuilder(conn).setName("AsyncRegionLocator.clearCache").setTableName(tableName); TraceUtil.trace(() -> { LOG.debug("Clear meta cache for {}", tableName); - if (tableName.equals(META_TABLE_NAME)) { + if (tableName.equals(conn.getMetaTableName())) { metaRegionLocator.clearCache(); } else { nonMetaRegionLocator.clearCache(tableName); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java index b7ec7fcd8725..ab358e1af184 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java @@ -63,7 +63,7 @@ public CompletableFuture> getAllRegionLocations() { .thenApply(locs -> Arrays.asList(locs.getRegionLocations())); } CompletableFuture> future = ClientMetaTableAccessor - .getTableHRegionLocations(conn.getTable(TableName.META_TABLE_NAME), tableName); + .getTableHRegionLocations(conn.getTable(conn.getMetaTableName()), tableName); addListener(future, (locs, error) -> locs.forEach(loc -> { // the cache assumes that all locations have a serverName. 
only add if that's true if (loc.getServerName() != null) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java index 8220189d9b51..bebfa6addc25 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java @@ -62,6 +62,17 @@ public interface Connection extends Abortable, Closeable { /** Returns Configuration instance being used by this Connection instance. */ Configuration getConfiguration(); + /** + * Returns the meta table name for this cluster. + *

+ * This value is fetched from the cluster during connection establishment and cached for the + * lifetime of this connection. For most clusters, this will be "hbase:meta". For read replica + * clusters or other specialized configurations, this may return a different table name. + *

+ * @return The meta table name for this cluster + */ + TableName getMetaTableName(); + /** * Retrieve a Table implementation for accessing a table. The returned Table is not thread safe, a * new instance should be created for each using thread. This is a lightweight operation, pooling @@ -95,6 +106,15 @@ default Table getTable(TableName tableName, ExecutorService pool) throws IOExcep return getTableBuilder(tableName, pool).build(); } + /** + * Retrieve a Table implementation for accessing the meta table. This method returns the correct + * meta table for this connection (hbase:meta or hbase:meta_suffix). + * @return A Table to use for interactions with the meta table + */ + default Table getMetaTable() throws IOException { + return getTable(getMetaTableName()); + } + /** *

* Retrieve a {@link BufferedMutator} for performing client-side buffering of writes. The diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java index 144a790c406d..8e304fcbcac9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java @@ -595,16 +595,29 @@ public static CompletableFuture createAsyncConnection(URI conne future.completeExceptionally(new IOException("clusterid came back null")); return; } - Class clazz = appliedConf.getClass( - HBASE_CLIENT_ASYNC_CONNECTION_IMPL, AsyncConnectionImpl.class, AsyncConnection.class); - try { - future.complete(user.runAs((PrivilegedExceptionAction< - ? extends AsyncConnection>) () -> ReflectionUtils.newInstance(clazz, appliedConf, - registry, clusterId, null, user, connectionAttributes))); - } catch (Exception e) { - registry.close(); - future.completeExceptionally(e); - } + // Fetch meta table name from registry + addListener(registry.getMetaTableName(), (metaTableName, metaError) -> { + if (metaError != null) { + registry.close(); + future.completeExceptionally(metaError); + return; + } + if (metaTableName == null) { + registry.close(); + future.completeExceptionally(new IOException("meta table name came back null")); + return; + } + Class clazz = appliedConf.getClass( + HBASE_CLIENT_ASYNC_CONNECTION_IMPL, AsyncConnectionImpl.class, AsyncConnection.class); + try { + future.complete(user.runAs((PrivilegedExceptionAction< + ? 
extends AsyncConnection>) () -> ReflectionUtils.newInstance(clazz, appliedConf, + registry, clusterId, metaTableName, null, user, connectionAttributes))); + } catch (Exception e) { + registry.close(); + future.completeExceptionally(e); + } + }); }); return future; }, "ConnectionFactory.createAsyncConnection"); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java index 471cfa874458..ad7483600ab6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java @@ -88,6 +88,11 @@ public Configuration getConfiguration() { return conn.getConfiguration(); } + @Override + public TableName getMetaTableName() { + return conn.getMetaTableName(); + } + @Override public BufferedMutator getBufferedMutator(BufferedMutatorParams params) throws IOException { AsyncBufferedMutatorBuilder builder = conn.getBufferedMutatorBuilder(params.getTableName()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java index 2ace3959ffa6..b80f05ba2081 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java @@ -21,6 +21,7 @@ import java.util.concurrent.CompletableFuture; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; /** @@ -48,6 +49,15 @@ public interface ConnectionRegistry extends Closeable { */ CompletableFuture getActiveMaster(); + /** + * Get the name of the meta table for this cluster. + *

+ * Should only be called once, similar to {@link #getClusterId()}. + *

+ * @return CompletableFuture containing the meta table name + */ + CompletableFuture getMetaTableName(); + /** * Return the connection string associated with this registry instance. This value is * informational, used for annotating traces. Values returned may not be valid for establishing a diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java index d6d8e00f7822..18805bce3400 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java @@ -120,7 +120,7 @@ private static int checkReplicaId(int regionId) { this.replicaId = checkReplicaId(replicaId); this.offLine = offLine; this.regionName = RegionInfo.createRegionName(this.tableName, this.startKey, this.regionId, - this.replicaId, !this.tableName.equals(TableName.META_TABLE_NAME)); + this.replicaId, !TableName.isMetaTableName(this.tableName)); this.encodedName = RegionInfo.encodeRegionName(this.regionName); this.hashCode = generateHashCode(this.tableName, this.startKey, this.endKey, this.regionId, this.replicaId, this.offLine, this.regionName); @@ -232,7 +232,7 @@ public boolean containsRow(byte[] row) { /** Returns true if this region is a meta region */ @Override public boolean isMetaRegion() { - return tableName.equals(TableName.META_TABLE_NAME); + return TableName.isMetaTableName(tableName); } /** Returns True if has been split and has daughters. 
*/ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index f4a474957a2f..631d5e42cf70 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.HConstants.HIGH_QOS; -import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; import static org.apache.hadoop.hbase.util.FutureUtils.addListener; import static org.apache.hadoop.hbase.util.FutureUtils.unwrapCompletionException; @@ -405,7 +404,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { AsyncAdminBuilderBase builder) { this.connection = connection; this.retryTimer = retryTimer; - this.metaTable = connection.getTable(META_TABLE_NAME); + this.metaTable = connection.getTable(connection.getMetaTableName()); this.rpcTimeoutNs = builder.rpcTimeoutNs; this.operationTimeoutNs = builder.operationTimeoutNs; this.pauseNs = builder.pauseNs; @@ -1012,7 +1011,7 @@ List> adminCall(controller, stub, @Override public CompletableFuture> getRegions(TableName tableName) { - if (tableName.equals(META_TABLE_NAME)) { + if (tableName.equals(connection.getMetaTableName())) { return connection.registry.getMetaRegionLocations() .thenApply(locs -> Stream.of(locs.getRegionLocations()).map(HRegionLocation::getRegion) .collect(Collectors.toList())); @@ -1303,7 +1302,7 @@ private CompletableFuture compactRegion(byte[] regionName, byte[] columnFa * List all region locations for the specific table. 
*/ private CompletableFuture> getTableHRegionLocations(TableName tableName) { - if (TableName.META_TABLE_NAME.equals(tableName)) { + if (connection.getMetaTableName().equals(tableName)) { CompletableFuture> future = new CompletableFuture<>(); addListener(connection.registry.getMetaRegionLocations(), (metaRegions, err) -> { if (err != null) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java index ef927fd3a55b..66c62755839b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java @@ -42,6 +42,8 @@ public class RegionInfoBuilder { */ // TODO: How come Meta regions still do not have encoded region names? Fix. // hbase:meta,,1.1588230740 should be the hbase:meta first region name. + // TODO: For now, hardcode to default. Future: lazy initialization based on config or make it use + // connection public static final RegionInfo FIRST_META_REGIONINFO = new MutableRegionInfo(1L, TableName.META_TABLE_NAME, RegionInfo.DEFAULT_REPLICA_ID); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java index 3f353b5799d4..68906d8d6382 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java @@ -82,7 +82,7 @@ public static String getRegionNameAsStringForDisplay(RegionInfo ri, Configuratio */ public static byte[] getRegionNameForDisplay(RegionInfo ri, Configuration conf) { boolean displayKey = conf.getBoolean(DISPLAY_KEYS_KEY, true); - if (displayKey || ri.getTable().equals(TableName.META_TABLE_NAME)) { + if (displayKey || TableName.isMetaTableName(ri.getTable())) { return ri.getRegionName(); } else
{ // create a modified regionname with the startkey replaced but preserving diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java index eed1a40a2c2f..f158a72b9c23 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java @@ -616,7 +616,7 @@ private ModifyableTableDescriptor(final TableName name, families.forEach(c -> this.families.put(c.getName(), ColumnFamilyDescriptorBuilder.copy(c))); this.values.putAll(values); this.values.put(IS_META_KEY, - new Bytes(Bytes.toBytes(Boolean.toString(name.equals(TableName.META_TABLE_NAME))))); + new Bytes(Bytes.toBytes(Boolean.toString(TableName.isMetaTableName(name))))); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java index ebb43723b8f8..cc3b23392f2b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.security.User; @@ -262,6 +263,17 @@ public CompletableFuture getActiveMaster() { "ZKConnectionRegistry.getActiveMaster"); } + /** + * Returns the meta table name. This implementation always returns the default "hbase:meta" + * because ZKConnectionRegistry is deprecated and does not support custom meta table names. 
Custom + * meta table name support requires using RPC-based connection registry. + */ + @Override + public CompletableFuture getMetaTableName() { + return tracedFuture(() -> CompletableFuture.completedFuture(TableName.META_TABLE_NAME), + "ZKConnectionRegistry.getMetaTableName"); + } + @Override public String getConnectionString() { final String serverList = zk.getConnectString(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java index 40ff0373c36c..47fb4594c3cf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java @@ -22,7 +22,6 @@ import java.security.PrivilegedExceptionAction; import java.util.concurrent.CompletableFuture; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.AsyncConnection; import org.apache.hadoop.hbase.client.AsyncTable; import org.apache.hadoop.hbase.client.Connection; @@ -73,7 +72,7 @@ private static void injectFault() throws ServiceException { future.completeExceptionally(ProtobufUtil.handleRemoteException(injectedException)); return future; } - AsyncTable table = conn.getTable(TableName.META_TABLE_NAME); + AsyncTable table = conn.getTable(conn.getMetaTableName()); table. 
coprocessorService( AuthenticationProtos.AuthenticationService::newStub, @@ -102,7 +101,7 @@ static Token obtainToken(Connection conn) throws try { injectFault(); - meta = conn.getTable(TableName.META_TABLE_NAME); + meta = conn.getTable(conn.getMetaTableName()); CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW); AuthenticationProtos.AuthenticationService.BlockingInterface service = AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index 60175137ad2c..0373edb34570 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -3325,7 +3325,7 @@ public static String toLockJson(List lockedRes long regionId = proto.getRegionId(); int defaultReplicaId = org.apache.hadoop.hbase.client.RegionInfo.DEFAULT_REPLICA_ID; int replicaId = proto.hasReplicaId() ? 
proto.getReplicaId() : defaultReplicaId; - if (tableName.equals(TableName.META_TABLE_NAME) && replicaId == defaultReplicaId) { + if (TableName.isMetaTableName(tableName) && replicaId == defaultReplicaId) { return RegionInfoBuilder.FIRST_META_REGIONINFO; } byte[] startKey = null; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/DoNothingConnectionRegistry.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/DoNothingConnectionRegistry.java index 30d69d4b3f9e..1f7cb679d92a 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/DoNothingConnectionRegistry.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/DoNothingConnectionRegistry.java @@ -21,6 +21,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; @@ -48,6 +49,11 @@ public CompletableFuture getActiveMaster() { return CompletableFuture.completedFuture(null); } + @Override + public CompletableFuture getMetaTableName() { + return CompletableFuture.completedFuture(null); + } + @Override public String getConnectionString() { return "nothing"; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java index f65c7ccb6e75..9d3045257aaf 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java @@ -143,8 +143,8 @@ public Void answer(InvocationOnMock invocation) throws Throwable { }).when(adminStub).stopServer(any(HBaseRpcController.class), any(StopServerRequest.class), any()); User user = UserProvider.instantiate(CONF).getCurrent(); - conn = new 
AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF, user), "test", null, - user) { + conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF, user), "test", + TableName.META_TABLE_NAME, null, user) { @Override CompletableFuture getMasterStub() { @@ -195,7 +195,7 @@ public void testCreateSystemTable() { // that we pass the correct priority @Test public void testCreateMetaTable() { - conn.getAdmin().createTable(TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME) + conn.getAdmin().createTable(TableDescriptorBuilder.newBuilder(conn.getMetaTableName()) .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build()).join(); verify(masterStub, times(1)).createTable(assertPriority(SYSTEMTABLE_QOS), any(CreateTableRequest.class), any()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java index e56fffbb2642..22fac8ecc9e5 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; @@ -72,7 +73,7 @@ public CompletableFuture getActiveMaster() { return CompletableFuture.completedFuture(masterServer); } }; - conn = new AsyncConnectionImpl(CONF, registry, "test", null, user); + conn = new AsyncConnectionImpl(CONF, registry, "test", TableName.META_TABLE_NAME, null, user); } @After diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java index a7df92999d08..180a95d08895 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java @@ -100,7 +100,7 @@ public void setUp() throws IOException { public CompletableFuture getMetaRegionLocations() { return CompletableFuture.completedFuture(locs); } - }, "test", null, user); + }, "test", TableName.META_TABLE_NAME, null, user); } @After @@ -147,38 +147,36 @@ public void testClearCacheServerName() { @Test public void testClearCacheTableName() { - conn.getLocator().clearCache(TableName.META_TABLE_NAME); + conn.getLocator().clearCache(conn.getMetaTableName()); SpanData span = waitSpan("AsyncRegionLocator.clearCache"); assertThat(span, allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.INTERNAL), buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(TableName.META_TABLE_NAME))); + buildTableAttributesMatcher(conn.getMetaTableName()))); } @Test public void testGetRegionLocation() { - conn.getLocator().getRegionLocation(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW, + conn.getLocator().getRegionLocation(conn.getMetaTableName(), HConstants.EMPTY_START_ROW, RegionLocateType.CURRENT, TimeUnit.SECONDS.toNanos(1)).join(); SpanData span = waitSpan("AsyncRegionLocator.getRegionLocation"); - assertThat(span, - allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.INTERNAL), - buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(TableName.META_TABLE_NAME), - hasAttributes(containsEntryWithStringValuesOf("db.hbase.regions", - locs.getDefaultRegionLocation().getRegion().getRegionNameAsString())))); + assertThat(span, allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.INTERNAL), + buildConnectionAttributesMatcher(conn), buildTableAttributesMatcher(conn.getMetaTableName()), + 
hasAttributes(containsEntryWithStringValuesOf("db.hbase.regions", + locs.getDefaultRegionLocation().getRegion().getRegionNameAsString())))); } @Test public void testGetRegionLocations() { - conn.getLocator().getRegionLocations(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW, + conn.getLocator().getRegionLocations(conn.getMetaTableName(), HConstants.EMPTY_START_ROW, RegionLocateType.CURRENT, false, TimeUnit.SECONDS.toNanos(1)).join(); SpanData span = waitSpan("AsyncRegionLocator.getRegionLocations"); String[] expectedRegions = Arrays.stream(locs.getRegionLocations()).map(HRegionLocation::getRegion) .map(RegionInfo::getRegionNameAsString).toArray(String[]::new); assertThat(span, allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.INTERNAL), - buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(TableName.META_TABLE_NAME), hasAttributes( + buildConnectionAttributesMatcher(conn), buildTableAttributesMatcher(conn.getMetaTableName()), + hasAttributes( containsEntryWithStringValuesOf("db.hbase.regions", containsInAnyOrder(expectedRegions))))); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java index cb5431c35d3e..87e1211d715b 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java @@ -162,8 +162,8 @@ public Void answer(InvocationOnMock invocation) throws Throwable { } }).when(stub).get(any(HBaseRpcController.class), any(GetRequest.class), any()); User user = UserProvider.instantiate(CONF).getCurrent(); - conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF, user), "test", null, - user) { + conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF, user), "test", + TableName.META_TABLE_NAME, null, user) { @Override 
AsyncRegionLocator getLocator() { @@ -237,7 +237,7 @@ public void testGetSystemTable() { @Test public void testGetMetaTable() { - conn.getTable(TableName.META_TABLE_NAME).get(new Get(Bytes.toBytes(0))).join(); + conn.getTable(conn.getMetaTableName()).get(new Get(Bytes.toBytes(0))).join(); verify(stub, times(1)).get(assertPriority(SYSTEMTABLE_QOS), any(GetRequest.class), any()); } @@ -268,7 +268,7 @@ public void testPutSystemTable() { @Test public void testPutMetaTable() { - conn.getTable(TableName.META_TABLE_NAME).put(new Put(Bytes.toBytes(0)) + conn.getTable(conn.getMetaTableName()).put(new Put(Bytes.toBytes(0)) .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))).join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @@ -296,7 +296,7 @@ public void testDeleteSystemTable() { @Test public void testDeleteMetaTable() { - conn.getTable(TableName.META_TABLE_NAME).delete(new Delete(Bytes.toBytes(0))).join(); + conn.getTable(conn.getMetaTableName()).delete(new Delete(Bytes.toBytes(0))).join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @@ -327,7 +327,7 @@ public void testAppendSystemTable() { @Test public void testAppendMetaTable() { - conn.getTable(TableName.META_TABLE_NAME).append(new Append(Bytes.toBytes(0)) + conn.getTable(conn.getMetaTableName()).append(new Append(Bytes.toBytes(0)) .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))).join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @@ -355,7 +355,7 @@ public void testIncrementSystemTable() { @Test public void testIncrementMetaTable() { - conn.getTable(TableName.META_TABLE_NAME) + conn.getTable(conn.getMetaTableName()) .incrementColumnValue(Bytes.toBytes(0), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1).join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @@ 
-393,7 +393,7 @@ public void testCheckAndPutSystemTable() { @Test public void testCheckAndPutMetaTable() { - conn.getTable(TableName.META_TABLE_NAME).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")) + conn.getTable(conn.getMetaTableName()).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")) .qualifier(Bytes.toBytes("cq")).ifNotExists().thenPut(new Put(Bytes.toBytes(0)) .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))) .join(); @@ -426,7 +426,7 @@ public void testCheckAndDeleteSystemTable() { @Test public void testCheckAndDeleteMetaTable() { - conn.getTable(TableName.META_TABLE_NAME).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")) + conn.getTable(conn.getMetaTableName()).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")) .qualifier(Bytes.toBytes("cq")).ifNotExists().thenPut(new Put(Bytes.toBytes(0)) .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))) .join(); @@ -467,7 +467,7 @@ public void testCheckAndMutateSystemTable() throws IOException { @Test public void testCheckAndMutateMetaTable() throws IOException { - conn.getTable(TableName.META_TABLE_NAME).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")) + conn.getTable(conn.getMetaTableName()).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")) .qualifier(Bytes.toBytes("cq")).ifEquals(Bytes.toBytes("v")) .thenMutate(new RowMutations(Bytes.toBytes(0)).add((Mutation) new Delete(Bytes.toBytes(0)))) .join(); @@ -555,7 +555,7 @@ public void testScanSystemTable() throws Exception { @Test public void testScanMetaTable() throws Exception { CompletableFuture renewFuture = mockScanReturnRenewFuture(SYSTEMTABLE_QOS); - testForTable(TableName.META_TABLE_NAME, renewFuture, Optional.empty()); + testForTable(conn.getMetaTableName(), renewFuture, Optional.empty()); } private void testForTable(TableName tableName, CompletableFuture renewFuture, @@ -598,7 +598,7 @@ public void testBatchSystemTable() { @Test public void testBatchMetaTable() { - 
conn.getTable(TableName.META_TABLE_NAME).batchAll(Arrays.asList(new Delete(Bytes.toBytes(0)))) + conn.getTable(conn.getMetaTableName()).batchAll(Arrays.asList(new Delete(Bytes.toBytes(0)))) .join(); verify(stub, times(1)).multi(assertPriority(SYSTEMTABLE_QOS), any(ClientProtos.MultiRequest.class), any()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java index 2cecc974b6ef..3ee9a6b97e54 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java @@ -209,8 +209,8 @@ public Void answer(InvocationOnMock invocation) throws Throwable { } }).when(stub).get(any(HBaseRpcController.class), any(GetRequest.class), any()); final User user = UserProvider.instantiate(CONF).getCurrent(); - conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF, user), "test", null, - user) { + conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF, user), "test", + TableName.META_TABLE_NAME, null, user) { @Override AsyncRegionLocator getLocator() { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCompactFromClient.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCompactFromClient.java index 40617d78950a..eca15781f0e0 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCompactFromClient.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCompactFromClient.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.client; -import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -93,8 +92,9 @@ public void 
testCompactTableWithNullLocations() throws Exception { AsyncConnectionImpl connection = mock(AsyncConnectionImpl.class)) { mockedMeta.when(() -> ClientMetaTableAccessor.getTableHRegionLocations(any(AsyncTable.class), any(TableName.class))).thenReturn(nullLocationsFuture); + when(connection.getMetaTableName()).thenReturn(TableName.META_TABLE_NAME); AsyncTable metaTable = mock(AsyncTable.class); - when(connection.getTable(META_TABLE_NAME)).thenReturn(metaTable); + when(connection.getTable(connection.getMetaTableName())).thenReturn(metaTable); HashedWheelTimer hashedWheelTimer = mock(HashedWheelTimer.class); AsyncAdminBuilderBase asyncAdminBuilderBase = mock(AsyncAdminBuilderBase.class); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionFactoryApplyURIQueries.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionFactoryApplyURIQueries.java index 806c5edeb7fc..a31f0cfb900c 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionFactoryApplyURIQueries.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionFactoryApplyURIQueries.java @@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.After; @@ -59,6 +60,8 @@ public void setUp() { mockedConnectionRegistryFactory .when(() -> ConnectionRegistryFactory.create(any(), any(), any())).thenReturn(registry); when(registry.getClusterId()).thenReturn(CompletableFuture.completedFuture("cluster")); + when(registry.getMetaTableName()) + .thenReturn(CompletableFuture.completedFuture(TableName.META_TABLE_NAME)); } @After @@ -68,13 +71,15 @@ public void tearDown() { @Test public void testApplyURIQueries() throws Exception { - 
ConnectionFactory.createConnection(new URI("hbase+rpc://server:16010?a=1&b=2&c"), conf); - ArgumentCaptor captor = ArgumentCaptor.forClass(Configuration.class); - mockedConnectionRegistryFactory - .verify(() -> ConnectionRegistryFactory.create(any(), captor.capture(), any())); - Configuration c = captor.getValue(); - assertEquals("1", c.get("a")); - assertEquals("2", c.get("b")); - assertEquals("", c.get("c")); + try (Connection ignored = + ConnectionFactory.createConnection(new URI("hbase+rpc://server:16010?a=1&b=2&c"), conf)) { + ArgumentCaptor captor = ArgumentCaptor.forClass(Configuration.class); + mockedConnectionRegistryFactory + .verify(() -> ConnectionRegistryFactory.create(any(), captor.capture(), any())); + Configuration c = captor.getValue(); + assertEquals("1", c.get("a")); + assertEquals("2", c.get("b")); + assertEquals("", c.get("c")); + } } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java index e0d18f6bbb7e..6edbb3d47515 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java @@ -106,13 +106,15 @@ public void testMetricsConnectionScope() throws IOException { String scope = "testScope"; conf.setBoolean(MetricsConnection.CLIENT_SIDE_METRICS_ENABLED_KEY, true); - AsyncConnectionImpl impl = new AsyncConnectionImpl(conf, null, "foo", null, User.getCurrent()); + AsyncConnectionImpl impl = new AsyncConnectionImpl(conf, null, "foo", TableName.META_TABLE_NAME, + null, User.getCurrent()); Optional metrics = impl.getConnectionMetrics(); assertTrue("Metrics should be present", metrics.isPresent()); assertEquals(clusterId + "@" + Integer.toHexString(impl.hashCode()), metrics.get().getMetricScope()); conf.set(MetricsConnection.METRICS_SCOPE_KEY, scope); - impl = new AsyncConnectionImpl(conf, null, 
"foo", null, User.getCurrent()); + impl = new AsyncConnectionImpl(conf, null, "foo", TableName.META_TABLE_NAME, null, + User.getCurrent()); metrics = impl.getConnectionMetrics(); assertTrue("Metrics should be present", metrics.isPresent()); @@ -132,7 +134,7 @@ public void testMetricsWithMultiConnections() throws IOException { AsyncConnectionImpl impl; List connList = new ArrayList(); for (int i = 0; i < num; i++) { - impl = new AsyncConnectionImpl(conf, null, null, null, user); + impl = new AsyncConnectionImpl(conf, null, null, TableName.META_TABLE_NAME, null, user); connList.add(impl); } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java index 0e6a53ca7c47..9d17de911085 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java @@ -787,8 +787,13 @@ public static CellComparator getCellComparator(TableName tableName) { */ public static CellComparator getCellComparator(byte[] tableName) { // FYI, TableName.toBytes does not create an array; just returns existing array pointer. - return Bytes.equals(tableName, TableName.META_TABLE_NAME.toBytes()) + // Check if this is a meta table (hbase:meta or hbase:meta_*) + return isMetaTable(tableName) ? 
MetaCellComparator.META_COMPARATOR : CellComparatorImpl.COMPARATOR; } + + static boolean isMetaTable(byte[] tableName) { + return Bytes.startsWith(tableName, TableName.META_TABLE_NAME.getName()); + } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/InnerStoreCellComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/InnerStoreCellComparator.java index 7f6e87ebf911..0c9dae240b07 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/InnerStoreCellComparator.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/InnerStoreCellComparator.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -75,7 +74,8 @@ public static CellComparator getInnerStoreCellComparator(TableName tableName) { * @return CellComparator to use going off the {@code tableName} passed. */ public static CellComparator getInnerStoreCellComparator(byte[] tableName) { - return Bytes.equals(tableName, TableName.META_TABLE_NAME.toBytes()) + // Check if this is a meta table (hbase:meta or hbase:meta_*) + return CellComparatorImpl.isMetaTable(tableName) ? 
MetaCellComparator.META_COMPARATOR : InnerStoreCellComparator.INNER_STORE_COMPARATOR; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java index b6d854c13784..3a28c1eecb85 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java @@ -25,6 +25,8 @@ import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; @@ -45,6 +47,8 @@ @InterfaceAudience.Public public final class TableName implements Comparable { + private static final Logger LOG = LoggerFactory.getLogger(TableName.class); + /** See {@link #createTableNameIfNecessary(ByteBuffer, ByteBuffer)} */ private static final Set tableCache = new CopyOnWriteArraySet<>(); @@ -66,6 +70,7 @@ public final class TableName implements Comparable { + NAMESPACE_DELIM + ")?)" + "(?:" + VALID_TABLE_QUALIFIER_REGEX + "))"; /** The hbase:meta table's name. */ + @Deprecated public static final TableName META_TABLE_NAME = valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta"); @@ -85,9 +90,18 @@ public final class TableName implements Comparable { /** One globally disallowed name */ public static final String DISALLOWED_TABLE_NAME = "zookeeper"; - /** Returns True if tn is the hbase:meta table name. */ + /** + * Returns True if tn is a meta table (hbase:meta or hbase:meta_suffix). This handles + * both the default meta table and read replica meta tables. 
+ */ public static boolean isMetaTableName(final TableName tn) { - return tn.equals(TableName.META_TABLE_NAME); + if ( + tn == null || !tn.getNamespaceAsString().equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR) + ) { + return false; + } + String qualifier = tn.getQualifierAsString(); + return qualifier.equals("meta") || qualifier.startsWith("meta_"); } /** @@ -288,8 +302,8 @@ private TableName(ByteBuffer namespace, ByteBuffer qualifier) throws IllegalArgu throw new IllegalArgumentException(OLD_ROOT_STR + " has been deprecated."); } if (qualifierAsString.equals(OLD_META_STR)) { - throw new IllegalArgumentException( - OLD_META_STR + " no longer exists. The table has been " + "renamed to " + META_TABLE_NAME); + throw new IllegalArgumentException(OLD_META_STR + " no longer exists. The table has been " + + "renamed to hbase:meta or hbase:meta_suffix in conf"); } if (Bytes.equals(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME, namespace)) { diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java index 553b39311369..d8c169140f46 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java @@ -196,40 +196,28 @@ public void testMetaComparisons2() { long now = EnvironmentEdgeManager.currentTime(); CellComparator c = MetaCellComparator.META_COMPARATOR; assertTrue(c.compare( - createByteBufferKeyValueFromKeyValue( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now)), - createByteBufferKeyValueFromKeyValue( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now))) + createByteBufferKeyValueFromKeyValue(new KeyValue(Bytes.toBytes("hbase:meta,a,,0,1"), now)), + createByteBufferKeyValueFromKeyValue(new KeyValue(Bytes.toBytes("hbase:meta,a,,0,1"), now))) == 0); - Cell a = 
createByteBufferKeyValueFromKeyValue( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now)); - Cell b = createByteBufferKeyValueFromKeyValue( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,2"), now)); + Cell a = + createByteBufferKeyValueFromKeyValue(new KeyValue(Bytes.toBytes("hbase:meta,a,,0,1"), now)); + Cell b = + createByteBufferKeyValueFromKeyValue(new KeyValue(Bytes.toBytes("hbase:meta,a,,0,2"), now)); assertTrue(c.compare(a, b) < 0); assertTrue(c.compare( - createByteBufferKeyValueFromKeyValue( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,2"), now)), - createByteBufferKeyValueFromKeyValue( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now))) + createByteBufferKeyValueFromKeyValue(new KeyValue(Bytes.toBytes("hbase:meta,a,,0,2"), now)), + createByteBufferKeyValueFromKeyValue(new KeyValue(Bytes.toBytes("hbase:meta,a,,0,1"), now))) > 0); assertTrue(c.compare( - createByteBufferKeyValueFromKeyValue( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now)), - createByteBufferKeyValueFromKeyValue( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now))) + createByteBufferKeyValueFromKeyValue(new KeyValue(Bytes.toBytes("hbase:meta,,1"), now)), + createByteBufferKeyValueFromKeyValue(new KeyValue(Bytes.toBytes("hbase:meta,,1"), now))) == 0); assertTrue(c.compare( - createByteBufferKeyValueFromKeyValue( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now)), - createByteBufferKeyValueFromKeyValue( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,2"), now))) - < 0); + createByteBufferKeyValueFromKeyValue(new KeyValue(Bytes.toBytes("hbase:meta,,1"), now)), + createByteBufferKeyValueFromKeyValue(new KeyValue(Bytes.toBytes("hbase:meta,,2"), now))) < 0); assertTrue(c.compare( - 
createByteBufferKeyValueFromKeyValue( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,2"), now)), - createByteBufferKeyValueFromKeyValue( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now))) - > 0); + createByteBufferKeyValueFromKeyValue(new KeyValue(Bytes.toBytes("hbase:meta,,2"), now)), + createByteBufferKeyValueFromKeyValue(new KeyValue(Bytes.toBytes("hbase:meta,,1"), now))) > 0); } @Test diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java index 1644a6f1fce7..4547aef0abf3 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java @@ -197,32 +197,23 @@ public void testKeyValueBorderCases() { private void metacomparisons(final CellComparatorImpl c) { long now = EnvironmentEdgeManager.currentTime(); - assertTrue(c.compare( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now), - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now)) - == 0); - KeyValue a = - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now); - KeyValue b = - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,2"), now); + assertTrue(c.compare(new KeyValue(Bytes.toBytes("hbase:meta,a,,0,1"), now), + new KeyValue(Bytes.toBytes("hbase:meta,a,,0,1"), now)) == 0); + KeyValue a = new KeyValue(Bytes.toBytes("hbase:meta,a,,0,1"), now); + KeyValue b = new KeyValue(Bytes.toBytes("hbase:meta,a,,0,2"), now); assertTrue(c.compare(a, b) < 0); - assertTrue(c.compare( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,2"), now), - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now)) - > 0); + assertTrue(c.compare(new 
KeyValue(Bytes.toBytes("hbase:meta,a,,0,2"), now), + new KeyValue(Bytes.toBytes("hbase:meta,a,,0,1"), now)) > 0); } private void comparisons(final CellComparatorImpl c) { long now = EnvironmentEdgeManager.currentTime(); - assertTrue(c.compare( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now), - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now)) == 0); - assertTrue(c.compare( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now), - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,2"), now)) < 0); - assertTrue(c.compare( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,2"), now), - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now)) > 0); + assertTrue(c.compare(new KeyValue(Bytes.toBytes("hbase:meta,,1"), now), + new KeyValue(Bytes.toBytes("hbase:meta,,1"), now)) == 0); + assertTrue(c.compare(new KeyValue(Bytes.toBytes("hbase:meta,,1"), now), + new KeyValue(Bytes.toBytes("hbase:meta,,2"), now)) < 0); + assertTrue(c.compare(new KeyValue(Bytes.toBytes("hbase:meta,,2"), now), + new KeyValue(Bytes.toBytes("hbase:meta,,1"), now)) > 0); } @Test diff --git a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java index a459074ba27d..c4302c0a9e60 100644 --- a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java +++ b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java @@ -221,17 +221,16 @@ public void testRegionStatesCount() throws Exception { ClusterMetrics metrics = ADMIN.getClusterMetrics(); Assert.assertEquals(metrics.getTableRegionStatesCount().size(), 2); - Assert.assertEquals( - metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getRegionsInTransition(), - 0); - 
Assert.assertEquals( - metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getOpenRegions(), 1); - Assert.assertEquals( - metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getTotalRegions(), 1); - Assert.assertEquals( - metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getClosedRegions(), 0); - Assert.assertEquals( - metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getSplitRegions(), 0); + Assert.assertEquals(metrics.getTableRegionStatesCount() + .get(UTIL.getConnection().getMetaTableName()).getRegionsInTransition(), 0); + Assert.assertEquals(metrics.getTableRegionStatesCount() + .get(UTIL.getConnection().getMetaTableName()).getOpenRegions(), 1); + Assert.assertEquals(metrics.getTableRegionStatesCount() + .get(UTIL.getConnection().getMetaTableName()).getTotalRegions(), 1); + Assert.assertEquals(metrics.getTableRegionStatesCount() + .get(UTIL.getConnection().getMetaTableName()).getClosedRegions(), 0); + Assert.assertEquals(metrics.getTableRegionStatesCount() + .get(UTIL.getConnection().getMetaTableName()).getSplitRegions(), 0); Assert.assertEquals( metrics.getTableRegionStatesCount().get(TABLE_NAME).getRegionsInTransition(), 0); Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME).getOpenRegions(), 1); @@ -252,13 +251,12 @@ public void testRegionStatesWithSplit() throws Exception { ClusterMetrics metrics = ADMIN.getClusterMetrics(); Assert.assertEquals(metrics.getTableRegionStatesCount().size(), 2); - Assert.assertEquals( - metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getRegionsInTransition(), - 0); - Assert.assertEquals( - metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getOpenRegions(), 1); - Assert.assertEquals( - metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getTotalRegions(), 1); + Assert.assertEquals(metrics.getTableRegionStatesCount() + .get(UTIL.getConnection().getMetaTableName()).getRegionsInTransition(), 0); + 
Assert.assertEquals(metrics.getTableRegionStatesCount() + .get(UTIL.getConnection().getMetaTableName()).getOpenRegions(), 1); + Assert.assertEquals(metrics.getTableRegionStatesCount() + .get(UTIL.getConnection().getMetaTableName()).getTotalRegions(), 1); Assert.assertEquals( metrics.getTableRegionStatesCount().get(TABLE_NAME).getRegionsInTransition(), 0); Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME).getOpenRegions(), 1); @@ -272,13 +270,12 @@ public void testRegionStatesWithSplit() throws Exception { metrics = ADMIN.getClusterMetrics(); Assert.assertEquals(metrics.getTableRegionStatesCount().size(), 2); - Assert.assertEquals( - metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getRegionsInTransition(), - 0); - Assert.assertEquals( - metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getOpenRegions(), 1); - Assert.assertEquals( - metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getTotalRegions(), 1); + Assert.assertEquals(metrics.getTableRegionStatesCount() + .get(UTIL.getConnection().getMetaTableName()).getRegionsInTransition(), 0); + Assert.assertEquals(metrics.getTableRegionStatesCount() + .get(UTIL.getConnection().getMetaTableName()).getOpenRegions(), 1); + Assert.assertEquals(metrics.getTableRegionStatesCount() + .get(UTIL.getConnection().getMetaTableName()).getTotalRegions(), 1); Assert.assertEquals( metrics.getTableRegionStatesCount().get(TABLE_NAME).getRegionsInTransition(), 0); Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME).getOpenRegions(), 2); diff --git a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java index 5e8447c2ad81..2d470f47a179 100644 --- a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java +++ 
b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java @@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.slf4j.Logger; @@ -165,7 +164,7 @@ public void startHBase() throws IOException { int attemptsLeft = 10; while (attemptsLeft-- > 0) { try { - testUtil.getConnection().getTable(TableName.META_TABLE_NAME); + testUtil.getConnection().getMetaTable(); } catch (Exception e) { LOG.info("Waiting for HBase to startup. Retries left: " + attemptsLeft, e); Threads.sleep(1000); diff --git a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java index 144ea6503b06..713b0133167b 100644 --- a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java +++ b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java @@ -124,7 +124,7 @@ protected int doWork() throws Exception { LOG.debug("Trying to scan meta"); - Table metaTable = connection.getTable(TableName.META_TABLE_NAME); + Table metaTable = connection.getTable(connection.getMetaTableName()); ResultScanner scanner = metaTable.getScanner(new Scan()); Result result; while ((result = scanner.next()) != null) { diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java index a8c3a16d13dc..5eb6b00ec748 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java @@ -56,7 +56,7 @@ public static void setUp() throws Exception { 1000); // Make sure 
there are three servers. util.initializeCluster(3); - HBaseTestingUtil.setReplicas(util.getAdmin(), TableName.META_TABLE_NAME, 3); + HBaseTestingUtil.setReplicas(util.getAdmin(), util.getConnection().getMetaTableName(), 3); ZKWatcher zkw = util.getZooKeeperWatcher(); Configuration conf = util.getConfiguration(); String baseZNode = diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java index 2bb87ca8f2f6..5a7b6260918d 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java @@ -184,7 +184,7 @@ private static void setupActions() throws IOException { // Set up the action that will move the regions of meta. moveMetaRegionsAction = new MoveRegionsOfTableAction(sleepTime, - MonkeyConstants.DEFAULT_MOVE_REGIONS_MAX_TIME, TableName.META_TABLE_NAME); + MonkeyConstants.DEFAULT_MOVE_REGIONS_MAX_TIME, util.getConnection().getMetaTableName()); // Set up the action that will move the regions of our table. 
moveRegionAction = new MoveRegionsOfTableAction(sleepTime, diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java index 37096e408a74..4fc5e3389902 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java @@ -1205,6 +1205,11 @@ public Configuration getConfiguration() { return delegate.getConfiguration(); } + @Override + public TableName getMetaTableName() { + return delegate.getMetaTableName(); + } + @Override public Table getTable(TableName tableName) throws IOException { return delegate.getTable(tableName); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index d4ccac901436..823c33833bea 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -255,8 +255,8 @@ public void testSimpleCase() throws Throwable { */ @Test public void testMetaExport() throws Throwable { - String[] args = - new String[] { TableName.META_TABLE_NAME.getNameAsString(), FQ_OUTPUT_DIR, "1", "0", "0" }; + String[] args = new String[] { UTIL.getConnection().getMetaTableName().getNameAsString(), + FQ_OUTPUT_DIR, "1", "0", "0" }; assertTrue(runExport(args)); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java index 7c136fa2a19f..f10437d6c651 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java +++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java @@ -145,6 +145,11 @@ public Configuration getConfiguration() { return this.configuration; } + @Override + public TableName getMetaTableName() { + return null; + } + @Override public BufferedMutator getBufferedMutator(TableName tableName) throws IOException { return null; diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java index 7b2170d19520..08aa72536cc8 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java @@ -231,6 +231,11 @@ public Configuration getConfiguration() { throw new UnsupportedOperationException(); } + @Override + public TableName getMetaTableName() { + throw new UnsupportedOperationException(); + } + @Override public Table getTable(TableName tableName) throws IOException { Table table = mock(Table.class); diff --git a/hbase-protocol-shaded/src/main/protobuf/server/Registry.proto b/hbase-protocol-shaded/src/main/protobuf/server/Registry.proto index f55b892413b2..e6fc063ab0da 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/Registry.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/Registry.proto @@ -73,6 +73,14 @@ message GetBootstrapNodesResponse { repeated ServerName server_name = 1; } +/** Request and response to get the meta table name for this cluster */ +message GetMetaTableNameRequest { +} +message GetMetaTableNameResponse { + /** The name of the meta table. Defaults to "hbase:meta" if not set. */ + optional string table_name = 1; +} + /** * Implements all the RPCs needed by clients to look up cluster meta information needed for * connection establishment. 
@@ -105,6 +113,11 @@ service ClientMetaService { * Get nodes which could be used as ClientMetaService */ rpc GetBootstrapNodes(GetBootstrapNodesRequest) returns (GetBootstrapNodesResponse); + + /** + * Get the meta table name for this cluster. + */ + rpc GetMetaTableName(GetMetaTableNameRequest) returns(GetMetaTableNameResponse); } message GetConnectionRegistryRequest { diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java index a115fd17af3f..f6fa703e1183 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java @@ -55,15 +55,13 @@ public class TestStatusResource { private static final Logger LOG = LoggerFactory.getLogger(TestStatusResource.class); - private static final byte[] META_REGION_NAME = Bytes.toBytes(TableName.META_TABLE_NAME + ",,1"); - private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); private static Client client; private static JAXBContext context; private static Configuration conf; - private static void validate(StorageClusterStatusModel model) { + private static void validate(StorageClusterStatusModel model) throws IOException { assertNotNull(model); assertTrue(model.getRegions() + ">= 1", model.getRegions() >= 1); assertTrue(model.getRequests() >= 0); @@ -77,7 +75,10 @@ private static void validate(StorageClusterStatusModel model) { assertTrue(node.getStartCode() > 0L); assertTrue(node.getRequests() >= 0); for (StorageClusterStatusModel.Node.Region region : node.getRegions()) { - if (Bytes.equals(region.getName(), META_REGION_NAME)) { + if ( + Bytes.equals(region.getName(), + Bytes.toBytes(TEST_UTIL.getConnection().getMetaTableName() + ",,1")) + ) { foundMeta = true; } } diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java index d6d277808838..c644680dcbf4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java @@ -89,6 +89,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetMastersResponseEntry; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetMetaRegionLocationsRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetMetaRegionLocationsResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetMetaTableNameRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetMetaTableNameResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.TooSlowLog.SlowLogPayload; /** @@ -381,6 +383,23 @@ public final GetBootstrapNodesResponse getBootstrapNodes(RpcController controlle return builder.build(); } + @Override + public final GetMetaTableNameResponse getMetaTableName(RpcController controller, + GetMetaTableNameRequest request) throws ServiceException { + GetMetaTableNameResponse.Builder builder = GetMetaTableNameResponse.newBuilder(); + + try { + TableName metaTableName = server.getMetaTableName(); + if (metaTableName != null) { + builder.setTableName(metaTableName.getNameAsString()); + } + } catch (Exception e) { + throw new ServiceException(e); + } + + return builder.build(); + } + @Override @QosPriority(priority = HConstants.ADMIN_QOS) public UpdateConfigurationResponse updateConfiguration(RpcController controller, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java index c2f65edd4fda..85df415f6598 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java @@ -698,4 +698,16 @@ public String toString() { protected abstract boolean cacheTableDescriptor(); protected abstract boolean clusterMode(); + + protected TableName getDefaultMetaTableName() { + return TableName.META_TABLE_NAME; + } + + @Override + public TableName getMetaTableName() { + // For now, it is hbase:meta because we don't support custom meta table name. + // After adding support for custom meta table names, we can calculate this from conf and use it + // downstream to persist it in Master Region. + return getDefaultMetaTableName(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index 05b049e27dbc..ee5104d50201 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -151,7 +151,7 @@ public static Table getMetaHTable(final Connection connection) throws IOExceptio if (connection.isClosed()) { throw new IOException("connection is closed"); } - return connection.getTable(TableName.META_TABLE_NAME); + return connection.getTable(connection.getMetaTableName()); } /** @@ -366,7 +366,7 @@ private static Scan getMetaScan(Configuration conf, int rowUpperLimit) { public static List> getTableRegionsAndLocations( Connection connection, @Nullable final TableName tableName, final boolean excludeOfflinedSplitParents) throws IOException { - if (tableName != null && tableName.equals(TableName.META_TABLE_NAME)) { + if (tableName != null && tableName.equals(connection.getMetaTableName())) { throw new IOException( "This method can't be used to locate meta regions; use MetaTableLocator instead"); } @@ -592,7 +592,7 @@ public static PairOfSameType getDaughterRegions(Result data) { */ @Nullable 
public static TableState getTableState(Connection conn, TableName tableName) throws IOException { - if (tableName.equals(TableName.META_TABLE_NAME)) { + if (TableName.isMetaTableName(tableName)) { return new TableState(tableName, TableState.State.ENABLED); } Table metaHTable = getMetaHTable(conn); @@ -859,7 +859,7 @@ public static void addRegionsToMeta(Connection connection, List regi private static void updateTableState(Connection connection, TableState state) throws IOException { Put put = makePutFromTableState(state, EnvironmentEdgeManager.currentTime()); putToMetaTable(connection, put); - LOG.info("Updated {} in hbase:meta", state); + LOG.info("Updated {} in {}", state, connection.getMetaTableName()); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java index 20b915288c61..4d01e98f2f7e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java @@ -59,8 +59,8 @@ class AsyncClusterConnectionImpl extends AsyncConnectionImpl implements AsyncClusterConnection { public AsyncClusterConnectionImpl(Configuration conf, ConnectionRegistry registry, - String clusterId, SocketAddress localAddress, User user) { - super(conf, registry, clusterId, localAddress, user, Collections.emptyMap()); + String clusterId, TableName metaTableName, SocketAddress localAddress, User user) { + super(conf, registry, clusterId, metaTableName, localAddress, user, Collections.emptyMap()); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java index 70a1e703c667..a837a677ff5f 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java @@ -22,6 +22,7 @@ import java.net.URI; import java.security.PrivilegedExceptionAction; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.hadoop.hbase.util.ReflectionUtils; @@ -42,13 +43,14 @@ private ClusterConnectionFactory() { private static AsyncClusterConnection createAsyncClusterConnection(Configuration conf, ConnectionRegistry registry, SocketAddress localAddress, User user) throws IOException { String clusterId = FutureUtils.get(registry.getClusterId()); + TableName metaTableName = FutureUtils.get(registry.getMetaTableName()); Class clazz = conf.getClass(HBASE_SERVER_CLUSTER_CONNECTION_IMPL, AsyncClusterConnectionImpl.class, AsyncClusterConnection.class); try { return user .runAs((PrivilegedExceptionAction) () -> ReflectionUtils - .newInstance(clazz, conf, registry, clusterId, localAddress, user)); + .newInstance(clazz, conf, registry, clusterId, metaTableName, localAddress, user)); } catch (Exception e) { throw new IOException(e); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryEndpoint.java index a75faf3db75b..9071a75d10f0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryEndpoint.java @@ -22,6 +22,7 @@ import java.util.Optional; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; /** @@ -54,4 +55,13 @@ public interface 
ConnectionRegistryEndpoint { * Get the location of meta regions. */ List getMetaLocations(); + + /** + * Get the name of the meta table for this cluster. + *

+ * By default, this returns "hbase:meta". Future implementations may support custom meta table + * names for read replica clusters. + * @return The meta table name + */ + TableName getMetaTableName(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedAsyncConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedAsyncConnection.java index ebffc7ee5111..b19d86be19b2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedAsyncConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedAsyncConnection.java @@ -52,6 +52,11 @@ public Configuration getConfiguration() { return conn.getConfiguration(); } + @Override + public TableName getMetaTableName() { + return conn.getMetaTableName(); + } + @Override public AsyncTableRegionLocator getRegionLocator(TableName tableName) { return conn.getRegionLocator(tableName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedConnection.java index ae52df266cfb..a9276dbaaf5a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedConnection.java @@ -61,6 +61,11 @@ public Configuration getConfiguration() { return this.conn.getConfiguration(); } + @Override + public TableName getMetaTableName() { + return this.conn.getMetaTableName(); + } + @Override public BufferedMutator getBufferedMutator(TableName tableName) throws IOException { return this.conn.getBufferedMutator(tableName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitConnectionRegistry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitConnectionRegistry.java index 1eb4e2d08ea8..8863d6b8c54a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitConnectionRegistry.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitConnectionRegistry.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; /** @@ -68,6 +69,11 @@ public CompletableFuture getActiveMaster() { return future; } + @Override + public CompletableFuture getMetaTableName() { + return CompletableFuture.completedFuture(endpoint.getMetaTableName()); + } + @Override public String getConnectionString() { return "short-circuit"; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java index 3cac1f319dae..17a314e0ae34 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java @@ -133,7 +133,7 @@ private String getRegionIdFromOp(Row op) { } private boolean isMetaTableOp(ObserverContext e) { - return TableName.META_TABLE_NAME.equals(e.getEnvironment().getRegionInfo().getTable()); + return TableName.isMetaTableName(e.getEnvironment().getRegionInfo().getTable()); } private void clientMetricRegisterAndMark() { @@ -267,8 +267,8 @@ public void start(CoprocessorEnvironment env) throws IOException { if ( env instanceof RegionCoprocessorEnvironment && ((RegionCoprocessorEnvironment) env).getRegionInfo().getTable() != null - && ((RegionCoprocessorEnvironment) env).getRegionInfo().getTable() - .equals(TableName.META_TABLE_NAME) + && TableName + .isMetaTableName(((RegionCoprocessorEnvironment) env).getRegionInfo().getTable()) ) { RegionCoprocessorEnvironment regionCoprocessorEnv = (RegionCoprocessorEnvironment) env; registry = regionCoprocessorEnv.getMetricRegistryForRegionServer(); diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index c997f1c6e822..b7bc3badf629 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -509,6 +509,14 @@ public class HMaster extends HBaseServerBase implements Maste */ private ReplicationPeerModificationStateStore replicationPeerModificationStateStore; + /** + * Store for the meta table name in the Master Local Region. This provides cluster-specific + * storage for dynamic meta table name discovery. + */ + private MetaTableNameStore metaTableNameStore; + + private volatile TableName cachedMetaTableName; + /** * Initializes the HMaster. The steps are as follows: *

@@ -1016,6 +1024,8 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE masterRegion = MasterRegionFactory.create(this); rsListStorage = new MasterRegionServerList(masterRegion, this); + cachedMetaTableName = initMetaTableName(); + // Initialize the ServerManager and register it as a configuration observer this.serverManager = createServerManager(this, rsListStorage); this.configurationManager.registerObserver(this.serverManager); @@ -1092,7 +1102,7 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE startupTaskGroup.addTask("Initializing meta table if this is a new deploy"); InitMetaProcedure initMetaProc = null; // Print out state of hbase:meta on startup; helps debugging. - if (!this.assignmentManager.getRegionStates().hasTableRegionStates(TableName.META_TABLE_NAME)) { + if (!this.assignmentManager.getRegionStates().hasTableRegionStates(getMetaTableName())) { Optional optProc = procedureExecutor.getProcedures().stream() .filter(p -> p instanceof InitMetaProcedure).map(o -> (InitMetaProcedure) o).findAny(); initMetaProc = optProc.orElseGet(() -> { @@ -1156,7 +1166,7 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE return; } - TableDescriptor metaDescriptor = tableDescriptors.get(TableName.META_TABLE_NAME); + TableDescriptor metaDescriptor = tableDescriptors.get(getMetaTableName()); final ColumnFamilyDescriptor tableFamilyDesc = metaDescriptor.getColumnFamily(HConstants.TABLE_FAMILY); final ColumnFamilyDescriptor replBarrierFamilyDesc = @@ -1174,16 +1184,17 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE if (conf.get(HConstants.META_REPLICAS_NUM) != null) { int replicasNumInConf = conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM); - TableDescriptor metaDesc = tableDescriptors.get(TableName.META_TABLE_NAME); + TableDescriptor metaDesc = tableDescriptors.get(getMetaTableName()); if 
(metaDesc.getRegionReplication() != replicasNumInConf) { // it is possible that we already have some replicas before upgrading, so we must set the // region replication number in meta TableDescriptor directly first, without creating a // ModifyTableProcedure, otherwise it may cause a double assign for the meta replicas. int existingReplicasCount = - assignmentManager.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).size(); + assignmentManager.getRegionStates().getRegionsOfTable(getMetaTableName()).size(); if (existingReplicasCount > metaDesc.getRegionReplication()) { - LOG.info("Update replica count of hbase:meta from {}(in TableDescriptor)" - + " to {}(existing ZNodes)", metaDesc.getRegionReplication(), existingReplicasCount); + LOG.info( + "Update replica count of {} from {} (in TableDescriptor) to {} (existing ZNodes)", + getMetaTableName(), metaDesc.getRegionReplication(), existingReplicasCount); metaDesc = TableDescriptorBuilder.newBuilder(metaDesc) .setRegionReplication(existingReplicasCount).build(); tableDescriptors.update(metaDesc); @@ -1193,7 +1204,8 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE LOG.info( "The {} config is {} while the replica count in TableDescriptor is {}" - + " for hbase:meta, altering...", + + " for {}, altering...", - HConstants.META_REPLICAS_NUM, replicasNumInConf, metaDesc.getRegionReplication()); + HConstants.META_REPLICAS_NUM, replicasNumInConf, metaDesc.getRegionReplication(), + getMetaTableName()); procedureExecutor.submitProcedure(new ModifyTableProcedure( procedureExecutor.getEnvironment(), TableDescriptorBuilder.newBuilder(metaDesc) .setRegionReplication(replicasNumInConf).build(), @@ -1423,7 +1435,7 @@ private void createMissingCFsInMetaDuringUpgrade(TableDescriptor metaDescriptor) TableDescriptor newMetaDesc = TableDescriptorBuilder.newBuilder(metaDescriptor) .setColumnFamily(FSTableDescriptors.getTableFamilyDescForMeta(conf))
.setColumnFamily(FSTableDescriptors.getReplBarrierFamilyDescForMeta()).build(); - long pid = this.modifyTable(TableName.META_TABLE_NAME, () -> newMetaDesc, 0, 0, false); + long pid = this.modifyTable(getMetaTableName(), () -> newMetaDesc, 0, 0, false); waitForProcedureToComplete(pid, "Failed to add table and rep_barrier CFs to meta"); } @@ -1652,6 +1664,33 @@ public TableStateManager getTableStateManager() { return tableStateManager; } + /** + * Override base implementation to read from Master Local Region storage. This allows the master + * to return the cluster-specific meta table name. + */ + @Override + public TableName getMetaTableName() { + return cachedMetaTableName; + } + + private TableName initMetaTableName() { + metaTableNameStore = new MetaTableNameStore(masterRegion); + try { + TableName metaTableName = metaTableNameStore.load(); + // If metaTableNameStore is empty (bootstrap case), get meta table name from super, store it, + // and return. + if (Objects.isNull(metaTableName)) { + metaTableName = super.getDefaultMetaTableName(); + LOG.info("Bootstrap: storing default meta table name in master region: {}", metaTableName); + metaTableNameStore.store(metaTableName); + } + return metaTableName; + } catch (IOException e) { + LOG.error("Exception loading/storing meta table name from master region", e); + throw new RuntimeException(e); + } + } + /* * Start up all services. If any of these threads gets an unhandled exception then they just die * with a logged message.
This should be fine because in general, we do not expect the master to @@ -2601,7 +2640,7 @@ private void startActiveMasterManager(int infoPort) throws KeeperException { } private static boolean isCatalogTable(final TableName tableName) { - return tableName.equals(TableName.META_TABLE_NAME); + return TableName.isMetaTableName(tableName); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java index 464dfaca7035..b0bb167fc4fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java @@ -84,7 +84,7 @@ protected int getBasePriority(RequestHeader header, Message param) { if (rst.getRegionInfoList() != null) { for (HBaseProtos.RegionInfo info : rst.getRegionInfoList()) { TableName tn = ProtobufUtil.toTableName(info.getTableName()); - if (TableName.META_TABLE_NAME.equals(tn)) { + if (TableName.isMetaTableName(tn)) { return META_TRANSITION_QOS; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 745b962860bb..dd88d56c6c36 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -525,4 +525,10 @@ long flushTable(final TableName tableName, final List columnFamilies, * @return procedure id */ long rollAllWALWriters(long nonceGroup, long nonce) throws IOException; + + /** + * Return cluster's meta table name + * @return meta table name + */ + TableName getMetaTableName(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaTableNameStore.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaTableNameStore.java new file mode 100644 index 000000000000..f356a10df4f4 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaTableNameStore.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master; + +import java.io.IOException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.master.region.MasterRegion; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Stores and retrieves the meta table name for this cluster in the Master Local Region. This + * provides cluster-specific storage for the meta table name. 
+ */ +@InterfaceAudience.Private +public class MetaTableNameStore { + private static final Logger LOG = LoggerFactory.getLogger(MetaTableNameStore.class); + private static final byte[] META_TABLE_NAME_ROW = Bytes.toBytes("meta_table_name"); + private static final byte[] INFO_FAMILY = Bytes.toBytes("info"); + private static final byte[] NAME_QUALIFIER = Bytes.toBytes("name"); + + private final MasterRegion masterRegion; + private volatile TableName cachedMetaTableName; + + public MetaTableNameStore(MasterRegion masterRegion) { + this.masterRegion = masterRegion; + } + + /** + * Store the meta table name in the Master Local Region. This should be called once during cluster + * initialization. The stored value is cluster-specific and should not conflict with other + * clusters sharing the same HDFS. + * @param metaTableName the meta table name to store + * @throws IOException if the operation fails + */ + public void store(TableName metaTableName) throws IOException { + LOG.info("Storing meta table name in Master Local Region: {}", metaTableName); + Put put = new Put(META_TABLE_NAME_ROW); + put.addColumn(INFO_FAMILY, NAME_QUALIFIER, Bytes.toBytes(metaTableName.getNameAsString())); + masterRegion.update(r -> r.put(put)); + cachedMetaTableName = metaTableName; + LOG.info("Successfully stored meta table name: {}", metaTableName); + } + + /** + * Load the meta table name from the Master Local Region. 
+ * @return the meta table name for this cluster + * @throws IOException if the load operation fails + */ + public TableName load() throws IOException { + if (cachedMetaTableName != null) { + return cachedMetaTableName; + } + + synchronized (this) { + if (cachedMetaTableName != null) { + return cachedMetaTableName; + } + Get get = new Get(META_TABLE_NAME_ROW); + get.addColumn(INFO_FAMILY, NAME_QUALIFIER); + Result result = masterRegion.get(get); + + if (!result.isEmpty()) { + byte[] value = result.getValue(INFO_FAMILY, NAME_QUALIFIER); + cachedMetaTableName = TableName.valueOf(Bytes.toString(value)); + LOG.debug("Loaded meta table name from Master Local Region: {}", cachedMetaTableName); + return cachedMetaTableName; + } + LOG.info("No stored meta table name found in Master Local Region: {}", cachedMetaTableName); + return cachedMetaTableName; + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java index 854c21da2bc7..8d5173d479bc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java @@ -605,7 +605,7 @@ public static void printAssignmentPlan(FavoredNodesPlan plan) { */ public void updateAssignmentPlanToMeta(FavoredNodesPlan plan) throws IOException { try { - LOG.info("Start to update the hbase:meta with the new assignment plan"); + LOG.info("Started updating {} with the new assignment plan", connection.getMetaTableName()); Map> assignmentMap = plan.getAssignmentMap(); Map> planToUpdate = new HashMap<>(assignmentMap.size()); Map regionToRegionInfoMap = @@ -690,14 +690,14 @@ private void updateAssignmentPlanToRegionServers(FavoredNodesPlan plan) throws I } public void updateAssignmentPlan(FavoredNodesPlan plan) throws IOException { - LOG.info("Start to update the new assignment 
plan for the hbase:meta table and" - + " the region servers"); + LOG.info("Started updating the new assignment plan for {} and the region servers", + connection.getMetaTableName()); // Update the new assignment plan to META updateAssignmentPlanToMeta(plan); // Update the new assignment plan to Region Servers updateAssignmentPlanToRegionServers(plan); - LOG.info("Finish to update the new assignment plan for the hbase:meta table and" - + " the region servers"); + LOG.info("Finished updating the new assignment plan for {} and the region servers", + connection.getMetaTableName()); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java index 4d18b2ad8f4e..8b527e6a9001 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java @@ -79,7 +79,7 @@ private void tryMigrateNamespaceTable() throws IOException, InterruptedException if (!opt.isPresent()) { // the procedure is not present, check whether have the ns family in meta table TableDescriptor metaTableDesc = - masterServices.getTableDescriptors().get(TableName.META_TABLE_NAME); + masterServices.getTableDescriptors().get(masterServices.getConnection().getMetaTableName()); if (metaTableDesc.hasColumnFamily(HConstants.NAMESPACE_FAMILY)) { // normal case, upgrading is done or the cluster is created with 3.x code migrationDone = true; @@ -106,7 +106,7 @@ private void addToCache(Result result, byte[] family, byte[] qualifier) throws I } private void loadFromMeta() throws IOException { - try (Table table = masterServices.getConnection().getTable(TableName.META_TABLE_NAME); + try (Table table = masterServices.getConnection().getMetaTable(); ResultScanner scanner = table.getScanner(HConstants.NAMESPACE_FAMILY)) { for (Result result;;) { result = scanner.next(); @@ 
-204,7 +204,7 @@ public static void insertNamespaceToMeta(Connection conn, NamespaceDescriptor ns Put put = new Put(row, true).addColumn(HConstants.NAMESPACE_FAMILY, HConstants.NAMESPACE_COL_DESC_QUALIFIER, ProtobufUtil.toProtoNamespaceDescriptor(ns).toByteArray()); - try (Table table = conn.getTable(TableName.META_TABLE_NAME)) { + try (Table table = conn.getMetaTable()) { table.put(put); } } @@ -212,7 +212,7 @@ public static void insertNamespaceToMeta(Connection conn, NamespaceDescriptor ns public void deleteNamespace(String namespaceName) throws IOException { checkMigrationDone(); Delete d = new Delete(Bytes.toBytes(namespaceName)); - try (Table table = masterServices.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = masterServices.getConnection().getMetaTable()) { table.delete(d); } cache.remove(namespaceName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java index 6ad32623be1a..b71b03ccd6f9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java @@ -86,7 +86,7 @@ public boolean isTableState(TableName tableName, TableState.State... states) { } public void setDeletedTable(TableName tableName) throws IOException { - if (tableName.equals(TableName.META_TABLE_NAME)) { + if (tableName.equals(master.getConnection().getMetaTableName())) { // Can't delete the hbase:meta table. 
return; } @@ -147,7 +147,7 @@ public TableState getTableState(TableName tableName) throws IOException { } private void updateMetaState(TableName tableName, TableState.State newState) throws IOException { - if (tableName.equals(TableName.META_TABLE_NAME)) { + if (tableName.equals(master.getConnection().getMetaTableName())) { if ( TableState.State.DISABLING.equals(newState) || TableState.State.DISABLED.equals(newState) ) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java index 7bfa79e21484..d66c6459f411 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java @@ -354,7 +354,7 @@ public void start() throws IOException, KeeperException { if (RegionReplicaUtil.isDefaultReplica(regionInfo.getReplicaId())) { setMetaAssigned(regionInfo, state == State.OPEN); } - LOG.debug("Loaded hbase:meta {}", regionNode); + LOG.debug("Loaded {} {}", master.getConnection().getMetaTableName(), regionNode); }, result); } } @@ -1962,8 +1962,8 @@ private void checkMetaLoaded(RegionInfo hri, long procId) throws PleaseHoldExcep boolean meta = isMetaRegion(hri); boolean metaLoaded = isMetaLoaded(); if (!meta && !metaLoaded) { - throw new PleaseHoldException( - "Master not fully online; hbase:meta=" + meta + ", metaLoaded=" + metaLoaded); + throw new PleaseHoldException("Master not fully online; " + + master.getConnection().getMetaTableName() + "=" + meta + ", metaLoaded=" + metaLoaded); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java index c370fed9d9c0..e97f264de04c 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java @@ -718,8 +718,10 @@ private void preMergeRegionsCommit(final MasterProcedureEnv env) throws IOExcept RegionInfo.parseRegionName(p.getRow()); } } catch (IOException e) { - LOG.error("Row key of mutation from coprocessor is not parsable as region name. " - + "Mutations from coprocessor should only be for hbase:meta table.", e); + LOG.error( + "Row key of mutation from coprocessor is not parsable as region name. " + + "Mutations from coprocessor should only be for {} table.", + env.getMasterServices().getConnection().getMetaTableName(), e); throw e; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java index 5987fc7537b4..50711115ae8f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java @@ -189,9 +189,9 @@ private Put generateUpdateRegionLocationPut(RegionStateNode regionStateNode) thr final int replicaId = regionInfo.getReplicaId(); final Put put = new Put(CatalogFamilyFormat.getMetaKeyForRegion(regionInfo), time); MetaTableAccessor.addRegionInfo(put, regionInfo); - final StringBuilder info = - new StringBuilder("pid=").append(pid).append(" updating hbase:meta row=") - .append(regionInfo.getEncodedName()).append(", regionState=").append(state); + final StringBuilder info = new StringBuilder("pid=").append(pid).append(" updating ") + .append(master.getConnection().getMetaTableName()).append(" row=") + .append(regionInfo.getEncodedName()).append(", regionState=").append(state); if (openSeqNum >= 0) { Preconditions.checkArgument(state == State.OPEN && regionLocation != 
null, "Open region should be on a server"); @@ -283,7 +283,8 @@ private CompletableFuture updateRegionLocation(RegionInfo regionInfo, Stat future = FutureUtils.failedFuture(e); } } else { - AsyncTable table = master.getAsyncConnection().getTable(TableName.META_TABLE_NAME); + AsyncTable table = + master.getAsyncConnection().getTable(master.getConnection().getMetaTableName()); future = table.put(put); } FutureUtils.addListener(future, (r, e) -> { @@ -329,8 +330,8 @@ private void multiMutate(RegionInfo ri, List mutations) throws IOExcep } } MutateRowsRequest request = builder.build(); - AsyncTable table = - master.getConnection().toAsyncConnection().getTable(TableName.META_TABLE_NAME); + AsyncTable table = master.getConnection().toAsyncConnection() + .getTable(master.getConnection().getMetaTableName()); CompletableFuture future = table. coprocessorService(MultiRowMutationService::newStub, (stub, controller, done) -> stub.mutateRows(controller, request, done), row); @@ -338,7 +339,7 @@ MutateRowsResponse> coprocessorService(MultiRowMutationService::newStub, } private Table getMetaTable() throws IOException { - return master.getConnection().getTable(TableName.META_TABLE_NAME); + return master.getConnection().getTable(master.getConnection().getMetaTableName()); } private Result getRegionCatalogResult(RegionInfo region) throws IOException { @@ -476,7 +477,6 @@ public List getMergeRegions(RegionInfo region) throws IOException { /** * Deletes merge qualifiers for the specified merge region. 
- * @param connection connection we're using * @param mergeRegion the merged region */ public void deleteMergeQualifiers(RegionInfo mergeRegion) throws IOException { @@ -504,7 +504,7 @@ public void deleteMergeQualifiers(RegionInfo mergeRegion) throws IOException { + " in meta table, they are cleaned up already, Skip."); return; } - try (Table table = master.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = master.getConnection().getTable(master.getConnection().getMetaTableName())) { table.delete(delete); } LOG.info( @@ -566,7 +566,6 @@ private void deleteRegions(List regions, long ts) throws IOException /** * Overwrites the specified regions from hbase:meta. Deletes old rows for the given regions and * adds new ones. Regions added back have state CLOSED. - * @param connection connection we're using * @param regionInfos list of regions to be added to META */ public void overwriteRegions(List regionInfos, int regionReplication) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java index 3d3d3d18de23..c569c4a34301 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java @@ -903,8 +903,10 @@ private void preSplitRegionBeforeMETA(final MasterProcedureEnv env) RegionInfo.parseRegionName(p.getRow()); } } catch (IOException e) { - LOG.error("pid=" + getProcId() + " row key of mutation from coprocessor not parsable as " - + "region name." + "Mutations from coprocessor should only for hbase:meta table."); + LOG.error( + "pid={} row key of mutation from coprocessor not parsable as region name. 
" + + "Mutations from coprocessor should only be for {} table.", + getProcId(), env.getMasterServices().getConnection().getMetaTableName()); throw e; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationBarrierCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationBarrierCleaner.java index 77b1082d0f03..46d8e8dc5a55 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationBarrierCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationBarrierCleaner.java @@ -80,9 +80,8 @@ public synchronized void chore() { long deletedLastPushedSeqIds = 0; TableName tableName = null; List peerIds = null; - try (Table metaTable = conn.getTable(TableName.META_TABLE_NAME); - ResultScanner scanner = metaTable.getScanner( - new Scan().addFamily(HConstants.REPLICATION_BARRIER_FAMILY).readAllVersions())) { + try (Table metaTable = conn.getTable(conn.getMetaTableName()); ResultScanner scanner = metaTable + .getScanner(new Scan().addFamily(HConstants.REPLICATION_BARRIER_FAMILY).readAllVersions())) { for (;;) { Result result = scanner.next(); if (result == null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MetaBrowser.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MetaBrowser.java index 9f5ff857d4d8..6c2216837a20 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MetaBrowser.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MetaBrowser.java @@ -156,7 +156,7 @@ public TableName getScanTable() { public Results getResults() { final AsyncTable asyncTable = - connection.getTable(TableName.META_TABLE_NAME); + connection.getTable(connection.getMetaTableName()); return new Results(asyncTable.getScanner(buildScan())); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java index 0d3ddb43abd4..b93eca943cb9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java @@ -105,7 +105,8 @@ protected boolean initialChore() { scan(); } } catch (IOException e) { - LOG.warn("Failed initial janitorial scan of hbase:meta table", e); + LOG.warn("Failed initial janitorial scan of {} table", + services.getConnection().getMetaTableName(), e); return false; } return true; @@ -145,7 +146,8 @@ protected void chore() { + this.services.getServerManager().isClusterShutdown()); } } catch (IOException e) { - LOG.warn("Failed janitorial scan of hbase:meta table", e); + LOG.warn("Failed janitorial scan of {} table", services.getConnection().getMetaTableName(), + e); } } @@ -484,7 +486,7 @@ public static void main(String[] args) throws IOException { */ Get g = new Get(Bytes.toBytes("t2,40,1564119846424.1db8c57d64e0733e0f027aaeae7a0bf0.")); g.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); - try (Table t = connection.getTable(TableName.META_TABLE_NAME)) { + try (Table t = connection.getTable(connection.getMetaTableName())) { Result r = t.get(g); byte[] row = g.getRow(); row[row.length - 2] <<= row[row.length - 2]; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java index 1244d5bf3525..b7b0bcfaaab1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java @@ -203,19 +203,20 @@ private static List createMetaEntries(final MasterServices masterSer .flatMap(List::stream).collect(Collectors.toList()); final List createMetaEntriesFailures = addMetaEntriesResults.stream() 
.filter(Either::hasRight).map(Either::getRight).collect(Collectors.toList()); - LOG.debug("Added {}/{} entries to hbase:meta", createMetaEntriesSuccesses.size(), - newRegionInfos.size()); + LOG.debug("Added {}/{} entries to {}", createMetaEntriesSuccesses.size(), newRegionInfos.size(), + TableName.META_TABLE_NAME.getNameAsString()); if (!createMetaEntriesFailures.isEmpty()) { LOG.warn( - "Failed to create entries in hbase:meta for {}/{} RegionInfo descriptors. First" + "Failed to create entries in {} for {}/{} RegionInfo descriptors. First" + " failure message included; full list of failures with accompanying stack traces is" + " available at log level DEBUG. message={}", - createMetaEntriesFailures.size(), addMetaEntriesResults.size(), - createMetaEntriesFailures.get(0).getMessage()); + TableName.META_TABLE_NAME.getNameAsString(), createMetaEntriesFailures.size(), + addMetaEntriesResults.size(), createMetaEntriesFailures.get(0).getMessage()); if (LOG.isDebugEnabled()) { createMetaEntriesFailures - .forEach(ioe -> LOG.debug("Attempt to fix region hole in hbase:meta failed.", ioe)); + .forEach(ioe -> LOG.debug("Attempt to fix region hole in {} failed.", + TableName.META_TABLE_NAME.getNameAsString(), ioe)); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java index c712f1cba672..07dd3af70aea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java @@ -137,8 +137,9 @@ private RegionInfo metaTableConsistencyCheck(Result metaTableRow) { if (!Bytes.equals(metaTableRow.getRow(), ri.getRegionName())) { LOG.warn( "INCONSISTENCY: Row name is not equal to serialized info:regioninfo content; " - + "row={} {}; See if RegionInfo is referenced in another hbase:meta row? 
Delete?", - Bytes.toStringBinary(metaTableRow.getRow()), ri.getRegionNameAsString()); + + "row={} {}; See if RegionInfo is referenced in another {} row? Delete?", + Bytes.toStringBinary(metaTableRow.getRow()), ri.getRegionNameAsString(), + services.getConnection().getMetaTableName()); return null; } // Skip split parent region diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java index e199f6d5971d..b9a02c98dfbf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java @@ -393,8 +393,7 @@ private static void cleanRegionsInMeta(final MasterProcedureEnv env, final Table .setFilter(new KeyOnlyFilter()); long now = EnvironmentEdgeManager.currentTime(); List deletes = new ArrayList<>(); - try ( - Table metaTable = env.getMasterServices().getConnection().getTable(TableName.META_TABLE_NAME); + try (Table metaTable = env.getMasterServices().getConnection().getTable(env.getMetaTableName()); ResultScanner scanner = metaTable.getScanner(tableScan)) { for (;;) { Result result = scanner.next(); @@ -405,7 +404,7 @@ private static void cleanRegionsInMeta(final MasterProcedureEnv env, final Table } if (!deletes.isEmpty()) { LOG.warn("Deleting some vestigial " + deletes.size() + " rows of " + tableName + " from " - + TableName.META_TABLE_NAME); + + env.getMetaTableName()); metaTable.delete(deletes); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java index e8999b886afd..72931ad53ff5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java @@ -110,8 +110,8 @@ protected Flow executeFromState(final MasterProcedureEnv env, final DisableTable env.getMasterServices().getTableDescriptors().get(tableName).hasGlobalReplicationScope() ) { MasterFileSystem fs = env.getMasterFileSystem(); - try (BufferedMutator mutator = env.getMasterServices().getConnection() - .getBufferedMutator(TableName.META_TABLE_NAME)) { + try (BufferedMutator mutator = + env.getMasterServices().getConnection().getBufferedMutator(env.getMetaTableName())) { for (RegionInfo region : env.getAssignmentManager().getRegionStates() .getRegionsOfTable(tableName)) { long maxSequenceId = WALSplitUtil.getMaxRegionSequenceId( @@ -230,7 +230,7 @@ public TableOperationType getTableOperationType() { */ private boolean prepareDisable(final MasterProcedureEnv env) throws IOException { boolean canTableBeDisabled = true; - if (tableName.equals(TableName.META_TABLE_NAME)) { + if (tableName.equals(env.getMetaTableName())) { setFailure("master-disable-table", new ConstraintException("Cannot disable " + this.tableName)); canTableBeDisabled = false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java index 43d69361c2d2..91de3f9e6c5a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java @@ -102,14 +102,14 @@ List getRegionsOnCrashedServer(MasterProcedureEnv env) { MetaTableAccessor.scanMetaForTableRegions(env.getMasterServices().getConnection(), visitor, null); } catch (IOException ioe) { - LOG.warn("Failed scan of hbase:meta for 'Unknown Servers'", ioe); + LOG.warn("Failed scan of {} for 'Unknown Servers'", env.getMetaTableName(), ioe); return ris; } 
// create the server state node too env.getAssignmentManager().getRegionStates().createServer(getServerName()); - LOG.info("Found {} mentions of {} in hbase:meta of OPEN/OPENING Regions: {}", - visitor.getReassigns().size(), getServerName(), visitor.getReassigns().stream() - .map(RegionInfo::getEncodedName).collect(Collectors.joining(","))); + LOG.info("Found {} mentions of {} in {} of OPEN/OPENING Regions: {}", + visitor.getReassigns().size(), getServerName(), env.getMetaTableName(), visitor.getReassigns() + .stream().map(RegionInfo::getEncodedName).collect(Collectors.joining(","))); return visitor.getReassigns(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java index 2d54eaf6c58c..ddaa188bf5a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java @@ -67,6 +67,8 @@ public class InitMetaProcedure extends AbstractStateMachineTableProcedure proc) { // Meta Queue Lookup Helpers // ============================================================================ private MetaQueue getMetaQueue() { - MetaQueue node = AvlTree.get(metaMap, TableName.META_TABLE_NAME, META_QUEUE_KEY_COMPARATOR); + // For now, hardcode default. 
Future: pass metaTableName via constructor from Master + TableName metaTableName = TableName.META_TABLE_NAME; + MetaQueue node = AvlTree.get(metaMap, metaTableName, META_QUEUE_KEY_COMPARATOR); if (node != null) { return node; } - node = new MetaQueue(locking.getMetaLock()); + node = new MetaQueue(metaTableName, locking.getMetaLock()); metaMap = AvlTree.insert(metaMap, node); return node; } @@ -1079,6 +1081,7 @@ public boolean waitMetaExclusiveLock(Procedure procedure) { return false; } waitProcedure(lock, procedure); + // TODO: Get dynamic name from MasterServices logLockedResource(LockedResourceType.META, TableName.META_TABLE_NAME.getNameAsString()); return true; } finally { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MetaQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MetaQueue.java index 3d313c9ac3ab..901541d7302c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MetaQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MetaQueue.java @@ -32,8 +32,8 @@ @InterfaceAudience.Private class MetaQueue extends Queue { - protected MetaQueue(LockStatus lockStatus) { - super(TableName.META_TABLE_NAME, 1, lockStatus); + protected MetaQueue(TableName metaTableName, LockStatus lockStatus) { + super(metaTableName, 1, lockStatus); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java index dc9eac4c879d..c726b2a77de9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java @@ -64,7 +64,7 @@ private void migrate(MasterProcedureEnv env) throws IOException { try (Table nsTable = 
conn.getTable(TableName.NAMESPACE_TABLE_NAME); ResultScanner scanner = nsTable.getScanner( new Scan().addFamily(TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES).readAllVersions()); - BufferedMutator mutator = conn.getBufferedMutator(TableName.META_TABLE_NAME)) { + BufferedMutator mutator = conn.getBufferedMutator(env.getMetaTableName())) { for (Result result;;) { result = scanner.next(); if (result == null) { @@ -88,7 +88,7 @@ protected Flow executeFromState(MasterProcedureEnv env, MigrateNamespaceTablePro switch (state) { case MIGRATE_NAMESPACE_TABLE_ADD_FAMILY: TableDescriptor metaTableDesc = - env.getMasterServices().getTableDescriptors().get(TableName.META_TABLE_NAME); + env.getMasterServices().getTableDescriptors().get(env.getMetaTableName()); if (!metaTableDesc.hasColumnFamily(HConstants.NAMESPACE_FAMILY)) { TableDescriptor newMetaTableDesc = TableDescriptorBuilder.newBuilder(metaTableDesc) .setColumnFamily( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java index 3450f3059106..f8b71b3f5bae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java @@ -110,7 +110,7 @@ protected void preflightChecks(MasterProcedureEnv env, Boolean enabled) throws H for (byte[] family : UNDELETABLE_META_COLUMNFAMILIES) { if (!cfs.contains(family)) { throw new HBaseIOException( - "Delete of hbase:meta column family " + Bytes.toString(family)); + "Delete of " + env.getMetaTableName() + " column family " + Bytes.toString(family)); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SchemaLocking.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SchemaLocking.java index 642df36d535f..9153191017b1 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SchemaLocking.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SchemaLocking.java @@ -174,6 +174,7 @@ List getLocks() { addToLockedResources(lockedResources, regionLocks, Function.identity(), LockedResourceType.REGION); addToLockedResources(lockedResources, peerLocks, Function.identity(), LockedResourceType.PEER); + // TODO: Get dynamic name from MasterServices addToLockedResources(lockedResources, ImmutableMap.of(TableName.META_TABLE_NAME, metaLock), tn -> tn.getNameAsString(), LockedResourceType.META); addToLockedResources(lockedResources, globalLocks, Function.identity(), @@ -236,6 +237,7 @@ public String toString() { .append("tableLocks", filterUnlocked(tableLocks)) .append("regionLocks", filterUnlocked(regionLocks)) .append("peerLocks", filterUnlocked(peerLocks)) + // TODO: Get dynamic name from MasterServices .append("metaLocks", filterUnlocked(ImmutableMap.of(TableName.META_TABLE_NAME, metaLock))) .append("globalLocks", filterUnlocked(globalLocks)).build(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java index ef11e68217a5..3da0c61c9bf5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java @@ -231,7 +231,7 @@ public void toStringClassDetails(StringBuilder sb) { } private boolean prepareTruncate() throws IOException { - if (getTableName().equals(TableName.META_TABLE_NAME)) { + if (TableName.isMetaTableName(getTableName())) { throw new IOException("Can't truncate region in catalog tables"); } return true; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index eea82ca511eb..f69e6230fc1b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -3672,6 +3672,22 @@ public List getMetaLocations() { return metaRegionLocationCache.getMetaRegionLocations(); } + /** + * RegionServers get the meta table name from Master via connection registry. + */ + @Override + public TableName getMetaTableName() { + if (asyncClusterConnection != null) { + try { + return asyncClusterConnection.getMetaTableName(); + } catch (Exception e) { + LOG.warn("Failed to get meta table name from Master", e); + } + } + // Bootstrap + return super.getMetaTableName(); + } + @Override protected NamedQueueRecorder createNamedQueueRecord() { return NamedQueueRecorder.getInstance(conf); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index ba838e2f16ca..c5c3eea3e3da 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -1928,7 +1928,7 @@ public OpenRegionResponse openRegion(final RpcController controller, tableName = ProtobufUtil.toTableName(ri.getTableName()); } } - if (!TableName.META_TABLE_NAME.equals(tableName)) { + if (!server.getConnection().getMetaTableName().equals(tableName)) { throw new ServiceException(ie); } // We are assigning meta, wait a little for regionserver to finish initialization. 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java index 8bf32baada22..4caf746c6532 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java @@ -192,7 +192,7 @@ public static ReplicationBarrierResult getReplicationBarrierResult(Connection co .addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER) .addFamily(HConstants.REPLICATION_BARRIER_FAMILY).readAllVersions().setReversed(true) .setCaching(10); - try (Table table = conn.getTable(TableName.META_TABLE_NAME); + try (Table table = conn.getTable(conn.getMetaTableName()); ResultScanner scanner = table.getScanner(scan)) { for (Result result;;) { result = scanner.next(); @@ -215,7 +215,7 @@ public static ReplicationBarrierResult getReplicationBarrierResult(Connection co public static long[] getReplicationBarriers(Connection conn, byte[] regionName) throws IOException { - try (Table table = conn.getTable(TableName.META_TABLE_NAME)) { + try (Table table = conn.getTable(conn.getMetaTableName())) { Result result = table.get(new Get(regionName) .addColumn(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER) .readAllVersions()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java index 57d156ab1c2e..30f1d71975cb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java @@ -544,7 +544,7 @@ public AuthResult permissionGranted(String request, User user, Action permReques TableName tableName, Map> families) { // 1. 
All users need read access to hbase:meta table. // this is a very common operation, so deal with it quickly. - if (TableName.META_TABLE_NAME.equals(tableName)) { + if (TableName.isMetaTableName(tableName)) { if (permRequest == Action.READ) { return AuthResult.allow(request, "All users allowed", user, permRequest, tableName, families); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java index 4d6f57e22edc..10b4a8a20941 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java @@ -660,21 +660,21 @@ private int getRegionIndex(List> startEndKeys, byte[] key) private void checkRegionIndexValid(int idx, List> startEndKeys, TableName tableName) throws IOException { if (idx < 0) { - throw new IOException("The first region info for table " + tableName - + " can't be found in hbase:meta.Please use hbck tool to fix it first."); + throw new IOException("The first region info for table " + tableName + " can't be found in " + + "hbase:meta. Please use hbck tool to fix it first."); } else if ( (idx == startEndKeys.size() - 1) && !Bytes.equals(startEndKeys.get(idx).getSecond(), HConstants.EMPTY_BYTE_ARRAY) ) { - throw new IOException("The last region info for table " + tableName - + " can't be found in hbase:meta.Please use hbck tool to fix it first."); + throw new IOException("The last region info for table " + tableName + " can't be found in " + + "hbase:meta. Please use hbck tool to fix it first."); } else if ( idx + 1 < startEndKeys.size() && !(Bytes.compareTo(startEndKeys.get(idx).getSecond(), startEndKeys.get(idx + 1).getFirst()) == 0) ) { throw new IOException("The endkey of one region for table " + tableName + " is not equal to the startkey of the next region in hbase:meta." 
- + "Please use hbck tool to fix it first."); + + " Please use hbck tool to fix it first."); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index 75bf721ef41e..f763c76e6c38 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -143,24 +143,30 @@ public static void tryUpdateMetaTableDescriptor(Configuration conf) throws IOExc CommonFSUtils.getRootDir(conf)); } + private static TableName getMetaTableNameFromConf(Configuration conf) { + // TODO: Support replica-specific meta table names from masterRegion + return TableName.META_TABLE_NAME; + } + public static TableDescriptor tryUpdateAndGetMetaTableDescriptor(Configuration conf, FileSystem fs, Path rootdir) throws IOException { // see if we already have meta descriptor on fs. Write one if not. 
- Optional> opt = getTableDescriptorFromFs(fs, - CommonFSUtils.getTableDir(rootdir, TableName.META_TABLE_NAME), false); + TableName metaTableName = getMetaTableNameFromConf(conf); + Optional> opt = + getTableDescriptorFromFs(fs, CommonFSUtils.getTableDir(rootdir, metaTableName), false); if (opt.isPresent()) { return opt.get().getSecond(); } TableDescriptorBuilder builder = createMetaTableDescriptorBuilder(conf); TableDescriptor td = StoreFileTrackerFactory.updateWithTrackerConfigs(conf, builder.build()); - LOG.info("Creating new hbase:meta table descriptor {}", td); + LOG.info("Creating new {} table descriptor {}", metaTableName, td); TableName tableName = td.getTableName(); Path tableDir = CommonFSUtils.getTableDir(rootdir, tableName); Path p = writeTableDescriptor(fs, td, tableDir, null); if (p == null) { - throw new IOException("Failed update hbase:meta table descriptor"); + throw new IOException("Failed to update " + metaTableName + " table descriptor"); } - LOG.info("Updated hbase:meta table descriptor to {}", p); + LOG.info("Updated {} table descriptor to {}", metaTableName, p); return td; } @@ -198,7 +204,7 @@ private static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Con // TODO We used to set CacheDataInL1 for META table. When we have BucketCache in file mode, now // the META table data goes to File mode BC only. Test how that affect the system. If too much, // we have to rethink about adding back the setCacheDataInL1 for META table CFs.
- return TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME) + return TableDescriptorBuilder.newBuilder(getMetaTableNameFromConf(conf)) .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY) .setMaxVersions( conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS)) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index c3eafa7c11d1..19b7506cde29 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -540,7 +540,7 @@ public void run() { connection = ConnectionFactory.createConnection(getConf()); admin = connection.getAdmin(); - meta = connection.getTable(TableName.META_TABLE_NAME); + meta = connection.getTable(connection.getMetaTableName()); status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS, Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION)); } @@ -660,17 +660,19 @@ public int onlineConsistencyRepair() throws IOException, KeeperException, Interr reportUnknownServers(); // Check if hbase:meta is found only once and in the right place if (!checkMetaRegion()) { - String errorMsg = "hbase:meta table is not consistent. "; + String errorMsg = connection.getMetaTableName() + " table is not consistent. "; if (shouldFixAssignments()) { - errorMsg += "HBCK will try fixing it. Rerun once hbase:meta is back to consistent state."; + errorMsg += "HBCK will try fixing it. 
Rerun once " + connection.getMetaTableName() + + " is back to consistent state."; } else { - errorMsg += "Run HBCK with proper fix options to fix hbase:meta inconsistency."; + errorMsg += "Run HBCK with proper fix options to fix " + connection.getMetaTableName() + + " inconsistency."; } errors.reportError(errorMsg + " Exiting..."); return -2; } // Not going with further consistency check for tables when hbase:meta itself is not consistent. - LOG.info("Loading regionsinfo from the hbase:meta table"); + LOG.info("Loading regionsinfo from the {} table", connection.getMetaTableName()); boolean success = loadMetaEntries(); if (!success) return -1; @@ -1219,8 +1221,8 @@ private boolean sidelineFile(FileSystem fs, Path hbaseRoot, Path path) throws IO * TODO -- need to add tests for this. */ private void reportEmptyMetaCells() { - errors.print("Number of empty REGIONINFO_QUALIFIER rows in hbase:meta: " - + emptyRegionInfoQualifiers.size()); + errors.print("Number of empty REGIONINFO_QUALIFIER rows in " + connection.getMetaTableName() + + ": " + emptyRegionInfoQualifiers.size()); if (details) { for (Result r : emptyRegionInfoQualifiers) { errors.print(" " + r); @@ -1371,7 +1373,7 @@ private boolean fabricateTableInfo(FSTableDescriptors fstd, TableName tableName, */ public void fixEmptyMetaCells() throws IOException { if (shouldFixEmptyMetaCells() && !emptyRegionInfoQualifiers.isEmpty()) { - LOG.info("Trying to fix empty REGIONINFO_QUALIFIER hbase:meta rows."); + LOG.info("Trying to fix empty REGIONINFO_QUALIFIER {} rows.", connection.getMetaTableName()); for (Result region : emptyRegionInfoQualifiers) { deleteMetaRegion(region.getRow()); errors.getErrorList().remove(ERROR_CODE.EMPTY_META_CELL); @@ -1574,8 +1576,8 @@ private void loadTableStates() throws IOException { // Add hbase:meta so this tool keeps working. In hbase2, meta is always enabled though it // has no entry in the table states. HBCK doesn't work right w/ hbase2 but just do this in // meantime. 
- this.tableStates.put(TableName.META_TABLE_NAME, - new TableState(TableName.META_TABLE_NAME, TableState.State.ENABLED)); + this.tableStates.put(connection.getMetaTableName(), + new TableState(connection.getMetaTableName(), TableState.State.ENABLED)); } /** @@ -1604,7 +1606,7 @@ public void loadHdfsRegionDirs() throws IOException, InterruptedException { TableName tableName = CommonFSUtils.getTableName(path); if ( (!checkMetaOnly && isTableIncluded(tableName)) - || tableName.equals(TableName.META_TABLE_NAME) + || tableName.equals(connection.getMetaTableName()) ) { tableDirs.add(fs.getFileStatus(path)); } @@ -1649,7 +1651,7 @@ public void loadHdfsRegionDirs() throws IOException, InterruptedException { */ private boolean recordMetaRegion() throws IOException { List locs; - try (RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) { + try (RegionLocator locator = connection.getRegionLocator(connection.getMetaTableName())) { locs = locator.getRegionLocations(HConstants.EMPTY_START_ROW, true); } if (locs == null || locs.isEmpty()) { @@ -2019,9 +2021,11 @@ void closeRegion(HbckRegionInfo hi) throws IOException, InterruptedException { } RegionInfo hri = h.getRegion(); if (hri == null) { - LOG.warn("Unable to close region " + hi.getRegionNameAsString() - + " because hbase:meta had invalid or missing " + HConstants.CATALOG_FAMILY_STR + ":" - + Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " qualifier value."); + LOG.warn( + "Unable to close region " + hi.getRegionNameAsString() + + " because {} had invalid or missing " + HConstants.CATALOG_FAMILY_STR + ":" + + Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " qualifier value.", + connection.getMetaTableName()); continue; } // close the region -- close files and remove assignment @@ -2140,8 +2144,9 @@ else if (!inMeta && !inHdfs && !isDeployed) { assert false : "Entry for region with no data"; } else if (!inMeta && !inHdfs && isDeployed) { errors.reportError(ERROR_CODE.NOT_IN_META_HDFS, - 
"Region " + descriptiveName + ", key=" + key + ", not on HDFS or in hbase:meta but " - + "deployed on " + Joiner.on(", ").join(hbi.getDeployedOn())); + "Region " + descriptiveName + ", key=" + key + ", not on HDFS or in " + + connection.getMetaTableName() + " but " + "deployed on " + + Joiner.on(", ").join(hbi.getDeployedOn())); if (shouldFixAssignments()) { undeployRegions(hbi); } @@ -2155,8 +2160,9 @@ else if (!inMeta && !inHdfs && !isDeployed) { + " got merge recently, its file(s) will be cleaned by CatalogJanitor later"); return; } - errors.reportError(ERROR_CODE.NOT_IN_META_OR_DEPLOYED, "Region " + descriptiveName - + " on HDFS, but not listed in hbase:meta " + "or deployed on any region server"); + errors.reportError(ERROR_CODE.NOT_IN_META_OR_DEPLOYED, + "Region " + descriptiveName + " on HDFS, but not listed in " + connection.getMetaTableName() + + " or deployed on any region server"); // restore region consistency of an adopted orphan if (shouldFixMeta()) { if (!hbi.isHdfsRegioninfoPresent()) { @@ -2196,7 +2202,8 @@ else if (!inMeta && !inHdfs && !isDeployed) { } } } - LOG.info("Patching hbase:meta with .regioninfo: " + hbi.getHdfsHRI()); + LOG.info("Patching {} with .regioninfo: " + hbi.getHdfsHRI(), + connection.getMetaTableName()); int numReplicas = admin.getDescriptor(hbi.getTableName()).getRegionReplication(); HBaseFsckRepair.fixMetaHoleOnlineAndAddReplicas(getConf(), hbi.getHdfsHRI(), admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet(), @@ -2224,7 +2231,8 @@ else if (!inMeta && !inHdfs && !isDeployed) { return; } - LOG.info("Patching hbase:meta with with .regioninfo: " + hbi.getHdfsHRI()); + LOG.info("Patching {} with with .regioninfo: " + hbi.getHdfsHRI(), + connection.getMetaTableName()); int numReplicas = admin.getDescriptor(hbi.getTableName()).getRegionReplication(); HBaseFsckRepair.fixMetaHoleOnlineAndAddReplicas(getConf(), hbi.getHdfsHRI(), 
admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet(), @@ -2301,9 +2309,9 @@ else if (!inMeta && !inHdfs && !isDeployed) { } } else if (inMeta && inHdfs && isMultiplyDeployed) { errors.reportError(ERROR_CODE.MULTI_DEPLOYED, - "Region " + descriptiveName + " is listed in hbase:meta on region server " - + hbi.getMetaEntry().regionServer + " but is multiply assigned to region servers " - + Joiner.on(", ").join(hbi.getDeployedOn())); + "Region " + descriptiveName + " is listed in " + connection.getMetaTableName() + + " on region server " + hbi.getMetaEntry().regionServer + " but is multiply assigned" + + " to region servers " + Joiner.on(", ").join(hbi.getDeployedOn())); // If we are trying to fix the errors if (shouldFixAssignments()) { errors.print("Trying to fix assignment error..."); @@ -2313,8 +2321,8 @@ else if (!inMeta && !inHdfs && !isDeployed) { } } else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) { errors.reportError(ERROR_CODE.SERVER_DOES_NOT_MATCH_META, - "Region " + descriptiveName + " listed in hbase:meta on region server " - + hbi.getMetaEntry().regionServer + " but found on region server " + "Region " + descriptiveName + " listed in " + connection.getMetaTableName() + + " on region server " + hbi.getMetaEntry().regionServer + " but found on region server " + hbi.getDeployedOn().get(0)); // If we are trying to fix the errors if (shouldFixAssignments()) { @@ -2599,7 +2607,7 @@ boolean checkMetaRegion() throws IOException, KeeperException, InterruptedExcept metaRegions.put(value.getReplicaId(), value); } } - int metaReplication = admin.getDescriptor(TableName.META_TABLE_NAME).getRegionReplication(); + int metaReplication = admin.getDescriptor(connection.getMetaTableName()).getRegionReplication(); boolean noProblem = true; // There will be always entries in regionInfoMap corresponding to hbase:meta & its replicas // Check the deployed servers. It should be exactly one server for each replica. 
@@ -2614,11 +2622,12 @@ boolean checkMetaRegion() throws IOException, KeeperException, InterruptedExcept if (servers.isEmpty()) { assignMetaReplica(i); } else if (servers.size() > 1) { - errors.reportError(ERROR_CODE.MULTI_META_REGION, "hbase:meta, replicaId " - + metaHbckRegionInfo.getReplicaId() + " is found on more than one region."); + errors.reportError(ERROR_CODE.MULTI_META_REGION, + connection.getMetaTableName() + ", replicaId " + metaHbckRegionInfo.getReplicaId() + + " is found on more than one region."); if (shouldFixAssignments()) { - errors.print("Trying to fix a problem with hbase:meta, replicaId " - + metaHbckRegionInfo.getReplicaId() + ".."); + errors.print("Trying to fix a problem with " + connection.getMetaTableName() + + ", replicaId " + metaHbckRegionInfo.getReplicaId() + ".."); setShouldRerun(); // try fix it (treat is a dupe assignment) HBaseFsckRepair.fixMultiAssignment(connection, @@ -2631,11 +2640,11 @@ boolean checkMetaRegion() throws IOException, KeeperException, InterruptedExcept for (Map.Entry entry : metaRegions.entrySet()) { noProblem = false; errors.reportError(ERROR_CODE.SHOULD_NOT_BE_DEPLOYED, - "hbase:meta replicas are deployed in excess. Configured " + metaReplication + ", deployed " - + metaRegions.size()); + connection.getMetaTableName() + " replicas are deployed in excess. 
Configured " + + metaReplication + ", deployed " + metaRegions.size()); if (shouldFixAssignments()) { - errors.print( - "Trying to undeploy excess replica, replicaId: " + entry.getKey() + " of hbase:meta.."); + errors.print("Trying to undeploy excess replica, replicaId: " + entry.getKey() + " of " + + connection.getMetaTableName() + ".."); setShouldRerun(); unassignMetaReplica(entry.getValue()); } @@ -2655,9 +2664,9 @@ private void unassignMetaReplica(HbckRegionInfo hi) private void assignMetaReplica(int replicaId) throws IOException, KeeperException, InterruptedException { errors.reportError(ERROR_CODE.NO_META_REGION, - "hbase:meta, replicaId " + replicaId + " is not found on any region."); + connection.getMetaTableName() + ", replicaId " + replicaId + " is not found on any region."); if (shouldFixAssignments()) { - errors.print("Trying to fix a problem with hbase:meta.."); + errors.print("Trying to fix a problem with " + connection.getMetaTableName() + ".."); setShouldRerun(); // try to fix it (treat it as unassigned region) RegionInfo h = RegionReplicaUtil @@ -2693,7 +2702,7 @@ public boolean visit(Result result) throws IOException { if (rl == null) { emptyRegionInfoQualifiers.add(result); errors.reportError(ERROR_CODE.EMPTY_META_CELL, - "Empty REGIONINFO_QUALIFIER found in hbase:meta"); + "Empty REGIONINFO_QUALIFIER found in " + connection.getMetaTableName()); return true; } ServerName sn = null; @@ -2703,7 +2712,7 @@ public boolean visit(Result result) throws IOException { ) { emptyRegionInfoQualifiers.add(result); errors.reportError(ERROR_CODE.EMPTY_META_CELL, - "Empty REGIONINFO_QUALIFIER found in hbase:meta"); + "Empty REGIONINFO_QUALIFIER found in " + connection.getMetaTableName()); return true; } RegionInfo hri = rl.getRegionLocation(RegionInfo.DEFAULT_REPLICA_ID).getRegion(); @@ -2731,7 +2740,8 @@ public boolean visit(Result result) throws IOException { } else if (previous.getMetaEntry() == null) { previous.setMetaEntry(m); } else { - throw new 
IOException("Two entries in hbase:meta are same " + previous); + throw new IOException( + "Two entries in " + connection.getMetaTableName() + " are same " + previous); } } List mergeParents = CatalogFamilyFormat.getMergeRegions(result.rawCells()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java index 6ead66c16d9e..239a44c1b834 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.ClusterMetrics.Option; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.AsyncClusterConnection; @@ -149,7 +148,7 @@ public static void closeRegionSilentlyAndWait(Connection connection, ServerName public static void fixMetaHoleOnlineAndAddReplicas(Configuration conf, RegionInfo hri, Collection servers, int numReplicas) throws IOException { Connection conn = ConnectionFactory.createConnection(conf); - Table meta = conn.getTable(TableName.META_TABLE_NAME); + Table meta = conn.getTable(conn.getMetaTableName()); Put put = MetaTableAccessor.makePutFromRegionInfo(hri); if (numReplicas > 1) { Random rand = ThreadLocalRandom.current(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java index c1f98edd75ab..9c1f9eb3d45a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java @@ -586,13 +586,13 @@ private void unloadRegions(ServerName server, List regionServers, // For 
isolating hbase:meta, it should move explicitly in Ack mode, // hence the forceMoveRegionByAck = true. if (!metaSeverName.equals(server)) { - LOG.info("Region of hbase:meta " + metaRegionInfo.getEncodedName() + " is on server " - + metaSeverName + " moving to " + server); + LOG.info("Region of {} {} is on server {} moving to {}", conn.getMetaTableName(), + metaRegionInfo.getEncodedName(), metaSeverName, server); submitRegionMovesWhileUnloading(metaSeverName, Collections.singletonList(server), movedRegions, Collections.singletonList(metaRegionInfo), true); } else { - LOG.info("Region of hbase:meta " + metaRegionInfo.getEncodedName() + " already exists" - + " on server : " + server); + LOG.info("Region of {} {} already exists on server: {}", conn.getMetaTableName(), + metaRegionInfo.getEncodedName(), server); } isolateRegionInfoList.add(RegionInfoBuilder.FIRST_META_REGIONINFO); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java index b8f095eb03df..dc1e6e140eec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.hbase.wal; -import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; - import java.io.IOException; import java.io.InterruptedIOException; import java.util.HashMap; @@ -78,7 +76,7 @@ public BoundedRecoveredHFilesOutputSink(WALSplitter walSplitter, void append(RegionEntryBuffer buffer) throws IOException { Map> familyCells = new HashMap<>(); Map familySeqIds = new HashMap<>(); - boolean isMetaTable = buffer.tableName.equals(META_TABLE_NAME); + boolean isMetaTable = TableName.isMetaTableName(buffer.tableName); // First iterate all Cells to find which column families are present and to stamp Cell with // 
sequence id. for (WAL.Entry entry : buffer.entryBuffer) { diff --git a/hbase-server/src/main/resources/hbase-webapps/master/catalogTables.jsp b/hbase-server/src/main/resources/hbase-webapps/master/catalogTables.jsp index 1f1aad61ba70..9f79df924014 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/catalogTables.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/catalogTables.jsp @@ -56,7 +56,7 @@ <%= frags.get(tableName.getNameAsString()) != null ? frags.get(tableName.getNameAsString()) + "%" : "n/a" %> <% } %> <% String description = ""; - if (tableName.equals(TableName.META_TABLE_NAME)){ + if (tableName.equals(master.getConnection().getMetaTableName())){ description = "The hbase:meta table holds references to all User Table regions."; } else if (tableName.equals(CanaryTool.DEFAULT_WRITE_TABLE_NAME)){ description = "The hbase:canary table is used to sniff the write availability of" diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp index d88d968e199f..2a300e2629bc 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp @@ -195,8 +195,8 @@ Table table = master.getConnection().getTable(TableName.valueOf(fqtn)); boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false); boolean readOnly = !InfoServer.canUserModifyUI(request, getServletContext(), conf); - int numMetaReplicas = - master.getTableDescriptors().get(TableName.META_TABLE_NAME).getRegionReplication(); + int numMetaReplicas = master.getTableDescriptors() + .get(master.getConnection().getMetaTableName()).getRegionReplication(); Map frags = null; if (showFragmentation) { frags = FSUtils.getTableFragmentation(master); @@ -317,7 +317,7 @@

<% //Meta table. - if(fqtn.equals(TableName.META_TABLE_NAME.getNameAsString())) { %> + if(fqtn.equals(master.getConnection().getMetaTableName().getNameAsString())) { %>

Table Regions

@@ -653,7 +653,7 @@
- +
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java index f56fc57dd2d9..e5d73c2296fb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java @@ -388,7 +388,7 @@ public boolean restoreClusterMetrics(ClusterMetrics desiredStatus) throws IOExce * Get the ServerName of region server serving the first hbase:meta region */ public ServerName getServerHoldingMeta() throws IOException { - return getServerHoldingRegion(TableName.META_TABLE_NAME, + return getServerHoldingRegion(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable(), RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java index 8ae9206694c6..37fd9cbb6fe7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java @@ -902,8 +902,7 @@ public SingleProcessHBaseCluster startMiniHBaseCluster(StartTestingClusterOption // Populate the master address configuration from mini cluster configuration. 
conf.set(HConstants.MASTER_ADDRS_KEY, MasterRegistry.getMasterAddr(c)); // Don't leave here till we've done a successful scan of the hbase:meta - try (Table t = getConnection().getTable(TableName.META_TABLE_NAME); - ResultScanner s = t.getScanner(new Scan())) { + try (Table t = getConnection().getMetaTable(); ResultScanner s = t.getScanner(new Scan())) { for (;;) { if (s.next() == null) { break; @@ -1025,7 +1024,7 @@ public void restartHBaseCluster(StartTestingClusterOption option) option.getMasterClass(), option.getRsClass()); // Don't leave here till we've done a successful scan of the hbase:meta Connection conn = ConnectionFactory.createConnection(this.conf); - Table t = conn.getTable(TableName.META_TABLE_NAME); + Table t = conn.getTable(conn.getMetaTableName()); ResultScanner s = t.getScanner(new Scan()); while (s.next() != null) { // do nothing @@ -2169,7 +2168,7 @@ public String checksumRows(final Table table) throws Exception { */ public List createMultiRegionsInMeta(final Configuration conf, final TableDescriptor htd, byte[][] startKeys) throws IOException { - try (Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table meta = getConnection().getMetaTable()) { Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR); List newRegions = new ArrayList<>(startKeys.length); MetaTableAccessor.updateTableState(getConnection(), htd.getTableName(), @@ -2522,7 +2521,7 @@ public void process(WatchedEvent watchedEvent) { monitor.close(); if (checkStatus) { - getConnection().getTable(TableName.META_TABLE_NAME).close(); + getConnection().getMetaTable().close(); } } @@ -3047,7 +3046,7 @@ public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOExce * Waith until all system table's regions get assigned */ public void waitUntilAllSystemRegionsAssigned() throws IOException { - waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME); + waitUntilAllRegionsAssigned(getConnection().getMetaTableName()); } /** @@ -3060,7 +3059,7 @@ public void 
waitUntilAllSystemRegionsAssigned() throws IOException { public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout) throws IOException { if (!TableName.isMetaTableName(tableName)) { - try (final Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) { + try (final Table meta = getConnection().getMetaTable()) { LOG.debug("Waiting until all regions of table " + tableName + " get assigned. Timeout = " + timeout + "ms"); waitFor(timeout, 200, true, new ExplainingPredicate() { @@ -3280,7 +3279,7 @@ public Table createRandomTable(TableName tableName, final Collection fam Bytes.toBytes(String.format(keyFormat, splitEndKey)), numRegions); if (hbaseCluster != null) { - getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME); + getMiniHBaseCluster().flushcache(getConnection().getMetaTableName()); } BufferedMutator mutator = getConnection().getBufferedMutator(tableName); @@ -3361,7 +3360,7 @@ public static void waitForHostPort(String host, int port) throws IOException { } public static int getMetaRSPort(Connection connection) throws IOException { - try (RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) { + try (RegionLocator locator = connection.getRegionLocator(connection.getMetaTableName())) { return locator.getRegionLocation(Bytes.toBytes("")).getPort(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java index 2a438461b4e7..0c994349fe7a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java @@ -68,11 +68,12 @@ public void after() throws Exception { // make sure that with every possible way, we get the same meta table descriptor. 
private TableDescriptor getMetaDescriptor() throws TableNotFoundException, IOException { Admin admin = UTIL.getAdmin(); - TableDescriptor get = admin.getDescriptor(TableName.META_TABLE_NAME); + TableDescriptor get = admin.getDescriptor(UTIL.getConnection().getMetaTableName()); TableDescriptor list = admin.listTableDescriptors(true).stream().filter(td -> td.isMetaTable()).findAny().get(); TableDescriptor listByName = - admin.listTableDescriptors(Collections.singletonList(TableName.META_TABLE_NAME)).get(0); + admin.listTableDescriptors(Collections.singletonList(UTIL.getConnection().getMetaTableName())) + .get(0); TableDescriptor listByNs = admin.listTableDescriptorsByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME).stream() .filter(td -> td.isMetaTable()).findAny().get(); @@ -91,7 +92,7 @@ private TableDescriptor getMetaDescriptor() throws TableNotFoundException, IOExc @Test public void testEditMeta() throws IOException { Admin admin = UTIL.getAdmin(); - admin.tableExists(TableName.META_TABLE_NAME); + admin.tableExists(UTIL.getConnection().getMetaTableName()); TableDescriptor originalDescriptor = getMetaDescriptor(); ColumnFamilyDescriptor cfd = originalDescriptor.getColumnFamily(HConstants.CATALOG_FAMILY); int oldVersions = cfd.getMaxVersions(); @@ -100,11 +101,11 @@ public void testEditMeta() throws IOException { .setConfiguration(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING, DataBlockEncoding.ROW_INDEX_V1.toString()) .build(); - admin.modifyColumnFamily(TableName.META_TABLE_NAME, cfd); + admin.modifyColumnFamily(UTIL.getConnection().getMetaTableName(), cfd); byte[] extraColumnFamilyName = Bytes.toBytes("xtra"); ColumnFamilyDescriptor newCfd = ColumnFamilyDescriptorBuilder.newBuilder(extraColumnFamilyName).build(); - admin.addColumnFamily(TableName.META_TABLE_NAME, newCfd); + admin.addColumnFamily(UTIL.getConnection().getMetaTableName(), newCfd); TableDescriptor descriptor = getMetaDescriptor(); // Assert new max versions is == old versions plus 1. 
assertEquals(oldVersions + 1, @@ -126,11 +127,11 @@ public void testEditMeta() throws IOException { assertEquals(encoding, DataBlockEncoding.ROW_INDEX_V1.toString()); assertTrue(r.getStore(extraColumnFamilyName) != null); // Assert we can't drop critical hbase:meta column family but we can drop any other. - admin.deleteColumnFamily(TableName.META_TABLE_NAME, newCfd.getName()); + admin.deleteColumnFamily(UTIL.getConnection().getMetaTableName(), newCfd.getName()); descriptor = getMetaDescriptor(); assertTrue(descriptor.getColumnFamily(newCfd.getName()) == null); try { - admin.deleteColumnFamily(TableName.META_TABLE_NAME, HConstants.CATALOG_FAMILY); + admin.deleteColumnFamily(UTIL.getConnection().getMetaTableName(), HConstants.CATALOG_FAMILY); fail("Should not reach here"); } catch (HBaseIOException hioe) { assertTrue(hioe.getMessage().contains("Delete of hbase:meta")); @@ -144,7 +145,8 @@ public void testEditMeta() throws IOException { @Test public void testAlterMetaWithReadOnly() throws IOException { Admin admin = UTIL.getAdmin(); - TableDescriptor origMetaTableDesc = admin.getDescriptor(TableName.META_TABLE_NAME); + TableDescriptor origMetaTableDesc = + admin.getDescriptor(UTIL.getConnection().getMetaTableName()); assertFalse(origMetaTableDesc.isReadOnly()); TableDescriptor newTD = TableDescriptorBuilder.newBuilder(origMetaTableDesc).setReadOnly(true).build(); @@ -152,7 +154,7 @@ public void testAlterMetaWithReadOnly() throws IOException { admin.modifyTable(newTD); fail("Meta table can't be set as read only"); } catch (Exception e) { - assertFalse(admin.getDescriptor(TableName.META_TABLE_NAME).isReadOnly()); + assertFalse(admin.getDescriptor(UTIL.getConnection().getMetaTableName()).isReadOnly()); } // Create a table to check region assignment & meta operation diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java index a894bbcd0aeb..286c227fb2c5 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java @@ -91,7 +91,7 @@ public void testGetMasterInfoPort() throws Exception { @Test public void testInfoServersRedirect() throws Exception { // give the cluster time to start up - UTIL.getConnection().getTable(TableName.META_TABLE_NAME).close(); + UTIL.getConnection().getMetaTable().close(); int port = UTIL.getHBaseCluster().getMaster().getInfoServer().getPort(); assertContainsContent(new URL("http://localhost:" + port + "/index.html"), "master.jsp"); assertContainsContent(new URL("http://localhost:" + port + "/master-status"), "master.jsp"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaUpdatesGoToPriorityQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaUpdatesGoToPriorityQueue.java index 91e040f1db13..b5e52b7afaa4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaUpdatesGoToPriorityQueue.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaUpdatesGoToPriorityQueue.java @@ -87,7 +87,8 @@ private void multiMutate(byte[] row, List mutations) throws IOExceptio } } MutateRowsRequest request = builder.build(); - AsyncTable table = UTIL.getAsyncConnection().getTable(TableName.META_TABLE_NAME); + AsyncTable table = + UTIL.getAsyncConnection().getTable(UTIL.getConnection().getMetaTableName()); CompletableFuture future = table. 
coprocessorService(MultiRowMutationService::newStub, (stub, controller, done) -> stub.mutateRows(controller, request, done), row); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java index 0005a2becde7..44c43885aa77 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java @@ -117,7 +117,7 @@ public void verifyReservedNS() throws IOException { assertEquals(2, admin.listNamespaceDescriptors().length); // verify existence of system tables - Set systemTables = Sets.newHashSet(TableName.META_TABLE_NAME); + Set systemTables = Sets.newHashSet(TEST_UTIL.getConnection().getMetaTableName()); List descs = admin.listTableDescriptorsByNamespace( Bytes.toBytes(NamespaceDescriptor.SYSTEM_NAMESPACE.getName())); assertEquals(systemTables.size(), descs.size()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerInternalsTracing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerInternalsTracing.java index 69e6e4ac83df..b6e639d95b77 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerInternalsTracing.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerInternalsTracing.java @@ -77,7 +77,7 @@ public Setup(final Supplier testingUtilSupplier) { @Override protected void before() throws Throwable { final HBaseTestingUtil testingUtil = testingUtilSupplier.get(); - testingUtil.waitTableAvailable(TableName.META_TABLE_NAME); + testingUtil.waitTableAvailable(testingUtil.getConnection().getMetaTableName()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java index 509d74e0335c..03f58d602351 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Stoppable; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Connection; @@ -104,6 +105,11 @@ public MockRegistry(Configuration conf, User user) { public CompletableFuture getClusterId() { return CompletableFuture.completedFuture("clusterId"); } + + @Override + public CompletableFuture getMetaTableName() { + return CompletableFuture.completedFuture(TableName.META_TABLE_NAME); + } } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java index 031dff736c84..c2f5c90d2efa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java @@ -49,7 +49,8 @@ public abstract class AbstractTestRegionLocator { protected static void startClusterAndCreateTable() throws Exception { UTIL.startMiniCluster(3); - HBaseTestingUtil.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, REGION_REPLICATION); + HBaseTestingUtil.setReplicas(UTIL.getAdmin(), UTIL.getConnection().getMetaTableName(), + REGION_REPLICATION); TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLE_NAME).setRegionReplication(REGION_REPLICATION) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); @@ -69,7 +70,7 @@ protected static void startClusterAndCreateTable() throws Exception { @After public void 
tearDownAfterTest() throws IOException { clearCache(TABLE_NAME); - clearCache(TableName.META_TABLE_NAME); + clearCache(UTIL.getConnection().getMetaTableName()); } private byte[] getStartKey(int index) { @@ -170,8 +171,13 @@ private void assertMetaRegionLocation(HRegionLocation loc, int replicaId) { assertArrayEquals(HConstants.EMPTY_START_ROW, region.getStartKey()); assertArrayEquals(HConstants.EMPTY_END_ROW, region.getEndKey()); assertEquals(replicaId, region.getReplicaId()); - ServerName expected = - findRegionLocation(TableName.META_TABLE_NAME, region.getStartKey(), replicaId); + ServerName expected; + try { + expected = findRegionLocation(UTIL.getConnection().getMetaTableName(), region.getStartKey(), + replicaId); + } catch (IOException e) { + throw new RuntimeException(e); + } assertEquals(expected, loc.getServerName()); } @@ -184,19 +190,19 @@ private void assertMetaRegionLocations(List locs) { @Test public void testMeta() throws IOException { - assertMetaStartOrEndKeys(getStartKeys(TableName.META_TABLE_NAME)); - assertMetaStartOrEndKeys(getEndKeys(TableName.META_TABLE_NAME)); - Pair startEndKeys = getStartEndKeys(TableName.META_TABLE_NAME); + assertMetaStartOrEndKeys(getStartKeys(UTIL.getConnection().getMetaTableName())); + assertMetaStartOrEndKeys(getEndKeys(UTIL.getConnection().getMetaTableName())); + Pair startEndKeys = + getStartEndKeys(UTIL.getConnection().getMetaTableName()); assertMetaStartOrEndKeys(startEndKeys.getFirst()); assertMetaStartOrEndKeys(startEndKeys.getSecond()); for (int replicaId = 0; replicaId < REGION_REPLICATION; replicaId++) { - assertMetaRegionLocation( - getRegionLocation(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW, replicaId), - replicaId); + assertMetaRegionLocation(getRegionLocation(UTIL.getConnection().getMetaTableName(), + HConstants.EMPTY_START_ROW, replicaId), replicaId); } assertMetaRegionLocations( - getRegionLocations(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW)); - 
assertMetaRegionLocations(getAllRegionLocations(TableName.META_TABLE_NAME)); + getRegionLocations(UTIL.getConnection().getMetaTableName(), HConstants.EMPTY_START_ROW)); + assertMetaRegionLocations(getAllRegionLocations(UTIL.getConnection().getMetaTableName())); } protected abstract byte[][] getStartKeys(TableName tableName) throws IOException; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java index cb54e6e72634..0a2dafa61090 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java @@ -44,6 +44,11 @@ public Configuration getConfiguration() { return null; } + @Override + public TableName getMetaTableName() { + return null; + } + @Override public AsyncTableRegionLocator getRegionLocator(TableName tableName) { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java index 2b6b3d017fcb..42847d2c0fd6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.StartTestingClusterOption; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNameTestRule; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil; @@ -58,7 +57,7 @@ protected static void startCluster() throws Exception { 
.numAlwaysStandByMasters(1).numMasters(1).numRegionServers(REGIONSERVERS_COUNT).build(); TEST_UTIL.startMiniCluster(option); Admin admin = TEST_UTIL.getAdmin(); - HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, 3); + HBaseTestingUtil.setReplicas(admin, TEST_UTIL.getConnection().getMetaTableName(), 3); AssignmentManager am = TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager(); Set sns = new HashSet(); ServerName hbaseMetaServerName = am.getRegionStates() diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java index a98ae217e3c2..1c91c4468fec 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java @@ -47,7 +47,7 @@ static void waitUntilAllMetaReplicasAreReady(HBaseTestingUtil util, ConnectionRe throws IOException { Configuration conf = util.getConfiguration(); int regionReplicaCount = - util.getAdmin().getDescriptor(TableName.META_TABLE_NAME).getRegionReplication(); + util.getAdmin().getDescriptor(util.getConnection().getMetaTableName()).getRegionReplication(); Waiter.waitFor(conf, conf.getLong("hbase.client.sync.wait.timeout.msec", 60000), 200, true, new ExplainingPredicate() { @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java index 68a841b7d671..41d7902d91e1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java @@ -64,7 +64,7 @@ public class TestAdmin extends TestAdminBase { @Test public void testListTableDescriptors() throws IOException { TableDescriptor metaTableDescriptor = - TEST_UTIL.getAdmin().getDescriptor(TableName.META_TABLE_NAME); + 
TEST_UTIL.getAdmin().getDescriptor(TEST_UTIL.getConnection().getMetaTableName()); List tableDescriptors = TEST_UTIL.getAdmin().listTableDescriptors(true); assertTrue(tableDescriptors.contains(metaTableDescriptor)); tableDescriptors = TEST_UTIL.getAdmin().listTableDescriptors(false); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java index 2cf088fa6a82..631221f1e2e2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java @@ -84,13 +84,14 @@ public class TestAdmin2 extends TestAdminBase { public void testCreateBadTables() throws IOException { String msg = null; try { - ADMIN.createTable(TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build()); + ADMIN.createTable( + TableDescriptorBuilder.newBuilder(TEST_UTIL.getConnection().getMetaTableName()).build()); } catch (TableExistsException e) { msg = e.toString(); } assertTrue("Unexcepted exception message " + msg, msg != null && msg.startsWith(TableExistsException.class.getName()) - && msg.contains(TableName.META_TABLE_NAME.getNameAsString())); + && msg.contains(TEST_UTIL.getConnection().getMetaTableName().getNameAsString())); // Now try and do concurrent creation with a bunch of threads. 
TableDescriptor tableDescriptor = @@ -456,7 +457,7 @@ private void setUpforLogRolling() { private HRegionServer startAndWriteData(TableName tableName, byte[] value) throws IOException, InterruptedException { // When the hbase:meta table can be opened, the region servers are running - TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME).close(); + TEST_UTIL.getConnection().getMetaTable().close(); // Create the test table and open it TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) @@ -486,7 +487,7 @@ private HRegionServer startAndWriteData(TableName tableName, byte[] value) @Test public void testDisableCatalogTable() throws Exception { try { - ADMIN.disableTable(TableName.META_TABLE_NAME); + ADMIN.disableTable(TEST_UTIL.getConnection().getMetaTableName()); fail("Expected to throw ConstraintException"); } catch (ConstraintException e) { } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java index bb0eb31d2549..fb6ce2030f91 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java @@ -54,7 +54,8 @@ public class TestAsyncAdminWithRegionReplicas extends TestAsyncAdminBase { @BeforeClass public static void setUpBeforeClass() throws Exception { TestAsyncAdminBase.setUpBeforeClass(); - HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); + HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TEST_UTIL.getConnection().getMetaTableName(), + 3); try (ConnectionRegistry registry = ConnectionRegistryFactory.create(TEST_UTIL.getConfiguration(), User.getCurrent())) { RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, registry); @@ -80,7 +81,7 @@ public void testMoveNonDefaultReplica() throws 
InterruptedException, ExecutionException, IOException { createTableWithDefaultConf(tableName, 3); testMoveNonDefaultReplica(tableName); - testMoveNonDefaultReplica(TableName.META_TABLE_NAME); + testMoveNonDefaultReplica(TEST_UTIL.getConnection().getMetaTableName()); } @Test @@ -138,11 +139,12 @@ public void testCloneTableSchema() throws IOException, InterruptedException, Exe @Test public void testGetTableRegions() throws InterruptedException, ExecutionException, IOException { - List metaRegions = admin.getRegions(TableName.META_TABLE_NAME).get(); + List metaRegions = + admin.getRegions(TEST_UTIL.getConnection().getMetaTableName()).get(); assertEquals(3, metaRegions.size()); for (int i = 0; i < 3; i++) { RegionInfo metaRegion = metaRegions.get(i); - assertEquals(TableName.META_TABLE_NAME, metaRegion.getTable()); + assertEquals(TEST_UTIL.getConnection().getMetaTableName(), metaRegion.getTable()); assertEquals(i, metaRegion.getReplicaId()); } createTableWithDefaultConf(tableName, 3); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java index e14cd32a3889..23c11a5c8931 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java @@ -105,7 +105,7 @@ private void assertInitialized() { protected void before() throws Throwable { final AsyncAdmin admin = connectionRule.getAsyncConnection().getAdmin(); testUtil = miniClusterRule.getTestingUtility(); - HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, 3); + HBaseTestingUtil.setReplicas(admin, testUtil.getConnection().getMetaTableName(), 3); testUtil.waitUntilNoRegionsInTransition(); registry = ConnectionRegistryFactory.create(testUtil.getConfiguration(), User.getCurrent()); 
RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(testUtil, registry); @@ -163,8 +163,8 @@ public void test() throws Exception { TraceUtil.trace(() -> { try { - testLocator(miniClusterRule.getTestingUtility(), TableName.META_TABLE_NAME, - new Locator() { + testLocator(miniClusterRule.getTestingUtility(), + testUtil.getConnection().getMetaTableName(), new Locator() { @Override public void updateCachedLocationOnError(HRegionLocation loc, Throwable error) { locator.updateCachedLocationOnError(loc, error); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java index 0bfbd18eb32f..a3b60c54fd39 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java @@ -106,10 +106,10 @@ public static void setUp() throws Exception { admin.balancerSwitch(false, true); // Enable hbase:meta replication. 
- HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, NUM_OF_META_REPLICA); - TEST_UTIL.waitFor(30000, - () -> TEST_UTIL.getMiniHBaseCluster().getRegions(TableName.META_TABLE_NAME).size() - >= NUM_OF_META_REPLICA); + HBaseTestingUtil.setReplicas(admin, TEST_UTIL.getConnection().getMetaTableName(), + NUM_OF_META_REPLICA); + TEST_UTIL.waitFor(30000, () -> TEST_UTIL.getMiniHBaseCluster() + .getRegions(TEST_UTIL.getConnection().getMetaTableName()).size() >= NUM_OF_META_REPLICA); SPLIT_KEYS = new byte[8][]; for (int i = 111; i < 999; i += 111) { @@ -129,8 +129,8 @@ public void setUpBeforeTest() throws InterruptedException, ExecutionException, I c.set(RegionLocator.LOCATOR_META_REPLICAS_MODE, metaReplicaMode.toString()); ConnectionRegistry registry = ConnectionRegistryFactory.create(TEST_UTIL.getConfiguration(), User.getCurrent()); - conn = - new AsyncConnectionImpl(c, registry, registry.getClusterId().get(), null, User.getCurrent()); + conn = new AsyncConnectionImpl(c, registry, registry.getClusterId().get(), + TEST_UTIL.getConnection().getMetaTableName(), null, User.getCurrent()); locator = new AsyncNonMetaRegionLocator(conn, AsyncConnectionImpl.RETRY_TIMER); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java index 4529c07dfd13..9b319b17a944 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java @@ -126,8 +126,9 @@ public static void setUp() throws Exception { TEST_UTIL.getAdmin().balancerSwitch(false, true); ConnectionRegistry registry = ConnectionRegistryFactory.create(TEST_UTIL.getConfiguration(), User.getCurrent()); - CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry, - 
registry.getClusterId().get(), null, User.getCurrent()); + CONN = + new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry, registry.getClusterId().get(), + TEST_UTIL.getConnection().getMetaTableName(), null, User.getCurrent()); LOCATOR = new AsyncNonMetaRegionLocator(CONN, AsyncConnectionImpl.RETRY_TIMER); SPLIT_KEYS = IntStream.range(1, 256).mapToObj(i -> Bytes.toBytes(String.format("%02x", i))) .toArray(byte[][]::new); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java index 61dd87007c11..d7074928585f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.client; -import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; @@ -86,7 +85,7 @@ public void testSplitSwitch() throws Exception { final int rows = 10000; TestAsyncRegionAdminApi.loadData(tableName, families, rows); - AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME); + AsyncTable metaTable = ASYNC_CONN.getMetaTable(); List regionLocations = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); int originalCount = regionLocations.size(); @@ -117,7 +116,7 @@ public void testMergeSwitch() throws Exception { byte[][] families = { FAMILY }; TestAsyncRegionAdminApi.loadData(tableName, families, 1000); - AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME); + AsyncTable metaTable = ASYNC_CONN.getMetaTable(); List regionLocations = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); int originalCount = regionLocations.size(); @@ -162,7 +161,7 @@ public void 
testMergeRegions() throws Exception { byte[][] splitRows = new byte[][] { Bytes.toBytes("3"), Bytes.toBytes("6") }; createTableWithDefaultConf(tableName, splitRows); - AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME); + AsyncTable metaTable = ASYNC_CONN.getMetaTable(); List regionLocations = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); RegionInfo regionA; @@ -242,7 +241,7 @@ private void splitTest(TableName tableName, int rowCount, boolean isSplitRegion, // create table createTableWithDefaultConf(tableName); - AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME); + AsyncTable metaTable = ASYNC_CONN.getMetaTable(); List regionLocations = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); assertEquals(1, regionLocations.size()); @@ -299,7 +298,7 @@ public void testTruncateRegion() throws Exception { final byte[][] bFamilies = new byte[][] { Bytes.toBytes(family1), Bytes.toBytes(family2) }; createTableWithDefaultConf(tableName, splitKeys, bFamilies); - AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME); + AsyncTable metaTable = ASYNC_CONN.getMetaTable(); List regionLocations = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); RegionInfo regionToBeTruncated = regionLocations.get(0).getRegion(); @@ -333,7 +332,7 @@ public void testTruncateReplicaRegionNotAllowed() throws Exception { final byte[][] bFamilies = new byte[][] { Bytes.toBytes(family1), Bytes.toBytes(family2) }; createTableWithDefaultConf(tableName, 2, splitKeys, bFamilies); - AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME); + AsyncTable metaTable = ASYNC_CONN.getMetaTable(); List regionLocations = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); RegionInfo primaryRegion = regionLocations.get(0).getRegion(); @@ -354,7 +353,7 @@ public void testTruncateReplicaRegionNotAllowed() throws Exception { @Test public void 
testTruncateRegionsMetaTableRegionsNotAllowed() throws Exception { - AsyncTableRegionLocator locator = ASYNC_CONN.getRegionLocator(META_TABLE_NAME); + AsyncTableRegionLocator locator = ASYNC_CONN.getRegionLocator(ASYNC_CONN.getMetaTableName()); List regionLocations = locator.getAllRegionLocations().get(); HRegionLocation regionToBeTruncated = regionLocations.get(0); // 1 diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java index 5e858eb8d9d6..6d872e5fa8c6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java @@ -101,8 +101,9 @@ public static void setUp() throws Exception { TEST_UTIL.waitTableAvailable(TABLE_NAME); ConnectionRegistry registry = ConnectionRegistryFactory.create(TEST_UTIL.getConfiguration(), User.getCurrent()); - CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry, - registry.getClusterId().get(), null, User.getCurrent()); + CONN = + new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry, registry.getClusterId().get(), + TEST_UTIL.getConnection().getMetaTableName(), null, User.getCurrent()); LOCATOR = CONN.getLocator(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java index baa4ee74ade9..3bed588e523f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java @@ -74,8 +74,9 @@ public static void setUpBeforeClass() throws Exception { TEST_UTIL.waitTableAvailable(TABLE_NAME); ConnectionRegistry registry = 
ConnectionRegistryFactory.create(TEST_UTIL.getConfiguration(), User.getCurrent()); - CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry, - registry.getClusterId().get(), null, User.getCurrent()); + CONN = + new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry, registry.getClusterId().get(), + TEST_UTIL.getConnection().getMetaTableName(), null, User.getCurrent()); } @AfterClass @@ -165,7 +166,8 @@ void updateCachedLocationOnError(HRegionLocation loc, Throwable exception) { } }; try (AsyncConnectionImpl mockedConn = new AsyncConnectionImpl(CONN.getConfiguration(), - CONN.registry, CONN.registry.getClusterId().get(), null, User.getCurrent()) { + CONN.registry, CONN.registry.getClusterId().get(), + TEST_UTIL.getConnection().getMetaTableName(), null, User.getCurrent()) { @Override AsyncRegionLocator getLocator() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java index 33fbc906f19f..fde2c951529a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.client; -import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; import static org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.TRACKER_IMPL; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -73,14 +72,16 @@ public void testCreateTable() throws Exception { static TableState.State getStateFromMeta(TableName table) throws Exception { Optional state = ClientMetaTableAccessor - .getTableState(ASYNC_CONN.getTable(TableName.META_TABLE_NAME), table).get(); + .getTableState(ASYNC_CONN.getTable(TEST_UTIL.getConnection().getMetaTableName()), table) + .get(); assertTrue(state.isPresent()); return 
state.get().getState(); } @Test public void testCreateTableNumberOfRegions() throws Exception { - AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME); + AsyncTable metaTable = + ASYNC_CONN.getTable(TEST_UTIL.getConnection().getMetaTableName()); createTableWithDefaultConf(tableName); List regionLocations = @@ -128,7 +129,8 @@ public void testCreateTableWithRegions() throws Exception { boolean tableAvailable = admin.isTableAvailable(tableName).get(); assertTrue("Table should be created with splitKyes + 1 rows in META", tableAvailable); - AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME); + AsyncTable metaTable = + ASYNC_CONN.getTable(TEST_UTIL.getConnection().getMetaTableName()); List regions = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); Iterator hris = regions.iterator(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java index 9db82a3bcd82..c6eefefe1392 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java @@ -57,7 +57,7 @@ public class TestAsyncTableAdminApi2 extends TestAsyncAdminBase { @Test public void testDisableCatalogTable() throws Exception { try { - this.admin.disableTable(TableName.META_TABLE_NAME).join(); + this.admin.disableTable(TEST_UTIL.getConnection().getMetaTableName()).join(); fail("Expected to throw ConstraintException"); } catch (Exception e) { } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.java index d9007f748308..9248549d8599 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.client; -import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; import static org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.TRACKER_IMPL; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.MatcherAssert.assertThat; @@ -66,10 +65,10 @@ public void testTableExist() throws Exception { TEST_UTIL.createTable(tableName, FAMILY); exist = admin.tableExists(tableName).get(); assertTrue(exist); - exist = admin.tableExists(TableName.META_TABLE_NAME).get(); + exist = admin.tableExists(TEST_UTIL.getConnection().getMetaTableName()).get(); assertTrue(exist); // meta table already exists - exist = admin.tableExists(TableName.META_TABLE_NAME).get(); + exist = admin.tableExists(TEST_UTIL.getConnection().getMetaTableName()).get(); assertTrue(exist); } @@ -118,7 +117,7 @@ public void testListTables() throws Exception { assertEquals(0, size); Collections.addAll(tableNames, tables); - tableNames.add(TableName.META_TABLE_NAME); + tableNames.add(TEST_UTIL.getConnection().getMetaTableName()); tableDescs = admin.listTableDescriptors(tableNames).get(); size = tableDescs.size(); assertEquals(tables.length + 1, size); @@ -126,7 +125,8 @@ public void testListTables() throws Exception { assertTrue("tableName should be equal in order", tableDescs.get(j).getTableName().equals(tables[i])); } - assertTrue(tableDescs.get(size - 1).getTableName().equals(TableName.META_TABLE_NAME)); + assertTrue( + tableDescs.get(size - 1).getTableName().equals(TEST_UTIL.getConnection().getMetaTableName())); for (int i = 0; i < tables.length; i++) { admin.disableTable(tables[i]).join(); @@ -205,7 +205,7 @@ public void testDisableAndEnableTable() throws Exception { assertTrue(ok); // meta table can not be disabled. 
try { - admin.disableTable(TableName.META_TABLE_NAME).get(); + admin.disableTable(TEST_UTIL.getConnection().getMetaTableName()).get(); fail("meta table can not be disabled"); } catch (ExecutionException e) { Throwable cause = e.getCause(); @@ -285,7 +285,8 @@ public void testEnableTableRetainAssignment() throws Exception { int expectedRegions = splitKeys.length + 1; createTableWithDefaultConf(tableName, splitKeys); - AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME); + AsyncTable metaTable = + ASYNC_CONN.getTable(TEST_UTIL.getConnection().getMetaTableName()); List regions = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); assertEquals( @@ -314,8 +315,8 @@ public void testIsTableEnabledAndDisabled() throws Exception { assertTrue(admin.isTableDisabled(tableName).get()); // meta table is always enabled - assertTrue(admin.isTableEnabled(TableName.META_TABLE_NAME).get()); - assertFalse(admin.isTableDisabled(TableName.META_TABLE_NAME).get()); + assertTrue(admin.isTableEnabled(TEST_UTIL.getConnection().getMetaTableName()).get()); + assertFalse(admin.isTableDisabled(TEST_UTIL.getConnection().getMetaTableName()).get()); } @Test @@ -323,6 +324,6 @@ public void testIsTableAvailable() throws Exception { createTableWithDefaultConf(tableName); TEST_UTIL.waitTableAvailable(tableName); assertTrue(admin.isTableAvailable(tableName).get()); - assertTrue(admin.isTableAvailable(TableName.META_TABLE_NAME).get()); + assertTrue(admin.isTableAvailable(TEST_UTIL.getConnection().getMetaTableName()).get()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java index 18c53a49de7b..ea8200995d8b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java @@ -94,14 
+94,14 @@ public static void setUp() throws Exception { conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, FailPrimaryMetaScanCp.class.getName()); UTIL.startMiniCluster(3); - HBaseTestingUtil.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); + HBaseTestingUtil.setReplicas(UTIL.getAdmin(), UTIL.getConnection().getMetaTableName(), 3); try (ConnectionRegistry registry = ConnectionRegistryFactory.create(conf, User.getCurrent())) { RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(UTIL, registry); } try (Table table = UTIL.createTable(TABLE_NAME, FAMILY)) { table.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE)); } - UTIL.flush(TableName.META_TABLE_NAME); + UTIL.flush(UTIL.getConnection().getMetaTableName()); // wait for the store file refresh so we can read the region location from secondary meta // replicas Thread.sleep(2000); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java index 184b4ba0d3cc..b5e2b7d93c27 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW; -import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; @@ -72,14 +71,14 @@ public static void setUp() throws Exception { admin.balancerSwitch(false, true); // Enable hbase:meta replication. 
- HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, numOfMetaReplica); - TEST_UTIL.waitFor(30000, - () -> TEST_UTIL.getMiniHBaseCluster().getRegions(TableName.META_TABLE_NAME).size() - >= numOfMetaReplica); + HBaseTestingUtil.setReplicas(admin, TEST_UTIL.getConnection().getMetaTableName(), + numOfMetaReplica); + TEST_UTIL.waitFor(30000, () -> TEST_UTIL.getMiniHBaseCluster() + .getRegions(TEST_UTIL.getConnection().getMetaTableName()).size() >= numOfMetaReplica); registry = ConnectionRegistryFactory.create(TEST_UTIL.getConfiguration(), User.getCurrent()); - CONN = new AsyncConnectionImpl(conf, registry, registry.getClusterId().get(), null, - User.getCurrent()); + CONN = new AsyncConnectionImpl(conf, registry, registry.getClusterId().get(), + TEST_UTIL.getConnection().getMetaTableName(), null, User.getCurrent()); } @AfterClass @@ -94,18 +93,19 @@ public void testMetaChangeFromReplicaNoReplica() throws IOException, Interrupted CONN.getConfiguration().get(RegionLocator.LOCATOR_META_REPLICAS_MODE_LOADBALANCE_SELECTOR, CatalogReplicaLoadBalanceSimpleSelector.class.getName()); - CatalogReplicaLoadBalanceSelector metaSelector = CatalogReplicaLoadBalanceSelectorFactory - .createSelector(replicaSelectorClass, META_TABLE_NAME, CONN, () -> { - int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; - try { - RegionLocations metaLocations = CONN.registry.getMetaRegionLocations() - .get(CONN.connConf.getMetaReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); - numOfReplicas = metaLocations.size(); - } catch (Exception e) { - LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); - } - return numOfReplicas; - }); + CatalogReplicaLoadBalanceSelector metaSelector = + CatalogReplicaLoadBalanceSelectorFactory.createSelector(replicaSelectorClass, + TEST_UTIL.getConnection().getMetaTableName(), CONN, () -> { + int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; + try { + RegionLocations metaLocations = 
CONN.registry.getMetaRegionLocations() + .get(CONN.connConf.getMetaReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); + numOfReplicas = metaLocations.size(); + } catch (Exception e) { + LOG.error("Failed to get meta table's region replication, ", e); + } + return numOfReplicas; + }); // Loop for 100 times, it should cover all replica ids. int[] replicaIdCount = new int[numOfMetaReplica]; @@ -116,20 +116,20 @@ public void testMetaChangeFromReplicaNoReplica() throws IOException, Interrupted IntStream.range(0, numOfMetaReplica).forEach(i -> assertNotEquals(replicaIdCount[i], 0)); // Change to No meta replica - HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, 1); - TEST_UTIL.waitFor(30000, - () -> TEST_UTIL.getMiniHBaseCluster().getRegions(TableName.META_TABLE_NAME).size() == 1); + HBaseTestingUtil.setReplicas(admin, TEST_UTIL.getConnection().getMetaTableName(), 1); + TEST_UTIL.waitFor(30000, () -> TEST_UTIL.getMiniHBaseCluster() + .getRegions(TEST_UTIL.getConnection().getMetaTableName()).size() == 1); CatalogReplicaLoadBalanceSelector metaSelectorWithNoReplica = - CatalogReplicaLoadBalanceSelectorFactory.createSelector(replicaSelectorClass, META_TABLE_NAME, - CONN, () -> { + CatalogReplicaLoadBalanceSelectorFactory.createSelector(replicaSelectorClass, + TEST_UTIL.getConnection().getMetaTableName(), CONN, () -> { int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; try { RegionLocations metaLocations = CONN.registry.getMetaRegionLocations() .get(CONN.connConf.getMetaReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); numOfReplicas = metaLocations.size(); } catch (Exception e) { - LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); + LOG.error("Failed to get meta table's region replication, ", e); } return numOfReplicas; }); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java index 912ded0a27bb..813bc5d79267 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java @@ -22,7 +22,6 @@ import java.util.List; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -48,7 +47,8 @@ public void testReplicaCleanup() throws Exception { ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher(); List metaReplicaZnodes = zkw.getMetaReplicaNodes(); assertEquals(3, metaReplicaZnodes.size()); - HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 1); + HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TEST_UTIL.getConnection().getMetaTableName(), + 1); metaReplicaZnodes = zkw.getMetaReplicaNodes(); assertEquals(1, metaReplicaZnodes.size()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerTimeouts.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerTimeouts.java index 583dc02763d0..e46ae64ad24d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerTimeouts.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerTimeouts.java @@ -453,15 +453,21 @@ public ScanResponse scan(final RpcController controller, final ScanRequest reque } else { ScanResponse scanRes = super.scan(controller, request); String regionName = Bytes.toString(request.getRegion().getValue().toByteArray()); - if (!regionName.contains(TableName.META_TABLE_NAME.getNameAsString())) { - tableScannerId = scanRes.getScannerId(); - if (sleepOnOpen) { - try { - 
LOG.info("openScanner SLEEPING " + sleepTime); - Thread.sleep(sleepTime); - } catch (InterruptedException e) { + try { + if ( + !regionName.contains(TEST_UTIL.getConnection().getMetaTableName().getNameAsString()) + ) { + tableScannerId = scanRes.getScannerId(); + if (sleepOnOpen) { + try { + LOG.info("openScanner SLEEPING " + sleepTime); + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + } } } + } catch (IOException e) { + throw new RuntimeException(e); } return scanRes; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientSideRegionScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientSideRegionScanner.java index 253e61f995cf..8f6a41e3a75d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientSideRegionScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientSideRegionScanner.java @@ -89,8 +89,8 @@ public void setup() throws IOException { conf = TEST_UTIL.getConfiguration(); rootDir = TEST_UTIL.getDefaultRootDirPath(); fs = TEST_UTIL.getTestFileSystem(); - htd = TEST_UTIL.getAdmin().getDescriptor(TableName.META_TABLE_NAME); - hri = TEST_UTIL.getAdmin().getRegions(TableName.META_TABLE_NAME).get(0); + htd = TEST_UTIL.getAdmin().getDescriptor(TEST_UTIL.getConnection().getMetaTableName()); + hri = TEST_UTIL.getAdmin().getRegions(TEST_UTIL.getConnection().getMetaTableName()).get(0); scan = new Scan(); } @@ -200,7 +200,7 @@ private void testScanMetricsWithScanMetricsByRegionDisabled(ScanMetrics scanMetr Configuration copyConf = new Configuration(conf); Scan scan = new Scan(); scan.setScanMetricsEnabled(true); - TEST_UTIL.getAdmin().flush(TableName.META_TABLE_NAME); + TEST_UTIL.getAdmin().flush(TEST_UTIL.getConnection().getMetaTableName()); try (ClientSideRegionScanner clientSideRegionScanner = new ClientSideRegionScanner(copyConf, fs, rootDir, htd, hri, scan, scanMetrics)) { clientSideRegionScanner.next(); @@ -229,7 +229,7 @@ private void 
testScanMetricByRegion(ScanMetrics scanMetrics) throws IOException Configuration copyConf = new Configuration(conf); Scan scan = new Scan(); scan.setEnableScanMetricsByRegion(true); - TEST_UTIL.getAdmin().flush(TableName.META_TABLE_NAME); + TEST_UTIL.getAdmin().flush(TEST_UTIL.getConnection().getMetaTableName()); try (ClientSideRegionScanner clientSideRegionScanner = new ClientSideRegionScanner(copyConf, fs, rootDir, htd, hri, scan, scanMetrics)) { clientSideRegionScanner.next(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java index 2384e02955da..7f23390390f5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java @@ -92,7 +92,7 @@ public void testDeleteForSureClearsAllTableRowsFromMeta() } // Now I have a nice table, mangle it by removing the HConstants.REGIONINFO_QUALIFIER_STR // content from a few of the rows. 
- try (Table metaTable = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table metaTable = TEST_UTIL.getConnection().getMetaTable()) { try (ResultScanner scanner = metaTable.getScanner( MetaTableAccessor.getScanForTableName(TEST_UTIL.getConfiguration(), tableName))) { for (Result result : scanner) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFallbackToUseReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFallbackToUseReplay.java index 44c096f0b8a9..d138316f44e4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFallbackToUseReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFallbackToUseReplay.java @@ -98,8 +98,8 @@ public static void setUpBeforeClass() throws IOException { done.run(null); return null; }).when(stub).replay(any(), any(), any()); - CONN = new AsyncClusterConnectionImpl(CONF, mock(ConnectionRegistry.class), "test", null, - User.getCurrent()) { + CONN = new AsyncClusterConnectionImpl(CONF, mock(ConnectionRegistry.class), "test", + TableName.META_TABLE_NAME, null, User.getCurrent()) { @Override AsyncRegionLocator getLocator() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java index 8f820158e460..65410ed51ff9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java @@ -2532,7 +2532,7 @@ public void testFilterAllRecords() throws IOException { scan.setCaching(1); // Filter out any records scan.setFilter(new FilterList(new FirstKeyOnlyFilter(), new InclusiveStopFilter(new byte[0]))); - try (Table table = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = TEST_UTIL.getConnection().getMetaTable()) { try (ResultScanner s = 
table.getScanner(scan)) { assertNull(s.next()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java index f93fc9d5bf5d..e1dfe3f0113c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java @@ -22,7 +22,6 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableDescriptors; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; @@ -50,7 +49,7 @@ public static void setUp() throws Exception { public void testUpgradeAndIncreaseReplicaCount() throws Exception { HMaster oldMaster = TEST_UTIL.getMiniHBaseCluster().getMaster(); TableDescriptors oldTds = oldMaster.getTableDescriptors(); - TableDescriptor oldMetaTd = oldTds.get(TableName.META_TABLE_NAME); + TableDescriptor oldMetaTd = oldTds.get(TEST_UTIL.getConnection().getMetaTableName()); assertEquals(3, oldMetaTd.getRegionReplication()); // force update the replica count to 1 and then kill the master, to simulate that hen upgrading, // we have no region replication in meta table descriptor but we actually have meta region diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java index d79603cea3cc..398c4338140c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java @@ -34,7 +34,6 @@ import 
org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.StartTestingClusterOption; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.security.User; @@ -61,7 +60,8 @@ public static void setUp() throws Exception { StartTestingClusterOption.Builder builder = StartTestingClusterOption.builder(); builder.numMasters(3).numRegionServers(3); TEST_UTIL.startMiniCluster(builder.build()); - HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); + HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TEST_UTIL.getConnection().getMetaTableName(), + 3); } @AfterClass diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java index ce52918bfe42..f8a97a8bef16 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java @@ -87,7 +87,7 @@ public static void setUpBeforeClass() throws Exception { conf.setStrings(HConstants.REGION_SERVER_IMPL, RegionServerWithFakeRpcServices.class.getName()); TEST_UTIL.startMiniCluster(1); TEST_UTIL.getHBaseCluster().waitForActiveAndReadyMaster(); - TEST_UTIL.waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME); + TEST_UTIL.waitUntilAllRegionsAssigned(TEST_UTIL.getConnection().getMetaTableName()); badRS = TEST_UTIL.getHBaseCluster().getRegionServer(0); assertTrue(badRS.getRSRpcServices() instanceof FakeRSRpcServices); TableDescriptor desc = TableDescriptorBuilder.newBuilder(TABLE_NAME) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java index 29223dea5dbe..4df81f3995c6 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.MetaRegionLocationCache; import org.apache.hadoop.hbase.MultithreadedTestUtil; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.security.User; @@ -63,7 +62,8 @@ public class TestMetaRegionLocationCache { @BeforeClass public static void setUp() throws Exception { TEST_UTIL.startMiniCluster(3); - HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); + HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TEST_UTIL.getConnection().getMetaTableName(), + 3); REGISTRY = ConnectionRegistryFactory.create(TEST_UTIL.getConfiguration(), User.getCurrent()); RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, REGISTRY); TEST_UTIL.getAdmin().balancerSwitch(false, true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java index eae7078639d1..41ca9747b5d2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java @@ -24,7 +24,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; @@ -53,8 +52,8 @@ public static void setUp() throws Exception { @Test public void 
testMetaHTDReplicaCount() throws Exception { - assertEquals(3, - TEST_UTIL.getAdmin().getDescriptor(TableName.META_TABLE_NAME).getRegionReplication()); + assertEquals(3, TEST_UTIL.getAdmin().getDescriptor(TEST_UTIL.getConnection().getMetaTableName()) + .getRegionReplication()); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasShutdownHandling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasShutdownHandling.java index 267d618d03d1..7e9e89798ee6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasShutdownHandling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasShutdownHandling.java @@ -97,7 +97,7 @@ public static void shutdownMetaAndDoValidations(HBaseTestingUtil util) throws Ex ServerName master = null; try (Connection c = ConnectionFactory.createConnection(util.getConfiguration())) { try (Table htable = util.createTable(TABLE, FAMILIES)) { - util.getAdmin().flush(TableName.META_TABLE_NAME); + util.getAdmin().flush(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()); Thread.sleep( conf.getInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 30000) * 6); List regions = MetaTableAccessor.getTableRegions(c, TABLE); @@ -114,7 +114,7 @@ public static void shutdownMetaAndDoValidations(HBaseTestingUtil util) throws Ex Thread.sleep(10); hrl = MetaTableAccessor.getRegionLocation(c, regions.get(0)); } while (primary.equals(hrl.getServerName())); - util.getAdmin().flush(TableName.META_TABLE_NAME); + util.getAdmin().flush(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()); Thread.sleep( conf.getInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 30000) * 3); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiActionMetricsFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiActionMetricsFromClient.java index 
55646c35e435..387152ebe302 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiActionMetricsFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiActionMetricsFromClient.java @@ -48,7 +48,7 @@ public class TestMultiActionMetricsFromClient { public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(1); TEST_UTIL.getHBaseCluster().waitForActiveAndReadyMaster(); - TEST_UTIL.waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME); + TEST_UTIL.waitUntilAllRegionsAssigned(TEST_UTIL.getConnection().getMetaTableName()); TEST_UTIL.createTable(TABLE_NAME, FAMILY); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java index c38be19a238e..e6dfc359b2c1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java @@ -261,7 +261,7 @@ public static void beforeClass() throws Exception { HTU.startMiniCluster(NB_SERVERS); // Enable meta replica at server side - HBaseTestingUtil.setReplicas(HTU.getAdmin(), TableName.META_TABLE_NAME, 2); + HBaseTestingUtil.setReplicas(HTU.getAdmin(), HTU.getConnection().getMetaTableName(), 2); HTU.getHBaseCluster().startMaster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcConnectionRegistry.java index d33cc943355c..f9eec4c4f1f6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcConnectionRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcConnectionRegistry.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; -import 
org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.regionserver.BootstrapNodeManager; import org.apache.hadoop.hbase.regionserver.RSRpcServices; @@ -72,7 +71,7 @@ public static void setUpBeforeClass() throws Exception { UTIL.getConfiguration().setLong(RpcConnectionRegistry.MIN_SECS_BETWEEN_REFRESHES, 0); UTIL.getConfiguration().setLong(BootstrapNodeManager.REQUEST_MASTER_MIN_INTERVAL_SECS, 1); UTIL.startMiniCluster(3); - HBaseTestingUtil.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); + HBaseTestingUtil.setReplicas(UTIL.getAdmin(), UTIL.getConnection().getMetaTableName(), 3); } @AfterClass diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java index 52ccd5d8b7da..db1bf6511d58 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java @@ -280,13 +280,13 @@ public void testAsyncTable() throws Exception { public void testChangeMetaReplicaCount() throws Exception { Admin admin = TEST_UTIL.getAdmin(); try (RegionLocator locator = - TEST_UTIL.getConnection().getRegionLocator(TableName.META_TABLE_NAME)) { + TEST_UTIL.getConnection().getRegionLocator(TEST_UTIL.getConnection().getMetaTableName())) { assertEquals(1, locator.getAllRegionLocations().size()); - HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, 3); + HBaseTestingUtil.setReplicas(admin, TEST_UTIL.getConnection().getMetaTableName(), 3); TEST_UTIL.waitFor(30000, () -> locator.getAllRegionLocations().size() == 3); - HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, 2); + HBaseTestingUtil.setReplicas(admin, TEST_UTIL.getConnection().getMetaTableName(), 2); TEST_UTIL.waitFor(30000, () -> locator.getAllRegionLocations().size() == 2); - 
HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, 1); + HBaseTestingUtil.setReplicas(admin, TEST_UTIL.getConnection().getMetaTableName(), 1); TEST_UTIL.waitFor(30000, () -> locator.getAllRegionLocations().size() == 1); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShutdownOfMetaReplicaHolder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShutdownOfMetaReplicaHolder.java index c39fc076ef2b..005786bf603e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShutdownOfMetaReplicaHolder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShutdownOfMetaReplicaHolder.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.junit.BeforeClass; @@ -50,7 +49,8 @@ public void testShutdownOfReplicaHolder() throws Exception { // checks that the when the server holding meta replica is shut down, the meta replica // can be recovered try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) { + RegionLocator locator = + conn.getRegionLocator(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable())) { HRegionLocation hrl = locator.getRegionLocations(HConstants.EMPTY_START_ROW, true).get(1); ServerName oldServer = hrl.getServerName(); TEST_UTIL.getHBaseClusterInterface().killRegionServer(oldServer); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java index 8abb4d754a7a..deca00e04086 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java @@ -163,7 +163,7 @@ public static void cleanupTest() throws Exception { */ @Test(expected = IllegalArgumentException.class) public void testMetaTablesSnapshot() throws Exception { - UTIL.getAdmin().snapshot("metaSnapshot", TableName.META_TABLE_NAME); + UTIL.getAdmin().snapshot("metaSnapshot", UTIL.getConnection().getMetaTableName()); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java index 6d585245e959..2ca6ad47a875 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java @@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; @@ -64,7 +63,8 @@ public class TestZKConnectionRegistry { @BeforeClass public static void setUp() throws Exception { TEST_UTIL.startMiniCluster(3); - HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); + HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TEST_UTIL.getConnection().getMetaTableName(), + 3); REGISTRY = new ZKConnectionRegistry(TEST_UTIL.getConfiguration(), null); } @@ -89,7 +89,11 @@ public void test() throws InterruptedException, ExecutionException, IOException IntStream.range(0, 3).forEach(i -> { HRegionLocation loc = locs.getRegionLocation(i); assertNotNull("Replica " + i + " doesn't have location", loc); - 
assertEquals(TableName.META_TABLE_NAME, loc.getRegion().getTable()); + try { + assertEquals(TEST_UTIL.getConnection().getMetaTableName(), loc.getRegion().getTable()); + } catch (IOException e) { + throw new RuntimeException(e); + } assertEquals(i, loc.getRegion().getReplicaId()); }); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMetaTableMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMetaTableMetrics.java index 1b66a085fa54..1b839637adf0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMetaTableMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMetaTableMetrics.java @@ -169,7 +169,7 @@ public void testMetaTableMetricsInJmx() throws Exception { @Test public void testConcurrentAccess() { try { - tablename = Bytes.toBytes("hbase:meta"); + tablename = TableName.META_TABLE_NAME.getName(); int numRows = 3000; int numRowsInTableBefore = UTIL.countRows(TableName.valueOf(tablename)); putData(numRows); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestInfoServersACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestInfoServersACL.java index b613e2a824c6..e4b815cf8364 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestInfoServersACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestInfoServersACL.java @@ -200,7 +200,7 @@ public void testTableActionsAvailableForAdmins() throws Exception { @Override public Void run() throws Exception { // Check the expected content is present in the http response - Pair pair = getTablePage(TableName.META_TABLE_NAME); + Pair pair = getTablePage(UTIL.getConnection().getMetaTableName()); assertEquals(HttpURLConnection.HTTP_OK, pair.getFirst().intValue()); assertTrue("expected=" + expectedAuthorizedContent + ", content=" + pair.getSecond(), pair.getSecond().contains(expectedAuthorizedContent)); @@ -213,7 +213,7 @@ public Void run() 
throws Exception { nonAdmin.doAs(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - Pair pair = getTablePage(TableName.META_TABLE_NAME); + Pair pair = getTablePage(UTIL.getConnection().getMetaTableName()); assertEquals(HttpURLConnection.HTTP_OK, pair.getFirst().intValue()); assertFalse( "should not find=" + expectedAuthorizedContent + ", content=" + pair.getSecond(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index b04380ae450c..720ba0fba073 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -597,6 +597,11 @@ public long rollAllWALWriters(long nonceGroup, long nonce) throws IOException { return 0; } + @Override + public TableName getMetaTableName() { + return null; + } + @Override public KeyManagementService getKeyManagementService() { return this; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java index 41848a58b784..ee52ebe04f71 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java @@ -243,7 +243,7 @@ public void testFlushedSequenceIdPersistLoad() throws Exception { .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf"))).build(); Table table = TEST_UTIL.createTable(tableDescriptor, null); // flush META region - TEST_UTIL.flush(TableName.META_TABLE_NAME); + TEST_UTIL.flush(TEST_UTIL.getConnection().getMetaTableName()); // wait for regionserver report Threads.sleep(msgInterval * 2); // record flush seqid before cluster shutdown diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystemWithStoreFileTracking.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystemWithStoreFileTracking.java index b3fadc7ed27a..69561c105db4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystemWithStoreFileTracking.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystemWithStoreFileTracking.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -64,7 +63,8 @@ public static void teardownTest() throws Exception { @Test public void tesMetaDescriptorHasSFTConfig() throws Exception { - TableDescriptor descriptor = UTIL.getAdmin().getDescriptor(TableName.META_TABLE_NAME); + TableDescriptor descriptor = + UTIL.getAdmin().getDescriptor(UTIL.getConnection().getMetaTableName()); assertEquals(FILE.name(), descriptor.getValue(TRACKER_IMPL)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java index f640c3084cb8..ac07aeb7d535 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java @@ -307,7 +307,7 @@ public void testIncompleteMetaTableReplicaInformation() throws Exception { ADMIN.disableTable(tableName); // now delete one replica info from all the rows // this is to make the meta appear to be only partially updated - Table metaTable = 
ADMIN.getConnection().getTable(TableName.META_TABLE_NAME); + Table metaTable = ADMIN.getConnection().getMetaTable(); for (byte[] row : tableRows) { Delete deleteOneReplicaLocation = new Delete(row); deleteOneReplicaLocation.addColumns(HConstants.CATALOG_FAMILY, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRepairMode.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRepairMode.java index 910692d93c30..560580e09668 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRepairMode.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRepairMode.java @@ -93,7 +93,7 @@ public void testNewCluster() throws Exception { Connection conn = TEST_UTIL.getConnection(); assertTrue(conn.getAdmin().isMasterInMaintenanceMode()); - try (Table table = conn.getTable(TableName.META_TABLE_NAME); + try (Table table = conn.getTable(TEST_UTIL.getConnection().getMetaTableName()); ResultScanner scanner = table.getScanner(new Scan())) { assertNotNull("Could not read meta.", scanner.next()); } @@ -120,7 +120,7 @@ public void testExistingCluster() throws Exception { Connection conn = TEST_UTIL.getConnection(); assertTrue(conn.getAdmin().isMasterInMaintenanceMode()); - try (Table table = conn.getTable(TableName.META_TABLE_NAME); + try (Table table = conn.getTable(TEST_UTIL.getConnection().getMetaTableName()); ResultScanner scanner = table.getScanner(HConstants.TABLE_FAMILY); Stream results = StreamSupport.stream(scanner.spliterator(), false)) { assertTrue("Did not find user table records while reading hbase:meta", diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java index e59ef4919126..d76aa14693bb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java @@ -301,7 +301,7 @@ public void testKillRSWithOpeningRegion2482() throws Exception { */ private static int addToEachStartKey(final int expected) throws IOException { Table t = TEST_UTIL.getConnection().getTable(TABLENAME); - Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); + Table meta = TEST_UTIL.getConnection().getMetaTable(); int rows = 0; Scan scan = new Scan(); scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java index b6bce31eed9c..d0a95e611def 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java @@ -24,7 +24,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.StartTestingClusterOption; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionLocator; @@ -66,7 +65,7 @@ public static void tearDownAfterClass() throws Exception { @Test public void testStopActiveMaster() throws Exception { try (Connection conn = ConnectionFactory.createConnection(UTIL.getConfiguration()); - RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) { + RegionLocator locator = conn.getRegionLocator(UTIL.getConnection().getMetaTableName())) { ServerName oldMetaServer = locator.getAllRegionLocations().get(0).getServerName(); ServerName oldMaster = UTIL.getMiniHBaseCluster().getMaster().getServerName(); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateAndMirrorMetaLocations.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateAndMirrorMetaLocations.java index cdb243b06cdb..161952a66813 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateAndMirrorMetaLocations.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateAndMirrorMetaLocations.java @@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.StartTestingClusterOption; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; @@ -68,7 +67,7 @@ public class TestMigrateAndMirrorMetaLocations { @BeforeClass public static void setUp() throws Exception { UTIL.startMiniCluster(3); - HBaseTestingUtil.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, 2); + HBaseTestingUtil.setReplicas(UTIL.getAdmin(), UTIL.getConnection().getMetaTableName(), 2); } @AfterClass @@ -143,20 +142,21 @@ public void test() throws Exception { } // wait until all meta regions have been assigned UTIL.waitFor(30000, - () -> UTIL.getMiniHBaseCluster().getRegions(TableName.META_TABLE_NAME).size() == 2); + () -> UTIL.getMiniHBaseCluster().getRegions(UTIL.getConnection().getMetaTableName()).size() + == 2); // make sure all the SCPs are finished waitUntilNoSCP(); checkMirrorLocation(2); // increase replica count to 3 - HBaseTestingUtil.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); + HBaseTestingUtil.setReplicas(UTIL.getAdmin(), UTIL.getConnection().getMetaTableName(), 3); checkMirrorLocation(3); byte[] replica2Data = ZKUtil.getData(UTIL.getZooKeeperWatcher(), UTIL.getZooKeeperWatcher().getZNodePaths().getZNodeForReplica(2)); // decrease replica count to 1 - HBaseTestingUtil.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, 1); 
+ HBaseTestingUtil.setReplicas(UTIL.getAdmin(), UTIL.getConnection().getMetaTableName(), 1); checkMirrorLocation(1); // restart the whole cluster, put an extra replica znode on zookeeper, to see if we will remove diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java index 30dd308c28f3..2d575d6043a1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java @@ -77,7 +77,11 @@ public static final class SuspendProcedure extends Procedure @Override public TableName getTableName() { - return TableName.META_TABLE_NAME; + try { + return UTIL.getConnection().getMetaTableName(); + } catch (IOException e) { + throw new RuntimeException(e); + } } @Override @@ -154,7 +158,7 @@ public static void tearDown() throws Exception { private void removeNamespaceFamily() throws IOException { FileSystem fs = UTIL.getTestFileSystem(); Path rootDir = CommonFSUtils.getRootDir(UTIL.getConfiguration()); - Path tableDir = CommonFSUtils.getTableDir(rootDir, TableName.META_TABLE_NAME); + Path tableDir = CommonFSUtils.getTableDir(rootDir, UTIL.getConnection().getMetaTableName()); TableDescriptor metaTableDesc = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); TableDescriptor noNsMetaTableDesc = TableDescriptorBuilder.newBuilder(metaTableDesc) .removeColumnFamily(HConstants.NAMESPACE_FAMILY).build(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRecreateCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRecreateCluster.java index 42f54e5c8758..e50ca9853009 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRecreateCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRecreateCluster.java @@ -126,7 +126,7 @@ 
private void validateRecreateClusterWithUserTableEnabled(boolean cleanupWALs, private void restartHBaseCluster(boolean cleanUpWALs, boolean cleanUpZnodes) throws Exception { // flush cache so that everything is on disk - TEST_UTIL.getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME); + TEST_UTIL.getMiniHBaseCluster().flushcache(TEST_UTIL.getConnection().getMetaTableName()); TEST_UTIL.getMiniHBaseCluster().flushcache(); List oldServers = @@ -177,7 +177,7 @@ private void prepareDataBeforeRecreate(HBaseTestingUtil testUtil, TableName tabl put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("c"), Bytes.toBytes("v")); table.put(put); - ensureTableNotColocatedWithSystemTable(tableName, TableName.META_TABLE_NAME); + ensureTableNotColocatedWithSystemTable(tableName, TEST_UTIL.getConnection().getMetaTableName()); } private void ensureTableNotColocatedWithSystemTable(TableName userTable, TableName systemTable) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartWithEmptyWALDirectory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartWithEmptyWALDirectory.java index 866f74b73191..8dad0a84b4e9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartWithEmptyWALDirectory.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartWithEmptyWALDirectory.java @@ -80,7 +80,7 @@ public void testRestart() throws IOException, InterruptedException { table.put(new Put(row).addColumn(FAMILY, QUALIFIER, row)); } // flush all in memory data - UTIL.flush(TableName.META_TABLE_NAME); + UTIL.flush(UTIL.getConnection().getMetaTableName()); UTIL.flush(NAME); // stop master first, so when stopping region server, we will not schedule a SCP. 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestServerCrashProcedureCarryingMetaStuck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestServerCrashProcedureCarryingMetaStuck.java index 8263298a8e4f..45cfa62d5c15 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestServerCrashProcedureCarryingMetaStuck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestServerCrashProcedureCarryingMetaStuck.java @@ -21,7 +21,6 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.AsyncAdmin; import org.apache.hadoop.hbase.client.AsyncConnection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -64,13 +63,13 @@ public static void tearDown() throws Exception { public void test() throws Exception { RegionServerThread rsThread = null; for (RegionServerThread t : UTIL.getMiniHBaseCluster().getRegionServerThreads()) { - if (!t.getRegionServer().getRegions(TableName.META_TABLE_NAME).isEmpty()) { + if (!t.getRegionServer().getRegions(UTIL.getConnection().getMetaTableName()).isEmpty()) { rsThread = t; break; } } HRegionServer rs = rsThread.getRegionServer(); - RegionInfo hri = rs.getRegions(TableName.META_TABLE_NAME).get(0).getRegionInfo(); + RegionInfo hri = rs.getRegions(UTIL.getConnection().getMetaTableName()).get(0).getRegionInfo(); HMaster master = UTIL.getMiniHBaseCluster().getMaster(); ProcedureExecutor executor = master.getMasterProcedureExecutor(); DummyRegionProcedure proc = new DummyRegionProcedure(executor.getEnvironment(), hri); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestCloseRegionWhileRSCrash.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestCloseRegionWhileRSCrash.java index b86493287e52..9bda3d92266a 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestCloseRegionWhileRSCrash.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestCloseRegionWhileRSCrash.java @@ -150,11 +150,13 @@ public static void setUp() throws Exception { UTIL.createTable(TABLE_NAME, CF); UTIL.getAdmin().balancerSwitch(false, true); HRegionServer srcRs = UTIL.getRSForFirstRegionInTable(TABLE_NAME); - if (!srcRs.getRegions(TableName.META_TABLE_NAME).isEmpty()) { - RegionInfo metaRegion = srcRs.getRegions(TableName.META_TABLE_NAME).get(0).getRegionInfo(); + if (!srcRs.getRegions(UTIL.getConnection().getMetaTableName()).isEmpty()) { + RegionInfo metaRegion = + srcRs.getRegions(UTIL.getConnection().getMetaTableName()).get(0).getRegionInfo(); HRegionServer dstRs = UTIL.getOtherRegionServer(srcRs); UTIL.getAdmin().move(metaRegion.getEncodedNameAsBytes(), dstRs.getServerName()); - UTIL.waitFor(30000, () -> !dstRs.getRegions(TableName.META_TABLE_NAME).isEmpty()); + UTIL.waitFor(30000, + () -> !dstRs.getRegions(UTIL.getConnection().getMetaTableName()).isEmpty()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java index 2f88f6087dd4..db6040926fc8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java @@ -94,7 +94,7 @@ public static void setUp() throws Exception { Configuration conf = UTIL.getConfiguration(); conf.setClass(HConstants.MASTER_IMPL, HMasterForTest.class, HMaster.class); UTIL.startMiniCluster(1); - UTIL.waitTableAvailable(TableName.META_TABLE_NAME); + UTIL.waitTableAvailable(UTIL.getConnection().getMetaTableName()); } @AfterClass diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java index 0e00006251ac..469def163922 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java @@ -121,7 +121,7 @@ public void testVisitMetaForBadRegionState() throws Exception { put.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER, Bytes.toBytes("BAD_STATE")); - try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = UTIL.getConnection().getMetaTable()) { table.put(put); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRollbackSCP.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRollbackSCP.java index cd73e09af6db..d58e96d4bfe8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRollbackSCP.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRollbackSCP.java @@ -167,7 +167,8 @@ public void describeTo(Description description) { @Test public void testFailAndRollback() throws Exception { - HRegionServer rsWithMeta = UTIL.getRSForFirstRegionInTable(TableName.META_TABLE_NAME); + HRegionServer rsWithMeta = + UTIL.getRSForFirstRegionInTable(UTIL.getConnection().getMetaTableName()); UTIL.getMiniHBaseCluster().killRegionServer(rsWithMeta.getServerName()); UTIL.waitFor(15000, () -> getSCPForServer(rsWithMeta.getServerName()) != null); ServerCrashProcedure scp = getSCPForServer(rsWithMeta.getServerName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java index 
dcaebbd84356..92dda7532131 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java @@ -161,7 +161,7 @@ public void testRoundRobinAssignment() throws Exception { LoadBalancer balancer = master.getLoadBalancer(); List regions = admin.getRegions(tableName); - regions.addAll(admin.getRegions(TableName.META_TABLE_NAME)); + regions.addAll(admin.getRegions(TEST_UTIL.getConnection().getMetaTableName())); List servers = Lists.newArrayList( admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet()); Map> map = balancer.roundRobinAssignment(regions, servers); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestMetaTableIsolationBalancerConditional.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestMetaTableIsolationBalancerConditional.java index 80f9728651e3..4ae6615678a7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestMetaTableIsolationBalancerConditional.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestMetaTableIsolationBalancerConditional.java @@ -92,8 +92,9 @@ public void testTableIsolation() throws Exception { admin.createTable(productTableDescriptor, BalancerConditionalsTestUtil.generateSplits(2 * NUM_SERVERS)); - Set tablesToBeSeparated = ImmutableSet. builder() - .add(TableName.META_TABLE_NAME).add(QuotaUtil.QUOTA_TABLE_NAME).add(productTableName).build(); + Set tablesToBeSeparated = + ImmutableSet. 
builder().add(TEST_UTIL.getConnection().getMetaTableName()) + .add(QuotaUtil.QUOTA_TABLE_NAME).add(productTableName).build(); // Pause the balancer admin.balancerSwitch(false, true); @@ -146,8 +147,13 @@ private static void validateRegionLocationsWithRetry(Connection connection, private static void validateRegionLocations(Map> tableToServers, TableName productTableName, boolean shouldBeBalanced) { // Validate that the region assignments - ServerName metaServer = - tableToServers.get(TableName.META_TABLE_NAME).stream().findFirst().orElseThrow(); + ServerName metaServer; + try { + metaServer = tableToServers.get(TEST_UTIL.getConnection().getMetaTableName()).stream() + .findFirst().orElseThrow(); + } catch (IOException e) { + throw new RuntimeException(e); + } ServerName quotaServer = tableToServers.get(QuotaUtil.QUOTA_TABLE_NAME).stream().findFirst().orElseThrow(); Set productServers = tableToServers.get(productTableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationBarrierCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationBarrierCleaner.java index 88d1a298aa48..0e717d6e3b74 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationBarrierCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationBarrierCleaner.java @@ -94,7 +94,7 @@ public static void tearDownAfterClass() throws Exception { @After public void tearDown() throws IOException { - try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME); + try (Table table = UTIL.getConnection().getMetaTable(); ResultScanner scanner = table.getScanner(new Scan().addFamily(HConstants.CATALOG_FAMILY) .addFamily(HConstants.REPLICATION_BARRIER_FAMILY).setFilter(new FirstKeyOnlyFilter()))) { for (;;) { @@ -148,20 +148,20 @@ private void addBarrier(RegionInfo region, long... 
barriers) throws IOException put.addColumn(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER, put.getTimestamp() - barriers.length + i, Bytes.toBytes(barriers[i])); } - try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = UTIL.getConnection().getMetaTable()) { table.put(put); } } private void fillCatalogFamily(RegionInfo region) throws IOException { - try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = UTIL.getConnection().getMetaTable()) { table.put(new Put(region.getRegionName()).addColumn(HConstants.CATALOG_FAMILY, Bytes.toBytes("whatever"), Bytes.toBytes("whatever"))); } } private void clearCatalogFamily(RegionInfo region) throws IOException { - try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = UTIL.getConnection().getMetaTable()) { table.delete(new Delete(region.getRegionName()).addFamily(HConstants.CATALOG_FAMILY)); } } @@ -281,7 +281,7 @@ public void testDeleteRowForDeletedRegion() throws IOException, ReplicationExcep // No catalog family, then we should remove the whole row clearCatalogFamily(region); cleaner.chore(); - try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = UTIL.getConnection().getMetaTable()) { assertFalse(table .exists(new Get(region.getRegionName()).addFamily(HConstants.REPLICATION_BARRIER_FAMILY))); } @@ -303,7 +303,7 @@ public void testDeleteRowForDeletedRegionNoPeers() throws IOException { // There are no peers, and no catalog family for this region either, so we should remove the // barriers. And since there is no catalog family, after we delete the barrier family, the whole // row is deleted. 
- try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = UTIL.getConnection().getMetaTable()) { assertFalse(table.exists(new Get(region.getRegionName()))); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestMasterStatusUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestMasterStatusUtil.java index 0ec94c917193..e1ea8ff9cf56 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestMasterStatusUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestMasterStatusUtil.java @@ -136,8 +136,7 @@ public void testGetUserTablesFilterOutSystemTables() throws IOException { Map mockTables = new HashMap<>(); mockTables.put("foo", TableDescriptorBuilder.newBuilder(TableName.valueOf("foo")).build()); mockTables.put("bar", TableDescriptorBuilder.newBuilder(TableName.valueOf("bar")).build()); - mockTables.put("meta", - TableDescriptorBuilder.newBuilder(TableName.valueOf("hbase", "meta")).build()); + mockTables.put("meta", TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build()); TableDescriptors tableDescriptors = Mockito.mock(TableDescriptors.class); Mockito.doReturn(tableDescriptors).when(master).getTableDescriptors(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestMetaBrowserNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestMetaBrowserNoCluster.java index 4bb60f4137e7..74375e9d53e0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestMetaBrowserNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestMetaBrowserNoCluster.java @@ -61,7 +61,7 @@ public void buildFirstPageQueryStringNoParams() { final HttpServletRequest request = new MockRequestBuilder().build(); final MetaBrowser metaBrowser = new MetaBrowser(connection, request); - assertEquals("hbase:meta", metaBrowser.getName()); + 
assertEquals(TableName.META_TABLE_NAME.getNameAsString(), metaBrowser.getName()); assertNull(metaBrowser.getScanLimit()); assertNull(metaBrowser.getScanRegionState()); assertNull(metaBrowser.getScanStart()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java index cf118260b401..8f8d872c7164 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java @@ -182,7 +182,7 @@ private PairOfSameType waitOnDaughters(final RegionInfo r) throws IO long start = EnvironmentEdgeManager.currentTime(); PairOfSameType pair = null; try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - Table metaTable = conn.getTable(TableName.META_TABLE_NAME)) { + Table metaTable = conn.getTable(TEST_UTIL.getConnection().getMetaTableName())) { Result result = null; RegionInfo region = null; while ((EnvironmentEdgeManager.currentTime() - start) < 60000) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixerNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixerNoCluster.java index 614385ec04d6..37d4fbd1aeee 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixerNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixerNoCluster.java @@ -25,7 +25,6 @@ import java.util.List; import java.util.SortedSet; import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.testclassification.MasterTests; @@ -49,22 
+48,26 @@ public class TestMetaFixerNoCluster { private static byte[] C = Bytes.toBytes("c"); private static byte[] D = Bytes.toBytes("d"); private static RegionInfo ALL = RegionInfoBuilder.FIRST_META_REGIONINFO; - private static RegionInfo _ARI = - RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setEndKey(A).build(); - private static RegionInfo _BRI = - RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setEndKey(B).build(); + private static RegionInfo _ARI = RegionInfoBuilder + .newBuilder(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()).setEndKey(A).build(); + private static RegionInfo _BRI = RegionInfoBuilder + .newBuilder(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()).setEndKey(B).build(); private static RegionInfo ABRI = - RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setStartKey(A).setEndKey(B).build(); + RegionInfoBuilder.newBuilder(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()).setStartKey(A) + .setEndKey(B).build(); private static RegionInfo ACRI = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setStartKey(A).setEndKey(C).build(); + .newBuilder(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()).setStartKey(A).setEndKey(C) + .build(); private static RegionInfo CDRI = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setStartKey(C).setEndKey(D).build(); + .newBuilder(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()).setStartKey(C).setEndKey(D) + .build(); private static RegionInfo ADRI = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setStartKey(A).setEndKey(D).build(); + .newBuilder(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()).setStartKey(A).setEndKey(D) + .build(); private static RegionInfo D_RI = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setStartKey(D).build(); + 
.newBuilder(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()).setStartKey(D).build(); private static RegionInfo C_RI = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setStartKey(C).build(); + .newBuilder(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()).setStartKey(C).build(); @Test public void testGetRegionInfoWithLargestEndKey() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java index d2f04c674c97..72e7fb89c0f9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java @@ -146,9 +146,14 @@ public static void tearDown() throws Exception { @Test public void test() throws Exception { - RegionServerThread rsWithMetaThread = UTIL.getMiniHBaseCluster().getRegionServerThreads() - .stream().filter(t -> !t.getRegionServer().getRegions(TableName.META_TABLE_NAME).isEmpty()) - .findAny().get(); + RegionServerThread rsWithMetaThread = + UTIL.getMiniHBaseCluster().getRegionServerThreads().stream().filter(t -> { + try { + return !t.getRegionServer().getRegions(UTIL.getConnection().getMetaTableName()).isEmpty(); + } catch (IOException e) { + throw new RuntimeException(e); + } + }).findAny().get(); HRegionServer rsNoMeta = UTIL.getOtherRegionServer(rsWithMetaThread.getRegionServer()); FAIL = true; UTIL.getMiniHBaseCluster().killRegionServer(rsNoMeta.getServerName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableProcedureWaitingQueueCleanup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableProcedureWaitingQueueCleanup.java index 386356124f5b..09e5f762ee94 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableProcedureWaitingQueueCleanup.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableProcedureWaitingQueueCleanup.java @@ -122,7 +122,11 @@ public static class MetaTableProcedure extends Procedure @Override public TableName getTableName() { - return TableName.META_TABLE_NAME; + try { + return UTIL.getConnection().getMetaTableName(); + } catch (IOException e) { + throw new RuntimeException(e); + } } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionInDeadRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionInDeadRegionServer.java index 64454ab268fa..693e1981ba34 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionInDeadRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionInDeadRegionServer.java @@ -131,7 +131,7 @@ public void test() throws Exception { HRegionServer regionSvr = UTIL.getRSForFirstRegionInTable(TABLE_NAME); HRegion region = regionSvr.getRegions(TABLE_NAME).get(0); String regName = region.getRegionInfo().getEncodedName(); - List metaRegs = regionSvr.getRegions(TableName.META_TABLE_NAME); + List metaRegs = regionSvr.getRegions(UTIL.getConnection().getMetaTableName()); if (metaRegs != null && !metaRegs.isEmpty()) { LOG.info("meta is on the same server: " + regionSvr); // when region is on same server as hbase:meta, reassigning meta would abort the server diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java index af3902c9aa1e..71dd054747cc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java @@ -1002,7 +1002,8 @@ public void testShouldFlushMeta() throws Exception { TableDescriptors tds = new FSTableDescriptors(conf); FSTableDescriptors.tryUpdateMetaTableDescriptor(conf); HRegion meta = HRegion.createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, testDir, conf, - tds.get(TableName.META_TABLE_NAME), wFactory.getWAL(RegionInfoBuilder.FIRST_META_REGIONINFO)); + tds.get(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()), + wFactory.getWAL(RegionInfoBuilder.FIRST_META_REGIONINFO)); // parameterized tests add [#] suffix get rid of [ and ]. TableDescriptor desc = TableDescriptorBuilder .newBuilder(TableName.valueOf(name.getMethodName().replaceAll("[\\[\\]]", "_"))) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java index 897152f8b6dd..22cf84e9100c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java @@ -429,7 +429,7 @@ public static void blockUntilRegionSplit(Configuration conf, long timeout, log("blocking until region is split:" + Bytes.toStringBinary(regionName)); RegionInfo daughterA = null, daughterB = null; try (Connection conn = ConnectionFactory.createConnection(conf); - Table metaTable = conn.getTable(TableName.META_TABLE_NAME)) { + Table metaTable = conn.getTable(TEST_UTIL.getConnection().getMetaTableName())) { Result result = null; RegionInfo region = null; while ((EnvironmentEdgeManager.currentTime() - start) < timeout) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java index 179297bd873f..acef0dc796df 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java @@ -89,7 +89,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MemoryCompactionPolicy; -import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; @@ -1996,9 +1995,7 @@ public void testMaxPreadBytesConfiguredToBeLessThanZero() throws Exception { public void testInMemoryCompactionTypeWithLowerCase() throws IOException, InterruptedException { Configuration conf = HBaseConfiguration.create(); conf.set("hbase.systemtables.compacting.memstore.type", "eager"); - init(name.getMethodName(), conf, - TableDescriptorBuilder.newBuilder( - TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME, "meta".getBytes())), + init(name.getMethodName(), conf, TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME), ColumnFamilyDescriptorBuilder.newBuilder(family) .setInMemoryCompaction(MemoryCompactionPolicy.NONE).build()); assertTrue(((MemStoreCompactor) ((CompactingMemStore) store.memstore).compactor).toString() diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java index 2869be090f42..eede4752a789 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseCommonTestingUtil; import org.apache.hadoop.hbase.HBaseTestingUtil; -import org.apache.hadoop.hbase.TableName; import 
org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.testclassification.RegionServerTests; @@ -73,13 +72,12 @@ public void testReadAndWriteRegionInfoFile() throws IOException, InterruptedExce FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(FS, ROOT_DIR); FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(CONF, FS, ROOT_DIR); HRegion r = HBaseTestingUtil.createRegionAndWAL(ri, ROOT_DIR, CONF, - fsTableDescriptors.get(TableName.META_TABLE_NAME)); + fsTableDescriptors.get(ri.getTable())); // Get modtime on the file. long modtime = getModTime(r); HBaseTestingUtil.closeRegionAndWAL(r); Thread.sleep(1001); - r = HRegion.openHRegion(ROOT_DIR, ri, fsTableDescriptors.get(TableName.META_TABLE_NAME), null, - CONF); + r = HRegion.openHRegion(ROOT_DIR, ri, fsTableDescriptors.get(ri.getTable()), null, CONF); // Ensure the file is not written for a second time. long modtime2 = getModTime(r); assertEquals(modtime, modtime2); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java index 60fe39ecc77f..9c83ec9e1d4b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java @@ -71,7 +71,8 @@ public class TestRegionInfo { public void testIsStart() { assertTrue(RegionInfoBuilder.FIRST_META_REGIONINFO.isFirst()); org.apache.hadoop.hbase.client.RegionInfo ri = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setStartKey(Bytes.toBytes("not_start")).build(); + .newBuilder(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()) + .setStartKey(Bytes.toBytes("not_start")).build(); assertFalse(ri.isFirst()); } @@ -79,7 +80,8 @@ public void testIsStart() { public void testIsEnd() { 
assertTrue(RegionInfoBuilder.FIRST_META_REGIONINFO.isFirst()); org.apache.hadoop.hbase.client.RegionInfo ri = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setEndKey(Bytes.toBytes("not_end")).build(); + .newBuilder(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()) + .setEndKey(Bytes.toBytes("not_end")).build(); assertFalse(ri.isLast()); } @@ -87,9 +89,9 @@ public void testIsEnd() { public void testIsNext() { byte[] bytes = Bytes.toBytes("row"); org.apache.hadoop.hbase.client.RegionInfo ri = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setEndKey(bytes).build(); + .newBuilder(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()).setEndKey(bytes).build(); org.apache.hadoop.hbase.client.RegionInfo ri2 = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setStartKey(bytes).build(); + .newBuilder(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()).setStartKey(bytes).build(); assertFalse(ri.isNext(RegionInfoBuilder.FIRST_META_REGIONINFO)); assertTrue(ri.isNext(ri2)); } @@ -102,18 +104,21 @@ public void testIsOverlap() { byte[] d = Bytes.toBytes("d"); org.apache.hadoop.hbase.client.RegionInfo all = RegionInfoBuilder.FIRST_META_REGIONINFO; org.apache.hadoop.hbase.client.RegionInfo ari = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setEndKey(a).build(); + .newBuilder(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()).setEndKey(a).build(); org.apache.hadoop.hbase.client.RegionInfo abri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME) - .setStartKey(a).setEndKey(b).build(); + org.apache.hadoop.hbase.client.RegionInfoBuilder + .newBuilder(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()).setStartKey(a).setEndKey(b) + .build(); org.apache.hadoop.hbase.client.RegionInfo adri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME) - 
.setStartKey(a).setEndKey(d).build(); + org.apache.hadoop.hbase.client.RegionInfoBuilder + .newBuilder(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()).setStartKey(a).setEndKey(d) + .build(); org.apache.hadoop.hbase.client.RegionInfo cdri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME) - .setStartKey(c).setEndKey(d).build(); + org.apache.hadoop.hbase.client.RegionInfoBuilder + .newBuilder(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()).setStartKey(c).setEndKey(d) + .build(); org.apache.hadoop.hbase.client.RegionInfo dri = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setStartKey(d).build(); + .newBuilder(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()).setStartKey(d).build(); assertTrue(all.isOverlap(all)); assertTrue(all.isOverlap(abri)); assertFalse(abri.isOverlap(cdri)); @@ -140,18 +145,21 @@ public void testIsOverlaps() { byte[] e = Bytes.toBytes("e"); byte[] f = Bytes.toBytes("f"); org.apache.hadoop.hbase.client.RegionInfo ari = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setEndKey(a).build(); + .newBuilder(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()).setEndKey(a).build(); org.apache.hadoop.hbase.client.RegionInfo abri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME) - .setStartKey(a).setEndKey(b).build(); + org.apache.hadoop.hbase.client.RegionInfoBuilder + .newBuilder(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()).setStartKey(a).setEndKey(b) + .build(); org.apache.hadoop.hbase.client.RegionInfo eri = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setEndKey(e).build(); + .newBuilder(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()).setEndKey(e).build(); org.apache.hadoop.hbase.client.RegionInfo cdri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME) - 
.setStartKey(c).setEndKey(d).build(); + org.apache.hadoop.hbase.client.RegionInfoBuilder + .newBuilder(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()).setStartKey(c).setEndKey(d) + .build(); org.apache.hadoop.hbase.client.RegionInfo efri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME) - .setStartKey(e).setEndKey(f).build(); + org.apache.hadoop.hbase.client.RegionInfoBuilder + .newBuilder(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()).setStartKey(e).setEndKey(f) + .build(); assertFalse(ari.isOverlap(abri)); assertTrue(abri.isOverlap(eri)); assertFalse(cdri.isOverlap(efri)); @@ -175,12 +183,13 @@ public void testReadAndWriteHRegionInfoFile() throws IOException, InterruptedExc FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration()); FSTableDescriptors.tryUpdateMetaTableDescriptor(htu.getConfiguration()); HRegion r = HBaseTestingUtil.createRegionAndWAL(hri, basedir, htu.getConfiguration(), - fsTableDescriptors.get(TableName.META_TABLE_NAME)); + fsTableDescriptors.get(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable())); // Get modtime on the file. long modtime = getModTime(r); HBaseTestingUtil.closeRegionAndWAL(r); Thread.sleep(1001); - r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME), null, + r = HRegion.openHRegion(basedir, hri, + fsTableDescriptors.get(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()), null, htu.getConfiguration()); // Ensure the file is not written for a second time. 
long modtime2 = getModTime(r); @@ -254,7 +263,7 @@ public void testContainsRange() { @Test public void testContainsRangeForMetaTable() { TableDescriptor tableDesc = - TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build(); + TableDescriptorBuilder.newBuilder(RegionInfoBuilder.FIRST_META_REGIONINFO.getTable()).build(); RegionInfo hri = RegionInfoBuilder.newBuilder(tableDesc.getTableName()).build(); byte[] startRow = HConstants.EMPTY_START_ROW; byte[] row1 = Bytes.toBytes("a,a,0"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java index 68c6b6434c4f..ffc0d5cd157a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java @@ -147,7 +147,7 @@ public void testRegionReplicaUpdatesMetaLocation() throws Exception { openRegion(HTU, getRS(), hriSecondary); Table meta = null; try { - meta = HTU.getConnection().getTable(TableName.META_TABLE_NAME); + meta = HTU.getConnection().getMetaTable(); TestMetaTableAccessor.assertMetaLocation(meta, hriPrimary.getRegionName(), getRS().getServerName(), -1, 1, false); } finally { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerCrashDisableWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerCrashDisableWAL.java index 3fad6e16bf76..e7f16d7dc666 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerCrashDisableWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerCrashDisableWAL.java @@ -65,7 +65,7 @@ public static void setUp() throws Exception { UTIL.createTable(TABLE_NAME, CF); UTIL.waitTableAvailable(TABLE_NAME); HRegionServer rs = UTIL.getRSForFirstRegionInTable(TABLE_NAME); - if 
(!rs.getRegions(TableName.META_TABLE_NAME).isEmpty()) { + if (!rs.getRegions(UTIL.getConnection().getMetaTableName()).isEmpty()) { HRegionServer rs1 = UTIL.getOtherRegionServer(rs); UTIL.moveRegionAndWait( UTIL.getMiniHBaseCluster().getRegions(TABLE_NAME).get(0).getRegionInfo(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java index 522b0ea884b3..1122ad8e8096 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java @@ -96,12 +96,13 @@ public static void stopMasterAndCacheMetaLocation(HBaseTestingUtil HTU) throws IOException, InterruptedException { // cache meta location, so we will not go to master to lookup meta region location for (JVMClusterUtil.RegionServerThread t : HTU.getMiniHBaseCluster().getRegionServerThreads()) { - try (RegionLocator locator = - t.getRegionServer().getConnection().getRegionLocator(TableName.META_TABLE_NAME)) { + try (RegionLocator locator = t.getRegionServer().getConnection() + .getRegionLocator(HTU.getConnection().getMetaTableName())) { locator.getAllRegionLocations(); } } - try (RegionLocator locator = HTU.getConnection().getRegionLocator(TableName.META_TABLE_NAME)) { + try (RegionLocator locator = + HTU.getConnection().getRegionLocator(HTU.getConnection().getMetaTableName())) { locator.getAllRegionLocations(); } // Stop master diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRejectDuringAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRejectDuringAbort.java index 61da536310a8..b1c3fb02691d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRejectDuringAbort.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRejectDuringAbort.java @@ -135,7 +135,7 @@ public void testRejectRequestsOnAbort() throws Exception { .getRegionServerThreads()) { HRegionServer regionServer = regionServerThread.getRegionServer(); if ( - regionServer.getRegions(TableName.META_TABLE_NAME).isEmpty() + regionServer.getRegions(UTIL.getConnection().getMetaTableName()).isEmpty() && !regionServer.getRegions(TABLE_NAME).isEmpty() ) { serverWithoutMeta = regionServer; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestShutdownWhileWALBroken.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestShutdownWhileWALBroken.java index 0bc7deccc121..6e938755c5da 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestShutdownWhileWALBroken.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestShutdownWhileWALBroken.java @@ -135,7 +135,7 @@ public void test() throws Exception { RegionServerThread rst1 = UTIL.getMiniHBaseCluster().getRegionServerThreads().get(1); HRegionServer liveRS; RegionServerThread toKillRSThread; - if (rst1.getRegionServer().getRegions(TableName.META_TABLE_NAME).isEmpty()) { + if (rst1.getRegionServer().getRegions(UTIL.getConnection().getMetaTableName()).isEmpty()) { liveRS = rst0.getRegionServer(); toKillRSThread = rst1; } else { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java index 2a5aec458828..2f41ec9a51cf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java @@ -338,7 +338,7 @@ void validateData(Table table, int rownum) throws IOException { public void testCompactionRecordDoesntBlockRolling() throws 
Exception { // When the hbase:meta table can be opened, the region servers are running - try (Table t = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); + try (Table t = TEST_UTIL.getConnection().getMetaTable(); Table table = createTestTable(getName())) { server = TEST_UTIL.getRSForFirstRegionInTable(table.getName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java index 3c3dbe1ead9e..cf4d61cf3a3a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java @@ -162,7 +162,7 @@ public void testRSAbortWithUnflushedEdits() throws Exception { LOG.info("Starting testRSAbortWithUnflushedEdits()"); // When the hbase:meta table can be opened, the region servers are running - TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME).close(); + TEST_UTIL.getConnection().getMetaTable().close(); // Create the test table and open it TableName tableName = TableName.valueOf(this.getClass().getSimpleName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java index 43477f21f7f8..66ce18e0b847 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java @@ -322,7 +322,7 @@ public void testLogRollOnPipelineRestart() throws Exception { fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()) > 1); LOG.info("Replication=" + fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS())); // When the hbase:meta table can be opened, the region servers are running - Table t = 
TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); + Table t = TEST_UTIL.getConnection().getMetaTable(); try { this.server = cluster.getRegionServer(0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java index 897166a94000..78a299c1e35e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java @@ -72,7 +72,7 @@ public void testSystemTableWALEntryFilter() { // meta WALKeyImpl key1 = new WALKeyImpl(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(), - TableName.META_TABLE_NAME, EnvironmentEdgeManager.currentTime()); + RegionInfoBuilder.FIRST_META_REGIONINFO.getTable(), EnvironmentEdgeManager.currentTime()); Entry metaEntry = new Entry(key1, null); assertNull(filter.filter(metaEntry)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetaRegionReplicaReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetaRegionReplicaReplication.java index ab7935ddad41..2579d8d9c1ad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetaRegionReplicaReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetaRegionReplicaReplication.java @@ -98,10 +98,12 @@ public void before() throws Exception { // conf.setInt(HConstants.META_REPLICAS_NUM, numOfMetaReplica); HTU.startMiniCluster(NB_SERVERS); // Enable hbase:meta replication. 
- HBaseTestingUtil.setReplicas(HTU.getAdmin(), TableName.META_TABLE_NAME, numOfMetaReplica); + HBaseTestingUtil.setReplicas(HTU.getAdmin(), HTU.getConnection().getMetaTableName(), + numOfMetaReplica); - HTU.waitFor(30000, () -> HTU.getMiniHBaseCluster().getRegions(TableName.META_TABLE_NAME).size() - >= numOfMetaReplica); + HTU.waitFor(30000, + () -> HTU.getMiniHBaseCluster().getRegions(HTU.getConnection().getMetaTableName()).size() + >= numOfMetaReplica); } @After @@ -118,35 +120,38 @@ public void testHBaseMetaReplicates() throws Exception { try (Table table = HTU.createTable(TableName.valueOf(this.name.getMethodName() + "_0"), HConstants.CATALOG_FAMILY, Arrays.copyOfRange(HBaseTestingUtil.KEYS, 1, HBaseTestingUtil.KEYS.length))) { - verifyReplication(TableName.META_TABLE_NAME, numOfMetaReplica, getMetaCells(table.getName())); + verifyReplication(HTU.getConnection().getMetaTableName(), numOfMetaReplica, + getMetaCells(table.getName())); } try (Table table = HTU.createTable(TableName.valueOf(this.name.getMethodName() + "_1"), HConstants.CATALOG_FAMILY, Arrays.copyOfRange(HBaseTestingUtil.KEYS, 1, HBaseTestingUtil.KEYS.length))) { - verifyReplication(TableName.META_TABLE_NAME, numOfMetaReplica, getMetaCells(table.getName())); + verifyReplication(HTU.getConnection().getMetaTableName(), numOfMetaReplica, + getMetaCells(table.getName())); // Try delete. 
HTU.deleteTableIfAny(table.getName()); - verifyDeletedReplication(TableName.META_TABLE_NAME, numOfMetaReplica, table.getName()); + verifyDeletedReplication(HTU.getConnection().getMetaTableName(), numOfMetaReplica, + table.getName()); } } @Test public void testCatalogReplicaReplicationWithFlushAndCompaction() throws Exception { try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration()); - Table table = connection.getTable(TableName.META_TABLE_NAME)) { + Table table = connection.getTable(HTU.getConnection().getMetaTableName())) { // load the data to the table for (int i = 0; i < 5; i++) { LOG.info("Writing data from " + i * 1000 + " to " + (i * 1000 + 1000)); HTU.loadNumericRows(table, HConstants.CATALOG_FAMILY, i * 1000, i * 1000 + 1000); LOG.info("flushing table"); - HTU.flush(TableName.META_TABLE_NAME); + HTU.flush(HTU.getConnection().getMetaTableName()); LOG.info("compacting table"); if (i < 4) { - HTU.compact(TableName.META_TABLE_NAME, false); + HTU.compact(HTU.getConnection().getMetaTableName(), false); } } - verifyReplication(TableName.META_TABLE_NAME, numOfMetaReplica, 0, 5000, + verifyReplication(HTU.getConnection().getMetaTableName(), numOfMetaReplica, 0, 5000, HConstants.CATALOG_FAMILY); } } @@ -181,7 +186,7 @@ public void testCatalogReplicaReplicationWithReplicaMoved() throws Exception { } } try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration()); - Table table = connection.getTable(TableName.META_TABLE_NAME)) { + Table table = connection.getTable(HTU.getConnection().getMetaTableName())) { // load the data to the table for (int i = 0; i < 5; i++) { LOG.info("Writing data from " + i * 1000 + " to " + (i * 1000 + 1000)); @@ -191,7 +196,7 @@ public void testCatalogReplicaReplicationWithReplicaMoved() throws Exception { } } - verifyReplication(TableName.META_TABLE_NAME, numOfMetaReplica, 0, 5000, + verifyReplication(HTU.getConnection().getMetaTableName(), numOfMetaReplica, 0, 5000, 
HConstants.CATALOG_FAMILY); } } @@ -413,7 +418,8 @@ private void getMetaReplicaReadRequests(final Region[] metaRegions, final long[] @Test public void testHBaseMetaReplicaGets() throws Exception { TableName tn = TableName.valueOf(this.name.getMethodName()); - final Region[] metaRegions = getAllRegions(TableName.META_TABLE_NAME, numOfMetaReplica); + final Region[] metaRegions = + getAllRegions(HTU.getConnection().getMetaTableName(), numOfMetaReplica); long[] readReqsForMetaReplicas = new long[numOfMetaReplica]; long[] readReqsForMetaReplicasAfterGet = new long[numOfMetaReplica]; long[] readReqsForMetaReplicasAfterGetAllLocations = new long[numOfMetaReplica]; @@ -426,7 +432,8 @@ public void testHBaseMetaReplicaGets() throws Exception { try (Table table = HTU.createTable(tn, HConstants.CATALOG_FAMILY, Arrays.copyOfRange(HBaseTestingUtil.KEYS, 1, HBaseTestingUtil.KEYS.length))) { - verifyReplication(TableName.META_TABLE_NAME, numOfMetaReplica, getMetaCells(table.getName())); + verifyReplication(HTU.getConnection().getMetaTableName(), numOfMetaReplica, + getMetaCells(table.getName())); // load different values HTU.loadTable(table, new byte[][] { HConstants.CATALOG_FAMILY }, VALUE); for (int i = 0; i < NB_SERVERS; i++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java index 37af52eb93b9..13b7101766c4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java @@ -195,8 +195,8 @@ public void testWALEntryFilter() throws IOException { new WALKeyImpl(HConstants.EMPTY_BYTE_ARRAY, TableName.valueOf("test"), -1, -1, uuid), we); assertTrue(wef.filter(e) == e); // Test system WAL edit. 
- e = new WAL.Entry( - new WALKeyImpl(HConstants.EMPTY_BYTE_ARRAY, TableName.META_TABLE_NAME, -1, -1, uuid), we); + e = new WAL.Entry(new WALKeyImpl(HConstants.EMPTY_BYTE_ARRAY, + org.apache.hadoop.hbase.TableName.META_TABLE_NAME, -1, -1, uuid), we); assertNull(wef.filter(e)); } finally { rs.terminate("Done"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationChecker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationChecker.java index 8731adbe4c2b..9c085d810dbf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationChecker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationChecker.java @@ -115,6 +115,7 @@ public void setUp() throws IOException { when(source.getReplicationQueueStorage()).thenReturn(QUEUE_STORAGE); conn = mock(Connection.class); when(conn.isClosed()).thenReturn(false); + when(conn.getMetaTableName()).thenReturn(UTIL.getConnection().getMetaTableName()); doAnswer(new Answer() { @Override @@ -162,7 +163,7 @@ private void addStateAndBarrier(RegionInfo region, RegionState.State state, long put.addColumn(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER, put.getTimestamp() - barriers.length + i, Bytes.toBytes(barriers[i])); } - try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = UTIL.getConnection().getMetaTable()) { table.put(put); } } @@ -171,7 +172,7 @@ private void setState(RegionInfo region, RegionState.State state) throws IOExcep Put put = new Put(region.getRegionName(), EnvironmentEdgeManager.currentTime()); put.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER, Bytes.toBytes(state.name())); - try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = UTIL.getConnection().getMetaTable()) { table.put(put); } } 
@@ -188,7 +189,7 @@ private void addParents(RegionInfo region, List parents) throws IOEx put.addColumn(HConstants.REPLICATION_BARRIER_FAMILY, ReplicationBarrierFamilyFormat.REPLICATION_PARENT_QUALIFIER, ReplicationBarrierFamilyFormat.getParentsBytes(parents)); - try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = UTIL.getConnection().getMetaTable()) { table.put(put); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java index 11a0e98c5541..6e865967dda4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java @@ -187,7 +187,7 @@ public static class DevNullAsyncClusterConnection extends DummyAsyncClusterConne private final Configuration conf; public DevNullAsyncClusterConnection(Configuration conf, Object registry, String clusterId, - SocketAddress localAddress, User user) { + TableName metaTableName, SocketAddress localAddress, User user) { this.conf = conf; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsCPHookCalled.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsCPHookCalled.java index cb53f77bce56..f861948211e3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsCPHookCalled.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsCPHookCalled.java @@ -22,7 +22,6 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.MediumTests; import 
org.apache.hadoop.hbase.testclassification.RSGroupTests; import org.junit.After; @@ -69,7 +68,7 @@ public void testGetRSGroupInfoCPHookCalled() throws Exception { @Test public void testGetRSGroupInfoOfTableCPHookCalled() throws Exception { - ADMIN.getRSGroup(TableName.META_TABLE_NAME); + ADMIN.getRSGroup(TEST_UTIL.getConnection().getMetaTableName()); assertTrue(OBSERVER.preGetRSGroupInfoOfTableCalled); assertTrue(OBSERVER.postGetRSGroupInfoOfTableCalled); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java index 3a596a02e0a6..ccf1729ba24a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java @@ -246,10 +246,10 @@ public void testLowerMetaGroupVersion() throws Exception { // move hbase:meta to meta_group Set toAddTables = new HashSet<>(); - toAddTables.add(TableName.META_TABLE_NAME); + toAddTables.add(TEST_UTIL.getConnection().getMetaTableName()); ADMIN.setRSGroup(toAddTables, groupName); assertTrue(ADMIN.getConfiguredNamespacesAndTablesInRSGroup(groupName).getSecond() - .contains(TableName.META_TABLE_NAME)); + .contains(TEST_UTIL.getConnection().getMetaTableName())); // restart the regionserver in meta_group, and lower its version String originVersion = ""; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.java index dffeaa206a24..414d84527485 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.java @@ -236,8 +236,8 @@ public void testRunCleanerChore() throws Exception { public void testExecProcedure() throws Exception { 
verifyAdminCheckForAction((admin) -> { // Using existing table instead of creating a new one. - admin.execProcedure("flush-table-proc", TableName.META_TABLE_NAME.getNameAsString(), - new HashMap<>()); + admin.execProcedure("flush-table-proc", + TEST_UTIL.getConnection().getMetaTableName().getNameAsString(), new HashMap<>()); }); } @@ -259,8 +259,8 @@ public void testExecService() throws Exception { public void testExecProcedureWithRet() throws Exception { verifyAdminCheckForAction((admin) -> { // Using existing table instead of creating a new one. - admin.execProcedureWithReturn("flush-table-proc", TableName.META_TABLE_NAME.getNameAsString(), - new HashMap<>()); + admin.execProcedureWithReturn("flush-table-proc", + TEST_UTIL.getConnection().getMetaTableName().getNameAsString(), new HashMap<>()); }); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java index f132eb6964b1..d4cff355a050 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java @@ -29,7 +29,6 @@ import java.util.Collection; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.AsyncConnection; import org.apache.hadoop.hbase.client.AsyncTable; import org.apache.hadoop.hbase.client.Connection; @@ -97,7 +96,7 @@ public void setUpBeforeMethod() { private void testToken() throws Exception { try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get()) { - AsyncTable table = conn.getTable(TableName.META_TABLE_NAME); + AsyncTable table = conn.getTable(conn.getMetaTableName()); WhoAmIResponse response = table. 
coprocessorService( AuthenticationService::newStub, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java index 06d64ace3be2..9f262a73fc46 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java @@ -86,7 +86,7 @@ public static void setupBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(1); TEST_UTIL.getHBaseCluster().waitForActiveAndReadyMaster(); - TEST_UTIL.waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME); + TEST_UTIL.waitUntilAllRegionsAssigned(TEST_UTIL.getConnection().getMetaTableName()); rootDir = CommonFSUtils.getRootDir(conf); fs = TEST_UTIL.getTestFileSystem(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java index 0b989b8029f0..2c360f74db76 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java @@ -198,7 +198,8 @@ protected void deleteRegion(Configuration conf, final TableDescriptor htd, byte[ } if (metaRow) { - try (Table meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService)) { + try (Table meta = connection.getTable(TEST_UTIL.getConnection().getMetaTableName(), + tableExecutorService)) { Delete delete = new Delete(deleteRow); meta.delete(delete); } @@ -510,8 +511,9 @@ public boolean tableHasErrors(HbckTableInfo table) { protected void deleteMetaRegion(Configuration conf, boolean unassign, boolean hdfs, boolean regionInfoOnly) throws IOException, InterruptedException { - HRegionLocation metaLocation = connection.getRegionLocator(TableName.META_TABLE_NAME) - .getRegionLocation(HConstants.EMPTY_START_ROW); 
+ HRegionLocation metaLocation = + connection.getRegionLocator(TEST_UTIL.getConnection().getMetaTableName()) + .getRegionLocation(HConstants.EMPTY_START_ROW); ServerName hsa = metaLocation.getServerName(); RegionInfo hri = metaLocation.getRegion(); if (unassign) { @@ -526,7 +528,8 @@ protected void deleteMetaRegion(Configuration conf, boolean unassign, boolean hd Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem fs = rootDir.getFileSystem(conf); Path p = - new Path(rootDir + "/" + TableName.META_TABLE_NAME.getNameAsString(), hri.getEncodedName()); + new Path(rootDir + "/" + TEST_UTIL.getConnection().getMetaTableName().getNameAsString(), + hri.getEncodedName()); Path hriPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE); fs.delete(hriPath, true); } @@ -536,7 +539,8 @@ protected void deleteMetaRegion(Configuration conf, boolean unassign, boolean hd Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem fs = rootDir.getFileSystem(conf); Path p = - new Path(rootDir + "/" + TableName.META_TABLE_NAME.getNameAsString(), hri.getEncodedName()); + new Path(rootDir + "/" + TEST_UTIL.getConnection().getMetaTableName().getNameAsString(), + hri.getEncodedName()); HBaseFsck.debugLsr(conf, p); boolean success = fs.delete(p, true); LOG.info("Deleted " + p + " sucessfully? 
" + success); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckCleanReplicationBarriers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckCleanReplicationBarriers.java index c8e96383492a..63a49f69ab8f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckCleanReplicationBarriers.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckCleanReplicationBarriers.java @@ -198,7 +198,7 @@ private void addStateAndBarrier(RegionInfo region, RegionState.State state, long put.addColumn(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER, put.getTimestamp() - barriers.length + i, Bytes.toBytes(barriers[i])); } - try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = UTIL.getConnection().getMetaTable()) { table.put(put); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java index b24b721762d3..349decee59d2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java @@ -84,7 +84,7 @@ public static void setUpBeforeClass() throws Exception { admin = connection.getAdmin(); admin.balancerSwitch(false, true); - TEST_UTIL.waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME); + TEST_UTIL.waitUntilAllRegionsAssigned(connection.getMetaTableName()); } @AfterClass diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover1.java index 503f0dbdd971..68337ddbe35b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover1.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover1.java @@ -286,8 +286,13 @@ public void 
testRegionServerPort() throws Exception { @Test public void testLoadMetaRegion() throws Exception { HRegionServer rsWithMeta = TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads().stream() - .map(t -> t.getRegionServer()) - .filter(rs -> rs.getRegions(TableName.META_TABLE_NAME).size() > 0).findFirst().get(); + .map(t -> t.getRegionServer()).filter(rs -> { + try { + return rs.getRegions(TEST_UTIL.getConnection().getMetaTableName()).size() > 0; + } catch (IOException e) { + throw new RuntimeException(e); + } + }).findFirst().get(); int onlineRegions = rsWithMeta.getNumberOfOnlineRegions(); String rsName = rsWithMeta.getServerName().getAddress().toString(); try (RegionMover rm = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMoverWithRSGroupEnable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMoverWithRSGroupEnable.java index 56e103aa612e..cff8b0336787 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMoverWithRSGroupEnable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMoverWithRSGroupEnable.java @@ -19,6 +19,7 @@ import static org.junit.Assert.assertEquals; +import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.HashSet; @@ -85,9 +86,13 @@ public void setUp() throws Exception { Collection allServers = admin.getRegionServers(); // Remove rs contains hbase:meta, otherwise test looks unstable and buggy in test env. 
ServerName rsContainMeta = TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads().stream() - .map(t -> t.getRegionServer()) - .filter(rs -> rs.getRegions(TableName.META_TABLE_NAME).size() > 0).findFirst().get() - .getServerName(); + .map(t -> t.getRegionServer()).filter(rs -> { + try { + return rs.getRegions(TEST_UTIL.getConnection().getMetaTableName()).size() > 0; + } catch (IOException e) { + throw new RuntimeException(e); + } + }).findFirst().get().getServerName(); LOG.info("{} contains hbase:meta", rsContainMeta); List modifiable = new ArrayList<>(allServers); modifiable.remove(rsContainMeta); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java index cc1fefc266c4..b33c4db2e177 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java @@ -456,7 +456,8 @@ public void testRecoveredEditsPathForMeta() throws IOException { @Test public void testOldRecoveredEditsFileSidelined() throws IOException { Path p = createRecoveredEditsPathForRegion(); - Path tdir = CommonFSUtils.getTableDir(HBASEDIR, TableName.META_TABLE_NAME); + Path tdir = + CommonFSUtils.getTableDir(HBASEDIR, org.apache.hadoop.hbase.TableName.META_TABLE_NAME); Path regiondir = new Path(tdir, RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName()); fs.mkdirs(regiondir); Path parent = WALSplitUtil.getRegionDirRecoveredEditsDir(regiondir); @@ -469,8 +470,8 @@ public void testOldRecoveredEditsFileSidelined() throws IOException { private Path createRecoveredEditsPathForRegion() throws IOException { byte[] encoded = RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(); - Path p = WALSplitUtil.getRegionSplitEditsPath(TableName.META_TABLE_NAME, encoded, 1, - FILENAME_BEING_SPLIT, TMPDIRNAME, conf, ""); + Path p = 
WALSplitUtil.getRegionSplitEditsPath(org.apache.hadoop.hbase.TableName.META_TABLE_NAME, + encoded, 1, FILENAME_BEING_SPLIT, TMPDIRNAME, conf, ""); return p; } diff --git a/hbase-shell/src/main/ruby/hbase/table.rb b/hbase-shell/src/main/ruby/hbase/table.rb index aed5e1c26e59..7154c862aec9 100644 --- a/hbase-shell/src/main/ruby/hbase/table.rb +++ b/hbase-shell/src/main/ruby/hbase/table.rb @@ -748,7 +748,7 @@ def get_all_columns # Checks if current table is one of the 'meta' tables def is_meta_table? - org.apache.hadoop.hbase.TableName::META_TABLE_NAME.equals(@table.getName) + org.apache.hadoop.hbase.TableName.isMetaTableName(@table.getName) end # Given a column specification in the format FAMILY[:QUALIFIER[:CONVERTER]] diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 698e433bb744..c3a555d1e0fd 100644 --- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -1107,7 +1107,7 @@ public MiniHBaseCluster startMiniHBaseCluster(StartMiniClusterOption option) // Populate the master address configuration from mini cluster configuration. 
conf.set(HConstants.MASTER_ADDRS_KEY, MasterRegistry.getMasterAddr(c)); // Don't leave here till we've done a successful scan of the hbase:meta - try (Table t = getConnection().getTable(TableName.META_TABLE_NAME); + try (Table t = getConnection().getTable(getConnection().getMetaTableName()); ResultScanner s = t.getScanner(new Scan())) { for (;;) { if (s.next() == null) { @@ -1229,7 +1229,7 @@ public void restartHBaseCluster(StartMiniClusterOption option) option.getMasterClass(), option.getRsClass()); // Don't leave here till we've done a successful scan of the hbase:meta Connection conn = ConnectionFactory.createConnection(this.conf); - Table t = conn.getTable(TableName.META_TABLE_NAME); + Table t = conn.getTable(getConnection().getMetaTableName()); ResultScanner s = t.getScanner(new Scan()); while (s.next() != null) { // do nothing @@ -2397,7 +2397,7 @@ public String checksumRows(final Table table) throws Exception { */ public List createMultiRegionsInMeta(final Configuration conf, final TableDescriptor htd, byte[][] startKeys) throws IOException { - Table meta = getConnection().getTable(TableName.META_TABLE_NAME); + Table meta = getConnection().getTable(getConnection().getMetaTableName()); Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR); List newRegions = new ArrayList<>(startKeys.length); MetaTableAccessor.updateTableState(getConnection(), htd.getTableName(), @@ -2479,7 +2479,7 @@ public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootD */ public List getMetaTableRows() throws IOException { // TODO: Redo using MetaTableAccessor class - Table t = getConnection().getTable(TableName.META_TABLE_NAME); + Table t = getConnection().getTable(getConnection().getMetaTableName()); List rows = new ArrayList<>(); ResultScanner s = t.getScanner(new Scan()); for (Result result : s) { @@ -2497,7 +2497,7 @@ public List getMetaTableRows() throws IOException { */ public List getMetaTableRows(TableName tableName) throws IOException { // TODO: Redo using 
MetaTableAccessor. - Table t = getConnection().getTable(TableName.META_TABLE_NAME); + Table t = getConnection().getTable(getConnection().getMetaTableName()); List rows = new ArrayList<>(); ResultScanner s = t.getScanner(new Scan()); for (Result result : s) { @@ -2827,7 +2827,7 @@ public void process(WatchedEvent watchedEvent) { monitor.close(); if (checkStatus) { - getConnection().getTable(TableName.META_TABLE_NAME).close(); + getConnection().getTable(getConnection().getMetaTableName()).close(); } } @@ -3352,7 +3352,7 @@ public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOExce * Waith until all system table's regions get assigned */ public void waitUntilAllSystemRegionsAssigned() throws IOException { - waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME); + waitUntilAllRegionsAssigned(getConnection().getMetaTableName()); } /** @@ -3365,7 +3365,7 @@ public void waitUntilAllSystemRegionsAssigned() throws IOException { public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout) throws IOException { if (!TableName.isMetaTableName(tableName)) { - try (final Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) { + try (final Table meta = getConnection().getTable(getConnection().getMetaTableName())) { LOG.debug("Waiting until all regions of table " + tableName + " get assigned. 
Timeout = " + timeout + "ms"); waitFor(timeout, 200, true, new ExplainingPredicate() { @@ -3583,7 +3583,7 @@ public Table createRandomTable(TableName tableName, final Collection fam Bytes.toBytes(String.format(keyFormat, splitEndKey)), numRegions); if (hbaseCluster != null) { - getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME); + getMiniHBaseCluster().flushcache(getConnection().getMetaTableName()); } BufferedMutator mutator = getConnection().getBufferedMutator(tableName); @@ -3798,7 +3798,7 @@ public static int createPreSplitLoadTestTable(Configuration conf, TableDescripto } public static int getMetaRSPort(Connection connection) throws IOException { - try (RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) { + try (RegionLocator locator = connection.getRegionLocator(connection.getMetaTableName())) { return locator.getRegionLocation(Bytes.toBytes("")).getPort(); } } diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java index 1900c6c0f8da..7ecfe714d134 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java @@ -1072,12 +1072,12 @@ private void closeTable(Table table) throws IOError { public TRegionInfo getRegionInfo(ByteBuffer searchRow) throws IOError { try { byte[] row = getBytes(searchRow); - Result startRowResult = - getReverseScanResult(TableName.META_TABLE_NAME.getName(), row, HConstants.CATALOG_FAMILY); + Result startRowResult = getReverseScanResult( + connectionCache.getAdmin().getConnection().getMetaTableName().getName(), row, + HConstants.CATALOG_FAMILY); if (startRowResult == null) { - throw new IOException( - "Cannot find row in " + TableName.META_TABLE_NAME + ", row=" + Bytes.toStringBinary(row)); + throw new IOException("Cannot find row in 
hbase:meta, row=" + Bytes.toStringBinary(row)); } // find region start and end keys diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java index db1b1e1c9870..01919525104e 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java @@ -369,6 +369,11 @@ public void clearRegionLocationCache() { throw new NotImplementedException("clearRegionLocationCache not supported in ThriftTable"); } + @Override + public TableName getMetaTableName() { + return toAsyncConnection().getMetaTableName(); + } + @Override public AsyncConnection toAsyncConnection() { throw new NotImplementedException("toAsyncConnection not supported in ThriftTable"); diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java index d34600bc5d3a..95fb3a0cce73 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java @@ -182,8 +182,8 @@ public static void setMetaLocation(ZKWatcher zookeeper, ServerName serverName, i if (replicaId == RegionInfo.DEFAULT_REPLICA_ID) { LOG.debug("hbase:meta region location doesn't exist, create it"); } else { - LOG.debug( - "hbase:meta region location doesn't exist for replicaId=" + replicaId + ", create it"); + LOG.debug("hbase:meta region location doesn't exist for replicaId={}, create it", + replicaId); } ZKUtil.createAndWatch(zookeeper, zookeeper.getZNodePaths().getZNodeForReplica(replicaId), data);