call(
+ (c, s, d) -> s.getMetaTableName(c, GetMetaTableNameRequest.getDefaultInstance(), d),
+ GetMetaTableNameResponse::hasTableName, "getMetaTableName()")
+ .thenApply(resp -> {
+ // If the response carries a non-empty table name, use it; otherwise fall back to the default hbase:meta.
+ if (resp.hasTableName() && !resp.getTableName().isEmpty()) {
+ return TableName.valueOf(resp.getTableName());
+ } else {
+ return TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta");
+ }
+ }),
+ getClass().getSimpleName() + ".getMetaTableName");
+ }
+
@Override
public void close() {
trace(() -> {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java
index 6e96918d1d9a..d858ffdc5aa6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java
@@ -41,6 +41,19 @@ public interface AsyncConnection extends Closeable {
*/
Configuration getConfiguration();
+ /**
+ * Returns the meta table name for this cluster.
+ *
+ * This value is fetched from the cluster during connection establishment and cached for the
+ * lifetime of this connection. For most clusters, this will be "hbase:meta". For read replica
+ * clusters or other specialized configurations, this may return a different table name.
+ *
+ * Use this method instead of static references to the meta table name, to ensure compatibility
+ * with clusters that use a custom meta table name.
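+ *
+ * A minimal usage sketch (the row key below is only a placeholder):
+ *
+ * <pre>{@code
+ * CompletableFuture<Result> result =
+ *   conn.getTable(conn.getMetaTableName()).get(new Get(Bytes.toBytes("some-row")));
+ * }</pre>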
+ * @return The meta table name for this cluster
+ */
+ TableName getMetaTableName();
+
/**
* Retrieve a AsyncRegionLocator implementation to inspect region information on a table. The
* returned AsyncRegionLocator is not thread-safe, so a new instance should be created for each
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index 3f0e3e0b370e..cbe84222d953 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -92,6 +92,8 @@ public class AsyncConnectionImpl implements AsyncConnection {
final ConnectionRegistry registry;
+ private final TableName metaTableName;
+
protected final int rpcTimeout;
protected final RpcClient rpcClient;
@@ -128,14 +130,16 @@ public class AsyncConnectionImpl implements AsyncConnection {
private volatile ConnectionOverAsyncConnection conn;
public AsyncConnectionImpl(Configuration conf, ConnectionRegistry registry, String clusterId,
- SocketAddress localAddress, User user) {
- this(conf, registry, clusterId, localAddress, user, Collections.emptyMap());
+ TableName metaTableName, SocketAddress localAddress, User user) {
+ this(conf, registry, clusterId, metaTableName, localAddress, user, Collections.emptyMap());
}
public AsyncConnectionImpl(Configuration conf, ConnectionRegistry registry, String clusterId,
- SocketAddress localAddress, User user, Map<String, byte[]> connectionAttributes) {
+ TableName metaTableName, SocketAddress localAddress, User user,
+ Map<String, byte[]> connectionAttributes) {
this.conf = conf;
this.user = user;
+ this.metaTableName = metaTableName;
this.metricsScope = MetricsConnection.getScope(conf, clusterId, this);
if (user.isLoginFromKeytab()) {
@@ -219,6 +223,10 @@ public Configuration getConfiguration() {
return conf;
}
+ public TableName getMetaTableName() {
+ return metaTableName;
+ }
+
@Override
public boolean isClosed() {
return closed.get();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
index 8220189d9b51..1737a787c8bc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
@@ -62,6 +62,19 @@ public interface Connection extends Abortable, Closeable {
/** Returns Configuration instance being used by this Connection instance. */
Configuration getConfiguration();
+ /**
+ * Returns the meta table name for this cluster.
+ *
+ * This value is fetched from the cluster during connection establishment and cached for the
+ * lifetime of this connection. For most clusters, this will be "hbase:meta". For read replica
+ * clusters or other specialized configurations, this may return a different table name.
+ *
+ * Use this method instead of static references to the meta table name, to ensure compatibility
+ * with clusters that use a custom meta table name.
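+ *
+ * A minimal usage sketch (the row key below is only a placeholder):
+ *
+ * <pre>{@code
+ * try (Table meta = connection.getTable(connection.getMetaTableName())) {
+ *   Result r = meta.get(new Get(Bytes.toBytes("some-row")));
+ * }
+ * }</pre>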
+ * @return The meta table name for this cluster
+ */
+ TableName getMetaTableName();
+
/**
* Retrieve a Table implementation for accessing a table. The returned Table is not thread safe, a
* new instance should be created for each using thread. This is a lightweight operation, pooling
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
index 144a790c406d..8e304fcbcac9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
@@ -595,16 +595,29 @@ public static CompletableFuture createAsyncConnection(URI conne
future.completeExceptionally(new IOException("clusterid came back null"));
return;
}
- Class<? extends AsyncConnection> clazz = appliedConf.getClass(
- HBASE_CLIENT_ASYNC_CONNECTION_IMPL, AsyncConnectionImpl.class, AsyncConnection.class);
- try {
- future.complete(user.runAs((PrivilegedExceptionAction<
- ? extends AsyncConnection>) () -> ReflectionUtils.newInstance(clazz, appliedConf,
- registry, clusterId, null, user, connectionAttributes)));
- } catch (Exception e) {
- registry.close();
- future.completeExceptionally(e);
- }
+ // Fetch meta table name from registry
+ addListener(registry.getMetaTableName(), (metaTableName, metaError) -> {
+ if (metaError != null) {
+ registry.close();
+ future.completeExceptionally(metaError);
+ return;
+ }
+ if (metaTableName == null) {
+ registry.close();
+ future.completeExceptionally(new IOException("meta table name came back null"));
+ return;
+ }
+ Class<? extends AsyncConnection> clazz = appliedConf.getClass(
+ HBASE_CLIENT_ASYNC_CONNECTION_IMPL, AsyncConnectionImpl.class, AsyncConnection.class);
+ try {
+ future.complete(user.runAs((PrivilegedExceptionAction<
+ ? extends AsyncConnection>) () -> ReflectionUtils.newInstance(clazz, appliedConf,
+ registry, clusterId, metaTableName, null, user, connectionAttributes)));
+ } catch (Exception e) {
+ registry.close();
+ future.completeExceptionally(e);
+ }
+ });
});
return future;
}, "ConnectionFactory.createAsyncConnection");
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java
index 471cfa874458..ad7483600ab6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionOverAsyncConnection.java
@@ -88,6 +88,11 @@ public Configuration getConfiguration() {
return conn.getConfiguration();
}
+ @Override
+ public TableName getMetaTableName() {
+ return conn.getMetaTableName();
+ }
+
@Override
public BufferedMutator getBufferedMutator(BufferedMutatorParams params) throws IOException {
AsyncBufferedMutatorBuilder builder = conn.getBufferedMutatorBuilder(params.getTableName());
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java
index 2ace3959ffa6..a15778fbcb3e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java
@@ -21,6 +21,7 @@
import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
import org.apache.yetus.audience.InterfaceAudience;
/**
@@ -48,6 +49,18 @@ public interface ConnectionRegistry extends Closeable {
*/
CompletableFuture<ServerName> getActiveMaster();
+ /**
+ * Get the name of the meta table for this cluster.
+ *
+ * Should only be called once, similar to {@link #getClusterId()}. The upper layer should cache
+ * this value as it will not change during the connection lifetime.
+ *
+ * For most clusters, this will return the default "hbase:meta". For read replica clusters or
+ * other specialized configurations, this may return a different table name.
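+ *
+ * A typical caller fetches the value once during connection setup and caches it, for example (sketch):
+ *
+ * <pre>{@code
+ * TableName metaTableName = FutureUtils.get(registry.getMetaTableName());
+ * }</pre>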
+ * @return CompletableFuture containing the meta table name
+ */
+ CompletableFuture<TableName> getMetaTableName();
+
/**
* Return the connection string associated with this registry instance. This value is
* informational, used for annotating traces. Values returned may not be valid for establishing a
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java
index ebb43723b8f8..36734ba3c97f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java
@@ -35,8 +35,10 @@
import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.security.User;
@@ -262,6 +264,14 @@ public CompletableFuture getActiveMaster() {
"ZKConnectionRegistry.getActiveMaster");
}
+ @Override
+ public CompletableFuture<TableName> getMetaTableName() {
+ return tracedFuture(
+ () -> CompletableFuture.completedFuture(
+ TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta")),
+ "ZKConnectionRegistry.getMetaTableName");
+ }
+
@Override
public String getConnectionString() {
final String serverList = zk.getConnectString();
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/DoNothingConnectionRegistry.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/DoNothingConnectionRegistry.java
index 30d69d4b3f9e..7f87e7e29b72 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/DoNothingConnectionRegistry.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/DoNothingConnectionRegistry.java
@@ -21,6 +21,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.security.User;
import org.apache.yetus.audience.InterfaceAudience;
@@ -48,6 +49,11 @@ public CompletableFuture getActiveMaster() {
return CompletableFuture.completedFuture(null);
}
+ @Override
+ public CompletableFuture<TableName> getMetaTableName() {
+ return CompletableFuture.completedFuture(null);
+ }
+
@Override
public String getConnectionString() {
return "nothing";
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java
index 2206a800767e..7fa6cf7fb108 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java
@@ -144,8 +144,8 @@ public Void answer(InvocationOnMock invocation) throws Throwable {
}).when(adminStub).stopServer(any(HBaseRpcController.class), any(StopServerRequest.class),
any());
User user = UserProvider.instantiate(CONF).getCurrent();
- conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF, user), "test", null,
- user) {
+ conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF, user), "test",
+ org.apache.hadoop.hbase.TableName.META_TABLE_NAME, null, user) {
@Override
CompletableFuture getMasterStub() {
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java
index e56fffbb2642..24e30098984f 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java
@@ -72,7 +72,8 @@ public CompletableFuture getActiveMaster() {
return CompletableFuture.completedFuture(masterServer);
}
};
- conn = new AsyncConnectionImpl(CONF, registry, "test", null, user);
+ conn = new AsyncConnectionImpl(CONF, registry, "test",
+ org.apache.hadoop.hbase.TableName.META_TABLE_NAME, null, user);
}
@After
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java
index d519870080bf..787f0454e5dc 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java
@@ -100,7 +100,7 @@ public void setUp() throws IOException {
public CompletableFuture<RegionLocations> getMetaRegionLocations() {
return CompletableFuture.completedFuture(locs);
}
- }, "test", null, user);
+ }, "test", org.apache.hadoop.hbase.TableName.META_TABLE_NAME, null, user);
}
@After
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java
index 34e9ba201838..dcf51d038f35 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java
@@ -163,8 +163,8 @@ public Void answer(InvocationOnMock invocation) throws Throwable {
}
}).when(stub).get(any(HBaseRpcController.class), any(GetRequest.class), any());
User user = UserProvider.instantiate(CONF).getCurrent();
- conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF, user), "test", null,
- user) {
+ conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF, user), "test",
+ org.apache.hadoop.hbase.TableName.META_TABLE_NAME, null, user) {
@Override
AsyncRegionLocator getLocator() {
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java
index 2cecc974b6ef..6a1fc45737fd 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java
@@ -209,8 +209,8 @@ public Void answer(InvocationOnMock invocation) throws Throwable {
}
}).when(stub).get(any(HBaseRpcController.class), any(GetRequest.class), any());
final User user = UserProvider.instantiate(CONF).getCurrent();
- conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF, user), "test", null,
- user) {
+ conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF, user), "test",
+ org.apache.hadoop.hbase.TableName.META_TABLE_NAME, null, user) {
@Override
AsyncRegionLocator getLocator() {
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java
index e0d18f6bbb7e..40ec35649e4e 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java
@@ -106,13 +106,15 @@ public void testMetricsConnectionScope() throws IOException {
String scope = "testScope";
conf.setBoolean(MetricsConnection.CLIENT_SIDE_METRICS_ENABLED_KEY, true);
- AsyncConnectionImpl impl = new AsyncConnectionImpl(conf, null, "foo", null, User.getCurrent());
+ AsyncConnectionImpl impl = new AsyncConnectionImpl(conf, null, "foo",
+ org.apache.hadoop.hbase.TableName.META_TABLE_NAME, null, User.getCurrent());
Optional<MetricsConnection> metrics = impl.getConnectionMetrics();
assertTrue("Metrics should be present", metrics.isPresent());
assertEquals(clusterId + "@" + Integer.toHexString(impl.hashCode()),
metrics.get().getMetricScope());
conf.set(MetricsConnection.METRICS_SCOPE_KEY, scope);
- impl = new AsyncConnectionImpl(conf, null, "foo", null, User.getCurrent());
+ impl = new AsyncConnectionImpl(conf, null, "foo",
+ org.apache.hadoop.hbase.TableName.META_TABLE_NAME, null, User.getCurrent());
metrics = impl.getConnectionMetrics();
assertTrue("Metrics should be present", metrics.isPresent());
@@ -132,7 +134,8 @@ public void testMetricsWithMultiConnections() throws IOException {
AsyncConnectionImpl impl;
List connList = new ArrayList();
for (int i = 0; i < num; i++) {
- impl = new AsyncConnectionImpl(conf, null, null, null, user);
+ impl = new AsyncConnectionImpl(conf, null, null,
+ org.apache.hadoop.hbase.TableName.META_TABLE_NAME, null, user);
connList.add(impl);
}
diff --git a/hbase-protocol-shaded/src/main/protobuf/server/Registry.proto b/hbase-protocol-shaded/src/main/protobuf/server/Registry.proto
index f55b892413b2..e6fc063ab0da 100644
--- a/hbase-protocol-shaded/src/main/protobuf/server/Registry.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/server/Registry.proto
@@ -73,6 +73,14 @@ message GetBootstrapNodesResponse {
repeated ServerName server_name = 1;
}
+/** Request and response to get the meta table name for this cluster */
+message GetMetaTableNameRequest {
+}
+message GetMetaTableNameResponse {
+ /** The name of the meta table. Defaults to "hbase:meta" if not set. */
+ optional string table_name = 1;
+}
+
/**
* Implements all the RPCs needed by clients to look up cluster meta information needed for
* connection establishment.
@@ -105,6 +113,11 @@ service ClientMetaService {
* Get nodes which could be used as ClientMetaService
*/
rpc GetBootstrapNodes(GetBootstrapNodesRequest) returns (GetBootstrapNodesResponse);
+
+ /**
+ * Get the meta table name for this cluster.
+ */
+ rpc GetMetaTableName(GetMetaTableNameRequest) returns (GetMetaTableNameResponse);
}
message GetConnectionRegistryRequest {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java
index d6d277808838..6a6167272359 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java
@@ -89,6 +89,8 @@
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetMastersResponseEntry;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetMetaRegionLocationsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetMetaRegionLocationsResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetMetaTableNameRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetMetaTableNameResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.TooSlowLog.SlowLogPayload;
/**
@@ -381,6 +383,23 @@ public final GetBootstrapNodesResponse getBootstrapNodes(RpcController controlle
return builder.build();
}
+ @Override
+ public final GetMetaTableNameResponse getMetaTableName(RpcController controller,
+ GetMetaTableNameRequest request) throws ServiceException {
+ GetMetaTableNameResponse.Builder builder = GetMetaTableNameResponse.newBuilder();
+
+ try {
+ TableName metaTableName = server.getMetaTableName();
+ if (metaTableName != null) {
+ builder.setTableName(metaTableName.getNameAsString());
+ }
+ } catch (Exception e) {
+ throw new ServiceException(e);
+ }
+
+ return builder.build();
+ }
+
@Override
@QosPriority(priority = HConstants.ADMIN_QOS)
public UpdateConfigurationResponse updateConfiguration(RpcController controller,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java
index c2f65edd4fda..9f2304f3844c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java
@@ -675,6 +675,14 @@ public String toString() {
return getServerName().toString();
}
+ @Override
+ public TableName getMetaTableName() {
+ // For now, always return the default meta table name.
+ // Future implementations may support custom meta table names from configuration or storage.
+ return TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta");
+ }
+
protected abstract CoprocessorHost<?, ?> getCoprocessorHost();
protected abstract boolean canCreateBaseZNode();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java
index 20b915288c61..9d232e34aa31 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java
@@ -59,8 +59,9 @@
class AsyncClusterConnectionImpl extends AsyncConnectionImpl implements AsyncClusterConnection {
public AsyncClusterConnectionImpl(Configuration conf, ConnectionRegistry registry,
- String clusterId, SocketAddress localAddress, User user) {
- super(conf, registry, clusterId, localAddress, user, Collections.emptyMap());
+ String clusterId, org.apache.hadoop.hbase.TableName metaTableName, SocketAddress localAddress,
+ User user) {
+ super(conf, registry, clusterId, metaTableName, localAddress, user, Collections.emptyMap());
}
@Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
index 70a1e703c667..cd22d3ecd16d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClusterConnectionFactory.java
@@ -42,13 +42,15 @@ private ClusterConnectionFactory() {
private static AsyncClusterConnection createAsyncClusterConnection(Configuration conf,
ConnectionRegistry registry, SocketAddress localAddress, User user) throws IOException {
String clusterId = FutureUtils.get(registry.getClusterId());
+ org.apache.hadoop.hbase.TableName metaTableName =
+ FutureUtils.get(registry.getMetaTableName());
Class<? extends AsyncClusterConnection> clazz =
conf.getClass(HBASE_SERVER_CLUSTER_CONNECTION_IMPL, AsyncClusterConnectionImpl.class,
AsyncClusterConnection.class);
try {
return user
.runAs((PrivilegedExceptionAction<? extends AsyncClusterConnection>) () -> ReflectionUtils
- .newInstance(clazz, conf, registry, clusterId, localAddress, user));
+ .newInstance(clazz, conf, registry, clusterId, metaTableName, localAddress, user));
} catch (Exception e) {
throw new IOException(e);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryEndpoint.java
index a75faf3db75b..9071a75d10f0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryEndpoint.java
@@ -22,6 +22,7 @@
import java.util.Optional;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
import org.apache.yetus.audience.InterfaceAudience;
/**
@@ -54,4 +55,13 @@ public interface ConnectionRegistryEndpoint {
* Get the location of meta regions.
*/
List<HRegionLocation> getMetaLocations();
+
+ /**
+ * Get the name of the meta table for this cluster.
+ *
+ * By default, this returns "hbase:meta". Future implementations may support custom meta table
+ * names for read replica clusters.
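+ *
+ * For example, the short-circuit registry simply wraps this value in a completed future:
+ *
+ * <pre>{@code
+ * return CompletableFuture.completedFuture(endpoint.getMetaTableName());
+ * }</pre>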
+ * @return The meta table name
+ */
+ TableName getMetaTableName();
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedAsyncConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedAsyncConnection.java
index ebffc7ee5111..b19d86be19b2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedAsyncConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedAsyncConnection.java
@@ -52,6 +52,11 @@ public Configuration getConfiguration() {
return conn.getConfiguration();
}
+ @Override
+ public TableName getMetaTableName() {
+ return conn.getMetaTableName();
+ }
+
@Override
public AsyncTableRegionLocator getRegionLocator(TableName tableName) {
return conn.getRegionLocator(tableName);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedConnection.java
index ae52df266cfb..a9276dbaaf5a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/SharedConnection.java
@@ -61,6 +61,11 @@ public Configuration getConfiguration() {
return this.conn.getConfiguration();
}
+ @Override
+ public TableName getMetaTableName() {
+ return this.conn.getMetaTableName();
+ }
+
@Override
public BufferedMutator getBufferedMutator(TableName tableName) throws IOException {
return this.conn.getBufferedMutator(tableName);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitConnectionRegistry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitConnectionRegistry.java
index 1eb4e2d08ea8..e257bd029607 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitConnectionRegistry.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitConnectionRegistry.java
@@ -68,6 +68,11 @@ public CompletableFuture getActiveMaster() {
return future;
}
+ @Override
+ public CompletableFuture<TableName> getMetaTableName() {
+ return CompletableFuture.completedFuture(endpoint.getMetaTableName());
+ }
+
@Override
public String getConnectionString() {
return "short-circuit";
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java
index cb54e6e72634..db3ab3496f97 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/DummyAsyncClusterConnection.java
@@ -44,6 +44,11 @@ public Configuration getConfiguration() {
return null;
}
+ @Override
+ public TableName getMetaTableName() {
+ return TableName.META_TABLE_NAME;
+ }
+
@Override
public AsyncTableRegionLocator getRegionLocator(TableName tableName) {
return null;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java
index 4da4c87daba3..9f4cb8f3a4bb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java
@@ -131,7 +131,7 @@ public void setUpBeforeTest() throws InterruptedException, ExecutionException, I
ConnectionRegistry registry =
ConnectionRegistryFactory.create(TEST_UTIL.getConfiguration(), User.getCurrent());
conn =
- new AsyncConnectionImpl(c, registry, registry.getClusterId().get(), null, User.getCurrent());
+ new AsyncConnectionImpl(c, registry, registry.getClusterId().get(), org.apache.hadoop.hbase.TableName.META_TABLE_NAME, null, User.getCurrent());
locator = new AsyncNonMetaRegionLocator(conn, AsyncConnectionImpl.RETRY_TIMER);
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java
index 4529c07dfd13..70073cae8ad5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java
@@ -127,7 +127,7 @@ public static void setUp() throws Exception {
ConnectionRegistry registry =
ConnectionRegistryFactory.create(TEST_UTIL.getConfiguration(), User.getCurrent());
CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry,
- registry.getClusterId().get(), null, User.getCurrent());
+ registry.getClusterId().get(), org.apache.hadoop.hbase.TableName.META_TABLE_NAME, null, User.getCurrent());
LOCATOR = new AsyncNonMetaRegionLocator(CONN, AsyncConnectionImpl.RETRY_TIMER);
SPLIT_KEYS = IntStream.range(1, 256).mapToObj(i -> Bytes.toBytes(String.format("%02x", i)))
.toArray(byte[][]::new);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java
index 5e858eb8d9d6..cce2c4fa735b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocator.java
@@ -102,7 +102,7 @@ public static void setUp() throws Exception {
ConnectionRegistry registry =
ConnectionRegistryFactory.create(TEST_UTIL.getConfiguration(), User.getCurrent());
CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry,
- registry.getClusterId().get(), null, User.getCurrent());
+ registry.getClusterId().get(), org.apache.hadoop.hbase.TableName.META_TABLE_NAME, null, User.getCurrent());
LOCATOR = CONN.getLocator();
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java
index baa4ee74ade9..c243901f4497 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java
@@ -75,7 +75,7 @@ public static void setUpBeforeClass() throws Exception {
ConnectionRegistry registry =
ConnectionRegistryFactory.create(TEST_UTIL.getConfiguration(), User.getCurrent());
CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), registry,
- registry.getClusterId().get(), null, User.getCurrent());
+ registry.getClusterId().get(), org.apache.hadoop.hbase.TableName.META_TABLE_NAME, null, User.getCurrent());
}
@AfterClass
@@ -165,7 +165,8 @@ void updateCachedLocationOnError(HRegionLocation loc, Throwable exception) {
}
};
try (AsyncConnectionImpl mockedConn = new AsyncConnectionImpl(CONN.getConfiguration(),
- CONN.registry, CONN.registry.getClusterId().get(), null, User.getCurrent()) {
+ CONN.registry, CONN.registry.getClusterId().get(),
+ org.apache.hadoop.hbase.TableName.META_TABLE_NAME, null, User.getCurrent()) {
@Override
AsyncRegionLocator getLocator() {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java
index a392ff5c9a69..4512c060cee7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java
@@ -78,8 +78,8 @@ public static void setUp() throws Exception {
>= numOfMetaReplica);
registry = ConnectionRegistryFactory.create(TEST_UTIL.getConfiguration(), User.getCurrent());
- CONN = new AsyncConnectionImpl(conf, registry, registry.getClusterId().get(), null,
- User.getCurrent());
+ CONN = new AsyncConnectionImpl(conf, registry, registry.getClusterId().get(),
+ org.apache.hadoop.hbase.TableName.META_TABLE_NAME, null, User.getCurrent());
}
@AfterClass
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFallbackToUseReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFallbackToUseReplay.java
index 44c096f0b8a9..b35bbc7ceba0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFallbackToUseReplay.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFallbackToUseReplay.java
@@ -98,8 +98,8 @@ public static void setUpBeforeClass() throws IOException {
done.run(null);
return null;
}).when(stub).replay(any(), any(), any());
- CONN = new AsyncClusterConnectionImpl(CONF, mock(ConnectionRegistry.class), "test", null,
- User.getCurrent()) {
+ CONN = new AsyncClusterConnectionImpl(CONF, mock(ConnectionRegistry.class), "test",
+ org.apache.hadoop.hbase.TableName.META_TABLE_NAME, null, User.getCurrent()) {
@Override
AsyncRegionLocator getLocator() {
From be38a8c544a2a60be347dc89e9efba6ae6cd15f3 Mon Sep 17 00:00:00 2001
From: Kota-SH
Date: Mon, 12 Jan 2026 12:39:48 -0500
Subject: [PATCH 08/21] HBASE-29691: Change TableName.META_TABLE_NAME from
being a global static: Rename references
---
.../backup/impl/IncrementalBackupManager.java | 2 +-
.../favored/FavoredNodeAssignmentHelper.java | 2 +-
.../SnapshotOfRegionAssignmentFromMeta.java | 6 +-
...rgeClusterBalancingMetaTableIsolation.java | 4 +-
...gTableIsolationAndReplicaDistribution.java | 4 +-
.../client/AsyncNonMetaRegionLocator.java | 6 +-
.../hbase/client/AsyncRegionLocator.java | 2 +-
.../client/AsyncTableRegionLocatorImpl.java | 2 +-
.../hbase/client/RawAsyncHBaseAdmin.java | 6 +-
.../hbase/security/token/ClientTokenUtil.java | 4 +-
.../client/TestAsyncAdminRpcPriority.java | 2 +-
.../client/TestAsyncRegionLocatorTracing.java | 14 ++---
.../client/TestAsyncTableRpcPriority.java | 20 +++----
.../hbase/client/TestCompactFromClient.java | 2 +-
.../hbase/client/TestRegionInfoBuilder.java | 2 +-
.../client/TestTableDescriptorBuilder.java | 4 +-
.../hadoop/hbase/TestCellComparator.java | 24 ++++----
.../org/apache/hadoop/hbase/TestKeyValue.java | 24 ++++----
.../hbase/TestClientClusterMetrics.java | 22 ++++----
.../util/ProcessBasedLocalHBaseCluster.java | 2 +-
.../hadoop/hbase/util/RestartMetaTest.java | 2 +-
.../hbase/IntegrationTestMetaReplicas.java | 2 +-
.../hbase/mttr/IntegrationTestMTTR.java | 2 +-
.../hbase/mapreduce/TestImportExport.java | 2 +-
.../hadoop/hbase/rest/TestStatusResource.java | 2 +-
.../model/TestStorageClusterStatusModel.java | 4 +-
.../master/RegionPlacementMaintainer.java | 8 +--
.../hbase/master/TableStateManager.java | 4 +-
.../master/assignment/AssignmentManager.java | 4 +-
.../MergeTableRegionsProcedure.java | 2 +-
.../master/assignment/RegionStateStore.java | 23 ++++----
.../assignment/SplitTableRegionProcedure.java | 2 +-
.../hadoop/hbase/master/http/MetaBrowser.java | 2 +-
.../hbase/master/janitor/CatalogJanitor.java | 8 ++-
.../master/janitor/ReportMakingVisitor.java | 2 +-
.../procedure/DeleteTableProcedure.java | 4 +-
.../procedure/DisableTableProcedure.java | 4 +-
.../procedure/HBCKServerCrashProcedure.java | 4 +-
.../MigrateNamespaceTableProcedure.java | 4 +-
.../procedure/ModifyTableProcedure.java | 2 +-
.../hbase/regionserver/RSRpcServices.java | 2 +-
.../apache/hadoop/hbase/util/HBaseFsck.java | 56 +++++++++----------
.../apache/hadoop/hbase/util/RegionMover.java | 4 +-
.../hbase-webapps/master/catalogTables.jsp | 2 +-
.../resources/hbase-webapps/master/table.jsp | 8 +--
.../hadoop/hbase/HBaseClusterInterface.java | 2 +-
.../apache/hadoop/hbase/HBaseTestingUtil.java | 16 +++---
.../hadoop/hbase/TestHBaseMetaEdit.java | 18 +++---
.../apache/hadoop/hbase/TestInfoServers.java | 2 +-
.../TestMetaUpdatesGoToPriorityQueue.java | 2 +-
.../apache/hadoop/hbase/TestNamespace.java | 2 +-
.../hbase/TestServerInternalsTracing.java | 2 +-
.../client/AbstractTestRegionLocator.java | 18 +++---
.../client/MetaWithReplicasTestBase.java | 2 +-
.../hbase/client/RegionReplicaTestHelper.java | 2 +-
.../apache/hadoop/hbase/client/TestAdmin.java | 2 +-
.../hadoop/hbase/client/TestAdmin2.java | 8 +--
.../TestAsyncAdminWithRegionReplicas.java | 8 +--
.../client/TestAsyncMetaRegionLocator.java | 4 +-
.../client/TestAsyncNonMetaRegionLocator.java | 4 +-
.../client/TestAsyncRegionAdminApi2.java | 14 ++---
.../hbase/client/TestAsyncTableAdminApi.java | 6 +-
.../hbase/client/TestAsyncTableAdminApi2.java | 2 +-
.../hbase/client/TestAsyncTableAdminApi3.java | 18 +++---
.../client/TestAsyncTableUseMetaReplicas.java | 4 +-
...talogReplicaLoadBalanceSimpleSelector.java | 16 +++---
.../hbase/client/TestCleanupMetaReplica.java | 2 +-
.../client/TestClientScannerTimeouts.java | 2 +-
.../client/TestClientSideRegionScanner.java | 8 +--
.../hadoop/hbase/client/TestEnableTable.java | 2 +-
.../hbase/client/TestFromClientSide5.java | 2 +-
.../TestIncreaseMetaReplicaThroughConfig.java | 2 +-
.../hbase/client/TestMasterRegistry.java | 2 +-
.../hadoop/hbase/client/TestMetaCache.java | 2 +-
.../client/TestMetaRegionLocationCache.java | 2 +-
.../client/TestMetaWithReplicasBasic.java | 2 +-
.../TestMetaWithReplicasShutdownHandling.java | 4 +-
.../TestMultiActionMetricsFromClient.java | 2 +-
.../hbase/client/TestReplicaWithCluster.java | 2 +-
.../client/TestRpcConnectionRegistry.java | 2 +-
.../client/TestSeparateClientZKCluster.java | 8 +--
.../TestShutdownOfMetaReplicaHolder.java | 2 +-
.../hbase/client/TestSnapshotFromClient.java | 2 +-
.../client/TestZKConnectionRegistry.java | 4 +-
.../hadoop/hbase/http/TestInfoServersACL.java | 4 +-
.../hadoop/hbase/master/TestMaster.java | 2 +-
...MasterFileSystemWithStoreFileTracking.java | 2 +-
...TestMasterOperationsForRegionReplicas.java | 2 +-
.../hbase/master/TestMasterRepairMode.java | 4 +-
.../hbase/master/TestMasterTransitions.java | 2 +-
.../TestMetaAssignmentWithStopMaster.java | 2 +-
.../TestMigrateAndMirrorMetaLocations.java | 8 +--
.../master/TestMigrateNamespaceTable.java | 4 +-
.../hbase/master/TestRecreateCluster.java | 4 +-
.../TestRestartWithEmptyWALDirectory.java | 2 +-
...ServerCrashProcedureCarryingMetaStuck.java | 4 +-
.../TestCloseRegionWhileRSCrash.java | 6 +-
.../TestOpenRegionProcedureBackoff.java | 2 +-
.../assignment/TestRegionStateStore.java | 2 +-
.../master/assignment/TestRollbackSCP.java | 2 +-
.../BalancerConditionalsTestUtil.java | 2 +-
.../TestFavoredStochasticLoadBalancer.java | 2 +-
...MetaTableIsolationBalancerConditional.java | 4 +-
.../TestReplicationBarrierCleaner.java | 12 ++--
.../TestCatalogJanitorInMemoryStates.java | 2 +-
.../janitor/TestMetaFixerNoCluster.java | 16 +++---
.../TestSimpleRegionNormalizer.java | 2 +-
.../hbase/master/procedure/TestHBCKSCP.java | 4 +-
.../procedure/TestProcedurePriority.java | 2 +-
...TestTableProcedureWaitingQueueCleanup.java | 2 +-
.../TestCompactionInDeadRegionServer.java | 2 +-
.../regionserver/TestDefaultMemStore.java | 2 +-
.../TestEndToEndSplitTransaction.java | 2 +-
.../TestGetClosestAtOrBefore.java | 2 +-
.../TestReadAndWriteRegionInfoFile.java | 4 +-
.../hbase/regionserver/TestRegionInfo.java | 34 +++++------
.../regionserver/TestRegionReplicas.java | 2 +-
.../TestRegionServerCrashDisableWAL.java | 2 +-
.../TestRegionServerNoMaster.java | 4 +-
.../TestRegionServerRejectDuringAbort.java | 2 +-
.../TestShutdownWhileWALBroken.java | 2 +-
.../wal/AbstractTestLogRolling.java | 2 +-
.../regionserver/wal/TestLogRollAbort.java | 2 +-
.../regionserver/wal/TestLogRolling.java | 2 +-
.../wal/TestLogRollingNoCluster.java | 6 +-
.../TestReplicationWALEntryFilters.java | 2 +-
.../TestMetaRegionReplicaReplication.java | 26 ++++-----
.../regionserver/TestReplicationSource.java | 2 +-
.../TestSerialReplicationChecker.java | 6 +-
.../rsgroup/TestRSGroupsCPHookCalled.java | 2 +-
.../hbase/rsgroup/TestRSGroupsKillRS.java | 4 +-
.../security/access/TestRpcAccessChecks.java | 4 +-
.../token/TestGenerateDelegationToken.java | 2 +-
.../snapshot/TestRegionSnapshotTask.java | 2 +-
.../hadoop/hbase/util/BaseTestHBaseFsck.java | 8 +--
.../hbase/util/TestFSTableDescriptors.java | 6 +-
...TestHBaseFsckCleanReplicationBarriers.java | 2 +-
.../hadoop/hbase/util/TestHBaseFsckMOB.java | 2 +-
.../hadoop/hbase/util/TestRegionMover1.java | 2 +-
.../TestRegionMoverWithRSGroupEnable.java | 2 +-
.../apache/hadoop/hbase/wal/TestWALSplit.java | 4 +-
.../hadoop/hbase/HBaseTestingUtility.java | 20 +++----
.../thrift2/client/ThriftConnection.java | 5 ++
143 files changed, 399 insertions(+), 389 deletions(-)
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
index ec5a04a63a05..f6454a737a3b 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
@@ -170,7 +170,7 @@ private List getLogFilesForNewBackup(Map olderTimestamps,
LOG.debug("currentLogFile: " + log.getPath().toString());
if (AbstractFSWALProvider.isMetaFile(log.getPath())) {
if (LOG.isDebugEnabled()) {
- LOG.debug("Skip {} log file: {}", MetaTableName.getInstance(), log.getPath().getName());
+ LOG.debug("Skip {} log file: {}", conn.getMetaTableName(), log.getPath().getName());
}
continue;
}
diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
index d316075979a1..88d5ac88caed 100644
--- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
+++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
@@ -131,7 +131,7 @@ public static void updateMetaWithFavoredNodesInfo(
puts.add(put);
}
}
- try (Table table = connection.getTable(MetaTableName.getInstance())) {
+ try (Table table = connection.getTable(connection.getMetaTableName())) {
table.put(puts);
}
LOG.info("Added " + puts.size() + " region favored nodes in META");
diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
index e54ce7478e99..0a40e9dcdf8b 100644
--- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
+++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
@@ -172,9 +172,9 @@ private void processMetaRecord(Result result) throws IOException {
*/
public void initialize() throws IOException {
LOG.info("Start to scan {} for the current region assignment snapshot",
- MetaTableName.getInstance());
+ connection.getMetaTableName());
// Scan hbase:meta to pick up user regions
- try (Table metaTable = connection.getTable(MetaTableName.getInstance());
+ try (Table metaTable = connection.getTable(connection.getMetaTableName());
ResultScanner scanner = metaTable.getScanner(HConstants.CATALOG_FAMILY)) {
for (;;) {
Result result = scanner.next();
@@ -190,7 +190,7 @@ public void initialize() throws IOException {
}
}
LOG.info("Finished scanning {} for the current region assignment snapshot",
- MetaTableName.getInstance());
+ connection.getMetaTableName());
}
private void addRegion(RegionInfo regionInfo) {
diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingMetaTableIsolation.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingMetaTableIsolation.java
index 1843cb7b895d..b8e144b4ac85 100644
--- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingMetaTableIsolation.java
+++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingMetaTableIsolation.java
@@ -71,7 +71,7 @@ public static void setup() {
// Create regions
List allRegions = new ArrayList<>();
for (int i = 0; i < NUM_REGIONS; i++) {
- TableName tableName = i < 3 ? MetaTableName.getInstance() : NON_META_TABLE_NAME;
+ TableName tableName = i < 3 ? connection.getMetaTableName() : NON_META_TABLE_NAME;
byte[] startKey = new byte[1];
startKey[0] = (byte) i;
byte[] endKey = new byte[1];
@@ -99,7 +99,7 @@ public void testMetaTableIsolation() {
}
private boolean isMetaTableIsolated(BalancerClusterState cluster) {
- return isTableIsolated(cluster, MetaTableName.getInstance(), "Meta");
+ return isTableIsolated(cluster, connection.getMetaTableName(), "Meta");
}
}
diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingTableIsolationAndReplicaDistribution.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingTableIsolationAndReplicaDistribution.java
index 4c8e9ffd5526..94f38e0d8cca 100644
--- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingTableIsolationAndReplicaDistribution.java
+++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingTableIsolationAndReplicaDistribution.java
@@ -75,7 +75,7 @@ public static void setup() {
for (int i = 0; i < NUM_REGIONS; i++) {
TableName tableName;
if (i < 1) {
- tableName = MetaTableName.getInstance();
+ tableName = connection.getMetaTableName();
} else if (i < 10) {
tableName = SYSTEM_TABLE_NAME;
} else {
@@ -120,7 +120,7 @@ public void testTableIsolationAndReplicaDistribution() {
* Validates whether all meta table regions are isolated.
*/
private boolean isMetaTableIsolated(BalancerClusterState cluster) {
- return isTableIsolated(cluster, MetaTableName.getInstance(), "Meta");
+ return isTableIsolated(cluster, connection.getMetaTableName(), "Meta");
}
/**
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
index 2ef315d2b4ac..9877810be5b9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
@@ -238,7 +238,7 @@ private boolean tryComplete(LocateRequest req, CompletableFuture {
+ .createSelector(replicaSelectorClass, conn.getMetaTableName(), conn, () -> {
int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS;
try {
RegionLocations metaLocations = conn.registry.getMetaRegionLocations()
@@ -246,7 +246,7 @@ private boolean tryComplete(LocateRequest req, CompletableFuture {
LOG.debug("Clear meta cache for {}", tableName);
- if (tableName.equals(MetaTableName.getInstance())) {
+ if (tableName.equals(conn.getMetaTableName())) {
metaRegionLocator.clearCache();
} else {
nonMetaRegionLocator.clearCache(tableName);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java
index 32349a64651d..c0d297460330 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java
@@ -64,7 +64,7 @@ public CompletableFuture> getAllRegionLocations() {
.thenApply(locs -> Arrays.asList(locs.getRegionLocations()));
}
CompletableFuture<List<HRegionLocation>> future = ClientMetaTableAccessor
- .getTableHRegionLocations(conn.getTable(MetaTableName.getInstance()), tableName);
+ .getTableHRegionLocations(conn.getTable(conn.getMetaTableName()), tableName);
addListener(future, (locs, error) -> locs.forEach(loc -> {
// the cache assumes that all locations have a serverName. only add if that's true
if (loc.getServerName() != null) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
index 818d3be9913d..b09d6360a322 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -405,7 +405,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
AsyncAdminBuilderBase builder) {
this.connection = connection;
this.retryTimer = retryTimer;
- this.metaTable = connection.getTable(MetaTableName.getInstance());
+ this.metaTable = connection.getTable(connection.getMetaTableName());
this.rpcTimeoutNs = builder.rpcTimeoutNs;
this.operationTimeoutNs = builder.operationTimeoutNs;
this.pauseNs = builder.pauseNs;
@@ -1012,7 +1012,7 @@ List> adminCall(controller, stub,
@Override
public CompletableFuture<List<RegionInfo>> getRegions(TableName tableName) {
- if (tableName.equals(MetaTableName.getInstance())) {
+ if (tableName.equals(connection.getMetaTableName())) {
return connection.registry.getMetaRegionLocations()
.thenApply(locs -> Stream.of(locs.getRegionLocations()).map(HRegionLocation::getRegion)
.collect(Collectors.toList()));
@@ -1303,7 +1303,7 @@ private CompletableFuture compactRegion(byte[] regionName, byte[] columnFa
* List all region locations for the specific table.
*/
private CompletableFuture<List<HRegionLocation>> getTableHRegionLocations(TableName tableName) {
- if (MetaTableName.getInstance().equals(tableName)) {
+ if (connection.getMetaTableName().equals(tableName)) {
CompletableFuture<List<HRegionLocation>> future = new CompletableFuture<>();
addListener(connection.registry.getMetaRegionLocations(), (metaRegions, err) -> {
if (err != null) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java
index 8f0c11a03f6b..6c7fcb1b8feb 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java
@@ -73,7 +73,7 @@ private static void injectFault() throws ServiceException {
future.completeExceptionally(ProtobufUtil.handleRemoteException(injectedException));
return future;
}
- AsyncTable> table = conn.getTable(MetaTableName.getInstance());
+ AsyncTable> table = conn.getTable(conn.getMetaTableName());
table. coprocessorService(
AuthenticationProtos.AuthenticationService::newStub,
@@ -102,7 +102,7 @@ static Token obtainToken(Connection conn) throws
try {
injectFault();
- meta = conn.getTable(MetaTableName.getInstance());
+ meta = conn.getTable(conn.getMetaTableName());
CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW);
AuthenticationProtos.AuthenticationService.BlockingInterface service =
AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel);
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java
index 7fa6cf7fb108..c9eb597da204 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java
@@ -196,7 +196,7 @@ public void testCreateSystemTable() {
// that we pass the correct priority
@Test
public void testCreateMetaTable() {
- conn.getAdmin().createTable(TableDescriptorBuilder.newBuilder(MetaTableName.getInstance())
+ conn.getAdmin().createTable(TableDescriptorBuilder.newBuilder(conn.getMetaTableName())
.setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build()).join();
verify(masterStub, times(1)).createTable(assertPriority(SYSTEMTABLE_QOS),
any(CreateTableRequest.class), any());
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java
index 787f0454e5dc..6215fea6853e 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java
@@ -85,7 +85,7 @@ public class TestAsyncRegionLocatorTracing {
@Before
public void setUp() throws IOException {
- RegionInfo metaRegionInfo = RegionInfoBuilder.newBuilder(MetaTableName.getInstance()).build();
+ RegionInfo metaRegionInfo = RegionInfoBuilder.newBuilder(conn.getMetaTableName()).build();
locs = new RegionLocations(
new HRegionLocation(metaRegionInfo,
ServerName.valueOf("127.0.0.1", 12345, EnvironmentEdgeManager.currentTime())),
@@ -147,30 +147,30 @@ public void testClearCacheServerName() {
@Test
public void testClearCacheTableName() {
- conn.getLocator().clearCache(MetaTableName.getInstance());
+ conn.getLocator().clearCache(conn.getMetaTableName());
SpanData span = waitSpan("AsyncRegionLocator.clearCache");
assertThat(span,
allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.INTERNAL),
buildConnectionAttributesMatcher(conn),
- buildTableAttributesMatcher(MetaTableName.getInstance())));
+ buildTableAttributesMatcher(conn.getMetaTableName())));
}
@Test
public void testGetRegionLocation() {
- conn.getLocator().getRegionLocation(MetaTableName.getInstance(), HConstants.EMPTY_START_ROW,
+ conn.getLocator().getRegionLocation(conn.getMetaTableName(), HConstants.EMPTY_START_ROW,
RegionLocateType.CURRENT, TimeUnit.SECONDS.toNanos(1)).join();
SpanData span = waitSpan("AsyncRegionLocator.getRegionLocation");
assertThat(span,
allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.INTERNAL),
buildConnectionAttributesMatcher(conn),
- buildTableAttributesMatcher(MetaTableName.getInstance()),
+ buildTableAttributesMatcher(conn.getMetaTableName()),
hasAttributes(containsEntryWithStringValuesOf("db.hbase.regions",
locs.getDefaultRegionLocation().getRegion().getRegionNameAsString()))));
}
@Test
public void testGetRegionLocations() {
- conn.getLocator().getRegionLocations(MetaTableName.getInstance(), HConstants.EMPTY_START_ROW,
+ conn.getLocator().getRegionLocations(conn.getMetaTableName(), HConstants.EMPTY_START_ROW,
RegionLocateType.CURRENT, false, TimeUnit.SECONDS.toNanos(1)).join();
SpanData span = waitSpan("AsyncRegionLocator.getRegionLocations");
String[] expectedRegions =
@@ -178,7 +178,7 @@ public void testGetRegionLocations() {
.map(RegionInfo::getRegionNameAsString).toArray(String[]::new);
assertThat(span, allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.INTERNAL),
buildConnectionAttributesMatcher(conn),
- buildTableAttributesMatcher(MetaTableName.getInstance()), hasAttributes(
+ buildTableAttributesMatcher(conn.getMetaTableName()), hasAttributes(
containsEntryWithStringValuesOf("db.hbase.regions", containsInAnyOrder(expectedRegions)))));
}
}
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java
index dcf51d038f35..f3cfe1c2e7f7 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java
@@ -238,7 +238,7 @@ public void testGetSystemTable() {
@Test
public void testGetMetaTable() {
- conn.getTable(MetaTableName.getInstance()).get(new Get(Bytes.toBytes(0))).join();
+ conn.getTable(conn.getMetaTableName()).get(new Get(Bytes.toBytes(0))).join();
verify(stub, times(1)).get(assertPriority(SYSTEMTABLE_QOS), any(GetRequest.class), any());
}
@@ -269,7 +269,7 @@ public void testPutSystemTable() {
@Test
public void testPutMetaTable() {
- conn.getTable(MetaTableName.getInstance()).put(new Put(Bytes.toBytes(0))
+ conn.getTable(conn.getMetaTableName()).put(new Put(Bytes.toBytes(0))
.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))).join();
verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any());
}
@@ -297,7 +297,7 @@ public void testDeleteSystemTable() {
@Test
public void testDeleteMetaTable() {
- conn.getTable(MetaTableName.getInstance()).delete(new Delete(Bytes.toBytes(0))).join();
+ conn.getTable(conn.getMetaTableName()).delete(new Delete(Bytes.toBytes(0))).join();
verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any());
}
@@ -328,7 +328,7 @@ public void testAppendSystemTable() {
@Test
public void testAppendMetaTable() {
- conn.getTable(MetaTableName.getInstance()).append(new Append(Bytes.toBytes(0))
+ conn.getTable(conn.getMetaTableName()).append(new Append(Bytes.toBytes(0))
.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))).join();
verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any());
}
@@ -356,7 +356,7 @@ public void testIncrementSystemTable() {
@Test
public void testIncrementMetaTable() {
- conn.getTable(MetaTableName.getInstance())
+ conn.getTable(conn.getMetaTableName())
.incrementColumnValue(Bytes.toBytes(0), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1).join();
verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any());
}
@@ -394,7 +394,7 @@ public void testCheckAndPutSystemTable() {
@Test
public void testCheckAndPutMetaTable() {
- conn.getTable(MetaTableName.getInstance()).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf"))
+ conn.getTable(conn.getMetaTableName()).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf"))
.qualifier(Bytes.toBytes("cq")).ifNotExists().thenPut(new Put(Bytes.toBytes(0))
.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")))
.join();
@@ -427,7 +427,7 @@ public void testCheckAndDeleteSystemTable() {
@Test
public void testCheckAndDeleteMetaTable() {
- conn.getTable(MetaTableName.getInstance()).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf"))
+ conn.getTable(conn.getMetaTableName()).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf"))
.qualifier(Bytes.toBytes("cq")).ifNotExists().thenPut(new Put(Bytes.toBytes(0))
.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")))
.join();
@@ -468,7 +468,7 @@ public void testCheckAndMutateSystemTable() throws IOException {
@Test
public void testCheckAndMutateMetaTable() throws IOException {
- conn.getTable(MetaTableName.getInstance()).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf"))
+ conn.getTable(conn.getMetaTableName()).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf"))
.qualifier(Bytes.toBytes("cq")).ifEquals(Bytes.toBytes("v"))
.thenMutate(new RowMutations(Bytes.toBytes(0)).add((Mutation) new Delete(Bytes.toBytes(0))))
.join();
@@ -556,7 +556,7 @@ public void testScanSystemTable() throws Exception {
@Test
public void testScanMetaTable() throws Exception {
CompletableFuture renewFuture = mockScanReturnRenewFuture(SYSTEMTABLE_QOS);
- testForTable(MetaTableName.getInstance(), renewFuture, Optional.empty());
+ testForTable(conn.getMetaTableName(), renewFuture, Optional.empty());
}
private void testForTable(TableName tableName, CompletableFuture renewFuture,
@@ -599,7 +599,7 @@ public void testBatchSystemTable() {
@Test
public void testBatchMetaTable() {
- conn.getTable(MetaTableName.getInstance()).batchAll(Arrays.asList(new Delete(Bytes.toBytes(0))))
+ conn.getTable(conn.getMetaTableName()).batchAll(Arrays.asList(new Delete(Bytes.toBytes(0))))
.join();
verify(stub, times(1)).multi(assertPriority(SYSTEMTABLE_QOS),
any(ClientProtos.MultiRequest.class), any());
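
These priority tests only change how the table name is obtained; as the assertions above show, the requests still go out at SYSTEMTABLE_QOS. A minimal client-side sketch of the same call shape, assuming only the new getMetaTableName() accessor; the row key is a made-up placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaGetSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      TableName meta = conn.getMetaTableName();
      AsyncTable<?> metaTable = conn.getTable(meta);
      // Placeholder row key; real catalog rows are keyed by region name.
      Result result = metaTable.get(new Get(Bytes.toBytes("example-row"))).get();
      System.out.println("found row in " + meta + ": " + !result.isEmpty());
    }
  }
}
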
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCompactFromClient.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCompactFromClient.java
index c8e6b2158ce1..3d4417947c52 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCompactFromClient.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCompactFromClient.java
@@ -94,7 +94,7 @@ public void testCompactTableWithNullLocations() throws Exception {
mockedMeta.when(() -> ClientMetaTableAccessor.getTableHRegionLocations(any(AsyncTable.class),
any(TableName.class))).thenReturn(nullLocationsFuture);
AsyncTable metaTable = mock(AsyncTable.class);
- when(connection.getTable(MetaTableName.getInstance())).thenReturn(metaTable);
+ when(connection.getTable(connection.getMetaTableName())).thenReturn(metaTable);
HashedWheelTimer hashedWheelTimer = mock(HashedWheelTimer.class);
AsyncAdminBuilderBase asyncAdminBuilderBase = mock(AsyncAdminBuilderBase.class);
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java
index e01b3b741dcc..8ed7b2739120 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java
@@ -142,7 +142,7 @@ public void testContainsRange() {
@Test
public void testContainsRangeForMetaTable() {
TableDescriptor tableDesc =
- TableDescriptorBuilder.newBuilder(MetaTableName.getInstance()).build();
+ TableDescriptorBuilder.newBuilder(connection.getMetaTableName()).build();
RegionInfo hri = RegionInfoBuilder.newBuilder(tableDesc.getTableName()).build();
byte[] startRow = HConstants.EMPTY_START_ROW;
byte[] row1 = Bytes.toBytes("a,a,0");
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java
index d09f7a225a6e..b94b73aedf59 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java
@@ -60,7 +60,7 @@ public class TestTableDescriptorBuilder {
@Test(expected = IOException.class)
public void testAddCoprocessorTwice() throws IOException {
String cpName = "a.b.c.d";
- TableDescriptorBuilder.newBuilder(MetaTableName.getInstance()).setCoprocessor(cpName)
+ TableDescriptorBuilder.newBuilder(connection.getMetaTableName()).setCoprocessor(cpName)
.setCoprocessor(cpName).build();
}
@@ -68,7 +68,7 @@ public void testAddCoprocessorTwice() throws IOException {
public void testPb() throws DeserializationException, IOException {
final int v = 123;
TableDescriptor htd =
- TableDescriptorBuilder.newBuilder(MetaTableName.getInstance()).setMaxFileSize(v)
+ TableDescriptorBuilder.newBuilder(connection.getMetaTableName()).setMaxFileSize(v)
.setDurability(Durability.ASYNC_WAL).setReadOnly(true).setRegionReplication(2).build();
byte[] bytes = TableDescriptorBuilder.toByteArray(htd);
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java
index 5c143d8ee065..88b17cc051a5 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java
@@ -197,36 +197,36 @@ public void testMetaComparisons2() {
CellComparator c = MetaCellComparator.META_COMPARATOR;
assertTrue(c.compare(
createByteBufferKeyValueFromKeyValue(new KeyValue(
- Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now)),
+ Bytes.toBytes(connection.getMetaTableName().getNameAsString() + ",a,,0,1"), now)),
createByteBufferKeyValueFromKeyValue(new KeyValue(
- Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now))) == 0);
+ Bytes.toBytes(connection.getMetaTableName().getNameAsString() + ",a,,0,1"), now))) == 0);
Cell a = createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now));
+ new KeyValue(Bytes.toBytes(connection.getMetaTableName().getNameAsString() + ",a,,0,1"), now));
Cell b = createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,2"), now));
+ new KeyValue(Bytes.toBytes(connection.getMetaTableName().getNameAsString() + ",a,,0,2"), now));
assertTrue(c.compare(a, b) < 0);
assertTrue(c.compare(
createByteBufferKeyValueFromKeyValue(new KeyValue(
- Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,2"), now)),
+ Bytes.toBytes(connection.getMetaTableName().getNameAsString() + ",a,,0,2"), now)),
createByteBufferKeyValueFromKeyValue(new KeyValue(
- Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now))) > 0);
+ Bytes.toBytes(connection.getMetaTableName().getNameAsString() + ",a,,0,1"), now))) > 0);
assertTrue(c.compare(
createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now)),
+ new KeyValue(Bytes.toBytes(connection.getMetaTableName().getNameAsString() + ",,1"), now)),
createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now)))
+ new KeyValue(Bytes.toBytes(connection.getMetaTableName().getNameAsString() + ",,1"), now)))
== 0);
assertTrue(c.compare(
createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now)),
+ new KeyValue(Bytes.toBytes(connection.getMetaTableName().getNameAsString() + ",,1"), now)),
createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,2"), now)))
+ new KeyValue(Bytes.toBytes(connection.getMetaTableName().getNameAsString() + ",,2"), now)))
< 0);
assertTrue(c.compare(
createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,2"), now)),
+ new KeyValue(Bytes.toBytes(connection.getMetaTableName().getNameAsString() + ",,2"), now)),
createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now)))
+ new KeyValue(Bytes.toBytes(connection.getMetaTableName().getNameAsString() + ",,1"), now)))
> 0);
}
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
index 1e65b75a9777..24e751215149 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
@@ -198,32 +198,32 @@ public void testKeyValueBorderCases() {
private void metacomparisons(final CellComparatorImpl c) {
long now = EnvironmentEdgeManager.currentTime();
assertTrue(c.compare(
- new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now),
- new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now))
+ new KeyValue(Bytes.toBytes(connection.getMetaTableName().getNameAsString() + ",a,,0,1"), now),
+ new KeyValue(Bytes.toBytes(connection.getMetaTableName().getNameAsString() + ",a,,0,1"), now))
== 0);
KeyValue a =
- new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now);
+ new KeyValue(Bytes.toBytes(connection.getMetaTableName().getNameAsString() + ",a,,0,1"), now);
KeyValue b =
- new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,2"), now);
+ new KeyValue(Bytes.toBytes(connection.getMetaTableName().getNameAsString() + ",a,,0,2"), now);
assertTrue(c.compare(a, b) < 0);
assertTrue(c.compare(
- new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,2"), now),
- new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now))
+ new KeyValue(Bytes.toBytes(connection.getMetaTableName().getNameAsString() + ",a,,0,2"), now),
+ new KeyValue(Bytes.toBytes(connection.getMetaTableName().getNameAsString() + ",a,,0,1"), now))
> 0);
}
private void comparisons(final CellComparatorImpl c) {
long now = EnvironmentEdgeManager.currentTime();
assertTrue(c.compare(
- new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now),
- new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now))
+ new KeyValue(Bytes.toBytes(connection.getMetaTableName().getNameAsString() + ",,1"), now),
+ new KeyValue(Bytes.toBytes(connection.getMetaTableName().getNameAsString() + ",,1"), now))
== 0);
assertTrue(c.compare(
- new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now),
- new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,2"), now)) < 0);
+ new KeyValue(Bytes.toBytes(connection.getMetaTableName().getNameAsString() + ",,1"), now),
+ new KeyValue(Bytes.toBytes(connection.getMetaTableName().getNameAsString() + ",,2"), now)) < 0);
assertTrue(c.compare(
- new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,2"), now),
- new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now)) > 0);
+ new KeyValue(Bytes.toBytes(connection.getMetaTableName().getNameAsString() + ",,2"), now),
+ new KeyValue(Bytes.toBytes(connection.getMetaTableName().getNameAsString() + ",,1"), now)) > 0);
}
@Test
diff --git a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
index 54ceeecfec21..6daf0f94be17 100644
--- a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
+++ b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
@@ -222,16 +222,16 @@ public void testRegionStatesCount() throws Exception {
ClusterMetrics metrics = ADMIN.getClusterMetrics();
Assert.assertEquals(metrics.getTableRegionStatesCount().size(), 2);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getRegionsInTransition(),
+ metrics.getTableRegionStatesCount().get(connection.getMetaTableName()).getRegionsInTransition(),
0);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getOpenRegions(), 1);
+ metrics.getTableRegionStatesCount().get(connection.getMetaTableName()).getOpenRegions(), 1);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getTotalRegions(), 1);
+ metrics.getTableRegionStatesCount().get(connection.getMetaTableName()).getTotalRegions(), 1);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getClosedRegions(), 0);
+ metrics.getTableRegionStatesCount().get(connection.getMetaTableName()).getClosedRegions(), 0);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getSplitRegions(), 0);
+ metrics.getTableRegionStatesCount().get(connection.getMetaTableName()).getSplitRegions(), 0);
Assert.assertEquals(
metrics.getTableRegionStatesCount().get(TABLE_NAME).getRegionsInTransition(), 0);
Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME).getOpenRegions(), 1);
@@ -253,12 +253,12 @@ public void testRegionStatesWithSplit() throws Exception {
ClusterMetrics metrics = ADMIN.getClusterMetrics();
Assert.assertEquals(metrics.getTableRegionStatesCount().size(), 2);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getRegionsInTransition(),
+ metrics.getTableRegionStatesCount().get(connection.getMetaTableName()).getRegionsInTransition(),
0);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getOpenRegions(), 1);
+ metrics.getTableRegionStatesCount().get(connection.getMetaTableName()).getOpenRegions(), 1);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getTotalRegions(), 1);
+ metrics.getTableRegionStatesCount().get(connection.getMetaTableName()).getTotalRegions(), 1);
Assert.assertEquals(
metrics.getTableRegionStatesCount().get(TABLE_NAME).getRegionsInTransition(), 0);
Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME).getOpenRegions(), 1);
@@ -273,12 +273,12 @@ public void testRegionStatesWithSplit() throws Exception {
metrics = ADMIN.getClusterMetrics();
Assert.assertEquals(metrics.getTableRegionStatesCount().size(), 2);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getRegionsInTransition(),
+ metrics.getTableRegionStatesCount().get(connection.getMetaTableName()).getRegionsInTransition(),
0);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getOpenRegions(), 1);
+ metrics.getTableRegionStatesCount().get(connection.getMetaTableName()).getOpenRegions(), 1);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getTotalRegions(), 1);
+ metrics.getTableRegionStatesCount().get(connection.getMetaTableName()).getTotalRegions(), 1);
Assert.assertEquals(
metrics.getTableRegionStatesCount().get(TABLE_NAME).getRegionsInTransition(), 0);
Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME).getOpenRegions(), 2);
diff --git a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java
index 267b78dade13..ea0d3cf16236 100644
--- a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java
+++ b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java
@@ -165,7 +165,7 @@ public void startHBase() throws IOException {
int attemptsLeft = 10;
while (attemptsLeft-- > 0) {
try {
- testUtil.getConnection().getTable(MetaTableName.getInstance());
+ testUtil.getConnection().getTable(testUtil.getConnection().getMetaTableName());
} catch (Exception e) {
LOG.info("Waiting for HBase to startup. Retries left: " + attemptsLeft, e);
Threads.sleep(1000);
diff --git a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java
index dc7d025796bd..4b56ed4c00c5 100644
--- a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java
+++ b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java
@@ -125,7 +125,7 @@ protected int doWork() throws Exception {
LOG.debug("Trying to scan meta");
- Table metaTable = connection.getTable(MetaTableName.getInstance());
+ Table metaTable = connection.getTable(connection.getMetaTableName());
ResultScanner scanner = metaTable.getScanner(new Scan());
Result result;
while ((result = scanner.next()) != null) {
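
The same pattern from the blocking API, as RestartMetaTest exercises above. This sketch assumes the blocking Connection carries the same getMetaTableName() accessor that the server-side hunks of this patch already call on it; the connection setup is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanMetaSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      TableName meta = connection.getMetaTableName();
      try (Table metaTable = connection.getTable(meta);
        ResultScanner scanner = metaTable.getScanner(new Scan())) {
        for (Result result : scanner) {
          // Each catalog row is keyed by a region name.
          System.out.println(Bytes.toStringBinary(result.getRow()));
        }
      }
    }
  }
}
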
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java
index fd07d7e1dc6a..57a635a02c14 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java
@@ -56,7 +56,7 @@ public static void setUp() throws Exception {
1000);
// Make sure there are three servers.
util.initializeCluster(3);
- HBaseTestingUtil.setReplicas(util.getAdmin(), MetaTableName.getInstance(), 3);
+ HBaseTestingUtil.setReplicas(util.getAdmin(), util.getConnection().getMetaTableName(), 3);
ZKWatcher zkw = util.getZooKeeperWatcher();
Configuration conf = util.getConfiguration();
String baseZNode =
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
index fbc98f006393..3fe10281264d 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
@@ -185,7 +185,7 @@ private static void setupActions() throws IOException {
// Set up the action that will move the regions of meta.
moveMetaRegionsAction = new MoveRegionsOfTableAction(sleepTime,
- MonkeyConstants.DEFAULT_MOVE_REGIONS_MAX_TIME, MetaTableName.getInstance());
+ MonkeyConstants.DEFAULT_MOVE_REGIONS_MAX_TIME, util.getConnection().getMetaTableName());
// Set up the action that will move the regions of our table.
moveRegionAction = new MoveRegionsOfTableAction(sleepTime,
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
index b861c29a9bcc..0e5d7ec4c572 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
@@ -257,7 +257,7 @@ public void testSimpleCase() throws Throwable {
@Test
public void testMetaExport() throws Throwable {
String[] args =
- new String[] { MetaTableName.getInstance().getNameAsString(), FQ_OUTPUT_DIR, "1", "0", "0" };
+ new String[] { connection.getMetaTableName().getNameAsString(), FQ_OUTPUT_DIR, "1", "0", "0" };
assertTrue(runExport(args));
}
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java
index 5ddc23d36554..c429bdb48f99 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java
@@ -56,7 +56,7 @@ public class TestStatusResource {
private static final Logger LOG = LoggerFactory.getLogger(TestStatusResource.class);
- private static final byte[] META_REGION_NAME = Bytes.toBytes(MetaTableName.getInstance() + ",,1");
+ private static final byte[] META_REGION_NAME = Bytes.toBytes(connection.getMetaTableName() + ",,1");
private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility();
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java
index 1db8c371593c..8be9a2b18f1a 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java
@@ -89,7 +89,7 @@ protected StorageClusterStatusModel buildTestModel() {
model.addLiveNode("test1", 1245219839331L, 128, 1024).addRegion(Bytes.toBytes("hbase:root,,0"),
1, 1, 0, 0, 0, 1, 1, 2, 1, 1, 1, 1, 1);
model.addLiveNode("test2", 1245239331198L, 512, 1024).addRegion(
- Bytes.toBytes(MetaTableName.getInstance() + ",,1246000043724"), 1, 1, 0, 0, 0, 1, 1, 2, 1, 1,
+ Bytes.toBytes(connection.getMetaTableName() + ",,1246000043724"), 1, 1, 0, 0, 0, 1, 1, 2, 1, 1,
1, 1, 1);
return model;
}
@@ -128,7 +128,7 @@ protected void checkModel(StorageClusterStatusModel model) {
assertEquals(1024, node.getMaxHeapSizeMB());
regions = node.getRegions().iterator();
region = regions.next();
- assertEquals(Bytes.toString(region.getName()), MetaTableName.getInstance() + ",,1246000043724");
+ assertEquals(Bytes.toString(region.getName()), connection.getMetaTableName() + ",,1246000043724");
assertEquals(1, region.getStores());
assertEquals(1, region.getStorefiles());
assertEquals(0, region.getStorefileSizeMB());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
index 3edfc1eb67a0..3b16014ead55 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
@@ -606,7 +606,7 @@ public static void printAssignmentPlan(FavoredNodesPlan plan) {
*/
public void updateAssignmentPlanToMeta(FavoredNodesPlan plan) throws IOException {
try {
- LOG.info("Started updating {} with the new assignment plan", MetaTableName.getInstance());
+ LOG.info("Started updating {} with the new assignment plan", connection.getMetaTableName());
Map> assignmentMap = plan.getAssignmentMap();
Map> planToUpdate = new HashMap<>(assignmentMap.size());
Map regionToRegionInfoMap =
@@ -620,7 +620,7 @@ public void updateAssignmentPlanToMeta(FavoredNodesPlan plan) throws IOException
} catch (Exception e) {
LOG.error(
"Failed to update hbase:meta with the new assignment" + "plan because " + e.getMessage());
- LOG.info("Updated {} with the new assignment plan", MetaTableName.getInstance());
+ LOG.info("Updated {} with the new assignment plan", connection.getMetaTableName());
}
}
@@ -693,13 +693,13 @@ private void updateAssignmentPlanToRegionServers(FavoredNodesPlan plan) throws I
public void updateAssignmentPlan(FavoredNodesPlan plan) throws IOException {
LOG.info("Started updating the new assignment plan for {} and the region servers",
- MetaTableName.getInstance());
+ connection.getMetaTableName());
// Update the new assignment plan to META
updateAssignmentPlanToMeta(plan);
// Update the new assignment plan to Region Servers
updateAssignmentPlanToRegionServers(plan);
LOG.info("Finished updating the new assignment plan for {} and the region servers",
- MetaTableName.getInstance());
+ connection.getMetaTableName());
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
index cdd54d616bee..bfe9a9451c89 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
@@ -87,7 +87,7 @@ public boolean isTableState(TableName tableName, TableState.State... states) {
}
public void setDeletedTable(TableName tableName) throws IOException {
- if (tableName.equals(MetaTableName.getInstance())) {
+ if (tableName.equals(master.getConnection().getMetaTableName())) {
// Can't delete the hbase:meta table.
return;
}
@@ -148,7 +148,7 @@ public TableState getTableState(TableName tableName) throws IOException {
}
private void updateMetaState(TableName tableName, TableState.State newState) throws IOException {
- if (tableName.equals(MetaTableName.getInstance())) {
+ if (tableName.equals(master.getConnection().getMetaTableName())) {
if (
TableState.State.DISABLING.equals(newState) || TableState.State.DISABLED.equals(newState)
) {
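
TableStateManager now compares against the connection-reported name rather than a constant. The shape of that guard, extracted into a hypothetical standalone helper; the class and method names here are illustrative, not part of the patch.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;

/** Hypothetical helper illustrating the guard pattern used above. */
public final class MetaGuard {
  private MetaGuard() {
  }

  /**
   * Rejects operations that must never run against the catalog table. Instead of comparing
   * against a static constant, the meta name is taken from the connection, so clusters with a
   * non-default meta table name are handled the same way.
   */
  public static void checkNotMeta(Connection connection, TableName tableName) throws IOException {
    if (tableName.equals(connection.getMetaTableName())) {
      throw new IOException("Operation not allowed on catalog table " + tableName);
    }
  }
}
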
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index 3a50d74e8a64..58dac734823c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -355,7 +355,7 @@ public void start() throws IOException, KeeperException {
if (RegionReplicaUtil.isDefaultReplica(regionInfo.getReplicaId())) {
setMetaAssigned(regionInfo, state == State.OPEN);
}
- LOG.debug("Loaded {} {}", MetaTableName.getInstance(), regionNode);
+ LOG.debug("Loaded {} {}", master.getConnection().getMetaTableName(), regionNode);
}, result);
}
}
@@ -1963,7 +1963,7 @@ private void checkMetaLoaded(RegionInfo hri, long procId) throws PleaseHoldExcep
boolean meta = isMetaRegion(hri);
boolean metaLoaded = isMetaLoaded();
if (!meta && !metaLoaded) {
- throw new PleaseHoldException("Master not fully online; " + MetaTableName.getInstance() + "="
+ throw new PleaseHoldException("Master not fully online; " + master.getConnection().getMetaTableName() + "="
+ meta + ", metaLoaded=" + metaLoaded);
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index e6891d08f075..c08ed70dd538 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -722,7 +722,7 @@ private void preMergeRegionsCommit(final MasterProcedureEnv env) throws IOExcept
LOG.error(
"Row key of mutation from coprocessor is not parsable as region name. "
+ "Mutations from coprocessor should only be for {} table.",
- MetaTableName.getInstance(), e);
+ env.getMasterServices().getConnection().getMetaTableName(), e);
throw e;
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
index 4d42ad619255..afe7a18ae433 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
@@ -172,8 +172,8 @@ public static void visitMetaEntry(final RegionStateVisitor visitor, final Result
LOG.debug(
"Load {} entry region={}, regionState={}, lastHost={}, "
+ "regionLocation={}, openSeqNum={}",
- MetaTableName.getInstance(), regionInfo.getEncodedName(), state, lastHost, regionLocation,
- openSeqNum);
+ MetaTableName.getInstance(), regionInfo.getEncodedName(), state, lastHost,
+ regionLocation, openSeqNum);
visitor.visitRegionState(result, regionInfo, state, regionLocation, lastHost, openSeqNum);
}
}
@@ -191,9 +191,9 @@ private Put generateUpdateRegionLocationPut(RegionStateNode regionStateNode) thr
final int replicaId = regionInfo.getReplicaId();
final Put put = new Put(CatalogFamilyFormat.getMetaKeyForRegion(regionInfo), time);
MetaTableAccessor.addRegionInfo(put, regionInfo);
- final StringBuilder info =
- new StringBuilder("pid=").append(pid).append(" updating ").append(MetaTableName.getInstance())
- .append(" row=").append(regionInfo.getEncodedName()).append(", regionState=").append(state);
+ final StringBuilder info = new StringBuilder("pid=").append(pid).append(" updating ")
+ .append(MetaTableName.getInstance()).append(" row=")
+ .append(regionInfo.getEncodedName()).append(", regionState=").append(state);
if (openSeqNum >= 0) {
Preconditions.checkArgument(state == State.OPEN && regionLocation != null,
"Open region should be on a server");
@@ -285,7 +285,8 @@ private CompletableFuture updateRegionLocation(RegionInfo regionInfo, Stat
future = FutureUtils.failedFuture(e);
}
} else {
- AsyncTable> table = master.getAsyncConnection().getTable(MetaTableName.getInstance());
+ AsyncTable> table =
+ master.getAsyncConnection().getTable(MetaTableName.getInstance());
future = table.put(put);
}
FutureUtils.addListener(future, (r, e) -> {
@@ -331,8 +332,8 @@ private void multiMutate(RegionInfo ri, List mutations) throws IOExcep
}
}
MutateRowsRequest request = builder.build();
- AsyncTable> table =
- master.getConnection().toAsyncConnection().getTable(MetaTableName.getInstance());
+ AsyncTable> table = master.getConnection().toAsyncConnection()
+ .getTable(MetaTableName.getInstance());
CompletableFuture future = table. coprocessorService(MultiRowMutationService::newStub,
(stub, controller, done) -> stub.mutateRows(controller, request, done), row);
@@ -506,7 +507,8 @@ public void deleteMergeQualifiers(RegionInfo mergeRegion) throws IOException {
+ " in meta table, they are cleaned up already, Skip.");
return;
}
- try (Table table = master.getConnection().getTable(MetaTableName.getInstance())) {
+ try (Table table =
+ master.getConnection().getTable(MetaTableName.getInstance())) {
table.delete(delete);
}
LOG.info(
@@ -696,7 +698,8 @@ public static State getRegionState(final Result r, RegionInfo regionInfo) {
return State.valueOf(state);
} catch (IllegalArgumentException e) {
LOG.warn(
- "BAD value {} in " + MetaTableName.getInstance() + " info:state column for region {} , "
+ "BAD value {} in " + MetaTableName.getInstance()
+ + " info:state column for region {} , "
+ "Consider using HBCK2 setRegionState ENCODED_REGION_NAME STATE",
state, regionInfo.getEncodedName());
return null;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 55ac3fa2a0bb..950e4cf13aeb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -907,7 +907,7 @@ private void preSplitRegionBeforeMETA(final MasterProcedureEnv env)
LOG.error(
"pid={} row key of mutation from coprocessor not parsable as region name. "
+ "Mutations from coprocessor should only be for {} table.",
- getProcId(), MetaTableName.getInstance());
+ getProcId(), env.getMasterServices().getConnection().getMetaTableName());
throw e;
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MetaBrowser.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MetaBrowser.java
index f9fa67da83ae..b6c1daade0ed 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MetaBrowser.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MetaBrowser.java
@@ -157,7 +157,7 @@ public TableName getScanTable() {
public Results getResults() {
final AsyncTable asyncTable =
- connection.getTable(MetaTableName.getInstance());
+ connection.getTable(connection.getMetaTableName());
return new Results(asyncTable.getScanner(buildScan()));
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java
index a6cec33c3efb..0af596e26a41 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java
@@ -106,7 +106,8 @@ protected boolean initialChore() {
scan();
}
} catch (IOException e) {
- LOG.warn("Failed initial janitorial scan of {} table", MetaTableName.getInstance(), e);
+ LOG.warn("Failed initial janitorial scan of {} table",
+ services.getConnection().getMetaTableName(), e);
return false;
}
return true;
@@ -146,7 +147,8 @@ protected void chore() {
+ this.services.getServerManager().isClusterShutdown());
}
} catch (IOException e) {
- LOG.warn("Failed janitorial scan of {} table", MetaTableName.getInstance(), e);
+ LOG.warn("Failed janitorial scan of {} table",
+ services.getConnection().getMetaTableName(), e);
}
}
@@ -485,7 +487,7 @@ public static void main(String[] args) throws IOException {
*/
Get g = new Get(Bytes.toBytes("t2,40,1564119846424.1db8c57d64e0733e0f027aaeae7a0bf0."));
g.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
- try (Table t = connection.getTable(MetaTableName.getInstance())) {
+ try (Table t = connection.getTable(connection.getMetaTableName())) {
Result r = t.get(g);
byte[] row = g.getRow();
row[row.length - 2] <<= row[row.length - 2];
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java
index c74be9de50d9..c915db1f0fe9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java
@@ -140,7 +140,7 @@ private RegionInfo metaTableConsistencyCheck(Result metaTableRow) {
"INCONSISTENCY: Row name is not equal to serialized info:regioninfo content; "
+ "row={} {}; See if RegionInfo is referenced in another {} row? Delete?",
Bytes.toStringBinary(metaTableRow.getRow()), ri.getRegionNameAsString(),
- MetaTableName.getInstance());
+ services.getConnection().getMetaTableName());
return null;
}
// Skip split parent region
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
index 98f43871238e..59cf23be8a44 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
@@ -396,7 +396,7 @@ private static void cleanRegionsInMeta(final MasterProcedureEnv env, final Table
List deletes = new ArrayList<>();
try (
Table metaTable =
- env.getMasterServices().getConnection().getTable(MetaTableName.getInstance());
+ env.getMasterServices().getConnection().getTable(env.getMasterServices().getConnection().getMetaTableName());
ResultScanner scanner = metaTable.getScanner(tableScan)) {
for (;;) {
Result result = scanner.next();
@@ -407,7 +407,7 @@ private static void cleanRegionsInMeta(final MasterProcedureEnv env, final Table
}
if (!deletes.isEmpty()) {
LOG.warn("Deleting some vestigial " + deletes.size() + " rows of " + tableName + " from "
- + MetaTableName.getInstance());
+ + env.getMasterServices().getConnection().getMetaTableName());
metaTable.delete(deletes);
}
}
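
Spelling the accessor chain out inline, as in the getTable() call above, pushes lines well past the usual wrap point. One option is to hoist the connection into a local and resolve the name once; a self-contained sketch of that shape (names are illustrative, and this is not what the patch currently does):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public final class MetaCleanupSketch {
  private MetaCleanupSketch() {
  }

  /** Deletes every row returned by the given scan from the cluster's catalog table. */
  public static int deleteFromMeta(Connection conn, Scan tableScan) throws IOException {
    // Resolve the catalog table name once instead of repeating the accessor chain.
    TableName metaTableName = conn.getMetaTableName();
    List<Delete> deletes = new ArrayList<>();
    try (Table metaTable = conn.getTable(metaTableName);
      ResultScanner scanner = metaTable.getScanner(tableScan)) {
      for (Result result : scanner) {
        deletes.add(new Delete(result.getRow()));
      }
      int count = deletes.size();
      if (count > 0) {
        metaTable.delete(deletes);
      }
      return count;
    }
  }
}
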
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
index 8ce33c1574ca..5cb9aca6962b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
@@ -112,7 +112,7 @@ protected Flow executeFromState(final MasterProcedureEnv env, final DisableTable
) {
MasterFileSystem fs = env.getMasterFileSystem();
try (BufferedMutator mutator = env.getMasterServices().getConnection()
- .getBufferedMutator(MetaTableName.getInstance())) {
+ .getBufferedMutator(env.getMasterServices().getConnection().getMetaTableName())) {
for (RegionInfo region : env.getAssignmentManager().getRegionStates()
.getRegionsOfTable(tableName)) {
long maxSequenceId = WALSplitUtil.getMaxRegionSequenceId(
@@ -231,7 +231,7 @@ public TableOperationType getTableOperationType() {
*/
private boolean prepareDisable(final MasterProcedureEnv env) throws IOException {
boolean canTableBeDisabled = true;
- if (tableName.equals(MetaTableName.getInstance())) {
+ if (tableName.equals(env.getMasterServices().getConnection().getMetaTableName())) {
setFailure("master-disable-table",
new ConstraintException("Cannot disable " + this.tableName));
canTableBeDisabled = false;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java
index d7a4ce95c4ff..88eaf7efcfb0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java
@@ -103,13 +103,13 @@ List getRegionsOnCrashedServer(MasterProcedureEnv env) {
MetaTableAccessor.scanMetaForTableRegions(env.getMasterServices().getConnection(), visitor,
null);
} catch (IOException ioe) {
- LOG.warn("Failed scan of {} for 'Unknown Servers'", MetaTableName.getInstance(), ioe);
+ LOG.warn("Failed scan of {} for 'Unknown Servers'", env.getMasterServices().getConnection().getMetaTableName(), ioe);
return ris;
}
// create the server state node too
env.getAssignmentManager().getRegionStates().createServer(getServerName());
LOG.info("Found {} mentions of {} in {} of OPEN/OPENING Regions: {}",
- visitor.getReassigns().size(), getServerName(), MetaTableName.getInstance(), visitor
+ visitor.getReassigns().size(), getServerName(), env.getMasterServices().getConnection().getMetaTableName(), visitor
.getReassigns().stream().map(RegionInfo::getEncodedName).collect(Collectors.joining(",")));
return visitor.getReassigns();
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java
index 30a120143ade..909d6862e636 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java
@@ -65,7 +65,7 @@ private void migrate(MasterProcedureEnv env) throws IOException {
try (Table nsTable = conn.getTable(TableName.NAMESPACE_TABLE_NAME);
ResultScanner scanner = nsTable.getScanner(
new Scan().addFamily(TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES).readAllVersions());
- BufferedMutator mutator = conn.getBufferedMutator(MetaTableName.getInstance())) {
+ BufferedMutator mutator = conn.getBufferedMutator(conn.getMetaTableName())) {
for (Result result;;) {
result = scanner.next();
if (result == null) {
@@ -89,7 +89,7 @@ protected Flow executeFromState(MasterProcedureEnv env, MigrateNamespaceTablePro
switch (state) {
case MIGRATE_NAMESPACE_TABLE_ADD_FAMILY:
TableDescriptor metaTableDesc =
- env.getMasterServices().getTableDescriptors().get(MetaTableName.getInstance());
+ env.getMasterServices().getTableDescriptors().get(env.getMasterServices().getConnection().getMetaTableName());
if (!metaTableDesc.hasColumnFamily(HConstants.NAMESPACE_FAMILY)) {
TableDescriptor newMetaTableDesc = TableDescriptorBuilder.newBuilder(metaTableDesc)
.setColumnFamily(
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
index c9e2d67b43df..74603c226a61 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
@@ -110,7 +110,7 @@ protected void preflightChecks(MasterProcedureEnv env, Boolean enabled) throws H
Set cfs = this.modifiedTableDescriptor.getColumnFamilyNames();
for (byte[] family : UNDELETABLE_META_COLUMNFAMILIES) {
if (!cfs.contains(family)) {
- throw new HBaseIOException("Delete of " + MetaTableName.getInstance() + " column family "
+ throw new HBaseIOException("Delete of " + env.getMasterServices().getConnection().getMetaTableName() + " column family "
+ Bytes.toString(family));
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 4b7b7787f79d..957650564e20 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -1929,7 +1929,7 @@ public OpenRegionResponse openRegion(final RpcController controller,
tableName = ProtobufUtil.toTableName(ri.getTableName());
}
}
- if (!MetaTableName.getInstance().equals(tableName)) {
+ if (!server.getConnection().getMetaTableName().equals(tableName)) {
throw new ServiceException(ie);
}
// We are assigning meta, wait a little for regionserver to finish initialization.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 9eff10a0b160..b94511c9c264 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -541,7 +541,7 @@ public void run() {
connection = ConnectionFactory.createConnection(getConf());
admin = connection.getAdmin();
- meta = connection.getTable(MetaTableName.getInstance());
+ meta = connection.getTable(connection.getMetaTableName());
status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS,
Option.MASTER, Option.BACKUP_MASTERS, Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));
}
@@ -661,19 +661,19 @@ public int onlineConsistencyRepair() throws IOException, KeeperException, Interr
reportUnknownServers();
// Check if hbase:meta is found only once and in the right place
if (!checkMetaRegion()) {
- String errorMsg = MetaTableName.getInstance() + " table is not consistent. ";
+ String errorMsg = connection.getMetaTableName() + " table is not consistent. ";
if (shouldFixAssignments()) {
- errorMsg += "HBCK will try fixing it. Rerun once " + MetaTableName.getInstance()
+ errorMsg += "HBCK will try fixing it. Rerun once " + connection.getMetaTableName()
+ " is back " + "to consistent state.";
} else {
- errorMsg += "Run HBCK with proper fix options to fix " + MetaTableName.getInstance()
+ errorMsg += "Run HBCK with proper fix options to fix " + connection.getMetaTableName()
+ " inconsistency.";
}
errors.reportError(errorMsg + " Exiting...");
return -2;
}
// Not going with further consistency check for tables when hbase:meta itself is not consistent.
- LOG.info("Loading regionsinfo from the {} table", MetaTableName.getInstance());
+ LOG.info("Loading regionsinfo from the {} table", connection.getMetaTableName());
boolean success = loadMetaEntries();
if (!success) return -1;
@@ -1222,7 +1222,7 @@ private boolean sidelineFile(FileSystem fs, Path hbaseRoot, Path path) throws IO
* TODO -- need to add tests for this.
*/
private void reportEmptyMetaCells() {
- errors.print("Number of empty REGIONINFO_QUALIFIER rows in " + MetaTableName.getInstance()
+ errors.print("Number of empty REGIONINFO_QUALIFIER rows in " + connection.getMetaTableName()
+ ": " + emptyRegionInfoQualifiers.size());
if (details) {
for (Result r : emptyRegionInfoQualifiers) {
@@ -1374,7 +1374,7 @@ private boolean fabricateTableInfo(FSTableDescriptors fstd, TableName tableName,
*/
public void fixEmptyMetaCells() throws IOException {
if (shouldFixEmptyMetaCells() && !emptyRegionInfoQualifiers.isEmpty()) {
- LOG.info("Trying to fix empty REGIONINFO_QUALIFIER {} rows.", MetaTableName.getInstance());
+ LOG.info("Trying to fix empty REGIONINFO_QUALIFIER {} rows.", connection.getMetaTableName());
for (Result region : emptyRegionInfoQualifiers) {
deleteMetaRegion(region.getRow());
errors.getErrorList().remove(ERROR_CODE.EMPTY_META_CELL);
@@ -1577,8 +1577,8 @@ private void loadTableStates() throws IOException {
// Add hbase:meta so this tool keeps working. In hbase2, meta is always enabled though it
// has no entry in the table states. HBCK doesn't work right w/ hbase2 but just do this in
// meantime.
- this.tableStates.put(MetaTableName.getInstance(),
- new TableState(MetaTableName.getInstance(), TableState.State.ENABLED));
+ this.tableStates.put(connection.getMetaTableName(),
+ new TableState(connection.getMetaTableName(), TableState.State.ENABLED));
}
/**
@@ -1607,7 +1607,7 @@ public void loadHdfsRegionDirs() throws IOException, InterruptedException {
TableName tableName = CommonFSUtils.getTableName(path);
if (
(!checkMetaOnly && isTableIncluded(tableName))
- || tableName.equals(MetaTableName.getInstance())
+ || tableName.equals(connection.getMetaTableName())
) {
tableDirs.add(fs.getFileStatus(path));
}
@@ -1652,7 +1652,7 @@ public void loadHdfsRegionDirs() throws IOException, InterruptedException {
*/
private boolean recordMetaRegion() throws IOException {
List locs;
- try (RegionLocator locator = connection.getRegionLocator(MetaTableName.getInstance())) {
+ try (RegionLocator locator = connection.getRegionLocator(connection.getMetaTableName())) {
locs = locator.getRegionLocations(HConstants.EMPTY_START_ROW, true);
}
if (locs == null || locs.isEmpty()) {
@@ -2026,7 +2026,7 @@ void closeRegion(HbckRegionInfo hi) throws IOException, InterruptedException {
"Unable to close region " + hi.getRegionNameAsString()
+ " because {} had invalid or missing " + HConstants.CATALOG_FAMILY_STR + ":"
+ Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " qualifier value.",
- MetaTableName.getInstance());
+ connection.getMetaTableName());
continue;
}
// close the region -- close files and remove assignment
@@ -2146,7 +2146,7 @@ else if (!inMeta && !inHdfs && !isDeployed) {
} else if (!inMeta && !inHdfs && isDeployed) {
errors.reportError(ERROR_CODE.NOT_IN_META_HDFS,
"Region " + descriptiveName + ", key=" + key + ", not on HDFS or in "
- + MetaTableName.getInstance() + " but " + "deployed on "
+ + connection.getMetaTableName() + " but " + "deployed on "
+ Joiner.on(", ").join(hbi.getDeployedOn()));
if (shouldFixAssignments()) {
undeployRegions(hbi);
@@ -2162,7 +2162,7 @@ else if (!inMeta && !inHdfs && !isDeployed) {
return;
}
errors.reportError(ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
- "Region " + descriptiveName + " on HDFS, but not listed in " + MetaTableName.getInstance()
+ "Region " + descriptiveName + " on HDFS, but not listed in " + connection.getMetaTableName()
+ " or deployed on any region server");
// restore region consistency of an adopted orphan
if (shouldFixMeta()) {
@@ -2203,7 +2203,7 @@ else if (!inMeta && !inHdfs && !isDeployed) {
}
}
}
- LOG.info("Patching {} with .regioninfo: " + hbi.getHdfsHRI(), MetaTableName.getInstance());
+ LOG.info("Patching {} with .regioninfo: " + hbi.getHdfsHRI(), connection.getMetaTableName());
int numReplicas = admin.getDescriptor(hbi.getTableName()).getRegionReplication();
HBaseFsckRepair.fixMetaHoleOnlineAndAddReplicas(getConf(), hbi.getHdfsHRI(),
admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet(),
@@ -2232,7 +2232,7 @@ else if (!inMeta && !inHdfs && !isDeployed) {
}
LOG.info("Patching {} with with .regioninfo: " + hbi.getHdfsHRI(),
- MetaTableName.getInstance());
+ connection.getMetaTableName());
int numReplicas = admin.getDescriptor(hbi.getTableName()).getRegionReplication();
HBaseFsckRepair.fixMetaHoleOnlineAndAddReplicas(getConf(), hbi.getHdfsHRI(),
admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet(),
@@ -2309,7 +2309,7 @@ else if (!inMeta && !inHdfs && !isDeployed) {
}
} else if (inMeta && inHdfs && isMultiplyDeployed) {
errors.reportError(ERROR_CODE.MULTI_DEPLOYED,
- "Region " + descriptiveName + " is listed in " + MetaTableName.getInstance()
+ "Region " + descriptiveName + " is listed in " + connection.getMetaTableName()
+ " on region server " + hbi.getMetaEntry().regionServer + " but is multiply assigned"
+ " to region servers " + Joiner.on(", ").join(hbi.getDeployedOn()));
// If we are trying to fix the errors
@@ -2321,7 +2321,7 @@ else if (!inMeta && !inHdfs && !isDeployed) {
}
} else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) {
errors.reportError(ERROR_CODE.SERVER_DOES_NOT_MATCH_META,
- "Region " + descriptiveName + " listed in " + MetaTableName.getInstance()
+ "Region " + descriptiveName + " listed in " + connection.getMetaTableName()
+ " on region server " + hbi.getMetaEntry().regionServer + " but found on region server "
+ hbi.getDeployedOn().get(0));
// If we are trying to fix the errors
@@ -2607,7 +2607,7 @@ boolean checkMetaRegion() throws IOException, KeeperException, InterruptedExcept
metaRegions.put(value.getReplicaId(), value);
}
}
- int metaReplication = admin.getDescriptor(MetaTableName.getInstance()).getRegionReplication();
+ int metaReplication = admin.getDescriptor(connection.getMetaTableName()).getRegionReplication();
boolean noProblem = true;
// There will be always entries in regionInfoMap corresponding to hbase:meta & its replicas
// Check the deployed servers. It should be exactly one server for each replica.
@@ -2623,10 +2623,10 @@ boolean checkMetaRegion() throws IOException, KeeperException, InterruptedExcept
assignMetaReplica(i);
} else if (servers.size() > 1) {
errors.reportError(ERROR_CODE.MULTI_META_REGION,
- MetaTableName.getInstance() + ", replicaId " + metaHbckRegionInfo.getReplicaId()
+ connection.getMetaTableName() + ", replicaId " + metaHbckRegionInfo.getReplicaId()
+ " is found on more than one region.");
if (shouldFixAssignments()) {
- errors.print("Trying to fix a problem with " + MetaTableName.getInstance()
+ errors.print("Trying to fix a problem with " + connection.getMetaTableName()
+ ", replicaId " + metaHbckRegionInfo.getReplicaId() + "..");
setShouldRerun();
// try fix it (treat is a dupe assignment)
@@ -2640,11 +2640,11 @@ boolean checkMetaRegion() throws IOException, KeeperException, InterruptedExcept
for (Map.Entry entry : metaRegions.entrySet()) {
noProblem = false;
errors.reportError(ERROR_CODE.SHOULD_NOT_BE_DEPLOYED,
- MetaTableName.getInstance() + " replicas are deployed in excess. Configured "
+ connection.getMetaTableName() + " replicas are deployed in excess. Configured "
+ metaReplication + ", deployed " + metaRegions.size());
if (shouldFixAssignments()) {
errors.print("Trying to undeploy excess replica, replicaId: " + entry.getKey() + " of "
- + MetaTableName.getInstance() + "..");
+ + connection.getMetaTableName() + "..");
setShouldRerun();
unassignMetaReplica(entry.getValue());
}
@@ -2664,9 +2664,9 @@ private void unassignMetaReplica(HbckRegionInfo hi)
private void assignMetaReplica(int replicaId)
throws IOException, KeeperException, InterruptedException {
errors.reportError(ERROR_CODE.NO_META_REGION,
- MetaTableName.getInstance() + ", replicaId " + replicaId + " is not found on any region.");
+ connection.getMetaTableName() + ", replicaId " + replicaId + " is not found on any region.");
if (shouldFixAssignments()) {
- errors.print("Trying to fix a problem with " + MetaTableName.getInstance() + "..");
+ errors.print("Trying to fix a problem with " + connection.getMetaTableName() + "..");
setShouldRerun();
// try to fix it (treat it as unassigned region)
RegionInfo h = RegionReplicaUtil
@@ -2702,7 +2702,7 @@ public boolean visit(Result result) throws IOException {
if (rl == null) {
emptyRegionInfoQualifiers.add(result);
errors.reportError(ERROR_CODE.EMPTY_META_CELL,
- "Empty REGIONINFO_QUALIFIER found in " + MetaTableName.getInstance());
+ "Empty REGIONINFO_QUALIFIER found in " + connection.getMetaTableName());
return true;
}
ServerName sn = null;
@@ -2712,7 +2712,7 @@ public boolean visit(Result result) throws IOException {
) {
emptyRegionInfoQualifiers.add(result);
errors.reportError(ERROR_CODE.EMPTY_META_CELL,
- "Empty REGIONINFO_QUALIFIER found in " + MetaTableName.getInstance());
+ "Empty REGIONINFO_QUALIFIER found in " + connection.getMetaTableName());
return true;
}
RegionInfo hri = rl.getRegionLocation(RegionInfo.DEFAULT_REPLICA_ID).getRegion();
@@ -2741,7 +2741,7 @@ public boolean visit(Result result) throws IOException {
previous.setMetaEntry(m);
} else {
throw new IOException(
- "Two entries in " + MetaTableName.getInstance() + " are same " + previous);
+ "Two entries in " + connection.getMetaTableName() + " are same " + previous);
}
}
List mergeParents = CatalogFamilyFormat.getMergeRegions(result.rawCells());
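
HBaseFsck ends up calling connection.getMetaTableName() in dozens of messages and checks. If the name is fixed for the lifetime of a connection, a tool of this kind can resolve it once up front; an illustrative skeleton, with class and method names invented for the example.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

/** Illustrative tool skeleton: resolve the catalog table name once and reuse it. */
public class MetaAwareTool implements AutoCloseable {
  private final Connection connection;
  private final TableName metaTableName;

  public MetaAwareTool(Configuration conf) throws IOException {
    this.connection = ConnectionFactory.createConnection(conf);
    // Resolved once; assumed stable for the lifetime of the connection.
    this.metaTableName = connection.getMetaTableName();
  }

  /** Example use: locate the catalog regions without hard-coding "hbase:meta". */
  public void printMetaRegions() throws IOException {
    try (RegionLocator locator = connection.getRegionLocator(metaTableName)) {
      locator.getAllRegionLocations()
        .forEach(loc -> System.out.println(loc.getRegion().getRegionNameAsString() + " on "
          + loc.getServerName()));
    }
  }

  @Override
  public void close() throws IOException {
    connection.close();
  }
}
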
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
index 7321d7da178d..11d295435f73 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
@@ -587,12 +587,12 @@ private void unloadRegions(ServerName server, List regionServers,
// For isolating hbase:meta, it should move explicitly in Ack mode,
// hence the forceMoveRegionByAck = true.
if (!metaSeverName.equals(server)) {
- LOG.info("Region of {} {} is on server {} moving to {}", MetaTableName.getInstance(),
+ LOG.info("Region of {} {} is on server {} moving to {}", conn.getMetaTableName(),
metaRegionInfo.getEncodedName(), metaSeverName, server);
submitRegionMovesWhileUnloading(metaSeverName, Collections.singletonList(server),
movedRegions, Collections.singletonList(metaRegionInfo), true);
} else {
- LOG.info("Region of {} {} already exists on server: {}", MetaTableName.getInstance(),
+ LOG.info("Region of {} {} already exists on server: {}", conn.getMetaTableName(),
metaRegionInfo.getEncodedName(), server);
}
isolateRegionInfoList.add(RegionInfoBuilder.FIRST_META_REGIONINFO);
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/catalogTables.jsp b/hbase-server/src/main/resources/hbase-webapps/master/catalogTables.jsp
index f157f327103e..be5b63cff9ea 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/catalogTables.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/catalogTables.jsp
@@ -57,7 +57,7 @@
| <%= frags.get(tableName.getNameAsString()) != null ? frags.get(tableName.getNameAsString()) + "%" : "n/a" %> |
<% } %>
<% String description = "";
- if (tableName.equals(TableName.META_TABLE_NAME)){
+ if (tableName.equals(master.getConnection().getMetaTableName())){
description = "The hbase:meta table holds references to all User Table regions.";
} else if (tableName.equals(CanaryTool.DEFAULT_WRITE_TABLE_NAME)){
description = "The hbase:canary table is used to sniff the write availability of"
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index d838cd3c50bd..c94b32e5d651 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -196,8 +196,8 @@
Table table = master.getConnection().getTable(TableName.valueOf(fqtn));
boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false);
boolean readOnly = !InfoServer.canUserModifyUI(request, getServletContext(), conf);
- int numMetaReplicas =
- master.getTableDescriptors().get(MetaTableName.getInstance()).getRegionReplication();
+ int numMetaReplicas = master.getTableDescriptors()
+ .get(master.getConnection().getMetaTableName()).getRegionReplication();
Map frags = null;
if (showFragmentation) {
frags = FSUtils.getTableFragmentation(master);
@@ -318,7 +318,7 @@
<% //Meta table.
- if(fqtn.equals(MetaTableName.getInstance().getNameAsString())) { %>
+ if(fqtn.equals(master.getConnection().getMetaTableName().getNameAsString())) { %>
Table Regions
@@ -654,7 +654,7 @@