From b0123870c3d649b9b71cfa8c0dec777e48ff3d61 Mon Sep 17 00:00:00 2001 From: Abhishek Kothalikar <99398985+kabhishek4@users.noreply.github.com> Date: Tue, 11 Mar 2025 20:03:25 +0530 Subject: [PATCH 1/2] HBASE-29081: Feature implementation (cherry picked from commit 7ab9d52801fc5be72b742582d1732a8f5e602d86) --- .../backup/impl/IncrementalBackupManager.java | 3 +- .../hbase/backup/master/BackupLogCleaner.java | 2 +- .../SnapshotOfRegionAssignmentFromMeta.java | 6 +- .../hadoop/hbase/ActiveClusterSuffix.java | 98 ++++ .../org/apache/hadoop/hbase/client/Admin.java | 28 + .../hbase/client/AdminOverAsyncAdmin.java | 20 + .../hadoop/hbase/client/AsyncAdmin.java | 20 + .../hadoop/hbase/client/AsyncHBaseAdmin.java | 20 + .../hbase/client/RawAsyncHBaseAdmin.java | 63 +++ .../hadoop/hbase/client/RegionInfo.java | 2 +- .../org/apache/hadoop/hbase/HConstants.java | 25 + .../org/apache/hadoop/hbase/TableName.java | 37 +- .../master/MetricsMasterFileSystemSource.java | 3 +- .../protobuf/server/ActiveClusterSuffix.proto | 33 ++ .../main/protobuf/server/master/Master.proto | 25 + .../server/master/MasterProcedure.proto | 31 ++ .../hadoop/hbase/MetaTableAccessor.java | 4 +- .../apache/hadoop/hbase/TableDescriptors.java | 7 + .../hbase/coprocessor/CoprocessorHost.java | 35 ++ .../MasterCoprocessorEnvironment.java | 3 + .../hadoop/hbase/executor/EventType.java | 8 +- .../hadoop/hbase/executor/ExecutorType.java | 3 +- .../apache/hadoop/hbase/master/HMaster.java | 144 +++++- .../hbase/master/MasterCoprocessorHost.java | 5 + .../hadoop/hbase/master/MasterFileSystem.java | 76 ++- .../hbase/master/MasterRpcServices.java | 31 ++ .../master/RegionPlacementMaintainer.java | 16 +- .../master/assignment/AssignmentManager.java | 6 +- .../MergeTableRegionsProcedure.java | 6 +- .../master/assignment/RegionStateStore.java | 11 +- .../assignment/SplitTableRegionProcedure.java | 6 +- .../hadoop/hbase/master/hbck/HbckChore.java | 3 +- .../hbase/master/janitor/CatalogJanitor.java | 4 +- 
.../hbase/master/janitor/MetaFixer.java | 12 +- .../master/janitor/ReportMakingVisitor.java | 6 +- .../procedure/HBCKServerCrashProcedure.java | 13 +- .../master/procedure/InitMetaProcedure.java | 2 +- .../procedure/ModifyTableProcedure.java | 2 +- .../RefreshHFilesRegionProcedure.java | 208 ++++++++ .../RefreshHFilesTableProcedure.java | 165 ++++++ .../procedure/RefreshMetaProcedure.java | 480 +++++++++++++++++ .../procedure/TableProcedureInterface.java | 3 +- .../hbase/master/procedure/TableQueue.java | 1 + .../master/region/MasterRegionFactory.java | 28 +- .../region/HFileProcedurePrettyPrinter.java | 2 +- .../hbase/regionserver/CompactSplit.java | 19 + .../hadoop/hbase/regionserver/HRegion.java | 25 + .../hbase/regionserver/HRegionServer.java | 21 + .../regionserver/RefreshHFilesCallable.java | 70 +++ .../StoreFileTrackerBase.java | 19 +- .../access/AbstractReadOnlyController.java | 105 ++++ .../access/BulkLoadReadOnlyController.java | 53 ++ .../access/EndpointReadOnlyController.java | 50 ++ .../access/MasterReadOnlyController.java | 433 ++++++++++++++++ .../access/RegionReadOnlyController.java | 371 ++++++++++++++ .../RegionServerReadOnlyController.java | 64 +++ .../hadoop/hbase/tool/BulkLoadHFilesTool.java | 12 +- .../hadoop/hbase/util/ConfigurationUtil.java | 6 + .../util/CoprocessorConfigurationUtil.java | 85 +++ .../hadoop/hbase/util/FSTableDescriptors.java | 29 +- .../org/apache/hadoop/hbase/util/FSUtils.java | 112 +++- .../apache/hadoop/hbase/util/HBaseFsck.java | 75 +-- .../apache/hadoop/hbase/util/RegionMover.java | 9 +- .../hadoop/hbase/TestMetaTableForReplica.java | 176 +++++++ .../hadoop/hbase/TestRefreshHFilesBase.java | 157 ++++++ .../client/TestRefreshHFilesFromClient.java | 139 +++++ .../hbase/master/TestMasterMetrics.java | 201 +++++++- .../master/assignment/TestHbckChore.java | 43 ++ .../procedure/TestRefreshHFilesProcedure.java | 116 +++++ ...efreshHFilesProcedureWithReadOnlyConf.java | 124 +++++ .../procedure/TestRefreshMetaProcedure.java | 
121 +++++ .../TestRefreshMetaProcedureIntegration.java | 275 ++++++++++ .../region/TestMasterRegionInitialize.java | 18 + .../TestHFileProcedurePrettyPrinter.java | 2 +- .../region/TestWALProcedurePrettyPrinter.java | 2 +- .../regionserver/TestActiveClusterSuffix.java | 158 ++++++ .../TestCompactSplitReadOnly.java | 87 ++++ .../DummyStoreFileTrackerForReadOnlyMode.java | 79 +++ .../TestStoreFileTrackerBaseReadOnlyMode.java | 146 ++++++ .../hbase/rsgroup/VerifyingRSGroupAdmin.java | 19 + .../TestCanStartHBaseInReadOnlyMode.java | 67 +++ .../access/TestReadOnlyController.java | 217 ++++++++ ...estReadOnlyControllerBulkLoadObserver.java | 70 +++ ...tReadOnlyControllerCoprocessorLoading.java | 258 ++++++++++ ...estReadOnlyControllerEndpointObserver.java | 75 +++ .../TestReadOnlyControllerMasterObserver.java | 393 ++++++++++++++ .../TestReadOnlyControllerRegionObserver.java | 483 ++++++++++++++++++ ...eadOnlyControllerRegionServerObserver.java | 89 ++++ .../TestReadOnlyManageActiveClusterFile.java | 153 ++++++ .../TestCoprocessorConfigurationUtil.java | 200 ++++++++ .../hbase/util/TestFSTableDescriptors.java | 33 ++ hbase-shell/src/main/ruby/hbase/admin.rb | 26 + hbase-shell/src/main/ruby/shell.rb | 2 + .../ruby/shell/commands/refresh_hfiles.rb | 64 +++ .../main/ruby/shell/commands/refresh_meta.rb | 43 ++ .../hbase/thrift2/client/ThriftAdmin.java | 20 + .../hbase/zookeeper/MetaTableLocator.java | 19 +- .../apache/hadoop/hbase/zookeeper/ZKDump.java | 3 +- 98 files changed, 7188 insertions(+), 154 deletions(-) create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/ActiveClusterSuffix.java create mode 100644 hbase-protocol-shaded/src/main/protobuf/server/ActiveClusterSuffix.proto create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RefreshHFilesRegionProcedure.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RefreshHFilesTableProcedure.java create mode 100644 
hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RefreshMetaProcedure.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RefreshHFilesCallable.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AbstractReadOnlyController.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/BulkLoadReadOnlyController.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/EndpointReadOnlyController.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/MasterReadOnlyController.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/RegionReadOnlyController.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/RegionServerReadOnlyController.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableForReplica.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/TestRefreshHFilesBase.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRefreshHFilesFromClient.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRefreshHFilesProcedure.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRefreshHFilesProcedureWithReadOnlyConf.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRefreshMetaProcedure.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRefreshMetaProcedureIntegration.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestActiveClusterSuffix.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSplitReadOnly.java create mode 100644 
hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/DummyStoreFileTrackerForReadOnlyMode.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/storefiletracker/TestStoreFileTrackerBaseReadOnlyMode.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCanStartHBaseInReadOnlyMode.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestReadOnlyController.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestReadOnlyControllerBulkLoadObserver.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestReadOnlyControllerCoprocessorLoading.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestReadOnlyControllerEndpointObserver.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestReadOnlyControllerMasterObserver.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestReadOnlyControllerRegionObserver.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestReadOnlyControllerRegionServerObserver.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestReadOnlyManageActiveClusterFile.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorConfigurationUtil.java create mode 100644 hbase-shell/src/main/ruby/shell/commands/refresh_hfiles.rb create mode 100644 hbase-shell/src/main/ruby/shell/commands/refresh_meta.rb diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java index 2d7aeb646db3..5f48bfc39ade 100644 --- 
a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java @@ -28,6 +28,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.util.BackupUtils; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore; @@ -156,7 +157,7 @@ private List getLogFilesForNewBackup(Map olderTimestamps, LOG.debug("currentLogFile: " + log.getPath().toString()); if (AbstractFSWALProvider.isMetaFile(log.getPath())) { if (LOG.isDebugEnabled()) { - LOG.debug("Skip hbase:meta log file: " + log.getPath().getName()); + LOG.debug("Skip {} log file: {}", TableName.META_TABLE_NAME, log.getPath().getName()); } continue; } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java index 971e80e2f83a..eff1151b035a 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java @@ -204,6 +204,6 @@ private static boolean isHMasterWAL(Path path) { String fn = path.getName(); return fn.startsWith(WALProcedureStore.LOG_PREFIX) || fn.endsWith(MasterRegionFactory.ARCHIVED_WAL_SUFFIX) - || path.toString().contains("/%s/".formatted(MasterRegionFactory.MASTER_STORE_DIR)); + || path.toString().contains("/%s/".formatted(MasterRegionFactory.getMasterRegionDirName())); } } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java index 02c18c73bfb5..74d49ec14c0b 100644 
--- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java @@ -170,7 +170,8 @@ private void processMetaRecord(Result result) throws IOException { * Initialize the region assignment snapshot by scanning the hbase:meta table */ public void initialize() throws IOException { - LOG.info("Start to scan the hbase:meta for the current region assignment " + "snappshot"); + LOG.info("Start to scan {} for the current region assignment snapshot", + TableName.META_TABLE_NAME); // Scan hbase:meta to pick up user regions try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME); ResultScanner scanner = metaTable.getScanner(HConstants.CATALOG_FAMILY)) { @@ -187,7 +188,8 @@ public void initialize() throws IOException { } } } - LOG.info("Finished to scan the hbase:meta for the current region assignment" + "snapshot"); + LOG.info("Finished scanning {} for the current region assignment snapshot", + TableName.META_TABLE_NAME); } private void addRegion(RegionInfo regionInfo) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ActiveClusterSuffix.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ActiveClusterSuffix.java new file mode 100644 index 000000000000..68fd61a22534 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ActiveClusterSuffix.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.io.IOException; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ActiveClusterSuffixProtos; + +/** + * The suffix for this cluster. It is serialized to the filesystem and up into zookeeper. This is a + * container for the id. Also knows how to serialize and deserialize the cluster id. + */ +@InterfaceAudience.Private +public class ActiveClusterSuffix { + private final String active_cluster_suffix; + + /** + * New ActiveClusterSuffix. 
+ */ + + public ActiveClusterSuffix(final String cs) { + this.active_cluster_suffix = cs; + } + + public String getActiveClusterSuffix() { + return active_cluster_suffix; + } + + /** Returns The active cluster suffix serialized using pb w/ pb magic prefix */ + public byte[] toByteArray() { + return ProtobufUtil.prependPBMagic(convert().toByteArray()); + } + + /** + * Parse the serialized representation of the {@link ActiveClusterSuffix} + * @param bytes A pb serialized {@link ActiveClusterSuffix} instance with pb magic prefix + * @return An instance of {@link ActiveClusterSuffix} made from bytes + * @see #toByteArray() + */ + public static ActiveClusterSuffix parseFrom(final byte[] bytes) throws DeserializationException { + if (ProtobufUtil.isPBMagicPrefix(bytes)) { + int pblen = ProtobufUtil.lengthOfPBMagic(); + ActiveClusterSuffixProtos.ActiveClusterSuffix.Builder builder = + ActiveClusterSuffixProtos.ActiveClusterSuffix.newBuilder(); + ActiveClusterSuffixProtos.ActiveClusterSuffix cs = null; + try { + ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen); + cs = builder.build(); + } catch (IOException e) { + throw new DeserializationException(e); + } + return convert(cs); + } else { + // Presume it was written out this way, the old way. + return new ActiveClusterSuffix(Bytes.toString(bytes)); + } + } + + /** Returns A pb instance to represent this instance. 
*/ + public ActiveClusterSuffixProtos.ActiveClusterSuffix convert() { + ActiveClusterSuffixProtos.ActiveClusterSuffix.Builder builder = + ActiveClusterSuffixProtos.ActiveClusterSuffix.newBuilder(); + return builder.setActiveClusterSuffix(this.active_cluster_suffix).build(); + } + + /** Returns A {@link ActiveClusterSuffix} made from the passed in cs */ + public static ActiveClusterSuffix + convert(final ActiveClusterSuffixProtos.ActiveClusterSuffix cs) { + return new ActiveClusterSuffix(cs.getActiveClusterSuffix()); + } + + /** + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + return this.active_cluster_suffix; + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 65b3abcd413c..5d00366a9fe8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -2705,4 +2705,32 @@ List getLogEntries(Set serverNames, String logType, Server @InterfaceAudience.Private void restoreBackupSystemTable(String snapshotName) throws IOException; + + /** + * Perform hbase:meta table refresh + */ + Long refreshMeta() throws IOException; + + /** + * Refresh HFiles for the table + * @param tableName table to refresh HFiles for + * @return ID of the procedure started for refreshing HFiles + * @throws IOException if a remote or network exception occurs + */ + Long refreshHFiles(final TableName tableName) throws IOException; + + /** + * Refresh HFiles for all the tables under given namespace + * @param namespace Namespace for which we should call refresh HFiles for all tables under it + * @return ID of the procedure started for refreshing HFiles + * @throws IOException if a remote or network exception occurs + */ + Long refreshHFiles(final String namespace) throws IOException; + + /** + * Refresh HFiles for all the tables + * @return ID of the 
procedure started for refreshing HFiles + * @throws IOException if a remote or network exception occurs + */ + Long refreshHFiles() throws IOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java index 7117fd4fd33f..6628b7b3f78b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java @@ -1157,4 +1157,24 @@ public List getCachedFilesList(ServerName serverName) throws IOException public void restoreBackupSystemTable(String snapshotName) throws IOException { get(admin.restoreBackupSystemTable(snapshotName)); } + + @Override + public Long refreshMeta() throws IOException { + return get(admin.refreshMeta()); + } + + @Override + public Long refreshHFiles(final TableName tableName) throws IOException { + return get(admin.refreshHFiles(tableName)); + } + + @Override + public Long refreshHFiles(final String namespace) throws IOException { + return get(admin.refreshHFiles(namespace)); + } + + @Override + public Long refreshHFiles() throws IOException { + return get(admin.refreshHFiles()); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index 56211cedc493..e27d7136c4d9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -1892,4 +1892,24 @@ CompletableFuture> getLogEntries(Set serverNames, Str @InterfaceAudience.Private CompletableFuture restoreBackupSystemTable(String snapshotName); + + /** + * Perform hbase:meta table refresh + */ + CompletableFuture refreshMeta(); + + /** + * Refresh HFiles for the table + */ + CompletableFuture refreshHFiles(final TableName tableName); + + 
/** + * Refresh HFiles for all the tables under given namespace + */ + CompletableFuture refreshHFiles(final String namespace); + + /** + * Refresh HFiles for all the tables + */ + CompletableFuture refreshHFiles(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java index 8132b184809c..f7923d6cad22 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java @@ -1025,4 +1025,24 @@ public CompletableFuture> getCachedFilesList(ServerName serverName) public CompletableFuture restoreBackupSystemTable(String snapshotName) { return wrap(rawAdmin.restoreBackupSystemTable(snapshotName)); } + + @Override + public CompletableFuture refreshMeta() { + return wrap(rawAdmin.refreshMeta()); + } + + @Override + public CompletableFuture refreshHFiles(final TableName tableName) { + return wrap(rawAdmin.refreshHFiles(tableName)); + } + + @Override + public CompletableFuture refreshHFiles(final String namespace) { + return wrap(rawAdmin.refreshHFiles(namespace)); + } + + @Override + public CompletableFuture refreshHFiles() { + return wrap(rawAdmin.refreshHFiles()); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index ea51d27b99a4..58409251cef3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -263,6 +263,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerRequest; import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RefreshHFilesRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RefreshHFilesResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RefreshMetaRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RefreshMetaResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ReopenTableRegionsRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ReopenTableRegionsResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest; @@ -4697,4 +4701,63 @@ MasterProtos.RestoreBackupSystemTableResponse> procedureCall(request, MasterProtos.RestoreBackupSystemTableResponse::getProcId, new RestoreBackupSystemTableProcedureBiConsumer()); } + + private CompletableFuture internalRefershHFiles(RefreshHFilesRequest request) { + return this. newMasterCaller() + .action((controller, stub) -> this. call( + controller, stub, request, MasterService.Interface::refreshHFiles, + RefreshHFilesResponse::getProcId)) + .call(); + } + + @Override + public CompletableFuture refreshMeta() { + RefreshMetaRequest.Builder request = RefreshMetaRequest.newBuilder(); + request.setNonceGroup(ng.getNonceGroup()).setNonce(ng.newNonce()); + return this. newMasterCaller() + .action((controller, stub) -> this. 
call( + controller, stub, request.build(), MasterService.Interface::refreshMeta, + RefreshMetaResponse::getProcId)) + .call(); + } + + @Override + public CompletableFuture refreshHFiles(final TableName tableName) { + if (tableName.isSystemTable()) { + LOG.warn("Refreshing HFiles for system table {} is not allowed", tableName.getNameAsString()); + throw new IllegalArgumentException( + "Not allowed to refresh HFiles for system table '" + tableName.getNameAsString() + "'"); + } + // Request builder + RefreshHFilesRequest.Builder request = RefreshHFilesRequest.newBuilder(); + request.setTableName(ProtobufUtil.toProtoTableName(tableName)); + request.setNonceGroup(ng.getNonceGroup()).setNonce(ng.newNonce()); + return internalRefershHFiles(request.build()); + } + + @Override + public CompletableFuture refreshHFiles(final String namespace) { + if ( + namespace.equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR) + || namespace.equals(NamespaceDescriptor.BACKUP_NAMESPACE_NAME_STR) + ) { + LOG.warn("Refreshing HFiles for reserve namespace {} is not allowed", namespace); + throw new IllegalArgumentException( + "Not allowed to refresh HFiles for reserve namespace '" + namespace + "'"); + } + // Request builder + RefreshHFilesRequest.Builder request = RefreshHFilesRequest.newBuilder(); + request.setNamespace(namespace); + request.setNonceGroup(ng.getNonceGroup()).setNonce(ng.newNonce()); + return internalRefershHFiles(request.build()); + } + + @Override + public CompletableFuture refreshHFiles() { + // Request builder + RefreshHFilesRequest.Builder request = RefreshHFilesRequest.newBuilder(); + // Set nonce + request.setNonceGroup(ng.getNonceGroup()).setNonce(ng.newNonce()); + return internalRefershHFiles(request.build()); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java index 10c554e26f79..82b8711b7762 100644 --- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java @@ -431,7 +431,7 @@ static byte[] toByteArray(RegionInfo ri) { */ static String prettyPrint(final String encodedRegionName) { if (encodedRegionName.equals("1028785192")) { - return encodedRegionName + "/hbase:meta"; + return encodedRegionName + "/" + TableName.META_TABLE_NAME; } return encodedRegionName; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 9af711e7edfd..4ed6d3dc9d58 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -1665,6 +1665,31 @@ public enum OperationStatusCode { */ public final static boolean REJECT_DECOMMISSIONED_HOSTS_DEFAULT = false; + /** + * Adds a suffix to the meta table name: value=’test’ -> ‘hbase:meta_test’ Added in HBASE-XXXXX to + * support having multiple hbase:meta tables (with distinct names )to enable storage sharing by + * more than one clusters. + */ + public final static String HBASE_META_TABLE_SUFFIX = "hbase.meta.table.suffix"; + + /** + * Default value of {@link #HBASE_META_TABLE_SUFFIX} + */ + public final static String HBASE_META_TABLE_SUFFIX_DEFAULT_VALUE = ""; + + /** + * Should HBase only serve Read Requests + */ + public final static String HBASE_GLOBAL_READONLY_ENABLED_KEY = "hbase.global.readonly.enabled"; + + /** + * Default value of {@link #HBASE_GLOBAL_READONLY_ENABLED_KEY} + */ + public final static boolean HBASE_GLOBAL_READONLY_ENABLED_DEFAULT = false; + + /** name of the file having active cluster suffix */ + public static final String ACTIVE_CLUSTER_SUFFIX_FILE_NAME = "active.cluster.suffix.id"; + private HConstants() { // Can't be instantiated with this ctor. 
} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java index b6d854c13784..73008c7ad5fd 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java @@ -17,16 +17,21 @@ */ package org.apache.hadoop.hbase; +import com.google.errorprone.annotations.RestrictedApi; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.Set; import java.util.concurrent.CopyOnWriteArraySet; import org.apache.commons.lang3.ArrayUtils; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; +import org.apache.hbase.thirdparty.com.google.common.base.Strings; /** * Immutable POJO class for representing a table name. Which is of the form: <table @@ -44,6 +49,7 @@ */ @InterfaceAudience.Public public final class TableName implements Comparable { + private static final Logger LOG = LoggerFactory.getLogger(TableName.class); /** See {@link #createTableNameIfNecessary(ByteBuffer, ByteBuffer)} */ private static final Set tableCache = new CopyOnWriteArraySet<>(); @@ -65,9 +71,34 @@ public final class TableName implements Comparable { public static final String VALID_USER_TABLE_REGEX = "(?:(?:(?:" + VALID_NAMESPACE_REGEX + "\\" + NAMESPACE_DELIM + ")?)" + "(?:" + VALID_TABLE_QUALIFIER_REGEX + "))"; - /** The hbase:meta table's name. */ - public static final TableName META_TABLE_NAME = - valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta"); + /** + * The name of hbase meta table could either be hbase:meta_xxx or 'hbase:meta' otherwise. 
Config + * hbase.meta.table.suffix will govern the decision of adding suffix to the habase:meta + */ + public static final TableName META_TABLE_NAME; + static { + Configuration conf = HBaseConfiguration.create(); + META_TABLE_NAME = initializeHbaseMetaTableName(conf); + LOG.info("Meta table name: {}", META_TABLE_NAME); + } + + /* Visible for testing only */ + @RestrictedApi(explanation = "Should only be called in tests", link = "", + allowedOnPath = ".*/src/test/.*") + public static TableName getDefaultNameOfMetaForReplica() { + return valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta"); + } + + public static TableName initializeHbaseMetaTableName(Configuration conf) { + String suffix_val = conf.get(HConstants.HBASE_META_TABLE_SUFFIX, + HConstants.HBASE_META_TABLE_SUFFIX_DEFAULT_VALUE); + LOG.info("Meta table suffix value: {}", suffix_val); + if (Strings.isNullOrEmpty(suffix_val)) { + return valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta"); + } else { + return valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta_" + suffix_val); + } + } /** * The Namespace table's name. 
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java index 53ed8a25ed0e..579171e1c3d7 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.master; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.yetus.audience.InterfaceAudience; @@ -49,7 +50,7 @@ public interface MetricsMasterFileSystemSource extends BaseSource { String SPLIT_SIZE_NAME = "hlogSplitSize"; String META_SPLIT_TIME_DESC = "Time it takes to finish splitMetaLog()"; - String META_SPLIT_SIZE_DESC = "Size of hbase:meta WAL files being split"; + String META_SPLIT_SIZE_DESC = "Size of " + TableName.META_TABLE_NAME + " WAL files being split"; String SPLIT_TIME_DESC = "Time it takes to finish WAL.splitLog()"; String SPLIT_SIZE_DESC = "Size of WAL files being split"; diff --git a/hbase-protocol-shaded/src/main/protobuf/server/ActiveClusterSuffix.proto b/hbase-protocol-shaded/src/main/protobuf/server/ActiveClusterSuffix.proto new file mode 100644 index 000000000000..89bc086212b3 --- /dev/null +++ b/hbase-protocol-shaded/src/main/protobuf/server/ActiveClusterSuffix.proto @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +syntax = "proto2"; +// This file contains protocol buffers that are shared throughout HBase +package hbase.pb; + +option java_package = "org.apache.hadoop.hbase.shaded.protobuf.generated"; +option java_outer_classname = "ActiveClusterSuffixProtos"; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +/** + * Content of the '/hbase/active_cluster_suffix.id' file to indicate the active cluster. + */ +message ActiveClusterSuffix { + // This is the active cluster suffix set by the user in the config, as a String + required string active_cluster_suffix = 1; +} diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto index f475d26060d0..c774a93605ab 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto @@ -807,6 +807,17 @@ message ModifyColumnStoreFileTrackerResponse { optional uint64 proc_id = 1; } +message RefreshHFilesRequest { + optional TableName table_name = 1; + optional string namespace = 2; + optional uint64 nonce_group = 3 [default = 0]; + optional uint64 nonce = 4 [default = 0]; +} + +message RefreshHFilesResponse { + optional uint64 proc_id = 1; +} + message FlushMasterStoreRequest {} message FlushMasterStoreResponse {} @@ -819,6 +830,14 @@ message RollAllWALWritersResponse { optional uint64 proc_id = 1; } +message RefreshMetaRequest { + optional uint64 nonce_group = 1 [default = 0]; + optional uint64 nonce = 2 [default = 0]; 
+} +message RefreshMetaResponse { + optional uint64 proc_id = 1; +} + service MasterService { /** Used by the client to get the number of regions that have received the updated schema */ rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest) @@ -1303,6 +1322,12 @@ service MasterService { rpc rollAllWALWriters(RollAllWALWritersRequest) returns(RollAllWALWritersResponse); + + rpc RefreshMeta(RefreshMetaRequest) + returns(RefreshMetaResponse); + + rpc RefreshHFiles(RefreshHFilesRequest) + returns(RefreshHFilesResponse); } // HBCK Service definitions. diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto index 7e6c6c8e2fc7..56086aed29e3 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto @@ -864,3 +864,34 @@ message LogRollRemoteProcedureResult { optional ServerName server_name = 1; optional uint64 last_highest_wal_filenum = 2; } + +enum RefreshMetaState { + REFRESH_META_INIT = 1; + REFRESH_META_SCAN_STORAGE = 2; + REFRESH_META_PREPARE = 3; + REFRESH_META_APPLY = 4; + REFRESH_META_FOLLOWUP = 5; + REFRESH_META_FINISH = 6; +} + +message RefreshMetaStateData { +} + +enum RefreshHFilesTableProcedureState { + REFRESH_HFILES_PREPARE = 1; + REFRESH_HFILES_REFRESH_REGION = 2; + REFRESH_HFILES_FINISH = 3; +} + +message RefreshHFilesTableProcedureStateData { + optional TableName table_name = 1; + optional string namespace_name = 2; +} + +message RefreshHFilesRegionProcedureStateData { + required RegionInfo region = 1; +} + +message RefreshHFilesRegionParameter { + required RegionInfo region = 1; +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index 05b049e27dbc..92d6a18486a5 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -753,7 +753,7 @@ private static void deleteFromMetaTable(final Connection connection, final Delet * @param connection connection we're using * @param deletes Deletes to add to hbase:meta This list should support #remove. */ - private static void deleteFromMetaTable(final Connection connection, final List deletes) + public static void deleteFromMetaTable(final Connection connection, final List deletes) throws IOException { try (Table t = getMetaHTable(connection)) { debugLogMutations(deletes); @@ -859,7 +859,7 @@ public static void addRegionsToMeta(Connection connection, List regi private static void updateTableState(Connection connection, TableState state) throws IOException { Put put = makePutFromTableState(state, EnvironmentEdgeManager.currentTime()); putToMetaTable(connection, put); - LOG.info("Updated {} in hbase:meta", state); + LOG.info("Updated {} in {}", state, TableName.META_TABLE_NAME); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java index d22e46383d30..32594ffce489 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java @@ -78,4 +78,11 @@ default void update(TableDescriptor htd) throws IOException { /** Returns Instance of table descriptor or null if none found. */ TableDescriptor remove(TableName tablename) throws IOException; + + /** + * Invalidates the table descriptor cache. 
+ */ + default void invalidateTableDescriptorCache() { + // do nothing by default + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java index 3d5897c0a056..625ac1dc5842 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java @@ -36,6 +36,8 @@ import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.conf.ConfigurationManager; +import org.apache.hadoop.hbase.conf.ConfigurationObserver; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.CoprocessorClassLoader; @@ -130,6 +132,39 @@ public Set getCoprocessorClassNames() { return returnValue; } + /** + * Used to help make the relevant loaded coprocessors dynamically configurable by registering them + * to the {@link ConfigurationManager}. Coprocessors are considered "relevant" if they implement + * the {@link ConfigurationObserver} interface. + * @param configurationManager the ConfigurationManager the coprocessors get registered to + */ + public void registerConfigurationObservers(ConfigurationManager configurationManager) { + Coprocessor foundCp; + Set coprocessors = this.getCoprocessors(); + for (String cp : coprocessors) { + foundCp = this.findCoprocessor(cp); + if (foundCp instanceof ConfigurationObserver) { + configurationManager.registerObserver((ConfigurationObserver) foundCp); + } + } + } + + /** + * Deregisters relevant coprocessors from the {@link ConfigurationManager}. Coprocessors are + * considered "relevant" if they implement the {@link ConfigurationObserver} interface. 
+ * @param configurationManager the ConfigurationManager the coprocessors get deregistered from + */ + public void deregisterConfigurationObservers(ConfigurationManager configurationManager) { + Coprocessor foundCp; + Set coprocessors = this.getCoprocessors(); + for (String cp : coprocessors) { + foundCp = this.findCoprocessor(cp); + if (foundCp instanceof ConfigurationObserver) { + configurationManager.deregisterObserver((ConfigurationObserver) foundCp); + } + } + } + /** * Load system coprocessors once only. Read the class names from configuration. Called by * constructor. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java index 61c983468876..b6950f4c2e85 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -88,4 +89,6 @@ public interface MasterCoprocessorEnvironment extends CoprocessorEnvironment * RS_LOG_ROLL */ - RS_LOG_ROLL(91, ExecutorType.RS_LOG_ROLL); + RS_LOG_ROLL(91, ExecutorType.RS_LOG_ROLL), + + /** + * RS refresh hfiles for a region.
 + * RS_REFRESH_HFILES + */ + RS_REFRESH_HFILES(92, ExecutorType.RS_REFRESH_HFILES); private final int code; private final ExecutorType executor; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java index 668cd701c0d9..e2d357fbee61 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java @@ -57,7 +57,8 @@ public enum ExecutorType { RS_SNAPSHOT_OPERATIONS(36), RS_FLUSH_OPERATIONS(37), RS_RELOAD_QUOTAS_OPERATIONS(38), - RS_LOG_ROLL(39); + RS_LOG_ROLL(39), + RS_REFRESH_HFILES(40); ExecutorType(int value) { } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index c997f1c6e822..5255eb00c450 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -84,6 +84,7 @@ import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.NamespaceNotFoundException; import org.apache.hadoop.hbase.PleaseHoldException; import org.apache.hadoop.hbase.PleaseRestartMasterException; import org.apache.hadoop.hbase.RegionMetrics; @@ -170,6 +171,8 @@ import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch; import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait; import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher; +import org.apache.hadoop.hbase.master.procedure.RefreshHFilesTableProcedure; +import org.apache.hadoop.hbase.master.procedure.RefreshMetaProcedure; import org.apache.hadoop.hbase.master.procedure.ReloadQuotasProcedure; import 
org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure; import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; @@ -248,10 +251,12 @@ import org.apache.hadoop.hbase.security.SecurityConstants; import org.apache.hadoop.hbase.security.Superusers; import org.apache.hadoop.hbase.security.UserProvider; +import org.apache.hadoop.hbase.security.access.AbstractReadOnlyController; import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.ConfigurationUtil; import org.apache.hadoop.hbase.util.CoprocessorConfigurationUtil; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSTableDescriptors; @@ -611,6 +616,12 @@ protected String getUseThisHostnameInstead(Configuration conf) { private void registerConfigurationObservers() { configurationManager.registerObserver(this.rpcServices); configurationManager.registerObserver(this); + if (cpHost != null) { + cpHost.registerConfigurationObservers(configurationManager); + } else { + LOG.warn("Could not register HMaster coprocessors to the ConfigurationManager because " + + "MasterCoprocessorHost is null"); + } } // Main run loop. 
Calls through to the regionserver run loop AFTER becoming active Master; will @@ -1080,7 +1091,12 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE if (!maintenanceMode) { startupTaskGroup.addTask("Initializing master coprocessors"); setQuotasObserver(conf); - this.cpHost = new MasterCoprocessorHost(this, conf); + CoprocessorConfigurationUtil.syncReadOnlyConfigurations( + ConfigurationUtil.isReadOnlyModeEnabled(conf), conf, + CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY); + AbstractReadOnlyController.manageActiveClusterIdFile( + ConfigurationUtil.isReadOnlyModeEnabled(conf), this.getMasterFileSystem()); + initializeCoprocessorHost(conf); } else { // start an in process region server for carrying system regions maintenanceRegionServer = @@ -1182,8 +1198,9 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE int existingReplicasCount = assignmentManager.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).size(); if (existingReplicasCount > metaDesc.getRegionReplication()) { - LOG.info("Update replica count of hbase:meta from {}(in TableDescriptor)" - + " to {}(existing ZNodes)", metaDesc.getRegionReplication(), existingReplicasCount); + LOG.info( + "Update replica count of {} from {}(in TableDescriptor)" + " to {}(existing ZNodes)", + TableName.META_TABLE_NAME, metaDesc.getRegionReplication(), existingReplicasCount); metaDesc = TableDescriptorBuilder.newBuilder(metaDesc) .setRegionReplication(existingReplicasCount).build(); tableDescriptors.update(metaDesc); @@ -1192,8 +1209,9 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE if (metaDesc.getRegionReplication() != replicasNumInConf) { LOG.info( "The {} config is {} while the replica count in TableDescriptor is {}" - + " for hbase:meta, altering...", - HConstants.META_REPLICAS_NUM, replicasNumInConf, metaDesc.getRegionReplication()); + + " for {}, altering...", + HConstants.META_REPLICAS_NUM, replicasNumInConf, 
metaDesc.getRegionReplication(), + TableName.META_TABLE_NAME); procedureExecutor.submitProcedure(new ModifyTableProcedure( procedureExecutor.getEnvironment(), TableDescriptorBuilder.newBuilder(metaDesc) .setRegionReplication(replicasNumInConf).build(), @@ -3124,8 +3142,8 @@ public ClusterMetrics getClusterMetricsWithoutCoprocessor(EnumSet