Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
Expand Down Expand Up @@ -156,7 +157,7 @@ private List<String> getLogFilesForNewBackup(Map<String, Long> olderTimestamps,
LOG.debug("currentLogFile: " + log.getPath().toString());
if (AbstractFSWALProvider.isMetaFile(log.getPath())) {
if (LOG.isDebugEnabled()) {
LOG.debug("Skip hbase:meta log file: " + log.getPath().getName());
LOG.debug("Skip {} log file: {}", TableName.META_TABLE_NAME, log.getPath().getName());
}
continue;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -204,6 +204,6 @@ private static boolean isHMasterWAL(Path path) {
String fn = path.getName();
return fn.startsWith(WALProcedureStore.LOG_PREFIX)
|| fn.endsWith(MasterRegionFactory.ARCHIVED_WAL_SUFFIX)
|| path.toString().contains("/%s/".formatted(MasterRegionFactory.MASTER_STORE_DIR));
|| path.toString().contains("/%s/".formatted(MasterRegionFactory.getMasterRegionDirName()));
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,8 @@ private void processMetaRecord(Result result) throws IOException {
* Initialize the region assignment snapshot by scanning the hbase:meta table
*/
public void initialize() throws IOException {
LOG.info("Start to scan the hbase:meta for the current region assignment " + "snappshot");
LOG.info("Start to scan {} for the current region assignment snapshot",
TableName.META_TABLE_NAME);
// Scan hbase:meta to pick up user regions
try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME);
ResultScanner scanner = metaTable.getScanner(HConstants.CATALOG_FAMILY)) {
Expand All @@ -187,7 +188,8 @@ public void initialize() throws IOException {
}
}
}
LOG.info("Finished to scan the hbase:meta for the current region assignment" + "snapshot");
LOG.info("Finished scanning {} for the current region assignment snapshot",
TableName.META_TABLE_NAME);
}

private void addRegion(RegionInfo regionInfo) {
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;

import java.io.IOException;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ActiveClusterSuffixProtos;

/**
 * The active cluster suffix for this cluster. It is serialized to the filesystem and up into
 * zookeeper. This is a container for the suffix; it also knows how to serialize and deserialize
 * it, both with and without the protobuf magic prefix.
 */
@InterfaceAudience.Private
public class ActiveClusterSuffix {
  // Suffix string appended to shared names (e.g. the meta table name) for this cluster.
  private final String activeClusterSuffix;

  /**
   * New ActiveClusterSuffix.
   * @param cs the suffix string this instance wraps
   */
  public ActiveClusterSuffix(final String cs) {
    this.activeClusterSuffix = cs;
  }

  /** Returns the raw suffix string. */
  public String getActiveClusterSuffix() {
    return activeClusterSuffix;
  }

  /** Returns The active cluster suffix serialized using pb w/ pb magic prefix */
  public byte[] toByteArray() {
    return ProtobufUtil.prependPBMagic(convert().toByteArray());
  }

  /**
   * Parse the serialized representation of the {@link ActiveClusterSuffix}
   * @param bytes A pb serialized {@link ActiveClusterSuffix} instance with pb magic prefix
   * @return An instance of {@link ActiveClusterSuffix} made from <code>bytes</code>
   * @throws DeserializationException if the pb payload cannot be parsed
   * @see #toByteArray()
   */
  public static ActiveClusterSuffix parseFrom(final byte[] bytes) throws DeserializationException {
    if (ProtobufUtil.isPBMagicPrefix(bytes)) {
      int pblen = ProtobufUtil.lengthOfPBMagic();
      ActiveClusterSuffixProtos.ActiveClusterSuffix.Builder builder =
        ActiveClusterSuffixProtos.ActiveClusterSuffix.newBuilder();
      final ActiveClusterSuffixProtos.ActiveClusterSuffix cs;
      try {
        ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
        cs = builder.build();
      } catch (IOException e) {
        throw new DeserializationException(e);
      }
      return convert(cs);
    } else {
      // Presume it was written out this way, the old way: a plain UTF-8 string, no pb magic.
      return new ActiveClusterSuffix(Bytes.toString(bytes));
    }
  }

  /** Returns A pb instance to represent this instance. */
  public ActiveClusterSuffixProtos.ActiveClusterSuffix convert() {
    return ActiveClusterSuffixProtos.ActiveClusterSuffix.newBuilder()
      .setActiveClusterSuffix(this.activeClusterSuffix).build();
  }

  /** Returns A {@link ActiveClusterSuffix} made from the passed in <code>cs</code> */
  public static ActiveClusterSuffix
    convert(final ActiveClusterSuffixProtos.ActiveClusterSuffix cs) {
    return new ActiveClusterSuffix(cs.getActiveClusterSuffix());
  }

  @Override
  public String toString() {
    return this.activeClusterSuffix;
  }
}
Original file line number Diff line number Diff line change
Expand Up @@ -2705,4 +2705,32 @@ List<LogEntry> getLogEntries(Set<ServerName> serverNames, String logType, Server

@InterfaceAudience.Private
void restoreBackupSystemTable(String snapshotName) throws IOException;

/**
 * Perform hbase:meta table refresh
 * @return ID of the procedure started for the hbase:meta refresh
 * @throws IOException if a remote or network exception occurs
 */
Long refreshMeta() throws IOException;

/**
 * Refresh HFiles for the table
 * @param tableName table to refresh HFiles for
 * @return ID of the procedure started for refreshing HFiles
 * @throws IOException if a remote or network exception occurs
 */
Long refreshHFiles(final TableName tableName) throws IOException;

/**
 * Refresh HFiles for all the tables under given namespace
 * @param namespace Namespace for which we should call refresh HFiles for all tables under it
 * @return ID of the procedure started for refreshing HFiles
 * @throws IOException if a remote or network exception occurs
 */
Long refreshHFiles(final String namespace) throws IOException;

/**
 * Refresh HFiles for all the tables
 * @return ID of the procedure started for refreshing HFiles
 * @throws IOException if a remote or network exception occurs
 */
Long refreshHFiles() throws IOException;
}
Original file line number Diff line number Diff line change
Expand Up @@ -1157,4 +1157,24 @@ public List<String> getCachedFilesList(ServerName serverName) throws IOException
public void restoreBackupSystemTable(String snapshotName) throws IOException {
get(admin.restoreBackupSystemTable(snapshotName));
}

@Override
public Long refreshMeta() throws IOException {
  // Submit via the wrapped async admin and block until the procedure ID is available.
  final var procId = get(admin.refreshMeta());
  return procId;
}

@Override
public Long refreshHFiles(final TableName tableName) throws IOException {
  // Blocking variant of the per-table HFile refresh.
  final var procId = get(admin.refreshHFiles(tableName));
  return procId;
}

@Override
public Long refreshHFiles(final String namespace) throws IOException {
  // Blocking variant of the per-namespace HFile refresh.
  final var procId = get(admin.refreshHFiles(namespace));
  return procId;
}

@Override
public Long refreshHFiles() throws IOException {
  // Blocking variant of the all-tables HFile refresh.
  final var procId = get(admin.refreshHFiles());
  return procId;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -1892,4 +1892,24 @@ CompletableFuture<List<LogEntry>> getLogEntries(Set<ServerName> serverNames, Str

@InterfaceAudience.Private
CompletableFuture<Void> restoreBackupSystemTable(String snapshotName);

/**
 * Perform hbase:meta table refresh
 * @return a {@link CompletableFuture} completing with the ID of the procedure started for the
 *         refresh
 */
CompletableFuture<Long> refreshMeta();

/**
 * Refresh HFiles for the table
 * @param tableName table to refresh HFiles for
 * @return a {@link CompletableFuture} completing with the ID of the procedure started
 */
CompletableFuture<Long> refreshHFiles(final TableName tableName);

/**
 * Refresh HFiles for all the tables under given namespace
 * @param namespace namespace whose tables should have their HFiles refreshed
 * @return a {@link CompletableFuture} completing with the ID of the procedure started
 */
CompletableFuture<Long> refreshHFiles(final String namespace);

/**
 * Refresh HFiles for all the tables
 * @return a {@link CompletableFuture} completing with the ID of the procedure started
 */
CompletableFuture<Long> refreshHFiles();
}
Original file line number Diff line number Diff line change
Expand Up @@ -1025,4 +1025,24 @@ public CompletableFuture<List<String>> getCachedFilesList(ServerName serverName)
public CompletableFuture<Void> restoreBackupSystemTable(String snapshotName) {
return wrap(rawAdmin.restoreBackupSystemTable(snapshotName));
}

@Override
public CompletableFuture<Long> refreshMeta() {
  // Delegate to the raw admin and wrap the returned future.
  final var rawFuture = rawAdmin.refreshMeta();
  return wrap(rawFuture);
}

@Override
public CompletableFuture<Long> refreshHFiles(final TableName tableName) {
  // Per-table HFile refresh, delegated to the raw admin.
  final var rawFuture = rawAdmin.refreshHFiles(tableName);
  return wrap(rawFuture);
}

@Override
public CompletableFuture<Long> refreshHFiles(final String namespace) {
  // Per-namespace HFile refresh, delegated to the raw admin.
  final var rawFuture = rawAdmin.refreshHFiles(namespace);
  return wrap(rawFuture);
}

@Override
public CompletableFuture<Long> refreshHFiles() {
  // All-tables HFile refresh, delegated to the raw admin.
  final var rawFuture = rawAdmin.refreshHFiles();
  return wrap(rawFuture);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -263,6 +263,10 @@
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RefreshHFilesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RefreshHFilesResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RefreshMetaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RefreshMetaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ReopenTableRegionsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ReopenTableRegionsResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
Expand Down Expand Up @@ -4697,4 +4701,63 @@ MasterProtos.RestoreBackupSystemTableResponse> procedureCall(request,
MasterProtos.RestoreBackupSystemTableResponse::getProcId,
new RestoreBackupSystemTableProcedureBiConsumer());
}

/**
 * Sends the given {@link RefreshHFilesRequest} to the master and completes with the ID of the
 * procedure the master started for the refresh.
 */
private CompletableFuture<Long> internalRefreshHFiles(RefreshHFilesRequest request) {
  return this.<Long> newMasterCaller()
    .action((controller, stub) -> this.<RefreshHFilesRequest, RefreshHFilesResponse, Long> call(
      controller, stub, request, MasterService.Interface::refreshHFiles,
      RefreshHFilesResponse::getProcId))
    .call();
}

@Override
public CompletableFuture<Long> refreshMeta() {
  RefreshMetaRequest.Builder request = RefreshMetaRequest.newBuilder();
  // Nonce guards against the procedure being submitted twice on client retry.
  request.setNonceGroup(ng.getNonceGroup()).setNonce(ng.newNonce());
  return this.<Long> newMasterCaller()
    .action((controller, stub) -> this.<RefreshMetaRequest, RefreshMetaResponse, Long> call(
      controller, stub, request.build(), MasterService.Interface::refreshMeta,
      RefreshMetaResponse::getProcId))
    .call();
}

@Override
public CompletableFuture<Long> refreshHFiles(final TableName tableName) {
  // NOTE(review): throwing synchronously from an async API differs from the usual
  // failed-future convention; kept as-is to preserve existing caller behavior.
  if (tableName.isSystemTable()) {
    LOG.warn("Refreshing HFiles for system table {} is not allowed", tableName.getNameAsString());
    throw new IllegalArgumentException(
      "Not allowed to refresh HFiles for system table '" + tableName.getNameAsString() + "'");
  }
  // Request builder
  RefreshHFilesRequest.Builder request = RefreshHFilesRequest.newBuilder();
  request.setTableName(ProtobufUtil.toProtoTableName(tableName));
  request.setNonceGroup(ng.getNonceGroup()).setNonce(ng.newNonce());
  return internalRefreshHFiles(request.build());
}

@Override
public CompletableFuture<Long> refreshHFiles(final String namespace) {
  // Reject the reserved system/backup namespaces up front.
  if (
    namespace.equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)
      || namespace.equals(NamespaceDescriptor.BACKUP_NAMESPACE_NAME_STR)
  ) {
    LOG.warn("Refreshing HFiles for reserve namespace {} is not allowed", namespace);
    throw new IllegalArgumentException(
      "Not allowed to refresh HFiles for reserve namespace '" + namespace + "'");
  }
  // Request builder
  RefreshHFilesRequest.Builder request = RefreshHFilesRequest.newBuilder();
  request.setNamespace(namespace);
  request.setNonceGroup(ng.getNonceGroup()).setNonce(ng.newNonce());
  return internalRefreshHFiles(request.build());
}

@Override
public CompletableFuture<Long> refreshHFiles() {
  // Neither table nor namespace set: per Admin#refreshHFiles(), refreshes all the tables.
  RefreshHFilesRequest.Builder request = RefreshHFilesRequest.newBuilder();
  // Set nonce
  request.setNonceGroup(ng.getNonceGroup()).setNonce(ng.newNonce());
  return internalRefreshHFiles(request.build());
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -431,7 +431,7 @@ static byte[] toByteArray(RegionInfo ri) {
*/
static String prettyPrint(final String encodedRegionName) {
if (encodedRegionName.equals("1028785192")) {
return encodedRegionName + "/hbase:meta";
return encodedRegionName + "/" + TableName.META_TABLE_NAME;
}
return encodedRegionName;
}
Expand Down
25 changes: 25 additions & 0 deletions hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
Original file line number Diff line number Diff line change
Expand Up @@ -1665,6 +1665,31 @@ public enum OperationStatusCode {
*/
public final static boolean REJECT_DECOMMISSIONED_HOSTS_DEFAULT = false;

/**
 * Adds a suffix to the meta table name: value='test' -> 'hbase:meta_test'. Added in HBASE-XXXXX
 * to support having multiple hbase:meta tables (with distinct names) to enable storage sharing
 * by more than one cluster.
 */
public final static String HBASE_META_TABLE_SUFFIX = "hbase.meta.table.suffix";

/**
* Default value of {@link #HBASE_META_TABLE_SUFFIX}
*/
public final static String HBASE_META_TABLE_SUFFIX_DEFAULT_VALUE = "";

/**
* Should HBase only serve Read Requests
*/
public final static String HBASE_GLOBAL_READONLY_ENABLED_KEY = "hbase.global.readonly.enabled";

/**
* Default value of {@link #HBASE_GLOBAL_READONLY_ENABLED_KEY}
*/
public final static boolean HBASE_GLOBAL_READONLY_ENABLED_DEFAULT = false;

/** name of the file having active cluster suffix */
public static final String ACTIVE_CLUSTER_SUFFIX_FILE_NAME = "active.cluster.suffix.id";

private HConstants() {
// Can't be instantiated with this ctor.
}
Expand Down
Loading