Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions hbase-mapreduce/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -220,6 +220,11 @@
<artifactId>junit-vintage-engine</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.awaitility</groupId>
<artifactId>awaitility</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>jcl-over-slf4j</artifactId>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -264,6 +264,8 @@ public void map(BytesWritable key, NullWritable value, Context context)
Path outputPath = getOutputPath(inputInfo);

copyFile(context, inputInfo, outputPath);
// inject failure
injectTestFailure(context, inputInfo);
}

/**
Expand All @@ -290,19 +292,23 @@ private Path getOutputPath(final SnapshotFileInfo inputInfo) throws IOException
return new Path(outputArchive, path);
}

@SuppressWarnings("checkstyle:linelength")
/**
* Used by TestExportSnapshot to test for retries when failures happen. Failure is injected in
* {@link #copyFile(Mapper.Context, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo, Path)}.
* {@link #map(BytesWritable, NullWritable, org.apache.hadoop.mapreduce.Mapper.Context)}
*/
private void injectTestFailure(final Context context, final SnapshotFileInfo inputInfo)
throws IOException {
if (!context.getConfiguration().getBoolean(Testing.CONF_TEST_FAILURE, false)) return;
if (testing.injectedFailureCount >= testing.failuresCountToInject) return;
if (!context.getConfiguration().getBoolean(Testing.CONF_TEST_FAILURE, false)) {
return;
}
if (testing.injectedFailureCount >= testing.failuresCountToInject) {
return;
}
testing.injectedFailureCount++;
context.getCounter(Counter.COPY_FAILED).increment(1);
LOG.debug("Injecting failure. Count: " + testing.injectedFailureCount);
throw new IOException(String.format("TEST FAILURE (%d of max %d): Unable to copy input=%s",
throw new IOException(String.format(
context.getTaskAttemptID() + " TEST FAILURE (%d of max %d): Unable to copy input=%s",
testing.injectedFailureCount, testing.failuresCountToInject, inputInfo));
}

Expand Down Expand Up @@ -358,8 +364,6 @@ private void copyFile(final Context context, final SnapshotFileInfo inputInfo,
LOG.error("Error copying " + inputPath + " to " + outputPath, e);
context.getCounter(Counter.COPY_FAILED).increment(1);
throw e;
} finally {
injectTestFailure(context, inputInfo);
}
}

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.snapshot;

import org.apache.hadoop.fs.Path;
import org.junit.jupiter.api.TestTemplate;

/**
 * Base for tests that export the same snapshot twice into a single destination directory.
 */
public class ConsecutiveExportsTestBase extends ExportSnapshotTestBase {

  protected ConsecutiveExportsTestBase(boolean mob) {
    super(mob);
  }

  /**
   * Runs two back-to-back exports of the same snapshot to one local directory. The first export
   * targets a fresh directory; the second reuses it with the trailing flag set to {@code true}
   * (presumably the overwrite switch — confirm against
   * {@code ExportSnapshotTestBase#testExportFileSystemState}).
   */
  @TestTemplate
  public void testConsecutiveExports() throws Exception {
    Path destinationDir = getLocalDestinationDir(TEST_UTIL);
    // First pass: empty destination, flag off.
    testExportFileSystemState(tableName, snapshotName, snapshotName, tableNumFiles,
      destinationDir, false);
    // Second pass: same destination, flag on.
    testExportFileSystemState(tableName, snapshotName, snapshotName, tableNumFiles,
      destinationDir, true);
    removeExportDir(destinationDir);
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.snapshot;

import org.junit.jupiter.api.TestTemplate;

/**
 * Base for tests that export a snapshot and verify the copied files match the originals.
 */
public class ExportFileSystemStateTestBase extends ExportSnapshotTestBase {

  protected ExportFileSystemStateTestBase(boolean mob) {
    super(mob);
  }

  /** Exports an empty snapshot (zero table files) and verifies the result. */
  @TestTemplate
  public void testEmptyExportFileSystemState() throws Exception {
    testExportFileSystemState(tableName, emptySnapshotName, emptySnapshotName, 0);
  }

  /** Exports a populated snapshot and verifies the copied files match the source. */
  @TestTemplate
  public void testExportFileSystemState() throws Exception {
    testExportFileSystemState(tableName, snapshotName, snapshotName, tableNumFiles);
  }

  /**
   * Same verification as {@link #testExportFileSystemState()}, but with the skip-tmp option
   * enabled so the export writes directly to the destination. The flag is restored in a finally
   * block because the configuration is shared across tests.
   */
  @TestTemplate
  public void testExportFileSystemStateWithSkipTmp() throws Exception {
    TEST_UTIL.getConfiguration().setBoolean(ExportSnapshot.CONF_SKIP_TMP, true);
    try {
      testExportFileSystemState(tableName, snapshotName, snapshotName, tableNumFiles);
    } finally {
      TEST_UTIL.getConfiguration().setBoolean(ExportSnapshot.CONF_SKIP_TMP, false);
    }
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,126 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.snapshot;

import static org.awaitility.Awaitility.await;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.hasSize;
import static org.junit.jupiter.api.Assertions.assertEquals;

import java.time.Duration;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.tool.BulkLoadHFilesTool;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.HFileTestUtil;
import org.junit.jupiter.api.TestTemplate;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

/**
 * Base for tests that export a snapshot taken immediately after a region merge or split, checking
 * that the files produced by those operations are copied correctly.
 */
public class ExportFileSystemStateWithMergeOrSplitRegionTestBase extends ExportSnapshotTestBase {

  protected ExportFileSystemStateWithMergeOrSplitRegionTestBase(boolean mob) {
    super(mob);
  }

  /**
   * Turns compaction off on every region server so store files from the merge/split are not
   * rewritten before the snapshot is taken.
   */
  private void disableCompaction() throws Exception {
    admin.compactionSwitch(false,
      admin.getRegionServers().stream().map(rs -> rs.getServerName()).collect(Collectors.toList()));
  }

  @TestTemplate
  public void testExportFileSystemStateWithMergeRegion() throws Exception {
    disableCompaction();
    String suffix = mob ? methodName + "-mob" : methodName;
    TableName mergeTable = TableName.valueOf("testtb-" + suffix + "-1");
    String mergeSnapshot = "snaptb0-" + suffix + "-1";
    // Two regions, split at row "2", so the merge has a pair to combine.
    admin.createTable(
      TableDescriptorBuilder.newBuilder(mergeTable)
        .setColumnFamilies(
          Lists.newArrayList(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).build()))
        .build(),
      new byte[][] { Bytes.toBytes("2") });
    // One row per region so each region has data.
    try (Table table = admin.getConnection().getTable(mergeTable)) {
      table.put(new Put(Bytes.toBytes("1")).addColumn(FAMILY, null, Bytes.toBytes("1")));
      table.put(new Put(Bytes.toBytes("2")).addColumn(FAMILY, null, Bytes.toBytes("2")));
    }
    List<RegionInfo> regionsBeforeMerge = admin.getRegions(mergeTable);
    assertEquals(2, regionsBeforeMerge.size());
    tableNumFiles = regionsBeforeMerge.size();
    // Merge the two regions and wait until a single region remains.
    admin.mergeRegionsAsync(new byte[][] { regionsBeforeMerge.get(0).getEncodedNameAsBytes(),
      regionsBeforeMerge.get(1).getEncodedNameAsBytes() }, true).get();
    await().atMost(Duration.ofSeconds(30))
      .untilAsserted(() -> assertThat(admin.getRegions(mergeTable), hasSize(1)));
    // Snapshot the merged table, then export and verify.
    admin.snapshot(mergeSnapshot, mergeTable);
    testExportFileSystemState(mergeTable, mergeSnapshot, mergeSnapshot, tableNumFiles);
  }

  @TestTemplate
  public void testExportFileSystemStateWithSplitRegion() throws Exception {
    disableCompaction();
    String suffix = mob ? methodName + "-mob" : methodName;
    TableName tableToSplit = TableName.valueOf(suffix);
    String snapshotOfSplit = "snapshot-" + suffix;
    admin.createTable(TableDescriptorBuilder.newBuilder(tableToSplit).setColumnFamilies(
      Lists.newArrayList(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).build())).build());

    // Bulk-load one large hfile so the export MR job runs long enough to exercise concurrency.
    Path bulkLoadDir = TEST_UTIL.getDataTestDir("output/cf");
    TEST_UTIL.getTestFileSystem().mkdirs(bulkLoadDir);
    HFileTestUtil.createHFile(TEST_UTIL.getConfiguration(), TEST_UTIL.getTestFileSystem(),
      new Path(bulkLoadDir, "test_file"), FAMILY, Bytes.toBytes("q"), Bytes.toBytes("1"),
      Bytes.toBytes("9"), 9999999);
    BulkLoadHFilesTool loader = new BulkLoadHFilesTool(TEST_UTIL.getConfiguration());
    loader.run(new String[] { bulkLoadDir.getParent().toString(), tableToSplit.getNameAsString() });

    List<RegionInfo> regionsBeforeSplit = admin.getRegions(tableToSplit);
    assertEquals(1, regionsBeforeSplit.size());
    tableNumFiles = regionsBeforeSplit.size();

    // Split at row "5" and wait until both daughters are online.
    admin.split(tableToSplit, Bytes.toBytes("5"));
    await().atMost(Duration.ofSeconds(30))
      .untilAsserted(() -> assertThat(admin.getRegions(tableToSplit), hasSize(2)));

    admin.snapshot(snapshotOfSplit, tableToSplit);
    Configuration conf = TEST_UTIL.getConfiguration();
    // Tiny copy buffer so the export task does not finish before concurrency can be observed.
    conf.setInt("snapshot.export.buffer.size", 1);
    // One file per mapper so the hfile and its two reference files copy in different mappers
    // concurrently.
    conf.setInt("snapshot.export.default.map.group", 1);
    testExportFileSystemState(conf, tableToSplit, snapshotOfSplit, snapshotOfSplit,
      tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), getHdfsDestinationDir(), false, false,
      getBypassRegionPredicate(), true, false);
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.snapshot;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.util.AbstractHBaseTool;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.jupiter.api.TestTemplate;

public class ExportSnapshotMiscTestBase extends ExportSnapshotTestBase {

protected ExportSnapshotMiscTestBase(boolean mob) {
super(mob);
}

@TestTemplate
public void testExportWithTargetName() throws Exception {
final String targetName = "testExportWithTargetName";
testExportFileSystemState(tableName, snapshotName, targetName, tableNumFiles);
}

@TestTemplate
public void testExportWithResetTtl() throws Exception {
String suffix = mob ? methodName + "-mob" : methodName;
TableName tableName = TableName.valueOf(suffix);
String snapshotName = "snaptb-" + suffix;
Long ttl = 100000L;
// create Table
createTable(tableName);
SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 50, FAMILY);
int tableNumFiles = admin.getRegions(tableName).size();
// take a snapshot with TTL
Map<String, Object> props = new HashMap<>();
props.put("TTL", ttl);
admin.snapshot(snapshotName, tableName, props);
Optional<Long> ttlOpt =
admin.listSnapshots().stream().filter(s -> s.getName().equals(snapshotName))
.map(org.apache.hadoop.hbase.client.SnapshotDescription::getTtl).findAny();
assertTrue(ttlOpt.isPresent());
assertEquals(ttl, ttlOpt.get());

testExportFileSystemState(tableName, snapshotName, snapshotName, tableNumFiles,
getHdfsDestinationDir(), false, true);
}

@TestTemplate
public void testExportExpiredSnapshot() throws Exception {
String suffix = mob ? methodName + "-mob" : methodName;
TableName tableName = TableName.valueOf(suffix);
String snapshotName = "snapshot-" + suffix;
createTable(tableName);
SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 50, FAMILY);
Map<String, Object> properties = new HashMap<>();
properties.put("TTL", 10);
org.apache.hadoop.hbase.client.SnapshotDescription snapshotDescription =
new org.apache.hadoop.hbase.client.SnapshotDescription(snapshotName, tableName,
SnapshotType.FLUSH, null, EnvironmentEdgeManager.currentTime(), -1, properties);
admin.snapshot(snapshotDescription);
boolean isExist =
admin.listSnapshots().stream().anyMatch(ele -> snapshotName.equals(ele.getName()));
assertTrue(isExist);
TEST_UTIL.waitFor(60000,
() -> SnapshotDescriptionUtils.isExpiredSnapshot(snapshotDescription.getTtl(),
snapshotDescription.getCreationTime(), EnvironmentEdgeManager.currentTime()));
boolean isExpiredSnapshot =
SnapshotDescriptionUtils.isExpiredSnapshot(snapshotDescription.getTtl(),
snapshotDescription.getCreationTime(), EnvironmentEdgeManager.currentTime());
assertTrue(isExpiredSnapshot);
int res = runExportSnapshot(TEST_UTIL.getConfiguration(), snapshotName, snapshotName,
TEST_UTIL.getDefaultRootDirPath(), getHdfsDestinationDir(), false, false, false, true, true);
assertEquals(res, AbstractHBaseTool.EXIT_FAILURE);
}
}
Loading
Loading