
Commit 7f5bb0c

HBASE-30002 Rewrite TestExportSnapshot related UTs (#7945) (#7946) (#7955)
(cherry picked from commit 00936bf)
(cherry picked from commit e676c75)

Signed-off-by: Xiao Liu <liuxiaocs@apache.org>
1 parent c572401 commit 7f5bb0c

37 files changed

Lines changed: 1601 additions & 1051 deletions

hbase-mapreduce/pom.xml

Lines changed: 5 additions & 0 deletions
@@ -220,6 +220,11 @@
      <artifactId>junit-vintage-engine</artifactId>
      <scope>test</scope>
    </dependency>
+    <dependency>
+      <groupId>org.awaitility</groupId>
+      <artifactId>awaitility</artifactId>
+      <scope>test</scope>
+    </dependency>
    <dependency>
      <groupId>org.slf4j</groupId>
      <artifactId>jcl-over-slf4j</artifactId>

hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java

Lines changed: 11 additions & 7 deletions
@@ -264,6 +264,8 @@ public void map(BytesWritable key, NullWritable value, Context context)
      Path outputPath = getOutputPath(inputInfo);

      copyFile(context, inputInfo, outputPath);
+      // inject failure
+      injectTestFailure(context, inputInfo);
    }

    /**
@@ -290,19 +292,23 @@ private Path getOutputPath(final SnapshotFileInfo inputInfo) throws IOException
      return new Path(outputArchive, path);
    }

-    @SuppressWarnings("checkstyle:linelength")
    /**
     * Used by TestExportSnapshot to test for retries when failures happen. Failure is injected in
-     * {@link #copyFile(Mapper.Context, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo, Path)}.
+     * {@link #map(BytesWritable, NullWritable, org.apache.hadoop.mapreduce.Mapper.Context)}
     */
    private void injectTestFailure(final Context context, final SnapshotFileInfo inputInfo)
      throws IOException {
-      if (!context.getConfiguration().getBoolean(Testing.CONF_TEST_FAILURE, false)) return;
-      if (testing.injectedFailureCount >= testing.failuresCountToInject) return;
+      if (!context.getConfiguration().getBoolean(Testing.CONF_TEST_FAILURE, false)) {
+        return;
+      }
+      if (testing.injectedFailureCount >= testing.failuresCountToInject) {
+        return;
+      }
      testing.injectedFailureCount++;
      context.getCounter(Counter.COPY_FAILED).increment(1);
      LOG.debug("Injecting failure. Count: " + testing.injectedFailureCount);
-      throw new IOException(String.format("TEST FAILURE (%d of max %d): Unable to copy input=%s",
+      throw new IOException(String.format(
+        context.getTaskAttemptID() + " TEST FAILURE (%d of max %d): Unable to copy input=%s",
        testing.injectedFailureCount, testing.failuresCountToInject, inputInfo));
    }

@@ -358,8 +364,6 @@ private void copyFile(final Context context, final SnapshotFileInfo inputInfo,
        LOG.error("Error copying " + inputPath + " to " + outputPath, e);
        context.getCounter(Counter.COPY_FAILED).increment(1);
        throw e;
-      } finally {
-        injectTestFailure(context, inputInfo);
      }
    }
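
The net effect of the ExportSnapshot change: failure injection now runs in map() after copyFile() returns, instead of in copyFile()'s finally block, and the injected IOException is tagged with the task attempt id. One general reason to avoid throwing from a finally block is that such an exception replaces whatever the try body threw, so a real copy error could be masked by the injected test failure. A small standalone Java illustration of that masking behaviour (not HBase code, and not claimed to be the commit's stated rationale):

public class FinallyMasking {
  public static void main(String[] args) {
    try {
      try {
        throw new RuntimeException("real copy failure");
      } finally {
        // An exception thrown here replaces the one from the try body.
        throw new RuntimeException("injected test failure");
      }
    } catch (RuntimeException e) {
      // Prints "injected test failure": the original error is lost.
      System.out.println(e.getMessage());
    }
  }
}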

hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/ConsecutiveExportsTestBase.java

Lines changed: 36 additions & 0 deletions
@@ -0,0 +1,36 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.snapshot;

import org.apache.hadoop.fs.Path;
import org.junit.jupiter.api.TestTemplate;

public class ConsecutiveExportsTestBase extends ExportSnapshotTestBase {

  protected ConsecutiveExportsTestBase(boolean mob) {
    super(mob);
  }

  @TestTemplate
  public void testConsecutiveExports() throws Exception {
    Path copyDir = getLocalDestinationDir(TEST_UTIL);
    testExportFileSystemState(tableName, snapshotName, snapshotName, tableNumFiles, copyDir, false);
    testExportFileSystemState(tableName, snapshotName, snapshotName, tableNumFiles, copyDir, true);
    removeExportDir(copyDir);
  }
}
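
The new *TestBase classes are parameterized through a protected constructor taking a mob flag and expose their tests as JUnit 5 @TestTemplate methods, so concrete test classes (not shown in this excerpt) extend them and select the MOB or plain-table variant. A purely hypothetical sketch of such a subclass; the real class names and any extra wiring added by this commit are not part of this excerpt:

package org.apache.hadoop.hbase.snapshot;

// Hypothetical subclass for illustration only.
public class HypotheticalMobConsecutiveExportsTest extends ConsecutiveExportsTestBase {
  public HypotheticalMobConsecutiveExportsTest() {
    super(true); // run the MOB variant; super(false) would run the plain-table variant
  }
}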

hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/ExportFileSystemStateTestBase.java

Lines changed: 50 additions & 0 deletions
@@ -0,0 +1,50 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.snapshot;

import org.junit.jupiter.api.TestTemplate;

public class ExportFileSystemStateTestBase extends ExportSnapshotTestBase {

  protected ExportFileSystemStateTestBase(boolean mob) {
    super(mob);
  }

  /**
   * Verify if exported snapshot and copied files matches the original one.
   */
  @TestTemplate
  public void testExportFileSystemState() throws Exception {
    testExportFileSystemState(tableName, snapshotName, snapshotName, tableNumFiles);
  }

  @TestTemplate
  public void testExportFileSystemStateWithSkipTmp() throws Exception {
    TEST_UTIL.getConfiguration().setBoolean(ExportSnapshot.CONF_SKIP_TMP, true);
    try {
      testExportFileSystemState(tableName, snapshotName, snapshotName, tableNumFiles);
    } finally {
      TEST_UTIL.getConfiguration().setBoolean(ExportSnapshot.CONF_SKIP_TMP, false);
    }
  }

  @TestTemplate
  public void testEmptyExportFileSystemState() throws Exception {
    testExportFileSystemState(tableName, emptySnapshotName, emptySnapshotName, 0);
  }
}

hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/ExportFileSystemStateWithMergeOrSplitRegionTestBase.java

Lines changed: 126 additions & 0 deletions
@@ -0,0 +1,126 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.snapshot;

import static org.awaitility.Awaitility.await;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.hasSize;
import static org.junit.jupiter.api.Assertions.assertEquals;

import java.time.Duration;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.tool.BulkLoadHFilesTool;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.HFileTestUtil;
import org.junit.jupiter.api.TestTemplate;

import org.apache.hbase.thirdparty.com.google.common.collect.Lists;

public class ExportFileSystemStateWithMergeOrSplitRegionTestBase extends ExportSnapshotTestBase {

  protected ExportFileSystemStateWithMergeOrSplitRegionTestBase(boolean mob) {
    super(mob);
  }

  @TestTemplate
  public void testExportFileSystemStateWithMergeRegion() throws Exception {
    // disable compaction
    admin.compactionSwitch(false,
      admin.getRegionServers().stream().map(a -> a.getServerName()).collect(Collectors.toList()));
    // create Table
    String suffix = mob ? methodName + "-mob" : methodName;
    TableName tableName0 = TableName.valueOf("testtb-" + suffix + "-1");
    String snapshotName0 = "snaptb0-" + suffix + "-1";
    admin.createTable(
      TableDescriptorBuilder.newBuilder(tableName0)
        .setColumnFamilies(
          Lists.newArrayList(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).build()))
        .build(),
      new byte[][] { Bytes.toBytes("2") });
    // put some data
    try (Table table = admin.getConnection().getTable(tableName0)) {
      table.put(new Put(Bytes.toBytes("1")).addColumn(FAMILY, null, Bytes.toBytes("1")));
      table.put(new Put(Bytes.toBytes("2")).addColumn(FAMILY, null, Bytes.toBytes("2")));
    }
    List<RegionInfo> regions = admin.getRegions(tableName0);
    assertEquals(2, regions.size());
    tableNumFiles = regions.size();
    // merge region
    admin.mergeRegionsAsync(new byte[][] { regions.get(0).getEncodedNameAsBytes(),
      regions.get(1).getEncodedNameAsBytes() }, true).get();
    await().atMost(Duration.ofSeconds(30))
      .untilAsserted(() -> assertThat(admin.getRegions(tableName0), hasSize(1)));
    // take a snapshot
    admin.snapshot(snapshotName0, tableName0);
    // export snapshot and verify
    testExportFileSystemState(tableName0, snapshotName0, snapshotName0, tableNumFiles);
  }

  @TestTemplate
  public void testExportFileSystemStateWithSplitRegion() throws Exception {
    // disable compaction
    admin.compactionSwitch(false,
      admin.getRegionServers().stream().map(a -> a.getServerName()).collect(Collectors.toList()));
    // create Table
    String suffix = mob ? methodName + "-mob" : methodName;
    TableName splitTableName = TableName.valueOf(suffix);
    String splitTableSnap = "snapshot-" + suffix;
    admin.createTable(TableDescriptorBuilder.newBuilder(splitTableName).setColumnFamilies(
      Lists.newArrayList(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).build())).build());

    Path output = TEST_UTIL.getDataTestDir("output/cf");
    TEST_UTIL.getTestFileSystem().mkdirs(output);
    // Create and load a large hfile to ensure the execution time of MR job.
    HFileTestUtil.createHFile(TEST_UTIL.getConfiguration(), TEST_UTIL.getTestFileSystem(),
      new Path(output, "test_file"), FAMILY, Bytes.toBytes("q"), Bytes.toBytes("1"),
      Bytes.toBytes("9"), 9999999);
    BulkLoadHFilesTool tool = new BulkLoadHFilesTool(TEST_UTIL.getConfiguration());
    tool.run(new String[] { output.getParent().toString(), splitTableName.getNameAsString() });

    List<RegionInfo> regions = admin.getRegions(splitTableName);
    assertEquals(1, regions.size());
    tableNumFiles = regions.size();

    // split region
    admin.split(splitTableName, Bytes.toBytes("5"));
    await().atMost(Duration.ofSeconds(30))
      .untilAsserted(() -> assertThat(admin.getRegions(splitTableName), hasSize(2)));

    // take a snapshot
    admin.snapshot(splitTableSnap, splitTableName);
    // export snapshot and verify
    Configuration tmpConf = TEST_UTIL.getConfiguration();
    // Decrease the buffer size of copier to avoid the export task finished shortly
    tmpConf.setInt("snapshot.export.buffer.size", 1);
    // Decrease the maximum files of each mapper to ensure the three files(1 hfile + 2 reference
    // files) copied in different mappers concurrently.
    tmpConf.setInt("snapshot.export.default.map.group", 1);
    testExportFileSystemState(tmpConf, splitTableName, splitTableSnap, splitTableSnap,
      tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), getHdfsDestinationDir(), false, false,
      getBypassRegionPredicate(), true, false);
  }
}

hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/ExportSnapshotMiscTestBase.java

Lines changed: 95 additions & 0 deletions
@@ -0,0 +1,95 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.snapshot;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.util.AbstractHBaseTool;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.jupiter.api.TestTemplate;

public class ExportSnapshotMiscTestBase extends ExportSnapshotTestBase {

  protected ExportSnapshotMiscTestBase(boolean mob) {
    super(mob);
  }

  @TestTemplate
  public void testExportWithTargetName() throws Exception {
    final String targetName = "testExportWithTargetName";
    testExportFileSystemState(tableName, snapshotName, targetName, tableNumFiles);
  }

  @TestTemplate
  public void testExportWithResetTtl() throws Exception {
    String suffix = mob ? methodName + "-mob" : methodName;
    TableName tableName = TableName.valueOf(suffix);
    String snapshotName = "snaptb-" + suffix;
    Long ttl = 100000L;
    // create Table
    createTable(tableName);
    SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 50, FAMILY);
    int tableNumFiles = admin.getRegions(tableName).size();
    // take a snapshot with TTL
    Map<String, Object> props = new HashMap<>();
    props.put("TTL", ttl);
    admin.snapshot(snapshotName, tableName, props);
    Optional<Long> ttlOpt =
      admin.listSnapshots().stream().filter(s -> s.getName().equals(snapshotName))
        .map(org.apache.hadoop.hbase.client.SnapshotDescription::getTtl).findAny();
    assertTrue(ttlOpt.isPresent());
    assertEquals(ttl, ttlOpt.get());

    testExportFileSystemState(tableName, snapshotName, snapshotName, tableNumFiles,
      getHdfsDestinationDir(), false, true);
  }

  @TestTemplate
  public void testExportExpiredSnapshot() throws Exception {
    String suffix = mob ? methodName + "-mob" : methodName;
    TableName tableName = TableName.valueOf(suffix);
    String snapshotName = "snapshot-" + suffix;
    createTable(tableName);
    SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 50, FAMILY);
    Map<String, Object> properties = new HashMap<>();
    properties.put("TTL", 10);
    org.apache.hadoop.hbase.client.SnapshotDescription snapshotDescription =
      new org.apache.hadoop.hbase.client.SnapshotDescription(snapshotName, tableName,
        SnapshotType.FLUSH, null, EnvironmentEdgeManager.currentTime(), -1, properties);
    admin.snapshot(snapshotDescription);
    boolean isExist =
      admin.listSnapshots().stream().anyMatch(ele -> snapshotName.equals(ele.getName()));
    assertTrue(isExist);
    TEST_UTIL.waitFor(60000,
      () -> SnapshotDescriptionUtils.isExpiredSnapshot(snapshotDescription.getTtl(),
        snapshotDescription.getCreationTime(), EnvironmentEdgeManager.currentTime()));
    boolean isExpiredSnapshot =
      SnapshotDescriptionUtils.isExpiredSnapshot(snapshotDescription.getTtl(),
        snapshotDescription.getCreationTime(), EnvironmentEdgeManager.currentTime());
    assertTrue(isExpiredSnapshot);
    int res = runExportSnapshot(TEST_UTIL.getConfiguration(), snapshotName, snapshotName,
      TEST_UTIL.getDefaultRootDirPath(), getHdfsDestinationDir(), false, false, false, true, true);
    assertEquals(res, AbstractHBaseTool.EXIT_FAILURE);
  }
}
