diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
index fe6f3bc238a9..f004686a4b32 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
@@ -187,11 +187,39 @@ public static int getDefaultBufferSize(final FileSystem fs) {
*/
public static FSDataOutputStream create(FileSystem fs, Path path, FsPermission perm,
boolean overwrite) throws IOException {
+ return create(fs, path, perm, overwrite, true);
+ }
+
+ /**
+ * Create the specified file on the filesystem. By default, this will:
+ *
+ * This test verifies the full workflow:
+ * 1. Create a table and load it with data
+ * 2. Create a snapshot and restore the snapshot to a temporary directory
+ * 3. Configure a job to read the restored regions via ClientSideRegionScanner using
+ *    TableSnapshotInputFormat and verify that it succeeds
+ * 4. Delete the restored temporary directory
+ * 5. Configure a new job against the deleted directory and verify that it fails
+ */
+ @Test
+ public void testReadFromRestoredSnapshotViaMR() throws Exception {
+ final TableName tableName = TableName.valueOf(name.getMethodName());
+ final String snapshotName = tableName + "_snapshot";
+ try {
+ if (UTIL.getAdmin().tableExists(tableName)) {
+ UTIL.deleteTable(tableName);
+ }
+ UTIL.createTable(tableName, FAMILIES, new byte[][] { bbb, yyy });
+
+ Admin admin = UTIL.getAdmin();
+ int regionNum = admin.getRegions(tableName).size();
+ LOG.info("Created table with {} regions", regionNum);
+
+ Table table = UTIL.getConnection().getTable(tableName);
+ UTIL.loadTable(table, FAMILIES);
+ table.close();
+
+ Path rootDir = CommonFSUtils.getRootDir(UTIL.getConfiguration());
+ FileSystem fs = rootDir.getFileSystem(UTIL.getConfiguration());
+ SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName, Arrays.asList(FAMILIES),
+ null, snapshotName, rootDir, fs, true);
+ Path tempRestoreDir = UTIL.getDataTestDirOnTestFS("restore_" + snapshotName);
+ RestoreSnapshotHelper.copySnapshotForScanner(UTIL.getConfiguration(), fs, rootDir,
+ tempRestoreDir, snapshotName);
+ Assert.assertTrue("Restore directory should exist", fs.exists(tempRestoreDir));
+
+ Job job = Job.getInstance(UTIL.getConfiguration());
+ job.setJarByClass(TestTableSnapshotInputFormat.class);
+ TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(),
+ TestTableSnapshotInputFormat.class);
+ Scan scan = new Scan().withStartRow(getStartRow()).withStopRow(getEndRow());
+ Configuration conf = job.getConfiguration();
+ conf.set("hbase.TableSnapshotInputFormat.snapshot.name", snapshotName);
+ conf.set("hbase.TableSnapshotInputFormat.restore.dir", tempRestoreDir.toString());
+ conf.setInt("hbase.mapreduce.splits.per.region", 1);
+ job.setReducerClass(TestTableSnapshotReducer.class);
+ job.setNumReduceTasks(1);
+ job.setOutputFormatClass(NullOutputFormat.class);
+ TableMapReduceUtil.initTableMapperJob(snapshotName, // table name (snapshot name in this case)
+ scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job,
+ false, false, TableSnapshotInputFormat.class);
+ TableMapReduceUtil.resetCacheConfig(conf);
+ Assert.assertTrue(job.waitForCompletion(true));
+ Assert.assertTrue(job.isSuccessful());
+
+ // Now verify that the job fails when the restore directory is deleted
+ Assert.assertTrue(fs.delete(tempRestoreDir, true));
+ Assert.assertFalse("Restore directory should not exist after deletion",
+ fs.exists(tempRestoreDir));
+ Job failureJob = Job.getInstance(UTIL.getConfiguration());
+ failureJob.setJarByClass(TestTableSnapshotInputFormat.class);
+ TableMapReduceUtil.addDependencyJarsForClasses(failureJob.getConfiguration(),
+ TestTableSnapshotInputFormat.class);
+ Configuration failureConf = failureJob.getConfiguration();
+ // Configure the job to use the deleted restore directory
+ failureConf.set("hbase.TableSnapshotInputFormat.snapshot.name", snapshotName);
+ failureConf.set("hbase.TableSnapshotInputFormat.restore.dir", tempRestoreDir.toString());
+ failureConf.setInt("hbase.mapreduce.splits.per.region", 1);
+ failureJob.setReducerClass(TestTableSnapshotReducer.class);
+ failureJob.setNumReduceTasks(1);
+ failureJob.setOutputFormatClass(NullOutputFormat.class);
+
+ TableMapReduceUtil.initTableMapperJob(snapshotName, scan, TestTableSnapshotMapper.class,
+ ImmutableBytesWritable.class, NullWritable.class, failureJob, false, false,
+ TableSnapshotInputFormat.class);
+ TableMapReduceUtil.resetCacheConfig(failureConf);
+
+ Assert.assertFalse("Restore directory should not exist before job execution",
+ fs.exists(tempRestoreDir));
+ failureJob.waitForCompletion(true);
+
+ Assert.assertFalse("Job should fail since the restored snapshot directory is deleted",
+ failureJob.isSuccessful());
+
+ } finally {
+ try {
+ if (UTIL.getAdmin().tableExists(tableName)) {
+ UTIL.deleteTable(tableName);
+ }
+ } catch (Exception e) {
+ LOG.warn("Error deleting table", e);
+ }
+ try {
+ UTIL.getAdmin().deleteSnapshot(snapshotName);
+ } catch (Exception e) {
+ LOG.warn("Error deleting snapshot", e);
+ }
+ }
+ }
}
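For comparison, production jobs normally reach the same read path through TableMapReduceUtil.initTableSnapshotMapperJob rather than setting the TableSnapshotInputFormat configuration keys by hand as the test above does. A minimal sketch under that assumption (the mapper, job name, snapshot name and restore directory below are illustrative, not part of this patch):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.hbase.mapreduce.TableMapper;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

    public class SnapshotScanJob {

      /** Counts rows read back from the restored snapshot; emits nothing. */
      public static class RowCountMapper extends TableMapper<ImmutableBytesWritable, NullWritable> {
        @Override
        protected void map(ImmutableBytesWritable key, Result value, Context context)
            throws IOException, InterruptedException {
          context.getCounter("snapshot", "rows").increment(1);
        }
      }

      public static Job createJob(Configuration conf, String snapshotName, Path restoreDir)
          throws IOException {
        Job job = Job.getInstance(conf, "read-" + snapshotName);
        Scan scan = new Scan(); // full scan over the snapshot's regions
        // Restores the snapshot under restoreDir and wires TableSnapshotInputFormat into the job.
        TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, RowCountMapper.class,
          ImmutableBytesWritable.class, NullWritable.class, job, true, restoreDir);
        job.setNumReduceTasks(0);
        job.setOutputFormatClass(NullOutputFormat.class);
        return job;
      }
    }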
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java
index d2cc2e2bfdb4..1244d5bf3525 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java
@@ -29,6 +29,7 @@
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.stream.Collectors;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
@@ -37,9 +38,12 @@
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.exceptions.MergeRegionException;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
@@ -102,6 +106,7 @@ void fixHoles(CatalogJanitorReport report) {
final List
+ *
+ * @param conf the configuration to use
+ * @param fs {@link FileSystem} on which to write the file
+ * @param path {@link Path} to the file to write
+ * @param perm initial permissions for the file
+ * @param favoredNodes favored data nodes
+ * @param isRecursiveCreate whether missing parent directories should be created recursively
+ * @return output stream to the created file
+ * @throws IOException if the file cannot be created
+ */
+ public static FSDataOutputStream create(Configuration conf, FileSystem fs, Path path,
+ FsPermission perm, InetSocketAddress[] favoredNodes, boolean isRecursiveCreate)
+ throws IOException {
if (fs instanceof HFileSystem) {
FileSystem backingFs = ((HFileSystem) fs).getBackingFs();
if (backingFs instanceof DistributedFileSystem) {
@@ -230,7 +256,7 @@ public static FSDataOutputStream create(Configuration conf, FileSystem fs, Path
}
}
- return CommonFSUtils.create(fs, path, perm, true);
+ return CommonFSUtils.create(fs, path, perm, true, isRecursiveCreate);
}
/**
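The isRecursiveCreate flag threaded through FSUtils here presumably maps onto the difference between Hadoop's FileSystem#create, which silently materializes missing parent directories, and FileSystem#createNonRecursive, which throws when the parent is gone. The new CommonFSUtils overload itself is not visible in these hunks, so the following is only a sketch of that underlying contract (the class name and buffer size are placeholders, and passing null for the Progressable is just for brevity):

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public final class NonRecursiveCreateSketch {

      /**
       * Creates {@code path}, either letting HDFS materialize missing parent directories
       * (recursive) or failing with an IOException when the parent is absent (non-recursive).
       */
      static FSDataOutputStream create(FileSystem fs, Path path, FsPermission perm,
          boolean overwrite, boolean recursive) throws IOException {
        int bufferSize = 4096; // placeholder; HBase derives this from the fs configuration
        short replication = fs.getDefaultReplication(path);
        long blockSize = fs.getDefaultBlockSize(path);
        if (recursive) {
          // Creates any missing parent directories, which is what allowed a deleted region
          // directory to be silently recreated before this change.
          return fs.create(path, perm, overwrite, bufferSize, replication, blockSize, null);
        }
        // Throws if the parent directory does not exist, surfacing the missing region dir.
        return fs.createNonRecursive(path, perm, overwrite, bufferSize, replication, blockSize,
          null);
      }
    }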
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java
index e40a74f49770..882b573ad4e1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java
@@ -99,6 +99,7 @@
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
@@ -109,6 +110,7 @@
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.ChunkCreator;
import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
@@ -3722,4 +3724,22 @@ public static void await(final long sleepMillis, final BooleanSupplier condition
throw e;
}
}
+
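+ /**
+ * Creates the region directory for {@code hri} under the data test dir, if it does not already
+ * exist, so tests can open a region without going through the full region-creation path.
+ */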
+ public void createRegionDir(RegionInfo hri) throws IOException {
+ Path rootDir = getDataTestDir();
+ Path tableDir = CommonFSUtils.getTableDir(rootDir, hri.getTable());
+ Path regionDir = new Path(tableDir, hri.getEncodedName());
+ FileSystem fs = getTestFileSystem();
+ if (!fs.exists(regionDir)) {
+ fs.mkdirs(regionDir);
+ }
+ }
+
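+ /**
+ * Creates the region directory and .regioninfo file for {@code regionInfo} on the master
+ * filesystem, rooted at the configured HBase root dir.
+ */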
+ public void createRegionDir(RegionInfo regionInfo, MasterFileSystem masterFileSystem)
+ throws IOException {
+ Path tableDir =
+ CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), regionInfo.getTable());
+ HRegionFileSystem.createRegionOnFileSystem(conf, masterFileSystem.getFileSystem(), tableDir,
+ regionInfo);
+ }
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionCoprocessor.java
index e569457c7479..9daf116db485 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionCoprocessor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionCoprocessor.java
@@ -76,12 +76,14 @@ public void before() throws IOException {
this.rss = new MockRegionServerServices(HTU.getConfiguration());
ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null,
MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
+ HTU.createRegionDir(ri);
this.region = HRegion.openHRegion(ri, td, null, HTU.getConfiguration(), this.rss, null);
}
@After
public void after() throws IOException {
this.region.close();
+ HTU.cleanupTestDir();
}
/**
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java
index dff2bf2d782a..52bc8985f949 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java
@@ -172,6 +172,7 @@ private static RegionInfo makeOverlap(MasterServices services, RegionInfo a, Reg
throws IOException {
RegionInfo overlapRegion = RegionInfoBuilder.newBuilder(a.getTable())
.setStartKey(a.getStartKey()).setEndKey(b.getEndKey()).build();
+ TEST_UTIL.createRegionDir(overlapRegion, services.getMasterFileSystem());
MetaTableAccessor.putsToMetaTable(services.getConnection(),
Collections.singletonList(MetaTableAccessor.makePutFromRegionInfo(overlapRegion,
EnvironmentEdgeManager.currentTime())));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java
index a6e565cdb414..c9f8d3a1dced 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java
@@ -188,9 +188,11 @@ private HRegion initHRegion(TableDescriptor htd, RegionInfo info) throws IOExcep
CommonFSUtils.setRootDir(walConf, tableDir);
final WALFactory wals = new WALFactory(walConf, "log_" + info.getEncodedName());
HRegion region = new HRegion(fs, wals.getWAL(info), conf, htd, null);
-
+ Path regionDir = new Path(tableDir, info.getEncodedName());
+ if (!fs.getFileSystem().exists(regionDir)) {
+ fs.getFileSystem().mkdirs(regionDir);
+ }
region.initialize();
-
return region;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java
index bb0dc988fcfe..844dda8f7d8d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java
@@ -198,6 +198,7 @@ private HRegion initHRegion(TableDescriptor htd, RegionInfo info) throws IOExcep
.rename(eq(new Path(storeDir, ERROR_FILE)), any());
HRegionFileSystem fs = new HRegionFileSystem(conf, errFS, tableDir, info);
+ HRegionFileSystem.createRegionOnFileSystem(conf, fs.getFileSystem(), tableDir, info);
final Configuration walConf = new Configuration(conf);
CommonFSUtils.setRootDir(walConf, tableDir);
final WALFactory wals = new WALFactory(walConf, "log_" + info.getEncodedName());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index da1c11ba64c4..0847e3f2685c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -7531,6 +7531,7 @@ public void testBulkLoadReplicationEnabled() throws IOException {
TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1)).build();
RegionInfo hri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build();
+ TEST_UTIL.createRegionDir(hri);
region = HRegion.openHRegion(hri, tableDescriptor, rss.getWAL(hri),
TEST_UTIL.getConfiguration(), rss, null);
@@ -7890,4 +7891,77 @@ public void testRegionOnCoprocessorsWithoutChange() throws IOException {
public static class NoOpRegionCoprocessor implements RegionCoprocessor, RegionObserver {
// a empty region coprocessor class
}
+
+ /**
+ * Test for HBASE-29662: HRegion.initialize() should fail when it would have to recreate the
+ * .regioninfo file after the region directory has been deleted. This validates that .regioninfo
+ * file creation does not recursively create missing parent directories.
+ */
+ @Test
+ public void testHRegionInitializeFailsWithDeletedRegionDir() throws Exception {
+ LOG.info("Testing HRegion initialize failure with deleted region directory");
+
+ TEST_UTIL = new HBaseTestingUtil();
+ Configuration conf = TEST_UTIL.getConfiguration();
+ Path testDir = TEST_UTIL.getDataTestDir("testHRegionInitFailure");
+ FileSystem fs = testDir.getFileSystem(conf);
+
+ // Create table descriptor
+ TableName tableName = TableName.valueOf("TestHRegionInitWithDeletedDir");
+ byte[] family = Bytes.toBytes("info");
+ TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName)
+ .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
+
+ // Create region info
+ RegionInfo regionInfo =
+ RegionInfoBuilder.newBuilder(tableName).setStartKey(null).setEndKey(null).build();
+
+ Path tableDir = CommonFSUtils.getTableDir(testDir, tableName);
+
+ // Create WAL for the region
+ WAL wal = HBaseTestingUtil.createWal(conf, testDir, regionInfo);
+
+ try {
+ // Create region normally (this should succeed and create region directory)
+ LOG.info("Creating region normally - should succeed");
+ HRegion region = HRegion.createHRegion(regionInfo, testDir, conf, htd, wal, true);
+
+ // Verify region directory exists
+ Path regionDir = new Path(tableDir, regionInfo.getEncodedName());
+ assertTrue("Region directory should exist after creation", fs.exists(regionDir));
+
+ Path regionInfoFile = new Path(regionDir, HRegionFileSystem.REGION_INFO_FILE);
+ assertTrue("Region info file should exist after creation", fs.exists(regionInfoFile));
+
+ // Delete the region directory (simulating external deletion or corruption)
+ assertTrue(fs.delete(regionDir, true));
+ assertFalse("Region directory should not exist after deletion", fs.exists(regionDir));
+
+ // Try to open/initialize the region again - this should fail
+ LOG.info("Attempting to re-initialize region with deleted directory - should fail");
+
+ // Create a new region instance (simulating region server restart or reopen)
+ HRegion newRegion = HRegion.newHRegion(tableDir, wal, fs, conf, regionInfo, htd, null);
+ // Try to initialize - this should fail because the regionDir doesn't exist
+ IOException regionInitializeException = null;
+ try {
+ newRegion.initialize(null);
+ } catch (IOException e) {
+ regionInitializeException = e;
+ }
+
+ // Verify the exception indicates the missing region directory
+ assertNotNull("Exception should be thrown", regionInitializeException);
+ String exceptionMessage = regionInitializeException.getMessage().toLowerCase();
+ assertTrue(exceptionMessage.contains("region directory does not exist"));
+ assertFalse("Region directory should still not exist after failed initialization",
+ fs.exists(regionDir));
+
+ } finally {
+ if (wal != null) {
+ wal.close();
+ }
+ TEST_UTIL.cleanupTestDir();
+ }
+ }
}
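The test above only asserts that initialization fails with a message mentioning a missing region directory; the production-side guard is not part of the hunks shown here. A rough sketch of the kind of check it implies, with an illustrative class and method name:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class RegionInfoFileGuard {

      /**
       * Refuses to (re)create the .regioninfo file when the enclosing region directory is gone,
       * instead of letting a recursive create silently resurrect the directory.
       */
      static void checkRegionDirExists(FileSystem fs, Path regionDir) throws IOException {
        if (!fs.exists(regionDir)) {
          throw new IOException("Region directory does not exist: " + regionDir
            + "; refusing to recreate .regioninfo for a deleted region");
        }
      }
    }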
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java
index 9c65a59b115a..ace0df4e4ce0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java
@@ -122,9 +122,8 @@ private HRegion initHRegion(TableDescriptor htd, byte[] startKey, byte[] stopKey
ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null,
MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
HRegion region = new HRegion(fs, wals.getWAL(info), conf, htd, null);
-
+ HRegionFileSystem.createRegionOnFileSystem(walConf, fs.getFileSystem(), tableDir, info);
region.initialize();
-
return region;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
index 18b560519bb5..140b3184908f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
@@ -836,7 +836,8 @@ public void testSequentialEditLogSeqNum() throws IOException {
// Mock the WAL
MockWAL wal = createMockWAL();
-
+ TEST_UTIL.createRegionDir(hri,
+ TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem());
HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal);
for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");