diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java index fe6f3bc238a9..f004686a4b32 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java @@ -187,11 +187,39 @@ public static int getDefaultBufferSize(final FileSystem fs) { */ public static FSDataOutputStream create(FileSystem fs, Path path, FsPermission perm, boolean overwrite) throws IOException { + return create(fs, path, perm, overwrite, true); + } + + /** + * Create the specified file on the filesystem. By default, this will: + *
+ * <ol>
+ * <li>apply the umask in the configuration (if it is enabled)</li>
+ * <li>use the fs configured buffer size (or 4096 if not set)</li>
+ * <li>use the default replication</li>
+ * <li>use the default block size</li>
+ * <li>not track progress</li>
+ * </ol>
+ * @param fs {@link FileSystem} on which to write the file + * @param path {@link Path} to the file to write + * @param perm intial permissions + * @param overwrite Whether or not the created file should be overwritten. + * @param isRecursiveCreate recursively create parent directories + * @return output stream to the created file + * @throws IOException if the file cannot be created + */ + public static FSDataOutputStream create(FileSystem fs, Path path, FsPermission perm, + boolean overwrite, boolean isRecursiveCreate) throws IOException { if (LOG.isTraceEnabled()) { - LOG.trace("Creating file={} with permission={}, overwrite={}", path, perm, overwrite); + LOG.trace("Creating file={} with permission={}, overwrite={}, recursive={}", path, perm, + overwrite, isRecursiveCreate); + } + if (isRecursiveCreate) { + return fs.create(path, perm, overwrite, getDefaultBufferSize(fs), + getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null); + } else { + return fs.createNonRecursive(path, perm, overwrite, getDefaultBufferSize(fs), + getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null); } - return fs.create(path, perm, overwrite, getDefaultBufferSize(fs), - getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null); } /** diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java index edd2da4129ac..c24f8e62c816 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.client.TestTableSnapshotScanner; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat.TableSnapshotRegionSplit; +import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests; @@ -582,4 +583,104 @@ public void testCleanRestoreDir() throws Exception { TableSnapshotInputFormat.cleanRestoreDir(job, snapshotName); Assert.assertFalse(fs.exists(restorePath)); } + + /** + * Test that explicitly restores a snapshot to a temp directory and reads the restored regions via + * ClientSideRegionScanner through a MapReduce job. + *
+ * This test verifies the full workflow: 1. Create and load a table with data 2. Create a snapshot + * and restore the snapshot to a temporary directory 3. Configure a job to read the restored + * regions via ClientSideRegionScanner using TableSnapshotInputFormat and verify that it succeeds + * 4. Delete restored temporary directory 5. Configure a new job and verify that it fails + */ + @Test + public void testReadFromRestoredSnapshotViaMR() throws Exception { + final TableName tableName = TableName.valueOf(name.getMethodName()); + final String snapshotName = tableName + "_snapshot"; + try { + if (UTIL.getAdmin().tableExists(tableName)) { + UTIL.deleteTable(tableName); + } + UTIL.createTable(tableName, FAMILIES, new byte[][] { bbb, yyy }); + + Admin admin = UTIL.getAdmin(); + int regionNum = admin.getRegions(tableName).size(); + LOG.info("Created table with {} regions", regionNum); + + Table table = UTIL.getConnection().getTable(tableName); + UTIL.loadTable(table, FAMILIES); + table.close(); + + Path rootDir = CommonFSUtils.getRootDir(UTIL.getConfiguration()); + FileSystem fs = rootDir.getFileSystem(UTIL.getConfiguration()); + SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName, Arrays.asList(FAMILIES), + null, snapshotName, rootDir, fs, true); + Path tempRestoreDir = UTIL.getDataTestDirOnTestFS("restore_" + snapshotName); + RestoreSnapshotHelper.copySnapshotForScanner(UTIL.getConfiguration(), fs, rootDir, + tempRestoreDir, snapshotName); + Assert.assertTrue("Restore directory should exist", fs.exists(tempRestoreDir)); + + Job job = Job.getInstance(UTIL.getConfiguration()); + job.setJarByClass(TestTableSnapshotInputFormat.class); + TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), + TestTableSnapshotInputFormat.class); + Scan scan = new Scan().withStartRow(getStartRow()).withStopRow(getEndRow()); + Configuration conf = job.getConfiguration(); + conf.set("hbase.TableSnapshotInputFormat.snapshot.name", snapshotName); + conf.set("hbase.TableSnapshotInputFormat.restore.dir", tempRestoreDir.toString()); + conf.setInt("hbase.mapreduce.splits.per.region", 1); + job.setReducerClass(TestTableSnapshotReducer.class); + job.setNumReduceTasks(1); + job.setOutputFormatClass(NullOutputFormat.class); + TableMapReduceUtil.initTableMapperJob(snapshotName, // table name (snapshot name in this case) + scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, + false, false, TableSnapshotInputFormat.class); + TableMapReduceUtil.resetCacheConfig(conf); + Assert.assertTrue(job.waitForCompletion(true)); + Assert.assertTrue(job.isSuccessful()); + + // Now verify that job fails when restore directory is deleted + Assert.assertTrue(fs.delete(tempRestoreDir, true)); + Assert.assertFalse("Restore directory should not exist after deletion", + fs.exists(tempRestoreDir)); + Job failureJob = Job.getInstance(UTIL.getConfiguration()); + failureJob.setJarByClass(TestTableSnapshotInputFormat.class); + TableMapReduceUtil.addDependencyJarsForClasses(failureJob.getConfiguration(), + TestTableSnapshotInputFormat.class); + Configuration failureConf = failureJob.getConfiguration(); + // Configure job to use the deleted restore directory + failureConf.set("hbase.TableSnapshotInputFormat.snapshot.name", snapshotName); + failureConf.set("hbase.TableSnapshotInputFormat.restore.dir", tempRestoreDir.toString()); + failureConf.setInt("hbase.mapreduce.splits.per.region", 1); + failureJob.setReducerClass(TestTableSnapshotReducer.class); + failureJob.setNumReduceTasks(1); + 
failureJob.setOutputFormatClass(NullOutputFormat.class); + + TableMapReduceUtil.initTableMapperJob(snapshotName, scan, TestTableSnapshotMapper.class, + ImmutableBytesWritable.class, NullWritable.class, failureJob, false, false, + TableSnapshotInputFormat.class); + TableMapReduceUtil.resetCacheConfig(failureConf); + + Assert.assertFalse("Restore directory should not exist before job execution", + fs.exists(tempRestoreDir)); + failureJob.waitForCompletion(true); + + Assert.assertFalse("Job should fail since the restored snapshot directory is deleted", + failureJob.isSuccessful()); + + } finally { + try { + if (UTIL.getAdmin().tableExists(tableName)) { + UTIL.deleteTable(tableName); + } + } catch (Exception e) { + LOG.warn("Error deleting table", e); + } + try { + UTIL.getAdmin().deleteSnapshot(snapshotName); + } catch (Exception e) { + LOG.warn("Error deleting snapshot", e); + } + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java index d2cc2e2bfdb4..1244d5bf3525 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java @@ -29,6 +29,7 @@ import java.util.SortedSet; import java.util.TreeSet; import java.util.stream.Collectors; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableName; @@ -37,9 +38,12 @@ import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.exceptions.MergeRegionException; +import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -102,6 +106,7 @@ void fixHoles(CatalogJanitorReport report) { final List newRegionInfos = createRegionInfosForHoles(holes); final List newMetaEntries = createMetaEntries(masterServices, newRegionInfos); + createRegionDirectories(masterServices, newMetaEntries); final TransitRegionStateProcedure[] assignProcedures = masterServices.getAssignmentManager().createRoundRobinAssignProcedures(newMetaEntries); @@ -217,6 +222,27 @@ private static List createMetaEntries(final MasterServices masterSer return createMetaEntriesSuccesses; } + private static void createRegionDirectories(final MasterServices masterServices, + final List regions) { + if (regions.isEmpty()) { + return; + } + final MasterFileSystem mfs = masterServices.getMasterFileSystem(); + final Path rootDir = mfs.getRootDir(); + for (RegionInfo regionInfo : regions) { + if (regionInfo.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID) { + try { + Path tableDir = CommonFSUtils.getTableDir(rootDir, regionInfo.getTable()); + HRegionFileSystem.createRegionOnFileSystem(masterServices.getConfiguration(), + mfs.getFileSystem(), tableDir, regionInfo); + } catch (IOException e) { + LOG.warn("Failed to create region directory for {}: {}", + regionInfo.getRegionNameAsString(), e.getMessage(), e); + } + } + } + } + /** * Fix overlaps noted in CJ consistency 
report. */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java index 2a3732c99984..ef11e68217a5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java @@ -109,6 +109,7 @@ assert getRegion().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID || isFailed() setNextState(TruncateRegionState.TRUNCATE_REGION_MAKE_ONLINE); break; case TRUNCATE_REGION_MAKE_ONLINE: + createRegionOnFileSystem(env); addChildProcedure(createAssignProcedures(env)); setNextState(TruncateRegionState.TRUNCATE_REGION_POST_OPERATION); break; @@ -130,6 +131,20 @@ assert getRegion().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID || isFailed() return Flow.HAS_MORE_STATE; } + private void createRegionOnFileSystem(final MasterProcedureEnv env) throws IOException { + RegionStateNode regionNode = + env.getAssignmentManager().getRegionStates().getRegionStateNode(getRegion()); + regionNode.lock(); + try { + final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); + final Path tableDir = CommonFSUtils.getTableDir(mfs.getRootDir(), getTableName()); + HRegionFileSystem.createRegionOnFileSystem(env.getMasterConfiguration(), mfs.getFileSystem(), + tableDir, getRegion()); + } finally { + regionNode.unlock(); + } + } + private void deleteRegionFromFileSystem(final MasterProcedureEnv env) throws IOException { RegionStateNode regionNode = env.getAssignmentManager().getRegionStates().getRegionStateNode(getRegion()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java index 7f766c558409..a1bf09cba5d0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java @@ -764,7 +764,10 @@ private static void writeRegionInfoFileContent(final Configuration conf, final F // First check to get the permissions FsPermission perms = CommonFSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY); // Write the RegionInfo file content - try (FSDataOutputStream out = FSUtils.create(conf, fs, regionInfoFile, perms, null)) { + // HBASE-29662: Fail .regioninfo file creation, if the region directory doesn't exist, + // avoiding silent masking of missing region directories during region initialization. + // The region directory should already exist when this method is called. 
+ try (FSDataOutputStream out = FSUtils.create(conf, fs, regionInfoFile, perms, null, false)) { out.write(content); } } @@ -848,6 +851,14 @@ private void writeRegionInfoOnFilesystem(final byte[] regionInfoContent, final b CommonFSUtils.delete(fs, tmpPath, true); } + // Check parent (region) directory exists first to maintain HBASE-29662 protection + if (!fs.exists(getRegionDir())) { + throw new IOException("Region directory does not exist: " + getRegionDir()); + } + if (!fs.exists(getTempDir())) { + fs.mkdirs(getTempDir()); + } + // Write HRI to a file in case we need to recover hbase:meta writeRegionInfoFileContent(conf, fs, tmpPath, regionInfoContent); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index 55b77b6aed1b..3b446826b775 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -212,6 +212,32 @@ public static boolean deleteRegionDir(final Configuration conf, final RegionInfo */ public static FSDataOutputStream create(Configuration conf, FileSystem fs, Path path, FsPermission perm, InetSocketAddress[] favoredNodes) throws IOException { + return create(conf, fs, path, perm, favoredNodes, true); + } + + /** + * Create the specified file on the filesystem. By default, this will: + *
+ * <ol>
+ * <li>overwrite the file if it exists</li>
+ * <li>apply the umask in the configuration (if it is enabled)</li>
+ * <li>use the fs configured buffer size (or 4096 if not set)</li>
+ * <li>use the configured column family replication or default replication if
+ * {@link ColumnFamilyDescriptorBuilder#DEFAULT_DFS_REPLICATION}</li>
+ * <li>use the default block size</li>
+ * <li>not track progress</li>
+ * </ol>
+ * @param conf configurations + * @param fs {@link FileSystem} on which to write the file + * @param path {@link Path} to the file to write + * @param perm permissions + * @param favoredNodes favored data nodes + * @param isRecursiveCreate recursively create parent directories + * @return output stream to the created file + * @throws IOException if the file cannot be created + */ + public static FSDataOutputStream create(Configuration conf, FileSystem fs, Path path, + FsPermission perm, InetSocketAddress[] favoredNodes, boolean isRecursiveCreate) + throws IOException { if (fs instanceof HFileSystem) { FileSystem backingFs = ((HFileSystem) fs).getBackingFs(); if (backingFs instanceof DistributedFileSystem) { @@ -230,7 +256,7 @@ public static FSDataOutputStream create(Configuration conf, FileSystem fs, Path } } - return CommonFSUtils.create(fs, path, perm, true); + return CommonFSUtils.create(fs, path, perm, true, isRecursiveCreate); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java index e40a74f49770..882b573ad4e1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java @@ -99,6 +99,7 @@ import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim; import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.ServerManager; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; @@ -109,6 +110,7 @@ import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.ChunkCreator; import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.InternalScanner; @@ -3722,4 +3724,22 @@ public static void await(final long sleepMillis, final BooleanSupplier condition throw e; } } + + public void createRegionDir(RegionInfo hri) throws IOException { + Path rootDir = getDataTestDir(); + Path tableDir = CommonFSUtils.getTableDir(rootDir, hri.getTable()); + Path regionDir = new Path(tableDir, hri.getEncodedName()); + FileSystem fs = getTestFileSystem(); + if (!fs.exists(regionDir)) { + fs.mkdirs(regionDir); + } + } + + public void createRegionDir(RegionInfo regionInfo, MasterFileSystem masterFileSystem) + throws IOException { + Path tableDir = + CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), regionInfo.getTable()); + HRegionFileSystem.createRegionOnFileSystem(conf, masterFileSystem.getFileSystem(), tableDir, + regionInfo); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionCoprocessor.java index e569457c7479..9daf116db485 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoreRegionCoprocessor.java @@ -76,12 +76,14 @@ public void before() throws IOException { this.rss = new MockRegionServerServices(HTU.getConfiguration()); 
ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + HTU.createRegionDir(ri); this.region = HRegion.openHRegion(ri, td, null, HTU.getConfiguration(), this.rss, null); } @After public void after() throws IOException { this.region.close(); + HTU.cleanupTestDir(); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java index dff2bf2d782a..52bc8985f949 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixer.java @@ -172,6 +172,7 @@ private static RegionInfo makeOverlap(MasterServices services, RegionInfo a, Reg throws IOException { RegionInfo overlapRegion = RegionInfoBuilder.newBuilder(a.getTable()) .setStartKey(a.getStartKey()).setEndKey(b.getEndKey()).build(); + TEST_UTIL.createRegionDir(overlapRegion, services.getMasterFileSystem()); MetaTableAccessor.putsToMetaTable(services.getConnection(), Collections.singletonList(MetaTableAccessor.makePutFromRegionInfo(overlapRegion, EnvironmentEdgeManager.currentTime()))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java index a6e565cdb414..c9f8d3a1dced 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveConcurrentClose.java @@ -188,9 +188,11 @@ private HRegion initHRegion(TableDescriptor htd, RegionInfo info) throws IOExcep CommonFSUtils.setRootDir(walConf, tableDir); final WALFactory wals = new WALFactory(walConf, "log_" + info.getEncodedName()); HRegion region = new HRegion(fs, wals.getWAL(info), conf, htd, null); - + Path regionDir = new Path(tableDir, info.getEncodedName()); + if (!fs.getFileSystem().exists(regionDir)) { + fs.getFileSystem().mkdirs(regionDir); + } region.initialize(); - return region; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java index bb0dc988fcfe..844dda8f7d8d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionArchiveIOException.java @@ -198,6 +198,7 @@ private HRegion initHRegion(TableDescriptor htd, RegionInfo info) throws IOExcep .rename(eq(new Path(storeDir, ERROR_FILE)), any()); HRegionFileSystem fs = new HRegionFileSystem(conf, errFS, tableDir, info); + fs.createRegionOnFileSystem(conf, fs.getFileSystem(), tableDir, info); final Configuration walConf = new Configuration(conf); CommonFSUtils.setRootDir(walConf, tableDir); final WALFactory wals = new WALFactory(walConf, "log_" + info.getEncodedName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index da1c11ba64c4..0847e3f2685c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -7531,6 +7531,7 @@ public void testBulkLoadReplicationEnabled() throws IOException { TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam1)).build(); RegionInfo hri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build(); + TEST_UTIL.createRegionDir(hri); region = HRegion.openHRegion(hri, tableDescriptor, rss.getWAL(hri), TEST_UTIL.getConfiguration(), rss, null); @@ -7890,4 +7891,77 @@ public void testRegionOnCoprocessorsWithoutChange() throws IOException { public static class NoOpRegionCoprocessor implements RegionCoprocessor, RegionObserver { // a empty region coprocessor class } + + /** + * Test for HBASE-29662: HRegion.initialize() should fail when trying to recreate .regioninfo file + * after the region directory has been deleted. This validates that .regioninfo file creation does + * not create parent directories recursively. + */ + @Test + public void testHRegionInitializeFailsWithDeletedRegionDir() throws Exception { + LOG.info("Testing HRegion initialize failure with deleted region directory"); + + TEST_UTIL = new HBaseTestingUtil(); + Configuration conf = TEST_UTIL.getConfiguration(); + Path testDir = TEST_UTIL.getDataTestDir("testHRegionInitFailure"); + FileSystem fs = testDir.getFileSystem(conf); + + // Create table descriptor + TableName tableName = TableName.valueOf("TestHRegionInitWithDeletedDir"); + byte[] family = Bytes.toBytes("info"); + TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); + + // Create region info + RegionInfo regionInfo = + RegionInfoBuilder.newBuilder(tableName).setStartKey(null).setEndKey(null).build(); + + Path tableDir = CommonFSUtils.getTableDir(testDir, tableName); + + // Create WAL for the region + WAL wal = HBaseTestingUtil.createWal(conf, testDir, regionInfo); + + try { + // Create region normally (this should succeed and create region directory) + LOG.info("Creating region normally - should succeed"); + HRegion region = HRegion.createHRegion(regionInfo, testDir, conf, htd, wal, true); + + // Verify region directory exists + Path regionDir = new Path(tableDir, regionInfo.getEncodedName()); + assertTrue("Region directory should exist after creation", fs.exists(regionDir)); + + Path regionInfoFile = new Path(regionDir, HRegionFileSystem.REGION_INFO_FILE); + assertTrue("Region info file should exist after creation", fs.exists(regionInfoFile)); + + // Delete the region directory (simulating external deletion or corruption) + assertTrue(fs.delete(regionDir, true)); + assertFalse("Region directory should not exist after deletion", fs.exists(regionDir)); + + // Try to open/initialize the region again - this should fail + LOG.info("Attempting to re-initialize region with deleted directory - should fail"); + + // Create a new region instance (simulating region server restart or reopen) + HRegion newRegion = HRegion.newHRegion(tableDir, wal, fs, conf, regionInfo, htd, null); + // Try to initialize - this should fail because the regionDir doesn't exist + IOException regionInitializeException = null; + try { + newRegion.initialize(null); + } catch (IOException e) { + regionInitializeException = e; + } + + // Verify the exception is related to missing parent directory + assertNotNull("Exception should be thrown", regionInitializeException); + String exceptionMessage = 
regionInitializeException.getMessage().toLowerCase(); + assertTrue(exceptionMessage.contains("region directory does not exist")); + assertFalse("Region directory should still not exist after failed initialization", + fs.exists(regionDir)); + + } finally { + if (wal != null) { + wal.close(); + } + TEST_UTIL.cleanupTestDir(); + } + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java index 9c65a59b115a..ace0df4e4ce0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java @@ -122,9 +122,8 @@ private HRegion initHRegion(TableDescriptor htd, byte[] startKey, byte[] stopKey ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null, MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); HRegion region = new HRegion(fs, wals.getWAL(info), conf, htd, null); - + fs.createRegionOnFileSystem(walConf, fs.getFileSystem(), tableDir, info); region.initialize(); - return region; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java index 18b560519bb5..140b3184908f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java @@ -836,7 +836,8 @@ public void testSequentialEditLogSeqNum() throws IOException { // Mock the WAL MockWAL wal = createMockWAL(); - + TEST_UTIL.createRegionDir(hri, + TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem()); HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal); for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) { addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");
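
Note (illustrative sketch, not part of the patch): the snippet below shows how a caller might exercise the new isRecursiveCreate flag, assuming this patch is applied. Only the CommonFSUtils.create(fs, path, perm, overwrite, isRecursiveCreate) overload comes from the patch; the class name, table name and region paths are hypothetical. With isRecursiveCreate=false the call is routed to fs.createNonRecursive(...), so writing .regioninfo fails with an IOException when the region directory is missing, instead of silently recreating the parent directories (the HBASE-29662 behavior change).

// Illustrative sketch only; assumes the CommonFSUtils.create overload from this patch.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.util.CommonFSUtils;

public class NonRecursiveRegionInfoCreateSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Hypothetical region directory; in a real cluster it is created by
    // HRegionFileSystem.createRegionOnFileSystem(...) before .regioninfo is written.
    Path regionDir = new Path("/hbase/data/default/t1/0123456789abcdef0123456789abcdef");
    Path regionInfoFile = new Path(regionDir, ".regioninfo");
    FsPermission perms = FsPermission.getFileDefault();

    // isRecursiveCreate=false -> fs.createNonRecursive(...): throws IOException if regionDir
    // does not exist, rather than silently creating it as the recursive fs.create(...) would.
    try (FSDataOutputStream out =
      CommonFSUtils.create(fs, regionInfoFile, perms, true /* overwrite */, false)) {
      out.write(new byte[0]);
    }
  }
}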