diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 75dd2ef07b38..3d4e87b11a19 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -2651,4 +2651,9 @@ List<LogEntry> getLogEntries(Set<ServerName> serverNames, String logType, Server * Get the list of cached files */ List<String> getCachedFilesList(ServerName serverName) throws IOException; + + /** + * Refresh the hbase:meta table by syncing it with the region information in the backing + * storage. + * @return the procedure id of the refresh procedure + */ + Long refreshMeta() throws IOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java index c13dfc33e3d2..5dc01f240dfc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java @@ -1136,4 +1136,9 @@ public void flushMasterStore() throws IOException { public List<String> getCachedFilesList(ServerName serverName) throws IOException { return get(admin.getCachedFilesList(serverName)); } + + @Override + public Long refreshMeta() throws IOException { + return get(admin.refreshMeta()); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index 331aa4a254af..45765a35f0c0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -1862,4 +1862,9 @@ CompletableFuture<List<LogEntry>> getLogEntries(Set<ServerName> serverNames, Str * Get the list of cached files */ CompletableFuture<List<String>> getCachedFilesList(ServerName serverName); + + /** + * Refresh the hbase:meta table by syncing it with the region information in the backing + * storage. + * @return a {@link CompletableFuture} completed with the procedure id of the refresh procedure + */ + CompletableFuture<Long> refreshMeta(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java index 69f353600036..22b356e6e0d0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java @@ -1005,4 +1005,9 @@ public CompletableFuture<Void> flushMasterStore() { public CompletableFuture<List<String>> getCachedFilesList(ServerName serverName) { return wrap(rawAdmin.getCachedFilesList(serverName)); } + + @Override + public CompletableFuture<Long> refreshMeta() { + return wrap(rawAdmin.refreshMeta()); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 7cb0e4689510..fa66abb264f6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -261,6 +261,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RefreshMetaRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RefreshMetaResponse; import
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest; @@ -4557,4 +4559,16 @@ List> adminCall(controller, stub, request.build(), resp -> resp.getCachedFilesList())) .serverName(serverName).call(); } + + @Override + public CompletableFuture refreshMeta() { + RefreshMetaRequest.Builder request = RefreshMetaRequest.newBuilder(); + request.setNonceGroup(ng.getNonceGroup()).setNonce(ng.newNonce()); + return this. newMasterCaller() + .action((controller, stub) -> this. call( + controller, stub, request.build(), MasterService.Interface::refreshMeta, + RefreshMetaResponse::getProcId)) + .call(); + } + } diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto index a8adaa27453f..b39b0700aa14 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto @@ -799,6 +799,14 @@ message ModifyColumnStoreFileTrackerResponse { message FlushMasterStoreRequest {} message FlushMasterStoreResponse {} +message RefreshMetaRequest { + optional uint64 nonce_group = 1 [default = 0]; + optional uint64 nonce = 2 [default = 0]; +} +message RefreshMetaResponse { + optional uint64 proc_id = 1; +} + service MasterService { /** Used by the client to get the number of regions that have received the updated schema */ rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest) @@ -1270,6 +1278,9 @@ service MasterService { rpc FlushTable(FlushTableRequest) returns(FlushTableResponse); + + rpc RefreshMeta(RefreshMetaRequest) + returns(RefreshMetaResponse); } // HBCK Service definitions. diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto index e52a46ab358d..192252486332 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto @@ -821,3 +821,13 @@ enum CloseTableRegionsProcedureState { message CloseTableRegionsProcedureStateData { required TableName table_name = 1; } + +enum RefreshMetaState { + REFRESH_META_INIT = 1; + REFRESH_META_SCAN_STORAGE = 2; + REFRESH_META_UPDATE = 3; + REFRESH_META_COMPLETE = 4; +} + +message RefreshMetaStateData { +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index 98750d38a7c3..9d31511a896c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -742,7 +742,7 @@ private static void deleteFromMetaTable(final Connection connection, final Delet * @param connection connection we're using * @param deletes Deletes to add to hbase:meta This list should support #remove. 
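+ * Note: made public so that RefreshMetaProcedure can remove stale region rows during a meta refresh.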
*/ - private static void deleteFromMetaTable(final Connection connection, final List deletes) + public static void deleteFromMetaTable(final Connection connection, final List deletes) throws IOException { try (Table t = getMetaHTable(connection)) { debugLogMutations(deletes); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 387be56fdf3d..ce9779f5673f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -169,6 +169,7 @@ import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch; import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait; import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher; +import org.apache.hadoop.hbase.master.procedure.RefreshMetaProcedure; import org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure; import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; import org.apache.hadoop.hbase.master.procedure.TruncateRegionProcedure; @@ -246,6 +247,7 @@ import org.apache.hadoop.hbase.security.SecurityConstants; import org.apache.hadoop.hbase.security.Superusers; import org.apache.hadoop.hbase.security.UserProvider; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos; import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; @@ -4543,4 +4545,21 @@ protected String getDescription() { } }); } + + public Long refreshMeta(long nonceGroup, long nonce) throws IOException { + return MasterProcedureUtil + .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) { + @Override + protected void run() throws IOException { + LOG.info("Submitting RefreshMetaProcedure"); + submitProcedure( + new RefreshMetaProcedure(procedureExecutor.getEnvironment())); + } + + @Override + protected String getDescription() { + return "RefreshMetaProcedure"; + } + }); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index faedc6dd628f..5ba90597af03 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -76,6 +76,7 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil; import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable; +import org.apache.hadoop.hbase.master.procedure.RefreshMetaProcedure; import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; import org.apache.hadoop.hbase.master.replication.AbstractPeerNoLockProcedure; import org.apache.hadoop.hbase.mob.MobUtils; @@ -113,6 +114,7 @@ import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil; import org.apache.hadoop.hbase.security.access.UserPermission; import org.apache.hadoop.hbase.security.visibility.VisibilityController; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; import org.apache.hadoop.hbase.util.Bytes; @@ -3661,4 +3663,15 @@ public FlushTableResponse 
flushTable(RpcController controller, FlushTableRequest throw new ServiceException(ioe); } } + + @Override public MasterProtos.RefreshMetaResponse refreshMeta(RpcController controller, + MasterProtos.RefreshMetaRequest request) throws ServiceException { + try { + Long procId = + server.refreshMeta(request.getNonceGroup(), request.getNonce()); + return MasterProtos.RefreshMetaResponse.newBuilder().setProcId(procId).build(); + } catch (IOException ioe) { + throw new ServiceException(ioe); + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RefreshMetaProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RefreshMetaProcedure.java new file mode 100644 index 000000000000..d94ea43af377 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RefreshMetaProcedure.java @@ -0,0 +1,413 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.procedure; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Mutation; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RefreshMetaState; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RefreshMetaStateData; +import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.RetryCounter; +import org.apache.hadoop.hbase.procedure2.ProcedureUtil; +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; +import org.apache.hadoop.hbase.util.FSUtils; + +import java.io.DataInputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +@InterfaceAudience.Private +public class RefreshMetaProcedure extends AbstractStateMachineTableProcedure { + private static final Logger LOG = 
LoggerFactory.getLogger(RefreshMetaProcedure.class); + + private List currentRegions; + private List latestRegions; + private static final int MUTATION_BATCH_SIZE = 100; + + public RefreshMetaProcedure() { + super(); + } + + public RefreshMetaProcedure(MasterProcedureEnv env) { + super(env); + } + + @Override + public TableName getTableName() { + return TableName.META_TABLE_NAME; + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.EDIT; + } + + @Override + protected Flow executeFromState(MasterProcedureEnv env, RefreshMetaState refreshMetaState) { + LOG.info("Executing RefreshMetaProcedure state: {}", refreshMetaState); + + try { + return switch (refreshMetaState) { + case REFRESH_META_INIT -> executeInit(env); + case REFRESH_META_SCAN_STORAGE -> executeScanStorage(env); + case REFRESH_META_UPDATE -> executeUpdate(env); + case REFRESH_META_COMPLETE -> executeComplete(); + default -> throw new UnsupportedOperationException("Unhandled state: " + refreshMetaState); + }; + } catch (Exception ex) { + LOG.error("Error in RefreshMetaProcedure state {}", refreshMetaState, ex); + setFailure("RefreshMetaProcedure", ex); + return Flow.NO_MORE_STATE; + } + } + + private Flow executeInit(MasterProcedureEnv env) throws IOException { + LOG.trace("Getting current regions from hbase:meta table"); + try { + currentRegions = getCurrentRegions(env.getMasterServices().getConnection()); + LOG.info("Found {} current regions in meta table", currentRegions.size()); + setNextState(RefreshMetaState.REFRESH_META_SCAN_STORAGE); + return Flow.HAS_MORE_STATE; + } catch (IOException ioe) { + LOG.error("Failed to get current regions from meta table", ioe); + throw ioe; + } + } + + private Flow executeScanStorage(MasterProcedureEnv env) throws IOException { + try { + latestRegions = scanBackingStorage(env.getMasterServices().getConnection()); + LOG.info("Found {} regions in backing storage", latestRegions.size()); + setNextState(RefreshMetaState.REFRESH_META_UPDATE); + return Flow.HAS_MORE_STATE; + } catch (IOException ioe) { + LOG.error("Failed to scan backing storage", ioe); + throw ioe; + } + } + + private Flow executeUpdate(MasterProcedureEnv env) throws IOException { + if (currentRegions == null || latestRegions == null) { + LOG.error("Can not execute update on null lists. " + + "Meta Table Regions - {}, Storage Regions - {}", currentRegions, latestRegions); + throw new IOException((currentRegions == null ? "current regions" : "latest regions") + " list is null"); + } + try { + LOG.info("Comparing regions. 
Current regions: {}, Latest regions: {}", + currentRegions.size(), latestRegions.size()); + compareAndUpdateRegions( + currentRegions.stream() + .collect(Collectors.toMap(RegionInfo::getEncodedName, Function.identity())), + latestRegions.stream() + .collect(Collectors.toMap(RegionInfo::getEncodedName, Function.identity())), + env.getMasterServices().getConnection(), env); + LOG.info("Meta table update completed successfully"); + setNextState(RefreshMetaState.REFRESH_META_COMPLETE); + return Flow.HAS_MORE_STATE; + } catch (IOException ioe) { + LOG.error("Failed to update the hbase:meta table", ioe); + throw ioe; + } + } + + private Flow executeComplete() { + currentRegions = null; + latestRegions = null; + LOG.info("RefreshMetaProcedure completed successfully"); + return Flow.NO_MORE_STATE; + } + + /** Compares the current regions in hbase:meta with the latest regions from backing storage + * and applies necessary mutations (additions, deletions, or modifications) to the meta table. + * + * @param currentMap Current regions from hbase:meta + * @param latestMap Latest regions from backing storage + * @param connection HBase connection to use for meta table operations + * @param env MasterProcedureEnv for accessing master services + * @throws IOException If there is an error accessing the meta table or backing storage + */ + private void compareAndUpdateRegions(Map currentMap, Map latestMap, + Connection connection, MasterProcedureEnv env) throws IOException { + + List mutations = new ArrayList<>(); + + for (Map.Entry entry : latestMap.entrySet()) { + String regionId = entry.getKey(); + RegionInfo latestRegion = entry.getValue(); + if (!currentMap.containsKey(regionId)) { + mutations.add(MetaTableAccessor.makePutFromRegionInfo(latestRegion)); + LOG.debug("Adding the region to meta: {}", latestRegion.getRegionNameAsString()); + } else { + RegionInfo currentRegion = currentMap.get(regionId); + if (hasBoundaryChanged(currentRegion, latestRegion)) { + mutations.add(MetaTableAccessor.makePutFromRegionInfo(latestRegion)); + LOG.debug("Adding a put to update region boundaries in meta: {}", + latestRegion.getRegionNameAsString()); + } + } + } + + for (Map.Entry entry : currentMap.entrySet()) { + String regionId = entry.getKey(); + RegionInfo currentRegion = entry.getValue(); + if (!latestMap.containsKey(regionId)) { + mutations.add(MetaTableAccessor.makeDeleteFromRegionInfo(currentRegion, + EnvironmentEdgeManager.currentTime())); + LOG.debug("Removing region from meta: {}", currentRegion.getRegionNameAsString()); + } + } + + if (!mutations.isEmpty()) { + LOG.info("Applying {} mutations to meta table", mutations.size()); + executeBatchAndRetry(connection, mutations, env); + } else { + LOG.info("No update needed - meta table is in sync with backing storage"); + } + } + + private void executeBatchAndRetry(Connection connection, List mutations, + MasterProcedureEnv env) throws IOException { + List> chunks = Lists.partition(mutations, MUTATION_BATCH_SIZE); + + for (int i = 0; i < chunks.size(); i++) { + List chunk = chunks.get(i); + RetryCounter retryCounter = ProcedureUtil.createRetryCounter(env.getMasterConfiguration()); + while (retryCounter.shouldRetry()) { + try { + applyMutations(connection, chunk); + LOG.debug("Successfully processed batch {}/{}", i + 1, chunks.size()); + break; + } catch (IOException ioe) { + LOG.warn("Batch {}/{} failed on attempt {}/{}", i + 1, chunks.size(), + retryCounter.getAttemptTimes() + 1, retryCounter.getMaxAttempts(), ioe); + if (!retryCounter.shouldRetry()) { + 
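// Retries for this chunk are exhausted; fail the whole procedure. Chunks that were already + // applied can safely stay in place: a re-run recomputes the remaining puts and deletes from + // the backing storage and completes the update. +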
LOG.error("Exceeded max retries for batch {}/{}. Failing refresh meta procedure.", i + 1, chunks.size()); + throw ioe; + } + try { + retryCounter.sleepUntilNextRetry(); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + throw new IOException("Interrupted during retry", ie); + } + } + } + } + } + + private void applyMutations(Connection connection, List mutations) throws IOException { + List puts = mutations.stream() + .filter(m -> m instanceof Put) + .map(m -> (Put) m) + .collect(Collectors.toList()); + + List deletes = mutations.stream() + .filter(m -> m instanceof Delete) + .map(m -> (Delete) m) + .collect(Collectors.toList()); + + if (!puts.isEmpty()) { + MetaTableAccessor.putsToMetaTable(connection, puts); + } + if (!deletes.isEmpty()) { + MetaTableAccessor.deleteFromMetaTable(connection, deletes); + } + } + + private boolean hasBoundaryChanged(RegionInfo region1, RegionInfo region2) { + return !Arrays.equals(region1.getStartKey(), region2.getStartKey()) || + !Arrays.equals(region1.getEndKey(), region2.getEndKey()); + } + + /** Scans the backing storage for all regions and returns a list of RegionInfo objects. + * This method scans the filesystem for region directories and reads their .regioninfo files. + * + * @param connection The HBase connection to use. + * @return List of RegionInfo objects found in the backing storage. + * @throws IOException If there is an error accessing the filesystem or reading region info files. + */ + private List scanBackingStorage(Connection connection) throws IOException { + List regions = new ArrayList<>(); + Configuration conf = connection.getConfiguration(); + FileSystem fs = FileSystem.get(conf); + Path rootDir = CommonFSUtils.getRootDir(conf); + Path dataDir = new Path(rootDir, HConstants.BASE_NAMESPACE_DIR); + + LOG.info("Scanning backing storage under: {}", dataDir); + + if (!fs.exists(dataDir)) { + LOG.warn("Data directory does not exist: {}", dataDir); + return regions; + } + + FileStatus[] namespaceDirs = fs.listStatus(dataDir, path -> !path.getName().matches("^[._-].*")); + LOG.debug("Found {} namespace directories in data dir", Arrays.stream(namespaceDirs).toList()); + + for (FileStatus nsDir : namespaceDirs) { + try { + List namespaceRegions = scanTablesInNamespace(fs, nsDir.getPath()); + regions.addAll(namespaceRegions); + LOG.debug("Found {} regions in namespace {}", namespaceRegions.size(), nsDir.getPath().getName()); + } catch (IOException e) { + LOG.error("Failed to scan namespace directory: {}", nsDir.getPath(), e); + } + } + LOG.info("Scanned backing storage and found {} regions", regions.size()); + return regions; + } + + private List scanTablesInNamespace(FileSystem fs, Path namespacePath) throws IOException { + LOG.debug("Scanning namespace {})", namespacePath.getName()); + List tableDirs = FSUtils.getLocalTableDirs(fs, namespacePath); + + return tableDirs.parallelStream() + .filter(tableDir -> { + if (TableName.isMetaTableName(CommonFSUtils.getTableName(tableDir))) { + LOG.debug("Skipping META table in namespace {}", namespacePath.getName()); + return false; + } + return true; + }) + .flatMap(tableDir -> { + try { + List tableRegions = scanRegionsInTable(fs, FSUtils.getRegionDirs(fs, tableDir)); + LOG.debug("Found {} regions in table {} in namespace {}", + tableRegions.size(), tableDir.getName(), namespacePath.getName()); + return tableRegions.stream(); + } catch (IOException e) { + LOG.warn("Failed to scan table directory: {} for namespace {}", + tableDir, namespacePath.getName(), e); + return 
Stream.empty(); + } + }).toList(); + } + + private List scanRegionsInTable(FileSystem fs, List regionDirs) throws IOException { + List regions = new ArrayList<>(); + + for (Path regionDir : regionDirs) { + String encodedRegionName = regionDir.getName(); + try { + Path regionInfoPath = new Path(regionDir, HRegionFileSystem.REGION_INFO_FILE); + if (fs.exists(regionInfoPath)) { + RegionInfo ri = readRegionInfo(fs, regionInfoPath); + if (ri != null && isValidRegionInfo(ri, encodedRegionName)) { + regions.add(ri); + LOG.debug("Found region: {} -> {}", encodedRegionName, ri.getRegionNameAsString()); + } else { + LOG.warn("Invalid RegionInfo in file: {}", regionInfoPath); + } + } else { + LOG.debug("No .regioninfo file found in region directory: {}", regionDir); + } + } catch (Exception e) { + LOG.warn("Failed to read region info from directory: {}", encodedRegionName, e); + } + } + return regions; + } + + private boolean isValidRegionInfo(RegionInfo regionInfo, String expectedEncodedName) { + if (!expectedEncodedName.equals(regionInfo.getEncodedName())) { + LOG.warn("RegionInfo encoded name mismatch: directory={}, regioninfo={}", + expectedEncodedName, regionInfo.getEncodedName()); + return false; + } + return true; + } + + private RegionInfo readRegionInfo(FileSystem fs, Path regionInfoPath) { + try (FSDataInputStream inputStream = fs.open(regionInfoPath); + DataInputStream dataInputStream = new DataInputStream(inputStream)) { + return RegionInfo.parseFrom(dataInputStream); + } catch (Exception e) { + LOG.warn("Failed to parse .regioninfo file: {}", regionInfoPath, e); + return null; + } + } + + /** + * Retrieves the current regions from the hbase:meta table. + * + * @param connection The HBase connection to use. + * @return List of RegionInfo objects representing the current regions in meta. + * @throws IOException If there is an error accessing the meta table. + */ + List getCurrentRegions(Connection connection) throws IOException { + LOG.info("Getting all regions from meta table"); + return MetaTableAccessor.getAllRegions(connection, true); + } + + @Override + protected void rollbackState(MasterProcedureEnv env, RefreshMetaState refreshMetaState) + throws IOException, InterruptedException { + // No specific rollback needed as it is generally safe to re-run the procedure. 
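+ // The procedure only reconciles hbase:meta with the region layout in the backing storage, + // and every mutation it applies is recomputed on a re-run, so there is nothing to undo here.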
+ LOG.trace("Rollback not implemented for RefreshMetaProcedure state: {}", refreshMetaState); + } + + @Override + protected RefreshMetaState getState(int stateId) { + return RefreshMetaState.forNumber(stateId); + } + + @Override + protected int getStateId(RefreshMetaState refreshMetaState) { + return refreshMetaState.getNumber(); + } + + @Override + protected RefreshMetaState getInitialState() { + return RefreshMetaState.REFRESH_META_INIT; + } + + @Override + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { + // For now, we'll use a simple approach since we do not need to store any state data + RefreshMetaStateData.Builder builder = RefreshMetaStateData.newBuilder(); + serializer.serialize(builder.build()); + } + + @Override + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { + // For now, we'll use a simple approach since we do not need to store any state data + serializer.deserialize(RefreshMetaStateData.class); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ReadOnlyController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ReadOnlyController.java index 13f458299b90..5b7ab67df0bf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ReadOnlyController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ReadOnlyController.java @@ -109,6 +109,9 @@ public void prePut(ObserverContext c, Pu @Override public void preDelete(ObserverContext c, Delete delete, WALEdit edit) throws IOException { + if (c.getEnvironment().getRegionInfo().getTable().isSystemTable()) { + return; + } internalReadOnlyGuard(); } @@ -166,7 +169,9 @@ public boolean preCheckAndPutAfterRowLock( public boolean preCheckAndDelete(ObserverContext c, byte[] row, byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, Delete delete, boolean result) throws IOException { - internalReadOnlyGuard(); + if (!c.getEnvironment().getRegionInfo().getTable().isSystemTable()) { + internalReadOnlyGuard(); + } return RegionObserver.super.preCheckAndDelete(c, row, family, qualifier, op, comparator, delete, result); } @@ -174,7 +179,9 @@ public boolean preCheckAndDelete(ObserverContext c, byte[] row, Filter filter, Delete delete, boolean result) throws IOException { - internalReadOnlyGuard(); + if (!c.getEnvironment().getRegionInfo().getTable().isSystemTable()) { + internalReadOnlyGuard(); + } return RegionObserver.super.preCheckAndDelete(c, row, filter, delete, result); } @@ -183,7 +190,9 @@ public boolean preCheckAndDeleteAfterRowLock( ObserverContext c, byte[] row, byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, Delete delete, boolean result) throws IOException { - internalReadOnlyGuard(); + if (!c.getEnvironment().getRegionInfo().getTable().isSystemTable()) { + internalReadOnlyGuard(); + } return RegionObserver.super.preCheckAndDeleteAfterRowLock(c, row, family, qualifier, op, comparator, delete, result); } @@ -192,7 +201,9 @@ public boolean preCheckAndDeleteAfterRowLock( public boolean preCheckAndDeleteAfterRowLock( ObserverContext c, byte[] row, Filter filter, Delete delete, boolean result) throws IOException { - internalReadOnlyGuard(); + if (!c.getEnvironment().getRegionInfo().getTable().isSystemTable()) { + internalReadOnlyGuard(); + } return RegionObserver.super.preCheckAndDeleteAfterRowLock(c, row, filter, delete, result); } diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRefreshMetaProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRefreshMetaProcedure.java new file mode 100644 index 000000000000..8fe9fe2f4811 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRefreshMetaProcedure.java @@ -0,0 +1,132 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.procedure; + +import static org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.assertProcNotFailed; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.lang.reflect.Method; +import java.util.List; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.RegionInfoBuilder; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ MasterTests.class, MediumTests.class }) +public class TestRefreshMetaProcedure { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRefreshMetaProcedure.class); + + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + private ProcedureExecutor procExecutor; + List activeRegions; + TableName tableName = TableName.valueOf("testRefreshMeta"); + + @Before + public void setup() throws Exception { + TEST_UTIL.getConfiguration().set("USE_META_REPLICAS", "false"); + TEST_UTIL.startMiniCluster(); + procExecutor = TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); + byte[][] splitKeys = + new byte[][] { Bytes.toBytes("split1"), Bytes.toBytes("split2"), Bytes.toBytes("split3") }; + TEST_UTIL.createTable(tableName, Bytes.toBytes("cf"), splitKeys); + TEST_UTIL.waitTableAvailable(tableName); + TEST_UTIL.getAdmin().flush(tableName); + activeRegions = TEST_UTIL.getAdmin().getRegions(tableName); + assertFalse(activeRegions.isEmpty()); + } + + @After + public void tearDown() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testRefreshMetaProcedureExecutesSuccessfully() { + RefreshMetaProcedure procedure = new 
RefreshMetaProcedure(procExecutor.getEnvironment()); + long procId = procExecutor.submitProcedure(procedure); + ProcedureTestingUtility.waitProcedure(procExecutor, procId); + assertProcNotFailed(procExecutor.getResult(procId)); + } + + @Test + public void testGetCurrentRegions() throws Exception { + RefreshMetaProcedure procedure = new RefreshMetaProcedure(procExecutor.getEnvironment()); + List regions = procedure.getCurrentRegions(TEST_UTIL.getConnection()); + assertFalse("Should have found regions in meta", regions.isEmpty()); + assertTrue("Should include test table region", + regions.stream().anyMatch(r -> r.getTable().getNameAsString().equals("testRefreshMeta"))); + } + + @Test + public void testScanBackingStorage() throws Exception { + RefreshMetaProcedure procedure = new RefreshMetaProcedure(procExecutor.getEnvironment()); + + Method method = + RefreshMetaProcedure.class.getDeclaredMethod("scanBackingStorage", Connection.class); + method.setAccessible(true); + + List fsRegions = + (List) method.invoke(procedure, TEST_UTIL.getConnection()); + + assertTrue("All regions from meta should be found in the storage", + activeRegions.stream().allMatch(reg -> fsRegions.stream() + .anyMatch(r -> r.getRegionNameAsString().equals(reg.getRegionNameAsString())))); + } + + @Test + public void testHasBoundaryChanged() throws Exception { + RefreshMetaProcedure procedure = new RefreshMetaProcedure(procExecutor.getEnvironment()); + RegionInfo region1 = RegionInfoBuilder.newBuilder(tableName) + .setStartKey(Bytes.toBytes("start1")).setEndKey(Bytes.toBytes("end1")).build(); + + RegionInfo region2 = RegionInfoBuilder.newBuilder(tableName) + .setStartKey(Bytes.toBytes("start2")).setEndKey(Bytes.toBytes("end1")).build(); + + RegionInfo region3 = RegionInfoBuilder.newBuilder(tableName) + .setStartKey(Bytes.toBytes("start1")).setEndKey(Bytes.toBytes("end2")).build(); + + Method method = RefreshMetaProcedure.class.getDeclaredMethod("hasBoundaryChanged", + RegionInfo.class, RegionInfo.class); + method.setAccessible(true); + + assertTrue("Different start keys should have been detected", + (Boolean) method.invoke(procedure, region1, region2)); + + assertTrue("Different end keys should have been detected", + (Boolean) method.invoke(procedure, region1, region3)); + + assertFalse("Identical boundaries should not have been identified", + (Boolean) method.invoke(procedure, region1, region1)); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRefreshMetaProcedureIntegration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRefreshMetaProcedureIntegration.java new file mode 100644 index 000000000000..e3616ae0b3bb --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRefreshMetaProcedureIntegration.java @@ -0,0 +1,189 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.procedure; + +import static org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.assertProcNotFailed; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.util.List; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.security.access.ReadOnlyController; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ MasterTests.class, LargeTests.class }) +public class TestRefreshMetaProcedureIntegration { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRefreshMetaProcedureIntegration.class); + + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + private Admin admin; + private ProcedureExecutor procExecutor; + private HMaster master; + private HRegionServer regionServer; + + @Before + public void setup() throws Exception { + // Configure the cluster with ReadOnlyController + TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, + ReadOnlyController.class.getName()); + TEST_UTIL.getConfiguration().set(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, + ReadOnlyController.class.getName()); + TEST_UTIL.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, + ReadOnlyController.class.getName()); + + // Start in active mode + TEST_UTIL.getConfiguration().setBoolean(HConstants.HBASE_GLOBAL_READONLY_ENABLED_KEY, false); + + TEST_UTIL.startMiniCluster(); + admin = TEST_UTIL.getAdmin(); + procExecutor = TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); + master = TEST_UTIL.getHBaseCluster().getMaster(); + regionServer = TEST_UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer(); + } + + @After + public void tearDown() throws Exception { + if (admin != null) { + admin.close(); + } + TEST_UTIL.shutdownMiniCluster(); + } + + /** + * A test for RefreshMetaProcedure to test the workflow: 1. Write data and create regions in + * active mode 2. Switch to read-only mode 3. 
Use refresh_meta to sync meta table with storage and + * switch back to active mode + */ + @Test + public void testRestoreMissingRegionInMeta() throws Exception { + + TableName tableName = TableName.valueOf("replicaTestTable"); + TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName); + builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf1")); + byte[] splitKey = Bytes.toBytes("split_key"); + admin.createTable(builder.build(), new byte[][] { splitKey }); + + TEST_UTIL.waitTableAvailable(tableName); + + Table table = TEST_UTIL.getConnection().getTable(tableName); + for (int i = 0; i < 100; i++) { + Put put = new Put(Bytes.toBytes("row_" + String.format("%03d", i))); + put.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("qual"), Bytes.toBytes("value_" + i)); + table.put(put); + } + table.close(); + + admin.flush(tableName); + + List activeRegions = admin.getRegions(tableName); + assertTrue("Should have at least 2 regions after split", activeRegions.size() >= 2); + + Table metaTable = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); + RegionInfo regionToRemove = activeRegions.get(0); + + org.apache.hadoop.hbase.client.Delete delete = + new org.apache.hadoop.hbase.client.Delete(regionToRemove.getRegionName()); + metaTable.delete(delete); + metaTable.close(); + + List regionsAfterDrift = admin.getRegions(tableName); + assertEquals("Should have one less region in meta after simulating drift", + activeRegions.size() - 1, regionsAfterDrift.size()); + + setReadOnlyMode(true); + + boolean writeBlocked = false; + try { + Table readOnlyTable = TEST_UTIL.getConnection().getTable(tableName); + Put testPut = new Put(Bytes.toBytes("test_readonly")); + testPut.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("qual"), Bytes.toBytes("should_fail")); + readOnlyTable.put(testPut); + readOnlyTable.close(); + } catch (Exception e) { + if (e.getMessage().contains("Operation not allowed in Read-Only Mode")) { + writeBlocked = true; + } + } + assertTrue("Write operations should be blocked in read-only mode", writeBlocked); + + Long procId = admin.refreshMeta(); + + assertTrue("Procedure ID should be positive", procId > 0); + + TEST_UTIL.waitFor(3000, () -> { + try { + return procExecutor.isFinished(procId); + } catch (Exception e) { + return false; + } + }); + + assertProcNotFailed(procExecutor.getResult(procId)); + + List regionsAfterRefresh = admin.getRegions(tableName); + assertEquals("Missing regions should be restored by refresh_meta", activeRegions.size(), + regionsAfterRefresh.size()); + + boolean regionRestored = regionsAfterRefresh.stream() + .anyMatch(r -> r.getRegionNameAsString().equals(regionToRemove.getRegionNameAsString())); + assertTrue("Missing region should be restored by refresh_meta", regionRestored); + + setReadOnlyMode(false); + + Table activeTable = TEST_UTIL.getConnection().getTable(tableName); + Put testPut = new Put(Bytes.toBytes("test_active_again")); + testPut.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("qual"), + Bytes.toBytes("active_mode_again")); + activeTable.put(testPut); + activeTable.close(); + } + + private void setReadOnlyMode(boolean isReadOnly) { + TEST_UTIL.getConfiguration().setBoolean(HConstants.HBASE_GLOBAL_READONLY_ENABLED_KEY, + isReadOnly); + notifyConfigurationObservers(); + } + + private void notifyConfigurationObservers() { + master.getConfigurationManager().notifyAllObservers(TEST_UTIL.getConfiguration()); + regionServer.getConfigurationManager().notifyAllObservers(TEST_UTIL.getConfiguration()); + } +} diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java index 35c868413e19..5bdd97419e67 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java @@ -989,4 +989,9 @@ public boolean replicationPeerModificationSwitch(boolean on, boolean drainProced public boolean isReplicationPeerModificationEnabled() throws IOException { return admin.isReplicationPeerModificationEnabled(); } + + @Override + public Long refreshMeta() throws IOException { + return admin.refreshMeta(); + } } diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index bceb1a4fe02d..f85c06bfa25f 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -1917,6 +1917,12 @@ def flush_master_store() def list_tables_by_state(isEnabled) @admin.listTableNamesByState(isEnabled).map(&:getNameAsString) end + + #---------------------------------------------------------------------------------------------- + # Refresh hbase:meta table by syncing with the backing storage + def refresh_meta() + @admin.refreshMeta() + end end # rubocop:enable Metrics/ClassLength end diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb index 46b38dd96b89..a62076856eb3 100644 --- a/hbase-shell/src/main/ruby/shell.rb +++ b/hbase-shell/src/main/ruby/shell.rb @@ -495,6 +495,7 @@ def self.exception_handler(hide_traceback) decommission_regionservers recommission_regionserver truncate_region + refresh_meta ], # TODO: remove older hlog_roll command aliases: { diff --git a/hbase-shell/src/main/ruby/shell/commands/refresh_meta.rb b/hbase-shell/src/main/ruby/shell/commands/refresh_meta.rb new file mode 100644 index 000000000000..5235c6f6abb1 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/refresh_meta.rb @@ -0,0 +1,43 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class RefreshMeta < Command + def help + <<-EOF + Refresh the hbase:meta table by syncing with backing storage. + This command is used in Read Replica clusters to pick up new + tables and regions from the shared storage. + Examples: + + hbase> refresh_meta + + The command returns a procedure ID that can be used to track the progress + of the meta table refresh operation. + EOF + end + + def command + proc_id = admin.refresh_meta + formatter.row(["Refresh meta procedure submitted. 
Procedure ID: #{proc_id}"]) + proc_id + end + end + end +end diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java index 0eff84bba7c8..0f29a1bbea0b 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java @@ -1355,6 +1355,10 @@ public List<String> getCachedFilesList(ServerName serverName) throws IOException throw new NotImplementedException("getCachedFilesList not supported in ThriftAdmin"); } + @Override public Long refreshMeta() throws IOException { + throw new NotImplementedException("refreshMeta not supported in ThriftAdmin"); + } + @Override public boolean replicationPeerModificationSwitch(boolean on, boolean drainProcedures) throws IOException {