
Commit f5d18fc

Revert "HBASE-29310 Handle Bulk Load Operations in Continuous Backup (#7150)" (#7290)

This reverts commit 5ac2a73.

1 parent 71efe29 · commit f5d18fc

9 files changed: +4 -135 lines changed

hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/AbstractPitrRestoreHandler.java

Lines changed: 0 additions & 28 deletions

@@ -41,7 +41,6 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupType;
 import org.apache.hadoop.hbase.backup.PointInTimeRestoreRequest;
 import org.apache.hadoop.hbase.backup.RestoreRequest;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
@@ -249,8 +248,6 @@ private PitrBackupMetadata getValidBackup(TableName sTableName, TableName tTable

     try {
       if (backupAdmin.validateRequest(restoreRequest)) {
-        // check if any bulkload entry exists post this backup time and before "endtime"
-        checkBulkLoadAfterBackup(conn, sTableName, backup, endTime);
         return backup;
       }
     } catch (IOException e) {
@@ -262,31 +259,6 @@ private PitrBackupMetadata getValidBackup(TableName sTableName, TableName tTable
     return null;
   }

-  /**
-   * Checks if any bulk load operation occurred for the specified table post last successful backup
-   * and before restore time.
-   * @param conn Active HBase connection
-   * @param sTableName Table for which to check bulk load history
-   * @param backup Last successful backup before the target recovery time
-   * @param endTime Target recovery time
-   * @throws IOException if a bulkload entry is found in between backup time and endtime
-   */
-  private void checkBulkLoadAfterBackup(Connection conn, TableName sTableName,
-    PitrBackupMetadata backup, long endTime) throws IOException {
-    try (BackupSystemTable backupSystemTable = new BackupSystemTable(conn)) {
-      List<BulkLoad> bulkLoads = backupSystemTable.readBulkloadRows(List.of(sTableName));
-      for (BulkLoad load : bulkLoads) {
-        long lastBackupTs = (backup.getType() == BackupType.FULL)
-          ? backup.getStartTs()
-          : backup.getIncrCommittedWalTs();
-        if (lastBackupTs < load.getTimestamp() && load.getTimestamp() < endTime) {
-          throw new IOException("Bulk load operation detected after last successful backup for "
-            + "table: " + sTableName);
-        }
-      }
-    }
-  }
-
   /**
    * Determines if the given backup is valid for PITR.
    * <p>
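
The method removed above encodes the safety rule this revert takes back out: bulk-loaded HFiles bypass the WAL, so a bulk load that lands after the last covering backup but before the requested restore time cannot be replayed, and PITR must be refused. The real code derives the coverage timestamp from the backup type (start ts for a full backup, committed-WAL ts for an incremental one), as visible in the diff. A minimal standalone sketch of the timestamp-window rule itself, with an illustrative BulkLoadRecord standing in for HBase's tracked BulkLoad entries (not the real API):

import java.io.IOException;
import java.util.List;

public class BulkLoadWindowCheck {

  /** Illustrative stand-in for a tracked bulk-load entry (not the HBase BulkLoad class). */
  record BulkLoadRecord(String table, long timestamp) {}

  /**
   * Rejects point-in-time restore when a bulk load falls strictly inside the window
   * (lastBackupTs, endTime): such files were written outside the WAL, so neither the
   * backup image nor WAL replay can recover them at restore time.
   */
  static void checkWindow(List<BulkLoadRecord> loads, long lastBackupTs, long endTime)
    throws IOException {
    for (BulkLoadRecord load : loads) {
      if (lastBackupTs < load.timestamp() && load.timestamp() < endTime) {
        throw new IOException("Bulk load on " + load.table() + " at " + load.timestamp()
          + " is not covered by any backup before restore time " + endTime);
      }
    }
  }

  public static void main(String[] args) {
    // Backup at t=100, restore requested at t=200, bulk load at t=150: inside the
    // uncovered window, so the check refuses the restore.
    try {
      checkWindow(List.of(new BulkLoadRecord("demo", 150L)), 100L, 200L);
    } catch (IOException e) {
      System.out.println("Refused: " + e.getMessage());
    }
  }
}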

hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupImageAdapter.java

Lines changed: 0 additions & 11 deletions

@@ -19,7 +19,6 @@

 import java.util.List;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.BackupType;
 import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
 import org.apache.yetus.audience.InterfaceAudience;

@@ -58,14 +57,4 @@ public String getBackupId() {
   public String getRootDir() {
     return image.getRootDir();
   }
-
-  @Override
-  public BackupType getType() {
-    return image.getType();
-  }
-
-  @Override
-  public long getIncrCommittedWalTs() {
-    return image.getIncrCommittedWalTs();
-  }
 }

hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupInfoAdapter.java

Lines changed: 0 additions & 11 deletions

@@ -20,7 +20,6 @@
 import java.util.List;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.BackupInfo;
-import org.apache.hadoop.hbase.backup.BackupType;
 import org.apache.yetus.audience.InterfaceAudience;

 /**
@@ -58,14 +57,4 @@ public String getBackupId() {
   public String getRootDir() {
     return info.getBackupRootDir();
   }
-
-  @Override
-  public BackupType getType() {
-    return info.getType();
-  }
-
-  @Override
-  public long getIncrCommittedWalTs() {
-    return info.getIncrCommittedWalTs();
-  }
 }

hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java

Lines changed: 2 additions & 20 deletions

@@ -101,11 +101,6 @@ Builder withCompleteTime(long completeTime) {
       return this;
     }

-    Builder withIncrCommittedWalTs(long incrCommittedWalTs) {
-      image.setIncrCommittedWalTs(incrCommittedWalTs);
-      return this;
-    }
-
     BackupImage build() {
       return image;
     }
@@ -120,7 +115,6 @@ BackupImage build() {
    private long completeTs;
    private ArrayList<BackupImage> ancestors;
    private Map<TableName, Map<String, Long>> incrTimeRanges;
-   private long incrCommittedWalTs;

    static Builder newBuilder() {
      return new Builder();
@@ -131,22 +125,20 @@ public BackupImage() {
    }

    private BackupImage(String backupId, BackupType type, String rootDir, List<TableName> tableList,
-     long startTs, long completeTs, long incrCommittedWalTs) {
+     long startTs, long completeTs) {
      this.backupId = backupId;
      this.type = type;
      this.rootDir = rootDir;
      this.tableList = tableList;
      this.startTs = startTs;
      this.completeTs = completeTs;
-     this.incrCommittedWalTs = incrCommittedWalTs;
    }

    static BackupImage fromProto(BackupProtos.BackupImage im) {
      String backupId = im.getBackupId();
      String rootDir = im.getBackupRootDir();
      long startTs = im.getStartTs();
      long completeTs = im.getCompleteTs();
-     long incrCommittedWalTs = im.getIncrCommittedWalTs();
      List<HBaseProtos.TableName> tableListList = im.getTableListList();
      List<TableName> tableList = new ArrayList<>();
      for (HBaseProtos.TableName tn : tableListList) {
@@ -159,8 +151,7 @@ static BackupImage fromProto(BackupProtos.BackupImage im) {
        ? BackupType.FULL
        : BackupType.INCREMENTAL;

-     BackupImage image = new BackupImage(backupId, type, rootDir, tableList, startTs, completeTs,
-       incrCommittedWalTs);
+     BackupImage image = new BackupImage(backupId, type, rootDir, tableList, startTs, completeTs);
      for (BackupProtos.BackupImage img : ancestorList) {
        image.addAncestor(fromProto(img));
      }
@@ -179,7 +170,6 @@ BackupProtos.BackupImage toProto() {
      builder.setBackupId(backupId);
      builder.setCompleteTs(completeTs);
      builder.setStartTs(startTs);
-     builder.setIncrCommittedWalTs(incrCommittedWalTs);
      if (type == BackupType.FULL) {
        builder.setBackupType(BackupProtos.BackupType.FULL);
      } else {
@@ -297,14 +287,6 @@ public long getCompleteTs() {
      return completeTs;
    }

-   public long getIncrCommittedWalTs() {
-     return incrCommittedWalTs;
-   }
-
-   public void setIncrCommittedWalTs(long incrCommittedWalTs) {
-     this.incrCommittedWalTs = incrCommittedWalTs;
-   }
-
    private void setCompleteTs(long completeTs) {
      this.completeTs = completeTs;
    }
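
For orientation, the manifest changes above touch every place a BackupImage field lives: the builder, the constructor, toProto, and fromProto. A stripped-down sketch of that round-trip shape, using simplified stand-in types rather than the HBase or protobuf classes:

public final class ManifestRoundTrip {

  /** Simplified stand-ins for BackupManifest.BackupImage and its protobuf message. */
  record Image(String backupId, long startTs, long completeTs) {}
  record ImageProto(String backupId, long startTs, long completeTs) {}

  // Serialization direction: every persisted field must be copied out...
  static ImageProto toProto(Image img) {
    return new ImageProto(img.backupId(), img.startTs(), img.completeTs());
  }

  // ...and the deserialization direction must copy it back symmetrically.
  static Image fromProto(ImageProto p) {
    return new Image(p.backupId(), p.startTs(), p.completeTs());
  }

  public static void main(String[] args) {
    Image img = new Image("backup_1700000000000", 1_700_000_000_000L, 1_700_000_005_000L);
    // Dropping a field (as the revert does for incrCommittedWalTs) only stays
    // consistent if builder, constructor, toProto, and fromProto change together.
    System.out.println(fromProto(toProto(img)).equals(img)); // prints: true
  }
}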

hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/PitrBackupMetadata.java

Lines changed: 0 additions & 7 deletions

@@ -20,7 +20,6 @@
 import java.util.List;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.BackupInfo;
-import org.apache.hadoop.hbase.backup.BackupType;
 import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
 import org.apache.yetus.audience.InterfaceAudience;

@@ -48,10 +47,4 @@ public interface PitrBackupMetadata {

   /** Returns Root directory where the backup is stored */
   String getRootDir();
-
-  /** Returns backup type */
-  BackupType getType();
-
-  /** Returns incrCommittedWalTs */
-  long getIncrCommittedWalTs();
 }

hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithContinuous.java

Lines changed: 0 additions & 50 deletions

@@ -22,7 +22,6 @@
 import static org.apache.hadoop.hbase.replication.regionserver.ReplicationMarkerChore.REPLICATION_MARKER_ENABLED_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;

 import java.io.IOException;
@@ -48,7 +47,6 @@
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.HFileTestUtil;
-import org.apache.hadoop.util.ToolRunner;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.ClassRule;
@@ -190,54 +188,6 @@ public void testIncrementalBackupCopyingBulkloadTillIncrCommittedWalTs() throws
     }
   }

-  @Test
-  public void testPitrFailureDueToMissingBackupPostBulkload() throws Exception {
-    String methodName = Thread.currentThread().getStackTrace()[1].getMethodName();
-    TableName tableName1 = TableName.valueOf("table_" + methodName);
-    TEST_UTIL.createTable(tableName1, famName);
-    try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) {
-
-      // The test starts with no data, and no bulk loaded rows.
-      int expectedRowCount = 0;
-      assertEquals(expectedRowCount, TEST_UTIL.countRows(tableName1));
-      assertTrue(systemTable.readBulkloadRows(List.of(tableName1)).isEmpty());
-
-      // Create continuous backup, bulk loads are now being tracked
-      String backup1 = backupTables(BackupType.FULL, List.of(tableName1), BACKUP_ROOT_DIR, true);
-      assertTrue(checkSucceeded(backup1));
-
-      loadTable(TEST_UTIL.getConnection().getTable(tableName1));
-      expectedRowCount = expectedRowCount + NB_ROWS_IN_BATCH;
-      performBulkLoad("bulkPreIncr", methodName, tableName1);
-      expectedRowCount += ROWS_IN_BULK_LOAD;
-      assertEquals(expectedRowCount, TEST_UTIL.countRows(tableName1));
-      assertEquals(1, systemTable.readBulkloadRows(List.of(tableName1)).size());
-
-      loadTable(TEST_UTIL.getConnection().getTable(tableName1));
-      Thread.sleep(5000);
-
-      // Incremental backup
-      String backup2 =
-        backupTables(BackupType.INCREMENTAL, List.of(tableName1), BACKUP_ROOT_DIR, true);
-      assertTrue(checkSucceeded(backup2));
-      assertEquals(0, systemTable.readBulkloadRows(List.of(tableName1)).size());
-
-      performBulkLoad("bulkPostIncr", methodName, tableName1);
-      assertEquals(1, systemTable.readBulkloadRows(List.of(tableName1)).size());
-
-      loadTable(TEST_UTIL.getConnection().getTable(tableName1));
-      Thread.sleep(10000);
-      long restoreTs = BackupUtils.getReplicationCheckpoint(TEST_UTIL.getConnection());
-
-      // expect restore failure due to no backup post bulkPostIncr bulkload
-      TableName restoredTable = TableName.valueOf("restoredTable");
-      String[] args = PITRTestUtil.buildPITRArgs(new TableName[] { tableName1 },
-        new TableName[] { restoredTable }, restoreTs, null);
-      int ret = ToolRunner.run(conf1, new PointInTimeRestoreDriver(), args);
-      assertNotEquals("Restore should fail since there is one bulkload without any backup", 0, ret);
-    }
-  }
-
   private void performBulkLoad(String keyPrefix, String testDir, TableName tableName)
     throws IOException {
     FileSystem fs = TEST_UTIL.getTestFileSystem();

hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestPointInTimeRestore.java

Lines changed: 2 additions & 2 deletions

@@ -67,8 +67,8 @@ private static void setUpBackups() throws Exception {
     // Simulate a backup taken 20 days ago
     EnvironmentEdgeManager
       .injectEdge(() -> System.currentTimeMillis() - 20 * ONE_DAY_IN_MILLISECONDS);
-    // Insert initial data into table1
-    PITRTestUtil.loadRandomData(TEST_UTIL, table1, famName, 1000);
+    PITRTestUtil.loadRandomData(TEST_UTIL, table1, famName, 1000); // Insert initial data into
+                                                                   // table1

     // Perform a full backup for table1 with continuous backup enabled
     String[] args =

hbase-protocol-shaded/src/main/protobuf/Backup.proto

Lines changed: 0 additions & 1 deletion

@@ -65,7 +65,6 @@ message BackupImage {
   optional uint64 complete_ts = 6;
   repeated BackupImage ancestors = 7;
   repeated TableServerTimestamp tst_map = 8;
-  optional uint64 incr_committed_wal_ts = 9;

 }


hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java

Lines changed: 0 additions & 5 deletions

@@ -1197,11 +1197,6 @@ public int run(String[] args) throws Exception {
   public static void main(String[] args) throws Exception {
     Configuration conf = HBaseConfiguration.create();
     int ret = ToolRunner.run(conf, new BulkLoadHFilesTool(conf), args);
-    if (ret == 0) {
-      System.out.println("Bulk load completed successfully.");
-      System.out.println("IMPORTANT: Please take a backup of the table immediately if this table "
-        + "is part of continuous backup");
-    }
     System.exit(ret);
   }
