@@ -23,6 +23,7 @@
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -32,15 +33,20 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.BackupCopyJob;
+import org.apache.hadoop.hbase.backup.BackupInfo;
 import org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase;
 import org.apache.hadoop.hbase.backup.BackupRequest;
 import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
 import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.HBackupFileSystem;
 import org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupCopyJob;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.mapreduce.WALPlayer;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
@@ -51,6 +57,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

+import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
+
 /**
  * Incremental backup implementation. See the {@link #execute() execute} method.
  */
@@ -262,6 +270,18 @@ private void updateFileLists(List<String> activeFiles, List<String> archiveFiles
   @Override
   public void execute() throws IOException {
     try {
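+      // Fail fast if any table's column families no longer match its most recent full
+      // backup; an incremental image taken across a schema change may not restore cleanly.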
+      Map<TableName, String> tablesToFullBackupIds = getFullBackupIds();
+
+      try (BackupAdminImpl backupAdmin = new BackupAdminImpl(conn)) {
+        for (TableName tn : backupInfo.getTables()) {
+          String fullBackupId = tablesToFullBackupIds.get(tn);
+          BackupInfo fullBackupInfo = backupAdmin.getBackupInfo(fullBackupId);
+          verifyHtd(tn, fullBackupInfo);
+        }
+      }
+
       // case PREPARE_INCREMENTAL:
       beginBackup(backupManager, backupInfo);
       backupInfo.setPhase(BackupPhase.PREPARE_INCREMENTAL);
@@ -434,4 +452,76 @@ protected Path getBulkOutputDir() {
     path = new Path(path, backupId);
     return path;
   }
+
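+  /**
+   * Returns, for each table, the id of the most recent full backup among this
+   * backup's ancestors.
+   */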
+  private Map<TableName, String> getFullBackupIds() throws IOException {
+    // Ancestors are stored from newest to oldest, so iterate backwards (oldest first)
+    // and let later puts overwrite earlier ones; each table ends up mapped to the
+    // id of its most recent full backup.
+    List<BackupManifest.BackupImage> images = getAncestors(backupInfo);
+    Map<TableName, String> results = new HashMap<>();
+    for (int i = images.size() - 1; i >= 0; i--) {
+      BackupManifest.BackupImage image = images.get(i);
+      if (image.getType() != BackupType.FULL) {
+        continue;
+      }
+
+      for (TableName tn : image.getTableNames()) {
+        results.put(tn, image.getBackupId());
+      }
+    }
+    return results;
+  }
+
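+  /**
+   * Verifies that the table's current descriptor declares the same column families as the
+   * table descriptor stored in the full backup's snapshot manifest.
+   */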
+  private void verifyHtd(TableName tn, BackupInfo fullBackupInfo) throws IOException {
+    try (Admin admin = conn.getAdmin()) {
+      ColumnFamilyDescriptor[] currentCfs = admin.getDescriptor(tn).getColumnFamilies();
+      String snapshotName = fullBackupInfo.getSnapshotName(tn);
+      Path root = HBackupFileSystem.getTableBackupPath(tn,
+        new Path(fullBackupInfo.getBackupRootDir()), fullBackupInfo.getBackupId());
+      Path manifestDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, root);
+
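+      // The backup root dir may live on a filesystem other than the cluster default
+      // (e.g. a remote HDFS or an object store), so resolve the filesystem from its URI.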
+      FileSystem fs;
+      try {
+        fs = FileSystem.get(new URI(fullBackupInfo.getBackupRootDir()), conf);
+      } catch (URISyntaxException e) {
+        throw new IOException("Unable to get filesystem for backup root dir", e);
+      }
+
+      SnapshotProtos.SnapshotDescription snapshotDescription =
+        SnapshotDescriptionUtils.readSnapshotInfo(fs, manifestDir);
+      SnapshotManifest manifest = SnapshotManifest.open(conf, fs, manifestDir, snapshotDescription);
+
+      ColumnFamilyDescriptor[] backupCfs = manifest.getTableDescriptor().getColumnFamilies();
+      verifyCfs(tn, currentCfs, backupCfs);
+    }
+  }
+
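+  /**
+   * Compares column families by name, positionally; this assumes getColumnFamilies()
+   * returns families in a consistent (name-sorted) order for both descriptors.
+   */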
+  private static void verifyCfs(TableName tn, ColumnFamilyDescriptor[] currentCfs,
+    ColumnFamilyDescriptor[] backupCfs) throws IOException {
+    if (currentCfs.length != backupCfs.length) {
+      throw ColumnFamilyMismatchException.create(tn, currentCfs, backupCfs);
+    }
+
+    for (int i = 0; i < backupCfs.length; i++) {
+      String currentCf = currentCfs[i].getNameAsString();
+      String backupCf = backupCfs[i].getNameAsString();
+
+      if (!currentCf.equals(backupCf)) {
+        throw ColumnFamilyMismatchException.create(tn, currentCfs, backupCfs);
+      }
+    }
+  }
 }