diff --git a/.rubocop.yml b/.rubocop.yml index f877a052eea6..e1eb10a9245b 100644 --- a/.rubocop.yml +++ b/.rubocop.yml @@ -9,3 +9,11 @@ Layout/LineLength: Metrics/MethodLength: Max: 75 + +GlobalVars: + AllowedVariables: + - $CUST1_ENCODED + - $CUST1_ALIAS + - $GLOB_CUST_ENCODED + - $TEST + - $TEST_CLUSTER \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 1c08ec3b26fd..078ac5997477 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -2664,4 +2664,10 @@ List getLogEntries(Set serverNames, String logType, Server @InterfaceAudience.Private void restoreBackupSystemTable(String snapshotName) throws IOException; + + /** + * Refresh the system key cache on all specified region servers. + * @param regionServers the set of region servers to refresh the system key cache on + */ + void refreshSystemKeyCacheOnAllServers(Set regionServers) throws IOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java index e6bf6c3d28e0..5ae99b00a7e0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java @@ -1146,4 +1146,9 @@ public List getCachedFilesList(ServerName serverName) throws IOException public void restoreBackupSystemTable(String snapshotName) throws IOException { get(admin.restoreBackupSystemTable(snapshotName)); } + + @Override + public void refreshSystemKeyCacheOnAllServers(Set regionServers) throws IOException { + get(admin.refreshSystemKeyCacheOnAllServers(regionServers)); + } } diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index ec0556f20ac1..a10746f2726b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -1874,4 +1874,10 @@ CompletableFuture> getLogEntries(Set serverNames, Str @InterfaceAudience.Private CompletableFuture restoreBackupSystemTable(String snapshotName); + + /** + * Refresh the system key cache on all specified region servers. + * @param regionServers the list of region servers to refresh the system key cache on + */ + CompletableFuture refreshSystemKeyCacheOnAllServers(Set regionServers); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java index b1fb2be13547..d135063ec9a2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java @@ -686,6 +686,11 @@ public CompletableFuture updateConfiguration(String groupName) { return wrap(rawAdmin.updateConfiguration(groupName)); } + @Override + public CompletableFuture refreshSystemKeyCacheOnAllServers(Set regionServers) { + return wrap(rawAdmin.refreshSystemKeyCacheOnAllServers(regionServers)); + } + @Override public CompletableFuture rollWALWriter(ServerName serverName) { return wrap(rawAdmin.rollWALWriter(serverName)); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 710c8c430386..8a5033ff9b18 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -150,6 +150,7 @@ 
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.EmptyMsg; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.LastHighestWalFilenum; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; @@ -4662,4 +4663,28 @@ MasterProtos.RestoreBackupSystemTableResponse> procedureCall(request, MasterProtos.RestoreBackupSystemTableResponse::getProcId, new RestoreBackupSystemTableProcedureBiConsumer()); } + + @Override + public CompletableFuture refreshSystemKeyCacheOnAllServers(Set regionServers) { + CompletableFuture future = new CompletableFuture<>(); + List> futures = + regionServers.stream().map(this::refreshSystemKeyCache).collect(Collectors.toList()); + addListener(CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])), + (result, err) -> { + if (err != null) { + future.completeExceptionally(err); + } else { + future.complete(result); + } + }); + return future; + } + + private CompletableFuture refreshSystemKeyCache(ServerName serverName) { + return this. newAdminCaller() + .action((controller, stub) -> this. 
adminCall(controller, stub, + EmptyMsg.getDefaultInstance(), + (s, c, req, done) -> s.refreshSystemKeyCache(controller, req, done), resp -> null)) + .serverName(serverName).call(); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminClient.java index e72e3c978ada..01a5574443d5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminClient.java @@ -25,19 +25,18 @@ import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; import org.apache.hadoop.hbase.io.crypto.ManagedKeyState; import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos; -import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysRequest; -import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysResponse; +import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeyRequest; +import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeyResponse; import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.EmptyMsg; @InterfaceAudience.Public public class KeymetaAdminClient implements KeymetaAdmin { - private static final Logger LOG = LoggerFactory.getLogger(KeymetaAdminClient.class); private ManagedKeysProtos.ManagedKeysService.BlockingInterface stub; public KeymetaAdminClient(Connection conn) throws IOException { @@ -46,38 +45,54 @@ public KeymetaAdminClient(Connection conn) throws IOException { } @Override - public List enableKeyManagement(String keyCust, String 
keyNamespace) + public ManagedKeyData enableKeyManagement(byte[] keyCust, String keyNamespace) throws IOException { try { - ManagedKeysProtos.GetManagedKeysResponse response = stub.enableKeyManagement(null, - ManagedKeysRequest.newBuilder().setKeyCust(keyCust).setKeyNamespace(keyNamespace).build()); - return generateKeyDataList(response); + ManagedKeysProtos.ManagedKeyResponse response = + stub.enableKeyManagement(null, ManagedKeyRequest.newBuilder() + .setKeyCust(ByteString.copyFrom(keyCust)).setKeyNamespace(keyNamespace).build()); + return generateKeyData(response); } catch (ServiceException e) { throw ProtobufUtil.handleRemoteException(e); } } @Override - public List getManagedKeys(String keyCust, String keyNamespace) + public List getManagedKeys(byte[] keyCust, String keyNamespace) throws IOException, KeyException { try { - ManagedKeysProtos.GetManagedKeysResponse statusResponse = stub.getManagedKeys(null, - ManagedKeysRequest.newBuilder().setKeyCust(keyCust).setKeyNamespace(keyNamespace).build()); + ManagedKeysProtos.GetManagedKeysResponse statusResponse = + stub.getManagedKeys(null, ManagedKeyRequest.newBuilder() + .setKeyCust(ByteString.copyFrom(keyCust)).setKeyNamespace(keyNamespace).build()); return generateKeyDataList(statusResponse); } catch (ServiceException e) { throw ProtobufUtil.handleRemoteException(e); } } + @Override + public boolean rotateSTK() throws IOException { + try { + ManagedKeysProtos.RotateSTKResponse response = + stub.rotateSTK(null, EmptyMsg.getDefaultInstance()); + return response.getRotated(); + } catch (ServiceException e) { + throw ProtobufUtil.handleRemoteException(e); + } + } + private static List generateKeyDataList(ManagedKeysProtos.GetManagedKeysResponse stateResponse) { List keyStates = new ArrayList<>(); - for (ManagedKeysResponse state : stateResponse.getStateList()) { - keyStates - .add(new ManagedKeyData(state.getKeyCustBytes().toByteArray(), state.getKeyNamespace(), - null, ManagedKeyState.forValue((byte) 
state.getKeyState().getNumber()), - state.getKeyMetadata(), state.getRefreshTimestamp())); + for (ManagedKeyResponse state : stateResponse.getStateList()) { + keyStates.add(generateKeyData(state)); } return keyStates; } + + private static ManagedKeyData generateKeyData(ManagedKeysProtos.ManagedKeyResponse response) { + return new ManagedKeyData(response.getKeyCust().toByteArray(), response.getKeyNamespace(), null, + ManagedKeyState.forValue((byte) response.getKeyState().getNumber()), + response.getKeyMetadata(), response.getRefreshTimestamp()); + } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java index e8d965adebba..56a6ad211731 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java @@ -506,12 +506,11 @@ public static void decryptWithSubjectKey(OutputStream out, InputStream in, int o // is configured String alternateAlgorithm = conf.get(HConstants.CRYPTO_ALTERNATE_KEY_ALGORITHM_CONF_KEY); if (alternateAlgorithm != null) { - if (LOG.isDebugEnabled()) { - LOG.debug("Unable to decrypt data with current cipher algorithm '" - + conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES) - + "'. Trying with the alternate cipher algorithm '" + alternateAlgorithm - + "' configured."); - } + LOG.debug( + "Unable to decrypt data with current cipher algorithm '{}'. 
" + + "Trying with the alternate cipher algorithm '{}' configured.", + conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES), + alternateAlgorithm); Cipher alterCipher = Encryption.getCipher(conf, alternateAlgorithm); if (alterCipher == null) { throw new RuntimeException("Cipher '" + alternateAlgorithm + "' not available"); @@ -575,7 +574,7 @@ private static Object createProvider(final Configuration conf, String classNameK throw new RuntimeException(e); } keyProviderCache.put(providerCacheKey, provider); - LOG.debug("Installed " + providerClassName + " into key provider cache"); + LOG.debug("Installed {} into key provider cache", providerClassName); } return provider; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdmin.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdmin.java index be4f36d88023..4bf79090c3be 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdmin.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdmin.java @@ -31,23 +31,31 @@ public interface KeymetaAdmin { /** * Enables key management for the specified custodian and namespace. - * @param keyCust The key custodian in base64 encoded format. + * @param keyCust The key custodian identifier. * @param keyNamespace The namespace for the key management. * @return The list of {@link ManagedKeyData} objects each identifying the key and its current * status. * @throws IOException if an error occurs while enabling key management. */ - List enableKeyManagement(String keyCust, String keyNamespace) + ManagedKeyData enableKeyManagement(byte[] keyCust, String keyNamespace) throws IOException, KeyException; /** * Get the status of all the keys for the specified custodian. - * @param keyCust The key custodian in base64 encoded format. + * @param keyCust The key custodian identifier. * @param keyNamespace The namespace for the key management. 
* @return The list of {@link ManagedKeyData} objects each identifying the key and its current * status. * @throws IOException if an error occurs while enabling key management. */ - List getManagedKeys(String keyCust, String keyNamespace) + List getManagedKeys(byte[] keyCust, String keyNamespace) throws IOException, KeyException; + + /** + * Triggers rotation of the System Key (STK) by checking for a new key and propagating it to all + * region servers. + * @return true if a new STK was found and rotated, false if no change was detected + * @throws IOException if an error occurs while rotating the STK + */ + boolean rotateSTK() throws IOException; } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/MockManagedKeyProvider.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/MockManagedKeyProvider.java index 6782a7d11636..a4c0d6833477 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/MockManagedKeyProvider.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/MockManagedKeyProvider.java @@ -44,6 +44,9 @@ public class MockManagedKeyProvider extends MockAesKeyProvider implements Manage private Map keyState = new HashMap<>(); private String systemKeyAlias = "default_system_key_alias"; + private boolean shouldThrowExceptionOnGetSystemKey = false; + private boolean shouldThrowExceptionOnGetManagedKey = false; + @Override public void initConfig(Configuration conf, String providerParameters) { super.init(providerParameters); @@ -51,11 +54,17 @@ public void initConfig(Configuration conf, String providerParameters) { @Override public ManagedKeyData getSystemKey(byte[] systemId) throws IOException { + if (shouldThrowExceptionOnGetSystemKey) { + throw new IOException("Test exception on getSystemKey"); + } return getKey(systemId, systemKeyAlias, ManagedKeyData.KEY_SPACE_GLOBAL); } @Override public ManagedKeyData getManagedKey(byte[] key_cust, String key_namespace) throws IOException { + if 
(shouldThrowExceptionOnGetManagedKey) { + throw new IOException("Test exception on getManagedKey"); + } String alias = Bytes.toString(key_cust); return getKey(key_cust, alias, key_namespace); } @@ -118,6 +127,14 @@ public String getSystemKeyAlias() { return this.systemKeyAlias; } + public void setShouldThrowExceptionOnGetSystemKey(boolean shouldThrowExceptionOnGetSystemKey) { + this.shouldThrowExceptionOnGetSystemKey = shouldThrowExceptionOnGetSystemKey; + } + + public void setShouldThrowExceptionOnGetManagedKey(boolean shouldThrowExceptionOnGetManagedKey) { + this.shouldThrowExceptionOnGetManagedKey = shouldThrowExceptionOnGetManagedKey; + } + /** * Generate a new secret key. * @return the key diff --git a/hbase-protocol-shaded/src/main/protobuf/server/ManagedKeys.proto b/hbase-protocol-shaded/src/main/protobuf/server/ManagedKeys.proto index c6a3a31f6183..8e633fc25bab 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/ManagedKeys.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/ManagedKeys.proto @@ -24,8 +24,10 @@ option java_generic_services = true; option java_generate_equals_and_hash = true; option optimize_for = SPEED; -message ManagedKeysRequest { - required string key_cust = 1; +import "HBase.proto"; + +message ManagedKeyRequest { + required bytes key_cust = 1; required string key_namespace = 2; } @@ -36,8 +38,8 @@ enum ManagedKeyState { KEY_DISABLED = 4; } -message ManagedKeysResponse { - required string key_cust = 1; +message ManagedKeyResponse { + required bytes key_cust = 1; required string key_namespace = 2; required ManagedKeyState key_state = 3; optional string key_metadata = 4; @@ -45,12 +47,18 @@ message ManagedKeysResponse { } message GetManagedKeysResponse { - repeated ManagedKeysResponse state = 1; + repeated ManagedKeyResponse state = 1; +} + +message RotateSTKResponse { + required bool rotated = 1; } service ManagedKeysService { - rpc EnableKeyManagement(ManagedKeysRequest) - returns (GetManagedKeysResponse); - rpc 
GetManagedKeys(ManagedKeysRequest) + rpc EnableKeyManagement(ManagedKeyRequest) + returns (ManagedKeyResponse); + rpc GetManagedKeys(ManagedKeyRequest) returns (GetManagedKeysResponse); + rpc RotateSTK(EmptyMsg) + returns (RotateSTKResponse); } diff --git a/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto b/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto index 30eb328fd3cd..31abe9ad4176 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/region/Admin.proto @@ -420,4 +420,7 @@ service AdminService { rpc GetCachedFilesList(GetCachedFilesListRequest) returns(GetCachedFilesListResponse); + + rpc RefreshSystemKeyCache(EmptyMsg) + returns(EmptyMsg); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java index 0993fc0f09da..eb1502685fe6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java @@ -194,7 +194,7 @@ public abstract class HBaseServerBase> extends protected final NettyEventLoopGroupConfig eventLoopGroupConfig; - private SystemKeyCache systemKeyCache; + protected SystemKeyCache systemKeyCache; protected KeymetaAdminImpl keymetaAdmin; protected ManagedKeyDataCache managedKeyDataCache; @@ -437,6 +437,17 @@ protected void buildSystemKeyCache() throws IOException { } } + /** + * Rebuilds the system key cache. This method can be called to refresh the system key cache when + * the system key has been rotated. 
+ * @throws IOException if there is an error rebuilding the cache + */ + public void rebuildSystemKeyCache() throws IOException { + if (SecurityUtil.isKeyManagementEnabled(conf)) { + systemKeyCache = SystemKeyCache.createCache(new SystemKeyAccessor(this)); + } + } + protected final void shutdownChore(ScheduledChore chore) { if (chore != null) { chore.shutdown(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java index 81707fe1f16b..34e70634498a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionServerAdmin.java @@ -66,6 +66,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.EmptyMsg; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse; @@ -217,4 +218,8 @@ public CompletableFuture getRegionLoad(GetRegionLoadReque executeProcedures(ExecuteProceduresRequest request) { return call((stub, controller, done) -> stub.executeProcedures(controller, request, done)); } + + public CompletableFuture refreshSystemKeyCache(EmptyMsg request) { + return call((stub, controller, done) -> stub.refreshSystemKeyCache(controller, request, done)); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java index 6fbd177437fe..e263ccb4fbef 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java @@ -134,12 +134,12 @@ protected ManagedKeyData retrieveActiveKey(String encKeyCust, byte[] key_cust, /* * Will be useful when refresh API is implemented. if (existingActiveKey != null && * existingActiveKey.equals(pbeKey)) { - * LOG.info("retrieveManagedKey: no change in key for (custodian: {}, namespace: {}", - * encKeyCust, keyNamespace); return null; } // TODO: If existingActiveKey is not null, we - * should update the key state to INACTIVE. + * LOG.info("retrieveActiveKey: no change in key for (custodian: {}, namespace: {}", encKeyCust, + * keyNamespace); return null; } // TODO: If existingActiveKey is not null, we should update the + * key state to INACTIVE. */ LOG.info( - "retrieveManagedKey: got managed key with status: {} and metadata: {} for " + "retrieveActiveKey: got active key with status: {} and metadata: {} for " + "(custodian: {}, namespace: {})", pbeKey.getKeyState(), pbeKey.getKeyMetadata(), encKeyCust, pbeKey.getKeyNamespace()); if (accessor != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminImpl.java index 4c16d2b59aa7..030a278f023e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminImpl.java @@ -19,11 +19,15 @@ import java.io.IOException; import java.security.KeyException; -import java.util.Collections; import java.util.List; +import java.util.Set; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.AsyncAdmin; import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider; +import 
org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -37,34 +41,70 @@ public KeymetaAdminImpl(Server server) { } @Override - public List enableKeyManagement(String keyCust, String keyNamespace) + public ManagedKeyData enableKeyManagement(byte[] keyCust, String keyNamespace) throws IOException, KeyException { assertKeyManagementEnabled(); - LOG.info("Trying to enable key management on custodian: {} under namespace: {}", keyCust, + String encodedCust = ManagedKeyProvider.encodeToStr(keyCust); + LOG.info("Trying to enable key management on custodian: {} under namespace: {}", encodedCust, keyNamespace); - byte[] key_cust = ManagedKeyProvider.decodeToBytes(keyCust); // Check if (cust, namespace) pair is already enabled and has an active key. - ManagedKeyData activeKey = getActiveKey(key_cust, keyNamespace); + ManagedKeyData activeKey = getActiveKey(keyCust, keyNamespace); if (activeKey != null) { LOG.info( "enableManagedKeys: specified (custodian: {}, namespace: {}) already has " + "an active managed key with metadata: {}", - keyCust, keyNamespace, activeKey.getKeyMetadata()); - return Collections.singletonList(activeKey); + encodedCust, keyNamespace, activeKey.getKeyMetadata()); + return activeKey; } // Retrieve a single key from provider - ManagedKeyData retrievedKey = retrieveActiveKey(keyCust, key_cust, keyNamespace, this, null); - return Collections.singletonList(retrievedKey); + ManagedKeyData retrievedKey = retrieveActiveKey(encodedCust, keyCust, keyNamespace, this, null); + return retrievedKey; } @Override - public List getManagedKeys(String keyCust, String keyNamespace) + public List getManagedKeys(byte[] keyCust, String keyNamespace) throws IOException, KeyException { assertKeyManagementEnabled(); - LOG.info("Getting key statuses for custodian: {} under namespace: {}", keyCust, keyNamespace); - byte[] 
key_cust = ManagedKeyProvider.decodeToBytes(keyCust); - return getAllKeys(key_cust, keyNamespace); + if (LOG.isInfoEnabled()) { + LOG.info("Getting key statuses for custodian: {} under namespace: {}", + ManagedKeyProvider.encodeToStr(keyCust), keyNamespace); + } + return getAllKeys(keyCust, keyNamespace); + } + + @Override + public boolean rotateSTK() throws IOException { + assertKeyManagementEnabled(); + if (!(getServer() instanceof MasterServices)) { + throw new IOException("rotateSTK can only be called on master"); + } + MasterServices master = (MasterServices) getServer(); + + LOG.info("Checking if System Key is rotated"); + boolean rotated = master.rotateSystemKeyIfChanged(); + + if (!rotated) { + LOG.info("No change in System Key is detected"); + return false; + } + + Set regionServers = master.getServerManager().getOnlineServers().keySet(); + + LOG.info("System Key is rotated, initiating cache refresh on all region servers"); + try { + FutureUtils.get(getAsyncAdmin(master).refreshSystemKeyCacheOnAllServers(regionServers)); + } catch (Exception e) { + throw new IOException( + "Failed to initiate System Key cache refresh on one or more region servers", e); + } + + LOG.info("System Key rotation and cache refresh completed successfully"); + return true; + } + + protected AsyncAdmin getAsyncAdmin(MasterServices master) { + return master.getAsyncClusterConnection().getAdmin(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaServiceEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaServiceEndpoint.java index 4eb19a602cc0..85ff9ba3feb1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaServiceEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaServiceEndpoint.java @@ -19,10 +19,10 @@ import java.io.IOException; import java.security.KeyException; -import java.util.Base64; import java.util.Collections; import java.util.List; import 
org.apache.hadoop.hbase.CoprocessorEnvironment; +import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor; import org.apache.hadoop.hbase.coprocessor.HasMasterServices; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; @@ -31,18 +31,20 @@ import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos; import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.GetManagedKeysResponse; -import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysRequest; -import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysResponse; +import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeyRequest; +import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeyResponse; import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysService; +import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.RotateSTKResponse; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; import org.apache.hbase.thirdparty.com.google.protobuf.Service; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.EmptyMsg; + /** * This class implements a coprocessor service endpoint for the key management metadata operations. * It handles the following methods: This endpoint is designed to work in conjunction with the @@ -100,47 +102,72 @@ public class KeymetaAdminServiceImpl extends ManagedKeysService { * @param done The callback to be invoked with the response. 
*/ @Override - public void enableKeyManagement(RpcController controller, ManagedKeysRequest request, - RpcCallback done) { - ManagedKeysResponse.Builder builder = getResponseBuilder(controller, request); - if (builder.getKeyCust() != null && !builder.getKeyCust().isEmpty()) { - try { - List managedKeyStates = master.getKeymetaAdmin() - .enableKeyManagement(request.getKeyCust(), request.getKeyNamespace()); - done.run(generateKeyStateResponse(managedKeyStates, builder)); - } catch (IOException e) { - CoprocessorRpcUtils.setControllerException(controller, e); - } catch (KeyException e) { - CoprocessorRpcUtils.setControllerException(controller, new IOException(e)); - } + public void enableKeyManagement(RpcController controller, ManagedKeyRequest request, + RpcCallback done) { + ManagedKeyResponse response = null; + ManagedKeyResponse.Builder builder = ManagedKeyResponse.newBuilder(); + try { + initManagedKeyResponseBuilder(controller, request, builder); + ManagedKeyData managedKeyState = master.getKeymetaAdmin() + .enableKeyManagement(request.getKeyCust().toByteArray(), request.getKeyNamespace()); + response = generateKeyStateResponse(managedKeyState, builder); + } catch (IOException | KeyException e) { + CoprocessorRpcUtils.setControllerException(controller, new DoNotRetryIOException(e)); + builder.setKeyState(ManagedKeysProtos.ManagedKeyState.KEY_FAILED); } + if (response == null) { + response = builder.build(); + } + done.run(response); } @Override - public void getManagedKeys(RpcController controller, ManagedKeysRequest request, + public void getManagedKeys(RpcController controller, ManagedKeyRequest request, RpcCallback done) { - ManagedKeysResponse.Builder builder = getResponseBuilder(controller, request); - if (builder.getKeyCust() != null && !builder.getKeyCust().isEmpty()) { - try { - List managedKeyStates = master.getKeymetaAdmin() - .getManagedKeys(request.getKeyCust(), request.getKeyNamespace()); - done.run(generateKeyStateResponse(managedKeyStates, 
builder)); - } catch (IOException e) { - CoprocessorRpcUtils.setControllerException(controller, e); - } catch (KeyException e) { - CoprocessorRpcUtils.setControllerException(controller, new IOException(e)); - } + GetManagedKeysResponse keyStateResponse = null; + ManagedKeyResponse.Builder builder = ManagedKeyResponse.newBuilder(); + try { + initManagedKeyResponseBuilder(controller, request, builder); + List managedKeyStates = master.getKeymetaAdmin() + .getManagedKeys(request.getKeyCust().toByteArray(), request.getKeyNamespace()); + keyStateResponse = generateKeyStateResponse(managedKeyStates, builder); + } catch (IOException | KeyException e) { + CoprocessorRpcUtils.setControllerException(controller, new DoNotRetryIOException(e)); + } + if (keyStateResponse == null) { + keyStateResponse = GetManagedKeysResponse.getDefaultInstance(); + } + done.run(keyStateResponse); + } + + /** + * Rotates the system key (STK) by checking for a new key and propagating it to all region + * servers. + * @param controller The RPC controller. + * @param request The request (empty). + * @param done The callback to be invoked with the response. 
+ */ + @Override + public void rotateSTK(RpcController controller, EmptyMsg request, + RpcCallback done) { + boolean rotated; + try { + rotated = master.getKeymetaAdmin().rotateSTK(); + } catch (IOException e) { + CoprocessorRpcUtils.setControllerException(controller, new DoNotRetryIOException(e)); + rotated = false; } + done.run(RotateSTKResponse.newBuilder().setRotated(rotated).build()); } } @InterfaceAudience.Private - public static ManagedKeysResponse.Builder getResponseBuilder(RpcController controller, - ManagedKeysRequest request) { - ManagedKeysResponse.Builder builder = ManagedKeysResponse.newBuilder(); - byte[] key_cust = convertToKeyCustBytes(controller, request, builder); - if (key_cust != null) { - builder.setKeyCustBytes(ByteString.copyFrom(key_cust)); + public static ManagedKeyResponse.Builder initManagedKeyResponseBuilder(RpcController controller, + ManagedKeyRequest request, ManagedKeyResponse.Builder builder) throws IOException { + builder.setKeyCust(request.getKeyCust()); + builder.setKeyNamespace(request.getKeyNamespace()); + if (request.getKeyCust().isEmpty()) { + throw new IOException("key_cust must not be empty"); } return builder; } @@ -148,29 +175,19 @@ public static ManagedKeysResponse.Builder getResponseBuilder(RpcController contr // Assumes that all ManagedKeyData objects belong to the same custodian and namespace. 
@InterfaceAudience.Private public static GetManagedKeysResponse generateKeyStateResponse( - List managedKeyStates, ManagedKeysResponse.Builder builder) { + List managedKeyStates, ManagedKeyResponse.Builder builder) { GetManagedKeysResponse.Builder responseBuilder = GetManagedKeysResponse.newBuilder(); for (ManagedKeyData keyData : managedKeyStates) { - builder - .setKeyState(ManagedKeysProtos.ManagedKeyState.forNumber(keyData.getKeyState().getVal())) - .setKeyMetadata(keyData.getKeyMetadata()).setRefreshTimestamp(keyData.getRefreshTimestamp()) - .setKeyNamespace(keyData.getKeyNamespace()); - responseBuilder.addState(builder.build()); + responseBuilder.addState(generateKeyStateResponse(keyData, builder)); } return responseBuilder.build(); } - @InterfaceAudience.Private - public static byte[] convertToKeyCustBytes(RpcController controller, ManagedKeysRequest request, - ManagedKeysResponse.Builder builder) { - byte[] key_cust = null; - try { - key_cust = Base64.getDecoder().decode(request.getKeyCust()); - } catch (IllegalArgumentException e) { - builder.setKeyState(ManagedKeysProtos.ManagedKeyState.KEY_FAILED); - CoprocessorRpcUtils.setControllerException(controller, new IOException( - "Failed to decode specified prefix as Base64 string: " + request.getKeyCust(), e)); - } - return key_cust; + private static ManagedKeyResponse generateKeyStateResponse(ManagedKeyData keyData, + ManagedKeyResponse.Builder builder) { + builder.setKeyState(ManagedKeysProtos.ManagedKeyState.forNumber(keyData.getKeyState().getVal())) + .setKeyMetadata(keyData.getKeyMetadata()).setRefreshTimestamp(keyData.getRefreshTimestamp()) + .setKeyNamespace(keyData.getKeyNamespace()); + return builder.build(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index d369a19d969e..7037656419b3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -119,6 +119,7 @@ import org.apache.hadoop.hbase.favored.FavoredNodesManager; import org.apache.hadoop.hbase.http.HttpServer; import org.apache.hadoop.hbase.http.InfoServer; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; @@ -1640,6 +1641,17 @@ public MasterWalManager getMasterWalManager() { return this.walManager; } + @Override + public boolean rotateSystemKeyIfChanged() throws IOException { + ManagedKeyData newKey = this.systemKeyManager.rotateSystemKeyIfChanged(); + if (newKey != null) { + this.systemKeyCache = null; + buildSystemKeyCache(); + return true; + } + return false; + } + @Override public SplitWALManager getSplitWALManager() { return splitWALManager; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index e9e0f970ef8d..c63a1e7e8ecf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -193,6 +193,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.EmptyMsg; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier; @@ -3621,6 +3622,12 @@ public GetCachedFilesListResponse 
getCachedFilesList(RpcController controller, throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); } + @Override + public EmptyMsg refreshSystemKeyCache(RpcController controller, EmptyMsg request) + throws ServiceException { + throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); + } + @Override public GetLiveRegionServersResponse getLiveRegionServers(RpcController controller, GetLiveRegionServersRequest request) throws ServiceException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index dee9b48f9ea5..745b962860bb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -87,6 +87,9 @@ public interface MasterServices extends Server, KeyManagementService { /** Returns Master's WALs {@link MasterWalManager} utility class. */ MasterWalManager getMasterWalManager(); + /** Rotates the system key if changed, returns true if a new key was detected and rotated */ + boolean rotateSystemKeyIfChanged() throws IOException; + /** Returns Master's {@link ServerManager} instance. 
*/ ServerManager getServerManager(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index fdfea375e096..ed2f81a947d8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -234,6 +234,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.EmptyMsg; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameInt64Pair; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier; @@ -4058,6 +4059,29 @@ public GetCachedFilesListResponse getCachedFilesList(RpcController controller, return responseBuilder.addAllCachedFiles(fullyCachedFiles).build(); } + /** + * Refreshes the system key cache on the region server by rebuilding it with the latest keys. This + * is called by the master when a system key rotation has occurred. 
+ * @param controller the RPC controller + * @param request the request + * @return empty response + */ + @Override + @QosPriority(priority = HConstants.ADMIN_QOS) + public EmptyMsg refreshSystemKeyCache(final RpcController controller, final EmptyMsg request) + throws ServiceException { + try { + checkOpen(); + requestCount.increment(); + LOG.info("Received RefreshSystemKeyCache request, rebuilding system key cache"); + server.rebuildSystemKeyCache(); + return EmptyMsg.getDefaultInstance(); + } catch (IOException ie) { + LOG.error("Failed to rebuild system key cache", ie); + throw new ServiceException(ie); + } + } + RegionScannerContext checkQuotaAndGetRegionScannerContext(ScanRequest request, ScanResponse.Builder builder) throws IOException { if (request.hasScannerId()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java index 3fe2937e4d6a..b104f4687608 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java @@ -82,8 +82,10 @@ public static Encryption.Context createEncryptionContext(Configuration conf, boolean isKeyManagementEnabled = isKeyManagementEnabled(conf); String cipherName = family.getEncryptionType(); String keyNamespace = null; // Will be set by fallback logic - LOG.debug("Creating encryption context for table: {} and column family: {}", - tableDescriptor.getTableName().getNameAsString(), family.getNameAsString()); + if (LOG.isDebugEnabled()) { + LOG.debug("Creating encryption context for table: {} and column family: {}", + tableDescriptor.getTableName().getNameAsString(), family.getNameAsString()); + } if (cipherName != null) { if (!Encryption.isEncryptionEnabled(conf)) { throw new IllegalStateException("Encryption for family '" + family.getNameAsString() @@ -108,10 +110,8 @@ public static Encryption.Context 
createEncryptionContext(Configuration conf, // Scenario 1b: If key management is disabled, unwrap the key using master key. key = EncryptionUtil.unwrapKey(conf, familyKeyBytes); } - if (LOG.isDebugEnabled()) { - LOG.debug("Scenario 1: Use family key for namespace {} cipher: {} " - + "key management enabled: {}", keyNamespace, cipherName, isKeyManagementEnabled); - } + LOG.debug("Scenario 1: Use family key for namespace {} cipher: {} " + + "key management enabled: {}", keyNamespace, cipherName, isKeyManagementEnabled); } catch (KeyException e) { throw new IOException(e); } @@ -133,8 +133,10 @@ public static Encryption.Context createEncryptionContext(Configuration conf, for (String candidate : candidateNamespaces) { if (candidate != null) { // Log information on the table and column family we are looking for the active key in - LOG.debug("Looking for active key for table: {} and column family: {}", - tableDescriptor.getTableName().getNameAsString(), family.getNameAsString()); + if (LOG.isDebugEnabled()) { + LOG.debug("Looking for active key for table: {} and column family: {}", + tableDescriptor.getTableName().getNameAsString(), family.getNameAsString()); + } activeKeyData = managedKeyDataCache .getActiveEntry(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES, candidate); if (activeKeyData != null) { @@ -216,9 +218,7 @@ public static Encryption.Context createEncryptionContext(Configuration conf, Pat ManagedKeyData kekKeyData = null; byte[] keyBytes = trailer.getEncryptionKey(); Encryption.Context cryptoContext = Encryption.Context.NONE; - if (LOG.isDebugEnabled()) { - LOG.debug("Creating encryption context for path: {}", path); - } + LOG.debug("Creating encryption context for path: {}", path); // Check for any key material available if (keyBytes != null) { cryptoContext = Encryption.newContext(conf); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java index 7c884bdd27e4..56c92c873c03 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java @@ -18,14 +18,13 @@ package org.apache.hadoop.hbase.keymeta; import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.ACTIVE; -import static org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeyState.KEY_ACTIVE; -import static org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeyState.KEY_FAILED; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.ArgumentMatchers.contains; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; @@ -37,7 +36,6 @@ import java.security.KeyException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Base64; import java.util.List; import javax.crypto.spec.SecretKeySpec; import org.apache.hadoop.hbase.CoprocessorEnvironment; @@ -46,9 +44,10 @@ import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; import org.apache.hadoop.hbase.keymeta.KeymetaServiceEndpoint.KeymetaAdminServiceImpl; import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos; import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.GetManagedKeysResponse; -import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysRequest; -import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysResponse; +import 
org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeyRequest; +import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeyResponse; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; @@ -80,17 +79,20 @@ public class TestKeymetaEndpoint { @Mock private MasterServices master; @Mock - private RpcCallback done; + private RpcCallback enableKeyManagementDone; @Mock - private KeymetaAdmin keymetaAdmin; + private RpcCallback getManagedKeysDone; KeymetaServiceEndpoint keymetaServiceEndpoint; - private ManagedKeysResponse.Builder responseBuilder; - private ManagedKeysRequest.Builder requestBuilder; + private ManagedKeyResponse.Builder responseBuilder; + private ManagedKeyRequest.Builder requestBuilder; private KeymetaAdminServiceImpl keyMetaAdminService; private ManagedKeyData keyData1; private ManagedKeyData keyData2; + @Mock + private KeymetaAdmin keymetaAdmin; + @Before public void setUp() throws Exception { MockitoAnnotations.initMocks(this); @@ -101,9 +103,10 @@ public void setUp() throws Exception { keymetaServiceEndpoint.start(env); keyMetaAdminService = (KeymetaAdminServiceImpl) keymetaServiceEndpoint.getServices().iterator().next(); - responseBuilder = ManagedKeysResponse.newBuilder().setKeyState(KEY_ACTIVE); + responseBuilder = + ManagedKeyResponse.newBuilder().setKeyState(ManagedKeysProtos.ManagedKeyState.KEY_ACTIVE); requestBuilder = - ManagedKeysRequest.newBuilder().setKeyNamespace(ManagedKeyData.KEY_SPACE_GLOBAL); + ManagedKeyRequest.newBuilder().setKeyNamespace(ManagedKeyData.KEY_SPACE_GLOBAL); keyData1 = new ManagedKeyData(KEY_CUST.getBytes(), KEY_NAMESPACE, new SecretKeySpec("key1".getBytes(), "AES"), ACTIVE, KEY_METADATA1); keyData2 = new ManagedKeyData(KEY_CUST.getBytes(), KEY_NAMESPACE, @@ -112,75 +115,33 @@ public void setUp() throws Exception { } @Test - public void testConvertToKeyCustBytesValid() { - 
// Arrange - String validBase64 = Base64.getEncoder().encodeToString("testKey".getBytes()); - ManagedKeysRequest request = requestBuilder.setKeyCust(validBase64).build(); + public void testCreateResponseBuilderValid() throws IOException { + byte[] cust = "testKey".getBytes(); + ManagedKeyRequest request = requestBuilder.setKeyCust(ByteString.copyFrom(cust)).build(); - // Act - byte[] result = - KeymetaServiceEndpoint.convertToKeyCustBytes(controller, request, responseBuilder); + ManagedKeyResponse.Builder result = ManagedKeyResponse.newBuilder(); + KeymetaServiceEndpoint.initManagedKeyResponseBuilder(controller, request, result); - // Assert assertNotNull(result); - assertArrayEquals("testKey".getBytes(), result); - assertEquals(KEY_ACTIVE, responseBuilder.getKeyState()); + assertArrayEquals(cust, result.getKeyCust().toByteArray()); verify(controller, never()).setFailed(anyString()); } @Test - public void testConvertToKeyCustBytesInvalid() { - // Arrange - String invalidBase64 = "invalid!Base64@String"; - ManagedKeysRequest request = requestBuilder.setKeyCust(invalidBase64).build(); + public void testCreateResponseBuilderEmptyCust() throws IOException { + ManagedKeyRequest request = requestBuilder.setKeyCust(ByteString.EMPTY).build(); - // Act - byte[] result = - KeymetaServiceEndpoint.convertToKeyCustBytes(controller, request, responseBuilder); + IOException exception = assertThrows(IOException.class, () -> KeymetaServiceEndpoint + .initManagedKeyResponseBuilder(controller, request, ManagedKeyResponse.newBuilder())); - // Assert - assertNull(result); - assertEquals(KEY_FAILED, responseBuilder.getKeyState()); - verify(controller).setFailed(anyString()); - } - - @Test - public void testGetResponseBuilder() { - // Arrange - String keyCust = Base64.getEncoder().encodeToString("testKey".getBytes()); - ManagedKeysRequest request = requestBuilder.setKeyCust(keyCust).build(); - - // Act - ManagedKeysResponse.Builder result = - 
KeymetaServiceEndpoint.getResponseBuilder(controller, request); - - // Assert - assertNotNull(result); - assertArrayEquals("testKey".getBytes(), result.getKeyCustBytes().toByteArray()); - verify(controller, never()).setFailed(anyString()); - } - - @Test - public void testGetResponseBuilderWithInvalidBase64() { - // Arrange - String keyCust = "invalidBase64!"; - ManagedKeysRequest request = requestBuilder.setKeyCust(keyCust).build(); - - // Act - ManagedKeysResponse.Builder result = - KeymetaServiceEndpoint.getResponseBuilder(controller, request); - - // Assert - assertNotNull(result); - assertEquals(KEY_FAILED, result.getKeyState()); - verify(controller).setFailed(contains("Failed to decode specified prefix as Base64 string")); + assertEquals("key_cust must not be empty", exception.getMessage()); } @Test public void testGenerateKeyStateResponse() throws Exception { // Arrange - ManagedKeysResponse response = - responseBuilder.setKeyCustBytes(ByteString.copyFrom(keyData1.getKeyCustodian())) + ManagedKeyResponse response = + responseBuilder.setKeyCust(ByteString.copyFrom(keyData1.getKeyCustodian())) .setKeyNamespace(keyData1.getKeyNamespace()).build(); List managedKeyStates = Arrays.asList(keyData1, keyData2); @@ -192,9 +153,10 @@ public void testGenerateKeyStateResponse() throws Exception { assertNotNull(response); assertNotNull(result.getStateList()); assertEquals(2, result.getStateList().size()); - assertEquals(KEY_ACTIVE, result.getStateList().get(0).getKeyState()); + assertEquals(ManagedKeysProtos.ManagedKeyState.KEY_ACTIVE, + result.getStateList().get(0).getKeyState()); assertEquals(0, Bytes.compareTo(keyData1.getKeyCustodian(), - result.getStateList().get(0).getKeyCustBytes().toByteArray())); + result.getStateList().get(0).getKeyCust().toByteArray())); assertEquals(keyData1.getKeyNamespace(), result.getStateList().get(0).getKeyNamespace()); verify(controller, never()).setFailed(anyString()); } @@ -202,8 +164,8 @@ public void testGenerateKeyStateResponse() 
throws Exception { @Test public void testGenerateKeyStateResponse_Empty() throws Exception { // Arrange - ManagedKeysResponse response = - responseBuilder.setKeyCustBytes(ByteString.copyFrom(keyData1.getKeyCustodian())) + ManagedKeyResponse response = + responseBuilder.setKeyCust(ByteString.copyFrom(keyData1.getKeyCustodian())) .setKeyNamespace(keyData1.getKeyNamespace()).build(); List managedKeyStates = new ArrayList<>(); @@ -221,20 +183,22 @@ public void testGenerateKeyStateResponse_Empty() throws Exception { @Test public void testGenerateKeyStatResponse_Success() throws Exception { doTestServiceCallForSuccess((controller, request, done) -> keyMetaAdminService - .enableKeyManagement(controller, request, done)); + .enableKeyManagement(controller, request, done), enableKeyManagementDone); } @Test public void testGetManagedKeys_Success() throws Exception { doTestServiceCallForSuccess( - (controller, request, done) -> keyMetaAdminService.getManagedKeys(controller, request, done)); + (controller, request, done) -> keyMetaAdminService.getManagedKeys(controller, request, done), + getManagedKeysDone); } - private void doTestServiceCallForSuccess(ServiceCall svc) throws Exception { + private void doTestServiceCallForSuccess(ServiceCall svc, RpcCallback done) + throws Exception { // Arrange - ManagedKeysRequest request = requestBuilder.setKeyCust(KEY_CUST).build(); - List managedKeyStates = Arrays.asList(keyData1); - when(keymetaAdmin.enableKeyManagement(any(), any())).thenReturn(managedKeyStates); + ManagedKeyRequest request = + requestBuilder.setKeyCust(ByteString.copyFrom(KEY_CUST.getBytes())).build(); + when(keymetaAdmin.enableKeyManagement(any(), any())).thenReturn(keyData1); // Act svc.call(controller, request, done); @@ -244,39 +208,41 @@ private void doTestServiceCallForSuccess(ServiceCall svc) throws Exception { verify(controller, never()).setFailed(anyString()); } - private interface ServiceCall { - void call(RpcController controller, ManagedKeysRequest request, 
- RpcCallback done) throws Exception; + private interface ServiceCall { + void call(RpcController controller, ManagedKeyRequest request, RpcCallback done) + throws Exception; } @Test public void testGenerateKeyStateResponse_InvalidCust() throws Exception { // Arrange - String invalidBase64 = "invalid!Base64@String"; - ManagedKeysRequest request = requestBuilder.setKeyCust(invalidBase64).build(); + ManagedKeyRequest request = requestBuilder.setKeyCust(ByteString.EMPTY).build(); // Act - keyMetaAdminService.enableKeyManagement(controller, request, done); + keyMetaAdminService.enableKeyManagement(controller, request, enableKeyManagementDone); // Assert - verify(controller).setFailed(contains("IOException")); + verify(controller).setFailed(contains("key_cust must not be empty")); verify(keymetaAdmin, never()).enableKeyManagement(any(), any()); - verify(done, never()).run(any()); + verify(enableKeyManagementDone).run( + argThat(response -> response.getKeyState() == ManagedKeysProtos.ManagedKeyState.KEY_FAILED)); } @Test public void testGenerateKeyStateResponse_IOException() throws Exception { // Arrange when(keymetaAdmin.enableKeyManagement(any(), any())).thenThrow(IOException.class); - ManagedKeysRequest request = requestBuilder.setKeyCust(KEY_CUST).build(); + ManagedKeyRequest request = + requestBuilder.setKeyCust(ByteString.copyFrom(KEY_CUST.getBytes())).build(); // Act - keyMetaAdminService.enableKeyManagement(controller, request, done); + keyMetaAdminService.enableKeyManagement(controller, request, enableKeyManagementDone); // Assert verify(controller).setFailed(contains("IOException")); verify(keymetaAdmin).enableKeyManagement(any(), any()); - verify(done, never()).run(any()); + verify(enableKeyManagementDone).run( + argThat(response -> response.getKeyState() == ManagedKeysProtos.ManagedKeyState.KEY_FAILED)); } @Test @@ -292,29 +258,27 @@ public void testGetManagedKeys_KeyException() throws Exception { private void doTestGetManagedKeysError(Class exType) throws 
Exception { // Arrange when(keymetaAdmin.getManagedKeys(any(), any())).thenThrow(exType); - ManagedKeysRequest request = requestBuilder.setKeyCust(KEY_CUST).build(); + ManagedKeyRequest request = + requestBuilder.setKeyCust(ByteString.copyFrom(KEY_CUST.getBytes())).build(); // Act - keyMetaAdminService.getManagedKeys(controller, request, done); + keyMetaAdminService.getManagedKeys(controller, request, getManagedKeysDone); // Assert verify(controller).setFailed(contains(exType.getSimpleName())); verify(keymetaAdmin).getManagedKeys(any(), any()); - verify(done, never()).run(any()); + verify(getManagedKeysDone).run(GetManagedKeysResponse.getDefaultInstance()); } @Test public void testGetManagedKeys_InvalidCust() throws Exception { // Arrange - String invalidBase64 = "invalid!Base64@String"; - ManagedKeysRequest request = requestBuilder.setKeyCust(invalidBase64).build(); + ManagedKeyRequest request = requestBuilder.setKeyCust(ByteString.EMPTY).build(); - // Act - keyMetaAdminService.getManagedKeys(controller, request, done); + keyMetaAdminService.getManagedKeys(controller, request, getManagedKeysDone); - // Assert - verify(controller).setFailed(contains("IOException")); + verify(controller).setFailed(contains("key_cust must not be empty")); verify(keymetaAdmin, never()).getManagedKeys(any(), any()); - verify(done, never()).run(any()); + verify(getManagedKeysDone).run(argThat(response -> response.getStateList().isEmpty())); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeymeta.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeymeta.java index 63f05e7ee5e9..75beb9f8370f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeymeta.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeymeta.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hbase.keymeta; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; 
+import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; @@ -29,14 +31,16 @@ import java.lang.reflect.Field; import java.security.KeyException; import java.util.List; +import java.util.function.BiFunction; +import java.util.function.Function; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; -import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider; import org.apache.hadoop.hbase.io.crypto.ManagedKeyState; import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos; +import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.junit.ClassRule; @@ -45,6 +49,9 @@ import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; +/** + * Tests the admin API via both RPC and local calls. 
+ */ @Category({ MasterTests.class, MediumTests.class }) public class TestManagedKeymeta extends ManagedKeyTestBase { @@ -70,76 +77,183 @@ private void doTestEnable(KeymetaAdmin adminClient) throws IOException, KeyExcep MockManagedKeyProvider managedKeyProvider = (MockManagedKeyProvider) Encryption.getManagedKeyProvider(master.getConfiguration()); String cust = "cust1"; - String encodedCust = ManagedKeyProvider.encodeToStr(cust.getBytes()); - List managedKeyStates = - adminClient.enableKeyManagement(encodedCust, ManagedKeyData.KEY_SPACE_GLOBAL); - assertKeyDataListSingleKey(managedKeyStates, ManagedKeyState.ACTIVE); + byte[] custBytes = cust.getBytes(); + ManagedKeyData managedKey = + adminClient.enableKeyManagement(custBytes, ManagedKeyData.KEY_SPACE_GLOBAL); + assertKeyDataSingleKey(managedKey, ManagedKeyState.ACTIVE); List managedKeys = - adminClient.getManagedKeys(encodedCust, ManagedKeyData.KEY_SPACE_GLOBAL); - assertEquals(1, managedKeys.size()); + adminClient.getManagedKeys(custBytes, ManagedKeyData.KEY_SPACE_GLOBAL); assertEquals(managedKeyProvider.getLastGeneratedKeyData(cust, ManagedKeyData.KEY_SPACE_GLOBAL) .cloneWithoutKey(), managedKeys.get(0).cloneWithoutKey()); String nonExistentCust = "nonExistentCust"; + byte[] nonExistentBytes = nonExistentCust.getBytes(); managedKeyProvider.setMockedKeyState(nonExistentCust, ManagedKeyState.FAILED); - List keyDataList1 = adminClient.enableKeyManagement( - ManagedKeyProvider.encodeToStr(nonExistentCust.getBytes()), ManagedKeyData.KEY_SPACE_GLOBAL); - assertKeyDataListSingleKey(keyDataList1, ManagedKeyState.FAILED); + ManagedKeyData managedKey1 = + adminClient.enableKeyManagement(nonExistentBytes, ManagedKeyData.KEY_SPACE_GLOBAL); + assertKeyDataSingleKey(managedKey1, ManagedKeyState.FAILED); String disabledCust = "disabledCust"; + byte[] disabledBytes = disabledCust.getBytes(); managedKeyProvider.setMockedKeyState(disabledCust, ManagedKeyState.DISABLED); - List keyDataList2 = adminClient.enableKeyManagement( - 
ManagedKeyProvider.encodeToStr(disabledCust.getBytes()), ManagedKeyData.KEY_SPACE_GLOBAL); - assertKeyDataListSingleKey(keyDataList2, ManagedKeyState.DISABLED); + ManagedKeyData managedKey2 = + adminClient.enableKeyManagement(disabledBytes, ManagedKeyData.KEY_SPACE_GLOBAL); + assertKeyDataSingleKey(managedKey2, ManagedKeyState.DISABLED); } - private static void assertKeyDataListSingleKey(List managedKeyStates, + private static void assertKeyDataSingleKey(ManagedKeyData managedKeyState, ManagedKeyState keyState) { - assertNotNull(managedKeyStates); - assertEquals(1, managedKeyStates.size()); - assertEquals(keyState, managedKeyStates.get(0).getKeyState()); + assertNotNull(managedKeyState); + assertEquals(keyState, managedKeyState.getKeyState()); } @Test - public void testEnableKeyManagementWithServiceException() throws Exception { - ManagedKeysProtos.ManagedKeysService.BlockingInterface mockStub = - mock(ManagedKeysProtos.ManagedKeysService.BlockingInterface.class); - - ServiceException networkError = new ServiceException("Network error"); - networkError.initCause(new IOException("Network error")); - when(mockStub.enableKeyManagement(any(), any())).thenThrow(networkError); + public void testEnableKeyManagementWithExceptionOnGetManagedKey() throws Exception { + MockManagedKeyProvider managedKeyProvider = + (MockManagedKeyProvider) Encryption.getManagedKeyProvider(TEST_UTIL.getConfiguration()); + managedKeyProvider.setShouldThrowExceptionOnGetManagedKey(true); + KeymetaAdmin adminClient = new KeymetaAdminClient(TEST_UTIL.getConnection()); + IOException exception = assertThrows(IOException.class, + () -> adminClient.enableKeyManagement(new byte[0], "namespace")); + assertTrue(exception.getMessage().contains("key_cust must not be empty")); + } - KeymetaAdminClient client = new KeymetaAdminClient(TEST_UTIL.getConnection()); - // Use reflection to set the stub - Field stubField = KeymetaAdminClient.class.getDeclaredField("stub"); - stubField.setAccessible(true); - 
stubField.set(client, mockStub); + @Test + public void testEnableKeyManagementWithClientSideServiceException() throws Exception { + doTestWithClientSideServiceException((mockStub, networkError) -> { + try { + when(mockStub.enableKeyManagement(any(), any())).thenThrow(networkError); + } catch (ServiceException e) { + // We are just setting up the mock, so no exception is expected here. + throw new RuntimeException("Unexpected ServiceException", e); + } + return null; + }, (client) -> { + try { + client.enableKeyManagement(new byte[0], "namespace"); + } catch (IOException e) { + throw new RuntimeException(e); + } + return null; + }); + } - IOException exception = assertThrows(IOException.class, () -> { - client.enableKeyManagement("cust", "namespace"); + @Test + public void testGetManagedKeysWithClientSideServiceException() throws Exception { + // Similar test for getManagedKeys method + doTestWithClientSideServiceException((mockStub, networkError) -> { + try { + when(mockStub.getManagedKeys(any(), any())).thenThrow(networkError); + } catch (ServiceException e) { + // We are just setting up the mock, so no exception is expected here. 
+ throw new RuntimeException("Unexpected ServiceException", e); + } + return null; + }, (client) -> { + try { + client.getManagedKeys(new byte[0], "namespace"); + } catch (IOException | KeyException e) { + throw new RuntimeException(e); + } + return null; }); + } - assertTrue(exception.getMessage().contains("Network error")); + @Test + public void testRotateSTKLocal() throws Exception { + HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); + KeymetaAdmin keymetaAdmin = master.getKeymetaAdmin(); + doTestRotateSTK(keymetaAdmin); } @Test - public void testGetManagedKeysWithServiceException() throws Exception { - // Similar test for getManagedKeys method + public void testRotateSTKOverRPC() throws Exception { + KeymetaAdmin adminClient = new KeymetaAdminClient(TEST_UTIL.getConnection()); + doTestRotateSTK(adminClient); + } + + private void doTestRotateSTK(KeymetaAdmin adminClient) throws IOException { + // Call rotateSTK - since no actual system key change has occurred, + // this should return false (no rotation performed) + boolean result = adminClient.rotateSTK(); + assertFalse("rotateSTK should return false when no key change is detected", result); + + HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); + ManagedKeyData currentSystemKey = master.getSystemKeyCache().getLatestSystemKey(); + + MockManagedKeyProvider managedKeyProvider = + (MockManagedKeyProvider) Encryption.getManagedKeyProvider(TEST_UTIL.getConfiguration()); + // Once we enable multikeyGenMode on MockManagedKeyProvider, every call should return a new key + // which should trigger a rotation. 
+ managedKeyProvider.setMultikeyGenMode(true); + result = adminClient.rotateSTK(); + assertTrue("rotateSTK should return true when a new key is detected", result); + + ManagedKeyData newSystemKey = master.getSystemKeyCache().getLatestSystemKey(); + assertNotEquals("newSystemKey should be different from currentSystemKey", currentSystemKey, + newSystemKey); + + HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0); + assertEquals("regionServer should have the same new system key", newSystemKey, + regionServer.getSystemKeyCache().getLatestSystemKey()); + + } + + @Test + public void testRotateSTKWithExceptionOnGetSystemKey() throws Exception { + MockManagedKeyProvider managedKeyProvider = + (MockManagedKeyProvider) Encryption.getManagedKeyProvider(TEST_UTIL.getConfiguration()); + managedKeyProvider.setShouldThrowExceptionOnGetSystemKey(true); + KeymetaAdmin adminClient = new KeymetaAdminClient(TEST_UTIL.getConnection()); + IOException exception = assertThrows(IOException.class, () -> adminClient.rotateSTK()); + assertTrue(exception.getMessage().contains("Test exception on getSystemKey")); + } + + @Test + public void testRotateSTKWithClientSideServiceException() throws Exception { + doTestWithClientSideServiceException((mockStub, networkError) -> { + try { + when(mockStub.rotateSTK(any(), any())).thenThrow(networkError); + } catch (ServiceException e) { + // We are just setting up the mock, so no exception is expected here. 
+ throw new RuntimeException("Unexpected ServiceException", e); + } + return null; + }, (client) -> { + try { + client.rotateSTK(); + } catch (IOException e) { + throw new RuntimeException(e); + } + return null; + }); + } + + private void + doTestWithClientSideServiceException(BiFunction< + ManagedKeysProtos.ManagedKeysService.BlockingInterface, ServiceException, Void> setupFunction, + Function testFunction) throws Exception { ManagedKeysProtos.ManagedKeysService.BlockingInterface mockStub = mock(ManagedKeysProtos.ManagedKeysService.BlockingInterface.class); ServiceException networkError = new ServiceException("Network error"); networkError.initCause(new IOException("Network error")); - when(mockStub.getManagedKeys(any(), any())).thenThrow(networkError); KeymetaAdminClient client = new KeymetaAdminClient(TEST_UTIL.getConnection()); + // Use reflection to set the stub Field stubField = KeymetaAdminClient.class.getDeclaredField("stub"); stubField.setAccessible(true); stubField.set(client, mockStub); + setupFunction.apply(mockStub, networkError); + IOException exception = assertThrows(IOException.class, () -> { - client.getManagedKeys("cust", "namespace"); + try { + testFunction.apply(client); + } catch (RuntimeException e) { + throw e.getCause(); + } }); assertTrue(exception.getMessage().contains("Network error")); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index 5b522dc91072..b04380ae450c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -155,6 +155,11 @@ public MasterWalManager getMasterWalManager() { return null; } + @Override + public boolean rotateSystemKeyIfChanged() { + return false; + } + @Override public MasterCoprocessorHost getMasterCoprocessorHost() { return null; diff 
--git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index 81977c24b290..402e8697fe91 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -144,6 +144,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.EmptyMsg; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse; @@ -711,6 +712,12 @@ public GetSpaceQuotaSnapshotsResponse getSpaceQuotaSnapshots(RpcController contr return null; } + @Override + public EmptyMsg refreshSystemKeyCache(RpcController controller, EmptyMsg request) + throws ServiceException { + return null; + } + @Override public Connection createConnection(Configuration conf) throws IOException { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java index a2cb14223e17..cb5c8dc11747 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java @@ -23,11 +23,15 @@ import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.FAILED; import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.INACTIVE; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static 
org.junit.Assert.assertNull; import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; import static org.junit.Assume.assumeTrue; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -37,17 +41,22 @@ import java.util.Arrays; import java.util.Collection; import java.util.List; +import java.util.concurrent.CompletableFuture; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.client.AsyncAdmin; +import org.apache.hadoop.hbase.client.AsyncClusterConnection; import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider; import org.apache.hadoop.hbase.io.crypto.ManagedKeyState; import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider; +import org.apache.hadoop.hbase.keymeta.KeyManagementService; import org.apache.hadoop.hbase.keymeta.KeymetaAdminImpl; import org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor; import org.apache.hadoop.hbase.testclassification.MasterTests; @@ -69,15 +78,15 @@ @RunWith(Suite.class) @Suite.SuiteClasses({ TestKeymetaAdminImpl.TestWhenDisabled.class, - TestKeymetaAdminImpl.TestAdminImpl.class, - TestKeymetaAdminImpl.TestForKeyProviderNullReturn.class, }) + TestKeymetaAdminImpl.TestAdminImpl.class, TestKeymetaAdminImpl.TestForKeyProviderNullReturn.class, + TestKeymetaAdminImpl.TestRotateSTK.class }) @Category({ MasterTests.class, SmallTests.class }) public class TestKeymetaAdminImpl { private static final String CUST = "cust1"; - private static final String 
ENCODED_CUST = ManagedKeyProvider.encodeToStr(CUST.getBytes()); + private static final byte[] CUST_BYTES = CUST.getBytes(); - private final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + protected final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @Rule public TestName name = new TestName(); @@ -129,9 +138,9 @@ public void setUp() throws Exception { @Test public void testDisabled() throws Exception { assertThrows(IOException.class, () -> keymetaAdmin - .enableKeyManagement(ManagedKeyData.KEY_GLOBAL_CUSTODIAN, KEY_SPACE_GLOBAL)); - assertThrows(IOException.class, - () -> keymetaAdmin.getManagedKeys(ManagedKeyData.KEY_GLOBAL_CUSTODIAN, KEY_SPACE_GLOBAL)); + .enableKeyManagement(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES, KEY_SPACE_GLOBAL)); + assertThrows(IOException.class, () -> keymetaAdmin + .getManagedKeys(ManagedKeyData.KEY_GLOBAL_CUSTODIAN_BYTES, KEY_SPACE_GLOBAL)); } } @@ -164,40 +173,35 @@ public void testEnableAndGet() throws Exception { when(keymetaAccessor.getActiveKey(CUST.getBytes(), keySpace)) .thenReturn(managedKeyProvider.getManagedKey(CUST.getBytes(), keySpace)); - List managedKeys = keymetaAdmin.enableKeyManagement(ENCODED_CUST, keySpace); - assertNotNull(managedKeys); - assertEquals(1, managedKeys.size()); - assertEquals(keyState, managedKeys.get(0).getKeyState()); + ManagedKeyData managedKey = keymetaAdmin.enableKeyManagement(CUST_BYTES, keySpace); + assertNotNull(managedKey); + assertEquals(keyState, managedKey.getKeyState()); verify(keymetaAccessor).getActiveKey(CUST.getBytes(), keySpace); - keymetaAdmin.getManagedKeys(ENCODED_CUST, keySpace); + keymetaAdmin.getManagedKeys(CUST_BYTES, keySpace); verify(keymetaAccessor).getAllKeys(CUST.getBytes(), keySpace); } @Test public void testEnableKeyManagement() throws Exception { assumeTrue(keyState == ACTIVE); - List keys = keymetaAdmin.enableKeyManagement(ENCODED_CUST, "namespace1"); - assertEquals(1, keys.size()); - assertEquals(ManagedKeyState.ACTIVE, keys.get(0).getKeyState()); - 
assertEquals(ENCODED_CUST, keys.get(0).getKeyCustodianEncoded()); - assertEquals("namespace1", keys.get(0).getKeyNamespace()); + ManagedKeyData managedKey = keymetaAdmin.enableKeyManagement(CUST_BYTES, "namespace1"); + assertEquals(ManagedKeyState.ACTIVE, managedKey.getKeyState()); + assertEquals(ManagedKeyProvider.encodeToStr(CUST_BYTES), managedKey.getKeyCustodianEncoded()); + assertEquals("namespace1", managedKey.getKeyNamespace()); // Second call should return the same keys since our mock key provider returns the same key - List keys2 = keymetaAdmin.enableKeyManagement(ENCODED_CUST, "namespace1"); - assertEquals(1, keys2.size()); - assertEquals(keys.get(0), keys2.get(0)); + ManagedKeyData managedKey2 = keymetaAdmin.enableKeyManagement(CUST_BYTES, "namespace1"); + assertEquals(managedKey, managedKey2); } @Test public void testEnableKeyManagementWithMultipleNamespaces() throws Exception { - List keys = keymetaAdmin.enableKeyManagement(ENCODED_CUST, "namespace1"); - assertEquals(1, keys.size()); - assertEquals("namespace1", keys.get(0).getKeyNamespace()); + ManagedKeyData managedKey = keymetaAdmin.enableKeyManagement(CUST_BYTES, "namespace1"); + assertEquals("namespace1", managedKey.getKeyNamespace()); - List keys2 = keymetaAdmin.enableKeyManagement(ENCODED_CUST, "namespace2"); - assertEquals(1, keys2.size()); - assertEquals("namespace2", keys2.get(0).getKeyNamespace()); + ManagedKeyData managedKey2 = keymetaAdmin.enableKeyManagement(CUST_BYTES, "namespace2"); + assertEquals("namespace2", managedKey2.getKeyNamespace()); } } @@ -221,10 +225,10 @@ public void test() throws Exception { MockManagedKeyProvider managedKeyProvider = (MockManagedKeyProvider) Encryption.getManagedKeyProvider(conf); String cust = "invalidcust1"; - String encodedCust = ManagedKeyProvider.encodeToStr(cust.getBytes()); + byte[] custBytes = cust.getBytes(); managedKeyProvider.setMockedKey(cust, null, keySpace); IOException ex = assertThrows(IOException.class, - () -> 
keymetaAdmin.enableKeyManagement(encodedCust, keySpace)); + () -> keymetaAdmin.enableKeyManagement(custBytes, keySpace)); assertEquals("Invalid null managed key received from key provider", ex.getMessage()); } } @@ -266,4 +270,158 @@ protected boolean assertKeyData(ManagedKeyData keyData, ManagedKeyState expKeySt } return true; } + + /** + * Test class for rotateSTK API + */ + @RunWith(BlockJUnit4ClassRunner.class) + @Category({ MasterTests.class, SmallTests.class }) + public static class TestRotateSTK extends TestKeymetaAdminImpl { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestRotateSTK.class); + + private ServerManager mockServerManager = mock(ServerManager.class); + private AsyncClusterConnection mockConnection; + private AsyncAdmin mockAsyncAdmin; + + @Override + public void setUp() throws Exception { + super.setUp(); + mockConnection = mock(AsyncClusterConnection.class); + mockAsyncAdmin = mock(AsyncAdmin.class); + when(mockServer.getServerManager()).thenReturn(mockServerManager); + when(mockServer.getAsyncClusterConnection()).thenReturn(mockConnection); + when(mockConnection.getAdmin()).thenReturn(mockAsyncAdmin); + } + + /** + * Test rotateSTK when a new key is detected. Now that we can mock SystemKeyManager via + * master.getSystemKeyManager(), we can properly test the success scenario: 1. + * SystemKeyManager.rotateSystemKeyIfChanged() returns non-null (new key detected) 2. Master + * gets list of online region servers 3. Master makes parallel RPC calls to all region servers + * 4. All region servers successfully rebuild their system key cache 5. 
Method returns true + */ + @Test + public void testRotateSTKWithNewKey() throws Exception { + // Setup mocks for MasterServices + // Mock SystemKeyManager to return a new key (non-null) + when(mockServer.rotateSystemKeyIfChanged()).thenReturn(true); + + when(mockAsyncAdmin.refreshSystemKeyCacheOnAllServers(any())) + .thenReturn(CompletableFuture.completedFuture(null)); + + KeymetaAdminImplForTest admin = new KeymetaAdminImplForTest(mockServer, keymetaAccessor); + + // Call rotateSTK - should return true since new key was detected + boolean result = admin.rotateSTK(); + + // Verify the result + assertTrue("rotateSTK should return true when new key is detected", result); + + // Verify that rotateSystemKeyIfChanged was called + verify(mockServer).rotateSystemKeyIfChanged(); + verify(mockAsyncAdmin).refreshSystemKeyCacheOnAllServers(any()); + } + + /** + * Test rotateSTK when no key change is detected. Now that we can mock SystemKeyManager, we can + * properly test the no-change scenario: 1. SystemKeyManager.rotateSystemKeyIfChanged() returns + * null 2. Method returns false immediately without calling any region servers 3. 
No RPC calls + * are made to region servers + */ + @Test + public void testRotateSTKNoChange() throws Exception { + // Mock SystemKeyManager to return null (no key change) + when(mockServer.rotateSystemKeyIfChanged()).thenReturn(false); + + KeymetaAdminImplForTest admin = new KeymetaAdminImplForTest(mockServer, keymetaAccessor); + + // Call rotateSTK - should return false since no key change was detected + boolean result = admin.rotateSTK(); + + // Verify the result + assertFalse("rotateSTK should return false when no key change is detected", result); + + // Verify that rotateSystemKeyIfChanged was called + verify(mockServer).rotateSystemKeyIfChanged(); + + // Verify that getOnlineServersList was never called (short-circuit behavior) + verify(mockServerManager, never()).getOnlineServersList(); + } + + @Test + public void testRotateSTKOnIOException() throws Exception { + when(mockServer.rotateSystemKeyIfChanged()).thenThrow(new IOException("test")); + + KeymetaAdminImpl admin = new KeymetaAdminImpl(mockServer); + IOException ex = assertThrows(IOException.class, () -> admin.rotateSTK()); + assertTrue("Exception message should contain 'test', but was: " + ex.getMessage(), + ex.getMessage().equals("test")); + } + + /** + * Test rotateSTK when region server refresh fails. 
+ */ + @Test + public void testRotateSTKWithFailedServerRefresh() throws Exception { + // Setup mocks for MasterServices + // Mock SystemKeyManager to return a new key (non-null) + when(mockServer.rotateSystemKeyIfChanged()).thenReturn(true); + + CompletableFuture failedFuture = new CompletableFuture<>(); + failedFuture.completeExceptionally(new IOException("refresh failed")); + when(mockAsyncAdmin.refreshSystemKeyCacheOnAllServers(any())).thenReturn(failedFuture); + + KeymetaAdminImplForTest admin = new KeymetaAdminImplForTest(mockServer, keymetaAccessor); + + // Call rotateSTK and expect IOException + IOException ex = assertThrows(IOException.class, () -> admin.rotateSTK()); + + assertTrue(ex.getMessage() + .contains("Failed to initiate System Key cache refresh on one or more region servers")); + + // Verify that rotateSystemKeyIfChanged was called + verify(mockServer).rotateSystemKeyIfChanged(); + verify(mockAsyncAdmin).refreshSystemKeyCacheOnAllServers(any()); + } + + @Test + public void testRotateSTKNotOnMaster() throws Exception { + // Create a non-master server mock + Server mockRegionServer = mock(Server.class); + KeyManagementService mockKeyService = mock(KeyManagementService.class); + // Mock KeyManagementService - required by KeyManagementBase constructor + when(mockRegionServer.getKeyManagementService()).thenReturn(mockKeyService); + when(mockKeyService.getConfiguration()).thenReturn(conf); + when(mockRegionServer.getConfiguration()).thenReturn(conf); + when(mockRegionServer.getFileSystem()).thenReturn(mockFileSystem); + + KeymetaAdminImpl admin = new KeymetaAdminImpl(mockRegionServer) { + @Override + protected AsyncAdmin getAsyncAdmin(MasterServices master) { + throw new RuntimeException("Shouldn't be called since we are not on master"); + } + }; + + IOException ex = assertThrows(IOException.class, () -> admin.rotateSTK()); + assertTrue(ex.getMessage().contains("rotateSTK can only be called on master")); + } + + @Test + public void 
testRotateSTKWhenDisabled() throws Exception { + TEST_UTIL.getConfiguration().set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "false"); + + KeymetaAdminImpl admin = new KeymetaAdminImpl(mockServer) { + @Override + protected AsyncAdmin getAsyncAdmin(MasterServices master) { + throw new RuntimeException("Shouldn't be called since we are disabled"); + } + }; + + IOException ex = assertThrows(IOException.class, () -> admin.rotateSTK()); + assertTrue("Exception message should contain 'not enabled', but was: " + ex.getMessage(), + ex.getMessage().contains("not enabled")); + } + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSRpcServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSRpcServices.java index ca7e20f5869d..3d367d820127 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSRpcServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSRpcServices.java @@ -18,11 +18,22 @@ package org.apache.hadoop.hbase.regionserver; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import java.io.IOException; import java.net.InetAddress; import java.net.UnknownHostException; import java.util.Optional; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.ipc.RpcCall; import org.apache.hadoop.hbase.ipc.RpcServer; @@ -35,6 +46,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import 
org.apache.hbase.thirdparty.com.google.protobuf.RpcController; +import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.EmptyMsg; + /** * Test parts of {@link RSRpcServices} */ @@ -69,4 +85,119 @@ public void testRegionScannerHolderToString() throws UnknownHostException { null, null, false, false, clientIpAndPort, userNameTest); LOG.info("rsh: {}", rsh); } + + /** + * Test the refreshSystemKeyCache RPC method that is used to rebuild the system key cache on + * region servers when a system key rotation has occurred. + */ + @Test + public void testRefreshSystemKeyCache() throws Exception { + // Create mocks + HRegionServer mockServer = mock(HRegionServer.class); + Configuration conf = HBaseConfiguration.create(); + FileSystem mockFs = mock(FileSystem.class); + + when(mockServer.getConfiguration()).thenReturn(conf); + when(mockServer.isOnline()).thenReturn(true); + when(mockServer.isAborted()).thenReturn(false); + when(mockServer.isStopped()).thenReturn(false); + when(mockServer.isDataFileSystemOk()).thenReturn(true); + when(mockServer.getFileSystem()).thenReturn(mockFs); + + // Create RSRpcServices + RSRpcServices rpcServices = new RSRpcServices(mockServer); + + // Create request + EmptyMsg request = EmptyMsg.getDefaultInstance(); + RpcController controller = mock(RpcController.class); + + // Call the RPC method + EmptyMsg response = rpcServices.refreshSystemKeyCache(controller, request); + + // Verify the response is not null + assertNotNull("Response should not be null", response); + + // Verify that rebuildSystemKeyCache was called on the server + verify(mockServer).rebuildSystemKeyCache(); + + LOG.info("refreshSystemKeyCache test completed successfully"); + } + + /** + * Test that refreshSystemKeyCache throws ServiceException when server is not online + */ + @Test + public void testRefreshSystemKeyCacheWhenServerStopped() throws Exception { + // Create mocks + HRegionServer 
mockServer = mock(HRegionServer.class); + Configuration conf = HBaseConfiguration.create(); + FileSystem mockFs = mock(FileSystem.class); + + when(mockServer.getConfiguration()).thenReturn(conf); + when(mockServer.isOnline()).thenReturn(true); + when(mockServer.isAborted()).thenReturn(false); + when(mockServer.isStopped()).thenReturn(true); // Server is stopped + when(mockServer.isDataFileSystemOk()).thenReturn(true); + when(mockServer.getFileSystem()).thenReturn(mockFs); + + // Create RSRpcServices + RSRpcServices rpcServices = new RSRpcServices(mockServer); + + // Create request + EmptyMsg request = EmptyMsg.getDefaultInstance(); + RpcController controller = mock(RpcController.class); + + // Call the RPC method and expect ServiceException + try { + rpcServices.refreshSystemKeyCache(controller, request); + fail("Expected ServiceException when server is stopped"); + } catch (ServiceException e) { + // Expected + assertTrue("Exception should mention server stopping", + e.getCause().getMessage().contains("stopping")); + LOG.info("Correctly threw ServiceException when server is stopped"); + } + } + + /** + * Test that refreshSystemKeyCache throws ServiceException when rebuildSystemKeyCache fails + */ + @Test + public void testRefreshSystemKeyCacheWhenRebuildFails() throws Exception { + // Create mocks + HRegionServer mockServer = mock(HRegionServer.class); + Configuration conf = HBaseConfiguration.create(); + FileSystem mockFs = mock(FileSystem.class); + + when(mockServer.getConfiguration()).thenReturn(conf); + when(mockServer.isOnline()).thenReturn(true); + when(mockServer.isAborted()).thenReturn(false); + when(mockServer.isStopped()).thenReturn(false); + when(mockServer.isDataFileSystemOk()).thenReturn(true); + when(mockServer.getFileSystem()).thenReturn(mockFs); + + // Make rebuildSystemKeyCache throw IOException + IOException testException = new IOException("Test failure rebuilding cache"); + doThrow(testException).when(mockServer).rebuildSystemKeyCache(); + + // 
Create RSRpcServices + RSRpcServices rpcServices = new RSRpcServices(mockServer); + + // Create request + EmptyMsg request = EmptyMsg.getDefaultInstance(); + RpcController controller = mock(RpcController.class); + + // Call the RPC method and expect ServiceException + try { + rpcServices.refreshSystemKeyCache(controller, request); + fail("Expected ServiceException when rebuildSystemKeyCache fails"); + } catch (ServiceException e) { + // Expected + assertEquals("Test failure rebuilding cache", e.getCause().getMessage()); + LOG.info("Correctly threw ServiceException when rebuildSystemKeyCache fails"); + } + + // Verify that rebuildSystemKeyCache was called + verify(mockServer).rebuildSystemKeyCache(); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java index a59b2966b89d..1ea386aba923 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java @@ -999,4 +999,9 @@ public boolean replicationPeerModificationSwitch(boolean on, boolean drainProced public boolean isReplicationPeerModificationEnabled() throws IOException { return admin.isReplicationPeerModificationEnabled(); } + + @Override + public void refreshSystemKeyCacheOnAllServers(Set regionServers) throws IOException { + admin.refreshSystemKeyCacheOnAllServers(regionServers); + } } diff --git a/hbase-shell/src/main/ruby/hbase/keymeta_admin.rb b/hbase-shell/src/main/ruby/hbase/keymeta_admin.rb index f70abbdde55b..89b42c4070b0 100644 --- a/hbase-shell/src/main/ruby/hbase/keymeta_admin.rb +++ b/hbase-shell/src/main/ruby/hbase/keymeta_admin.rb @@ -20,6 +20,7 @@ require 'java' java_import org.apache.hadoop.hbase.io.crypto.ManagedKeyData +java_import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider java_import 
org.apache.hadoop.hbase.keymeta.KeymetaAdminClient module Hbase @@ -46,11 +47,25 @@ def get_key_statuses(key_info) @admin.getManagedKeys(cust, namespace) end + def rotate_stk + @admin.rotateSTK + end + def extract_cust_info(key_info) cust_info = key_info.split(':') raise(ArgumentError, 'Invalid cust:namespace format') unless [1, 2].include?(cust_info.length) - [cust_info[0], cust_info.length > 1 ? cust_info[1] : ManagedKeyData::KEY_SPACE_GLOBAL] + custodian = cust_info[0] + namespace = cust_info.length > 1 ? cust_info[1] : ManagedKeyData::KEY_SPACE_GLOBAL + + begin + cust_bytes = ManagedKeyProvider.decodeToBytes(custodian) + rescue Java::JavaIo::IOException => e + message = e.cause&.message || e.message + raise(ArgumentError, "Failed to decode key custodian '#{custodian}': #{message}") + end + + [cust_bytes, namespace] end end end diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb index 8bc4852bc3bf..47dc7541b968 100644 --- a/hbase-shell/src/main/ruby/shell.rb +++ b/hbase-shell/src/main/ruby/shell.rb @@ -151,7 +151,7 @@ def hbase_rsgroup_admin end def hbase_keymeta_admin - @keymeta_admin ||= hbase.keymeta_admin + @hbase_keymeta_admin ||= hbase.keymeta_admin end ## @@ -629,6 +629,7 @@ def self.exception_handler(hide_traceback) commands: %w[ enable_key_management show_key_status + rotate_stk ] ) diff --git a/hbase-shell/src/main/ruby/shell/commands/enable_key_management.rb b/hbase-shell/src/main/ruby/shell/commands/enable_key_management.rb index 9a6d0422ad4e..da3fe6ad8c91 100644 --- a/hbase-shell/src/main/ruby/shell/commands/enable_key_management.rb +++ b/hbase-shell/src/main/ruby/shell/commands/enable_key_management.rb @@ -37,7 +37,7 @@ def help end def command(key_info) - statuses = keymeta_admin.enable_key_management(key_info) + statuses = [keymeta_admin.enable_key_management(key_info)] print_key_statuses(statuses) end end diff --git a/hbase-shell/src/main/ruby/shell/commands/rotate_stk.rb 
b/hbase-shell/src/main/ruby/shell/commands/rotate_stk.rb new file mode 100644 index 000000000000..2a8f4306ad5c --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/rotate_stk.rb @@ -0,0 +1,52 @@ +# +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# frozen_string_literal: true + +require 'shell/commands/keymeta_command_base' + +module Shell + module Commands + # RotateStk is a class that provides a Ruby interface to rotate the System Key (STK) + # via HBase Key Management API. + class RotateStk < KeymetaCommandBase + def help + <<-EOF +Rotate the System Key (STK) if a new key is detected. +This command checks for a new system key and propagates it to all region servers. +Returns true if a new key was detected and rotated, false otherwise. 
+ +Example: + hbase> rotate_stk + EOF + end + + def command + result = keymeta_admin.rotate_stk + if result + formatter.row(['System Key rotation was performed successfully and cache was refreshed ' \ + 'on all region servers']) + else + formatter.row(['No System Key change was detected']) + end + result + end + end + end +end + diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaMockProviderShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaMockProviderShell.java new file mode 100644 index 000000000000..cc4aabe4ff4e --- /dev/null +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestKeymetaMockProviderShell.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.keymeta.ManagedKeyTestBase; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.IntegrationTests; +import org.jruby.embed.ScriptingContainer; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ ClientTests.class, IntegrationTests.class }) +public class TestKeymetaMockProviderShell extends ManagedKeyTestBase implements RubyShellTest { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestKeymetaMockProviderShell.class); + + private final ScriptingContainer jruby = new ScriptingContainer(); + + @Before + @Override + public void setUp() throws Exception { + // Enable to be able to debug without timing out. + // final Configuration conf = TEST_UTIL.getConfiguration(); + // conf.set("zookeeper.session.timeout", "6000000"); + // conf.set("hbase.rpc.timeout", "6000000"); + // conf.set("hbase.rpc.read.timeout", "6000000"); + // conf.set("hbase.rpc.write.timeout", "6000000"); + // conf.set("hbase.client.operation.timeout", "6000000"); + // conf.set("hbase.client.scanner.timeout.period", "6000000"); + // conf.set("hbase.ipc.client.socket.timeout.connect", "6000000"); + // conf.set("hbase.ipc.client.socket.timeout.read", "6000000"); + // conf.set("hbase.ipc.client.socket.timeout.write", "6000000"); + // conf.set("hbase.master.start.timeout.localHBaseCluster", "6000000"); + // conf.set("hbase.master.init.timeout.localHBaseCluster", "6000000"); + // conf.set("hbase.client.sync.wait.timeout.msec", "6000000"); + // conf.set("hbase.client.retries.number", "1000"); + RubyShellTest.setUpConfig(this); + super.setUp(); + RubyShellTest.setUpJRubyRuntime(this); + RubyShellTest.doTestSetup(this); + 
jruby.put("$TEST", this); + } + + @Override + public HBaseTestingUtil getTEST_UTIL() { + return TEST_UTIL; + } + + @Override + public ScriptingContainer getJRuby() { + return jruby; + } + + @Override + public String getSuitePattern() { + return "**/*_keymeta_mock_provider_test.rb"; + } + + @Test + public void testRunShellTests() throws Exception { + RubyShellTest.testRunShellTests(this); + } +} diff --git a/hbase-shell/src/test/ruby/shell/admin_keymeta_test.rb b/hbase-shell/src/test/ruby/shell/admin_keymeta_test.rb index 9f3048ab5991..83a73842711a 100644 --- a/hbase-shell/src/test/ruby/shell/admin_keymeta_test.rb +++ b/hbase-shell/src/test/ruby/shell/admin_keymeta_test.rb @@ -40,18 +40,18 @@ def setup test_key_management($CUST1_ENCODED, 'test_namespace') test_key_management($GLOB_CUST_ENCODED, '*') - puts "Testing that cluster can be restarted when key management is enabled" - $TEST.restartMiniCluster() - puts "Cluster restarted, testing key management again" + puts 'Testing that cluster can be restarted when key management is enabled' + $TEST.restartMiniCluster + puts 'Cluster restarted, testing key management again' setup_hbase test_key_management($GLOB_CUST_ENCODED, '*') - puts "Key management test complete" + puts 'Key management test complete' end def test_key_management(cust, namespace) # Repeat the enable twice in a loop and ensure multiple enables succeed and return the # same output. 
- 2.times do |i| + 2.times do cust_and_namespace = "#{cust}:#{namespace}" output = capture_stdout { @shell.command('enable_key_management', cust_and_namespace) } puts "enable_key_management output: #{output}" @@ -62,5 +62,16 @@ def test_key_management(cust, namespace) assert(output.include?('1 row(s)')) end end + + define_test 'Decode failure raises friendly error' do + assert_raises(ArgumentError) do + @shell.command('enable_key_management', '!!!:namespace') + end + + error = assert_raises(ArgumentError) do + @shell.command('show_key_status', '!!!:namespace') + end + assert_match(/Failed to decode key custodian/, error.message) + end end end diff --git a/hbase-shell/src/test/ruby/shell/encrypted_table_keymeta_test.rb b/hbase-shell/src/test/ruby/shell/encrypted_table_keymeta_test.rb index 2562a64779e0..35ad85785e0f 100644 --- a/hbase-shell/src/test/ruby/shell/encrypted_table_keymeta_test.rb +++ b/hbase-shell/src/test/ruby/shell/encrypted_table_keymeta_test.rb @@ -46,7 +46,7 @@ class EncryptedTableKeymetaTest < Test::Unit::TestCase def setup setup_hbase - @test_table = 'enctest'+Time.now.to_i.to_s + @test_table = "enctest#{Time.now.to_i}" @connection = $TEST_CLUSTER.connection end @@ -54,14 +54,17 @@ def setup # Custodian is currently not supported, so this will end up falling back to local key # generation. 
test_table_put_get_with_encryption($CUST1_ENCODED, '*', - { 'NAME' => 'f', 'ENCRYPTION' => 'AES' }, true) + { 'NAME' => 'f', 'ENCRYPTION' => 'AES' }, + true) end define_test 'Test table with custom namespace attribute in Column Family' do - custom_namespace = "test_global_namespace" - test_table_put_get_with_encryption($GLOB_CUST_ENCODED, custom_namespace, + custom_namespace = 'test_global_namespace' + test_table_put_get_with_encryption( + $GLOB_CUST_ENCODED, custom_namespace, { 'NAME' => 'f', 'ENCRYPTION' => 'AES', 'ENCRYPTION_KEY_NAMESPACE' => custom_namespace }, - false) + false + ) end def test_table_put_get_with_encryption(cust, namespace, table_attrs, fallback_scenario) @@ -88,19 +91,19 @@ def test_table_put_get_with_encryption(cust, namespace, table_attrs, fallback_sc assert_not_nil(hfile_info) live_trailer = hfile_info.getTrailer assert_trailer(live_trailer) - assert_equal(namespace, live_trailer.getKeyNamespace()) + assert_equal(namespace, live_trailer.getKeyNamespace) # When active key is supposed to be used, we can valiate the key bytes in the context against # the actual key from provider. - if !fallback_scenario - encryption_context = hfile_info.getHFileContext().getEncryptionContext() + unless fallback_scenario + encryption_context = hfile_info.getHFileContext.getEncryptionContext assert_not_nil(encryption_context) - assert_not_nil(encryption_context.getKeyBytes()) + assert_not_nil(encryption_context.getKeyBytes) key_provider = Encryption.getManagedKeyProvider($TEST_CLUSTER.getConfiguration) key_data = key_provider.getManagedKey(ManagedKeyProvider.decodeToBytes(cust), namespace) assert_not_nil(key_data) - assert_equal(namespace, key_data.getKeyNamespace()) - assert_equal(key_data.getTheKey().getEncoded(), encryption_context.getKeyBytes()) + assert_equal(namespace, key_data.getKeyNamespace) + assert_equal(key_data.getTheKey.getEncoded, encryption_context.getKeyBytes) end ## Disable table to ensure that the stores are not cached. 
diff --git a/hbase-shell/src/test/ruby/shell/key_provider_keymeta_migration_test.rb b/hbase-shell/src/test/ruby/shell/key_provider_keymeta_migration_test.rb index 978ee79e8655..8a179c8af2f6 100644 --- a/hbase-shell/src/test/ruby/shell/key_provider_keymeta_migration_test.rb +++ b/hbase-shell/src/test/ruby/shell/key_provider_keymeta_migration_test.rb @@ -98,7 +98,7 @@ def setup expected_namespace: { 'f' => 'shared-global-key' } }, @table_cf_keys => { - cfs: ['cf1', 'cf2'], + cfs: %w[cf1 cf2], expected_namespace: { 'cf1' => "#{@table_cf_keys}/cf1", 'cf2' => "#{@table_cf_keys}/cf2" @@ -106,22 +106,21 @@ def setup } } - # Setup initial KeyStoreKeyProvider setup_old_key_provider - puts " >> Starting Cluster" - $TEST.startMiniCluster() - puts " >> Cluster started" + puts ' >> Starting Cluster' + $TEST.startMiniCluster + puts ' >> Cluster started' setup_hbase end define_test 'Test complete key provider migration' do - puts "\n=== Starting Key Provider Migration Test ===" + puts '\n=== Starting Key Provider Migration Test ===' # Step 1-3: Setup old provider and create tables create_test_tables - puts "\n--- Validating initial table operations ---" + puts '\n--- Validating initial table operations ---' validate_pre_migration_operations(false) # Step 4: Setup new provider and restart @@ -134,13 +133,13 @@ def setup # Step 6: Cleanup and final validation cleanup_old_provider_and_validate - puts "\n=== Migration Test Completed Successfully ===" + puts '\n=== Migration Test Completed Successfully ===' end private def setup_old_key_provider - puts "\n--- Setting up old KeyStoreKeyProvider ---" + puts '\n--- Setting up old KeyStoreKeyProvider ---' # Use proper test directory (similar to KeymetaTestUtils.setupTestKeyStore) test_data_dir = $TEST_CLUSTER.getDataTestDir("old_keystore_#{@test_timestamp}").toString @@ -150,22 +149,23 @@ def setup_old_key_provider # Create keystore with only the master key # ENCRYPTION_KEY attributes generate their own keys and don't use keystore entries 
- create_keystore(@old_keystore_file, { - @master_key_alias => generate_key(@master_key_alias) - }) + create_keystore(@old_keystore_file, { @master_key_alias => generate_key(@master_key_alias) }) # Configure old KeyStoreKeyProvider - provider_uri = "jceks://#{File.expand_path(@old_keystore_file)}?password=#{@keystore_password}" + provider_uri = "jceks://#{File.expand_path(@old_keystore_file)}?" \ + "password=#{@keystore_password}" $TEST_CLUSTER.getConfiguration.set(HConstants::CRYPTO_KEYPROVIDER_CONF_KEY, - KeyStoreKeyProvider.java_class.name) - $TEST_CLUSTER.getConfiguration.set(HConstants::CRYPTO_KEYPROVIDER_PARAMETERS_KEY, provider_uri) - $TEST_CLUSTER.getConfiguration.set(HConstants::CRYPTO_MASTERKEY_NAME_CONF_KEY, @master_key_alias) + KeyStoreKeyProvider.java_class.name) + $TEST_CLUSTER.getConfiguration.set(HConstants::CRYPTO_KEYPROVIDER_PARAMETERS_KEY, + provider_uri) + $TEST_CLUSTER.getConfiguration.set(HConstants::CRYPTO_MASTERKEY_NAME_CONF_KEY, + @master_key_alias) puts " >> Old KeyStoreKeyProvider configured with keystore: #{@old_keystore_file}" end def create_test_tables - puts "\n--- Creating test tables ---" + puts '\n--- Creating test tables ---' # 1. Table without encryption command(:create, @table_no_encryption, { 'NAME' => 'f' }) @@ -177,17 +177,17 @@ def create_test_tables # 3. Table with table-level key command(:create, @table_table_key, { 'NAME' => 'f', 'ENCRYPTION' => 'AES', - 'ENCRYPTION_KEY' => @table_key_alias }) + 'ENCRYPTION_KEY' => @table_key_alias }) puts " >> Created table #{@table_table_key} with table-level key" # 4. First table with shared key command(:create, @table_shared_key1, { 'NAME' => 'f', 'ENCRYPTION' => 'AES', - 'ENCRYPTION_KEY' => @shared_key_alias }) + 'ENCRYPTION_KEY' => @shared_key_alias }) puts " >> Created table #{@table_shared_key1} with shared key" # 5. 
Second table with shared key command(:create, @table_shared_key2, { 'NAME' => 'f', 'ENCRYPTION' => 'AES', - 'ENCRYPTION_KEY' => @shared_key_alias }) + 'ENCRYPTION_KEY' => @shared_key_alias }) puts " >> Created table #{@table_shared_key2} with shared key" # 6. Table with column family specific keys @@ -199,10 +199,10 @@ def create_test_tables def validate_pre_migration_operations(is_key_management_enabled) @tables_metadata.each do |table_name, metadata| - puts " >> test_table_operations on table: #{table_name} with CFs: #{metadata[:cfs].join(', ')}" - if metadata[:no_encryption] - next - end + puts " >> test_table_operations on table: #{table_name} with CFs: " \ + "#{metadata[:cfs].join(', ')}" + next if metadata[:no_encryption] + test_table_operations(table_name, metadata[:cfs]) check_hfile_trailers_pre_migration(table_name, metadata[:cfs], is_key_management_enabled) end @@ -230,14 +230,14 @@ def test_table_operations(table_name, column_families) get_result = test_table.table.get(Get.new(Bytes.toBytes('row1'))) assert_false(get_result.isEmpty) assert_equal('value1', - Bytes.toString(get_result.getValue(Bytes.toBytes(cf), Bytes.toBytes('col1')))) + Bytes.toString(get_result.getValue(Bytes.toBytes(cf), Bytes.toBytes('col1')))) end puts " >> Operations validated for #{table_name}" end def setup_new_key_provider - puts "\n--- Setting up new ManagedKeyStoreKeyProvider ---" + puts '\n--- Setting up new ManagedKeyStoreKeyProvider ---' # Use proper test directory (similar to KeymetaTestUtils.setupTestKeyStore) test_data_dir = $TEST_CLUSTER.getDataTestDir("new_keystore_#{@test_timestamp}").toString @@ -252,69 +252,85 @@ def setup_new_key_provider create_keystore(@new_keystore_file, migrated_keys) # Configure ManagedKeyStoreKeyProvider - provider_uri = "jceks://#{File.expand_path(@new_keystore_file)}?password=#{@keystore_password}" + provider_uri = "jceks://#{File.expand_path(@new_keystore_file)}?" 
\ + "password=#{@keystore_password}" $TEST_CLUSTER.getConfiguration.set(HConstants::CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, 'true') $TEST_CLUSTER.getConfiguration.set(HConstants::CRYPTO_MANAGED_KEYPROVIDER_CONF_KEY, - ManagedKeyStoreKeyProvider.java_class.name) - $TEST_CLUSTER.getConfiguration.set(HConstants::CRYPTO_MANAGED_KEYPROVIDER_PARAMETERS_KEY, provider_uri) - $TEST_CLUSTER.getConfiguration.set(HConstants::CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY, - 'system_key') + ManagedKeyStoreKeyProvider.java_class.name) + $TEST_CLUSTER.getConfiguration.set(HConstants::CRYPTO_MANAGED_KEYPROVIDER_PARAMETERS_KEY, + provider_uri) + $TEST_CLUSTER.getConfiguration.set( + HConstants::CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY, + 'system_key' + ) # Setup key configurations for ManagedKeyStoreKeyProvider # Shared key configuration $TEST_CLUSTER.getConfiguration.set( "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.shared-global-key.alias", - 'shared_global_key') + 'shared_global_key' + ) $TEST_CLUSTER.getConfiguration.setBoolean( - "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.shared-global-key.active", true) + "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.shared-global-key.active", + true + ) # Table-level key configuration - let system determine namespace automatically $TEST_CLUSTER.getConfiguration.set( "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.#{@table_table_key}.alias", - "#{@table_table_key}_key") + "#{@table_table_key}_key" + ) $TEST_CLUSTER.getConfiguration.setBoolean( - "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.#{@table_table_key}.active", true) + "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.#{@table_table_key}.active", + true + ) # CF-level key configurations - let system determine namespace automatically $TEST_CLUSTER.getConfiguration.set( "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.#{@table_cf_keys}/cf1.alias", - "#{@table_cf_keys}_cf1_key") + 
"#{@table_cf_keys}_cf1_key" + ) $TEST_CLUSTER.getConfiguration.setBoolean( - "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.#{@table_cf_keys}/cf1.active", true) + "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.#{@table_cf_keys}/cf1.active", + true + ) $TEST_CLUSTER.getConfiguration.set( "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.#{@table_cf_keys}/cf2.alias", - "#{@table_cf_keys}_cf2_key") + "#{@table_cf_keys}_cf2_key" + ) $TEST_CLUSTER.getConfiguration.setBoolean( - "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.#{@table_cf_keys}/cf2.active", true) + "hbase.crypto.managed_key_store.cust.#{$GLOB_CUST_ENCODED}.#{@table_cf_keys}/cf2.active", + true + ) # Enable KeyMeta coprocessor $TEST_CLUSTER.getConfiguration.set('hbase.coprocessor.master.classes', - KeymetaServiceEndpoint.java_class.name) + KeymetaServiceEndpoint.java_class.name) - puts " >> New ManagedKeyStoreKeyProvider configured" + puts ' >> New ManagedKeyStoreKeyProvider configured' end def restart_cluster_and_validate - puts "\n--- Restarting cluster with managed key store key provider ---" + puts '\n--- Restarting cluster with managed key store key provider ---' $TEST.restartMiniCluster(KeymetaTableAccessor::KEY_META_TABLE_NAME) - puts " >> Cluster restarted with ManagedKeyStoreKeyProvider" + puts ' >> Cluster restarted with ManagedKeyStoreKeyProvider' setup_hbase # Validate key management service is functional output = capture_stdout { command(:show_key_status, "#{$GLOB_CUST_ENCODED}:*") } assert(output.include?('0 row(s)'), "Expected 0 rows from show_key_status, got: #{output}") - #assert(output.include?(' FAILED '), "Expected FAILED status for show_key_status, got: #{output}") - puts " >> Key management service is functional" + puts ' >> Key management service is functional' # Test operations still work and check HFile trailers - puts "\n--- Validating operations after restart ---" + puts '\n--- Validating operations after restart ---' 
validate_pre_migration_operations(true) end def check_hfile_trailers_pre_migration(table_name, column_families, is_key_management_enabled) - puts " >> Checking HFile trailers for #{table_name} with CFs: #{column_families.join(', ')}" + puts " >> Checking HFile trailers for #{table_name} with CFs: " \ + "#{column_families.join(', ')}" column_families.each do |cf_name| validate_hfile_trailer(table_name, cf_name, false, is_key_management_enabled, false) @@ -322,7 +338,7 @@ def check_hfile_trailers_pre_migration(table_name, column_families, is_key_manag end def migrate_tables_step_by_step - puts "\n--- Performing step-by-step table migration ---" + puts '\n--- Performing step-by-step table migration ---' # Migrate shared key tables first migrate_shared_key_tables @@ -335,74 +351,80 @@ def migrate_tables_step_by_step end def migrate_shared_key_tables - puts "\n--- Migrating shared key tables ---" + puts '\n--- Migrating shared key tables ---' # Enable key management for shared global key cust_and_namespace = "#{$GLOB_CUST_ENCODED}:shared-global-key" output = capture_stdout { command(:enable_key_management, cust_and_namespace) } assert(output.include?("#{$GLOB_CUST_ENCODED} shared-global-key ACTIVE"), - "Expected ACTIVE status for shared key, got: #{output}") - puts " >> Enabled key management for shared global key" + "Expected ACTIVE status for shared key, got: #{output}") + puts ' >> Enabled key management for shared global key' # Migrate first shared key table - migrate_table_to_managed_key(@table_shared_key1, 'f', 'shared-global-key', true) + migrate_table_to_managed_key(@table_shared_key1, 'f', 'shared-global-key', + use_namespace_attribute: true) # Migrate second shared key table - migrate_table_to_managed_key(@table_shared_key2, 'f', 'shared-global-key', true) + migrate_table_to_managed_key(@table_shared_key2, 'f', 'shared-global-key', + use_namespace_attribute: true) end def migrate_table_level_key - puts "\n--- Migrating table-level key ---" + puts '\n--- 
Migrating table-level key ---' # Enable key management for table namespace cust_and_namespace = "#{$GLOB_CUST_ENCODED}:#{@table_table_key}" output = capture_stdout { command(:enable_key_management, cust_and_namespace) } assert(output.include?("#{$GLOB_CUST_ENCODED} #{@table_table_key} ACTIVE"), - "Expected ACTIVE status for table key, got: #{output}") - puts " >> Enabled key management for table-level key" + "Expected ACTIVE status for table key, got: #{output}") + puts ' >> Enabled key management for table-level key' # Migrate the table - no namespace attribute, let system auto-determine - migrate_table_to_managed_key(@table_table_key, 'f', @table_table_key, false) + migrate_table_to_managed_key(@table_table_key, 'f', @table_table_key) end def migrate_cf_level_keys - puts "\n--- Migrating CF-level keys ---" + puts '\n--- Migrating CF-level keys ---' # Enable key management for CF1 cf1_namespace = "#{@table_cf_keys}/cf1" cust_and_namespace = "#{$GLOB_CUST_ENCODED}:#{cf1_namespace}" output = capture_stdout { command(:enable_key_management, cust_and_namespace) } assert(output.include?("#{$GLOB_CUST_ENCODED} #{cf1_namespace} ACTIVE"), - "Expected ACTIVE status for CF1 key, got: #{output}") - puts " >> Enabled key management for CF1" + "Expected ACTIVE status for CF1 key, got: #{output}") + puts ' >> Enabled key management for CF1' # Enable key management for CF2 cf2_namespace = "#{@table_cf_keys}/cf2" cust_and_namespace = "#{$GLOB_CUST_ENCODED}:#{cf2_namespace}" output = capture_stdout { command(:enable_key_management, cust_and_namespace) } assert(output.include?("#{$GLOB_CUST_ENCODED} #{cf2_namespace} ACTIVE"), - "Expected ACTIVE status for CF2 key, got: #{output}") - puts " >> Enabled key management for CF2" + "Expected ACTIVE status for CF2 key, got: #{output}") + puts ' >> Enabled key management for CF2' # Migrate CF1 - migrate_table_to_managed_key(@table_cf_keys, 'cf1', cf1_namespace, false) + migrate_table_to_managed_key(@table_cf_keys, 'cf1', cf1_namespace) # 
Migrate CF2 - migrate_table_to_managed_key(@table_cf_keys, 'cf2', cf2_namespace, false) + migrate_table_to_managed_key(@table_cf_keys, 'cf2', cf2_namespace) end - def migrate_table_to_managed_key(table_name, cf_name, namespace, use_namespace_attribute = false) + def migrate_table_to_managed_key(table_name, cf_name, namespace, + use_namespace_attribute: false) puts " >> Migrating table #{table_name}, CF #{cf_name} to namespace #{namespace}" - # Use atomic alter operation to remove ENCRYPTION_KEY and optionally add ENCRYPTION_KEY_NAMESPACE + # Use atomic alter operation to remove ENCRYPTION_KEY and optionally add + # ENCRYPTION_KEY_NAMESPACE if use_namespace_attribute # For shared key tables: remove ENCRYPTION_KEY and add ENCRYPTION_KEY_NAMESPACE atomically command(:alter, table_name, - { 'NAME' => cf_name, 'CONFIGURATION' => {'ENCRYPTION_KEY' => '', 'ENCRYPTION_KEY_NAMESPACE' => namespace }}) + { 'NAME' => cf_name, + 'CONFIGURATION' => { 'ENCRYPTION_KEY' => '', + 'ENCRYPTION_KEY_NAMESPACE' => namespace } }) else # For table/CF level keys: just remove ENCRYPTION_KEY, let system auto-determine namespace command(:alter, table_name, - { 'NAME' => cf_name, 'CONFIGURATION' => {'ENCRYPTION_KEY' => '' }}) + { 'NAME' => cf_name, 'CONFIGURATION' => { 'ENCRYPTION_KEY' => '' } }) end puts " >> Altered #{table_name} CF #{cf_name} to use namespace #{namespace}" @@ -415,7 +437,7 @@ def migrate_table_to_managed_key(table_name, cf_name, namespace, use_namespace_a sleep(5) # Scan all existing data to verify accessibility - scan_and_validate_table(table_name, [cf_name]) + scan_and_validate_table(table_name) # Add new data test_table = table(table_name) @@ -428,8 +450,7 @@ def migrate_table_to_managed_key(table_name, cf_name, namespace, use_namespace_a puts " >> Migration completed for #{table_name} CF #{cf_name}" end - - def scan_and_validate_table(table_name, column_families) + def scan_and_validate_table(table_name) puts " >> Scanning and validating existing data in #{table_name}" 
test_table = table(table_name) @@ -443,7 +464,7 @@ def scan_and_validate_table(table_name, column_families) end scanner.close - assert(row_count > 0, "Expected to find existing data in #{table_name}") + assert(row_count.positive?, "Expected to find existing data in #{table_name}") puts " >> Found #{row_count} rows, all accessible" end @@ -459,17 +480,22 @@ def validate_hfile_trailer(table_name, cf_name, is_post_migration, is_key_manage regions.each do |region| region.getStores.each do |store| next unless store.getColumnFamilyName == cf_name - puts " >> store file count for CF: #{cf_name} in table: #{table_name} is #{store.getStorefiles.size}" + + puts " >> store file count for CF: #{cf_name} in table: #{table_name} is " \ + "#{store.getStorefiles.size}" if is_compacted assert_equal(1, store.getStorefiles.size) else - assert_true(store.getStorefiles.size > 0) + assert_true(!store.getStorefiles.empty?) end store.getStorefiles.each do |storefile| - puts " >> Checking HFile trailer for storefile: #{storefile.getPath.getName} with sequence id: #{storefile.getMaxSequenceId} against max sequence id of store: #{store.getMaxSequenceId.getAsLong}" + puts " >> Checking HFile trailer for storefile: #{storefile.getPath.getName} " \ + "with sequence id: #{storefile.getMaxSequenceId} against max sequence id of " \ + "store: #{store.getMaxSequenceId.getAsLong}" # The flush would have created new HFiles, but the old would still be there # so we need to make sure to check the latest store only. 
next unless storefile.getMaxSequenceId == store.getMaxSequenceId.getAsLong + store_file_info = storefile.getFileInfo next unless store_file_info @@ -493,16 +519,15 @@ def validate_hfile_trailer(table_name, cf_name, is_post_migration, is_key_manage puts " >> Trailer validation passed - namespace: #{trailer.getKeyNamespace}" else assert_nil(trailer.getKeyNamespace) - puts " >> Trailer validation passed - using legacy key format" + puts ' >> Trailer validation passed - using legacy key format' end end end end end - def cleanup_old_provider_and_validate - puts "\n--- Cleaning up old key provider and final validation ---" + puts '\n--- Cleaning up old key provider and final validation ---' # Remove old KeyProvider configurations $TEST_CLUSTER.getConfiguration.unset(HConstants::CRYPTO_KEYPROVIDER_CONF_KEY) @@ -511,11 +536,11 @@ def cleanup_old_provider_and_validate # Remove old keystore FileUtils.rm_rf(@old_keystore_file) if File.directory?(@old_keystore_file) - puts " >> Removed old keystore and configuration" + puts ' >> Removed old keystore and configuration' # Restart cluster $TEST.restartMiniCluster(KeymetaTableAccessor::KEY_META_TABLE_NAME) - puts " >> Cluster restarted without old key provider" + puts ' >> Cluster restarted without old key provider' setup_hbase # Validate all data is still accessible @@ -526,32 +551,31 @@ def cleanup_old_provider_and_validate end def validate_all_tables_final - puts "\n--- Final validation - scanning all tables ---" + puts '\n--- Final validation - scanning all tables ---' @tables_metadata.each do |table_name, metadata| - if metadata[:no_encryption] - next - end + next if metadata[:no_encryption] + puts " >> Final validation for table: #{table_name} with CFs: #{metadata[:cfs].join(', ')}" - scan_and_validate_table(table_name, metadata[:cfs]) + scan_and_validate_table(table_name) puts " >> #{table_name} - all data accessible" end end def perform_major_compaction_and_validate - puts "\n--- Performing major compaction and final 
validation ---" + puts '\n--- Performing major compaction and final validation ---' $TEST_CLUSTER.compact(true) @tables_metadata.each do |table_name, metadata| - if metadata[:no_encryption] - next - end - puts " >> Validating post-compaction HFiles for table: #{table_name} with CFs: #{metadata[:cfs].join(', ')}" + next if metadata[:no_encryption] + + puts " >> Validating post-compaction HFiles for table: #{table_name} with " \ + "CFs: #{metadata[:cfs].join(', ')}" metadata[:cfs].each do |cf_name| # When using random key from system key, there is no namespace - #next if metadata[:expected_namespace][cf_name] == '*' - validate_hfile_trailer(table_name, cf_name, true, true, true, metadata[:expected_namespace][cf_name]) + validate_hfile_trailer(table_name, cf_name, true, true, true, + metadata[:expected_namespace][cf_name]) end end end @@ -559,7 +583,7 @@ def perform_major_compaction_and_validate # Utility methods def extract_and_unwrap_keys_from_tables - puts " >> Extracting and unwrapping keys from encrypted tables" + puts ' >> Extracting and unwrapping keys from encrypted tables' keys = {} @@ -601,9 +625,9 @@ def extract_key_from_table(table_name, cf_name) # Use EncryptionUtil.unwrapKey with master key alias as subject unwrapped_key = EncryptionUtil.unwrapKey($TEST_CLUSTER.getConfiguration, - @master_key_alias, wrapped_key_bytes) + @master_key_alias, wrapped_key_bytes) - return unwrapped_key.getEncoded + unwrapped_key.getEncoded end def generate_key(alias_name) @@ -618,7 +642,7 @@ def create_keystore(keystore_path, key_entries) key_entries.each do |alias_name, key_bytes| secret_key = SecretKeySpec.new(key_bytes, 'AES') store.setEntry(alias_name, KeyStore::SecretKeyEntry.new(secret_key), - KeyStore::PasswordProtection.new(password_chars)) + KeyStore::PasswordProtection.new(password_chars)) end fos = FileOutputStream.new(keystore_path) @@ -629,10 +653,9 @@ def create_keystore(keystore_path, key_entries) end end - def teardown # Cleanup temporary test directories 
(keystore files will be cleaned up with the directories) - test_base_dir = $TEST_CLUSTER.getDataTestDir().toString + test_base_dir = $TEST_CLUSTER.getDataTestDir.toString Dir.glob(File.join(test_base_dir, "*keystore_#{@test_timestamp}*")).each do |dir| FileUtils.rm_rf(dir) if File.directory?(dir) end diff --git a/hbase-shell/src/test/ruby/shell/rotate_stk_keymeta_mock_provider_test.rb b/hbase-shell/src/test/ruby/shell/rotate_stk_keymeta_mock_provider_test.rb new file mode 100644 index 000000000000..77a2a339552e --- /dev/null +++ b/hbase-shell/src/test/ruby/shell/rotate_stk_keymeta_mock_provider_test.rb @@ -0,0 +1,59 @@ +# frozen_string_literal: true + +# +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +require 'hbase_shell' +require 'stringio' +require 'hbase_constants' +require 'hbase/hbase' +require 'hbase/table' + +java_import org.apache.hadoop.hbase.io.crypto.Encryption +java_import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider +module Hbase + # Test class for rotate_stk command + class RotateSTKKeymetaTest < Test::Unit::TestCase + include TestHelpers + + def setup + setup_hbase + end + + define_test 'Test rotate_stk command' do + puts 'Testing rotate_stk command' + + # this should return false (no rotation performed) + output = capture_stdout { @shell.command(:rotate_stk) } + puts "rotate_stk output: #{output}" + assert(output.include?('No System Key change was detected'), + "Expected output to contain rotation status message, but got: #{output}") + + key_provider = Encryption.getManagedKeyProvider($TEST_CLUSTER.getConfiguration) + # Once we enable multikeyGenMode on MockManagedKeyProvider, every call should return a new key + # which should trigger a rotation. + key_provider.setMultikeyGenMode(true) + output = capture_stdout { @shell.command(:rotate_stk) } + puts "rotate_stk output: #{output}" + assert(output.include?('System Key rotation was performed successfully and cache was ' \ + 'refreshed on all region servers'), + "Expected output to contain rotation status message, but got: #{output}") + end + end +end diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java index 3d5a7e502e0a..daa67297e252 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java @@ -1376,4 +1376,10 @@ public boolean isReplicationPeerModificationEnabled() throws IOException { throw new NotImplementedException( "isReplicationPeerModificationEnabled not supported in ThriftAdmin"); } + + @Override + public void 
refreshSystemKeyCacheOnAllServers(Set regionServers) throws IOException { + throw new NotImplementedException( + "refreshSystemKeyCacheOnAllServers not supported in ThriftAdmin"); + } }