diff --git a/.gitignore b/.gitignore index 52d169dd5ad7..3ad423ef1106 100644 --- a/.gitignore +++ b/.gitignore @@ -25,3 +25,9 @@ linklint/ **/*.log tmp **/.flattened-pom.xml +.*.sw* +ID +filenametags +tags +.codegenie +.vscode diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminClient.java new file mode 100644 index 000000000000..dddbbdfeae87 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminClient.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.keymeta; + +import java.io.IOException; +import java.security.KeyException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyState; +import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos; +import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysRequest; +import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysResponse; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; + +@InterfaceAudience.Public +public class KeymetaAdminClient implements KeymetaAdmin { + private static final Logger LOG = LoggerFactory.getLogger(KeymetaAdminClient.class); + private ManagedKeysProtos.ManagedKeysService.BlockingInterface stub; + + public KeymetaAdminClient(Connection conn) throws IOException { + this.stub = ManagedKeysProtos.ManagedKeysService.newBlockingStub( + conn.getAdmin().coprocessorService()); + } + + @Override + public List enableKeyManagement(String keyCust, String keyNamespace) + throws IOException { + try { + ManagedKeysProtos.GetManagedKeysResponse response = stub.enableKeyManagement(null, + ManagedKeysRequest.newBuilder().setKeyCust(keyCust).setKeyNamespace(keyNamespace).build()); + return generateKeyDataList(response); + } catch (ServiceException e) { + throw ProtobufUtil.handleRemoteException(e); + } + } + + @Override + public List getManagedKeys(String keyCust, String keyNamespace) + throws IOException, KeyException { + try { + ManagedKeysProtos.GetManagedKeysResponse statusResponse = stub.getManagedKeys(null, + ManagedKeysRequest.newBuilder().setKeyCust(keyCust).setKeyNamespace(keyNamespace).build()); + 
return generateKeyDataList(statusResponse); + } catch (ServiceException e) { + throw ProtobufUtil.handleRemoteException(e); + } + } + + private static List generateKeyDataList( + ManagedKeysProtos.GetManagedKeysResponse stateResponse) { + List keyStates = new ArrayList<>(); + for (ManagedKeysResponse state: stateResponse.getStateList()) { + keyStates.add(new ManagedKeyData( + state.getKeyCustBytes().toByteArray(), + state.getKeyNamespace(), null, + ManagedKeyState.forValue((byte) state.getKeyState().getNumber()), + state.getKeyMetadata(), + state.getRefreshTimestamp(), state.getReadOpCount(), state.getWriteOpCount())); + } + return keyStates; + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java index 04fc5201cc10..91630215e75d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java @@ -80,6 +80,21 @@ public static byte[] wrapKey(Configuration conf, byte[] key, String algorithm) * @return the encrypted key bytes */ public static byte[] wrapKey(Configuration conf, String subject, Key key) throws IOException { + return wrapKey(conf, subject, key, null); + } + + /** + * Protect a key by encrypting it with the secret key of the given subject or kek. The + * configuration must be set up correctly for key alias resolution. Only one of the + * {@code subject} or {@code kek} needs to be specified and the other one can be {@code null}. + * @param conf configuration + * @param subject subject key alias + * @param key the key + * @param kek the key encryption key + * @return the encrypted key bytes + */ + public static byte[] wrapKey(Configuration conf, String subject, Key key, Key kek) + throws IOException { // Wrap the key with the configured encryption algorithm. 
String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Cipher cipher = Encryption.getCipher(conf, algorithm); @@ -100,8 +115,13 @@ public static byte[] wrapKey(Configuration conf, String subject, Key key) throws builder .setHash(UnsafeByteOperations.unsafeWrap(Encryption.computeCryptoKeyHash(conf, keyBytes))); ByteArrayOutputStream out = new ByteArrayOutputStream(); - Encryption.encryptWithSubjectKey(out, new ByteArrayInputStream(keyBytes), subject, conf, cipher, - iv); + if (kek != null) { + Encryption.encryptWithGivenKey(kek, out, new ByteArrayInputStream(keyBytes), cipher, iv); + } + else { + Encryption.encryptWithSubjectKey(out, new ByteArrayInputStream(keyBytes), subject, conf, + cipher, iv); + } builder.setData(UnsafeByteOperations.unsafeWrap(out.toByteArray())); // Build and return the protobuf message out.reset(); @@ -118,6 +138,21 @@ public static byte[] wrapKey(Configuration conf, String subject, Key key) throws * @return the raw key bytes */ public static Key unwrapKey(Configuration conf, String subject, byte[] value) + throws IOException, KeyException { + return unwrapKey(conf, subject, value, null); + } + + /** + * Unwrap a key by decrypting it with the secret key of the given subject. The configuration must + * be set up correctly for key alias resolution. Only one of the {@code subject} or {@code kek} + * needs to be specified and the other one can be {@code null}. 
+ * @param conf configuration + * @param subject subject key alias + * @param value the encrypted key bytes + * @param kek the key encryption key + * @return the raw key bytes + */ + public static Key unwrapKey(Configuration conf, String subject, byte[] value, Key kek) throws IOException, KeyException { EncryptionProtos.WrappedKey wrappedKey = EncryptionProtos.WrappedKey.parser().parseDelimitedFrom(new ByteArrayInputStream(value)); @@ -126,11 +161,12 @@ public static Key unwrapKey(Configuration conf, String subject, byte[] value) if (cipher == null) { throw new RuntimeException("Cipher '" + algorithm + "' not available"); } - return getUnwrapKey(conf, subject, wrappedKey, cipher); + return getUnwrapKey(conf, subject, wrappedKey, cipher, kek); } private static Key getUnwrapKey(Configuration conf, String subject, - EncryptionProtos.WrappedKey wrappedKey, Cipher cipher) throws IOException, KeyException { + EncryptionProtos.WrappedKey wrappedKey, Cipher cipher, Key kek) + throws IOException, KeyException { String configuredHashAlgorithm = Encryption.getConfiguredHashAlgorithm(conf); String wrappedHashAlgorithm = wrappedKey.getHashAlgorithm().trim(); if (!configuredHashAlgorithm.equalsIgnoreCase(wrappedHashAlgorithm)) { @@ -143,8 +179,14 @@ private static Key getUnwrapKey(Configuration conf, String subject, } ByteArrayOutputStream out = new ByteArrayOutputStream(); byte[] iv = wrappedKey.hasIv() ? 
wrappedKey.getIv().toByteArray() : null; - Encryption.decryptWithSubjectKey(out, wrappedKey.getData().newInput(), wrappedKey.getLength(), - subject, conf, cipher, iv); + if (kek != null) { + Encryption.decryptWithGivenKey(kek, out, wrappedKey.getData().newInput(), + wrappedKey.getLength(), cipher, iv); + } + else { + Encryption.decryptWithSubjectKey(out, wrappedKey.getData().newInput(), wrappedKey.getLength(), + subject, conf, cipher, iv); + } byte[] keyBytes = out.toByteArray(); if (wrappedKey.hasHash()) { if ( @@ -176,7 +218,7 @@ public static Key unwrapWALKey(Configuration conf, String subject, byte[] value) if (cipher == null) { throw new RuntimeException("Cipher '" + algorithm + "' not available"); } - return getUnwrapKey(conf, subject, wrappedKey, cipher); + return getUnwrapKey(conf, subject, wrappedKey, cipher, null); } /** diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index eba3eb657ea5..08ed56a33e4c 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -1192,6 +1192,11 @@ public enum OperationStatusCode { /** Temporary directory used for table creation and deletion */ public static final String HBASE_TEMP_DIRECTORY = ".tmp"; + /** + * Directory used for storing master keys for the cluster + */ + public static final String SYSTEM_KEYS_DIRECTORY = ".system_keys"; + public static final String SYSTEM_KEY_FILE_PREFIX = "system_key."; /** * The period (in milliseconds) between computing region server point in time metrics */ @@ -1304,6 +1309,39 @@ public enum OperationStatusCode { /** Configuration key for enabling WAL encryption, a boolean */ public static final String ENABLE_WAL_ENCRYPTION = "hbase.regionserver.wal.encryption"; + /** Property used by ManagedKeyStoreKeyProvider class to set the alias that identifies + * the current system key. 
*/ + public static final String CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY = + "hbase.crypto.managed_key_store.system.key.name"; + public static final String CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX = + "hbase.crypto.managed_key_store.cust."; + + /** Enables or disables the key management feature. */ + public static final String CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY = + "hbase.crypto.managed_keys.enabled"; + public static final boolean CRYPTO_MANAGED_KEYS_DEFAULT_ENABLED = false; + + /** The number of keys to retrieve from Key Provider per each custodian and namespace + * combination. */ + public static final String CRYPTO_MANAGED_KEYS_PER_CUST_NAMESPACE_ACTIVE_KEY_COUNT = + "hbase.crypto.managed_keys.per_cust_namespace.active_count"; + public static final int CRYPTO_MANAGED_KEYS_PER_CUST_NAMESPACE_ACTIVE_KEY_DEFAULT_COUNT = 1; + /** Enables or disables key lookup during data path as an alternative to static injection of keys + * using control path. */ + public static final String CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_ENABLED_CONF_KEY = + "hbase.crypto.managed_keys.dynamic_lookup.enabled"; + public static final boolean CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_DEFAULT_ENABLED = true; + + /** Maximum number of entries in the managed key data cache. */ + public static final String CRYPTO_MANAGED_KEYS_L1_CACHE_MAX_ENTRIES_CONF_KEY = + "hbase.crypto.managed_keys.l1_cache.max_entries"; + public static final int CRYPTO_MANAGED_KEYS_L1_CACHE_MAX_ENTRIES_DEFAULT = 1000; + + /** Maximum number of entries in the managed key active keys cache. 
*/ + public static final String CRYPTO_MANAGED_KEYS_L1_ACTIVE_CACHE_MAX_NS_ENTRIES_CONF_KEY = + "hbase.crypto.managed_keys.l1_active_cache.max_ns_entries"; + public static final int CRYPTO_MANAGED_KEYS_L1_ACTIVE_CACHE_MAX_NS_ENTRIES_DEFAULT = 100; + /** Configuration key for setting RPC codec class name */ public static final String RPC_CODEC_CONF_KEY = "hbase.client.rpc.codec"; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java index 13e335b82ee3..336c440c4493 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java @@ -33,8 +33,10 @@ import javax.crypto.spec.PBEKeySpec; import javax.crypto.spec.SecretKeySpec; import org.apache.commons.io.IOUtils; +import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.crypto.aes.AES; import org.apache.hadoop.hbase.util.Bytes; @@ -468,6 +470,19 @@ public static void encryptWithSubjectKey(OutputStream out, InputStream in, Strin if (key == null) { throw new IOException("No key found for subject '" + subject + "'"); } + encryptWithGivenKey(key, out, in, cipher, iv); + } + + /** + * Encrypts a block of plaintext with the specified symmetric key. 
+ * @param key The symmetric key + * @param out ciphertext + * @param in plaintext + * @param cipher the encryption algorithm + * @param iv the initialization vector, can be null + */ + public static void encryptWithGivenKey(Key key, OutputStream out, InputStream in, + Cipher cipher, byte[] iv) throws IOException { Encryptor e = cipher.getEncryptor(); e.setKey(key); e.setIv(iv); // can be null @@ -490,19 +505,16 @@ public static void decryptWithSubjectKey(OutputStream out, InputStream in, int o if (key == null) { throw new IOException("No key found for subject '" + subject + "'"); } - Decryptor d = cipher.getDecryptor(); - d.setKey(key); - d.setIv(iv); // can be null try { - decrypt(out, in, outLen, d); + decryptWithGivenKey(key, out, in, outLen, cipher, iv); } catch (IOException e) { // If the current cipher algorithm fails to unwrap, try the alternate cipher algorithm, if one // is configured String alternateAlgorithm = conf.get(HConstants.CRYPTO_ALTERNATE_KEY_ALGORITHM_CONF_KEY); if (alternateAlgorithm != null) { if (LOG.isDebugEnabled()) { - LOG.debug("Unable to decrypt data with current cipher algorithm '" - + conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES) + LOG.debug("Unable to decrypt data with current cipher algorithm '" + conf.get( + HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES) + "'. 
Trying with the alternate cipher algorithm '" + alternateAlgorithm + "' configured."); } @@ -510,16 +522,22 @@ public static void decryptWithSubjectKey(OutputStream out, InputStream in, int o if (alterCipher == null) { throw new RuntimeException("Cipher '" + alternateAlgorithm + "' not available"); } - d = alterCipher.getDecryptor(); - d.setKey(key); - d.setIv(iv); // can be null - decrypt(out, in, outLen, d); - } else { - throw new IOException(e); + decryptWithGivenKey(key, out, in, outLen, alterCipher, iv); + } + else { + throw e; } } } + public static void decryptWithGivenKey(Key key, OutputStream out, InputStream in, int outLen, + Cipher cipher, byte[] iv) throws IOException { + Decryptor d = cipher.getDecryptor(); + d.setKey(key); + d.setIv(iv); // can be null + decrypt(out, in, outLen, d); + } + private static ClassLoader getClassLoaderForClass(Class c) { ClassLoader cl = Thread.currentThread().getContextClassLoader(); if (cl == null) { @@ -561,6 +579,9 @@ public static KeyProvider getKeyProvider(Configuration conf) { provider = (KeyProvider) ReflectionUtils .newInstance(getClassLoaderForClass(KeyProvider.class).loadClass(providerClassName), conf); provider.init(providerParameters); + if (provider instanceof ManagedKeyProvider) { + ((ManagedKeyProvider) provider).initConfig(conf); + } if (LOG.isDebugEnabled()) { LOG.debug("Installed " + providerClassName + " into key provider cache"); } @@ -571,6 +592,11 @@ public static KeyProvider getKeyProvider(Configuration conf) { } } + @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST) + public static void clearKeyProviderCache() { + keyProviderCache.clear(); + } + public static void incrementIv(byte[] iv) { incrementIv(iv, 1); } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java index 604bede13b20..c401d3b3f6b9 100644 --- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java @@ -76,6 +76,8 @@ @InterfaceAudience.Public public class KeyStoreKeyProvider implements KeyProvider { + private static final char[] NO_PASSWORD = new char[0]; + protected KeyStore store; protected char[] password; // can be null if no password protected Properties passwordFile; // can be null if no file provided @@ -172,9 +174,15 @@ protected char[] getAliasPassword(String alias) { @Override public Key getKey(String alias) { + // First try with no password, as it is more common to have a password only for the store. try { - return store.getKey(alias, getAliasPassword(alias)); + return store.getKey(alias, NO_PASSWORD); } catch (UnrecoverableKeyException e) { + try { + return store.getKey(alias, getAliasPassword(alias)); + } catch (UnrecoverableKeyException|NoSuchAlgorithmException|KeyStoreException e2) { + // Ignore. + } throw new RuntimeException(e); } catch (KeyStoreException e) { throw new RuntimeException(e); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyData.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyData.java new file mode 100644 index 000000000000..9d2710fc5a21 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyData.java @@ -0,0 +1,311 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.io.crypto; + +import java.security.Key; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.Arrays; +import java.util.Base64; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; + +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.util.DataChecksum; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; + +/** + * This class represents an encryption key data which includes the key itself, its state, metadata + * and a prefix. The metadata encodes enough information on the key such that it can be used to + * retrieve the exact same key again in the future. If the key state is + * {@link ManagedKeyState#FAILED} expect the key to be {@code null}. + * + * The key data is represented by the following fields: + *
    + *
  • key_cust: The prefix for which this key belongs to
  • + *
  • theKey: The key capturing the bytes and encoding
  • + *
  • keyState: The state of the key (see {@link ManagedKeyState})
  • + *
  • keyMetadata: Metadata that identifies the key
  • + *
+ * + * The class provides methods to retrieve, as well as to compute a checksum + * for the key data. The checksum is used to ensure the integrity of the key data. + * + * The class also provides a method to generate an MD5 hash of the key metadata, which can be used + * for validation and identification. + */ +@InterfaceAudience.Public +public class ManagedKeyData { + /** + * Special value to be used for custodian or namespace to indicate that it is global, meaning it + * is not associated with a specific custodian or namespace. + */ + public static final String KEY_SPACE_GLOBAL = "*"; + + /** + * Encoded form of global custodian. + */ + public static final String KEY_GLOBAL_CUSTODIAN = + ManagedKeyProvider.encodeToStr(KEY_SPACE_GLOBAL.getBytes()); + + private final byte[] keyCustodian; + private final String keyNamespace; + private final Key theKey; + private final ManagedKeyState keyState; + private final String keyMetadata; + private final long refreshTimestamp; + private final long readOpCount; + private final long writeOpCount; + private volatile long keyChecksum = 0; + private byte[] keyMetadataHash; + + /** + * Constructs a new instance with the given parameters. + * + * @param key_cust The key custodian. + * @param theKey The actual key, can be {@code null}. + * @param keyState The state of the key. + * @param keyMetadata The metadata associated with the key. + * @throws NullPointerException if any of key_cust, keyState or keyMetadata is null. + */ + public ManagedKeyData(byte[] key_cust, String key_namespace, Key theKey, ManagedKeyState keyState, + String keyMetadata) { + this(key_cust, key_namespace, theKey, keyState, keyMetadata, + EnvironmentEdgeManager.currentTime(), 0, 0); + } + + /** + * Constructs a new instance with the given parameters. + * + * @param key_cust The key custodian. + * @param theKey The actual key, can be {@code null}. + * @param keyState The state of the key. + * @param keyMetadata The metadata associated with the key. 
+ * @param refreshTimestamp The timestamp when this key was last refreshed. + * @param readOpCount The current number of read operations for this key. + * @param writeOpCount The current number of write operations for this key. + * @throws NullPointerException if any of key_cust, keyState or keyMetadata is null. + */ + public ManagedKeyData(byte[] key_cust, String key_namespace, Key theKey, ManagedKeyState keyState, + String keyMetadata, long refreshTimestamp, long readOpCount, + long writeOpCount) { + Preconditions.checkNotNull(key_cust, "key_cust should not be null"); + Preconditions.checkNotNull(key_namespace, "key_namespace should not be null"); + Preconditions.checkNotNull(keyState, "keyState should not be null"); + // Only check for null metadata if state is not FAILED + if (keyState != ManagedKeyState.FAILED) { + Preconditions.checkNotNull(keyMetadata, "keyMetadata should not be null"); + } + Preconditions.checkArgument(readOpCount >= 0, "readOpCount: " + readOpCount + + " should be >= 0"); + Preconditions.checkArgument(writeOpCount >= 0, "writeOpCount: " + writeOpCount + + " should be >= 0"); + + this.keyCustodian = key_cust; + this.keyNamespace = key_namespace; + this.theKey = theKey; + this.keyState = keyState; + this.keyMetadata = keyMetadata; + this.refreshTimestamp = refreshTimestamp; + this.readOpCount = readOpCount; + this.writeOpCount = writeOpCount; + } + + @InterfaceAudience.Private + public ManagedKeyData cloneWithoutKey() { + return new ManagedKeyData(keyCustodian, keyNamespace, null, keyState, keyMetadata, + refreshTimestamp, readOpCount, writeOpCount); + } + + /** + * Returns the custodian associated with the key. + * + * @return The key custodian as a byte array. + */ + public byte[] getKeyCustodian() { + return keyCustodian; + } + + /** + * Return the key Custodian in Base64 encoded form. 
+ * @return the encoded key custodian + */ + public String getKeyCustodianEncoded() { + return Base64.getEncoder().encodeToString(keyCustodian); + } + + + /** + * Returns the namespace associated with the key. + * + * @return The namespace as a {@code String}. + */ + public String getKeyNamespace() { + return keyNamespace; + } + + /** + * Returns the actual key. + * + * @return The key as a {@code Key} object. + */ + public Key getTheKey() { + return theKey; + } + + /** + * Returns the state of the key. + * + * @return The key state as a {@code ManagedKeyState} enum value. + */ + public ManagedKeyState getKeyState() { + return keyState; + } + + /** + * Returns the metadata associated with the key. + * + * @return The key metadata as a {@code String}. + */ + public String getKeyMetadata() { + return keyMetadata; + } + + @Override + public String toString() { + return "ManagedKeyData{" + + "keyCustodian=" + Arrays.toString(keyCustodian) + + ", keyNamespace='" + keyNamespace + '\'' + + ", keyState=" + keyState + + ", keyMetadata='" + keyMetadata + '\'' + + ", refreshTimestamp=" + refreshTimestamp + + ", keyChecksum=" + getKeyChecksum() + + '}'; + } + + public long getRefreshTimestamp() { + return refreshTimestamp; + } + + /** + * @return the number of times this key has been used for read operations as of the time this + * key data was initialized. + */ + public long getReadOpCount() { + return readOpCount; + } + + /** + * @return the number of times this key has been used for write operations as of the time this + * key data was initialized. + */ + public long getWriteOpCount() { + return writeOpCount; + } + + /** + * Computes the checksum of the key. If the checksum has already been computed, this method + * returns the previously computed value. The checksum is computed using the CRC32C algorithm. + * + * @return The checksum of the key as a long value, {@code 0} if no key is available. 
+ */ + public long getKeyChecksum() { + if (theKey == null) { + return 0; + } + if (keyChecksum == 0) { + keyChecksum = constructKeyChecksum(theKey.getEncoded()); + } + return keyChecksum; + } + + public static long constructKeyChecksum(byte[] data) { + DataChecksum dataChecksum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 16); + dataChecksum.update(data, 0, data.length); + return dataChecksum.getValue(); + } + + /** + * Computes the hash of the key metadata. If the hash has already been computed, this method + * returns the previously computed value. The hash is computed using the MD5 algorithm. + * + * @return The hash of the key metadata as a byte array. + */ + public byte[] getKeyMetadataHash() { + if (keyMetadataHash == null && keyMetadata != null) { + keyMetadataHash = constructMetadataHash(keyMetadata); + } + return keyMetadataHash; + } + + /** + * Return the hash of key metadata in Base64 encoded form. + * @return the encoded hash or {@code null} if no meatadata is available. 
+ */ + public String getKeyMetadataHashEncoded() { + byte[] hash = getKeyMetadataHash(); + if (hash != null) { + return Base64.getEncoder().encodeToString(hash); + } + return null; + } + + public static byte[] constructMetadataHash(String metadata) { + MessageDigest md5; + try { + md5 = MessageDigest.getInstance("MD5"); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } + return md5.digest(metadata.getBytes()); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + + if (o == null || getClass() != o.getClass()) { + return false; + } + + ManagedKeyData that = (ManagedKeyData) o; + + return new EqualsBuilder() + .append(keyCustodian, that.keyCustodian) + .append(keyNamespace, that.keyNamespace) + .append(theKey, that.theKey) + .append(keyState, that.keyState) + .append(keyMetadata, that.keyMetadata) + .isEquals(); + } + + @Override + public int hashCode() { + return new HashCodeBuilder(17, 37) + .append(keyCustodian) + .append(keyNamespace) + .append(theKey) + .append(keyState) + .append(keyMetadata) + .toHashCode(); + } +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyProvider.java new file mode 100644 index 000000000000..27cd91380d6e --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyProvider.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
package org.apache.hadoop.hbase.io.crypto;

import java.io.IOException;
import java.util.Base64;

import edu.umd.cs.findbugs.annotations.NonNull;
import org.apache.hadoop.conf.Configuration;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * Interface for key providers of managed keys. Defines methods for generating and managing
 * managed keys, as well as handling key storage and retrieval.
 * <p>
 * The interface extends the basic {@link KeyProvider} interface with additional methods for
 * working with managed keys.
 */
@InterfaceAudience.Public
public interface ManagedKeyProvider extends KeyProvider {
  /**
   * Initialize the provider with the given configuration.
   * @param conf Hadoop configuration
   */
  void initConfig(Configuration conf);

  /**
   * Retrieve the system key using the given system identifier.
   * @param systemId system identifier
   * @return ManagedKeyData for the system key; guaranteed to be non-{@code null}
   * @throws IOException if an error occurs while retrieving the key
   */
  ManagedKeyData getSystemKey(byte[] systemId) throws IOException;

  /**
   * Retrieve a managed key for the specified custodian and namespace.
   * @param keyCust      the key custodian
   * @param keyNamespace key namespace
   * @return ManagedKeyData for the managed key; expected to be non-{@code null}
   * @throws IOException if an error occurs while retrieving the key
   */
  ManagedKeyData getManagedKey(byte[] keyCust, String keyNamespace) throws IOException;

  /**
   * Retrieve a key identified by the key metadata. The key metadata is typically generated by the
   * same key provider via the {@link #getSystemKey(byte[])} or
   * {@link #getManagedKey(byte[], String)} methods. If the key couldn't be retrieved using the
   * metadata and wrappedKey is provided, the implementation may try to decrypt the wrapped key as
   * a fallback operation.
   * @param keyMetaData key metadata, must not be {@code null}
   * @param wrappedKey  the DEK key material encrypted with the corresponding KEK, if available
   * @return ManagedKeyData for the key represented by the metadata; expected to be
   *         non-{@code null}
   * @throws IOException if an error occurs while retrieving the key
   */
  @NonNull
  ManagedKeyData unwrapKey(String keyMetaData, byte[] wrappedKey) throws IOException;

  /**
   * Decode the given key custodian, which is encoded as a Base64 string.
   * @param encodedKeyCust the encoded key custodian
   * @return the decoded key custodian bytes
   * @throws IOException if the encoded key custodian is not a valid Base64 string
   */
  static byte[] decodeToBytes(String encodedKeyCust) throws IOException {
    try {
      return Base64.getDecoder().decode(encodedKeyCust);
    } catch (IllegalArgumentException e) {
      throw new IOException(
        "Failed to decode specified key custodian as Base64 string: " + encodedKeyCust, e);
    }
  }

  /**
   * Encode the given key custodian as a Base64 string.
   * @param keyCust the key custodian
   * @return the encoded key custodian as a Base64 string
   */
  static String encodeToStr(byte[] keyCust) {
    return Base64.getEncoder().encodeToString(keyCust);
  }
}
*/ + DISABLED((byte) 4), + ; + + private static Map lookupByVal; + + private final byte val; + + private ManagedKeyState(byte val) { + this.val = val; + } + + /** + * Returns the numeric value of the managed key status. + * @return byte value + */ + public byte getVal() { + return val; + } + + /** + * Returns the ManagedKeyState for the given numeric value. + * @param val The numeric value of the desired ManagedKeyState + * @return The ManagedKeyState corresponding to the given value + */ + public static ManagedKeyState forValue(byte val) { + if (lookupByVal == null) { + Map tbl = new HashMap<>(); + for (ManagedKeyState e: ManagedKeyState.values()) { + tbl.put(e.getVal(), e); + } + lookupByVal = tbl; + } + return lookupByVal.get(val); + } + + /** + * This is used to determine if a key is usable for encryption/decryption. + * + * @param state The key state to check + * @return true if the key state is ACTIVE or INACTIVE, false otherwise + */ + public static boolean isUsable(ManagedKeyState state) { + return state == ACTIVE || state == INACTIVE; + } +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyStoreKeyProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyStoreKeyProvider.java new file mode 100644 index 000000000000..b9005e1b27e7 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyStoreKeyProvider.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
package org.apache.hadoop.hbase.io.crypto;

import java.io.IOException;
import java.security.Key;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.GsonUtil;
import org.apache.yetus.audience.InterfaceAudience;

// Use the shaded TypeToken so it matches the shaded Gson instance returned by GsonUtil;
// the unshaded com.google.gson.reflect.TypeToken would pull in a separate Gson dependency.
import org.apache.hbase.thirdparty.com.google.gson.reflect.TypeToken;

/**
 * A {@link ManagedKeyProvider} backed by a Java keystore (via {@link KeyStoreKeyProvider}).
 * Key aliases and per-custodian active flags are looked up from the Hadoop configuration, and
 * key metadata is serialized as a small JSON map of {alias, custodian}.
 */
@InterfaceAudience.Public
public class ManagedKeyStoreKeyProvider extends KeyStoreKeyProvider implements ManagedKeyProvider {
  /** JSON metadata field holding the keystore alias of the key. */
  public static final String KEY_METADATA_ALIAS = "KeyAlias";
  /** JSON metadata field holding the Base64-encoded key custodian. */
  public static final String KEY_METADATA_CUST = "KeyCustodian";

  private static final java.lang.reflect.Type KEY_METADATA_TYPE =
    new TypeToken<Map<String, String>>() {
    }.getType();

  private Configuration conf;

  @Override
  public void initConfig(Configuration conf) {
    this.conf = conf;
  }

  @Override
  public ManagedKeyData getSystemKey(byte[] clusterId) {
    checkConfig();
    String systemKeyAlias =
      conf.get(HConstants.CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY, null);
    if (systemKeyAlias == null) {
      throw new IllegalStateException("No alias configured for system key");
    }
    Key key = getKey(systemKeyAlias);
    if (key == null) {
      throw new IllegalStateException("Unable to find system key with alias: " + systemKeyAlias);
    }
    // Encode clusterId too, for consistency with the encoding of key custodians.
    String keyMetadata =
      generateKeyMetadata(systemKeyAlias, ManagedKeyProvider.encodeToStr(clusterId));
    return new ManagedKeyData(clusterId, ManagedKeyData.KEY_SPACE_GLOBAL, key,
      ManagedKeyState.ACTIVE, keyMetadata);
  }

  @Override
  public ManagedKeyData getManagedKey(byte[] keyCust, String keyNamespace) throws IOException {
    checkConfig();
    String encodedCust = ManagedKeyProvider.encodeToStr(keyCust);
    String aliasConfKey =
      HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encodedCust + ".alias";
    String keyMetadata = generateKeyMetadata(conf.get(aliasConfKey, null), encodedCust);
    return unwrapKey(keyMetadata, null);
  }

  @Override
  public ManagedKeyData unwrapKey(String keyMetadataStr, byte[] wrappedKey) throws IOException {
    // Fail fast with a clear message if initConfig() was never called, instead of NPE below.
    checkConfig();
    Map<String, String> keyMetadata =
      GsonUtil.getDefaultInstance().fromJson(keyMetadataStr, KEY_METADATA_TYPE);
    String encodedCust = keyMetadata.get(KEY_METADATA_CUST);
    String activeStatusConfKey =
      HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encodedCust + ".active";
    boolean isActive = conf.getBoolean(activeStatusConfKey, true);
    byte[] keyCust = ManagedKeyProvider.decodeToBytes(encodedCust);
    String alias = keyMetadata.get(KEY_METADATA_ALIAS);
    Key key = alias != null ? getKey(alias) : null;
    if (key != null) {
      return new ManagedKeyData(keyCust, ManagedKeyData.KEY_SPACE_GLOBAL, key,
        isActive ? ManagedKeyState.ACTIVE : ManagedKeyState.INACTIVE, keyMetadataStr);
    }
    // No key material: FAILED when the key is supposed to be active, DISABLED otherwise.
    return new ManagedKeyData(keyCust, ManagedKeyData.KEY_SPACE_GLOBAL, null,
      isActive ? ManagedKeyState.FAILED : ManagedKeyState.DISABLED, keyMetadataStr);
  }

  private void checkConfig() {
    if (conf == null) {
      throw new IllegalStateException("initConfig is not called or config is null");
    }
  }

  /**
   * Serialize key metadata as a JSON map of alias and encoded custodian.
   * @param aliasName   the keystore alias, may be {@code null} when unconfigured
   * @param encodedCust the Base64-encoded key custodian
   * @return the metadata as a JSON string
   */
  public static String generateKeyMetadata(String aliasName, String encodedCust) {
    Map<String, String> metadata = new HashMap<>(2);
    metadata.put(KEY_METADATA_ALIAS, aliasName);
    metadata.put(KEY_METADATA_CUST, encodedCust);
    return GsonUtil.getDefaultInstance().toJson(metadata, HashMap.class);
  }
}
package org.apache.hadoop.hbase.keymeta;

import java.io.IOException;
import java.security.KeyException;
import java.util.List;

import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * KeymetaAdmin is an interface for administrative functions related to managed keys: enabling key
 * management for a custodian/namespace and inspecting the state of the managed keys.
 */
@InterfaceAudience.Public
public interface KeymetaAdmin {
  /**
   * Enables key management for the specified custodian and namespace.
   * @param keyCust      the key custodian in Base64-encoded format
   * @param keyNamespace the namespace for the key management
   * @return the list of {@link ManagedKeyData} objects, each identifying a key and its current
   *         state
   * @throws IOException  if an error occurs while enabling key management
   * @throws KeyException if a key-specific error occurs
   */
  List<ManagedKeyData> enableKeyManagement(String keyCust, String keyNamespace)
    throws IOException, KeyException;

  /**
   * Gets the status of all the keys for the specified custodian and namespace.
   * @param keyCust      the key custodian in Base64-encoded format
   * @param keyNamespace the namespace for the key management
   * @return the list of {@link ManagedKeyData} objects, each identifying a key and its current
   *         state
   * @throws IOException  if an error occurs while retrieving the keys
   * @throws KeyException if a key-specific error occurs
   */
  List<ManagedKeyData> getManagedKeys(String keyCust, String keyNamespace)
    throws IOException, KeyException;
}
+ * * @param a first third * @param b second third * @param c third third * @return New array made from a, b and c */ public static byte[] add(final byte[] a, final byte[] b, final byte[] c) { - byte[] result = new byte[a.length + b.length + c.length]; + return add(a, b, c, EMPTY_BYTE_ARRAY); + } + + /** + * Concatenate byte arrays. + * + * @param a first fourth + * @param b second fourth + * @param c third fourth + * @param d fourth fourth + * @return New array made from a, b, c, and d + */ + public static byte[] add(final byte[] a, final byte[] b, final byte[] c, final byte[] d) { + byte[] result = new byte[a.length + b.length + c.length + d.length]; System.arraycopy(a, 0, result, 0, a.length); System.arraycopy(b, 0, result, a.length, b.length); System.arraycopy(c, 0, result, a.length + b.length, c.length); + System.arraycopy(d, 0, result, a.length + b.length + c.length, d.length); return result; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java index fe6f3bc238a9..da4662d2c8a0 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java @@ -297,6 +297,10 @@ public static void setRootDir(final Configuration c, final Path root) { c.set(HConstants.HBASE_DIR, root.toString()); } + public static Path getSystemKeyDir(final Configuration c) throws IOException { + return new Path(getRootDir(c), HConstants.SYSTEM_KEYS_DIRECTORY); + } + public static void setFsDefault(final Configuration c, final Path root) { c.set("fs.defaultFS", root.toString()); // for hadoop 0.21+ } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java index e592b1f935a1..a4a8ce82b2a8 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java +++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java @@ -19,8 +19,10 @@ import java.io.IOException; import java.util.concurrent.atomic.LongAdder; + import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.gson.Gson; import org.apache.hbase.thirdparty.com.google.gson.GsonBuilder; import org.apache.hbase.thirdparty.com.google.gson.LongSerializationPolicy; import org.apache.hbase.thirdparty.com.google.gson.TypeAdapter; @@ -33,6 +35,8 @@ @InterfaceAudience.Private public final class GsonUtil { + private static Gson INSTANCE; + private GsonUtil() { } @@ -62,4 +66,11 @@ public LongAdder read(JsonReader in) throws IOException { public static GsonBuilder createGsonWithDisableHtmlEscaping() { return createGson().disableHtmlEscaping(); } + + public static Gson getDefaultInstance() { + if (INSTANCE == null) { + INSTANCE = createGson().create(); + } + return INSTANCE; + } } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/MockManagedKeyProvider.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/MockManagedKeyProvider.java new file mode 100644 index 000000000000..a3397f96df70 --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/MockManagedKeyProvider.java @@ -0,0 +1,167 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
package org.apache.hadoop.hbase.io.crypto;

import java.io.IOException;
import java.security.Key;
import java.security.NoSuchAlgorithmException;
import java.util.HashMap;
import java.util.Map;
import javax.crypto.KeyGenerator;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.Bytes;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * A simple implementation of ManagedKeyProvider for testing. It generates a key on demand for a
 * given custodian. One can control the state of a key by calling {@link #setMockedKeyState} and
 * can rotate a key by calling {@link #setMockedKey}.
 * <p>
 * Key metadata is the string {@code "<custodian>:<alias>:<namespace>:<checksum>"}; the
 * custodian/alias pair alone forms the "partial" metadata.
 */
public class MockManagedKeyProvider extends MockAesKeyProvider implements ManagedKeyProvider {
  protected static final Logger LOG = LoggerFactory.getLogger(MockManagedKeyProvider.class);

  // When true, getKey() generates a fresh key on every call (simulates key rotation).
  private boolean multikeyGenMode;
  // namespace -> (alias -> key material)
  private final Map<String, Map<String, Key>> keys = new HashMap<>();
  // namespace -> (alias -> most recently produced key data)
  private final Map<String, Map<String, ManagedKeyData>> lastGenKeyData = new HashMap<>();
  // Keep references of all generated keys by both their full and partial metadata.
  private final Map<String, Key> allGeneratedKeys = new HashMap<>();
  // alias -> mocked state override; absent means ACTIVE.
  private final Map<String, ManagedKeyState> keyState = new HashMap<>();
  private String systemKeyAlias = "default_system_key_alias";

  @Override
  public void initConfig(Configuration conf) {
    // NO-OP
  }

  @Override
  public ManagedKeyData getSystemKey(byte[] systemId) throws IOException {
    return getKey(systemId, systemKeyAlias, ManagedKeyData.KEY_SPACE_GLOBAL);
  }

  @Override
  public ManagedKeyData getManagedKey(byte[] keyCust, String keyNamespace) throws IOException {
    // The mock uses the custodian bytes themselves as the alias.
    String alias = Bytes.toString(keyCust);
    return getKey(keyCust, alias, keyNamespace);
  }

  @Override
  public ManagedKeyData unwrapKey(String keyMetadata, byte[] wrappedKey) throws IOException {
    // Metadata format: "<custodian>:<alias>:<namespace>:<checksum>" (see getKey()).
    String[] metaToks = keyMetadata.split(":");
    if (allGeneratedKeys.containsKey(keyMetadata)) {
      ManagedKeyState mockedState = this.keyState.get(metaToks[1]);
      ManagedKeyData managedKeyData = new ManagedKeyData(metaToks[0].getBytes(), metaToks[2],
        allGeneratedKeys.get(keyMetadata),
        mockedState == null ? ManagedKeyState.ACTIVE : mockedState, keyMetadata);
      return registerKeyData(metaToks[1], managedKeyData);
    }
    // Unknown metadata: report a retrieval failure with no key material.
    return new ManagedKeyData(metaToks[0].getBytes(), metaToks[2], null, ManagedKeyState.FAILED,
      keyMetadata);
  }

  /**
   * Returns the last key data produced for the given alias and namespace, or {@code null}.
   */
  public ManagedKeyData getLastGeneratedKeyData(String alias, String keyNamespace) {
    Map<String, ManagedKeyData> byAlias = lastGenKeyData.get(keyNamespace);
    return byAlias == null ? null : byAlias.get(alias);
  }

  // Records the key data as the latest for its namespace/alias and returns it for chaining.
  private ManagedKeyData registerKeyData(String alias, ManagedKeyData managedKeyData) {
    lastGenKeyData.computeIfAbsent(managedKeyData.getKeyNamespace(), ns -> new HashMap<>())
      .put(alias, managedKeyData);
    return managedKeyData;
  }

  public void setMultikeyGenMode(boolean multikeyGenMode) {
    this.multikeyGenMode = multikeyGenMode;
  }

  public void setMockedKeyState(String alias, ManagedKeyState status) {
    keyState.put(alias, status);
  }

  public void setMockedKey(String alias, Key key, String keyNamespace) {
    keys.computeIfAbsent(keyNamespace, ns -> new HashMap<>()).put(alias, key);
  }

  public Key getMockedKey(String alias, String keySpace) {
    Map<String, Key> keysForSpace = keys.get(keySpace);
    return keysForSpace != null ? keysForSpace.get(alias) : null;
  }

  public void setClusterKeyAlias(String alias) {
    this.systemKeyAlias = alias;
  }

  public String getSystemKeyAlias() {
    return this.systemKeyAlias;
  }

  /**
   * Generate a new AES-256 secret key.
   * @return the key
   */
  public static Key generateSecretKey() {
    KeyGenerator keyGen;
    try {
      keyGen = KeyGenerator.getInstance("AES");
    } catch (NoSuchAlgorithmException e) {
      throw new RuntimeException(e);
    }
    keyGen.init(256);
    return keyGen.generateKey();
  }

  // Produces (generating on demand unless the alias is FAILED/DISABLED) the key data for the
  // given custodian/alias/namespace, registering its metadata for later unwrapKey() lookups.
  private ManagedKeyData getKey(byte[] keyCust, String alias, String keyNamespace) {
    ManagedKeyState mockedState = this.keyState.get(alias);
    Map<String, Key> keySpace = keys.computeIfAbsent(keyNamespace, ns -> new HashMap<>());
    Key key = null;
    if (mockedState != ManagedKeyState.FAILED && mockedState != ManagedKeyState.DISABLED) {
      if (multikeyGenMode || !keySpace.containsKey(alias)) {
        keySpace.put(alias, generateSecretKey());
      }
      key = keySpace.get(alias);
      if (key == null) {
        // A null key was explicitly mocked via setMockedKey().
        return null;
      }
    }
    long checksum = key == null ? 0 : ManagedKeyData.constructKeyChecksum(key.getEncoded());
    String partialMetadata = Bytes.toString(keyCust) + ":" + alias;
    String keyMetadata = partialMetadata + ":" + keyNamespace + ":" + checksum;
    allGeneratedKeys.put(partialMetadata, key);
    allGeneratedKeys.put(keyMetadata, key);
    ManagedKeyData managedKeyData = new ManagedKeyData(keyCust, keyNamespace, key,
      mockedState == null ? ManagedKeyState.ACTIVE : mockedState, keyMetadata);
    return registerKeyData(alias, managedKeyData);
  }
}
@Parameterized.Parameter(0) + public boolean withPasswordOnAlias; + @Parameterized.Parameter(1) + public boolean withPasswordFile; + + @Parameterized.Parameters(name = "withPasswordOnAlias={0} withPasswordFile={1}") + public static Collection parameters() { + return Arrays.asList(new Object[][] { + { Boolean.TRUE, Boolean.TRUE }, + { Boolean.TRUE, Boolean.FALSE }, + { Boolean.FALSE, Boolean.TRUE }, + { Boolean.FALSE, Boolean.FALSE }, + }); + } + + @Before + public void setUp() throws Exception { KEY = MessageDigest.getInstance("SHA-256").digest(Bytes.toBytes(ALIAS)); // Create a JKECS store containing a test secret key KeyStore store = KeyStore.getInstance("JCEKS"); store.load(null, PASSWORD.toCharArray()); store.setEntry(ALIAS, new KeyStore.SecretKeyEntry(new SecretKeySpec(KEY, "AES")), - new KeyStore.PasswordProtection(PASSWORD.toCharArray())); + new KeyStore.PasswordProtection(withPasswordOnAlias ? PASSWORD.toCharArray() : new char[0])); + Properties p = new Properties(); + addCustomEntries(store, p); // Create the test directory String dataDir = TEST_UTIL.getDataTestDir().toString(); new File(dataDir).mkdirs(); @@ -73,8 +97,6 @@ public static void setUp() throws Exception { os.close(); } // Write the password file - Properties p = new Properties(); - p.setProperty(ALIAS, PASSWORD); passwordFile = new File(dataDir, "keystore.pw"); os = new FileOutputStream(passwordFile); try { @@ -82,26 +104,27 @@ public static void setUp() throws Exception { } finally { os.close(); } - } - @Test - public void testKeyStoreKeyProviderWithPassword() throws Exception { - KeyProvider provider = new KeyStoreKeyProvider(); - provider.init("jceks://" + storeFile.toURI().getPath() + "?password=" + PASSWORD); - Key key = provider.getKey(ALIAS); - assertNotNull(key); - byte[] keyBytes = key.getEncoded(); - assertEquals(keyBytes.length, KEY.length); - for (int i = 0; i < KEY.length; i++) { - assertEquals(keyBytes[i], KEY[i]); + provider = createProvider(); + if (withPasswordFile) { + 
provider.init("jceks://" + storeFile.toURI().getPath() + "?passwordFile=" + + URLEncoder.encode(passwordFile.getAbsolutePath(), "UTF-8")); + } + else { + provider.init("jceks://" + storeFile.toURI().getPath() + "?password=" + PASSWORD); } } + protected KeyProvider createProvider() { + return new KeyStoreKeyProvider(); + } + + protected void addCustomEntries(KeyStore store, Properties passwdProps) throws Exception { + passwdProps.setProperty(ALIAS, PASSWORD); + } + @Test - public void testKeyStoreKeyProviderWithPasswordFile() throws Exception { - KeyProvider provider = new KeyStoreKeyProvider(); - provider.init("jceks://" + storeFile.toURI().getPath() + "?passwordFile=" - + URLEncoder.encode(passwordFile.getAbsolutePath(), "UTF-8")); + public void testKeyStoreKeyProvider() throws Exception { Key key = provider.getKey(ALIAS); assertNotNull(key); byte[] keyBytes = key.getEncoded(); diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyData.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyData.java new file mode 100644 index 000000000000..d4b4fd019a64 --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestManagedKeyData.java @@ -0,0 +1,226 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
package org.apache.hadoop.hbase.io.crypto;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;

import java.security.Key;
import java.security.NoSuchAlgorithmException;
import java.util.Base64;

import javax.crypto.KeyGenerator;

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/** Unit tests for the {@link ManagedKeyData} value object. */
@Category({ MiscTests.class, SmallTests.class })
public class TestManagedKeyData {
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestManagedKeyData.class);

  private byte[] keyCust;
  private String keyNamespace;
  private Key theKey;
  private ManagedKeyState keyState;
  private String keyMetadata;
  private ManagedKeyData managedKeyData;

  @Before
  public void setUp() throws NoSuchAlgorithmException {
    keyCust = "testCustodian".getBytes();
    keyNamespace = "testNamespace";
    KeyGenerator keyGen = KeyGenerator.getInstance("AES");
    keyGen.init(256);
    theKey = keyGen.generateKey();
    keyState = ManagedKeyState.ACTIVE;
    keyMetadata = "testMetadata";
    managedKeyData = new ManagedKeyData(keyCust, keyNamespace, theKey, keyState, keyMetadata);
  }

  @Test
  public void testConstructor() {
    assertNotNull(managedKeyData);
    assertEquals(keyNamespace, managedKeyData.getKeyNamespace());
    assertArrayEquals(keyCust, managedKeyData.getKeyCustodian());
    assertEquals(theKey, managedKeyData.getTheKey());
    assertEquals(keyState, managedKeyData.getKeyState());
    assertEquals(keyMetadata, managedKeyData.getKeyMetadata());
  }

  @Test
  public void testConstructorWithCounts() {
    long refreshTimestamp = System.currentTimeMillis();
    long readOpCount = 10;
    long writeOpCount = 5;
    ManagedKeyData keyDataWithCounts = new ManagedKeyData(keyCust, keyNamespace, theKey, keyState,
      keyMetadata, refreshTimestamp, readOpCount, writeOpCount);

    assertEquals(refreshTimestamp, keyDataWithCounts.getRefreshTimestamp());
    assertEquals(readOpCount, keyDataWithCounts.getReadOpCount());
    assertEquals(writeOpCount, keyDataWithCounts.getWriteOpCount());
  }

  @Test
  public void testConstructorNullChecks() {
    assertThrows(NullPointerException.class,
      () -> new ManagedKeyData(null, keyNamespace, theKey, keyState, keyMetadata));
    assertThrows(NullPointerException.class,
      () -> new ManagedKeyData(keyCust, null, theKey, keyState, keyMetadata));
    assertThrows(NullPointerException.class,
      () -> new ManagedKeyData(keyCust, keyNamespace, theKey, null, keyMetadata));
    assertThrows(NullPointerException.class,
      () -> new ManagedKeyData(keyCust, keyNamespace, theKey, ManagedKeyState.ACTIVE, null));
  }

  @Test
  public void testConstructorWithFailedStateAndNullMetadata() {
    // FAILED is the one state where null metadata and a null key are permitted.
    ManagedKeyData keyData =
      new ManagedKeyData(keyCust, keyNamespace, null, ManagedKeyState.FAILED, null);
    assertNotNull(keyData);
    assertEquals(ManagedKeyState.FAILED, keyData.getKeyState());
    assertNull(keyData.getKeyMetadata());
    assertNull(keyData.getTheKey());
  }

  @Test
  public void testConstructorNegativeCountChecks() {
    assertThrows(IllegalArgumentException.class,
      () -> new ManagedKeyData(keyCust, keyNamespace, theKey, keyState, keyMetadata, 0, -1, 0));
    assertThrows(IllegalArgumentException.class,
      () -> new ManagedKeyData(keyCust, keyNamespace, theKey, keyState, keyMetadata, 0, 0, -1));
  }

  @Test
  public void testCloneWithoutKey() {
    ManagedKeyData cloned = managedKeyData.cloneWithoutKey();
    assertNull(cloned.getTheKey());
    // Compare custodian content, not references: assertEquals on arrays is identity-based.
    assertArrayEquals(managedKeyData.getKeyCustodian(), cloned.getKeyCustodian());
    assertEquals(managedKeyData.getKeyNamespace(), cloned.getKeyNamespace());
    assertEquals(managedKeyData.getKeyState(), cloned.getKeyState());
    assertEquals(managedKeyData.getKeyMetadata(), cloned.getKeyMetadata());
  }

  @Test
  public void testGetKeyCustodianEncoded() {
    String encoded = managedKeyData.getKeyCustodianEncoded();
    assertNotNull(encoded);
    assertArrayEquals(keyCust, Base64.getDecoder().decode(encoded));
  }

  @Test
  public void testGetKeyChecksum() {
    long checksum = managedKeyData.getKeyChecksum();
    assertNotEquals(0, checksum);

    // A null key yields a zero checksum.
    ManagedKeyData nullKeyData =
      new ManagedKeyData(keyCust, keyNamespace, null, keyState, keyMetadata);
    assertEquals(0, nullKeyData.getKeyChecksum());
  }

  @Test
  public void testConstructKeyChecksum() {
    byte[] data = "testData".getBytes();
    long checksum = ManagedKeyData.constructKeyChecksum(data);
    assertNotEquals(0, checksum);
  }

  @Test
  public void testGetKeyMetadataHash() {
    byte[] hash = managedKeyData.getKeyMetadataHash();
    assertNotNull(hash);
    assertEquals(16, hash.length); // MD5 hash is 16 bytes long
  }

  @Test
  public void testGetKeyMetadataHashEncoded() {
    String encodedHash = managedKeyData.getKeyMetadataHashEncoded();
    assertNotNull(encodedHash);
    assertEquals(24, encodedHash.length()); // Base64 encoded MD5 hash is 24 characters long
  }

  @Test
  public void testGetKeyMetadataHashEncodedWithNullHash() {
    // FAILED state with null metadata should result in a null hash.
    ManagedKeyData keyData =
      new ManagedKeyData("custodian".getBytes(), "namespace", null, ManagedKeyState.FAILED, null);

    String encoded = keyData.getKeyMetadataHashEncoded();
    assertNull(encoded);
  }

  @Test
  public void testConstructMetadataHash() {
    byte[] hash = ManagedKeyData.constructMetadataHash(keyMetadata);
    assertNotNull(hash);
    assertEquals(16, hash.length); // MD5 hash is 16 bytes long
  }

  @Test
  public void testToString() {
    String toString = managedKeyData.toString();
    assertTrue(toString.contains("keyCustodian"));
    assertTrue(toString.contains("keyNamespace"));
    assertTrue(toString.contains("keyState"));
    assertTrue(toString.contains("keyMetadata"));
    assertTrue(toString.contains("refreshTimestamp"));
    assertTrue(toString.contains("keyChecksum"));
  }

  @Test
  public void testEquals() {
    ManagedKeyData same = new ManagedKeyData(keyCust, keyNamespace, theKey, keyState, keyMetadata);
    assertEquals(managedKeyData, same);

    ManagedKeyData different =
      new ManagedKeyData("differentCust".getBytes(), keyNamespace, theKey, keyState, keyMetadata);
    assertNotEquals(managedKeyData, different);
  }

  @Test
  public void testHashCode() {
    ManagedKeyData same = new ManagedKeyData(keyCust, keyNamespace, theKey, keyState, keyMetadata);
    assertEquals(managedKeyData.hashCode(), same.hashCode());

    ManagedKeyData different =
      new ManagedKeyData("differentCust".getBytes(), keyNamespace, theKey, keyState, keyMetadata);
    assertNotEquals(managedKeyData.hashCode(), different.hashCode());
  }

  @Test
  public void testConstants() {
    assertEquals("*", ManagedKeyData.KEY_SPACE_GLOBAL);
    assertEquals(ManagedKeyProvider.encodeToStr(ManagedKeyData.KEY_SPACE_GLOBAL.getBytes()),
      ManagedKeyData.KEY_GLOBAL_CUSTODIAN);
  }
}
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.io.crypto; + +import static org.apache.hadoop.hbase.io.crypto.ManagedKeyStoreKeyProvider.KEY_METADATA_ALIAS; +import static org.apache.hadoop.hbase.io.crypto.ManagedKeyStoreKeyProvider.KEY_METADATA_CUST; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import java.security.KeyStore; +import java.security.MessageDigest; +import java.util.Arrays; +import java.util.Base64; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; +import java.util.UUID; +import javax.crypto.spec.SecretKeySpec; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.GsonUtil; + +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import 
org.junit.runner.RunWith; +import org.junit.runners.BlockJUnit4ClassRunner; +import org.junit.runners.Parameterized; +import org.junit.runners.Suite; + +@RunWith(Suite.class) +@Suite.SuiteClasses({ TestManagedKeyProvider.TestManagedKeyStoreKeyProvider.class, + TestManagedKeyProvider.TestManagedKeyProviderDefault.class, }) +@Category({ MiscTests.class, SmallTests.class }) +public class TestManagedKeyProvider { + @RunWith(Parameterized.class) + @Category({ MiscTests.class, SmallTests.class }) + public static class TestManagedKeyStoreKeyProvider extends TestKeyStoreKeyProvider { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestManagedKeyStoreKeyProvider.class); + + private static final String SYSTEM_KEY_ALIAS = "system-alias"; + + private Configuration conf = HBaseConfiguration.create(); + private int nPrefixes = 2; + private ManagedKeyProvider managedKeyProvider; + private Map prefix2key = new HashMap<>(); + private Map prefix2alias = new HashMap<>(); + private String clusterId; + private byte[] systemKey; + + @Before + public void setUp() throws Exception { + super.setUp(); + managedKeyProvider = (ManagedKeyProvider) provider; + managedKeyProvider.initConfig(conf); + } + + protected KeyProvider createProvider() { + return new ManagedKeyStoreKeyProvider(); + } + + protected void addCustomEntries(KeyStore store, Properties passwdProps) throws Exception { + super.addCustomEntries(store, passwdProps); + for (int i = 0; i < nPrefixes; ++i) { + String prefix = "prefix+ " + i; + String alias = prefix + "-alias"; + byte[] key = MessageDigest.getInstance("SHA-256").digest(Bytes.toBytes(alias)); + prefix2alias.put(new Bytes(prefix.getBytes()), alias); + prefix2key.put(new Bytes(prefix.getBytes()), new Bytes(key)); + store.setEntry(alias, new KeyStore.SecretKeyEntry(new SecretKeySpec(key, "AES")), + new KeyStore.PasswordProtection( + withPasswordOnAlias ? 
PASSWORD.toCharArray() : new char[0])); + + String encPrefix = Base64.getEncoder().encodeToString(prefix.getBytes()); + String confKey = HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encPrefix + "." + + "alias"; + conf.set(confKey, alias); + + passwdProps.setProperty(alias, PASSWORD); + + clusterId = UUID.randomUUID().toString(); + systemKey = MessageDigest.getInstance("SHA-256").digest( + Bytes.toBytes(SYSTEM_KEY_ALIAS)); + store.setEntry(SYSTEM_KEY_ALIAS, new KeyStore.SecretKeyEntry( + new SecretKeySpec(systemKey, "AES")), + new KeyStore.PasswordProtection(withPasswordOnAlias ? PASSWORD.toCharArray() : + new char[0])); + + conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY, SYSTEM_KEY_ALIAS); + + passwdProps.setProperty(SYSTEM_KEY_ALIAS, PASSWORD); + } + } + + private void addEntry(String alias, String prefix) { + String encPrefix = Base64.getEncoder().encodeToString(prefix.getBytes()); + String confKey = HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encPrefix + "." 
+ + "alias"; + conf.set(confKey, alias); + } + + @Test + public void testMissingConfig() throws Exception { + managedKeyProvider.initConfig(null); + RuntimeException ex = assertThrows(RuntimeException.class, + () -> managedKeyProvider.getSystemKey(null)); + assertEquals("initConfig is not called or config is null", ex.getMessage()); + } + + @Test + public void testGetManagedKey() throws Exception { + for (Bytes prefix : prefix2key.keySet()) { + ManagedKeyData keyData = managedKeyProvider.getManagedKey(prefix.get(), + ManagedKeyData.KEY_SPACE_GLOBAL); + assertKeyData(keyData, ManagedKeyState.ACTIVE, prefix2key.get(prefix).get(), prefix.get(), + prefix2alias.get(prefix)); + } + } + + @Test + public void testGetInactiveKey() throws Exception { + Bytes firstPrefix = prefix2key.keySet().iterator().next(); + String encPrefix = Base64.getEncoder().encodeToString(firstPrefix.get()); + conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encPrefix + ".active", + "false"); + ManagedKeyData keyData = managedKeyProvider.getManagedKey(firstPrefix.get(), + ManagedKeyData.KEY_SPACE_GLOBAL); + assertNotNull(keyData); + assertKeyData(keyData, ManagedKeyState.INACTIVE, prefix2key.get(firstPrefix).get(), + firstPrefix.get(), prefix2alias.get(firstPrefix)); + } + + @Test + public void testGetInvalidKey() throws Exception { + byte[] invalidPrefixBytes = "invalid".getBytes(); + ManagedKeyData keyData = managedKeyProvider.getManagedKey(invalidPrefixBytes, + ManagedKeyData.KEY_SPACE_GLOBAL); + assertNotNull(keyData); + assertKeyData(keyData, ManagedKeyState.FAILED, null, invalidPrefixBytes, null); + } + + @Test + public void testGetDisabledKey() throws Exception { + byte[] invalidPrefix = new byte[] { 1, 2, 3 }; + String invalidPrefixEnc = ManagedKeyProvider.encodeToStr(invalidPrefix); + conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + invalidPrefixEnc + ".active", + "false"); + ManagedKeyData keyData = managedKeyProvider.getManagedKey(invalidPrefix, + 
ManagedKeyData.KEY_SPACE_GLOBAL); + assertNotNull(keyData); + assertKeyData(keyData, ManagedKeyState.DISABLED, null, + invalidPrefix, null); + } + + @Test + public void testGetSystemKey() throws Exception { + ManagedKeyData clusterKeyData = managedKeyProvider.getSystemKey(clusterId.getBytes()); + assertKeyData(clusterKeyData, ManagedKeyState.ACTIVE, systemKey, clusterId.getBytes(), + SYSTEM_KEY_ALIAS); + conf.unset(HConstants.CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY); + RuntimeException ex = assertThrows(RuntimeException.class, + () -> managedKeyProvider.getSystemKey(null)); + assertEquals("No alias configured for system key", ex.getMessage()); + conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY, "non_existing_alias"); + ex = assertThrows(RuntimeException.class, + () -> managedKeyProvider.getSystemKey(null)); + assertTrue(ex.getMessage().startsWith("Unable to find system key with alias:")); + } + + @Test + public void testUnwrapInvalidKey() throws Exception { + String invalidAlias = "invalidAlias"; + byte[] invalidPrefix = new byte[] { 1, 2, 3 }; + String invalidPrefixEnc = ManagedKeyProvider.encodeToStr(invalidPrefix); + String invalidMetadata = ManagedKeyStoreKeyProvider.generateKeyMetadata(invalidAlias, + invalidPrefixEnc); + ManagedKeyData keyData = managedKeyProvider.unwrapKey(invalidMetadata, null); + assertNotNull(keyData); + assertKeyData(keyData, ManagedKeyState.FAILED, null, invalidPrefix, + invalidAlias); + } + + @Test + public void testUnwrapDisabledKey() throws Exception { + String invalidAlias = "invalidAlias"; + byte[] invalidPrefix = new byte[] { 1, 2, 3 }; + String invalidPrefixEnc = ManagedKeyProvider.encodeToStr(invalidPrefix); + conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + invalidPrefixEnc + ".active", + "false"); + String invalidMetadata = ManagedKeyStoreKeyProvider.generateKeyMetadata(invalidAlias, + invalidPrefixEnc); + ManagedKeyData keyData = managedKeyProvider.unwrapKey(invalidMetadata, null); 
+ assertNotNull(keyData); + assertKeyData(keyData, ManagedKeyState.DISABLED, null, invalidPrefix, invalidAlias); + } + + private void assertKeyData(ManagedKeyData keyData, ManagedKeyState expKeyState, byte[] key, + byte[] prefixBytes, String alias) throws Exception { + assertNotNull(keyData); + assertEquals(expKeyState, keyData.getKeyState()); + if (key == null) { + assertNull(keyData.getTheKey()); + } + else { + byte[] keyBytes = keyData.getTheKey().getEncoded(); + assertEquals(key.length, keyBytes.length); + assertEquals(new Bytes(key), keyBytes); + } + Map keyMetadata = GsonUtil.getDefaultInstance().fromJson(keyData.getKeyMetadata(), + HashMap.class); + assertNotNull(keyMetadata); + assertEquals(new Bytes(prefixBytes), keyData.getKeyCustodian()); + assertEquals(alias, keyMetadata.get(KEY_METADATA_ALIAS)); + assertEquals(Base64.getEncoder().encodeToString(prefixBytes), + keyMetadata.get(KEY_METADATA_CUST)); + assertEquals(keyData, managedKeyProvider.unwrapKey(keyData.getKeyMetadata(), null)); + } + } + + @RunWith(BlockJUnit4ClassRunner.class) + @Category({ MiscTests.class, SmallTests.class }) + public static class TestManagedKeyProviderDefault { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestManagedKeyProviderDefault.class); + + @Test public void testEncodeToStr() { + byte[] input = { 72, 101, 108, 108, 111 }; // "Hello" in ASCII + String expected = "SGVsbG8="; + String actual = ManagedKeyProvider.encodeToStr(input); + + assertEquals("Encoded string should match expected Base64 representation", expected, actual); + } + + @Test public void testDecodeToBytes() throws Exception { + String input = "SGVsbG8="; // "Hello" in Base64 + byte[] expected = { 72, 101, 108, 108, 111 }; + byte[] actual = ManagedKeyProvider.decodeToBytes(input); + + assertTrue("Decoded bytes should match expected ASCII representation", + Arrays.equals(expected, actual)); + } + + @Test public void testEncodeToStrAndDecodeToBytes() throws 
Exception { + byte[] originalBytes = { 1, 2, 3, 4, 5 }; + String encoded = ManagedKeyProvider.encodeToStr(originalBytes); + byte[] decoded = ManagedKeyProvider.decodeToBytes(encoded); + + assertTrue("Decoded bytes should match original bytes", + Arrays.equals(originalBytes, decoded)); + } + + @Test(expected = Exception.class) public void testDecodeToBytes_InvalidInput() + throws Exception { + String invalidInput = "This is not a valid Base64 string!"; + ManagedKeyProvider.decodeToBytes(invalidInput); + } + + @Test public void testRoundTrip_LargeInput() throws Exception { + byte[] largeInput = new byte[1000]; + for (int i = 0; i < largeInput.length; i++) { + largeInput[i] = (byte) (i % 256); + } + + String encoded = ManagedKeyProvider.encodeToStr(largeInput); + byte[] decoded = ManagedKeyProvider.decodeToBytes(encoded); + + assertTrue("Large input should survive round-trip encoding and decoding", + Arrays.equals(largeInput, decoded)); + } + } +} diff --git a/hbase-protocol-shaded/src/main/protobuf/server/ManagedKeys.proto b/hbase-protocol-shaded/src/main/protobuf/server/ManagedKeys.proto new file mode 100644 index 000000000000..452151b98628 --- /dev/null +++ b/hbase-protocol-shaded/src/main/protobuf/server/ManagedKeys.proto @@ -0,0 +1,58 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +syntax = "proto2"; +package hbase.pb; + +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "ManagedKeysProtos"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +message ManagedKeysRequest { + required string key_cust = 1; + required string key_namespace = 2; +} + +enum ManagedKeyState { + KEY_ACTIVE = 1; + KEY_INACTIVE = 2; + KEY_FAILED = 3; + KEY_DISABLED = 4; +} + +message ManagedKeysResponse { + required string key_cust = 1; + required string key_namespace = 2; + required ManagedKeyState key_state = 3; + optional string key_metadata = 4; + optional int64 refresh_timestamp = 5; + optional int64 read_op_count = 6; + optional int64 write_op_count = 7; +} + +message GetManagedKeysResponse { + repeated ManagedKeysResponse state = 1; +} + +service ManagedKeysService { + rpc EnableKeyManagement(ManagedKeysRequest) + returns (GetManagedKeysResponse); + rpc GetManagedKeys(ManagedKeysRequest) + returns (GetManagedKeysResponse); +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java index bf9640196f62..12cc7433e7be 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java @@ -52,6 +52,11 @@ import org.apache.hadoop.hbase.http.InfoServer; import org.apache.hadoop.hbase.io.util.MemorySizeUtil; import org.apache.hadoop.hbase.ipc.RpcServerInterface; +import org.apache.hadoop.hbase.keymeta.KeymetaAdmin; +import org.apache.hadoop.hbase.keymeta.KeymetaAdminImpl; +import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; +import org.apache.hadoop.hbase.keymeta.SystemKeyAccessor; +import org.apache.hadoop.hbase.keymeta.SystemKeyCache; import 
org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder; @@ -60,6 +65,7 @@ import org.apache.hadoop.hbase.regionserver.MemStoreLAB; import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost; import org.apache.hadoop.hbase.regionserver.ShutdownHook; +import org.apache.hadoop.hbase.security.SecurityUtil; import org.apache.hadoop.hbase.security.Superusers; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; @@ -187,6 +193,10 @@ public abstract class HBaseServerBase> extends protected final NettyEventLoopGroupConfig eventLoopGroupConfig; + private SystemKeyCache systemKeyCache; + protected KeymetaAdminImpl keymetaAdmin; + protected ManagedKeyDataCache managedKeyDataCache; + private void setupSignalHandlers() { if (!SystemUtils.IS_OS_WINDOWS) { HBasePlatformDependent.handle("HUP", (number, name) -> { @@ -283,6 +293,8 @@ public HBaseServerBase(Configuration conf, String name) throws IOException { initializeFileSystem(); + keymetaAdmin = new KeymetaAdminImpl(this); + int choreServiceInitialSize = conf.getInt(CHORE_SERVICE_INITIAL_POOL_SIZE, DEFAULT_CHORE_SERVICE_INITIAL_POOL_SIZE); this.choreService = new ChoreService(getName(), choreServiceInitialSize, true); @@ -403,6 +415,27 @@ public ZKWatcher getZooKeeper() { return zooKeeper; } + @Override + public KeymetaAdmin getKeymetaAdmin() { + return keymetaAdmin; + } + + @Override + public ManagedKeyDataCache getManagedKeyDataCache() { + return managedKeyDataCache; + } + + @Override + public SystemKeyCache getSystemKeyCache() { + return systemKeyCache; + } + + protected void buildSystemKeyCache() throws IOException { + if (systemKeyCache == null && SecurityUtil.isKeyManagementEnabled(conf)) { + systemKeyCache = SystemKeyCache.createCache(new SystemKeyAccessor(this)); + } + } + protected final void shutdownChore(ScheduledChore chore) { if (chore != null) { 
chore.shutdown(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/MockRegionServerServices.java index 32ad587ad96d..0996fbf21c52 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/MockRegionServerServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/MockRegionServerServices.java @@ -38,6 +38,9 @@ import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.ipc.RpcServerInterface; +import org.apache.hadoop.hbase.keymeta.KeymetaAdmin; +import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; +import org.apache.hadoop.hbase.keymeta.SystemKeyCache; import org.apache.hadoop.hbase.mob.MobFileCache; import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager; import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager; @@ -256,6 +259,18 @@ public ChoreService getChoreService() { return null; } + @Override public SystemKeyCache getSystemKeyCache() { + return null; + } + + @Override public ManagedKeyDataCache getManagedKeyDataCache() { + return null; + } + + @Override public KeymetaAdmin getKeymetaAdmin() { + return null; + } + @Override public void updateRegionFavoredNodesMapping(String encodedRegionName, List favoredNodes) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java index d99807e46b06..c0ddad9109ad 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java @@ -23,6 +23,9 @@ import org.apache.hadoop.hbase.client.AsyncClusterConnection; import org.apache.hadoop.hbase.client.AsyncConnection; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.keymeta.KeymetaAdmin; +import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; +import 
org.apache.hadoop.hbase.keymeta.SystemKeyCache; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; @@ -83,6 +86,21 @@ default AsyncConnection getAsyncConnection() { /** Returns The {@link ChoreService} instance for this server */ ChoreService getChoreService(); + /** + * @return the cache for cluster keys. + */ + public SystemKeyCache getSystemKeyCache(); + + /** + * @return the cache for managed keys. + */ + public ManagedKeyDataCache getManagedKeyDataCache(); + + /** + * @return the admin for keymeta. + */ + public KeymetaAdmin getKeymetaAdmin(); + /** Returns Return the FileSystem object used (can return null!). */ // TODO: Distinguish between "dataFs" and "walFs". default FileSystem getFileSystem() { @@ -104,4 +122,4 @@ default FileSystem getFileSystem() { default boolean isStopping() { return false; } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java new file mode 100644 index 000000000000..a9aea5e8fef2 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeyManagementBase.java @@ -0,0 +1,188 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.keymeta; + +import java.io.IOException; +import java.security.KeyException; +import java.util.HashSet; +import java.util.Set; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.io.crypto.Encryption; +import org.apache.hadoop.hbase.io.crypto.KeyProvider; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyState; +import org.apache.hadoop.hbase.security.SecurityUtil; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A base class for all keymeta accessor/manager implementations. + */ +@InterfaceAudience.Private +public abstract class KeyManagementBase { + protected static final Logger LOG = LoggerFactory.getLogger(KeyManagementBase.class); + + private Server server; + private final Configuration configuration; + + private Boolean isDynamicLookupEnabled; + private Boolean isKeyManagementEnabled; + private Integer perCustNamespaceActiveKeyCount; + + /** + * Construct with a server instance. Configuration is derived from the server. + * + * @param server the server instance + */ + public KeyManagementBase(Server server) { + this(server.getConfiguration()); + this.server = server; + } + + /** + * Construct with a custom configuration and no server. 
+ * + * @param configuration the configuration instance + */ + public KeyManagementBase(Configuration configuration) { + if (configuration == null) { + throw new IllegalArgumentException("Configuration must be non-null"); + } + this.configuration = configuration; + } + + protected Server getServer() { + return server; + } + + protected Configuration getConfiguration() { + return configuration; + } + + /** + * A utility method for getting the managed key provider. + * @return the key provider + * @throws RuntimeException if no provider is configured or if the configured provider is not an + * instance of ManagedKeyProvider + */ + protected ManagedKeyProvider getKeyProvider() { + KeyProvider provider = Encryption.getKeyProvider(getConfiguration()); + if (!(provider instanceof ManagedKeyProvider)) { + throw new RuntimeException("KeyProvider: " + provider.getClass().getName() + + " expected to be of type ManagedKeyProvider"); + } + return (ManagedKeyProvider) provider; + } + + /** + * A utility method for checking if dynamic lookup is enabled. + * @return true if dynamic lookup is enabled + */ + protected boolean isDynamicLookupEnabled() { + if (isDynamicLookupEnabled == null) { + isDynamicLookupEnabled = getConfiguration().getBoolean( + HConstants.CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_ENABLED_CONF_KEY, + HConstants.CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_DEFAULT_ENABLED); + } + return isDynamicLookupEnabled; + } + + /** + * Check if key management is enabled, otherwise throw exception. + * @throws IOException if key management is not enabled. 
+ */ + protected void assertKeyManagementEnabled() throws IOException { + if (!isKeyManagementEnabled()) { + throw new IOException("Key management is currently not enabled in HBase configuration"); + } + } + + protected boolean isKeyManagementEnabled() { + if (isKeyManagementEnabled == null) { + isKeyManagementEnabled = SecurityUtil.isKeyManagementEnabled(getConfiguration()); + } + return isKeyManagementEnabled; + } + + protected int getPerCustodianNamespaceActiveKeyConfCount() throws IOException { + if (perCustNamespaceActiveKeyCount == null) { + perCustNamespaceActiveKeyCount = getConfiguration().getInt( + HConstants.CRYPTO_MANAGED_KEYS_PER_CUST_NAMESPACE_ACTIVE_KEY_COUNT, + HConstants.CRYPTO_MANAGED_KEYS_PER_CUST_NAMESPACE_ACTIVE_KEY_DEFAULT_COUNT); + } + if (perCustNamespaceActiveKeyCount <= 0) { + throw new IOException("Invalid value: " + perCustNamespaceActiveKeyCount + " configured for: " + + HConstants.CRYPTO_MANAGED_KEYS_PER_CUST_NAMESPACE_ACTIVE_KEY_COUNT); + } + return perCustNamespaceActiveKeyCount; + } + + /** + * Retrieves specified number of managed keys from the key provider. An attempt is made to + * retrieve the specified number of keys, but the real number of keys retrieved may be less than + * the specified number if the key provider is not capable of producing multiple active keys. If + * existing keys are provided, it will be used to ensure that keys retrieved are not the same as + * those that are already retrieved. + * + * @param encKeyCust the encoded key custodian + * @param key_cust the key custodian + * @param keyNamespace the key namespace + * @param nKeysToRetrieve the number of keys to retrieve + * @param existingKeys the existing keys, typically the active keys already retrieved from the + * key provider. 
+ * @return the retrieved keys + * @throws IOException if an error occurs + * @throws KeyException if an error occurs + */ + protected Set retrieveManagedKeys(String encKeyCust, byte[] key_cust, + String keyNamespace, int nKeysToRetrieve, Set existingKeys) + throws IOException, KeyException { + Set retrievedKeys = new HashSet<>(nKeysToRetrieve); + ManagedKeyProvider provider = getKeyProvider(); + for (int i = 0; i < nKeysToRetrieve; ++i) { + ManagedKeyData pbeKey = provider.getManagedKey(key_cust, keyNamespace); + if (pbeKey == null) { + throw new IOException("Invalid null managed key received from key provider"); + } + if (retrievedKeys.contains(pbeKey) || existingKeys.contains(pbeKey)) { + // This typically means, the key provider is not capable of producing multiple active keys. + LOG.info("enableManagedKeys: specified (custodian: {}, namespace: {}) is configured " + + " to have {} active keys, but received only {} unique keys.", + encKeyCust, keyNamespace, existingKeys.size() + nKeysToRetrieve, + existingKeys.size() + retrievedKeys.size()); + break; + } + retrievedKeys.add(pbeKey); + LOG.info("enableManagedKeys: got managed key with status: {} and metadata: {} for " + + "(custodian: {}, namespace: {})", pbeKey.getKeyState(), pbeKey.getKeyMetadata(), + encKeyCust, keyNamespace); + if (pbeKey.getKeyState() != ManagedKeyState.ACTIVE) { + LOG.info("enableManagedKeys: received non-ACTIVE key with status: {} with metadata: {} for " + + "(custodian: {}, namespace: {})", + pbeKey.getKeyState(), pbeKey.getKeyMetadata(), encKeyCust, keyNamespace); + break; + } + } + return retrievedKeys; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminImpl.java new file mode 100644 index 000000000000..61bb2062e9e7 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminImpl.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache 
Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.keymeta; + +import java.io.IOException; +import java.security.KeyException; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyState; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@InterfaceAudience.Private +public class KeymetaAdminImpl extends KeymetaTableAccessor implements KeymetaAdmin { + private static final Logger LOG = LoggerFactory.getLogger(KeymetaAdminImpl.class); + + public KeymetaAdminImpl(Server server) { + super(server); + } + + @Override + public List enableKeyManagement(String keyCust, String keyNamespace) + throws IOException, KeyException { + assertKeyManagementEnabled(); + LOG.info("Trying to enable key management on custodian: {} under namespace: {}", keyCust, + keyNamespace); + byte[] key_cust = ManagedKeyProvider.decodeToBytes(keyCust); + int perCustNamespaceActiveKeyConfCount = getPerCustodianNamespaceActiveKeyConfCount(); + + // Check if 
(cust, namespace) pair is already enabled and if there are enough number of + // active keys. + List activeKeys = getActiveKeys(key_cust, keyNamespace); + if (activeKeys.size() >= perCustNamespaceActiveKeyConfCount) { + LOG.info("enableManagedKeys: specified (custodian: {}, namespace: {}) already has " + + " {} number of managed keys active, which satisfies the configured minimum: {}", + keyCust, keyNamespace, activeKeys.size(), perCustNamespaceActiveKeyConfCount); + return activeKeys; + } + + int nKeysToRetrieve = perCustNamespaceActiveKeyConfCount - activeKeys.size(); + Set retrievedKeys = retrieveManagedKeys( + keyCust, key_cust, keyNamespace, nKeysToRetrieve, new HashSet<>(activeKeys)); + for (ManagedKeyData pbeKey : retrievedKeys) { + addKey(pbeKey); + } + return retrievedKeys.stream().toList(); + } + + @Override + public List getManagedKeys(String keyCust, String keyNamespace) + throws IOException, KeyException { + assertKeyManagementEnabled(); + LOG.info("Getting key statuses for custodian: {} under namespace: {}", keyCust, + keyNamespace); + byte[] key_cust = ManagedKeyProvider.decodeToBytes(keyCust); + return getAllKeys(key_cust, keyNamespace); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaMasterService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaMasterService.java new file mode 100644 index 000000000000..68f78cd12dd3 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaMasterService.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.keymeta; + +import java.io.IOException; + +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@InterfaceAudience.Private +public class KeymetaMasterService extends KeyManagementBase { + private static final Logger LOG = LoggerFactory.getLogger(KeymetaMasterService.class); + + private final MasterServices master; + + private static final TableDescriptorBuilder TABLE_DESCRIPTOR_BUILDER = TableDescriptorBuilder + .newBuilder(KeymetaTableAccessor.KEY_META_TABLE_NAME).setRegionReplication(1) + .setPriority(HConstants.SYSTEMTABLE_QOS) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder( + KeymetaTableAccessor.KEY_META_INFO_FAMILY) + .setScope(HConstants.REPLICATION_SCOPE_LOCAL).setMaxVersions(1) + .setInMemory(true) + .build()); + + public KeymetaMasterService(MasterServices masterServices) { + super(masterServices); + master = masterServices; + } + + public void init() throws IOException { + if (!isKeyManagementEnabled()) { + return; + } + if (!master.getTableDescriptors().exists(KeymetaTableAccessor.KEY_META_TABLE_NAME)) { + LOG.info("{} table not found. 
Creating.", + KeymetaTableAccessor.KEY_META_TABLE_NAME.getNameWithNamespaceInclAsString()); + master.createSystemTable(TABLE_DESCRIPTOR_BUILDER.build()); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaServiceEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaServiceEndpoint.java new file mode 100644 index 000000000000..067f98340575 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaServiceEndpoint.java @@ -0,0 +1,184 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.keymeta; + +import java.io.IOException; +import java.security.KeyException; +import java.util.Base64; +import java.util.Collections; +import java.util.List; + +import org.apache.hadoop.hbase.CoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor; +import org.apache.hadoop.hbase.coprocessor.HasMasterServices; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; +import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos; +import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.GetManagedKeysResponse; +import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysRequest; +import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysResponse; +import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysService; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; +import org.apache.hbase.thirdparty.com.google.protobuf.Service; + +/** + * This class implements a coprocessor service endpoint for the key management metadata operations. + * It handles the following methods: + * + * This endpoint is designed to work in conjunction with the {@link KeymetaAdmin} + * interface, which provides the actual implementation of the key metadata operations. + *

+ */ +@CoreCoprocessor @InterfaceAudience.Private +public class KeymetaServiceEndpoint implements MasterCoprocessor { + private static final Logger LOG = LoggerFactory.getLogger(KeymetaServiceEndpoint.class); + + private MasterServices master = null; + + private final ManagedKeysService managedKeysService = new KeymetaAdminServiceImpl(); + + /** + * Starts the coprocessor by initializing the reference to the + * {@link org.apache.hadoop.hbase.master.MasterServices} * instance. + * + * @param env The coprocessor environment. + * @throws IOException If an error occurs during initialization. + */ + @Override + public void start(CoprocessorEnvironment env) throws IOException { + if (!(env instanceof HasMasterServices)) { + throw new IOException("Does not implement HMasterServices"); + } + + master = ((HasMasterServices) env).getMasterServices(); + } + + /** + * Returns an iterable of the available coprocessor services, which includes the + * {@link ManagedKeysService} implemented by + * {@link KeymetaServiceEndpoint.KeymetaAdminServiceImpl}. + * + * @return An iterable of the available coprocessor services. + */ + @Override + public Iterable getServices() { + return Collections.singleton(managedKeysService); + } + + /** + * The implementation of the {@link ManagedKeysProtos.ManagedKeysService} + * interface, which provides the actual method implementations for enabling key management. + */ + @InterfaceAudience.Private + public class KeymetaAdminServiceImpl extends ManagedKeysService { + + /** + * Enables key management for a given tenant and namespace, as specified in the provided + * request. + * + * @param controller The RPC controller. + * @param request The request containing the tenant and table specifications. + * @param done The callback to be invoked with the response. 
+ */ + @Override + public void enableKeyManagement(RpcController controller, ManagedKeysRequest request, + RpcCallback done) { + ManagedKeysResponse.Builder builder = getResponseBuilder(controller, request); + if (builder.getKeyCust() != null && ! builder.getKeyCust().isEmpty()) { + try { + List managedKeyStates = master.getKeymetaAdmin() + .enableKeyManagement(request.getKeyCust(), request.getKeyNamespace()); + done.run(generateKeyStateResponse(managedKeyStates, builder)); + } catch (IOException e) { + CoprocessorRpcUtils.setControllerException(controller, e); + } catch (KeyException e) { + CoprocessorRpcUtils.setControllerException(controller, new IOException(e)); + } + } + } + + @Override + public void getManagedKeys(RpcController controller, ManagedKeysRequest request, + RpcCallback done) { + ManagedKeysResponse.Builder builder = getResponseBuilder(controller, request); + if (builder.getKeyCust() != null && ! builder.getKeyCust().isEmpty()) { + try { + List managedKeyStates = master.getKeymetaAdmin() + .getManagedKeys(request.getKeyCust(), request.getKeyNamespace()); + done.run(generateKeyStateResponse(managedKeyStates, builder)); + } catch (IOException e) { + CoprocessorRpcUtils.setControllerException(controller, e); + } catch (KeyException e) { + CoprocessorRpcUtils.setControllerException(controller, new IOException(e)); + } + } + } + } + + @InterfaceAudience.Private + public static ManagedKeysResponse.Builder getResponseBuilder(RpcController controller, + ManagedKeysRequest request) { + ManagedKeysResponse.Builder builder = ManagedKeysResponse.newBuilder() + .setKeyNamespace(request.getKeyNamespace()); + byte[] key_cust = convertToKeyCustBytes(controller, request, builder); + if (key_cust != null) { + builder.setKeyCustBytes(ByteString.copyFrom(key_cust)); + } + return builder; + } + + // Assumes that all ManagedKeyData objects belong to the same custodian and namespace. 
+ @InterfaceAudience.Private + public static GetManagedKeysResponse generateKeyStateResponse( + List managedKeyStates, ManagedKeysResponse.Builder builder) { + GetManagedKeysResponse.Builder responseBuilder = GetManagedKeysResponse.newBuilder(); + for (ManagedKeyData keyData: managedKeyStates) { + builder.setKeyState(ManagedKeysProtos.ManagedKeyState.valueOf( + keyData.getKeyState().getVal())) + .setKeyMetadata(keyData.getKeyMetadata()) + .setRefreshTimestamp(keyData.getRefreshTimestamp()) + .setReadOpCount(keyData.getReadOpCount()) + .setWriteOpCount(keyData.getWriteOpCount()) + ; + responseBuilder.addState(builder.build()); + } + return responseBuilder.build(); + } + + @InterfaceAudience.Private + public static byte[] convertToKeyCustBytes(RpcController controller, ManagedKeysRequest request, + ManagedKeysResponse.Builder builder) { + byte[] key_cust = null; + try { + key_cust = Base64.getDecoder().decode(request.getKeyCust()); + } catch (IllegalArgumentException e) { + builder.setKeyState(ManagedKeysProtos.ManagedKeyState.KEY_FAILED); + CoprocessorRpcUtils.setControllerException(controller, new IOException( + "Failed to decode specified prefix as Base64 string: " + request.getKeyCust(), e)); + } + return key_cust; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaTableAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaTableAccessor.java new file mode 100644 index 000000000000..6efb24a57fff --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaTableAccessor.java @@ -0,0 +1,336 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.keymeta; + +import java.io.IOException; +import java.security.Key; +import java.security.KeyException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Increment; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.filter.PrefixFilter; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyState; +import org.apache.hadoop.hbase.security.EncryptionUtil; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Accessor for keymeta table as part of key management. 
+ */ +@InterfaceAudience.Private +public class KeymetaTableAccessor extends KeyManagementBase { + private static final String KEY_META_INFO_FAMILY_STR = "info"; + + public static final byte[] KEY_META_INFO_FAMILY = Bytes.toBytes(KEY_META_INFO_FAMILY_STR); + + public static final TableName KEY_META_TABLE_NAME = + TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "keymeta"); + + public static final String DEK_METADATA_QUAL_NAME = "m"; + public static final byte[] DEK_METADATA_QUAL_BYTES = Bytes.toBytes(DEK_METADATA_QUAL_NAME); + + public static final String DEK_CHECKSUM_QUAL_NAME = "c"; + public static final byte[] DEK_CHECKSUM_QUAL_BYTES = Bytes.toBytes(DEK_CHECKSUM_QUAL_NAME); + + public static final String DEK_WRAPPED_BY_STK_QUAL_NAME = "w"; + public static final byte[] DEK_WRAPPED_BY_STK_QUAL_BYTES = + Bytes.toBytes(DEK_WRAPPED_BY_STK_QUAL_NAME); + + public static final String STK_CHECKSUM_QUAL_NAME = "s"; + public static final byte[] STK_CHECKSUM_QUAL_BYTES = Bytes.toBytes(STK_CHECKSUM_QUAL_NAME); + + public static final String REFRESHED_TIMESTAMP_QUAL_NAME = "t"; + public static final byte[] REFRESHED_TIMESTAMP_QUAL_BYTES = + Bytes.toBytes(REFRESHED_TIMESTAMP_QUAL_NAME); + + public static final String KEY_STATE_QUAL_NAME = "k"; + public static final byte[] KEY_STATE_QUAL_BYTES = Bytes.toBytes(KEY_STATE_QUAL_NAME); + + public static final String READ_OP_COUNT_QUAL_NAME = "R"; + public static final byte[] READ_OP_COUNT_QUAL_BYTES = Bytes.toBytes(READ_OP_COUNT_QUAL_NAME); + + public static final String WRITE_OP_COUNT_QUAL_NAME = "W"; + public static final byte[] WRITE_OP_COUNT_QUAL_BYTES = Bytes.toBytes(WRITE_OP_COUNT_QUAL_NAME); + + public KeymetaTableAccessor(Server server) { + super(server); + } + + /** + * Add the specified key to the keymeta table. + * @param keyData The key data. + * @throws IOException when there is an underlying IOException. 
+ */ + public void addKey(ManagedKeyData keyData) throws IOException { + assertKeyManagementEnabled(); + final Put putForMetadata = addMutationColumns(new Put(constructRowKeyForMetadata(keyData)), + keyData); + Connection connection = getServer().getConnection(); + try (Table table = connection.getTable(KEY_META_TABLE_NAME)) { + table.put(putForMetadata); + } + } + + /** + * Get all the keys for the specified key_cust and key_namespace. + * + * @param key_cust The key custodian. + * @param keyNamespace The namespace + * @return a list of key data, one for each key, can be empty when none were found. + * @throws IOException when there is an underlying IOException. + * @throws KeyException when there is an underlying KeyException. + */ + @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST) + public List getAllKeys(byte[] key_cust, String keyNamespace) + throws IOException, KeyException { + assertKeyManagementEnabled(); + Connection connection = getServer().getConnection(); + byte[] prefixForScan = Bytes.add(Bytes.toBytes(key_cust.length), key_cust, + Bytes.toBytes(keyNamespace)); + PrefixFilter prefixFilter = new PrefixFilter(prefixForScan); + Scan scan = new Scan(); + scan.setFilter(prefixFilter); + scan.addFamily(KEY_META_INFO_FAMILY); + + try (Table table = connection.getTable(KEY_META_TABLE_NAME)) { + ResultScanner scanner = table.getScanner(scan); + List allKeys = new ArrayList<>(); + for (Result result : scanner) { + ManagedKeyData keyData = parseFromResult(getServer(), key_cust, keyNamespace, result); + if (keyData != null) { + allKeys.add(keyData); + } + } + return allKeys; + } + } + + /** + * Get all the active keys for the specified key_cust and key_namespace. + * + * @param key_cust The prefix + * @param keyNamespace The namespace + * @return a list of key data, one for each active key, can be empty when none were found. + * @throws IOException when there is an underlying IOException. 
+ * @throws KeyException when there is an underlying KeyException. + */ + public List getActiveKeys(byte[] key_cust, String keyNamespace) + throws IOException, KeyException { + assertKeyManagementEnabled(); + List activeKeys = new ArrayList<>(); + for (ManagedKeyData keyData : getAllKeys(key_cust, keyNamespace)) { + if (keyData.getKeyState() == ManagedKeyState.ACTIVE) { + activeKeys.add(keyData); + } + } + return activeKeys; + } + + /** + * Get the specific key identified by key_cust, keyNamespace and keyState. + * + * @param key_cust The prefix. + * @param keyNamespace The namespace. + * @param keyState The state of the key. + * @return the key or {@code null} + * @throws IOException when there is an underlying IOException. + * @throws KeyException when there is an underlying KeyException. + */ + public ManagedKeyData getKey(byte[] key_cust, String keyNamespace, ManagedKeyState keyState) + throws IOException, KeyException { + return getKeyInternal(key_cust, keyNamespace, new byte[] { keyState.getVal() }); + } + + /** + * Get the specific key identified by key_cust, keyNamespace and keyMetadata. + * + * @param key_cust The prefix. + * @param keyNamespace The namespace. + * @param keyMetadata The metadata. + * @return the key or {@code null} + * @throws IOException when there is an underlying IOException. + * @throws KeyException when there is an underlying KeyException. + */ + public ManagedKeyData getKey(byte[] key_cust, String keyNamespace, String keyMetadata) + throws IOException, KeyException { + return getKeyInternal(key_cust, keyNamespace, + ManagedKeyData.constructMetadataHash(keyMetadata)); + } + + /** + * Internal helper method to get a key using the provided metadata hash. + * + * @param key_cust The prefix. + * @param keyNamespace The namespace. + * @param keyMetadataHash The metadata hash or state value. + * @return the key or {@code null} + * @throws IOException when there is an underlying IOException. 
+ * @throws KeyException when there is an underlying KeyException. + */ + private ManagedKeyData getKeyInternal(byte[] key_cust, String keyNamespace, + byte[] keyMetadataHash) throws IOException, KeyException { + assertKeyManagementEnabled(); + Connection connection = getServer().getConnection(); + try (Table table = connection.getTable(KEY_META_TABLE_NAME)) { + byte[] rowKey = constructRowKeyForMetadata(key_cust, keyNamespace, keyMetadataHash); + Result result = table.get(new Get(rowKey)); + return parseFromResult(getServer(), key_cust, keyNamespace, result); + } + } + + /** + * Report read or write operation count on the specific key identified by key_cust, keyNamespace + * and keyMetadata. The reported value is added to the existing operation count using the + * Increment mutation. + * @param key_cust The prefix. + * @param keyNamespace The namespace. + * @param keyMetadata The metadata. + * @throws IOException when there is an underlying IOException. + */ + public void reportOperation(byte[] key_cust, String keyNamespace, String keyMetadata, long count, + boolean isReadOperation) throws IOException { + assertKeyManagementEnabled(); + Connection connection = getServer().getConnection(); + try (Table table = connection.getTable(KEY_META_TABLE_NAME)) { + byte[] rowKey = constructRowKeyForMetadata(key_cust, keyNamespace, + ManagedKeyData.constructMetadataHash(keyMetadata)); + Increment incr = new Increment(rowKey) + .addColumn(KEY_META_INFO_FAMILY, + isReadOperation ? READ_OP_COUNT_QUAL_BYTES : WRITE_OP_COUNT_QUAL_BYTES, + count); + table.increment(incr); + } + } + + /** + * Add the mutation columns to the given Put that are derived from the keyData. 
+ */ + private Put addMutationColumns(Put put, ManagedKeyData keyData) throws IOException { + ManagedKeyData latestSystemKey = getServer().getSystemKeyCache().getLatestSystemKey(); + if (keyData.getTheKey() != null) { + byte[] dekWrappedBySTK = EncryptionUtil.wrapKey(getServer().getConfiguration(), null, + keyData.getTheKey(), latestSystemKey.getTheKey()); + put.addColumn(KEY_META_INFO_FAMILY, DEK_CHECKSUM_QUAL_BYTES, + Bytes.toBytes(keyData.getKeyChecksum())) + .addColumn(KEY_META_INFO_FAMILY, DEK_WRAPPED_BY_STK_QUAL_BYTES, dekWrappedBySTK) + .addColumn(KEY_META_INFO_FAMILY, STK_CHECKSUM_QUAL_BYTES, + Bytes.toBytes(latestSystemKey.getKeyChecksum())) + ; + } + Put result = put.setDurability(Durability.SKIP_WAL) + .setPriority(HConstants.SYSTEMTABLE_QOS) + .addColumn(KEY_META_INFO_FAMILY, REFRESHED_TIMESTAMP_QUAL_BYTES, + Bytes.toBytes(keyData.getRefreshTimestamp())) + .addColumn(KEY_META_INFO_FAMILY, KEY_STATE_QUAL_BYTES, + new byte[] { keyData.getKeyState().getVal() }) + ; + + // Only add metadata column if metadata is not null + String metadata = keyData.getKeyMetadata(); + if (metadata != null) { + result.addColumn(KEY_META_INFO_FAMILY, DEK_METADATA_QUAL_BYTES, metadata.getBytes()); + } + + return result; + } + + @InterfaceAudience.Private + public static byte[] constructRowKeyForMetadata(ManagedKeyData keyData) { + byte[] keyMetadataHash; + if (keyData.getKeyState() == ManagedKeyState.FAILED && keyData.getKeyMetadata() == null) { + // For FAILED state with null metadata, use state as metadata + keyMetadataHash = new byte[] { keyData.getKeyState().getVal() }; + } else { + keyMetadataHash = keyData.getKeyMetadataHash(); + } + return constructRowKeyForMetadata(keyData.getKeyCustodian(), keyData.getKeyNamespace(), + keyMetadataHash); + } + + @InterfaceAudience.Private + public static byte[] constructRowKeyForMetadata(byte[] key_cust, String keyNamespace, + byte[] keyMetadataHash) { + int custLength = key_cust.length; + return Bytes.add(Bytes.toBytes(custLength), 
key_cust, Bytes.toBytesBinary(keyNamespace), + keyMetadataHash); + } + + @InterfaceAudience.Private + public static ManagedKeyData parseFromResult(Server server, byte[] key_cust, String keyNamespace, + Result result) throws IOException, KeyException { + if (result == null || result.isEmpty()) { + return null; + } + ManagedKeyState keyState = ManagedKeyState.forValue( + result.getValue(KEY_META_INFO_FAMILY, KEY_STATE_QUAL_BYTES)[0]); + String dekMetadata = Bytes.toString(result.getValue(KEY_META_INFO_FAMILY, + DEK_METADATA_QUAL_BYTES)); + byte[] dekWrappedByStk = result.getValue(KEY_META_INFO_FAMILY, DEK_WRAPPED_BY_STK_QUAL_BYTES); + if ((keyState == ManagedKeyState.ACTIVE || keyState == ManagedKeyState.INACTIVE) + && dekWrappedByStk == null) { + throw new IOException(keyState + " key must have a wrapped key"); + } + Key dek = null; + if (dekWrappedByStk != null) { + long stkChecksum = + Bytes.toLong(result.getValue(KEY_META_INFO_FAMILY, STK_CHECKSUM_QUAL_BYTES)); + ManagedKeyData clusterKey = server.getSystemKeyCache().getSystemKeyByChecksum(stkChecksum); + if (clusterKey == null) { + LOG.error("Dropping key with metadata: {} as STK with checksum: {} is unavailable", + dekMetadata, stkChecksum); + return null; + } + dek = EncryptionUtil.unwrapKey(server.getConfiguration(), null, dekWrappedByStk, + clusterKey.getTheKey()); + } + long refreshedTimestamp = Bytes.toLong(result.getValue(KEY_META_INFO_FAMILY, + REFRESHED_TIMESTAMP_QUAL_BYTES)); + byte[] readOpValue = result.getValue(KEY_META_INFO_FAMILY, READ_OP_COUNT_QUAL_BYTES); + long readOpCount = readOpValue != null ? Bytes.toLong(readOpValue) : 0; + byte[] writeOpValue = result.getValue(KEY_META_INFO_FAMILY, WRITE_OP_COUNT_QUAL_BYTES); + long writeOpCount = writeOpValue != null ? 
Bytes.toLong(writeOpValue) : 0; + ManagedKeyData + dekKeyData = new ManagedKeyData(key_cust, keyNamespace, dek, keyState, dekMetadata, + refreshedTimestamp, readOpCount, writeOpCount); + if (dek != null) { + long dekChecksum = Bytes.toLong(result.getValue(KEY_META_INFO_FAMILY, + DEK_CHECKSUM_QUAL_BYTES)); + if (dekKeyData.getKeyChecksum() != dekChecksum) { + LOG.error("Dropping key, current key checksum: {} didn't match the expected checksum: {}" + + " for key with metadata: {}", dekKeyData.getKeyChecksum(), dekChecksum, dekMetadata); + return null; + } + } + return dekKeyData; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/ManagedKeyDataCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/ManagedKeyDataCache.java new file mode 100644 index 000000000000..3359d57ca1ef --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/ManagedKeyDataCache.java @@ -0,0 +1,334 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.keymeta; + +import java.io.IOException; +import java.security.KeyException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Objects; +import java.util.Set; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyState; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.github.benmanes.caffeine.cache.Cache; +import com.github.benmanes.caffeine.cache.Caffeine; + +/** + * In-memory cache for ManagedKeyData entries, using key metadata as the cache key. Uses two + * independent Caffeine caches: one for general key data and one for active keys only with + * hierarchical structure for efficient random key selection. + */ +@InterfaceAudience.Private +public class ManagedKeyDataCache extends KeyManagementBase { + private static final Logger LOG = LoggerFactory.getLogger(ManagedKeyDataCache.class); + + private Cache cache; + private Cache> activeKeysCache; + private final KeymetaTableAccessor keymetaAccessor; + + /** + * Composite key for active keys cache containing custodian and namespace. + * NOTE: Pair won't work out of the box because it won't work with byte[] as is. 
+ */ + @InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.UNITTEST }) + public static class ActiveKeysCacheKey { + private final byte[] custodian; + private final String namespace; + + public ActiveKeysCacheKey(byte[] custodian, String namespace) { + this.custodian = custodian; + this.namespace = namespace; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null || getClass() != obj.getClass()) + return false; + ActiveKeysCacheKey cacheKey = (ActiveKeysCacheKey) obj; + return Bytes.equals(custodian, cacheKey.custodian) && + Objects.equals(namespace, cacheKey.namespace); + } + + @Override + public int hashCode() { + return Objects.hash(Bytes.hashCode(custodian), namespace); + } + } + + /** + * Constructs the ManagedKeyDataCache with the given configuration and keymeta accessor. When + * keymetaAccessor is null, L2 lookup is disabled and dynamic lookup is enabled. + * + * @param conf The configuration, can't be null. + * @param keymetaAccessor The keymeta accessor, can be null. + */ + public ManagedKeyDataCache(Configuration conf, KeymetaTableAccessor keymetaAccessor) { + super(conf); + this.keymetaAccessor = keymetaAccessor; + if (keymetaAccessor == null) { + conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_ENABLED_CONF_KEY, true); + } + + int maxEntries = conf.getInt( + HConstants.CRYPTO_MANAGED_KEYS_L1_CACHE_MAX_ENTRIES_CONF_KEY, + HConstants.CRYPTO_MANAGED_KEYS_L1_CACHE_MAX_ENTRIES_DEFAULT); + int activeKeysMaxEntries = conf.getInt( + HConstants.CRYPTO_MANAGED_KEYS_L1_ACTIVE_CACHE_MAX_NS_ENTRIES_CONF_KEY, + HConstants.CRYPTO_MANAGED_KEYS_L1_ACTIVE_CACHE_MAX_NS_ENTRIES_DEFAULT); + this.cache = Caffeine.newBuilder() + .maximumSize(maxEntries) + .build(); + this.activeKeysCache = Caffeine.newBuilder() + .maximumSize(activeKeysMaxEntries) + .build(); + } + + /** + * Retrieves an entry from the cache, loading it from L2 if KeymetaTableAccessor is available. 
+ * When L2 is not available, it will try to load from provider, unless dynamic lookup is disabled. + * + * @param key_cust the key custodian + * @param keyNamespace the key namespace + * @param keyMetadata the key metadata of the entry to be retrieved + * @param wrappedKey The DEK key material encrypted with the corresponding + * KEK, if available. + * @return the corresponding ManagedKeyData entry, or null if not found + * @throws IOException if an error occurs while loading from KeymetaTableAccessor + * @throws KeyException if an error occurs while loading from KeymetaTableAccessor + */ + public ManagedKeyData getEntry(byte[] key_cust, String keyNamespace, String keyMetadata, byte[] wrappedKey) + throws IOException, KeyException { + ManagedKeyData entry = cache.get(keyMetadata, metadata -> { + // First check if it's in the active keys cache + ManagedKeyData keyData = getFromActiveKeysCache(key_cust, keyNamespace, keyMetadata); + + // Try to load from L2 + if (keyData == null && keymetaAccessor != null) { + try { + keyData = keymetaAccessor.getKey(key_cust, keyNamespace, metadata); + } catch (IOException | KeyException | RuntimeException e) { + LOG.warn("Failed to load key from KeymetaTableAccessor for metadata: {}", metadata, e); + } + } + + // If not found in L2 and dynamic lookup is enabled, try with Key Provider + if (keyData == null && isDynamicLookupEnabled()) { + try { + ManagedKeyProvider provider = getKeyProvider(); + keyData = provider.unwrapKey(metadata, wrappedKey); + LOG.info("Got key data with status: {} and metadata: {} for prefix: {}", + keyData.getKeyState(), keyData.getKeyMetadata(), + ManagedKeyProvider.encodeToStr(key_cust)); + // Add to KeymetaTableAccessor for future L2 lookups + if (keymetaAccessor != null) { + try { + keymetaAccessor.addKey(keyData); + } catch (IOException | RuntimeException e) { + LOG.warn("Failed to add key to KeymetaTableAccessor for metadata: {}", metadata, e); + } + } + } catch (IOException | RuntimeException e) { + 
LOG.warn("Failed to load key from provider for metadata: {}", metadata, e); + } + } + + if (keyData == null) { + keyData = new ManagedKeyData(key_cust, keyNamespace, null, ManagedKeyState.FAILED, keyMetadata); + } + + if (ManagedKeyState.isUsable(keyData.getKeyState())) { + LOG.info("Failed to get usable key data with metadata: {} for prefix: {}", + metadata, ManagedKeyProvider.encodeToStr(key_cust)); + } + return keyData; + }); + if (ManagedKeyState.isUsable(entry.getKeyState())) { + return entry; + } + return null; + } + + /** + * Retrieves an existing key from the active keys. + * + * @param key_cust the key custodian + * @param keyNamespace the key namespace + * @param keyMetadata the key metadata + * @return the ManagedKeyData if found, null otherwise + */ + private ManagedKeyData getFromActiveKeysCache(byte[] key_cust, String keyNamespace, String keyMetadata) { + ActiveKeysCacheKey cacheKey = new ActiveKeysCacheKey(key_cust, keyNamespace); + List keyList = activeKeysCache.getIfPresent(cacheKey); + if (keyList != null) { + for (ManagedKeyData keyData : keyList) { + if (keyData.getKeyMetadata().equals(keyMetadata)) { + return keyData; + } + } + } + return null; + } + + /** + * Removes an entry from generic cache based on its key metadata. 
+ * + * @param keyMetadata the key metadata of the entry to be removed + * @return the removed ManagedKeyData entry, or null if not found + */ + public ManagedKeyData removeEntry(String keyMetadata) { + return cache.asMap().remove(keyMetadata); + } + + public ManagedKeyData removeFromActiveKeys(byte[] key_cust, String key_namespace, + String keyMetadata) { + ActiveKeysCacheKey cacheKey = new ActiveKeysCacheKey(key_cust, key_namespace); + List keyList = activeKeysCache.getIfPresent(cacheKey); + if (keyList != null) { + // Find and remove the matching key + ManagedKeyData removedEntry = null; + for (int i = 0; i < keyList.size(); i++) { + if (keyList.get(i).getKeyMetadata().equals(keyMetadata)) { + removedEntry = keyList.remove(i); + break; + } + } + // If the list is now empty, remove the entire cache entry + if (keyList.isEmpty()) { + activeKeysCache.invalidate(cacheKey); + } + return removedEntry; + } + return null; + } + + /** + * @return the approximate number of entries in the main cache which is meant for general lookup + * by key metadata. + */ + public int getGenericCacheEntryCount() { + return (int) cache.estimatedSize(); + } + + /** + * @return the approximate number of entries in the active keys cache which is meant for random + * key selection. + */ + public int getActiveCacheEntryCount() { + int activeCacheCount = 0; + for (List keyList : activeKeysCache.asMap().values()) { + activeCacheCount += keyList.size(); + } + return activeCacheCount; + } + + /** + * Retrieves a random active entry from the cache based on its key custodian, key namespace, and + * filters out entries with a status other than ACTIVE. This method also loads active keys from + * provider if not found in cache. + * + * @param key_cust The key custodian. 
+   * @param keyNamespace the key namespace to search for
+   * @return a random ManagedKeyData entry with the given custodian and ACTIVE status, or null if
+   *         not found
+   */
+  public ManagedKeyData getAnActiveEntry(byte[] key_cust, String keyNamespace) {
+    ActiveKeysCacheKey cacheKey = new ActiveKeysCacheKey(key_cust, keyNamespace);
+
+    // The loader runs only on a cache miss and tries sources in order: the keymeta table
+    // accessor (L2), then the generic metadata cache, then (see below) the provider itself.
+    List<ManagedKeyData> keyList = activeKeysCache.get(cacheKey, key -> {
+      List<ManagedKeyData> activeEntries = new ArrayList<>();
+
+      // Try to load from KeymetaTableAccessor
+      if (keymetaAccessor != null) {
+        try {
+          List<ManagedKeyData> loadedKeys = keymetaAccessor.getActiveKeys(key_cust, keyNamespace);
+          activeEntries.addAll(loadedKeys);
+        } catch (IOException | KeyException | RuntimeException e) {
+          // Best-effort: fall through to the other sources rather than failing the lookup.
+          LOG.warn("Failed to load active keys from KeymetaTableAccessor for custodian: {} namespace: {}",
+            ManagedKeyProvider.encodeToStr(key_cust), keyNamespace, e);
+        }
+      }
+
+      // If this happens, it means there were no keys in L2, which shouldn't happen if L2 is
+      // enabled and keys were injected using control path for this custodian and namespace. In
+      // this case, we need to retrieve the keys from provider, but before that as a quick
+      // optimization, we check if there are any active keys in the other cache, which should be
+      // suitable for standalone tools.
+      if (activeEntries.isEmpty()) {
+        this.cache.asMap().values().stream()
+          .filter(keyData -> Bytes.equals(keyData.getKeyCustodian(), key_cust)
+            && keyData.getKeyNamespace().equals(keyNamespace)
+            && keyData.getKeyState() == ManagedKeyState.ACTIVE)
+          .forEach(keyData -> {
+            activeEntries.add(keyData);
+          });
+      }
+
+      // As a last ditch effort, load active keys from provider. This typically happens for
+      // standalone tools.
+ if (activeEntries.isEmpty() && isDynamicLookupEnabled()) { + try { + String keyCust = ManagedKeyProvider.encodeToStr(key_cust); + Set retrievedKeys = retrieveManagedKeys(keyCust, key_cust, keyNamespace, + getPerCustodianNamespaceActiveKeyConfCount(), new HashSet<>()); + if (keymetaAccessor != null) { + for (ManagedKeyData keyData : retrievedKeys) { + keymetaAccessor.addKey(keyData); + } + } + retrievedKeys.stream().filter(keyData -> keyData.getKeyState() == ManagedKeyState.ACTIVE) + .forEach(activeEntries::add); + } catch (IOException | KeyException | RuntimeException e) { + LOG.warn("Failed to load active keys from provider for custodian: {} namespace: {}", + ManagedKeyProvider.encodeToStr(key_cust), keyNamespace, e); + } + } + + // We don't mind returning an empty list here because it will help prevent future L2/provider + // lookups. + return activeEntries; + }); + + // Return a random entry from active keys cache only + if (keyList.isEmpty()) { + return null; + } + + return keyList.get((int) (Math.random() * keyList.size())); + } + + /** + * Invalidates all entries in the cache. + */ + public void invalidateAll() { + cache.invalidateAll(); + activeKeysCache.invalidateAll(); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyAccessor.java new file mode 100644 index 000000000000..5a89d38a0bb2 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyAccessor.java @@ -0,0 +1,141 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.keymeta; + +import static org.apache.hadoop.hbase.HConstants.SYSTEM_KEY_FILE_PREFIX; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider; +import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.yetus.audience.InterfaceAudience; + +@InterfaceAudience.Private +public class SystemKeyAccessor extends KeyManagementBase { + protected final Path systemKeyDir; + + public SystemKeyAccessor(Server server) throws IOException { + super(server); + this.systemKeyDir = CommonFSUtils.getSystemKeyDir(server.getConfiguration()); + } + + /** + * Return both the latest system key file and all system key files. + * @return a pair of the latest system key file and all system key files + * @throws IOException if there is an error getting the latest system key file or no cluster key + * is initialized yet. + */ + public Pair> getLatestSystemKeyFile() throws IOException { + if (! 
isKeyManagementEnabled()) {
+      return new Pair<>(null, null);
+    }
+    List<Path> allClusterKeyFiles = getAllSystemKeyFiles();
+    if (allClusterKeyFiles.isEmpty()) {
+      // The declared and documented contract is IOException; a bare RuntimeException here
+      // bypassed callers' checked-exception handling.
+      throw new IOException("No cluster key initialized yet");
+    }
+    int currentMaxSeqNum = SystemKeyAccessor.extractKeySequence(allClusterKeyFiles.get(0));
+    return new Pair<>(new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX + currentMaxSeqNum),
+      allClusterKeyFiles);
+  }
+
+  /**
+   * Return all available cluster key files and return them in the order of latest to oldest.
+   * If no cluster key files are available, then return an empty list. If key management is not
+   * enabled, then return null.
+   *
+   * @return a list of all available cluster key files
+   * @throws IOException if there is an error getting the cluster key files
+   */
+  public List<Path> getAllSystemKeyFiles() throws IOException {
+    if (!isKeyManagementEnabled()) {
+      return null;
+    }
+    FileSystem fs = getServer().getFileSystem();
+    // Reverse-ordered TreeMap so iteration yields the latest (highest sequence) file first.
+    Map<Integer, Path> clusterKeys = new TreeMap<>(Comparator.reverseOrder());
+    for (FileStatus st : fs.globStatus(new Path(systemKeyDir,
+        SYSTEM_KEY_FILE_PREFIX + "*"))) {
+      Path keyPath = st.getPath();
+      int seqNum = extractSystemKeySeqNum(keyPath);
+      clusterKeys.put(seqNum, keyPath);
+    }
+
+    return new ArrayList<>(clusterKeys.values());
+  }
+
+  /**
+   * Load and unwrap the system key whose metadata is stored in the given file.
+   * @param keyPath path to the system key metadata file
+   * @return the unwrapped key data, never null
+   * @throws IOException if the metadata cannot be read or the provider fails to unwrap the key
+   */
+  public ManagedKeyData loadSystemKey(Path keyPath) throws IOException {
+    ManagedKeyProvider provider = getKeyProvider();
+    ManagedKeyData keyData = provider.unwrapKey(loadKeyMetadata(keyPath), null);
+    if (keyData == null) {
+      // Consistent with the declared throws clause; was RuntimeException.
+      throw new IOException("Failed to load system key from: " + keyPath);
+    }
+    return keyData;
+  }
+
+  @InterfaceAudience.Private
+  public static int extractSystemKeySeqNum(Path keyPath) throws IOException {
+    if (keyPath.getName().startsWith(SYSTEM_KEY_FILE_PREFIX)) {
+      try {
+        return Integer.parseInt(keyPath.getName().substring(SYSTEM_KEY_FILE_PREFIX.length()));
+      } catch (NumberFormatException e) {
+        // Fall through to the IOException below so the caller sees a single failure type.
+        LOG.error("Invalid file name for a cluster key: {}", keyPath, e);
+      }
+    }
+
throw new IOException("Couldn't parse key file name: " + keyPath.getName()); + } + + /** + * Extract the key sequence number from the cluster key file name. + * @param clusterKeyFile the path to the cluster key file + * @return The sequence or {@code -1} if not a valid sequence file. + * @throws IOException if the file name is not a valid sequence file + */ + @InterfaceAudience.Private + public static int extractKeySequence(Path clusterKeyFile) throws IOException { + int keySeq = -1; + if (clusterKeyFile.getName().startsWith(SYSTEM_KEY_FILE_PREFIX)) { + String seqStr = clusterKeyFile.getName().substring(SYSTEM_KEY_FILE_PREFIX.length()); + if (! seqStr.isEmpty()) { + try { + keySeq = Integer.parseInt(seqStr); + } catch (NumberFormatException e) { + throw new IOException("Invalid file name for a cluster key: " + clusterKeyFile, e); + } + } + } + return keySeq; + } + + protected String loadKeyMetadata(Path keyPath) throws IOException { + try (FSDataInputStream fin = getServer().getFileSystem().open(keyPath)) { + return fin.readUTF(); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyCache.java new file mode 100644 index 000000000000..d1e3eb048a9b --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyCache.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.keymeta; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings("checkstyle:FinalClass") // as otherwise it breaks mocking. +@InterfaceAudience.Private +public class SystemKeyCache { + private static final Logger LOG = LoggerFactory.getLogger(SystemKeyCache.class); + + private final ManagedKeyData latestSystemKey; + private final Map systemKeys; + + /** + * Construct the System Key cache from the specified accessor. + * @param accessor the accessor to use to load the system keys + * @return the cache or {@code null} if no keys are found. 
+ * @throws IOException if there is an error loading the system keys + */ + public static SystemKeyCache createCache(SystemKeyAccessor accessor) throws IOException { + List allSystemKeyFiles = accessor.getAllSystemKeyFiles(); + if (allSystemKeyFiles.isEmpty()) { + LOG.warn("No system key files found, skipping cache creation"); + return null; + } + ManagedKeyData latestSystemKey = null; + Map systemKeys = new TreeMap<>(); + for (Path keyPath: allSystemKeyFiles) { + LOG.info("Loading system key from: {}", keyPath); + ManagedKeyData keyData = accessor.loadSystemKey(keyPath); + if (latestSystemKey == null) { + latestSystemKey = keyData; + } + systemKeys.put(keyData.getKeyChecksum(), keyData); + } + return new SystemKeyCache(systemKeys, latestSystemKey); + } + + private SystemKeyCache(Map systemKeys, ManagedKeyData latestSystemKey) { + this.systemKeys = systemKeys; + this.latestSystemKey = latestSystemKey; + } + + public ManagedKeyData getLatestSystemKey() { + return latestSystemKey; + } + + public ManagedKeyData getSystemKeyByChecksum(long checksum) { + return systemKeys.get(checksum); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 7ccbb4d614ab..e4bc0f2ea768 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -122,6 +122,7 @@ import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; +import org.apache.hadoop.hbase.keymeta.KeymetaMasterService; import org.apache.hadoop.hbase.log.HBaseMarkers; import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; @@ -355,6 +356,8 @@ public class HMaster extends HBaseServerBase implements Maste // file system 
manager for the master FS operations private MasterFileSystem fileSystemManager; private MasterWalManager walManager; + private SystemKeyManager systemKeyManager; + private KeymetaMasterService keymetaMasterService; // manager to manage procedure-based WAL splitting, can be null if current // is zk-based WAL splitting. SplitWALManager will replace SplitLogManager @@ -992,6 +995,10 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId()); this.clusterId = clusterId.toString(); + systemKeyManager = new SystemKeyManager(this); + systemKeyManager.ensureSystemKeyInitialized(); + buildSystemKeyCache(); + // Precaution. Put in place the old hbck1 lock file to fence out old hbase1s running their // hbck1s against an hbase2 cluster; it could do damage. To skip this behavior, set // hbase.write.hbck1.lock.file to false. @@ -1031,6 +1038,9 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE Map, List>> procsByType = procedureExecutor .getActiveProceduresNoCopy().stream().collect(Collectors.groupingBy(p -> p.getClass())); + keymetaMasterService = new KeymetaMasterService(this); + keymetaMasterService.init(); + // Create Assignment Manager this.assignmentManager = createAssignmentManager(this, masterRegion); this.assignmentManager.start(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java index 5a43cd98feb9..0ffbfd15c41d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java @@ -66,6 +66,7 @@ public class MasterFileSystem { private final FileSystem walFs; // root log directory on the FS private final Path rootdir; + private final Path systemKeyDir; // hbase temp directory used for table construction 
and deletion private final Path tempdir; // root hbase directory on the FS @@ -96,6 +97,7 @@ public MasterFileSystem(Configuration conf) throws IOException { // default localfs. Presumption is that rootdir is fully-qualified before // we get to here with appropriate fs scheme. this.rootdir = CommonFSUtils.getRootDir(conf); + this.systemKeyDir = CommonFSUtils.getSystemKeyDir(conf); this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY); // Cover both bases, the old way of setting default fs and the new. // We're supposed to run on 0.20 and 0.21 anyways. @@ -134,6 +136,7 @@ private void createInitialFileSystemLayout() throws IOException { HConstants.CORRUPT_DIR_NAME, ReplicationUtils.REMOTE_WAL_DIR_NAME }; // check if the root directory exists checkRootDir(this.rootdir, conf, this.fs); + checkSubDir(this.systemKeyDir, HBASE_DIR_PERMS); // Check the directories under rootdir. checkTempDir(this.tempdir, conf, this.fs); @@ -158,6 +161,7 @@ private void createInitialFileSystemLayout() throws IOException { if (isSecurityEnabled) { fs.setPermission(new Path(rootdir, HConstants.VERSION_FILE_NAME), secureRootFilePerms); fs.setPermission(new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME), secureRootFilePerms); + fs.setPermission(systemKeyDir, secureRootFilePerms); } FsPermission currentRootPerms = fs.getFileStatus(this.rootdir).getPermission(); if ( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java index 18dfc7d493bf..99a373c8262f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java @@ -21,7 +21,6 @@ import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_MAX_SPLITTER; import static org.apache.hadoop.hbase.master.MasterWalManager.META_FILTER; import static 
org.apache.hadoop.hbase.master.MasterWalManager.NON_META_FILTER; - import java.io.IOException; import java.util.Arrays; import java.util.Collections; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SystemKeyManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SystemKeyManager.java new file mode 100644 index 000000000000..45b021c77feb --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SystemKeyManager.java @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master; + +import static org.apache.hadoop.hbase.HConstants.SYSTEM_KEY_FILE_PREFIX; + +import java.io.IOException; +import java.util.List; +import java.util.UUID; + +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyState; +import org.apache.hadoop.hbase.keymeta.SystemKeyAccessor; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.yetus.audience.InterfaceAudience; + +@InterfaceAudience.Private +public class SystemKeyManager extends SystemKeyAccessor { + private final MasterServices master; + + public SystemKeyManager(MasterServices master) throws IOException { + super(master); + this.master = master; + } + + public void ensureSystemKeyInitialized() throws IOException { + if (! isKeyManagementEnabled()) { + return; + } + List clusterKeys = getAllSystemKeyFiles(); + if (clusterKeys.isEmpty()) { + LOG.info("Initializing System Key for the first time"); + // Double check for cluster key as another HMaster might have succeeded. + if (rotateSystemKey(null, clusterKeys) == null && + getAllSystemKeyFiles().isEmpty()) { + throw new RuntimeException("Failed to generate or save System Key"); + } + } + else if (rotateSystemKeyIfChanged() != null) { + LOG.info("System key has been rotated"); + } + else { + LOG.info("System key is already initialized and unchanged"); + } + } + + public ManagedKeyData rotateSystemKeyIfChanged() throws IOException { + if (! 
isKeyManagementEnabled()) {
+      return null;
+    }
+    // Reuse the single lookup result: calling getLatestSystemKeyFile() a second time performed
+    // a redundant filesystem listing and could observe a different "latest" file if another
+    // master rotated the key between the two calls.
+    Pair<Path, List<Path>> latestFileResult = getLatestSystemKeyFile();
+    Path latestFile = latestFileResult.getFirst();
+    String latestKeyMetadata = loadKeyMetadata(latestFile);
+    return rotateSystemKey(latestKeyMetadata, latestFileResult.getSecond());
+  }
+
+  private ManagedKeyData rotateSystemKey(String currentKeyMetadata, List<Path> allSystemKeyFiles)
+      throws IOException {
+    ManagedKeyProvider provider = getKeyProvider();
+    // NOTE(review): getBytes() uses the platform default charset; presumably the cluster id is
+    // ASCII so this is benign, but consider StandardCharsets.UTF_8 — confirm against how the
+    // provider interprets the system id.
+    ManagedKeyData clusterKey = provider.getSystemKey(
+      master.getMasterFileSystem().getClusterId().toString().getBytes());
+    if (clusterKey == null) {
+      throw new IOException("Failed to get system key for cluster id: "
+        + master.getMasterFileSystem().getClusterId().toString());
+    }
+    if (clusterKey.getKeyState() != ManagedKeyState.ACTIVE) {
+      throw new IOException("System key is expected to be ACTIVE but it is: "
+        + clusterKey.getKeyState() + " for metadata: " + clusterKey.getKeyMetadata());
+    }
+    if (clusterKey.getKeyMetadata() == null) {
+      throw new IOException("System key is expected to have metadata but it is null");
+    }
+    // Rotate only when the provider reports key metadata different from what is persisted.
+    if (! clusterKey.getKeyMetadata().equals(currentKeyMetadata) &&
+        saveLatestSystemKey(clusterKey.getKeyMetadata(), allSystemKeyFiles)) {
+      return clusterKey;
+    }
+    return null;
+  }
+
+  private boolean saveLatestSystemKey(String keyMetadata, List<Path> allSystemKeyFiles)
+      throws IOException {
+    int nextSystemKeySeq = (allSystemKeyFiles.isEmpty() ?
-1 + : SystemKeyAccessor.extractKeySequence(allSystemKeyFiles.get(0))) + 1; + LOG.info("Trying to save a new cluster key at seq: {}", nextSystemKeySeq); + MasterFileSystem masterFS = master.getMasterFileSystem(); + Path nextSystemKeyPath = new Path(systemKeyDir, + SYSTEM_KEY_FILE_PREFIX + nextSystemKeySeq); + Path tempSystemKeyFile = new Path(masterFS.getTempDir(), + nextSystemKeyPath.getName() + UUID.randomUUID()); + try (FSDataOutputStream fsDataOutputStream = masterFS.getFileSystem() + .create(tempSystemKeyFile)) { + fsDataOutputStream.writeUTF(keyMetadata); + boolean succeeded = masterFS.getFileSystem().rename(tempSystemKeyFile, nextSystemKeyPath); + if (succeeded) { + LOG.info("System key save succeeded for seq: {}", nextSystemKeySeq); + } else { + LOG.error("System key save failed for seq: {}", nextSystemKeySeq); + } + return succeeded; + } + finally { + masterFS.getFileSystem().delete(tempSystemKeyFile, false); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index a4105a31bfac..c9f231a5adef 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -120,6 +120,7 @@ import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; import org.apache.hadoop.hbase.ipc.ServerRpcController; +import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; import org.apache.hadoop.hbase.log.HBaseMarkers; import org.apache.hadoop.hbase.mob.MobFileCache; import org.apache.hadoop.hbase.mob.RSMobFileCleanerChore; @@ -596,7 +597,6 @@ protected RegionServerCoprocessorHost getCoprocessorHost() { return getRegionServerCoprocessorHost(); } - @Override protected boolean canCreateBaseZNode() { return !clusterMode(); } @@ -1449,6 +1449,9 @@ protected void 
handleReportForDutyResponse(final RegionServerStartupResponse c) initializeFileSystem(); } + buildSystemKeyCache(); + managedKeyDataCache = new ManagedKeyDataCache(this.getConfiguration(), keymetaAdmin); + // hack! Maps DFSClient => RegionServer for logs. HDFS made this // config param for task trackers, but we can piggyback off of it. if (this.conf.get("mapreduce.task.attempt.id") == null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java index a4ca20fa7311..65e8aa5e66e2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java @@ -41,6 +41,9 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.AsyncClusterConnection; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.keymeta.KeymetaAdmin; +import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; +import org.apache.hadoop.hbase.keymeta.SystemKeyCache; import org.apache.hadoop.hbase.master.replication.OfflineTableReplicationQueueStorage; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationGroupOffset; @@ -366,6 +369,18 @@ public ChoreService getChoreService() { return null; } + @Override public SystemKeyCache getSystemKeyCache() { + return null; + } + + @Override public ManagedKeyDataCache getManagedKeyDataCache() { + return null; + } + + @Override public KeymetaAdmin getKeymetaAdmin() { + return null; + } + @Override public FileSystem getFileSystem() { return null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java index 5f9433a3f141..92b5f340a610 
100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.security; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -45,4 +47,14 @@ public static String getPrincipalWithoutRealm(final String principal) { int i = principal.indexOf("@"); return (i > -1) ? principal.substring(0, i) : principal; } + + /** + * From the given configuration, determine if key management is enabled. + * @param conf the configuration to check + * @return true if key management is enabled + */ + public static boolean isKeyManagementEnabled(Configuration conf) { + return conf.getBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, + HConstants.CRYPTO_MANAGED_KEYS_DEFAULT_ENABLED); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/DummyKeyProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/DummyKeyProvider.java new file mode 100644 index 000000000000..2d8ae446da3a --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/DummyKeyProvider.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.keymeta; + +import java.security.Key; + +import org.apache.hadoop.hbase.io.crypto.KeyProvider; + +public class DummyKeyProvider implements KeyProvider { + @Override + public void init(String params) { + } + + @Override + public Key[] getKeys(String[] aliases) { + return null; + } + + @Override + public Key getKey(String alias) { + return null; + } +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyProviderInterceptor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyProviderInterceptor.java new file mode 100644 index 000000000000..b7549007b371 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyProviderInterceptor.java @@ -0,0 +1,75 @@ +package org.apache.hadoop.hbase.keymeta; + +import java.io.IOException; +import java.security.Key; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyState; +import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider; +import org.mockito.Mockito; + +public class ManagedKeyProviderInterceptor extends MockManagedKeyProvider { + public final MockManagedKeyProvider delegate; + public final MockManagedKeyProvider spy; + + public ManagedKeyProviderInterceptor() { + this.delegate = new MockManagedKeyProvider(); + this.spy = Mockito.spy(delegate); + } + + @Override + public void initConfig(Configuration conf) { + spy.initConfig(conf); + } + + @Override + 
public ManagedKeyData getManagedKey(byte[] custodian, String namespace) throws IOException { + return spy.getManagedKey(custodian, namespace); + } + + @Override + public ManagedKeyData getSystemKey(byte[] systemId) throws IOException { + return spy.getSystemKey(systemId); + } + + @Override + public ManagedKeyData unwrapKey(String keyMetadata, byte[] wrappedKey) throws IOException { + return spy.unwrapKey(keyMetadata, wrappedKey); + } + + @Override + public void init(String params) { + spy.init(params); + } + + @Override + public Key getKey(String alias) { + return spy.getKey(alias); + } + + @Override + public Key[] getKeys(String[] aliases) { + return spy.getKeys(aliases); + } + + @Override + public void setMockedKeyState(String alias, ManagedKeyState state) { + delegate.setMockedKeyState(alias, state); + } + + @Override + public void setMultikeyGenMode(boolean multikeyGenMode) { + delegate.setMultikeyGenMode(multikeyGenMode); + } + + @Override + public ManagedKeyData getLastGeneratedKeyData(String alias, String keyNamespace) { + return delegate.getLastGeneratedKeyData(alias, keyNamespace); + } + + @Override + public void setMockedKey(String alias, java.security.Key key, String keyNamespace) { + delegate.setMockedKey(alias, key, keyNamespace); + } +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java new file mode 100644 index 000000000000..a0147e6e4e2e --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.keymeta; + +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider; +import org.junit.After; +import org.junit.Before; + +public class ManagedKeyTestBase { + protected HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + + @Before + public void setUp() throws Exception { + TEST_UTIL.getConfiguration().set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, + MockManagedKeyProvider.class.getName()); + TEST_UTIL.getConfiguration().set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true"); + TEST_UTIL.getConfiguration().set("hbase.coprocessor.master.classes", + KeymetaServiceEndpoint.class.getName()); + + // Start the minicluster + TEST_UTIL.startMiniCluster(1); + TEST_UTIL.waitFor(60000, + () -> TEST_UTIL.getMiniHBaseCluster().getMaster().isInitialized()); + TEST_UTIL.waitUntilAllRegionsAssigned(KeymetaTableAccessor.KEY_META_TABLE_NAME); + } + + @After + public void tearDown() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java new file mode 100644 index 000000000000..ab871b241830 --- /dev/null +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.keymeta; + +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ MasterTests.class, SmallTests.class }) +public class TestKeyManagementBase { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass( + TestKeyManagementBase.class); + + @Test + public void testGetKeyProviderWithInvalidProvider() throws Exception { + // Setup configuration with a non-ManagedKeyProvider + Configuration conf = new Configuration(); + 
conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, + "org.apache.hadoop.hbase.keymeta.DummyKeyProvider"); + + Server mockServer = mock(Server.class); + when(mockServer.getConfiguration()).thenReturn(conf); + + KeyManagementBase keyMgmt = new TestKeyManagement(mockServer); + + // Should throw RuntimeException when provider is not ManagedKeyProvider + RuntimeException exception = assertThrows(RuntimeException.class, () -> { + keyMgmt.getKeyProvider(); + }); + + assertTrue(exception.getMessage().contains("expected to be of type ManagedKeyProvider")); + } + + private static class TestKeyManagement extends KeyManagementBase { + public TestKeyManagement(Server server) { + super(server); + } + } +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java new file mode 100644 index 000000000000..7070596a93c0 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java @@ -0,0 +1,333 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.keymeta; + +import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.ACTIVE; +import static org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeyState.KEY_ACTIVE; +import static org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeyState.KEY_FAILED; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.contains; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.withSettings; + +import java.io.IOException; +import java.security.KeyException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Base64; +import java.util.List; +import javax.crypto.spec.SecretKeySpec; + +import org.apache.hadoop.hbase.CoprocessorEnvironment; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.coprocessor.HasMasterServices; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; +import org.apache.hadoop.hbase.keymeta.KeymetaServiceEndpoint.KeymetaAdminServiceImpl; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.GetManagedKeysResponse; +import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysRequest; +import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysResponse; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.Before; +import org.junit.ClassRule; +import 
org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; + +@Category({ MasterTests.class, SmallTests.class }) +public class TestKeymetaEndpoint { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestKeymetaEndpoint.class); + + private static final String KEY_CUST = "keyCust"; + private static final String KEY_NAMESPACE = "keyNamespace"; + private static final String KEY_METADATA1 = "keyMetadata1"; + private static final String KEY_METADATA2 = "keyMetadata2"; + + @Mock + private RpcController controller; + @Mock + private MasterServices master; + @Mock + private RpcCallback done; + @Mock + private KeymetaAdmin keymetaAdmin; + + KeymetaServiceEndpoint keymetaServiceEndpoint; + private ManagedKeysResponse.Builder responseBuilder; + private ManagedKeysRequest.Builder requestBuilder; + private KeymetaAdminServiceImpl keyMetaAdminService; + private ManagedKeyData keyData1; + private ManagedKeyData keyData2; + + @Before + public void setUp() throws Exception { + MockitoAnnotations.initMocks(this); + keymetaServiceEndpoint = new KeymetaServiceEndpoint(); + CoprocessorEnvironment env = mock(CoprocessorEnvironment.class, + withSettings().extraInterfaces(HasMasterServices.class)); + when(((HasMasterServices) env).getMasterServices()).thenReturn(master); + keymetaServiceEndpoint.start(env); + keyMetaAdminService = (KeymetaAdminServiceImpl) keymetaServiceEndpoint.getServices() + .iterator().next(); + responseBuilder = ManagedKeysResponse.newBuilder().setKeyState(KEY_ACTIVE); + requestBuilder = ManagedKeysRequest.newBuilder() + .setKeyNamespace(ManagedKeyData.KEY_SPACE_GLOBAL); + keyData1 = new ManagedKeyData(KEY_CUST.getBytes(), KEY_NAMESPACE, + 
new SecretKeySpec("key1".getBytes(), "AES"), ACTIVE, KEY_METADATA1); + keyData2 = new ManagedKeyData(KEY_CUST.getBytes(), KEY_NAMESPACE, + new SecretKeySpec("key2".getBytes(), "AES"), ACTIVE, KEY_METADATA2); + when(master.getKeymetaAdmin()).thenReturn(keymetaAdmin); + } + + @Test + public void testConvertToKeyCustBytesValid() { + // Arrange + String validBase64 = Base64.getEncoder().encodeToString("testKey".getBytes()); + ManagedKeysRequest request = requestBuilder.setKeyCust(validBase64).build(); + + // Act + byte[] result = + KeymetaServiceEndpoint.convertToKeyCustBytes(controller, request, responseBuilder); + + // Assert + assertNotNull(result); + assertArrayEquals("testKey".getBytes(), result); + assertEquals(KEY_ACTIVE, responseBuilder.getKeyState()); + verify(controller, never()).setFailed(anyString()); + } + + @Test + public void testConvertToKeyCustBytesInvalid() { + // Arrange + String invalidBase64 = "invalid!Base64@String"; + ManagedKeysRequest request = requestBuilder.setKeyCust(invalidBase64).build(); + + // Act + byte[] result = KeymetaServiceEndpoint.convertToKeyCustBytes(controller, request, + responseBuilder); + + // Assert + assertNull(result); + assertEquals(KEY_FAILED, responseBuilder.getKeyState()); + verify(controller).setFailed(anyString()); + } + + @Test + public void testGetResponseBuilder() { + // Arrange + String keyCust = Base64.getEncoder().encodeToString("testKey".getBytes()); + String keyNamespace = "testNamespace"; + ManagedKeysRequest request = requestBuilder.setKeyCust(keyCust) + .setKeyNamespace(keyNamespace) + .build(); + + // Act + ManagedKeysResponse.Builder result = KeymetaServiceEndpoint.getResponseBuilder(controller, + request); + + // Assert + assertNotNull(result); + assertEquals(keyNamespace, result.getKeyNamespace()); + assertArrayEquals("testKey".getBytes(), result.getKeyCustBytes().toByteArray()); + verify(controller, never()).setFailed(anyString()); + } + + @Test + public void testGetResponseBuilderWithInvalidBase64() 
{ + // Arrange + String keyCust = "invalidBase64!"; + String keyNamespace = "testNamespace"; + ManagedKeysRequest request = requestBuilder.setKeyCust(keyCust) + .setKeyNamespace(keyNamespace) + .build(); + + // Act + ManagedKeysResponse.Builder result = KeymetaServiceEndpoint.getResponseBuilder(controller, + request); + + // Assert + assertNotNull(result); + assertEquals(keyNamespace, result.getKeyNamespace()); + assertEquals(KEY_FAILED, result.getKeyState()); + verify(controller).setFailed(contains("Failed to decode specified prefix as Base64 string")); + } + + @Test + public void testGenerateKeyStateResponse() throws Exception { + // Arrange + ManagedKeysResponse response = responseBuilder.setKeyCustBytes(ByteString.copyFrom( + keyData1.getKeyCustodian())) + .setKeyNamespace(keyData1.getKeyNamespace()) + .build(); + List managedKeyStates = Arrays.asList(keyData1, keyData2); + + // Act + GetManagedKeysResponse result = KeymetaServiceEndpoint.generateKeyStateResponse( + managedKeyStates, responseBuilder); + + // Assert + assertNotNull(response); + assertNotNull(result.getStateList()); + assertEquals(2, result.getStateList().size()); + assertEquals(KEY_ACTIVE, result.getStateList().get(0).getKeyState()); + assertEquals(0, Bytes.compareTo(keyData1.getKeyCustodian(), + result.getStateList().get(0).getKeyCustBytes().toByteArray())); + assertEquals(keyData1.getKeyNamespace(), result.getStateList().get(0).getKeyNamespace()); + verify(controller, never()).setFailed(anyString()); + } + + @Test + public void testGenerateKeyStateResponse_Empty() throws Exception { + // Arrange + ManagedKeysResponse response = responseBuilder.setKeyCustBytes(ByteString.copyFrom( + keyData1.getKeyCustodian())) + .setKeyNamespace(keyData1.getKeyNamespace()) + .build(); + List managedKeyStates = new ArrayList<>(); + + // Act + GetManagedKeysResponse result = KeymetaServiceEndpoint.generateKeyStateResponse( + managedKeyStates, responseBuilder); + + // Assert + assertNotNull(response); + 
assertNotNull(result.getStateList()); + assertEquals(0, result.getStateList().size()); + verify(controller, never()).setFailed(anyString()); + } + + @Test + public void testGenerateKeyStatResponse_Success() throws Exception { + doTestServiceCallForSuccess( + (controller, request, done) -> + keyMetaAdminService.enableKeyManagement(controller, request, done)); + } + + @Test + public void testGetManagedKeys_Success() throws Exception { + doTestServiceCallForSuccess( + (controller, request, done) -> + keyMetaAdminService.getManagedKeys(controller, request, done)); + } + + private void doTestServiceCallForSuccess(ServiceCall svc) throws Exception { + // Arrange + ManagedKeysRequest request = requestBuilder.setKeyCust(KEY_CUST).build(); + List managedKeyStates = Arrays.asList(keyData1); + when(keymetaAdmin.enableKeyManagement(any(), any())).thenReturn(managedKeyStates); + + // Act + svc.call(controller, request, done); + + // Assert + verify(done).run(any()); + verify(controller, never()).setFailed(anyString()); + } + + private interface ServiceCall { + void call(RpcController controller, ManagedKeysRequest request, + RpcCallback done) throws Exception; + } + + @Test + public void testGenerateKeyStateResponse_InvalidCust() throws Exception { + // Arrange + String invalidBase64 = "invalid!Base64@String"; + ManagedKeysRequest request = requestBuilder.setKeyCust(invalidBase64).build(); + + // Act + keyMetaAdminService.enableKeyManagement(controller, request, done); + + // Assert + verify(controller).setFailed(contains("IOException")); + verify(keymetaAdmin, never()).enableKeyManagement(any(), any()); + verify(done, never()).run(any()); + } + + @Test + public void testGenerateKeyStateResponse_IOException() throws Exception { + // Arrange + when(keymetaAdmin.enableKeyManagement(any(), any())).thenThrow(IOException.class); + ManagedKeysRequest request = requestBuilder.setKeyCust(KEY_CUST).build(); + + // Act + keyMetaAdminService.enableKeyManagement(controller, request, done); 
+ + // Assert + verify(controller).setFailed(contains("IOException")); + verify(keymetaAdmin).enableKeyManagement(any(), any()); + verify(done, never()).run(any()); + } + + @Test + public void testGetManagedKeys_IOException() throws Exception { + doTestGetManagedKeysError(IOException.class); + } + + @Test + public void testGetManagedKeys_KeyException() throws Exception { + doTestGetManagedKeysError(KeyException.class); + } + + private void doTestGetManagedKeysError(Class exType) throws Exception { + // Arrange + when(keymetaAdmin.getManagedKeys(any(), any())).thenThrow(exType); + ManagedKeysRequest request = requestBuilder.setKeyCust(KEY_CUST).build(); + + // Act + keyMetaAdminService.getManagedKeys(controller, request, done); + + // Assert + verify(controller).setFailed(contains(exType.getSimpleName())); + verify(keymetaAdmin).getManagedKeys(any(), any()); + verify(done, never()).run(any()); + } + + @Test + public void testGetManagedKeys_InvalidCust() throws Exception { + // Arrange + String invalidBase64 = "invalid!Base64@String"; + ManagedKeysRequest request = requestBuilder.setKeyCust(invalidBase64).build(); + + // Act + keyMetaAdminService.getManagedKeys(controller, request, done); + + // Assert + verify(controller).setFailed(contains("IOException")); + verify(keymetaAdmin, never()).getManagedKeys(any(), any()); + verify(done, never()).run(any()); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaMasterService.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaMasterService.java new file mode 100644 index 000000000000..f34d482d7940 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaMasterService.java @@ -0,0 +1,200 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.keymeta; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +/** + * Tests for KeymetaMasterService class + */ +@Category({ MasterTests.class, SmallTests.class }) +public class TestKeymetaMasterService { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestKeymetaMasterService.class); + + @Mock + private MasterServices mockMaster; + @Mock + private TableDescriptors mockTableDescriptors; + + private Configuration conf; + 
private KeymetaMasterService service; + private AutoCloseable closeableMocks; + + @Before + public void setUp() throws Exception { + closeableMocks = MockitoAnnotations.openMocks(this); + + conf = new Configuration(); + when(mockMaster.getConfiguration()).thenReturn(conf); + when(mockMaster.getTableDescriptors()).thenReturn(mockTableDescriptors); + } + + @After + public void tearDown() throws Exception { + if (closeableMocks != null) { + closeableMocks.close(); + } + } + + @Test + public void testInitWithKeyManagementDisabled() throws Exception { + // Setup - disable key management + conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, false); + + service = new KeymetaMasterService(mockMaster); + + // Execute + service.init(); // Should return early without creating table + + // Verify - no table operations should be performed + verify(mockMaster, never()).getTableDescriptors(); + verify(mockMaster, never()).createSystemTable(any()); + } + + @Test + public void testInitWithKeyManagementEnabledAndTableExists() throws Exception { + // Setup - enable key management and table already exists + conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true); + when(mockTableDescriptors.exists(KeymetaTableAccessor.KEY_META_TABLE_NAME)).thenReturn(true); + + service = new KeymetaMasterService(mockMaster); + + // Execute + service.init(); + + // Verify - table exists check is performed but no table creation + verify(mockMaster).getTableDescriptors(); + verify(mockTableDescriptors).exists(KeymetaTableAccessor.KEY_META_TABLE_NAME); + verify(mockMaster, never()).createSystemTable(any()); + } + + @Test + public void testInitWithKeyManagementEnabledAndTableDoesNotExist() throws Exception { + // Setup - enable key management and table does not exist + conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true); + when(mockTableDescriptors.exists(KeymetaTableAccessor.KEY_META_TABLE_NAME)).thenReturn(false); + + service = new 
KeymetaMasterService(mockMaster); + + // Execute + service.init(); + + // Verify - table is created + verify(mockMaster).getTableDescriptors(); + verify(mockTableDescriptors).exists(KeymetaTableAccessor.KEY_META_TABLE_NAME); + verify(mockMaster).createSystemTable(any(TableDescriptor.class)); + } + + @Test + public void testInitWithTableDescriptorsIOException() throws Exception { + // Setup - enable key management but table descriptors throws IOException + conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true); + when(mockTableDescriptors.exists(any(TableName.class))) + .thenThrow(new IOException("Table descriptors error")); + + service = new KeymetaMasterService(mockMaster); + + // Execute & Verify - IOException should propagate + try { + service.init(); + } catch (IOException e) { + // Expected exception + } + + verify(mockMaster).getTableDescriptors(); + verify(mockTableDescriptors).exists(KeymetaTableAccessor.KEY_META_TABLE_NAME); + verify(mockMaster, never()).createSystemTable(any()); + } + + @Test + public void testInitWithCreateSystemTableIOException() throws Exception { + // Setup - enable key management, table doesn't exist, but createSystemTable throws IOException + conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true); + when(mockTableDescriptors.exists(KeymetaTableAccessor.KEY_META_TABLE_NAME)).thenReturn(false); + when(mockMaster.createSystemTable(any(TableDescriptor.class))) + .thenThrow(new IOException("Create table error")); + + service = new KeymetaMasterService(mockMaster); + + // Execute & Verify - IOException should propagate + try { + service.init(); + } catch (IOException e) { + // Expected exception + } + + verify(mockMaster).getTableDescriptors(); + verify(mockTableDescriptors).exists(KeymetaTableAccessor.KEY_META_TABLE_NAME); + verify(mockMaster).createSystemTable(any(TableDescriptor.class)); + } + + @Test + public void testConstructorWithMasterServices() throws Exception { + // Execute + service = new 
KeymetaMasterService(mockMaster); + + // Verify - constructor should not throw an exception + // The service should be created successfully (no exceptions = success) + // We don't verify internal calls since the constructor just stores references + } + + @Test + public void testMultipleInitCalls() throws Exception { + // Setup - enable key management and table exists + conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true); + when(mockTableDescriptors.exists(KeymetaTableAccessor.KEY_META_TABLE_NAME)).thenReturn(true); + + service = new KeymetaMasterService(mockMaster); + + // Execute - call init multiple times + service.init(); + service.init(); + service.init(); + + // Verify - each call should check table existence (idempotent behavior) + verify(mockMaster, times(3)).getTableDescriptors(); + verify(mockTableDescriptors, times(3)).exists(KeymetaTableAccessor.KEY_META_TABLE_NAME); + verify(mockMaster, never()).createSystemTable(any()); + } +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java new file mode 100644 index 000000000000..3c80f928c9e1 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java @@ -0,0 +1,483 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.keymeta; + +import static org.apache.hadoop.hbase.io.crypto.ManagedKeyData.KEY_SPACE_GLOBAL; +import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.ACTIVE; +import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.DISABLED; +import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.FAILED; +import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.INACTIVE; +import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.DEK_CHECKSUM_QUAL_BYTES; +import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.DEK_METADATA_QUAL_BYTES; +import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.DEK_WRAPPED_BY_STK_QUAL_BYTES; +import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.KEY_META_INFO_FAMILY; +import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.KEY_STATE_QUAL_BYTES; +import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.READ_OP_COUNT_QUAL_BYTES; +import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.REFRESHED_TIMESTAMP_QUAL_BYTES; +import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.STK_CHECKSUM_QUAL_BYTES; +import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.WRITE_OP_COUNT_QUAL_BYTES; +import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.constructRowKeyForMetadata; +import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.parseFromResult; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static 
org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyLong; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.NavigableMap; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Increment; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyState; +import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider; +import org.apache.hadoop.hbase.security.EncryptionUtil; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.BlockJUnit4ClassRunner; +import org.junit.runners.Parameterized; +import 
org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Suite; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +@RunWith(Suite.class) +@Suite.SuiteClasses({ + TestKeymetaTableAccessor.TestAdd.class, + TestKeymetaTableAccessor.TestAddWithNullableFields.class, + TestKeymetaTableAccessor.TestGet.class, + TestKeymetaTableAccessor.TestOps.class, +}) +@Category({ MasterTests.class, SmallTests.class }) +public class TestKeymetaTableAccessor { + protected static final String ALIAS = "custId1"; + protected static final byte[] CUST_ID = ALIAS.getBytes(); + protected static final String KEY_NAMESPACE = "namespace"; + protected static String KEY_METADATA = "metadata1"; + + @Mock + protected Server server; + @Mock + protected Connection connection; + @Mock + protected Table table; + @Mock + protected ResultScanner scanner; + @Mock + protected SystemKeyCache systemKeyCache; + + protected KeymetaTableAccessor accessor; + protected Configuration conf = HBaseConfiguration.create(); + protected MockManagedKeyProvider managedKeyProvider; + protected ManagedKeyData latestSystemKey; + + private AutoCloseable closeableMocks; + + @Before + public void setUp() throws Exception { + closeableMocks = MockitoAnnotations.openMocks(this); + + conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true"); + conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, MockManagedKeyProvider.class.getName()); + + when(server.getConnection()).thenReturn(connection); + when(connection.getTable(KeymetaTableAccessor.KEY_META_TABLE_NAME)).thenReturn(table); + when(server.getSystemKeyCache()).thenReturn(systemKeyCache); + when(server.getConfiguration()).thenReturn(conf); + + accessor = new KeymetaTableAccessor(server); + managedKeyProvider = new MockManagedKeyProvider(); + managedKeyProvider.initConfig(conf); + + latestSystemKey = managedKeyProvider.getSystemKey("system-id".getBytes()); + 
when(systemKeyCache.getLatestSystemKey()).thenReturn(latestSystemKey); + when(systemKeyCache.getSystemKeyByChecksum(anyLong())).thenReturn(latestSystemKey); + } + + @After + public void tearDown() throws Exception { + closeableMocks.close(); + } + + @RunWith(Parameterized.class) + @Category({ MasterTests.class, SmallTests.class }) + public static class TestAdd extends TestKeymetaTableAccessor { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestAdd.class); + + @Parameter(0) + public ManagedKeyState keyState; + + @Parameterized.Parameters(name = "{index},keyState={0}") + public static Collection data() { + return Arrays.asList( + new Object[][] { { ACTIVE }, { FAILED }, { INACTIVE }, { DISABLED }, }); + } + + @Test + public void testAddKey() throws Exception { + managedKeyProvider.setMockedKeyState(ALIAS, keyState); + ManagedKeyData keyData = + managedKeyProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL); + + accessor.addKey(keyData); + + ArgumentCaptor putCaptor = ArgumentCaptor.forClass(Put.class); + verify(table).put(putCaptor.capture()); + Put put = putCaptor.getValue(); + assertPut(keyData, put); + } + } + + @RunWith(BlockJUnit4ClassRunner.class) + @Category({ MasterTests.class, SmallTests.class }) + public static class TestAddWithNullableFields extends TestKeymetaTableAccessor { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestAddWithNullableFields.class); + + @Test + public void testAddKeyWithFailedStateAndNullMetadata() throws Exception { + managedKeyProvider.setMockedKeyState(ALIAS, FAILED); + ManagedKeyData keyData = new ManagedKeyData(CUST_ID, KEY_SPACE_GLOBAL, null, FAILED, null); + + accessor.addKey(keyData); + + ArgumentCaptor putCaptor = ArgumentCaptor.forClass(Put.class); + verify(table).put(putCaptor.capture()); + Put put = putCaptor.getValue(); + + // Verify the row key uses state value for metadata hash + byte[] expectedRowKey = 
constructRowKeyForMetadata(CUST_ID, KEY_SPACE_GLOBAL, + new byte[] { FAILED.getVal() }); + assertEquals(0, Bytes.compareTo(expectedRowKey, put.getRow())); + + Map valueMap = getValueMap(put); + + // Verify key-related columns are not present + assertNull(valueMap.get(new Bytes(DEK_CHECKSUM_QUAL_BYTES))); + assertNull(valueMap.get(new Bytes(DEK_WRAPPED_BY_STK_QUAL_BYTES))); + assertNull(valueMap.get(new Bytes(STK_CHECKSUM_QUAL_BYTES))); + + // Verify state is set correctly + assertEquals(new Bytes(new byte[] { FAILED.getVal() }), + valueMap.get(new Bytes(KEY_STATE_QUAL_BYTES))); + } + } + + @RunWith(BlockJUnit4ClassRunner.class) + @Category({ MasterTests.class, SmallTests.class }) + public static class TestGet extends TestKeymetaTableAccessor { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestGet.class); + + @Mock + private Result result1; + @Mock + private Result result2; + + private String keyMetadata2 = "metadata2"; + + @Override + public void setUp() throws Exception { + super.setUp(); + + when(result1.getValue(eq(KEY_META_INFO_FAMILY), eq(KEY_STATE_QUAL_BYTES))) + .thenReturn(new byte[] { ACTIVE.getVal() }); + when(result2.getValue(eq(KEY_META_INFO_FAMILY), eq(KEY_STATE_QUAL_BYTES))) + .thenReturn(new byte[] { FAILED.getVal() }); + for (Result result : Arrays.asList(result1, result2)) { + when(result.getValue(eq(KEY_META_INFO_FAMILY), eq(REFRESHED_TIMESTAMP_QUAL_BYTES))) + .thenReturn(Bytes.toBytes(0L)); + when(result.getValue(eq(KEY_META_INFO_FAMILY), eq(STK_CHECKSUM_QUAL_BYTES))) + .thenReturn(Bytes.toBytes(0L)); + } + when(result1.getValue(eq(KEY_META_INFO_FAMILY), eq(DEK_METADATA_QUAL_BYTES))) + .thenReturn(KEY_METADATA.getBytes()); + when(result2.getValue(eq(KEY_META_INFO_FAMILY), eq(DEK_METADATA_QUAL_BYTES))) + .thenReturn(keyMetadata2.getBytes()); + } + + @Test + public void testParseEmptyResult() throws Exception { + Result result = mock(Result.class); + when(result.isEmpty()).thenReturn(true); + + 
assertNull(parseFromResult(server, CUST_ID, KEY_NAMESPACE, null)); + assertNull(parseFromResult(server, CUST_ID, KEY_NAMESPACE, result)); + } + + @Test + public void testGetActiveKeyMissingWrappedKey() throws Exception { + Result result = mock(Result.class); + when(table.get(any(Get.class))).thenReturn(result); + when(result.getValue(eq(KEY_META_INFO_FAMILY), eq(KEY_STATE_QUAL_BYTES))) + .thenReturn(new byte[] { ACTIVE.getVal() }, new byte[] { INACTIVE.getVal() }); + + IOException ex; + ex = assertThrows(IOException.class, + () -> accessor.getKey(CUST_ID, KEY_SPACE_GLOBAL, KEY_METADATA)); + assertEquals("ACTIVE key must have a wrapped key", ex.getMessage()); + ex = assertThrows(IOException.class, () -> + accessor.getKey(CUST_ID, KEY_SPACE_GLOBAL, KEY_METADATA)); + assertEquals("INACTIVE key must have a wrapped key", ex.getMessage()); + } + + @Test + public void testGetKeyMissingSTK() throws Exception { + when(result1.getValue(eq(KEY_META_INFO_FAMILY), eq(DEK_WRAPPED_BY_STK_QUAL_BYTES))) + .thenReturn(new byte[] { 0 }); + when(systemKeyCache.getSystemKeyByChecksum(anyLong())).thenReturn(null); + when(table.get(any(Get.class))).thenReturn(result1); + + ManagedKeyData result = accessor.getKey(CUST_ID, KEY_NAMESPACE, KEY_METADATA); + + assertNull(result); + } + + @Test + public void testGetKeyWithWrappedKey() throws Exception { + ManagedKeyData keyData = setupActiveKey(CUST_ID, result1); + + ManagedKeyData result = accessor.getKey(CUST_ID, KEY_NAMESPACE, KEY_METADATA); + + verify(table).get(any(Get.class)); + assertNotNull(result); + assertEquals(0, Bytes.compareTo(CUST_ID, result.getKeyCustodian())); + assertEquals(KEY_NAMESPACE, result.getKeyNamespace()); + assertEquals(KEY_METADATA, result.getKeyMetadata()); + assertEquals(0, Bytes.compareTo(keyData.getTheKey().getEncoded(), + result.getTheKey().getEncoded())); + assertEquals(ACTIVE, result.getKeyState()); + + // When DEK checksum doesn't match, we expect a null value. 
+ result = accessor.getKey(CUST_ID, KEY_NAMESPACE, KEY_METADATA); + assertNull(result); + } + + @Test + public void testGetKeyWithFailedState() throws Exception { + // Test with FAILED state and null metadata + Result failedResult = mock(Result.class); + when(failedResult.getValue(eq(KEY_META_INFO_FAMILY), eq(KEY_STATE_QUAL_BYTES))) + .thenReturn(new byte[] { FAILED.getVal() }); + when(failedResult.getValue(eq(KEY_META_INFO_FAMILY), eq(REFRESHED_TIMESTAMP_QUAL_BYTES))) + .thenReturn(Bytes.toBytes(0L)); + when(failedResult.getValue(eq(KEY_META_INFO_FAMILY), eq(STK_CHECKSUM_QUAL_BYTES))) + .thenReturn(Bytes.toBytes(0L)); + // Explicitly return null for metadata to simulate FAILED state with null metadata + when(failedResult.getValue(eq(KEY_META_INFO_FAMILY), eq(DEK_METADATA_QUAL_BYTES))) + .thenReturn(null); + + when(table.get(any(Get.class))).thenReturn(failedResult); + ManagedKeyData result = accessor.getKey(CUST_ID, KEY_NAMESPACE, FAILED); + + verify(table).get(any(Get.class)); + assertNotNull(result); + assertEquals(0, Bytes.compareTo(CUST_ID, result.getKeyCustodian())); + assertEquals(KEY_NAMESPACE, result.getKeyNamespace()); + assertNull(result.getKeyMetadata()); + assertNull(result.getTheKey()); + assertEquals(FAILED, result.getKeyState()); + } + + @Test + public void testGetKeyWithoutWrappedKey() throws Exception { + when(table.get(any(Get.class))).thenReturn(result2); + + ManagedKeyData result = accessor.getKey(CUST_ID, KEY_NAMESPACE, KEY_METADATA); + + verify(table).get(any(Get.class)); + assertNotNull(result); + assertEquals(0, Bytes.compareTo(CUST_ID, result.getKeyCustodian())); + assertEquals(KEY_NAMESPACE, result.getKeyNamespace()); + assertEquals(keyMetadata2, result.getKeyMetadata()); + assertNull(result.getTheKey()); + assertEquals(FAILED, result.getKeyState()); + } + + @Test + public void testGetKeyWithOps() throws Exception { + long readCnt = 5; + long writeCnt = 10; + when(result2.getValue(eq(KEY_META_INFO_FAMILY), eq(READ_OP_COUNT_QUAL_BYTES))) + 
.thenReturn(Bytes.toBytes(readCnt)); + when(result2.getValue(eq(KEY_META_INFO_FAMILY), eq(WRITE_OP_COUNT_QUAL_BYTES))) + .thenReturn(Bytes.toBytes(writeCnt)); + when(table.get(any(Get.class))).thenReturn(result2); + + ManagedKeyData result = accessor.getKey(CUST_ID, KEY_NAMESPACE, KEY_METADATA); + + verify(table).get(any(Get.class)); + assertNotNull(result); + assertEquals(readCnt, result.getReadOpCount()); + assertEquals(writeCnt, result.getWriteOpCount()); + } + + @Test + public void testGetAllKeys() throws Exception { + ManagedKeyData keyData = setupActiveKey(CUST_ID, result1); + + when(scanner.iterator()).thenReturn(List.of(result1, result2).iterator()); + when(table.getScanner(any(Scan.class))).thenReturn(scanner); + + List allKeys = accessor.getAllKeys(CUST_ID, KEY_NAMESPACE); + + assertEquals(2, allKeys.size()); + assertEquals(KEY_METADATA, allKeys.get(0).getKeyMetadata()); + assertEquals(keyMetadata2, allKeys.get(1).getKeyMetadata()); + verify(table).getScanner(any(Scan.class)); + } + + @Test + public void testGetActiveKeys() throws Exception { + ManagedKeyData keyData = setupActiveKey(CUST_ID, result1); + + when(scanner.iterator()).thenReturn(List.of(result1, result2).iterator()); + when(table.getScanner(any(Scan.class))).thenReturn(scanner); + + List allKeys = accessor.getActiveKeys(CUST_ID, KEY_NAMESPACE); + + assertEquals(1, allKeys.size()); + assertEquals(KEY_METADATA, allKeys.get(0).getKeyMetadata()); + verify(table).getScanner(any(Scan.class)); + } + + private ManagedKeyData setupActiveKey(byte[] custId, Result result) throws Exception { + ManagedKeyData keyData = managedKeyProvider.getManagedKey(custId, KEY_NAMESPACE); + byte[] dekWrappedBySTK = EncryptionUtil.wrapKey(conf, null, + keyData.getTheKey(), latestSystemKey.getTheKey()); + when(result.getValue(eq(KEY_META_INFO_FAMILY), eq(DEK_WRAPPED_BY_STK_QUAL_BYTES))) + .thenReturn(dekWrappedBySTK); + when(result.getValue(eq(KEY_META_INFO_FAMILY), eq(DEK_CHECKSUM_QUAL_BYTES))) + 
.thenReturn(Bytes.toBytes(keyData.getKeyChecksum()), Bytes.toBytes(0L)); + when(table.get(any(Get.class))).thenReturn(result); + return keyData; + } + } + + + @RunWith(Parameterized.class) + @Category({ MasterTests.class, SmallTests.class }) + public static class TestOps extends TestKeymetaTableAccessor { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestOps.class); + + @Parameter(0) + public boolean isReadonly; + + @Parameterized.Parameters(name = "{index},isReadonly={0}") + public static Collection data() { + return Arrays.asList( + new Object[][] { { true }, { false } }); + } + + @Test + public void testReportOperation() throws Exception { + long count = 5; + + accessor.reportOperation(CUST_ID, KEY_NAMESPACE, KEY_METADATA, count, isReadonly); + + ArgumentCaptor incrementCaptor = ArgumentCaptor.forClass(Increment.class); + verify(table).increment(incrementCaptor.capture()); + Increment increment = incrementCaptor.getValue(); + NavigableMap> familyCellMap = increment.getFamilyCellMap(); + List cells = familyCellMap.get(KEY_META_INFO_FAMILY); + assertEquals(1, cells.size()); + Cell cell = cells.get(0); + assertEquals(new Bytes(isReadonly ? 
READ_OP_COUNT_QUAL_BYTES : WRITE_OP_COUNT_QUAL_BYTES), + new Bytes(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength())); + assertEquals(new Bytes(Bytes.toBytes(count)), + new Bytes(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); + } + } + + protected void assertPut(ManagedKeyData keyData, Put put) { + assertEquals(Durability.SKIP_WAL, put.getDurability()); + assertEquals(HConstants.SYSTEMTABLE_QOS, put.getPriority()); + assertTrue(Bytes.compareTo(constructRowKeyForMetadata(keyData), + put.getRow()) == 0); + + Map valueMap = getValueMap(put); + + if (keyData.getTheKey() != null) { + assertNotNull(valueMap.get(new Bytes(DEK_CHECKSUM_QUAL_BYTES))); + assertNotNull(valueMap.get(new Bytes(DEK_WRAPPED_BY_STK_QUAL_BYTES))); + assertEquals(new Bytes(Bytes.toBytes(latestSystemKey.getKeyChecksum())), + valueMap.get(new Bytes(STK_CHECKSUM_QUAL_BYTES))); + } + else { + assertNull(valueMap.get(new Bytes(DEK_CHECKSUM_QUAL_BYTES))); + assertNull(valueMap.get(new Bytes(DEK_WRAPPED_BY_STK_QUAL_BYTES))); + assertNull(valueMap.get(new Bytes(STK_CHECKSUM_QUAL_BYTES))); + } + assertEquals(new Bytes(keyData.getKeyMetadata().getBytes()), + valueMap.get(new Bytes(DEK_METADATA_QUAL_BYTES))); + assertNotNull(valueMap.get(new Bytes(REFRESHED_TIMESTAMP_QUAL_BYTES))); + assertEquals(new Bytes(new byte[] { keyData.getKeyState().getVal() }), + valueMap.get(new Bytes(KEY_STATE_QUAL_BYTES))); + } + + private static Map getValueMap(Put put) { + NavigableMap> familyCellMap = put.getFamilyCellMap(); + List cells = familyCellMap.get(KEY_META_INFO_FAMILY); + Map valueMap = new HashMap<>(); + for (Cell cell : cells) { + valueMap.put( + new Bytes(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()), + new Bytes(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); + } + return valueMap; + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java new file mode 100644 index 000000000000..c6d33ab05dc9 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeyDataCache.java @@ -0,0 +1,554 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.keymeta; + +import static org.apache.hadoop.hbase.io.crypto.ManagedKeyData.KEY_SPACE_GLOBAL; +import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.ACTIVE; +import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.FAILED; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertNotNull; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.clearInvocations; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import net.bytebuddy.ByteBuddy; +import net.bytebuddy.dynamic.loading.ClassLoadingStrategy; +import net.bytebuddy.implementation.MethodDelegation; +import net.bytebuddy.implementation.bind.annotation.AllArguments; +import net.bytebuddy.implementation.bind.annotation.Origin; +import net.bytebuddy.implementation.bind.annotation.RuntimeType; +import net.bytebuddy.matcher.ElementMatchers; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.BlockJUnit4ClassRunner; +import org.junit.runners.Suite; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.mockito.Spy; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseConfiguration; 
+import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.io.crypto.Encryption; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; +import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; + +@RunWith(Suite.class) +@Suite.SuiteClasses({ + TestManagedKeyDataCache.TestGeneric.class, + TestManagedKeyDataCache.TestWithoutL2Cache.class, + TestManagedKeyDataCache.TestWithL2CacheAndNoDynamicLookup.class, + TestManagedKeyDataCache.TestWithL2CacheAndDynamicLookup.class, +}) +@Category({ MasterTests.class, SmallTests.class }) +public class TestManagedKeyDataCache { + private static final String ALIAS = "cust1"; + private static final byte[] CUST_ID = ALIAS.getBytes(); + private static Class providerClass; + + @Mock + private Server server; + @Spy + protected MockManagedKeyProvider testProvider; + protected ManagedKeyDataCache cache; + protected Configuration conf = HBaseConfiguration.create(); + + public static class ForwardingInterceptor { + static ThreadLocal delegate = new ThreadLocal<>(); + + static void setDelegate(MockManagedKeyProvider d) { + delegate.set(d); + } + + @RuntimeType + public Object intercept(@Origin Method method, @AllArguments Object[] args) throws Throwable { + // Translate the InvocationTargetException that results when the provider throws an exception. + // This is actually not needed if the intercept is delegated directly to the spy. 
+ try { + return method.invoke(delegate.get(), args); // calls the spy, triggering Mockito + } catch (InvocationTargetException e) { + throw e.getCause(); + } + } + } + + @BeforeClass + public static synchronized void setUpInterceptor() { + if (providerClass != null) { + return; + } + providerClass = new ByteBuddy() + .subclass(MockManagedKeyProvider.class) + .name("org.apache.hadoop.hbase.io.crypto.MockManagedKeyProviderSpy") + .method(ElementMatchers.any()) // Intercept all methods + // Using a delegator instead of directly forwarding to testProvider to + // facilitate switching the testProvider instance. Besides, it + .intercept(MethodDelegation.to(new ForwardingInterceptor())) + .make() + .load(MockManagedKeyProvider.class.getClassLoader(), ClassLoadingStrategy.Default.INJECTION) + .getLoaded(); + } + + @Before + public void setUp() { + MockitoAnnotations.openMocks(this); + ForwardingInterceptor.setDelegate(testProvider); + + Encryption.clearKeyProviderCache(); + + conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true"); + conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, providerClass.getName()); + + // Configure the server mock to return the configuration + when(server.getConfiguration()).thenReturn(conf); + + testProvider.setMultikeyGenMode(true); + } + + @Category({ MasterTests.class, SmallTests.class }) + public static class TestGeneric { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestGeneric.class); + + @Test + public void testEmptyCache() throws Exception { + ManagedKeyDataCache cache = new ManagedKeyDataCache(HBaseConfiguration.create(), null); + assertEquals(0, cache.getGenericCacheEntryCount()); + assertEquals(0, cache.getActiveCacheEntryCount()); + } + + @Test + public void testActiveKeysCacheKeyEqualsAndHashCode() { + byte[] custodian1 = new byte[] {1, 2, 3}; + byte[] custodian2 = new byte[] {1, 2, 3}; + byte[] custodian3 = new byte[] {4, 5, 6}; + String namespace1 = "ns1"; + String 
namespace2 = "ns2"; + + // Reflexive + ManagedKeyDataCache.ActiveKeysCacheKey key1 = + new ManagedKeyDataCache.ActiveKeysCacheKey(custodian1, namespace1); + assertTrue(key1.equals(key1)); + + // Symmetric and consistent for equal content + ManagedKeyDataCache.ActiveKeysCacheKey key2 = + new ManagedKeyDataCache.ActiveKeysCacheKey(custodian2, namespace1); + assertTrue(key1.equals(key2)); + assertTrue(key2.equals(key1)); + assertEquals(key1.hashCode(), key2.hashCode()); + + // Different custodian + ManagedKeyDataCache.ActiveKeysCacheKey key3 = + new ManagedKeyDataCache.ActiveKeysCacheKey(custodian3, namespace1); + assertFalse(key1.equals(key3)); + assertFalse(key3.equals(key1)); + + // Different namespace + ManagedKeyDataCache.ActiveKeysCacheKey key4 = + new ManagedKeyDataCache.ActiveKeysCacheKey(custodian1, namespace2); + assertFalse(key1.equals(key4)); + assertFalse(key4.equals(key1)); + + // Null and different class + assertFalse(key1.equals(null)); + assertFalse(key1.equals("not a key")); + + // Both fields different + ManagedKeyDataCache.ActiveKeysCacheKey key5 = + new ManagedKeyDataCache.ActiveKeysCacheKey(custodian3, namespace2); + assertFalse(key1.equals(key5)); + assertFalse(key5.equals(key1)); + } + } + + @RunWith(BlockJUnit4ClassRunner.class) + @Category({ MasterTests.class, SmallTests.class }) + public static class TestWithoutL2Cache extends TestManagedKeyDataCache { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestWithoutL2Cache.class); + + @Before + public void setUp() { + super.setUp(); + cache = new ManagedKeyDataCache(conf, null); + } + + @Test + public void testGenericCacheForNonExistentKey() throws Exception { + assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata", null)); + verify(testProvider).unwrapKey(any(String.class), any()); + } + + public void testWithInvalidProvider() throws Exception { + ManagedKeyData globalKey1 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL); + 
doThrow(new IOException("Test exception")).when(testProvider).unwrapKey(any(String.class), any()); + assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, globalKey1.getKeyMetadata(), null)); + verify(testProvider).unwrapKey(any(String.class), any()); + // A second call to getEntry should not result in a call to the provider due to -ve entry. + clearInvocations(testProvider); + verify(testProvider, never()).unwrapKey(any(String.class), any()); + assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, globalKey1.getKeyMetadata(), null)); + doThrow(new IOException("Test exception")).when(testProvider).getManagedKey(any(), any(String.class)); + assertNull(cache.getAnActiveEntry(CUST_ID, KEY_SPACE_GLOBAL)); + verify(testProvider).getManagedKey(any(), any(String.class)); + // A second call to getRandomEntry should not result in a call to the provider due to -ve entry. + clearInvocations(testProvider); + assertNull(cache.getAnActiveEntry(CUST_ID, KEY_SPACE_GLOBAL)); + verify(testProvider, never()).getManagedKey(any(), any(String.class)); + } + + @Test + public void testGenericCache() throws Exception { + ManagedKeyData globalKey1 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL); + assertEquals(globalKey1, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, globalKey1.getKeyMetadata(), null)); + verify(testProvider).getManagedKey(any(), any(String.class)); + clearInvocations(testProvider); + ManagedKeyData globalKey2 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL); + assertEquals(globalKey2, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, globalKey2.getKeyMetadata(), null)); + verify(testProvider).getManagedKey(any(), any(String.class)); + clearInvocations(testProvider); + ManagedKeyData globalKey3 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL); + assertEquals(globalKey3, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, globalKey3.getKeyMetadata(), null)); + verify(testProvider).getManagedKey(any(), any(String.class)); + } + + @Test + public void testActiveKeysCache() throws 
Exception { + conf.setInt(HConstants.CRYPTO_MANAGED_KEYS_PER_CUST_NAMESPACE_ACTIVE_KEY_COUNT, 10); + assertNotNull(cache.getAnActiveEntry(CUST_ID, KEY_SPACE_GLOBAL)); + verify(testProvider, times(10)).getManagedKey(any(), any(String.class)); + clearInvocations(testProvider); + Set activeKeys = new HashSet<>(); + for (int i = 0; i < 10; ++i) { + activeKeys.add(cache.getAnActiveEntry(CUST_ID, KEY_SPACE_GLOBAL)); + } + assertTrue(activeKeys.size() > 1); + verify(testProvider, never()).getManagedKey(any(), any(String.class)); + } + + @Test + public void testGenericCacheOperations() throws Exception { + ManagedKeyData globalKey1 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL); + assertNull(cache.removeEntry(globalKey1.getKeyMetadata())); + assertGenericCacheEntries(globalKey1); + ManagedKeyData nsKey1 = testProvider.getManagedKey(CUST_ID, "namespace1"); + assertGenericCacheEntries(nsKey1, globalKey1); + ManagedKeyData globalKey2 = testProvider.getManagedKey(CUST_ID, + KEY_SPACE_GLOBAL); + assertGenericCacheEntries(globalKey2, nsKey1, globalKey1); + ManagedKeyData nsKey2 = testProvider.getManagedKey(CUST_ID, + "namespace1"); + assertGenericCacheEntries(nsKey2, globalKey2, nsKey1, globalKey1); + + assertEquals(globalKey1, cache.removeEntry(globalKey1.getKeyMetadata())); + assertGenericCacheEntries(nsKey2, globalKey2, nsKey1); + assertNull(cache.removeEntry(globalKey1.getKeyMetadata())); + // It should be able to retrieve the once removed. 
+ assertGenericCacheEntries(nsKey2, globalKey2, nsKey1, globalKey1); + assertEquals(globalKey1, cache.removeEntry(globalKey1.getKeyMetadata())); + assertEquals(nsKey2, cache.removeEntry(nsKey2.getKeyMetadata())); + assertGenericCacheEntries(globalKey2, nsKey1); + assertEquals(nsKey1, cache.removeEntry(nsKey1.getKeyMetadata())); + assertGenericCacheEntries(globalKey2); + assertEquals(globalKey2, cache.removeEntry(globalKey2.getKeyMetadata())); + cache.invalidateAll(); + assertEquals(0, cache.getGenericCacheEntryCount()); + // Sholld be functional after innvalidation. + assertGenericCacheEntries(globalKey1); + } + + @Test + public void testRandomKeyGetNoActive() throws Exception { + testProvider.setMockedKeyState(ALIAS, FAILED); + assertNull(cache.getAnActiveEntry(CUST_ID, KEY_SPACE_GLOBAL)); + verify(testProvider).getManagedKey(any(), any(String.class)); + clearInvocations(testProvider); + assertNull(cache.getAnActiveEntry(CUST_ID, KEY_SPACE_GLOBAL)); + verify(testProvider, never()).getManagedKey(any(), any(String.class)); + } + + private void removeFromActiveKeys(ManagedKeyData key) { + cache.removeFromActiveKeys(key.getKeyCustodian(), key.getKeyNamespace(), + key.getKeyMetadata()); + } + + @Test + public void testActiveKeysCacheOperations() throws Exception { + ManagedKeyData key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL); + assertNull(cache.removeFromActiveKeys(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadata())); + + conf.setInt(HConstants.CRYPTO_MANAGED_KEYS_PER_CUST_NAMESPACE_ACTIVE_KEY_COUNT, 2); + assertNotNull(cache.getAnActiveEntry(CUST_ID, KEY_SPACE_GLOBAL)); + assertNotNull(cache.getAnActiveEntry(CUST_ID, "namespace1")); + assertEquals(4, cache.getActiveCacheEntryCount()); + + key = cache.getAnActiveEntry(CUST_ID, KEY_SPACE_GLOBAL); + removeFromActiveKeys(key); + assertEquals(3, cache.getActiveCacheEntryCount()); + assertNull(cache.removeFromActiveKeys(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadata())); + assertEquals(3, 
cache.getActiveCacheEntryCount()); + removeFromActiveKeys(cache.getAnActiveEntry(CUST_ID, "namespace1")); + assertEquals(2, cache.getActiveCacheEntryCount()); + removeFromActiveKeys(cache.getAnActiveEntry(CUST_ID, KEY_SPACE_GLOBAL)); + assertEquals(1, cache.getActiveCacheEntryCount()); + removeFromActiveKeys(cache.getAnActiveEntry(CUST_ID, "namespace1")); + assertEquals(0, cache.getActiveCacheEntryCount()); + // It should be able to retrieve the keys again + assertNotNull(cache.getAnActiveEntry(CUST_ID, KEY_SPACE_GLOBAL)); + assertEquals(2, cache.getActiveCacheEntryCount()); + + cache.invalidateAll(); + assertEquals(0, cache.getActiveCacheEntryCount()); + assertNotNull(cache.getAnActiveEntry(CUST_ID, KEY_SPACE_GLOBAL)); + assertEquals(2, cache.getActiveCacheEntryCount()); + } + + @Test + public void testGenericCacheUsingActiveKeysCacheOverProvider() throws Exception { + conf.setInt(HConstants.CRYPTO_MANAGED_KEYS_PER_CUST_NAMESPACE_ACTIVE_KEY_COUNT, 3); + ManagedKeyData key = cache.getAnActiveEntry(CUST_ID, KEY_SPACE_GLOBAL); + assertNotNull(key); + assertEquals(key, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadata(), null)); + verify(testProvider, never()).unwrapKey(any(String.class), any()); + } + + @Test + public void testActiveKeysCacheSkippingProviderWhenGenericCacheEntriesExist() throws Exception { + ManagedKeyData key1 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL); + assertEquals(key1, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key1.getKeyMetadata(), null)); + ManagedKeyData key2 = testProvider.getManagedKey(CUST_ID, "namespace1"); + assertEquals(key2, cache.getEntry(CUST_ID, "namespace1", key2.getKeyMetadata(), null)); + verify(testProvider, times(2)).getManagedKey(any(), any(String.class)); + clearInvocations(testProvider); + assertEquals(key1, cache.getAnActiveEntry(CUST_ID, KEY_SPACE_GLOBAL)); + // In this case, the provider is not called because the existing keys in generic cache are + // used. 
+ verify(testProvider, never()).getManagedKey(any(), any(String.class)); + assertEquals(1, cache.getActiveCacheEntryCount()); + cache.invalidateAll(); + assertEquals(0, cache.getActiveCacheEntryCount()); + } + + @Test + public void testActiveKeysCacheIgnnoreFailedKeyInGenericCache() throws Exception { + testProvider.setMockedKeyState(ALIAS, FAILED); + ManagedKeyData key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL); + assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadata(), null)); + clearInvocations(testProvider); + testProvider.setMockedKeyState(ALIAS, ACTIVE); + assertNotNull(cache.getAnActiveEntry(CUST_ID, KEY_SPACE_GLOBAL)); + verify(testProvider).getManagedKey(any(), any(String.class)); + } + + @Test + public void testActiveKeysCacheWithMultipleCustodiansInGenericCache() throws Exception { + ManagedKeyData key1 = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL); + assertNotNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key1.getKeyMetadata(), null)); + String alias2 = "cust2"; + byte[] cust_id2 = alias2.getBytes(); + ManagedKeyData key2 = testProvider.getManagedKey(cust_id2, KEY_SPACE_GLOBAL); + assertNotNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key2.getKeyMetadata(), null)); + assertNotNull(cache.getAnActiveEntry(CUST_ID, KEY_SPACE_GLOBAL)); + assertEquals(1, cache.getActiveCacheEntryCount()); + } + + @Test + public void testActiveKeysCacheWithMultipleNamespaces() throws Exception { + ManagedKeyData key1 = cache.getAnActiveEntry(CUST_ID, KEY_SPACE_GLOBAL); + assertNotNull(key1); + assertEquals(key1, cache.getAnActiveEntry(CUST_ID, KEY_SPACE_GLOBAL)); + ManagedKeyData key2 = cache.getAnActiveEntry(CUST_ID, "namespace1"); + assertNotNull(key2); + assertEquals(key2, cache.getAnActiveEntry(CUST_ID, "namespace1")); + ManagedKeyData key3 = cache.getAnActiveEntry(CUST_ID, "namespace2"); + assertNotNull(key3); + assertEquals(key3, cache.getAnActiveEntry(CUST_ID, "namespace2")); + verify(testProvider, 
times(3)).getManagedKey(any(), any(String.class)); + assertEquals(3, cache.getActiveCacheEntryCount()); + } + } + + @RunWith(BlockJUnit4ClassRunner.class) + @Category({ MasterTests.class, SmallTests.class }) + public static class TestWithL2CacheAndNoDynamicLookup extends TestManagedKeyDataCache { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestWithL2CacheAndNoDynamicLookup.class); + private KeymetaTableAccessor mockL2 = mock(KeymetaTableAccessor.class); + + @Before + public void setUp() { + super.setUp(); + conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_ENABLED_CONF_KEY, false); + cache = new ManagedKeyDataCache(conf, mockL2); + } + + @Test + public void testGenericCacheNonExistentKeyInL2Cache() throws Exception { + assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata", null)); + verify(mockL2).getKey(any(), any(String.class), any(String.class)); + clearInvocations(mockL2); + assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata", null)); + verify(mockL2, never()).getKey(any(), any(String.class), any(String.class)); + } + + @Test + public void testGenericCacheRetrievalFromL2Cache() throws Exception { + ManagedKeyData key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL); + when(mockL2.getKey(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadata())) + .thenReturn(key); + assertEquals(key, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadata(), null)); + verify(mockL2).getKey(any(), any(String.class), any(String.class)); + } + + @Test + public void testActiveKeysCacheNonExistentKeyInL2Cache() throws Exception { + assertNull(cache.getAnActiveEntry(CUST_ID, KEY_SPACE_GLOBAL)); + verify(mockL2).getActiveKeys(any(), any(String.class)); + clearInvocations(mockL2); + assertNull(cache.getAnActiveEntry(CUST_ID, KEY_SPACE_GLOBAL)); + verify(mockL2, never()).getActiveKeys(any(), any(String.class)); + } + + @Test + public void testActiveKeysCacheRetrievalFromL2Cache() throws 
Exception { + ManagedKeyData key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL); + when(mockL2.getActiveKeys(CUST_ID, KEY_SPACE_GLOBAL)) + .thenReturn(List.of(key)); + assertEquals(key, cache.getAnActiveEntry(CUST_ID, KEY_SPACE_GLOBAL)); + verify(mockL2).getActiveKeys(any(), any(String.class)); + } + + @Test + public void testGenericCacheWithKeymetaAccessorException() throws Exception { + when(mockL2.getKey(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata")) + .thenThrow(new IOException("Test exception")); + assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata", null)); + verify(mockL2).getKey(any(), any(String.class), any(String.class)); + clearInvocations(mockL2); + assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata", null)); + verify(mockL2, never()).getKey(any(), any(String.class), any(String.class)); + } + + @Test + public void testGetRandomEntryWithKeymetaAccessorException() throws Exception { + when(mockL2.getActiveKeys(CUST_ID, KEY_SPACE_GLOBAL)) + .thenThrow(new IOException("Test exception")); + assertNull(cache.getAnActiveEntry(CUST_ID, KEY_SPACE_GLOBAL)); + verify(mockL2).getActiveKeys(any(), any(String.class)); + clearInvocations(mockL2); + assertNull(cache.getAnActiveEntry(CUST_ID, KEY_SPACE_GLOBAL)); + verify(mockL2, never()).getActiveKeys(any(), any(String.class)); + } + } + + @RunWith(BlockJUnit4ClassRunner.class) + @Category({ MasterTests.class, SmallTests.class }) + public static class TestWithL2CacheAndDynamicLookup extends TestManagedKeyDataCache { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestWithL2CacheAndDynamicLookup.class); + private KeymetaTableAccessor mockL2 = mock(KeymetaTableAccessor.class); + + @Before + public void setUp() { + super.setUp(); + conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_ENABLED_CONF_KEY, true); + cache = new ManagedKeyDataCache(conf, mockL2); + } + + @Test + public void 
testGenericCacheRetrievalFromProviderWhenKeyNotFoundInL2Cache() throws Exception { + ManagedKeyData key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL); + doReturn(key).when(testProvider).unwrapKey(any(String.class), any()); + assertEquals(key, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadata(), null)); + verify(mockL2).getKey(any(), any(String.class), any(String.class)); + verify(mockL2).addKey(any(ManagedKeyData.class)); + } + + @Test + public void testAddKeyFailure() throws Exception { + ManagedKeyData key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL); + doReturn(key).when(testProvider).unwrapKey(any(String.class), any()); + doThrow(new IOException("Test exception")).when(mockL2).addKey(any(ManagedKeyData.class)); + assertEquals(key, cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, key.getKeyMetadata(), null)); + verify(mockL2).addKey(any(ManagedKeyData.class)); + } + + @Test + public void testGenericCacheDynamicLookupUnexpectedException() throws Exception { + doThrow(new RuntimeException("Test exception")).when(testProvider).unwrapKey(any(String.class), any()); + assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata", null)); + assertNull(cache.getEntry(CUST_ID, KEY_SPACE_GLOBAL, "test-metadata", null)); + verify(mockL2).getKey(any(), any(String.class), any(String.class)); + verify(mockL2, never()).addKey(any(ManagedKeyData.class)); + } + + @Test + public void testActiveKeysCacheDynamicLookupUnexpectedException() throws Exception { + doThrow(new RuntimeException("Test exception")).when(testProvider).getManagedKey(any(), any(String.class)); + assertNull(cache.getAnActiveEntry(CUST_ID, KEY_SPACE_GLOBAL)); + verify(testProvider).getManagedKey(any(), any(String.class)); + clearInvocations(testProvider); + assertNull(cache.getAnActiveEntry(CUST_ID, KEY_SPACE_GLOBAL)); + verify(testProvider, never()).getManagedKey(any(), any(String.class)); + } + + @Test + public void 
testActiveKeysCacheRetrievalFromProviderWhenKeyNotFoundInL2Cache() throws Exception { + ManagedKeyData key = testProvider.getManagedKey(CUST_ID, KEY_SPACE_GLOBAL); + doReturn(key).when(testProvider).getManagedKey(any(), any(String.class)); + assertEquals(key, cache.getAnActiveEntry(CUST_ID, KEY_SPACE_GLOBAL)); + verify(mockL2).getActiveKeys(any(), any(String.class)); + } + } + + protected void assertGenericCacheEntries(ManagedKeyData... keys) throws Exception { + for (ManagedKeyData key: keys) { + assertEquals(key, cache.getEntry(key.getKeyCustodian(), key.getKeyNamespace(), key.getKeyMetadata(), null)); + } + assertEquals(keys.length, cache.getGenericCacheEntryCount()); + assertEquals(0, cache.getActiveCacheEntryCount()); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeymeta.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeymeta.java new file mode 100644 index 000000000000..1ffed4707475 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestManagedKeymeta.java @@ -0,0 +1,150 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.keymeta; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.lang.reflect.Field; +import java.security.KeyException; +import java.util.List; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.io.crypto.Encryption; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyState; +import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; + +@Category({ MasterTests.class, MediumTests.class }) +public class TestManagedKeymeta extends ManagedKeyTestBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestManagedKeymeta.class); + + @Test + public void testEnableLocal() throws Exception { + HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); + KeymetaAdmin keymetaAdmin = master.getKeymetaAdmin(); + doTestEnable(keymetaAdmin); + } + + @Test + public void testEnableOverRPC() throws Exception { + KeymetaAdmin adminClient = new KeymetaAdminClient(TEST_UTIL.getConnection()); + doTestEnable(adminClient); + } + + private void doTestEnable(KeymetaAdmin adminClient) throws IOException, KeyException { + HMaster master = 
TEST_UTIL.getHBaseCluster().getMaster(); + MockManagedKeyProvider managedKeyProvider = (MockManagedKeyProvider) + Encryption.getKeyProvider(master.getConfiguration()); + String cust = "cust1"; + String encodedCust = ManagedKeyProvider.encodeToStr(cust.getBytes()); + List managedKeyStates = + adminClient.enableKeyManagement(encodedCust, ManagedKeyData.KEY_SPACE_GLOBAL); + assertKeyDataListSingleKey(managedKeyStates, ManagedKeyState.ACTIVE); + + List managedKeys = + adminClient.getManagedKeys(encodedCust, ManagedKeyData.KEY_SPACE_GLOBAL); + assertEquals(1, managedKeys.size()); + assertEquals(managedKeyProvider.getLastGeneratedKeyData(cust, + ManagedKeyData.KEY_SPACE_GLOBAL).cloneWithoutKey(), managedKeys.get(0).cloneWithoutKey()); + + String nonExistentCust = "nonExistentCust"; + managedKeyProvider.setMockedKeyState(nonExistentCust, ManagedKeyState.FAILED); + List keyDataList1 = + adminClient.enableKeyManagement(ManagedKeyProvider.encodeToStr(nonExistentCust.getBytes()), + ManagedKeyData.KEY_SPACE_GLOBAL); + assertKeyDataListSingleKey(keyDataList1, ManagedKeyState.FAILED); + + String disabledCust = "disabledCust"; + managedKeyProvider.setMockedKeyState(disabledCust, ManagedKeyState.DISABLED); + List keyDataList2 = + adminClient.enableKeyManagement(ManagedKeyProvider.encodeToStr(disabledCust.getBytes()), + ManagedKeyData.KEY_SPACE_GLOBAL); + assertKeyDataListSingleKey(keyDataList2, ManagedKeyState.DISABLED); + } + + private static void assertKeyDataListSingleKey(List managedKeyStates, + ManagedKeyState keyState) { + assertNotNull(managedKeyStates); + assertEquals(1, managedKeyStates.size()); + assertEquals(keyState, managedKeyStates.get(0).getKeyState()); + } + + @Test + public void testEnableKeyManagementWithServiceException() throws Exception { + ManagedKeysProtos.ManagedKeysService.BlockingInterface mockStub = + mock(ManagedKeysProtos.ManagedKeysService.BlockingInterface.class); + + ServiceException networkError = new ServiceException("Network error"); + 
networkError.initCause(new IOException("Network error")); + when(mockStub.enableKeyManagement(any(), any())).thenThrow(networkError); + + KeymetaAdminClient client = new KeymetaAdminClient(TEST_UTIL.getConnection()); + // Use reflection to set the stub + Field stubField = KeymetaAdminClient.class.getDeclaredField("stub"); + stubField.setAccessible(true); + stubField.set(client, mockStub); + + IOException exception = assertThrows(IOException.class, () -> { + client.enableKeyManagement("cust", "namespace"); + }); + + assertTrue(exception.getMessage().contains("Network error")); + } + + @Test + public void testGetManagedKeysWithServiceException() throws Exception { + // Similar test for getManagedKeys method + ManagedKeysProtos.ManagedKeysService.BlockingInterface mockStub = + mock(ManagedKeysProtos.ManagedKeysService.BlockingInterface.class); + + ServiceException networkError = new ServiceException("Network error"); + networkError.initCause(new IOException("Network error")); + when(mockStub.getManagedKeys(any(), any())).thenThrow(networkError); + + KeymetaAdminClient client = new KeymetaAdminClient(TEST_UTIL.getConnection()); + Field stubField = KeymetaAdminClient.class.getDeclaredField("stub"); + stubField.setAccessible(true); + stubField.set(client, mockStub); + + IOException exception = assertThrows(IOException.class, () -> { + client.getManagedKeys("cust", "namespace"); + }); + + assertTrue(exception.getMessage().contains("Network error")); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestSystemKeyCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestSystemKeyCache.java new file mode 100644 index 000000000000..555b5b47c726 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestSystemKeyCache.java @@ -0,0 +1,310 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.keymeta; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.security.Key; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import javax.crypto.spec.SecretKeySpec; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyState; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +/** + * Tests for SystemKeyCache class + */ +@Category({ MasterTests.class, SmallTests.class }) +public class TestSystemKeyCache { + + @ClassRule + 
public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestSystemKeyCache.class); + + @Mock + private SystemKeyAccessor mockAccessor; + + private static final byte[] TEST_CUSTODIAN = "test-custodian".getBytes(); + private static final String TEST_NAMESPACE = "test-namespace"; + private static final String TEST_METADATA_1 = "metadata-1"; + private static final String TEST_METADATA_2 = "metadata-2"; + private static final String TEST_METADATA_3 = "metadata-3"; + + private Key testKey1; + private Key testKey2; + private Key testKey3; + private ManagedKeyData keyData1; + private ManagedKeyData keyData2; + private ManagedKeyData keyData3; + private Path keyPath1; + private Path keyPath2; + private Path keyPath3; + + @Before + public void setUp() { + MockitoAnnotations.openMocks(this); + + // Create test keys + testKey1 = new SecretKeySpec("test-key-1-bytes".getBytes(), "AES"); + testKey2 = new SecretKeySpec("test-key-2-bytes".getBytes(), "AES"); + testKey3 = new SecretKeySpec("test-key-3-bytes".getBytes(), "AES"); + + // Create test key data with different checksums + keyData1 = new ManagedKeyData(TEST_CUSTODIAN, TEST_NAMESPACE, testKey1, + ManagedKeyState.ACTIVE, TEST_METADATA_1, 1000L, 0, 0); + keyData2 = new ManagedKeyData(TEST_CUSTODIAN, TEST_NAMESPACE, testKey2, + ManagedKeyState.ACTIVE, TEST_METADATA_2, 2000L, 0, 0); + keyData3 = new ManagedKeyData(TEST_CUSTODIAN, TEST_NAMESPACE, testKey3, + ManagedKeyState.ACTIVE, TEST_METADATA_3, 3000L, 0, 0); + + // Create test paths + keyPath1 = new Path("/system/keys/key1"); + keyPath2 = new Path("/system/keys/key2"); + keyPath3 = new Path("/system/keys/key3"); + } + + @Test + public void testCreateCacheWithSingleSystemKey() throws Exception { + // Setup + List keyPaths = Collections.singletonList(keyPath1); + when(mockAccessor.getAllSystemKeyFiles()).thenReturn(keyPaths); + when(mockAccessor.loadSystemKey(keyPath1)).thenReturn(keyData1); + + // Execute + SystemKeyCache cache = 
SystemKeyCache.createCache(mockAccessor); + + // Verify + assertNotNull(cache); + assertSame(keyData1, cache.getLatestSystemKey()); + assertSame(keyData1, cache.getSystemKeyByChecksum(keyData1.getKeyChecksum())); + assertNull(cache.getSystemKeyByChecksum(999L)); // Non-existent checksum + + verify(mockAccessor).getAllSystemKeyFiles(); + verify(mockAccessor).loadSystemKey(keyPath1); + } + + @Test + public void testCreateCacheWithMultipleSystemKeys() throws Exception { + // Setup - keys should be processed in order, first one becomes latest + List keyPaths = Arrays.asList(keyPath1, keyPath2, keyPath3); + when(mockAccessor.getAllSystemKeyFiles()).thenReturn(keyPaths); + when(mockAccessor.loadSystemKey(keyPath1)).thenReturn(keyData1); + when(mockAccessor.loadSystemKey(keyPath2)).thenReturn(keyData2); + when(mockAccessor.loadSystemKey(keyPath3)).thenReturn(keyData3); + + // Execute + SystemKeyCache cache = SystemKeyCache.createCache(mockAccessor); + + // Verify + assertNotNull(cache); + assertSame(keyData1, cache.getLatestSystemKey()); // First key becomes latest + + // All keys should be accessible by checksum + assertSame(keyData1, cache.getSystemKeyByChecksum(keyData1.getKeyChecksum())); + assertSame(keyData2, cache.getSystemKeyByChecksum(keyData2.getKeyChecksum())); + assertSame(keyData3, cache.getSystemKeyByChecksum(keyData3.getKeyChecksum())); + + // Non-existent checksum should return null + assertNull(cache.getSystemKeyByChecksum(999L)); + + verify(mockAccessor).getAllSystemKeyFiles(); + verify(mockAccessor).loadSystemKey(keyPath1); + verify(mockAccessor).loadSystemKey(keyPath2); + verify(mockAccessor).loadSystemKey(keyPath3); + } + + @Test + public void testCreateCacheWithNoSystemKeyFiles() throws Exception { + // Setup - this covers the uncovered lines 46-47 + when(mockAccessor.getAllSystemKeyFiles()).thenReturn(Collections.emptyList()); + + // Execute + SystemKeyCache cache = SystemKeyCache.createCache(mockAccessor); + + // Verify + assertNull(cache); + 
verify(mockAccessor).getAllSystemKeyFiles(); + } + + @Test + public void testCreateCacheWithEmptyKeyFilesList() throws Exception { + // Setup - alternative empty scenario + when(mockAccessor.getAllSystemKeyFiles()).thenReturn(new ArrayList<>()); + + // Execute + SystemKeyCache cache = SystemKeyCache.createCache(mockAccessor); + + // Verify + assertNull(cache); + verify(mockAccessor).getAllSystemKeyFiles(); + } + + @Test + public void testGetLatestSystemKeyConsistency() throws Exception { + // Setup + List<Path> keyPaths = Arrays.asList(keyPath1, keyPath2); + when(mockAccessor.getAllSystemKeyFiles()).thenReturn(keyPaths); + when(mockAccessor.loadSystemKey(keyPath1)).thenReturn(keyData1); + when(mockAccessor.loadSystemKey(keyPath2)).thenReturn(keyData2); + + // Execute + SystemKeyCache cache = SystemKeyCache.createCache(mockAccessor); + + // Verify - latest key should be consistent across calls + ManagedKeyData latest1 = cache.getLatestSystemKey(); + ManagedKeyData latest2 = cache.getLatestSystemKey(); + + assertNotNull(latest1); + assertSame(latest1, latest2); + assertSame(keyData1, latest1); // First key should be latest + } + + @Test + public void testGetSystemKeyByChecksumWithDifferentKeys() throws Exception { + // Setup + List<Path> keyPaths = Arrays.asList(keyPath1, keyPath2, keyPath3); + when(mockAccessor.getAllSystemKeyFiles()).thenReturn(keyPaths); + when(mockAccessor.loadSystemKey(keyPath1)).thenReturn(keyData1); + when(mockAccessor.loadSystemKey(keyPath2)).thenReturn(keyData2); + when(mockAccessor.loadSystemKey(keyPath3)).thenReturn(keyData3); + + // Execute + SystemKeyCache cache = SystemKeyCache.createCache(mockAccessor); + + // Verify each key can be retrieved by its unique checksum + long checksum1 = keyData1.getKeyChecksum(); + long checksum2 = keyData2.getKeyChecksum(); + long checksum3 = keyData3.getKeyChecksum(); + + // Checksums should be different (use JUnit assertions, not the bare `assert` + // statement, which is a no-op unless the JVM runs with -ea) + assertTrue(checksum1 != checksum2); + assertTrue(checksum2 != checksum3); + assertTrue(checksum1 != checksum3); + + // Each 
key should be retrievable by its checksum + assertSame(keyData1, cache.getSystemKeyByChecksum(checksum1)); + assertSame(keyData2, cache.getSystemKeyByChecksum(checksum2)); + assertSame(keyData3, cache.getSystemKeyByChecksum(checksum3)); + } + + @Test + public void testGetSystemKeyByChecksumWithNonExistentChecksum() throws Exception { + // Setup + List keyPaths = Collections.singletonList(keyPath1); + when(mockAccessor.getAllSystemKeyFiles()).thenReturn(keyPaths); + when(mockAccessor.loadSystemKey(keyPath1)).thenReturn(keyData1); + + // Execute + SystemKeyCache cache = SystemKeyCache.createCache(mockAccessor); + + // Verify + assertNotNull(cache); + + // Test various non-existent checksums + assertNull(cache.getSystemKeyByChecksum(0L)); + assertNull(cache.getSystemKeyByChecksum(-1L)); + assertNull(cache.getSystemKeyByChecksum(Long.MAX_VALUE)); + assertNull(cache.getSystemKeyByChecksum(Long.MIN_VALUE)); + + // But the actual checksum should work + assertSame(keyData1, cache.getSystemKeyByChecksum(keyData1.getKeyChecksum())); + } + + @Test(expected = IOException.class) + public void testCreateCacheWithAccessorIOException() throws Exception { + // Setup - accessor throws IOException + when(mockAccessor.getAllSystemKeyFiles()).thenThrow(new IOException("File system error")); + + // Execute - should propagate the IOException + SystemKeyCache.createCache(mockAccessor); + } + + @Test(expected = IOException.class) + public void testCreateCacheWithLoadSystemKeyIOException() throws Exception { + // Setup - loading key throws IOException + List keyPaths = Collections.singletonList(keyPath1); + when(mockAccessor.getAllSystemKeyFiles()).thenReturn(keyPaths); + when(mockAccessor.loadSystemKey(keyPath1)).thenThrow(new IOException("Key load error")); + + // Execute - should propagate the IOException + SystemKeyCache.createCache(mockAccessor); + } + + @Test + public void testCacheWithKeysHavingSameChecksum() throws Exception { + // Setup - create two keys that will have the same 
checksum (same content) + Key sameKey1 = new SecretKeySpec("identical-bytes".getBytes(), "AES"); + Key sameKey2 = new SecretKeySpec("identical-bytes".getBytes(), "AES"); + + ManagedKeyData sameManagedKey1 = new ManagedKeyData(TEST_CUSTODIAN, TEST_NAMESPACE, + sameKey1, ManagedKeyState.ACTIVE, "metadata-A", 1000L, 0, 0); + ManagedKeyData sameManagedKey2 = new ManagedKeyData(TEST_CUSTODIAN, TEST_NAMESPACE, + sameKey2, ManagedKeyState.ACTIVE, "metadata-B", 2000L, 0, 0); + + // Verify they have the same checksum + assertEquals(sameManagedKey1.getKeyChecksum(), sameManagedKey2.getKeyChecksum()); + + List<Path> keyPaths = Arrays.asList(keyPath1, keyPath2); + when(mockAccessor.getAllSystemKeyFiles()).thenReturn(keyPaths); + when(mockAccessor.loadSystemKey(keyPath1)).thenReturn(sameManagedKey1); + when(mockAccessor.loadSystemKey(keyPath2)).thenReturn(sameManagedKey2); + + // Execute + SystemKeyCache cache = SystemKeyCache.createCache(mockAccessor); + + // Verify - second key should overwrite first in the map due to same checksum + assertNotNull(cache); + assertSame(sameManagedKey1, cache.getLatestSystemKey()); // First is still latest + + // The map should contain the second key for the shared checksum + ManagedKeyData retrievedKey = cache.getSystemKeyByChecksum(sameManagedKey1.getKeyChecksum()); + assertSame(sameManagedKey2, retrievedKey); // Last one wins in TreeMap + } + + @Test + public void testCreateCacheWithUnexpectedNullKeyData() throws Exception { + when(mockAccessor.getAllSystemKeyFiles()).thenReturn(Arrays.asList(keyPath1)); + when(mockAccessor.loadSystemKey(keyPath1)).thenThrow(new RuntimeException("Key load error")); + + RuntimeException ex = assertThrows(RuntimeException.class, () -> { + SystemKeyCache.createCache(mockAccessor); + }); + assertEquals("Key load error", ex.getMessage()); + } +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index e78ca7d0cdb7..013648d41c4d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -38,6 +38,9 @@ import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.favored.FavoredNodesManager; +import org.apache.hadoop.hbase.keymeta.KeymetaAdmin; +import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; +import org.apache.hadoop.hbase.keymeta.SystemKeyCache; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.hbck.HbckChore; import org.apache.hadoop.hbase.master.janitor.CatalogJanitor; @@ -116,6 +119,18 @@ public ChoreService getChoreService() { return null; } + @Override public SystemKeyCache getSystemKeyCache() { + return null; + } + + @Override public ManagedKeyDataCache getManagedKeyDataCache() { + return null; + } + + @Override public KeymetaAdmin getKeymetaAdmin() { + return null; + } + @Override public CatalogJanitor getCatalogJanitor() { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index a25bae6ec7bd..b63bbbaac8be 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -52,6 +52,9 @@ import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.hadoop.hbase.ipc.RpcServerInterface; +import org.apache.hadoop.hbase.keymeta.KeymetaAdmin; +import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; +import org.apache.hadoop.hbase.keymeta.SystemKeyCache; import 
org.apache.hadoop.hbase.mob.MobFileCache; import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager; import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager; @@ -556,6 +559,18 @@ public ChoreService getChoreService() { return null; } + @Override public SystemKeyCache getSystemKeyCache() { + return null; + } + + @Override public ManagedKeyDataCache getManagedKeyDataCache() { + return null; + } + + @Override public KeymetaAdmin getKeymetaAdmin() { + return null; + } + @Override public void updateRegionFavoredNodesMapping(String encodedRegionName, List favoredNodes) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java index fcb67ed31b47..ed11d69420ac 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java @@ -33,6 +33,9 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.keymeta.KeymetaAdmin; +import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; +import org.apache.hadoop.hbase.keymeta.SystemKeyCache; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskGroup; import org.apache.hadoop.hbase.testclassification.MasterTests; @@ -327,5 +330,17 @@ public ClusterStatusTracker getClusterStatusTracker() { public ActiveMasterManager getActiveMasterManager() { return activeMasterManager; } + + @Override public SystemKeyCache getSystemKeyCache() { + return null; + } + + @Override public ManagedKeyDataCache getManagedKeyDataCache() { + return null; + } + + @Override public KeymetaAdmin getKeymetaAdmin() { + return null; + } } } diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java new file mode 100644 index 000000000000..6733ad2eb959 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestKeymetaAdminImpl.java @@ -0,0 +1,394 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master; + +import static org.apache.hadoop.hbase.io.crypto.ManagedKeyData.KEY_SPACE_GLOBAL; +import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.ACTIVE; +import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.DISABLED; +import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.FAILED; +import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.INACTIVE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.argThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.security.Key; +import java.security.KeyException; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.io.crypto.Encryption; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyState; +import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider; +import org.apache.hadoop.hbase.keymeta.KeymetaAdminImpl; +import org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; 
+import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; +import org.junit.runner.RunWith; +import org.junit.runners.BlockJUnit4ClassRunner; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; +import org.junit.runners.Suite; + +@RunWith(Suite.class) +@Suite.SuiteClasses({ TestKeymetaAdminImpl.TestWhenDisabled.class, + TestKeymetaAdminImpl.TestAdminImpl.class, + TestKeymetaAdminImpl.TestForKeyProviderNullReturn.class, + TestKeymetaAdminImpl.TestMultiKeyGen.class, + TestKeymetaAdminImpl.TestForInvalidKeyCountConfig.class, +}) +@Category({ MasterTests.class, SmallTests.class }) +public class TestKeymetaAdminImpl { + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + + private static final String CUST = "cust1"; + private static final String ENCODED_CUST = ManagedKeyProvider.encodeToStr(CUST.getBytes()); + + + @Rule + public TestName name = new TestName(); + + protected Configuration conf; + protected Path testRootDir; + protected FileSystem fs; + + protected FileSystem mockFileSystem = mock(FileSystem.class); + protected Server mockServer = mock(Server.class); + protected DummyKeymetaAdminImpl keymetaAdmin; + KeymetaTableAccessor keymetaAccessor = mock(KeymetaTableAccessor.class); + + @Before + public void setUp() throws Exception { + conf = TEST_UTIL.getConfiguration(); + testRootDir = TEST_UTIL.getDataTestDir(name.getMethodName()); + fs = testRootDir.getFileSystem(conf); + + conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true"); + conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, MockManagedKeyProvider.class.getName()); + + when(mockServer.getFileSystem()).thenReturn(mockFileSystem); + when(mockServer.getConfiguration()).thenReturn(conf); + keymetaAdmin = new 
DummyKeymetaAdminImpl(mockServer, keymetaAccessor); + } + + @RunWith(BlockJUnit4ClassRunner.class) + @Category({ MasterTests.class, SmallTests.class }) + public static class TestWhenDisabled extends TestKeymetaAdminImpl { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestWhenDisabled.class); + + @Override + public void setUp() throws Exception { + super.setUp(); + conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "false"); + } + + @Test + public void testDisabled() throws Exception { + assertThrows(IOException.class, + () -> keymetaAdmin.enableKeyManagement(ManagedKeyData.KEY_GLOBAL_CUSTODIAN, + KEY_SPACE_GLOBAL)); + assertThrows(IOException.class, + () -> keymetaAdmin.getManagedKeys(ManagedKeyData.KEY_GLOBAL_CUSTODIAN, + KEY_SPACE_GLOBAL)); + } + } + + @RunWith(Parameterized.class) + @Category({ MasterTests.class, SmallTests.class }) + public static class TestAdminImpl extends TestKeymetaAdminImpl { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestAdminImpl.class); + + @Parameter(0) + public int nKeys; + @Parameter(1) + public String keySpace; + @Parameter(2) + public ManagedKeyState keyState; + @Parameter(3) + public boolean isNullKey; + + @Parameters(name = "{index},nKeys={0},keySpace={1},keyState={2}") + public static Collection data() { + return Arrays.asList( + new Object[][] { + { 1, KEY_SPACE_GLOBAL, ACTIVE, false }, + { 1, "ns1", ACTIVE, false }, + { 1, KEY_SPACE_GLOBAL, FAILED, true }, + { 1, KEY_SPACE_GLOBAL, INACTIVE, false }, + { 1, KEY_SPACE_GLOBAL, DISABLED, true }, + { 2, KEY_SPACE_GLOBAL, ACTIVE, false }, + }); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + conf.set(HConstants.CRYPTO_MANAGED_KEYS_PER_CUST_NAMESPACE_ACTIVE_KEY_COUNT, + Integer.toString(nKeys)); + } + + @Test + public void testEnableAndGet() throws Exception { + MockManagedKeyProvider managedKeyProvider = + (MockManagedKeyProvider) 
Encryption.getKeyProvider(conf); + String cust = "cust1"; + managedKeyProvider.setMockedKeyState(cust, keyState); + String encodedCust = ManagedKeyProvider.encodeToStr(cust.getBytes()); + List managedKeyStates = + keymetaAdmin.enableKeyManagement(encodedCust, keySpace); + assertNotNull(managedKeyStates); + assertEquals(1, managedKeyStates.size()); + assertEquals(keyState, managedKeyStates.get(0).getKeyState()); + verify(keymetaAccessor).addKey(argThat( + (ManagedKeyData keyData) -> assertKeyData(keyData, keyState, + isNullKey ? null : managedKeyProvider.getMockedKey(cust, + keySpace)))); + verify(keymetaAccessor).getAllKeys(cust.getBytes(), keySpace); + reset(keymetaAccessor); + + keymetaAdmin.getManagedKeys(encodedCust, keySpace); + verify(keymetaAccessor).getAllKeys(cust.getBytes(), keySpace); + } + } + + @RunWith(Parameterized.class) + @Category({ MasterTests.class, SmallTests.class }) + public static class TestMultiKeyGen extends TestKeymetaAdminImpl { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestKeymetaAdminImpl.TestMultiKeyGen.class); + + @Parameter(0) + public String keySpace; + + private MockManagedKeyProvider managedKeyProvider; + + @Parameters(name = "{index},keySpace={0}") + public static Collection data() { + return Arrays.asList( + new Object[][] { + { KEY_SPACE_GLOBAL }, + { "ns1" }, + }); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + conf.set(HConstants.CRYPTO_MANAGED_KEYS_PER_CUST_NAMESPACE_ACTIVE_KEY_COUNT, "3"); + managedKeyProvider = (MockManagedKeyProvider) Encryption.getKeyProvider(conf); + managedKeyProvider.setMultikeyGenMode(true); + } + + @After + public void tearDown() { + // Reset as this instance gets reused for more than 1 test. 
+ managedKeyProvider.setMockedKeyState(CUST, ACTIVE); + } + + @Test + public void testEnable() throws Exception { + List managedKeyStates; + // Test 1: Enable key management with 3 keys + managedKeyStates = keymetaAdmin.enableKeyManagement(ENCODED_CUST, keySpace); + assertKeys(managedKeyStates, 3); + verify(keymetaAccessor).getAllKeys(CUST.getBytes(), keySpace); + verify(keymetaAccessor, times(3)).addKey(any()); + + // Test 2: Enable key management with 3 keys, but already enabled + reset(keymetaAccessor); + when(keymetaAccessor.getAllKeys(CUST.getBytes(), keySpace)).thenReturn(managedKeyStates); + managedKeyStates = keymetaAdmin.enableKeyManagement(ENCODED_CUST, keySpace); + assertKeys(managedKeyStates, 3); + verify(keymetaAccessor, times(0)).addKey(any()); + + // Test 3: Enable key management with 4 keys, but only 1 key is added + reset(keymetaAccessor); + when(keymetaAccessor.getAllKeys(CUST.getBytes(), keySpace)).thenReturn(managedKeyStates); + keymetaAdmin.activeKeyCountOverride = 4; + managedKeyStates = keymetaAdmin.enableKeyManagement(ENCODED_CUST, keySpace); + assertKeys(managedKeyStates, 1); + verify(keymetaAccessor, times(1)).addKey(any()); + + // Test 4: Enable key management when key provider is not able to generate any new keys + reset(keymetaAccessor); + when(keymetaAccessor.getAllKeys(CUST.getBytes(), keySpace)).thenReturn(managedKeyStates); + managedKeyProvider.setMultikeyGenMode(false); + managedKeyStates = keymetaAdmin.enableKeyManagement(ENCODED_CUST, keySpace); + assertKeys(managedKeyStates, 0); + verify(keymetaAccessor, times(0)).addKey(any()); + + // Test 5: Enable key management when key provider is not able to generate any new keys + reset(keymetaAccessor); + managedKeyProvider.setMockedKeyState(CUST, FAILED); + managedKeyStates = keymetaAdmin.enableKeyManagement(ENCODED_CUST, keySpace); + assertNotNull(managedKeyStates); + assertEquals(1, managedKeyStates.size()); + assertEquals(FAILED, managedKeyStates.get(0).getKeyState()); + 
verify(keymetaAccessor, times(1)).addKey(any()); + } + + private static void assertKeys(List managedKeyStates, int expectedCnt) { + assertNotNull(managedKeyStates); + assertEquals(expectedCnt, managedKeyStates.size()); + for (int i = 0; i < managedKeyStates.size(); ++i) { + assertEquals(ACTIVE, managedKeyStates.get(i).getKeyState()); + } + } + } + + @RunWith(Parameterized.class) + @Category({ MasterTests.class, SmallTests.class }) + public static class TestForKeyProviderNullReturn extends TestKeymetaAdminImpl { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestForKeyProviderNullReturn.class); + + @Parameter(0) + public String keySpace; + + @Parameters(name = "{index},keySpace={0}") + public static Collection data() { + return Arrays.asList( + new Object[][] { + { KEY_SPACE_GLOBAL }, + { "ns1" }, + }); + } + + @Test + public void test() throws Exception { + MockManagedKeyProvider managedKeyProvider = + (MockManagedKeyProvider) Encryption.getKeyProvider(conf); + String cust = "invalidcust1"; + String encodedCust = ManagedKeyProvider.encodeToStr(cust.getBytes()); + managedKeyProvider.setMockedKey(cust, null, keySpace); + IOException ex = assertThrows(IOException.class, + () -> keymetaAdmin.enableKeyManagement(encodedCust, keySpace)); + assertEquals("Invalid null managed key received from key provider", ex.getMessage()); + } + } + + @RunWith(Parameterized.class) + @Category({ MasterTests.class, SmallTests.class }) + public static class TestForInvalidKeyCountConfig extends TestKeymetaAdminImpl { + @ClassRule public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestForInvalidKeyCountConfig.class); + + @Parameter(0) + public String keyCount; + @Parameter(1) + public String keySpace; + @Parameter(2) + public Class expectedExType; + @Parameters(name = "{index},keyCount={0},keySpace={1},expectedExType={2}") + public static Collection data() { + return Arrays.asList(new Object[][] { + { "0", 
KEY_SPACE_GLOBAL, IOException.class }, + { "-1", KEY_SPACE_GLOBAL, IOException.class }, + { "abc", KEY_SPACE_GLOBAL, NumberFormatException.class }, + { "0", "ns1", IOException.class }, + { "-1", "ns1", IOException.class }, + { "abc", "ns1", NumberFormatException.class }, + }); + } + + @Test + public void test() throws Exception { + conf.set(HConstants.CRYPTO_MANAGED_KEYS_PER_CUST_NAMESPACE_ACTIVE_KEY_COUNT, keyCount); + String cust = "cust1"; + String encodedCust = ManagedKeyProvider.encodeToStr(cust.getBytes()); + assertThrows(expectedExType, () -> + keymetaAdmin.enableKeyManagement(encodedCust, keySpace)); + } + } + + private class DummyKeymetaAdminImpl extends KeymetaAdminImpl { + public DummyKeymetaAdminImpl(Server mockServer, KeymetaTableAccessor mockAccessor) { + super(mockServer); + } + + public Integer activeKeyCountOverride; + + @Override + protected int getPerCustodianNamespaceActiveKeyConfCount() throws IOException { + if (activeKeyCountOverride != null) { + return activeKeyCountOverride; + } + return super.getPerCustodianNamespaceActiveKeyConfCount(); + } + + @Override + public void addKey(ManagedKeyData keyData) throws IOException { + keymetaAccessor.addKey(keyData); + } + + @Override + public List getAllKeys(byte[] key_cust, String keyNamespace) + throws IOException, KeyException { + return keymetaAccessor.getAllKeys(key_cust, keyNamespace); + } + } + + protected boolean assertKeyData(ManagedKeyData keyData, ManagedKeyState expKeyState, + Key expectedKey) { + assertNotNull(keyData); + assertEquals(expKeyState, keyData.getKeyState()); + if (expectedKey == null) { + assertNull(keyData.getTheKey()); + } + else { + byte[] keyBytes = keyData.getTheKey().getEncoded(); + byte[] expectedKeyBytes = expectedKey.getEncoded(); + assertEquals(expectedKeyBytes.length, keyBytes.length); + assertEquals(new Bytes(expectedKeyBytes), keyBytes); + } + return true; + } +} diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyAccessorAndManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyAccessorAndManager.java new file mode 100644 index 000000000000..2e62dbee0007 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyAccessorAndManager.java @@ -0,0 +1,521 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master; + +import static org.apache.hadoop.hbase.HConstants.SYSTEM_KEY_FILE_PREFIX; +import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.ACTIVE; +import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.INACTIVE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.security.Key; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.stream.IntStream; +import javax.crypto.spec.SecretKeySpec; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.ClusterId; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyState; +import org.apache.hadoop.hbase.keymeta.SystemKeyAccessor; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.Pair; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; +import org.junit.runner.RunWith; +import 
org.junit.runners.BlockJUnit4ClassRunner; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; +import org.junit.runners.Suite; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +@RunWith(Suite.class) +@Suite.SuiteClasses({ + TestSystemKeyAccessorAndManager.TestAccessorWhenDisabled.class, + TestSystemKeyAccessorAndManager.TestManagerWhenDisabled.class, + TestSystemKeyAccessorAndManager.TestAccessor.class, + TestSystemKeyAccessorAndManager.TestForInvalidFilenames.class, + TestSystemKeyAccessorAndManager.TestManagerForErrors.class, + TestSystemKeyAccessorAndManager.TestAccessorMisc.class +}) +@Category({ MasterTests.class, SmallTests.class }) +public class TestSystemKeyAccessorAndManager { + private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); + + @Rule + public TestName name = new TestName(); + + protected Configuration conf; + protected Path testRootDir; + protected FileSystem fs; + + protected FileSystem mockFileSystem = mock(FileSystem.class); + protected MasterServices mockMaster = mock(MasterServices.class); + protected SystemKeyManager systemKeyManager; + + @Before + public void setUp() throws Exception { + conf = TEST_UTIL.getConfiguration(); + testRootDir = TEST_UTIL.getDataTestDir(name.getMethodName()); + fs = testRootDir.getFileSystem(conf); + + conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true"); + + when(mockMaster.getFileSystem()).thenReturn(mockFileSystem); + when(mockMaster.getConfiguration()).thenReturn(conf); + systemKeyManager = new SystemKeyManager(mockMaster); + } + + private static FileStatus createMockFile(String fileName) { + Path mockPath = mock(Path.class); + when(mockPath.getName()).thenReturn(fileName); + FileStatus mockFileStatus = mock(FileStatus.class); + when(mockFileStatus.getPath()).thenReturn(mockPath); + return mockFileStatus; + } + + @RunWith(BlockJUnit4ClassRunner.class) + 
@Category({ MasterTests.class, SmallTests.class }) + public static class TestAccessorWhenDisabled extends TestSystemKeyAccessorAndManager { + @ClassRule public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestAccessorWhenDisabled.class); + + @Override public void setUp() throws Exception { + super.setUp(); + conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "false"); + } + + @Test public void test() throws Exception { + assertNull(systemKeyManager.getAllSystemKeyFiles()); + assertNull(systemKeyManager.getLatestSystemKeyFile().getFirst()); + } + } + + @RunWith(BlockJUnit4ClassRunner.class) + @Category({ MasterTests.class, SmallTests.class }) + public static class TestManagerWhenDisabled extends TestSystemKeyAccessorAndManager { + @ClassRule public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestManagerWhenDisabled.class); + + @Override public void setUp() throws Exception { + super.setUp(); + conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "false"); + } + + @Test public void test() throws Exception { + systemKeyManager.ensureSystemKeyInitialized(); + assertNull(systemKeyManager.rotateSystemKeyIfChanged()); + } + } + + @RunWith(BlockJUnit4ClassRunner.class) + @Category({ MasterTests.class, SmallTests.class }) + public static class TestAccessor extends TestSystemKeyAccessorAndManager { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestAccessor.class); + + @Test + public void testGetLatestWithNone() throws Exception { + when(mockFileSystem.globStatus(any())).thenReturn(new FileStatus[0]); + + RuntimeException ex = assertThrows(RuntimeException.class, + () -> systemKeyManager.getLatestSystemKeyFile()); + assertEquals("No cluster key initialized yet", ex.getMessage()); + } + + @Test + public void testGetWithSingle() throws Exception { + String fileName = SYSTEM_KEY_FILE_PREFIX + "1"; + FileStatus mockFileStatus = createMockFile(fileName); + + 
Path systemKeyDir = CommonFSUtils.getSystemKeyDir(conf); + when(mockFileSystem.globStatus(eq(new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX+"*")))) + .thenReturn(new FileStatus[] { mockFileStatus }); + + List files = systemKeyManager.getAllSystemKeyFiles(); + assertEquals(1, files.size()); + assertEquals(fileName, files.get(0).getName()); + + Pair> latestSystemKeyFileResult = systemKeyManager.getLatestSystemKeyFile(); + assertEquals(fileName, latestSystemKeyFileResult.getFirst().getName()); + + assertEquals(1, SystemKeyAccessor.extractSystemKeySeqNum( + latestSystemKeyFileResult.getFirst())); + } + + @Test + public void testGetWithMultiple() throws Exception { + FileStatus[] mockFileStatuses = IntStream.rangeClosed(1, 3) + .mapToObj(i -> createMockFile(SYSTEM_KEY_FILE_PREFIX + i)) + .toArray(FileStatus[]::new); + + Path systemKeyDir = CommonFSUtils.getSystemKeyDir(conf); + when(mockFileSystem.globStatus(eq(new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX+"*")))) + .thenReturn(mockFileStatuses); + + List files = systemKeyManager.getAllSystemKeyFiles(); + assertEquals(3, files.size()); + + Pair> latestSystemKeyFileResult = systemKeyManager.getLatestSystemKeyFile(); + assertEquals(3, + SystemKeyAccessor.extractSystemKeySeqNum(latestSystemKeyFileResult.getFirst())); + } + + @Test + public void testExtractKeySequenceForInvalidFilename() throws Exception { + assertEquals(-1, SystemKeyAccessor.extractKeySequence( + createMockFile("abcd").getPath())); + } + } + + @RunWith(Parameterized.class) + @Category({ MasterTests.class, SmallTests.class }) + public static class TestForInvalidFilenames extends TestSystemKeyAccessorAndManager { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestForInvalidFilenames.class); + + @Parameter(0) + public String fileName; + @Parameter(1) + public String expectedErrorMessage; + + @Parameters(name = "{index},fileName={0}") + public static Collection data() { + return Arrays.asList(new Object[][] { + 
{ "abcd", "Couldn't parse key file name: abcd" }, + {SYSTEM_KEY_FILE_PREFIX+"abcd", "Couldn't parse key file name: "+ + SYSTEM_KEY_FILE_PREFIX+"abcd"}, + // Add more test cases here + }); + } + + @Test + public void test() throws Exception { + FileStatus mockFileStatus = createMockFile(fileName); + + IOException ex = assertThrows(IOException.class, + () -> SystemKeyAccessor.extractSystemKeySeqNum(mockFileStatus.getPath())); + assertEquals(expectedErrorMessage, ex.getMessage()); + } + } + + @RunWith(BlockJUnit4ClassRunner.class) + @Category({ MasterTests.class, SmallTests.class }) + public static class TestManagerForErrors extends TestSystemKeyAccessorAndManager { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestManagerForErrors.class); + + private static final String CLUSTER_ID = "clusterId"; + + @Mock + ManagedKeyProvider mockKeyProvide; + @Mock + MasterFileSystem masterFS; + + private MockSystemKeyManager manager; + private AutoCloseable closeableMocks; + + @Before + public void setUp() throws Exception { + super.setUp(); + closeableMocks = MockitoAnnotations.openMocks(this); + + when(mockFileSystem.globStatus(any())).thenReturn(new FileStatus[0]); + ClusterId clusterId = mock(ClusterId.class); + when(mockMaster.getMasterFileSystem()).thenReturn(masterFS); + when(masterFS.getClusterId()).thenReturn(clusterId); + when(clusterId.toString()).thenReturn(CLUSTER_ID); + when(masterFS.getFileSystem()).thenReturn(mockFileSystem); + + manager = new MockSystemKeyManager(mockMaster, mockKeyProvide); + } + + @After + public void tearDown() throws Exception { + closeableMocks.close(); + } + + @Test + public void testEnsureSystemKeyInitialized_WithNoSystemKeys() throws Exception { + when(mockKeyProvide.getSystemKey(any())).thenReturn(null); + + IOException ex = assertThrows(IOException.class, manager::ensureSystemKeyInitialized); + assertEquals("Failed to get system key for cluster id: " + CLUSTER_ID, ex.getMessage()); + } + 
+ @Test + public void testEnsureSystemKeyInitialized_WithNoNonActiveKey() throws Exception { + String metadata = "key-metadata"; + ManagedKeyData keyData = mock(ManagedKeyData.class); + when(keyData.getKeyState()).thenReturn(INACTIVE); + when(keyData.getKeyMetadata()).thenReturn(metadata); + when(mockKeyProvide.getSystemKey(any())).thenReturn(keyData); + + IOException ex = assertThrows(IOException.class, manager::ensureSystemKeyInitialized); + assertEquals("System key is expected to be ACTIVE but it is: INACTIVE for metadata: " + + metadata, ex.getMessage()); + } + + @Test + public void testEnsureSystemKeyInitialized_WithInvalidMetadata() throws Exception { + ManagedKeyData keyData = mock(ManagedKeyData.class); + when(keyData.getKeyState()).thenReturn(ACTIVE); + when(mockKeyProvide.getSystemKey(any())).thenReturn(keyData); + + IOException ex = assertThrows(IOException.class, manager::ensureSystemKeyInitialized); + assertEquals("System key is expected to have metadata but it is null", ex.getMessage()); + } + + @Test + public void testEnsureSystemKeyInitialized_WithSaveFailure() throws Exception { + String metadata = "key-metadata"; + ManagedKeyData keyData = mock(ManagedKeyData.class); + when(keyData.getKeyState()).thenReturn(ACTIVE); + when(mockKeyProvide.getSystemKey(any())).thenReturn(keyData); + when(keyData.getKeyMetadata()).thenReturn(metadata); + when(mockFileSystem.globStatus(any())).thenReturn(new FileStatus[0]); + Path rootDir = CommonFSUtils.getRootDir(conf); + when(masterFS.getTempDir()).thenReturn(rootDir); + FSDataOutputStream mockStream = mock(FSDataOutputStream.class); + when(mockFileSystem.create(any())).thenReturn(mockStream); + when(mockFileSystem.rename(any(), any())).thenReturn(false); + + RuntimeException ex = assertThrows(RuntimeException.class, + manager::ensureSystemKeyInitialized); + assertEquals("Failed to generate or save System Key", ex.getMessage()); + } + + @Test + public void testEnsureSystemKeyInitialized_RaceCondition() throws 
Exception { + String metadata = "key-metadata"; + ManagedKeyData keyData = mock(ManagedKeyData.class); + when(keyData.getKeyState()).thenReturn(ACTIVE); + when(mockKeyProvide.getSystemKey(any())).thenReturn(keyData); + when(keyData.getKeyMetadata()).thenReturn(metadata); + when(mockFileSystem.globStatus(any())).thenReturn(new FileStatus[0]); + Path rootDir = CommonFSUtils.getRootDir(conf); + when(masterFS.getTempDir()).thenReturn(rootDir); + FSDataOutputStream mockStream = mock(FSDataOutputStream.class); + when(mockFileSystem.create(any())).thenReturn(mockStream); + when(mockFileSystem.rename(any(), any())).thenReturn(false); + String fileName = SYSTEM_KEY_FILE_PREFIX + "1"; + FileStatus mockFileStatus = createMockFile(fileName); + when(mockFileSystem.globStatus(any())).thenReturn( + new FileStatus[0], + new FileStatus[] { mockFileStatus } + ); + + manager.ensureSystemKeyInitialized(); + } + } + + @RunWith(BlockJUnit4ClassRunner.class) + @Category({ MasterTests.class, SmallTests.class }) + public static class TestAccessorMisc extends TestSystemKeyAccessorAndManager { + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestAccessorMisc.class); + + @Test + public void testLoadSystemKeySuccess() throws Exception { + Path testPath = new Path("/test/key/path"); + String testMetadata = "test-metadata"; + + // Create test key data + Key testKey = new SecretKeySpec("test-key-bytes".getBytes(), "AES"); + ManagedKeyData testKeyData = new ManagedKeyData( + "custodian".getBytes(), "namespace", testKey, + ManagedKeyState.ACTIVE, testMetadata, 1000L, 0, 0); + + // Mock key provider + ManagedKeyProvider realProvider = mock(ManagedKeyProvider.class); + when(realProvider.unwrapKey(testMetadata, null)).thenReturn(testKeyData); + + // Create testable SystemKeyAccessor that overrides both loadKeyMetadata and getKeyProvider + SystemKeyAccessor testAccessor = new SystemKeyAccessor(mockMaster) { + @Override + protected String 
loadKeyMetadata(Path keyPath) throws IOException { + assertEquals(testPath, keyPath); + return testMetadata; + } + + @Override + protected ManagedKeyProvider getKeyProvider() { + return realProvider; + } + }; + + ManagedKeyData result = testAccessor.loadSystemKey(testPath); + assertEquals(testKeyData, result); + + // Verify the key provider was called correctly + verify(realProvider).unwrapKey(testMetadata, null); + } + + @Test(expected = RuntimeException.class) + public void testLoadSystemKeyNullResult() throws Exception { + Path testPath = new Path("/test/key/path"); + String testMetadata = "test-metadata"; + + // Mock key provider to return null + ManagedKeyProvider realProvider = mock(ManagedKeyProvider.class); + when(realProvider.unwrapKey(testMetadata, null)).thenReturn(null); + + SystemKeyAccessor testAccessor = new SystemKeyAccessor(mockMaster) { + @Override + protected String loadKeyMetadata(Path keyPath) throws IOException { + assertEquals(testPath, keyPath); + return testMetadata; + } + + @Override + protected ManagedKeyProvider getKeyProvider() { + return realProvider; + } + }; + + testAccessor.loadSystemKey(testPath); + } + + @Test + public void testExtractSystemKeySeqNumValid() throws Exception { + Path testPath1 = new Path(SYSTEM_KEY_FILE_PREFIX + "1"); + Path testPath123 = new Path(SYSTEM_KEY_FILE_PREFIX + "123"); + Path testPathMax = new Path(SYSTEM_KEY_FILE_PREFIX + Integer.MAX_VALUE); + + assertEquals(1, SystemKeyAccessor.extractSystemKeySeqNum(testPath1)); + assertEquals(123, SystemKeyAccessor.extractSystemKeySeqNum(testPath123)); + assertEquals(Integer.MAX_VALUE, SystemKeyAccessor.extractSystemKeySeqNum(testPathMax)); + } + + + + @Test(expected = IOException.class) + public void testGetAllSystemKeyFilesIOException() throws Exception { + when(mockFileSystem.globStatus(any())).thenThrow(new IOException("Filesystem error")); + systemKeyManager.getAllSystemKeyFiles(); + } + + @Test(expected = IOException.class) + public void 
testLoadSystemKeyIOExceptionFromMetadata() throws Exception { + Path testPath = new Path("/test/key/path"); + + SystemKeyAccessor testAccessor = new SystemKeyAccessor(mockMaster) { + @Override + protected String loadKeyMetadata(Path keyPath) throws IOException { + assertEquals(testPath, keyPath); + throw new IOException("Metadata read failed"); + } + + @Override + protected ManagedKeyProvider getKeyProvider() { + return mock(ManagedKeyProvider.class); + } + }; + + testAccessor.loadSystemKey(testPath); + } + + @Test(expected = RuntimeException.class) + public void testLoadSystemKeyProviderException() throws Exception { + Path testPath = new Path("/test/key/path"); + String testMetadata = "test-metadata"; + + SystemKeyAccessor testAccessor = new SystemKeyAccessor(mockMaster) { + @Override + protected String loadKeyMetadata(Path keyPath) throws IOException { + assertEquals(testPath, keyPath); + return testMetadata; + } + + @Override + protected ManagedKeyProvider getKeyProvider() { + throw new RuntimeException("Key provider not available"); + } + }; + + testAccessor.loadSystemKey(testPath); + } + + @Test + public void testExtractSystemKeySeqNumBoundaryValues() throws Exception { + // Test boundary values + Path testPath0 = new Path(SYSTEM_KEY_FILE_PREFIX + "0"); + Path testPathMin = new Path(SYSTEM_KEY_FILE_PREFIX + Integer.MIN_VALUE); + + assertEquals(0, SystemKeyAccessor.extractSystemKeySeqNum(testPath0)); + assertEquals(Integer.MIN_VALUE, SystemKeyAccessor.extractSystemKeySeqNum(testPathMin)); + } + + @Test + public void testExtractKeySequenceEdgeCases() throws Exception { + // Test various edge cases for extractKeySequence + Path validZero = new Path(SYSTEM_KEY_FILE_PREFIX + "0"); + Path validNegative = new Path(SYSTEM_KEY_FILE_PREFIX + "-1"); + + // Valid cases should still work + assertEquals(0, SystemKeyAccessor.extractKeySequence(validZero)); + assertEquals(-1, SystemKeyAccessor.extractKeySequence(validNegative)); + } + } + + private static class 
MockSystemKeyManager extends SystemKeyManager { + private final ManagedKeyProvider keyProvider; + + public MockSystemKeyManager(MasterServices master, ManagedKeyProvider keyProvider) + throws IOException { + super(master); + this.keyProvider = keyProvider; + } + + @Override + protected ManagedKeyProvider getKeyProvider() { + return keyProvider; + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyManager.java new file mode 100644 index 000000000000..a764a5b7de87 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSystemKeyManager.java @@ -0,0 +1,119 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.security.Key; + +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.io.crypto.Encryption; +import org.apache.hadoop.hbase.io.crypto.KeyProvider; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyData; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider; +import org.apache.hadoop.hbase.io.crypto.ManagedKeyState; +import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider; +import org.apache.hadoop.hbase.keymeta.ManagedKeyTestBase; +import org.apache.hadoop.hbase.keymeta.SystemKeyAccessor; +import org.apache.hadoop.hbase.keymeta.SystemKeyCache; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ MasterTests.class, MediumTests.class }) +public class TestSystemKeyManager extends ManagedKeyTestBase { + + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestSystemKeyManager.class); + + @Test + public void testSystemKeyInitializationAndRotation() throws Exception { + HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); + KeyProvider keyProvider = Encryption.getKeyProvider(master.getConfiguration()); + assertNotNull(keyProvider); + assertTrue(keyProvider instanceof ManagedKeyProvider); + assertTrue(keyProvider instanceof MockManagedKeyProvider); + MockManagedKeyProvider pbeKeyProvider = (MockManagedKeyProvider) keyProvider; + ManagedKeyData initialSystemKey = validateInitialState(master, pbeKeyProvider); + + restartSystem(); + master = 
TEST_UTIL.getHBaseCluster().getMaster(); + validateInitialState(master, pbeKeyProvider); + + // Test rotation of cluster key by changing the key that the key provider provides and restart + // master. + String newAlias = "new_cluster_key"; + pbeKeyProvider.setClusterKeyAlias(newAlias); + Key newCluterKey = MockManagedKeyProvider.generateSecretKey(); + pbeKeyProvider.setMockedKey(newAlias, newCluterKey, ManagedKeyData.KEY_SPACE_GLOBAL); + + restartSystem(); + master = TEST_UTIL.getHBaseCluster().getMaster(); + SystemKeyAccessor systemKeyAccessor = new SystemKeyAccessor(master); + assertEquals(2, systemKeyAccessor.getAllSystemKeyFiles().size()); + SystemKeyCache systemKeyCache = master.getSystemKeyCache(); + assertEquals(0, Bytes.compareTo(newCluterKey.getEncoded(), + systemKeyCache.getLatestSystemKey().getTheKey().getEncoded())); + assertEquals(initialSystemKey, + systemKeyAccessor.loadSystemKey(systemKeyAccessor.getAllSystemKeyFiles().get(1))); + assertEquals(initialSystemKey, + systemKeyCache.getSystemKeyByChecksum(initialSystemKey.getKeyChecksum())); + } + + @Test + public void testWithInvalidSystemKey() throws Exception { + HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); + KeyProvider keyProvider = Encryption.getKeyProvider(master.getConfiguration()); + MockManagedKeyProvider pbeKeyProvider = (MockManagedKeyProvider) keyProvider; + + // Test startup failure when the cluster key is INACTIVE + SystemKeyManager tmpCKM = new SystemKeyManager(master); + tmpCKM.ensureSystemKeyInitialized(); + pbeKeyProvider.setMockedKeyState(pbeKeyProvider.getSystemKeyAlias(), ManagedKeyState.INACTIVE); + assertThrows(IOException.class, tmpCKM::ensureSystemKeyInitialized); + } + + private ManagedKeyData validateInitialState(HMaster master, MockManagedKeyProvider pbeKeyProvider) + throws IOException { + SystemKeyAccessor systemKeyAccessor = new SystemKeyAccessor(master); + assertEquals(1, systemKeyAccessor.getAllSystemKeyFiles().size()); + SystemKeyCache systemKeyCache = 
master.getSystemKeyCache(); + assertNotNull(systemKeyCache); + ManagedKeyData clusterKey = systemKeyCache.getLatestSystemKey(); + assertEquals(pbeKeyProvider.getSystemKey(master.getClusterId().getBytes()), clusterKey); + assertEquals(clusterKey, + systemKeyCache.getSystemKeyByChecksum(clusterKey.getKeyChecksum())); + return clusterKey; + } + + private void restartSystem() throws Exception { + TEST_UTIL.shutdownMiniHBaseCluster(); + Thread.sleep(2000); + TEST_UTIL.restartHBaseCluster(1); + TEST_UTIL.waitFor(60000, + () -> TEST_UTIL.getMiniHBaseCluster().getMaster().isInitialized()); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java index da1bc04d7e03..ae507f32fd58 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java @@ -38,6 +38,9 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.keymeta.KeymetaAdmin; +import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; +import org.apache.hadoop.hbase.keymeta.SystemKeyCache; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationFactory; @@ -215,6 +218,18 @@ public Connection getConnection() { } } + @Override public SystemKeyCache getSystemKeyCache() { + return null; + } + + @Override public ManagedKeyDataCache getManagedKeyDataCache() { + return null; + } + + @Override public KeymetaAdmin getKeymetaAdmin() { + return null; + } + @Override public FileSystem getFileSystem() { try { diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java index 3c55696080e3..18b7744e17cb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure2/store/region/RegionProcedureStorePerformanceEvaluation.java @@ -24,6 +24,9 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.io.util.MemorySizeUtil; +import org.apache.hadoop.hbase.keymeta.KeymetaAdmin; +import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; +import org.apache.hadoop.hbase.keymeta.SystemKeyCache; import org.apache.hadoop.hbase.master.region.MasterRegion; import org.apache.hadoop.hbase.master.region.MasterRegionFactory; import org.apache.hadoop.hbase.procedure2.store.ProcedureStorePerformanceEvaluation; @@ -57,6 +60,18 @@ public Configuration getConfiguration() { public ServerName getServerName() { return serverName; } + + @Override public SystemKeyCache getSystemKeyCache() { + return null; + } + + @Override public ManagedKeyDataCache getManagedKeyDataCache() { + return null; + } + + @Override public KeymetaAdmin getKeymetaAdmin() { + return null; + } } private MasterRegion region; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java index 443019bee808..6ed289ab96d1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java @@ -46,6 +46,9 @@ import org.apache.hadoop.hbase.io.hfile.CachedBlock; import 
org.apache.hadoop.hbase.io.hfile.ResizableBlockCache; import org.apache.hadoop.hbase.io.util.MemorySizeUtil; +import org.apache.hadoop.hbase.keymeta.KeymetaAdmin; +import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; +import org.apache.hadoop.hbase.keymeta.SystemKeyCache; import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerContext; import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerResult; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -837,6 +840,18 @@ public ChoreService getChoreService() { return null; } + @Override public SystemKeyCache getSystemKeyCache() { + return null; + } + + @Override public ManagedKeyDataCache getManagedKeyDataCache() { + return null; + } + + @Override public KeymetaAdmin getKeymetaAdmin() { + return null; + } + @Override public FileSystem getFileSystem() { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java index 58ffdcf91d43..adc420409527 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java @@ -55,6 +55,9 @@ import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.ipc.ServerRpcController; import org.apache.hadoop.hbase.ipc.SimpleRpcServer; +import org.apache.hadoop.hbase.keymeta.KeymetaAdmin; +import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; +import org.apache.hadoop.hbase.keymeta.SystemKeyCache; import org.apache.hadoop.hbase.log.HBaseMarkers; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.security.SecurityInfo; @@ -350,6 +353,18 @@ public ChoreService getChoreService() { return null; } + @Override public SystemKeyCache getSystemKeyCache() { + return null; + } + + @Override 
public ManagedKeyDataCache getManagedKeyDataCache() { + return null; + } + + @Override public KeymetaAdmin getKeymetaAdmin() { + return null; + } + @Override public Connection createConnection(Configuration conf) throws IOException { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java index 90f4a7555b93..9257b78d6ce7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java @@ -26,6 +26,9 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.AsyncClusterConnection; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.keymeta.KeymetaAdmin; +import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache; +import org.apache.hadoop.hbase.keymeta.SystemKeyCache; import org.apache.hadoop.hbase.log.HBaseMarkers; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.slf4j.Logger; @@ -100,6 +103,18 @@ public ChoreService getChoreService() { throw new UnsupportedOperationException(); } + @Override public SystemKeyCache getSystemKeyCache() { + return null; + } + + @Override public ManagedKeyDataCache getManagedKeyDataCache() { + return null; + } + + @Override public KeymetaAdmin getKeymetaAdmin() { + return null; + } + @Override public FileSystem getFileSystem() { throw new UnsupportedOperationException(); diff --git a/hbase-shell/src/main/ruby/hbase/hbase.rb b/hbase-shell/src/main/ruby/hbase/hbase.rb index a9b35ed1de21..9b24e5caa973 100644 --- a/hbase-shell/src/main/ruby/hbase/hbase.rb +++ b/hbase-shell/src/main/ruby/hbase/hbase.rb @@ -59,6 +59,10 @@ def rsgroup_admin ::Hbase::RSGroupAdmin.new(self.connection) end + def keymeta_admin + ::Hbase::KeymetaAdmin.new(@connection) + end + def taskmonitor ::Hbase::TaskMonitor.new(configuration) end diff --git 
a/hbase-shell/src/main/ruby/hbase/keymeta_admin.rb b/hbase-shell/src/main/ruby/hbase/keymeta_admin.rb new file mode 100644 index 000000000000..f70abbdde55b --- /dev/null +++ b/hbase-shell/src/main/ruby/hbase/keymeta_admin.rb @@ -0,0 +1,56 @@ +# +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# frozen_string_literal: true + +require 'java' +java_import org.apache.hadoop.hbase.io.crypto.ManagedKeyData +java_import org.apache.hadoop.hbase.keymeta.KeymetaAdminClient + +module Hbase + # KeymetaAdmin is a class that provides a Ruby interface to the HBase Key Management API. + # It is used to interface with the HBase Key Management API. 
+ class KeymetaAdmin + def initialize(connection) + @connection = connection + @admin = KeymetaAdminClient.new(connection) + @hb_admin = @connection.getAdmin + end + + def close + @admin.close + end + + def enable_key_management(key_info) + cust, namespace = extract_cust_info(key_info) + @admin.enableKeyManagement(cust, namespace) + end + + def get_key_statuses(key_info) + cust, namespace = extract_cust_info(key_info) + @admin.getManagedKeys(cust, namespace) + end + + def extract_cust_info(key_info) + cust_info = key_info.split(':') + raise(ArgumentError, 'Invalid cust:namespace format') unless [1, 2].include?(cust_info.length) + + [cust_info[0], cust_info.length > 1 ? cust_info[1] : ManagedKeyData::KEY_SPACE_GLOBAL] + end + end +end diff --git a/hbase-shell/src/main/ruby/hbase_constants.rb b/hbase-shell/src/main/ruby/hbase_constants.rb index d4df1f8f5821..67892e5538c0 100644 --- a/hbase-shell/src/main/ruby/hbase_constants.rb +++ b/hbase-shell/src/main/ruby/hbase_constants.rb @@ -138,3 +138,4 @@ def self.promote_constants(constants) require 'hbase/security' require 'hbase/visibility_labels' require 'hbase/rsgroup_admin' +require 'hbase/keymeta_admin' diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb index 46b38dd96b89..10f24c4a0d24 100644 --- a/hbase-shell/src/main/ruby/shell.rb +++ b/hbase-shell/src/main/ruby/shell.rb @@ -150,6 +150,10 @@ def hbase_rsgroup_admin @rsgroup_admin ||= hbase.rsgroup_admin end + def hbase_keymeta_admin + @keymeta_admin ||= hbase.keymeta_admin + end + ## # Create singleton methods on the target receiver object for all the loaded commands # @@ -615,6 +619,18 @@ def self.exception_handler(hide_traceback) ] ) +Shell.load_command_group( + 'keymeta', + full_name: 'Keymeta', + comment: "NOTE: The KeyMeta Coprocessor Endpoint must be enabled on the Master else commands fail + with: UnknownProtocolException: No registered Master Coprocessor Endpoint found for + ManagedKeysService", + commands: %w[ + 
enable_key_management + show_key_status + ] +) + Shell.load_command_group( 'rsgroup', full_name: 'RSGroups', diff --git a/hbase-shell/src/main/ruby/shell/commands.rb b/hbase-shell/src/main/ruby/shell/commands.rb index a40f737e7908..a97dddc4e6a0 100644 --- a/hbase-shell/src/main/ruby/shell/commands.rb +++ b/hbase-shell/src/main/ruby/shell/commands.rb @@ -105,6 +105,10 @@ def rsgroup_admin @shell.hbase_rsgroup_admin end + def keymeta_admin + @shell.hbase_keymeta_admin + end + #---------------------------------------------------------------------- # Creates formatter instance first time and then reuses it. def formatter diff --git a/hbase-shell/src/main/ruby/shell/commands/enable_key_management.rb b/hbase-shell/src/main/ruby/shell/commands/enable_key_management.rb new file mode 100644 index 000000000000..9a6d0422ad4e --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/enable_key_management.rb @@ -0,0 +1,45 @@ +# +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# frozen_string_literal: true + +require 'shell/commands/keymeta_command_base' + +module Shell + module Commands + # EnableKeyManagement is a class that provides a Ruby interface to enable key management via + # HBase Key Management API. 
+ class EnableKeyManagement < KeymetaCommandBase + def help + <<-EOF +Enable key management for a given cust:namespace (cust in Base64 format). +If no namespace is specified, the global namespace (*) is used. + +Example: + hbase> enable_key_management 'cust:namespace' + hbase> enable_key_management 'cust' + EOF + end + + def command(key_info) + statuses = keymeta_admin.enable_key_management(key_info) + print_key_statuses(statuses) + end + end + end +end diff --git a/hbase-shell/src/main/ruby/shell/commands/keymeta_command_base.rb b/hbase-shell/src/main/ruby/shell/commands/keymeta_command_base.rb new file mode 100644 index 000000000000..92b52f5e4be5 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/keymeta_command_base.rb @@ -0,0 +1,48 @@ +# +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# frozen_string_literal: true + +module Shell + module Commands + # KeymetaCommandBase is a base class for all key management commands. 
+ class KeymetaCommandBase < Command + def print_key_statuses(statuses) + formatter.header(%w[ENCODED-KEY NAMESPACE STATUS METADATA METADATA-HASH + REFRESH-TIMESTAMP READ-OP-COUNT WRITE-OP-COUNT]) + statuses.each { |status| formatter.row(format_status_row(status)) } + formatter.footer(statuses.size) + end + + private + + def format_status_row(status) + [ + status.getKeyCustodianEncoded, + status.getKeyNamespace, + status.getKeyStatus.toString, + status.getKeyMetadata, + status.getKeyMetadataHashEncoded, + status.getRefreshTimestamp, + status.getReadOpCount, + status.getWriteOpCount + ] + end + end + end +end diff --git a/hbase-shell/src/main/ruby/shell/commands/show_key_status.rb b/hbase-shell/src/main/ruby/shell/commands/show_key_status.rb new file mode 100644 index 000000000000..d3670d094ed3 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/show_key_status.rb @@ -0,0 +1,45 @@ +# +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# frozen_string_literal: true + +require 'shell/commands/keymeta_command_base' + +module Shell + module Commands + # ShowKeyStatus is a class that provides a Ruby interface to show key statuses via + # HBase Key Management API. 
+ class ShowKeyStatus < KeymetaCommandBase + def help + <<-EOF +Show key statuses for a given cust:namespace (cust in Base64 format). +If no namespace is specified, the global namespace (*) is used. + +Example: + hbase> show_key_status 'cust:namespace' + hbase> show_key_status 'cust' + EOF + end + + def command(key_info) + statuses = keymeta_admin.get_key_statuses(key_info) + print_key_statuses(statuses) + end + end + end +end diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index dc4bc1816acc..659bfe34067a 100644 --- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -89,6 +89,7 @@ import org.apache.hadoop.hbase.io.hfile.ChecksumUtil; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.ipc.RpcServerInterface; +import org.apache.hadoop.hbase.keymeta.KeymetaAdminClient; import org.apache.hadoop.hbase.logging.Log4jUtils; import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim; import org.apache.hadoop.hbase.master.HMaster; @@ -201,6 +202,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { /** This is for unit tests parameterized with a single boolean. */ public static final List MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination(); + private Admin hbaseAdmin = null; + /** * Checks to see if a specific port is available. * @param port the port number to check for availability @@ -2942,7 +2945,9 @@ public Admin getAdmin() throws IOException { return hbaseAdmin; } - private Admin hbaseAdmin = null; + public KeymetaAdminClient getKeymetaAdmin() throws IOException { + return new KeymetaAdminClient(getConnection()); + } /** * Returns an {@link Hbck} instance. Needs be closed when done.