diff --git a/.gitignore b/.gitignore
index 52d169dd5ad7..3ad423ef1106 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,3 +25,9 @@ linklint/
**/*.log
tmp
**/.flattened-pom.xml
+.*.sw*
+ID
+filenametags
+tags
+.codegenie
+.vscode
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminClient.java
new file mode 100644
index 000000000000..dddbbdfeae87
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdminClient.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import java.io.IOException;
+import java.security.KeyException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysRequest;
+import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysResponse;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+
+@InterfaceAudience.Public
+public class KeymetaAdminClient implements KeymetaAdmin {
+ private static final Logger LOG = LoggerFactory.getLogger(KeymetaAdminClient.class);
+ private ManagedKeysProtos.ManagedKeysService.BlockingInterface stub;
+
+ public KeymetaAdminClient(Connection conn) throws IOException {
+ this.stub = ManagedKeysProtos.ManagedKeysService.newBlockingStub(
+ conn.getAdmin().coprocessorService());
+ }
+
+ @Override
+ public List<ManagedKeyData> enableKeyManagement(String keyCust, String keyNamespace)
+ throws IOException {
+ try {
+ ManagedKeysProtos.GetManagedKeysResponse response = stub.enableKeyManagement(null,
+ ManagedKeysRequest.newBuilder().setKeyCust(keyCust).setKeyNamespace(keyNamespace).build());
+ return generateKeyDataList(response);
+ } catch (ServiceException e) {
+ throw ProtobufUtil.handleRemoteException(e);
+ }
+ }
+
+ @Override
+ public List<ManagedKeyData> getManagedKeys(String keyCust, String keyNamespace)
+ throws IOException, KeyException {
+ try {
+ ManagedKeysProtos.GetManagedKeysResponse statusResponse = stub.getManagedKeys(null,
+ ManagedKeysRequest.newBuilder().setKeyCust(keyCust).setKeyNamespace(keyNamespace).build());
+ return generateKeyDataList(statusResponse);
+ } catch (ServiceException e) {
+ throw ProtobufUtil.handleRemoteException(e);
+ }
+ }
+
+ private static List<ManagedKeyData> generateKeyDataList(
+ ManagedKeysProtos.GetManagedKeysResponse stateResponse) {
+ List<ManagedKeyData> keyStates = new ArrayList<>();
+ for (ManagedKeysResponse state: stateResponse.getStateList()) {
+ keyStates.add(new ManagedKeyData(
+ state.getKeyCustBytes().toByteArray(),
+ state.getKeyNamespace(), null,
+ ManagedKeyState.forValue((byte) state.getKeyState().getNumber()),
+ state.getKeyMetadata(),
+ state.getRefreshTimestamp(), state.getReadOpCount(), state.getWriteOpCount()));
+ }
+ return keyStates;
+ }
+}
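
As orientation for reviewers, here is a minimal sketch (not part of the patch) of how this client might be driven from application code; the custodian string `cust1` and the use of the global namespace are illustrative assumptions:

```java
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
import org.apache.hadoop.hbase.keymeta.KeymetaAdminClient;

public class KeymetaAdminExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      KeymetaAdmin admin = new KeymetaAdminClient(conn);
      // The custodian travels over the wire in Base64-encoded form.
      String keyCust = ManagedKeyProvider.encodeToStr("cust1".getBytes());
      List<ManagedKeyData> states =
        admin.enableKeyManagement(keyCust, ManagedKeyData.KEY_SPACE_GLOBAL);
      for (ManagedKeyData state : states) {
        System.out.println(state.getKeyMetadata() + " => " + state.getKeyState());
      }
    }
  }
}
```
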
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
index 04fc5201cc10..91630215e75d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java
@@ -80,6 +80,21 @@ public static byte[] wrapKey(Configuration conf, byte[] key, String algorithm)
* @return the encrypted key bytes
*/
public static byte[] wrapKey(Configuration conf, String subject, Key key) throws IOException {
+ return wrapKey(conf, subject, key, null);
+ }
+
+ /**
+ * Protect a key by encrypting it with the secret key of the given subject or kek. The
+ * configuration must be set up correctly for key alias resolution. Only one of the
+ * {@code subject} or {@code kek} needs to be specified and the other one can be {@code null}.
+ * @param conf configuration
+ * @param subject subject key alias
+ * @param key the key
+ * @param kek the key encryption key
+ * @return the encrypted key bytes
+ */
+ public static byte[] wrapKey(Configuration conf, String subject, Key key, Key kek)
+ throws IOException {
// Wrap the key with the configured encryption algorithm.
String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
Cipher cipher = Encryption.getCipher(conf, algorithm);
@@ -100,8 +115,13 @@ public static byte[] wrapKey(Configuration conf, String subject, Key key) throws
builder
.setHash(UnsafeByteOperations.unsafeWrap(Encryption.computeCryptoKeyHash(conf, keyBytes)));
ByteArrayOutputStream out = new ByteArrayOutputStream();
- Encryption.encryptWithSubjectKey(out, new ByteArrayInputStream(keyBytes), subject, conf, cipher,
- iv);
+ if (kek != null) {
+ Encryption.encryptWithGivenKey(kek, out, new ByteArrayInputStream(keyBytes), cipher, iv);
+ } else {
+ Encryption.encryptWithSubjectKey(out, new ByteArrayInputStream(keyBytes), subject, conf,
+ cipher, iv);
+ }
builder.setData(UnsafeByteOperations.unsafeWrap(out.toByteArray()));
// Build and return the protobuf message
out.reset();
@@ -118,6 +138,21 @@ public static byte[] wrapKey(Configuration conf, String subject, Key key) throws
* @return the raw key bytes
*/
public static Key unwrapKey(Configuration conf, String subject, byte[] value)
+ throws IOException, KeyException {
+ return unwrapKey(conf, subject, value, null);
+ }
+
+ /**
+ * Unwrap a key by decrypting it with the secret key of the given subject or kek. The
+ * configuration must be set up correctly for key alias resolution. Only one of the
+ * {@code subject} or {@code kek} needs to be specified and the other one can be {@code null}.
+ * @param conf configuration
+ * @param subject subject key alias
+ * @param value the encrypted key bytes
+ * @param kek the key encryption key
+ * @return the raw key bytes
+ */
+ public static Key unwrapKey(Configuration conf, String subject, byte[] value, Key kek)
throws IOException, KeyException {
EncryptionProtos.WrappedKey wrappedKey =
EncryptionProtos.WrappedKey.parser().parseDelimitedFrom(new ByteArrayInputStream(value));
@@ -126,11 +161,12 @@ public static Key unwrapKey(Configuration conf, String subject, byte[] value)
if (cipher == null) {
throw new RuntimeException("Cipher '" + algorithm + "' not available");
}
- return getUnwrapKey(conf, subject, wrappedKey, cipher);
+ return getUnwrapKey(conf, subject, wrappedKey, cipher, kek);
}
private static Key getUnwrapKey(Configuration conf, String subject,
- EncryptionProtos.WrappedKey wrappedKey, Cipher cipher) throws IOException, KeyException {
+ EncryptionProtos.WrappedKey wrappedKey, Cipher cipher, Key kek)
+ throws IOException, KeyException {
String configuredHashAlgorithm = Encryption.getConfiguredHashAlgorithm(conf);
String wrappedHashAlgorithm = wrappedKey.getHashAlgorithm().trim();
if (!configuredHashAlgorithm.equalsIgnoreCase(wrappedHashAlgorithm)) {
@@ -143,8 +179,14 @@ private static Key getUnwrapKey(Configuration conf, String subject,
}
ByteArrayOutputStream out = new ByteArrayOutputStream();
byte[] iv = wrappedKey.hasIv() ? wrappedKey.getIv().toByteArray() : null;
- Encryption.decryptWithSubjectKey(out, wrappedKey.getData().newInput(), wrappedKey.getLength(),
- subject, conf, cipher, iv);
+ if (kek != null) {
+ Encryption.decryptWithGivenKey(kek, out, wrappedKey.getData().newInput(),
+ wrappedKey.getLength(), cipher, iv);
+ } else {
+ Encryption.decryptWithSubjectKey(out, wrappedKey.getData().newInput(), wrappedKey.getLength(),
+ subject, conf, cipher, iv);
+ }
byte[] keyBytes = out.toByteArray();
if (wrappedKey.hasHash()) {
if (
@@ -176,7 +218,7 @@ public static Key unwrapWALKey(Configuration conf, String subject, byte[] value)
if (cipher == null) {
throw new RuntimeException("Cipher '" + algorithm + "' not available");
}
- return getUnwrapKey(conf, subject, wrappedKey, cipher);
+ return getUnwrapKey(conf, subject, wrappedKey, cipher, null);
}
/**
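
A small round-trip sketch of the new KEK overloads; generating both keys locally with `KeyGenerator` is an assumption for illustration (in practice the KEK would come from a key provider):

```java
import java.security.Key;
import java.util.Arrays;
import javax.crypto.KeyGenerator;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.security.EncryptionUtil;

public class KekRoundTrip {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    KeyGenerator gen = KeyGenerator.getInstance("AES");
    gen.init(128);
    Key dek = gen.generateKey(); // data encryption key
    Key kek = gen.generateKey(); // key encryption key
    // With a non-null kek the subject is not consulted, so it may be null.
    byte[] wrapped = EncryptionUtil.wrapKey(conf, null, dek, kek);
    Key unwrapped = EncryptionUtil.unwrapKey(conf, null, wrapped, kek);
    System.out.println(Arrays.equals(dek.getEncoded(), unwrapped.getEncoded())); // true
  }
}
```
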
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index eba3eb657ea5..08ed56a33e4c 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1192,6 +1192,11 @@ public enum OperationStatusCode {
/** Temporary directory used for table creation and deletion */
public static final String HBASE_TEMP_DIRECTORY = ".tmp";
+ /**
+ * Directory used for storing master keys for the cluster
+ */
+ public static final String SYSTEM_KEYS_DIRECTORY = ".system_keys";
+ public static final String SYSTEM_KEY_FILE_PREFIX = "system_key.";
/**
* The period (in milliseconds) between computing region server point in time metrics
*/
@@ -1304,6 +1309,39 @@ public enum OperationStatusCode {
/** Configuration key for enabling WAL encryption, a boolean */
public static final String ENABLE_WAL_ENCRYPTION = "hbase.regionserver.wal.encryption";
+ /** Property used by ManagedKeyStoreKeyProvider class to set the alias that identifies
+ * the current system key. */
+ public static final String CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY =
+ "hbase.crypto.managed_key_store.system.key.name";
+ public static final String CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX =
+ "hbase.crypto.managed_key_store.cust.";
+
+ /** Enables or disables the key management feature. */
+ public static final String CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY =
+ "hbase.crypto.managed_keys.enabled";
+ public static final boolean CRYPTO_MANAGED_KEYS_DEFAULT_ENABLED = false;
+
+ /** The number of keys to retrieve from the Key Provider for each custodian and namespace
+ * combination. */
+ public static final String CRYPTO_MANAGED_KEYS_PER_CUST_NAMESPACE_ACTIVE_KEY_COUNT =
+ "hbase.crypto.managed_keys.per_cust_namespace.active_count";
+ public static final int CRYPTO_MANAGED_KEYS_PER_CUST_NAMESPACE_ACTIVE_KEY_DEFAULT_COUNT = 1;
+ /** Enables or disables key lookup on the data path as an alternative to static injection of
+ * keys via the control path. */
+ public static final String CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_ENABLED_CONF_KEY =
+ "hbase.crypto.managed_keys.dynamic_lookup.enabled";
+ public static final boolean CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_DEFAULT_ENABLED = true;
+
+ /** Maximum number of entries in the managed key data cache. */
+ public static final String CRYPTO_MANAGED_KEYS_L1_CACHE_MAX_ENTRIES_CONF_KEY =
+ "hbase.crypto.managed_keys.l1_cache.max_entries";
+ public static final int CRYPTO_MANAGED_KEYS_L1_CACHE_MAX_ENTRIES_DEFAULT = 1000;
+
+ /** Maximum number of entries in the managed key active keys cache. */
+ public static final String CRYPTO_MANAGED_KEYS_L1_ACTIVE_CACHE_MAX_NS_ENTRIES_CONF_KEY =
+ "hbase.crypto.managed_keys.l1_active_cache.max_ns_entries";
+ public static final int CRYPTO_MANAGED_KEYS_L1_ACTIVE_CACHE_MAX_NS_ENTRIES_DEFAULT = 100;
+
/** Configuration key for setting RPC codec class name */
public static final String RPC_CODEC_CONF_KEY = "hbase.client.rpc.codec";
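
For reference, a hedged sketch of wiring the new keys programmatically; the values shown are simply the defaults introduced above:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class ManagedKeysConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true); // default: false
    conf.setInt(HConstants.CRYPTO_MANAGED_KEYS_PER_CUST_NAMESPACE_ACTIVE_KEY_COUNT, 1);
    conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_ENABLED_CONF_KEY, true);
    conf.setInt(HConstants.CRYPTO_MANAGED_KEYS_L1_CACHE_MAX_ENTRIES_CONF_KEY, 1000);
    conf.setInt(HConstants.CRYPTO_MANAGED_KEYS_L1_ACTIVE_CACHE_MAX_NS_ENTRIES_CONF_KEY, 100);
  }
}
```
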
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
index 13e335b82ee3..336c440c4493 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java
@@ -33,8 +33,10 @@
import javax.crypto.spec.PBEKeySpec;
import javax.crypto.spec.SecretKeySpec;
import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.crypto.aes.AES;
import org.apache.hadoop.hbase.util.Bytes;
@@ -468,6 +470,19 @@ public static void encryptWithSubjectKey(OutputStream out, InputStream in, Strin
if (key == null) {
throw new IOException("No key found for subject '" + subject + "'");
}
+ encryptWithGivenKey(key, out, in, cipher, iv);
+ }
+
+ /**
+ * Encrypts a block of plaintext with the specified symmetric key.
+ * @param key The symmetric key
+ * @param out ciphertext
+ * @param in plaintext
+ * @param cipher the encryption algorithm
+ * @param iv the initialization vector, can be null
+ */
+ public static void encryptWithGivenKey(Key key, OutputStream out, InputStream in,
+ Cipher cipher, byte[] iv) throws IOException {
Encryptor e = cipher.getEncryptor();
e.setKey(key);
e.setIv(iv); // can be null
@@ -490,19 +505,16 @@ public static void decryptWithSubjectKey(OutputStream out, InputStream in, int o
if (key == null) {
throw new IOException("No key found for subject '" + subject + "'");
}
- Decryptor d = cipher.getDecryptor();
- d.setKey(key);
- d.setIv(iv); // can be null
try {
- decrypt(out, in, outLen, d);
+ decryptWithGivenKey(key, out, in, outLen, cipher, iv);
} catch (IOException e) {
// If the current cipher algorithm fails to unwrap, try the alternate cipher algorithm, if one
// is configured
String alternateAlgorithm = conf.get(HConstants.CRYPTO_ALTERNATE_KEY_ALGORITHM_CONF_KEY);
if (alternateAlgorithm != null) {
if (LOG.isDebugEnabled()) {
- LOG.debug("Unable to decrypt data with current cipher algorithm '"
- + conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES)
+ LOG.debug("Unable to decrypt data with current cipher algorithm '" + conf.get(
+ HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES)
+ "'. Trying with the alternate cipher algorithm '" + alternateAlgorithm
+ "' configured.");
}
@@ -510,16 +522,22 @@ public static void decryptWithSubjectKey(OutputStream out, InputStream in, int o
if (alterCipher == null) {
throw new RuntimeException("Cipher '" + alternateAlgorithm + "' not available");
}
- d = alterCipher.getDecryptor();
- d.setKey(key);
- d.setIv(iv); // can be null
- decrypt(out, in, outLen, d);
- } else {
- throw new IOException(e);
+ decryptWithGivenKey(key, out, in, outLen, alterCipher, iv);
+ } else {
+ throw e;
}
}
}
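+
+ /**
+ * Decrypts a block of ciphertext with the specified symmetric key.
+ * @param key The symmetric key
+ * @param out plaintext
+ * @param in ciphertext
+ * @param outLen the expected length of the plaintext
+ * @param cipher the decryption algorithm
+ * @param iv the initialization vector, can be null
+ */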
+ public static void decryptWithGivenKey(Key key, OutputStream out, InputStream in, int outLen,
+ Cipher cipher, byte[] iv) throws IOException {
+ Decryptor d = cipher.getDecryptor();
+ d.setKey(key);
+ d.setIv(iv); // can be null
+ decrypt(out, in, outLen, d);
+ }
+
private static ClassLoader getClassLoaderForClass(Class<?> c) {
ClassLoader cl = Thread.currentThread().getContextClassLoader();
if (cl == null) {
@@ -561,6 +579,9 @@ public static KeyProvider getKeyProvider(Configuration conf) {
provider = (KeyProvider) ReflectionUtils
.newInstance(getClassLoaderForClass(KeyProvider.class).loadClass(providerClassName), conf);
provider.init(providerParameters);
+ if (provider instanceof ManagedKeyProvider) {
+ ((ManagedKeyProvider) provider).initConfig(conf);
+ }
if (LOG.isDebugEnabled()) {
LOG.debug("Installed " + providerClassName + " into key provider cache");
}
@@ -571,6 +592,11 @@ public static KeyProvider getKeyProvider(Configuration conf) {
}
}
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
+ public static void clearKeyProviderCache() {
+ keyProviderCache.clear();
+ }
+
public static void incrementIv(byte[] iv) {
incrementIv(iv, 1);
}
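
A sketch of the new given-key helpers round-tripping a payload; the key, IV, and payload here are illustrative assumptions:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.security.Key;
import java.security.SecureRandom;
import javax.crypto.KeyGenerator;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.crypto.Cipher;
import org.apache.hadoop.hbase.io.crypto.Encryption;

public class GivenKeyRoundTrip {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Cipher cipher = Encryption.getCipher(conf, HConstants.CIPHER_AES);
    KeyGenerator gen = KeyGenerator.getInstance("AES");
    gen.init(128);
    Key key = gen.generateKey();
    byte[] iv = new byte[cipher.getIvLength()];
    new SecureRandom().nextBytes(iv);
    byte[] plaintext = "secret payload".getBytes();
    ByteArrayOutputStream ct = new ByteArrayOutputStream();
    Encryption.encryptWithGivenKey(key, ct, new ByteArrayInputStream(plaintext), cipher, iv);
    ByteArrayOutputStream pt = new ByteArrayOutputStream();
    Encryption.decryptWithGivenKey(key, pt, new ByteArrayInputStream(ct.toByteArray()),
      plaintext.length, cipher, iv);
    System.out.println(new String(pt.toByteArray())); // secret payload
  }
}
```
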
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java
index 604bede13b20..c401d3b3f6b9 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java
@@ -76,6 +76,8 @@
@InterfaceAudience.Public
public class KeyStoreKeyProvider implements KeyProvider {
+ private static final char[] NO_PASSWORD = new char[0];
+
protected KeyStore store;
protected char[] password; // can be null if no password
protected Properties passwordFile; // can be null if no file provided
@@ -172,9 +174,15 @@ protected char[] getAliasPassword(String alias) {
@Override
public Key getKey(String alias) {
+ // First try with no password, as it is more common to have a password only for the store.
try {
- return store.getKey(alias, getAliasPassword(alias));
+ return store.getKey(alias, NO_PASSWORD);
} catch (UnrecoverableKeyException e) {
+ try {
+ return store.getKey(alias, getAliasPassword(alias));
+ } catch (UnrecoverableKeyException | NoSuchAlgorithmException | KeyStoreException e2) {
+ // Ignore.
+ }
throw new RuntimeException(e);
} catch (KeyStoreException e) {
throw new RuntimeException(e);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyData.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyData.java
new file mode 100644
index 000000000000..9d2710fc5a21
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyData.java
@@ -0,0 +1,311 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.crypto;
+
+import java.security.Key;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.util.Arrays;
+import java.util.Base64;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
+
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.util.DataChecksum;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+
+/**
+ * This class represents encryption key data, which includes the key itself, its state, metadata
+ * and a prefix. The metadata encodes enough information about the key that it can be used to
+ * retrieve the exact same key again in the future. If the key state is
+ * {@link ManagedKeyState#FAILED}, expect the key to be {@code null}.
+ *
+ * The key data is represented by the following fields:
+ * <ul>
+ * <li>key_cust: The prefix (custodian) to which this key belongs</li>
+ * <li>theKey: The key capturing the bytes and encoding</li>
+ * <li>keyState: The state of the key (see {@link ManagedKeyState})</li>
+ * <li>keyMetadata: Metadata that identifies the key</li>
+ * </ul>
+ *
+ * The class provides methods to retrieve these fields, as well as to compute a checksum
+ * for the key data. The checksum is used to ensure the integrity of the key data.
+ *
+ * The class also provides a method to generate an MD5 hash of the key metadata, which can be used
+ * for validation and identification.
+ */
+@InterfaceAudience.Public
+public class ManagedKeyData {
+ /**
+ * Special value to be used for custodian or namespace to indicate that it is global, meaning it
+ * is not associated with a specific custodian or namespace.
+ */
+ public static final String KEY_SPACE_GLOBAL = "*";
+
+ /**
+ * Encoded form of global custodian.
+ */
+ public static final String KEY_GLOBAL_CUSTODIAN =
+ ManagedKeyProvider.encodeToStr(KEY_SPACE_GLOBAL.getBytes());
+
+ private final byte[] keyCustodian;
+ private final String keyNamespace;
+ private final Key theKey;
+ private final ManagedKeyState keyState;
+ private final String keyMetadata;
+ private final long refreshTimestamp;
+ private final long readOpCount;
+ private final long writeOpCount;
+ private volatile long keyChecksum = 0;
+ private byte[] keyMetadataHash;
+
+ /**
+ * Constructs a new instance with the given parameters.
+ *
+ * @param key_cust The key custodian.
+ * @param key_namespace The key namespace.
+ * @param theKey The actual key, can be {@code null}.
+ * @param keyState The state of the key.
+ * @param keyMetadata The metadata associated with the key.
+ * @throws NullPointerException if any of key_cust, keyState or keyMetadata is null.
+ */
+ public ManagedKeyData(byte[] key_cust, String key_namespace, Key theKey, ManagedKeyState keyState,
+ String keyMetadata) {
+ this(key_cust, key_namespace, theKey, keyState, keyMetadata,
+ EnvironmentEdgeManager.currentTime(), 0, 0);
+ }
+
+ /**
+ * Constructs a new instance with the given parameters.
+ *
+ * @param key_cust The key custodian.
+ * @param key_namespace The key namespace.
+ * @param theKey The actual key, can be {@code null}.
+ * @param keyState The state of the key.
+ * @param keyMetadata The metadata associated with the key.
+ * @param refreshTimestamp The timestamp when this key was last refreshed.
+ * @param readOpCount The current number of read operations for this key.
+ * @param writeOpCount The current number of write operations for this key.
+ * @throws NullPointerException if any of key_cust, keyState or keyMetadata is null.
+ */
+ public ManagedKeyData(byte[] key_cust, String key_namespace, Key theKey, ManagedKeyState keyState,
+ String keyMetadata, long refreshTimestamp, long readOpCount,
+ long writeOpCount) {
+ Preconditions.checkNotNull(key_cust, "key_cust should not be null");
+ Preconditions.checkNotNull(key_namespace, "key_namespace should not be null");
+ Preconditions.checkNotNull(keyState, "keyState should not be null");
+ // Only check for null metadata if state is not FAILED
+ if (keyState != ManagedKeyState.FAILED) {
+ Preconditions.checkNotNull(keyMetadata, "keyMetadata should not be null");
+ }
+ Preconditions.checkArgument(readOpCount >= 0, "readOpCount: " + readOpCount +
+ " should be >= 0");
+ Preconditions.checkArgument(writeOpCount >= 0, "writeOpCount: " + writeOpCount +
+ " should be >= 0");
+
+ this.keyCustodian = key_cust;
+ this.keyNamespace = key_namespace;
+ this.theKey = theKey;
+ this.keyState = keyState;
+ this.keyMetadata = keyMetadata;
+ this.refreshTimestamp = refreshTimestamp;
+ this.readOpCount = readOpCount;
+ this.writeOpCount = writeOpCount;
+ }
+
+ @InterfaceAudience.Private
+ public ManagedKeyData cloneWithoutKey() {
+ return new ManagedKeyData(keyCustodian, keyNamespace, null, keyState, keyMetadata,
+ refreshTimestamp, readOpCount, writeOpCount);
+ }
+
+ /**
+ * Returns the custodian associated with the key.
+ *
+ * @return The key custodian as a byte array.
+ */
+ public byte[] getKeyCustodian() {
+ return keyCustodian;
+ }
+
+ /**
+ * Return the key Custodian in Base64 encoded form.
+ * @return the encoded key custodian
+ */
+ public String getKeyCustodianEncoded() {
+ return Base64.getEncoder().encodeToString(keyCustodian);
+ }
+
+ /**
+ * Returns the namespace associated with the key.
+ *
+ * @return The namespace as a {@code String}.
+ */
+ public String getKeyNamespace() {
+ return keyNamespace;
+ }
+
+ /**
+ * Returns the actual key.
+ *
+ * @return The key as a {@code Key} object.
+ */
+ public Key getTheKey() {
+ return theKey;
+ }
+
+ /**
+ * Returns the state of the key.
+ *
+ * @return The key state as a {@code ManagedKeyState} enum value.
+ */
+ public ManagedKeyState getKeyState() {
+ return keyState;
+ }
+
+ /**
+ * Returns the metadata associated with the key.
+ *
+ * @return The key metadata as a {@code String}.
+ */
+ public String getKeyMetadata() {
+ return keyMetadata;
+ }
+
+ @Override
+ public String toString() {
+ return "ManagedKeyData{" +
+ "keyCustodian=" + Arrays.toString(keyCustodian) +
+ ", keyNamespace='" + keyNamespace + '\'' +
+ ", keyState=" + keyState +
+ ", keyMetadata='" + keyMetadata + '\'' +
+ ", refreshTimestamp=" + refreshTimestamp +
+ ", keyChecksum=" + getKeyChecksum() +
+ '}';
+ }
+
+ public long getRefreshTimestamp() {
+ return refreshTimestamp;
+ }
+
+ /**
+ * @return the number of times this key has been used for read operations as of the time this
+ * key data was initialized.
+ */
+ public long getReadOpCount() {
+ return readOpCount;
+ }
+
+ /**
+ * @return the number of times this key has been used for write operations as of the time this
+ * key data was initialized.
+ */
+ public long getWriteOpCount() {
+ return writeOpCount;
+ }
+
+ /**
+ * Computes the checksum of the key. If the checksum has already been computed, this method
+ * returns the previously computed value. The checksum is computed using the CRC32C algorithm.
+ *
+ * @return The checksum of the key as a long value, {@code 0} if no key is available.
+ */
+ public long getKeyChecksum() {
+ if (theKey == null) {
+ return 0;
+ }
+ if (keyChecksum == 0) {
+ keyChecksum = constructKeyChecksum(theKey.getEncoded());
+ }
+ return keyChecksum;
+ }
+
+ public static long constructKeyChecksum(byte[] data) {
+ DataChecksum dataChecksum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 16);
+ dataChecksum.update(data, 0, data.length);
+ return dataChecksum.getValue();
+ }
+
+ /**
+ * Computes the hash of the key metadata. If the hash has already been computed, this method
+ * returns the previously computed value. The hash is computed using the MD5 algorithm.
+ *
+ * @return The hash of the key metadata as a byte array.
+ */
+ public byte[] getKeyMetadataHash() {
+ if (keyMetadataHash == null && keyMetadata != null) {
+ keyMetadataHash = constructMetadataHash(keyMetadata);
+ }
+ return keyMetadataHash;
+ }
+
+ /**
+ * Return the hash of key metadata in Base64 encoded form.
+ * @return the encoded hash or {@code null} if no metadata is available.
+ */
+ public String getKeyMetadataHashEncoded() {
+ byte[] hash = getKeyMetadataHash();
+ if (hash != null) {
+ return Base64.getEncoder().encodeToString(hash);
+ }
+ return null;
+ }
+
+ public static byte[] constructMetadataHash(String metadata) {
+ MessageDigest md5;
+ try {
+ md5 = MessageDigest.getInstance("MD5");
+ } catch (NoSuchAlgorithmException e) {
+ throw new RuntimeException(e);
+ }
+ return md5.digest(metadata.getBytes());
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+
+ ManagedKeyData that = (ManagedKeyData) o;
+
+ return new EqualsBuilder()
+ .append(keyCustodian, that.keyCustodian)
+ .append(keyNamespace, that.keyNamespace)
+ .append(theKey, that.theKey)
+ .append(keyState, that.keyState)
+ .append(keyMetadata, that.keyMetadata)
+ .isEquals();
+ }
+
+ @Override
+ public int hashCode() {
+ return new HashCodeBuilder(17, 37)
+ .append(keyCustodian)
+ .append(keyNamespace)
+ .append(theKey)
+ .append(keyState)
+ .append(keyMetadata)
+ .toHashCode();
+ }
+}
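
A minimal usage sketch, assuming a locally generated AES key and made-up custodian, namespace, and metadata values:

```java
import java.security.Key;
import javax.crypto.KeyGenerator;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;

public class ManagedKeyDataExample {
  public static void main(String[] args) throws Exception {
    KeyGenerator gen = KeyGenerator.getInstance("AES");
    gen.init(256);
    Key key = gen.generateKey();
    ManagedKeyData data = new ManagedKeyData("cust1".getBytes(), "ns1", key,
      ManagedKeyState.ACTIVE, "{\"KeyAlias\":\"alias1\"}");
    System.out.println(data.getKeyChecksum());            // CRC32C over the raw key bytes
    System.out.println(data.getKeyMetadataHashEncoded()); // Base64(MD5(metadata))
  }
}
```
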
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyProvider.java
new file mode 100644
index 000000000000..27cd91380d6e
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyProvider.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.crypto;
+
+import java.io.IOException;
+import java.util.Base64;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Interface for key providers of managed keys. Defines methods for generating and managing
+ * managed keys, as well as handling key storage and retrieval.
+ *
+ * The interface extends the basic {@link KeyProvider} interface with additional
+ * methods for working with managed keys.
+ */
+@InterfaceAudience.Public
+public interface ManagedKeyProvider extends KeyProvider {
+ /**
+ * Initialize the provider with the given configuration.
+ *
+ * @param conf Hadoop configuration
+ */
+ void initConfig(Configuration conf);
+
+ /**
+ * Retrieve the system key using the given system identifier.
+ *
+ * @param systemId system identifier
+ * @return ManagedKeyData for the system key; guaranteed to be non-{@code null}
+ * @throws IOException if an error occurs while retrieving the key
+ */
+ ManagedKeyData getSystemKey(byte[] systemId) throws IOException;
+
+ /**
+ * Retrieve a managed key for the specified custodian and namespace.
+ *
+ * @param key_cust The key custodian.
+ * @param key_namespace The key namespace.
+ * @return ManagedKeyData for the managed key; expected to be non-{@code null}
+ * @throws IOException if an error occurs while retrieving the key
+ */
+ ManagedKeyData getManagedKey(byte[] key_cust, String key_namespace) throws IOException;
+
+ /**
+ * Retrieve a key identified by the key metadata. The key metadata is typically generated by the
+ * same key provider via the {@link #getSystemKey(byte[])} or
+ * {@link #getManagedKey(byte[], String)} methods. If the key couldn't be retrieved using the
+ * metadata and the wrappedKey is provided, the implementation may try to decrypt it as a fallback.
+ *
+ * @param keyMetaData Key metadata, must not be {@code null}.
+ * @param wrappedKey The DEK key material encrypted with the corresponding KEK, if available.
+ * @return ManagedKeyData for the key represented by the metadata; expected to be non-{@code null}
+ * @throws IOException if an error occurs while retrieving the key
+ */
+ @NonNull ManagedKeyData unwrapKey(String keyMetaData, byte[] wrappedKey) throws IOException;
+
+ /**
+ * Decode the given key custodian which is encoded as Base64 string.
+ * @param encodedKeyCust The encoded key custodian
+ * @return the decoded key custodian
+ * @throws IOException if the encoded key custodian is not a valid Base64 string
+ */
+ static byte[] decodeToBytes(String encodedKeyCust) throws IOException {
+ byte[] key_cust;
+ try {
+ key_cust = Base64.getDecoder().decode(encodedKeyCust);
+ } catch (IllegalArgumentException e) {
+ throw new IOException("Failed to decode specified key custodian as Base64 string: "
+ + encodedKeyCust, e);
+ }
+ return key_cust;
+ }
+
+ /**
+ * Encode the given key custodian as Base64 string.
+ * @param key_cust The key custodian
+ * @return the encoded key custodian as Base64 string
+ */
+ static String encodeToStr(byte[] key_cust) {
+ return Base64.getEncoder().encodeToString(key_cust);
+ }
+
+}
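
The two static helpers are inverses of each other; a quick sketch (the custodian value is illustrative):

```java
import java.util.Arrays;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;

public class CustodianCodec {
  public static void main(String[] args) throws Exception {
    byte[] cust = "tenant-42".getBytes();
    String encoded = ManagedKeyProvider.encodeToStr(cust);
    byte[] decoded = ManagedKeyProvider.decodeToBytes(encoded);
    System.out.println(encoded + " round-trips: " + Arrays.equals(cust, decoded)); // true
  }
}
```
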
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyState.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyState.java
new file mode 100644
index 000000000000..ea64355fc56b
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyState.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.crypto;
+
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Enum of managed key states, used to indicate the status of managed custodian keys.
+ */
+@InterfaceAudience.Public
+public enum ManagedKeyState {
+ /** Represents the active status of a managed key. */
+ ACTIVE((byte) 1),
+ /** Represents the inactive status of a managed key. */
+ INACTIVE((byte) 2),
+ /** Represents the retrieval failure status of a managed key. */
+ FAILED((byte) 3),
+ /** Represents the disabled status of a managed key. */
+ DISABLED((byte) 4),
+ ;
+
+ private static Map<Byte, ManagedKeyState> lookupByVal;
+
+ private final byte val;
+
+ private ManagedKeyState(byte val) {
+ this.val = val;
+ }
+
+ /**
+ * Returns the numeric value of the managed key status.
+ * @return byte value
+ */
+ public byte getVal() {
+ return val;
+ }
+
+ /**
+ * Returns the ManagedKeyState for the given numeric value.
+ * @param val The numeric value of the desired ManagedKeyState
+ * @return The ManagedKeyState corresponding to the given value
+ */
+ public static ManagedKeyState forValue(byte val) {
+ if (lookupByVal == null) {
+ Map<Byte, ManagedKeyState> tbl = new HashMap<>();
+ for (ManagedKeyState e: ManagedKeyState.values()) {
+ tbl.put(e.getVal(), e);
+ }
+ lookupByVal = tbl;
+ }
+ return lookupByVal.get(val);
+ }
+
+ /**
+ * This is used to determine if a key is usable for encryption/decryption.
+ *
+ * @param state The key state to check
+ * @return true if the key state is ACTIVE or INACTIVE, false otherwise
+ */
+ public static boolean isUsable(ManagedKeyState state) {
+ return state == ACTIVE || state == INACTIVE;
+ }
+}
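
A short sketch of the lookup and usability helpers:

```java
import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;

public class StateLookup {
  public static void main(String[] args) {
    ManagedKeyState s = ManagedKeyState.forValue((byte) 1);
    System.out.println(s);                                               // ACTIVE
    System.out.println(ManagedKeyState.isUsable(s));                     // true
    System.out.println(ManagedKeyState.isUsable(ManagedKeyState.DISABLED)); // false
  }
}
```
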
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyStoreKeyProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyStoreKeyProvider.java
new file mode 100644
index 000000000000..b9005e1b27e7
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/ManagedKeyStoreKeyProvider.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.crypto;
+
+import com.google.gson.reflect.TypeToken;
+import java.io.IOException;
+import java.security.Key;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.GsonUtil;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Public
+public class ManagedKeyStoreKeyProvider extends KeyStoreKeyProvider implements ManagedKeyProvider {
+ public static final String KEY_METADATA_ALIAS = "KeyAlias";
+ public static final String KEY_METADATA_CUST = "KeyCustodian";
+
+ private static final java.lang.reflect.Type KEY_METADATA_TYPE =
+ new TypeToken<Map<String, String>>() {}.getType();
+
+ private Configuration conf;
+
+ @Override
+ public void initConfig(Configuration conf) {
+ this.conf = conf;
+ }
+
+ @Override
+ public ManagedKeyData getSystemKey(byte[] clusterId) {
+ checkConfig();
+ String systemKeyAlias = conf.get(HConstants.CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY,
+ null);
+ if (systemKeyAlias == null) {
+ throw new RuntimeException("No alias configured for system key");
+ }
+ Key key = getKey(systemKeyAlias);
+ if (key == null) {
+ throw new RuntimeException("Unable to find system key with alias: " + systemKeyAlias);
+ }
+ // Encode clusterId too for consistency with that of key custodian.
+ String keyMetadata = generateKeyMetadata(systemKeyAlias,
+ ManagedKeyProvider.encodeToStr(clusterId));
+ return new ManagedKeyData(clusterId, ManagedKeyData.KEY_SPACE_GLOBAL, key,
+ ManagedKeyState.ACTIVE, keyMetadata);
+ }
+
+ @Override
+ public ManagedKeyData getManagedKey(byte[] key_cust, String key_namespace) throws IOException {
+ checkConfig();
+ String encodedCust = ManagedKeyProvider.encodeToStr(key_cust);
+ String aliasConfKey = HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encodedCust +
+ ".alias";
+ String keyMetadata = generateKeyMetadata(conf.get(aliasConfKey, null), encodedCust);
+ return unwrapKey(keyMetadata, null);
+ }
+
+ @Override
+ public ManagedKeyData unwrapKey(String keyMetadataStr, byte[] wrappedKey) throws IOException {
+ Map<String, String> keyMetadata = GsonUtil.getDefaultInstance().fromJson(keyMetadataStr,
+ KEY_METADATA_TYPE);
+ String encodedCust = keyMetadata.get(KEY_METADATA_CUST);
+ String activeStatusConfKey = HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encodedCust +
+ ".active";
+ boolean isActive = conf.getBoolean(activeStatusConfKey, true);
+ byte[] key_cust = ManagedKeyProvider.decodeToBytes(encodedCust);
+ String alias = keyMetadata.get(KEY_METADATA_ALIAS);
+ Key key = alias != null ? getKey(alias) : null;
+ if (key != null) {
+ return new ManagedKeyData(key_cust, ManagedKeyData.KEY_SPACE_GLOBAL, key,
+ isActive ? ManagedKeyState.ACTIVE : ManagedKeyState.INACTIVE, keyMetadataStr);
+ }
+ return new ManagedKeyData(key_cust, ManagedKeyData.KEY_SPACE_GLOBAL, null,
+ isActive ? ManagedKeyState.FAILED : ManagedKeyState.DISABLED, keyMetadataStr);
+ }
+
+ private void checkConfig() {
+ if (conf == null) {
+ throw new IllegalStateException("initConfig is not called or config is null");
+ }
+ }
+
+ public static String generateKeyMetadata(String aliasName, String encodedCust) {
+ Map<String, String> metadata = new HashMap<>(2);
+ metadata.put(KEY_METADATA_ALIAS, aliasName);
+ metadata.put(KEY_METADATA_CUST, encodedCust);
+ return GsonUtil.getDefaultInstance().toJson(metadata, HashMap.class);
+ }
+}
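
A hedged sketch of the configuration this provider reads; the alias names and the custodian are assumptions for illustration:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;

public class StoreProviderConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Alias of the cluster-wide system key inside the keystore.
    conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_SYSTEM_KEY_NAME_CONF_KEY, "system_key_alias");
    // Per-custodian alias and active flag, keyed by the Base64-encoded custodian.
    String encodedCust = ManagedKeyProvider.encodeToStr("cust1".getBytes());
    conf.set(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encodedCust + ".alias",
      "cust1_key_alias");
    conf.setBoolean(HConstants.CRYPTO_MANAGED_KEY_STORE_CONF_KEY_PREFIX + encodedCust + ".active",
      true);
  }
}
```
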
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdmin.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdmin.java
new file mode 100644
index 000000000000..2e52dccc0598
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaAdmin.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import java.io.IOException;
+import java.security.KeyException;
+import java.util.List;
+
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * KeymetaAdmin is an interface for administrative functions related to managed keys.
+ * It provides operations to enable key management and to query managed key status.
+ */
+@InterfaceAudience.Public
+public interface KeymetaAdmin {
+ /**
+ * Enables key management for the specified custodian and namespace.
+ *
+ * @param keyCust The key custodian in base64 encoded format.
+ * @param keyNamespace The namespace for the key management.
+ *
+ * @return The list of {@link ManagedKeyData} objects each identifying the key and its current
+ * status.
+ * @throws IOException if an error occurs while enabling key management.
+ */
+ List<ManagedKeyData> enableKeyManagement(String keyCust, String keyNamespace)
+ throws IOException, KeyException;
+
+ /**
+ * Get the status of all the keys for the specified custodian.
+ *
+ * @param keyCust The key custodian in base64 encoded format.
+ * @param keyNamespace The namespace for the key management.
+ * @return The list of {@link ManagedKeyData} objects each identifying the key and its current
+ * status.
+ * @throws IOException if an error occurs while retrieving the keys.
+ */
+ List<ManagedKeyData> getManagedKeys(String keyCust, String keyNamespace)
+ throws IOException, KeyException;
+}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
index 96b3dbd4a8a5..55da4b3b12c0 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java
@@ -1688,16 +1688,31 @@ public static byte[] add(final byte[] a, final byte[] b) {
/**
* Concatenate byte arrays.
+ *
* @param a first third
* @param b second third
* @param c third third
* @return New array made from a, b and c
*/
public static byte[] add(final byte[] a, final byte[] b, final byte[] c) {
- byte[] result = new byte[a.length + b.length + c.length];
+ return add(a, b, c, EMPTY_BYTE_ARRAY);
+ }
+
+ /**
+ * Concatenate byte arrays.
+ *
+ * @param a first fourth
+ * @param b second fourth
+ * @param c third fourth
+ * @param d fourth fourth
+ * @return New array made from a, b, c, and d
+ */
+ public static byte[] add(final byte[] a, final byte[] b, final byte[] c, final byte[] d) {
+ byte[] result = new byte[a.length + b.length + c.length + d.length];
System.arraycopy(a, 0, result, 0, a.length);
System.arraycopy(b, 0, result, a.length, b.length);
System.arraycopy(c, 0, result, a.length + b.length, c.length);
+ System.arraycopy(d, 0, result, a.length + b.length + c.length, d.length);
return result;
}
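
A one-line sketch of the new four-array overload:

```java
import org.apache.hadoop.hbase.util.Bytes;

public class AddExample {
  public static void main(String[] args) {
    byte[] joined = Bytes.add(Bytes.toBytes("a"), Bytes.toBytes("b"),
      Bytes.toBytes("c"), Bytes.toBytes("d"));
    System.out.println(Bytes.toString(joined)); // abcd
  }
}
```
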
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
index fe6f3bc238a9..da4662d2c8a0 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
@@ -297,6 +297,10 @@ public static void setRootDir(final Configuration c, final Path root) {
c.set(HConstants.HBASE_DIR, root.toString());
}
+ public static Path getSystemKeyDir(final Configuration c) throws IOException {
+ return new Path(getRootDir(c), HConstants.SYSTEM_KEYS_DIRECTORY);
+ }
+
public static void setFsDefault(final Configuration c, final Path root) {
c.set("fs.defaultFS", root.toString()); // for hadoop 0.21+
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java
index e592b1f935a1..a4a8ce82b2a8 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java
@@ -19,8 +19,10 @@
import java.io.IOException;
import java.util.concurrent.atomic.LongAdder;
+
import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hbase.thirdparty.com.google.gson.Gson;
import org.apache.hbase.thirdparty.com.google.gson.GsonBuilder;
import org.apache.hbase.thirdparty.com.google.gson.LongSerializationPolicy;
import org.apache.hbase.thirdparty.com.google.gson.TypeAdapter;
@@ -33,6 +35,8 @@
@InterfaceAudience.Private
public final class GsonUtil {
+ private static Gson INSTANCE;
+
private GsonUtil() {
}
@@ -62,4 +66,11 @@ public LongAdder read(JsonReader in) throws IOException {
public static GsonBuilder createGsonWithDisableHtmlEscaping() {
return createGson().disableHtmlEscaping();
}
+
+ public static Gson getDefaultInstance() {
+ if (INSTANCE == null) {
+ INSTANCE = createGson().create();
+ }
+ return INSTANCE;
+ }
}
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/MockManagedKeyProvider.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/MockManagedKeyProvider.java
new file mode 100644
index 000000000000..a3397f96df70
--- /dev/null
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/MockManagedKeyProvider.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.crypto;
+
+import java.io.IOException;
+import java.security.Key;
+import java.security.NoSuchAlgorithmException;
+import java.util.HashMap;
+import java.util.Map;
+import javax.crypto.KeyGenerator;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A simple implementation of ManagedKeyProvider for testing. It generates a key on demand given a
+ * prefix. One can control the state of a key by calling setKeyState and can rotate a key by
+ * calling setKey.
+ */
+public class MockManagedKeyProvider extends MockAesKeyProvider implements ManagedKeyProvider {
+ protected static final Logger LOG = LoggerFactory.getLogger(MockManagedKeyProvider.class);
+
+ private boolean multikeyGenMode;
+ private Map<String, Map<String, Key>> keys = new HashMap<>();
+ private Map<String, Map<String, ManagedKeyData>> lastGenKeyData = new HashMap<>();
+ // Keep references of all generated keys by their full and partial metadata.
+ private Map<String, Key> allGeneratedKeys = new HashMap<>();
+ private Map<String, ManagedKeyState> keyState = new HashMap<>();
+ private String systemKeyAlias = "default_system_key_alias";
+
+ @Override
+ public void initConfig(Configuration conf) {
+ // NO-OP
+ }
+
+ @Override
+ public ManagedKeyData getSystemKey(byte[] systemId) throws IOException {
+ return getKey(systemId, systemKeyAlias, ManagedKeyData.KEY_SPACE_GLOBAL);
+ }
+
+ @Override
+ public ManagedKeyData getManagedKey(byte[] key_cust, String key_namespace)
+ throws IOException {
+ String alias = Bytes.toString(key_cust);
+ return getKey(key_cust, alias, key_namespace);
+ }
+
+ @Override
+ public ManagedKeyData unwrapKey(String keyMetadata, byte[] wrappedKey) throws IOException {
+ String[] meta_toks = keyMetadata.split(":");
+ if (allGeneratedKeys.containsKey(keyMetadata)) {
+ ManagedKeyState keyState = this.keyState.get(meta_toks[1]);
+ ManagedKeyData managedKeyData =
+ new ManagedKeyData(meta_toks[0].getBytes(), meta_toks[2],
+ allGeneratedKeys.get(keyMetadata),
+ keyState == null ? ManagedKeyState.ACTIVE : keyState, keyMetadata);
+ return registerKeyData(meta_toks[1], managedKeyData);
+ }
+ return new ManagedKeyData(meta_toks[0].getBytes(), meta_toks[2],
+ null, ManagedKeyState.FAILED, keyMetadata);
+ }
+
+ public ManagedKeyData getLastGeneratedKeyData(String alias, String keyNamespace) {
+ if (! lastGenKeyData.containsKey(keyNamespace)) {
+ return null;
+ }
+ return lastGenKeyData.get(keyNamespace).get(alias);
+ }
+
+ private ManagedKeyData registerKeyData(String alias, ManagedKeyData managedKeyData) {
+ if (! lastGenKeyData.containsKey(managedKeyData.getKeyNamespace())) {
+ lastGenKeyData.put(managedKeyData.getKeyNamespace(), new HashMap<>());
+ }
+ lastGenKeyData.get(managedKeyData.getKeyNamespace()).put(alias,
+ managedKeyData);
+ return managedKeyData;
+ }
+
+ public void setMultikeyGenMode(boolean multikeyGenMode) {
+ this.multikeyGenMode = multikeyGenMode;
+ }
+
+ public void setMockedKeyState(String alias, ManagedKeyState status) {
+ keyState.put(alias, status);
+ }
+
+ public void setMockedKey(String alias, Key key, String keyNamespace) {
+ if (! keys.containsKey(keyNamespace)) {
+ keys.put(keyNamespace, new HashMap<>());
+ }
+ Map<String, Key> keysForSpace = keys.get(keyNamespace);
+ keysForSpace.put(alias, key);
+ }
+
+ public Key getMockedKey(String alias, String keySpace) {
+ Map<String, Key> keysForSpace = keys.get(keySpace);
+ return keysForSpace != null ? keysForSpace.get(alias) : null;
+ }
+
+ public void setClusterKeyAlias(String alias) {
+ this.systemKeyAlias = alias;
+ }
+
+ public String getSystemKeyAlias() {
+ return this.systemKeyAlias;
+ }
+
+ /**
+ * Generate a new secret key.
+ * @return the key
+ */
+ public static Key generateSecretKey() {
+ KeyGenerator keyGen = null;
+ try {
+ keyGen = KeyGenerator.getInstance("AES");
+ } catch (NoSuchAlgorithmException e) {
+ throw new RuntimeException(e);
+ }
+ keyGen.init(256);
+ return keyGen.generateKey();
+ }
+
+ private ManagedKeyData getKey(byte[] key_cust, String alias, String key_namespace) {
+ ManagedKeyState keyState = this.keyState.get(alias);
+ if (! keys.containsKey(key_namespace)) {
+ keys.put(key_namespace, new HashMap<>());
+ }
+ Map<String, Key> keySpace = keys.get(key_namespace);
+ Key key = null;
+ if (keyState != ManagedKeyState.FAILED && keyState != ManagedKeyState.DISABLED) {
+ if (multikeyGenMode || ! keySpace.containsKey(alias)) {
+ key = generateSecretKey();
+ keySpace.put(alias, key);
+ }
+ key = keySpace.get(alias);
+ if (key == null) {
+ return null;
+ }
+ }
+ long checksum = key == null ? 0 : ManagedKeyData.constructKeyChecksum(key.getEncoded());
+ String partialMetadata = Bytes.toString(key_cust) + ":" + alias;
+ String keyMetadata = partialMetadata + ":" + key_namespace + ":" + checksum;
+ allGeneratedKeys.put(partialMetadata, key);
+ allGeneratedKeys.put(keyMetadata, key);
+ ManagedKeyData managedKeyData =
+ new ManagedKeyData(key_cust, key_namespace, key,
+ keyState == null ? ManagedKeyState.ACTIVE : keyState, keyMetadata);
+ return registerKeyData(alias, managedKeyData);
+ }
+}
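
A sketch of how a test might drive the mock, flipping the key state between fetches; the custodian and namespace identifiers are illustrative:

```java
import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;

public class MockProviderExample {
  public static void main(String[] args) throws Exception {
    MockManagedKeyProvider provider = new MockManagedKeyProvider();
    ManagedKeyData first = provider.getManagedKey("cust1".getBytes(), "ns1");
    System.out.println(first.getKeyState()); // ACTIVE by default
    // Flip the state and fetch again: the mock now reports DISABLED with no key.
    provider.setMockedKeyState("cust1", ManagedKeyState.DISABLED);
    ManagedKeyData second = provider.getManagedKey("cust1".getBytes(), "ns1");
    System.out.println(second.getKeyState() + ", key=" + second.getTheKey());
  }
}
```
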
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestKeyStoreKeyProvider.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestKeyStoreKeyProvider.java
index 581681988c28..de91aa904581 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestKeyStoreKeyProvider.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestKeyStoreKeyProvider.java
@@ -26,6 +26,8 @@
import java.security.Key;
import java.security.KeyStore;
import java.security.MessageDigest;
+import java.util.Arrays;
+import java.util.Collection;
import java.util.Properties;
import javax.crypto.spec.SecretKeySpec;
import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -33,12 +35,15 @@
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.BeforeClass;
+import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
@Category({ MiscTests.class, SmallTests.class })
+@RunWith(Parameterized.class)
public class TestKeyStoreKeyProvider {
@ClassRule
@@ -53,14 +58,33 @@ public class TestKeyStoreKeyProvider {
static File storeFile;
static File passwordFile;
- @BeforeClass
- public static void setUp() throws Exception {
+ protected KeyProvider provider;
+
+ @Parameterized.Parameter(0)
+ public boolean withPasswordOnAlias;
+ @Parameterized.Parameter(1)
+ public boolean withPasswordFile;
+
+ @Parameterized.Parameters(name = "withPasswordOnAlias={0} withPasswordFile={1}")
+ public static Collection
+ */
+@CoreCoprocessor @InterfaceAudience.Private
+public class KeymetaServiceEndpoint implements MasterCoprocessor {
+ private static final Logger LOG = LoggerFactory.getLogger(KeymetaServiceEndpoint.class);
+
+ private MasterServices master = null;
+
+ private final ManagedKeysService managedKeysService = new KeymetaAdminServiceImpl();
+
+ /**
+ * Starts the coprocessor by initializing the reference to the
+ * {@link org.apache.hadoop.hbase.master.MasterServices} instance.
+ *
+ * @param env The coprocessor environment.
+ * @throws IOException If an error occurs during initialization.
+ */
+ @Override
+ public void start(CoprocessorEnvironment env) throws IOException {
+ if (!(env instanceof HasMasterServices)) {
+ throw new IOException("Does not implement HMasterServices");
+ }
+
+ master = ((HasMasterServices) env).getMasterServices();
+ }
+
+ /**
+ * Returns an iterable of the available coprocessor services, which includes the
+ * {@link ManagedKeysService} implemented by
+ * {@link KeymetaServiceEndpoint.KeymetaAdminServiceImpl}.
+ *
+ * @return An iterable of the available coprocessor services.
+ */
+ @Override
+ public Iterable<Service> getServices() {
+ return Collections.singleton(managedKeysService);
+ }
+
+ /**
+ * The implementation of the {@link ManagedKeysProtos.ManagedKeysService}
+ * interface, which provides the actual method implementations for enabling key management.
+ */
+ @InterfaceAudience.Private
+ public class KeymetaAdminServiceImpl extends ManagedKeysService {
+
+ /**
+ * Enables key management for a given custodian and namespace, as specified in the provided
+ * request.
+ *
+ * @param controller The RPC controller.
+ * @param request The request containing the custodian and namespace specifications.
+ * @param done The callback to be invoked with the response.
+ */
+ @Override
+ public void enableKeyManagement(RpcController controller, ManagedKeysRequest request,
+ RpcCallback<GetManagedKeysResponse> done) {
+ ManagedKeysResponse.Builder builder = getResponseBuilder(controller, request);
+ if (builder.getKeyCust() != null && ! builder.getKeyCust().isEmpty()) {
+ try {
+ List<ManagedKeyData> managedKeyStates = master.getKeymetaAdmin()
+ .enableKeyManagement(request.getKeyCust(), request.getKeyNamespace());
+ done.run(generateKeyStateResponse(managedKeyStates, builder));
+ } catch (IOException e) {
+ CoprocessorRpcUtils.setControllerException(controller, e);
+ } catch (KeyException e) {
+ CoprocessorRpcUtils.setControllerException(controller, new IOException(e));
+ }
+ }
+ }
+
+ @Override
+ public void getManagedKeys(RpcController controller, ManagedKeysRequest request,
+ RpcCallback<GetManagedKeysResponse> done) {
+ ManagedKeysResponse.Builder builder = getResponseBuilder(controller, request);
+ if (builder.getKeyCust() != null && ! builder.getKeyCust().isEmpty()) {
+ try {
+ List<ManagedKeyData> managedKeyStates = master.getKeymetaAdmin()
+ .getManagedKeys(request.getKeyCust(), request.getKeyNamespace());
+ done.run(generateKeyStateResponse(managedKeyStates, builder));
+ } catch (IOException e) {
+ CoprocessorRpcUtils.setControllerException(controller, e);
+ } catch (KeyException e) {
+ CoprocessorRpcUtils.setControllerException(controller, new IOException(e));
+ }
+ }
+ }
+ }
+
+ @InterfaceAudience.Private
+ public static ManagedKeysResponse.Builder getResponseBuilder(RpcController controller,
+ ManagedKeysRequest request) {
+ ManagedKeysResponse.Builder builder = ManagedKeysResponse.newBuilder()
+ .setKeyNamespace(request.getKeyNamespace());
+ byte[] key_cust = convertToKeyCustBytes(controller, request, builder);
+ if (key_cust != null) {
+ builder.setKeyCustBytes(ByteString.copyFrom(key_cust));
+ }
+ return builder;
+ }
+
+ // Assumes that all ManagedKeyData objects belong to the same custodian and namespace.
+ @InterfaceAudience.Private
+ public static GetManagedKeysResponse generateKeyStateResponse(
+ List<ManagedKeyData> managedKeyStates, ManagedKeysResponse.Builder builder) {
+ GetManagedKeysResponse.Builder responseBuilder = GetManagedKeysResponse.newBuilder();
+ for (ManagedKeyData keyData: managedKeyStates) {
+ builder.setKeyState(ManagedKeysProtos.ManagedKeyState.valueOf(
+ keyData.getKeyState().getVal()))
+ .setKeyMetadata(keyData.getKeyMetadata())
+ .setRefreshTimestamp(keyData.getRefreshTimestamp())
+ .setReadOpCount(keyData.getReadOpCount())
+ .setWriteOpCount(keyData.getWriteOpCount())
+ ;
+ responseBuilder.addState(builder.build());
+ }
+ return responseBuilder.build();
+ }
+
+ @InterfaceAudience.Private
+ public static byte[] convertToKeyCustBytes(RpcController controller, ManagedKeysRequest request,
+ ManagedKeysResponse.Builder builder) {
+ byte[] key_cust = null;
+ try {
+ key_cust = Base64.getDecoder().decode(request.getKeyCust());
+ } catch (IllegalArgumentException e) {
+ builder.setKeyState(ManagedKeysProtos.ManagedKeyState.KEY_FAILED);
+ CoprocessorRpcUtils.setControllerException(controller, new IOException(
+ "Failed to decode specified prefix as Base64 string: " + request.getKeyCust(), e));
+ }
+ return key_cust;
+ }
+}
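+
+// Illustrative usage (a sketch, not part of this patch): given a Configuration conf, the
+// endpoint is reachable through the KeymetaAdminClient wrapper added in hbase-client; the
+// custodian/namespace values below are made up.
+//
+//   try (Connection conn = ConnectionFactory.createConnection(conf)) {
+//     KeymetaAdmin admin = new KeymetaAdminClient(conn);
+//     List<ManagedKeyData> states = admin.enableKeyManagement("Y3VzdDE=", "ns1");
+//   }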
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaTableAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaTableAccessor.java
new file mode 100644
index 000000000000..6efb24a57fff
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/KeymetaTableAccessor.java
@@ -0,0 +1,336 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import java.io.IOException;
+import java.security.Key;
+import java.security.KeyException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.filter.PrefixFilter;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.security.EncryptionUtil;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Accessor for keymeta table as part of key management.
+ */
+@InterfaceAudience.Private
+public class KeymetaTableAccessor extends KeyManagementBase {
+ private static final String KEY_META_INFO_FAMILY_STR = "info";
+
+ public static final byte[] KEY_META_INFO_FAMILY = Bytes.toBytes(KEY_META_INFO_FAMILY_STR);
+
+ public static final TableName KEY_META_TABLE_NAME =
+ TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "keymeta");
+
+ public static final String DEK_METADATA_QUAL_NAME = "m";
+ public static final byte[] DEK_METADATA_QUAL_BYTES = Bytes.toBytes(DEK_METADATA_QUAL_NAME);
+
+ public static final String DEK_CHECKSUM_QUAL_NAME = "c";
+ public static final byte[] DEK_CHECKSUM_QUAL_BYTES = Bytes.toBytes(DEK_CHECKSUM_QUAL_NAME);
+
+ public static final String DEK_WRAPPED_BY_STK_QUAL_NAME = "w";
+ public static final byte[] DEK_WRAPPED_BY_STK_QUAL_BYTES =
+ Bytes.toBytes(DEK_WRAPPED_BY_STK_QUAL_NAME);
+
+ public static final String STK_CHECKSUM_QUAL_NAME = "s";
+ public static final byte[] STK_CHECKSUM_QUAL_BYTES = Bytes.toBytes(STK_CHECKSUM_QUAL_NAME);
+
+ public static final String REFRESHED_TIMESTAMP_QUAL_NAME = "t";
+ public static final byte[] REFRESHED_TIMESTAMP_QUAL_BYTES =
+ Bytes.toBytes(REFRESHED_TIMESTAMP_QUAL_NAME);
+
+ public static final String KEY_STATE_QUAL_NAME = "k";
+ public static final byte[] KEY_STATE_QUAL_BYTES = Bytes.toBytes(KEY_STATE_QUAL_NAME);
+
+ public static final String READ_OP_COUNT_QUAL_NAME = "R";
+ public static final byte[] READ_OP_COUNT_QUAL_BYTES = Bytes.toBytes(READ_OP_COUNT_QUAL_NAME);
+
+ public static final String WRITE_OP_COUNT_QUAL_NAME = "W";
+ public static final byte[] WRITE_OP_COUNT_QUAL_BYTES = Bytes.toBytes(WRITE_OP_COUNT_QUAL_NAME);
+
+ public KeymetaTableAccessor(Server server) {
+ super(server);
+ }
+
+ /**
+ * Add the specified key to the keymeta table.
+ * @param keyData The key data.
+ * @throws IOException when there is an underlying IOException.
+ */
+ public void addKey(ManagedKeyData keyData) throws IOException {
+ assertKeyManagementEnabled();
+ final Put putForMetadata = addMutationColumns(new Put(constructRowKeyForMetadata(keyData)),
+ keyData);
+ Connection connection = getServer().getConnection();
+ try (Table table = connection.getTable(KEY_META_TABLE_NAME)) {
+ table.put(putForMetadata);
+ }
+ }
+
+ /**
+ * Get all the keys for the specified key_cust and key_namespace.
+ *
+ * @param key_cust The key custodian.
+ * @param keyNamespace The namespace
+ * @return a list of key data, one for each key, can be empty when none were found.
+ * @throws IOException when there is an underlying IOException.
+ * @throws KeyException when there is an underlying KeyException.
+ */
+ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
+ public List<ManagedKeyData> getAllKeys(byte[] key_cust, String keyNamespace)
+ throws IOException, KeyException {
+ assertKeyManagementEnabled();
+ Connection connection = getServer().getConnection();
+ byte[] prefixForScan = Bytes.add(Bytes.toBytes(key_cust.length), key_cust,
+ Bytes.toBytesBinary(keyNamespace));
+ PrefixFilter prefixFilter = new PrefixFilter(prefixForScan);
+ Scan scan = new Scan();
+ scan.setFilter(prefixFilter);
+ scan.addFamily(KEY_META_INFO_FAMILY);
+
+ try (Table table = connection.getTable(KEY_META_TABLE_NAME);
+ ResultScanner scanner = table.getScanner(scan)) {
+ List<ManagedKeyData> allKeys = new ArrayList<>();
+ for (Result result : scanner) {
+ ManagedKeyData keyData = parseFromResult(getServer(), key_cust, keyNamespace, result);
+ if (keyData != null) {
+ allKeys.add(keyData);
+ }
+ }
+ return allKeys;
+ }
+ }
+
+ /**
+ * Get all the active keys for the specified key_cust and key_namespace.
+ *
+ * @param key_cust The prefix
+ * @param keyNamespace The namespace
+ * @return a list of key data, one for each active key, can be empty when none were found.
+ * @throws IOException when there is an underlying IOException.
+ * @throws KeyException when there is an underlying KeyException.
+ */
+ public List<ManagedKeyData> getActiveKeys(byte[] key_cust, String keyNamespace)
+ throws IOException, KeyException {
+ assertKeyManagementEnabled();
+ List<ManagedKeyData> activeKeys = new ArrayList<>();
+ for (ManagedKeyData keyData : getAllKeys(key_cust, keyNamespace)) {
+ if (keyData.getKeyState() == ManagedKeyState.ACTIVE) {
+ activeKeys.add(keyData);
+ }
+ }
+ return activeKeys;
+ }
+
+ /**
+ * Get the specific key identified by key_cust, keyNamespace and keyState.
+ *
+ * @param key_cust The prefix.
+ * @param keyNamespace The namespace.
+ * @param keyState The state of the key.
+ * @return the key or {@code null}
+ * @throws IOException when there is an underlying IOException.
+ * @throws KeyException when there is an underlying KeyException.
+ */
+ public ManagedKeyData getKey(byte[] key_cust, String keyNamespace, ManagedKeyState keyState)
+ throws IOException, KeyException {
+ return getKeyInternal(key_cust, keyNamespace, new byte[] { keyState.getVal() });
+ }
+
+ /**
+ * Get the specific key identified by key_cust, keyNamespace and keyMetadata.
+ *
+ * @param key_cust The prefix.
+ * @param keyNamespace The namespace.
+ * @param keyMetadata The metadata.
+ * @return the key or {@code null}
+ * @throws IOException when there is an underlying IOException.
+ * @throws KeyException when there is an underlying KeyException.
+ */
+ public ManagedKeyData getKey(byte[] key_cust, String keyNamespace, String keyMetadata)
+ throws IOException, KeyException {
+ return getKeyInternal(key_cust, keyNamespace,
+ ManagedKeyData.constructMetadataHash(keyMetadata));
+ }
+
+ /**
+ * Internal helper method to get a key using the provided metadata hash.
+ *
+ * @param key_cust The prefix.
+ * @param keyNamespace The namespace.
+ * @param keyMetadataHash The metadata hash or state value.
+ * @return the key or {@code null}
+ * @throws IOException when there is an underlying IOException.
+ * @throws KeyException when there is an underlying KeyException.
+ */
+ private ManagedKeyData getKeyInternal(byte[] key_cust, String keyNamespace,
+ byte[] keyMetadataHash) throws IOException, KeyException {
+ assertKeyManagementEnabled();
+ Connection connection = getServer().getConnection();
+ try (Table table = connection.getTable(KEY_META_TABLE_NAME)) {
+ byte[] rowKey = constructRowKeyForMetadata(key_cust, keyNamespace, keyMetadataHash);
+ Result result = table.get(new Get(rowKey));
+ return parseFromResult(getServer(), key_cust, keyNamespace, result);
+ }
+ }
+
+ /**
+ * Report read or write operation count on the specific key identified by key_cust, keyNamespace
+ * and keyMetadata. The reported value is added to the existing operation count using the
+ * Increment mutation.
+ * @param key_cust The prefix.
+ * @param keyNamespace The namespace.
+ * @param keyMetadata The metadata.
+ * @throws IOException when there is an underlying IOException.
+ */
+ public void reportOperation(byte[] key_cust, String keyNamespace, String keyMetadata, long count,
+ boolean isReadOperation) throws IOException {
+ assertKeyManagementEnabled();
+ Connection connection = getServer().getConnection();
+ try (Table table = connection.getTable(KEY_META_TABLE_NAME)) {
+ byte[] rowKey = constructRowKeyForMetadata(key_cust, keyNamespace,
+ ManagedKeyData.constructMetadataHash(keyMetadata));
+ Increment incr = new Increment(rowKey)
+ .addColumn(KEY_META_INFO_FAMILY,
+ isReadOperation ? READ_OP_COUNT_QUAL_BYTES : WRITE_OP_COUNT_QUAL_BYTES,
+ count);
+ table.increment(incr);
+ }
+ }
+
+ /**
+ * Add the mutation columns to the given Put that are derived from the keyData.
+ */
+ private Put addMutationColumns(Put put, ManagedKeyData keyData) throws IOException {
+ ManagedKeyData latestSystemKey = getServer().getSystemKeyCache().getLatestSystemKey();
+ if (keyData.getTheKey() != null) {
+ byte[] dekWrappedBySTK = EncryptionUtil.wrapKey(getServer().getConfiguration(), null,
+ keyData.getTheKey(), latestSystemKey.getTheKey());
+ put.addColumn(KEY_META_INFO_FAMILY, DEK_CHECKSUM_QUAL_BYTES,
+ Bytes.toBytes(keyData.getKeyChecksum()))
+ .addColumn(KEY_META_INFO_FAMILY, DEK_WRAPPED_BY_STK_QUAL_BYTES, dekWrappedBySTK)
+ .addColumn(KEY_META_INFO_FAMILY, STK_CHECKSUM_QUAL_BYTES,
+ Bytes.toBytes(latestSystemKey.getKeyChecksum()))
+ ;
+ }
+ Put result = put.setDurability(Durability.SKIP_WAL)
+ .setPriority(HConstants.SYSTEMTABLE_QOS)
+ .addColumn(KEY_META_INFO_FAMILY, REFRESHED_TIMESTAMP_QUAL_BYTES,
+ Bytes.toBytes(keyData.getRefreshTimestamp()))
+ .addColumn(KEY_META_INFO_FAMILY, KEY_STATE_QUAL_BYTES,
+ new byte[] { keyData.getKeyState().getVal() })
+ ;
+
+ // Only add metadata column if metadata is not null
+ String metadata = keyData.getKeyMetadata();
+ if (metadata != null) {
+ result.addColumn(KEY_META_INFO_FAMILY, DEK_METADATA_QUAL_BYTES, Bytes.toBytes(metadata));
+ }
+
+ return result;
+ }
+
+ @InterfaceAudience.Private
+ public static byte[] constructRowKeyForMetadata(ManagedKeyData keyData) {
+ byte[] keyMetadataHash;
+ if (keyData.getKeyState() == ManagedKeyState.FAILED && keyData.getKeyMetadata() == null) {
+ // For FAILED state with null metadata, use state as metadata
+ keyMetadataHash = new byte[] { keyData.getKeyState().getVal() };
+ } else {
+ keyMetadataHash = keyData.getKeyMetadataHash();
+ }
+ return constructRowKeyForMetadata(keyData.getKeyCustodian(), keyData.getKeyNamespace(),
+ keyMetadataHash);
+ }
+
+ @InterfaceAudience.Private
+ public static byte[] constructRowKeyForMetadata(byte[] key_cust, String keyNamespace,
+ byte[] keyMetadataHash) {
+ int custLength = key_cust.length;
+ return Bytes.add(Bytes.toBytes(custLength), key_cust, Bytes.toBytesBinary(keyNamespace),
+ keyMetadataHash);
+ }
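+
+ // Worked example (assumed values): for key_cust = {0x01, 0x02} and keyNamespace = "ns",
+ // with a metadata hash H, the row key laid out by this method is
+ //   00 00 00 02 | 01 02 | 6e 73 | H
+ // i.e. a 4-byte big-endian custodian length, the custodian bytes, the namespace bytes and
+ // the hash. The PrefixFilter in getAllKeys() relies on exactly this layout.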
+
+ @InterfaceAudience.Private
+ public static ManagedKeyData parseFromResult(Server server, byte[] key_cust, String keyNamespace,
+ Result result) throws IOException, KeyException {
+ if (result == null || result.isEmpty()) {
+ return null;
+ }
+ ManagedKeyState keyState = ManagedKeyState.forValue(
+ result.getValue(KEY_META_INFO_FAMILY, KEY_STATE_QUAL_BYTES)[0]);
+ String dekMetadata = Bytes.toString(result.getValue(KEY_META_INFO_FAMILY,
+ DEK_METADATA_QUAL_BYTES));
+ byte[] dekWrappedByStk = result.getValue(KEY_META_INFO_FAMILY, DEK_WRAPPED_BY_STK_QUAL_BYTES);
+ if ((keyState == ManagedKeyState.ACTIVE || keyState == ManagedKeyState.INACTIVE)
+ && dekWrappedByStk == null) {
+ throw new IOException(keyState + " key must have a wrapped key");
+ }
+ Key dek = null;
+ if (dekWrappedByStk != null) {
+ long stkChecksum =
+ Bytes.toLong(result.getValue(KEY_META_INFO_FAMILY, STK_CHECKSUM_QUAL_BYTES));
+ ManagedKeyData clusterKey = server.getSystemKeyCache().getSystemKeyByChecksum(stkChecksum);
+ if (clusterKey == null) {
+ LOG.error("Dropping key with metadata: {} as STK with checksum: {} is unavailable",
+ dekMetadata, stkChecksum);
+ return null;
+ }
+ dek = EncryptionUtil.unwrapKey(server.getConfiguration(), null, dekWrappedByStk,
+ clusterKey.getTheKey());
+ }
+ long refreshedTimestamp = Bytes.toLong(result.getValue(KEY_META_INFO_FAMILY,
+ REFRESHED_TIMESTAMP_QUAL_BYTES));
+ byte[] readOpValue = result.getValue(KEY_META_INFO_FAMILY, READ_OP_COUNT_QUAL_BYTES);
+ long readOpCount = readOpValue != null ? Bytes.toLong(readOpValue) : 0;
+ byte[] writeOpValue = result.getValue(KEY_META_INFO_FAMILY, WRITE_OP_COUNT_QUAL_BYTES);
+ long writeOpCount = writeOpValue != null ? Bytes.toLong(writeOpValue) : 0;
+ ManagedKeyData dekKeyData = new ManagedKeyData(key_cust, keyNamespace, dek, keyState,
+ dekMetadata, refreshedTimestamp, readOpCount, writeOpCount);
+ if (dek != null) {
+ long dekChecksum = Bytes.toLong(result.getValue(KEY_META_INFO_FAMILY,
+ DEK_CHECKSUM_QUAL_BYTES));
+ if (dekKeyData.getKeyChecksum() != dekChecksum) {
+ LOG.error("Dropping key, current key checksum: {} didn't match the expected checksum: {}"
+ + " for key with metadata: {}", dekKeyData.getKeyChecksum(), dekChecksum, dekMetadata);
+ return null;
+ }
+ }
+ return dekKeyData;
+ }
+}
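+
+// A minimal round-trip sketch (assumes a Server with key management enabled; the custodian
+// bytes `cust` and the namespace are illustrative):
+//
+//   KeymetaTableAccessor accessor = new KeymetaTableAccessor(server);
+//   accessor.addKey(keyData);                                               // persist
+//   ManagedKeyData stored =
+//     accessor.getKey(cust, "ns1", keyData.getKeyMetadata());               // read back
+//   accessor.reportOperation(cust, "ns1", keyData.getKeyMetadata(), 1, true); // +1 read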
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/ManagedKeyDataCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/ManagedKeyDataCache.java
new file mode 100644
index 000000000000..3359d57ca1ef
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/ManagedKeyDataCache.java
@@ -0,0 +1,334 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import java.io.IOException;
+import java.security.KeyException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
+
+/**
+ * In-memory cache for ManagedKeyData entries, using key metadata as the cache key. Uses two
+ * independent Caffeine caches: one for general key data and one for active keys only with
+ * hierarchical structure for efficient random key selection.
+ */
+@InterfaceAudience.Private
+public class ManagedKeyDataCache extends KeyManagementBase {
+ private static final Logger LOG = LoggerFactory.getLogger(ManagedKeyDataCache.class);
+
+ private Cache<String, ManagedKeyData> cache;
+ private Cache<ActiveKeysCacheKey, List<ManagedKeyData>> activeKeysCache;
+ private final KeymetaTableAccessor keymetaAccessor;
+
+ /**
+ * Composite key for active keys cache containing custodian and namespace.
+ * NOTE: Pair won't work out of the box here, because byte[] only has identity-based
+ * equals/hashCode.
+ */
+ @InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.UNITTEST })
+ public static class ActiveKeysCacheKey {
+ private final byte[] custodian;
+ private final String namespace;
+
+ public ActiveKeysCacheKey(byte[] custodian, String namespace) {
+ this.custodian = custodian;
+ this.namespace = namespace;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (obj == null || getClass() != obj.getClass())
+ return false;
+ ActiveKeysCacheKey cacheKey = (ActiveKeysCacheKey) obj;
+ return Bytes.equals(custodian, cacheKey.custodian) &&
+ Objects.equals(namespace, cacheKey.namespace);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(Bytes.hashCode(custodian), namespace);
+ }
+ }
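+
+ // Sketch of the pitfall this wrapper avoids (illustrative): arrays use identity
+ // semantics, so a bare Pair of byte[] and String would never match on lookup.
+ //
+ //   byte[] a = {1, 2}, b = {1, 2};
+ //   a.equals(b);          // false: identity-based
+ //   Bytes.equals(a, b);   // true: content-based, as used in equals() above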
+
+ /**
+ * Constructs the ManagedKeyDataCache with the given configuration and keymeta accessor. When
+ * keymetaAccessor is null, L2 lookup is disabled and dynamic lookup is enabled.
+ *
+ * @param conf The configuration, can't be null.
+ * @param keymetaAccessor The keymeta accessor, can be null.
+ */
+ public ManagedKeyDataCache(Configuration conf, KeymetaTableAccessor keymetaAccessor) {
+ super(conf);
+ this.keymetaAccessor = keymetaAccessor;
+ if (keymetaAccessor == null) {
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_DYNAMIC_LOOKUP_ENABLED_CONF_KEY, true);
+ }
+
+ int maxEntries = conf.getInt(
+ HConstants.CRYPTO_MANAGED_KEYS_L1_CACHE_MAX_ENTRIES_CONF_KEY,
+ HConstants.CRYPTO_MANAGED_KEYS_L1_CACHE_MAX_ENTRIES_DEFAULT);
+ int activeKeysMaxEntries = conf.getInt(
+ HConstants.CRYPTO_MANAGED_KEYS_L1_ACTIVE_CACHE_MAX_NS_ENTRIES_CONF_KEY,
+ HConstants.CRYPTO_MANAGED_KEYS_L1_ACTIVE_CACHE_MAX_NS_ENTRIES_DEFAULT);
+ this.cache = Caffeine.newBuilder()
+ .maximumSize(maxEntries)
+ .build();
+ this.activeKeysCache = Caffeine.newBuilder()
+ .maximumSize(activeKeysMaxEntries)
+ .build();
+ }
+
+ /**
+ * Retrieves an entry from the cache, loading it from L2 if KeymetaTableAccessor is available.
+ * When L2 is not available, it will try to load from provider, unless dynamic lookup is disabled.
+ *
+ * @param key_cust the key custodian
+ * @param keyNamespace the key namespace
+ * @param keyMetadata the key metadata of the entry to be retrieved
+ * @param wrappedKey The DEK key material encrypted with the corresponding
+ * KEK, if available.
+ * @return the corresponding ManagedKeyData entry, or null if not found
+ * @throws IOException if an error occurs while loading from KeymetaTableAccessor
+ * @throws KeyException if an error occurs while loading from KeymetaTableAccessor
+ */
+ public ManagedKeyData getEntry(byte[] key_cust, String keyNamespace, String keyMetadata,
+ byte[] wrappedKey) throws IOException, KeyException {
+ ManagedKeyData entry = cache.get(keyMetadata, metadata -> {
+ // First check if it's in the active keys cache
+ ManagedKeyData keyData = getFromActiveKeysCache(key_cust, keyNamespace, keyMetadata);
+
+ // Try to load from L2
+ if (keyData == null && keymetaAccessor != null) {
+ try {
+ keyData = keymetaAccessor.getKey(key_cust, keyNamespace, metadata);
+ } catch (IOException | KeyException | RuntimeException e) {
+ LOG.warn("Failed to load key from KeymetaTableAccessor for metadata: {}", metadata, e);
+ }
+ }
+
+ // If not found in L2 and dynamic lookup is enabled, try with Key Provider
+ if (keyData == null && isDynamicLookupEnabled()) {
+ try {
+ ManagedKeyProvider provider = getKeyProvider();
+ keyData = provider.unwrapKey(metadata, wrappedKey);
+ LOG.info("Got key data with status: {} and metadata: {} for prefix: {}",
+ keyData.getKeyState(), keyData.getKeyMetadata(),
+ ManagedKeyProvider.encodeToStr(key_cust));
+ // Add to KeymetaTableAccessor for future L2 lookups
+ if (keymetaAccessor != null) {
+ try {
+ keymetaAccessor.addKey(keyData);
+ } catch (IOException | RuntimeException e) {
+ LOG.warn("Failed to add key to KeymetaTableAccessor for metadata: {}", metadata, e);
+ }
+ }
+ } catch (IOException | RuntimeException e) {
+ LOG.warn("Failed to load key from provider for metadata: {}", metadata, e);
+ }
+ }
+
+ if (keyData == null) {
+ keyData = new ManagedKeyData(key_cust, keyNamespace, null, ManagedKeyState.FAILED,
+ keyMetadata);
+ }
+
+ if (! ManagedKeyState.isUsable(keyData.getKeyState())) {
+ LOG.info("Failed to get usable key data with metadata: {} for prefix: {}",
+ metadata, ManagedKeyProvider.encodeToStr(key_cust));
+ }
+ return keyData;
+ });
+ if (ManagedKeyState.isUsable(entry.getKeyState())) {
+ return entry;
+ }
+ return null;
+ }
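+
+ // Illustrative call (a sketch; the metadata string and wrapped-key bytes would normally
+ // come from the file being decrypted): a null return means no usable key could be
+ // resolved, and the FAILED sentinel stays cached to avoid repeated L2/provider lookups.
+ //
+ //   ManagedKeyData dek = managedKeyDataCache.getEntry(cust, "ns1", dekMetadata, wrappedDek);
+ //   if (dek == null) {
+ //     // fail the read or fall back
+ //   }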
+
+ /**
+ * Retrieves an existing key from the active keys.
+ *
+ * @param key_cust the key custodian
+ * @param keyNamespace the key namespace
+ * @param keyMetadata the key metadata
+ * @return the ManagedKeyData if found, null otherwise
+ */
+ private ManagedKeyData getFromActiveKeysCache(byte[] key_cust, String keyNamespace,
+ String keyMetadata) {
+ ActiveKeysCacheKey cacheKey = new ActiveKeysCacheKey(key_cust, keyNamespace);
+ List<ManagedKeyData> keyList = activeKeysCache.getIfPresent(cacheKey);
+ if (keyList != null) {
+ for (ManagedKeyData keyData : keyList) {
+ if (keyData.getKeyMetadata().equals(keyMetadata)) {
+ return keyData;
+ }
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Removes an entry from generic cache based on its key metadata.
+ *
+ * @param keyMetadata the key metadata of the entry to be removed
+ * @return the removed ManagedKeyData entry, or null if not found
+ */
+ public ManagedKeyData removeEntry(String keyMetadata) {
+ return cache.asMap().remove(keyMetadata);
+ }
+
+ public ManagedKeyData removeFromActiveKeys(byte[] key_cust, String key_namespace,
+ String keyMetadata) {
+ ActiveKeysCacheKey cacheKey = new ActiveKeysCacheKey(key_cust, key_namespace);
+ List<ManagedKeyData> keyList = activeKeysCache.getIfPresent(cacheKey);
+ if (keyList != null) {
+ // Find and remove the matching key
+ ManagedKeyData removedEntry = null;
+ for (int i = 0; i < keyList.size(); i++) {
+ if (keyList.get(i).getKeyMetadata().equals(keyMetadata)) {
+ removedEntry = keyList.remove(i);
+ break;
+ }
+ }
+ // If the list is now empty, remove the entire cache entry
+ if (keyList.isEmpty()) {
+ activeKeysCache.invalidate(cacheKey);
+ }
+ return removedEntry;
+ }
+ return null;
+ }
+
+ /**
+ * @return the approximate number of entries in the main cache which is meant for general lookup
+ * by key metadata.
+ */
+ public int getGenericCacheEntryCount() {
+ return (int) cache.estimatedSize();
+ }
+
+ /**
+ * @return the approximate number of entries in the active keys cache which is meant for random
+ * key selection.
+ */
+ public int getActiveCacheEntryCount() {
+ int activeCacheCount = 0;
+ for (List<ManagedKeyData> keyList : activeKeysCache.asMap().values()) {
+ activeCacheCount += keyList.size();
+ }
+ return activeCacheCount;
+ }
+
+ /**
+ * Retrieves a random active entry from the cache based on its key custodian, key namespace, and
+ * filters out entries with a status other than ACTIVE. This method also loads active keys from
+ * provider if not found in cache.
+ *
+ * @param key_cust The key custodian.
+ * @param keyNamespace the key namespace to search for
+ * @return a random ManagedKeyData entry with the given custodian and ACTIVE status, or null if
+ * not found
+ */
+ public ManagedKeyData getAnActiveEntry(byte[] key_cust, String keyNamespace) {
+ ActiveKeysCacheKey cacheKey = new ActiveKeysCacheKey(key_cust, keyNamespace);
+
+ List<ManagedKeyData> keyList = activeKeysCache.get(cacheKey, key -> {
+ List<ManagedKeyData> activeEntries = new ArrayList<>();
+
+ // Try to load from KeymetaTableAccessor
+ if (keymetaAccessor != null) {
+ try {
+ List<ManagedKeyData> loadedKeys = keymetaAccessor.getActiveKeys(key_cust, keyNamespace);
+ activeEntries.addAll(loadedKeys);
+ } catch (IOException | KeyException | RuntimeException e) {
+ LOG.warn("Failed to load active keys from KeymetaTableAccessor for custodian: {} namespace: {}",
+ ManagedKeyProvider.encodeToStr(key_cust), keyNamespace, e);
+ }
+ }
+
+ // If we get here, there were no keys in L2, which shouldn't happen if L2 is enabled and
+ // keys were injected using the control path for this custodian and namespace. In this
+ // case, we need to retrieve the keys from the provider; but first, as a quick
+ // optimization, we check for active keys in the generic cache, which should be
+ // sufficient for standalone tools.
+ if (activeEntries.isEmpty()) {
+ this.cache.asMap().values().stream()
+ .filter(keyData -> Bytes.equals(keyData.getKeyCustodian(), key_cust)
+ && keyData.getKeyNamespace().equals(keyNamespace)
+ && keyData.getKeyState() == ManagedKeyState.ACTIVE)
+ .forEach(activeEntries::add);
+ }
+
+ // As a last ditch effort, load active keys from provider. This typically happens for
+ // standalone tools.
+ if (activeEntries.isEmpty() && isDynamicLookupEnabled()) {
+ try {
+ String keyCust = ManagedKeyProvider.encodeToStr(key_cust);
+ Set<ManagedKeyData> retrievedKeys = retrieveManagedKeys(keyCust, key_cust, keyNamespace,
+ getPerCustodianNamespaceActiveKeyConfCount(), new HashSet<>());
+ if (keymetaAccessor != null) {
+ for (ManagedKeyData keyData : retrievedKeys) {
+ keymetaAccessor.addKey(keyData);
+ }
+ }
+ retrievedKeys.stream().filter(keyData -> keyData.getKeyState() == ManagedKeyState.ACTIVE)
+ .forEach(activeEntries::add);
+ } catch (IOException | KeyException | RuntimeException e) {
+ LOG.warn("Failed to load active keys from provider for custodian: {} namespace: {}",
+ ManagedKeyProvider.encodeToStr(key_cust), keyNamespace, e);
+ }
+ }
+
+ // We don't mind returning an empty list here because it will help prevent future L2/provider
+ // lookups.
+ return activeEntries;
+ });
+
+ // Return a random entry from active keys cache only
+ if (keyList.isEmpty()) {
+ return null;
+ }
+
+ return keyList.get((int) (Math.random() * keyList.size()));
+ }
+
+ /**
+ * Invalidates all entries in the cache.
+ */
+ public void invalidateAll() {
+ cache.invalidateAll();
+ activeKeysCache.invalidateAll();
+ }
+}
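+
+// A minimal write-path sketch (custodian bytes `cust` and namespace are illustrative): pick
+// a random ACTIVE key for encrypting new data and record its metadata next to the
+// ciphertext.
+//
+//   ManagedKeyData active = managedKeyDataCache.getAnActiveEntry(cust, "ns1");
+//   if (active != null) {
+//     // encrypt with active.getTheKey(); persist active.getKeyMetadata() alongside
+//   }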
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyAccessor.java
new file mode 100644
index 000000000000..5a89d38a0bb2
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyAccessor.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import static org.apache.hadoop.hbase.HConstants.SYSTEM_KEY_FILE_PREFIX;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class SystemKeyAccessor extends KeyManagementBase {
+ protected final Path systemKeyDir;
+
+ public SystemKeyAccessor(Server server) throws IOException {
+ super(server);
+ this.systemKeyDir = CommonFSUtils.getSystemKeyDir(server.getConfiguration());
+ }
+
+ /**
+ * Return both the latest system key file and all system key files.
+ * @return a pair of the latest system key file and all system key files
+ * @throws IOException if there is an error getting the latest system key file or no cluster key
+ * is initialized yet.
+ */
+ public Pair<Path, List<Path>> getLatestSystemKeyFile() throws IOException {
+ if (! isKeyManagementEnabled()) {
+ return new Pair<>(null, null);
+ }
+ List<Path> allClusterKeyFiles = getAllSystemKeyFiles();
+ if (allClusterKeyFiles.isEmpty()) {
+ throw new IOException("No cluster key initialized yet");
+ }
+ int currentMaxSeqNum = SystemKeyAccessor.extractKeySequence(allClusterKeyFiles.get(0));
+ return new Pair<>(new Path(systemKeyDir, SYSTEM_KEY_FILE_PREFIX + currentMaxSeqNum),
+ allClusterKeyFiles);
+ }
+
+ /**
+ * Return all available cluster key files, ordered from latest to oldest.
+ * If no cluster key files are available, then return an empty list. If key management is not
+ * enabled, then return null.
+ *
+ * @return a list of all available cluster key files
+ * @throws IOException if there is an error getting the cluster key files
+ */
+ public List<Path> getAllSystemKeyFiles() throws IOException {
+ if (!isKeyManagementEnabled()) {
+ return null;
+ }
+ FileSystem fs = getServer().getFileSystem();
+ Map<Integer, Path> clusterKeys = new TreeMap<>(Comparator.reverseOrder());
+ for (FileStatus st : fs.globStatus(new Path(systemKeyDir,
+ SYSTEM_KEY_FILE_PREFIX + "*"))) {
+ Path keyPath = st.getPath();
+ int seqNum = extractSystemKeySeqNum(keyPath);
+ clusterKeys.put(seqNum, keyPath);
+ }
+
+ return new ArrayList<>(clusterKeys.values());
+ }
+
+ public ManagedKeyData loadSystemKey(Path keyPath) throws IOException {
+ ManagedKeyProvider provider = getKeyProvider();
+ ManagedKeyData keyData = provider.unwrapKey(loadKeyMetadata(keyPath), null);
+ if (keyData == null) {
+ throw new RuntimeException("Failed to load system key from: " + keyPath);
+ }
+ return keyData;
+ }
+
+ @InterfaceAudience.Private
+ public static int extractSystemKeySeqNum(Path keyPath) throws IOException {
+ if (keyPath.getName().startsWith(SYSTEM_KEY_FILE_PREFIX)) {
+ try {
+ return Integer.parseInt(keyPath.getName().substring(SYSTEM_KEY_FILE_PREFIX.length()));
+ }
+ catch (NumberFormatException e) {
+ LOG.error("Invalid file name for a cluster key: {}", keyPath, e);
+ }
+ }
+ throw new IOException("Couldn't parse key file name: " + keyPath.getName());
+ }
+
+ /**
+ * Extract the key sequence number from the cluster key file name.
+ * @param clusterKeyFile the path to the cluster key file
+ * @return The sequence, or {@code -1} if the file name has no sequence suffix.
+ * @throws IOException if the sequence suffix cannot be parsed as a number
+ */
+ @InterfaceAudience.Private
+ public static int extractKeySequence(Path clusterKeyFile) throws IOException {
+ int keySeq = -1;
+ if (clusterKeyFile.getName().startsWith(SYSTEM_KEY_FILE_PREFIX)) {
+ String seqStr = clusterKeyFile.getName().substring(SYSTEM_KEY_FILE_PREFIX.length());
+ if (! seqStr.isEmpty()) {
+ try {
+ keySeq = Integer.parseInt(seqStr);
+ } catch (NumberFormatException e) {
+ throw new IOException("Invalid file name for a cluster key: " + clusterKeyFile, e);
+ }
+ }
+ }
+ return keySeq;
+ }
+
+ protected String loadKeyMetadata(Path keyPath) throws IOException {
+ try (FSDataInputStream fin = getServer().getFileSystem().open(keyPath)) {
+ return fin.readUTF();
+ }
+ }
+}
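+
+// Naming sketch (file names illustrative; the real prefix is HConstants.SYSTEM_KEY_FILE_PREFIX):
+// system key files are sequence-numbered, and getAllSystemKeyFiles() returns them newest
+// first, e.g.
+//
+//   <systemKeyDir>/system.key.2   -> extractKeySequence(...) == 2 (latest)
+//   <systemKeyDir>/system.key.1   -> extractKeySequence(...) == 1
+//   <systemKeyDir>/system.key.0   -> extractKeySequence(...) == 0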
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyCache.java
new file mode 100644
index 000000000000..d1e3eb048a9b
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/keymeta/SystemKeyCache.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings("checkstyle:FinalClass") // as otherwise it breaks mocking.
+@InterfaceAudience.Private
+public class SystemKeyCache {
+ private static final Logger LOG = LoggerFactory.getLogger(SystemKeyCache.class);
+
+ private final ManagedKeyData latestSystemKey;
+ private final Map<Long, ManagedKeyData> systemKeys;
+
+ /**
+ * Construct the System Key cache from the specified accessor.
+ * @param accessor the accessor to use to load the system keys
+ * @return the cache or {@code null} if no keys are found.
+ * @throws IOException if there is an error loading the system keys
+ */
+ public static SystemKeyCache createCache(SystemKeyAccessor accessor) throws IOException {
+ List<Path> allSystemKeyFiles = accessor.getAllSystemKeyFiles();
+ if (allSystemKeyFiles.isEmpty()) {
+ LOG.warn("No system key files found, skipping cache creation");
+ return null;
+ }
+ ManagedKeyData latestSystemKey = null;
+ Map<Long, ManagedKeyData> systemKeys = new TreeMap<>();
+ for (Path keyPath: allSystemKeyFiles) {
+ LOG.info("Loading system key from: {}", keyPath);
+ ManagedKeyData keyData = accessor.loadSystemKey(keyPath);
+ if (latestSystemKey == null) {
+ latestSystemKey = keyData;
+ }
+ systemKeys.put(keyData.getKeyChecksum(), keyData);
+ }
+ return new SystemKeyCache(systemKeys, latestSystemKey);
+ }
+
+ private SystemKeyCache(Map systemKeys, ManagedKeyData latestSystemKey) {
+ this.systemKeys = systemKeys;
+ this.latestSystemKey = latestSystemKey;
+ }
+
+ public ManagedKeyData getLatestSystemKey() {
+ return latestSystemKey;
+ }
+
+ public ManagedKeyData getSystemKeyByChecksum(long checksum) {
+ return systemKeys.get(checksum);
+ }
+}
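+
+// Bootstrap sketch (assumes key management is enabled, so the accessor returns a non-null
+// file list):
+//
+//   SystemKeyCache cache = SystemKeyCache.createCache(new SystemKeyAccessor(server));
+//   if (cache != null) {
+//     ManagedKeyData stk = cache.getLatestSystemKey();         // wraps new DEKs
+//     cache.getSystemKeyByChecksum(stk.getKeyChecksum());      // unwraps stored DEKs
+//   }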
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 7ccbb4d614ab..e4bc0f2ea768 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -122,6 +122,7 @@
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
+import org.apache.hadoop.hbase.keymeta.KeymetaMasterService;
import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
@@ -355,6 +356,8 @@ public class HMaster extends HBaseServerBase<MasterRpcServices> implements MasterServices
// file system manager for the master FS operations
private MasterFileSystem fileSystemManager;
private MasterWalManager walManager;
+ private SystemKeyManager systemKeyManager;
+ private KeymetaMasterService keymetaMasterService;
// manager to manage procedure-based WAL splitting, can be null if current
// is zk-based WAL splitting. SplitWALManager will replace SplitLogManager
@@ -992,6 +995,10 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE
ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
this.clusterId = clusterId.toString();
+ systemKeyManager = new SystemKeyManager(this);
+ systemKeyManager.ensureSystemKeyInitialized();
+ buildSystemKeyCache();
+
// Precaution. Put in place the old hbck1 lock file to fence out old hbase1s running their
// hbck1s against an hbase2 cluster; it could do damage. To skip this behavior, set
// hbase.write.hbck1.lock.file to false.
@@ -1031,6 +1038,9 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE
Map<Class<?>, List<Procedure<MasterProcedureEnv>>> procsByType = procedureExecutor
.getActiveProceduresNoCopy().stream().collect(Collectors.groupingBy(p -> p.getClass()));
+ keymetaMasterService = new KeymetaMasterService(this);
+ keymetaMasterService.init();
+
// Create Assignment Manager
this.assignmentManager = createAssignmentManager(this, masterRegion);
this.assignmentManager.start();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 5a43cd98feb9..0ffbfd15c41d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -66,6 +66,7 @@ public class MasterFileSystem {
private final FileSystem walFs;
// root log directory on the FS
private final Path rootdir;
+ private final Path systemKeyDir;
// hbase temp directory used for table construction and deletion
private final Path tempdir;
// root hbase directory on the FS
@@ -96,6 +97,7 @@ public MasterFileSystem(Configuration conf) throws IOException {
// default localfs. Presumption is that rootdir is fully-qualified before
// we get to here with appropriate fs scheme.
this.rootdir = CommonFSUtils.getRootDir(conf);
+ this.systemKeyDir = CommonFSUtils.getSystemKeyDir(conf);
this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY);
// Cover both bases, the old way of setting default fs and the new.
// We're supposed to run on 0.20 and 0.21 anyways.
@@ -134,6 +136,7 @@ private void createInitialFileSystemLayout() throws IOException {
HConstants.CORRUPT_DIR_NAME, ReplicationUtils.REMOTE_WAL_DIR_NAME };
// check if the root directory exists
checkRootDir(this.rootdir, conf, this.fs);
+ checkSubDir(this.systemKeyDir, HBASE_DIR_PERMS);
// Check the directories under rootdir.
checkTempDir(this.tempdir, conf, this.fs);
@@ -158,6 +161,7 @@ private void createInitialFileSystemLayout() throws IOException {
if (isSecurityEnabled) {
fs.setPermission(new Path(rootdir, HConstants.VERSION_FILE_NAME), secureRootFilePerms);
fs.setPermission(new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME), secureRootFilePerms);
+ fs.setPermission(systemKeyDir, secureRootFilePerms);
}
FsPermission currentRootPerms = fs.getFileStatus(this.rootdir).getPermission();
if (
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java
index 18dfc7d493bf..99a373c8262f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java
@@ -21,7 +21,6 @@
import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_MAX_SPLITTER;
import static org.apache.hadoop.hbase.master.MasterWalManager.META_FILTER;
import static org.apache.hadoop.hbase.master.MasterWalManager.NON_META_FILTER;
-
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SystemKeyManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SystemKeyManager.java
new file mode 100644
index 000000000000..45b021c77feb
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SystemKeyManager.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import static org.apache.hadoop.hbase.HConstants.SYSTEM_KEY_FILE_PREFIX;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.UUID;
+
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyProvider;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.keymeta.SystemKeyAccessor;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class SystemKeyManager extends SystemKeyAccessor {
+ private final MasterServices master;
+
+ public SystemKeyManager(MasterServices master) throws IOException {
+ super(master);
+ this.master = master;
+ }
+
+ public void ensureSystemKeyInitialized() throws IOException {
+ if (! isKeyManagementEnabled()) {
+ return;
+ }
+ List<Path> clusterKeys = getAllSystemKeyFiles();
+ if (clusterKeys.isEmpty()) {
+ LOG.info("Initializing System Key for the first time");
+ // Double check for cluster key as another HMaster might have succeeded.
+ if (rotateSystemKey(null, clusterKeys) == null &&
+ getAllSystemKeyFiles().isEmpty()) {
+ throw new RuntimeException("Failed to generate or save System Key");
+ }
+ }
+ else if (rotateSystemKeyIfChanged() != null) {
+ LOG.info("System key has been rotated");
+ }
+ else {
+ LOG.info("System key is already initialized and unchanged");
+ }
+ }
+
+ public ManagedKeyData rotateSystemKeyIfChanged() throws IOException {
+ if (! isKeyManagementEnabled()) {
+ return null;
+ }
+ Pair<Path, List<Path>> latestFileResult = getLatestSystemKeyFile();
+ Path latestFile = latestFileResult.getFirst();
+ String latestKeyMetadata = loadKeyMetadata(latestFile);
+ return rotateSystemKey(latestKeyMetadata, latestFileResult.getSecond());
+ }
+
+ private ManagedKeyData rotateSystemKey(String currentKeyMetadata, List<Path> allSystemKeyFiles)
+ throws IOException {
+ ManagedKeyProvider provider = getKeyProvider();
+ ManagedKeyData clusterKey = provider.getSystemKey(
+ master.getMasterFileSystem().getClusterId().toString().getBytes());
+ if (clusterKey == null) {
+ throw new IOException("Failed to get system key for cluster id: " +
+ master.getMasterFileSystem().getClusterId().toString());
+ }
+ if (clusterKey.getKeyState() != ManagedKeyState.ACTIVE) {
+ throw new IOException("System key is expected to be ACTIVE but it is: " +
+ clusterKey.getKeyState() + " for metadata: " + clusterKey.getKeyMetadata());
+ }
+ if (clusterKey.getKeyMetadata() == null) {
+ throw new IOException("System key is expected to have metadata but it is null");
+ }
+ if (! clusterKey.getKeyMetadata().equals(currentKeyMetadata) &&
+ saveLatestSystemKey(clusterKey.getKeyMetadata(), allSystemKeyFiles)) {
+ return clusterKey;
+ }
+ return null;
+ }
+
+ private boolean saveLatestSystemKey(String keyMetadata, List<Path> allSystemKeyFiles)
+ throws IOException {
+ int nextSystemKeySeq = (allSystemKeyFiles.isEmpty() ? -1
+ : SystemKeyAccessor.extractKeySequence(allSystemKeyFiles.get(0))) + 1;
+ LOG.info("Trying to save a new cluster key at seq: {}", nextSystemKeySeq);
+ MasterFileSystem masterFS = master.getMasterFileSystem();
+ Path nextSystemKeyPath = new Path(systemKeyDir,
+ SYSTEM_KEY_FILE_PREFIX + nextSystemKeySeq);
+ Path tempSystemKeyFile = new Path(masterFS.getTempDir(),
+ nextSystemKeyPath.getName() + UUID.randomUUID());
+ try (FSDataOutputStream fsDataOutputStream = masterFS.getFileSystem()
+ .create(tempSystemKeyFile)) {
+ fsDataOutputStream.writeUTF(keyMetadata);
+ boolean succeeded = masterFS.getFileSystem().rename(tempSystemKeyFile, nextSystemKeyPath);
+ if (succeeded) {
+ LOG.info("System key save succeeded for seq: {}", nextSystemKeySeq);
+ } else {
+ LOG.error("System key save failed for seq: {}", nextSystemKeySeq);
+ }
+ return succeeded;
+ }
+ finally {
+ masterFS.getFileSystem().delete(tempSystemKeyFile, false);
+ }
+ }
+}
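+
+// Rotation sketch (illustrative): on active-master startup the manager either bootstraps
+// the first system key or writes a new sequence file only when the provider reports
+// different key metadata than the latest file on disk.
+//
+//   SystemKeyManager mgr = new SystemKeyManager(master);
+//   mgr.ensureSystemKeyInitialized();   // no-op when metadata is unchanged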
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index a4105a31bfac..c9f231a5adef 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -120,6 +120,7 @@
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
import org.apache.hadoop.hbase.log.HBaseMarkers;
import org.apache.hadoop.hbase.mob.MobFileCache;
import org.apache.hadoop.hbase.mob.RSMobFileCleanerChore;
@@ -596,7 +597,6 @@ protected RegionServerCoprocessorHost getCoprocessorHost() {
return getRegionServerCoprocessorHost();
}
- @Override
protected boolean canCreateBaseZNode() {
return !clusterMode();
}
@@ -1449,6 +1449,9 @@ protected void handleReportForDutyResponse(final RegionServerStartupResponse c)
initializeFileSystem();
}
+ buildSystemKeyCache();
+ managedKeyDataCache = new ManagedKeyDataCache(this.getConfiguration(), keymetaAdmin);
+
// hack! Maps DFSClient => RegionServer for logs. HDFS made this
// config param for task trackers, but we can piggyback off of it.
if (this.conf.get("mapreduce.task.attempt.id") == null) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
index a4ca20fa7311..65e8aa5e66e2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
@@ -41,6 +41,9 @@
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.keymeta.KeymetaAdmin;
+import org.apache.hadoop.hbase.keymeta.ManagedKeyDataCache;
+import org.apache.hadoop.hbase.keymeta.SystemKeyCache;
import org.apache.hadoop.hbase.master.replication.OfflineTableReplicationQueueStorage;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationGroupOffset;
@@ -366,6 +369,18 @@ public ChoreService getChoreService() {
return null;
}
+ @Override public SystemKeyCache getSystemKeyCache() {
+ return null;
+ }
+
+ @Override public ManagedKeyDataCache getManagedKeyDataCache() {
+ return null;
+ }
+
+ @Override public KeymetaAdmin getKeymetaAdmin() {
+ return null;
+ }
+
@Override
public FileSystem getFileSystem() {
return null;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java
index 5f9433a3f141..92b5f340a610 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/SecurityUtil.java
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hbase.security;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
@@ -45,4 +47,14 @@ public static String getPrincipalWithoutRealm(final String principal) {
int i = principal.indexOf("@");
return (i > -1) ? principal.substring(0, i) : principal;
}
+
+ /**
+ * From the given configuration, determine if key management is enabled.
+ * @param conf the configuration to check
+ * @return true if key management is enabled
+ */
+ public static boolean isKeyManagementEnabled(Configuration conf) {
+ return conf.getBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY,
+ HConstants.CRYPTO_MANAGED_KEYS_DEFAULT_ENABLED);
+ }
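+
+ // Typical guard (sketch): callers check this once and skip all keymeta work when the
+ // feature is off.
+ //
+ //   if (!SecurityUtil.isKeyManagementEnabled(conf)) {
+ //     return; // key management disabled; nothing to initialize
+ //   }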
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/DummyKeyProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/DummyKeyProvider.java
new file mode 100644
index 000000000000..2d8ae446da3a
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/DummyKeyProvider.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import java.security.Key;
+
+import org.apache.hadoop.hbase.io.crypto.KeyProvider;
+
+public class DummyKeyProvider implements KeyProvider {
+ @Override
+ public void init(String params) {
+ }
+
+ @Override
+ public Key[] getKeys(String[] aliases) {
+ return null;
+ }
+
+ @Override
+ public Key getKey(String alias) {
+ return null;
+ }
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyProviderInterceptor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyProviderInterceptor.java
new file mode 100644
index 000000000000..b7549007b371
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyProviderInterceptor.java
@@ -0,0 +1,75 @@
+package org.apache.hadoop.hbase.keymeta;
+
+import java.io.IOException;
+import java.security.Key;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;
+import org.mockito.Mockito;
+
+public class ManagedKeyProviderInterceptor extends MockManagedKeyProvider {
+ public final MockManagedKeyProvider delegate;
+ public final MockManagedKeyProvider spy;
+
+ public ManagedKeyProviderInterceptor() {
+ this.delegate = new MockManagedKeyProvider();
+ this.spy = Mockito.spy(delegate);
+ }
+
+ @Override
+ public void initConfig(Configuration conf) {
+ spy.initConfig(conf);
+ }
+
+ @Override
+ public ManagedKeyData getManagedKey(byte[] custodian, String namespace) throws IOException {
+ return spy.getManagedKey(custodian, namespace);
+ }
+
+ @Override
+ public ManagedKeyData getSystemKey(byte[] systemId) throws IOException {
+ return spy.getSystemKey(systemId);
+ }
+
+ @Override
+ public ManagedKeyData unwrapKey(String keyMetadata, byte[] wrappedKey) throws IOException {
+ return spy.unwrapKey(keyMetadata, wrappedKey);
+ }
+
+ @Override
+ public void init(String params) {
+ spy.init(params);
+ }
+
+ @Override
+ public Key getKey(String alias) {
+ return spy.getKey(alias);
+ }
+
+ @Override
+ public Key[] getKeys(String[] aliases) {
+ return spy.getKeys(aliases);
+ }
+
+ @Override
+ public void setMockedKeyState(String alias, ManagedKeyState state) {
+ delegate.setMockedKeyState(alias, state);
+ }
+
+ @Override
+ public void setMultikeyGenMode(boolean multikeyGenMode) {
+ delegate.setMultikeyGenMode(multikeyGenMode);
+ }
+
+ @Override
+ public ManagedKeyData getLastGeneratedKeyData(String alias, String keyNamespace) {
+ return delegate.getLastGeneratedKeyData(alias, keyNamespace);
+ }
+
+ @Override
+ public void setMockedKey(String alias, java.security.Key key, String keyNamespace) {
+ delegate.setMockedKey(alias, key, keyNamespace);
+ }
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java
new file mode 100644
index 000000000000..a0147e6e4e2e
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/ManagedKeyTestBase.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;
+import org.junit.After;
+import org.junit.Before;
+
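+/**
+ * Base class for key-management minicluster tests. Configures the mock managed key provider,
+ * enables managed keys, registers the keymeta service endpoint as a master coprocessor, and
+ * waits for the keymeta system table to come online before each test.
+ */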
+public class ManagedKeyTestBase {
+ protected HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
+
+ @Before
+ public void setUp() throws Exception {
+ TEST_UTIL.getConfiguration().set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY,
+ MockManagedKeyProvider.class.getName());
+ TEST_UTIL.getConfiguration().set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true");
+ TEST_UTIL.getConfiguration().set("hbase.coprocessor.master.classes",
+ KeymetaServiceEndpoint.class.getName());
+
+ // Start the minicluster
+ TEST_UTIL.startMiniCluster(1);
+ TEST_UTIL.waitFor(60000,
+ () -> TEST_UTIL.getMiniHBaseCluster().getMaster().isInitialized());
+ TEST_UTIL.waitUntilAllRegionsAssigned(KeymetaTableAccessor.KEY_META_TABLE_NAME);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ TEST_UTIL.shutdownMiniCluster();
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java
new file mode 100644
index 000000000000..ab871b241830
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeyManagementBase.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
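+/**
+ * Unit test for {@link KeyManagementBase}, verifying that a configured key provider which is
+ * not a ManagedKeyProvider is rejected.
+ */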
+@Category({ MasterTests.class, SmallTests.class })
+public class TestKeyManagementBase {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(
+ TestKeyManagementBase.class);
+
+ @Test
+ public void testGetKeyProviderWithInvalidProvider() throws Exception {
+ // Setup configuration with a non-ManagedKeyProvider
+ Configuration conf = new Configuration();
+ conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY,
+ "org.apache.hadoop.hbase.keymeta.DummyKeyProvider");
+
+ Server mockServer = mock(Server.class);
+ when(mockServer.getConfiguration()).thenReturn(conf);
+
+ KeyManagementBase keyMgmt = new TestKeyManagement(mockServer);
+
+ // Should throw RuntimeException when provider is not ManagedKeyProvider
+ RuntimeException exception = assertThrows(RuntimeException.class, () -> {
+ keyMgmt.getKeyProvider();
+ });
+
+ assertTrue(exception.getMessage().contains("expected to be of type ManagedKeyProvider"));
+ }
+
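+ /** Minimal subclass used to exercise KeyManagementBase directly in tests. */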
+ private static class TestKeyManagement extends KeyManagementBase {
+ public TestKeyManagement(Server server) {
+ super(server);
+ }
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java
new file mode 100644
index 000000000000..7070596a93c0
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaEndpoint.java
@@ -0,0 +1,333 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.ACTIVE;
+import static org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeyState.KEY_ACTIVE;
+import static org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeyState.KEY_FAILED;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.ArgumentMatchers.contains;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.withSettings;
+
+import java.io.IOException;
+import java.security.KeyException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Base64;
+import java.util.List;
+import javax.crypto.spec.SecretKeySpec;
+
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.coprocessor.HasMasterServices;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.keymeta.KeymetaServiceEndpoint.KeymetaAdminServiceImpl;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.GetManagedKeysResponse;
+import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysRequest;
+import org.apache.hadoop.hbase.protobuf.generated.ManagedKeysProtos.ManagedKeysResponse;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
+
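+/**
+ * Unit tests for {@link KeymetaServiceEndpoint}, covering Base64 custodian decoding, response
+ * building, and error propagation from the underlying {@link KeymetaAdmin}.
+ */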
+@Category({ MasterTests.class, SmallTests.class })
+public class TestKeymetaEndpoint {
+
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestKeymetaEndpoint.class);
+
+ private static final String KEY_CUST = "keyCust";
+ private static final String KEY_NAMESPACE = "keyNamespace";
+ private static final String KEY_METADATA1 = "keyMetadata1";
+ private static final String KEY_METADATA2 = "keyMetadata2";
+
+ @Mock
+ private RpcController controller;
+ @Mock
+ private MasterServices master;
+ @Mock
+ private RpcCallback<GetManagedKeysResponse> done;
+ @Mock
+ private KeymetaAdmin keymetaAdmin;
+
+ private KeymetaServiceEndpoint keymetaServiceEndpoint;
+ private ManagedKeysResponse.Builder responseBuilder;
+ private ManagedKeysRequest.Builder requestBuilder;
+ private KeymetaAdminServiceImpl keyMetaAdminService;
+ private ManagedKeyData keyData1;
+ private ManagedKeyData keyData2;
+
+ @Before
+ public void setUp() throws Exception {
+ MockitoAnnotations.openMocks(this);
+ keymetaServiceEndpoint = new KeymetaServiceEndpoint();
+ CoprocessorEnvironment env = mock(CoprocessorEnvironment.class,
+ withSettings().extraInterfaces(HasMasterServices.class));
+ when(((HasMasterServices) env).getMasterServices()).thenReturn(master);
+ keymetaServiceEndpoint.start(env);
+ keyMetaAdminService = (KeymetaAdminServiceImpl) keymetaServiceEndpoint.getServices()
+ .iterator().next();
+ responseBuilder = ManagedKeysResponse.newBuilder().setKeyState(KEY_ACTIVE);
+ requestBuilder = ManagedKeysRequest.newBuilder()
+ .setKeyNamespace(ManagedKeyData.KEY_SPACE_GLOBAL);
+ keyData1 = new ManagedKeyData(KEY_CUST.getBytes(), KEY_NAMESPACE,
+ new SecretKeySpec("key1".getBytes(), "AES"), ACTIVE, KEY_METADATA1);
+ keyData2 = new ManagedKeyData(KEY_CUST.getBytes(), KEY_NAMESPACE,
+ new SecretKeySpec("key2".getBytes(), "AES"), ACTIVE, KEY_METADATA2);
+ when(master.getKeymetaAdmin()).thenReturn(keymetaAdmin);
+ }
+
+ @Test
+ public void testConvertToKeyCustBytesValid() {
+ // Arrange
+ String validBase64 = Base64.getEncoder().encodeToString("testKey".getBytes());
+ ManagedKeysRequest request = requestBuilder.setKeyCust(validBase64).build();
+
+ // Act
+ byte[] result =
+ KeymetaServiceEndpoint.convertToKeyCustBytes(controller, request, responseBuilder);
+
+ // Assert
+ assertNotNull(result);
+ assertArrayEquals("testKey".getBytes(), result);
+ assertEquals(KEY_ACTIVE, responseBuilder.getKeyState());
+ verify(controller, never()).setFailed(anyString());
+ }
+
+ @Test
+ public void testConvertToKeyCustBytesInvalid() {
+ // Arrange
+ String invalidBase64 = "invalid!Base64@String";
+ ManagedKeysRequest request = requestBuilder.setKeyCust(invalidBase64).build();
+
+ // Act
+ byte[] result = KeymetaServiceEndpoint.convertToKeyCustBytes(controller, request,
+ responseBuilder);
+
+ // Assert
+ assertNull(result);
+ assertEquals(KEY_FAILED, responseBuilder.getKeyState());
+ verify(controller).setFailed(anyString());
+ }
+
+ @Test
+ public void testGetResponseBuilder() {
+ // Arrange
+ String keyCust = Base64.getEncoder().encodeToString("testKey".getBytes());
+ String keyNamespace = "testNamespace";
+ ManagedKeysRequest request = requestBuilder.setKeyCust(keyCust)
+ .setKeyNamespace(keyNamespace)
+ .build();
+
+ // Act
+ ManagedKeysResponse.Builder result = KeymetaServiceEndpoint.getResponseBuilder(controller,
+ request);
+
+ // Assert
+ assertNotNull(result);
+ assertEquals(keyNamespace, result.getKeyNamespace());
+ assertArrayEquals("testKey".getBytes(), result.getKeyCustBytes().toByteArray());
+ verify(controller, never()).setFailed(anyString());
+ }
+
+ @Test
+ public void testGetResponseBuilderWithInvalidBase64() {
+ // Arrange
+ String keyCust = "invalidBase64!";
+ String keyNamespace = "testNamespace";
+ ManagedKeysRequest request = requestBuilder.setKeyCust(keyCust)
+ .setKeyNamespace(keyNamespace)
+ .build();
+
+ // Act
+ ManagedKeysResponse.Builder result = KeymetaServiceEndpoint.getResponseBuilder(controller,
+ request);
+
+ // Assert
+ assertNotNull(result);
+ assertEquals(keyNamespace, result.getKeyNamespace());
+ assertEquals(KEY_FAILED, result.getKeyState());
+ verify(controller).setFailed(contains("Failed to decode specified prefix as Base64 string"));
+ }
+
+ @Test
+ public void testGenerateKeyStateResponse() throws Exception {
+ // Arrange
+ responseBuilder.setKeyCustBytes(ByteString.copyFrom(keyData1.getKeyCustodian()))
+ .setKeyNamespace(keyData1.getKeyNamespace());
+ List<ManagedKeyData> managedKeyStates = Arrays.asList(keyData1, keyData2);
+
+ // Act
+ GetManagedKeysResponse result = KeymetaServiceEndpoint.generateKeyStateResponse(
+ managedKeyStates, responseBuilder);
+
+ // Assert
+ assertNotNull(result);
+ assertNotNull(result.getStateList());
+ assertEquals(2, result.getStateList().size());
+ assertEquals(KEY_ACTIVE, result.getStateList().get(0).getKeyState());
+ assertEquals(0, Bytes.compareTo(keyData1.getKeyCustodian(),
+ result.getStateList().get(0).getKeyCustBytes().toByteArray()));
+ assertEquals(keyData1.getKeyNamespace(), result.getStateList().get(0).getKeyNamespace());
+ verify(controller, never()).setFailed(anyString());
+ }
+
+ @Test
+ public void testGenerateKeyStateResponse_Empty() throws Exception {
+ // Arrange
+ responseBuilder.setKeyCustBytes(ByteString.copyFrom(keyData1.getKeyCustodian()))
+ .setKeyNamespace(keyData1.getKeyNamespace());
+ List<ManagedKeyData> managedKeyStates = new ArrayList<>();
+
+ // Act
+ GetManagedKeysResponse result = KeymetaServiceEndpoint.generateKeyStateResponse(
+ managedKeyStates, responseBuilder);
+
+ // Assert
+ assertNotNull(result);
+ assertNotNull(result.getStateList());
+ assertEquals(0, result.getStateList().size());
+ verify(controller, never()).setFailed(anyString());
+ }
+
+ @Test
+ public void testGenerateKeyStateResponse_Success() throws Exception {
+ doTestServiceCallForSuccess(
+ (controller, request, done) ->
+ keyMetaAdminService.enableKeyManagement(controller, request, done));
+ }
+
+ @Test
+ public void testGetManagedKeys_Success() throws Exception {
+ doTestServiceCallForSuccess(
+ (controller, request, done) ->
+ keyMetaAdminService.getManagedKeys(controller, request, done));
+ }
+
+ private void doTestServiceCallForSuccess(ServiceCall svc) throws Exception {
+ // Arrange
+ ManagedKeysRequest request = requestBuilder.setKeyCust(KEY_CUST).build();
+ List<ManagedKeyData> managedKeyStates = Arrays.asList(keyData1);
+ when(keymetaAdmin.enableKeyManagement(any(), any())).thenReturn(managedKeyStates);
+ when(keymetaAdmin.getManagedKeys(any(), any())).thenReturn(managedKeyStates);
+
+ // Act
+ svc.call(controller, request, done);
+
+ // Assert
+ verify(done).run(any());
+ verify(controller, never()).setFailed(anyString());
+ }
+
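+ /** Test hook abstracting which endpoint RPC method is invoked. */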
+ private interface ServiceCall {
+ void call(RpcController controller, ManagedKeysRequest request,
+ RpcCallback<GetManagedKeysResponse> done) throws Exception;
+ }
+
+ @Test
+ public void testGenerateKeyStateResponse_InvalidCust() throws Exception {
+ // Arrange
+ String invalidBase64 = "invalid!Base64@String";
+ ManagedKeysRequest request = requestBuilder.setKeyCust(invalidBase64).build();
+
+ // Act
+ keyMetaAdminService.enableKeyManagement(controller, request, done);
+
+ // Assert
+ verify(controller).setFailed(contains("IOException"));
+ verify(keymetaAdmin, never()).enableKeyManagement(any(), any());
+ verify(done, never()).run(any());
+ }
+
+ @Test
+ public void testGenerateKeyStateResponse_IOException() throws Exception {
+ // Arrange
+ when(keymetaAdmin.enableKeyManagement(any(), any())).thenThrow(IOException.class);
+ ManagedKeysRequest request = requestBuilder.setKeyCust(KEY_CUST).build();
+
+ // Act
+ keyMetaAdminService.enableKeyManagement(controller, request, done);
+
+ // Assert
+ verify(controller).setFailed(contains("IOException"));
+ verify(keymetaAdmin).enableKeyManagement(any(), any());
+ verify(done, never()).run(any());
+ }
+
+ @Test
+ public void testGetManagedKeys_IOException() throws Exception {
+ doTestGetManagedKeysError(IOException.class);
+ }
+
+ @Test
+ public void testGetManagedKeys_KeyException() throws Exception {
+ doTestGetManagedKeysError(KeyException.class);
+ }
+
+ private void doTestGetManagedKeysError(Class<? extends Exception> exType) throws Exception {
+ // Arrange
+ when(keymetaAdmin.getManagedKeys(any(), any())).thenThrow(exType);
+ ManagedKeysRequest request = requestBuilder.setKeyCust(KEY_CUST).build();
+
+ // Act
+ keyMetaAdminService.getManagedKeys(controller, request, done);
+
+ // Assert
+ verify(controller).setFailed(contains(exType.getSimpleName()));
+ verify(keymetaAdmin).getManagedKeys(any(), any());
+ verify(done, never()).run(any());
+ }
+
+ @Test
+ public void testGetManagedKeys_InvalidCust() throws Exception {
+ // Arrange
+ String invalidBase64 = "invalid!Base64@String";
+ ManagedKeysRequest request = requestBuilder.setKeyCust(invalidBase64).build();
+
+ // Act
+ keyMetaAdminService.getManagedKeys(controller, request, done);
+
+ // Assert
+ verify(controller).setFailed(contains("IOException"));
+ verify(keymetaAdmin, never()).getManagedKeys(any(), any());
+ verify(done, never()).run(any());
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaMasterService.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaMasterService.java
new file mode 100644
index 000000000000..f34d482d7940
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaMasterService.java
@@ -0,0 +1,200 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import static org.junit.Assert.assertThrows;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableDescriptors;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+
+/**
+ * Tests for KeymetaMasterService class
+ */
+@Category({ MasterTests.class, SmallTests.class })
+public class TestKeymetaMasterService {
+
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestKeymetaMasterService.class);
+
+ @Mock
+ private MasterServices mockMaster;
+ @Mock
+ private TableDescriptors mockTableDescriptors;
+
+ private Configuration conf;
+ private KeymetaMasterService service;
+ private AutoCloseable closeableMocks;
+
+ @Before
+ public void setUp() throws Exception {
+ closeableMocks = MockitoAnnotations.openMocks(this);
+
+ conf = new Configuration();
+ when(mockMaster.getConfiguration()).thenReturn(conf);
+ when(mockMaster.getTableDescriptors()).thenReturn(mockTableDescriptors);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ if (closeableMocks != null) {
+ closeableMocks.close();
+ }
+ }
+
+ @Test
+ public void testInitWithKeyManagementDisabled() throws Exception {
+ // Setup - disable key management
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, false);
+
+ service = new KeymetaMasterService(mockMaster);
+
+ // Execute
+ service.init(); // Should return early without creating table
+
+ // Verify - no table operations should be performed
+ verify(mockMaster, never()).getTableDescriptors();
+ verify(mockMaster, never()).createSystemTable(any());
+ }
+
+ @Test
+ public void testInitWithKeyManagementEnabledAndTableExists() throws Exception {
+ // Setup - enable key management and table already exists
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
+ when(mockTableDescriptors.exists(KeymetaTableAccessor.KEY_META_TABLE_NAME)).thenReturn(true);
+
+ service = new KeymetaMasterService(mockMaster);
+
+ // Execute
+ service.init();
+
+ // Verify - table exists check is performed but no table creation
+ verify(mockMaster).getTableDescriptors();
+ verify(mockTableDescriptors).exists(KeymetaTableAccessor.KEY_META_TABLE_NAME);
+ verify(mockMaster, never()).createSystemTable(any());
+ }
+
+ @Test
+ public void testInitWithKeyManagementEnabledAndTableDoesNotExist() throws Exception {
+ // Setup - enable key management and table does not exist
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
+ when(mockTableDescriptors.exists(KeymetaTableAccessor.KEY_META_TABLE_NAME)).thenReturn(false);
+
+ service = new KeymetaMasterService(mockMaster);
+
+ // Execute
+ service.init();
+
+ // Verify - table is created
+ verify(mockMaster).getTableDescriptors();
+ verify(mockTableDescriptors).exists(KeymetaTableAccessor.KEY_META_TABLE_NAME);
+ verify(mockMaster).createSystemTable(any(TableDescriptor.class));
+ }
+
+ @Test
+ public void testInitWithTableDescriptorsIOException() throws Exception {
+ // Setup - enable key management but table descriptors throws IOException
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
+ when(mockTableDescriptors.exists(any(TableName.class)))
+ .thenThrow(new IOException("Table descriptors error"));
+
+ service = new KeymetaMasterService(mockMaster);
+
+ // Execute & Verify - IOException should propagate
+ assertThrows(IOException.class, () -> service.init());
+
+ verify(mockMaster).getTableDescriptors();
+ verify(mockTableDescriptors).exists(KeymetaTableAccessor.KEY_META_TABLE_NAME);
+ verify(mockMaster, never()).createSystemTable(any());
+ }
+
+ @Test
+ public void testInitWithCreateSystemTableIOException() throws Exception {
+ // Setup - enable key management, table doesn't exist, but createSystemTable throws IOException
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
+ when(mockTableDescriptors.exists(KeymetaTableAccessor.KEY_META_TABLE_NAME)).thenReturn(false);
+ when(mockMaster.createSystemTable(any(TableDescriptor.class)))
+ .thenThrow(new IOException("Create table error"));
+
+ service = new KeymetaMasterService(mockMaster);
+
+ // Execute & Verify - IOException should propagate
+ assertThrows(IOException.class, () -> service.init());
+
+ verify(mockMaster).getTableDescriptors();
+ verify(mockTableDescriptors).exists(KeymetaTableAccessor.KEY_META_TABLE_NAME);
+ verify(mockMaster).createSystemTable(any(TableDescriptor.class));
+ }
+
+ @Test
+ public void testConstructorWithMasterServices() throws Exception {
+ // Execute
+ service = new KeymetaMasterService(mockMaster);
+
+ // Verify - constructor should not throw an exception
+ // The service should be created successfully (no exceptions = success)
+ // We don't verify internal calls since the constructor just stores references
+ }
+
+ @Test
+ public void testMultipleInitCalls() throws Exception {
+ // Setup - enable key management and table exists
+ conf.setBoolean(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, true);
+ when(mockTableDescriptors.exists(KeymetaTableAccessor.KEY_META_TABLE_NAME)).thenReturn(true);
+
+ service = new KeymetaMasterService(mockMaster);
+
+ // Execute - call init multiple times
+ service.init();
+ service.init();
+ service.init();
+
+ // Verify - each call should check table existence (idempotent behavior)
+ verify(mockMaster, times(3)).getTableDescriptors();
+ verify(mockTableDescriptors, times(3)).exists(KeymetaTableAccessor.KEY_META_TABLE_NAME);
+ verify(mockMaster, never()).createSystemTable(any());
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java
new file mode 100644
index 000000000000..3c80f928c9e1
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/keymeta/TestKeymetaTableAccessor.java
@@ -0,0 +1,483 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.keymeta;
+
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyData.KEY_SPACE_GLOBAL;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.ACTIVE;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.DISABLED;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.FAILED;
+import static org.apache.hadoop.hbase.io.crypto.ManagedKeyState.INACTIVE;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.DEK_CHECKSUM_QUAL_BYTES;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.DEK_METADATA_QUAL_BYTES;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.DEK_WRAPPED_BY_STK_QUAL_BYTES;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.KEY_META_INFO_FAMILY;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.KEY_STATE_QUAL_BYTES;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.READ_OP_COUNT_QUAL_BYTES;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.REFRESHED_TIMESTAMP_QUAL_BYTES;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.STK_CHECKSUM_QUAL_BYTES;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.WRITE_OP_COUNT_QUAL_BYTES;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.constructRowKeyForMetadata;
+import static org.apache.hadoop.hbase.keymeta.KeymetaTableAccessor.parseFromResult;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.anyLong;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyData;
+import org.apache.hadoop.hbase.io.crypto.ManagedKeyState;
+import org.apache.hadoop.hbase.io.crypto.MockManagedKeyProvider;
+import org.apache.hadoop.hbase.security.EncryptionUtil;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.BlockJUnit4ClassRunner;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Suite;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+
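+/**
+ * Unit tests for {@link KeymetaTableAccessor} against a mocked connection and keymeta table,
+ * organized as a suite of add/get/ops test classes.
+ */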
+@RunWith(Suite.class)
+@Suite.SuiteClasses({
+ TestKeymetaTableAccessor.TestAdd.class,
+ TestKeymetaTableAccessor.TestAddWithNullableFields.class,
+ TestKeymetaTableAccessor.TestGet.class,
+ TestKeymetaTableAccessor.TestOps.class,
+})
+@Category({ MasterTests.class, SmallTests.class })
+public class TestKeymetaTableAccessor {
+ protected static final String ALIAS = "custId1";
+ protected static final byte[] CUST_ID = ALIAS.getBytes();
+ protected static final String KEY_NAMESPACE = "namespace";
+ protected static String KEY_METADATA = "metadata1";
+
+ @Mock
+ protected Server server;
+ @Mock
+ protected Connection connection;
+ @Mock
+ protected Table table;
+ @Mock
+ protected ResultScanner scanner;
+ @Mock
+ protected SystemKeyCache systemKeyCache;
+
+ protected KeymetaTableAccessor accessor;
+ protected Configuration conf = HBaseConfiguration.create();
+ protected MockManagedKeyProvider managedKeyProvider;
+ protected ManagedKeyData latestSystemKey;
+
+ private AutoCloseable closeableMocks;
+
+ @Before
+ public void setUp() throws Exception {
+ closeableMocks = MockitoAnnotations.openMocks(this);
+
+ conf.set(HConstants.CRYPTO_MANAGED_KEYS_ENABLED_CONF_KEY, "true");
+ conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, MockManagedKeyProvider.class.getName());
+
+ when(server.getConnection()).thenReturn(connection);
+ when(connection.getTable(KeymetaTableAccessor.KEY_META_TABLE_NAME)).thenReturn(table);
+ when(server.getSystemKeyCache()).thenReturn(systemKeyCache);
+ when(server.getConfiguration()).thenReturn(conf);
+
+ accessor = new KeymetaTableAccessor(server);
+ managedKeyProvider = new MockManagedKeyProvider();
+ managedKeyProvider.initConfig(conf);
+
+ latestSystemKey = managedKeyProvider.getSystemKey("system-id".getBytes());
+ when(systemKeyCache.getLatestSystemKey()).thenReturn(latestSystemKey);
+ when(systemKeyCache.getSystemKeyByChecksum(anyLong())).thenReturn(latestSystemKey);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ closeableMocks.close();
+ }
+
+ @RunWith(Parameterized.class)
+ @Category({ MasterTests.class, SmallTests.class })
+ public static class TestAdd extends TestKeymetaTableAccessor {
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestAdd.class);
+
+ @Parameter(0)
+ public ManagedKeyState keyState;
+
+ @Parameterized.Parameters(name = "{index},keyState={0}")
+ public static Collection