Skip to content

Commit

Permalink
[test](vault) Add more regression tests for storage vault
Browse files Browse the repository at this point in the history
* Add case sensitive test
* Add kerberos test
* Add concurrent test
  • Loading branch information
SWJTU-ZhangLei committed Feb 10, 2025
1 parent e8c44a7 commit 75c0bf7
Show file tree
Hide file tree
Showing 14 changed files with 1,068 additions and 8 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,8 @@
import org.apache.doris.mysql.privilege.PrivPredicate;
import org.apache.doris.qe.ConnectContext;

import com.google.common.base.Strings;

import java.util.Map;

// CREATE [EXTERNAL] RESOURCE resource_name
Expand Down Expand Up @@ -69,8 +71,13 @@ public ResourceType getResourceType() {
}

public void analyzeResourceType() throws UserException {
String type = properties.get(TYPE);
if (type == null) {
String type = null;
for (Map.Entry<String, String> property : properties.entrySet()) {
if (property.getKey().equalsIgnoreCase(TYPE)) {
type = property.getValue();
}
}
if (Strings.isNullOrEmpty(type)) {
throw new AnalysisException("Resource type can't be null");
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,8 @@
import org.apache.doris.mysql.privilege.PrivPredicate;
import org.apache.doris.qe.ConnectContext;

import com.google.common.base.Strings;

import java.util.Map;

// CREATE STORAGE VAULT vault_name
Expand Down Expand Up @@ -119,10 +121,17 @@ public void analyze(Analyzer analyzer) throws UserException {
if (properties == null || properties.isEmpty()) {
throw new AnalysisException("Storage Vault properties can't be null");
}
String type = properties.get(TYPE);
if (type == null) {

String type = null;
for (Map.Entry<String, String> property : properties.entrySet()) {
if (property.getKey().equalsIgnoreCase(TYPE)) {
type = property.getValue();
}
}
if (Strings.isNullOrEmpty(type)) {
throw new AnalysisException("Storage Vault type can't be null");
}

final String pathVersionString = properties.get(PATH_VERSION);
if (pathVersionString != null) {
this.pathVersion = Integer.parseInt(pathVersionString);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,17 +20,22 @@
import org.apache.doris.cloud.proto.Cloud;
import org.apache.doris.common.DdlException;
import org.apache.doris.common.security.authentication.AuthenticationConfig;
import org.apache.doris.datasource.property.constants.S3Properties;

import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;
import com.google.gson.annotations.SerializedName;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import java.util.Arrays;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

/**
* HDFS resource
Expand Down Expand Up @@ -95,20 +100,42 @@ public static Cloud.HdfsVaultInfo generateHdfsParam(Map<String, String> properti
Cloud.HdfsVaultInfo.Builder hdfsVaultInfoBuilder =
Cloud.HdfsVaultInfo.newBuilder();
Cloud.HdfsBuildConf.Builder hdfsConfBuilder = Cloud.HdfsBuildConf.newBuilder();

Set<String> lowerCaseKeys = properties.keySet().stream().map(String::toLowerCase)
.collect(Collectors.toSet());

for (Map.Entry<String, String> property : properties.entrySet()) {
if (property.getKey().equalsIgnoreCase(HADOOP_FS_NAME)) {
Preconditions.checkArgument(!Strings.isNullOrEmpty(property.getValue()),
"%s is null or empty", property.getKey());
hdfsConfBuilder.setFsName(property.getValue());
} else if (property.getKey().equalsIgnoreCase(VAULT_PATH_PREFIX)) {
hdfsVaultInfoBuilder.setPrefix(property.getValue());
} else if (property.getKey().equalsIgnoreCase(AuthenticationConfig.HADOOP_USER_NAME)) {
Preconditions.checkArgument(!Strings.isNullOrEmpty(property.getValue()),
"%s is null or empty", property.getKey());
hdfsConfBuilder.setUser(property.getValue());
} else if (property.getKey()
.equalsIgnoreCase(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION)) {
Preconditions.checkArgument(lowerCaseKeys.contains(AuthenticationConfig.HADOOP_KERBEROS_PRINCIPAL),
"%s is required for kerberos", AuthenticationConfig.HADOOP_KERBEROS_PRINCIPAL);
Preconditions.checkArgument(lowerCaseKeys.contains(AuthenticationConfig.HADOOP_KERBEROS_KEYTAB),
"%s is required for kerberos", AuthenticationConfig.HADOOP_KERBEROS_KEYTAB);
} else if (property.getKey().equalsIgnoreCase(AuthenticationConfig.HADOOP_KERBEROS_PRINCIPAL)) {
Preconditions.checkArgument(!Strings.isNullOrEmpty(property.getValue()),
"%s is null or empty", property.getKey());
hdfsConfBuilder.setHdfsKerberosPrincipal(property.getValue());
} else if (property.getKey().equalsIgnoreCase(AuthenticationConfig.HADOOP_KERBEROS_KEYTAB)) {
Preconditions.checkArgument(!Strings.isNullOrEmpty(property.getValue()),
"%s is null or empty", property.getKey());
hdfsConfBuilder.setHdfsKerberosKeytab(property.getValue());
} else if (property.getKey().equalsIgnoreCase(VAULT_NAME)) {
continue;
} else {
Preconditions.checkArgument(!property.getKey().toLowerCase().contains(S3Properties.S3_PREFIX),
"Invalid argument %s", property.getKey());
Preconditions.checkArgument(!property.getKey().toLowerCase().contains(S3Properties.PROVIDER),
"Invalid argument %s", property.getKey());
if (!nonHdfsConfPropertyKeys.contains(property.getKey().toLowerCase())) {
Cloud.HdfsBuildConf.HdfsConfKVPair.Builder conf = Cloud.HdfsBuildConf.HdfsConfKVPair.newBuilder();
conf.setKey(property.getKey());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -126,10 +126,10 @@ private void updateStorageVaultCache(String oldVaultName, String newVaultName, S
rwLock.writeLock().lock();
String cachedVaultId = vaultNameToVaultId.get(oldVaultName);
vaultNameToVaultId.remove(oldVaultName);
Preconditions.checkArgument(!Strings.isNullOrEmpty(cachedVaultId), cachedVaultId,
"Cached vault id is null or empty");
Preconditions.checkArgument(!Strings.isNullOrEmpty(cachedVaultId),
"Cached vault id %s is null or empty", cachedVaultId);
Preconditions.checkArgument(cachedVaultId.equals(vaultId),
"Cached vault id not equal to remote storage." + cachedVaultId + " - " + vaultId);
"Cached vault id not equal to remote storage. %s vs %s", cachedVaultId, vaultId);
vaultNameToVaultId.put(newVaultName, vaultId);
} finally {
rwLock.writeLock().unlock();
Expand Down
11 changes: 11 additions & 0 deletions regression-test/suites/vault_p0/alter/test_alter_vault_name.groovy
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,17 @@ suite("test_alter_vault_name", "nonConcurrent") {
}, "already existed")

// case5
expectExceptionLike({
sql """
ALTER STORAGE VAULT ${s3VaultName}
PROPERTIES (
"type" = "s3",
"VAULT_NAME" = "@#¥%*&-+=null."
);
"""
}, "Incorrect vault name")

// case6
sql """
CREATE TABLE ${hdfsVaultName} (
C_CUSTKEY INTEGER NOT NULL,
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,128 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

import java.util.stream.Collectors;
import java.util.stream.Stream;

suite("test_alter_vault_concurrently", "nonConcurrent") {
    // Regression test: fire concurrent ALTER STORAGE VAULT statements at the
    // same vault and verify that exactly one of the racing mutations wins.
    if (!isCloudMode()) {
        logger.info("skip ${name} case, because not cloud mode")
        return
    }

    if (!enableStoragevault()) {
        logger.info("skip ${name} case, because storage vault not enabled")
        return
    }

    def randomStr = UUID.randomUUID().toString().replace("-", "")
    def s3VaultName = "s3_" + randomStr

    // Create the vault both threads will mutate concurrently.
    sql """
        CREATE STORAGE VAULT ${s3VaultName}
        PROPERTIES (
            "type"="S3",
            "s3.endpoint"="${getS3Endpoint()}",
            "s3.region" = "${getS3Region()}",
            "s3.access_key" = "${getS3AK()}",
            "s3.secret_key" = "${getS3SK()}",
            "s3.root.path" = "${s3VaultName}",
            "s3.bucket" = "${getS3BucketName()}",
            "s3.external_endpoint" = "",
            "provider" = "${getS3Provider()}",
            "use_path_style" = "false"
        );
    """

    // Round 1: two concurrent renames of the same vault. try_sql is used so
    // the losing statement (old vault name no longer exists) is tolerated.
    def future1 = thread("threadName1") {
        try_sql """
            ALTER STORAGE VAULT ${s3VaultName}
            PROPERTIES (
                "type"="S3",
                "VAULT_NAME" = "${s3VaultName}_1"
            );
        """
    }

    def future2 = thread("threadName2") {
        try_sql """
            ALTER STORAGE VAULT ${s3VaultName}
            PROPERTIES (
                "type"="S3",
                "VAULT_NAME" = "${s3VaultName}_2"
            );
        """
    }

    def combineFuture = combineFutures(future1, future2)
    List<List<List<Object>>> result = combineFuture.get()
    logger.info("${result}")

    // Exactly one rename must have taken effect: the vault now exists under
    // exactly one of the two candidate names.
    def hitNum = 0
    def vaultsInfo = try_sql """ SHOW STORAGE VAULTS """
    def newS3VaultName = null

    for (int i = 0; i < vaultsInfo.size(); i++) {
        def name = vaultsInfo[i][0]
        if (name.contains(s3VaultName)) {
            hitNum++
            newS3VaultName = name
            assertTrue(name.equalsIgnoreCase("${s3VaultName}_1") || name.equalsIgnoreCase("${s3VaultName}_2"))
        }
    }
    assertEquals(hitNum, 1)

    // Round 2: two concurrent credential updates on the surviving vault.
    // NOTE(fix): the original first statement also carried a VAULT_NAME
    // property with a missing trailing comma — a SQL syntax error that made
    // thread 1 always fail under try_sql; and a rename here would invalidate
    // the newS3VaultName lookup below. Both statements now race only on the
    // credential properties, symmetrically.
    future1 = thread("threadName1") {
        try_sql """
            ALTER STORAGE VAULT ${newS3VaultName}
            PROPERTIES (
                "type"="S3",
                "s3.access_key" = "error_ak_1",
                "s3.secret_key" = "error_sk_1"
            );
        """
    }

    future2 = thread("threadName2") {
        try_sql """
            ALTER STORAGE VAULT ${newS3VaultName}
            PROPERTIES (
                "type"="S3",
                "s3.access_key" = "error_ak_2",
                "s3.secret_key" = "error_sk_2"
            );
        """
    }

    combineFuture = combineFutures(future1, future2)
    result = combineFuture.get()
    logger.info("${result}")

    // The vault must end up holding one (and only one) of the racing key pairs.
    vaultsInfo = try_sql """ SHOW STORAGE VAULTS """
    def found = false
    for (int i = 0; i < vaultsInfo.size(); i++) {
        def name = vaultsInfo[i][0]
        if (name.contains(newS3VaultName)) {
            logger.info("${vaultsInfo[i]}");
            assertTrue(vaultsInfo[i][2].contains("error_ak_1") || vaultsInfo[i][2].contains("error_ak_2"))
            found = true
        }
    }
    assertTrue(found)
}
Loading

0 comments on commit 75c0bf7

Please sign in to comment.