From 75c0bf7df923f5520eb808a393f65c82fceb2953 Mon Sep 17 00:00:00 2001
From: Lei Zhang
Date: Tue, 21 Jan 2025 18:37:38 +0800
Subject: [PATCH] [test](vault) Add more regression tests for storage vaults

* Add case-sensitive tests
* Add kerberos tests
* Add concurrency tests
---
 .../doris/analysis/CreateResourceStmt.java    |  11 +-
 .../analysis/CreateStorageVaultStmt.java      |  13 +-
 .../doris/catalog/HdfsStorageVault.java       |  27 +++
 .../apache/doris/catalog/StorageVaultMgr.java |   6 +-
 .../alter/test_alter_vault_name.groovy        |  11 +
 .../test_alter_vault_concurrently.groovy      | 128 ++++++++++
 .../test_create_vault_concurrently.groovy     | 132 ++++++++++
 .../test_default_vault_concurrenlty.groovy    | 127 ++++++++++
 .../vault_p0/create/test_create_vault.groovy  |  23 ++
 ...st_create_vault_with_case_sensitive.groovy | 228 ++++++++++++++++++
 .../test_create_vault_with_kerberos.groovy    | 106 ++++++++
 ...st_vault_privilege_with_multi_roles.groovy | 178 ++++++++++++++
 .../test_vault_privilege_with_role.groovy     |  78 +++++-
 .../test_vault_privilege_with_user.groovy     |   8 +
 14 files changed, 1068 insertions(+), 8 deletions(-)
 create mode 100644 regression-test/suites/vault_p0/concurent/test_alter_vault_concurrently.groovy
 create mode 100644 regression-test/suites/vault_p0/concurent/test_create_vault_concurrently.groovy
 create mode 100644 regression-test/suites/vault_p0/concurent/test_default_vault_concurrenlty.groovy
 create mode 100644 regression-test/suites/vault_p0/create/test_create_vault_with_case_sensitive.groovy
 create mode 100644 regression-test/suites/vault_p0/create/test_create_vault_with_kerberos.groovy
 create mode 100644 regression-test/suites/vault_p0/privilege/test_vault_privilege_with_multi_roles.groovy

diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateResourceStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateResourceStmt.java
index 4a358510fa015b..3feccbba9baedb 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateResourceStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateResourceStmt.java
@@ -30,6 +30,8 @@
 import org.apache.doris.mysql.privilege.PrivPredicate;
 import org.apache.doris.qe.ConnectContext;
 
+import com.google.common.base.Strings;
+
 import java.util.Map;
 
 // CREATE [EXTERNAL] RESOURCE resource_name
@@ -69,8 +71,13 @@ public ResourceType getResourceType() {
     }
 
     public void analyzeResourceType() throws UserException {
-        String type = properties.get(TYPE);
-        if (type == null) {
+        String type = null;
+        for (Map.Entry<String, String> property : properties.entrySet()) {
+            if (property.getKey().equalsIgnoreCase(TYPE)) {
+                type = property.getValue();
+            }
+        }
+        if (Strings.isNullOrEmpty(type)) {
             throw new AnalysisException("Resource type can't be null");
         }
 
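Both analyze methods in this patch locate the "type" property with the same case-insensitive scan over the property map. A minimal standalone Groovy sketch of that lookup pattern — the closure name and the assertions are illustrative, not part of the patch:

    // Case-insensitive lookup of the "type" property, mirroring the loop added above.
    def findTypeIgnoreCase = { Map<String, String> properties ->
        String type = null
        properties.each { k, v ->
            if (k.equalsIgnoreCase("type")) {
                type = v
            }
        }
        return type
    }

    assert findTypeIgnoreCase(["TYPE": "S3"]) == "S3"
    assert findTypeIgnoreCase(["Type": "hdfs"]) == "hdfs"
    assert findTypeIgnoreCase(["foo": "bar"]) == null

The same loop appears verbatim in both CreateResourceStmt and the CreateStorageVaultStmt diff below; only the error message differs.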
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateStorageVaultStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateStorageVaultStmt.java
index f1aff6b9ab6f19..6e414db931486d 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateStorageVaultStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateStorageVaultStmt.java
@@ -31,6 +31,8 @@
 import org.apache.doris.mysql.privilege.PrivPredicate;
 import org.apache.doris.qe.ConnectContext;
 
+import com.google.common.base.Strings;
+
 import java.util.Map;
 
 // CREATE STORAGE VAULT vault_name
@@ -119,10 +121,17 @@ public void analyze(Analyzer analyzer) throws UserException {
         if (properties == null || properties.isEmpty()) {
             throw new AnalysisException("Storage Vault properties can't be null");
         }
-        String type = properties.get(TYPE);
-        if (type == null) {
+
+        String type = null;
+        for (Map.Entry<String, String> property : properties.entrySet()) {
+            if (property.getKey().equalsIgnoreCase(TYPE)) {
+                type = property.getValue();
+            }
+        }
+        if (Strings.isNullOrEmpty(type)) {
             throw new AnalysisException("Storage Vault type can't be null");
         }
+
         final String pathVersionString = properties.get(PATH_VERSION);
         if (pathVersionString != null) {
             this.pathVersion = Integer.parseInt(pathVersionString);

diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/HdfsStorageVault.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/HdfsStorageVault.java
index 9be463ee3a1c2a..03bb0fcaef6546 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/HdfsStorageVault.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/HdfsStorageVault.java
@@ -20,10 +20,14 @@
 import org.apache.doris.cloud.proto.Cloud;
 import org.apache.doris.common.DdlException;
 import org.apache.doris.common.security.authentication.AuthenticationConfig;
+import org.apache.doris.datasource.property.constants.S3Properties;
 
+import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Maps;
 import com.google.gson.annotations.SerializedName;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 
@@ -31,6 +35,7 @@
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
+import java.util.stream.Collectors;
 
 /**
  * HDFS resource
@@ -95,20 +100,42 @@ public static Cloud.HdfsVaultInfo generateHdfsParam(Map<String, String> properties) {
         Cloud.HdfsVaultInfo.Builder hdfsVaultInfoBuilder = Cloud.HdfsVaultInfo.newBuilder();
         Cloud.HdfsBuildConf.Builder hdfsConfBuilder = Cloud.HdfsBuildConf.newBuilder();
+
+        Set<String> lowerCaseKeys = properties.keySet().stream().map(String::toLowerCase)
+                .collect(Collectors.toSet());
+
         for (Map.Entry<String, String> property : properties.entrySet()) {
             if (property.getKey().equalsIgnoreCase(HADOOP_FS_NAME)) {
+                Preconditions.checkArgument(!Strings.isNullOrEmpty(property.getValue()),
+                        "%s is null or empty", property.getKey());
                 hdfsConfBuilder.setFsName(property.getValue());
             } else if (property.getKey().equalsIgnoreCase(VAULT_PATH_PREFIX)) {
                 hdfsVaultInfoBuilder.setPrefix(property.getValue());
             } else if (property.getKey().equalsIgnoreCase(AuthenticationConfig.HADOOP_USER_NAME)) {
+                Preconditions.checkArgument(!Strings.isNullOrEmpty(property.getValue()),
+                        "%s is null or empty", property.getKey());
                 hdfsConfBuilder.setUser(property.getValue());
+            } else if (property.getKey()
+                    .equalsIgnoreCase(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION)) {
+                Preconditions.checkArgument(lowerCaseKeys.contains(AuthenticationConfig.HADOOP_KERBEROS_PRINCIPAL),
+                        "%s is required for kerberos", AuthenticationConfig.HADOOP_KERBEROS_PRINCIPAL);
+                Preconditions.checkArgument(lowerCaseKeys.contains(AuthenticationConfig.HADOOP_KERBEROS_KEYTAB),
+                        "%s is required for kerberos", AuthenticationConfig.HADOOP_KERBEROS_KEYTAB);
             } else if (property.getKey().equalsIgnoreCase(AuthenticationConfig.HADOOP_KERBEROS_PRINCIPAL)) {
+                Preconditions.checkArgument(!Strings.isNullOrEmpty(property.getValue()),
+                        "%s is null or empty", property.getKey());
                 hdfsConfBuilder.setHdfsKerberosPrincipal(property.getValue());
             } else if (property.getKey().equalsIgnoreCase(AuthenticationConfig.HADOOP_KERBEROS_KEYTAB)) {
+                Preconditions.checkArgument(!Strings.isNullOrEmpty(property.getValue()),
+                        "%s is null or empty", property.getKey());
                 hdfsConfBuilder.setHdfsKerberosKeytab(property.getValue());
             } else if (property.getKey().equalsIgnoreCase(VAULT_NAME)) {
                 continue;
             } else {
+                Preconditions.checkArgument(!property.getKey().toLowerCase().contains(S3Properties.S3_PREFIX),
+                        "Invalid argument %s", property.getKey());
+                Preconditions.checkArgument(!property.getKey().toLowerCase().contains(S3Properties.PROVIDER),
+                        "Invalid argument %s", property.getKey());
                 if (!nonHdfsConfPropertyKeys.contains(property.getKey().toLowerCase())) {
                     Cloud.HdfsBuildConf.HdfsConfKVPair.Builder conf = Cloud.HdfsBuildConf.HdfsConfKVPair.newBuilder();
                     conf.setKey(property.getKey());
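The new preconditions in generateHdfsParam amount to: fs name, user, principal, and keytab values must be non-empty; if hadoop.security.authentication is supplied under any key casing, the kerberos principal and keytab keys must both be present; and s3.*/provider keys are rejected outright. A rough Groovy sketch of the kerberos part, assuming the AuthenticationConfig constants resolve to the usual Hadoop keys "hadoop.kerberos.principal" and "hadoop.kerberos.keytab" (the helper name is illustrative):

    // Mirrors the precondition added above: when hadoop.security.authentication is
    // present (any key case), both kerberos principal and keytab must also be present.
    def checkKerberosProps = { Map<String, String> props ->
        def lowerCaseKeys = props.keySet().collect { it.toLowerCase() } as Set
        if (lowerCaseKeys.contains("hadoop.security.authentication")) {
            assert lowerCaseKeys.contains("hadoop.kerberos.principal") :
                    "hadoop.kerberos.principal is required for kerberos"
            assert lowerCaseKeys.contains("hadoop.kerberos.keytab") :
                    "hadoop.kerberos.keytab is required for kerberos"
        }
    }

    // Passes: simple-auth vault with no kerberos keys at all.
    checkKerberosProps(["fs.defaultFS": "hdfs://nn:8020", "hadoop.username": "hadoop"])
    // Would throw: authentication requested but principal/keytab missing.
    // checkKerberosProps(["HADOOP.SECURITY.AUTHENTICATION": "kerberos"])

The test_create_vault_with_kerberos.groovy suite added later in this patch exercises exactly this path through real CREATE STORAGE VAULT statements.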
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVaultMgr.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVaultMgr.java
index 5ad0417d7dd24c..e5088a7795cb2b 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVaultMgr.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/StorageVaultMgr.java
@@ -126,10 +126,10 @@ private void updateStorageVaultCache(String oldVaultName, String newVaultName, String vaultId) {
             rwLock.writeLock().lock();
             String cachedVaultId = vaultNameToVaultId.get(oldVaultName);
             vaultNameToVaultId.remove(oldVaultName);
-            Preconditions.checkArgument(!Strings.isNullOrEmpty(cachedVaultId), cachedVaultId,
-                    "Cached vault id is null or empty");
+            Preconditions.checkArgument(!Strings.isNullOrEmpty(cachedVaultId),
+                    "Cached vault id %s is null or empty", cachedVaultId);
             Preconditions.checkArgument(cachedVaultId.equals(vaultId),
-                    "Cached vault id not equal to remote storage." + cachedVaultId + " - " + vaultId);
+                    "Cached vault id not equal to remote storage. %s vs %s", cachedVaultId, vaultId);
             vaultNameToVaultId.put(newVaultName, vaultId);
         } finally {
             rwLock.writeLock().unlock();

diff --git a/regression-test/suites/vault_p0/alter/test_alter_vault_name.groovy b/regression-test/suites/vault_p0/alter/test_alter_vault_name.groovy
index e094e12056d376..c460c361b9c286 100644
--- a/regression-test/suites/vault_p0/alter/test_alter_vault_name.groovy
+++ b/regression-test/suites/vault_p0/alter/test_alter_vault_name.groovy
@@ -101,6 +101,17 @@ suite("test_alter_vault_name", "nonConcurrent") {
     }, "already existed")
 
     // case5
+    expectExceptionLike({
+        sql """
+            ALTER STORAGE VAULT ${s3VaultName}
+            PROPERTIES (
+                "type" = "s3",
+                "VAULT_NAME" = "@#¥%*&-+=null."
+            );
+        """
+    }, "Incorrect vault name")
+
+    // case6
     sql """
         CREATE TABLE ${hdfsVaultName} (
             C_CUSTKEY INTEGER NOT NULL,

diff --git a/regression-test/suites/vault_p0/concurent/test_alter_vault_concurrently.groovy b/regression-test/suites/vault_p0/concurent/test_alter_vault_concurrently.groovy
new file mode 100644
index 00000000000000..b7128acab84e7b
--- /dev/null
+++ b/regression-test/suites/vault_p0/concurent/test_alter_vault_concurrently.groovy
@@ -0,0 +1,128 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+suite("test_alter_vault_concurrently", "nonConcurrent") {
+    def suiteName = name;
+    if (!isCloudMode()) {
+        logger.info("skip ${name} case, because not cloud mode")
+        return
+    }
+
+    if (!enableStoragevault()) {
+        logger.info("skip ${name} case, because storage vault not enabled")
+        return
+    }
+
+    def randomStr = UUID.randomUUID().toString().replace("-", "")
+    def s3VaultName = "s3_" + randomStr
+
+    sql """
+        CREATE STORAGE VAULT ${s3VaultName}
+        PROPERTIES (
+            "type"="S3",
+            "s3.endpoint"="${getS3Endpoint()}",
+            "s3.region" = "${getS3Region()}",
+            "s3.access_key" = "${getS3AK()}",
+            "s3.secret_key" = "${getS3SK()}",
+            "s3.root.path" = "${s3VaultName}",
+            "s3.bucket" = "${getS3BucketName()}",
+            "s3.external_endpoint" = "",
+            "provider" = "${getS3Provider()}",
+            "use_path_style" = "false"
+        );
+    """
+
+    def future1 = thread("threadName1") {
+        try_sql """
+            ALTER STORAGE VAULT ${s3VaultName}
+            PROPERTIES (
+                "type"="S3",
+                "VAULT_NAME" = "${s3VaultName}_1"
+            );
+        """
+    }
+
+    def future2 = thread("threadName2") {
+        try_sql """
+            ALTER STORAGE VAULT ${s3VaultName}
+            PROPERTIES (
+                "type"="S3",
+                "VAULT_NAME" = "${s3VaultName}_2"
+            );
+        """
+    }
+
+    def combineFuture = combineFutures(future1, future2)
+    List<List<Object>> result = combineFuture.get()
+    logger.info("${result}")
+
+    def hitNum = 0
+    def vaultsInfo = try_sql """ SHOW STORAGE VAULTS """
+    def newS3VaultName = null
+
+    for (int i = 0; i < vaultsInfo.size(); i++) {
+        def name = vaultsInfo[i][0]
+        if (name.contains(s3VaultName)) {
+            hitNum++
+            newS3VaultName = name
+            assertTrue(name.equalsIgnoreCase("${s3VaultName}_1") || name.equalsIgnoreCase("${s3VaultName}_2"))
+        }
+    }
+    assertEquals(hitNum, 1)
+
+    future1 = thread("threadName1") {
+        try_sql """
+            ALTER STORAGE VAULT ${newS3VaultName}
+            PROPERTIES (
+                "type"="S3",
+                "VAULT_NAME" = "${s3VaultName}_1",
+                "s3.access_key" = "error_ak_1",
+                "s3.secret_key" = "error_sk_1"
+            );
+        """
+    }
+
+    future2 = thread("threadName2") {
+        try_sql """
+            ALTER STORAGE VAULT ${newS3VaultName}
+            PROPERTIES (
+                "type"="S3",
+                "s3.access_key" = "error_ak_2",
+                "s3.secret_key" = "error_sk_2"
+            );
+        """
+    }
+
+    combineFuture = combineFutures(future1, future2)
+    result = combineFuture.get()
+    logger.info("${result}")
+
+    vaultsInfo = try_sql """ SHOW STORAGE VAULTS """
+    def found = false
+    for (int i = 0; i < vaultsInfo.size(); i++) {
+        def name = vaultsInfo[i][0]
+        if (name.contains(newS3VaultName)) {
+            logger.info("${vaultsInfo[i]}");
+            assertTrue(vaultsInfo[i][2].contains("error_ak_1") || vaultsInfo[i][2].contains("error_ak_2"))
+            found = true
+        }
+    }
+    assertTrue(found)
+}
diff --git a/regression-test/suites/vault_p0/concurent/test_create_vault_concurrently.groovy b/regression-test/suites/vault_p0/concurent/test_create_vault_concurrently.groovy
new file mode 100644
index 00000000000000..985bf971e3a26b
--- /dev/null
+++ b/regression-test/suites/vault_p0/concurent/test_create_vault_concurrently.groovy
@@ -0,0 +1,132 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+suite("test_create_vault_concurrently", "nonConcurrent") {
+    def suiteName = name;
+    if (!isCloudMode()) {
+        logger.info("skip ${name} case, because not cloud mode")
+        return
+    }
+
+    if (!enableStoragevault()) {
+        logger.info("skip ${name} case, because storage vault not enabled")
+        return
+    }
+
+    def randomStr = UUID.randomUUID().toString().replace("-", "")
+    def s3VaultName = "s3_" + randomStr
+
+    def future1 = thread("threadName1") {
+        for (int i = 0; i < 100; i++) {
+            sql """
+                CREATE STORAGE VAULT IF NOT EXISTS ${s3VaultName}
+                PROPERTIES (
+                    "type"="S3",
+                    "s3.endpoint"="${getS3Endpoint()}",
+                    "s3.region" = "${getS3Region()}",
+                    "s3.access_key" = "${getS3AK()}",
+                    "s3.secret_key" = "${getS3SK()}",
+                    "s3.root.path" = "${s3VaultName}",
+                    "s3.bucket" = "${getS3BucketName()}",
+                    "s3.external_endpoint" = "",
+                    "provider" = "${getS3Provider()}",
+                    "use_path_style" = "false"
+                );
+            """
+        }
+    }
+
+    def future2 = thread("threadName2") {
+        for (int i = 0; i < 100; i++) {
+            sql """
+                CREATE STORAGE VAULT IF NOT EXISTS ${s3VaultName}
+                PROPERTIES (
+                    "type"="S3",
+                    "s3.endpoint"="${getS3Endpoint()}",
+                    "s3.region" = "${getS3Region()}",
+                    "s3.access_key" = "${getS3AK()}",
+                    "s3.secret_key" = "${getS3SK()}",
+                    "s3.root.path" = "${s3VaultName}",
+                    "s3.bucket" = "${getS3BucketName()}",
+                    "s3.external_endpoint" = "",
+                    "provider" = "${getS3Provider()}",
+                    "use_path_style" = "false"
+                );
+            """
+        }
+    }
+
+    def future3 = thread("threadName3") {
+        for (int i = 0; i < 100; i++) {
+            sql """
+                CREATE STORAGE VAULT IF NOT EXISTS ${s3VaultName}
+                PROPERTIES (
+                    "type"="S3",
+                    "s3.endpoint"="${getS3Endpoint()}",
+                    "s3.region" = "${getS3Region()}",
+                    "s3.access_key" = "${getS3AK()}",
+                    "s3.secret_key" = "${getS3SK()}",
+                    "s3.root.path" = "${s3VaultName}",
+                    "s3.bucket" = "${getS3BucketName()}",
+                    "s3.external_endpoint" = "",
+                    "provider" = "${getS3Provider()}",
+                    "use_path_style" = "false"
+                );
+            """
+        }
+    }
+
+    def future4 = thread("threadName4") {
+        for (int i = 0; i < 100; i++) {
+            sql """
+                CREATE STORAGE VAULT IF NOT EXISTS ${s3VaultName}
+                PROPERTIES (
+                    "type"="S3",
+                    "s3.endpoint"="${getS3Endpoint()}",
+                    "s3.region" = "${getS3Region()}",
+                    "s3.access_key" = "${getS3AK()}",
+                    "s3.secret_key" = "${getS3SK()}",
+                    "s3.root.path" = "${s3VaultName}",
+                    "s3.bucket" = "${getS3BucketName()}",
+                    "s3.external_endpoint" = "",
+                    "provider" = "${getS3Provider()}",
+                    "use_path_style" = "false"
+                );
+            """
+        }
+    }
+
+    // equivalent to combineFutures([future1, future2, future3, future4]), where [...] is an Iterable (see the sketch after this diff)
+    def combineFuture = combineFutures(future1, future2, future3, future4)
+    // Alternatively, use the lazyCheckThread action (see lazyCheck_action.groovy), which avoids checking exceptions from the futures.
+
+    List<List<Object>> result = combineFuture.get()
+    logger.info("${result}")
+
+    boolean s3VaultExisted = false;
+    def vaults_info = try_sql """ SHOW STORAGE VAULTS """
+
+    for (int i = 0; i < vaults_info.size(); i++) {
+        def name = vaults_info[i][0]
+        if (name.equals(s3VaultName)) {
+            s3VaultExisted = true;
+        }
+    }
+    assertTrue(s3VaultExisted)
+}
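As the in-suite comment notes, combineFutures also accepts an Iterable, so the four hand-written workers above could be built in a loop. A sketch under that assumption, reusing the same thread/sql actions as the suite; the workload is shortened here to a trivial query rather than the full CREATE STORAGE VAULT statement:

    // Build N identical workers in a loop; the resulting List is an Iterable,
    // so it can be passed to combineFutures directly.
    def futures = (1..4).collect { idx ->
        thread("threadName${idx}") {
            for (int i = 0; i < 100; i++) {
                sql """ SELECT 1 """
            }
        }
    }
    List<List<Object>> result = combineFutures(futures).get()
    logger.info("${result}")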
diff --git a/regression-test/suites/vault_p0/concurent/test_default_vault_concurrenlty.groovy b/regression-test/suites/vault_p0/concurent/test_default_vault_concurrenlty.groovy
new file mode 100644
index 00000000000000..2ce094ea668808
--- /dev/null
+++ b/regression-test/suites/vault_p0/concurent/test_default_vault_concurrenlty.groovy
@@ -0,0 +1,127 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+suite("test_default_vault_concurrently", "nonConcurrent") {
+    def suiteName = name;
+    if (!isCloudMode()) {
+        logger.info("skip ${name} case, because not cloud mode")
+        return
+    }
+
+    if (!enableStoragevault()) {
+        logger.info("skip ${name} case, because storage vault not enabled")
+        return
+    }
+
+    def randomStr = UUID.randomUUID().toString().replace("-", "")
+    def s3VaultName1 = "s3_" + randomStr + "_1"
+    def s3VaultName2 = "s3_" + randomStr + "_2"
+
+    sql """
+        CREATE STORAGE VAULT ${s3VaultName1}
+        PROPERTIES (
+            "type"="S3",
+            "s3.endpoint"="${getS3Endpoint()}",
+            "s3.region" = "${getS3Region()}",
+            "s3.access_key" = "${getS3AK()}",
+            "s3.secret_key" = "${getS3SK()}",
+            "s3.root.path" = "${s3VaultName1}",
+            "s3.bucket" = "${getS3BucketName()}",
+            "s3.external_endpoint" = "",
+            "provider" = "${getS3Provider()}",
+            "use_path_style" = "false"
+        );
+    """
+
+    sql """
+        CREATE STORAGE VAULT ${s3VaultName2}
+        PROPERTIES (
+            "type"="S3",
+            "s3.endpoint"="${getS3Endpoint()}",
+            "s3.region" = "${getS3Region()}",
+            "s3.access_key" = "${getS3AK()}",
+            "s3.secret_key" = "${getS3SK()}",
+            "s3.root.path" = "${s3VaultName2}",
+            "s3.bucket" = "${getS3BucketName()}",
+            "s3.external_endpoint" = "",
+            "provider" = "${getS3Provider()}",
+            "use_path_style" = "false"
+        );
+    """
+
+    def future1 = thread("threadName1") {
+        for (int i = 0; i < 200; i++) {
+            sql """SET ${s3VaultName1} AS DEFAULT STORAGE VAULT;"""
+        }
+    }
+
+    def future2 = thread("threadName2") {
+        for (int i = 0; i < 200; i++) {
+            sql """SET ${s3VaultName2} AS DEFAULT STORAGE VAULT;"""
+        }
+    }
+
+    def combineFuture = combineFutures(future1, future2)
+
+    List<List<Object>> result = combineFuture.get()
+    logger.info("${result}")
+
+    def vaultsInfo = try_sql """ SHOW STORAGE VAULTS """
+    def found = false
+    def defaultVaultName = null
+    for (int i = 0; i < vaultsInfo.size(); i++) {
+        def name = vaultsInfo[i][0]
+        def isDefault = vaultsInfo[i][3]
+        if (isDefault.equalsIgnoreCase("true")) {
+            assertFalse(found)
+            found = true
+            defaultVaultName = name;
+            assertTrue(name.equalsIgnoreCase(s3VaultName1) || name.equalsIgnoreCase(s3VaultName2))
+        }
+    }
+    assertTrue(found)
+
+    sql """
+        CREATE TABLE ${defaultVaultName} (
+            C_CUSTKEY INTEGER NOT NULL,
+            C_NAME INTEGER NOT NULL
+        )
+        DUPLICATE KEY(C_CUSTKEY, C_NAME)
+        DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+        PROPERTIES (
+            "replication_num" = "1"
+        )
+    """
+
+    future1 = thread("threadName1") {
+        for (int i = 0; i < 50; i++) {
+            sql """ insert into ${defaultVaultName} values(${i}, ${i}); """
+        }
+    }
+
+    future2 = thread("threadName2") {
+        sql """ UNSET DEFAULT STORAGE VAULT; """
+    }
+
+    combineFuture = combineFutures(future1, future2)
+
+    result = combineFuture.get()
+    logger.info("${result}")
+}
diff --git a/regression-test/suites/vault_p0/create/test_create_vault.groovy b/regression-test/suites/vault_p0/create/test_create_vault.groovy
index 49ea2565cc63af..812e3aea43848d 100644
--- a/regression-test/suites/vault_p0/create/test_create_vault.groovy
+++ b/regression-test/suites/vault_p0/create/test_create_vault.groovy
@@ -53,6 +53,18 @@ suite("test_create_vault", "nonConcurrent") {
         """
     }, "Incorrect vault name")
 
+    expectExceptionLike({
+        sql """
+            CREATE STORAGE VAULT '@#¥%*&-+=null.'
+            PROPERTIES (
+                "type"="S3",
+                "fs.defaultFS"="${getHmsHdfsFs()}",
+                "path_prefix" = "${exceed64LengthStr}",
+                "hadoop.username" = "${getHmsUser()}"
+            );
+        """
+    }, "Incorrect vault name")
+
     sql """
         CREATE STORAGE VAULT ${length64Str}
         PROPERTIES (
@@ -206,6 +218,17 @@
                 "hadoop.username" = "${getHmsUser()}"
             );
         """
+    }, "Invalid argument s3.bucket")
+
+    expectExceptionLike({
+        sql """
+            CREATE STORAGE VAULT ${hdfsVaultName}
+            PROPERTIES (
+                "type"="hdfs",
+                "path_prefix" = "${hdfsVaultName}",
+                "hadoop.username" = "${getHmsUser()}"
+            );
+        """
     }, "invalid fs_name")
 
     // test `if not exist` and dup name hdfs vault

diff --git a/regression-test/suites/vault_p0/create/test_create_vault_with_case_sensitive.groovy b/regression-test/suites/vault_p0/create/test_create_vault_with_case_sensitive.groovy
new file mode 100644
index 00000000000000..0a674c9f3804ca
--- /dev/null
+++ b/regression-test/suites/vault_p0/create/test_create_vault_with_case_sensitive.groovy
@@ -0,0 +1,228 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_create_vault_with_case_sensitive", "nonConcurrent") {
+    def suiteName = name;
+    if (!isCloudMode()) {
+        logger.info("skip ${name} case, because not cloud mode")
+        return
+    }
+
+    if (!enableStoragevault()) {
+        logger.info("skip ${name} case, because storage vault not enabled")
+        return
+    }
+
+    def randomStr = UUID.randomUUID().toString().replace("-", "")
+    def s3VaultName = "s3_" + randomStr
+    def hdfsVaultName = "hdfs_" + randomStr
+
+    // hdfs vault case
+    expectExceptionLike({
+        sql """
+            CREATE STORAGE VAULT ${hdfsVaultName}
+            PROPERTIES (
+                "type" = "aaaa",
+                "fs.defaultFS"="${getHmsHdfsFs()}",
+                "path_prefix" = "${hdfsVaultName}",
+                "hadoop.username" = "${getHmsUser()}"
+            );
+        """
+    }, "Unsupported Storage Vault type")
+
+    expectExceptionLike({
+        sql """
+            CREATE STORAGE VAULT ${hdfsVaultName}
+            PROPERTIES (
+                "type" = "s3",
+                "fs.defaultFS"="${getHmsHdfsFs()}",
+                "path_prefix" = "${hdfsVaultName}",
+                "hadoop.username" = "${getHmsUser()}"
+            );
+        """
+    }, "Missing [s3.endpoint] in properties")
+
+    expectExceptionLike({
+        sql """
+            CREATE STORAGE VAULT ${hdfsVaultName}
+            PROPERTIES (
+                "type" = "S3",
+                "fs.defaultFS"="${getHmsHdfsFs()}",
+                "path_prefix" = "${hdfsVaultName}",
+                "hadoop.username" = "${getHmsUser()}"
+            );
+        """
+    }, "Missing [s3.endpoint] in properties")
+
+    sql """
+        CREATE STORAGE VAULT ${hdfsVaultName}
+        PROPERTIES (
+            "type" = "hdfs",
+            "fs.defaultFS"="${getHmsHdfsFs()}",
+            "path_prefix" = "${hdfsVaultName}",
+            "hadoop.username" = "${getHmsUser()}"
+        );
+    """
+
+    sql """
+        CREATE STORAGE VAULT ${hdfsVaultName.toUpperCase()}
+        PROPERTIES (
+            "TYPE" = "HDFS",
+            "FS.DEFAULTFS"="${getHmsHdfsFs()}",
+            "PATH_PREFIX" = "${hdfsVaultName.toUpperCase()}",
+            "HADOOP.USERNAME" = "${getHmsUser()}"
+        );
+    """
+
+    // s3 vault case
+    expectExceptionLike({
+        sql """
+            CREATE STORAGE VAULT ${s3VaultName}
+            PROPERTIES (
+                "type" = "bbbb",
+                "s3.endpoint"="${getS3Endpoint()}",
+                "s3.region" = "${getS3Region()}",
+                "s3.access_key" = "${getS3AK()}",
+                "s3.secret_key" = "${getS3SK()}",
+                "s3.root.path" = "${s3VaultName}",
+                "s3.bucket" = "${getS3BucketName()}",
+                "s3.external_endpoint" = "",
+                "provider" = "${getS3Provider()}",
+                "use_path_style" = "false"
+            );
+        """
+    }, "Unsupported Storage Vault type")
+
+    expectExceptionLike({
+        sql """
+            CREATE STORAGE VAULT ${s3VaultName}
+            PROPERTIES (
+                "type" = "hdfs",
+                "FS.DEFAULTFS"="${getHmsHdfsFs()}",
+                "s3.endpoint"="${getS3Endpoint()}",
+                "s3.region" = "${getS3Region()}",
+                "s3.access_key" = "${getS3AK()}",
+                "s3.secret_key" = "${getS3SK()}",
+                "s3.root.path" = "${s3VaultName}",
+                "s3.bucket" = "${getS3BucketName()}",
+                "s3.external_endpoint" = "",
+                "provider" = "${getS3Provider()}",
+                "use_path_style" = "false"
+            );
+        """
+    }, "Invalid argument s3.region")
+
+    expectExceptionLike({
+        sql """
+            CREATE STORAGE VAULT ${s3VaultName}
+            PROPERTIES (
+                "type" = "HDFS",
+                "s3.endpoint"="${getS3Endpoint()}",
+                "s3.region" = "${getS3Region()}",
+                "s3.access_key" = "${getS3AK()}",
+                "s3.secret_key" = "${getS3SK()}",
+                "s3.root.path" = "${s3VaultName}",
+                "s3.bucket" = "${getS3BucketName()}",
+                "s3.external_endpoint" = "",
+                "provider" = "${getS3Provider()}",
+                "use_path_style" = "false"
+            );
+        """
+    }, "Invalid argument s3.region")
+
+    sql """
+        CREATE STORAGE VAULT ${s3VaultName}
+        PROPERTIES (
+            "type" = "s3",
+            "s3.endpoint"="${getS3Endpoint()}",
+            "s3.region" = "${getS3Region()}",
+            "s3.access_key" = "${getS3AK()}",
+            "s3.secret_key" = "${getS3SK()}",
+            "s3.root.path" = "${s3VaultName}",
+            "s3.bucket" = "${getS3BucketName()}",
+            "s3.external_endpoint" = "",
+            "provider" = "${getS3Provider()}",
+            "use_path_style" = "false"
+        );
+    """
+
+    // s3.* property keys are case-sensitive
+    expectExceptionLike({
+        sql """
+            CREATE STORAGE VAULT ${s3VaultName.toUpperCase()}
+            PROPERTIES (
+                "TYPE" = "S3",
+                "S3.ENDPOINT"="${getS3Endpoint()}",
+                "S3.REGION" = "${getS3Region()}",
+                "S3.ACCESS_KEY" = "${getS3AK()}",
+                "S3.SECRET_KEY" = "${getS3SK()}",
+                "S3.ROOT.PATH" = "${s3VaultName}",
+                "S3.BUCKET" = "${getS3BucketName()}",
+                "S3.EXTERNAL_ENDPOINT" = "",
+                "PROVIDER" = "${getS3Provider()}",
+                "USE_PATH_STYLE" = "false"
+            );
+        """
+    }, "Missing [s3.endpoint] in properties")
+
+    sql """
+        CREATE STORAGE VAULT ${s3VaultName.toUpperCase()}
+        PROPERTIES (
+            "TYPE" = "S3",
+            "s3.endpoint"="${getS3Endpoint()}",
+            "s3.region" = "${getS3Region()}",
+            "s3.access_key" = "${getS3AK()}",
+            "s3.secret_key" = "${getS3SK()}",
+            "s3.root.path" = "${s3VaultName}",
+            "s3.bucket" = "${getS3BucketName()}",
+            "s3.external_endpoint" = "",
+            "provider" = "${getS3Provider()}",
+            "use_path_style" = "false"
+        );
+    """
+
+    def vaultInfos = try_sql """SHOW STORAGE VAULTS"""
+
+    boolean hdfsVaultLowerExist = false;
+    boolean hdfsVaultUpperExist = false;
+
+    boolean s3VaultLowerExist = false;
+    boolean s3VaultUpperExist = false;
+
+    for (int i = 0; i < vaultInfos.size(); i++) {
+        logger.info("vault info: ${vaultInfos[i]}")
+        if (vaultInfos[i][0].equals(hdfsVaultName)) {
+            hdfsVaultLowerExist = true
+        }
+
+        if (vaultInfos[i][0].equals(hdfsVaultName.toUpperCase())) {
+            hdfsVaultUpperExist = true
+        }
+
+        if (vaultInfos[i][0].equals(s3VaultName)) {
+            s3VaultLowerExist = true
+        }
+
+        if (vaultInfos[i][0].equals(s3VaultName.toUpperCase())) {
+            s3VaultUpperExist = true
+        }
+    }
+    assertTrue(hdfsVaultLowerExist)
+    assertTrue(hdfsVaultUpperExist)
+    assertTrue(s3VaultLowerExist)
+    assertTrue(s3VaultUpperExist)
+}
\ No newline at end of file

diff --git a/regression-test/suites/vault_p0/create/test_create_vault_with_kerberos.groovy b/regression-test/suites/vault_p0/create/test_create_vault_with_kerberos.groovy
new file mode 100644
index 00000000000000..d6f11f96cd74c4
--- /dev/null
+++ b/regression-test/suites/vault_p0/create/test_create_vault_with_kerberos.groovy
@@ -0,0 +1,106 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_create_vault_with_kerberos", "nonConcurrent") {
+    def suiteName = name;
+    if (!isCloudMode()) {
+        logger.info("skip ${name} case, because not cloud mode")
+        return
+    }
+
+    if (!enableStoragevault()) {
+        logger.info("skip ${name} case, because storage vault not enabled")
+        return
+    }
+
+    def randomStr = UUID.randomUUID().toString().replace("-", "")
+    def hdfsVaultName = "hdfs_" + randomStr
+    def hdfsVaultName2 = "hdfs2_" + randomStr
+    def tableName = "tbl_" + randomStr
+    def tableName2 = "tbl2_" + randomStr
+
+    sql """
+        CREATE STORAGE VAULT ${hdfsVaultName}
+        PROPERTIES (
+            "type" = "hdfs",
+            "fs.defaultFS"="${getHmsHdfsFs()}",
+            "path_prefix" = "${hdfsVaultName}",
+            "hadoop.username" = "not_exist_user"
+        );
+    """
+
+    sql """
+        CREATE TABLE ${tableName} (
+            C_CUSTKEY INTEGER NOT NULL,
+            C_NAME INTEGER NOT NULL
+        )
+        DUPLICATE KEY(C_CUSTKEY, C_NAME)
+        DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+        PROPERTIES (
+            "replication_num" = "1",
+            "storage_vault_name" = ${hdfsVaultName}
+        )
+    """
+
+    expectExceptionLike({
+        sql """ insert into ${tableName} values(1, 1); """
+    }, "Permission denied: user=not_exist_user")
+
+    expectExceptionLike({
+        sql """
+            CREATE STORAGE VAULT ${hdfsVaultName}_2
+            PROPERTIES (
+                "type" = "hdfs",
+                "fs.defaultFS"="${getHmsHdfsFs()}",
+                "path_prefix" = "${hdfsVaultName}_2",
+                "hadoop.username" = "${getHmsUser()}",
+                "hadoop.security.authentication" = "kerberos"
+            );
+        """
+    }, "hadoop.kerberos.principal is required for kerberos")
+
+    sql """
+        CREATE STORAGE VAULT ${hdfsVaultName2}
+        PROPERTIES (
+            "type" = "hdfs",
+            "fs.defaultFS"="${getHmsHdfsFs()}",
+            "path_prefix" = "${hdfsVaultName2}",
+            "hadoop.username" = "${getHmsUser()}",
+            "hadoop.security.authentication" = "kerberos",
+            "hadoop.kerberos.principal" = "hadoop/127.0.0.1@XXX",
+            "hadoop.kerberos.keytab" = "/etc/emr.keytab"
+        );
+    """
+
+    sql """
+        CREATE TABLE ${tableName2} (
+            C_CUSTKEY INTEGER NOT NULL,
+            C_NAME INTEGER NOT NULL
+        )
+        DUPLICATE KEY(C_CUSTKEY, C_NAME)
+        DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+        PROPERTIES (
+            "replication_num" = "1",
+            "storage_vault_name" = ${hdfsVaultName2}
+        )
+    """
+
+    expectExceptionLike({
+        sql """ insert into ${tableName2} values(1, 1); """
+    }, "vault id not found, maybe not sync")
+}
\ No newline at end of file

diff --git a/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_multi_roles.groovy b/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_multi_roles.groovy
new file mode 100644
index 00000000000000..5bbeaf06c66167
--- /dev/null
+++ b/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_multi_roles.groovy
@@ -0,0 +1,178 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import java.util.stream.Collectors;
+
+suite("test_vault_privilege_with_multi_roles", "nonConcurrent") {
+    def suiteName = name;
+    if (!isCloudMode()) {
+        logger.info("skip ${suiteName} case, because not cloud mode")
+        return
+    }
+
+    if (!enableStoragevault()) {
+        logger.info("skip ${suiteName} case, because storage vault not enabled")
+        return
+    }
+
+    def dbName = context.config.getDbNameByFile(context.file)
+    def randomStr = UUID.randomUUID().toString().replace("-", "")
+    def hdfsVaultName = "hdfs_" + randomStr
+
+    def userName = "user_${randomStr}"
+    def userPassword = "Cloud12345"
+    def roleName1 = "role1_${randomStr}"
+    def roleName2 = "role2_${randomStr}"
+    def tableName1 = "tbl1_${randomStr}"
+    def tableName2 = "tbl2_${randomStr}"
+    def tableName3 = "tbl3_${randomStr}"
+
+    sql """DROP TABLE IF EXISTS ${dbName}.${tableName1}"""
+    sql """DROP TABLE IF EXISTS ${dbName}.${tableName2}"""
+    sql """DROP USER IF EXISTS ${userName}"""
+    sql """DROP ROLE IF EXISTS ${roleName1}"""
+    sql """DROP ROLE IF EXISTS ${roleName2}"""
+
+    sql """CREATE ROLE ${roleName1}"""
+    sql """CREATE ROLE ${roleName2}"""
+
+    sql """CREATE USER ${userName} identified by '${userPassword}'"""
+    sql """GRANT create_priv ON *.*.* TO '${userName}'; """
+
+    sql """
+        CREATE STORAGE VAULT ${hdfsVaultName}
+        PROPERTIES (
+            "type"="HDFS",
+            "fs.defaultFS"="${getHmsHdfsFs()}",
+            "path_prefix" = "${hdfsVaultName}",
+            "hadoop.username" = "${getHmsUser()}"
+        );
+    """
+
+    connect(userName, userPassword, context.config.jdbcUrl) {
+        expectExceptionLike({
+            sql """
+                CREATE TABLE IF NOT EXISTS ${dbName}.${tableName1} (
+                    C_CUSTKEY INTEGER NOT NULL,
+                    C_NAME INTEGER NOT NULL
+                )
+                DUPLICATE KEY(C_CUSTKEY, C_NAME)
+                DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+                PROPERTIES (
+                    "replication_num" = "1",
+                    "storage_vault_name" = ${hdfsVaultName}
+                )
+            """
+        }, "denied")
+    }
+
+    sql """ GRANT usage_priv ON STORAGE VAULT '${hdfsVaultName}' TO ROLE '${roleName1}';"""
+    sql """ GRANT '${roleName1}' TO '${userName}';"""
+
+    connect(userName, userPassword, context.config.jdbcUrl) {
+        sql """
+            CREATE TABLE IF NOT EXISTS ${dbName}.${tableName1} (
+                C_CUSTKEY INTEGER NOT NULL,
+                C_NAME INTEGER NOT NULL
+            )
+            DUPLICATE KEY(C_CUSTKEY, C_NAME)
+            DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+            PROPERTIES (
+                "replication_num" = "1",
+                "storage_vault_name" = ${hdfsVaultName}
+            )
+        """
+    }
+
+    sql """ GRANT load_priv,select_priv ON *.*.* TO '${userName}';"""
+    sql """ GRANT USAGE_PRIV ON COMPUTE GROUP '%' TO '${userName}';"""
+    connect(userName, userPassword, context.config.jdbcUrl) {
+        sql """
+            insert into ${dbName}.${tableName1} values(1, 1);
+            select * from ${dbName}.${tableName1};
+        """
+    }
+
+    sql """REVOKE usage_priv ON STORAGE VAULT '${hdfsVaultName}' FROM ROLE '${roleName1}';"""
+    connect(userName, userPassword, context.config.jdbcUrl) {
+        expectExceptionLike({
+            sql """
+                CREATE TABLE ${dbName}.${tableName2} (
+                    C_CUSTKEY INTEGER NOT NULL,
+                    C_NAME INTEGER NOT NULL
+                )
+                DUPLICATE KEY(C_CUSTKEY, C_NAME)
+                DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+                PROPERTIES (
+                    "replication_num" = "1",
+                    "storage_vault_name" = ${hdfsVaultName}
+                )
+            """
+        }, "denied")
+    }
+
+    sql """ GRANT usage_priv ON STORAGE VAULT '${hdfsVaultName}' TO ROLE '${roleName2}';"""
+    sql """ GRANT '${roleName2}' TO '${userName}';"""
+
+    connect(userName, userPassword, context.config.jdbcUrl) {
+        sql """
+            CREATE TABLE IF NOT EXISTS ${dbName}.${tableName2} (
+                C_CUSTKEY INTEGER NOT NULL,
+                C_NAME INTEGER NOT NULL
+            )
+            DUPLICATE KEY(C_CUSTKEY, C_NAME)
+            DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+            PROPERTIES (
+                "replication_num" = "1",
+                "storage_vault_name" = ${hdfsVaultName}
+            )
+        """
+    }
+
+    connect(userName, userPassword, context.config.jdbcUrl) {
+        sql """
+            insert into ${dbName}.${tableName1} values(1, 1);
+            select * from ${dbName}.${tableName1};
+        """
+    }
+
+    connect(userName, userPassword, context.config.jdbcUrl) {
+        sql """
+            insert into ${dbName}.${tableName2} values(1, 1);
+            select * from ${dbName}.${tableName2};
+        """
+    }
+
+    sql """REVOKE usage_priv ON STORAGE VAULT '${hdfsVaultName}' FROM ROLE '${roleName2}';"""
+
+    connect(userName, userPassword, context.config.jdbcUrl) {
+        expectExceptionLike({
+            sql """
+                CREATE TABLE ${dbName}.${tableName3} (
+                    C_CUSTKEY INTEGER NOT NULL,
+                    C_NAME INTEGER NOT NULL
+                )
+                DUPLICATE KEY(C_CUSTKEY, C_NAME)
+                DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+                PROPERTIES (
+                    "replication_num" = "1",
+                    "storage_vault_name" = ${hdfsVaultName}
+                )
+            """
+        }, "denied")
+    }
+}
\ No newline at end of file

diff --git a/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_role.groovy b/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_role.groovy
index 7192dc40aed816..792a627c705303 100644
--- a/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_role.groovy
+++ b/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_role.groovy
@@ -95,8 +95,9 @@ suite("test_vault_privilege_with_role", "nonConcurrent") {
     connect(userName, userPassword, context.config.jdbcUrl) {
         sql """
             insert into ${dbName}.${tableName} values(1, 1);
-            select * from ${dbName}.${tableName};
         """
+        def result = sql """ select * from ${dbName}.${tableName}; """
+        assertEquals(result.size(), 1)
     }
 
     sql """REVOKE usage_priv ON STORAGE VAULT '${hdfsVaultName}' FROM ROLE '${roleName}';"""
@@ -117,4 +118,79 @@ suite("test_vault_privilege_with_role", "nonConcurrent") {
         }, "denied")
     }
 
+    def hdfsVaultName2 = "hdfs2_" + randomStr
+    def userName2 = "user2_${randomStr}"
+    def userPassword2 = "Cloud789654"
+    def roleName2 = "role2_${randomStr}"
+    def tableName2 = "tbl2_${randomStr}"
+
+    sql """DROP TABLE IF EXISTS ${dbName}.${tableName2}"""
+    sql """DROP USER IF EXISTS ${userName2}"""
+    sql """DROP ROLE IF EXISTS ${roleName2}"""
+
+    sql """CREATE ROLE ${roleName2}"""
+    sql """CREATE USER ${userName2} identified by '${userPassword2}'"""
+    sql """GRANT create_priv ON *.*.* TO '${userName2}'; """
+    sql """GRANT usage_priv ON STORAGE VAULT '${hdfsVaultName2}' TO '${userName2}';"""
+
+    sql """
+        CREATE STORAGE VAULT ${hdfsVaultName2}
+        PROPERTIES (
+            "type"="HDFS",
+            "fs.defaultFS"="${getHmsHdfsFs()}",
+            "path_prefix" = "${hdfsVaultName2}",
+            "hadoop.username" = "${getHmsUser()}"
+        );
+    """
+
+    sql """ GRANT usage_priv ON STORAGE VAULT '${hdfsVaultName2}' TO ROLE '${roleName2}';"""
+
+    connect(userName2, userPassword2, context.config.jdbcUrl) {
+        sql """
+            CREATE TABLE IF NOT EXISTS ${dbName}.${tableName2} (
+                C_CUSTKEY INTEGER NOT NULL,
+                C_NAME INTEGER NOT NULL
+            )
+            DUPLICATE KEY(C_CUSTKEY, C_NAME)
+            DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+            PROPERTIES (
+                "replication_num" = "1",
+                "storage_vault_name" = ${hdfsVaultName2}
+            )
+        """
+    }
+
+    sql """ GRANT load_priv,select_priv ON *.*.* TO '${userName2}';"""
+    sql """ GRANT USAGE_PRIV ON COMPUTE GROUP '%' TO '${userName2}';"""
+    connect(userName2, userPassword2, context.config.jdbcUrl) {
+        sql """
+            insert into ${dbName}.${tableName2} values(1, 1);
+        """
+        def result = sql """ select * from ${dbName}.${tableName2}; """
+        assertEquals(result.size(), 1)
+    }
+
+    sql """REVOKE usage_priv ON STORAGE VAULT '${hdfsVaultName2}' FROM ROLE '${roleName2}';"""
+    connect(userName2, userPassword2, context.config.jdbcUrl) {
+        sql """
+            CREATE TABLE IF NOT EXISTS ${dbName}.${tableName2}_2 (
+                C_CUSTKEY INTEGER NOT NULL,
+                C_NAME INTEGER NOT NULL
+            )
+            DUPLICATE KEY(C_CUSTKEY, C_NAME)
+            DISTRIBUTED BY HASH(C_CUSTKEY) BUCKETS 1
+            PROPERTIES (
+                "replication_num" = "1",
+                "storage_vault_name" = ${hdfsVaultName2}
+            )
+        """
+    }
+
+    connect(userName2, userPassword2, context.config.jdbcUrl) {
+        sql """
+            insert into ${dbName}.${tableName2} values(2, 2);
+        """
+        def result = sql """ select * from ${dbName}.${tableName2}; """
+        assertEquals(result.size(), 2)
+    }
 }
\ No newline at end of file

diff --git a/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_user.groovy b/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_user.groovy
index 89a158323be835..e22c88c3d7b69c 100644
--- a/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_user.groovy
+++ b/regression-test/suites/vault_p0/privilege/test_vault_privilege_with_user.groovy
@@ -77,6 +77,14 @@ suite("test_vault_privilege_with_user", "nonConcurrent") {
         }, "denied")
     }
 
+    connect(hdfsUser, 'Cloud12345', context.config.jdbcUrl) {
+        expectExceptionLike({
+            sql """
+                UNSET DEFAULT STORAGE VAULT;
+            """
+        }, "denied")
+    }
+
     connect(hdfsUser, 'Cloud12345', context.config.jdbcUrl) {
         expectExceptionLike({
             sql """