From 6b6b736d40985ce05f4071bfca2c763777af9b4f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E7=82=8E=E6=B3=BC?= Date: Mon, 25 Mar 2024 22:49:49 +0800 Subject: [PATCH 1/4] refactor: replace `NonEmptyString` with `Tenant` - Part of #14719 --- Cargo.lock | 2 - src/bendpy/src/context.rs | 4 +- src/bendpy/src/lib.rs | 2 +- src/binaries/query/entry.rs | 2 +- src/binaries/query/oss_main.rs | 2 +- src/meta/api/src/crud/mod.rs | 8 +- src/meta/api/src/schema_api_impl.rs | 43 ++++++---- src/meta/api/src/schema_api_test_suite.rs | 20 ++--- src/meta/app/src/app_error.rs | 24 ++++++ .../src/principal/user_defined_function.rs | 16 ++-- src/meta/app/src/schema/catalog.rs | 84 ++++--------------- src/meta/app/src/schema/catalog_name_ident.rs | 64 ++++++++++++++ src/meta/app/src/schema/index.rs | 7 +- src/meta/app/src/schema/mod.rs | 3 + src/meta/app/src/tenant/mod.rs | 2 + src/meta/app/src/tenant/tenant.rs | 25 +++++- src/meta/app/src/tenant/tenant_serde.rs | 43 ++++++++++ src/meta/app/src/tenant_key.rs | 5 ++ .../src/catalog_from_to_protobuf_impl.rs | 26 ------ src/meta/proto-conv/src/from_to_protobuf.rs | 8 ++ src/meta/proto-conv/src/lib.rs | 1 + .../src/tident_from_to_protobuf_impl.rs | 65 ++++++++++++++ src/meta/protos/proto/catalog.proto | 2 +- src/meta/protos/proto/tenant.proto | 30 +++++++ src/query/catalog/src/catalog/interface.rs | 3 +- src/query/catalog/src/catalog/manager.rs | 22 ++--- .../catalog/src/catalog/session_catalog.rs | 3 +- src/query/catalog/src/table_context.rs | 4 +- src/query/config/Cargo.toml | 1 - src/query/config/src/config.rs | 6 +- src/query/config/src/inner.rs | 6 +- .../background_service_handler.rs | 9 +- .../src/background_service/compaction_job.rs | 8 +- .../ee/src/background_service/session.rs | 2 +- src/query/ee/src/enterprise_services.rs | 2 +- src/query/ee/src/stream/handler.rs | 12 +-- src/query/ee/src/test_kits/mock_services.rs | 2 +- .../it/aggregating_index/index_refresh.rs | 10 +-- .../ee/tests/it/inverted_index/pruning.rs | 6 +- src/query/ee/tests/it/license/license_mgr.rs | 4 +- src/query/formats/Cargo.toml | 1 - src/query/formats/tests/it/main.rs | 4 +- .../formats/tests/it/output_format_tcsv.rs | 6 +- src/query/management/src/quota/quota_mgr.rs | 8 +- src/query/management/src/role/role_mgr.rs | 19 ++--- .../management/src/setting/setting_mgr.rs | 8 +- src/query/management/src/stage/stage_mgr.rs | 8 +- src/query/management/src/udf/udf_mgr.rs | 25 +++--- src/query/management/src/user/user_mgr.rs | 5 +- src/query/management/tests/it/role.rs | 5 +- src/query/management/tests/it/setting.rs | 5 +- src/query/management/tests/it/stage.rs | 8 +- src/query/management/tests/it/udf.rs | 4 +- src/query/management/tests/it/user.rs | 42 +++++----- src/query/service/src/api/http/v1/settings.rs | 15 ++-- .../service/src/api/http/v1/tenant_tables.rs | 14 +++- .../src/catalogs/default/database_catalog.rs | 9 +- .../src/catalogs/default/immutable_catalog.rs | 3 +- .../src/catalogs/default/mutable_catalog.rs | 9 +- src/query/service/src/clusters/cluster.rs | 6 +- src/query/service/src/global_services.rs | 9 +- .../interpreters/access/privilege_access.rs | 23 ++--- .../service/src/interpreters/common/grant.rs | 4 +- .../src/interpreters/common/notification.rs | 2 +- .../src/interpreters/common/query_log.rs | 4 +- .../service/src/interpreters/common/task.rs | 2 +- .../src/interpreters/hook/refresh_hook.rs | 4 +- .../interpreter_catalog_create.rs | 7 +- .../interpreter_cluster_key_alter.rs | 2 +- .../interpreter_cluster_key_drop.rs | 2 +- .../interpreter_data_mask_desc.rs | 2 
+- .../interpreter_database_create.rs | 10 ++- .../interpreters/interpreter_database_drop.rs | 4 +- .../interpreter_database_show_create.rs | 2 +- .../src/interpreters/interpreter_delete.rs | 2 +- .../interpreters/interpreter_index_create.rs | 10 +-- .../interpreters/interpreter_index_drop.rs | 11 +-- .../src/interpreters/interpreter_metrics.rs | 2 +- .../interpreter_notification_alter.rs | 2 +- .../interpreter_privilege_grant.rs | 8 +- .../src/interpreters/interpreter_setting.rs | 7 +- .../interpreter_share_alter_tenants.rs | 8 +- .../interpreters/interpreter_share_create.rs | 2 +- .../interpreters/interpreter_share_desc.rs | 2 +- .../interpreters/interpreter_share_drop.rs | 2 +- .../interpreter_share_grant_object.rs | 4 +- .../interpreter_share_revoke_object.rs | 4 +- .../interpreters/interpreter_share_show.rs | 6 +- .../interpreter_share_show_grant_tenants.rs | 2 +- ...nterpreter_show_object_grant_privileges.rs | 2 +- .../interpreter_table_add_column.rs | 4 +- .../interpreters/interpreter_table_create.rs | 20 ++--- .../interpreters/interpreter_table_drop.rs | 6 +- .../interpreter_table_drop_column.rs | 4 +- .../interpreter_table_modify_column.rs | 16 ++-- .../interpreter_table_optimize.rs | 6 +- .../interpreter_table_recluster.rs | 4 +- .../interpreter_table_rename_column.rs | 4 +- .../interpreters/interpreter_table_revert.rs | 2 +- .../interpreter_table_set_options.rs | 4 +- .../interpreter_table_show_create.rs | 2 +- .../src/interpreters/interpreter_update.rs | 4 +- .../src/interpreters/interpreter_user_drop.rs | 2 +- .../interpreter_user_stage_create.rs | 15 ++-- .../interpreter_vacuum_drop_tables.rs | 6 +- .../interpreter_vacuum_temporary_files.rs | 2 +- .../interpreters/interpreter_view_create.rs | 2 +- .../interpreter_virtual_column_alter.rs | 2 +- .../interpreter_virtual_column_create.rs | 2 +- .../interpreter_virtual_column_drop.rs | 2 +- src/query/service/src/local/mod.rs | 2 +- .../pipelines/builders/builder_aggregate.rs | 2 +- .../src/pipelines/builders/builder_sort.rs | 2 +- .../aggregator/aggregate_exchange_injector.rs | 2 +- .../build_spill/build_spill_state.rs | 2 +- .../probe_spill/probe_spill_state.rs | 2 +- .../flight_sql/flight_sql_service/catalog.rs | 10 +-- src/query/service/src/sessions/query_ctx.rs | 12 +-- .../service/src/sessions/query_ctx_shared.rs | 12 +-- src/query/service/src/sessions/session.rs | 6 +- src/query/service/src/sessions/session_ctx.rs | 13 ++- .../table_functions/cloud/task_dependents.rs | 9 +- .../cloud/task_dependents_enable.rs | 9 +- .../src/table_functions/openai/ai_to_sql.rs | 2 +- .../others/execute_background_job.rs | 4 +- .../table_functions/others/license_info.rs | 19 +++-- .../others/suggested_background_tasks.rs | 2 +- .../table_functions/others/tenant_quota.rs | 4 +- src/query/service/src/test_kits/config.rs | 4 +- src/query/service/src/test_kits/fixture.rs | 21 ++--- src/query/service/tests/it/api/http/status.rs | 7 +- src/query/service/tests/it/auth.rs | 2 +- .../tests/it/catalogs/database_catalog.rs | 29 ++++--- .../service/tests/it/sessions/session.rs | 8 +- .../tests/it/sessions/session_context.rs | 4 +- .../service/tests/it/spillers/spiller.rs | 2 +- .../tests/it/sql/exec/get_table_bind_test.rs | 12 +-- .../optimizer/agg_index_query_rewrite.rs | 2 +- .../storages/fuse/operations/alter_table.rs | 6 +- .../it/storages/fuse/operations/analyze.rs | 2 +- .../it/storages/fuse/operations/clustering.rs | 6 +- .../it/storages/fuse/operations/commit.rs | 8 +- .../mutation/block_compact_mutator.rs | 4 +- 
.../mutation/segments_compact_mutator.rs | 6 +- .../storages/fuse/operations/table_analyze.rs | 2 +- .../service/tests/it/storages/fuse/pruning.rs | 6 +- src/query/settings/src/settings.rs | 8 +- src/query/settings/src/settings_global.rs | 4 +- src/query/settings/tests/it/setting.rs | 10 +-- src/query/sharing/src/share_endpoint.rs | 2 + src/query/sharing/src/signer.rs | 1 + .../physical_plans/physical_table_scan.rs | 2 +- src/query/sql/src/executor/table_read_plan.rs | 13 +-- .../sql/src/planner/binder/ddl/account.rs | 12 +-- .../sql/src/planner/binder/ddl/catalog.rs | 4 +- .../sql/src/planner/binder/ddl/column.rs | 4 +- .../sql/src/planner/binder/ddl/data_mask.rs | 4 +- .../sql/src/planner/binder/ddl/database.rs | 8 +- src/query/sql/src/planner/binder/ddl/index.rs | 4 +- .../src/planner/binder/ddl/network_policy.rs | 6 +- .../src/planner/binder/ddl/notification.rs | 8 +- .../src/planner/binder/ddl/password_policy.rs | 6 +- src/query/sql/src/planner/binder/ddl/share.rs | 10 +-- src/query/sql/src/planner/binder/ddl/stage.rs | 2 +- .../sql/src/planner/binder/ddl/stream.rs | 4 +- src/query/sql/src/planner/binder/ddl/table.rs | 26 +++--- src/query/sql/src/planner/binder/ddl/task.rs | 12 +-- src/query/sql/src/planner/binder/ddl/view.rs | 6 +- .../src/planner/binder/ddl/virtual_column.rs | 4 +- src/query/sql/src/planner/binder/table.rs | 2 +- src/query/sql/src/planner/bloom_index.rs | 6 +- src/query/sql/src/planner/dataframe.rs | 2 +- .../sql/src/planner/expression_parser.rs | 12 +-- .../sql/src/planner/plans/ddl/catalog.rs | 25 ++---- src/query/sql/src/planner/plans/ddl/table.rs | 4 +- .../semantic/virtual_column_rewriter.rs | 2 +- .../storages/fuse/src/operations/analyze.rs | 4 +- src/query/storages/fuse/src/operations/gc.rs | 2 +- .../clustering_information_table.rs | 2 +- .../fuse_blocks/fuse_block_table.rs | 2 +- .../fuse_columns/fuse_column_table.rs | 2 +- .../fuse_encodings/fuse_encoding_table.rs | 2 +- .../fuse_segments/fuse_segment_table.rs | 2 +- .../fuse_snapshots/fuse_snapshot_table.rs | 2 +- .../fuse_statistics/fuse_statistic_table.rs | 2 +- .../storages/hive/hive/src/hive_catalog.rs | 3 +- src/query/storages/iceberg/src/catalog.rs | 3 +- .../storages/result_cache/src/read/reader.rs | 2 +- .../storages/result_cache/src/write/sink.rs | 2 +- .../stream/src/stream_status_table_func.rs | 2 +- .../system/src/background_jobs_table.rs | 2 +- .../system/src/background_tasks_table.rs | 2 +- .../storages/system/src/catalogs_table.rs | 2 +- .../storages/system/src/columns_table.rs | 8 +- .../storages/system/src/databases_table.rs | 5 +- .../storages/system/src/indexes_table.rs | 2 +- src/query/storages/system/src/locks_table.rs | 2 +- .../system/src/notification_history_table.rs | 10 ++- .../system/src/notifications_table.rs | 10 ++- .../storages/system/src/query_cache_table.rs | 2 +- src/query/storages/system/src/stages_table.rs | 1 + .../storages/system/src/streams_table.rs | 9 +- src/query/storages/system/src/tables_table.rs | 9 +- .../storages/system/src/task_history_table.rs | 10 ++- src/query/storages/system/src/tasks_table.rs | 9 +- .../storages/system/src/temp_files_table.rs | 2 +- .../system/src/virtual_columns_table.rs | 2 +- src/query/users/src/connection.rs | 13 ++- src/query/users/src/file_format.rs | 13 ++- src/query/users/src/network_policy.rs | 21 ++--- src/query/users/src/password_policy.rs | 29 ++----- src/query/users/src/role_cache_mgr.rs | 24 +++--- src/query/users/src/role_mgr.rs | 33 ++++---- src/query/users/src/user_api.rs | 34 ++++---- src/query/users/src/user_mgr.rs | 
32 +++---- src/query/users/src/user_setting.rs | 8 +- src/query/users/src/user_stage.rs | 17 ++-- src/query/users/src/user_udf.rs | 20 ++--- src/query/users/tests/it/network_policy.rs | 9 +- src/query/users/tests/it/password_policy.rs | 6 +- src/query/users/tests/it/role_cache_mgr.rs | 5 +- src/query/users/tests/it/role_mgr.rs | 5 +- src/query/users/tests/it/user_mgr.rs | 23 ++--- src/query/users/tests/it/user_udf.rs | 7 +- 224 files changed, 1035 insertions(+), 893 deletions(-) create mode 100644 src/meta/app/src/schema/catalog_name_ident.rs create mode 100644 src/meta/app/src/tenant/tenant_serde.rs create mode 100644 src/meta/proto-conv/src/tident_from_to_protobuf_impl.rs create mode 100644 src/meta/protos/proto/tenant.proto diff --git a/Cargo.lock b/Cargo.lock index b8baf9a6b8822..a387d5ff3ff02 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2913,7 +2913,6 @@ dependencies = [ "databend-common-exception", "databend-common-grpc", "databend-common-meta-app", - "databend-common-meta-types", "databend-common-storage", "databend-common-tracing", "databend-common-users", @@ -3038,7 +3037,6 @@ dependencies = [ "databend-common-expression", "databend-common-io", "databend-common-meta-app", - "databend-common-meta-types", "databend-common-settings", "databend-storages-common-blocks", "databend-storages-common-table-meta", diff --git a/src/bendpy/src/context.rs b/src/bendpy/src/context.rs index 9b31fcd7d5454..af038b8260f91 100644 --- a/src/bendpy/src/context.rs +++ b/src/bendpy/src/context.rs @@ -19,6 +19,7 @@ use databend_common_exception::Result; use databend_common_meta_app::principal::GrantObject; use databend_common_meta_app::principal::UserInfo; use databend_common_meta_app::principal::UserPrivilegeSet; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_types::NonEmptyString; use databend_common_users::UserApiProvider; use databend_query::sessions::QueryContext; @@ -57,13 +58,14 @@ impl PySessionContext { }; let tenant = NonEmptyString::new(tenant).unwrap(); + let tenant = Tenant::new_nonempty(tenant); let config = GlobalConfig::instance(); UserApiProvider::try_create_simple(config.meta.to_meta_grpc_client_conf(), &tenant) .await .unwrap(); - session.set_current_tenant(tenant.to_string()); + session.set_current_tenant(tenant.name().to_string()); let mut user = UserInfo::new_no_auth("root", "%"); user.grants.grant_privileges( diff --git a/src/bendpy/src/lib.rs b/src/bendpy/src/lib.rs index b6236c8a477cf..1e6db0b4839dd 100644 --- a/src/bendpy/src/lib.rs +++ b/src/bendpy/src/lib.rs @@ -57,7 +57,7 @@ fn databend(_py: Python, m: &PyModule) -> PyResult<()> { GlobalServices::init(&conf).await.unwrap(); // init oss license manager - OssLicenseManager::init(conf.query.tenant_id.to_string()).unwrap(); + OssLicenseManager::init(conf.query.tenant_id.name().to_string()).unwrap(); ClusterDiscovery::instance() .register_to_metastore(&conf) .await diff --git a/src/binaries/query/entry.rs b/src/binaries/query/entry.rs index ffba334b918f3..3fdbfeb20b028 100644 --- a/src/binaries/query/entry.rs +++ b/src/binaries/query/entry.rs @@ -107,7 +107,7 @@ async fn precheck_services(conf: &InnerConfig) -> Result<()> { traces_sample_rate, ..Default::default() }))); - sentry::configure_scope(|scope| scope.set_tag("tenant", tenant)); + sentry::configure_scope(|scope| scope.set_tag("tenant", tenant.name())); sentry::configure_scope(|scope| scope.set_tag("cluster_id", cluster_id)); sentry::configure_scope(|scope| scope.set_tag("address", flight_addr)); } diff --git a/src/binaries/query/oss_main.rs 
b/src/binaries/query/oss_main.rs index ba7a2c4e07a84..aad3f95890952 100644 --- a/src/binaries/query/oss_main.rs +++ b/src/binaries/query/oss_main.rs @@ -57,6 +57,6 @@ async fn main_entrypoint() -> Result<()> { init_services(&conf).await?; // init oss license manager - OssLicenseManager::init(conf.query.tenant_id.to_string())?; + OssLicenseManager::init(conf.query.tenant_id.name().to_string())?; start_services(&conf).await } diff --git a/src/meta/api/src/crud/mod.rs b/src/meta/api/src/crud/mod.rs index 6ec8c7827f71f..c4033e24bc303 100644 --- a/src/meta/api/src/crud/mod.rs +++ b/src/meta/api/src/crud/mod.rs @@ -30,7 +30,6 @@ use databend_common_meta_kvapi::kvapi::ValueWithName; use databend_common_meta_types::MatchSeq; use databend_common_meta_types::MatchSeqExt; use databend_common_meta_types::MetaError; -use databend_common_meta_types::NonEmptyString; use databend_common_meta_types::SeqV; use databend_common_meta_types::SeqValue; use databend_common_meta_types::With; @@ -58,13 +57,10 @@ pub struct CrudMgr { impl CrudMgr { /// Create a new `CrudMgr` instance providing CRUD access for a key space defined by `R`: [`TenantResource`]. - pub fn create( - kv_api: Arc>, - tenant: &NonEmptyString, - ) -> Self { + pub fn create(kv_api: Arc>, tenant: &Tenant) -> Self { CrudMgr { kv_api, - tenant: Tenant::new_nonempty(tenant.clone()), + tenant: tenant.clone(), _p: Default::default(), } } diff --git a/src/meta/api/src/schema_api_impl.rs b/src/meta/api/src/schema_api_impl.rs index 24a4051eb3c36..ad61617ac9498 100644 --- a/src/meta/api/src/schema_api_impl.rs +++ b/src/meta/api/src/schema_api_impl.rs @@ -41,6 +41,7 @@ use databend_common_meta_app::app_error::StreamVersionMismatched; use databend_common_meta_app::app_error::TableAlreadyExists; use databend_common_meta_app::app_error::TableLockExpired; use databend_common_meta_app::app_error::TableVersionMismatched; +use databend_common_meta_app::app_error::TenantIsEmpty; use databend_common_meta_app::app_error::UndropDbHasNoHistory; use databend_common_meta_app::app_error::UndropDbWithNoDropTime; use databend_common_meta_app::app_error::UndropTableAlreadyExists; @@ -180,6 +181,8 @@ use databend_common_meta_app::share::ShareGrantObject; use databend_common_meta_app::share::ShareNameIdent; use databend_common_meta_app::share::ShareSpec; use databend_common_meta_app::share::ShareTableInfoMap; +use databend_common_meta_app::tenant::Tenant; +use databend_common_meta_app::KeyWithTenant; use databend_common_meta_kvapi::kvapi; use databend_common_meta_kvapi::kvapi::Key; use databend_common_meta_kvapi::kvapi::UpsertKVReq; @@ -193,6 +196,7 @@ use databend_common_meta_types::MatchSeqExt; use databend_common_meta_types::MetaError; use databend_common_meta_types::MetaId; use databend_common_meta_types::MetaNetworkError; +use databend_common_meta_types::NonEmptyString; use databend_common_meta_types::Operation; use databend_common_meta_types::SeqV; use databend_common_meta_types::TxnCondition; @@ -933,7 +937,10 @@ impl + ?Sized> SchemaApi for KV { return Err(KVAppError::AppError(AppError::IndexAlreadyExists( IndexAlreadyExists::new( &tenant_index.index_name, - format!("create index with tenant: {}", tenant_index.tenant), + format!( + "create index with tenant: {}", + tenant_index.tenant.display() + ), ), ))); } @@ -3892,8 +3899,8 @@ impl + ?Sized> SchemaApi for KV { } else { Err(KVAppError::AppError(AppError::CatalogAlreadyExists( CatalogAlreadyExists::new( - &name_key.catalog_name, - format!("create catalog: tenant: {}", name_key.tenant), + name_key.name(), + 
format!("create catalog: tenant: {}", name_key.tenant_name()), ), ))) }; @@ -3972,8 +3979,12 @@ impl + ?Sized> SchemaApi for KV { loop { trials.next().unwrap()?.await; - let res = - get_catalog_or_err(self, name_key, format!("drop_catalog: {}", &name_key)).await; + let res = get_catalog_or_err( + self, + name_key, + format!("drop_catalog: {}", name_key.display()), + ) + .await; let (_, catalog_id, catalog_meta_seq, _) = match res { Ok(x) => x, @@ -4041,11 +4052,11 @@ impl + ?Sized> SchemaApi for KV { ) -> Result>, KVAppError> { debug!(req :? =(&req); "SchemaApi: {}", func_name!()); - let name_key = CatalogNameIdent { - tenant: req.tenant, - // Using a empty catalog to to list all - catalog_name: "".to_string(), - }; + let tenant = Tenant::new_nonempty( + NonEmptyString::new(req.tenant) + .map_err(|_e| AppError::from(TenantIsEmpty::new("SchemaApi::list_catalogs")))?, + ); + let name_key = CatalogNameIdent::new(tenant, ""); // Pairs of catalog-name and catalog_id with seq let (tenant_catalog_names, catalog_ids) = list_u64_value(self, &name_key).await?; @@ -4076,10 +4087,10 @@ impl + ?Sized> SchemaApi for KV { catalog_id: catalog_ids[i], } .into(), - name_ident: CatalogNameIdent { - tenant: name_key.tenant.clone(), - catalog_name: tenant_catalog_names[i].catalog_name.clone(), - } + name_ident: CatalogNameIdent::new( + name_key.tenant().clone(), + tenant_catalog_names[i].name(), + ) .into(), meta: catalog_meta, }; @@ -5494,8 +5505,8 @@ pub fn catalog_has_to_exist( Err(KVAppError::AppError(AppError::UnknownCatalog( UnknownCatalog::new( - &catalog_name_ident.catalog_name, - format!("{}: {}", msg, catalog_name_ident), + catalog_name_ident.name(), + format!("{}: {}", msg, catalog_name_ident.display()), ), ))) } else { diff --git a/src/meta/api/src/schema_api_test_suite.rs b/src/meta/api/src/schema_api_test_suite.rs index 3ff0d5ddadb0b..bcd04bc3aa6d6 100644 --- a/src/meta/api/src/schema_api_test_suite.rs +++ b/src/meta/api/src/schema_api_test_suite.rs @@ -1419,16 +1419,17 @@ impl SchemaApiTestSuite { #[minitrace::trace] async fn catalog_create_get_list_drop(&self, mt: &MT) -> anyhow::Result<()> { - let tenant = "tenant1"; + let tenant_name = "tenant1"; + let tenant = Tenant::new_literal(tenant_name); + let catalog_name = "catalog1"; + let ident = CatalogNameIdent::new(tenant.clone(), catalog_name); + info!("--- create catalog1"); let req = CreateCatalogReq { if_not_exists: false, - name_ident: CatalogNameIdent { - tenant: tenant.to_string(), - catalog_name: catalog_name.to_string(), - }, + name_ident: ident.clone(), meta: CatalogMeta { catalog_option: CatalogOption::Iceberg(IcebergCatalogOption { storage_params: Box::new(StorageParams::S3(StorageS3Config { @@ -1443,9 +1444,7 @@ impl SchemaApiTestSuite { let res = mt.create_catalog(req).await?; info!("create catalog res: {:?}", res); - let got = mt - .get_catalog(GetCatalogReq::new(tenant, catalog_name)) - .await?; + let got = mt.get_catalog(GetCatalogReq::new(ident.clone())).await?; assert_eq!(got.id.catalog_id, res.catalog_id); assert_eq!(got.name_ident.tenant, "tenant1"); assert_eq!(got.name_ident.catalog_name, "catalog1"); @@ -1458,10 +1457,7 @@ impl SchemaApiTestSuite { let _ = mt .drop_catalog(DropCatalogReq { if_exists: false, - name_ident: CatalogNameIdent { - tenant: tenant.to_string(), - catalog_name: catalog_name.to_string(), - }, + name_ident: ident.clone(), }) .await?; diff --git a/src/meta/app/src/app_error.rs b/src/meta/app/src/app_error.rs index aca853cf53d0d..d459bfa546426 100644 --- a/src/meta/app/src/app_error.rs +++ 
b/src/meta/app/src/app_error.rs @@ -26,6 +26,20 @@ pub trait AppErrorMessage: Display { } } +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, thiserror::Error)] +#[error("Tenant is empty when: `{context}`")] +pub struct TenantIsEmpty { + context: String, +} + +impl TenantIsEmpty { + pub fn new(context: impl Into) -> Self { + Self { + context: context.into(), + } + } +} + #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, thiserror::Error)] #[error("DatabaseAlreadyExists: `{db_name}` while `{context}`")] pub struct DatabaseAlreadyExists { @@ -890,6 +904,9 @@ impl VirtualColumnNotFound { /// The application does not get expected result but there is nothing wrong with meta-service. #[derive(thiserror::Error, serde::Serialize, serde::Deserialize, Debug, Clone, PartialEq, Eq)] pub enum AppError { + #[error(transparent)] + TenantIsEmpty(#[from] TenantIsEmpty), + #[error(transparent)] TableVersionMismatched(#[from] TableVersionMismatched), @@ -1051,6 +1068,12 @@ pub enum AppError { MultiStatementTxnCommitFailed(#[from] MultiStmtTxnCommitFailed), } +impl AppErrorMessage for TenantIsEmpty { + fn message(&self) -> String { + self.to_string() + } +} + impl AppErrorMessage for UnknownBackgroundJob { fn message(&self) -> String { format!("Unknown background job '{}'", self.name) @@ -1380,6 +1403,7 @@ impl AppErrorMessage for VirtualColumnAlreadyExists { impl From for ErrorCode { fn from(app_err: AppError) -> Self { match app_err { + AppError::TenantIsEmpty(err) => ErrorCode::TenantIsEmpty(err.message()), AppError::UnknownDatabase(err) => ErrorCode::UnknownDatabase(err.message()), AppError::UnknownDatabaseId(err) => ErrorCode::UnknownDatabaseId(err.message()), AppError::UnknownTableId(err) => ErrorCode::UnknownTableId(err.message()), diff --git a/src/meta/app/src/principal/user_defined_function.rs b/src/meta/app/src/principal/user_defined_function.rs index 3d613f6ec6b1f..52ab564cf3b2c 100644 --- a/src/meta/app/src/principal/user_defined_function.rs +++ b/src/meta/app/src/principal/user_defined_function.rs @@ -20,16 +20,18 @@ use chrono::Utc; use databend_common_expression::types::DataType; use databend_common_meta_kvapi::kvapi::Key; +use crate::tenant::Tenant; + #[derive(Clone, Debug, Eq, PartialEq)] pub struct UdfName { - pub tenant: String, + pub tenant: Tenant, pub name: String, } impl UdfName { - pub fn new(tenant: impl ToString, name: impl ToString) -> Self { + pub fn new(tenant: &Tenant, name: impl ToString) -> Self { Self { - tenant: tenant.to_string(), + tenant: tenant.clone(), name: name.to_string(), } } @@ -219,12 +221,12 @@ mod kv_api_impl { /// It belongs to a tenant fn parent(&self) -> Option { - Some(Tenant::new(&self.tenant).to_string_key()) + Some(self.tenant.to_string_key()) } fn to_string_key(&self) -> String { kvapi::KeyBuilder::new_prefixed(Self::PREFIX) - .push_str(&self.tenant) + .push_str(self.tenant.name()) .push_str(&self.name) .done() } @@ -232,10 +234,12 @@ mod kv_api_impl { fn from_str_key(s: &str) -> Result { let mut p = kvapi::KeyParser::new_prefixed(s, Self::PREFIX)?; - let tenant = p.next_str()?; + let tenant = p.next_nonempty()?; let name = p.next_str()?; p.done()?; + let tenant = Tenant::new_nonempty(tenant); + Ok(UdfName { tenant, name }) } } diff --git a/src/meta/app/src/schema/catalog.rs b/src/meta/app/src/schema/catalog.rs index 190b2bbafe16c..fca31b0def235 100644 --- a/src/meta/app/src/schema/catalog.rs +++ b/src/meta/app/src/schema/catalog.rs @@ -18,7 +18,9 @@ use std::ops::Deref; use chrono::DateTime; use chrono::Utc; +use 
crate::schema::CatalogNameIdent; use crate::storage::StorageParams; +use crate::KeyWithTenant; #[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)] pub enum CatalogType { @@ -84,8 +86,8 @@ pub struct CatalogName { impl From for CatalogName { fn from(ident: CatalogNameIdent) -> Self { CatalogName { - tenant: ident.tenant, - catalog_name: ident.catalog_name, + tenant: ident.tenant_name().to_string(), + catalog_name: ident.name().to_string(), } } } @@ -160,14 +162,6 @@ pub struct CatalogMeta { pub created_on: DateTime, } -/// The name of a catalog, -/// which is used as a key and does not support other codec method such as serde. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct CatalogNameIdent { - pub tenant: String, - pub catalog_name: String, -} - // serde is required by `CatalogInfo.id` #[derive(Clone, Debug, Default, Eq, PartialEq)] pub struct CatalogId { @@ -180,12 +174,6 @@ impl CatalogId { } } -impl Display for CatalogNameIdent { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "'{}'/'{}'", self.tenant, self.catalog_name) - } -} - #[derive(Clone, Debug, Default, Eq, PartialEq)] pub struct CatalogIdToName { pub catalog_id: u64, @@ -206,10 +194,10 @@ pub struct CreateCatalogReq { impl CreateCatalogReq { pub fn tenant(&self) -> &str { - &self.name_ident.tenant + self.name_ident.tenant_name() } pub fn catalog_name(&self) -> &str { - &self.name_ident.catalog_name + self.name_ident.name() } } @@ -218,7 +206,10 @@ impl Display for CreateCatalogReq { write!( f, "create_catalog(if_not_exists={}):{}/{}={:?}", - self.if_not_exists, self.name_ident.tenant, self.name_ident.catalog_name, self.meta + self.if_not_exists, + self.name_ident.tenant_name(), + self.name_ident.name(), + self.meta ) } } @@ -239,7 +230,9 @@ impl Display for DropCatalogReq { write!( f, "drop_catalog(if_exists={}):{}/{}", - self.if_exists, self.name_ident.tenant, self.name_ident.catalog_name + self.if_exists, + self.name_ident.tenant_name(), + self.name_ident.name() ) } } @@ -261,13 +254,8 @@ impl Deref for GetCatalogReq { } impl GetCatalogReq { - pub fn new(tenant: impl Into, catalog_name: impl Into) -> GetCatalogReq { - GetCatalogReq { - inner: CatalogNameIdent { - tenant: tenant.into(), - catalog_name: catalog_name.into(), - }, - } + pub fn new(ident: CatalogNameIdent) -> GetCatalogReq { + GetCatalogReq { inner: ident } } } @@ -286,45 +274,11 @@ impl ListCatalogReq { mod kvapi_key_impl { use databend_common_meta_kvapi::kvapi; - use databend_common_meta_kvapi::kvapi::Key; use super::CatalogId; use super::CatalogIdToName; - use super::CatalogNameIdent; use crate::schema::CatalogMeta; - use crate::tenant::Tenant; - - /// __fd_catalog// -> - impl kvapi::Key for CatalogNameIdent { - const PREFIX: &'static str = "__fd_catalog"; - - type ValueType = CatalogId; - - /// It belongs to a tenant - fn parent(&self) -> Option { - Some(Tenant::new(&self.tenant).to_string_key()) - } - - fn to_string_key(&self) -> String { - kvapi::KeyBuilder::new_prefixed(Self::PREFIX) - .push_str(&self.tenant) - .push_str(&self.catalog_name) - .done() - } - - fn from_str_key(s: &str) -> Result { - let mut p = kvapi::KeyParser::new_prefixed(s, Self::PREFIX)?; - - let tenant = p.next_str()?; - let catalog_name = p.next_str()?; - p.done()?; - - Ok(CatalogNameIdent { - tenant, - catalog_name, - }) - } - } + use crate::schema::CatalogNameIdent; /// "__fd_catalog_by_id/" impl kvapi::Key for CatalogId { @@ -378,12 +332,6 @@ mod kvapi_key_impl { } } - impl kvapi::Value for CatalogId { - fn dependency_keys(&self) -> impl 
IntoIterator { - [self.to_string_key()] - } - } - impl kvapi::Value for CatalogMeta { fn dependency_keys(&self) -> impl IntoIterator { [] diff --git a/src/meta/app/src/schema/catalog_name_ident.rs b/src/meta/app/src/schema/catalog_name_ident.rs new file mode 100644 index 0000000000000..0db190c21e3b1 --- /dev/null +++ b/src/meta/app/src/schema/catalog_name_ident.rs @@ -0,0 +1,64 @@ +// Copyright 2021 Datafuse Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::tenant_key::TIdent; + +/// The identifier of a catalog, +/// which is used as a key and does not support other codec method such as serde. +pub type CatalogNameIdent = TIdent; + +pub use kvapi_impl::Resource; + +mod kvapi_impl { + + use databend_common_meta_kvapi::kvapi; + use databend_common_meta_kvapi::kvapi::Key; + + use crate::schema::CatalogId; + use crate::tenant_key::TenantResource; + + pub struct Resource; + impl TenantResource for Resource { + const PREFIX: &'static str = "__fd_catalog"; + type ValueType = CatalogId; + } + + impl kvapi::Value for CatalogId { + fn dependency_keys(&self) -> impl IntoIterator { + [self.to_string_key()] + } + } + + // impl From> for ErrorCode { + // impl From> for ErrorCode { +} + +#[cfg(test)] +mod tests { + use databend_common_meta_kvapi::kvapi::Key; + + use super::CatalogNameIdent; + use crate::tenant::Tenant; + + #[test] + fn test_catalog_name_ident() { + let tenant = Tenant::new_literal("test"); + let ident = CatalogNameIdent::new(tenant, "test1"); + + let key = ident.to_string_key(); + assert_eq!(key, "__fd_catalog/test/test1"); + + assert_eq!(ident, CatalogNameIdent::from_str_key(&key).unwrap()); + } +} diff --git a/src/meta/app/src/schema/index.rs b/src/meta/app/src/schema/index.rs index 0f6b3dc8ac9ab..046e9b8a34d41 100644 --- a/src/meta/app/src/schema/index.rs +++ b/src/meta/app/src/schema/index.rs @@ -144,7 +144,12 @@ impl Display for CreateIndexReq { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self.create_option { CreateOption::Create => { - write!(f, "create_index:{}={:?}", self.name_ident.tenant, self.meta) + write!( + f, + "create_index:{}={:?}", + self.name_ident.tenant.display(), + self.meta + ) } CreateOption::CreateIfNotExists => write!( f, diff --git a/src/meta/app/src/schema/mod.rs b/src/meta/app/src/schema/mod.rs index 120ad6da43e7a..23f9d8108d783 100644 --- a/src/meta/app/src/schema/mod.rs +++ b/src/meta/app/src/schema/mod.rs @@ -15,6 +15,7 @@ //! 
Schema types pub mod catalog; +pub mod catalog_name_ident; mod create_option; mod database; mod index; @@ -23,7 +24,9 @@ mod lock; mod ownership; mod table; mod virtual_column; + pub use catalog::*; +pub use catalog_name_ident::CatalogNameIdent; pub use create_option::CreateOption; pub use database::CreateDatabaseReply; pub use database::CreateDatabaseReq; diff --git a/src/meta/app/src/tenant/mod.rs b/src/meta/app/src/tenant/mod.rs index ef2d46835f3be..ba7a7c7d1b72e 100644 --- a/src/meta/app/src/tenant/mod.rs +++ b/src/meta/app/src/tenant/mod.rs @@ -16,7 +16,9 @@ mod quota; #[allow(clippy::module_inception)] mod tenant; mod tenant_quota_ident; +mod tenant_serde; pub use quota::TenantQuota; pub use tenant::Tenant; pub use tenant_quota_ident::TenantQuotaIdent; +pub use tenant_serde::TenantSerde; diff --git a/src/meta/app/src/tenant/tenant.rs b/src/meta/app/src/tenant/tenant.rs index 248cd153e90f6..ff7cce949aef7 100644 --- a/src/meta/app/src/tenant/tenant.rs +++ b/src/meta/app/src/tenant/tenant.rs @@ -12,13 +12,15 @@ // See the License for the specific language governing permissions and // limitations under the License. +use std::fmt::Display; + +use databend_common_exception::ErrorCode; use databend_common_meta_types::NonEmptyString; /// Tenant is not stored directly in meta-store. /// /// It is just a type for use on the client side. -#[derive(Clone, Debug, PartialEq, Eq, Hash, derive_more::Display)] -#[display(fmt = "Tenant{{{tenant}}}")] +#[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct Tenant { // TODO: consider using NonEmptyString? pub tenant: String, @@ -31,6 +33,17 @@ impl Tenant { } } + pub fn new_or_error_code(tenant: impl ToString, ctx: impl Display) -> Result { + let non_empty = NonEmptyString::new(tenant.to_string()) + .map_err(|_e| ErrorCode::TenantIsEmpty(format!("Tenant is empty when {}", ctx)))?; + + let t = Self { + tenant: non_empty.as_str().to_string(), + }; + + Ok(t) + } + pub fn new_literal(tenant: &str) -> Self { debug_assert!(!tenant.is_empty()); Self { @@ -48,6 +61,14 @@ impl Tenant { pub fn name(&self) -> &str { &self.tenant } + + pub fn to_nonempty(&self) -> NonEmptyString { + NonEmptyString::new(self.tenant.clone()).unwrap() + } + + pub fn display(&self) -> impl Display { + format!("Tenant{}", self.tenant) + } } mod kvapi_key_impl { diff --git a/src/meta/app/src/tenant/tenant_serde.rs b/src/meta/app/src/tenant/tenant_serde.rs new file mode 100644 index 0000000000000..2aee53e0c302e --- /dev/null +++ b/src/meta/app/src/tenant/tenant_serde.rs @@ -0,0 +1,43 @@ +// Copyright 2021 Datafuse Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::tenant::Tenant; + +/// A duplicate of [`Tenant`] struct for transport with serde support. +/// +/// This struct is meant not to provide any functionality [`Tenant`] struct provides +/// and is only used for transport. 
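The new constructors added to `tenant.rs` above differ mainly in how they treat an empty tenant name: `new_literal` only debug-asserts, `new_nonempty` takes an already-validated `NonEmptyString`, and `new_or_error_code` returns an error that records the call site. Below is a minimal self-contained sketch of that behaviour; the types are hypothetical stand-ins (a plain `String` error in place of `ErrorCode`), not the real databend-common-meta-app items.

```rust
use std::fmt;

/// Hypothetical stand-in for databend's `Tenant`; only the constructor behaviour is mirrored.
#[derive(Clone, Debug, PartialEq, Eq)]
struct Tenant {
    tenant: String,
}

impl Tenant {
    /// Caller promises the name is non-empty; this is only checked in debug builds.
    fn new_literal(tenant: &str) -> Self {
        debug_assert!(!tenant.is_empty());
        Self { tenant: tenant.to_string() }
    }

    /// Validating constructor: an empty name becomes an error that mentions the context.
    fn new_or_error_code(tenant: impl ToString, ctx: impl fmt::Display) -> Result<Self, String> {
        let s = tenant.to_string();
        if s.is_empty() {
            return Err(format!("Tenant is empty when {ctx}"));
        }
        Ok(Self { tenant: s })
    }

    /// Accessor for call sites that previously held the tenant as a plain string.
    fn name(&self) -> &str {
        &self.tenant
    }
}

fn main() {
    let t = Tenant::new_literal("tenant1");
    assert_eq!(t.name(), "tenant1");

    // An empty tenant id is rejected up front instead of becoming an empty key segment later.
    assert!(Tenant::new_or_error_code("", "QueryConfig::try_into").is_err());
}
```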
+#[derive( + Clone, Debug, PartialEq, Eq, Hash, derive_more::Display, serde::Serialize, serde::Deserialize, +)] +#[display(fmt = "TenantSerde{{{tenant}}}")] +pub struct TenantSerde { + tenant: String, +} + +impl From for TenantSerde { + fn from(value: Tenant) -> Self { + Self { + tenant: value.tenant, + } + } +} + +impl From for Tenant { + fn from(value: TenantSerde) -> Self { + Tenant { + tenant: value.tenant, + } + } +} diff --git a/src/meta/app/src/tenant_key.rs b/src/meta/app/src/tenant_key.rs index 72528a557ed08..2093bf44ddafd 100644 --- a/src/meta/app/src/tenant_key.rs +++ b/src/meta/app/src/tenant_key.rs @@ -91,6 +91,11 @@ impl TIdent { pub fn name(&self) -> &str { &self.name } + + // + pub fn display(&self) -> impl fmt::Display + '_ { + format!("'{}'/'{}'", self.tenant.name(), self.name) + } } mod kvapi_key_impl { diff --git a/src/meta/proto-conv/src/catalog_from_to_protobuf_impl.rs b/src/meta/proto-conv/src/catalog_from_to_protobuf_impl.rs index 029415fc1915e..628494b3eea1a 100644 --- a/src/meta/proto-conv/src/catalog_from_to_protobuf_impl.rs +++ b/src/meta/proto-conv/src/catalog_from_to_protobuf_impl.rs @@ -30,32 +30,6 @@ use crate::Incompatible; use crate::MIN_READER_VER; use crate::VER; -impl FromToProto for mt::CatalogNameIdent { - type PB = pb::CatalogNameIdent; - fn get_pb_ver(p: &Self::PB) -> u64 { - p.ver - } - fn from_pb(p: pb::CatalogNameIdent) -> Result { - reader_check_msg(p.ver, p.min_reader_ver)?; - - let v = Self { - tenant: p.tenant, - catalog_name: p.catalog_name, - }; - Ok(v) - } - - fn to_pb(&self) -> Result { - let p = pb::CatalogNameIdent { - ver: VER, - min_reader_ver: MIN_READER_VER, - tenant: self.tenant.clone(), - catalog_name: self.catalog_name.clone(), - }; - Ok(p) - } -} - impl FromToProto for mt::CatalogMeta { type PB = pb::CatalogMeta; diff --git a/src/meta/proto-conv/src/from_to_protobuf.rs b/src/meta/proto-conv/src/from_to_protobuf.rs index e7338becabe2e..c8a7b208ec6ce 100644 --- a/src/meta/proto-conv/src/from_to_protobuf.rs +++ b/src/meta/proto-conv/src/from_to_protobuf.rs @@ -49,6 +49,14 @@ pub struct Incompatible { pub reason: String, } +impl Incompatible { + pub fn new(reason: impl Into) -> Self { + Self { + reason: reason.into(), + } + } +} + impl FromToProto for Arc where T: FromToProto { diff --git a/src/meta/proto-conv/src/lib.rs b/src/meta/proto-conv/src/lib.rs index fe903f66fa2d7..6b2edbccd901f 100644 --- a/src/meta/proto-conv/src/lib.rs +++ b/src/meta/proto-conv/src/lib.rs @@ -80,6 +80,7 @@ mod schema_from_to_protobuf_impl; mod share_from_to_protobuf_impl; mod stage_from_to_protobuf_impl; mod table_from_to_protobuf_impl; +mod tident_from_to_protobuf_impl; mod udf_from_to_protobuf_impl; mod user_from_to_protobuf_impl; mod util; diff --git a/src/meta/proto-conv/src/tident_from_to_protobuf_impl.rs b/src/meta/proto-conv/src/tident_from_to_protobuf_impl.rs new file mode 100644 index 0000000000000..309615addec9e --- /dev/null +++ b/src/meta/proto-conv/src/tident_from_to_protobuf_impl.rs @@ -0,0 +1,65 @@ +// Copyright 2021 Datafuse Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! This mod is the key point about compatibility. +//! Everytime update anything in this file, update the `VER` and let the tests pass. + +use databend_common_meta_app::tenant::Tenant; +use databend_common_meta_app::tenant_key as tk; +use databend_common_meta_app::tenant_key::TenantResource; +use databend_common_meta_app::KeyWithTenant; +use databend_common_meta_types::NonEmptyString; +use databend_common_protos::pb; + +use crate::reader_check_msg; +use crate::FromToProto; +use crate::Incompatible; +use crate::MIN_READER_VER; +use crate::VER; + +impl FromToProto for tk::TIdent +where R: TenantResource +{ + type PB = pb::TIdent; + fn get_pb_ver(p: &Self::PB) -> u64 { + p.ver + } + fn from_pb(p: pb::TIdent) -> Result { + reader_check_msg(p.ver, p.min_reader_ver)?; + + if p.tenant.is_empty() { + return Err(Incompatible { + reason: "CatalogName.tenant is empty".to_string(), + }); + } + + let non_empty = NonEmptyString::new(p.tenant.clone()) + .map_err(|_e| Incompatible::new("tenant is empty"))?; + + let tenant = Tenant::new_nonempty(non_empty); + + let v = Self::new(tenant, p.name); + Ok(v) + } + + fn to_pb(&self) -> Result { + let p = pb::TIdent { + ver: VER, + min_reader_ver: MIN_READER_VER, + tenant: self.tenant_name().to_string(), + name: self.name().to_string(), + }; + Ok(p) + } +} diff --git a/src/meta/protos/proto/catalog.proto b/src/meta/protos/proto/catalog.proto index c715ba7bdb54a..ab10c8bfc267a 100644 --- a/src/meta/protos/proto/catalog.proto +++ b/src/meta/protos/proto/catalog.proto @@ -18,7 +18,7 @@ package databend_proto; import "config.proto"; -message CatalogNameIdent { +message CatalogName { uint64 ver = 100; uint64 min_reader_ver = 101; diff --git a/src/meta/protos/proto/tenant.proto b/src/meta/protos/proto/tenant.proto new file mode 100644 index 0000000000000..517b5f6e87b79 --- /dev/null +++ b/src/meta/protos/proto/tenant.proto @@ -0,0 +1,30 @@ +// Copyright 2022 Datafuse Labs. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package databend_proto; + +// `[T]enant[Ident]` is a common meta-service key structure in form of `//`. +message TIdent { + uint64 ver = 100; + uint64 min_reader_ver = 101; + + // The user this record belongs to. + // It must not be empty. 
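With every tenant-scoped ident sharing the `TIdent` message above, the generic `FromToProto` impl for `TIdent` handles the wire round-trip once for all of them. A rough sketch of that round-trip follows, using hypothetical stand-in structs instead of the generated `pb` types and omitting the `reader_check_msg` version check; the one behaviour it keeps is that a message with an empty tenant is refused on decode rather than turned into an invalid key.

```rust
/// Hypothetical stand-in for the generated `pb::TIdent` message
/// (ver / min_reader_ver / tenant / name), for illustration only.
#[derive(Debug, Clone, PartialEq)]
struct PbTIdent {
    ver: u64,
    min_reader_ver: u64,
    tenant: String,
    name: String,
}

/// Hypothetical stand-in for an in-memory tenant-scoped ident (tenant + resource name).
#[derive(Debug, Clone, PartialEq)]
struct TIdent {
    tenant: String,
    name: String,
}

const VER: u64 = 1;
const MIN_READER_VER: u64 = 1;

/// Decode: an empty tenant cannot be represented by `Tenant`/`NonEmptyString`,
/// so it is rejected as incompatible instead of producing an invalid ident.
fn from_pb(p: PbTIdent) -> Result<TIdent, String> {
    if p.tenant.is_empty() {
        return Err("Incompatible: tenant is empty".to_string());
    }
    Ok(TIdent { tenant: p.tenant, name: p.name })
}

/// Encode: the wire form carries the writer version plus tenant and resource name.
fn to_pb(ident: &TIdent) -> PbTIdent {
    PbTIdent {
        ver: VER,
        min_reader_ver: MIN_READER_VER,
        tenant: ident.tenant.clone(),
        name: ident.name.clone(),
    }
}

fn main() {
    let ident = TIdent { tenant: "tenant1".to_string(), name: "catalog1".to_string() };
    assert_eq!(from_pb(to_pb(&ident)).unwrap(), ident);

    let empty = PbTIdent { ver: VER, min_reader_ver: MIN_READER_VER, tenant: String::new(), name: "x".to_string() };
    assert!(from_pb(empty).is_err());
}
```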
+ string tenant = 1; + + // Resource name + string name = 2; +} diff --git a/src/query/catalog/src/catalog/interface.rs b/src/query/catalog/src/catalog/interface.rs index d6534248383c1..d2b68a423af57 100644 --- a/src/query/catalog/src/catalog/interface.rs +++ b/src/query/catalog/src/catalog/interface.rs @@ -86,6 +86,7 @@ use databend_common_meta_app::schema::UpdateVirtualColumnReq; use databend_common_meta_app::schema::UpsertTableOptionReply; use databend_common_meta_app::schema::UpsertTableOptionReq; use databend_common_meta_app::schema::VirtualColumnMeta; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_types::MetaId; use dyn_clone::DynClone; @@ -120,7 +121,7 @@ pub trait Catalog: DynClone + Send + Sync + Debug { async fn get_database(&self, tenant: &str, db_name: &str) -> Result>; // Get all the databases. - async fn list_databases(&self, tenant: &str) -> Result>>; + async fn list_databases(&self, tenant: &Tenant) -> Result>>; // Operation with database. async fn create_database(&self, req: CreateDatabaseReq) -> Result; diff --git a/src/query/catalog/src/catalog/manager.rs b/src/query/catalog/src/catalog/manager.rs index c0f9b76bd80a4..535ad24bb2e38 100644 --- a/src/query/catalog/src/catalog/manager.rs +++ b/src/query/catalog/src/catalog/manager.rs @@ -33,8 +33,10 @@ use databend_common_meta_app::schema::DropCatalogReq; use databend_common_meta_app::schema::GetCatalogReq; use databend_common_meta_app::schema::HiveCatalogOption; use databend_common_meta_app::schema::ListCatalogReq; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_store::MetaStore; use databend_common_meta_store::MetaStoreProvider; +use databend_common_meta_types::NonEmptyString; use databend_storages_common_txn::TxnManagerRef; use super::Catalog; @@ -99,11 +101,7 @@ impl CatalogManager { })?; let ctl = creator.try_create(&CatalogInfo { id: CatalogId { catalog_id: 0 }.into(), - name_ident: CatalogNameIdent { - tenant: tenant.to_string(), - catalog_name: name.clone(), - } - .into(), + name_ident: CatalogNameIdent::new(tenant.clone(), name).into(), meta: CatalogMeta { catalog_option: CatalogOption::Hive(HiveCatalogOption { address: hive_ctl_cfg.metastore_address.clone(), @@ -161,6 +159,7 @@ impl CatalogManager { #[async_backtrace::framed] pub async fn get_catalog( &self, + // TODO: use Tenant or NonEmptyString tenant: &str, catalog_name: &str, txn_mgr: TxnManagerRef, @@ -173,11 +172,14 @@ impl CatalogManager { return Ok(ctl.clone()); } + let non_empty = NonEmptyString::new(tenant) + .map_err(|_e| ErrorCode::TenantIsEmpty("tenant is empty when get_catalog"))?; + + let tenant = Tenant::new_nonempty(non_empty); + let ident = CatalogNameIdent::new(tenant, catalog_name); + // Get catalog from metasrv. - let info = self - .meta - .get_catalog(GetCatalogReq::new(tenant, catalog_name)) - .await?; + let info = self.meta.get_catalog(GetCatalogReq::new(ident)).await?; self.build_catalog(&info) } @@ -213,7 +215,7 @@ impl CatalogManager { /// Trying to drop default catalog will return an error. 
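The `get_catalog` and `list_catalogs` hunks above now build lookup keys through `CatalogNameIdent::new(tenant, catalog_name)` instead of struct literals. Below is a rough sketch of the key shape that produces, using hand-rolled stand-ins for `Tenant` and the `TIdent`-based ident (the real `KeyBuilder` also escapes path segments); the `__fd_catalog/test/test1` form matches the unit test added in `catalog_name_ident.rs`.

```rust
/// Stand-in for `Tenant`: a validated, non-empty tenant name.
#[derive(Clone, Debug)]
struct Tenant(String);

/// Stand-in for `CatalogNameIdent`, i.e. a tenant-scoped ident with prefix `__fd_catalog`.
struct CatalogNameIdent {
    tenant: Tenant,
    name: String,
}

impl CatalogNameIdent {
    fn new(tenant: Tenant, name: impl ToString) -> Self {
        Self { tenant, name: name.to_string() }
    }

    /// Simplified key encoding: prefix, then tenant, then catalog name.
    fn to_string_key(&self) -> String {
        format!("__fd_catalog/{}/{}", self.tenant.0, self.name)
    }
}

fn main() {
    let ident = CatalogNameIdent::new(Tenant("test".to_string()), "test1");
    assert_eq!(ident.to_string_key(), "__fd_catalog/test/test1");

    // Listing all catalogs of a tenant uses an empty name, i.e. the tenant's key prefix.
    let prefix = CatalogNameIdent::new(Tenant("test".to_string()), "");
    assert_eq!(prefix.to_string_key(), "__fd_catalog/test/");
}
```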
#[async_backtrace::framed] pub async fn drop_catalog(&self, req: DropCatalogReq) -> Result<()> { - let catalog_name = &req.name_ident.catalog_name; + let catalog_name = req.name_ident.name(); if catalog_name == CATALOG_DEFAULT { return Err(ErrorCode::BadArguments( diff --git a/src/query/catalog/src/catalog/session_catalog.rs b/src/query/catalog/src/catalog/session_catalog.rs index 8c96a05d5fe96..c5ee3de951728 100644 --- a/src/query/catalog/src/catalog/session_catalog.rs +++ b/src/query/catalog/src/catalog/session_catalog.rs @@ -85,6 +85,7 @@ use databend_common_meta_app::schema::UpdateVirtualColumnReq; use databend_common_meta_app::schema::UpsertTableOptionReply; use databend_common_meta_app::schema::UpsertTableOptionReq; use databend_common_meta_app::schema::VirtualColumnMeta; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_types::MetaId; use databend_storages_common_txn::TxnManagerRef; use databend_storages_common_txn::TxnState; @@ -129,7 +130,7 @@ impl Catalog for SessionCatalog { } // Get all the databases. - async fn list_databases(&self, tenant: &str) -> Result>> { + async fn list_databases(&self, tenant: &Tenant) -> Result>> { self.inner.list_databases(tenant).await } diff --git a/src/query/catalog/src/table_context.rs b/src/query/catalog/src/table_context.rs index 052bd59b0c430..9ea0c6af760ab 100644 --- a/src/query/catalog/src/table_context.rs +++ b/src/query/catalog/src/table_context.rs @@ -37,7 +37,7 @@ use databend_common_meta_app::principal::OnErrorMode; use databend_common_meta_app::principal::RoleInfo; use databend_common_meta_app::principal::UserDefinedConnection; use databend_common_meta_app::principal::UserInfo; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use databend_common_pipeline_core::processors::PlanProfile; use databend_common_pipeline_core::InputError; use databend_common_settings::Settings; @@ -180,7 +180,7 @@ pub trait TableContext: Send + Sync { async fn get_visibility_checker(&self) -> Result; fn get_fuse_version(&self) -> String; fn get_format_settings(&self) -> Result; - fn get_tenant(&self) -> NonEmptyString; + fn get_tenant(&self) -> Tenant; /// Get the kind of session running query. 
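With `TableContext::get_tenant` returning `Tenant` instead of `NonEmptyString`, call sites that still need a `&str` (for example `catalog.get_table(...)` in the hunks further down) go through `.name()` rather than `.as_str()` or `.to_string()`. A small sketch of that calling pattern, with hypothetical stand-ins for `Tenant` and the relevant slice of `TableContext`:

```rust
/// Stand-in for the validated tenant type.
#[derive(Clone, Debug)]
struct Tenant(String);

impl Tenant {
    fn name(&self) -> &str {
        &self.0
    }
}

/// Stand-in for the relevant slice of `TableContext`: `get_tenant` now hands out a `Tenant`.
trait TableContext {
    fn get_tenant(&self) -> Tenant;
}

struct Ctx {
    tenant: Tenant,
}

impl TableContext for Ctx {
    fn get_tenant(&self) -> Tenant {
        self.tenant.clone()
    }
}

/// A caller that still needs `&str` pieces reaches them via `.name()`.
fn describe(ctx: &dyn TableContext, db: &str, table: &str) -> String {
    format!("{}.{}.{}", ctx.get_tenant().name(), db, table)
}

fn main() {
    let ctx = Ctx { tenant: Tenant("tenant1".to_string()) };
    assert_eq!(describe(&ctx, "db1", "t1"), "tenant1.db1.t1");
}
```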
fn get_query_kind(&self) -> QueryKind; fn get_function_context(&self) -> Result; diff --git a/src/query/config/Cargo.toml b/src/query/config/Cargo.toml index 14927b55ba25b..9b798567d933f 100644 --- a/src/query/config/Cargo.toml +++ b/src/query/config/Cargo.toml @@ -20,7 +20,6 @@ databend-common-base = { path = "../../common/base" } databend-common-exception = { path = "../../common/exception" } databend-common-grpc = { path = "../../common/grpc" } databend-common-meta-app = { path = "../../meta/app" } -databend-common-meta-types = { path = "../../meta/types" } databend-common-storage = { path = "../../common/storage" } databend-common-tracing = { path = "../../common/tracing" } databend-common-users = { path = "../users" } diff --git a/src/query/config/src/config.rs b/src/query/config/src/config.rs index b00cdce52a3fd..4eef317f20f1b 100644 --- a/src/query/config/src/config.rs +++ b/src/query/config/src/config.rs @@ -42,8 +42,8 @@ use databend_common_meta_app::storage::StorageOssConfig as InnerStorageOssConfig use databend_common_meta_app::storage::StorageParams; use databend_common_meta_app::storage::StorageS3Config as InnerStorageS3Config; use databend_common_meta_app::storage::StorageWebhdfsConfig as InnerStorageWebhdfsConfig; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_app::tenant::TenantQuota; -use databend_common_meta_types::NonEmptyString; use databend_common_storage::StorageConfig as InnerStorageConfig; use databend_common_tracing::Config as InnerLogConfig; use databend_common_tracing::FileConfig as InnerFileLogConfig; @@ -1671,7 +1671,7 @@ impl TryInto for QueryConfig { fn try_into(self) -> Result { Ok(InnerQueryConfig { - tenant_id: NonEmptyString::new(self.tenant_id) + tenant_id: Tenant::new_or_error_code(self.tenant_id, "") .map_err(|_e| ErrorCode::InvalidConfig("tenant-id can not be empty"))?, cluster_id: self.cluster_id, node_id: "".to_string(), @@ -1752,7 +1752,7 @@ impl TryInto for QueryConfig { impl From for QueryConfig { fn from(inner: InnerQueryConfig) -> Self { Self { - tenant_id: inner.tenant_id.to_string(), + tenant_id: inner.tenant_id.name().to_string(), cluster_id: inner.cluster_id, num_cpus: inner.num_cpus, mysql_handler_host: inner.mysql_handler_host, diff --git a/src/query/config/src/inner.rs b/src/query/config/src/inner.rs index aed7150b00bab..f46fce6600da2 100644 --- a/src/query/config/src/inner.rs +++ b/src/query/config/src/inner.rs @@ -27,8 +27,8 @@ use databend_common_exception::Result; use databend_common_grpc::RpcClientConf; use databend_common_grpc::RpcClientTlsConfig; use databend_common_meta_app::principal::UserSettingValue; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_app::tenant::TenantQuota; -use databend_common_meta_types::NonEmptyString; use databend_common_storage::StorageConfig; use databend_common_tracing::Config as LogConfig; use databend_common_users::idm_config::IDMConfig; @@ -148,7 +148,7 @@ impl Debug for InnerConfig { #[derive(Clone, PartialEq, Eq, Debug)] pub struct QueryConfig { /// Tenant id for get the information from the MetaSrv. - pub tenant_id: NonEmptyString, + pub tenant_id: Tenant, /// ID for construct the cluster. pub cluster_id: String, // ID for the query node. 
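`QueryConfig::tenant_id` in the inner config is now a `Tenant`, so the outer (serde-facing) config validates the tenant id once during conversion and recovers the string form via `.name()` on the way back out. Below is a self-contained sketch of that round-trip under those assumptions, with stand-in config structs and a plain `String` error in place of `ErrorCode::InvalidConfig`.

```rust
/// Stand-in for the validated tenant type stored in the inner config.
#[derive(Clone, Debug, PartialEq)]
struct Tenant(String);

impl Tenant {
    fn name(&self) -> &str {
        &self.0
    }
}

/// Outer, serde-facing config keeps the tenant as a plain string.
struct OuterQueryConfig {
    tenant_id: String,
}

/// Inner config stores a `Tenant`, so downstream code never sees an empty tenant id.
struct InnerQueryConfig {
    tenant_id: Tenant,
}

impl TryFrom<OuterQueryConfig> for InnerQueryConfig {
    type Error = String;

    fn try_from(outer: OuterQueryConfig) -> Result<Self, Self::Error> {
        if outer.tenant_id.is_empty() {
            // Mirrors mapping the construction failure to an invalid-config error.
            return Err("InvalidConfig: tenant-id can not be empty".to_string());
        }
        Ok(Self { tenant_id: Tenant(outer.tenant_id) })
    }
}

impl From<InnerQueryConfig> for OuterQueryConfig {
    fn from(inner: InnerQueryConfig) -> Self {
        // Going back out, the string form is recovered via `.name()`.
        Self { tenant_id: inner.tenant_id.name().to_string() }
    }
}

fn main() {
    let inner = InnerQueryConfig::try_from(OuterQueryConfig { tenant_id: "admin".into() }).unwrap();
    assert_eq!(inner.tenant_id.name(), "admin");
    assert!(InnerQueryConfig::try_from(OuterQueryConfig { tenant_id: String::new() }).is_err());
}
```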
@@ -237,7 +237,7 @@ pub struct QueryConfig { impl Default for QueryConfig { fn default() -> Self { Self { - tenant_id: NonEmptyString::new("admin").unwrap(), + tenant_id: Tenant::new_or_error_code("admin", "default()").unwrap(), cluster_id: "".to_string(), node_id: "".to_string(), num_cpus: 0, diff --git a/src/query/ee/src/background_service/background_service_handler.rs b/src/query/ee/src/background_service/background_service_handler.rs index 6c4bfdde52e92..b934dec3d1c3f 100644 --- a/src/query/ee/src/background_service/background_service_handler.rs +++ b/src/query/ee/src/background_service/background_service_handler.rs @@ -37,7 +37,6 @@ use databend_common_meta_app::background::UpdateBackgroundJobStatusReq; use databend_common_meta_app::principal::UserIdentity; use databend_common_meta_app::tenant::Tenant; use databend_common_meta_store::MetaStore; -use databend_common_meta_types::NonEmptyString; use databend_common_users::UserApiProvider; use databend_enterprise_background_service::background_service::BackgroundServiceHandlerWrapper; use databend_enterprise_background_service::BackgroundServiceHandler; @@ -170,12 +169,10 @@ impl RealBackgroundService { params: BackgroundJobParams, creator: UserIdentity, ) -> Result { - let non_empty = NonEmptyString::new(&conf.query.tenant_id).map_err(|_e| { - ErrorCode::TenantIsEmpty("conf.query.tenant is empty when create_compactor_job") - })?; - let tenant = Tenant::new_nonempty(non_empty); + let tenant = conf.query.tenant_id.clone(); - let name = RealBackgroundService::get_compactor_job_name(conf.query.tenant_id.to_string()); + let name = + RealBackgroundService::get_compactor_job_name(conf.query.tenant_id.name().to_string()); let id = BackgroundJobIdent::new(tenant, name); let info = BackgroundJobInfo::new_compactor_job(params, creator); diff --git a/src/query/ee/src/background_service/compaction_job.rs b/src/query/ee/src/background_service/compaction_job.rs index 7c628362f8b72..42bb84b9a6fb5 100644 --- a/src/query/ee/src/background_service/compaction_job.rs +++ b/src/query/ee/src/background_service/compaction_job.rs @@ -26,7 +26,6 @@ use databend_common_base::base::tokio::sync::Mutex; use databend_common_base::base::tokio::time::Instant; use databend_common_base::base::uuid::Uuid; use databend_common_config::InnerConfig; -use databend_common_exception::ErrorCode; use databend_common_exception::Result; use databend_common_meta_api::BackgroundApi; use databend_common_meta_app::background::BackgroundJobIdent; @@ -43,10 +42,8 @@ use databend_common_meta_app::background::UpdateBackgroundJobParamsReq; use databend_common_meta_app::background::UpdateBackgroundJobStatusReq; use databend_common_meta_app::background::UpdateBackgroundTaskReq; use databend_common_meta_app::schema::TableStatistics; -use databend_common_meta_app::tenant::Tenant; use databend_common_meta_app::KeyWithTenant; use databend_common_meta_store::MetaStore; -use databend_common_meta_types::NonEmptyString; use databend_common_users::UserApiProvider; use databend_query::sessions::QueryContext; use databend_query::sessions::Session; @@ -149,10 +146,7 @@ impl CompactionJob { name: impl ToString, finish_tx: Arc>>, ) -> Result { - let non_empty = NonEmptyString::new(&config.query.tenant_id).map_err(|_e| { - ErrorCode::TenantIsEmpty("config.query.tenant_id is empty when CompactionJob::create()") - })?; - let tenant = Tenant::new_nonempty(non_empty); + let tenant = config.query.tenant_id.clone(); let creator = BackgroundJobIdent::new(tenant, name); diff --git 
a/src/query/ee/src/background_service/session.rs b/src/query/ee/src/background_service/session.rs index 67d65f0955f37..8dedc55113e0b 100644 --- a/src/query/ee/src/background_service/session.rs +++ b/src/query/ee/src/background_service/session.rs @@ -39,7 +39,7 @@ pub fn get_background_service_user(conf: &InnerConfig) -> UserInfo { let mut user = UserInfo::new_no_auth( format!( "{}-{}-background-svc", - conf.query.tenant_id.clone(), + conf.query.tenant_id.name(), conf.query.cluster_id.clone() ) .as_str(), diff --git a/src/query/ee/src/enterprise_services.rs b/src/query/ee/src/enterprise_services.rs index 7d42aa4a6358d..26f5d1b74c5c6 100644 --- a/src/query/ee/src/enterprise_services.rs +++ b/src/query/ee/src/enterprise_services.rs @@ -30,7 +30,7 @@ pub struct EnterpriseServices; impl EnterpriseServices { #[async_backtrace::framed] pub async fn init(cfg: InnerConfig) -> Result<()> { - RealLicenseManager::init(cfg.query.tenant_id.to_string())?; + RealLicenseManager::init(cfg.query.tenant_id.name().to_string())?; RealStorageEncryptionHandler::init(&cfg)?; RealVacuumHandler::init()?; RealAggregatingIndexHandler::init()?; diff --git a/src/query/ee/src/stream/handler.rs b/src/query/ee/src/stream/handler.rs index 5cd2c2ade4126..1e3fb5d254ca2 100644 --- a/src/query/ee/src/stream/handler.rs +++ b/src/query/ee/src/stream/handler.rs @@ -64,7 +64,7 @@ impl StreamHandler for RealStreamHandler { let catalog = ctx.get_catalog(&plan.catalog).await?; let mut table = catalog - .get_table(tenant.as_str(), &plan.table_database, &plan.table_name) + .get_table(tenant.name(), &plan.table_database, &plan.table_name) .await?; let table_info = table.get_table_info(); if table_info.options().contains_key("TRANSIENT") { @@ -95,7 +95,7 @@ impl StreamHandler for RealStreamHandler { }; catalog - .upsert_table_option(tenant.as_str(), &plan.table_database, req) + .upsert_table_option(tenant.name(), &plan.table_database, req) .await?; // refreash table. 
table = table.refresh(ctx.as_ref()).await?; @@ -104,7 +104,7 @@ impl StreamHandler for RealStreamHandler { let (version, snapshot_location) = match &plan.navigation { Some(StreamNavigation::AtStream { database, name }) => { - let stream = catalog.get_table(tenant.as_str(), database, name).await?; + let stream = catalog.get_table(tenant.name(), database, name).await?; let stream = StreamTable::try_from_table(stream.as_ref())?; let stream_opts = stream.get_table_info().options(); let stream_table_name = stream_opts @@ -222,7 +222,7 @@ impl StreamHandler for RealStreamHandler { let catalog = ctx.get_catalog(&plan.catalog).await?; let tenant = ctx.get_tenant(); let tbl = catalog - .get_table(tenant.as_str(), &db_name, &stream_name) + .get_table(tenant.name(), &db_name, &stream_name) .await .ok(); @@ -239,12 +239,12 @@ impl StreamHandler for RealStreamHandler { ))); } - let db = catalog.get_database(tenant.as_str(), &db_name).await?; + let db = catalog.get_database(tenant.name(), &db_name).await?; catalog .drop_table_by_id(DropTableByIdReq { if_exists: plan.if_exists, - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), table_name: stream_name.clone(), tb_id: table.get_id(), db_id: db.get_db_info().ident.db_id, diff --git a/src/query/ee/src/test_kits/mock_services.rs b/src/query/ee/src/test_kits/mock_services.rs index e78b1c56fbf8a..74b1585bbcc3a 100644 --- a/src/query/ee/src/test_kits/mock_services.rs +++ b/src/query/ee/src/test_kits/mock_services.rs @@ -31,7 +31,7 @@ pub struct MockServices; impl MockServices { #[async_backtrace::framed] pub async fn init(cfg: &InnerConfig, public_key: String) -> Result<()> { - let rm = RealLicenseManager::new(cfg.query.tenant_id.to_string(), public_key); + let rm = RealLicenseManager::new(cfg.query.tenant_id.name().to_string(), public_key); let wrapper = LicenseManagerWrapper { manager: Box::new(rm), }; diff --git a/src/query/ee/tests/it/aggregating_index/index_refresh.rs b/src/query/ee/tests/it/aggregating_index/index_refresh.rs index 4922ab66df85c..263f3fef89b63 100644 --- a/src/query/ee/tests/it/aggregating_index/index_refresh.rs +++ b/src/query/ee/tests/it/aggregating_index/index_refresh.rs @@ -32,8 +32,6 @@ use databend_common_meta_app::schema::CreateIndexReq; use databend_common_meta_app::schema::IndexMeta; use databend_common_meta_app::schema::IndexNameIdent; use databend_common_meta_app::schema::IndexType; -use databend_common_meta_app::tenant::Tenant; -use databend_common_meta_types::NonEmptyString; use databend_common_sql::plans::Plan; use databend_common_sql::AggregatingIndexRewriter; use databend_common_sql::Planner; @@ -547,13 +545,7 @@ async fn create_index( if let Plan::CreateIndex(plan) = plan { let catalog = ctx.get_catalog("default").await?; - let tenant_name = ctx.get_tenant(); - - let non_empty = NonEmptyString::new(tenant_name.to_string()).map_err(|_| { - ErrorCode::TenantIsEmpty("Tenant is empty(when create_index)".to_string()) - })?; - - let tenant = Tenant::new_nonempty(non_empty); + let tenant = ctx.get_tenant(); let create_index_req = CreateIndexReq { create_option: plan.create_option, diff --git a/src/query/ee/tests/it/inverted_index/pruning.rs b/src/query/ee/tests/it/inverted_index/pruning.rs index e2e682730d7f8..8df7b9b7b9ca9 100644 --- a/src/query/ee/tests/it/inverted_index/pruning.rs +++ b/src/query/ee/tests/it/inverted_index/pruning.rs @@ -104,7 +104,7 @@ async fn test_block_pruner() -> Result<()> { let create_table_plan = CreateTablePlan { catalog: "default".to_owned(), create_option: CreateOption::Create, - 
tenant: fixture.default_tenant(), + tenant: fixture.default_tenant().name().to_string(), database: fixture.default_db_name(), table: test_tbl_name.to_string(), schema: test_schema.clone(), @@ -131,7 +131,7 @@ async fn test_block_pruner() -> Result<()> { let catalog = ctx.get_catalog("default").await?; let table = catalog .get_table( - fixture.default_tenant().as_str(), + fixture.default_tenant().name(), fixture.default_db_name().as_str(), test_tbl_name, ) @@ -391,7 +391,7 @@ async fn test_block_pruner() -> Result<()> { let table = catalog .get_table( - fixture.default_tenant().as_str(), + fixture.default_tenant().name(), fixture.default_db_name().as_str(), test_tbl_name, ) diff --git a/src/query/ee/tests/it/license/license_mgr.rs b/src/query/ee/tests/it/license/license_mgr.rs index 9dfaacf021d1f..f5718f06f0cd1 100644 --- a/src/query/ee/tests/it/license/license_mgr.rs +++ b/src/query/ee/tests/it/license/license_mgr.rs @@ -43,7 +43,7 @@ async fn test_parse_license() -> databend_common_exception::Result<()> { let key_pair = ES256KeyPair::generate(); let license_mgr = RealLicenseManager::new( - fixture.default_tenant(), + fixture.default_tenant().name().to_string(), key_pair.public_key().to_pem().unwrap(), ); let claims = Claims::with_custom_claims( @@ -91,7 +91,7 @@ async fn test_license_features() -> databend_common_exception::Result<()> { let key_pair = ES256KeyPair::generate(); let license_mgr = RealLicenseManager::new( - fixture.default_tenant(), + fixture.default_tenant().name().to_string(), key_pair.public_key().to_pem().unwrap(), ); let claims = Claims::with_custom_claims( diff --git a/src/query/formats/Cargo.toml b/src/query/formats/Cargo.toml index a589edbf16d06..06166c08ed0f6 100644 --- a/src/query/formats/Cargo.toml +++ b/src/query/formats/Cargo.toml @@ -33,7 +33,6 @@ databend-common-exception = { path = "../../common/exception" } databend-common-expression = { path = "../expression" } databend-common-io = { path = "../../common/io" } databend-common-meta-app = { path = "../../meta/app" } -databend-common-meta-types = { path = "../../meta/types" } databend-common-settings = { path = "../settings" } databend-storages-common-blocks = { path = "../storages/common/blocks" } databend-storages-common-table-meta = { path = "../storages/common/table_meta" } diff --git a/src/query/formats/tests/it/main.rs b/src/query/formats/tests/it/main.rs index 219c9cf3b88d7..c940d1beb0e86 100644 --- a/src/query/formats/tests/it/main.rs +++ b/src/query/formats/tests/it/main.rs @@ -17,7 +17,7 @@ use databend_common_expression::TableSchemaRef; use databend_common_formats::output_format::OutputFormat; use databend_common_formats::ClickhouseFormatType; use databend_common_formats::FileFormatOptionsExt; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use databend_common_settings::Settings; mod field_decoder; @@ -31,6 +31,6 @@ fn get_output_format_clickhouse( schema: TableSchemaRef, ) -> Result> { let format = ClickhouseFormatType::parse_clickhouse_format(format_name)?; - let settings = Settings::create(NonEmptyString::new("default").unwrap()); + let settings = Settings::create(Tenant::new_literal("default")); FileFormatOptionsExt::get_output_format_from_clickhouse_format(format, schema, &settings) } diff --git a/src/query/formats/tests/it/output_format_tcsv.rs b/src/query/formats/tests/it/output_format_tcsv.rs index 35929a3b4cffb..0ac8f672ee3c4 100644 --- a/src/query/formats/tests/it/output_format_tcsv.rs +++ b/src/query/formats/tests/it/output_format_tcsv.rs @@ 
-23,7 +23,7 @@ use databend_common_expression::TableField; use databend_common_formats::FileFormatOptionsExt; use databend_common_meta_app::principal::FileFormatOptionsAst; use databend_common_meta_app::principal::FileFormatParams; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use databend_common_settings::Settings; use pretty_assertions::assert_eq; @@ -63,7 +63,7 @@ fn test_data_block(is_nullable: bool) -> Result<()> { } { - let settings = Settings::create(NonEmptyString::new("default").unwrap()); + let settings = Settings::create(Tenant::new_literal("default")); let mut options = BTreeMap::::new(); options.insert("type".to_string(), "csv".to_string()); options.insert("field_delimiter".to_string(), "$".to_string()); @@ -126,7 +126,7 @@ fn test_data_block_not_nullable() -> Result<()> { fn test_field_delimiter_with_ascii_control_code() -> Result<()> { let (schema, block) = get_simple_block(false); - let settings = Settings::create(NonEmptyString::new("default").unwrap()); + let settings = Settings::create(Tenant::new_literal("default")); let mut options = BTreeMap::::new(); options.insert("type".to_string(), "csv".to_string()); options.insert("field_delimiter".to_string(), "\x01".to_string()); diff --git a/src/query/management/src/quota/quota_mgr.rs b/src/query/management/src/quota/quota_mgr.rs index 3e60e40c92c7d..65982e35d88c8 100644 --- a/src/query/management/src/quota/quota_mgr.rs +++ b/src/query/management/src/quota/quota_mgr.rs @@ -25,7 +25,6 @@ use databend_common_meta_types::IntoSeqV; use databend_common_meta_types::MatchSeq; use databend_common_meta_types::MatchSeqExt; use databend_common_meta_types::MetaError; -use databend_common_meta_types::NonEmptyString; use databend_common_meta_types::SeqV; use databend_common_meta_types::UpsertKV; use databend_common_meta_types::With; @@ -38,13 +37,10 @@ pub struct QuotaMgr { } impl QuotaMgr { - pub fn create( - kv_api: Arc>, - tenant: &NonEmptyString, - ) -> Self { + pub fn create(kv_api: Arc>, tenant: &Tenant) -> Self { QuotaMgr { kv_api, - ident: TenantQuotaIdent::new(Tenant::new_nonempty(tenant.clone())), + ident: TenantQuotaIdent::new(tenant.clone()), } } diff --git a/src/query/management/src/role/role_mgr.rs b/src/query/management/src/role/role_mgr.rs index 84f06b54ebfc2..6540213489660 100644 --- a/src/query/management/src/role/role_mgr.rs +++ b/src/query/management/src/role/role_mgr.rs @@ -37,8 +37,6 @@ use databend_common_meta_types::ConditionResult::Eq; use databend_common_meta_types::MatchSeq; use databend_common_meta_types::MatchSeqExt; use databend_common_meta_types::MetaError; -use databend_common_meta_types::NonEmptyStr; -use databend_common_meta_types::NonEmptyString; use databend_common_meta_types::Operation; use databend_common_meta_types::SeqV; use databend_common_meta_types::TxnRequest; @@ -56,17 +54,17 @@ static BUILTIN_ROLE_ACCOUNT_ADMIN: &str = "account_admin"; pub struct RoleMgr { kv_api: Arc + Send + Sync>, - tenant: NonEmptyString, + tenant: Tenant, } impl RoleMgr { pub fn create( kv_api: Arc + Send + Sync>, - tenant: NonEmptyStr, + tenant: &Tenant, ) -> Self { RoleMgr { kv_api, - tenant: tenant.into(), + tenant: tenant.clone(), } } @@ -106,7 +104,7 @@ impl RoleMgr { /// Build meta-service for a role grantee, which is a tenant's database, table, stage, udf, etc. 
fn ownership_object_key(&self, object: &OwnershipObject) -> String { - let grantee = TenantOwnershipObject::new(Tenant::new(self.tenant.as_str()), object.clone()); + let grantee = TenantOwnershipObject::new(self.tenant.clone(), object.clone()); grantee.to_string_key() } @@ -117,20 +115,17 @@ impl RoleMgr { let dummy = OwnershipObject::UDF { name: "dummy".to_string(), }; - let grantee = TenantOwnershipObject::new(Tenant::new_nonempty(self.tenant.clone()), dummy); + let grantee = TenantOwnershipObject::new(self.tenant.clone(), dummy); grantee.tenant_prefix() } fn role_key(&self, role: &str) -> String { - let r = RoleIdent::new(Tenant::new_nonempty(self.tenant.clone()), role.to_string()); + let r = RoleIdent::new(self.tenant.clone(), role.to_string()); r.to_string_key() } fn role_prefix(&self) -> String { - let r = RoleIdent::new( - Tenant::new_nonempty(self.tenant.clone()), - "dummy".to_string(), - ); + let r = RoleIdent::new(self.tenant.clone(), "dummy".to_string()); r.tenant_prefix() } } diff --git a/src/query/management/src/setting/setting_mgr.rs b/src/query/management/src/setting/setting_mgr.rs index 9fc74217c311d..fd86222880248 100644 --- a/src/query/management/src/setting/setting_mgr.rs +++ b/src/query/management/src/setting/setting_mgr.rs @@ -26,7 +26,6 @@ use databend_common_meta_types::IntoSeqV; use databend_common_meta_types::MatchSeq; use databend_common_meta_types::MatchSeqExt; use databend_common_meta_types::MetaError; -use databend_common_meta_types::NonEmptyString; use databend_common_meta_types::Operation; use databend_common_meta_types::SeqV; use databend_common_meta_types::SeqValue; @@ -39,13 +38,10 @@ pub struct SettingMgr { } impl SettingMgr { - pub fn create( - kv_api: Arc>, - tenant: &NonEmptyString, - ) -> Self { + pub fn create(kv_api: Arc>, tenant: &Tenant) -> Self { SettingMgr { kv_api, - tenant: Tenant::new_nonempty(tenant.clone()), + tenant: tenant.clone(), } } diff --git a/src/query/management/src/stage/stage_mgr.rs b/src/query/management/src/stage/stage_mgr.rs index a1b3ce877ff73..93c32aaf6aaa7 100644 --- a/src/query/management/src/stage/stage_mgr.rs +++ b/src/query/management/src/stage/stage_mgr.rs @@ -35,7 +35,6 @@ use databend_common_meta_kvapi::kvapi::Key; use databend_common_meta_types::ConditionResult::Eq; use databend_common_meta_types::MatchSeq; use databend_common_meta_types::MetaError; -use databend_common_meta_types::NonEmptyString; use databend_common_meta_types::TxnOp; use databend_common_meta_types::TxnRequest; use databend_common_meta_types::With; @@ -53,13 +52,10 @@ pub struct StageMgr { } impl StageMgr { - pub fn create( - kv_api: Arc>, - tenant: &NonEmptyString, - ) -> Self { + pub fn create(kv_api: Arc>, tenant: &Tenant) -> Self { StageMgr { kv_api, - tenant: Tenant::new_nonempty(tenant.clone()), + tenant: tenant.clone(), } } diff --git a/src/query/management/src/udf/udf_mgr.rs b/src/query/management/src/udf/udf_mgr.rs index 6eac4633769c6..7cc3dac499f8f 100644 --- a/src/query/management/src/udf/udf_mgr.rs +++ b/src/query/management/src/udf/udf_mgr.rs @@ -21,13 +21,12 @@ use databend_common_meta_api::kv_pb_api::UpsertPB; use databend_common_meta_app::principal::UdfName; use databend_common_meta_app::principal::UserDefinedFunction; use databend_common_meta_app::schema::CreateOption; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_kvapi::kvapi; use databend_common_meta_kvapi::kvapi::DirName; use databend_common_meta_kvapi::kvapi::Key; use databend_common_meta_types::MatchSeq; use databend_common_meta_types::MetaError; -use 
databend_common_meta_types::NonEmptyStr; -use databend_common_meta_types::NonEmptyString; use databend_common_meta_types::SeqV; use databend_common_meta_types::With; use futures::TryStreamExt; @@ -37,14 +36,14 @@ use crate::udf::UdfError; pub struct UdfMgr { kv_api: Arc>, - tenant: NonEmptyString, + tenant: Tenant, } impl UdfMgr { - pub fn create(kv_api: Arc>, tenant: NonEmptyStr) -> Self { + pub fn create(kv_api: Arc>, tenant: &Tenant) -> Self { UdfMgr { kv_api, - tenant: tenant.into(), + tenant: tenant.clone(), } } @@ -62,14 +61,14 @@ impl UdfMgr { let seq = MatchSeq::from(*create_option); - let key = UdfName::new(self.tenant.as_str(), &info.name); + let key = UdfName::new(&self.tenant, &info.name); let req = UpsertPB::insert(key, info.clone()).with(seq); let res = self.kv_api.upsert_pb(&req).await?; if let CreateOption::Create = create_option { if res.prev.is_some() { let err = UdfError::Exists { - tenant: self.tenant.to_string(), + tenant: self.tenant.name().to_string(), name: info.name.to_string(), reason: "".to_string(), }; @@ -92,7 +91,7 @@ impl UdfMgr { return Ok(Err(e)); } - let key = UdfName::new(self.tenant.as_str(), &info.name); + let key = UdfName::new(&self.tenant, &info.name); let req = UpsertPB::update(key, info.clone()).with(seq); let res = self.kv_api.upsert_pb(&req).await?; @@ -100,7 +99,7 @@ impl UdfMgr { Ok(res.result.unwrap().seq) } else { Err(UdfError::NotFound { - tenant: self.tenant.to_string(), + tenant: self.tenant.name().to_string(), name: info.name.to_string(), context: "while update udf".to_string(), }) @@ -115,7 +114,7 @@ impl UdfMgr { &self, udf_name: &str, ) -> Result>, MetaError> { - let key = UdfName::new(self.tenant.as_str(), udf_name); + let key = UdfName::new(&self.tenant, udf_name); let res = self.kv_api.get_pb(&key).await?; Ok(res) } @@ -124,7 +123,7 @@ impl UdfMgr { #[async_backtrace::framed] #[minitrace::trace] pub async fn list_udf(&self) -> Result, ErrorCode> { - let key = DirName::new(UdfName::new(self.tenant.as_str(), "")); + let key = DirName::new(UdfName::new(&self.tenant, "")); let strm = self.kv_api.list_pb_values(&key).await?; match strm.try_collect().await { @@ -166,7 +165,7 @@ impl UdfMgr { udf_name: &str, seq: MatchSeq, ) -> Result>, MetaError> { - let key = UdfName::new(self.tenant.as_str(), udf_name); + let key = UdfName::new(&self.tenant, udf_name); let req = UpsertPB::delete(key).with(seq); let res = self.kv_api.upsert_pb(&req).await?; @@ -180,7 +179,7 @@ impl UdfMgr { fn ensure_non_builtin(&self, name: &str) -> Result<(), UdfError> { if is_builtin_function(name) { return Err(UdfError::Exists { - tenant: self.tenant.to_string(), + tenant: self.tenant.name().to_string(), name: name.to_string(), reason: " It is a builtin function".to_string(), }); diff --git a/src/query/management/src/user/user_mgr.rs b/src/query/management/src/user/user_mgr.rs index acd5d311e2791..7df2eda4a0dc2 100644 --- a/src/query/management/src/user/user_mgr.rs +++ b/src/query/management/src/user/user_mgr.rs @@ -28,7 +28,6 @@ use databend_common_meta_kvapi::kvapi::UpsertKVReq; use databend_common_meta_types::MatchSeq; use databend_common_meta_types::MatchSeqExt; use databend_common_meta_types::MetaError; -use databend_common_meta_types::NonEmptyStr; use databend_common_meta_types::Operation; use databend_common_meta_types::SeqV; @@ -42,10 +41,10 @@ pub struct UserMgr { } impl UserMgr { - pub fn create(kv_api: Arc>, tenant: NonEmptyStr) -> Self { + pub fn create(kv_api: Arc>, tenant: &Tenant) -> Self { UserMgr { kv_api, - tenant: 
Tenant::new_nonempty(tenant.into()), + tenant: tenant.clone(), } } diff --git a/src/query/management/tests/it/role.rs b/src/query/management/tests/it/role.rs index 2dd7c3892e7a7..6bd71513f1e80 100644 --- a/src/query/management/tests/it/role.rs +++ b/src/query/management/tests/it/role.rs @@ -16,10 +16,10 @@ use std::sync::Arc; use databend_common_base::base::tokio; use databend_common_management::*; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_embedded::MetaEmbedded; use databend_common_meta_kvapi::kvapi::UpsertKVReq; use databend_common_meta_types::MatchSeq; -use databend_common_meta_types::NonEmptyStr; use mockall::predicate::*; fn make_role_key(role: &str) -> String { @@ -65,6 +65,7 @@ mod add { async fn new_role_api() -> databend_common_exception::Result<(Arc, RoleMgr)> { let test_api = Arc::new(MetaEmbedded::new_temp().await?); - let mgr = RoleMgr::create(test_api.clone(), NonEmptyStr::new("admin").unwrap()); + let tenant = Tenant::new_literal("admin"); + let mgr = RoleMgr::create(test_api.clone(), &tenant); Ok((test_api, mgr)) } diff --git a/src/query/management/tests/it/setting.rs b/src/query/management/tests/it/setting.rs index ac920e01b5807..a1a6d794714e3 100644 --- a/src/query/management/tests/it/setting.rs +++ b/src/query/management/tests/it/setting.rs @@ -19,11 +19,12 @@ use databend_common_exception::Result; use databend_common_management::*; use databend_common_meta_app::principal::UserSetting; use databend_common_meta_app::principal::UserSettingValue; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_embedded::MetaEmbedded; use databend_common_meta_kvapi::kvapi::KVApi; use databend_common_meta_types::MatchSeq; -use databend_common_meta_types::NonEmptyString; use databend_common_meta_types::SeqV; +use minitrace::func_name; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_set_setting() -> Result<()> { @@ -117,7 +118,7 @@ async fn new_setting_api() -> Result<(Arc, SettingMgr)> { let test_api = Arc::new(MetaEmbedded::new_temp().await?); let mgr = SettingMgr::create( test_api.clone(), - &NonEmptyString::new("databend_query").unwrap(), + &Tenant::new_or_error_code("databend_query", func_name!()).unwrap(), ); Ok((test_api, mgr)) } diff --git a/src/query/management/tests/it/stage.rs b/src/query/management/tests/it/stage.rs index d4bbea761db25..cf25fb5102881 100644 --- a/src/query/management/tests/it/stage.rs +++ b/src/query/management/tests/it/stage.rs @@ -24,10 +24,11 @@ use databend_common_meta_app::principal::StageParams; use databend_common_meta_app::schema::CreateOption; use databend_common_meta_app::storage::StorageParams; use databend_common_meta_app::storage::StorageS3Config; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_embedded::MetaEmbedded; use databend_common_meta_kvapi::kvapi::KVApi; -use databend_common_meta_types::NonEmptyString; use databend_common_meta_types::SeqV; +use minitrace::func_name; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_add_stage() -> Result<()> { @@ -139,7 +140,10 @@ fn create_test_stage_info() -> StageInfo { async fn new_stage_api() -> Result<(Arc, StageMgr)> { let test_api = Arc::new(MetaEmbedded::new_temp().await?); - let mgr = StageMgr::create(test_api.clone(), &NonEmptyString::new("admin").unwrap()); + let mgr = StageMgr::create( + test_api.clone(), + &Tenant::new_or_error_code("admin", func_name!()).unwrap(), + ); Ok((test_api, mgr)) } diff --git a/src/query/management/tests/it/udf.rs 
b/src/query/management/tests/it/udf.rs index 3cfccf9033214..007dd0ef3e061 100644 --- a/src/query/management/tests/it/udf.rs +++ b/src/query/management/tests/it/udf.rs @@ -23,10 +23,10 @@ use databend_common_management::udf::UdfMgr; use databend_common_management::*; use databend_common_meta_app::principal::UserDefinedFunction; use databend_common_meta_app::schema::CreateOption; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_embedded::MetaEmbedded; use databend_common_meta_kvapi::kvapi::KVApi; use databend_common_meta_types::MatchSeq; -use databend_common_meta_types::NonEmptyStr; use databend_common_meta_types::SeqV; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] @@ -234,6 +234,6 @@ fn create_test_udf_script() -> UserDefinedFunction { async fn new_udf_api() -> Result<(Arc, UdfMgr)> { let test_api = Arc::new(MetaEmbedded::new_temp().await?); - let mgr = UdfMgr::create(test_api.clone(), NonEmptyStr::new("admin").unwrap()); + let mgr = UdfMgr::create(test_api.clone(), &Tenant::new_literal("admin")); Ok((test_api, mgr)) } diff --git a/src/query/management/tests/it/user.rs b/src/query/management/tests/it/user.rs index 039d88a9f6a28..2ddd8b4859b5b 100644 --- a/src/query/management/tests/it/user.rs +++ b/src/query/management/tests/it/user.rs @@ -82,7 +82,7 @@ fn default_test_auth_info() -> AuthInfo { mod add { use databend_common_meta_app::principal::UserInfo; use databend_common_meta_app::schema::CreateOption; - use databend_common_meta_types::NonEmptyStr; + use databend_common_meta_app::tenant::Tenant; use databend_common_meta_types::Operation; use super::*; @@ -120,7 +120,7 @@ mod add { .times(1) .return_once(|_u| Ok(UpsertKVReply::new(None, Some(SeqV::new(1, v))))); let api = Arc::new(api); - let user_mgr = UserMgr::create(api, NonEmptyStr::new("tenant1").unwrap()); + let user_mgr = UserMgr::create(api, &Tenant::new_literal("tenant1")); let res = user_mgr.add_user(user_info, &CreateOption::Create); assert!(res.await.is_ok()); @@ -146,7 +146,7 @@ mod add { }); let api = Arc::new(api); - let user_mgr = UserMgr::create(api, NonEmptyStr::new("tenant1").unwrap()); + let user_mgr = UserMgr::create(api, &Tenant::new_literal("tenant1")); let user_info = UserInfo::new(test_user_name, test_hostname, default_test_auth_info()); @@ -164,7 +164,7 @@ mod add { mod get { use databend_common_meta_app::principal::UserInfo; - use databend_common_meta_types::NonEmptyStr; + use databend_common_meta_app::tenant::Tenant; use super::*; @@ -187,7 +187,7 @@ mod get { .return_once(move |_k| Ok(Some(SeqV::new(1, value)))); let kv = Arc::new(kv); - let user_mgr = UserMgr::create(kv, NonEmptyStr::new("tenant1").unwrap()); + let user_mgr = UserMgr::create(kv, &Tenant::new_literal("tenant1")); let res = user_mgr.get_user(user_info.identity(), MatchSeq::Exact(1)); assert!(res.await.is_ok()); @@ -213,7 +213,7 @@ mod get { .return_once(move |_k| Ok(Some(SeqV::new(100, value)))); let kv = Arc::new(kv); - let user_mgr = UserMgr::create(kv, NonEmptyStr::new("tenant1").unwrap()); + let user_mgr = UserMgr::create(kv, &Tenant::new_literal("tenant1")); let res = user_mgr.get_user(user_info.identity(), MatchSeq::GE(0)); assert!(res.await.is_ok()); Ok(()) @@ -235,7 +235,7 @@ mod get { .return_once(move |_k| Ok(None)); let kv = Arc::new(kv); - let user_mgr = UserMgr::create(kv, NonEmptyStr::new("tenant1").unwrap()); + let user_mgr = UserMgr::create(kv, &Tenant::new_literal("tenant1")); let res = user_mgr .get_user( UserIdentity::new(test_user_name, test_hostname), @@ -263,7 +263,7 @@ mod get { 
.return_once(move |_k| Ok(Some(SeqV::new(1, vec![])))); let kv = Arc::new(kv); - let user_mgr = UserMgr::create(kv, NonEmptyStr::new("tenant1").unwrap()); + let user_mgr = UserMgr::create(kv, &Tenant::new_literal("tenant1")); let res = user_mgr .get_user( UserIdentity::new(test_user_name, test_hostname), @@ -291,7 +291,7 @@ mod get { .return_once(move |_k| Ok(Some(SeqV::new(1, vec![])))); let kv = Arc::new(kv); - let user_mgr = UserMgr::create(kv, NonEmptyStr::new("tenant1").unwrap()); + let user_mgr = UserMgr::create(kv, &Tenant::new_literal("tenant1")); let res = user_mgr.get_user( UserIdentity::new(test_user_name, test_hostname), MatchSeq::GE(0), @@ -307,7 +307,7 @@ mod get { mod get_users { use databend_common_meta_app::principal::UserInfo; - use databend_common_meta_types::NonEmptyStr; + use databend_common_meta_app::tenant::Tenant; use super::*; @@ -356,7 +356,7 @@ mod get_users { } let kv = Arc::new(kv); - let user_mgr = UserMgr::create(kv, NonEmptyStr::new("tenant1").unwrap()); + let user_mgr = UserMgr::create(kv, &Tenant::new_literal("tenant1")); let res = user_mgr.get_users(); assert_eq!(res.await?, user_infos); @@ -385,7 +385,7 @@ mod get_users { } let kv = Arc::new(kv); - let user_mgr = UserMgr::create(kv, NonEmptyStr::new("tenant1").unwrap()); + let user_mgr = UserMgr::create(kv, &Tenant::new_literal("tenant1")); let res = user_mgr.get_users(); assert_eq!( res.await.unwrap_err().code(), @@ -397,7 +397,7 @@ mod get_users { } mod drop { - use databend_common_meta_types::NonEmptyStr; + use databend_common_meta_app::tenant::Tenant; use super::*; @@ -420,7 +420,7 @@ mod drop { .times(1) .returning(|_k| Ok(UpsertKVReply::new(Some(SeqV::new(1, vec![])), None))); let kv = Arc::new(kv); - let user_mgr = UserMgr::create(kv, NonEmptyStr::new("tenant1").unwrap()); + let user_mgr = UserMgr::create(kv, &Tenant::new_literal("tenant1")); let res = user_mgr.drop_user(UserIdentity::new(test_user, test_hostname), MatchSeq::GE(1)); assert!(res.await.is_ok()); @@ -446,7 +446,7 @@ mod drop { .times(1) .returning(|_k| Ok(UpsertKVReply::new(None, None))); let kv = Arc::new(kv); - let user_mgr = UserMgr::create(kv, NonEmptyStr::new("tenant1").unwrap()); + let user_mgr = UserMgr::create(kv, &Tenant::new_literal("tenant1")); let res = user_mgr.drop_user(UserIdentity::new(test_user, test_hostname), MatchSeq::GE(1)); assert_eq!( res.await.unwrap_err().code(), @@ -459,7 +459,7 @@ mod drop { mod update { use databend_common_meta_app::principal::AuthInfo; use databend_common_meta_app::principal::UserInfo; - use databend_common_meta_types::NonEmptyStr; + use databend_common_meta_app::tenant::Tenant; use super::*; @@ -523,7 +523,7 @@ mod update { .return_once(|_| Ok(UpsertKVReply::new(None, Some(SeqV::new(1, vec![]))))); let kv = Arc::new(kv); - let user_mgr = UserMgr::create(kv, NonEmptyStr::new("tenant1").unwrap()); + let user_mgr = UserMgr::create(kv, &Tenant::new_literal("tenant1")); let res = user_mgr.update_user_with(user_info.identity(), test_seq, |ui: &mut UserInfo| { ui.update_auth_option(Some(new_test_auth_info(full)), None) @@ -552,7 +552,7 @@ mod update { .return_once(move |_k| Ok(None)); let kv = Arc::new(kv); - let user_mgr = UserMgr::create(kv, NonEmptyStr::new("tenant1").unwrap()); + let user_mgr = UserMgr::create(kv, &Tenant::new_literal("tenant1")); let res = user_mgr.update_user_with( UserIdentity::new(test_user_name, test_hostname), @@ -597,7 +597,7 @@ mod update { .returning(|_| Ok(UpsertKVReply::new(None, None))); let kv = Arc::new(kv); - let user_mgr = UserMgr::create(kv, 
NonEmptyStr::new("tenant1").unwrap()); + let user_mgr = UserMgr::create(kv, &Tenant::new_literal("tenant1")); let _ = user_mgr .update_user_with(user_info.identity(), MatchSeq::GE(1), |_x| {}) @@ -611,7 +611,7 @@ mod set_user_privileges { use databend_common_meta_app::principal::UserInfo; use databend_common_meta_app::principal::UserPrivilegeSet; use databend_common_meta_app::principal::UserPrivilegeType; - use databend_common_meta_types::NonEmptyStr; + use databend_common_meta_app::tenant::Tenant; use super::*; @@ -655,7 +655,7 @@ mod set_user_privileges { .return_once(|_| Ok(UpsertKVReply::new(None, Some(SeqV::new(1, vec![]))))); let kv = Arc::new(kv); - let user_mgr = UserMgr::create(kv, NonEmptyStr::new("tenant1").unwrap()); + let user_mgr = UserMgr::create(kv, &Tenant::new_literal("tenant1")); let res = user_mgr.update_user_with( user_info.identity(), diff --git a/src/query/service/src/api/http/v1/settings.rs b/src/query/service/src/api/http/v1/settings.rs index cb8fa8f37f5e2..caabd07e1f58c 100644 --- a/src/query/service/src/api/http/v1/settings.rs +++ b/src/query/service/src/api/http/v1/settings.rs @@ -14,8 +14,9 @@ use databend_common_exception::ErrorCode; use databend_common_exception::Result; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use databend_common_settings::Settings; +use minitrace::func_name; use poem::web::Json; use poem::web::Path; use poem::IntoResponse; @@ -31,13 +32,7 @@ pub struct SettingsItem { } async fn list_settings_impl(tenant: &str) -> Result> { - if tenant.is_empty() { - return Err(ErrorCode::TenantIsEmpty( - "Tenant can not empty(while list settings)", - )); - } - - let settings = Settings::create(NonEmptyString::new(tenant)?); + let settings = Settings::create(Tenant::new_or_error_code(tenant, func_name!())?); settings.load_changes().await?; Ok(settings @@ -66,7 +61,7 @@ async fn set_setting_impl(tenant: &str, key: &str, value: String) -> Result Result )); } - let settings = Settings::create(NonEmptyString::new(tenant)?); + let settings = Settings::create(Tenant::new_or_error_code(tenant, func_name!())?); settings.try_drop_global_setting(key).await?; Ok(settings diff --git a/src/query/service/src/api/http/v1/tenant_tables.rs b/src/query/service/src/api/http/v1/tenant_tables.rs index 4f89fa7e37b25..4cc499ee18f2d 100644 --- a/src/query/service/src/api/http/v1/tenant_tables.rs +++ b/src/query/service/src/api/http/v1/tenant_tables.rs @@ -17,7 +17,9 @@ use chrono::Utc; use databend_common_catalog::catalog::CatalogManager; use databend_common_config::GlobalConfig; use databend_common_exception::Result; +use databend_common_meta_app::tenant::Tenant; use databend_storages_common_txn::TxnManager; +use minitrace::func_name; use poem::web::Json; use poem::web::Path; use poem::IntoResponse; @@ -46,19 +48,20 @@ pub struct TenantTableInfo { pub table_id: u64, } -async fn load_tenant_tables(tenant: &str) -> Result { +async fn load_tenant_tables(tenant: &Tenant) -> Result { let catalog = CatalogManager::instance().get_default_catalog(TxnManager::init())?; + let databases = catalog.list_databases(tenant).await?; let mut table_infos: Vec = vec![]; let mut warnings: Vec = vec![]; for database in databases { - let tables = match catalog.list_tables(tenant, database.name()).await { + let tables = match catalog.list_tables(tenant.name(), database.name()).await { Ok(v) => v, Err(err) => { warnings.push(format!( "failed to list tables of database {}.{}: {}", - tenant, + tenant.name(), database.name(), err )); @@ -96,6 +99,9 @@ 
async fn load_tenant_tables(tenant: &str) -> Result { pub async fn list_tenant_tables_handler( Path(tenant): Path, ) -> poem::Result { + let tenant = Tenant::new_or_error_code(&tenant, func_name!()) + .map_err(poem::error::InternalServerError)?; + let resp = load_tenant_tables(&tenant) .await .map_err(poem::error::InternalServerError)?; @@ -108,7 +114,7 @@ pub async fn list_tenant_tables_handler( pub async fn list_tables_handler() -> poem::Result { let tenant = &GlobalConfig::instance().query.tenant_id; - let resp = load_tenant_tables(tenant.as_str()) + let resp = load_tenant_tables(tenant) .await .map_err(poem::error::InternalServerError)?; Ok(Json(resp)) diff --git a/src/query/service/src/catalogs/default/database_catalog.rs b/src/query/service/src/catalogs/default/database_catalog.rs index 9d3cb862f45f3..216083e7cdaad 100644 --- a/src/query/service/src/catalogs/default/database_catalog.rs +++ b/src/query/service/src/catalogs/default/database_catalog.rs @@ -93,6 +93,7 @@ use databend_common_meta_app::schema::UpdateVirtualColumnReq; use databend_common_meta_app::schema::UpsertTableOptionReply; use databend_common_meta_app::schema::UpsertTableOptionReq; use databend_common_meta_app::schema::VirtualColumnMeta; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_types::MetaId; use log::info; @@ -184,13 +185,7 @@ impl Catalog for DatabaseCatalog { } #[async_backtrace::framed] - async fn list_databases(&self, tenant: &str) -> Result>> { - if tenant.is_empty() { - return Err(ErrorCode::TenantIsEmpty( - "Tenant can not empty(while list databases)", - )); - } - + async fn list_databases(&self, tenant: &Tenant) -> Result>> { let mut dbs = self.immutable_catalog.list_databases(tenant).await?; let mut other = self.mutable_catalog.list_databases(tenant).await?; dbs.append(&mut other); diff --git a/src/query/service/src/catalogs/default/immutable_catalog.rs b/src/query/service/src/catalogs/default/immutable_catalog.rs index 85a007412d661..08983d23e48e8 100644 --- a/src/query/service/src/catalogs/default/immutable_catalog.rs +++ b/src/query/service/src/catalogs/default/immutable_catalog.rs @@ -84,6 +84,7 @@ use databend_common_meta_app::schema::UpdateVirtualColumnReq; use databend_common_meta_app::schema::UpsertTableOptionReply; use databend_common_meta_app::schema::UpsertTableOptionReq; use databend_common_meta_app::schema::VirtualColumnMeta; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_types::MetaId; use crate::catalogs::InMemoryMetas; @@ -155,7 +156,7 @@ impl Catalog for ImmutableCatalog { } #[async_backtrace::framed] - async fn list_databases(&self, _tenant: &str) -> Result>> { + async fn list_databases(&self, _tenant: &Tenant) -> Result>> { Ok(vec![self.sys_db.clone(), self.info_schema_db.clone()]) } diff --git a/src/query/service/src/catalogs/default/mutable_catalog.rs b/src/query/service/src/catalogs/default/mutable_catalog.rs index 57540184c8571..bf67224c6dee7 100644 --- a/src/query/service/src/catalogs/default/mutable_catalog.rs +++ b/src/query/service/src/catalogs/default/mutable_catalog.rs @@ -97,6 +97,7 @@ use databend_common_meta_app::schema::UpdateVirtualColumnReq; use databend_common_meta_app::schema::UpsertTableOptionReply; use databend_common_meta_app::schema::UpsertTableOptionReq; use databend_common_meta_app::schema::VirtualColumnMeta; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_store::MetaStoreProvider; use databend_common_meta_types::MetaId; use log::info; @@ -148,7 +149,7 @@ impl MutableCatalog { 
provider.create_meta_store().await? }; - let tenant = conf.query.tenant_id.to_string(); + let tenant = conf.query.tenant_id.name().to_string(); // Create default database. let req = CreateDatabaseReq { @@ -177,7 +178,7 @@ impl MutableCatalog { }; Ok(MutableCatalog { ctx, - tenant: conf.query.tenant_id.to_string(), + tenant: conf.query.tenant_id.name().to_string(), }) } @@ -216,12 +217,12 @@ impl Catalog for MutableCatalog { } #[async_backtrace::framed] - async fn list_databases(&self, tenant: &str) -> Result>> { + async fn list_databases(&self, tenant: &Tenant) -> Result>> { let dbs = self .ctx .meta .list_databases(ListDatabaseReq { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), filter: None, }) .await?; diff --git a/src/query/service/src/clusters/cluster.rs b/src/query/service/src/clusters/cluster.rs index c081f4c4ec687..07626f3940f54 100644 --- a/src/query/service/src/clusters/cluster.rs +++ b/src/query/service/src/clusters/cluster.rs @@ -170,10 +170,10 @@ impl ClusterDiscovery { lift_time, provider, cfg.query.cluster_id.clone(), - cfg.query.tenant_id.to_string(), + cfg.query.tenant_id.name().to_string(), )), cluster_id: cfg.query.cluster_id.clone(), - tenant_id: cfg.query.tenant_id.to_string(), + tenant_id: cfg.query.tenant_id.name().to_string(), flight_address: cfg.query.flight_api_address.clone(), })) } @@ -191,7 +191,7 @@ impl ClusterDiscovery { let cluster_id = &cfg.query.cluster_id; let lift_time = Duration::from_secs(60); let cluster_manager = - ClusterMgr::create(metastore, tenant_id.as_str(), cluster_id, lift_time)?; + ClusterMgr::create(metastore, tenant_id.name(), cluster_id, lift_time)?; Ok((lift_time, Arc::new(cluster_manager))) } diff --git a/src/query/service/src/global_services.rs b/src/query/service/src/global_services.rs index eed33c52a11b8..e0171ffdf3fda 100644 --- a/src/query/service/src/global_services.rs +++ b/src/query/service/src/global_services.rs @@ -65,7 +65,10 @@ impl GlobalServices { // 2. log init. 
let mut log_labels = BTreeMap::new(); log_labels.insert("service".to_string(), "databend-query".to_string()); - log_labels.insert("tenant_id".to_string(), config.query.tenant_id.to_string()); + log_labels.insert( + "tenant_id".to_string(), + config.query.tenant_id.name().to_string(), + ); log_labels.insert("cluster_id".to_string(), config.query.cluster_id.clone()); log_labels.insert("node_id".to_string(), config.query.node_id.clone()); GlobalLogger::init(&app_name_shuffle, &config.log, log_labels); @@ -116,12 +119,12 @@ impl GlobalServices { ShareTableConfig::init( &config.query.share_endpoint_address, &config.query.share_endpoint_auth_token_file, - config.query.tenant_id.to_string(), + config.query.tenant_id.name().to_string(), )?; CacheManager::init( &config.cache, &config.query.max_server_memory_usage, - config.query.tenant_id.to_string(), + config.query.tenant_id.name().to_string(), )?; if let Some(addr) = config.query.cloud_control_grpc_server_address.clone() { diff --git a/src/query/service/src/interpreters/access/privilege_access.rs b/src/query/service/src/interpreters/access/privilege_access.rs index 8946275747fc3..033bb821619ab 100644 --- a/src/query/service/src/interpreters/access/privilege_access.rs +++ b/src/query/service/src/interpreters/access/privilege_access.rs @@ -27,7 +27,7 @@ use databend_common_meta_app::principal::StageType; use databend_common_meta_app::principal::UserGrantSet; use databend_common_meta_app::principal::UserPrivilegeSet; use databend_common_meta_app::principal::UserPrivilegeType; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use databend_common_sql::optimizer::get_udf_names; use databend_common_sql::plans::InsertInputSource; use databend_common_sql::plans::PresignAction; @@ -93,7 +93,7 @@ impl PrivilegeAccess { .ctx .get_catalog(catalog_name) .await? - .get_database(tenant.as_str(), db_name) + .get_database(tenant.name(), db_name) .await? .get_db_info() .ident @@ -109,13 +109,13 @@ impl PrivilegeAccess { } let catalog = self.ctx.get_catalog(catalog_name).await?; let db_id = catalog - .get_database(tenant.as_str(), db_name) + .get_database(tenant.name(), db_name) .await? .get_db_info() .ident .db_id; let table = catalog - .get_table(tenant.as_str(), db_name, table_name) + .get_table(tenant.name(), db_name, table_name) .await?; let table_id = table.get_id(); OwnershipObject::Table { @@ -166,7 +166,7 @@ impl PrivilegeAccess { Err(_err) => { let catalog = self.ctx.get_catalog(catalog_name).await?; match self - .convert_to_id(tenant.as_str(), &catalog, db_name, None) + .convert_to_id(tenant.name(), &catalog, db_name, None) .await { Ok(obj) => { @@ -259,7 +259,7 @@ impl PrivilegeAccess { Ok(_) => return Ok(()), Err(_err) => { match self - .convert_to_id(tenant.as_str(), &catalog, db_name, Some(table_name)) + .convert_to_id(tenant.name(), &catalog, db_name, Some(table_name)) .await { Ok(obj) => { @@ -510,7 +510,7 @@ impl AccessChecker for PrivilegeAccess { return Ok(()); } let catalog = self.ctx.get_catalog(catalog).await?; - let (db_id, table_id) = match self.convert_to_id(tenant.as_str(), &catalog, database, None).await? { + let (db_id, table_id) = match self.convert_to_id(tenant.name(), &catalog, database, None).await? 
{ ObjectId::Table(db_id, table_id) => { (db_id, Some(table_id)) } ObjectId::Database(db_id) => { (db_id, None) } }; @@ -530,7 +530,7 @@ impl AccessChecker for PrivilegeAccess { return Ok(()); } let catalog = self.ctx.get_catalog(&catalog_name).await?; - let (db_id, table_id) = match self.convert_to_id(tenant.as_str(), &catalog, database, None).await? { + let (db_id, table_id) = match self.convert_to_id(tenant.name(), &catalog, database, None).await? { ObjectId::Table(db_id, table_id) => { (db_id, Some(table_id)) } ObjectId::Database(db_id) => { (db_id, None) } }; @@ -550,7 +550,7 @@ impl AccessChecker for PrivilegeAccess { return Ok(()); } let catalog = self.ctx.get_catalog(catalog_name).await?; - let (db_id, table_id) = match self.convert_to_id(tenant.as_str(), &catalog, database, Some(table)).await? { + let (db_id, table_id) = match self.convert_to_id(tenant.name(), &catalog, database, Some(table)).await? { ObjectId::Table(db_id, table_id) => { (db_id, Some(table_id)) } ObjectId::Database(db_id) => { (db_id, None) } }; @@ -635,7 +635,7 @@ impl AccessChecker for PrivilegeAccess { let catalog = self.ctx.get_catalog(&catalog_name).await?; // Use db is special. Should not check the privilege. // Just need to check user grant objects contain the db that be used. - let (db_id, _) = match self.convert_to_id(tenant.as_str(), &catalog, &plan.database, None).await? { + let (db_id, _) = match self.convert_to_id(tenant.name(), &catalog, &plan.database, None).await? { ObjectId::Table(db_id, table_id) => { (db_id, Some(table_id)) } ObjectId::Database(db_id) => { (db_id, None) } }; @@ -1043,7 +1043,7 @@ impl AccessChecker for PrivilegeAccess { // TODO(liyz): replace it with verify_access async fn has_priv( - tenant: &NonEmptyString, + tenant: &Tenant, db_name: &str, table_name: Option<&str>, db_id: u64, @@ -1053,6 +1053,7 @@ async fn has_priv( if db_name.to_lowercase() == "information_schema" { return Ok(true); } + Ok(RoleCacheManager::instance() .find_related_roles(tenant, &grant_set.roles()) .await? diff --git a/src/query/service/src/interpreters/common/grant.rs b/src/query/service/src/interpreters/common/grant.rs index ce5001ab248da..699353003e55f 100644 --- a/src/query/service/src/interpreters/common/grant.rs +++ b/src/query/service/src/interpreters/common/grant.rs @@ -36,7 +36,7 @@ pub async fn validate_grant_object_exists( } if !catalog - .exists_table(tenant.as_str(), database_name, table_name) + .exists_table(tenant.name(), database_name, table_name) .await? { return Err(databend_common_exception::ErrorCode::UnknownTable(format!( @@ -48,7 +48,7 @@ pub async fn validate_grant_object_exists( GrantObject::Database(catalog_name, database_name) => { let catalog = ctx.get_catalog(catalog_name).await?; if !catalog - .exists_database(tenant.as_str(), database_name) + .exists_database(tenant.name(), database_name) .await? 
{ return Err(databend_common_exception::ErrorCode::UnknownDatabase( diff --git a/src/query/service/src/interpreters/common/notification.rs b/src/query/service/src/interpreters/common/notification.rs index d06862c627cc6..b62774267c068 100644 --- a/src/query/service/src/interpreters/common/notification.rs +++ b/src/query/service/src/interpreters/common/notification.rs @@ -29,7 +29,7 @@ pub fn get_notification_client_config( let tenant = ctx.get_tenant(); let user = ctx.get_current_user()?.identity().to_string(); let query_id = ctx.get_id(); - let mut cfg = build_client_config(tenant.to_string(), user, query_id, timeout); + let mut cfg = build_client_config(tenant.name().to_string(), user, query_id, timeout); cfg.add_notification_version_info(); Ok(cfg) } diff --git a/src/query/service/src/interpreters/common/query_log.rs b/src/query/service/src/interpreters/common/query_log.rs index 1cc9e908dd263..b489d002a1fac 100644 --- a/src/query/service/src/interpreters/common/query_log.rs +++ b/src/query/service/src/interpreters/common/query_log.rs @@ -150,7 +150,7 @@ impl InterpreterQueryLog { log_type, log_type_name, handler_type, - tenant_id: tenant_id.to_string(), + tenant_id: tenant_id.name().to_string(), cluster_id, node_id, sql_user, @@ -217,7 +217,7 @@ impl InterpreterQueryLog { ctx.set_finish_time(now); // User. let handler_type = ctx.get_current_session().get_type().to_string(); - let tenant_id = GlobalConfig::instance().query.tenant_id.to_string(); + let tenant_id = GlobalConfig::instance().query.tenant_id.name().to_string(); let cluster_id = GlobalConfig::instance().query.cluster_id.clone(); let node_id = ctx.get_cluster().local_id.clone(); let user = ctx.get_current_user()?; diff --git a/src/query/service/src/interpreters/common/task.rs b/src/query/service/src/interpreters/common/task.rs index d81c8d797670f..06ad7035bb93d 100644 --- a/src/query/service/src/interpreters/common/task.rs +++ b/src/query/service/src/interpreters/common/task.rs @@ -64,7 +64,7 @@ pub fn get_task_client_config(ctx: Arc, timeout: Duration) -> Resu let tenant = ctx.get_tenant(); let user = ctx.get_current_user()?.identity().to_string(); let query_id = ctx.get_id(); - let mut cfg = build_client_config(tenant.to_string(), user, query_id, timeout); + let mut cfg = build_client_config(tenant.name().to_string(), user, query_id, timeout); cfg.add_task_version_info(); Ok(cfg) } diff --git a/src/query/service/src/interpreters/hook/refresh_hook.rs b/src/query/service/src/interpreters/hook/refresh_hook.rs index 2671a968d3274..1165adc1ac012 100644 --- a/src/query/service/src/interpreters/hook/refresh_hook.rs +++ b/src/query/service/src/interpreters/hook/refresh_hook.rs @@ -208,7 +208,7 @@ async fn generate_refresh_index_plan( let mut plans = vec![]; let indexes = catalog .list_indexes_by_table_id(ListIndexesByIdReq { - tenant: ctx.get_tenant().to_string(), + tenant: ctx.get_tenant().name().to_string(), table_id, }) .await?; @@ -303,7 +303,7 @@ async fn generate_refresh_virtual_column_plan( let catalog = ctx.get_catalog(&desc.catalog).await?; let res = catalog .list_virtual_columns(ListVirtualColumnsReq { - tenant: ctx.get_tenant().to_string(), + tenant: ctx.get_tenant().name().to_string(), table_id: Some(table_info.get_id()), }) .await?; diff --git a/src/query/service/src/interpreters/interpreter_catalog_create.rs b/src/query/service/src/interpreters/interpreter_catalog_create.rs index 85c3a9b3d6f3a..c69bdd3aff281 100644 --- a/src/query/service/src/interpreters/interpreter_catalog_create.rs +++ 
b/src/query/service/src/interpreters/interpreter_catalog_create.rs @@ -72,11 +72,8 @@ impl Interpreter for CreateCatalogInterpreter { let ctl = catalog_manager .build_catalog(&CatalogInfo { id: CatalogId::default().into(), - name_ident: CatalogNameIdent { - tenant: self.plan.tenant.clone(), - catalog_name: self.plan.catalog.clone(), - } - .into(), + name_ident: CatalogNameIdent::new(self.plan.tenant.clone(), &self.plan.catalog) + .into(), meta: CatalogMeta { catalog_option: self.plan.meta.catalog_option.clone(), created_on: chrono::Utc::now(), diff --git a/src/query/service/src/interpreters/interpreter_cluster_key_alter.rs b/src/query/service/src/interpreters/interpreter_cluster_key_alter.rs index 142d4915f0206..7f928e6922d87 100644 --- a/src/query/service/src/interpreters/interpreter_cluster_key_alter.rs +++ b/src/query/service/src/interpreters/interpreter_cluster_key_alter.rs @@ -50,7 +50,7 @@ impl Interpreter for AlterTableClusterKeyInterpreter { let catalog = self.ctx.get_catalog(&plan.catalog).await?; let table = catalog - .get_table(tenant.as_str(), &plan.database, &plan.table) + .get_table(tenant.name(), &plan.database, &plan.table) .await?; let cluster_key_str = format!("({})", plan.cluster_keys.join(", ")); diff --git a/src/query/service/src/interpreters/interpreter_cluster_key_drop.rs b/src/query/service/src/interpreters/interpreter_cluster_key_drop.rs index 798b5113160b6..bb9e1acc87e8a 100644 --- a/src/query/service/src/interpreters/interpreter_cluster_key_drop.rs +++ b/src/query/service/src/interpreters/interpreter_cluster_key_drop.rs @@ -50,7 +50,7 @@ impl Interpreter for DropTableClusterKeyInterpreter { let catalog = self.ctx.get_catalog(&plan.catalog).await?; let table = catalog - .get_table(tenant.as_str(), &plan.database, &plan.table) + .get_table(tenant.name(), &plan.database, &plan.table) .await?; table.drop_table_cluster_keys(self.ctx.clone()).await?; diff --git a/src/query/service/src/interpreters/interpreter_data_mask_desc.rs b/src/query/service/src/interpreters/interpreter_data_mask_desc.rs index 1003efbcf2d83..831b42eed526f 100644 --- a/src/query/service/src/interpreters/interpreter_data_mask_desc.rs +++ b/src/query/service/src/interpreters/interpreter_data_mask_desc.rs @@ -63,7 +63,7 @@ impl Interpreter for DescDataMaskInterpreter { let policy = handler .get_data_mask( meta_api, - self.ctx.get_tenant().to_string(), + self.ctx.get_tenant().name().to_string(), self.plan.name.clone(), ) .await; diff --git a/src/query/service/src/interpreters/interpreter_database_create.rs b/src/query/service/src/interpreters/interpreter_database_create.rs index 8e1be6f8aa55c..979cb60ee93ca 100644 --- a/src/query/service/src/interpreters/interpreter_database_create.rs +++ b/src/query/service/src/interpreters/interpreter_database_create.rs @@ -21,6 +21,7 @@ use databend_common_meta_app::principal::OwnershipObject; use databend_common_meta_app::schema::CreateDatabaseReq; use databend_common_meta_app::share::ShareGrantObjectPrivilege; use databend_common_meta_app::share::ShareNameIdent; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_types::MatchSeq; use databend_common_sharing::ShareEndpointManager; use databend_common_sql::plans::CreateDatabasePlan; @@ -108,11 +109,12 @@ impl Interpreter for CreateDatabaseInterpreter { async fn execute2(&self) -> Result { debug!("ctx.id" = self.ctx.get_id().as_str(); "create_database_execute"); - let tenant = self.plan.tenant.clone(); + let tenant = Tenant::new_nonempty(self.plan.tenant.clone()); + let quota_api = 
UserApiProvider::instance().tenant_quota_api(&tenant); let quota = quota_api.get_quota(MatchSeq::GE(0)).await?.data; let catalog = self.ctx.get_catalog(&self.plan.catalog).await?; - let databases = catalog.list_databases(tenant.as_str()).await?; + let databases = catalog.list_databases(&tenant).await?; if quota.max_databases != 0 && databases.len() >= quota.max_databases as usize { return Err(ErrorCode::TenantQuotaExceeded(format!( "Max databases quota exceeded {}", @@ -121,7 +123,7 @@ impl Interpreter for CreateDatabaseInterpreter { }; // if create from other tenant, check from share endpoint if let Some(ref share_name) = self.plan.meta.from_share { - self.check_create_database_from_share(&tenant.to_string(), share_name) + self.check_create_database_from_share(&tenant.name().to_string(), share_name) .await?; } @@ -152,7 +154,7 @@ impl Interpreter for CreateDatabaseInterpreter { } save_share_spec( - &self.ctx.get_tenant().to_string(), + &self.ctx.get_tenant().name().to_string(), self.ctx.get_data_operator()?.operator(), Some(spec_vec), Some(share_table_into), diff --git a/src/query/service/src/interpreters/interpreter_database_drop.rs b/src/query/service/src/interpreters/interpreter_database_drop.rs index b435f92bbd758..4c9544da18db1 100644 --- a/src/query/service/src/interpreters/interpreter_database_drop.rs +++ b/src/query/service/src/interpreters/interpreter_database_drop.rs @@ -55,7 +55,7 @@ impl Interpreter for DropDatabaseInterpreter { // unset the ownership of the database, the database may not exists. let db = catalog - .get_database(tenant.as_str(), &self.plan.database) + .get_database(tenant.name(), &self.plan.database) .await; if let Ok(db) = db { let role_api = UserApiProvider::instance().role_api(&tenant); @@ -79,7 +79,7 @@ impl Interpreter for DropDatabaseInterpreter { } save_share_spec( - &self.ctx.get_tenant().to_string(), + &self.ctx.get_tenant().name().to_string(), self.ctx.get_data_operator()?.operator(), Some(spec_vec), Some(share_table_into), diff --git a/src/query/service/src/interpreters/interpreter_database_show_create.rs b/src/query/service/src/interpreters/interpreter_database_show_create.rs index f9f2ab347a5ab..b844402437cbe 100644 --- a/src/query/service/src/interpreters/interpreter_database_show_create.rs +++ b/src/query/service/src/interpreters/interpreter_database_show_create.rs @@ -54,7 +54,7 @@ impl Interpreter for ShowCreateDatabaseInterpreter { let tenant = self.ctx.get_tenant(); let catalog = self.ctx.get_catalog(&self.plan.catalog).await?; let db = catalog - .get_database(tenant.as_str(), &self.plan.database) + .get_database(tenant.name(), &self.plan.database) .await?; let name = db.name(); let mut info = format!("CREATE DATABASE `{}`", name); diff --git a/src/query/service/src/interpreters/interpreter_delete.rs b/src/query/service/src/interpreters/interpreter_delete.rs index e9af8f2c98656..74012b6498fd3 100644 --- a/src/query/service/src/interpreters/interpreter_delete.rs +++ b/src/query/service/src/interpreters/interpreter_delete.rs @@ -113,7 +113,7 @@ impl Interpreter for DeleteInterpreter { let db_name = self.plan.database_name.as_str(); let tbl_name = self.plan.table_name.as_str(); let tbl = catalog - .get_table(self.ctx.get_tenant().as_str(), db_name, tbl_name) + .get_table(self.ctx.get_tenant().name(), db_name, tbl_name) .await?; // Add table lock. 
diff --git a/src/query/service/src/interpreters/interpreter_index_create.rs b/src/query/service/src/interpreters/interpreter_index_create.rs index 418cfd8e5889b..c22d459e6cfa2 100644 --- a/src/query/service/src/interpreters/interpreter_index_create.rs +++ b/src/query/service/src/interpreters/interpreter_index_create.rs @@ -23,8 +23,6 @@ use databend_common_meta_app::schema::CreateIndexReq; use databend_common_meta_app::schema::IndexMeta; use databend_common_meta_app::schema::IndexNameIdent; use databend_common_meta_app::schema::IndexType; -use databend_common_meta_app::tenant::Tenant; -use databend_common_meta_types::NonEmptyString; use databend_common_sql::plans::CreateIndexPlan; use databend_enterprise_aggregating_index::get_agg_index_handler; @@ -56,13 +54,7 @@ impl Interpreter for CreateIndexInterpreter { #[async_backtrace::framed] async fn execute2(&self) -> Result { - let tenant_name = self.ctx.get_tenant(); - - let non_empty = NonEmptyString::new(tenant_name).map_err(|_| { - ErrorCode::TenantIsEmpty("tenant is empty(when create index)".to_string()) - })?; - - let tenant = Tenant::new_nonempty(non_empty); + let tenant = self.ctx.get_tenant(); let license_manager = get_license_manager(); license_manager diff --git a/src/query/service/src/interpreters/interpreter_index_drop.rs b/src/query/service/src/interpreters/interpreter_index_drop.rs index 4fd976b1b4aa2..9f27d27aa8607 100644 --- a/src/query/service/src/interpreters/interpreter_index_drop.rs +++ b/src/query/service/src/interpreters/interpreter_index_drop.rs @@ -14,14 +14,11 @@ use std::sync::Arc; -use databend_common_exception::ErrorCode; use databend_common_exception::Result; use databend_common_license::license::Feature; use databend_common_license::license_manager::get_license_manager; use databend_common_meta_app::schema::DropIndexReq; use databend_common_meta_app::schema::IndexNameIdent; -use databend_common_meta_app::tenant::Tenant; -use databend_common_meta_types::NonEmptyString; use databend_common_sql::plans::DropIndexPlan; use databend_enterprise_aggregating_index::get_agg_index_handler; @@ -53,13 +50,7 @@ impl Interpreter for DropIndexInterpreter { #[async_backtrace::framed] async fn execute2(&self) -> Result { - let tenant_name = self.ctx.get_tenant(); - - let non_empty = NonEmptyString::new(tenant_name).map_err(|_| { - ErrorCode::TenantIsEmpty("tenant is empty(when drop index)".to_string()) - })?; - - let tenant = Tenant::new_nonempty(non_empty); + let tenant = self.ctx.get_tenant(); let license_manager = get_license_manager(); license_manager diff --git a/src/query/service/src/interpreters/interpreter_metrics.rs b/src/query/service/src/interpreters/interpreter_metrics.rs index cd2ad0ccedfb1..25c3f5dba63fe 100644 --- a/src/query/service/src/interpreters/interpreter_metrics.rs +++ b/src/query/service/src/interpreters/interpreter_metrics.rs @@ -41,7 +41,7 @@ impl InterpreterMetrics { vec![ (LABEL_HANDLER, handler_type), (LABEL_KIND, query_kind), - (LABEL_TENANT, tenant_id.to_string()), + (LABEL_TENANT, tenant_id.name().to_string()), (LABEL_CLUSTER, cluster_id), ] } diff --git a/src/query/service/src/interpreters/interpreter_notification_alter.rs b/src/query/service/src/interpreters/interpreter_notification_alter.rs index 6fb4e1a99e906..5e6eaa02ab920 100644 --- a/src/query/service/src/interpreters/interpreter_notification_alter.rs +++ b/src/query/service/src/interpreters/interpreter_notification_alter.rs @@ -47,7 +47,7 @@ impl AlterNotificationInterpreter { match plan.options { AlterNotificationOptions::Set(set_options) 
=> { let req = AlterNotificationRequest { - tenant_id: self.ctx.get_tenant().to_string(), + tenant_id: self.ctx.get_tenant().name().to_string(), name: plan.name, operation_type: "SET".to_string(), enabled: set_options.enabled, diff --git a/src/query/service/src/interpreters/interpreter_privilege_grant.rs b/src/query/service/src/interpreters/interpreter_privilege_grant.rs index 1c5e4a5356502..e909d6075a72b 100644 --- a/src/query/service/src/interpreters/interpreter_privilege_grant.rs +++ b/src/query/service/src/interpreters/interpreter_privilege_grant.rs @@ -21,7 +21,7 @@ use databend_common_meta_app::principal::OwnershipObject; use databend_common_meta_app::principal::PrincipalIdentity; use databend_common_meta_app::principal::UserPrivilegeSet; use databend_common_meta_app::principal::UserPrivilegeType::Ownership; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use databend_common_sql::plans::GrantPrivilegePlan; use databend_common_users::RoleCacheManager; use databend_common_users::UserApiProvider; @@ -112,7 +112,7 @@ impl GrantPrivilegeInterpreter { async fn grant_ownership( &self, ctx: &Arc, - tenant: &NonEmptyString, + tenant: &Tenant, owner_object: &OwnershipObject, new_role: &str, ) -> Result<()> { @@ -191,13 +191,13 @@ impl Interpreter for GrantPrivilegeInterpreter { match plan.principal { PrincipalIdentity::User(user) => { user_mgr - .grant_privileges_to_user(tenant.clone(), user, plan.on, plan.priv_types) + .grant_privileges_to_user(&tenant, user, plan.on, plan.priv_types) .await?; } PrincipalIdentity::Role(role) => { if plan.priv_types.has_privilege(Ownership) && plan.priv_types.len() == 1 { let owner_object = self - .convert_to_ownerobject(tenant.as_str(), &plan.on, plan.on.catalog()) + .convert_to_ownerobject(tenant.name(), &plan.on, plan.on.catalog()) .await?; if self.ctx.get_current_role().is_some() { self.grant_ownership(&self.ctx, &tenant, &owner_object, &role) diff --git a/src/query/service/src/interpreters/interpreter_setting.rs b/src/query/service/src/interpreters/interpreter_setting.rs index 0245f65c2d390..04751746e0079 100644 --- a/src/query/service/src/interpreters/interpreter_setting.rs +++ b/src/query/service/src/interpreters/interpreter_setting.rs @@ -18,10 +18,11 @@ use chrono_tz::Tz; use databend_common_config::GlobalConfig; use databend_common_exception::ErrorCode; use databend_common_exception::Result; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use databend_common_sql::plans::SettingPlan; use databend_common_sql::plans::VarValue; use databend_common_users::UserApiProvider; +use minitrace::func_name; use crate::interpreters::Interpreter; use crate::pipelines::PipelineBuildResult; @@ -92,9 +93,7 @@ impl Interpreter for SettingInterpreter { if config.query.internal_enable_sandbox_tenant && !tenant.is_empty() { UserApiProvider::try_create_simple( config.meta.to_meta_grpc_client_conf(), - &NonEmptyString::new(tenant).map_err(|_e| { - ErrorCode::TenantIsEmpty("when SettingInterpreter") - })?, + &Tenant::new_or_error_code(tenant, func_name!())?, ) .await?; } diff --git a/src/query/service/src/interpreters/interpreter_share_alter_tenants.rs b/src/query/service/src/interpreters/interpreter_share_alter_tenants.rs index cc629a939da71..6b00b888f4f44 100644 --- a/src/query/service/src/interpreters/interpreter_share_alter_tenants.rs +++ b/src/query/service/src/interpreters/interpreter_share_alter_tenants.rs @@ -57,7 +57,7 @@ impl Interpreter for AlterShareTenantsInterpreter { 
if self.plan.is_add { let req = AddShareAccountsReq { share_name: ShareNameIdent { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), share_name: self.plan.share.clone(), }, if_exists: self.plan.if_exists, @@ -67,7 +67,7 @@ impl Interpreter for AlterShareTenantsInterpreter { let resp = meta_api.add_share_tenants(req).await?; save_share_spec( - &self.ctx.get_tenant().to_string(), + &self.ctx.get_tenant().name().to_string(), self.ctx.get_data_operator()?.operator(), resp.spec_vec, None, @@ -76,7 +76,7 @@ impl Interpreter for AlterShareTenantsInterpreter { } else { let req = RemoveShareAccountsReq { share_name: ShareNameIdent { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), share_name: self.plan.share.clone(), }, if_exists: self.plan.if_exists, @@ -85,7 +85,7 @@ impl Interpreter for AlterShareTenantsInterpreter { let resp = meta_api.remove_share_tenants(req).await?; save_share_spec( - &self.ctx.get_tenant().to_string(), + &self.ctx.get_tenant().name().to_string(), self.ctx.get_data_operator()?.operator(), resp.spec_vec, None, diff --git a/src/query/service/src/interpreters/interpreter_share_create.rs b/src/query/service/src/interpreters/interpreter_share_create.rs index f23ef74091354..dd13302730bc9 100644 --- a/src/query/service/src/interpreters/interpreter_share_create.rs +++ b/src/query/service/src/interpreters/interpreter_share_create.rs @@ -52,7 +52,7 @@ impl Interpreter for CreateShareInterpreter { let resp = meta_api.create_share(self.plan.clone().into()).await?; save_share_spec( - &self.ctx.get_tenant().to_string(), + &self.ctx.get_tenant().name().to_string(), self.ctx.get_data_operator()?.operator(), resp.spec_vec, None, diff --git a/src/query/service/src/interpreters/interpreter_share_desc.rs b/src/query/service/src/interpreters/interpreter_share_desc.rs index 1820017ecb554..af0de35bf3444 100644 --- a/src/query/service/src/interpreters/interpreter_share_desc.rs +++ b/src/query/service/src/interpreters/interpreter_share_desc.rs @@ -56,7 +56,7 @@ impl Interpreter for DescShareInterpreter { let meta_api = UserApiProvider::instance().get_meta_store_client(); let req = GetShareGrantObjectReq { share_name: ShareNameIdent { - tenant: self.ctx.get_tenant().to_string(), + tenant: self.ctx.get_tenant().name().to_string(), share_name: self.plan.share.clone(), }, }; diff --git a/src/query/service/src/interpreters/interpreter_share_drop.rs b/src/query/service/src/interpreters/interpreter_share_drop.rs index d634670bde7af..084bdba541676 100644 --- a/src/query/service/src/interpreters/interpreter_share_drop.rs +++ b/src/query/service/src/interpreters/interpreter_share_drop.rs @@ -52,7 +52,7 @@ impl Interpreter for DropShareInterpreter { let resp = meta_api.drop_share(self.plan.clone().into()).await?; save_share_spec( - &self.ctx.get_tenant().to_string(), + &self.ctx.get_tenant().name().to_string(), self.ctx.get_data_operator()?.operator(), resp.spec_vec, Some(vec![(self.plan.share.clone(), None)]), diff --git a/src/query/service/src/interpreters/interpreter_share_grant_object.rs b/src/query/service/src/interpreters/interpreter_share_grant_object.rs index 4b9c913d11fd6..32cc80c3eebd2 100644 --- a/src/query/service/src/interpreters/interpreter_share_grant_object.rs +++ b/src/query/service/src/interpreters/interpreter_share_grant_object.rs @@ -55,7 +55,7 @@ impl Interpreter for GrantShareObjectInterpreter { let meta_api = UserApiProvider::instance().get_meta_store_client(); let req = GrantShareObjectReq { share_name: ShareNameIdent { - tenant: tenant.to_string(), + 
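// Hedged sketch, not part of this patch. The share requests above still carry
// plain `String` tenant fields, so the typed `Tenant` is lowered at the
// boundary with `tenant.name().to_string()` where the old code called
// `tenant.to_string()`. `LegacyShareReq` is a hypothetical stand-in for any
// request struct that has not been migrated yet.
use databend_common_meta_app::tenant::Tenant;

struct LegacyShareReq {
    tenant: String, // unmigrated field, still a raw string
    share_name: String,
}

fn build_share_req(tenant: &Tenant, share_name: &str) -> LegacyShareReq {
    LegacyShareReq {
        tenant: tenant.name().to_string(),
        share_name: share_name.to_string(),
    }
}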
tenant: tenant.name().to_string(), share_name: self.plan.share.clone(), }, object: self.plan.object.clone(), @@ -65,7 +65,7 @@ impl Interpreter for GrantShareObjectInterpreter { let resp = meta_api.grant_share_object(req).await?; save_share_spec( - &self.ctx.get_tenant().to_string(), + &self.ctx.get_tenant().name().to_string(), self.ctx.get_data_operator()?.operator(), resp.spec_vec, Some(vec![resp.share_table_info]), diff --git a/src/query/service/src/interpreters/interpreter_share_revoke_object.rs b/src/query/service/src/interpreters/interpreter_share_revoke_object.rs index 24c88eda037a8..fa361c19feff2 100644 --- a/src/query/service/src/interpreters/interpreter_share_revoke_object.rs +++ b/src/query/service/src/interpreters/interpreter_share_revoke_object.rs @@ -55,7 +55,7 @@ impl Interpreter for RevokeShareObjectInterpreter { let meta_api = UserApiProvider::instance().get_meta_store_client(); let req = RevokeShareObjectReq { share_name: ShareNameIdent { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), share_name: self.plan.share.clone(), }, object: self.plan.object.clone(), @@ -65,7 +65,7 @@ impl Interpreter for RevokeShareObjectInterpreter { let resp = meta_api.revoke_share_object(req).await?; save_share_spec( - &self.ctx.get_tenant().to_string(), + &self.ctx.get_tenant().name().to_string(), self.ctx.get_data_operator()?.operator(), resp.spec_vec, Some(vec![resp.share_table_info]), diff --git a/src/query/service/src/interpreters/interpreter_share_show.rs b/src/query/service/src/interpreters/interpreter_share_show.rs index ca500f96de6ae..35e4975ed7eb9 100644 --- a/src/query/service/src/interpreters/interpreter_share_show.rs +++ b/src/query/service/src/interpreters/interpreter_share_show.rs @@ -62,7 +62,7 @@ impl Interpreter for ShowSharesInterpreter { // query all share endpoint for other tenant inbound shares let share_specs = ShareEndpointManager::instance() - .get_inbound_shares(tenant.as_str(), None, None) + .get_inbound_shares(tenant.name(), None, None) .await?; for (from_tenant, share_spec) in share_specs { names.push(share_spec.name.clone()); @@ -70,12 +70,12 @@ impl Interpreter for ShowSharesInterpreter { created_owns.push(share_spec.share_on.unwrap_or_default().to_string()); database_names.push(share_spec.database.unwrap_or_default().name); from.push(from_tenant); - to.push(tenant.to_string()); + to.push(tenant.name().to_string()); comments.push(share_spec.comment.unwrap_or_default()); } let req = ShowSharesReq { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), }; let resp = meta_api.show_shares(req).await?; diff --git a/src/query/service/src/interpreters/interpreter_share_show_grant_tenants.rs b/src/query/service/src/interpreters/interpreter_share_show_grant_tenants.rs index 7309f7c7296a8..74e26db8bf59a 100644 --- a/src/query/service/src/interpreters/interpreter_share_show_grant_tenants.rs +++ b/src/query/service/src/interpreters/interpreter_share_show_grant_tenants.rs @@ -56,7 +56,7 @@ impl Interpreter for ShowGrantTenantsOfShareInterpreter { let tenant = self.ctx.get_tenant(); let req = GetShareGrantTenantsReq { share_name: ShareNameIdent { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), share_name: self.plan.share_name.clone(), }, }; diff --git a/src/query/service/src/interpreters/interpreter_show_object_grant_privileges.rs b/src/query/service/src/interpreters/interpreter_show_object_grant_privileges.rs index b355778178e97..2c06fa6674af2 100644 --- 
a/src/query/service/src/interpreters/interpreter_show_object_grant_privileges.rs +++ b/src/query/service/src/interpreters/interpreter_show_object_grant_privileges.rs @@ -53,7 +53,7 @@ impl Interpreter for ShowObjectGrantPrivilegesInterpreter { async fn execute2(&self) -> Result { let meta_api = UserApiProvider::instance().get_meta_store_client(); let req = GetObjectGrantPrivilegesReq { - tenant: self.ctx.get_tenant().to_string(), + tenant: self.ctx.get_tenant().name().to_string(), object: self.plan.object.clone(), }; let resp = meta_api.get_grant_privileges_of_object(req).await?; diff --git a/src/query/service/src/interpreters/interpreter_table_add_column.rs b/src/query/service/src/interpreters/interpreter_table_add_column.rs index 2f06b1c7988bb..92ec683185afd 100644 --- a/src/query/service/src/interpreters/interpreter_table_add_column.rs +++ b/src/query/service/src/interpreters/interpreter_table_add_column.rs @@ -73,7 +73,7 @@ impl Interpreter for AddTableColumnInterpreter { .ctx .get_catalog(catalog_name) .await? - .get_table(self.ctx.get_tenant().as_str(), db_name, tbl_name) + .get_table(self.ctx.get_tenant().name(), db_name, tbl_name) .await .ok(); @@ -135,7 +135,7 @@ impl Interpreter for AddTableColumnInterpreter { if let Some(share_table_info) = res.share_table_info { save_share_table_info( - self.ctx.get_tenant().as_str(), + self.ctx.get_tenant().name(), self.ctx.get_data_operator()?.operator(), share_table_info, ) diff --git a/src/query/service/src/interpreters/interpreter_table_create.rs b/src/query/service/src/interpreters/interpreter_table_create.rs index d3ab10558fce3..14eeac41f4359 100644 --- a/src/query/service/src/interpreters/interpreter_table_create.rs +++ b/src/query/service/src/interpreters/interpreter_table_create.rs @@ -33,8 +33,8 @@ use databend_common_meta_app::schema::CreateTableReq; use databend_common_meta_app::schema::TableMeta; use databend_common_meta_app::schema::TableNameIdent; use databend_common_meta_app::schema::TableStatistics; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_types::MatchSeq; -use databend_common_meta_types::NonEmptyString; use databend_common_sql::field_default_value; use databend_common_sql::plans::CreateTablePlan; use databend_common_sql::BloomIndexColumns; @@ -100,10 +100,9 @@ impl Interpreter for CreateTableInterpreter { #[async_backtrace::framed] async fn execute2(&self) -> Result { - let tenant = self.plan.tenant.clone(); - let tenant = NonEmptyString::new(tenant).map_err(|_e| { - ErrorCode::TenantIsEmpty("tenant is empty when CreateTableInterpreter") - })?; + let tenant = + Tenant::new_or_error_code(&self.plan.tenant, "CreateTableInterpreter::execute2")?; + let has_computed_column = self .plan .schema @@ -170,6 +169,7 @@ impl CreateTableInterpreter { ); let tenant = self.ctx.get_tenant(); + let catalog = self.ctx.get_catalog(&self.plan.catalog).await?; // TODO: maybe the table creation and insertion should be a transaction, but it may require create_table support 2pc. @@ -179,14 +179,14 @@ impl CreateTableInterpreter { } let table = catalog - .get_table(tenant.as_str(), &self.plan.database, &self.plan.table) + .get_table(tenant.name(), &self.plan.database, &self.plan.table) .await?; // grant the ownership of the table to the current role. 
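// Minimal sketch, not part of this patch. Catalog lookups in the hunks above
// still take the tenant as a borrowed string, so call sites pass
// `tenant.name()` where the old code used `tenant.as_str()`. `FakeCatalog`
// is a hypothetical, synchronous stand-in that only mirrors the shape of the
// `get_table(&str, &str, &str)` signature these call sites target.
use databend_common_meta_app::tenant::Tenant;

struct FakeCatalog;

impl FakeCatalog {
    fn get_table(&self, tenant: &str, db: &str, tbl: &str) -> String {
        format!("{tenant}.{db}.{tbl}")
    }
}

fn lookup(catalog: &FakeCatalog, tenant: &Tenant, db: &str, tbl: &str) -> String {
    // `name()` borrows the tenant string, so no allocation is needed for
    // `&str`-typed catalog parameters.
    catalog.get_table(tenant.name(), db, tbl)
}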
let current_role = self.ctx.get_current_role(); if let Some(current_role) = current_role { let db = catalog - .get_database(tenant.as_str(), &self.plan.database) + .get_database(tenant.name(), &self.plan.database) .await?; let db_id = db.get_db_info().ident.db_id; @@ -226,7 +226,7 @@ impl CreateTableInterpreter { // update share spec if needed if let Some((spec_vec, share_table_info)) = reply.spec_vec { save_share_spec( - &tenant.to_string(), + &tenant.name().to_string(), self.ctx.get_data_operator()?.operator(), Some(spec_vec), Some(share_table_info), @@ -278,7 +278,7 @@ impl CreateTableInterpreter { if let Some(current_role) = self.ctx.get_current_role() { let tenant = self.ctx.get_tenant(); let db = catalog - .get_database(tenant.as_str(), &self.plan.database) + .get_database(tenant.name(), &self.plan.database) .await?; let db_id = db.get_db_info().ident.db_id; @@ -299,7 +299,7 @@ impl CreateTableInterpreter { // update share spec if needed if let Some((spec_vec, share_table_info)) = reply.spec_vec { save_share_spec( - &self.ctx.get_tenant().to_string(), + &self.ctx.get_tenant().name().to_string(), self.ctx.get_data_operator()?.operator(), Some(spec_vec), Some(share_table_info), diff --git a/src/query/service/src/interpreters/interpreter_table_drop.rs b/src/query/service/src/interpreters/interpreter_table_drop.rs index cce368a886cdd..9c8548948e831 100644 --- a/src/query/service/src/interpreters/interpreter_table_drop.rs +++ b/src/query/service/src/interpreters/interpreter_table_drop.rs @@ -101,13 +101,13 @@ impl Interpreter for DropTableInterpreter { let tenant = self.ctx.get_tenant(); let db = catalog - .get_database(tenant.as_str(), &self.plan.database) + .get_database(tenant.name(), &self.plan.database) .await?; // actually drop table let resp = catalog .drop_table_by_id(DropTableByIdReq { if_exists: self.plan.if_exists, - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), table_name: tbl_name.to_string(), tb_id: tbl.get_table_info().ident.table_id, db_id: db.get_db_info().ident.db_id, @@ -154,7 +154,7 @@ impl Interpreter for DropTableInterpreter { // update share spec if needed if let Some((spec_vec, share_table_info)) = resp.spec_vec { save_share_spec( - &self.ctx.get_tenant().to_string(), + &self.ctx.get_tenant().name().to_string(), self.ctx.get_data_operator()?.operator(), Some(spec_vec), Some(share_table_info), diff --git a/src/query/service/src/interpreters/interpreter_table_drop_column.rs b/src/query/service/src/interpreters/interpreter_table_drop_column.rs index 3a66d765af8ca..cb5f06568dcf4 100644 --- a/src/query/service/src/interpreters/interpreter_table_drop_column.rs +++ b/src/query/service/src/interpreters/interpreter_table_drop_column.rs @@ -65,7 +65,7 @@ impl Interpreter for DropTableColumnInterpreter { .ctx .get_catalog(catalog_name) .await? 
- .get_table(self.ctx.get_tenant().as_str(), db_name, tbl_name) + .get_table(self.ctx.get_tenant().name(), db_name, tbl_name) .await?; // check mutability @@ -132,7 +132,7 @@ impl Interpreter for DropTableColumnInterpreter { let res = catalog.update_table_meta(table_info, req).await?; if let Some(share_table_info) = res.share_table_info { save_share_table_info( - self.ctx.get_tenant().as_str(), + self.ctx.get_tenant().name(), self.ctx.get_data_operator()?.operator(), share_table_info, ) diff --git a/src/query/service/src/interpreters/interpreter_table_modify_column.rs b/src/query/service/src/interpreters/interpreter_table_modify_column.rs index d797e76367210..4a5f31b6404c2 100644 --- a/src/query/service/src/interpreters/interpreter_table_modify_column.rs +++ b/src/query/service/src/interpreters/interpreter_table_modify_column.rs @@ -87,7 +87,7 @@ impl ModifyTableColumnInterpreter { let policy = handler .get_data_mask( meta_api, - self.ctx.get_tenant().to_string(), + self.ctx.get_tenant().name().to_string(), mask_name.clone(), ) .await?; @@ -121,7 +121,7 @@ impl ModifyTableColumnInterpreter { None }; let req = SetTableColumnMaskPolicyReq { - tenant: self.ctx.get_tenant().to_string(), + tenant: self.ctx.get_tenant().name().to_string(), seq: MatchSeq::Exact(table_version), table_id, column, @@ -132,7 +132,7 @@ impl ModifyTableColumnInterpreter { if let Some(share_table_info) = res.share_table_info { save_share_table_info( - self.ctx.get_tenant().as_str(), + self.ctx.get_tenant().name(), self.ctx.get_data_operator()?.operator(), share_table_info, ) @@ -166,7 +166,7 @@ impl ModifyTableColumnInterpreter { if let Some(prev_column_mask_name) = prev_column_mask_name { let req = SetTableColumnMaskPolicyReq { - tenant: self.ctx.get_tenant().to_string(), + tenant: self.ctx.get_tenant().name().to_string(), seq: MatchSeq::Exact(table_version), table_id, column, @@ -177,7 +177,7 @@ impl ModifyTableColumnInterpreter { if let Some(share_table_info) = res.share_table_info { save_share_table_info( - self.ctx.get_tenant().as_str(), + self.ctx.get_tenant().name(), self.ctx.get_data_operator()?.operator(), share_table_info, ) @@ -363,7 +363,7 @@ impl ModifyTableColumnInterpreter { if let Some(share_table_info) = res.share_table_info { save_share_table_info( - self.ctx.get_tenant().as_str(), + self.ctx.get_tenant().name(), self.ctx.get_data_operator()?.operator(), share_table_info, ) @@ -504,7 +504,7 @@ impl ModifyTableColumnInterpreter { if let Some(share_table_info) = res.share_table_info { save_share_table_info( - self.ctx.get_tenant().as_str(), + self.ctx.get_tenant().name(), self.ctx.get_data_operator()?.operator(), share_table_info, ) @@ -535,7 +535,7 @@ impl Interpreter for ModifyTableColumnInterpreter { .ctx .get_catalog(catalog_name) .await? 
- .get_table(self.ctx.get_tenant().as_str(), db_name, tbl_name) + .get_table(self.ctx.get_tenant().name(), db_name, tbl_name) .await .ok(); diff --git a/src/query/service/src/interpreters/interpreter_table_optimize.rs b/src/query/service/src/interpreters/interpreter_table_optimize.rs index 244048a0f62ad..8136887a58631 100644 --- a/src/query/service/src/interpreters/interpreter_table_optimize.rs +++ b/src/query/service/src/interpreters/interpreter_table_optimize.rs @@ -80,7 +80,7 @@ impl Interpreter for OptimizeTableInterpreter { let catalog = self.ctx.get_catalog(&self.plan.catalog).await?; let tenant = self.ctx.get_tenant(); let table = catalog - .get_table(tenant.as_str(), &self.plan.database, &self.plan.table) + .get_table(tenant.name(), &self.plan.database, &self.plan.table) .await?; // check mutability table.check_mutable()?; @@ -222,7 +222,7 @@ impl OptimizeTableInterpreter { // refresh table. table = catalog - .get_table(tenant.as_str(), &self.plan.database, &self.plan.table) + .get_table(tenant.name(), &self.plan.database, &self.plan.table) .await?; } @@ -297,7 +297,7 @@ async fn purge( // currently, context caches the table, we have to "refresh" // the table by using the catalog API directly let table = catalog - .get_table(ctx.get_tenant().as_str(), &plan.database, &plan.table) + .get_table(ctx.get_tenant().name(), &plan.database, &plan.table) .await?; let keep_latest = true; diff --git a/src/query/service/src/interpreters/interpreter_table_recluster.rs b/src/query/service/src/interpreters/interpreter_table_recluster.rs index 8db2792cfe3d8..f10ba36bfedaf 100644 --- a/src/query/service/src/interpreters/interpreter_table_recluster.rs +++ b/src/query/service/src/interpreters/interpreter_table_recluster.rs @@ -108,7 +108,7 @@ impl Interpreter for ReclusterTableInterpreter { let catalog = self.ctx.get_catalog(&self.plan.catalog).await?; let tenant = self.ctx.get_tenant(); let mut table = catalog - .get_table(tenant.as_str(), &self.plan.database, &self.plan.table) + .get_table(tenant.name(), &self.plan.database, &self.plan.table) .await?; // check mutability @@ -205,7 +205,7 @@ impl Interpreter for ReclusterTableInterpreter { // refresh table. table = catalog - .get_table(tenant.as_str(), &self.plan.database, &self.plan.table) + .get_table(tenant.name(), &self.plan.database, &self.plan.table) .await?; } diff --git a/src/query/service/src/interpreters/interpreter_table_rename_column.rs b/src/query/service/src/interpreters/interpreter_table_rename_column.rs index 0808d4fb38ff6..68635c08548db 100644 --- a/src/query/service/src/interpreters/interpreter_table_rename_column.rs +++ b/src/query/service/src/interpreters/interpreter_table_rename_column.rs @@ -65,7 +65,7 @@ impl Interpreter for RenameTableColumnInterpreter { .ctx .get_catalog(catalog_name) .await? 
- .get_table(self.ctx.get_tenant().as_str(), db_name, tbl_name) + .get_table(self.ctx.get_tenant().name(), db_name, tbl_name) .await .ok(); @@ -137,7 +137,7 @@ impl Interpreter for RenameTableColumnInterpreter { if let Some(share_table_info) = res.share_table_info { save_share_table_info( - self.ctx.get_tenant().as_str(), + self.ctx.get_tenant().name(), self.ctx.get_data_operator()?.operator(), share_table_info, ) diff --git a/src/query/service/src/interpreters/interpreter_table_revert.rs b/src/query/service/src/interpreters/interpreter_table_revert.rs index 9d08650b0c153..d4756f6f2d160 100644 --- a/src/query/service/src/interpreters/interpreter_table_revert.rs +++ b/src/query/service/src/interpreters/interpreter_table_revert.rs @@ -51,7 +51,7 @@ impl Interpreter for RevertTableInterpreter { let catalog = self.ctx.get_catalog(self.plan.catalog.as_str()).await?; let table = catalog - .get_table(tenant.as_str(), &self.plan.database, &self.plan.table) + .get_table(tenant.name(), &self.plan.database, &self.plan.table) .await?; // check mutability diff --git a/src/query/service/src/interpreters/interpreter_table_set_options.rs b/src/query/service/src/interpreters/interpreter_table_set_options.rs index 31b6de714a68a..94963dc62bf7f 100644 --- a/src/query/service/src/interpreters/interpreter_table_set_options.rs +++ b/src/query/service/src/interpreters/interpreter_table_set_options.rs @@ -95,7 +95,7 @@ impl Interpreter for SetOptionsInterpreter { let database = self.plan.database.as_str(); let table_name = self.plan.table.as_str(); let table = catalog - .get_table(self.ctx.get_tenant().as_str(), database, table_name) + .get_table(self.ctx.get_tenant().name(), database, table_name) .await?; let table_version = table.get_table_info().ident.seq; @@ -124,7 +124,7 @@ impl Interpreter for SetOptionsInterpreter { }; catalog - .upsert_table_option(self.ctx.get_tenant().as_str(), database, req) + .upsert_table_option(self.ctx.get_tenant().name(), database, req) .await?; Ok(PipelineBuildResult::create()) } diff --git a/src/query/service/src/interpreters/interpreter_table_show_create.rs b/src/query/service/src/interpreters/interpreter_table_show_create.rs index cb98343e74dd5..f96f44628ee85 100644 --- a/src/query/service/src/interpreters/interpreter_table_show_create.rs +++ b/src/query/service/src/interpreters/interpreter_table_show_create.rs @@ -66,7 +66,7 @@ impl Interpreter for ShowCreateTableInterpreter { let catalog = self.ctx.get_catalog(self.plan.catalog.as_str()).await?; let table = catalog - .get_table(tenant.as_str(), &self.plan.database, &self.plan.table) + .get_table(tenant.name(), &self.plan.database, &self.plan.table) .await?; match table.engine() { diff --git a/src/query/service/src/interpreters/interpreter_update.rs b/src/query/service/src/interpreters/interpreter_update.rs index f13fb05d195eb..1db5e83589f5a 100644 --- a/src/query/service/src/interpreters/interpreter_update.rs +++ b/src/query/service/src/interpreters/interpreter_update.rs @@ -99,7 +99,7 @@ impl Interpreter for UpdateInterpreter { let db_name = self.plan.database.as_str(); let tbl_name = self.plan.table.as_str(); let tbl = catalog - .get_table(self.ctx.get_tenant().as_str(), db_name, tbl_name) + .get_table(self.ctx.get_tenant().name(), db_name, tbl_name) .await?; // Add table lock. 
@@ -144,7 +144,7 @@ impl UpdateInterpreter { let db_name = self.plan.database.as_str(); let tbl_name = self.plan.table.as_str(); let tbl = catalog - .get_table(self.ctx.get_tenant().as_str(), db_name, tbl_name) + .get_table(self.ctx.get_tenant().name(), db_name, tbl_name) .await?; // refresh table. let tbl = tbl.refresh(self.ctx.as_ref()).await?; diff --git a/src/query/service/src/interpreters/interpreter_user_drop.rs b/src/query/service/src/interpreters/interpreter_user_drop.rs index 690ace6ab18d7..6d734d595404f 100644 --- a/src/query/service/src/interpreters/interpreter_user_drop.rs +++ b/src/query/service/src/interpreters/interpreter_user_drop.rs @@ -54,7 +54,7 @@ impl Interpreter for DropUserInterpreter { let plan = self.plan.clone(); let tenant = self.ctx.get_tenant(); UserApiProvider::instance() - .drop_user(tenant, plan.user, plan.if_exists) + .drop_user(&tenant, plan.user, plan.if_exists) .await?; Ok(PipelineBuildResult::create()) diff --git a/src/query/service/src/interpreters/interpreter_user_stage_create.rs b/src/query/service/src/interpreters/interpreter_user_stage_create.rs index 953dcdd9cd4aa..a5efeb9ceaf87 100644 --- a/src/query/service/src/interpreters/interpreter_user_stage_create.rs +++ b/src/query/service/src/interpreters/interpreter_user_stage_create.rs @@ -21,14 +21,15 @@ use databend_common_management::RoleApi; use databend_common_meta_app::principal::OwnershipObject; use databend_common_meta_app::principal::StageType; use databend_common_meta_app::schema::CreateOption; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_types::MatchSeq; -use databend_common_meta_types::NonEmptyString; use databend_common_sql::plans::CreateStagePlan; use databend_common_storages_stage::StageTable; use databend_common_users::RoleCacheManager; use databend_common_users::UserApiProvider; use log::debug; use log::info; +use minitrace::func_name; use crate::interpreters::Interpreter; use crate::pipelines::PipelineBuildResult; @@ -73,9 +74,7 @@ impl Interpreter for CreateUserStageInterpreter { )); } - let tenant = NonEmptyString::new(plan.tenant.clone()).map_err(|_e| { - ErrorCode::TenantIsEmpty("tenant is empty when CreateUserStateInterpreter") - })?; + let tenant = Tenant::new_or_error_code(&plan.tenant, func_name!())?; let quota_api = user_mgr.tenant_quota_api(&tenant); let quota = quota_api.get_quota(MatchSeq::GE(0)).await?.data; @@ -87,9 +86,7 @@ impl Interpreter for CreateUserStageInterpreter { ))); }; - let tenant = NonEmptyString::new(plan.tenant.clone()).map_err(|_e| { - ErrorCode::TenantIsEmpty("tenant is empty when CreateUserStateInterpreter") - })?; + let tenant = Tenant::new_or_error_code(&plan.tenant, "CreateUserStageInterpreter")?; let old_stage = match plan.create_option { CreateOption::CreateOrReplace => user_mgr @@ -99,9 +96,7 @@ impl Interpreter for CreateUserStageInterpreter { _ => None, }; - let tenant = NonEmptyString::new(plan.tenant.clone()).map_err(|_e| { - ErrorCode::TenantIsEmpty("tenant is empty when CreateUserStateInterpreter") - })?; + let tenant = Tenant::new_or_error_code(&plan.tenant, "CreateUserStageInterpreter")?; let mut user_stage = user_stage; user_stage.creator = Some(self.ctx.get_current_user()?.identity()); diff --git a/src/query/service/src/interpreters/interpreter_vacuum_drop_tables.rs b/src/query/service/src/interpreters/interpreter_vacuum_drop_tables.rs index 01c48fcb3909c..5bc8c1d188b2c 100644 --- a/src/query/service/src/interpreters/interpreter_vacuum_drop_tables.rs +++ 
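// Hedged sketch, not part of this patch. Several UserApiProvider calls above
// now borrow the tenant (`drop_user(&tenant, ..)`, `tenant_quota_api(&tenant)`,
// `grant_privileges_to_user(&tenant, ..)`) instead of consuming an owned
// string, so one `Tenant` built per interpreter can back multiple calls
// without cloning. `fake_user_api_call` is hypothetical and only mirrors that
// borrowing shape.
use databend_common_meta_app::tenant::Tenant;

fn fake_user_api_call(tenant: &Tenant) -> String {
    tenant.name().to_string()
}

fn run_once(tenant: &Tenant) {
    // The same borrowed tenant is reused across consecutive API calls.
    let _drop = fake_user_api_call(tenant);
    let _quota = fake_user_api_call(tenant);
}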
b/src/query/service/src/interpreters/interpreter_vacuum_drop_tables.rs @@ -80,7 +80,7 @@ impl VacuumDropTablesInterpreter { for c in drop_db_table_ids.chunks(chunk_size) { info!("vacuum drop {} table ids: {:?}", c.len(), c); let req = GcDroppedTableReq { - tenant: self.ctx.get_tenant().to_string(), + tenant: self.ctx.get_tenant().name().to_string(), drop_ids: c.to_vec(), }; let _ = catalog.gc_drop_tables(req).await?; @@ -90,7 +90,7 @@ impl VacuumDropTablesInterpreter { for c in drop_db_ids.chunks(chunk_size) { info!("vacuum drop {} db ids: {:?}", c.len(), c); let req = GcDroppedTableReq { - tenant: self.ctx.get_tenant().to_string(), + tenant: self.ctx.get_tenant().name().to_string(), drop_ids: c.to_vec(), }; let _ = catalog.gc_drop_tables(req).await?; @@ -137,7 +137,7 @@ impl Interpreter for VacuumDropTablesInterpreter { let (tables, drop_ids) = catalog .get_drop_table_infos(ListDroppedTableReq { inner: DatabaseNameIdent { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), db_name: self.plan.database.clone(), }, filter, diff --git a/src/query/service/src/interpreters/interpreter_vacuum_temporary_files.rs b/src/query/service/src/interpreters/interpreter_vacuum_temporary_files.rs index cbc0b454f954f..996963d4a923f 100644 --- a/src/query/service/src/interpreters/interpreter_vacuum_temporary_files.rs +++ b/src/query/service/src/interpreters/interpreter_vacuum_temporary_files.rs @@ -59,7 +59,7 @@ impl Interpreter for VacuumTemporaryFilesInterpreter { let handler = get_vacuum_handler(); - let temporary_files_prefix = query_spill_prefix(self.ctx.get_tenant().as_str()); + let temporary_files_prefix = query_spill_prefix(self.ctx.get_tenant().name()); let remove_files = handler .do_vacuum_temporary_files( temporary_files_prefix, diff --git a/src/query/service/src/interpreters/interpreter_view_create.rs b/src/query/service/src/interpreters/interpreter_view_create.rs index de7d48fc2f3a2..9cfa77084e6de 100644 --- a/src/query/service/src/interpreters/interpreter_view_create.rs +++ b/src/query/service/src/interpreters/interpreter_view_create.rs @@ -67,7 +67,7 @@ impl Interpreter for CreateViewInterpreter { let database_name = table.database(); let table_name = table.name(); if !catalog - .exists_table(tenant.as_str(), database_name, table_name) + .exists_table(tenant.name(), database_name, table_name) .await? 
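// Sketch only, not part of this patch. `query_spill_prefix` keeps its `&str`
// parameter in these hunks, so spill-related call sites switch from
// `tenant.as_str()` to `tenant.name()`. The stub body below is invented for
// illustration; the real prefix layout is defined by `query_spill_prefix`
// elsewhere in the repository and is not reproduced here.
use databend_common_meta_app::tenant::Tenant;

fn query_spill_prefix_stub(tenant_name: &str) -> String {
    format!("spill/{tenant_name}")
}

fn spill_prefix_for(tenant: &Tenant) -> String {
    query_spill_prefix_stub(tenant.name())
}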
&& !table_function.contains(&table_name.to_string()) && !table.table().is_stage_table() diff --git a/src/query/service/src/interpreters/interpreter_virtual_column_alter.rs b/src/query/service/src/interpreters/interpreter_virtual_column_alter.rs index 3ca5740e199e3..689740929b9c5 100644 --- a/src/query/service/src/interpreters/interpreter_virtual_column_alter.rs +++ b/src/query/service/src/interpreters/interpreter_virtual_column_alter.rs @@ -74,7 +74,7 @@ impl Interpreter for AlterVirtualColumnInterpreter { let update_virtual_column_req = UpdateVirtualColumnReq { if_exists: self.plan.if_exists, name_ident: VirtualColumnNameIdent { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), table_id, }, virtual_columns: self.plan.virtual_columns.clone(), diff --git a/src/query/service/src/interpreters/interpreter_virtual_column_create.rs b/src/query/service/src/interpreters/interpreter_virtual_column_create.rs index 6bddc3598f8c9..bca405782feed 100644 --- a/src/query/service/src/interpreters/interpreter_virtual_column_create.rs +++ b/src/query/service/src/interpreters/interpreter_virtual_column_create.rs @@ -74,7 +74,7 @@ impl Interpreter for CreateVirtualColumnInterpreter { let create_virtual_column_req = CreateVirtualColumnReq { create_option: self.plan.create_option, name_ident: VirtualColumnNameIdent { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), table_id, }, virtual_columns: self.plan.virtual_columns.clone(), diff --git a/src/query/service/src/interpreters/interpreter_virtual_column_drop.rs b/src/query/service/src/interpreters/interpreter_virtual_column_drop.rs index 56c422092fdf2..0a2e5b89ebcbb 100644 --- a/src/query/service/src/interpreters/interpreter_virtual_column_drop.rs +++ b/src/query/service/src/interpreters/interpreter_virtual_column_drop.rs @@ -74,7 +74,7 @@ impl Interpreter for DropVirtualColumnInterpreter { let drop_virtual_column_req = DropVirtualColumnReq { if_exists: self.plan.if_exists, name_ident: VirtualColumnNameIdent { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), table_id, }, }; diff --git a/src/query/service/src/local/mod.rs b/src/query/service/src/local/mod.rs index bdd3e0436fbcc..dae8591b12078 100644 --- a/src/query/service/src/local/mod.rs +++ b/src/query/service/src/local/mod.rs @@ -56,7 +56,7 @@ pub async fn query_local(query_sql: &str, output_format: &str) -> Result<()> { GlobalServices::init(&conf).await?; // init oss license manager - OssLicenseManager::init(conf.query.tenant_id.to_string()).unwrap(); + OssLicenseManager::init(conf.query.tenant_id.name().to_string()).unwrap(); // Cluster register. 
ClusterDiscovery::instance() diff --git a/src/query/service/src/pipelines/builders/builder_aggregate.rs b/src/query/service/src/pipelines/builders/builder_aggregate.rs index bb63bd0e02fb8..1676994d2a35a 100644 --- a/src/query/service/src/pipelines/builders/builder_aggregate.rs +++ b/src/query/service/src/pipelines/builders/builder_aggregate.rs @@ -168,7 +168,7 @@ impl PipelineBuilder { // If cluster mode, spill write will be completed in exchange serialize, because we need scatter the block data first if self.ctx.get_cluster().is_empty() { let operator = DataOperator::instance().operator(); - let location_prefix = query_spill_prefix(self.ctx.get_tenant().as_str()); + let location_prefix = query_spill_prefix(self.ctx.get_tenant().name()); self.main_pipeline.add_transform(|input, output| { Ok(ProcessorPtr::create( match params.aggregate_functions.is_empty() { diff --git a/src/query/service/src/pipelines/builders/builder_sort.rs b/src/query/service/src/pipelines/builders/builder_sort.rs index 787e23ebba9f2..30dc3b8d542a4 100644 --- a/src/query/service/src/pipelines/builders/builder_sort.rs +++ b/src/query/service/src/pipelines/builders/builder_sort.rs @@ -283,7 +283,7 @@ impl SortPipelineBuilder { if may_spill { let schema = add_order_field(sort_merge_output_schema.clone(), &self.sort_desc); - let config = SpillerConfig::create(query_spill_prefix(self.ctx.get_tenant().as_str())); + let config = SpillerConfig::create(query_spill_prefix(self.ctx.get_tenant().name())); pipeline.add_transform(|input, output| { let op = DataOperator::instance().operator(); let spiller = diff --git a/src/query/service/src/pipelines/processors/transforms/aggregator/aggregate_exchange_injector.rs b/src/query/service/src/pipelines/processors/transforms/aggregator/aggregate_exchange_injector.rs index 1d75766d2f46a..b9e90c242912c 100644 --- a/src/query/service/src/pipelines/processors/transforms/aggregator/aggregate_exchange_injector.rs +++ b/src/query/service/src/pipelines/processors/transforms/aggregator/aggregate_exchange_injector.rs @@ -313,7 +313,7 @@ impl AggregateInjecto Arc::new(AggregateInjector:: { ctx, method, - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), aggregator_params: params, _phantom: Default::default(), }) diff --git a/src/query/service/src/pipelines/processors/transforms/hash_join/build_spill/build_spill_state.rs b/src/query/service/src/pipelines/processors/transforms/hash_join/build_spill/build_spill_state.rs index 2919a6eaa791e..8fcef2bd83d36 100644 --- a/src/query/service/src/pipelines/processors/transforms/hash_join/build_spill/build_spill_state.rs +++ b/src/query/service/src/pipelines/processors/transforms/hash_join/build_spill/build_spill_state.rs @@ -44,7 +44,7 @@ pub struct BuildSpillState { impl BuildSpillState { pub fn create(ctx: Arc, build_state: Arc) -> Result { let tenant = ctx.get_tenant(); - let spill_config = SpillerConfig::create(query_spill_prefix(tenant.as_str())); + let spill_config = SpillerConfig::create(query_spill_prefix(tenant.name())); let operator = DataOperator::instance().operator(); let spiller = Spiller::create(ctx, operator, spill_config, SpillerType::HashJoinBuild)?; Ok(Self { diff --git a/src/query/service/src/pipelines/processors/transforms/hash_join/probe_spill/probe_spill_state.rs b/src/query/service/src/pipelines/processors/transforms/hash_join/probe_spill/probe_spill_state.rs index a3265823bec6e..956d1af8f5153 100644 --- a/src/query/service/src/pipelines/processors/transforms/hash_join/probe_spill/probe_spill_state.rs +++ 
b/src/query/service/src/pipelines/processors/transforms/hash_join/probe_spill/probe_spill_state.rs @@ -40,7 +40,7 @@ pub struct ProbeSpillState { impl ProbeSpillState { pub fn create(ctx: Arc, probe_state: Arc) -> Result { let tenant = ctx.get_tenant(); - let spill_config = SpillerConfig::create(query_spill_prefix(tenant.as_str())); + let spill_config = SpillerConfig::create(query_spill_prefix(tenant.name())); let operator = DataOperator::instance().operator(); let spiller = Spiller::create(ctx, operator, spill_config, SpillerType::HashJoinProbe)?; Ok(Self { diff --git a/src/query/service/src/servers/flight_sql/flight_sql_service/catalog.rs b/src/query/service/src/servers/flight_sql/flight_sql_service/catalog.rs index 2c0889ed32036..fff72a1802965 100644 --- a/src/query/service/src/servers/flight_sql/flight_sql_service/catalog.rs +++ b/src/query/service/src/servers/flight_sql/flight_sql_service/catalog.rs @@ -57,12 +57,12 @@ impl CatalogInfoProvider { vec![( catalog_name.clone(), catalog_mgr - .get_catalog(tenant.as_str(), &catalog_name, ctx.txn_mgr()) + .get_catalog(tenant.name(), &catalog_name, ctx.txn_mgr()) .await?, )] } else { catalog_mgr - .list_catalogs(tenant.as_str(), ctx.txn_mgr()) + .list_catalogs(tenant.name(), ctx.txn_mgr()) .await? .iter() .map(|r| (r.name(), r.clone())) @@ -76,14 +76,14 @@ impl CatalogInfoProvider { let table_type = "table".to_string(); for (catalog_name, catalog) in catalogs.into_iter() { let dbs = if let Some(database_name) = &database_name { - vec![catalog.get_database(tenant.as_str(), database_name).await?] + vec![catalog.get_database(tenant.name(), database_name).await?] } else { - catalog.list_databases(tenant.as_str()).await? + catalog.list_databases(&tenant).await? }; for db in dbs { let db_name = db.name().to_string().into_boxed_str(); let db_name: &str = Box::leak(db_name); - let tables = match catalog.list_tables(tenant.as_str(), db_name).await { + let tables = match catalog.list_tables(tenant.name(), db_name).await { Ok(tables) => tables, Err(err) if err.code() == ErrorCode::EMPTY_SHARE_ENDPOINT_CONFIG => { warn!("list tables failed on db {}: {}", db.name(), err); diff --git a/src/query/service/src/sessions/query_ctx.rs b/src/query/service/src/sessions/query_ctx.rs index 35615bc0b562d..3f58d77f78028 100644 --- a/src/query/service/src/sessions/query_ctx.rs +++ b/src/query/service/src/sessions/query_ctx.rs @@ -72,7 +72,7 @@ use databend_common_meta_app::principal::COPY_MAX_FILES_PER_COMMIT; use databend_common_meta_app::schema::CatalogInfo; use databend_common_meta_app::schema::GetTableCopiedFileReq; use databend_common_meta_app::schema::TableInfo; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use databend_common_metrics::storage::*; use databend_common_pipeline_core::processors::PlanProfile; use databend_common_pipeline_core::InputError; @@ -206,7 +206,7 @@ impl QueryContext { .get_catalog(self.get_current_catalog().as_str()) .await?; match catalog - .get_database(tenant_id.as_str(), &new_database_name) + .get_database(tenant_id.name(), &new_database_name) .await { Ok(_) => self.shared.set_current_database(new_database_name), @@ -518,7 +518,7 @@ impl TableContext for QueryContext { self.shared .catalog_manager .get_catalog( - self.get_tenant().as_str(), + self.get_tenant().name(), catalog_name.as_ref(), self.txn_mgr(), ) @@ -596,7 +596,7 @@ impl TableContext for QueryContext { Ok(format) } - fn get_tenant(&self) -> NonEmptyString { + fn get_tenant(&self) -> Tenant { self.shared.get_tenant() } @@ 
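// Hedged sketch, not part of this patch. In the catalog hunks above the
// migration is intentionally partial: `list_databases` now takes `&Tenant`,
// while `get_database` and `list_tables` still take `&str`. The trait below is
// a hypothetical reduction of that mixed surface, only to make the two calling
// conventions at a single call site explicit.
use databend_common_exception::Result;
use databend_common_meta_app::tenant::Tenant;

trait MixedCatalog {
    fn list_databases(&self, tenant: &Tenant) -> Result<Vec<String>>;
    fn get_database(&self, tenant: &str, db_name: &str) -> Result<String>;
}

fn browse(cat: &dyn MixedCatalog, tenant: &Tenant) -> Result<Vec<String>> {
    let mut out = Vec::new();
    for db in cat.list_databases(tenant)? {
        // string-typed methods still receive the raw tenant name
        out.push(cat.get_database(tenant.name(), &db)?);
    }
    Ok(out)
}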
-819,7 +819,7 @@ impl TableContext for QueryContext { let tenant = self.get_tenant(); let catalog = self.get_catalog(catalog_name).await?; let table = catalog - .get_table(tenant.as_str(), database_name, table_name) + .get_table(tenant.name(), database_name, table_name) .await?; let table_id = table.get_id(); @@ -835,7 +835,7 @@ impl TableContext for QueryContext { let req = GetTableCopiedFileReq { table_id, files }; let start_request = Instant::now(); let copied_files = catalog - .get_table_copied_file_info(tenant.as_str(), database_name, req) + .get_table_copied_file_info(tenant.name(), database_name, req) .await? .file_info; diff --git a/src/query/service/src/sessions/query_ctx_shared.rs b/src/query/service/src/sessions/query_ctx_shared.rs index 6573451eccfe2..c614d51753b0e 100644 --- a/src/query/service/src/sessions/query_ctx_shared.rs +++ b/src/query/service/src/sessions/query_ctx_shared.rs @@ -37,7 +37,7 @@ use databend_common_meta_app::principal::OnErrorMode; use databend_common_meta_app::principal::RoleInfo; use databend_common_meta_app::principal::UserDefinedConnection; use databend_common_meta_app::principal::UserInfo; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use databend_common_pipeline_core::processors::PlanProfile; use databend_common_pipeline_core::InputError; use databend_common_settings::Settings; @@ -281,7 +281,7 @@ impl QueryContextShared { StorageMetrics::merge(&metrics) } - pub fn get_tenant(&self) -> NonEmptyString { + pub fn get_tenant(&self) -> Tenant { self.session.get_current_tenant() } @@ -335,9 +335,9 @@ impl QueryContextShared { let table_meta_key = (catalog.to_string(), database.to_string(), table.to_string()); let catalog = self .catalog_manager - .get_catalog(tenant.as_str(), catalog, self.session.session_ctx.txn_mgr()) + .get_catalog(tenant.name(), catalog, self.session.session_ctx.txn_mgr()) .await?; - let cache_table = catalog.get_table(tenant.as_str(), database, table).await?; + let cache_table = catalog.get_table(tenant.name(), database, table).await?; let mut tables_refs = self.tables_refs.lock(); @@ -365,13 +365,13 @@ impl QueryContextShared { let tenant = self.get_tenant(); let catalog = self .catalog_manager - .get_catalog(tenant.as_str(), catalog, self.session.session_ctx.txn_mgr()) + .get_catalog(tenant.name(), catalog, self.session.session_ctx.txn_mgr()) .await?; let source_table = match catalog.get_stream_source_table(stream_desc)? 
{ Some(source_table) => source_table, None => { let source_table = catalog - .get_table(tenant.as_str(), database, table_name) + .get_table(tenant.name(), database, table_name) .await?; catalog.cache_stream_source_table( stream.get_table_info().clone(), diff --git a/src/query/service/src/sessions/session.rs b/src/query/service/src/sessions/session.rs index 39fe6b35c6335..6a0d0f16ad109 100644 --- a/src/query/service/src/sessions/session.rs +++ b/src/query/service/src/sessions/session.rs @@ -25,7 +25,7 @@ use databend_common_meta_app::principal::OwnershipObject; use databend_common_meta_app::principal::RoleInfo; use databend_common_meta_app::principal::UserInfo; use databend_common_meta_app::principal::UserPrivilegeType; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use databend_common_settings::Settings; use databend_common_users::GrantObjectVisibilityChecker; use databend_storages_common_txn::TxnManagerRef; @@ -79,7 +79,7 @@ impl Session { ("session_database".to_string(), self.get_current_database()), ( "session_tenant".to_string(), - self.get_current_tenant().to_string(), + self.get_current_tenant().name().to_string(), ), ]; if let Some(query_id) = self.get_current_query_id() { @@ -191,7 +191,7 @@ impl Session { self.session_ctx.get_current_catalog() } - pub fn get_current_tenant(self: &Arc) -> NonEmptyString { + pub fn get_current_tenant(self: &Arc) -> Tenant { self.session_ctx.get_current_tenant() } diff --git a/src/query/service/src/sessions/session_ctx.rs b/src/query/service/src/sessions/session_ctx.rs index a4d480f125503..e7b407181f041 100644 --- a/src/query/service/src/sessions/session_ctx.rs +++ b/src/query/service/src/sessions/session_ctx.rs @@ -23,7 +23,7 @@ use databend_common_config::GlobalConfig; use databend_common_exception::Result; use databend_common_meta_app::principal::RoleInfo; use databend_common_meta_app::principal::UserInfo; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use databend_common_settings::Settings; use databend_storages_common_txn::TxnManager; use databend_storages_common_txn::TxnManagerRef; @@ -151,20 +151,25 @@ impl SessionContext { *lock = role } - pub fn get_current_tenant(&self) -> NonEmptyString { + pub fn get_current_tenant(&self) -> Tenant { let conf = GlobalConfig::instance(); if conf.query.internal_enable_sandbox_tenant { let sandbox_tenant = self.settings.get_sandbox_tenant().unwrap_or_default(); if !sandbox_tenant.is_empty() { - return NonEmptyString::new(sandbox_tenant).unwrap(); + return Tenant::new_or_error_code(sandbox_tenant, "create from sandbox_tenant") + .unwrap(); } } if conf.query.management_mode || self.typ == SessionType::Local { let lock = self.current_tenant.read(); if !lock.is_empty() { - return NonEmptyString::new(lock.clone()).unwrap(); + return Tenant::new_or_error_code( + lock.clone(), + "create from SessionContext.current_tenant", + ) + .unwrap(); } } diff --git a/src/query/service/src/table_functions/cloud/task_dependents.rs b/src/query/service/src/table_functions/cloud/task_dependents.rs index e5e9019c0f852..d3aaf0aff659d 100644 --- a/src/query/service/src/table_functions/cloud/task_dependents.rs +++ b/src/query/service/src/table_functions/cloud/task_dependents.rs @@ -188,7 +188,7 @@ impl TaskDependentsSource { fn build_request(&self) -> GetTaskDependentsRequest { GetTaskDependentsRequest { task_name: self.task_name.clone(), - tenant_id: self.ctx.get_tenant().to_string(), + tenant_id: 
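// Minimal sketch, not part of this patch, of the fallback logic shown in
// `SessionContext::get_current_tenant` above: prefer a non-empty sandbox or
// management-mode override, otherwise fall back to the configured tenant. The
// `.unwrap()` mirrors the patch, which treats a non-empty-but-invalid override
// as a programming error; `config_tenant` is a hypothetical parameter standing
// in for the configured value.
use databend_common_meta_app::tenant::Tenant;

fn effective_tenant(override_name: &str, config_tenant: &Tenant) -> Tenant {
    if !override_name.is_empty() {
        return Tenant::new_or_error_code(override_name, "create from sandbox_tenant").unwrap();
    }
    config_tenant.clone()
}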
self.ctx.get_tenant().name().to_string(), recursive: self.recursive, } } @@ -262,7 +262,12 @@ impl AsyncSource for TaskDependentsSource { let user = self.ctx.get_current_user()?.identity().to_string(); let query_id = self.ctx.get_id(); - let cfg = build_client_config(tenant.to_string(), user, query_id, cloud_api.get_timeout()); + let cfg = build_client_config( + tenant.name().to_string(), + user, + query_id, + cloud_api.get_timeout(), + ); let dependents = cloud_api .get_task_client() diff --git a/src/query/service/src/table_functions/cloud/task_dependents_enable.rs b/src/query/service/src/table_functions/cloud/task_dependents_enable.rs index d1c7300658283..a06094d459eef 100644 --- a/src/query/service/src/table_functions/cloud/task_dependents_enable.rs +++ b/src/query/service/src/table_functions/cloud/task_dependents_enable.rs @@ -153,7 +153,7 @@ impl TaskDependentsEnableSource { fn build_request(&self) -> EnableTaskDependentsRequest { EnableTaskDependentsRequest { task_name: self.task_name.clone(), - tenant_id: self.ctx.get_tenant().to_string(), + tenant_id: self.ctx.get_tenant().name().to_string(), } } } @@ -176,7 +176,12 @@ impl AsyncSource for TaskDependentsEnableSource { let user = self.ctx.get_current_user()?.identity().to_string(); let query_id = self.ctx.get_id(); - let cfg = build_client_config(tenant.to_string(), user, query_id, cloud_api.get_timeout()); + let cfg = build_client_config( + tenant.name().to_string(), + user, + query_id, + cloud_api.get_timeout(), + ); let req = make_request(self.build_request(), cfg); cloud_api .get_task_client() diff --git a/src/query/service/src/table_functions/openai/ai_to_sql.rs b/src/query/service/src/table_functions/openai/ai_to_sql.rs index 3bb83a5e33e8a..e9ab4e84834f2 100644 --- a/src/query/service/src/table_functions/openai/ai_to_sql.rs +++ b/src/query/service/src/table_functions/openai/ai_to_sql.rs @@ -195,7 +195,7 @@ impl AsyncSource for GPT2SQLSource { template.push("### Postgres SQL tables, with their properties:".to_string()); template.push("#".to_string()); - for table in catalog.list_tables(tenant.as_str(), &database).await? { + for table in catalog.list_tables(tenant.name(), &database).await? 
{ let fields = if matches!(table.engine(), VIEW_ENGINE | STREAM_ENGINE) { continue; } else { diff --git a/src/query/service/src/table_functions/others/execute_background_job.rs b/src/query/service/src/table_functions/others/execute_background_job.rs index b813979c3f0fa..a8e3468193691 100644 --- a/src/query/service/src/table_functions/others/execute_background_job.rs +++ b/src/query/service/src/table_functions/others/execute_background_job.rs @@ -32,7 +32,6 @@ use databend_common_expression::TableSchemaRefExt; use databend_common_meta_app::schema::TableIdent; use databend_common_meta_app::schema::TableInfo; use databend_common_meta_app::schema::TableMeta; -use databend_common_meta_app::tenant::Tenant; use databend_common_pipeline_core::processors::OutputPort; use databend_common_pipeline_core::processors::ProcessorPtr; use databend_common_pipeline_core::Pipeline; @@ -155,8 +154,7 @@ impl AsyncSource for ExecuteBackgroundJobSource { async fn generate(&mut self) -> Result> { let background_handler = get_background_service_handler(); - let non_empty = self.ctx.get_tenant(); - let tenant = Tenant::new_nonempty(non_empty); + let tenant = self.ctx.get_tenant(); background_handler .execute_scheduled_job( diff --git a/src/query/service/src/table_functions/others/license_info.rs b/src/query/service/src/table_functions/others/license_info.rs index 2c4babc5369ba..2038e316707c9 100644 --- a/src/query/service/src/table_functions/others/license_info.rs +++ b/src/query/service/src/table_functions/others/license_info.rs @@ -218,11 +218,15 @@ impl AsyncSource for LicenseInfoSource { // sync global changes on distributed node cluster. settings.load_changes().await?; let license = unsafe { - settings - .get_enterprise_license() - .map_err_to_code(ErrorCode::LicenseKeyInvalid, || { - format!("failed to get license for {}", self.ctx.get_tenant()) - })? + settings.get_enterprise_license().map_err_to_code( + ErrorCode::LicenseKeyInvalid, + || { + format!( + "failed to get license for {}", + self.ctx.get_tenant().display() + ) + }, + )? 
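// Hedged sketch, not part of this patch. The license hunks above format the
// tenant via `tenant.display()` rather than interpolating the struct directly,
// while key and request building keeps using `tenant.name()`. This assumes
// `display()` returns an `impl std::fmt::Display` wrapper, as the `format!`
// call sites above imply.
use databend_common_meta_app::tenant::Tenant;

fn license_error_message(tenant: &Tenant) -> String {
    // human-facing message uses the display form
    format!("failed to get license for {}", tenant.display())
}

fn license_key(tenant: &Tenant) -> String {
    // machine-facing identifiers keep the raw name
    tenant.name().to_string()
}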
}; get_license_manager() @@ -233,7 +237,10 @@ impl AsyncSource for LicenseInfoSource { .manager .parse_license(license.as_str()) .map_err_to_code(ErrorCode::LicenseKeyInvalid, || { - format!("current license invalid for {}", self.ctx.get_tenant()) + format!( + "current license invalid for {}", + self.ctx.get_tenant().display() + ) })?; Ok(Some(self.to_block(&info)?)) } diff --git a/src/query/service/src/table_functions/others/suggested_background_tasks.rs b/src/query/service/src/table_functions/others/suggested_background_tasks.rs index 1a2381d807cc9..3c9fab97c1138 100644 --- a/src/query/service/src/table_functions/others/suggested_background_tasks.rs +++ b/src/query/service/src/table_functions/others/suggested_background_tasks.rs @@ -198,7 +198,7 @@ impl SuggestedBackgroundTasksSource { let ctx = ctx.clone(); info!( background = true, - tenant = ctx.get_tenant().to_string(); + tenant = ctx.get_tenant().name().to_string(); "list all lsuggestions" ); Self::get_suggested_compaction_tasks(ctx).await diff --git a/src/query/service/src/table_functions/others/tenant_quota.rs b/src/query/service/src/table_functions/others/tenant_quota.rs index 8ba238f6d660c..b493d7f604513 100644 --- a/src/query/service/src/table_functions/others/tenant_quota.rs +++ b/src/query/service/src/table_functions/others/tenant_quota.rs @@ -43,6 +43,7 @@ use databend_common_meta_app::principal::UserOptionFlag; use databend_common_meta_app::schema::TableIdent; use databend_common_meta_app::schema::TableInfo; use databend_common_meta_app::schema::TableMeta; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_app::tenant::TenantQuota; use databend_common_meta_types::MatchSeq; use databend_common_meta_types::NonEmptyString; @@ -53,6 +54,7 @@ use databend_common_pipeline_sources::AsyncSource; use databend_common_pipeline_sources::AsyncSourcer; use databend_common_storages_factory::Table; use databend_common_users::UserApiProvider; +use minitrace::func_name; pub struct TenantQuotaTable { table_info: TableInfo, @@ -243,7 +245,7 @@ impl AsyncSource for TenantQuotaSource { UserOptionFlag::TenantSetting ))); } - tenant = args[0].clone(); + tenant = Tenant::new_or_error_code(args[0].clone(), func_name!())?; } let quota_api = UserApiProvider::instance().tenant_quota_api(&tenant); let res = quota_api.get_quota(MatchSeq::GE(0)).await?; diff --git a/src/query/service/src/test_kits/config.rs b/src/query/service/src/test_kits/config.rs index a32e3d737aa14..05d891b1ffbef 100644 --- a/src/query/service/src/test_kits/config.rs +++ b/src/query/service/src/test_kits/config.rs @@ -19,7 +19,7 @@ use databend_common_config::InnerConfig; use databend_common_meta_app::principal::AuthInfo; use databend_common_meta_app::storage::StorageFsConfig; use databend_common_meta_app::storage::StorageParams; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use databend_common_users::idm_config::IDMConfig; use tempfile::TempDir; @@ -30,7 +30,7 @@ pub struct ConfigBuilder { impl ConfigBuilder { pub fn create() -> ConfigBuilder { let mut conf = InnerConfig::default(); - conf.query.tenant_id = NonEmptyString::new("test").unwrap(); + conf.query.tenant_id = Tenant::new_literal("test"); conf.log = databend_common_tracing::Config::new_testing(); // add idm users for test let mut users = HashMap::new(); diff --git a/src/query/service/src/test_kits/fixture.rs b/src/query/service/src/test_kits/fixture.rs index c42045e1a31c9..80362ae044e66 100644 --- a/src/query/service/src/test_kits/fixture.rs +++ 
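// Sketch only, not part of this patch. Test fixtures and config defaults above
// use `Tenant::new_literal("test")`, which this patch treats as an infallible
// constructor for names known to be non-empty at the call site, avoiding the
// error handling required by `new_or_error_code`.
use databend_common_meta_app::tenant::Tenant;

fn test_tenant() -> Tenant {
    Tenant::new_literal("test")
}

#[test]
fn tenant_literal_roundtrip() {
    assert_eq!(test_tenant().name(), "test");
}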
b/src/query/service/src/test_kits/fixture.rs @@ -53,6 +53,7 @@ use databend_common_meta_app::principal::UserPrivilegeSet; use databend_common_meta_app::schema::CreateOption; use databend_common_meta_app::schema::DatabaseMeta; use databend_common_meta_app::storage::StorageParams; +use databend_common_meta_app::tenant::Tenant; use databend_common_pipeline_core::processors::ProcessorPtr; use databend_common_pipeline_sinks::EmptySink; use databend_common_pipeline_sources::BlocksSource; @@ -211,7 +212,7 @@ impl TestFixture { } GlobalServices::init_with(config).await?; - OssLicenseManager::init(config.query.tenant_id.to_string())?; + OssLicenseManager::init(config.query.tenant_id.name().to_string())?; // Cluster register. { @@ -266,8 +267,8 @@ impl TestFixture { } } - pub fn default_tenant(&self) -> String { - self.conf.query.tenant_id.to_string() + pub fn default_tenant(&self) -> Tenant { + self.conf.query.tenant_id.clone() } pub fn default_db_name(&self) -> String { @@ -303,7 +304,7 @@ impl TestFixture { pub fn default_create_table_plan(&self) -> CreateTablePlan { CreateTablePlan { create_option: CreateOption::Create, - tenant: self.default_tenant(), + tenant: self.default_tenant().name().to_string(), catalog: self.default_catalog_name(), database: self.default_db_name(), table: self.default_table_name(), @@ -328,7 +329,7 @@ impl TestFixture { pub fn normal_create_table_plan(&self) -> CreateTablePlan { CreateTablePlan { create_option: CreateOption::Create, - tenant: self.default_tenant(), + tenant: self.default_tenant().name().to_string(), catalog: self.default_catalog_name(), database: self.default_db_name(), table: self.default_table_name(), @@ -364,7 +365,7 @@ impl TestFixture { pub fn variant_create_table_plan(&self) -> CreateTablePlan { CreateTablePlan { create_option: CreateOption::Create, - tenant: self.default_tenant(), + tenant: self.default_tenant().name().to_string(), catalog: self.default_catalog_name(), database: self.default_db_name(), table: self.default_table_name(), @@ -400,7 +401,7 @@ impl TestFixture { pub fn string_create_table_plan(&self) -> CreateTablePlan { CreateTablePlan { create_option: CreateOption::Create, - tenant: self.default_tenant(), + tenant: self.default_tenant().name().to_string(), catalog: self.default_catalog_name(), database: self.default_db_name(), table: self.default_table_name(), @@ -445,7 +446,7 @@ impl TestFixture { pub fn computed_create_table_plan(&self) -> CreateTablePlan { CreateTablePlan { create_option: CreateOption::Create, - tenant: self.default_tenant(), + tenant: self.default_tenant().name().to_string(), catalog: self.default_catalog_name(), database: self.default_db_name(), table: self.default_table_name(), @@ -504,7 +505,7 @@ impl TestFixture { let db_name = gen_db_name(&self.prefix); let plan = CreateDatabasePlan { catalog: "default".to_owned(), - tenant, + tenant: tenant.to_nonempty(), create_option: CreateOption::Create, database: db_name, meta: DatabaseMeta { @@ -786,7 +787,7 @@ impl TestFixture { .get_catalog(CATALOG_DEFAULT) .await? 
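// Hedged sketch, not part of this patch. `CreateDatabasePlan` above still
// stores a `NonEmptyString` tenant, so the fixture converts with
// `tenant.to_nonempty()`. This assumes `Tenant::to_nonempty` is the reverse
// bridge shown in that hunk; `LegacyPlan` is a hypothetical stand-in for any
// plan struct that has not been migrated yet.
use databend_common_meta_app::tenant::Tenant;
use databend_common_meta_types::NonEmptyString;

struct LegacyPlan {
    tenant: NonEmptyString,
}

fn to_legacy_plan(tenant: &Tenant) -> LegacyPlan {
    LegacyPlan {
        tenant: tenant.to_nonempty(),
    }
}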
.get_table( - self.default_tenant().as_str(), + self.default_tenant().name(), self.default_db_name().as_str(), self.default_table_name().as_str(), ) diff --git a/src/query/service/tests/it/api/http/status.rs b/src/query/service/tests/it/api/http/status.rs index f8283f9ec70c9..66e7bde2e6268 100644 --- a/src/query/service/tests/it/api/http/status.rs +++ b/src/query/service/tests/it/api/http/status.rs @@ -17,7 +17,7 @@ use std::sync::Arc; use databend_common_base::base::tokio; use databend_common_exception::Result; use databend_common_meta_app::principal::UserIdentity; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use databend_common_users::UserApiProvider; use databend_query::api::http::v1::instance_status::instance_status_handler; use databend_query::api::http::v1::instance_status::InstanceStatus; @@ -58,10 +58,7 @@ async fn get_status(ep: &Route) -> InstanceStatus { async fn run_query(query_ctx: &Arc) -> Result> { let sql = "select sleep(3) from numbers(1)"; let user = UserApiProvider::instance() - .get_user( - &NonEmptyString::new("test").unwrap(), - UserIdentity::new("root", "%"), - ) + .get_user(&Tenant::new_literal("test"), UserIdentity::new("root", "%")) .await?; query_ctx .get_current_session() diff --git a/src/query/service/tests/it/auth.rs b/src/query/service/tests/it/auth.rs index 316d1892b30d3..60595e7bb29f4 100644 --- a/src/query/service/tests/it/auth.rs +++ b/src/query/service/tests/it/auth.rs @@ -653,7 +653,7 @@ async fn test_jwt_auth_mgr_with_management() -> Result<()> { .await?; let user_info = ctx.get_current_user()?; let current_tenant = ctx.get_tenant(); - assert_eq!(current_tenant.to_string(), tenant.to_string()); + assert_eq!(current_tenant.name().to_string(), tenant.to_string()); assert_eq!(user_info.grants.roles().len(), 0); } diff --git a/src/query/service/tests/it/catalogs/database_catalog.rs b/src/query/service/tests/it/catalogs/database_catalog.rs index ad5f858cb8aca..217e0ae0e98ef 100644 --- a/src/query/service/tests/it/catalogs/database_catalog.rs +++ b/src/query/service/tests/it/catalogs/database_catalog.rs @@ -31,24 +31,26 @@ use databend_common_meta_app::schema::DropTableByIdReq; use databend_common_meta_app::schema::RenameDatabaseReq; use databend_common_meta_app::schema::TableMeta; use databend_common_meta_app::schema::TableNameIdent; +use databend_common_meta_app::tenant::Tenant; use databend_query::catalogs::Catalog; use crate::tests::create_catalog; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_catalogs_get_database() -> Result<()> { - let tenant = "test"; + let tenant_name = "test"; + let tenant = Tenant::new_literal(tenant_name); let catalog = create_catalog().await?; // get system database - let database = catalog.get_database(tenant, "system").await?; + let database = catalog.get_database(tenant_name, "system").await?; assert_eq!(database.name(), "system"); - let db_list = catalog.list_databases(tenant).await?; + let db_list = catalog.list_databases(&tenant).await?; assert_eq!(db_list.len(), 3); // get default database - let db_2 = catalog.get_database(tenant, "default").await?; + let db_2 = catalog.get_database(tenant_name, "default").await?; assert_eq!(db_2.name(), "default"); // get non-exist database @@ -64,10 +66,11 @@ async fn test_catalogs_get_database() -> Result<()> { #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_catalogs_database() -> Result<()> { - let tenant = "admin"; + let tenant_name = "admin"; + let tenant = 
Tenant::new_literal(tenant_name); let catalog = create_catalog().await?; - let db_list = catalog.list_databases(tenant).await?; + let db_list = catalog.list_databases(&tenant).await?; let db_count = db_list.len(); // Create. @@ -75,7 +78,7 @@ async fn test_catalogs_database() -> Result<()> { let mut req = CreateDatabaseReq { create_option: CreateOption::Create, name_ident: DatabaseNameIdent { - tenant: tenant.to_string(), + tenant: tenant_name.to_string(), db_name: "db1".to_string(), }, meta: DatabaseMeta { @@ -86,7 +89,7 @@ async fn test_catalogs_database() -> Result<()> { let res = catalog.create_database(req.clone()).await; assert!(res.is_ok()); - let db_list_1 = catalog.list_databases(tenant).await?; + let db_list_1 = catalog.list_databases(&tenant).await?; assert_eq!(db_list_1.len(), db_count + 1); // Tenant empty. @@ -100,7 +103,7 @@ async fn test_catalogs_database() -> Result<()> { let mut req = RenameDatabaseReq { if_exists: false, name_ident: DatabaseNameIdent { - tenant: tenant.to_string(), + tenant: tenant_name.to_string(), db_name: "db1".to_string(), }, new_db_name: "db2".to_string(), @@ -108,7 +111,7 @@ async fn test_catalogs_database() -> Result<()> { let res = catalog.rename_database(req.clone()).await; assert!(res.is_ok()); - let db_list_1 = catalog.list_databases(tenant).await?; + let db_list_1 = catalog.list_databases(&tenant).await?; assert_eq!(db_list_1.len(), db_count + 1); // Tenant empty. @@ -122,7 +125,7 @@ async fn test_catalogs_database() -> Result<()> { let req = DropDatabaseReq { if_exists: false, name_ident: DatabaseNameIdent { - tenant: tenant.to_string(), + tenant: tenant_name.to_string(), db_name: "db1".to_string(), }, }; @@ -135,14 +138,14 @@ async fn test_catalogs_database() -> Result<()> { let mut req = DropDatabaseReq { if_exists: false, name_ident: DatabaseNameIdent { - tenant: tenant.to_string(), + tenant: tenant_name.to_string(), db_name: "db2".to_string(), }, }; let res = catalog.drop_database(req.clone()).await; assert!(res.is_ok()); - let db_list_drop = catalog.list_databases(tenant).await?; + let db_list_drop = catalog.list_databases(&tenant).await?; assert_eq!(db_list_drop.len(), db_count); // Tenant empty. diff --git a/src/query/service/tests/it/sessions/session.rs b/src/query/service/tests/it/sessions/session.rs index e67ac0baf98ba..a6423b28f8fc2 100644 --- a/src/query/service/tests/it/sessions/session.rs +++ b/src/query/service/tests/it/sessions/session.rs @@ -26,12 +26,12 @@ async fn test_session() -> Result<()> { // Tenant. { let actual = session.get_current_tenant(); - assert_eq!(&actual, "test"); + assert_eq!(actual.name(), "test"); // We are not in management mode, so always get the config tenant. session.set_current_tenant("tenant2".to_string()); let actual = session.get_current_tenant(); - assert_eq!(&actual, "test"); + assert_eq!(actual.name(), "test"); } // Settings. @@ -54,11 +54,11 @@ async fn test_session_in_management_mode() -> Result<()> { // Tenant. 
{ let actual = session.get_current_tenant(); - assert_eq!(&actual, "test"); + assert_eq!(actual.name(), "test"); session.set_current_tenant("tenant2".to_string()); let actual = session.get_current_tenant(); - assert_eq!(&actual, "tenant2"); + assert_eq!(actual.name(), "tenant2"); } Ok(()) diff --git a/src/query/service/tests/it/sessions/session_context.rs b/src/query/service/tests/it/sessions/session_context.rs index e7b7e29a0b0f7..1eaeaf36dbbf0 100644 --- a/src/query/service/tests/it/sessions/session_context.rs +++ b/src/query/service/tests/it/sessions/session_context.rs @@ -17,14 +17,14 @@ use std::net::SocketAddr; use databend_common_base::base::tokio; use databend_common_exception::Result; use databend_common_meta_app::principal::UserInfo; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use databend_common_settings::Settings; use databend_query::sessions::SessionContext; use databend_query::sessions::SessionType; #[tokio::test(flavor = "multi_thread")] async fn test_session_context() -> Result<()> { - let settings = Settings::create(NonEmptyString::new("default").unwrap()); + let settings = Settings::create(Tenant::new_literal("default")); let session_ctx = SessionContext::try_create(settings, SessionType::MySQL)?; // Abort status. diff --git a/src/query/service/tests/it/spillers/spiller.rs b/src/query/service/tests/it/spillers/spiller.rs index 88e908cec578b..bc319339242aa 100644 --- a/src/query/service/tests/it/spillers/spiller.rs +++ b/src/query/service/tests/it/spillers/spiller.rs @@ -35,7 +35,7 @@ async fn test_spill_with_partition() -> Result<()> { let ctx = fixture.new_query_ctx().await?; let tenant = ctx.get_tenant(); - let spiller_config = SpillerConfig::create(query_spill_prefix(tenant.as_str())); + let spiller_config = SpillerConfig::create(query_spill_prefix(tenant.name())); let operator = DataOperator::instance().operator(); let mut spiller = Spiller::create(ctx, operator, spiller_config, SpillerType::HashJoinBuild)?; diff --git a/src/query/service/tests/it/sql/exec/get_table_bind_test.rs b/src/query/service/tests/it/sql/exec/get_table_bind_test.rs index 141f9aa3eb8ae..ad38fb5a8dc5d 100644 --- a/src/query/service/tests/it/sql/exec/get_table_bind_test.rs +++ b/src/query/service/tests/it/sql/exec/get_table_bind_test.rs @@ -113,8 +113,8 @@ use databend_common_meta_app::schema::UpdateVirtualColumnReq; use databend_common_meta_app::schema::UpsertTableOptionReply; use databend_common_meta_app::schema::UpsertTableOptionReq; use databend_common_meta_app::schema::VirtualColumnMeta; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_types::MetaId; -use databend_common_meta_types::NonEmptyString; use databend_common_pipeline_core::InputError; use databend_common_pipeline_core::PlanProfile; use databend_common_settings::Settings; @@ -156,7 +156,7 @@ impl Catalog for FakedCatalog { todo!() } - async fn list_databases(&self, _tenant: &str) -> Result>> { + async fn list_databases(&self, _tenant: &Tenant) -> Result>> { todo!() } @@ -590,7 +590,7 @@ impl TableContext for CtxDelegation { todo!() } - fn get_tenant(&self) -> NonEmptyString { + fn get_tenant(&self) -> Tenant { self.ctx.get_tenant() } @@ -607,7 +607,7 @@ impl TableContext for CtxDelegation { } fn get_settings(&self) -> Arc { - Settings::create(NonEmptyString::new("fake_settings").unwrap()) + Settings::create(Tenant::new_literal("fake_settings")) } fn get_shared_settings(&self) -> Arc { @@ -676,7 +676,7 @@ impl TableContext for CtxDelegation { let tenant = 
self.ctx.get_tenant(); let db = database.to_string(); let tbl = table.to_string(); - let table_meta_key = (tenant.to_string(), db, tbl); + let table_meta_key = (tenant.name().to_string(), db, tbl); let already_in_cache = { self.cache.lock().contains_key(&table_meta_key) }; if already_in_cache { self.table_from_cache @@ -692,7 +692,7 @@ impl TableContext for CtxDelegation { .fetch_add(1, std::sync::atomic::Ordering::SeqCst); let tbl = self .cat - .get_table(self.ctx.get_tenant().as_str(), database, table) + .get_table(self.ctx.get_tenant().name(), database, table) .await?; let tbl2 = tbl.clone(); let mut guard = self.cache.lock(); diff --git a/src/query/service/tests/it/sql/planner/optimizer/agg_index_query_rewrite.rs b/src/query/service/tests/it/sql/planner/optimizer/agg_index_query_rewrite.rs index f473db1d85a12..254002e838d14 100644 --- a/src/query/service/tests/it/sql/planner/optimizer/agg_index_query_rewrite.rs +++ b/src/query/service/tests/it/sql/planner/optimizer/agg_index_query_rewrite.rs @@ -62,7 +62,7 @@ struct TestSuite { fn create_table_plan(fixture: &TestFixture, format: &str) -> CreateTablePlan { CreateTablePlan { create_option: CreateOption::Create, - tenant: fixture.default_tenant(), + tenant: fixture.default_tenant().name().to_string(), catalog: fixture.default_catalog_name(), database: "default".to_string(), table: "t".to_string(), diff --git a/src/query/service/tests/it/storages/fuse/operations/alter_table.rs b/src/query/service/tests/it/storages/fuse/operations/alter_table.rs index b1beece109fe5..4d4fcd32b5112 100644 --- a/src/query/service/tests/it/storages/fuse/operations/alter_table.rs +++ b/src/query/service/tests/it/storages/fuse/operations/alter_table.rs @@ -60,7 +60,7 @@ async fn check_segment_column_ids( // get the latest tbl let table = catalog .get_table( - fixture.default_tenant().as_str(), + fixture.default_tenant().name(), fixture.default_db_name().as_str(), fixture.default_table_name().as_str(), ) @@ -177,7 +177,7 @@ async fn test_fuse_table_optimize_alter_table() -> Result<()> { .with_default_expr(Some("(1,15.0)".to_string())); let add_table_column_plan = AddTableColumnPlan { - tenant: fixture.default_tenant(), + tenant: fixture.default_tenant().name().to_string(), catalog: fixture.default_catalog_name(), database: fixture.default_db_name(), table: fixture.default_table_name(), @@ -205,7 +205,7 @@ async fn test_fuse_table_optimize_alter_table() -> Result<()> { .get_catalog(&catalog_name) .await? 
.get_table( - fixture.default_tenant().as_str(), + fixture.default_tenant().name(), fixture.default_db_name().as_str(), fixture.default_table_name().as_str(), ) diff --git a/src/query/service/tests/it/storages/fuse/operations/analyze.rs b/src/query/service/tests/it/storages/fuse/operations/analyze.rs index 17cfa63763cf0..237df3097989a 100644 --- a/src/query/service/tests/it/storages/fuse/operations/analyze.rs +++ b/src/query/service/tests/it/storages/fuse/operations/analyze.rs @@ -82,7 +82,7 @@ async fn test_fuse_snapshot_analyze_and_truncate() -> Result<()> { .get_catalog(fixture.default_catalog_name().as_str()) .await?; let table = catalog - .get_table(ctx.get_tenant().as_str(), &db, &tbl) + .get_table(ctx.get_tenant().name(), &db, &tbl) .await?; let fuse_table = FuseTable::try_from_table(table.as_ref())?; let snapshot_opt = fuse_table.read_table_snapshot().await?; diff --git a/src/query/service/tests/it/storages/fuse/operations/clustering.rs b/src/query/service/tests/it/storages/fuse/operations/clustering.rs index be6f9a8afbb2e..f40ab29a28af0 100644 --- a/src/query/service/tests/it/storages/fuse/operations/clustering.rs +++ b/src/query/service/tests/it/storages/fuse/operations/clustering.rs @@ -40,7 +40,7 @@ async fn test_fuse_alter_table_cluster_key() -> databend_common_exception::Resul let create_table_plan = CreateTablePlan { create_option: CreateOption::Create, - tenant: fixture.default_tenant(), + tenant: fixture.default_tenant().name().to_string(), catalog: fixture.default_catalog_name(), database: fixture.default_db_name(), table: fixture.default_table_name(), @@ -66,7 +66,7 @@ async fn test_fuse_alter_table_cluster_key() -> databend_common_exception::Resul // add cluster key let alter_table_cluster_key_plan = AlterTableClusterKeyPlan { - tenant: fixture.default_tenant(), + tenant: fixture.default_tenant().name().to_string(), catalog: fixture.default_catalog_name(), database: fixture.default_db_name(), table: fixture.default_table_name(), @@ -103,7 +103,7 @@ async fn test_fuse_alter_table_cluster_key() -> databend_common_exception::Resul // drop cluster key let drop_table_cluster_key_plan = DropTableClusterKeyPlan { - tenant: fixture.default_tenant(), + tenant: fixture.default_tenant().name().to_string(), catalog: fixture.default_catalog_name(), database: fixture.default_db_name(), table: fixture.default_table_name(), diff --git a/src/query/service/tests/it/storages/fuse/operations/commit.rs b/src/query/service/tests/it/storages/fuse/operations/commit.rs index 55f039fecb2cf..caa9bf02fd542 100644 --- a/src/query/service/tests/it/storages/fuse/operations/commit.rs +++ b/src/query/service/tests/it/storages/fuse/operations/commit.rs @@ -112,8 +112,8 @@ use databend_common_meta_app::schema::UpdateVirtualColumnReq; use databend_common_meta_app::schema::UpsertTableOptionReply; use databend_common_meta_app::schema::UpsertTableOptionReq; use databend_common_meta_app::schema::VirtualColumnMeta; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_types::MetaId; -use databend_common_meta_types::NonEmptyString; use databend_common_pipeline_core::InputError; use databend_common_pipeline_core::PlanProfile; use databend_common_settings::Settings; @@ -549,7 +549,7 @@ impl TableContext for CtxDelegation { todo!() } - fn get_tenant(&self) -> NonEmptyString { + fn get_tenant(&self) -> Tenant { self.ctx.get_tenant() } @@ -566,7 +566,7 @@ impl TableContext for CtxDelegation { } fn get_settings(&self) -> Arc { - Settings::create(NonEmptyString::new("fake_settings").unwrap()) + 
Settings::create(Tenant::new_literal("fake_settings")) } fn get_shared_settings(&self) -> Arc { @@ -780,7 +780,7 @@ impl Catalog for FakedCatalog { todo!() } - async fn list_databases(&self, _tenant: &str) -> Result>> { + async fn list_databases(&self, _tenant: &Tenant) -> Result>> { todo!() } diff --git a/src/query/service/tests/it/storages/fuse/operations/mutation/block_compact_mutator.rs b/src/query/service/tests/it/storages/fuse/operations/mutation/block_compact_mutator.rs index 84eee36d073d2..c8305190f2333 100644 --- a/src/query/service/tests/it/storages/fuse/operations/mutation/block_compact_mutator.rs +++ b/src/query/service/tests/it/storages/fuse/operations/mutation/block_compact_mutator.rs @@ -60,7 +60,7 @@ async fn test_compact() -> Result<()> { .get_catalog(fixture.default_catalog_name().as_str()) .await?; let table = catalog - .get_table(ctx.get_tenant().as_str(), &db_name, &tbl_name) + .get_table(ctx.get_tenant().name(), &db_name, &tbl_name) .await?; let res = do_compact(ctx.clone(), table.clone()).await; assert!(res.is_ok()); @@ -77,7 +77,7 @@ async fn test_compact() -> Result<()> { .get_catalog(fixture.default_catalog_name().as_str()) .await?; let table = catalog - .get_table(ctx.get_tenant().as_str(), &db_name, &tbl_name) + .get_table(ctx.get_tenant().name(), &db_name, &tbl_name) .await?; let res = do_compact(ctx.clone(), table.clone()).await; assert!(res.is_ok()); diff --git a/src/query/service/tests/it/storages/fuse/operations/mutation/segments_compact_mutator.rs b/src/query/service/tests/it/storages/fuse/operations/mutation/segments_compact_mutator.rs index 53a66ed90e011..cf06f63d9b9cc 100644 --- a/src/query/service/tests/it/storages/fuse/operations/mutation/segments_compact_mutator.rs +++ b/src/query/service/tests/it/storages/fuse/operations/mutation/segments_compact_mutator.rs @@ -86,7 +86,7 @@ async fn test_compact_segment_normal_case() -> Result<()> { let catalog = ctx.get_catalog("default").await?; let table = catalog - .get_table(ctx.get_tenant().as_str(), "default", "t") + .get_table(ctx.get_tenant().name(), "default", "t") .await?; let fuse_table = FuseTable::try_from_table(table.as_ref())?; let mutator = build_mutator(fuse_table, ctx.clone(), None).await?; @@ -128,7 +128,7 @@ async fn test_compact_segment_resolvable_conflict() -> Result<()> { let catalog = ctx.get_catalog("default").await?; let table = catalog - .get_table(ctx.get_tenant().as_str(), "default", "t") + .get_table(ctx.get_tenant().name(), "default", "t") .await?; let fuse_table = FuseTable::try_from_table(table.as_ref())?; let mutator = build_mutator(fuse_table, ctx.clone(), None).await?; @@ -188,7 +188,7 @@ async fn test_compact_segment_unresolvable_conflict() -> Result<()> { let ctx = fixture.new_query_ctx().await?; let catalog = ctx.get_catalog("default").await?; let table = catalog - .get_table(ctx.get_tenant().as_str(), "default", "t") + .get_table(ctx.get_tenant().name(), "default", "t") .await?; let fuse_table = FuseTable::try_from_table(table.as_ref())?; let mutator = build_mutator(fuse_table, ctx.clone(), None).await?; diff --git a/src/query/service/tests/it/storages/fuse/operations/table_analyze.rs b/src/query/service/tests/it/storages/fuse/operations/table_analyze.rs index 4e68a763da9dd..ac4545f8684ad 100644 --- a/src/query/service/tests/it/storages/fuse/operations/table_analyze.rs +++ b/src/query/service/tests/it/storages/fuse/operations/table_analyze.rs @@ -50,7 +50,7 @@ async fn test_table_modify_column_ndv_statistics() -> Result<()> { fixture.execute_command(statistics_sql).await?; 
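// Editor's sketch (not part of the patch): the hunk continuing below swaps
// `ctx.get_tenant().as_str()` for `ctx.get_tenant().name()`, the accessor pattern
// this refactor applies at every call site. A minimal illustration, assuming only
// the `Tenant` API already shown in this patch (`Tenant::new_literal`, `.name()`,
// `.name().to_string()`, `.to_nonempty()`); everything else here is hypothetical:
//
//     use databend_common_meta_app::tenant::Tenant;
//
//     let tenant = Tenant::new_literal("test");
//     // &str view, for APIs such as `catalog.get_table(tenant.name(), db, tbl)`:
//     assert_eq!(tenant.name(), "test");
//     // owned String, for plan structs that still store the tenant as a String:
//     assert_eq!(tenant.name().to_string(), "test");
//     // bridge to the legacy NonEmptyString representation where still required:
//     let _non_empty = tenant.to_nonempty();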
let table = catalog - .get_table(ctx.get_tenant().as_str(), "default", "t") + .get_table(ctx.get_tenant().name(), "default", "t") .await?; // check count diff --git a/src/query/service/tests/it/storages/fuse/pruning.rs b/src/query/service/tests/it/storages/fuse/pruning.rs index 469c469680c19..e31e2d4f11db6 100644 --- a/src/query/service/tests/it/storages/fuse/pruning.rs +++ b/src/query/service/tests/it/storages/fuse/pruning.rs @@ -91,7 +91,7 @@ async fn test_block_pruner() -> Result<()> { let create_table_plan = CreateTablePlan { catalog: "default".to_owned(), create_option: CreateOption::Create, - tenant: fixture.default_tenant(), + tenant: fixture.default_tenant().name().to_string(), database: fixture.default_db_name(), table: test_tbl_name.to_string(), schema: test_schema.clone(), @@ -118,7 +118,7 @@ async fn test_block_pruner() -> Result<()> { let catalog = ctx.get_catalog("default").await?; let table = catalog .get_table( - fixture.default_tenant().as_str(), + fixture.default_tenant().name(), fixture.default_db_name().as_str(), test_tbl_name, ) @@ -151,7 +151,7 @@ async fn test_block_pruner() -> Result<()> { // get the latest tbl let table = catalog .get_table( - fixture.default_tenant().as_str(), + fixture.default_tenant().name(), fixture.default_db_name().as_str(), test_tbl_name, ) diff --git a/src/query/settings/src/settings.rs b/src/query/settings/src/settings.rs index 55dd45f8c53e5..1aad2ee907838 100644 --- a/src/query/settings/src/settings.rs +++ b/src/query/settings/src/settings.rs @@ -21,7 +21,7 @@ use dashmap::DashMap; use databend_common_config::GlobalConfig; use databend_common_exception::Result; use databend_common_meta_app::principal::UserSettingValue; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use itertools::Itertools; use crate::settings_default::DefaultSettingValue; @@ -62,15 +62,15 @@ pub struct ChangeValue { pub value: UserSettingValue, } -#[derive(Debug, serde::Serialize, serde::Deserialize)] +#[derive(Debug)] pub struct Settings { - pub(crate) tenant: NonEmptyString, + pub(crate) tenant: Tenant, pub(crate) changes: Arc>, pub(crate) configs: HashMap, } impl Settings { - pub fn create(tenant: NonEmptyString) -> Arc { + pub fn create(tenant: Tenant) -> Arc { let configs = match GlobalConfig::try_get_instance() { Some(conf) => conf.query.settings.clone(), None => HashMap::new(), diff --git a/src/query/settings/src/settings_global.rs b/src/query/settings/src/settings_global.rs index 46a33dbdc3330..c82b554c57c8a 100644 --- a/src/query/settings/src/settings_global.rs +++ b/src/query/settings/src/settings_global.rs @@ -19,8 +19,8 @@ use databend_common_exception::ErrorCode; use databend_common_exception::Result; use databend_common_meta_app::principal::UserSetting; use databend_common_meta_app::principal::UserSettingValue; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_types::MatchSeq; -use databend_common_meta_types::NonEmptyString; use databend_common_users::UserApiProvider; use log::warn; @@ -33,7 +33,7 @@ impl Settings { #[async_backtrace::framed] pub async fn load_settings( user_api: Arc, - tenant: &NonEmptyString, + tenant: &Tenant, ) -> Result> { user_api.setting_api(tenant).get_settings().await } diff --git a/src/query/settings/tests/it/setting.rs b/src/query/settings/tests/it/setting.rs index 1a4f314deae8c..b318034a0d79f 100644 --- a/src/query/settings/tests/it/setting.rs +++ b/src/query/settings/tests/it/setting.rs @@ -14,12 +14,12 @@ use databend_common_config::GlobalConfig; use 
databend_common_config::InnerConfig; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use databend_common_settings::Settings; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_set_settings() { - let settings = Settings::create(NonEmptyString::new("test").unwrap()); + let settings = Settings::create(Tenant::new_literal("test")); // Number range. { settings.set_max_threads(2).unwrap(); @@ -102,7 +102,7 @@ async fn test_set_settings() { #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_set_global_settings() { - let settings = Settings::create(NonEmptyString::new("test").unwrap()); + let settings = Settings::create(Tenant::new_literal("test")); let result = settings .set_global_setting( "query_flight_compression_notfound".to_string(), @@ -124,7 +124,7 @@ async fn test_set_data_retention_time_in_days() { GlobalConfig::init(&InnerConfig::default()).unwrap(); } - let settings = Settings::create(NonEmptyString::new("test").unwrap()); + let settings = Settings::create(Tenant::new_literal("test")); // Default. { @@ -176,7 +176,7 @@ async fn test_set_data_retention_time_in_days_from_config() { GlobalConfig::init(&conf).unwrap(); } - let settings = Settings::create(NonEmptyString::new("test").unwrap()); + let settings = Settings::create(Tenant::new_literal("test")); // Ok, 0. { diff --git a/src/query/sharing/src/share_endpoint.rs b/src/query/sharing/src/share_endpoint.rs index 03957bafec5fb..16ed63f3edfba 100644 --- a/src/query/sharing/src/share_endpoint.rs +++ b/src/query/sharing/src/share_endpoint.rs @@ -133,6 +133,7 @@ impl ShareEndpointManager { .as_ref() .query .tenant_id + .name() .to_string(); let req = Request::builder() .method(Method::POST) @@ -198,6 +199,7 @@ impl ShareEndpointManager { .as_ref() .query .tenant_id + .name() .to_string(); let req = Request::builder() .method(Method::POST) diff --git a/src/query/sharing/src/signer.rs b/src/query/sharing/src/signer.rs index 7b379c78877e2..9905cbc1ae8a8 100644 --- a/src/query/sharing/src/signer.rs +++ b/src/query/sharing/src/signer.rs @@ -160,6 +160,7 @@ impl SharedSigner { .as_ref() .query .tenant_id + .name() .to_string(); let req = Request::builder() .method(Method::POST) diff --git a/src/query/sql/src/executor/physical_plans/physical_table_scan.rs b/src/query/sql/src/executor/physical_plans/physical_table_scan.rs index a5350b30774f8..02a694d8b1a85 100644 --- a/src/query/sql/src/executor/physical_plans/physical_table_scan.rs +++ b/src/query/sql/src/executor/physical_plans/physical_table_scan.rs @@ -256,7 +256,7 @@ impl PhysicalPlanBuilder { let catalogs = CatalogManager::instance(); let table = catalogs .get_default_catalog(self.ctx.txn_mgr())? 
- .get_table(self.ctx.get_tenant().as_str(), "system", "one") + .get_table(self.ctx.get_tenant().name(), "system", "one") .await?; if !table.result_can_be_cached() { diff --git a/src/query/sql/src/executor/table_read_plan.rs b/src/query/sql/src/executor/table_read_plan.rs index 5d29735200994..14b6ccc2fc1fa 100644 --- a/src/query/sql/src/executor/table_read_plan.rs +++ b/src/query/sql/src/executor/table_read_plan.rs @@ -35,7 +35,7 @@ use databend_common_expression::Scalar; use databend_common_expression::TableField; use databend_common_license::license::Feature::DataMask; use databend_common_license::license_manager::get_license_manager; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use databend_common_settings::Settings; use databend_common_users::UserApiProvider; use databend_enterprise_data_mask_feature::get_datamask_handler; @@ -212,7 +212,7 @@ impl ToReadDataSourcePlan for dyn Table { if let Ok(policy) = handler .get_data_mask( meta_api.clone(), - tenant.to_string(), + tenant.name().to_string(), mask_policy.clone(), ) .await @@ -244,8 +244,7 @@ impl ToReadDataSourcePlan for dyn Table { let ast_expr = parse_expr(&tokens, ctx.get_settings().get_sql_dialect()?)?; let mut bind_context = BindContext::new(); - let settings = - Settings::create(NonEmptyString::new("dummy").unwrap()); + let settings = Settings::create(Tenant::new_literal("dummy")); let name_resolution_ctx = NameResolutionContext::try_from(settings.as_ref())?; let metadata = Arc::new(RwLock::new(Metadata::default())); @@ -266,7 +265,11 @@ impl ToReadDataSourcePlan for dyn Table { let expr = scalar.0.as_expr()?.project_column_ref(|col| col.index); mask_policy_map.insert(i, expr.as_remote_expr()); } else { - info!("cannot find mask policy {}/{}", tenant, mask_policy); + info!( + "cannot find mask policy {}/{}", + tenant.display(), + mask_policy + ); } } } diff --git a/src/query/sql/src/planner/binder/ddl/account.rs b/src/query/sql/src/planner/binder/ddl/account.rs index 86630ffcb0ae8..5929eb0021011 100644 --- a/src/query/sql/src/planner/binder/ddl/account.rs +++ b/src/query/sql/src/planner/binder/ddl/account.rs @@ -145,13 +145,13 @@ impl Binder { .clone() .unwrap_or_else(|| self.ctx.get_current_database()); let db_id = catalog - .get_database(tenant.as_str(), &database_name) + .get_database(tenant.name(), &database_name) .await? .get_db_info() .ident .db_id; let table_id = catalog - .get_table(tenant.as_str(), &database_name, table_name) + .get_table(tenant.name(), &database_name, table_name) .await? .get_id(); Ok(GrantObject::TableById(catalog_name, db_id, table_id)) @@ -161,7 +161,7 @@ impl Binder { .clone() .unwrap_or_else(|| self.ctx.get_current_database()); let db_id = catalog - .get_database(tenant.as_str(), &database_name) + .get_database(tenant.name(), &database_name) .await? .get_db_info() .ident @@ -190,13 +190,13 @@ impl Binder { .clone() .unwrap_or_else(|| self.ctx.get_current_database()); let db_id = catalog - .get_database(tenant.as_str(), &database_name) + .get_database(tenant.name(), &database_name) .await? .get_db_info() .ident .db_id; let table_id = catalog - .get_table(tenant.as_str(), &database_name, table_name) + .get_table(tenant.name(), &database_name, table_name) .await? .get_id(); Ok(vec![ @@ -209,7 +209,7 @@ impl Binder { .clone() .unwrap_or_else(|| self.ctx.get_current_database()); let db_id = catalog - .get_database(tenant.as_str(), &database_name) + .get_database(tenant.name(), &database_name) .await? 
.get_db_info() .ident diff --git a/src/query/sql/src/planner/binder/ddl/catalog.rs b/src/query/sql/src/planner/binder/ddl/catalog.rs index e2a4e8682bd59..bb68a5b2a1c45 100644 --- a/src/query/sql/src/planner/binder/ddl/catalog.rs +++ b/src/query/sql/src/planner/binder/ddl/catalog.rs @@ -109,7 +109,7 @@ impl Binder { Ok(Plan::CreateCatalog(Box::new(CreateCatalogPlan { if_not_exists: *if_not_exists, - tenant: tenant.to_string(), + tenant, catalog: catalog.to_string(), meta, }))) @@ -125,7 +125,7 @@ impl Binder { let catalog = normalize_identifier(catalog, &self.name_resolution_ctx).name; Ok(Plan::DropCatalog(Box::new(DropCatalogPlan { if_exists: *if_exists, - tenant: tenant.to_string(), + tenant, catalog, }))) } diff --git a/src/query/sql/src/planner/binder/ddl/column.rs b/src/query/sql/src/planner/binder/ddl/column.rs index b11defd542dcc..827ddc3617fe3 100644 --- a/src/query/sql/src/planner/binder/ddl/column.rs +++ b/src/query/sql/src/planner/binder/ddl/column.rs @@ -53,7 +53,7 @@ impl Binder { Some(ident) => { let database = normalize_identifier(ident, &self.name_resolution_ctx).name; catalog - .get_database(self.ctx.get_tenant().as_str(), &database) + .get_database(self.ctx.get_tenant().name(), &database) .await?; database } @@ -62,7 +62,7 @@ impl Binder { let table = { let table = normalize_identifier(table, &self.name_resolution_ctx).name; catalog - .get_table(self.ctx.get_tenant().as_str(), database.as_str(), &table) + .get_table(self.ctx.get_tenant().name(), database.as_str(), &table) .await?; table }; diff --git a/src/query/sql/src/planner/binder/ddl/data_mask.rs b/src/query/sql/src/planner/binder/ddl/data_mask.rs index e231ac70ae0eb..437359a6377fa 100644 --- a/src/query/sql/src/planner/binder/ddl/data_mask.rs +++ b/src/query/sql/src/planner/binder/ddl/data_mask.rs @@ -47,7 +47,7 @@ impl Binder { let tenant = self.ctx.get_tenant(); let plan = CreateDatamaskPolicyPlan { create_option: *create_option, - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), name: name.to_string(), policy: policy.clone(), }; @@ -64,7 +64,7 @@ impl Binder { let tenant = self.ctx.get_tenant(); let plan = DropDatamaskPolicyPlan { if_exists: *if_exists, - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), name: name.to_string(), }; Ok(Plan::DropDatamaskPolicy(Box::new(plan))) diff --git a/src/query/sql/src/planner/binder/ddl/database.rs b/src/query/sql/src/planner/binder/ddl/database.rs index 97f33975288e2..7510242c67e0c 100644 --- a/src/query/sql/src/planner/binder/ddl/database.rs +++ b/src/query/sql/src/planner/binder/ddl/database.rs @@ -148,7 +148,7 @@ impl Binder { }; Ok(Plan::RenameDatabase(Box::new(RenameDatabasePlan { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), entities: vec![entry], }))) } @@ -175,7 +175,7 @@ impl Binder { Ok(Plan::DropDatabase(Box::new(DropDatabasePlan { if_exists: *if_exists, - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), catalog, database, }))) @@ -196,7 +196,7 @@ impl Binder { let database = normalize_identifier(database, &self.name_resolution_ctx).name; Ok(Plan::UndropDatabase(Box::new(UndropDatabasePlan { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), catalog, database, }))) @@ -232,7 +232,7 @@ impl Binder { Ok(Plan::CreateDatabase(Box::new(CreateDatabasePlan { create_option: *create_option, - tenant, + tenant: tenant.to_nonempty(), catalog, database, meta, diff --git a/src/query/sql/src/planner/binder/ddl/index.rs b/src/query/sql/src/planner/binder/ddl/index.rs index 
0a15e57971335..77adb087c1950 100644 --- a/src/query/sql/src/planner/binder/ddl/index.rs +++ b/src/query/sql/src/planner/binder/ddl/index.rs @@ -116,7 +116,7 @@ impl Binder { { let indexes = self .resolve_table_indexes( - self.ctx.get_tenant().as_str(), + self.ctx.get_tenant().name(), catalog.as_str(), table.get_id(), ) @@ -265,7 +265,7 @@ impl Binder { let tenant_name = self.ctx.get_tenant(); - let non_empty = NonEmptyString::new(tenant_name.to_string()).map_err(|_| { + let non_empty = NonEmptyString::new(tenant_name.name().to_string()).map_err(|_| { ErrorCode::TenantIsEmpty( "Tenant is empty(when Binder::build_refresh_index()".to_string(), ) diff --git a/src/query/sql/src/planner/binder/ddl/network_policy.rs b/src/query/sql/src/planner/binder/ddl/network_policy.rs index f9ff90f4d1375..7d3674730d9b2 100644 --- a/src/query/sql/src/planner/binder/ddl/network_policy.rs +++ b/src/query/sql/src/planner/binder/ddl/network_policy.rs @@ -61,7 +61,7 @@ impl Binder { let tenant = self.ctx.get_tenant(); let plan = CreateNetworkPolicyPlan { create_option: *create_option, - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), name: name.to_string(), allowed_ip_list: allowed_ip_list.clone(), blocked_ip_list: blocked_ip_list.clone().unwrap_or_default(), @@ -107,7 +107,7 @@ impl Binder { let tenant = self.ctx.get_tenant(); let plan = AlterNetworkPolicyPlan { if_exists: *if_exists, - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), name: name.to_string(), allowed_ip_list: allowed_ip_list.clone(), blocked_ip_list: blocked_ip_list.clone(), @@ -126,7 +126,7 @@ impl Binder { let tenant = self.ctx.get_tenant(); let plan = DropNetworkPolicyPlan { if_exists: *if_exists, - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), name: name.to_string(), }; Ok(Plan::DropNetworkPolicy(Box::new(plan))) diff --git a/src/query/sql/src/planner/binder/ddl/notification.rs b/src/query/sql/src/planner/binder/ddl/notification.rs index f47b11ff7b6a0..a9e3a5bfbdec1 100644 --- a/src/query/sql/src/planner/binder/ddl/notification.rs +++ b/src/query/sql/src/planner/binder/ddl/notification.rs @@ -94,7 +94,7 @@ impl Binder { let tenant = self.ctx.get_tenant(); let plan = CreateNotificationPlan { if_not_exists: *if_not_exists, - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), name: name.to_string(), notification_type: t, enabled: *enabled, @@ -130,7 +130,7 @@ impl Binder { } let plan = AlterNotificationPlan { if_exists: *if_exists, - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), name: name.to_string(), options: options.clone(), }; @@ -147,7 +147,7 @@ impl Binder { let tenant = self.ctx.get_tenant(); let plan = DropNotificationPlan { if_exists: *if_exists, - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), name: name.to_string(), }; Ok(Plan::DropNotification(Box::new(plan))) @@ -162,7 +162,7 @@ impl Binder { let DescribeNotificationStmt { name } = stmt; let tenant = self.ctx.get_tenant(); let plan = DescNotificationPlan { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), name: name.to_string(), }; Ok(Plan::DescNotification(Box::new(plan))) diff --git a/src/query/sql/src/planner/binder/ddl/password_policy.rs b/src/query/sql/src/planner/binder/ddl/password_policy.rs index ea9b8e669d847..f92f2b7f00f6e 100644 --- a/src/query/sql/src/planner/binder/ddl/password_policy.rs +++ b/src/query/sql/src/planner/binder/ddl/password_policy.rs @@ -40,7 +40,7 @@ impl Binder { let tenant = self.ctx.get_tenant(); let plan = CreatePasswordPolicyPlan { 
create_option: *create_option, - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), name: name.to_string(), set_options: set_options.clone(), }; @@ -61,7 +61,7 @@ impl Binder { let tenant = self.ctx.get_tenant(); let plan = AlterPasswordPolicyPlan { if_exists: *if_exists, - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), name: name.to_string(), action: action.clone(), }; @@ -78,7 +78,7 @@ impl Binder { let tenant = self.ctx.get_tenant(); let plan = DropPasswordPolicyPlan { if_exists: *if_exists, - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), name: name.to_string(), }; Ok(Plan::DropPasswordPolicy(Box::new(plan))) diff --git a/src/query/sql/src/planner/binder/ddl/share.rs b/src/query/sql/src/planner/binder/ddl/share.rs index 5203bf752fb9f..2579afe097088 100644 --- a/src/query/sql/src/planner/binder/ddl/share.rs +++ b/src/query/sql/src/planner/binder/ddl/share.rs @@ -53,7 +53,7 @@ impl Binder { let plan = CreateShareEndpointPlan { create_option: *create_option, endpoint: ShareEndpointIdent { - tenant: self.ctx.get_tenant().to_string(), + tenant: self.ctx.get_tenant().name().to_string(), endpoint, }, tenant: tenant.to_string(), @@ -70,7 +70,7 @@ impl Binder { _stmt: &ShowShareEndpointStmt, ) -> Result { let plan = ShowShareEndpointPlan { - tenant: self.ctx.get_tenant().to_string(), + tenant: self.ctx.get_tenant().name().to_string(), }; Ok(Plan::ShowShareEndpoint(Box::new(plan))) } @@ -86,7 +86,7 @@ impl Binder { } = stmt; let plan = DropShareEndpointPlan { if_exists: *if_exists, - tenant: self.ctx.get_tenant().to_string(), + tenant: self.ctx.get_tenant().name().to_string(), endpoint: endpoint.to_string(), }; Ok(Plan::DropShareEndpoint(Box::new(plan))) @@ -107,7 +107,7 @@ impl Binder { let plan = CreateSharePlan { if_not_exists: *if_not_exists, - tenant: self.ctx.get_tenant().to_string(), + tenant: self.ctx.get_tenant().name().to_string(), share, comment: comment.as_ref().cloned(), }; @@ -125,7 +125,7 @@ impl Binder { let plan = DropSharePlan { if_exists: *if_exists, - tenant: self.ctx.get_tenant().to_string(), + tenant: self.ctx.get_tenant().name().to_string(), share, }; Ok(Plan::DropShare(Box::new(plan))) diff --git a/src/query/sql/src/planner/binder/ddl/stage.rs b/src/query/sql/src/planner/binder/ddl/stage.rs index 0d5ecfdc7b3b7..0415faed66c76 100644 --- a/src/query/sql/src/planner/binder/ddl/stage.rs +++ b/src/query/sql/src/planner/binder/ddl/stage.rs @@ -116,7 +116,7 @@ impl Binder { Ok(Plan::CreateStage(Box::new(CreateStagePlan { create_option: *create_option, - tenant: self.ctx.get_tenant().to_string(), + tenant: self.ctx.get_tenant().name().to_string(), stage_info, }))) } diff --git a/src/query/sql/src/planner/binder/ddl/stream.rs b/src/query/sql/src/planner/binder/ddl/stream.rs index cb24311ff7222..21e0181b9c0b3 100644 --- a/src/query/sql/src/planner/binder/ddl/stream.rs +++ b/src/query/sql/src/planner/binder/ddl/stream.rs @@ -80,7 +80,7 @@ impl Binder { let plan = CreateStreamPlan { create_option: *create_option, - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), catalog, database, stream_name, @@ -110,7 +110,7 @@ impl Binder { self.normalize_object_identifier_triple(catalog, database, stream); let plan = DropStreamPlan { if_exists: *if_exists, - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), catalog, database, stream_name, diff --git a/src/query/sql/src/planner/binder/ddl/table.rs b/src/query/sql/src/planner/binder/ddl/table.rs index 1ca08b0e5f300..51ba076d54bf0 100644 --- 
a/src/query/sql/src/planner/binder/ddl/table.rs +++ b/src/query/sql/src/planner/binder/ddl/table.rs @@ -390,7 +390,7 @@ impl Binder { self.ctx .get_catalog(&ctl_name) .await? - .get_database(self.ctx.get_tenant().as_str(), &database) + .get_database(self.ctx.get_tenant().name(), &database) .await?; Ok(database) } @@ -586,7 +586,7 @@ impl Binder { // safely eliminate this "FUSE" constant and the table meta option entry. let catalog = self.ctx.get_catalog(&catalog).await?; let db = catalog - .get_database(self.ctx.get_tenant().as_str(), &database) + .get_database(self.ctx.get_tenant().name(), &database) .await?; let db_id = db.get_db_info().ident.db_id; options.insert(OPT_KEY_DATABASE_ID.to_owned(), db_id.to_string()); @@ -646,7 +646,7 @@ impl Binder { let plan = CreateTablePlan { create_option: *create_option, - tenant: self.ctx.get_tenant().to_string(), + tenant: self.ctx.get_tenant().name().to_string(), catalog: catalog.clone(), database: database.clone(), table, @@ -728,7 +728,7 @@ impl Binder { Ok(Plan::CreateTable(Box::new(CreateTablePlan { create_option: CreateOption::Create, - tenant: self.ctx.get_tenant().to_string(), + tenant: self.ctx.get_tenant().name().to_string(), catalog, database, table, @@ -788,7 +788,7 @@ impl Binder { self.normalize_object_identifier_triple(catalog, database, table); Ok(Plan::UndropTable(Box::new(UndropTablePlan { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), catalog, database, table, @@ -826,7 +826,7 @@ impl Binder { match action { AlterTableAction::RenameTable { new_table } => { Ok(Plan::RenameTable(Box::new(RenameTablePlan { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), if_exists: *if_exists, new_database: database.clone(), new_table: normalize_identifier(new_table, &self.name_resolution_ctx).name, @@ -848,7 +848,7 @@ impl Binder { .analyze_rename_column(old_column, new_column, schema) .await?; Ok(Plan::RenameTableColumn(Box::new(RenameTableColumnPlan { - tenant: self.ctx.get_tenant().to_string(), + tenant: self.ctx.get_tenant().name().to_string(), catalog, database, table, @@ -875,7 +875,7 @@ impl Binder { AstAddColumnOption::End => AddColumnOption::End, }; Ok(Plan::AddTableColumn(Box::new(AddTableColumnPlan { - tenant: self.ctx.get_tenant().to_string(), + tenant: self.ctx.get_tenant().name().to_string(), catalog, database, table, @@ -938,7 +938,7 @@ impl Binder { Ok(Plan::AlterTableClusterKey(Box::new( AlterTableClusterKeyPlan { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), catalog, database, table, @@ -948,7 +948,7 @@ impl Binder { } AlterTableAction::DropTableClusterKey => Ok(Plan::DropTableClusterKey(Box::new( DropTableClusterKeyPlan { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), catalog, database, table, @@ -981,7 +981,7 @@ impl Binder { }; Ok(Plan::ReclusterTable(Box::new(ReclusterTablePlan { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), catalog, database, table, @@ -994,7 +994,7 @@ impl Binder { AlterTableAction::FlashbackTo { point } => { let point = self.resolve_data_travel_point(bind_context, point).await?; Ok(Plan::RevertTable(Box::new(RevertTablePlan { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), catalog, database, table, @@ -1042,7 +1042,7 @@ impl Binder { } Ok(Plan::RenameTable(Box::new(RenameTablePlan { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), if_exists: *if_exists, catalog, database: db_name, diff --git a/src/query/sql/src/planner/binder/ddl/task.rs 
b/src/query/sql/src/planner/binder/ddl/task.rs index ac3e8c5394fa3..8c367b1f4124c 100644 --- a/src/query/sql/src/planner/binder/ddl/task.rs +++ b/src/query/sql/src/planner/binder/ddl/task.rs @@ -123,7 +123,7 @@ impl Binder { let tenant = self.ctx.get_tenant(); let plan = CreateTaskPlan { if_not_exists: *if_not_exists, - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), task_name: name.to_string(), warehouse_opts: warehouse_opts.clone(), schedule_opts: schedule_opts.clone(), @@ -181,7 +181,7 @@ impl Binder { let tenant = self.ctx.get_tenant(); let plan = AlterTaskPlan { if_exists: *if_exists, - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), task_name: name.to_string(), alter_options: options.clone(), }; @@ -199,7 +199,7 @@ impl Binder { let plan = DropTaskPlan { if_exists: *if_exists, - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), task_name: name.to_string(), }; Ok(Plan::DropTask(Box::new(plan))) @@ -215,7 +215,7 @@ impl Binder { let tenant = self.ctx.get_tenant(); let plan = DescribeTaskPlan { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), task_name: name.to_string(), }; Ok(Plan::DescribeTask(Box::new(plan))) @@ -231,7 +231,7 @@ impl Binder { let tenant = self.ctx.get_tenant(); let plan = ExecuteTaskPlan { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), task_name: name.to_string(), }; Ok(Plan::ExecuteTask(Box::new(plan))) @@ -247,7 +247,7 @@ impl Binder { let tenant = self.ctx.get_tenant(); let plan = ShowTasksPlan { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), limit: limit.clone(), }; Ok(Plan::ShowTasks(Box::new(plan))) diff --git a/src/query/sql/src/planner/binder/ddl/view.rs b/src/query/sql/src/planner/binder/ddl/view.rs index cc0be8d8a06ca..abfc74d542b9c 100644 --- a/src/query/sql/src/planner/binder/ddl/view.rs +++ b/src/query/sql/src/planner/binder/ddl/view.rs @@ -67,7 +67,7 @@ impl Binder { let plan = CreateViewPlan { create_option: *create_option, - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), catalog, database, view_name, @@ -105,7 +105,7 @@ impl Binder { let subquery = format!("{}", query); let plan = AlterViewPlan { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), catalog, database, view_name, @@ -132,7 +132,7 @@ impl Binder { self.normalize_object_identifier_triple(catalog, database, view); let plan = DropViewPlan { if_exists: *if_exists, - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), catalog, database, view_name, diff --git a/src/query/sql/src/planner/binder/ddl/virtual_column.rs b/src/query/sql/src/planner/binder/ddl/virtual_column.rs index fdd0733e27523..28a23026af9b7 100644 --- a/src/query/sql/src/planner/binder/ddl/virtual_column.rs +++ b/src/query/sql/src/planner/binder/ddl/virtual_column.rs @@ -168,7 +168,7 @@ impl Binder { let catalog_info = self.ctx.get_catalog(&catalog).await?; let res = catalog_info .list_virtual_columns(ListVirtualColumnsReq { - tenant: self.ctx.get_tenant().to_string(), + tenant: self.ctx.get_tenant().name().to_string(), table_id: Some(table_info.get_id()), }) .await?; @@ -298,7 +298,7 @@ impl Binder { Some(ident) => { let database = normalize_identifier(ident, &self.name_resolution_ctx).name; catalog - .get_database(self.ctx.get_tenant().as_str(), &database) + .get_database(self.ctx.get_tenant().name(), &database) .await?; database } diff --git a/src/query/sql/src/planner/binder/table.rs b/src/query/sql/src/planner/binder/table.rs index 746667760df8d..bfb5b97214e12 100644 --- 
a/src/query/sql/src/planner/binder/table.rs +++ b/src/query/sql/src/planner/binder/table.rs @@ -221,7 +221,7 @@ impl Binder { // Resolve table with catalog let table_meta = match self .resolve_data_source( - tenant.as_str(), + tenant.name(), catalog.as_str(), database.as_str(), table_name.as_str(), diff --git a/src/query/sql/src/planner/bloom_index.rs b/src/query/sql/src/planner/bloom_index.rs index de3461a13dc65..b728e534c65fa 100644 --- a/src/query/sql/src/planner/bloom_index.rs +++ b/src/query/sql/src/planner/bloom_index.rs @@ -26,7 +26,7 @@ use databend_common_expression::FieldIndex; use databend_common_expression::TableDataType; use databend_common_expression::TableField; use databend_common_expression::TableSchemaRef; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use databend_common_settings::Settings; use crate::normalize_identifier; @@ -54,7 +54,7 @@ impl FromStr for BloomIndexColumns { let tokens = tokenize_sql(s)?; let idents = parse_comma_separated_idents(&tokens, sql_dialect)?; - let settings = Settings::create(NonEmptyString::new("dummy").unwrap()); + let settings = Settings::create(Tenant::new_literal("dummy")); let name_resolution_ctx = NameResolutionContext::try_from(settings.as_ref())?; let mut cols = Vec::with_capacity(idents.len()); @@ -80,7 +80,7 @@ impl BloomIndexColumns { return Ok(()); } - let settings = Settings::create(NonEmptyString::new("dummy").unwrap()); + let settings = Settings::create(Tenant::new_literal("dummy")); let name_resolution_ctx = NameResolutionContext::try_from(settings.as_ref())?; let sql_dialect = Dialect::default(); diff --git a/src/query/sql/src/planner/dataframe.rs b/src/query/sql/src/planner/dataframe.rs index db0c1e76028e6..a07eb780e8490 100644 --- a/src/query/sql/src/planner/dataframe.rs +++ b/src/query/sql/src/planner/dataframe.rs @@ -88,7 +88,7 @@ impl Dataframe { let database = "system"; let tenant = query_ctx.get_tenant(); let table_meta: Arc = binder - .resolve_data_source(tenant.as_str(), catalog, database, "one", &None, &None) + .resolve_data_source(tenant.name(), catalog, database, "one", &None, &None) .await?; let table_index = metadata.write().add_table( diff --git a/src/query/sql/src/planner/expression_parser.rs b/src/query/sql/src/planner/expression_parser.rs index c6c9478395d70..3adf8db2e6984 100644 --- a/src/query/sql/src/planner/expression_parser.rs +++ b/src/query/sql/src/planner/expression_parser.rs @@ -40,7 +40,7 @@ use databend_common_expression::TableField; use databend_common_expression::TableSchemaRef; use databend_common_functions::BUILTIN_FUNCTIONS; use databend_common_meta_app::schema::TableInfo; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use databend_common_settings::Settings; use derive_visitor::DriveMut; use parking_lot::RwLock; @@ -116,7 +116,7 @@ pub fn parse_exprs( sql: &str, ) -> Result> { let (mut bind_context, metadata) = bind_one_table(table_meta)?; - let settings = Settings::create(NonEmptyString::new("dummy").unwrap()); + let settings = Settings::create(Tenant::new_literal("dummy")); let name_resolution_ctx = NameResolutionContext::try_from(settings.as_ref())?; let sql_dialect = ctx.get_settings().get_sql_dialect().unwrap_or_default(); let mut type_checker = TypeChecker::try_create( @@ -185,7 +185,7 @@ pub fn parse_computed_expr( schema: DataSchemaRef, sql: &str, ) -> Result { - let settings = Settings::create(NonEmptyString::new("dummy").unwrap()); + let settings = 
Settings::create(Tenant::new_literal("dummy")); let mut bind_context = BindContext::new(); let mut metadata = Metadata::default(); let table_schema = infer_table_schema(&schema)?; @@ -241,7 +241,7 @@ pub fn parse_default_expr_to_string( ast: &AExpr, is_add_column: bool, ) -> Result { - let settings = Settings::create(NonEmptyString::new("dummy").unwrap()); + let settings = Settings::create(Tenant::new_literal("dummy")); let mut bind_context = BindContext::new(); let metadata = Metadata::default(); @@ -287,7 +287,7 @@ pub fn parse_computed_expr_to_string( field: &TableField, ast: &AExpr, ) -> Result { - let settings = Settings::create(NonEmptyString::new("dummy").unwrap()); + let settings = Settings::create(Tenant::new_literal("dummy")); let mut bind_context = BindContext::new(); let mut metadata = Metadata::default(); for (index, field) in table_schema.fields().iter().enumerate() { @@ -350,7 +350,7 @@ pub fn parse_lambda_expr( columns: &[(String, DataType)], ast: &AExpr, ) -> Result> { - let settings = Settings::create(NonEmptyString::new("dummy").unwrap()); + let settings = Settings::create(Tenant::new_literal("dummy")); let mut bind_context = BindContext::new(); let mut metadata = Metadata::default(); diff --git a/src/query/sql/src/planner/plans/ddl/catalog.rs b/src/query/sql/src/planner/plans/ddl/catalog.rs index 49ed0bb6c2a6d..8498e3fb6adf6 100644 --- a/src/query/sql/src/planner/plans/ddl/catalog.rs +++ b/src/query/sql/src/planner/plans/ddl/catalog.rs @@ -17,11 +17,12 @@ use databend_common_meta_app::schema::CatalogMeta; use databend_common_meta_app::schema::CatalogNameIdent; use databend_common_meta_app::schema::CreateCatalogReq; use databend_common_meta_app::schema::DropCatalogReq; +use databend_common_meta_app::tenant::Tenant; #[derive(Clone, Debug, PartialEq, Eq)] pub struct CreateCatalogPlan { pub if_not_exists: bool, - pub tenant: String, + pub tenant: Tenant, pub catalog: String, pub meta: CatalogMeta, } @@ -30,10 +31,7 @@ impl From for CreateCatalogReq { fn from(p: CreateCatalogPlan) -> Self { CreateCatalogReq { if_not_exists: p.if_not_exists, - name_ident: CatalogNameIdent { - tenant: p.tenant, - catalog_name: p.catalog, - }, + name_ident: CatalogNameIdent::new(p.tenant, p.catalog), meta: p.meta, } } @@ -43,10 +41,7 @@ impl From<&CreateCatalogPlan> for CreateCatalogReq { fn from(p: &CreateCatalogPlan) -> Self { CreateCatalogReq { if_not_exists: p.if_not_exists, - name_ident: CatalogNameIdent { - tenant: p.tenant.clone(), - catalog_name: p.catalog.clone(), - }, + name_ident: CatalogNameIdent::new(p.tenant.clone(), p.catalog.clone()), meta: p.meta.clone(), } } @@ -55,7 +50,7 @@ impl From<&CreateCatalogPlan> for CreateCatalogReq { #[derive(Clone, Debug, PartialEq, Eq)] pub struct DropCatalogPlan { pub if_exists: bool, - pub tenant: String, + pub tenant: Tenant, pub catalog: String, } @@ -63,10 +58,7 @@ impl From for DropCatalogReq { fn from(value: DropCatalogPlan) -> DropCatalogReq { DropCatalogReq { if_exists: value.if_exists, - name_ident: CatalogNameIdent { - tenant: value.tenant, - catalog_name: value.catalog, - }, + name_ident: CatalogNameIdent::new(value.tenant, value.catalog), } } } @@ -75,10 +67,7 @@ impl From<&DropCatalogPlan> for DropCatalogReq { fn from(value: &DropCatalogPlan) -> DropCatalogReq { DropCatalogReq { if_exists: value.if_exists, - name_ident: CatalogNameIdent { - tenant: value.tenant.clone(), - catalog_name: value.catalog.clone(), - }, + name_ident: CatalogNameIdent::new(value.tenant.clone(), value.catalog.clone()), } } } diff --git 
a/src/query/sql/src/planner/plans/ddl/table.rs b/src/query/sql/src/planner/plans/ddl/table.rs index 8b5107494c0ce..d0bea6fbca3a5 100644 --- a/src/query/sql/src/planner/plans/ddl/table.rs +++ b/src/query/sql/src/planner/plans/ddl/table.rs @@ -31,7 +31,7 @@ use databend_common_meta_app::schema::CreateOption; use databend_common_meta_app::schema::TableNameIdent; use databend_common_meta_app::schema::UndropTableReq; use databend_common_meta_app::storage::StorageParams; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use crate::plans::Plan; @@ -84,7 +84,7 @@ impl DescribeTablePlan { #[derive(Clone, Debug, PartialEq, Eq)] pub struct DropTablePlan { pub if_exists: bool, - pub tenant: NonEmptyString, + pub tenant: Tenant, pub catalog: String, pub database: String, /// The table name diff --git a/src/query/sql/src/planner/semantic/virtual_column_rewriter.rs b/src/query/sql/src/planner/semantic/virtual_column_rewriter.rs index cb2c536cfc639..d1afdb1cee25f 100644 --- a/src/query/sql/src/planner/semantic/virtual_column_rewriter.rs +++ b/src/query/sql/src/planner/semantic/virtual_column_rewriter.rs @@ -94,7 +94,7 @@ impl VirtualColumnRewriter { let table_id = table.get_id(); let req = ListVirtualColumnsReq { - tenant: self.ctx.get_tenant().to_string(), + tenant: self.ctx.get_tenant().name().to_string(), table_id: Some(table_id), }; let catalog = self.ctx.get_catalog(table_entry.catalog()).await?; diff --git a/src/query/storages/fuse/src/operations/analyze.rs b/src/query/storages/fuse/src/operations/analyze.rs index 134d3a7da2b63..c025537b6b3e7 100644 --- a/src/query/storages/fuse/src/operations/analyze.rs +++ b/src/query/storages/fuse/src/operations/analyze.rs @@ -112,10 +112,10 @@ impl SinkAnalyzeState { // always use the latest table let tenant = self.ctx.get_tenant(); let catalog = CatalogManager::instance() - .get_catalog(tenant.as_str(), &self.catalog, self.ctx.txn_mgr()) + .get_catalog(tenant.name(), &self.catalog, self.ctx.txn_mgr()) .await?; let table = catalog - .get_table(tenant.as_str(), &self.database, &self.table) + .get_table(tenant.name(), &self.database, &self.table) .await?; let table = FuseTable::try_from_table(table.as_ref())?; diff --git a/src/query/storages/fuse/src/operations/gc.rs b/src/query/storages/fuse/src/operations/gc.rs index 59c672341642d..ce1b8fa419757 100644 --- a/src/query/storages/fuse/src/operations/gc.rs +++ b/src/query/storages/fuse/src/operations/gc.rs @@ -83,7 +83,7 @@ impl FuseTable { let catalog = ctx.get_catalog(&ctx.get_current_catalog()).await?; let table_agg_index_ids = catalog .list_index_ids_by_table_id(ListIndexesByIdReq { - tenant: ctx.get_tenant().to_string(), + tenant: ctx.get_tenant().name().to_string(), table_id: self.get_id(), }) .await?; diff --git a/src/query/storages/fuse/src/table_functions/clustering_information/clustering_information_table.rs b/src/query/storages/fuse/src/table_functions/clustering_information/clustering_information_table.rs index b16eb62166215..f48d41cf8fb01 100644 --- a/src/query/storages/fuse/src/table_functions/clustering_information/clustering_information_table.rs +++ b/src/query/storages/fuse/src/table_functions/clustering_information/clustering_information_table.rs @@ -171,7 +171,7 @@ impl AsyncSource for ClusteringInformationSource { .get_catalog(CATALOG_DEFAULT) .await? 
.get_table( - tenant_id.as_str(), + tenant_id.name(), self.arg_database_name.as_str(), self.arg_table_name.as_str(), ) diff --git a/src/query/storages/fuse/src/table_functions/fuse_blocks/fuse_block_table.rs b/src/query/storages/fuse/src/table_functions/fuse_blocks/fuse_block_table.rs index 43037c4f933a4..7fa7cbaf2548f 100644 --- a/src/query/storages/fuse/src/table_functions/fuse_blocks/fuse_block_table.rs +++ b/src/query/storages/fuse/src/table_functions/fuse_blocks/fuse_block_table.rs @@ -184,7 +184,7 @@ impl AsyncSource for FuseBlockSource { .get_catalog(CATALOG_DEFAULT) .await? .get_table( - tenant_id.as_str(), + tenant_id.name(), self.arg_database_name.as_str(), self.arg_table_name.as_str(), ) diff --git a/src/query/storages/fuse/src/table_functions/fuse_columns/fuse_column_table.rs b/src/query/storages/fuse/src/table_functions/fuse_columns/fuse_column_table.rs index 3cdf87840bd07..c76efb833c041 100644 --- a/src/query/storages/fuse/src/table_functions/fuse_columns/fuse_column_table.rs +++ b/src/query/storages/fuse/src/table_functions/fuse_columns/fuse_column_table.rs @@ -184,7 +184,7 @@ impl AsyncSource for FuseColumnSource { .get_catalog(CATALOG_DEFAULT) .await? .get_table( - tenant_id.as_str(), + tenant_id.name(), self.arg_database_name.as_str(), self.arg_table_name.as_str(), ) diff --git a/src/query/storages/fuse/src/table_functions/fuse_encodings/fuse_encoding_table.rs b/src/query/storages/fuse/src/table_functions/fuse_encodings/fuse_encoding_table.rs index 359f62730caac..6b5a0e846d667 100644 --- a/src/query/storages/fuse/src/table_functions/fuse_encodings/fuse_encoding_table.rs +++ b/src/query/storages/fuse/src/table_functions/fuse_encodings/fuse_encoding_table.rs @@ -170,7 +170,7 @@ impl AsyncSource for FuseEncodingSource { .ctx .get_catalog(CATALOG_DEFAULT) .await? - .get_database(tenant_id.as_str(), self.arg_database_name.as_str()) + .get_database(tenant_id.name(), self.arg_database_name.as_str()) .await? .list_tables() .await?; diff --git a/src/query/storages/fuse/src/table_functions/fuse_segments/fuse_segment_table.rs b/src/query/storages/fuse/src/table_functions/fuse_segments/fuse_segment_table.rs index 77c8ed570c171..c7edc2ff86824 100644 --- a/src/query/storages/fuse/src/table_functions/fuse_segments/fuse_segment_table.rs +++ b/src/query/storages/fuse/src/table_functions/fuse_segments/fuse_segment_table.rs @@ -184,7 +184,7 @@ impl AsyncSource for FuseSegmentSource { .get_catalog(CATALOG_DEFAULT) .await? .get_table( - tenant_id.as_str(), + tenant_id.name(), self.arg_database_name.as_str(), self.arg_table_name.as_str(), ) diff --git a/src/query/storages/fuse/src/table_functions/fuse_snapshots/fuse_snapshot_table.rs b/src/query/storages/fuse/src/table_functions/fuse_snapshots/fuse_snapshot_table.rs index 43462b34ab71f..812e877cf57b2 100644 --- a/src/query/storages/fuse/src/table_functions/fuse_snapshots/fuse_snapshot_table.rs +++ b/src/query/storages/fuse/src/table_functions/fuse_snapshots/fuse_snapshot_table.rs @@ -186,7 +186,7 @@ impl AsyncSource for FuseSnapshotSource { .get_catalog(CATALOG_DEFAULT) .await? 
.get_table( - tenant_id.as_str(), + tenant_id.name(), self.arg_database_name.as_str(), self.arg_table_name.as_str(), ) diff --git a/src/query/storages/fuse/src/table_functions/fuse_statistics/fuse_statistic_table.rs b/src/query/storages/fuse/src/table_functions/fuse_statistics/fuse_statistic_table.rs index e767bea884ded..6afb376716d37 100644 --- a/src/query/storages/fuse/src/table_functions/fuse_statistics/fuse_statistic_table.rs +++ b/src/query/storages/fuse/src/table_functions/fuse_statistics/fuse_statistic_table.rs @@ -182,7 +182,7 @@ impl AsyncSource for FuseStatisticSource { .get_catalog(CATALOG_DEFAULT) .await? .get_table( - tenant_id.as_str(), + tenant_id.name(), self.arg_database_name.as_str(), self.arg_table_name.as_str(), ) diff --git a/src/query/storages/hive/hive/src/hive_catalog.rs b/src/query/storages/hive/hive/src/hive_catalog.rs index f2d366d9a4e9a..06663f5b055cd 100644 --- a/src/query/storages/hive/hive/src/hive_catalog.rs +++ b/src/query/storages/hive/hive/src/hive_catalog.rs @@ -92,6 +92,7 @@ use databend_common_meta_app::schema::UpsertTableOptionReply; use databend_common_meta_app::schema::UpsertTableOptionReq; use databend_common_meta_app::schema::VirtualColumnMeta; use databend_common_meta_app::storage::StorageParams; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_types::*; use faststr::FastStr; use hive_metastore::Partition; @@ -285,7 +286,7 @@ impl Catalog for HiveCatalog { // Get all the databases. #[minitrace::trace] #[async_backtrace::framed] - async fn list_databases(&self, _tenant: &str) -> Result>> { + async fn list_databases(&self, _tenant: &Tenant) -> Result>> { let db_names = self .client .get_all_databases() diff --git a/src/query/storages/iceberg/src/catalog.rs b/src/query/storages/iceberg/src/catalog.rs index f2c8680f67e7d..3b2dbd77451a2 100644 --- a/src/query/storages/iceberg/src/catalog.rs +++ b/src/query/storages/iceberg/src/catalog.rs @@ -89,6 +89,7 @@ use databend_common_meta_app::schema::UpdateVirtualColumnReq; use databend_common_meta_app::schema::UpsertTableOptionReply; use databend_common_meta_app::schema::UpsertTableOptionReq; use databend_common_meta_app::schema::VirtualColumnMeta; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_types::MetaId; use databend_common_storage::DataOperator; use futures::TryStreamExt; @@ -215,7 +216,7 @@ impl Catalog for IcebergCatalog { } #[async_backtrace::framed] - async fn list_databases(&self, _tenant: &str) -> Result>> { + async fn list_databases(&self, _tenant: &Tenant) -> Result>> { self.list_database_from_read().await } diff --git a/src/query/storages/result_cache/src/read/reader.rs b/src/query/storages/result_cache/src/read/reader.rs index 34cea384507b4..6d008edfa7586 100644 --- a/src/query/storages/result_cache/src/read/reader.rs +++ b/src/query/storages/result_cache/src/read/reader.rs @@ -52,7 +52,7 @@ impl ResultCacheReader { tolerate_inconsistent: bool, ) -> Self { let tenant = ctx.get_tenant(); - let meta_key = gen_result_cache_meta_key(tenant.as_str(), key); + let meta_key = gen_result_cache_meta_key(tenant.name(), key); let partitions_shas = ctx.get_partitions_shas(); Self { diff --git a/src/query/storages/result_cache/src/write/sink.rs b/src/query/storages/result_cache/src/write/sink.rs index b478e143ecbe9..9774114d6ab6f 100644 --- a/src/query/storages/result_cache/src/write/sink.rs +++ b/src/query/storages/result_cache/src/write/sink.rs @@ -131,7 +131,7 @@ impl WriteResultCacheSink { let sql = ctx.get_query_str(); let partitions_shas = 
ctx.get_partitions_shas(); - let meta_key = gen_result_cache_meta_key(tenant.as_str(), key); + let meta_key = gen_result_cache_meta_key(tenant.name(), key); let location = gen_result_cache_dir(key); let operator = DataOperator::instance().operator(); diff --git a/src/query/storages/stream/src/stream_status_table_func.rs b/src/query/storages/stream/src/stream_status_table_func.rs index e90559fcd59f9..88916c1c23c02 100644 --- a/src/query/storages/stream/src/stream_status_table_func.rs +++ b/src/query/storages/stream/src/stream_status_table_func.rs @@ -223,7 +223,7 @@ impl AsyncSource for StreamStatusDataSource { .ctx .get_catalog(&self.cat_name) .await? - .get_table(tenant_id.as_str(), &self.db_name, &self.stream_name) + .get_table(tenant_id.name(), &self.db_name, &self.stream_name) .await?; let tbl = StreamTable::try_from_table(tbl.as_ref())?; diff --git a/src/query/storages/system/src/background_jobs_table.rs b/src/query/storages/system/src/background_jobs_table.rs index 1044e5d0de1f7..6ca5311efa6e2 100644 --- a/src/query/storages/system/src/background_jobs_table.rs +++ b/src/query/storages/system/src/background_jobs_table.rs @@ -60,7 +60,7 @@ impl AsyncSystemTable for BackgroundJobTable { let meta_api = UserApiProvider::instance().get_meta_store_client(); let jobs = meta_api .list_background_jobs(ListBackgroundJobsReq { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), }) .await?; let mut names = Vec::with_capacity(jobs.len()); diff --git a/src/query/storages/system/src/background_tasks_table.rs b/src/query/storages/system/src/background_tasks_table.rs index 77a2fb6499247..37e346aea9bad 100644 --- a/src/query/storages/system/src/background_tasks_table.rs +++ b/src/query/storages/system/src/background_tasks_table.rs @@ -60,7 +60,7 @@ impl AsyncSystemTable for BackgroundTaskTable { let meta_api = UserApiProvider::instance().get_meta_store_client(); let tasks = meta_api .list_background_tasks(ListBackgroundTasksReq { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), }) .await?; let mut names = Vec::with_capacity(tasks.len()); diff --git a/src/query/storages/system/src/catalogs_table.rs b/src/query/storages/system/src/catalogs_table.rs index ef5fc86e8c6b7..987abe97e17d0 100644 --- a/src/query/storages/system/src/catalogs_table.rs +++ b/src/query/storages/system/src/catalogs_table.rs @@ -53,7 +53,7 @@ impl AsyncSystemTable for CatalogsTable { let mgr = CatalogManager::instance(); let catalog_names = mgr - .list_catalogs(ctx.get_tenant().as_str(), ctx.txn_mgr()) + .list_catalogs(ctx.get_tenant().name(), ctx.txn_mgr()) .await? .into_iter() .map(|v| v.name()) diff --git a/src/query/storages/system/src/columns_table.rs b/src/query/storages/system/src/columns_table.rs index 95b0143b465e5..8ffea5cbdf850 100644 --- a/src/query/storages/system/src/columns_table.rs +++ b/src/query/storages/system/src/columns_table.rs @@ -223,7 +223,7 @@ pub(crate) async fn dump_tables( let mut final_dbs: Vec<(String, u64)> = Vec::new(); if databases.is_empty() { - let all_databases = catalog.list_databases(tenant.as_str()).await?; + let all_databases = catalog.list_databases(&tenant).await?; for db in all_databases { let db_id = db.get_db_info().ident.db_id; let db_name = db.name(); @@ -234,7 +234,7 @@ pub(crate) async fn dump_tables( } else { for db in databases { let db_id = catalog - .get_database(tenant.as_str(), &db) + .get_database(tenant.name(), &db) .await? 
.get_db_info() .ident @@ -248,7 +248,7 @@ pub(crate) async fn dump_tables( let mut final_tables: Vec<(String, Vec>)> = Vec::with_capacity(final_dbs.len()); for (database, db_id) in final_dbs { let tables = if tables.is_empty() { - if let Ok(table) = catalog.list_tables(tenant.as_str(), &database).await { + if let Ok(table) = catalog.list_tables(tenant.name(), &database).await { table } else { vec![] @@ -256,7 +256,7 @@ pub(crate) async fn dump_tables( } else { let mut res = Vec::new(); for table in &tables { - if let Ok(table) = catalog.get_table(tenant.as_str(), &database, table).await { + if let Ok(table) = catalog.get_table(tenant.name(), &database, table).await { res.push(table); } } diff --git a/src/query/storages/system/src/databases_table.rs b/src/query/storages/system/src/databases_table.rs index 3fbf755472130..3f1b096c9a241 100644 --- a/src/query/storages/system/src/databases_table.rs +++ b/src/query/storages/system/src/databases_table.rs @@ -56,9 +56,10 @@ impl AsyncSystemTable for DatabasesTable { _push_downs: Option, ) -> Result { let tenant = ctx.get_tenant(); + let catalogs = CatalogManager::instance(); let catalogs: Vec<(String, Arc)> = catalogs - .list_catalogs(tenant.as_str(), ctx.txn_mgr()) + .list_catalogs(tenant.name(), ctx.txn_mgr()) .await? .iter() .map(|e| (e.name(), e.clone())) @@ -73,7 +74,7 @@ impl AsyncSystemTable for DatabasesTable { let visibility_checker = ctx.get_visibility_checker().await?; for (ctl_name, catalog) in catalogs.into_iter() { - let databases = catalog.list_databases(tenant.as_str()).await?; + let databases = catalog.list_databases(&tenant).await?; let final_dbs = databases .into_iter() .filter(|db| { diff --git a/src/query/storages/system/src/indexes_table.rs b/src/query/storages/system/src/indexes_table.rs index b514ab5b2609d..be1473fb742b7 100644 --- a/src/query/storages/system/src/indexes_table.rs +++ b/src/query/storages/system/src/indexes_table.rs @@ -55,7 +55,7 @@ impl AsyncSystemTable for IndexesTable { let catalog = ctx.get_catalog(CATALOG_DEFAULT).await?; let indexes = catalog .list_indexes(ListIndexesReq { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), table_id: None, }) .await?; diff --git a/src/query/storages/system/src/locks_table.rs b/src/query/storages/system/src/locks_table.rs index a3aa6b2381d83..556402907e289 100644 --- a/src/query/storages/system/src/locks_table.rs +++ b/src/query/storages/system/src/locks_table.rs @@ -61,7 +61,7 @@ impl AsyncSystemTable for LocksTable { let tenant = ctx.get_tenant(); let catalog_mgr = CatalogManager::instance(); let ctls = catalog_mgr - .list_catalogs(tenant.as_str(), ctx.txn_mgr()) + .list_catalogs(tenant.name(), ctx.txn_mgr()) .await?; let mut lock_table_id = Vec::new(); diff --git a/src/query/storages/system/src/notification_history_table.rs b/src/query/storages/system/src/notification_history_table.rs index 98b9582a21dc6..4bf36fcc13b2f 100644 --- a/src/query/storages/system/src/notification_history_table.rs +++ b/src/query/storages/system/src/notification_history_table.rs @@ -123,7 +123,7 @@ impl AsyncSystemTable for NotificationHistoryTable { } let req = ListNotificationHistoryRequest { - tenant_id: tenant.to_string(), + tenant_id: tenant.name().to_string(), result_limit, notification_name, ..Default::default() @@ -131,8 +131,12 @@ impl AsyncSystemTable for NotificationHistoryTable { let cloud_api = CloudControlApiProvider::instance(); let notification_client = cloud_api.get_notification_client(); - let mut cfg = - build_client_config(tenant.to_string(), user, 
query_id, cloud_api.get_timeout()); + let mut cfg = build_client_config( + tenant.name().to_string(), + user, + query_id, + cloud_api.get_timeout(), + ); cfg.add_notification_version_info(); let req = make_request(req, cfg); diff --git a/src/query/storages/system/src/notifications_table.rs b/src/query/storages/system/src/notifications_table.rs index 615712c2920b2..d21af99acaaa4 100644 --- a/src/query/storages/system/src/notifications_table.rs +++ b/src/query/storages/system/src/notifications_table.rs @@ -107,13 +107,17 @@ impl AsyncSystemTable for NotificationsTable { let query_id = ctx.get_id(); let user = ctx.get_current_user()?.identity().to_string(); let req = ListNotificationRequest { - tenant_id: tenant.to_string().clone(), + tenant_id: tenant.name().to_string().clone(), }; let cloud_api = CloudControlApiProvider::instance(); let notification_client = cloud_api.get_notification_client(); - let mut cfg = - build_client_config(tenant.to_string(), user, query_id, cloud_api.get_timeout()); + let mut cfg = build_client_config( + tenant.name().to_string(), + user, + query_id, + cloud_api.get_timeout(), + ); cfg.add_notification_version_info(); let req = make_request(req, cfg); diff --git a/src/query/storages/system/src/query_cache_table.rs b/src/query/storages/system/src/query_cache_table.rs index 8139a52af4ced..0dba57779267d 100644 --- a/src/query/storages/system/src/query_cache_table.rs +++ b/src/query/storages/system/src/query_cache_table.rs @@ -59,7 +59,7 @@ impl AsyncSystemTable for QueryCacheTable { let meta_client = UserApiProvider::instance().get_meta_store_client(); let result_cache_mgr = ResultCacheMetaManager::create(meta_client, 0); let tenant = ctx.get_tenant(); - let prefix = gen_result_cache_prefix(tenant.as_str()); + let prefix = gen_result_cache_prefix(tenant.name()); let cached_values = result_cache_mgr.list(prefix.as_str()).await?; diff --git a/src/query/storages/system/src/stages_table.rs b/src/query/storages/system/src/stages_table.rs index 51264f0b555c9..60da6c96e9e26 100644 --- a/src/query/storages/system/src/stages_table.rs +++ b/src/query/storages/system/src/stages_table.rs @@ -57,6 +57,7 @@ impl AsyncSystemTable for StagesTable { _push_downs: Option, ) -> Result { let tenant = ctx.get_tenant(); + let stages = UserApiProvider::instance().get_stages(&tenant).await?; let enable_experimental_rbac_check = ctx.get_settings().get_enable_experimental_rbac_check()?; diff --git a/src/query/storages/system/src/streams_table.rs b/src/query/storages/system/src/streams_table.rs index 8f91078c425a0..3c09a38da9492 100644 --- a/src/query/storages/system/src/streams_table.rs +++ b/src/query/storages/system/src/streams_table.rs @@ -65,9 +65,10 @@ impl AsyncSystemTable for StreamsTable { push_downs: Option, ) -> Result { let tenant = ctx.get_tenant(); + let catalog_mgr = CatalogManager::instance(); let ctls: Vec<(String, Arc)> = catalog_mgr - .list_catalogs(tenant.as_str(), ctx.txn_mgr()) + .list_catalogs(tenant.name(), ctx.txn_mgr()) .await? 
.iter() .map(|e| (e.name(), e.clone())) @@ -108,7 +109,7 @@ impl AsyncSystemTable for StreamsTable { } }); for db in db_name { - if let Ok(database) = ctl.get_database(tenant.as_str(), db.as_str()).await { + if let Ok(database) = ctl.get_database(tenant.name(), db.as_str()).await { dbs.push(database); } } @@ -116,7 +117,7 @@ impl AsyncSystemTable for StreamsTable { } if dbs.is_empty() { - dbs = ctl.list_databases(tenant.as_str()).await?; + dbs = ctl.list_databases(&tenant).await?; } let ctl_name: &str = Box::leak(ctl_name.into_boxed_str()); @@ -133,7 +134,7 @@ impl AsyncSystemTable for StreamsTable { for db in final_dbs { let name = db.name().to_string().into_boxed_str(); let name: &str = Box::leak(name); - let tables = match ctl.list_tables(tenant.as_str(), name).await { + let tables = match ctl.list_tables(tenant.name(), name).await { Ok(tables) => tables, Err(err) => { // Swallow the errors related with sharing. Listing tables in a shared database diff --git a/src/query/storages/system/src/tables_table.rs b/src/query/storages/system/src/tables_table.rs index eca067fd32a32..5b44438b955a6 100644 --- a/src/query/storages/system/src/tables_table.rs +++ b/src/query/storages/system/src/tables_table.rs @@ -106,7 +106,7 @@ where TablesTable: HistoryAware let tenant = ctx.get_tenant(); let catalog_mgr = CatalogManager::instance(); let catalogs = catalog_mgr - .list_catalogs(tenant.as_str(), ctx.txn_mgr()) + .list_catalogs(tenant.name(), ctx.txn_mgr()) .await?; let visibility_checker = ctx.get_visibility_checker().await?; @@ -179,6 +179,7 @@ where TablesTable: HistoryAware visibility_checker: GrantObjectVisibilityChecker, ) -> DataBlock { let tenant = ctx.get_tenant(); + let ctls: Vec<(String, Arc)> = catalogs.iter().map(|e| (e.name(), e.clone())).collect(); @@ -205,7 +206,7 @@ where TablesTable: HistoryAware } }); for db in db_name { - match ctl.get_database(tenant.as_str(), db.as_str()).await { + match ctl.get_database(tenant.name(), db.as_str()).await { Ok(database) => dbs.push(database), Err(err) => { let msg = format!("Failed to get database: {}, {}", db, err); @@ -218,7 +219,7 @@ where TablesTable: HistoryAware } if dbs.is_empty() { - dbs = match ctl.list_databases(tenant.as_str()).await { + dbs = match ctl.list_databases(&tenant).await { Ok(dbs) => dbs, Err(err) => { let msg = @@ -248,7 +249,7 @@ where TablesTable: HistoryAware let name = db.name().to_string().into_boxed_str(); let db_id = db.get_db_info().ident.db_id; let name: &str = Box::leak(name); - let tables = match Self::list_tables(&ctl, tenant.as_str(), name).await { + let tables = match Self::list_tables(&ctl, tenant.name(), name).await { Ok(tables) => tables, Err(err) => { // swallow the errors related with remote database or tables, avoid ANY of bad table config corrupt ALL of the results. 
diff --git a/src/query/storages/system/src/task_history_table.rs b/src/query/storages/system/src/task_history_table.rs index 2f5bb612ec687..7624c60eb4f40 100644 --- a/src/query/storages/system/src/task_history_table.rs +++ b/src/query/storages/system/src/task_history_table.rs @@ -176,7 +176,7 @@ impl AsyncSystemTable for TaskHistoryTable { } } let req = ShowTaskRunsRequest { - tenant_id: tenant.to_string(), + tenant_id: tenant.name().to_string(), scheduled_time_start: scheduled_time_start.unwrap_or("".to_string()), scheduled_time_end: scheduled_time_end.unwrap_or("".to_string()), task_name: task_name.unwrap_or("".to_string()), @@ -194,8 +194,12 @@ impl AsyncSystemTable for TaskHistoryTable { let cloud_api = CloudControlApiProvider::instance(); let task_client = cloud_api.get_task_client(); - let config = - build_client_config(tenant.to_string(), user, query_id, cloud_api.get_timeout()); + let config = build_client_config( + tenant.name().to_string(), + user, + query_id, + cloud_api.get_timeout(), + ); let resp = task_client.show_task_runs_full(config, req).await?; let trs = resp diff --git a/src/query/storages/system/src/tasks_table.rs b/src/query/storages/system/src/tasks_table.rs index 121c79c2f0f77..2aa34253ee837 100644 --- a/src/query/storages/system/src/tasks_table.rs +++ b/src/query/storages/system/src/tasks_table.rs @@ -132,7 +132,7 @@ impl AsyncSystemTable for TasksTable { let user = ctx.get_current_user()?.identity().to_string(); let available_roles = ctx.get_available_roles().await?; let req = ShowTasksRequest { - tenant_id: tenant.to_string(), + tenant_id: tenant.name().to_string(), name_like: "".to_string(), result_limit: 10000, // TODO: use plan.limit pushdown owners: available_roles @@ -144,7 +144,12 @@ impl AsyncSystemTable for TasksTable { let cloud_api = CloudControlApiProvider::instance(); let task_client = cloud_api.get_task_client(); - let cfg = build_client_config(tenant.to_string(), user, query_id, cloud_api.get_timeout()); + let cfg = build_client_config( + tenant.name().to_string(), + user, + query_id, + cloud_api.get_timeout(), + ); let req = make_request(req, cfg); let resp = task_client.show_tasks(req).await?; diff --git a/src/query/storages/system/src/temp_files_table.rs b/src/query/storages/system/src/temp_files_table.rs index b2fa090b5063f..a242592d55f95 100644 --- a/src/query/storages/system/src/temp_files_table.rs +++ b/src/query/storages/system/src/temp_files_table.rs @@ -68,7 +68,7 @@ impl AsyncSystemTable for TempFilesTable { let mut temp_files_content_length = vec![]; let mut temp_files_last_modified = vec![]; - let location_prefix = format!("{}/", query_spill_prefix(tenant.as_str())); + let location_prefix = format!("{}/", query_spill_prefix(tenant.name())); if let Ok(lister) = operator .lister_with(&location_prefix) .metakey(Metakey::LastModified | Metakey::ContentLength) diff --git a/src/query/storages/system/src/virtual_columns_table.rs b/src/query/storages/system/src/virtual_columns_table.rs index cb51cd23d293e..d02bcbe11aa80 100644 --- a/src/query/storages/system/src/virtual_columns_table.rs +++ b/src/query/storages/system/src/virtual_columns_table.rs @@ -59,7 +59,7 @@ impl AsyncSystemTable for VirtualColumnsTable { let catalog = ctx.get_catalog(CATALOG_DEFAULT).await?; let virtual_column_metas = catalog .list_virtual_columns(ListVirtualColumnsReq { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), table_id: None, }) .await?; diff --git a/src/query/users/src/connection.rs b/src/query/users/src/connection.rs index 
50176725eb732..5bb16125b3ab2 100644 --- a/src/query/users/src/connection.rs +++ b/src/query/users/src/connection.rs @@ -16,8 +16,8 @@ use databend_common_exception::ErrorCode; use databend_common_exception::Result; use databend_common_meta_app::principal::UserDefinedConnection; use databend_common_meta_app::schema::CreateOption; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_types::MatchSeq; -use databend_common_meta_types::NonEmptyString; use crate::UserApiProvider; @@ -27,7 +27,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn add_connection( &self, - tenant: &NonEmptyString, + tenant: &Tenant, connection: UserDefinedConnection, create_option: &CreateOption, ) -> Result<()> { @@ -42,7 +42,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn get_connection( &self, - tenant: &NonEmptyString, + tenant: &Tenant, connection_name: &str, ) -> Result { let connection_api_provider = self.connection_api(tenant); @@ -52,10 +52,7 @@ impl UserApiProvider { // Get the tenant all connection list. #[async_backtrace::framed] - pub async fn get_connections( - &self, - tenant: &NonEmptyString, - ) -> Result> { + pub async fn get_connections(&self, tenant: &Tenant) -> Result> { let connection_api_provider = self.connection_api(tenant); let get_connections = connection_api_provider.list(); @@ -69,7 +66,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn drop_connection( &self, - tenant: &NonEmptyString, + tenant: &Tenant, name: &str, if_exists: bool, ) -> Result<()> { diff --git a/src/query/users/src/file_format.rs b/src/query/users/src/file_format.rs index f86272a890c5b..b321cb1f022fc 100644 --- a/src/query/users/src/file_format.rs +++ b/src/query/users/src/file_format.rs @@ -16,8 +16,8 @@ use databend_common_exception::ErrorCode; use databend_common_exception::Result; use databend_common_meta_app::principal::UserDefinedFileFormat; use databend_common_meta_app::schema::CreateOption; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_types::MatchSeq; -use databend_common_meta_types::NonEmptyString; use crate::UserApiProvider; @@ -27,7 +27,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn add_file_format( &self, - tenant: &NonEmptyString, + tenant: &Tenant, file_format_options: UserDefinedFileFormat, create_option: &CreateOption, ) -> Result<()> { @@ -42,7 +42,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn get_file_format( &self, - tenant: &NonEmptyString, + tenant: &Tenant, file_format_name: &str, ) -> Result { let file_format_api_provider = self.file_format_api(tenant); @@ -52,10 +52,7 @@ impl UserApiProvider { // Get the tenant all file_format list. 
#[async_backtrace::framed] - pub async fn get_file_formats( - &self, - tenant: &NonEmptyString, - ) -> Result> { + pub async fn get_file_formats(&self, tenant: &Tenant) -> Result> { let file_format_api_provider = self.file_format_api(tenant); let get_file_formats = file_format_api_provider.list(); @@ -69,7 +66,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn drop_file_format( &self, - tenant: &NonEmptyString, + tenant: &Tenant, name: &str, if_exists: bool, ) -> Result<()> { diff --git a/src/query/users/src/network_policy.rs b/src/query/users/src/network_policy.rs index a561d3664cf1c..df8c644ac5092 100644 --- a/src/query/users/src/network_policy.rs +++ b/src/query/users/src/network_policy.rs @@ -18,8 +18,8 @@ use databend_common_exception::Result; use databend_common_meta_api::crud::CrudError; use databend_common_meta_app::principal::NetworkPolicy; use databend_common_meta_app::schema::CreateOption; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_types::MatchSeq; -use databend_common_meta_types::NonEmptyString; use crate::UserApiProvider; @@ -28,7 +28,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn add_network_policy( &self, - tenant: &NonEmptyString, + tenant: &Tenant, network_policy: NetworkPolicy, create_option: &CreateOption, ) -> Result<()> { @@ -41,7 +41,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn update_network_policy( &self, - tenant: &NonEmptyString, + tenant: &Tenant, name: &str, allowed_ip_list: Option>, blocked_ip_list: Option>, @@ -94,7 +94,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn drop_network_policy( &self, - tenant: &NonEmptyString, + tenant: &Tenant, name: &str, if_exists: bool, ) -> Result<()> { @@ -133,7 +133,7 @@ impl UserApiProvider { // Check whether a network policy is exist. #[async_backtrace::framed] - pub async fn exists_network_policy(&self, tenant: &NonEmptyString, name: &str) -> Result { + pub async fn exists_network_policy(&self, tenant: &Tenant, name: &str) -> Result { match self.get_network_policy(tenant, name).await { Ok(_) => Ok(true), Err(e) => { @@ -148,11 +148,7 @@ impl UserApiProvider { // Get a network_policy by tenant. #[async_backtrace::framed] - pub async fn get_network_policy( - &self, - tenant: &NonEmptyString, - name: &str, - ) -> Result { + pub async fn get_network_policy(&self, tenant: &Tenant, name: &str) -> Result { let client = self.network_policy_api(tenant); let network_policy = client.get(name, MatchSeq::GE(0)).await?.data; Ok(network_policy) @@ -160,10 +156,7 @@ impl UserApiProvider { // Get all network policies by tenant. 
#[async_backtrace::framed] - pub async fn get_network_policies( - &self, - tenant: &NonEmptyString, - ) -> Result> { + pub async fn get_network_policies(&self, tenant: &Tenant) -> Result> { let client = self.network_policy_api(tenant); let network_policies = client.list().await.map_err(|e| { let e = ErrorCode::from(e); diff --git a/src/query/users/src/password_policy.rs b/src/query/users/src/password_policy.rs index 8a5e71ecae636..1169ae2bbf2bf 100644 --- a/src/query/users/src/password_policy.rs +++ b/src/query/users/src/password_policy.rs @@ -26,8 +26,8 @@ use databend_common_meta_app::principal::UserIdentity; use databend_common_meta_app::principal::UserInfo; use databend_common_meta_app::principal::UserOption; use databend_common_meta_app::schema::CreateOption; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_types::MatchSeq; -use databend_common_meta_types::NonEmptyString; use log::info; use passwords::analyzer; @@ -65,7 +65,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn add_password_policy( &self, - tenant: &NonEmptyString, + tenant: &Tenant, password_policy: PasswordPolicy, create_option: &CreateOption, ) -> Result<()> { @@ -81,7 +81,7 @@ impl UserApiProvider { #[allow(clippy::too_many_arguments)] pub async fn update_password_policy( &self, - tenant: &NonEmptyString, + tenant: &Tenant, name: &str, min_length: Option, max_length: Option, @@ -168,7 +168,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn drop_password_policy( &self, - tenant: &NonEmptyString, + tenant: &Tenant, name: &str, if_exists: bool, ) -> Result<()> { @@ -207,11 +207,7 @@ impl UserApiProvider { // Check whether a password policy is exist. #[async_backtrace::framed] - pub async fn exists_password_policy( - &self, - tenant: &NonEmptyString, - name: &str, - ) -> Result { + pub async fn exists_password_policy(&self, tenant: &Tenant, name: &str) -> Result { match self.get_password_policy(tenant, name).await { Ok(_) => Ok(true), Err(e) => { @@ -226,11 +222,7 @@ impl UserApiProvider { // Get a password_policy by tenant. #[async_backtrace::framed] - pub async fn get_password_policy( - &self, - tenant: &NonEmptyString, - name: &str, - ) -> Result { + pub async fn get_password_policy(&self, tenant: &Tenant, name: &str) -> Result { let client = self.password_policy_api(tenant); let password_policy = client.get(name, MatchSeq::GE(0)).await?.data; Ok(password_policy) @@ -238,10 +230,7 @@ impl UserApiProvider { // Get all password policies by tenant. 
#[async_backtrace::framed] - pub async fn get_password_policies( - &self, - tenant: &NonEmptyString, - ) -> Result> { + pub async fn get_password_policies(&self, tenant: &Tenant) -> Result> { let client = self.password_policy_api(tenant); let password_policies = client.list().await.map_err(|e| { let e = ErrorCode::from(e); @@ -258,7 +247,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn verify_password( &self, - tenant: &NonEmptyString, + tenant: &Tenant, user_option: &UserOption, auth_option: &AuthOption, user_info: Option<&UserInfo>, @@ -378,7 +367,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn check_login_password( &self, - tenant: &NonEmptyString, + tenant: &Tenant, identity: UserIdentity, user_info: &UserInfo, ) -> Result<()> { diff --git a/src/query/users/src/role_cache_mgr.rs b/src/query/users/src/role_cache_mgr.rs index b8fde70c68c3a..ce77721b3de9c 100644 --- a/src/query/users/src/role_cache_mgr.rs +++ b/src/query/users/src/role_cache_mgr.rs @@ -23,7 +23,7 @@ use databend_common_base::base::GlobalInstance; use databend_common_exception::Result; use databend_common_meta_app::principal::OwnershipObject; use databend_common_meta_app::principal::RoleInfo; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use log::warn; use parking_lot::RwLock; @@ -37,7 +37,7 @@ struct CachedRoles { pub struct RoleCacheManager { user_manager: Arc, - cache: Arc>>, + cache: Arc>>, polling_interval: Duration, polling_join_handle: Option>, } @@ -82,7 +82,8 @@ impl RoleCacheManager { Err(err) => { warn!( "role_cache_mgr load roles data of tenant {} failed: {}", - tenant, err, + tenant.display(), + err, ) } Ok(data) => { @@ -96,13 +97,13 @@ impl RoleCacheManager { })); } - pub fn invalidate_cache(&self, tenant: &NonEmptyString) { + pub fn invalidate_cache(&self, tenant: &Tenant) { let mut cached = self.cache.write(); cached.remove(tenant); } #[async_backtrace::framed] - pub async fn find_role(&self, tenant: &NonEmptyString, role: &str) -> Result> { + pub async fn find_role(&self, tenant: &Tenant, role: &str) -> Result> { let cached = self.cache.read(); let cached_roles = match cached.get(tenant) { None => return Ok(None), @@ -115,7 +116,7 @@ impl RoleCacheManager { #[async_backtrace::framed] pub async fn find_object_owner( &self, - tenant: &NonEmptyString, + tenant: &Tenant, object: &OwnershipObject, ) -> Result> { match self.user_manager.get_ownership(tenant, object).await? { @@ -128,7 +129,7 @@ impl RoleCacheManager { #[async_backtrace::framed] pub async fn find_related_roles( &self, - tenant: &NonEmptyString, + tenant: &Tenant, roles: &[String], ) -> Result> { self.maybe_reload(tenant).await?; @@ -141,7 +142,7 @@ impl RoleCacheManager { } #[async_backtrace::framed] - pub async fn force_reload(&self, tenant: &NonEmptyString) -> Result<()> { + pub async fn force_reload(&self, tenant: &Tenant) -> Result<()> { let data = load_roles_data(&self.user_manager, tenant).await?; let mut cached = self.cache.write(); cached.insert(tenant.clone(), data); @@ -151,7 +152,7 @@ impl RoleCacheManager { // Load roles data if not found in cache. Watch this tenant's role data in background if // once it loads successfully. 
#[async_backtrace::framed] - async fn maybe_reload(&self, tenant: &NonEmptyString) -> Result<()> { + async fn maybe_reload(&self, tenant: &Tenant) -> Result<()> { let need_reload = { let cached = self.cache.read(); match cached.get(tenant) { @@ -173,10 +174,7 @@ impl RoleCacheManager { } } -async fn load_roles_data( - user_api: &Arc, - tenant: &NonEmptyString, -) -> Result { +async fn load_roles_data(user_api: &Arc, tenant: &Tenant) -> Result { let roles = user_api.get_roles(tenant).await?; let roles_map = roles .into_iter() diff --git a/src/query/users/src/role_mgr.rs b/src/query/users/src/role_mgr.rs index 487d578f2784c..168041407d305 100644 --- a/src/query/users/src/role_mgr.rs +++ b/src/query/users/src/role_mgr.rs @@ -22,8 +22,8 @@ use databend_common_meta_app::principal::OwnershipInfo; use databend_common_meta_app::principal::OwnershipObject; use databend_common_meta_app::principal::RoleInfo; use databend_common_meta_app::principal::UserPrivilegeSet; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_types::MatchSeq; -use databend_common_meta_types::NonEmptyString; use crate::role_util::find_all_related_roles; use crate::UserApiProvider; @@ -34,7 +34,7 @@ pub const BUILTIN_ROLE_PUBLIC: &str = "public"; impl UserApiProvider { // Get one role from by tenant. #[async_backtrace::framed] - pub async fn get_role(&self, tenant: &NonEmptyString, role: String) -> Result { + pub async fn get_role(&self, tenant: &Tenant, role: String) -> Result { let builtin_roles = self.builtin_roles(); if let Some(role_info) = builtin_roles.get(&role) { return Ok(role_info.clone()); @@ -47,7 +47,7 @@ impl UserApiProvider { // Get the tenant all roles list. #[async_backtrace::framed] - pub async fn get_roles(&self, tenant: &NonEmptyString) -> Result> { + pub async fn get_roles(&self, tenant: &Tenant) -> Result> { let builtin_roles = self.builtin_roles(); let seq_roles = self .role_api(tenant) @@ -86,7 +86,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn get_ownerships( &self, - tenant: &NonEmptyString, + tenant: &Tenant, ) -> Result> { let seq_owns = self .role_api(tenant) @@ -102,7 +102,7 @@ impl UserApiProvider { } #[async_backtrace::framed] - pub async fn exists_role(&self, tenant: &NonEmptyString, role: String) -> Result { + pub async fn exists_role(&self, tenant: &Tenant, role: String) -> Result { match self.get_role(tenant, role).await { Ok(_) => Ok(true), Err(e) => { @@ -119,7 +119,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn add_role( &self, - tenant: &NonEmptyString, + tenant: &Tenant, role_info: RoleInfo, if_not_exists: bool, ) -> Result { @@ -144,7 +144,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn grant_ownership_to_role( &self, - tenant: &NonEmptyString, + tenant: &Tenant, object: &OwnershipObject, new_role: &str, ) -> Result<()> { @@ -161,7 +161,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn get_ownership( &self, - tenant: &NonEmptyString, + tenant: &Tenant, object: &OwnershipObject, ) -> Result> { let client = self.role_api(tenant); @@ -192,7 +192,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn grant_privileges_to_role( &self, - tenant: &NonEmptyString, + tenant: &Tenant, role: &String, object: GrantObject, privileges: UserPrivilegeSet, @@ -209,7 +209,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn revoke_privileges_from_role( &self, - tenant: &NonEmptyString, + tenant: &Tenant, role: &String, object: GrantObject, privileges: UserPrivilegeSet, @@ -227,7 
+227,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn grant_role_to_role( &self, - tenant: &NonEmptyString, + tenant: &Tenant, target_role: &String, grant_role: String, ) -> Result> { @@ -254,7 +254,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn revoke_role_from_role( &self, - tenant: &NonEmptyString, + tenant: &Tenant, role: &String, revoke_role: &String, ) -> Result> { @@ -269,12 +269,7 @@ impl UserApiProvider { // Drop a role by name #[async_backtrace::framed] - pub async fn drop_role( - &self, - tenant: &NonEmptyString, - role: String, - if_exists: bool, - ) -> Result<()> { + pub async fn drop_role(&self, tenant: &Tenant, role: String, if_exists: bool) -> Result<()> { let client = self.role_api(tenant); let drop_role = client.drop_role(role, MatchSeq::GE(1)); match drop_role.await { @@ -294,7 +289,7 @@ impl UserApiProvider { #[async_backtrace::framed] async fn find_related_roles( &self, - tenant: &NonEmptyString, + tenant: &Tenant, role_identities: &[String], ) -> Result> { let tenant_roles_map = self diff --git a/src/query/users/src/user_api.rs b/src/query/users/src/user_api.rs index 16d961dd47d60..2beef30ee61b4 100644 --- a/src/query/users/src/user_api.rs +++ b/src/query/users/src/user_api.rs @@ -35,13 +35,13 @@ use databend_common_management::UserApi; use databend_common_management::UserMgr; use databend_common_meta_app::principal::AuthInfo; use databend_common_meta_app::principal::RoleInfo; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_app::tenant::TenantQuota; use databend_common_meta_kvapi::kvapi; use databend_common_meta_store::MetaStore; use databend_common_meta_store::MetaStoreProvider; use databend_common_meta_types::MatchSeq; use databend_common_meta_types::MetaError; -use databend_common_meta_types::NonEmptyString; use crate::idm_config::IDMConfig; use crate::BUILTIN_ROLE_PUBLIC; @@ -57,7 +57,7 @@ impl UserApiProvider { pub async fn init( conf: RpcClientConf, idm_config: IDMConfig, - tenant: &NonEmptyString, + tenant: &Tenant, quota: Option, ) -> Result<()> { GlobalInstance::set(Self::try_create(conf, idm_config, tenant).await?); @@ -74,7 +74,7 @@ impl UserApiProvider { pub async fn try_create( conf: RpcClientConf, idm_config: IDMConfig, - tenant: &NonEmptyString, + tenant: &Tenant, ) -> Result> { let client = MetaStoreProvider::new(conf).create_meta_store().await?; let user_mgr = UserApiProvider { @@ -104,7 +104,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn try_create_simple( conf: RpcClientConf, - tenant: &NonEmptyString, + tenant: &Tenant, ) -> Result> { Self::try_create(conf, IDMConfig::default(), tenant).await } @@ -113,45 +113,45 @@ impl UserApiProvider { GlobalInstance::get() } - pub fn udf_api(&self, tenant: &NonEmptyString) -> UdfMgr { - UdfMgr::create(self.client.clone(), tenant.as_non_empty_str()) + pub fn udf_api(&self, tenant: &Tenant) -> UdfMgr { + UdfMgr::create(self.client.clone(), tenant) } - pub fn user_api(&self, tenant: &NonEmptyString) -> Arc { - let user_mgr = UserMgr::create(self.client.clone(), tenant.as_non_empty_str()); + pub fn user_api(&self, tenant: &Tenant) -> Arc { + let user_mgr = UserMgr::create(self.client.clone(), tenant); Arc::new(user_mgr) } - pub fn role_api(&self, tenant: &NonEmptyString) -> Arc { - let role_mgr = RoleMgr::create(self.client.clone(), tenant.as_non_empty_str()); + pub fn role_api(&self, tenant: &Tenant) -> Arc { + let role_mgr = RoleMgr::create(self.client.clone(), tenant); Arc::new(role_mgr) } - pub fn stage_api(&self, tenant: 
&NonEmptyString) -> Arc { + pub fn stage_api(&self, tenant: &Tenant) -> Arc { Arc::new(StageMgr::create(self.client.clone(), tenant)) } - pub fn file_format_api(&self, tenant: &NonEmptyString) -> FileFormatMgr { + pub fn file_format_api(&self, tenant: &Tenant) -> FileFormatMgr { FileFormatMgr::create(self.client.clone(), tenant) } - pub fn connection_api(&self, tenant: &NonEmptyString) -> ConnectionMgr { + pub fn connection_api(&self, tenant: &Tenant) -> ConnectionMgr { ConnectionMgr::create(self.client.clone(), tenant) } - pub fn tenant_quota_api(&self, tenant: &NonEmptyString) -> Arc { + pub fn tenant_quota_api(&self, tenant: &Tenant) -> Arc { Arc::new(QuotaMgr::create(self.client.clone(), tenant)) } - pub fn setting_api(&self, tenant: &NonEmptyString) -> Arc { + pub fn setting_api(&self, tenant: &Tenant) -> Arc { Arc::new(SettingMgr::create(self.client.clone(), tenant)) } - pub fn network_policy_api(&self, tenant: &NonEmptyString) -> NetworkPolicyMgr { + pub fn network_policy_api(&self, tenant: &Tenant) -> NetworkPolicyMgr { NetworkPolicyMgr::create(self.client.clone(), tenant) } - pub fn password_policy_api(&self, tenant: &NonEmptyString) -> PasswordPolicyMgr { + pub fn password_policy_api(&self, tenant: &Tenant) -> PasswordPolicyMgr { PasswordPolicyMgr::create(self.client.clone(), tenant) } diff --git a/src/query/users/src/user_mgr.rs b/src/query/users/src/user_mgr.rs index 2fd4fe88b9086..27ba30ad17e3b 100644 --- a/src/query/users/src/user_mgr.rs +++ b/src/query/users/src/user_mgr.rs @@ -27,8 +27,8 @@ use databend_common_meta_app::principal::UserInfo; use databend_common_meta_app::principal::UserOption; use databend_common_meta_app::principal::UserPrivilegeSet; use databend_common_meta_app::schema::CreateOption; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_types::MatchSeq; -use databend_common_meta_types::NonEmptyString; use crate::role_mgr::BUILTIN_ROLE_ACCOUNT_ADMIN; use crate::UserApiProvider; @@ -36,7 +36,7 @@ use crate::UserApiProvider; impl UserApiProvider { // Get one user from by tenant. #[async_backtrace::framed] - pub async fn get_user(&self, tenant: &NonEmptyString, user: UserIdentity) -> Result { + pub async fn get_user(&self, tenant: &Tenant, user: UserIdentity) -> Result { if let Some(auth_info) = self.get_configured_user(&user.username) { let mut user_info = UserInfo::new(&user.username, "%", auth_info.clone()); user_info.grants.grant_privileges( @@ -63,7 +63,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn get_user_with_client_ip( &self, - tenant: &NonEmptyString, + tenant: &Tenant, user: UserIdentity, client_ip: Option<&str>, ) -> Result { @@ -107,7 +107,7 @@ impl UserApiProvider { // Get the tenant all users list. 
#[async_backtrace::framed] - pub async fn get_users(&self, tenant: &NonEmptyString) -> Result> { + pub async fn get_users(&self, tenant: &Tenant) -> Result> { let client = self.user_api(tenant); let get_users = client.get_users(); @@ -128,7 +128,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn add_user( &self, - tenant: &NonEmptyString, + tenant: &Tenant, user_info: UserInfo, create_option: &CreateOption, ) -> Result<()> { @@ -161,7 +161,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn grant_privileges_to_user( &self, - tenant: NonEmptyString, + tenant: &Tenant, user: UserIdentity, object: GrantObject, privileges: UserPrivilegeSet, @@ -172,7 +172,7 @@ impl UserApiProvider { user.username ))); } - let client = self.user_api(&tenant); + let client = self.user_api(tenant); client .update_user_with(user, MatchSeq::GE(1), |ui: &mut UserInfo| { ui.grants.grant_privileges(&object, privileges) @@ -184,7 +184,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn revoke_privileges_from_user( &self, - tenant: &NonEmptyString, + tenant: &Tenant, user: UserIdentity, object: GrantObject, privileges: UserPrivilegeSet, @@ -207,7 +207,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn grant_role_to_user( &self, - tenant: NonEmptyString, + tenant: Tenant, user: UserIdentity, grant_role: String, ) -> Result> { @@ -229,7 +229,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn revoke_role_from_user( &self, - tenant: &NonEmptyString, + tenant: &Tenant, user: UserIdentity, revoke_role: String, ) -> Result> { @@ -252,7 +252,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn drop_user( &self, - tenant: NonEmptyString, + tenant: &Tenant, user: UserIdentity, if_exists: bool, ) -> Result<()> { @@ -262,7 +262,7 @@ impl UserApiProvider { user.username ))); } - let client = self.user_api(&tenant); + let client = self.user_api(tenant); let drop_user = client.drop_user(user, MatchSeq::GE(1)); match drop_user.await { Ok(res) => Ok(res), @@ -280,7 +280,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn update_user( &self, - tenant: &NonEmptyString, + tenant: &Tenant, user: UserIdentity, auth_info: Option, user_option: Option, @@ -327,7 +327,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn update_user_default_role( &self, - tenant: &NonEmptyString, + tenant: &Tenant, user: UserIdentity, default_role: Option, ) -> Result> { @@ -340,7 +340,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn update_user_login_result( &self, - tenant: NonEmptyString, + tenant: Tenant, user: UserIdentity, authed: bool, ) -> Result<()> { @@ -367,7 +367,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn update_user_lockout_time( &self, - tenant: &NonEmptyString, + tenant: &Tenant, user: UserIdentity, lockout_time: DateTime, ) -> Result<()> { diff --git a/src/query/users/src/user_setting.rs b/src/query/users/src/user_setting.rs index 4bb12cc946f6e..a22b4e44dd130 100644 --- a/src/query/users/src/user_setting.rs +++ b/src/query/users/src/user_setting.rs @@ -14,29 +14,29 @@ use databend_common_exception::Result; use databend_common_meta_app::principal::UserSetting; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_types::MatchSeq; -use databend_common_meta_types::NonEmptyString; use crate::UserApiProvider; impl UserApiProvider { // Set a setting. 
#[async_backtrace::framed] - pub async fn set_setting(&self, tenant: &NonEmptyString, setting: UserSetting) -> Result { + pub async fn set_setting(&self, tenant: &Tenant, setting: UserSetting) -> Result { let setting_api_provider = self.setting_api(tenant); setting_api_provider.set_setting(setting).await } // Get all settings list. #[async_backtrace::framed] - pub async fn get_settings(&self, tenant: &NonEmptyString) -> Result> { + pub async fn get_settings(&self, tenant: &Tenant) -> Result> { let setting_api_provider = self.setting_api(tenant); setting_api_provider.get_settings().await } // Drop a setting by name. #[async_backtrace::framed] - pub async fn drop_setting(&self, tenant: &NonEmptyString, name: &str) -> Result<()> { + pub async fn drop_setting(&self, tenant: &Tenant, name: &str) -> Result<()> { let setting_api_provider = self.setting_api(tenant); setting_api_provider .try_drop_setting(name, MatchSeq::GE(1)) diff --git a/src/query/users/src/user_stage.rs b/src/query/users/src/user_stage.rs index e17fa0e1b6537..cc136517b9caa 100644 --- a/src/query/users/src/user_stage.rs +++ b/src/query/users/src/user_stage.rs @@ -16,7 +16,7 @@ use databend_common_exception::ErrorCode; use databend_common_exception::Result; use databend_common_meta_app::principal::StageInfo; use databend_common_meta_app::schema::CreateOption; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use crate::UserApiProvider; @@ -26,7 +26,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn add_stage( &self, - tenant: &NonEmptyString, + tenant: &Tenant, info: StageInfo, create_option: &CreateOption, ) -> Result<()> { @@ -36,13 +36,13 @@ impl UserApiProvider { // Get one stage from by tenant. #[async_backtrace::framed] - pub async fn get_stage(&self, tenant: &NonEmptyString, stage_name: &str) -> Result { + pub async fn get_stage(&self, tenant: &Tenant, stage_name: &str) -> Result { let stage_api_provider = self.stage_api(tenant); stage_api_provider.get_stage(stage_name).await } #[async_backtrace::framed] - pub async fn exists_stage(&self, tenant: &NonEmptyString, stage_name: &str) -> Result { + pub async fn exists_stage(&self, tenant: &Tenant, stage_name: &str) -> Result { match self.get_stage(tenant, stage_name).await { Ok(_) => Ok(true), Err(err) => { @@ -57,7 +57,7 @@ impl UserApiProvider { // Get the tenant all stage list. #[async_backtrace::framed] - pub async fn get_stages(&self, tenant: &NonEmptyString) -> Result> { + pub async fn get_stages(&self, tenant: &Tenant) -> Result> { let stage_api_provider = self.stage_api(tenant); let get_stages = stage_api_provider.get_stages(); @@ -69,12 +69,7 @@ impl UserApiProvider { // Drop a stage by name. 
#[async_backtrace::framed] - pub async fn drop_stage( - &self, - tenant: &NonEmptyString, - name: &str, - if_exists: bool, - ) -> Result<()> { + pub async fn drop_stage(&self, tenant: &Tenant, name: &str, if_exists: bool) -> Result<()> { let stage_api_provider = self.stage_api(tenant); let drop_stage = stage_api_provider.drop_stage(name); match drop_stage.await { diff --git a/src/query/users/src/user_udf.rs b/src/query/users/src/user_udf.rs index c1e2db983717a..bd6cecfa89f1c 100644 --- a/src/query/users/src/user_udf.rs +++ b/src/query/users/src/user_udf.rs @@ -17,8 +17,8 @@ use databend_common_management::udf::UdfApiError; use databend_common_management::udf::UdfError; use databend_common_meta_app::principal::UserDefinedFunction; use databend_common_meta_app::schema::CreateOption; +use databend_common_meta_app::tenant::Tenant; use databend_common_meta_types::MatchSeq; -use databend_common_meta_types::NonEmptyString; use crate::UserApiProvider; @@ -28,7 +28,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn add_udf( &self, - tenant: &NonEmptyString, + tenant: &Tenant, info: UserDefinedFunction, create_option: &CreateOption, ) -> Result<()> { @@ -39,11 +39,7 @@ impl UserApiProvider { // Update a UDF. #[async_backtrace::framed] - pub async fn update_udf( - &self, - tenant: &NonEmptyString, - info: UserDefinedFunction, - ) -> Result { + pub async fn update_udf(&self, tenant: &Tenant, info: UserDefinedFunction) -> Result { let res = self .udf_api(tenant) .update_udf(info, MatchSeq::GE(1)) @@ -57,7 +53,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn get_udf( &self, - tenant: &NonEmptyString, + tenant: &Tenant, udf_name: &str, ) -> Result, UdfApiError> { let seqv = self.udf_api(tenant).get_udf(udf_name).await?; @@ -65,14 +61,14 @@ impl UserApiProvider { } #[async_backtrace::framed] - pub async fn exists_udf(&self, tenant: &NonEmptyString, udf_name: &str) -> Result { + pub async fn exists_udf(&self, tenant: &Tenant, udf_name: &str) -> Result { let res = self.get_udf(tenant, udf_name).await?; Ok(res.is_some()) } // Get all UDFs for the tenant. 
#[async_backtrace::framed] - pub async fn list_udf(&self, tenant: &NonEmptyString) -> Result> { + pub async fn list_udf(&self, tenant: &Tenant) -> Result> { let udf_api = self.udf_api(tenant); let udfs = udf_api @@ -87,7 +83,7 @@ impl UserApiProvider { #[async_backtrace::framed] pub async fn drop_udf( &self, - tenant: &NonEmptyString, + tenant: &Tenant, udf_name: &str, allow_no_change: bool, ) -> std::result::Result, UdfApiError> { @@ -101,7 +97,7 @@ impl UserApiProvider { Ok(()) } else { Err(UdfError::NotFound { - tenant: tenant.to_string(), + tenant: tenant.name().to_string(), name: udf_name.to_string(), context: "while drop_udf".to_string(), }) diff --git a/src/query/users/tests/it/network_policy.rs b/src/query/users/tests/it/network_policy.rs index 438fc99db01d4..c6ce9a41d7f44 100644 --- a/src/query/users/tests/it/network_policy.rs +++ b/src/query/users/tests/it/network_policy.rs @@ -24,14 +24,13 @@ use databend_common_meta_app::principal::UserIdentity; use databend_common_meta_app::principal::UserInfo; use databend_common_meta_app::principal::UserOption; use databend_common_meta_app::schema::CreateOption; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use databend_common_users::UserApiProvider; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_network_policy() -> Result<()> { let conf = RpcClientConf::default(); - let tenant_name = "test"; - let tenant = NonEmptyString::new(tenant_name.to_string()).unwrap(); + let tenant = Tenant::new_literal("test"); let user_mgr = UserApiProvider::try_create_simple(conf, &tenant).await?; let username = "test-user1"; @@ -128,9 +127,7 @@ async fn test_network_policy() -> Result<()> { .await; assert!(res.is_err()); - user_mgr - .drop_user(tenant.clone(), user.clone(), false) - .await?; + user_mgr.drop_user(&tenant, user.clone(), false).await?; let res = user_mgr .drop_network_policy(&tenant, policy_name.as_ref(), false) diff --git a/src/query/users/tests/it/password_policy.rs b/src/query/users/tests/it/password_policy.rs index 090f0ac364020..715a598958f40 100644 --- a/src/query/users/tests/it/password_policy.rs +++ b/src/query/users/tests/it/password_policy.rs @@ -26,14 +26,14 @@ use databend_common_meta_app::principal::UserIdentity; use databend_common_meta_app::principal::UserInfo; use databend_common_meta_app::principal::UserOption; use databend_common_meta_app::schema::CreateOption; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use databend_common_users::UserApiProvider; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_password_policy() -> Result<()> { let conf = RpcClientConf::default(); let tenant_name = "test"; - let tenant = NonEmptyString::new(tenant_name.to_string()).unwrap(); + let tenant = Tenant::new_literal(tenant_name); let user_mgr = UserApiProvider::try_create_simple(conf, &tenant).await?; let username = "test-user1"; @@ -370,7 +370,7 @@ async fn test_password_policy() -> Result<()> { .await; assert!(res.is_err()); - user_mgr.drop_user(tenant.clone(), identity, false).await?; + user_mgr.drop_user(&tenant, identity, false).await?; let res = user_mgr .drop_password_policy(&tenant, policy_name.as_ref(), false) diff --git a/src/query/users/tests/it/role_cache_mgr.rs b/src/query/users/tests/it/role_cache_mgr.rs index 2ebdb080c9f5a..6e0ca530af605 100644 --- a/src/query/users/tests/it/role_cache_mgr.rs +++ b/src/query/users/tests/it/role_cache_mgr.rs @@ -21,7 +21,7 @@ use 
databend_common_grpc::RpcClientConf; use databend_common_meta_app::principal::GrantObject; use databend_common_meta_app::principal::RoleInfo; use databend_common_meta_app::principal::UserPrivilegeSet; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use databend_common_users::role_util::find_all_related_roles; use databend_common_users::RoleCacheManager; use databend_common_users::UserApiProvider; @@ -31,7 +31,8 @@ pub const CATALOG_DEFAULT: &str = "default"; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_role_cache_mgr() -> Result<()> { let conf = RpcClientConf::default(); - let tenant = NonEmptyString::new("tenant1").unwrap(); + let tenant = Tenant::new_literal("tenant1"); + let user_manager = UserApiProvider::try_create_simple(conf, &tenant).await?; let role_cache_manager = RoleCacheManager::try_create(user_manager.clone())?; diff --git a/src/query/users/tests/it/role_mgr.rs b/src/query/users/tests/it/role_mgr.rs index e006c882bb305..82fe69a62b713 100644 --- a/src/query/users/tests/it/role_mgr.rs +++ b/src/query/users/tests/it/role_mgr.rs @@ -20,15 +20,14 @@ use databend_common_meta_app::principal::GrantObject; use databend_common_meta_app::principal::RoleInfo; use databend_common_meta_app::principal::UserPrivilegeSet; use databend_common_meta_app::principal::UserPrivilegeType; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use databend_common_users::UserApiProvider; use pretty_assertions::assert_eq; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_role_manager() -> Result<()> { let conf = RpcClientConf::default(); - let tenant_name = "tenant1"; - let tenant = NonEmptyString::new(tenant_name).unwrap(); + let tenant = Tenant::new_literal("tenant1"); let role_mgr = UserApiProvider::try_create_simple(conf, &tenant).await?; diff --git a/src/query/users/tests/it/user_mgr.rs b/src/query/users/tests/it/user_mgr.rs index 189e9de0a8e8a..d05ea8785df1f 100644 --- a/src/query/users/tests/it/user_mgr.rs +++ b/src/query/users/tests/it/user_mgr.rs @@ -25,7 +25,7 @@ use databend_common_meta_app::principal::UserInfo; use databend_common_meta_app::principal::UserPrivilegeSet; use databend_common_meta_app::principal::UserPrivilegeType; use databend_common_meta_app::schema::CreateOption; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use databend_common_users::UserApiProvider; use pretty_assertions::assert_eq; @@ -33,7 +33,7 @@ use pretty_assertions::assert_eq; async fn test_user_manager() -> Result<()> { let conf = RpcClientConf::default(); let tenant_name = "test"; - let tenant = NonEmptyString::new(tenant_name.to_string()).unwrap(); + let tenant = Tenant::new_literal(tenant_name); let user_mgr = UserApiProvider::try_create_simple(conf, &tenant).await?; let username = "test-user1"; @@ -90,7 +90,7 @@ async fn test_user_manager() -> Result<()> { // drop. { user_mgr - .drop_user(tenant.clone(), UserIdentity::new(username, hostname), false) + .drop_user(&tenant, UserIdentity::new(username, hostname), false) .await?; let users = user_mgr.get_users(&tenant).await?; assert_eq!(0, users.len()); @@ -99,7 +99,7 @@ async fn test_user_manager() -> Result<()> { // repeat drop same user not with if exist. 
{ let res = user_mgr - .drop_user(tenant.clone(), UserIdentity::new(username, hostname), false) + .drop_user(&tenant, UserIdentity::new(username, hostname), false) .await; assert!(res.is_err()); } @@ -107,7 +107,7 @@ async fn test_user_manager() -> Result<()> { // repeat drop same user with if exist. { let res = user_mgr - .drop_user(tenant.clone(), UserIdentity::new(username, hostname), true) + .drop_user(&tenant, UserIdentity::new(username, hostname), true) .await; assert!(res.is_ok()); } @@ -124,12 +124,7 @@ async fn test_user_manager() -> Result<()> { let mut add_priv = UserPrivilegeSet::empty(); add_priv.set_privilege(UserPrivilegeType::Set); user_mgr - .grant_privileges_to_user( - tenant.clone(), - user_info.identity(), - GrantObject::Global, - add_priv, - ) + .grant_privileges_to_user(&tenant, user_info.identity(), GrantObject::Global, add_priv) .await?; let new_user = user_mgr.get_user(&tenant, user_info.identity()).await?; assert!( @@ -143,7 +138,7 @@ async fn test_user_manager() -> Result<()> { .verify_privilege(&GrantObject::Global, UserPrivilegeType::Create) ); user_mgr - .drop_user(tenant.clone(), new_user.identity(), true) + .drop_user(&tenant, new_user.identity(), true) .await?; } @@ -155,7 +150,7 @@ async fn test_user_manager() -> Result<()> { .await?; user_mgr .grant_privileges_to_user( - tenant.clone(), + &tenant, user_info.identity(), GrantObject::Global, UserPrivilegeSet::all_privileges(), @@ -175,7 +170,7 @@ async fn test_user_manager() -> Result<()> { let user_info = user_mgr.get_user(&tenant, user_info.identity()).await?; assert_eq!(user_info.grants.entries().len(), 0); user_mgr - .drop_user(tenant.clone(), user_info.identity(), true) + .drop_user(&tenant, user_info.identity(), true) .await?; } diff --git a/src/query/users/tests/it/user_udf.rs b/src/query/users/tests/it/user_udf.rs index c355e79c1352a..0097c9c7daaa8 100644 --- a/src/query/users/tests/it/user_udf.rs +++ b/src/query/users/tests/it/user_udf.rs @@ -18,7 +18,7 @@ use databend_common_expression::types::DataType; use databend_common_grpc::RpcClientConf; use databend_common_meta_app::principal::UserDefinedFunction; use databend_common_meta_app::schema::CreateOption; -use databend_common_meta_types::NonEmptyString; +use databend_common_meta_app::tenant::Tenant; use databend_common_users::UserApiProvider; use pretty_assertions::assert_eq; @@ -26,7 +26,7 @@ use pretty_assertions::assert_eq; async fn test_user_lambda_udf() -> Result<()> { let conf = RpcClientConf::default(); let tenant_name = "test"; - let tenant = NonEmptyString::new(tenant_name).unwrap(); + let tenant = Tenant::new_literal(tenant_name); let user_mgr = UserApiProvider::try_create_simple(conf, &tenant).await?; let description = "this is a description"; @@ -92,8 +92,7 @@ async fn test_user_lambda_udf() -> Result<()> { #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_user_udf_server() -> Result<()> { let conf = RpcClientConf::default(); - let tenant_name = "test"; - let tenant = NonEmptyString::new(tenant_name).unwrap(); + let tenant = Tenant::new_literal("test"); let user_mgr = UserApiProvider::try_create_simple(conf, &tenant).await?; let address = "http://127.0.0.1:8888"; From eeb174d05a2b6de5c47d75107717df4b522358b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BC=A0=E7=82=8E=E6=B3=BC?= Date: Wed, 27 Mar 2024 23:20:52 +0800 Subject: [PATCH 2/4] chore: refactor --- src/bendpy/src/context.rs | 2 +- src/meta/app/src/app_error.rs | 10 ++++- src/meta/app/src/tenant/mod.rs | 2 - src/meta/app/src/tenant/tenant.rs | 10 +++-- 
src/meta/app/src/tenant/tenant_serde.rs | 43 ------------------- .../src/tident_from_to_protobuf_impl.rs | 2 +- src/meta/protos/proto/catalog.proto | 13 +----- src/query/config/src/config.rs | 2 +- src/query/config/src/inner.rs | 2 +- src/query/management/tests/it/setting.rs | 2 +- src/query/management/tests/it/stage.rs | 2 +- src/query/service/src/api/http/v1/settings.rs | 6 +-- .../service/src/api/http/v1/tenant_tables.rs | 4 +- src/query/service/src/auth.rs | 3 ++ .../src/interpreters/interpreter_setting.rs | 2 +- .../interpreters/interpreter_table_create.rs | 3 +- .../interpreter_user_stage_create.rs | 6 +-- .../service/src/servers/http/middleware.rs | 5 ++- .../service/src/sessions/query_ctx_shared.rs | 2 +- src/query/service/src/sessions/session.rs | 2 +- src/query/service/src/sessions/session_ctx.rs | 17 +++----- .../table_functions/others/tenant_quota.rs | 2 +- .../service/tests/it/sessions/session.rs | 5 ++- 23 files changed, 50 insertions(+), 97 deletions(-) delete mode 100644 src/meta/app/src/tenant/tenant_serde.rs diff --git a/src/bendpy/src/context.rs b/src/bendpy/src/context.rs index af038b8260f91..6d2ce2b8e305c 100644 --- a/src/bendpy/src/context.rs +++ b/src/bendpy/src/context.rs @@ -65,7 +65,7 @@ impl PySessionContext { .await .unwrap(); - session.set_current_tenant(tenant.name().to_string()); + session.set_current_tenant(tenant); let mut user = UserInfo::new_no_auth("root", "%"); user.grants.grant_privileges( diff --git a/src/meta/app/src/app_error.rs b/src/meta/app/src/app_error.rs index d459bfa546426..662c8de351dc7 100644 --- a/src/meta/app/src/app_error.rs +++ b/src/meta/app/src/app_error.rs @@ -33,13 +33,19 @@ pub struct TenantIsEmpty { } impl TenantIsEmpty { - pub fn new(context: impl Into) -> Self { + pub fn new(context: impl ToString) -> Self { Self { - context: context.into(), + context: context.to_string(), } } } +impl From for ErrorCode { + fn from(err: TenantIsEmpty) -> Self { + ErrorCode::TenantIsEmpty(err.to_string()) + } +} + #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, thiserror::Error)] #[error("DatabaseAlreadyExists: `{db_name}` while `{context}`")] pub struct DatabaseAlreadyExists { diff --git a/src/meta/app/src/tenant/mod.rs b/src/meta/app/src/tenant/mod.rs index ba7a7c7d1b72e..ef2d46835f3be 100644 --- a/src/meta/app/src/tenant/mod.rs +++ b/src/meta/app/src/tenant/mod.rs @@ -16,9 +16,7 @@ mod quota; #[allow(clippy::module_inception)] mod tenant; mod tenant_quota_ident; -mod tenant_serde; pub use quota::TenantQuota; pub use tenant::Tenant; pub use tenant_quota_ident::TenantQuotaIdent; -pub use tenant_serde::TenantSerde; diff --git a/src/meta/app/src/tenant/tenant.rs b/src/meta/app/src/tenant/tenant.rs index ff7cce949aef7..f3acffa7a7090 100644 --- a/src/meta/app/src/tenant/tenant.rs +++ b/src/meta/app/src/tenant/tenant.rs @@ -14,9 +14,10 @@ use std::fmt::Display; -use databend_common_exception::ErrorCode; use databend_common_meta_types::NonEmptyString; +use crate::app_error::TenantIsEmpty; + /// Tenant is not stored directly in meta-store. /// /// It is just a type for use on the client side. 
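The error plumbing added above is what makes the rename in the next hunk ergonomic: `TenantIsEmpty` converts into `ErrorCode`, so call sites can use `?` directly in functions returning the usual `Result`. A minimal sketch, assuming `Tenant::new_or_err` returns `Result<Tenant, TenantIsEmpty>` and the `From<TenantIsEmpty> for ErrorCode` impl defined in this patch (the helper name is illustrative):

    use databend_common_exception::Result;
    use databend_common_meta_app::tenant::Tenant;
    use minitrace::func_name;

    fn tenant_from_request(raw: &str) -> Result<Tenant> {
        // An empty name yields `TenantIsEmpty`; `?` converts it into
        // `ErrorCode::TenantIsEmpty` via the `From` impl above. The second
        // argument is a free-form context string carried in the error message.
        let tenant = Tenant::new_or_err(raw, func_name!())?;
        Ok(tenant)
    }

This is the pattern the auth, middleware and settings call sites adopt later in the series.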
@@ -27,15 +28,16 @@ pub struct Tenant { } impl Tenant { + // #[deprecated] pub fn new(tenant: impl ToString) -> Self { Self { tenant: tenant.to_string(), } } - pub fn new_or_error_code(tenant: impl ToString, ctx: impl Display) -> Result { - let non_empty = NonEmptyString::new(tenant.to_string()) - .map_err(|_e| ErrorCode::TenantIsEmpty(format!("Tenant is empty when {}", ctx)))?; + pub fn new_or_err(tenant: impl ToString, ctx: impl Display) -> Result { + let non_empty = + NonEmptyString::new(tenant.to_string()).map_err(|_e| TenantIsEmpty::new(ctx))?; let t = Self { tenant: non_empty.as_str().to_string(), diff --git a/src/meta/app/src/tenant/tenant_serde.rs b/src/meta/app/src/tenant/tenant_serde.rs deleted file mode 100644 index 2aee53e0c302e..0000000000000 --- a/src/meta/app/src/tenant/tenant_serde.rs +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2021 Datafuse Labs -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::tenant::Tenant; - -/// A duplicate of [`Tenant`] struct for transport with serde support. -/// -/// This struct is meant not to provide any functionality [`Tenant`] struct provides -/// and is only used for transport. -#[derive( - Clone, Debug, PartialEq, Eq, Hash, derive_more::Display, serde::Serialize, serde::Deserialize, -)] -#[display(fmt = "TenantSerde{{{tenant}}}")] -pub struct TenantSerde { - tenant: String, -} - -impl From for TenantSerde { - fn from(value: Tenant) -> Self { - Self { - tenant: value.tenant, - } - } -} - -impl From for Tenant { - fn from(value: TenantSerde) -> Self { - Tenant { - tenant: value.tenant, - } - } -} diff --git a/src/meta/proto-conv/src/tident_from_to_protobuf_impl.rs b/src/meta/proto-conv/src/tident_from_to_protobuf_impl.rs index 309615addec9e..109db968a82df 100644 --- a/src/meta/proto-conv/src/tident_from_to_protobuf_impl.rs +++ b/src/meta/proto-conv/src/tident_from_to_protobuf_impl.rs @@ -40,7 +40,7 @@ where R: TenantResource if p.tenant.is_empty() { return Err(Incompatible { - reason: "CatalogName.tenant is empty".to_string(), + reason: "tenant is empty".to_string(), }); } diff --git a/src/meta/protos/proto/catalog.proto b/src/meta/protos/proto/catalog.proto index ab10c8bfc267a..4c5cc68422821 100644 --- a/src/meta/protos/proto/catalog.proto +++ b/src/meta/protos/proto/catalog.proto @@ -18,22 +18,11 @@ package databend_proto; import "config.proto"; -message CatalogName { - uint64 ver = 100; - uint64 min_reader_ver = 101; - - // The user this db belongs to - string tenant = 1; - - // Catalog name - string catalog_name = 2; -} - message CatalogMeta { uint64 ver = 100; uint64 min_reader_ver = 101; - // catalog options + // Catalog options CatalogOption option = 2; // The time catalog created. 
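Alongside the removal of the dedicated `CatalogName` message and the `TenantSerde` shim, the `Tenant` API settles on two constructors: `new_literal` for values that cannot be empty (tests, built-in defaults) and `new_or_err` for untrusted input, where the second argument is a context string that ends up in the error. A small sketch of the split, assuming the signatures shown in these hunks (the helper names are illustrative):

    use databend_common_exception::ErrorCode;
    use databend_common_exception::Result;
    use databend_common_meta_app::tenant::Tenant;

    fn tenant_from_config(tenant_id: &str) -> Result<Tenant> {
        // Config-provided value: validate, and map the failure to a config error,
        // mirroring what `QueryConfig::try_into` does below.
        Tenant::new_or_err(tenant_id, "tenant_from_config")
            .map_err(|_e| ErrorCode::InvalidConfig("tenant-id can not be empty"))
    }

    fn builtin_tenant() -> Tenant {
        // Known non-empty literal: the infallible constructor is sufficient.
        Tenant::new_literal("admin")
    }

The config and management hunks that follow are mechanical renames onto this API.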
diff --git a/src/query/config/src/config.rs b/src/query/config/src/config.rs index 4eef317f20f1b..1850f157b8a28 100644 --- a/src/query/config/src/config.rs +++ b/src/query/config/src/config.rs @@ -1671,7 +1671,7 @@ impl TryInto for QueryConfig { fn try_into(self) -> Result { Ok(InnerQueryConfig { - tenant_id: Tenant::new_or_error_code(self.tenant_id, "") + tenant_id: Tenant::new_or_err(self.tenant_id, "") .map_err(|_e| ErrorCode::InvalidConfig("tenant-id can not be empty"))?, cluster_id: self.cluster_id, node_id: "".to_string(), diff --git a/src/query/config/src/inner.rs b/src/query/config/src/inner.rs index f46fce6600da2..0f03f7293399d 100644 --- a/src/query/config/src/inner.rs +++ b/src/query/config/src/inner.rs @@ -237,7 +237,7 @@ pub struct QueryConfig { impl Default for QueryConfig { fn default() -> Self { Self { - tenant_id: Tenant::new_or_error_code("admin", "default()").unwrap(), + tenant_id: Tenant::new_or_err("admin", "default()").unwrap(), cluster_id: "".to_string(), node_id: "".to_string(), num_cpus: 0, diff --git a/src/query/management/tests/it/setting.rs b/src/query/management/tests/it/setting.rs index a1a6d794714e3..a30e1e39730cd 100644 --- a/src/query/management/tests/it/setting.rs +++ b/src/query/management/tests/it/setting.rs @@ -118,7 +118,7 @@ async fn new_setting_api() -> Result<(Arc, SettingMgr)> { let test_api = Arc::new(MetaEmbedded::new_temp().await?); let mgr = SettingMgr::create( test_api.clone(), - &Tenant::new_or_error_code("databend_query", func_name!()).unwrap(), + &Tenant::new_or_err("databend_query", func_name!()).unwrap(), ); Ok((test_api, mgr)) } diff --git a/src/query/management/tests/it/stage.rs b/src/query/management/tests/it/stage.rs index cf25fb5102881..936015af8a0cc 100644 --- a/src/query/management/tests/it/stage.rs +++ b/src/query/management/tests/it/stage.rs @@ -142,7 +142,7 @@ async fn new_stage_api() -> Result<(Arc, StageMgr)> { let test_api = Arc::new(MetaEmbedded::new_temp().await?); let mgr = StageMgr::create( test_api.clone(), - &Tenant::new_or_error_code("admin", func_name!()).unwrap(), + &Tenant::new_or_err("admin", func_name!()).unwrap(), ); Ok((test_api, mgr)) } diff --git a/src/query/service/src/api/http/v1/settings.rs b/src/query/service/src/api/http/v1/settings.rs index caabd07e1f58c..ec88c4c6d4901 100644 --- a/src/query/service/src/api/http/v1/settings.rs +++ b/src/query/service/src/api/http/v1/settings.rs @@ -32,7 +32,7 @@ pub struct SettingsItem { } async fn list_settings_impl(tenant: &str) -> Result> { - let settings = Settings::create(Tenant::new_or_error_code(tenant, func_name!())?); + let settings = Settings::create(Tenant::new_or_err(tenant, func_name!())?); settings.load_changes().await?; Ok(settings @@ -61,7 +61,7 @@ async fn set_setting_impl(tenant: &str, key: &str, value: String) -> Result Result )); } - let settings = Settings::create(Tenant::new_or_error_code(tenant, func_name!())?); + let settings = Settings::create(Tenant::new_or_err(tenant, func_name!())?); settings.try_drop_global_setting(key).await?; Ok(settings diff --git a/src/query/service/src/api/http/v1/tenant_tables.rs b/src/query/service/src/api/http/v1/tenant_tables.rs index 4cc499ee18f2d..67608c93a6a1d 100644 --- a/src/query/service/src/api/http/v1/tenant_tables.rs +++ b/src/query/service/src/api/http/v1/tenant_tables.rs @@ -99,8 +99,8 @@ async fn load_tenant_tables(tenant: &Tenant) -> Result { pub async fn list_tenant_tables_handler( Path(tenant): Path, ) -> poem::Result { - let tenant = Tenant::new_or_error_code(&tenant, func_name!()) - 
.map_err(poem::error::InternalServerError)?; + let tenant = + Tenant::new_or_err(&tenant, func_name!()).map_err(poem::error::InternalServerError)?; let resp = load_tenant_tables(&tenant) .await diff --git a/src/query/service/src/auth.rs b/src/query/service/src/auth.rs index 16644b10bb817..ebee9c42def48 100644 --- a/src/query/service/src/auth.rs +++ b/src/query/service/src/auth.rs @@ -22,8 +22,10 @@ use databend_common_meta_app::principal::AuthInfo; use databend_common_meta_app::principal::UserIdentity; use databend_common_meta_app::principal::UserInfo; use databend_common_meta_app::schema::CreateOption; +use databend_common_meta_app::tenant::Tenant; use databend_common_users::JwtAuthenticator; use databend_common_users::UserApiProvider; +use minitrace::func_name; use crate::sessions::Session; @@ -83,6 +85,7 @@ impl AuthMgr { // setup tenant if the JWT claims contain extra.tenant_id if let Some(tenant) = jwt.custom.tenant_id { + let tenant = Tenant::new_or_err(tenant, func_name!())?; session.set_current_tenant(tenant); }; diff --git a/src/query/service/src/interpreters/interpreter_setting.rs b/src/query/service/src/interpreters/interpreter_setting.rs index 04751746e0079..78e4a2efb8b5e 100644 --- a/src/query/service/src/interpreters/interpreter_setting.rs +++ b/src/query/service/src/interpreters/interpreter_setting.rs @@ -93,7 +93,7 @@ impl Interpreter for SettingInterpreter { if config.query.internal_enable_sandbox_tenant && !tenant.is_empty() { UserApiProvider::try_create_simple( config.meta.to_meta_grpc_client_conf(), - &Tenant::new_or_error_code(tenant, func_name!())?, + &Tenant::new_or_err(tenant, func_name!())?, ) .await?; } diff --git a/src/query/service/src/interpreters/interpreter_table_create.rs b/src/query/service/src/interpreters/interpreter_table_create.rs index 14eeac41f4359..edc413b9a911b 100644 --- a/src/query/service/src/interpreters/interpreter_table_create.rs +++ b/src/query/service/src/interpreters/interpreter_table_create.rs @@ -100,8 +100,7 @@ impl Interpreter for CreateTableInterpreter { #[async_backtrace::framed] async fn execute2(&self) -> Result { - let tenant = - Tenant::new_or_error_code(&self.plan.tenant, "CreateTableInterpreter::execute2")?; + let tenant = Tenant::new_or_err(&self.plan.tenant, "CreateTableInterpreter::execute2")?; let has_computed_column = self .plan diff --git a/src/query/service/src/interpreters/interpreter_user_stage_create.rs b/src/query/service/src/interpreters/interpreter_user_stage_create.rs index a5efeb9ceaf87..cc1a1f194f418 100644 --- a/src/query/service/src/interpreters/interpreter_user_stage_create.rs +++ b/src/query/service/src/interpreters/interpreter_user_stage_create.rs @@ -74,7 +74,7 @@ impl Interpreter for CreateUserStageInterpreter { )); } - let tenant = Tenant::new_or_error_code(&plan.tenant, func_name!())?; + let tenant = Tenant::new_or_err(&plan.tenant, func_name!())?; let quota_api = user_mgr.tenant_quota_api(&tenant); let quota = quota_api.get_quota(MatchSeq::GE(0)).await?.data; @@ -86,7 +86,7 @@ impl Interpreter for CreateUserStageInterpreter { ))); }; - let tenant = Tenant::new_or_error_code(&plan.tenant, "CreateUserStageInterpreter")?; + let tenant = Tenant::new_or_err(&plan.tenant, "CreateUserStageInterpreter")?; let old_stage = match plan.create_option { CreateOption::CreateOrReplace => user_mgr @@ -96,7 +96,7 @@ impl Interpreter for CreateUserStageInterpreter { _ => None, }; - let tenant = Tenant::new_or_error_code(&plan.tenant, "CreateUserStageInterpreter")?; + let tenant = Tenant::new_or_err(&plan.tenant, 
"CreateUserStageInterpreter")?; let mut user_stage = user_stage; user_stage.creator = Some(self.ctx.get_current_user()?.identity()); diff --git a/src/query/service/src/servers/http/middleware.rs b/src/query/service/src/servers/http/middleware.rs index 0ac354fcd53ed..86a6a9267e1f5 100644 --- a/src/query/service/src/servers/http/middleware.rs +++ b/src/query/service/src/servers/http/middleware.rs @@ -19,6 +19,7 @@ use std::time::Instant; use databend_common_exception::ErrorCode; use databend_common_exception::Result; +use databend_common_meta_app::tenant::Tenant; use databend_common_metrics::http::metrics_incr_http_request_count; use databend_common_metrics::http::metrics_incr_http_response_panics_count; use databend_common_metrics::http::metrics_incr_http_slow_request_count; @@ -32,6 +33,7 @@ use http::HeaderMap; use http::HeaderValue; use log::error; use log::warn; +use minitrace::func_name; use opentelemetry::baggage::BaggageExt; use opentelemetry::propagation::TextMapPropagator; use opentelemetry_http::HeaderExtractor; @@ -201,7 +203,8 @@ impl HTTPSessionEndpoint { let ctx = session.create_query_context().await?; if let Some(tenant_id) = req.headers().get("X-DATABEND-TENANT") { let tenant_id = tenant_id.to_str().unwrap().to_string(); - session.set_current_tenant(tenant_id); + let tenant = Tenant::new_or_err(tenant_id.clone(), func_name!())?; + session.set_current_tenant(tenant); } let node_id = ctx.get_cluster().local_id.clone(); diff --git a/src/query/service/src/sessions/query_ctx_shared.rs b/src/query/service/src/sessions/query_ctx_shared.rs index c614d51753b0e..cf8168c72f7b3 100644 --- a/src/query/service/src/sessions/query_ctx_shared.rs +++ b/src/query/service/src/sessions/query_ctx_shared.rs @@ -264,7 +264,7 @@ impl QueryContextShared { self.session.get_current_role() } - pub fn set_current_tenant(&self, tenant: String) { + pub fn set_current_tenant(&self, tenant: Tenant) { self.session.set_current_tenant(tenant); } diff --git a/src/query/service/src/sessions/session.rs b/src/query/service/src/sessions/session.rs index 6a0d0f16ad109..50857e06a4236 100644 --- a/src/query/service/src/sessions/session.rs +++ b/src/query/service/src/sessions/session.rs @@ -195,7 +195,7 @@ impl Session { self.session_ctx.get_current_tenant() } - pub fn set_current_tenant(self: &Arc, tenant: String) { + pub fn set_current_tenant(self: &Arc, tenant: Tenant) { self.session_ctx.set_current_tenant(tenant); } diff --git a/src/query/service/src/sessions/session_ctx.rs b/src/query/service/src/sessions/session_ctx.rs index e7b407181f041..0417ebbdb6f87 100644 --- a/src/query/service/src/sessions/session_ctx.rs +++ b/src/query/service/src/sessions/session_ctx.rs @@ -41,7 +41,7 @@ pub struct SessionContext { // The current tenant can be determined by databend-query's config file, or by X-DATABEND-TENANT // if it's in management mode. If databend-query is not in management mode, the current tenant // can not be modified at runtime. - current_tenant: RwLock, + current_tenant: RwLock>, // The current user is determined by the authentication phase on each connection. It will not be // changed during a session. 
current_user: RwLock>, @@ -157,28 +157,23 @@ impl SessionContext { if conf.query.internal_enable_sandbox_tenant { let sandbox_tenant = self.settings.get_sandbox_tenant().unwrap_or_default(); if !sandbox_tenant.is_empty() { - return Tenant::new_or_error_code(sandbox_tenant, "create from sandbox_tenant") - .unwrap(); + return Tenant::new_or_err(sandbox_tenant, "create from sandbox_tenant").unwrap(); } } if conf.query.management_mode || self.typ == SessionType::Local { let lock = self.current_tenant.read(); - if !lock.is_empty() { - return Tenant::new_or_error_code( - lock.clone(), - "create from SessionContext.current_tenant", - ) - .unwrap(); + if let Some(tenant) = &*lock { + return tenant.clone(); } } conf.query.tenant_id.clone() } - pub fn set_current_tenant(&self, tenant: String) { + pub fn set_current_tenant(&self, tenant: Tenant) { let mut lock = self.current_tenant.write(); - *lock = tenant; + *lock = Some(tenant); } // Get current user diff --git a/src/query/service/src/table_functions/others/tenant_quota.rs b/src/query/service/src/table_functions/others/tenant_quota.rs index b493d7f604513..13bc2be6b6e28 100644 --- a/src/query/service/src/table_functions/others/tenant_quota.rs +++ b/src/query/service/src/table_functions/others/tenant_quota.rs @@ -245,7 +245,7 @@ impl AsyncSource for TenantQuotaSource { UserOptionFlag::TenantSetting ))); } - tenant = Tenant::new_or_error_code(args[0].clone(), func_name!())?; + tenant = Tenant::new_or_err(args[0].clone(), func_name!())?; } let quota_api = UserApiProvider::instance().tenant_quota_api(&tenant); let res = quota_api.get_quota(MatchSeq::GE(0)).await?; diff --git a/src/query/service/tests/it/sessions/session.rs b/src/query/service/tests/it/sessions/session.rs index a6423b28f8fc2..ab0da6ff134dc 100644 --- a/src/query/service/tests/it/sessions/session.rs +++ b/src/query/service/tests/it/sessions/session.rs @@ -14,6 +14,7 @@ use databend_common_base::base::tokio; use databend_common_exception::Result; +use databend_common_meta_app::tenant::Tenant; use databend_query::sessions::SessionType; use databend_query::test_kits::ConfigBuilder; use databend_query::test_kits::TestFixture; @@ -29,7 +30,7 @@ async fn test_session() -> Result<()> { assert_eq!(actual.name(), "test"); // We are not in management mode, so always get the config tenant. 
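Behind the assertion that follows sits the `Option<Tenant>` now held by `SessionContext`: `None` means "use the tenant from the config", and only management-mode or local sessions ever read a `Some` override. A reduced sketch of that resolution logic, assuming a `parking_lot`-style `RwLock` like the surrounding code uses (struct and method names are illustrative):

    use databend_common_meta_app::tenant::Tenant;
    use parking_lot::RwLock;

    struct TenantSlot {
        // `None` = no per-session override; fall back to the configured tenant.
        current_tenant: RwLock<Option<Tenant>>,
    }

    impl TenantSlot {
        fn get(&self, config_tenant: &Tenant, management_mode: bool) -> Tenant {
            if management_mode {
                if let Some(t) = &*self.current_tenant.read() {
                    return t.clone();
                }
            }
            config_tenant.clone()
        }

        fn set(&self, tenant: Tenant) {
            *self.current_tenant.write() = Some(tenant);
        }
    }

The assertions below exercise exactly that fallback: outside management mode, the override installed by `set_current_tenant` is ignored.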
-        session.set_current_tenant("tenant2".to_string());
+        session.set_current_tenant(Tenant::new_literal("tenant2"));
         let actual = session.get_current_tenant();
         assert_eq!(actual.name(), "test");
     }
@@ -56,7 +57,7 @@ async fn test_session_in_management_mode() -> Result<()> {
         let actual = session.get_current_tenant();
         assert_eq!(actual.name(), "test");
-        session.set_current_tenant("tenant2".to_string());
+        session.set_current_tenant(Tenant::new_literal("tenant2"));
         let actual = session.get_current_tenant();
         assert_eq!(actual.name(), "tenant2");
     }

From 110b1d469e75687c72f5bbfa0162468282ca4b61 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E7=82=8E=E6=B3=BC?=
Date: Wed, 27 Mar 2024 23:28:32 +0800
Subject: [PATCH 3/4] chore: fix unused dep

---
 src/meta/app/Cargo.toml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/meta/app/Cargo.toml b/src/meta/app/Cargo.toml
index d8286ee253807..e446654f4dff4 100644
--- a/src/meta/app/Cargo.toml
+++ b/src/meta/app/Cargo.toml
@@ -24,7 +24,6 @@ anyerror = { workspace = true }
 chrono = { workspace = true }
 chrono-tz = { workspace = true }
 cron = "0.12.0"
-derive_more = { workspace = true }
 enumflags2 = { workspace = true }
 hex = "0.4.3"
 itertools = { workspace = true }

From b5a682c5297c6c92bd5307fbb4b50aad50c406d2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E7=82=8E=E6=B3=BC?=
Date: Thu, 28 Mar 2024 11:35:38 +0800
Subject: [PATCH 4/4] chore: fix conflict

---
 Cargo.lock                                                | 1 -
 src/query/storages/fuse/src/operations/inverted_index.rs | 2 +-
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index a387d5ff3ff02..06557527fd44b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3270,7 +3270,6 @@ dependencies = [
  "databend-common-io",
  "databend-common-meta-kvapi",
  "databend-common-meta-types",
- "derive_more",
  "enumflags2",
  "hex",
  "itertools 0.10.5",

diff --git a/src/query/storages/fuse/src/operations/inverted_index.rs b/src/query/storages/fuse/src/operations/inverted_index.rs
index 396884540dc7f..7dff0ee9d925f 100644
--- a/src/query/storages/fuse/src/operations/inverted_index.rs
+++ b/src/query/storages/fuse/src/operations/inverted_index.rs
@@ -145,7 +145,7 @@ impl AsyncSink for InvertedIndexSink {
         if let Some(share_table_info) = res.share_table_info {
             save_share_table_info(
-                self.ctx.get_tenant().as_str(),
+                self.ctx.get_tenant().name(),
                 self.ctx.get_data_operator()?.operator(),
                 share_table_info,
             )
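One last consequence of the typed tenant, visible in the final hunk: code that used to receive the tenant as a plain string now calls `name()` only at the boundary where a `&str` is genuinely needed (storage paths, protobuf fields, logs). A closing sketch, assuming `Tenant::name` returns the tenant name as `&str` (the `tenant_prefix` helper is hypothetical):

    use databend_common_meta_app::tenant::Tenant;

    // Build a tenant-scoped storage prefix from the typed tenant.
    fn tenant_prefix(tenant: &Tenant) -> String {
        format!("{}/", tenant.name())
    }

    fn main() {
        let tenant = Tenant::new_literal("test");
        assert_eq!(tenant_prefix(&tenant), "test/");
    }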