From ac24e1198682ed02caea5ec6b36b01984350b134 Mon Sep 17 00:00:00 2001
From: Jelte Fennema
Date: Mon, 3 Jul 2023 11:08:24 +0200
Subject: [PATCH 1/5] Change default rebalance strategy to by_disk_size (#7033)

DESCRIPTION: Change default rebalance strategy to by_disk_size

When introducing rebalancing by disk size we didn't make it the default
initially. The main reason was that we expected some problems with it. We have
indeed had some problems/bugs with it over the years, and have fixed all of
them. By now we're quite confident in its stability, and it pretty much always
gives better results than by_shard_count.

So this PR makes by_disk_size the new default. We don't change the default when
some strategy other than by_shard_count is currently the default, in case
someone defined their own rebalance strategy and marked it as the default
themselves.

Note: the downgrade script explicitly does nothing, because there's no way of
knowing whether the rebalance strategy before the upgrade was by_disk_size or
by_shard_count. Even in previous versions, by_disk_size has been considered
superior for quite some time.
---
 .../distributed/sql/citus--11.3-1--12.0-1.sql | 7 +++++++
 .../sql/downgrades/citus--12.0-1--11.3-1.sql | 5 +++++
 src/test/regress/expected/shard_rebalancer.out | 14 ++++++++++++++
 .../regress/expected/single_shard_table_udfs.out | 2 +-
 src/test/regress/sql/shard_rebalancer.sql | 4 ++++
 src/test/regress/sql/single_shard_table_udfs.sql | 2 +-
 6 files changed, 32 insertions(+), 2 deletions(-)

diff --git a/src/backend/distributed/sql/citus--11.3-1--12.0-1.sql b/src/backend/distributed/sql/citus--11.3-1--12.0-1.sql
index 998ffc2be43..a35d772b7e0 100644
--- a/src/backend/distributed/sql/citus--11.3-1--12.0-1.sql
+++ b/src/backend/distributed/sql/citus--11.3-1--12.0-1.sql
@@ -42,3 +42,10 @@ DROP FUNCTION citus_shard_sizes;
 #include "udfs/drop_old_time_partitions/12.0-1.sql"
 
 #include "udfs/get_missing_time_partition_ranges/12.0-1.sql"
+
+-- Update the default rebalance strategy to 'by_disk_size', but only if the
+-- default is currently 'by_shard_count'
+SELECT citus_set_default_rebalance_strategy(name)
+FROM pg_dist_rebalance_strategy
+WHERE name = 'by_disk_size'
+  AND (SELECT default_strategy FROM pg_dist_rebalance_strategy WHERE name = 'by_shard_count');
diff --git a/src/backend/distributed/sql/downgrades/citus--12.0-1--11.3-1.sql b/src/backend/distributed/sql/downgrades/citus--12.0-1--11.3-1.sql
index 1adb4cb7298..4a25cfda095 100644
--- a/src/backend/distributed/sql/downgrades/citus--12.0-1--11.3-1.sql
+++ b/src/backend/distributed/sql/downgrades/citus--12.0-1--11.3-1.sql
@@ -76,3 +76,8 @@ DROP FUNCTION pg_catalog.citus_stat_tenants_local_internal(
 #include "../udfs/drop_old_time_partitions/10.2-1.sql"
 
 #include "../udfs/get_missing_time_partition_ranges/10.2-1.sql"
+
+-- This explicitly does not reset the rebalance strategy to by_shard_count,
+-- because there's no way of knowing if the rebalance strategy before the
+-- upgrade was by_disk_size or by_shard_count. And even in previous versions
+-- by_disk_size is considered superior for quite some time.
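For readers unfamiliar with the rebalancer API used above: the upgrade script
relies only on objects that already exist in Citus, namely the
pg_dist_rebalance_strategy catalog (with its default_strategy flag),
citus_set_default_rebalance_strategy(), and rebalance_table_shards(). A minimal
sketch of how an operator might inspect the default and pick a strategy
explicitly for a single run (illustrative only, not part of this patch):

    -- Show which strategy is currently marked as the default.
    SELECT name, default_strategy FROM pg_dist_rebalance_strategy ORDER BY name;

    -- Rebalance using an explicitly chosen strategy, ignoring the default.
    SELECT rebalance_table_shards(rebalance_strategy := 'by_disk_size');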
diff --git a/src/test/regress/expected/shard_rebalancer.out b/src/test/regress/expected/shard_rebalancer.out index 62ae17487ac..b8f4010b14a 100644 --- a/src/test/regress/expected/shard_rebalancer.out +++ b/src/test/regress/expected/shard_rebalancer.out @@ -3,6 +3,14 @@ -- SET citus.next_shard_id TO 433000; SET citus.propagate_session_settings_for_loopback_connection TO ON; +-- Because of historic reasons this test was written in a way that assumes that +-- by_shard_count is the default strategy. +SELECT citus_set_default_rebalance_strategy('by_shard_count'); + citus_set_default_rebalance_strategy +--------------------------------------------------------------------- + +(1 row) + -- Lower the minimum disk size that a shard group is considered as. Otherwise -- we need to create shards of more than 100MB. ALTER SYSTEM SET citus.rebalancer_by_disk_size_base_cost = 0; @@ -2863,6 +2871,12 @@ select 1 from citus_add_node('localhost', :worker_2_port); select rebalance_table_shards(); ERROR: cannot use logical replication to transfer shards of the relation table_without_primary_key since it doesn't have a REPLICA IDENTITY or PRIMARY KEY DROP TABLE table_with_primary_key, table_without_primary_key; +SELECT citus_set_default_rebalance_strategy('by_disk_size'); + citus_set_default_rebalance_strategy +--------------------------------------------------------------------- + +(1 row) + ALTER SYSTEM RESET citus.rebalancer_by_disk_size_base_cost; SELECT pg_reload_conf(); pg_reload_conf diff --git a/src/test/regress/expected/single_shard_table_udfs.out b/src/test/regress/expected/single_shard_table_udfs.out index 3b73473b012..d49027b60d6 100644 --- a/src/test/regress/expected/single_shard_table_udfs.out +++ b/src/test/regress/expected/single_shard_table_udfs.out @@ -334,7 +334,7 @@ ERROR: Table 'single_shard_table_col2_1' is streaming replicated. Shards of str SELECT citus_copy_shard_placement(1820005, :worker_1_node, :worker_2_node); ERROR: Table 'single_shard_table_col2_1' is streaming replicated. Shards of streaming replicated tables cannot be copied -- no changes because it's already balanced -SELECT rebalance_table_shards(); +SELECT rebalance_table_shards(rebalance_strategy := 'by_shard_count'); rebalance_table_shards --------------------------------------------------------------------- diff --git a/src/test/regress/sql/shard_rebalancer.sql b/src/test/regress/sql/shard_rebalancer.sql index ba22a8abdca..d64fb68260e 100644 --- a/src/test/regress/sql/shard_rebalancer.sql +++ b/src/test/regress/sql/shard_rebalancer.sql @@ -5,6 +5,9 @@ SET citus.next_shard_id TO 433000; SET citus.propagate_session_settings_for_loopback_connection TO ON; +-- Because of historic reasons this test was written in a way that assumes that +-- by_shard_count is the default strategy. +SELECT citus_set_default_rebalance_strategy('by_shard_count'); -- Lower the minimum disk size that a shard group is considered as. Otherwise -- we need to create shards of more than 100MB. 
ALTER SYSTEM SET citus.rebalancer_by_disk_size_base_cost = 0; @@ -1574,6 +1577,7 @@ select 1 from citus_add_node('localhost', :worker_2_port); select rebalance_table_shards(); DROP TABLE table_with_primary_key, table_without_primary_key; +SELECT citus_set_default_rebalance_strategy('by_disk_size'); ALTER SYSTEM RESET citus.rebalancer_by_disk_size_base_cost; SELECT pg_reload_conf(); \c - - - :worker_1_port diff --git a/src/test/regress/sql/single_shard_table_udfs.sql b/src/test/regress/sql/single_shard_table_udfs.sql index 615e566db95..7566f53e355 100644 --- a/src/test/regress/sql/single_shard_table_udfs.sql +++ b/src/test/regress/sql/single_shard_table_udfs.sql @@ -160,7 +160,7 @@ SELECT master_copy_shard_placement(1820005, 'localhost', :worker_1_port, 'localh SELECT citus_copy_shard_placement(1820005, :worker_1_node, :worker_2_node); -- no changes because it's already balanced -SELECT rebalance_table_shards(); +SELECT rebalance_table_shards(rebalance_strategy := 'by_shard_count'); -- same placements SELECT shardid, nodeport FROM pg_dist_shard_placement WHERE shardid > 1820000 ORDER BY shardid; From e0d347652650ec75afdfe4683aaca8a481cacd1a Mon Sep 17 00:00:00 2001 From: Gokhan Gulbiz Date: Mon, 3 Jul 2023 13:08:03 +0300 Subject: [PATCH 2/5] Add locking mechanism for tenant monitoring probabilistic approach (#7026) This PR * Addresses a concurrency issue in the probabilistic approach of tenant monitoring by acquiring a shared lock for tenant existence checks. * Changes `citus.stat_tenants_sample_rate_for_new_tenants` type to double * Renames `citus.stat_tenants_sample_rate_for_new_tenants` to `citus.stat_tenants_untracked_sample_rate` --- src/backend/distributed/shared_library_init.c | 21 ++-- .../distributed/utils/citus_stat_tenants.c | 22 +++- .../distributed/utils/citus_stat_tenants.h | 2 +- .../regress/expected/citus_stat_tenants.out | 106 +++++++++++++++++- src/test/regress/sql/citus_stat_tenants.sql | 45 +++++++- 5 files changed, 177 insertions(+), 19 deletions(-) diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index 2fbe2f5f570..2493a8ea9e9 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -2489,17 +2489,6 @@ RegisterCitusConfigVariables(void) GUC_STANDARD, NULL, NULL, NULL); - - DefineCustomIntVariable( - "citus.stat_tenants_sample_rate_for_new_tenants", - gettext_noop("Sampling rate for new tenants in citus_stat_tenants."), - NULL, - &StatTenantsSampleRateForNewTenants, - 100, 1, 100, - PGC_USERSET, - GUC_STANDARD, - NULL, NULL, NULL); - DefineCustomEnumVariable( "citus.stat_tenants_track", gettext_noop("Enables/Disables the stats collection for citus_stat_tenants."), @@ -2513,6 +2502,16 @@ RegisterCitusConfigVariables(void) GUC_STANDARD, NULL, NULL, NULL); + DefineCustomRealVariable( + "citus.stat_tenants_untracked_sample_rate", + gettext_noop("Sampling rate for new tenants in citus_stat_tenants."), + NULL, + &StatTenantsSampleRateForNewTenants, + 1, 0, 1, + PGC_USERSET, + GUC_STANDARD, + NULL, NULL, NULL); + DefineCustomBoolVariable( "citus.subquery_pushdown", gettext_noop("Usage of this GUC is highly discouraged, please read the long " diff --git a/src/backend/distributed/utils/citus_stat_tenants.c b/src/backend/distributed/utils/citus_stat_tenants.c index 2f174f7645a..aa813e15243 100644 --- a/src/backend/distributed/utils/citus_stat_tenants.c +++ b/src/backend/distributed/utils/citus_stat_tenants.c @@ -36,6 +36,10 @@ #include +#if (PG_VERSION_NUM >= 
PG_VERSION_15) + #include "common/pg_prng.h" +#endif + static void AttributeMetricsIfApplicable(void); ExecutorEnd_hook_type prev_ExecutorEnd = NULL; @@ -80,7 +84,7 @@ int StatTenantsLogLevel = CITUS_LOG_LEVEL_OFF; int StatTenantsPeriod = (time_t) 60; int StatTenantsLimit = 100; int StatTenantsTrack = STAT_TENANTS_TRACK_NONE; -int StatTenantsSampleRateForNewTenants = 100; +double StatTenantsSampleRateForNewTenants = 1; PG_FUNCTION_INFO_V1(citus_stat_tenants_local); PG_FUNCTION_INFO_V1(citus_stat_tenants_local_reset); @@ -281,13 +285,25 @@ AttributeTask(char *tenantId, int colocationId, CmdType commandType) MultiTenantMonitor *monitor = GetMultiTenantMonitor(); bool found = false; + + /* Acquire the lock in shared mode to check if the tenant is already in the hash table. */ + LWLockAcquire(&monitor->lock, LW_SHARED); + hash_search(monitor->tenants, &key, HASH_FIND, &found); + LWLockRelease(&monitor->lock); + /* If the tenant is not found in the hash table, we will track the query with a probability of StatTenantsSampleRateForNewTenants. */ if (!found) { - int randomValue = rand() % 100; - bool shouldTrackQuery = randomValue < StatTenantsSampleRateForNewTenants; +#if (PG_VERSION_NUM >= PG_VERSION_15) + double randomValue = pg_prng_double(&pg_global_prng_state); +#else + + /* Generate a random double between 0 and 1 */ + double randomValue = (double) random() / MAX_RANDOM_VALUE; +#endif + bool shouldTrackQuery = randomValue <= StatTenantsSampleRateForNewTenants; if (!shouldTrackQuery) { return; diff --git a/src/include/distributed/utils/citus_stat_tenants.h b/src/include/distributed/utils/citus_stat_tenants.h index 8dac393a8f1..0a482b2417b 100644 --- a/src/include/distributed/utils/citus_stat_tenants.h +++ b/src/include/distributed/utils/citus_stat_tenants.h @@ -121,6 +121,6 @@ extern int StatTenantsLogLevel; extern int StatTenantsPeriod; extern int StatTenantsLimit; extern int StatTenantsTrack; -extern int StatTenantsSampleRateForNewTenants; +extern double StatTenantsSampleRateForNewTenants; #endif /*CITUS_ATTRIBUTE_H */ diff --git a/src/test/regress/expected/citus_stat_tenants.out b/src/test/regress/expected/citus_stat_tenants.out index e7ddadb714f..14582e55b4d 100644 --- a/src/test/regress/expected/citus_stat_tenants.out +++ b/src/test/regress/expected/citus_stat_tenants.out @@ -240,12 +240,21 @@ SELECT tenant_attribute, query_count_in_this_period, score FROM citus_stat_tenan (5 rows) -- test period passing +\c - - - :worker_1_port +SET search_path TO citus_stat_tenants; +SET citus.stat_tenants_period TO 2; SELECT citus_stat_tenants_reset(); citus_stat_tenants_reset --------------------------------------------------------------------- (1 row) +SELECT sleep_until_next_period(); + sleep_until_next_period +--------------------------------------------------------------------- + +(1 row) + SELECT count(*)>=0 FROM dist_tbl WHERE a = 1; ?column? 
--------------------------------------------------------------------- @@ -253,7 +262,6 @@ SELECT count(*)>=0 FROM dist_tbl WHERE a = 1; (1 row) INSERT INTO dist_tbl VALUES (5, 'abcd'); -\c - - - :worker_1_port SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period, (cpu_usage_in_this_period>0) AS cpu_is_used_in_this_period, (cpu_usage_in_last_period>0) AS cpu_is_used_in_last_period FROM citus_stat_tenants_local @@ -265,13 +273,18 @@ ORDER BY tenant_attribute; (2 rows) -- simulate passing the period -SET citus.stat_tenants_period TO 5; SELECT sleep_until_next_period(); sleep_until_next_period --------------------------------------------------------------------- (1 row) +SELECT pg_sleep(1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period, (cpu_usage_in_this_period>0) AS cpu_is_used_in_this_period, (cpu_usage_in_last_period>0) AS cpu_is_used_in_last_period FROM citus_stat_tenants_local @@ -288,6 +301,12 @@ SELECT sleep_until_next_period(); (1 row) +SELECT pg_sleep(1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period, (cpu_usage_in_this_period>0) AS cpu_is_used_in_this_period, (cpu_usage_in_last_period>0) AS cpu_is_used_in_last_period FROM citus_stat_tenants_local @@ -1009,6 +1028,89 @@ SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, q \c - - - :master_port SET search_path TO citus_stat_tenants; +SET citus.enable_schema_based_sharding TO OFF; +SELECT citus_stat_tenants_reset(); + citus_stat_tenants_reset +--------------------------------------------------------------------- + +(1 row) + +-- test sampling +-- set rate to 0 to disable sampling +SELECT result FROM run_command_on_all_nodes('ALTER SYSTEM set citus.stat_tenants_untracked_sample_rate to 0;'); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes('SELECT pg_reload_conf()'); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +INSERT INTO dist_tbl VALUES (1, 'abcd'); +INSERT INTO dist_tbl VALUES (2, 'abcd'); +UPDATE dist_tbl SET b = a + 1 WHERE a = 3; +UPDATE dist_tbl SET b = a + 1 WHERE a = 4; +DELETE FROM dist_tbl WHERE a = 5; +SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period FROM citus_stat_tenants ORDER BY tenant_attribute; + tenant_attribute | read_count_in_this_period | read_count_in_last_period | query_count_in_this_period | query_count_in_last_period +--------------------------------------------------------------------- +(0 rows) + +-- test sampling +-- set rate to 1 to track all tenants +SELECT result FROM run_command_on_all_nodes('ALTER SYSTEM set citus.stat_tenants_untracked_sample_rate to 1;'); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes('SELECT pg_reload_conf()'); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + 
+SELECT sleep_until_next_period(); + sleep_until_next_period +--------------------------------------------------------------------- + +(1 row) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO dist_tbl VALUES (1, 'abcd'); +INSERT INTO dist_tbl VALUES (2, 'abcd'); +UPDATE dist_tbl SET b = a + 1 WHERE a = 3; +UPDATE dist_tbl SET b = a + 1 WHERE a = 4; +DELETE FROM dist_tbl WHERE a = 5; +SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period, + (cpu_usage_in_this_period>0) AS cpu_is_used_in_this_period, (cpu_usage_in_last_period>0) AS cpu_is_used_in_last_period +FROM citus_stat_tenants(true) +ORDER BY tenant_attribute; + tenant_attribute | read_count_in_this_period | read_count_in_last_period | query_count_in_this_period | query_count_in_last_period | cpu_is_used_in_this_period | cpu_is_used_in_last_period +--------------------------------------------------------------------- + 1 | 0 | 0 | 1 | 0 | t | f + 2 | 0 | 0 | 1 | 0 | t | f + 3 | 0 | 0 | 1 | 0 | t | f + 4 | 0 | 0 | 1 | 0 | t | f + 5 | 0 | 0 | 1 | 0 | t | f +(5 rows) + SET client_min_messages TO ERROR; DROP SCHEMA citus_stat_tenants CASCADE; DROP SCHEMA citus_stat_tenants_t1 CASCADE; diff --git a/src/test/regress/sql/citus_stat_tenants.sql b/src/test/regress/sql/citus_stat_tenants.sql index 9160b34996a..2e4ed145048 100644 --- a/src/test/regress/sql/citus_stat_tenants.sql +++ b/src/test/regress/sql/citus_stat_tenants.sql @@ -83,20 +83,24 @@ SELECT count(*)>=0 FROM dist_tbl_text WHERE a = 'defg'; SELECT tenant_attribute, query_count_in_this_period, score FROM citus_stat_tenants(true) WHERE nodeid = :worker_2_nodeid ORDER BY score DESC, tenant_attribute; -- test period passing +\c - - - :worker_1_port + +SET search_path TO citus_stat_tenants; +SET citus.stat_tenants_period TO 2; SELECT citus_stat_tenants_reset(); +SELECT sleep_until_next_period(); SELECT count(*)>=0 FROM dist_tbl WHERE a = 1; INSERT INTO dist_tbl VALUES (5, 'abcd'); -\c - - - :worker_1_port SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period, (cpu_usage_in_this_period>0) AS cpu_is_used_in_this_period, (cpu_usage_in_last_period>0) AS cpu_is_used_in_last_period FROM citus_stat_tenants_local ORDER BY tenant_attribute; -- simulate passing the period -SET citus.stat_tenants_period TO 5; SELECT sleep_until_next_period(); +SELECT pg_sleep(1); SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period, (cpu_usage_in_this_period>0) AS cpu_is_used_in_this_period, (cpu_usage_in_last_period>0) AS cpu_is_used_in_last_period @@ -104,6 +108,7 @@ FROM citus_stat_tenants_local ORDER BY tenant_attribute; SELECT sleep_until_next_period(); +SELECT pg_sleep(1); SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period, (cpu_usage_in_this_period>0) AS cpu_is_used_in_this_period, (cpu_usage_in_last_period>0) AS cpu_is_used_in_last_period @@ -377,6 +382,42 @@ SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, q \c - - - :master_port SET search_path TO citus_stat_tenants; +SET citus.enable_schema_based_sharding TO OFF; + +SELECT citus_stat_tenants_reset(); + +-- test sampling +-- set rate to 0 to disable sampling +SELECT result FROM run_command_on_all_nodes('ALTER SYSTEM set 
citus.stat_tenants_untracked_sample_rate to 0;'); +SELECT result FROM run_command_on_all_nodes('SELECT pg_reload_conf()'); + +INSERT INTO dist_tbl VALUES (1, 'abcd'); +INSERT INTO dist_tbl VALUES (2, 'abcd'); +UPDATE dist_tbl SET b = a + 1 WHERE a = 3; +UPDATE dist_tbl SET b = a + 1 WHERE a = 4; +DELETE FROM dist_tbl WHERE a = 5; + +SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period FROM citus_stat_tenants ORDER BY tenant_attribute; + +-- test sampling +-- set rate to 1 to track all tenants +SELECT result FROM run_command_on_all_nodes('ALTER SYSTEM set citus.stat_tenants_untracked_sample_rate to 1;'); +SELECT result FROM run_command_on_all_nodes('SELECT pg_reload_conf()'); + +SELECT sleep_until_next_period(); +SELECT pg_sleep(0.1); + +INSERT INTO dist_tbl VALUES (1, 'abcd'); +INSERT INTO dist_tbl VALUES (2, 'abcd'); +UPDATE dist_tbl SET b = a + 1 WHERE a = 3; +UPDATE dist_tbl SET b = a + 1 WHERE a = 4; +DELETE FROM dist_tbl WHERE a = 5; + +SELECT tenant_attribute, read_count_in_this_period, read_count_in_last_period, query_count_in_this_period, query_count_in_last_period, + (cpu_usage_in_this_period>0) AS cpu_is_used_in_this_period, (cpu_usage_in_last_period>0) AS cpu_is_used_in_last_period +FROM citus_stat_tenants(true) +ORDER BY tenant_attribute; + SET client_min_messages TO ERROR; DROP SCHEMA citus_stat_tenants CASCADE; DROP SCHEMA citus_stat_tenants_t1 CASCADE; From 5051be86ff02d05751d8908aad3624abd584e944 Mon Sep 17 00:00:00 2001 From: Ahmet Gedemenli Date: Tue, 4 Jul 2023 15:19:07 +0300 Subject: [PATCH 3/5] Skip distributed schema insertion into pg_dist_schema, if already exists (#7044) Inserting into `pg_dist_schema` causes unexpected duplicate key errors, for distributed schemas that already exist. With this commit we skip the insertion if the schema already exists in `pg_dist_schema`. The error: ```sql SET citus.enable_schema_based_sharding TO ON; CREATE SCHEMA sc2; CREATE SCHEMA IF NOT EXISTS sc2; NOTICE: schema "sc2" already exists, skipping ERROR: duplicate key value violates unique constraint "pg_dist_schema_pkey" DETAIL: Key (schemaid)=(17294) already exists. ``` fixes: #7042 --- src/backend/distributed/commands/schema.c | 35 ++++++++++++------- .../expected/schema_based_sharding.out | 2 ++ .../regress/sql/schema_based_sharding.sql | 1 + 3 files changed, 25 insertions(+), 13 deletions(-) diff --git a/src/backend/distributed/commands/schema.c b/src/backend/distributed/commands/schema.c index 966a264d628..6eaacc993d2 100644 --- a/src/backend/distributed/commands/schema.c +++ b/src/backend/distributed/commands/schema.c @@ -103,20 +103,29 @@ PostprocessCreateSchemaStmt(Node *node, const char *queryString) } /* - * Register the tenant schema on the coordinator and save the command - * to register it on the workers. + * Skip if the schema is already inserted into pg_dist_schema. + * This could occur when trying to create an already existing schema, + * with IF NOT EXISTS clause. 
 	 */
-	int shardCount = 1;
-	int replicationFactor = 1;
-	Oid distributionColumnType = InvalidOid;
-	Oid distributionColumnCollation = InvalidOid;
-	uint32 colocationId = CreateColocationGroup(
-		shardCount, replicationFactor, distributionColumnType,
-		distributionColumnCollation);
-
-	InsertTenantSchemaLocally(schemaId, colocationId);
-
-	commands = lappend(commands, TenantSchemaInsertCommand(schemaId, colocationId));
+	if (!IsTenantSchema(schemaId))
+	{
+		/*
+		 * Register the tenant schema on the coordinator and save the command
+		 * to register it on the workers.
+		 */
+		int shardCount = 1;
+		int replicationFactor = 1;
+		Oid distributionColumnType = InvalidOid;
+		Oid distributionColumnCollation = InvalidOid;
+		uint32 colocationId = CreateColocationGroup(
+			shardCount, replicationFactor, distributionColumnType,
+			distributionColumnCollation);
+
+		InsertTenantSchemaLocally(schemaId, colocationId);
+
+		commands = lappend(commands, TenantSchemaInsertCommand(schemaId,
+															   colocationId));
+	}
 	}
 
 	commands = lappend(commands, ENABLE_DDL_PROPAGATION);
diff --git a/src/test/regress/expected/schema_based_sharding.out b/src/test/regress/expected/schema_based_sharding.out
index d7b7b4710cf..c7e515df4de 100644
--- a/src/test/regress/expected/schema_based_sharding.out
+++ b/src/test/regress/expected/schema_based_sharding.out
@@ -50,6 +50,8 @@ SELECT COUNT(*)=0 FROM pg_dist_schema WHERE schemaid::regnamespace::text = 'regu
 
 -- empty tenant
 CREATE SCHEMA "tenant\'_1";
+CREATE SCHEMA IF NOT EXISTS "tenant\'_1";
+NOTICE: schema "tenant\'_1" already exists, skipping
 -- non-empty tenant
 CREATE SCHEMA "tenant\'_2";
 CREATE TABLE "tenant\'_2".test_table(a int, b text);
diff --git a/src/test/regress/sql/schema_based_sharding.sql b/src/test/regress/sql/schema_based_sharding.sql
index 1e520833211..dd0c20d1c1c 100644
--- a/src/test/regress/sql/schema_based_sharding.sql
+++ b/src/test/regress/sql/schema_based_sharding.sql
@@ -33,6 +33,7 @@ SELECT COUNT(*)=0 FROM pg_dist_schema WHERE schemaid::regnamespace::text = 'regu
 
 -- empty tenant
 CREATE SCHEMA "tenant\'_1";
+CREATE SCHEMA IF NOT EXISTS "tenant\'_1";
 
 -- non-empty tenant
 CREATE SCHEMA "tenant\'_2";

From 719d92c8b9745cdacd22b7cf3812183f33107435 Mon Sep 17 00:00:00 2001
From: aykut-bozkurt <51649454+aykut-bozkurt@users.noreply.github.com>
Date: Tue, 4 Jul 2023 17:28:03 +0300
Subject: [PATCH 4/5] mat view should not be converted to tenant table (#7043)

We allow materialized views to exist in a distributed schema, but they should
not be converted to tenant tables since they cannot be distributed.

Fixes https://github.com/citusdata/citus/issues/7041
---
 src/backend/distributed/commands/table.c | 9 ++++++
 .../expected/schema_based_sharding.out | 28 ++++++++++++++++++-
 .../regress/sql/schema_based_sharding.sql | 20 ++++++++++++-
 3 files changed, 55 insertions(+), 2 deletions(-)

diff --git a/src/backend/distributed/commands/table.c b/src/backend/distributed/commands/table.c
index cd26a741f74..390a81286e2 100644
--- a/src/backend/distributed/commands/table.c
+++ b/src/backend/distributed/commands/table.c
@@ -4154,6 +4154,15 @@ ConvertNewTableIfNecessary(Node *createStmt)
 		return;
 	}
 
+	/*
+	 * We allow mat views in a distributed schema but do not make them a tenant
+	 * table. We should skip converting them.
+ */ + if (get_rel_relkind(createdRelationId) == RELKIND_MATVIEW) + { + return; + } + CreateTenantSchemaTable(createdRelationId); } diff --git a/src/test/regress/expected/schema_based_sharding.out b/src/test/regress/expected/schema_based_sharding.out index c7e515df4de..4493f96141a 100644 --- a/src/test/regress/expected/schema_based_sharding.out +++ b/src/test/regress/expected/schema_based_sharding.out @@ -1694,8 +1694,34 @@ $$); t (3 rows) +-- mat view can be created under tenant schema +SET citus.enable_schema_based_sharding TO ON; +SET citus.shard_replication_factor TO 1; +CREATE SCHEMA sc1; +CREATE TABLE sc1.t1 (a int); +CREATE MATERIALIZED VIEW sc1.v1 AS SELECT * FROM sc1.t1; +SET citus.enable_schema_based_sharding TO OFF; +-- on coordinator, verify that schema is distributed +SELECT colocationid > 0 FROM pg_dist_schema +WHERE schemaid::regnamespace::text = 'sc1'; + ?column? +--------------------------------------------------------------------- + t +(1 row) + +-- on workers, verify that schema is distributed +SELECT result FROM run_command_on_workers($$ + SELECT array_agg(colocationid > 0) FROM pg_dist_schema + WHERE schemaid::regnamespace::text = 'sc1' +$$); + result +--------------------------------------------------------------------- + {t} + {t} +(2 rows) + SET client_min_messages TO WARNING; -DROP SCHEMA regular_schema, tenant_3, tenant_5, tenant_7, tenant_6, type_sch, citus_sch1, citus_sch2, citus_empty_sch1, citus_empty_sch2, authschema CASCADE; +DROP SCHEMA regular_schema, tenant_3, tenant_5, tenant_7, tenant_6, type_sch, citus_sch1, citus_sch2, citus_empty_sch1, citus_empty_sch2, authschema, sc1 CASCADE; DROP ROLE citus_schema_role, citus_schema_nonpri, authschema; SELECT citus_remove_node('localhost', :master_port); citus_remove_node diff --git a/src/test/regress/sql/schema_based_sharding.sql b/src/test/regress/sql/schema_based_sharding.sql index dd0c20d1c1c..2b9bbf51627 100644 --- a/src/test/regress/sql/schema_based_sharding.sql +++ b/src/test/regress/sql/schema_based_sharding.sql @@ -1154,8 +1154,26 @@ SELECT result FROM run_command_on_all_nodes($$ WHERE schemaid::regnamespace::text = 'authschema'; $$); +-- mat view can be created under tenant schema +SET citus.enable_schema_based_sharding TO ON; +SET citus.shard_replication_factor TO 1; +CREATE SCHEMA sc1; +CREATE TABLE sc1.t1 (a int); +CREATE MATERIALIZED VIEW sc1.v1 AS SELECT * FROM sc1.t1; +SET citus.enable_schema_based_sharding TO OFF; + +-- on coordinator, verify that schema is distributed +SELECT colocationid > 0 FROM pg_dist_schema +WHERE schemaid::regnamespace::text = 'sc1'; + +-- on workers, verify that schema is distributed +SELECT result FROM run_command_on_workers($$ + SELECT array_agg(colocationid > 0) FROM pg_dist_schema + WHERE schemaid::regnamespace::text = 'sc1' +$$); + SET client_min_messages TO WARNING; -DROP SCHEMA regular_schema, tenant_3, tenant_5, tenant_7, tenant_6, type_sch, citus_sch1, citus_sch2, citus_empty_sch1, citus_empty_sch2, authschema CASCADE; +DROP SCHEMA regular_schema, tenant_3, tenant_5, tenant_7, tenant_6, type_sch, citus_sch1, citus_sch2, citus_empty_sch1, citus_empty_sch2, authschema, sc1 CASCADE; DROP ROLE citus_schema_role, citus_schema_nonpri, authschema; SELECT citus_remove_node('localhost', :master_port); From 613cced1ae323171679cdcc4815e0b4317174408 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Halil=20Ozan=20Akg=C3=BCl?= Date: Wed, 5 Jul 2023 11:40:34 +0300 Subject: [PATCH 5/5] Use citus_shard_sizes in citus_tables (#7018) Fixes #7019 This PR updates citus_tables view to use 
citus_shard_sizes function, instead of citus_total_relation_size to improve performance. --- .../distributed/metadata/metadata_utility.c | 54 +++++++++++++------ .../distributed/sql/citus--11.3-1--12.0-1.sql | 2 + .../sql/downgrades/citus--12.0-1--11.3-1.sql | 3 ++ .../sql/udfs/citus_tables/12.0-1.sql | 9 +++- .../sql/udfs/citus_tables/latest.sql | 9 +++- .../citus_update_table_statistics.out | 12 ++--- 6 files changed, 64 insertions(+), 25 deletions(-) diff --git a/src/backend/distributed/metadata/metadata_utility.c b/src/backend/distributed/metadata/metadata_utility.c index 53a963029c2..6fc22473819 100644 --- a/src/backend/distributed/metadata/metadata_utility.c +++ b/src/backend/distributed/metadata/metadata_utility.c @@ -91,7 +91,8 @@ static bool DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId, SizeQueryType sizeQueryType, bool failOnError, uint64 *tableSize); static List * ShardIntervalsOnWorkerGroup(WorkerNode *workerNode, Oid relationId); -static char * GenerateShardStatisticsQueryForShardList(List *shardIntervalList); +static char * GenerateShardIdNameValuesForShardList(List *shardIntervalList, + bool firstValue); static char * GenerateSizeQueryForRelationNameList(List *quotedShardNames, char *sizeFunction); static char * GetWorkerPartitionedSizeUDFNameBySizeQueryType(SizeQueryType sizeQueryType); @@ -104,7 +105,7 @@ static List * OpenConnectionToNodes(List *workerNodeList); static void ReceiveShardIdAndSizeResults(List *connectionList, Tuplestorestate *tupleStore, TupleDesc tupleDescriptor); -static void AppendShardSizeQuery(StringInfo selectQuery, ShardInterval *shardInterval); +static void AppendShardIdNameValues(StringInfo selectQuery, ShardInterval *shardInterval); static HeapTuple CreateDiskSpaceTuple(TupleDesc tupleDesc, uint64 availableBytes, uint64 totalBytes); @@ -916,6 +917,12 @@ static char * GenerateAllShardStatisticsQueryForNode(WorkerNode *workerNode, List *citusTableIds) { StringInfo allShardStatisticsQuery = makeStringInfo(); + bool insertedValues = false; + + appendStringInfoString(allShardStatisticsQuery, "SELECT shard_id, "); + appendStringInfo(allShardStatisticsQuery, PG_TOTAL_RELATION_SIZE_FUNCTION, + "table_name"); + appendStringInfoString(allShardStatisticsQuery, " FROM (VALUES "); Oid relationId = InvalidOid; foreach_oid(relationId, citusTableIds) @@ -930,34 +937,49 @@ GenerateAllShardStatisticsQueryForNode(WorkerNode *workerNode, List *citusTableI { List *shardIntervalsOnNode = ShardIntervalsOnWorkerGroup(workerNode, relationId); - char *shardStatisticsQuery = - GenerateShardStatisticsQueryForShardList(shardIntervalsOnNode); - appendStringInfoString(allShardStatisticsQuery, shardStatisticsQuery); + if (list_length(shardIntervalsOnNode) == 0) + { + relation_close(relation, AccessShareLock); + continue; + } + char *shardIdNameValues = + GenerateShardIdNameValuesForShardList(shardIntervalsOnNode, + !insertedValues); + insertedValues = true; + appendStringInfoString(allShardStatisticsQuery, shardIdNameValues); relation_close(relation, AccessShareLock); } } - /* Add a dummy entry so that UNION ALL doesn't complain */ - appendStringInfo(allShardStatisticsQuery, "SELECT 0::bigint, 0::bigint;"); + if (!insertedValues) + { + return "SELECT 0 AS shard_id, '' AS table_name LIMIT 0"; + } + appendStringInfoString(allShardStatisticsQuery, ") t(shard_id, table_name) " + "WHERE to_regclass(table_name) IS NOT NULL"); return allShardStatisticsQuery->data; } /* - * GenerateShardStatisticsQueryForShardList generates a query that returns: - * SELECT 
shard_id, shard_name, shard_size for all shards in the list + * GenerateShardIdNameValuesForShardList generates a list of (shard_id, shard_name) values + * for all shards in the list */ static char * -GenerateShardStatisticsQueryForShardList(List *shardIntervalList) +GenerateShardIdNameValuesForShardList(List *shardIntervalList, bool firstValue) { StringInfo selectQuery = makeStringInfo(); ShardInterval *shardInterval = NULL; foreach_ptr(shardInterval, shardIntervalList) { - AppendShardSizeQuery(selectQuery, shardInterval); - appendStringInfo(selectQuery, " UNION ALL "); + if (!firstValue) + { + appendStringInfoString(selectQuery, ", "); + } + firstValue = false; + AppendShardIdNameValues(selectQuery, shardInterval); } return selectQuery->data; @@ -965,11 +987,10 @@ GenerateShardStatisticsQueryForShardList(List *shardIntervalList) /* - * AppendShardSizeQuery appends a query in the following form to selectQuery - * SELECT shard_id, shard_name, shard_size + * AppendShardIdNameValues appends (shard_id, shard_name) for shard */ static void -AppendShardSizeQuery(StringInfo selectQuery, ShardInterval *shardInterval) +AppendShardIdNameValues(StringInfo selectQuery, ShardInterval *shardInterval) { uint64 shardId = shardInterval->shardId; Oid schemaId = get_rel_namespace(shardInterval->relationId); @@ -981,8 +1002,7 @@ AppendShardSizeQuery(StringInfo selectQuery, ShardInterval *shardInterval) char *shardQualifiedName = quote_qualified_identifier(schemaName, shardName); char *quotedShardName = quote_literal_cstr(shardQualifiedName); - appendStringInfo(selectQuery, "SELECT " UINT64_FORMAT " AS shard_id, ", shardId); - appendStringInfo(selectQuery, PG_TOTAL_RELATION_SIZE_FUNCTION, quotedShardName); + appendStringInfo(selectQuery, "(" UINT64_FORMAT ", %s)", shardId, quotedShardName); } diff --git a/src/backend/distributed/sql/citus--11.3-1--12.0-1.sql b/src/backend/distributed/sql/citus--11.3-1--12.0-1.sql index a35d772b7e0..fac95dbd4b6 100644 --- a/src/backend/distributed/sql/citus--11.3-1--12.0-1.sql +++ b/src/backend/distributed/sql/citus--11.3-1--12.0-1.sql @@ -25,6 +25,8 @@ GRANT SELECT ON pg_catalog.pg_dist_schema TO public; #include "udfs/citus_drop_trigger/12.0-1.sql" DROP VIEW citus_shards; +DROP VIEW IF EXISTS pg_catalog.citus_tables; +DROP VIEW IF EXISTS public.citus_tables; DROP FUNCTION citus_shard_sizes; #include "udfs/citus_shard_sizes/12.0-1.sql" diff --git a/src/backend/distributed/sql/downgrades/citus--12.0-1--11.3-1.sql b/src/backend/distributed/sql/downgrades/citus--12.0-1--11.3-1.sql index 4a25cfda095..c391837f4b0 100644 --- a/src/backend/distributed/sql/downgrades/citus--12.0-1--11.3-1.sql +++ b/src/backend/distributed/sql/downgrades/citus--12.0-1--11.3-1.sql @@ -47,6 +47,9 @@ DROP FUNCTION pg_catalog.citus_internal_unregister_tenant_schema_globally(Oid, t DROP VIEW IF EXISTS public.citus_schemas; DROP VIEW IF EXISTS pg_catalog.citus_schemas; +DROP VIEW IF EXISTS public.citus_tables; +DROP VIEW IF EXISTS pg_catalog.citus_tables; + DROP VIEW pg_catalog.citus_shards; DROP FUNCTION pg_catalog.citus_shard_sizes; #include "../udfs/citus_shard_sizes/10.0-1.sql" diff --git a/src/backend/distributed/sql/udfs/citus_tables/12.0-1.sql b/src/backend/distributed/sql/udfs/citus_tables/12.0-1.sql index 608fe838700..f4cf70eebc8 100644 --- a/src/backend/distributed/sql/udfs/citus_tables/12.0-1.sql +++ b/src/backend/distributed/sql/udfs/citus_tables/12.0-1.sql @@ -14,7 +14,7 @@ citus_tables_create_query=$CTCQ$ END AS citus_table_type, coalesce(column_to_column_name(logicalrelid, partkey), '') AS 
distribution_column, colocationid AS colocation_id, - pg_size_pretty(citus_total_relation_size(logicalrelid, fail_on_error := false)) AS table_size, + pg_size_pretty(table_sizes.table_size) AS table_size, (select count(*) from pg_dist_shard where logicalrelid = p.logicalrelid) AS shard_count, pg_get_userbyid(relowner) AS table_owner, amname AS access_method @@ -24,6 +24,13 @@ citus_tables_create_query=$CTCQ$ pg_class c ON (p.logicalrelid = c.oid) LEFT JOIN pg_am a ON (a.oid = c.relam) + JOIN + ( + SELECT ds.logicalrelid AS table_id, SUM(css.size) AS table_size + FROM citus_shard_sizes() css, pg_dist_shard ds + WHERE css.shard_id = ds.shardid + GROUP BY ds.logicalrelid + ) table_sizes ON (table_sizes.table_id = p.logicalrelid) WHERE -- filter out tables owned by extensions logicalrelid NOT IN ( diff --git a/src/backend/distributed/sql/udfs/citus_tables/latest.sql b/src/backend/distributed/sql/udfs/citus_tables/latest.sql index 608fe838700..f4cf70eebc8 100644 --- a/src/backend/distributed/sql/udfs/citus_tables/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_tables/latest.sql @@ -14,7 +14,7 @@ citus_tables_create_query=$CTCQ$ END AS citus_table_type, coalesce(column_to_column_name(logicalrelid, partkey), '') AS distribution_column, colocationid AS colocation_id, - pg_size_pretty(citus_total_relation_size(logicalrelid, fail_on_error := false)) AS table_size, + pg_size_pretty(table_sizes.table_size) AS table_size, (select count(*) from pg_dist_shard where logicalrelid = p.logicalrelid) AS shard_count, pg_get_userbyid(relowner) AS table_owner, amname AS access_method @@ -24,6 +24,13 @@ citus_tables_create_query=$CTCQ$ pg_class c ON (p.logicalrelid = c.oid) LEFT JOIN pg_am a ON (a.oid = c.relam) + JOIN + ( + SELECT ds.logicalrelid AS table_id, SUM(css.size) AS table_size + FROM citus_shard_sizes() css, pg_dist_shard ds + WHERE css.shard_id = ds.shardid + GROUP BY ds.logicalrelid + ) table_sizes ON (table_sizes.table_id = p.logicalrelid) WHERE -- filter out tables owned by extensions logicalrelid NOT IN ( diff --git a/src/test/regress/expected/citus_update_table_statistics.out b/src/test/regress/expected/citus_update_table_statistics.out index a8f90945bb8..24d29e54abe 100644 --- a/src/test/regress/expected/citus_update_table_statistics.out +++ b/src/test/regress/expected/citus_update_table_statistics.out @@ -64,15 +64,15 @@ SET citus.multi_shard_modify_mode TO sequential; SELECT citus_update_table_statistics('test_table_statistics_hash'); NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SELECT 0::bigint, 0::bigint; +NOTICE: issuing SELECT 0 AS shard_id, '' AS table_name LIMIT 0 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SELECT 981000 AS shard_id, pg_total_relation_size('public.test_table_statistics_hash_981000') UNION ALL SELECT 981001 AS shard_id, pg_total_relation_size('public.test_table_statistics_hash_981001') UNION ALL SELECT 981002 AS shard_id, pg_total_relation_size('public.test_table_statistics_hash_981002') UNION ALL SELECT 981003 AS shard_id, pg_total_relation_size('public.test_table_statistics_hash_981003') UNION ALL SELECT 981004 AS shard_id, 
pg_total_relation_size('public.test_table_statistics_hash_981004') UNION ALL SELECT 981005 AS shard_id, pg_total_relation_size('public.test_table_statistics_hash_981005') UNION ALL SELECT 981006 AS shard_id, pg_total_relation_size('public.test_table_statistics_hash_981006') UNION ALL SELECT 981007 AS shard_id, pg_total_relation_size('public.test_table_statistics_hash_981007') UNION ALL SELECT 0::bigint, 0::bigint; +NOTICE: issuing SELECT shard_id, pg_total_relation_size(table_name) FROM (VALUES (981000, 'public.test_table_statistics_hash_981000'), (981001, 'public.test_table_statistics_hash_981001'), (981002, 'public.test_table_statistics_hash_981002'), (981003, 'public.test_table_statistics_hash_981003'), (981004, 'public.test_table_statistics_hash_981004'), (981005, 'public.test_table_statistics_hash_981005'), (981006, 'public.test_table_statistics_hash_981006'), (981007, 'public.test_table_statistics_hash_981007')) t(shard_id, table_name) WHERE to_regclass(table_name) IS NOT NULL DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SELECT 981000 AS shard_id, pg_total_relation_size('public.test_table_statistics_hash_981000') UNION ALL SELECT 981001 AS shard_id, pg_total_relation_size('public.test_table_statistics_hash_981001') UNION ALL SELECT 981002 AS shard_id, pg_total_relation_size('public.test_table_statistics_hash_981002') UNION ALL SELECT 981003 AS shard_id, pg_total_relation_size('public.test_table_statistics_hash_981003') UNION ALL SELECT 981004 AS shard_id, pg_total_relation_size('public.test_table_statistics_hash_981004') UNION ALL SELECT 981005 AS shard_id, pg_total_relation_size('public.test_table_statistics_hash_981005') UNION ALL SELECT 981006 AS shard_id, pg_total_relation_size('public.test_table_statistics_hash_981006') UNION ALL SELECT 981007 AS shard_id, pg_total_relation_size('public.test_table_statistics_hash_981007') UNION ALL SELECT 0::bigint, 0::bigint; +NOTICE: issuing SELECT shard_id, pg_total_relation_size(table_name) FROM (VALUES (981000, 'public.test_table_statistics_hash_981000'), (981001, 'public.test_table_statistics_hash_981001'), (981002, 'public.test_table_statistics_hash_981002'), (981003, 'public.test_table_statistics_hash_981003'), (981004, 'public.test_table_statistics_hash_981004'), (981005, 'public.test_table_statistics_hash_981005'), (981006, 'public.test_table_statistics_hash_981006'), (981007, 'public.test_table_statistics_hash_981007')) t(shard_id, table_name) WHERE to_regclass(table_name) IS NOT NULL DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing COMMIT DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx @@ -158,15 +158,15 @@ SET citus.multi_shard_modify_mode TO sequential; SELECT citus_update_table_statistics('test_table_statistics_append'); NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SELECT 0::bigint, 0::bigint; +NOTICE: issuing SELECT 0 AS shard_id, '' AS table_name LIMIT 0 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); DETAIL: on server postgres@localhost:xxxxx connectionId: 
xxxxxxx -NOTICE: issuing SELECT 981008 AS shard_id, pg_total_relation_size('public.test_table_statistics_append_981008') UNION ALL SELECT 981009 AS shard_id, pg_total_relation_size('public.test_table_statistics_append_981009') UNION ALL SELECT 0::bigint, 0::bigint; +NOTICE: issuing SELECT shard_id, pg_total_relation_size(table_name) FROM (VALUES (981008, 'public.test_table_statistics_append_981008'), (981009, 'public.test_table_statistics_append_981009')) t(shard_id, table_name) WHERE to_regclass(table_name) IS NOT NULL DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SELECT 981008 AS shard_id, pg_total_relation_size('public.test_table_statistics_append_981008') UNION ALL SELECT 981009 AS shard_id, pg_total_relation_size('public.test_table_statistics_append_981009') UNION ALL SELECT 0::bigint, 0::bigint; +NOTICE: issuing SELECT shard_id, pg_total_relation_size(table_name) FROM (VALUES (981008, 'public.test_table_statistics_append_981008'), (981009, 'public.test_table_statistics_append_981009')) t(shard_id, table_name) WHERE to_regclass(table_name) IS NOT NULL DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing COMMIT DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
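For reference, the per-node shard size query that the patched
GenerateAllShardStatisticsQueryForNode() emits is the one visible in the NOTICE
lines above; reformatted here for readability (the shard IDs and shard names
are taken from the test output and are illustrative only):

    SELECT shard_id, pg_total_relation_size(table_name)
    FROM (VALUES
        (981008, 'public.test_table_statistics_append_981008'),
        (981009, 'public.test_table_statistics_append_981009')
    ) t(shard_id, table_name)
    WHERE to_regclass(table_name) IS NOT NULL;

The to_regclass(table_name) IS NOT NULL filter drops entries whose shard
relation no longer exists on the node, which is what lets this form replace the
dummy "SELECT 0::bigint, 0::bigint" tail that the old UNION ALL based query
needed.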