diff --git a/.devcontainer/.psqlrc b/.devcontainer/.psqlrc index 7642a97149d..07ea06cddcc 100644 --- a/.devcontainer/.psqlrc +++ b/.devcontainer/.psqlrc @@ -3,5 +3,5 @@ \pset border 2 \setenv PAGER 'pspg --no-mouse -bX --no-commandbar --no-topbar' \set HISTSIZE 100000 -\set PROMPT1 '\n%[%033[1m%]%M %n@%/:%>-%p%R%[%033[0m%]%# ' +\set PROMPT1 '\n%[%033[1m%]%M %n@%/:%> (PID: %p)%R%[%033[0m%]%# ' \set PROMPT2 ' ' diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 13762e1e550..33bba98d5c5 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -68,7 +68,7 @@ USER citus # build postgres versions separately for effective parrallelism and caching of already built versions when changing only certain versions FROM base AS pg14 -RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.11 +RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.12 RUN rm .pgenv/src/*.tar* RUN make -C .pgenv/src/postgresql-*/ clean RUN make -C .pgenv/src/postgresql-*/src/include install @@ -80,7 +80,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/ RUN rm .pgenv-staging/config/default.conf FROM base AS pg15 -RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.6 +RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.7 RUN rm .pgenv/src/*.tar* RUN make -C .pgenv/src/postgresql-*/ clean RUN make -C .pgenv/src/postgresql-*/src/include install @@ -92,7 +92,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/ RUN rm .pgenv-staging/config/default.conf FROM base AS pg16 -RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.2 +RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.3 RUN rm .pgenv/src/*.tar* RUN make -C .pgenv/src/postgresql-*/ clean RUN make -C .pgenv/src/postgresql-*/src/include install @@ -211,7 +211,7 @@ COPY --chown=citus:citus .psqlrc . 
RUN sudo chown --from=root:root citus:citus -R ~ # sets default pg version -RUN pgenv switch 16.2 +RUN pgenv switch 16.3 # make connecting to the coordinator easy ENV PGPORT=9700 diff --git a/.devcontainer/src/test/regress/Pipfile b/.devcontainer/src/test/regress/Pipfile index d4b2cc39f07..8811bbd8c67 100644 --- a/.devcontainer/src/test/regress/Pipfile +++ b/.devcontainer/src/test/regress/Pipfile @@ -5,7 +5,7 @@ verify_ssl = true [packages] mitmproxy = {editable = true, ref = "main", git = "https://github.com/citusdata/mitmproxy.git"} -construct = "==2.9.45" +construct = "*" docopt = "==0.6.2" cryptography = ">=41.0.4" pytest = "*" @@ -16,6 +16,7 @@ pytest-timeout = "*" pytest-xdist = "*" pytest-repeat = "*" pyyaml = "*" +werkzeug = "==2.3.7" [dev-packages] black = "*" diff --git a/.devcontainer/src/test/regress/Pipfile.lock b/.devcontainer/src/test/regress/Pipfile.lock index bdb42a1c319..fb82a6573b4 100644 --- a/.devcontainer/src/test/regress/Pipfile.lock +++ b/.devcontainer/src/test/regress/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "b92bf682aeeea1a66a16beaf78584a5318fd0ae908ce85c7e2a4807aa2bee532" + "sha256": "f8db86383082539f626f1402e720f5f2e3f9718b44a8f26110cf9f52e7ca46bc" }, "pipfile-spec": 6, "requires": { @@ -119,11 +119,11 @@ }, "certifi": { "hashes": [ - "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082", - "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9" + "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f", + "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1" ], "markers": "python_version >= '3.6'", - "version": "==2023.7.22" + "version": "==2024.2.2" }, "cffi": { "hashes": [ @@ -180,7 +180,7 @@ "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956", "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357" ], - "markers": "python_version >= '3.8'", + "markers": "platform_python_implementation != 'PyPy'", 
"version": "==1.16.0" }, "click": { @@ -193,40 +193,51 @@ }, "construct": { "hashes": [ - "sha256:2271a0efd0798679dea825ff47e22a4c550456a5db0ba8baa82f7eae0af0118c" + "sha256:4d2472f9684731e58cc9c56c463be63baa1447d674e0d66aeb5627b22f512c29", + "sha256:c80be81ef595a1a821ec69dc16099550ed22197615f4320b57cc9ce2a672cb30" ], "index": "pypi", - "version": "==2.9.45" + "markers": "python_version >= '3.6'", + "version": "==2.10.70" }, "cryptography": { "hashes": [ - "sha256:004b6ccc95943f6a9ad3142cfabcc769d7ee38a3f60fb0dddbfb431f818c3a67", - "sha256:047c4603aeb4bbd8db2756e38f5b8bd7e94318c047cfe4efeb5d715e08b49311", - "sha256:0d9409894f495d465fe6fda92cb70e8323e9648af912d5b9141d616df40a87b8", - "sha256:23a25c09dfd0d9f28da2352503b23e086f8e78096b9fd585d1d14eca01613e13", - "sha256:2ed09183922d66c4ec5fdaa59b4d14e105c084dd0febd27452de8f6f74704143", - "sha256:35c00f637cd0b9d5b6c6bd11b6c3359194a8eba9c46d4e875a3660e3b400005f", - "sha256:37480760ae08065437e6573d14be973112c9e6dcaf5f11d00147ee74f37a3829", - "sha256:3b224890962a2d7b57cf5eeb16ccaafba6083f7b811829f00476309bce2fe0fd", - "sha256:5a0f09cefded00e648a127048119f77bc2b2ec61e736660b5789e638f43cc397", - "sha256:5b72205a360f3b6176485a333256b9bcd48700fc755fef51c8e7e67c4b63e3ac", - "sha256:7e53db173370dea832190870e975a1e09c86a879b613948f09eb49324218c14d", - "sha256:7febc3094125fc126a7f6fb1f420d0da639f3f32cb15c8ff0dc3997c4549f51a", - "sha256:80907d3faa55dc5434a16579952ac6da800935cd98d14dbd62f6f042c7f5e839", - "sha256:86defa8d248c3fa029da68ce61fe735432b047e32179883bdb1e79ed9bb8195e", - "sha256:8ac4f9ead4bbd0bc8ab2d318f97d85147167a488be0e08814a37eb2f439d5cf6", - "sha256:93530900d14c37a46ce3d6c9e6fd35dbe5f5601bf6b3a5c325c7bffc030344d9", - "sha256:9eeb77214afae972a00dee47382d2591abe77bdae166bda672fb1e24702a3860", - "sha256:b5f4dfe950ff0479f1f00eda09c18798d4f49b98f4e2006d644b3301682ebdca", - "sha256:c3391bd8e6de35f6f1140e50aaeb3e2b3d6a9012536ca23ab0d9c35ec18c8a91", - "sha256:c880eba5175f4307129784eca96f4e70b88e57aa3f680aeba3bab0e980b0f37d", 
- "sha256:cecfefa17042941f94ab54f769c8ce0fe14beff2694e9ac684176a2535bf9714", - "sha256:e40211b4923ba5a6dc9769eab704bdb3fbb58d56c5b336d30996c24fcf12aadb", - "sha256:efc8ad4e6fc4f1752ebfb58aefece8b4e3c4cae940b0994d43649bdfce8d0d4f" + "sha256:04859aa7f12c2b5f7e22d25198ddd537391f1695df7057c8700f71f26f47a129", + "sha256:069d2ce9be5526a44093a0991c450fe9906cdf069e0e7cd67d9dee49a62b9ebe", + "sha256:0d3ec384058b642f7fb7e7bff9664030011ed1af8f852540c76a1317a9dd0d20", + "sha256:0fab2a5c479b360e5e0ea9f654bcebb535e3aa1e493a715b13244f4e07ea8eec", + "sha256:0fea01527d4fb22ffe38cd98951c9044400f6eff4788cf52ae116e27d30a1ba3", + "sha256:1b797099d221df7cce5ff2a1d272761d1554ddf9a987d3e11f6459b38cd300fd", + "sha256:1e935c2900fb53d31f491c0de04f41110351377be19d83d908c1fd502ae8daa5", + "sha256:20100c22b298c9eaebe4f0b9032ea97186ac2555f426c3e70670f2517989543b", + "sha256:20180da1b508f4aefc101cebc14c57043a02b355d1a652b6e8e537967f1e1b46", + "sha256:25b09b73db78facdfd7dd0fa77a3f19e94896197c86e9f6dc16bce7b37a96504", + "sha256:2619487f37da18d6826e27854a7f9d4d013c51eafb066c80d09c63cf24505306", + "sha256:2eb6368d5327d6455f20327fb6159b97538820355ec00f8cc9464d617caecead", + "sha256:35772a6cffd1f59b85cb670f12faba05513446f80352fe811689b4e439b5d89e", + "sha256:39d5c93e95bcbc4c06313fc6a500cee414ee39b616b55320c1904760ad686938", + "sha256:3d96ea47ce6d0055d5b97e761d37b4e84195485cb5a38401be341fabf23bc32a", + "sha256:4dcab7c25e48fc09a73c3e463d09ac902a932a0f8d0c568238b3696d06bf377b", + "sha256:5fbf0f3f0fac7c089308bd771d2c6c7b7d53ae909dce1db52d8e921f6c19bb3a", + "sha256:6c25e1e9c2ce682d01fc5e2dde6598f7313027343bd14f4049b82ad0402e52cd", + "sha256:762f3771ae40e111d78d77cbe9c1035e886ac04a234d3ee0856bf4ecb3749d54", + "sha256:90147dad8c22d64b2ff7331f8d4cddfdc3ee93e4879796f837bdbb2a0b141e0c", + "sha256:935cca25d35dda9e7bd46a24831dfd255307c55a07ff38fd1a92119cffc34857", + "sha256:93fbee08c48e63d5d1b39ab56fd3fdd02e6c2431c3da0f4edaf54954744c718f", + 
"sha256:9541c69c62d7446539f2c1c06d7046aef822940d248fa4b8962ff0302862cc1f", + "sha256:c23f03cfd7d9826cdcbad7850de67e18b4654179e01fe9bc623d37c2638eb4ef", + "sha256:c3d1f5a1d403a8e640fa0887e9f7087331abb3f33b0f2207d2cc7f213e4a864c", + "sha256:d1998e545081da0ab276bcb4b33cce85f775adb86a516e8f55b3dac87f469548", + "sha256:d5cf11bc7f0b71fb71af26af396c83dfd3f6eed56d4b6ef95d57867bf1e4ba65", + "sha256:db0480ffbfb1193ac4e1e88239f31314fe4c6cdcf9c0b8712b55414afbf80db4", + "sha256:de4ae486041878dc46e571a4c70ba337ed5233a1344c14a0790c4c4be4bbb8b4", + "sha256:de5086cd475d67113ccb6f9fae6d8fe3ac54a4f9238fd08bfdb07b03d791ff0a", + "sha256:df34312149b495d9d03492ce97471234fd9037aa5ba217c2a6ea890e9166f151", + "sha256:ead69ba488f806fe1b1b4050febafdbf206b81fa476126f3e16110c818bac396" ], "index": "pypi", "markers": "python_version >= '3.7'", - "version": "==41.0.4" + "version": "==42.0.3" }, "docopt": { "hashes": [ @@ -237,11 +248,11 @@ }, "exceptiongroup": { "hashes": [ - "sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9", - "sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3" + "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14", + "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68" ], "markers": "python_version < '3.11'", - "version": "==1.1.3" + "version": "==1.2.0" }, "execnet": { "hashes": [ @@ -253,12 +264,12 @@ }, "filelock": { "hashes": [ - "sha256:08c21d87ded6e2b9da6728c3dff51baf1dcecf973b768ef35bcbc3447edb9ad4", - "sha256:2e6f249f1f3654291606e046b09f1fd5eac39b360664c27f5aad072012f8bcbd" + "sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e", + "sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c" ], "index": "pypi", "markers": "python_version >= '3.8'", - "version": "==3.12.4" + "version": "==3.13.1" }, "flask": { "hashes": [ @@ -318,11 +329,11 @@ }, "jinja2": { "hashes": [ - "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852", - 
"sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61" + "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa", + "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90" ], "markers": "python_version >= '3.7'", - "version": "==3.1.2" + "version": "==3.1.3" }, "kaitaistruct": { "hashes": [ @@ -342,69 +353,69 @@ }, "markupsafe": { "hashes": [ - "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e", - "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e", - "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431", - "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686", - "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c", - "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559", - "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc", - "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb", - "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939", - "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c", - "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0", - "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4", - "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9", - "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575", - "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba", - "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d", - "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd", - "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3", - "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00", - "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155", - 
"sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac", - "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52", - "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f", - "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8", - "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b", - "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007", - "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24", - "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea", - "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198", - "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0", - "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee", - "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be", - "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2", - "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1", - "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707", - "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6", - "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c", - "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58", - "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823", - "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779", - "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636", - "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c", - "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad", - "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee", - "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc", - "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2", 
- "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48", - "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7", - "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e", - "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b", - "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa", - "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5", - "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e", - "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb", - "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9", - "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57", - "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc", - "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc", - "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2", - "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11" + "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf", + "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff", + "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f", + "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3", + "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532", + "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f", + "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617", + "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df", + "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4", + "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906", + "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f", + 
"sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4", + "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8", + "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371", + "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2", + "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465", + "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52", + "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6", + "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169", + "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad", + "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2", + "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0", + "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029", + "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f", + "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a", + "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced", + "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5", + "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c", + "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf", + "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9", + "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb", + "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad", + "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3", + "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1", + "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46", + "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc", + "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a", 
+ "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee", + "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900", + "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5", + "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea", + "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f", + "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5", + "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e", + "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a", + "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f", + "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50", + "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a", + "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b", + "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4", + "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff", + "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2", + "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46", + "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b", + "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf", + "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5", + "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5", + "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab", + "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd", + "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68" ], "markers": "python_version >= '3.7'", - "version": "==2.1.3" + "version": "==2.1.5" }, "mitmproxy": { "editable": true, @@ -491,11 +502,11 @@ }, "pluggy": { "hashes": [ - 
"sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12", - "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7" + "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981", + "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be" ], "markers": "python_version >= '3.8'", - "version": "==1.3.0" + "version": "==1.4.0" }, "protobuf": { "hashes": [ @@ -526,12 +537,12 @@ }, "psycopg": { "hashes": [ - "sha256:7542c45810ea16356e5126c9b4291cbc3802aa326fcbba09ff154fe380de29be", - "sha256:cd711edb64b07d7f8a233c365806caf7e55bbe7cbbd8d5c680f672bb5353c8d5" + "sha256:31144d3fb4c17d78094d9e579826f047d4af1da6a10427d91dfcfb6ecdf6f12b", + "sha256:4d5a0a5a8590906daa58ebd5f3cfc34091377354a1acced269dd10faf55da60e" ], "index": "pypi", "markers": "python_version >= '3.7'", - "version": "==3.1.11" + "version": "==3.1.18" }, "publicsuffix2": { "hashes": [ @@ -542,11 +553,11 @@ }, "pyasn1": { "hashes": [ - "sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57", - "sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde" + "sha256:4439847c58d40b1d0a573d07e3856e95333f1976294494c325775aeca506eb58", + "sha256:6d391a96e59b23130a5cfa74d6fd7f388dbbe26cc8f1edf39fdddf08d9d6676c" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'", - "version": "==0.5.0" + "version": "==0.5.1" }, "pycparser": { "hashes": [ @@ -557,11 +568,11 @@ }, "pyopenssl": { "hashes": [ - "sha256:24f0dc5227396b3e831f4c7f602b950a5e9833d292c8e4a2e06b709292806ae2", - "sha256:276f931f55a452e7dea69c7173e984eb2a4407ce413c918aa34b55f82f9b8bac" + "sha256:6aa33039a93fffa4563e655b61d11364d01264be8ccb49906101e02a334530bf", + "sha256:ba07553fb6fd6a7a2259adb9b84e12302a9a8a75c44046e8bb5d3e5ee887e3c3" ], - "markers": "python_version >= '3.6'", - "version": "==23.2.0" + "markers": "python_version >= '3.7'", + "version": "==24.0.0" }, "pyparsing": { "hashes": [ @@ -579,48 +590,48 @@ }, "pytest": 
{ "hashes": [ - "sha256:1d881c6124e08ff0a1bb75ba3ec0bfd8b5354a01c194ddd5a0a870a48d99b002", - "sha256:a766259cfab564a2ad52cb1aae1b881a75c3eb7e34ca3779697c23ed47c47069" + "sha256:249b1b0864530ba251b7438274c4d251c58d868edaaec8762893ad4a0d71c36c", + "sha256:50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6" ], "index": "pypi", - "markers": "python_version >= '3.7'", - "version": "==7.4.2" + "markers": "python_version >= '3.8'", + "version": "==8.0.0" }, "pytest-asyncio": { "hashes": [ - "sha256:40a7eae6dded22c7b604986855ea48400ab15b069ae38116e8c01238e9eeb64d", - "sha256:8666c1c8ac02631d7c51ba282e0c69a8a452b211ffedf2599099845da5c5c37b" + "sha256:3a048872a9c4ba14c3e90cc1aa20cbc2def7d01c7c8db3777ec281ba9c057675", + "sha256:4e7093259ba018d58ede7d5315131d21923a60f8a6e9ee266ce1589685c89eac" ], "index": "pypi", - "markers": "python_version >= '3.7'", - "version": "==0.21.1" + "markers": "python_version >= '3.8'", + "version": "==0.23.5" }, "pytest-repeat": { "hashes": [ - "sha256:4474a7d9e9137f6d8cc8ae297f8c4168d33c56dd740aa78cfffe562557e6b96e", - "sha256:5cd3289745ab3156d43eb9c8e7f7d00a926f3ae5c9cf425bec649b2fe15bad5b" + "sha256:26ab2df18226af9d5ce441c858f273121e92ff55f5bb311d25755b8d7abdd8ed", + "sha256:ffd3836dfcd67bb270bec648b330e20be37d2966448c4148c4092d1e8aba8185" ], "index": "pypi", - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", - "version": "==0.9.1" + "markers": "python_version >= '3.7'", + "version": "==0.9.3" }, "pytest-timeout": { "hashes": [ - "sha256:c07ca07404c612f8abbe22294b23c368e2e5104b521c1790195561f37e1ac3d9", - "sha256:f6f50101443ce70ad325ceb4473c4255e9d74e3c7cd0ef827309dfa4c0d975c6" + "sha256:3b0b95dabf3cb50bac9ef5ca912fa0cfc286526af17afc806824df20c2f72c90", + "sha256:bde531e096466f49398a59f2dde76fa78429a09a12411466f88a07213e220de2" ], "index": "pypi", - "markers": "python_version >= '3.6'", - "version": "==2.1.0" + "markers": "python_version >= '3.7'", + "version": "==2.2.0" }, "pytest-xdist": 
{ "hashes": [ - "sha256:d5ee0520eb1b7bcca50a60a518ab7a7707992812c578198f8b44fdfac78e8c93", - "sha256:ff9daa7793569e6a68544850fd3927cd257cc03a7ef76c95e86915355e82b5f2" + "sha256:cbb36f3d67e0c478baa57fa4edc8843887e0f6cfc42d677530a36d7472b32d8a", + "sha256:d075629c7e00b611df89f490a5063944bee7a4362a5ff11c7cc7824a03dfce24" ], "index": "pypi", "markers": "python_version >= '3.7'", - "version": "==3.3.1" + "version": "==3.5.0" }, "pyyaml": { "hashes": [ @@ -653,6 +664,7 @@ "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4", "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba", "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8", + "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef", "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5", "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd", "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3", @@ -693,36 +705,37 @@ "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001", "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462", "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9", + "sha256:1707814f0d9791df063f8c19bb51b0d1278b8e9a2353abbb676c2f685dee6afe", "sha256:1758ce7d8e1a29d23de54a16ae867abd370f01b5a69e1a3ba75223eaa3ca1a1b", "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b", "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615", + "sha256:1dc67314e7e1086c9fdf2680b7b6c2be1c0d8e3a8279f2e993ca2a7545fecf62", "sha256:25ac8c08322002b06fa1d49d1646181f0b2c72f5cbc15a85e80b4c30a544bb15", "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b", + "sha256:305889baa4043a09e5b76f8e2a51d4ffba44259f6b4c72dec8ca56207d9c6fe1", "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9", 
"sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675", - "sha256:3fcc54cb0c8b811ff66082de1680b4b14cf8a81dce0d4fbf665c2265a81e07a1", "sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899", "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7", "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7", "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312", "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa", - "sha256:665f58bfd29b167039f714c6998178d27ccd83984084c286110ef26b230f259f", "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91", - "sha256:7048c338b6c86627afb27faecf418768acb6331fc24cfa56c93e8c9780f815fa", "sha256:75e1ed13e1f9de23c5607fe6bd1aeaae21e523b32d83bb33918245361e9cc51b", + "sha256:77159f5d5b5c14f7c34073862a6b7d34944075d9f93e681638f6d753606c6ce6", "sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3", "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334", "sha256:84b554931e932c46f94ab306913ad7e11bba988104c5cff26d90d03f68258cd5", "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3", "sha256:955eae71ac26c1ab35924203fda6220f84dce57d6d7884f189743e2abe3a9fbe", - "sha256:9eb5dee2772b0f704ca2e45b1713e4e5198c18f515b52743576d196348f374d3", + "sha256:a1a45e0bb052edf6a1d3a93baef85319733a888363938e1fc9924cb00c8df24c", "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed", "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337", "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880", + "sha256:aa2267c6a303eb483de8d02db2871afb5c5fc15618d894300b88958f729ad74f", "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d", "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248", "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d", - 
"sha256:b5edda50e5e9e15e54a6a8a0070302b00c518a9d32accc2346ad6c984aacd279", "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf", "sha256:beb2e0404003de9a4cab9753a8805a8fe9320ee6673136ed7f04255fe60bb512", "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069", @@ -731,7 +744,6 @@ "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d", "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31", "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92", - "sha256:d92f81886165cb14d7b067ef37e142256f1c6a90a65cd156b063a43da1708cfd", "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5", "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28", "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d", @@ -760,28 +772,28 @@ }, "tornado": { "hashes": [ - "sha256:1bd19ca6c16882e4d37368e0152f99c099bad93e0950ce55e71daed74045908f", - "sha256:22d3c2fa10b5793da13c807e6fc38ff49a4f6e1e3868b0a6f4164768bb8e20f5", - "sha256:502fba735c84450974fec147340016ad928d29f1e91f49be168c0a4c18181e1d", - "sha256:65ceca9500383fbdf33a98c0087cb975b2ef3bfb874cb35b8de8740cf7f41bd3", - "sha256:71a8db65160a3c55d61839b7302a9a400074c9c753040455494e2af74e2501f2", - "sha256:7ac51f42808cca9b3613f51ffe2a965c8525cb1b00b7b2d56828b8045354f76a", - "sha256:7d01abc57ea0dbb51ddfed477dfe22719d376119844e33c661d873bf9c0e4a16", - "sha256:805d507b1f588320c26f7f097108eb4023bbaa984d63176d1652e184ba24270a", - "sha256:9dc4444c0defcd3929d5c1eb5706cbe1b116e762ff3e0deca8b715d14bf6ec17", - "sha256:ceb917a50cd35882b57600709dd5421a418c29ddc852da8bcdab1f0db33406b0", - "sha256:e7d8db41c0181c80d76c982aacc442c0783a2c54d6400fe028954201a2e032fe" + "sha256:02ccefc7d8211e5a7f9e8bc3f9e5b0ad6262ba2fbb683a6443ecc804e5224ce0", + "sha256:10aeaa8006333433da48dec9fe417877f8bcc21f48dda8d661ae79da357b2a63", + "sha256:27787de946a9cffd63ce5814c33f734c627a87072ec7eed71f7fc4417bb16263", + 
"sha256:6f8a6c77900f5ae93d8b4ae1196472d0ccc2775cc1dfdc9e7727889145c45052", + "sha256:71ddfc23a0e03ef2df1c1397d859868d158c8276a0603b96cf86892bff58149f", + "sha256:72291fa6e6bc84e626589f1c29d90a5a6d593ef5ae68052ee2ef000dfd273dee", + "sha256:88b84956273fbd73420e6d4b8d5ccbe913c65d31351b4c004ae362eba06e1f78", + "sha256:e43bc2e5370a6a8e413e1e1cd0c91bedc5bd62a74a532371042a18ef19e10579", + "sha256:f0251554cdd50b4b44362f73ad5ba7126fc5b2c2895cc62b14a1c2d7ea32f212", + "sha256:f7894c581ecdcf91666a0912f18ce5e757213999e183ebfc2c3fdbf4d5bd764e", + "sha256:fd03192e287fbd0899dd8f81c6fb9cbbc69194d2074b38f384cb6fa72b80e9c2" ], "markers": "python_version >= '3.8'", - "version": "==6.3.3" + "version": "==6.4" }, "typing-extensions": { "hashes": [ - "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0", - "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef" + "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783", + "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd" ], "markers": "python_version >= '3.8'", - "version": "==4.8.0" + "version": "==4.9.0" }, "urwid": { "hashes": [ @@ -791,12 +803,12 @@ }, "werkzeug": { "hashes": [ - "sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc", - "sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10" + "sha256:2b8c0e447b4b9dbcc85dd97b6eeb4dcbaf6c8b6c3be0bd654e25553e0a2157d8", + "sha256:effc12dba7f3bd72e605ce49807bbe692bd729c3bb122a3b91747a6ae77df528" ], "index": "pypi", "markers": "python_version >= '3.8'", - "version": "==3.0.1" + "version": "==2.3.7" }, "wsproto": { "hashes": [ @@ -864,40 +876,40 @@ "develop": { "attrs": { "hashes": [ - "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04", - "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015" + "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30", + 
"sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1" ], "markers": "python_version >= '3.7'", - "version": "==23.1.0" + "version": "==23.2.0" }, "black": { "hashes": [ - "sha256:031e8c69f3d3b09e1aa471a926a1eeb0b9071f80b17689a655f7885ac9325a6f", - "sha256:13a2e4a93bb8ca74a749b6974925c27219bb3df4d42fc45e948a5d9feb5122b7", - "sha256:13ef033794029b85dfea8032c9d3b92b42b526f1ff4bf13b2182ce4e917f5100", - "sha256:14f04c990259576acd093871e7e9b14918eb28f1866f91968ff5524293f9c573", - "sha256:24b6b3ff5c6d9ea08a8888f6977eae858e1f340d7260cf56d70a49823236b62d", - "sha256:403397c033adbc45c2bd41747da1f7fc7eaa44efbee256b53842470d4ac5a70f", - "sha256:50254ebfa56aa46a9fdd5d651f9637485068a1adf42270148cd101cdf56e0ad9", - "sha256:538efb451cd50f43aba394e9ec7ad55a37598faae3348d723b59ea8e91616300", - "sha256:638619a559280de0c2aa4d76f504891c9860bb8fa214267358f0a20f27c12948", - "sha256:6a3b50e4b93f43b34a9d3ef00d9b6728b4a722c997c99ab09102fd5efdb88325", - "sha256:6ccd59584cc834b6d127628713e4b6b968e5f79572da66284532525a042549f9", - "sha256:75a2dc41b183d4872d3a500d2b9c9016e67ed95738a3624f4751a0cb4818fe71", - "sha256:7d30ec46de88091e4316b17ae58bbbfc12b2de05e069030f6b747dfc649ad186", - "sha256:8431445bf62d2a914b541da7ab3e2b4f3bc052d2ccbf157ebad18ea126efb91f", - "sha256:8fc1ddcf83f996247505db6b715294eba56ea9372e107fd54963c7553f2b6dfe", - "sha256:a732b82747235e0542c03bf352c126052c0fbc458d8a239a94701175b17d4855", - "sha256:adc3e4442eef57f99b5590b245a328aad19c99552e0bdc7f0b04db6656debd80", - "sha256:c46767e8df1b7beefb0899c4a95fb43058fa8500b6db144f4ff3ca38eb2f6393", - "sha256:c619f063c2d68f19b2d7270f4cf3192cb81c9ec5bc5ba02df91471d0b88c4c5c", - "sha256:cf3a4d00e4cdb6734b64bf23cd4341421e8953615cba6b3670453737a72ec204", - "sha256:cf99f3de8b3273a8317681d8194ea222f10e0133a24a7548c73ce44ea1679377", - "sha256:d6bc09188020c9ac2555a498949401ab35bb6bf76d4e0f8ee251694664df6301" + "sha256:057c3dc602eaa6fdc451069bd027a1b2635028b575a6c3acfd63193ced20d9c8", + 
"sha256:08654d0797e65f2423f850fc8e16a0ce50925f9337fb4a4a176a7aa4026e63f8", + "sha256:163baf4ef40e6897a2a9b83890e59141cc8c2a98f2dda5080dc15c00ee1e62cd", + "sha256:1e08fb9a15c914b81dd734ddd7fb10513016e5ce7e6704bdd5e1251ceee51ac9", + "sha256:4dd76e9468d5536abd40ffbc7a247f83b2324f0c050556d9c371c2b9a9a95e31", + "sha256:4f9de21bafcba9683853f6c96c2d515e364aee631b178eaa5145fc1c61a3cc92", + "sha256:61a0391772490ddfb8a693c067df1ef5227257e72b0e4108482b8d41b5aee13f", + "sha256:6981eae48b3b33399c8757036c7f5d48a535b962a7c2310d19361edeef64ce29", + "sha256:7e53a8c630f71db01b28cd9602a1ada68c937cbf2c333e6ed041390d6968faf4", + "sha256:810d445ae6069ce64030c78ff6127cd9cd178a9ac3361435708b907d8a04c693", + "sha256:93601c2deb321b4bad8f95df408e3fb3943d85012dddb6121336b8e24a0d1218", + "sha256:992e451b04667116680cb88f63449267c13e1ad134f30087dec8527242e9862a", + "sha256:9db528bccb9e8e20c08e716b3b09c6bdd64da0dd129b11e160bf082d4642ac23", + "sha256:a0057f800de6acc4407fe75bb147b0c2b5cbb7c3ed110d3e5999cd01184d53b0", + "sha256:ba15742a13de85e9b8f3239c8f807723991fbfae24bad92d34a2b12e81904982", + "sha256:bce4f25c27c3435e4dace4815bcb2008b87e167e3bf4ee47ccdc5ce906eb4894", + "sha256:ca610d29415ee1a30a3f30fab7a8f4144e9d34c89a235d81292a1edb2b55f540", + "sha256:d533d5e3259720fdbc1b37444491b024003e012c5173f7d06825a77508085430", + "sha256:d84f29eb3ee44859052073b7636533ec995bd0f64e2fb43aeceefc70090e752b", + "sha256:e37c99f89929af50ffaf912454b3e3b47fd64109659026b678c091a4cd450fb2", + "sha256:e8a6ae970537e67830776488bca52000eaa37fa63b9988e8c487458d9cd5ace6", + "sha256:faf2ee02e6612577ba0181f4347bcbcf591eb122f7841ae5ba233d12c39dcb4d" ], "index": "pypi", "markers": "python_version >= '3.8'", - "version": "==23.9.1" + "version": "==24.2.0" }, "click": { "hashes": [ @@ -909,30 +921,30 @@ }, "flake8": { "hashes": [ - "sha256:d5b3857f07c030bdb5bf41c7f53799571d75c4491748a3adcd47de929e34cd23", - "sha256:ffdfce58ea94c6580c77888a86506937f9a1a227dfcd15f245d694ae20a6b6e5" + 
"sha256:33f96621059e65eec474169085dc92bf26e7b2d47366b70be2f67ab80dc25132", + "sha256:a6dfbb75e03252917f2473ea9653f7cd799c3064e54d4c8140044c5c065f53c3" ], "index": "pypi", "markers": "python_full_version >= '3.8.1'", - "version": "==6.1.0" + "version": "==7.0.0" }, "flake8-bugbear": { "hashes": [ - "sha256:90cf04b19ca02a682feb5aac67cae8de742af70538590509941ab10ae8351f71", - "sha256:b182cf96ea8f7a8595b2f87321d7d9b28728f4d9c3318012d896543d19742cb5" + "sha256:663ef5de80cd32aacd39d362212983bc4636435a6f83700b4ed35acbd0b7d1b8", + "sha256:f9cb5f2a9e792dd80ff68e89a14c12eed8620af8b41a49d823b7a33064ac9658" ], "index": "pypi", "markers": "python_full_version >= '3.8.1'", - "version": "==23.9.16" + "version": "==24.2.6" }, "isort": { "hashes": [ - "sha256:8bef7dde241278824a6d83f44a544709b065191b95b6e50894bdc722fcba0504", - "sha256:f84c2818376e66cf843d497486ea8fed8700b340f308f076c6fb1229dff318b6" + "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109", + "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6" ], "index": "pypi", "markers": "python_full_version >= '3.8.0'", - "version": "==5.12.0" + "version": "==5.13.2" }, "mccabe": { "hashes": [ @@ -960,19 +972,19 @@ }, "pathspec": { "hashes": [ - "sha256:1d6ed233af05e679efb96b1851550ea95bbb64b7c490b0f5aa52996c11e92a20", - "sha256:e0d8d0ac2f12da61956eb2306b69f9469b42f4deb0f3cb6ed47b9cce9996ced3" + "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", + "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712" ], - "markers": "python_version >= '3.7'", - "version": "==0.11.2" + "markers": "python_version >= '3.8'", + "version": "==0.12.1" }, "platformdirs": { "hashes": [ - "sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3", - "sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e" + "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068", + 
"sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768" ], - "markers": "python_version >= '3.7'", - "version": "==3.11.0" + "markers": "python_version >= '3.8'", + "version": "==4.2.0" }, "pycodestyle": { "hashes": [ @@ -984,11 +996,11 @@ }, "pyflakes": { "hashes": [ - "sha256:4132f6d49cb4dae6819e5379898f2b8cce3c5f23994194c24b77d5da2e36f774", - "sha256:a0aae034c444db0071aa077972ba4768d40c830d9539fd45bf4cd3f8f6992efc" + "sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f", + "sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a" ], "markers": "python_version >= '3.8'", - "version": "==3.1.0" + "version": "==3.2.0" }, "tomli": { "hashes": [ @@ -1000,11 +1012,11 @@ }, "typing-extensions": { "hashes": [ - "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0", - "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef" + "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783", + "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd" ], "markers": "python_version >= '3.8'", - "version": "==4.8.0" + "version": "==4.9.0" } } } diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index cd4995e20e1..70bc0bcb9a6 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -24,18 +24,19 @@ jobs: runs-on: ubuntu-latest name: Initialize parameters outputs: - build_image_name: "citus/extbuilder" - test_image_name: "citus/exttester" - citusupgrade_image_name: "citus/citusupgradetester" - fail_test_image_name: "citus/failtester" - pgupgrade_image_name: "citus/pgupgradetester" - style_checker_image_name: "citus/stylechecker" + build_image_name: "ghcr.io/citusdata/extbuilder" + test_image_name: "ghcr.io/citusdata/exttester" + citusupgrade_image_name: "ghcr.io/citusdata/citusupgradetester" + fail_test_image_name: "ghcr.io/citusdata/failtester" + pgupgrade_image_name: 
"ghcr.io/citusdata/pgupgradetester" + style_checker_image_name: "ghcr.io/citusdata/stylechecker" style_checker_tools_version: "0.8.18" - image_suffix: "-v390dab3" - pg14_version: '{ "major": "14", "full": "14.11" }' - pg15_version: '{ "major": "15", "full": "15.6" }' - pg16_version: '{ "major": "16", "full": "16.2" }' - upgrade_pg_versions: "14.11-15.6-16.2" + sql_snapshot_pg_version: "16.3" + image_suffix: "-v13fd57c" + pg14_version: '{ "major": "14", "full": "14.12" }' + pg15_version: '{ "major": "15", "full": "15.7" }' + pg16_version: '{ "major": "16", "full": "16.3" }' + upgrade_pg_versions: "14.12-15.7-16.3" steps: # Since GHA jobs needs at least one step we use a noop step here. - name: Set up parameters @@ -44,7 +45,7 @@ jobs: needs: params runs-on: ubuntu-20.04 container: - image: ${{ needs.params.outputs.build_image_name }}:latest + image: ${{ needs.params.outputs.build_image_name }}:${{ needs.params.outputs.sql_snapshot_pg_version }}${{ needs.params.outputs.image_suffix }} options: --user root steps: - uses: actions/checkout@v3.5.0 diff --git a/.github/workflows/packaging-test-pipelines.yml b/.github/workflows/packaging-test-pipelines.yml index 4ae741a911a..26b5cfc9593 100644 --- a/.github/workflows/packaging-test-pipelines.yml +++ b/.github/workflows/packaging-test-pipelines.yml @@ -19,7 +19,7 @@ jobs: pg_versions: ${{ steps.get-postgres-versions.outputs.pg_versions }} steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 2 - name: Get Postgres Versions @@ -51,18 +51,6 @@ jobs: - almalinux-8 - almalinux-9 POSTGRES_VERSION: ${{ fromJson(needs.get_postgres_versions_from_file.outputs.pg_versions) }} - # Postgres removed support for CentOS 7 in PG 16. Below block is needed to - # keep the build for CentOS 7 working for PG 14 and PG 15. - # Once dependent systems drop support for Centos 7, we can remove this block. 
- include: - - packaging_docker_image: centos-7 - POSTGRES_VERSION: 14 - - packaging_docker_image: centos-7 - POSTGRES_VERSION: 15 - - packaging_docker_image: oraclelinux-7 - POSTGRES_VERSION: 14 - - packaging_docker_image: oraclelinux-7 - POSTGRES_VERSION: 15 container: image: citus/packaging:${{ matrix.packaging_docker_image }}-pg${{ matrix.POSTGRES_VERSION }} @@ -70,7 +58,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set Postgres and python parameters for rpm based distros run: | diff --git a/CHANGELOG.md b/CHANGELOG.md index 02156009972..94c85bcdf14 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,21 @@ +### citus v12.1.6 (Nov 14, 2024) ### + +* Propagates `SECURITY LABEL .. ON ROLE` statements (#7304) + +* Fixes crash caused by running queries with window partition (#7718) + +### citus v12.1.5 (July 17, 2024) ### + +* Adds support for MERGE commands with single shard distributed target tables + (#7643) + +* Fixes an error with MERGE commands when insert value does not have source + distribution column (#7627) + +### citus v12.1.4 (May 28, 2024) ### + +* Adds null check for node in HasRangeTableRef (#7604) + ### citus v12.1.3 (April 18, 2024) ### * Allows overwriting host name for all inter-node connections by diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e1900642d3e..70cc486e74e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -35,6 +35,28 @@ To get citus installed from source we run `make install -s` in the first termina With the Citus cluster running you can connect to the coordinator in the first terminal via `psql -p9700`. Because the coordinator is the most common entrypoint the `PGPORT` environment is set accordingly, so a simple `psql` will connect directly to the coordinator. +### Debugging in the VS code + +1. Start Debugging: Press F5 in VS Code to start debugging. When prompted, you'll need to attach the debugger to the appropriate PostgreSQL process. + +2. 
Identify the Process: If you're running a psql command, take note of the PID that appears in your psql prompt. For example: +``` +[local] citus@citus:9700 (PID: 5436)=# +``` +This PID (5436 in this case) indicates the process that you should attach the debugger to. +If you are uncertain about which process to attach, you can list all running PostgreSQL processes using the following command: +``` +ps aux | grep postgres +``` + +Look for the process associated with the PID you noted. For example: +``` +citus 5436 0.0 0.0 0 0 ? S 14:00 0:00 postgres: citus citus +``` +4. Attach the Debugger: Once you've identified the correct PID, select that process when prompted in VS Code to attach the debugger. You should now be able to debug the PostgreSQL session tied to the psql command. + +5. Set Breakpoints and Debug: With the debugger attached, you can set breakpoints within the code. This allows you to step through the code execution, inspect variables, and fully debug the PostgreSQL instance running in your container. 
+ ### Getting and building [PostgreSQL documentation](https://www.postgresql.org/support/versioning/) has a diff --git a/citus-tools b/citus-tools new file mode 160000 index 00000000000..3376bd6845f --- /dev/null +++ b/citus-tools @@ -0,0 +1 @@ +Subproject commit 3376bd6845f0614908ed304f5033bd644c82d3bf diff --git a/src/backend/columnar/columnar_tableam.c b/src/backend/columnar/columnar_tableam.c index ca3a5f4c4aa..fd3d171c6f5 100644 --- a/src/backend/columnar/columnar_tableam.c +++ b/src/backend/columnar/columnar_tableam.c @@ -3021,6 +3021,8 @@ AvailableExtensionVersionColumnar(void) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("citus extension is not found"))); + + return NULL; /* keep compiler happy */ } diff --git a/src/backend/distributed/commands/multi_copy.c b/src/backend/distributed/commands/multi_copy.c index 23847ac0150..cb64ef7f55a 100644 --- a/src/backend/distributed/commands/multi_copy.c +++ b/src/backend/distributed/commands/multi_copy.c @@ -2568,7 +2568,7 @@ ShardIdForTuple(CitusCopyDestReceiver *copyDest, Datum *columnValues, bool *colu * Find the shard interval and id for the partition column value for * non-reference tables. * - * For reference table, this function blindly returns the tables single + * For reference table, and single shard distributed table this function blindly returns the tables single * shard. 
*/ ShardInterval *shardInterval = FindShardInterval(partitionColumnValue, cacheEntry); diff --git a/src/backend/distributed/connection/connection_management.c b/src/backend/distributed/connection/connection_management.c index f8e4816ed7d..825911edb25 100644 --- a/src/backend/distributed/connection/connection_management.c +++ b/src/backend/distributed/connection/connection_management.c @@ -773,9 +773,31 @@ ShutdownConnection(MultiConnection *connection) if (PQstatus(connection->pgConn) == CONNECTION_OK && PQtransactionStatus(connection->pgConn) == PQTRANS_ACTIVE) { - SendCancelationRequest(connection); + RemoteTransaction* transaction = &connection->remoteTransaction; + if (Enable2PCQuickResponse && + (transaction->transactionState == REMOTE_TRANS_2PC_ABORTING || + transaction->transactionState == REMOTE_TRANS_2PC_COMMITTING)) + { +#if IN_MY_DEBUGGING_PHASE + if (LogRemoteCommands) + { + elog(NOTICE, "Not send cancel request for 2PC aborting or committing"); + } +#endif + } + else + { + SendCancelationRequest(connection); + } } CitusPQFinish(connection); +#if IN_MY_DEBUGGING_PHASE + if (LogRemoteCommands) + { + elog(NOTICE, "connection shutdown connectionId=%ld hostname=%s port=%d", + connection->connectionId, connection->hostname, connection->port); + } +#endif } @@ -1573,6 +1595,26 @@ RemoteTransactionIdle(MultiConnection *connection) return true; } +#if 0 +/* + * TODO: if we want to avoid shutting down the connection when aborting or committing 2PC + * distributed transactions, we need do the following: + * 1. in background worker, we need consume results of this connection + * 2. 
in assignning task, we should not use the this connection until the transaction is idle + */ + if (Enable2PCQuickResponse && + (connection->remoteTransaction.transactionState == REMOTE_TRANS_2PC_COMMITTING || + connection->remoteTransaction.transactionState == REMOTE_TRANS_2PC_ABORTING)) + { + /* + * As RemoteTransactionIdle() is called from ShouldShutdownConnection() only, when + * we are aborting or committing a 2PC distributed transaction, we keep the transaction + * state by returning true to avoid shutting down the connection. + */ + return true; + } +#endif + return PQtransactionStatus(connection->pgConn) == PQTRANS_IDLE; } @@ -1684,3 +1726,35 @@ CitusModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, Latch *latch) return success; } + +/* compile with CFLAGS="-g -O0" */ +void +PrintConnectionHash(void) +{ + HASH_SEQ_STATUS status; + ConnectionHashEntry *entry; + + elog(NOTICE, "Connection Hash:"); + hash_seq_init(&status, ConnectionHash); + while ((entry = (ConnectionHashEntry *) hash_seq_search(&status)) != NULL) + { + dlist_iter iter; + elog(NOTICE, "key(Hostname=%s Port=%d User=%s Database=%s Replication=%s):", + entry->key.hostname, + entry->key.port, + entry->key.user, + entry->key.database, + entry->key.replicationConnParam ? "true" : "false"); + dlist_foreach(iter, entry->connections) + { + MultiConnection *connection = dlist_container(MultiConnection, connectionNode, iter.cur); + elog(NOTICE, "value(Connection=%lu connectionState=%d xactStatus=%d pqstatus=%d busy=%s Claimed=%s)", + connection->connectionId, + connection->connectionState, + PQtransactionStatus(connection->pgConn), + PQstatus(connection->pgConn), + PQisBusy(connection->pgConn) ? "true" : "false", + connection->claimedExclusively ? 
"true" : "false"); + } + } +} diff --git a/src/backend/distributed/connection/remote_commands.c b/src/backend/distributed/connection/remote_commands.c index cbd74ff51b1..9da820c6047 100644 --- a/src/backend/distributed/connection/remote_commands.c +++ b/src/backend/distributed/connection/remote_commands.c @@ -38,6 +38,7 @@ int RemoteCopyFlushThreshold = 8 * 1024 * 1024; /* GUC, determining whether statements sent to remote nodes are logged */ bool LogRemoteCommands = false; char *GrepRemoteCommands = ""; +bool Enable2PCQuickResponse = false; static bool ClearResultsInternal(MultiConnection *connection, bool raiseErrors, @@ -986,20 +987,30 @@ WaitForAllConnections(List *connectionList, bool raiseInterrupts) } else if (sendStatus == 0) { - /* done writing, only wait for read events */ - bool success = - CitusModifyWaitEvent(waitEventSet, event->pos, - WL_SOCKET_READABLE, NULL); - if (!success) + if (Enable2PCQuickResponse && + (connection->remoteTransaction.transactionState == REMOTE_TRANS_2PC_COMMITTING || + connection->remoteTransaction.transactionState == REMOTE_TRANS_2PC_ABORTING)) { - ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE), - errmsg("connection establishment for " - "node %s:%d failed", - connection->hostname, - connection->port), - errhint("Check both the local and remote " - "server logs for the connection " - "establishment errors."))); + /* we dont wait for 2pc committing response */ + connectionIsReady = true; + } + else + { + /* done writing, only wait for read events */ + bool success = + CitusModifyWaitEvent(waitEventSet, event->pos, + WL_SOCKET_READABLE, NULL); + if (!success) + { + ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE), + errmsg("connection establishment for " + "node %s:%d failed", + connection->hostname, + connection->port), + errhint("Check both the local and remote " + "server logs for the connection " + "establishment errors."))); + } } } } diff --git a/src/backend/distributed/executor/adaptive_executor.c 
b/src/backend/distributed/executor/adaptive_executor.c index e912f418d6f..645c2dd5e8e 100644 --- a/src/backend/distributed/executor/adaptive_executor.c +++ b/src/backend/distributed/executor/adaptive_executor.c @@ -788,6 +788,16 @@ AdaptiveExecutor(CitusScanState *scanState) Job *job = distributedPlan->workerJob; List *taskList = job->taskList; +#if IN_MY_DEBUGGING_PHASE + { + if (LogRemoteCommands) + { + elog(NOTICE, "AdaptiveExecutor: taskList size: %d", list_length(taskList)); + PrintConnectionHash(); + } + } +#endif + /* we should only call this once before the scan finished */ Assert(!scanState->finishedRemoteScan); @@ -883,6 +893,14 @@ AdaptiveExecutor(CitusScanState *scanState) RunDistributedExecution(execution); } +#if IN_MY_DEBUGGING_PHASE + if (LogRemoteCommands) + { + elog(NOTICE, "end of RunDistributedExecution in AdaptiveExecutor: taskList size: %d", list_length(taskList)); + PrintConnectionHash(); + } +#endif + /* execute tasks local to the node (if any) */ if (list_length(execution->localTaskList) > 0) { @@ -897,6 +915,14 @@ AdaptiveExecutor(CitusScanState *scanState) } FinishDistributedExecution(execution); + +#if IN_MY_DEBUGGING_PHASE + if (LogRemoteCommands) + { + elog(NOTICE, "end2 of RunDistributedExecution in AdaptiveExecutor: taskList size: %d", list_length(taskList)); + PrintConnectionHash(); + } +#endif if (SortReturning && distributedPlan->expectResults && commandType != CMD_SELECT) { @@ -1458,11 +1484,29 @@ AssignTasksToConnectionsOrWorkerPool(DistributedExecution *execution) int connectionFlags = 0; char *nodeName = NULL; int nodePort = 0; + +#if IN_MY_DEBUGGING_PHASE + { + if (LogRemoteCommands) + { + elog(NOTICE, "before assign shardId=%ld groupId=%d nodeName=%s nodePort=%d", taskPlacement->shardId, taskPlacement->groupId, taskPlacement->nodeName, taskPlacement->nodePort); + } + } +#endif LookupTaskPlacementHostAndPort(taskPlacement, &nodeName, &nodePort); WorkerPool *workerPool = FindOrCreateWorkerPool(execution, nodeName, nodePort); 
+#if IN_MY_DEBUGGING_PHASE + { + if (LogRemoteCommands) + { + elog(NOTICE, "after assign shardId=%ld groupId=%d nodeName=%s nodePort=%d", taskPlacement->shardId, taskPlacement->groupId, taskPlacement->nodeName, taskPlacement->nodePort); + } + } +#endif + /* * Execution of a command on a shard placement, which may not always * happen if the query is read-only and the shard has multiple placements. @@ -1517,9 +1561,15 @@ AssignTasksToConnectionsOrWorkerPool(DistributedExecution *execution) WorkerSession *session = FindOrCreateWorkerSession(workerPool, connection); - ereport(DEBUG4, (errmsg("Session %ld (%s:%d) has an assigned task", - session->sessionId, connection->hostname, - connection->port))); +#if IN_MY_DEBUGGING_PHASE + if (LogRemoteCommands) + { + ereport(NOTICE, (errmsg("Session %ld (%s:%d) has an assigned task shardId=%ld connectionState=%d", + session->sessionId, connection->hostname, + connection->port, session->currentTask->shardPlacement->shardId, + connection->connectionState))); + } +#endif placementExecution->assignedSession = session; @@ -3009,6 +3059,17 @@ ConnectionStateMachine(WorkerSession *session) do { currentState = connection->connectionState; +#if IN_MY_DEBUGGING_PHASE + { + if (LogRemoteCommands) + { + elog(NOTICE, "ConnectionStateMachine: connection=%lu state=%d node=%s:%d, pqstatus=%d", + connection->connectionId, connection->connectionState, connection->hostname, + connection->port, PQstatus(connection->pgConn)); + } + } +#endif + switch (currentState) { case MULTI_CONNECTION_INITIAL: @@ -3805,7 +3866,7 @@ PopAssignedPlacementExecution(WorkerSession *session) /* - * PopAssignedPlacementExecution finds an executable task from the queue of assigned tasks. + * PopUnAssignedPlacementExecution finds an executable task from the queue of assigned tasks. 
*/ static TaskPlacementExecution * PopUnassignedPlacementExecution(WorkerPool *workerPool) diff --git a/src/backend/distributed/metadata/metadata_cache.c b/src/backend/distributed/metadata/metadata_cache.c index 402dedb8a91..4f1b942a085 100644 --- a/src/backend/distributed/metadata/metadata_cache.c +++ b/src/backend/distributed/metadata/metadata_cache.c @@ -2522,6 +2522,8 @@ AvailableExtensionVersion(void) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("citus extension is not found"))); + + return NULL; /* keep compiler happy */ } diff --git a/src/backend/distributed/metadata/node_metadata.c b/src/backend/distributed/metadata/node_metadata.c index d93b133eaed..d9220594317 100644 --- a/src/backend/distributed/metadata/node_metadata.c +++ b/src/backend/distributed/metadata/node_metadata.c @@ -217,6 +217,9 @@ citus_set_coordinator_host(PG_FUNCTION_ARGS) EnsureTransactionalMetadataSyncMode(); } + /* prevent concurrent modification */ + LockRelationOid(DistNodeRelationId(), RowExclusiveLock); + bool isCoordinatorInMetadata = false; WorkerNode *coordinatorNode = PrimaryNodeForGroup(COORDINATOR_GROUP_ID, &isCoordinatorInMetadata); diff --git a/src/backend/distributed/planner/insert_select_planner.c b/src/backend/distributed/planner/insert_select_planner.c index 60d6ce466ca..15588025396 100644 --- a/src/backend/distributed/planner/insert_select_planner.c +++ b/src/backend/distributed/planner/insert_select_planner.c @@ -1810,6 +1810,8 @@ CastExpr(Expr *expr, Oid sourceType, Oid targetType, Oid targetCollation, ereport(ERROR, (errmsg("could not find a conversion path from type %d to %d", sourceType, targetType))); } + + return NULL; /* keep compiler happy */ } diff --git a/src/backend/distributed/planner/merge_planner.c b/src/backend/distributed/planner/merge_planner.c index 09d2d90acec..f8a18154627 100644 --- a/src/backend/distributed/planner/merge_planner.c +++ b/src/backend/distributed/planner/merge_planner.c @@ -243,14 +243,27 @@ 
CreateNonPushableMergePlan(Oid targetRelationId, uint64 planId, Query *originalQ CitusTableCacheEntry *targetRelation = GetCitusTableCacheEntry(targetRelationId); - /* - * Get the index of the column in the source query that will be utilized - * to repartition the source rows, ensuring colocation with the target - */ - distributedPlan->sourceResultRepartitionColumnIndex = - SourceResultPartitionColumnIndex(mergeQuery, - sourceQuery->targetList, - targetRelation); + + if (IsCitusTableType(targetRelation->relationId, SINGLE_SHARD_DISTRIBUTED)) + { + /* + * if target table is SINGLE_SHARD_DISTRIBUTED let's set this to invalid -1 + * so later in execution phase we don't rely on this value and try to find single shard of target instead. + */ + distributedPlan->sourceResultRepartitionColumnIndex = -1; + } + else + { + /* + * Get the index of the column in the source query that will be utilized + * to repartition the source rows, ensuring colocation with the target + */ + + distributedPlan->sourceResultRepartitionColumnIndex = + SourceResultPartitionColumnIndex(mergeQuery, + sourceQuery->targetList, + targetRelation); + } /* * Make a copy of the source query, since following code scribbles it @@ -262,11 +275,11 @@ CreateNonPushableMergePlan(Oid targetRelationId, uint64 planId, Query *originalQ int cursorOptions = CURSOR_OPT_PARALLEL_OK; PlannedStmt *sourceRowsPlan = pg_plan_query(sourceQueryCopy, NULL, cursorOptions, boundParams); - bool repartitioned = IsRedistributablePlan(sourceRowsPlan->planTree) && - IsSupportedRedistributionTarget(targetRelationId); + bool isRepartitionAllowed = IsRedistributablePlan(sourceRowsPlan->planTree) && + IsSupportedRedistributionTarget(targetRelationId); /* If plan is distributed, no work at the coordinator */ - if (repartitioned) + if (isRepartitionAllowed) { distributedPlan->modifyWithSelectMethod = MODIFY_WITH_SELECT_REPARTITION; } @@ -845,7 +858,7 @@ ConvertRelationRTEIntoSubquery(Query *mergeQuery, RangeTblEntry *sourceRte, 
newRangeTableRef->rtindex = SINGLE_RTE_INDEX; sourceResultsQuery->jointree = makeFromExpr(list_make1(newRangeTableRef), NULL); sourceResultsQuery->targetList = - CreateAllTargetListForRelation(sourceRte->relid, requiredAttributes); + CreateFilteredTargetListForRelation(sourceRte->relid, requiredAttributes); List *restrictionList = GetRestrictInfoListForRelation(sourceRte, plannerRestrictionContext); List *copyRestrictionList = copyObject(restrictionList); @@ -1273,13 +1286,6 @@ static int SourceResultPartitionColumnIndex(Query *mergeQuery, List *sourceTargetList, CitusTableCacheEntry *targetRelation) { - if (IsCitusTableType(targetRelation->relationId, SINGLE_SHARD_DISTRIBUTED)) - { - ereport(ERROR, (errmsg("MERGE operation across distributed schemas " - "or with a row-based distributed table is " - "not yet supported"))); - } - /* Get all the Join conditions from the ON clause */ List *mergeJoinConditionList = WhereClauseList(mergeQuery->jointree); Var *targetColumn = targetRelation->partitionColumn; diff --git a/src/backend/distributed/planner/multi_logical_optimizer.c b/src/backend/distributed/planner/multi_logical_optimizer.c index 76e38237ad6..28680deb036 100644 --- a/src/backend/distributed/planner/multi_logical_optimizer.c +++ b/src/backend/distributed/planner/multi_logical_optimizer.c @@ -1557,9 +1557,10 @@ MasterAggregateMutator(Node *originalNode, MasterAggregateWalkerContext *walkerC } else if (IsA(originalNode, Var)) { - Var *newColumn = copyObject((Var *) originalNode); - newColumn->varno = masterTableId; - newColumn->varattno = walkerContext->columnId; + Var *origColumn = (Var *) originalNode; + Var *newColumn = makeVar(masterTableId, walkerContext->columnId, + origColumn->vartype, origColumn->vartypmod, + origColumn->varcollid, origColumn->varlevelsup); walkerContext->columnId++; newNode = (Node *) newColumn; @@ -4753,22 +4754,35 @@ WorkerLimitCount(Node *limitCount, Node *limitOffset, OrderByLimitReference if (workerLimitNode != NULL && limitOffset 
!= NULL) { Const *workerLimitConst = (Const *) workerLimitNode; - Const *workerOffsetConst = (Const *) limitOffset; - int64 workerLimitCount = DatumGetInt64(workerLimitConst->constvalue); - int64 workerOffsetCount = DatumGetInt64(workerOffsetConst->constvalue); - workerLimitCount = workerLimitCount + workerOffsetCount; - workerLimitNode = (Node *) MakeIntegerConstInt64(workerLimitCount); + /* Only update the worker limit if the const is not null.*/ + if (!workerLimitConst->constisnull) + { + Const *workerOffsetConst = (Const *) limitOffset; + int64 workerLimitCount = DatumGetInt64(workerLimitConst->constvalue); + + /* If the offset is null, it defaults to 0 when cast to int64. */ + int64 workerOffsetCount = DatumGetInt64(workerOffsetConst->constvalue); + workerLimitCount = workerLimitCount + workerOffsetCount; + workerLimitNode = (Node *) MakeIntegerConstInt64(workerLimitCount); + } } /* display debug message on limit push down */ if (workerLimitNode != NULL) { Const *workerLimitConst = (Const *) workerLimitNode; - int64 workerLimitCount = DatumGetInt64(workerLimitConst->constvalue); + if (!workerLimitConst->constisnull) + { + int64 workerLimitCount = DatumGetInt64(workerLimitConst->constvalue); - ereport(DEBUG1, (errmsg("push down of limit count: " INT64_FORMAT, - workerLimitCount))); + ereport(DEBUG1, (errmsg("push down of limit count: " INT64_FORMAT, + workerLimitCount))); + } + else + { + ereport(DEBUG1, (errmsg("push down of limit count: ALL"))); + } } return workerLimitNode; diff --git a/src/backend/distributed/planner/query_colocation_checker.c b/src/backend/distributed/planner/query_colocation_checker.c index bef91618e42..d298b0f4639 100644 --- a/src/backend/distributed/planner/query_colocation_checker.c +++ b/src/backend/distributed/planner/query_colocation_checker.c @@ -45,8 +45,6 @@ static RangeTblEntry * AnchorRte(Query *subquery); static List * UnionRelationRestrictionLists(List *firstRelationList, List *secondRelationList); -static List * 
CreateFilteredTargetListForRelation(Oid relationId, - List *requiredAttributes); static List * CreateDummyTargetList(Oid relationId, List *requiredAttributes); static TargetEntry * CreateTargetEntryForColumn(Form_pg_attribute attributeTuple, Index rteIndex, @@ -378,7 +376,7 @@ CreateAllTargetListForRelation(Oid relationId, List *requiredAttributes) * only the required columns of the given relation. If there is not required * columns then a dummy NULL column is put as the only entry. */ -static List * +List * CreateFilteredTargetListForRelation(Oid relationId, List *requiredAttributes) { Relation relation = relation_open(relationId, AccessShareLock); diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index bd65fa60c01..18a23129c5a 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -994,6 +994,16 @@ RegisterCitusConfigVariables(void) GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE, NULL, NULL, NULL); + DefineCustomBoolVariable( + "citus.allow_2pc_quick_response", + gettext_noop("Enables to not wait response for 2pc last phase(commit or abort)"), + NULL, + &Enable2PCQuickResponse, + false, + PGC_USERSET, + GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE, + NULL, NULL, NULL); + DefineCustomIntVariable( "citus.background_task_queue_interval", gettext_noop("Time to wait between checks for scheduled background tasks."), diff --git a/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-1.sql b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-1.sql index 7b7d357ff9f..816341c5d57 100644 --- a/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-1.sql +++ b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-1.sql @@ -96,7 +96,7 @@ END; IF all_nodes_can_connect_to_each_other != True THEN RAISE EXCEPTION 'There are unhealth primary nodes, you need to ensure all ' - 'nodes are up and runnnig. 
Also, make sure that all nodes can connect ' + 'nodes are up and running. Also, make sure that all nodes can connect ' 'to each other. Use SELECT * FROM citus_check_cluster_node_health(); ' 'to check the cluster health'; ELSE diff --git a/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-2.sql b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-2.sql index 2b4bb17f6b4..4a253b15124 100644 --- a/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-2.sql +++ b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-2.sql @@ -96,7 +96,7 @@ END; IF all_nodes_can_connect_to_each_other != True THEN RAISE EXCEPTION 'There are unhealth primary nodes, you need to ensure all ' - 'nodes are up and runnnig. Also, make sure that all nodes can connect ' + 'nodes are up and running. Also, make sure that all nodes can connect ' 'to each other. Use SELECT * FROM citus_check_cluster_node_health(); ' 'to check the cluster health'; ELSE diff --git a/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-3.sql b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-3.sql index fae94a04a11..d6ba4a2b8f7 100644 --- a/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-3.sql +++ b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/11.0-3.sql @@ -96,7 +96,7 @@ END; IF all_nodes_can_connect_to_each_other != True THEN RAISE EXCEPTION 'There are unhealth primary nodes, you need to ensure all ' - 'nodes are up and runnnig. Also, make sure that all nodes can connect ' + 'nodes are up and running. Also, make sure that all nodes can connect ' 'to each other. 
Use SELECT * FROM citus_check_cluster_node_health(); ' 'to check the cluster health'; ELSE diff --git a/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/latest.sql b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/latest.sql index fae94a04a11..d6ba4a2b8f7 100644 --- a/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_finalize_upgrade_to_citus11/latest.sql @@ -96,7 +96,7 @@ END; IF all_nodes_can_connect_to_each_other != True THEN RAISE EXCEPTION 'There are unhealth primary nodes, you need to ensure all ' - 'nodes are up and runnnig. Also, make sure that all nodes can connect ' + 'nodes are up and running. Also, make sure that all nodes can connect ' 'to each other. Use SELECT * FROM citus_check_cluster_node_health(); ' 'to check the cluster health'; ELSE diff --git a/src/backend/distributed/transaction/remote_transaction.c b/src/backend/distributed/transaction/remote_transaction.c index 4c26e2478ca..4e3029fe8c2 100644 --- a/src/backend/distributed/transaction/remote_transaction.c +++ b/src/backend/distributed/transaction/remote_transaction.c @@ -578,6 +578,12 @@ FinishRemoteTransactionCommit(MultiConnection *connection) transaction->transactionState == REMOTE_TRANS_1PC_COMMITTING || transaction->transactionState == REMOTE_TRANS_2PC_COMMITTING); + if (Enable2PCQuickResponse && transaction->transactionState == REMOTE_TRANS_2PC_COMMITTING) + { + /* we don't set transactionState to COMMITTED here, as ShutdownConnection() will depend on this state */ + return; + } + PGresult *result = GetRemoteCommandResult(connection, raiseErrors); if (!IsResponseOK(result)) @@ -1036,6 +1042,10 @@ ResetRemoteTransaction(struct MultiConnection *connection) { /* XXX: Should we error out for a critical transaction? 
*/ + if (Enable2PCQuickResponse && LogRemoteCommands) + { + elog(NOTICE, "ResetRemoteTransaction for connection %s:%d", connection->hostname, connection->port); + } dlist_delete(&connection->transactionNode); connection->transactionInProgress = false; memset(&connection->transactionNode, 0, sizeof(connection->transactionNode)); diff --git a/src/backend/distributed/transaction/transaction_management.c b/src/backend/distributed/transaction/transaction_management.c index 9c7b456807e..7839476ed92 100644 --- a/src/backend/distributed/transaction/transaction_management.c +++ b/src/backend/distributed/transaction/transaction_management.c @@ -1203,3 +1203,23 @@ HasAnyObjectInPropagatedObjects(List *objectList) return false; } + + +static void __attribute__((unused)) +PrintInProgressTransactions(void) +{ + dlist_iter iter; + fprintf(stderr, "InProgressTransactions:\n"); + dlist_foreach(iter, &InProgressTransactions) + { + MultiConnection *connection = + dlist_container(MultiConnection, transactionNode, iter.cur); + RemoteTransaction *transaction = &connection->remoteTransaction; + fprintf(stderr, "Connection: %lu, remote status: %d, Status: %d, busy: %s, Failed: %d\n", + connection->connectionId, + transaction->transactionState, + PQtransactionStatus(connection->pgConn), + PQisBusy(connection->pgConn) ? 
"true" : "false", + transaction->transactionFailed); + } +} \ No newline at end of file diff --git a/src/include/distributed/connection_management.h b/src/include/distributed/connection_management.h index d93e4483abf..685c3a3e58d 100644 --- a/src/include/distributed/connection_management.h +++ b/src/include/distributed/connection_management.h @@ -347,4 +347,6 @@ extern bool CitusModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, extern double MillisecondsPassedSince(instr_time moment); extern long MillisecondsToTimeout(instr_time start, long msAfterStart); +extern void PrintConnectionHash(void); + #endif /* CONNECTION_MANAGMENT_H */ diff --git a/src/include/distributed/query_colocation_checker.h b/src/include/distributed/query_colocation_checker.h index 2a46d364cfe..485e4a03359 100644 --- a/src/include/distributed/query_colocation_checker.h +++ b/src/include/distributed/query_colocation_checker.h @@ -39,5 +39,7 @@ extern Query * WrapRteRelationIntoSubquery(RangeTblEntry *rteRelation, List *requiredAttributes, RTEPermissionInfo *perminfo); extern List * CreateAllTargetListForRelation(Oid relationId, List *requiredAttributes); +extern List * CreateFilteredTargetListForRelation(Oid relationId, + List *requiredAttributes); #endif /* QUERY_COLOCATION_CHECKER_H */ diff --git a/src/include/distributed/remote_commands.h b/src/include/distributed/remote_commands.h index 71cb9dad27f..1f2268f4a13 100644 --- a/src/include/distributed/remote_commands.h +++ b/src/include/distributed/remote_commands.h @@ -21,6 +21,7 @@ /* GUC, determining whether statements sent to remote nodes are logged */ extern bool LogRemoteCommands; extern char *GrepRemoteCommands; +extern bool Enable2PCQuickResponse; /* GUC that determines the number of bytes after which remote COPY is flushed */ extern int RemoteCopyFlushThreshold; diff --git a/src/test/regress/Pipfile b/src/test/regress/Pipfile index a863d795ec3..8811bbd8c67 100644 --- a/src/test/regress/Pipfile +++ b/src/test/regress/Pipfile @@ 
-5,7 +5,7 @@ verify_ssl = true [packages] mitmproxy = {editable = true, ref = "main", git = "https://github.com/citusdata/mitmproxy.git"} -construct = "==2.9.45" +construct = "*" docopt = "==0.6.2" cryptography = ">=41.0.4" pytest = "*" diff --git a/src/test/regress/Pipfile.lock b/src/test/regress/Pipfile.lock index c0f8734a02e..fb82a6573b4 100644 --- a/src/test/regress/Pipfile.lock +++ b/src/test/regress/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "bf20354a2d9c93d46041ac4c6fa427588ebfe29343ea0b02138b9079f2d82f18" + "sha256": "f8db86383082539f626f1402e720f5f2e3f9718b44a8f26110cf9f52e7ca46bc" }, "pipfile-spec": 6, "requires": { @@ -193,10 +193,12 @@ }, "construct": { "hashes": [ - "sha256:2271a0efd0798679dea825ff47e22a4c550456a5db0ba8baa82f7eae0af0118c" + "sha256:4d2472f9684731e58cc9c56c463be63baa1447d674e0d66aeb5627b22f512c29", + "sha256:c80be81ef595a1a821ec69dc16099550ed22197615f4320b57cc9ce2a672cb30" ], "index": "pypi", - "version": "==2.9.45" + "markers": "python_version >= '3.6'", + "version": "==2.10.70" }, "cryptography": { "hashes": [ diff --git a/src/test/regress/bin/normalize.sed b/src/test/regress/bin/normalize.sed index fb51bdc33ca..2dc5d6e8851 100644 --- a/src/test/regress/bin/normalize.sed +++ b/src/test/regress/bin/normalize.sed @@ -32,6 +32,7 @@ s/"t2_[0-9]+"/"t2_xxxxxxx"/g # shard table names for MERGE tests s/merge_schema\.([_a-z0-9]+)_40[0-9]+ /merge_schema.\1_xxxxxxx /g s/pgmerge_schema\.([_a-z0-9]+)_40[0-9]+ /pgmerge_schema.\1_xxxxxxx /g +s/merge_vcore_schema\.([_a-z0-9]+)_40[0-9]+ /merge_vcore_schema.\1_xxxxxxx /g # shard table names for multi_subquery s/ keyval(1|2|ref)_[0-9]+ / keyval\1_xxxxxxx /g diff --git a/src/test/regress/expected/issue_7705.out b/src/test/regress/expected/issue_7705.out new file mode 100644 index 00000000000..20b078226ad --- /dev/null +++ b/src/test/regress/expected/issue_7705.out @@ -0,0 +1,248 @@ +--- Test for verifying that column references (var nodes) in targets that cannot be pushed down +--- do
not cause issues for the postgres planner, in particular postgres versions 16+, where the +--- varnullingrels field of a VAR node may contain relids of join relations that can make the var +--- NULL; in a rewritten distributed query without a join such relids do not have a meaning. +--- Issue #7705: [SEGFAULT] Querying distributed tables with window partition causes segmentation fault +--- https://github.com/citusdata/citus/issues/7705 +CREATE SCHEMA issue_7705; +SET search_path to 'issue_7705'; +SET citus.next_shard_id TO 30070000; +SET citus.shard_replication_factor TO 1; +SET citus.enable_local_execution TO ON; +CREATE TABLE t1 (id INT PRIMARY KEY); +INSERT INTO t1 VALUES (1), (2); +CREATE TABLE t2 (id INT, account_id INT, a2 INT, PRIMARY KEY(id, account_id)); +INSERT INTO t2 VALUES (3, 1, 10), (4, 2, 20), (5, 1, NULL); +SELECT create_distributed_table('t1', 'id'); +NOTICE: Copying data from local table... +NOTICE: copying the data has completed +DETAIL: The local data in the table is no longer visible, but is still on disk. +HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$issue_7705.t1$$) + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('t2', 'account_id'); +NOTICE: Copying data from local table... +NOTICE: copying the data has completed +DETAIL: The local data in the table is no longer visible, but is still on disk. 
+HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$issue_7705.t2$$) + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- Test the issue seen in #7705; a target expression with +-- a window function that cannot be pushed down because the +-- partion by is not on the distribution column also includes +-- a column from the inner side of a left outer join, which +-- produces a non-empty varnullingrels set in PG 16 (and higher) +SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id) +FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id; + id | max +--------------------------------------------------------------------- + 1 | 10 + 2 | 20 + 1 | +(3 rows) + +EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF) +SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id) +FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id; + QUERY PLAN +--------------------------------------------------------------------- + WindowAgg + Output: remote_scan.id, max(remote_scan.max) OVER (?), remote_scan.worker_column_3 + -> Sort + Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max + Sort Key: remote_scan.worker_column_3 + -> Custom Scan (Citus Adaptive) + Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max + Task Count: 4 + Tasks Shown: One of 4 + -> Task + Query: SELECT worker_column_1 AS id, worker_column_2 AS max, worker_column_3 FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2, t2.id AS worker_column_3 FROM (issue_7705.t1_30070000 t1 LEFT JOIN issue_7705.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery + Node: host=localhost port=xxxxx dbname=regression + -> Hash Right Join + Output: t1.id, t2.a2, t2.id + Inner Unique: true + Hash Cond: (t2.account_id = t1.id) + -> Seq Scan on issue_7705.t2_30070004 t2 + Output: t2.id, t2.account_id, t2.a2 + -> Hash + Output: t1.id + -> Seq Scan on issue_7705.t1_30070000 t1 + Output: t1.id +(22 
rows) + +SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id) +FROM t2 RIGHT OUTER JOIN t1 ON t1.id = t2.account_id; + id | max +--------------------------------------------------------------------- + 1 | 10 + 2 | 20 + 1 | +(3 rows) + +EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF) +SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id) +FROM t2 RIGHT OUTER JOIN t1 ON t1.id = t2.account_id; + QUERY PLAN +--------------------------------------------------------------------- + WindowAgg + Output: remote_scan.id, max(remote_scan.max) OVER (?), remote_scan.worker_column_3 + -> Sort + Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max + Sort Key: remote_scan.worker_column_3 + -> Custom Scan (Citus Adaptive) + Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max + Task Count: 4 + Tasks Shown: One of 4 + -> Task + Query: SELECT worker_column_1 AS id, worker_column_2 AS max, worker_column_3 FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2, t2.id AS worker_column_3 FROM (issue_7705.t2_30070004 t2 RIGHT JOIN issue_7705.t1_30070000 t1 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery + Node: host=localhost port=xxxxx dbname=regression + -> Hash Right Join + Output: t1.id, t2.a2, t2.id + Inner Unique: true + Hash Cond: (t2.account_id = t1.id) + -> Seq Scan on issue_7705.t2_30070004 t2 + Output: t2.id, t2.account_id, t2.a2 + -> Hash + Output: t1.id + -> Seq Scan on issue_7705.t1_30070000 t1 + Output: t1.id +(22 rows) + +SELECT DISTINCT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id) +FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id; + id | max +--------------------------------------------------------------------- + 1 | + 1 | 10 + 2 | 20 +(3 rows) + +EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF) +SELECT DISTINCT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id) +FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id; + QUERY PLAN +--------------------------------------------------------------------- + HashAggregate + Output: 
remote_scan.id, (max(remote_scan.max) OVER (?)), remote_scan.worker_column_3 + Group Key: remote_scan.id, max(remote_scan.max) OVER (?) + -> WindowAgg + Output: remote_scan.id, max(remote_scan.max) OVER (?), remote_scan.worker_column_3 + -> Sort + Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max + Sort Key: remote_scan.worker_column_3 + -> Custom Scan (Citus Adaptive) + Output: remote_scan.worker_column_3, remote_scan.id, remote_scan.max + Task Count: 4 + Tasks Shown: One of 4 + -> Task + Query: SELECT worker_column_1 AS id, worker_column_2 AS max, worker_column_3 FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2, t2.id AS worker_column_3 FROM (issue_7705.t1_30070000 t1 LEFT JOIN issue_7705.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery + Node: host=localhost port=xxxxx dbname=regression + -> Hash Right Join + Output: t1.id, t2.a2, t2.id + Inner Unique: true + Hash Cond: (t2.account_id = t1.id) + -> Seq Scan on issue_7705.t2_30070004 t2 + Output: t2.id, t2.account_id, t2.a2 + -> Hash + Output: t1.id + -> Seq Scan on issue_7705.t1_30070000 t1 + Output: t1.id +(25 rows) + +CREATE SEQUENCE test_seq START 101; +CREATE OR REPLACE FUNCTION TEST_F(int) returns INT language sql stable as $$ select $1 + 42; $$ ; +-- Issue #7705 also occurs if a target expression includes a column +-- of a distributed table that is on the inner side of a left outer +-- join and a call to nextval(), because nextval() cannot be pushed +-- down, and must be run on the coordinator +SELECT t1.id, TEST_F(t2.a2 + nextval('test_seq') :: int) +FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id +ORDER BY t1.id; + id | test_f +--------------------------------------------------------------------- + 1 | 153 + 1 | + 2 | 165 +(3 rows) + +EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF) +SELECT t1.id, TEST_F(t2.a2 + nextval('test_seq') :: int) +FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id +ORDER BY t1.id; + QUERY PLAN 
+--------------------------------------------------------------------- + Result + Output: remote_scan.id, ((remote_scan.test_f + (nextval('test_seq'::regclass))::integer) + 42) + -> Sort + Output: remote_scan.id, remote_scan.test_f + Sort Key: remote_scan.id + -> Custom Scan (Citus Adaptive) + Output: remote_scan.id, remote_scan.test_f + Task Count: 4 + Tasks Shown: One of 4 + -> Task + Query: SELECT worker_column_1 AS id, worker_column_2 AS test_f FROM (SELECT t1.id AS worker_column_1, t2.a2 AS worker_column_2 FROM (issue_7705.t1_30070000 t1 LEFT JOIN issue_7705.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery + Node: host=localhost port=xxxxx dbname=regression + -> Hash Right Join + Output: t1.id, t2.a2 + Inner Unique: true + Hash Cond: (t2.account_id = t1.id) + -> Seq Scan on issue_7705.t2_30070004 t2 + Output: t2.id, t2.account_id, t2.a2 + -> Hash + Output: t1.id + -> Seq Scan on issue_7705.t1_30070000 t1 + Output: t1.id +(22 rows) + +SELECT t1.id, CASE nextval('test_seq') % 2 = 0 WHEN true THEN t2.a2 ELSE 1 END +FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id +ORDER BY t1.id; + id | case +--------------------------------------------------------------------- + 1 | 10 + 1 | 1 + 2 | 20 +(3 rows) + +EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF) +SELECT t1.id, CASE nextval('test_seq') %2 = 0 WHEN true THEN t2.a2 ELSE 1 END +FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id +ORDER BY t1.id; + QUERY PLAN +--------------------------------------------------------------------- + Result + Output: remote_scan.id, CASE ((nextval('test_seq'::regclass) % '2'::bigint) = 0) WHEN CASE_TEST_EXPR THEN remote_scan."case" ELSE 1 END + -> Sort + Output: remote_scan.id, remote_scan."case" + Sort Key: remote_scan.id + -> Custom Scan (Citus Adaptive) + Output: remote_scan.id, remote_scan."case" + Task Count: 4 + Tasks Shown: One of 4 + -> Task + Query: SELECT worker_column_1 AS id, worker_column_2 AS "case" FROM (SELECT t1.id AS worker_column_1, t2.a2 
AS worker_column_2 FROM (issue_7705.t1_30070000 t1 LEFT JOIN issue_7705.t2_30070004 t2 ON ((t1.id OPERATOR(pg_catalog.=) t2.account_id)))) worker_subquery + Node: host=localhost port=xxxxx dbname=regression + -> Hash Right Join + Output: t1.id, t2.a2 + Inner Unique: true + Hash Cond: (t2.account_id = t1.id) + -> Seq Scan on issue_7705.t2_30070004 t2 + Output: t2.id, t2.account_id, t2.a2 + -> Hash + Output: t1.id + -> Seq Scan on issue_7705.t1_30070000 t1 + Output: t1.id +(22 rows) + +--- cleanup +\set VERBOSITY TERSE +DROP SCHEMA issue_7705 CASCADE; +NOTICE: drop cascades to 4 other objects +RESET all; diff --git a/src/test/regress/expected/merge_schema_sharding.out b/src/test/regress/expected/merge_schema_sharding.out index 8a9ba89dd46..17f6f6adb6e 100644 --- a/src/test/regress/expected/merge_schema_sharding.out +++ b/src/test/regress/expected/merge_schema_sharding.out @@ -98,14 +98,26 @@ WHEN MATCHED THEN UPDATE SET b = nullkey_c2_t1.b; DEBUG: Distributed tables are not co-located, try repartitioning DEBUG: For MERGE command, all the distributed tables must be colocated DEBUG: Creating MERGE repartition plan -ERROR: MERGE operation across distributed schemas or with a row-based distributed table is not yet supported +DEBUG: Distributed planning for a fast-path router query +DEBUG: Creating router plan +DEBUG: Collect source query results on coordinator +DEBUG: Create a MERGE task list that needs to be routed +DEBUG: +DEBUG: distributed statement: MERGE INTO schema_shard_table1.nullkey_c1_t1_4005006 citus_table_alias USING (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('merge_into_XXX_4005006'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) nullkey_c2_t1 ON (citus_table_alias.a OPERATOR(pg_catalog.=) nullkey_c2_t1.a) WHEN MATCHED THEN UPDATE SET b = nullkey_c2_t1.b +DEBUG: Execute MERGE task list MERGE INTO schema_shard_table1.nullkey_c1_t1 USING nullkey_c2_t1 ON 
(schema_shard_table1.nullkey_c1_t1.a = nullkey_c2_t1.a) WHEN MATCHED THEN UPDATE SET b = nullkey_c2_t1.b WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c2_t1.a, nullkey_c2_t1.b); DEBUG: Distributed tables are not co-located, try repartitioning DEBUG: For MERGE command, all the distributed tables must be colocated DEBUG: Creating MERGE repartition plan -ERROR: MERGE operation across distributed schemas or with a row-based distributed table is not yet supported +DEBUG: Distributed planning for a fast-path router query +DEBUG: Creating router plan +DEBUG: Collect source query results on coordinator +DEBUG: Create a MERGE task list that needs to be routed +DEBUG: +DEBUG: distributed statement: MERGE INTO schema_shard_table1.nullkey_c1_t1_4005006 citus_table_alias USING (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('merge_into_XXX_4005006'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) nullkey_c2_t1 ON (citus_table_alias.a OPERATOR(pg_catalog.=) nullkey_c2_t1.a) WHEN MATCHED THEN UPDATE SET b = nullkey_c2_t1.b WHEN NOT MATCHED THEN INSERT (a, b) VALUES (nullkey_c2_t1.a, nullkey_c2_t1.b) +DEBUG: Execute MERGE task list -- with a distributed table SET search_path TO schema_shard_table1; MERGE INTO nullkey_c1_t1 USING schema_shard_table.distributed_table ON (nullkey_c1_t1.a = schema_shard_table.distributed_table.a) @@ -114,7 +126,12 @@ WHEN NOT MATCHED THEN INSERT VALUES (schema_shard_table.distributed_table.a, sch DEBUG: Distributed tables are not co-located, try repartitioning DEBUG: For MERGE command, all the distributed tables must be colocated DEBUG: Creating MERGE repartition plan -ERROR: MERGE operation across distributed schemas or with a row-based distributed table is not yet supported +DEBUG: Router planner cannot handle multi-shard select queries +DEBUG: Collect source query results on coordinator +DEBUG: Create a MERGE task list that needs to be routed +DEBUG: +DEBUG: distributed statement: 
MERGE INTO schema_shard_table1.nullkey_c1_t1_4005006 citus_table_alias USING (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('merge_into_XXX_4005006'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) distributed_table ON (citus_table_alias.a OPERATOR(pg_catalog.=) distributed_table.a) WHEN MATCHED THEN UPDATE SET b = distributed_table.b WHEN NOT MATCHED THEN INSERT (a, b) VALUES (distributed_table.a, distributed_table.b) +DEBUG: Execute MERGE task list MERGE INTO schema_shard_table.distributed_table USING nullkey_c1_t1 ON (nullkey_c1_t1.a = schema_shard_table.distributed_table.a) WHEN MATCHED THEN DELETE WHEN NOT MATCHED THEN INSERT VALUES (nullkey_c1_t1.a, nullkey_c1_t1.b); @@ -163,7 +180,13 @@ WHEN MATCHED THEN UPDATE SET b = schema_shard_table.reference_table.b; DEBUG: A mix of distributed and reference table, try repartitioning DEBUG: A mix of distributed and reference table, routable query is not possible DEBUG: Creating MERGE repartition plan -ERROR: MERGE operation across distributed schemas or with a row-based distributed table is not yet supported +DEBUG: Distributed planning for a fast-path router query +DEBUG: Creating router plan +DEBUG: Collect source query results on coordinator +DEBUG: Create a MERGE task list that needs to be routed +DEBUG: +DEBUG: distributed statement: MERGE INTO schema_shard_table1.nullkey_c1_t1_4005006 citus_table_alias USING (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('merge_into_XXX_4005006'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) reference_table ON (citus_table_alias.a OPERATOR(pg_catalog.=) reference_table.a) WHEN MATCHED THEN UPDATE SET b = reference_table.b +DEBUG: Execute MERGE task list MERGE INTO schema_shard_table.reference_table USING nullkey_c1_t1 ON (nullkey_c1_t1.a = schema_shard_table.reference_table.a) WHEN MATCHED THEN UPDATE SET b = nullkey_c1_t1.b WHEN NOT MATCHED THEN 
INSERT VALUES (nullkey_c1_t1.a, nullkey_c1_t1.b); @@ -174,7 +197,13 @@ WHEN MATCHED THEN UPDATE SET b = schema_shard_table.citus_local_table.b; DEBUG: A mix of distributed and local table, try repartitioning DEBUG: A mix of distributed and citus-local table, routable query is not possible DEBUG: Creating MERGE repartition plan -ERROR: MERGE operation across distributed schemas or with a row-based distributed table is not yet supported +DEBUG: Distributed planning for a fast-path router query +DEBUG: Creating router plan +DEBUG: Collect source query results on coordinator +DEBUG: Create a MERGE task list that needs to be routed +DEBUG: +DEBUG: distributed statement: MERGE INTO schema_shard_table1.nullkey_c1_t1_4005006 citus_table_alias USING (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('merge_into_XXX_4005006'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) citus_local_table ON (citus_table_alias.a OPERATOR(pg_catalog.=) citus_local_table.a) WHEN MATCHED THEN UPDATE SET b = citus_local_table.b +DEBUG: Execute MERGE task list MERGE INTO schema_shard_table.citus_local_table USING nullkey_c1_t1 ON (nullkey_c1_t1.a = schema_shard_table.citus_local_table.a) WHEN MATCHED THEN DELETE; DEBUG: A mix of distributed and local table, try repartitioning @@ -210,7 +239,12 @@ WHEN MATCHED THEN UPDATE SET b = cte.b; DEBUG: Distributed tables are not co-located, try repartitioning DEBUG: For MERGE command, all the distributed tables must be colocated DEBUG: Creating MERGE repartition plan -ERROR: MERGE operation across distributed schemas or with a row-based distributed table is not yet supported +DEBUG: Router planner cannot handle multi-shard select queries +DEBUG: Collect source query results on coordinator +DEBUG: Create a MERGE task list that needs to be routed +DEBUG: +DEBUG: distributed statement: MERGE INTO schema_shard_table1.nullkey_c1_t1_4005006 citus_table_alias USING (SELECT intermediate_result.a, 
intermediate_result.b FROM read_intermediate_result('merge_into_XXX_4005006'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) cte ON (citus_table_alias.a OPERATOR(pg_catalog.=) cte.a) WHEN MATCHED THEN UPDATE SET b = cte.b +DEBUG: Execute MERGE task list WITH cte AS materialized ( SELECT * FROM schema_shard_table.distributed_table ) @@ -219,7 +253,12 @@ WHEN MATCHED THEN UPDATE SET b = cte.b; DEBUG: Distributed tables are not co-located, try repartitioning DEBUG: For MERGE command, all the distributed tables must be colocated DEBUG: Creating MERGE repartition plan -ERROR: MERGE operation across distributed schemas or with a row-based distributed table is not yet supported +DEBUG: Router planner cannot handle multi-shard select queries +DEBUG: Collect source query results on coordinator +DEBUG: Create a MERGE task list that needs to be routed +DEBUG: +DEBUG: distributed statement: MERGE INTO schema_shard_table1.nullkey_c1_t1_4005006 citus_table_alias USING (SELECT intermediate_result.a, intermediate_result.b FROM read_intermediate_result('merge_into_XXX_4005006'::text, 'binary'::citus_copy_format) intermediate_result(a integer, b integer)) cte ON (citus_table_alias.a OPERATOR(pg_catalog.=) cte.a) WHEN MATCHED THEN UPDATE SET b = cte.b +DEBUG: Execute MERGE task list SET client_min_messages TO WARNING; DROP SCHEMA schema_shard_table1 CASCADE; DROP SCHEMA schema_shard_table2 CASCADE; diff --git a/src/test/regress/expected/merge_vcore.out b/src/test/regress/expected/merge_vcore.out new file mode 100644 index 00000000000..0eccb811b17 --- /dev/null +++ b/src/test/regress/expected/merge_vcore.out @@ -0,0 +1,587 @@ +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 +\gset +\if :server_version_ge_15 +\else +\q +\endif +-- MERGE command performs a join from data_source to target_table_name +DROP SCHEMA IF EXISTS merge_vcore_schema CASCADE; +NOTICE: schema "merge_vcore_schema" does not exist, 
skipping +--MERGE INTO target +--USING source +--WHEN NOT MATCHED +--WHEN MATCHED AND +--WHEN MATCHED +CREATE SCHEMA merge_vcore_schema; +SET search_path TO merge_vcore_schema; +SET citus.shard_count TO 4; +SET citus.next_shard_id TO 4000000; +SET citus.explain_all_tasks TO true; +SET citus.shard_replication_factor TO 1; +SET citus.max_adaptive_executor_pool_size TO 1; +SET client_min_messages = warning; +SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +RESET client_min_messages; +-- ****************************************** CASE 1 : Both are singleSharded*************************************** +CREATE TABLE source ( + id bigint, + doc text +); +CREATE TABLE target ( + id bigint, + doc text +); +SELECT create_distributed_table('source', null, colocate_with=>'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('target', null, colocate_with=>'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO source (id, doc) VALUES (1, '{"a" : 1}'), (1, '{"a" : 2}'); +-- insert +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); +SELECT * FROM target; + id | doc +--------------------------------------------------------------------- + 2 | {"a" : 1} + 2 | {"a" : 2} +(2 rows) + +-- update +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); +SELECT * FROM target; + id | doc 
+--------------------------------------------------------------------- + 2 | {"b" : 1} + 2 | {"b" : 1} +(2 rows) + +-- Explain +EXPLAIN (costs off, timing off, summary off) MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) +WHEN MATCHED THEN DO NOTHING; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (Citus MERGE INTO ...) + MERGE INTO target method: pull to coordinator + -> Custom Scan (Citus Adaptive) + Task Count: 1 + Tasks Shown: All + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000000 source +(8 rows) + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; +-- *************** CASE 2 : source is single sharded and target is distributed ******************************* +CREATE TABLE source ( + id bigint, + doc text +); +CREATE TABLE target ( + id bigint, + doc text +); +SELECT create_distributed_table('source', null, colocate_with=>'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('target', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO source (id, doc) VALUES (1, '{"a" : 1}'), (1, '{"a" : 2}'); +-- insert +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); +SELECT * FROM target; + id | doc +--------------------------------------------------------------------- + 2 | {"a" : 1} + 2 | {"a" : 2} +(2 rows) + +-- update +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES 
(src.t_id, doc); +SELECT * FROM target; + id | doc +--------------------------------------------------------------------- + 2 | {"b" : 1} + 2 | {"b" : 1} +(2 rows) + +-- Explain +EXPLAIN (costs off, timing off, summary off) MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) +WHEN MATCHED THEN DO NOTHING; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (Citus MERGE INTO ...) + MERGE INTO target method: pull to coordinator + -> Custom Scan (Citus Adaptive) + Task Count: 1 + Tasks Shown: All + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000002 source +(8 rows) + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; +-- *************** CASE 3 : source is distributed and target is single sharded ******************************* +CREATE TABLE source ( + id bigint, + doc text +); +CREATE TABLE target ( + id bigint, + doc text +); +SELECT create_distributed_table('source', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('target', null); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO source (id, doc) VALUES (1, '{"a" : 1}'), (1, '{"a" : 2}'); +-- insert +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); +SELECT * FROM target; + id | doc +--------------------------------------------------------------------- + 2 | {"a" : 1} + 2 | {"a" : 2} +(2 rows) + +-- update +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN 
+INSERT (id, doc) +VALUES (src.t_id, doc); +SELECT * FROM target; + id | doc +--------------------------------------------------------------------- + 2 | {"b" : 1} + 2 | {"b" : 1} +(2 rows) + +-- Explain +EXPLAIN (costs off, timing off, summary off) MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) +WHEN MATCHED THEN DO NOTHING; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (Citus MERGE INTO ...) + MERGE INTO target method: pull to coordinator + -> Custom Scan (Citus Adaptive) + Task Count: 4 + Tasks Shown: All + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000007 source + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000008 source + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000009 source + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000010 source +(17 rows) + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; +-- *************** CASE 4 : both are distributed ******************************* +CREATE TABLE source ( + id bigint, + doc text +); +CREATE TABLE target ( + id bigint, + doc text +); +SELECT create_distributed_table('source', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('target', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO source (id, doc) VALUES (1, '{"a" : 1}'), (1, '{"a" : 2}'); +-- insert +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); +SELECT * FROM target; + id | doc 
+--------------------------------------------------------------------- + 2 | {"a" : 1} + 2 | {"a" : 2} +(2 rows) + +-- update +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); +SELECT * FROM target; + id | doc +--------------------------------------------------------------------- + 2 | {"b" : 1} + 2 | {"b" : 1} +(2 rows) + +-- Explain +EXPLAIN (costs off, timing off, summary off) MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) +WHEN MATCHED THEN DO NOTHING; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (Citus MERGE INTO ...) + MERGE INTO target method: repartition + -> Custom Scan (Citus Adaptive) + Task Count: 4 + Tasks Shown: All + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000012 source + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000013 source + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000014 source + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000015 source +(17 rows) + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; +-- *************** CASE 5 : both are distributed & colocated ******************************* +CREATE TABLE source ( + id bigint, + doc text +); +CREATE TABLE target ( + id bigint, + doc text +); +SELECT create_distributed_table('source', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('target', 'id', colocate_with=>'source'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO source (id, doc) VALUES 
(1, '{"a" : 1}'), (1, '{"a" : 2}'); +-- insert +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); +SELECT * FROM target; + id | doc +--------------------------------------------------------------------- + 2 | {"a" : 1} + 2 | {"a" : 2} +(2 rows) + +-- update +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); +SELECT * FROM target; + id | doc +--------------------------------------------------------------------- + 2 | {"b" : 1} + 2 | {"b" : 1} +(2 rows) + +-- Explain +EXPLAIN (costs off, timing off, summary off) MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) +WHEN MATCHED THEN DO NOTHING; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (Citus MERGE INTO ...) 
+ MERGE INTO target method: repartition + -> Custom Scan (Citus Adaptive) + Task Count: 4 + Tasks Shown: All + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000020 source + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000021 source + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000022 source + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_4000023 source +(17 rows) + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; +-- *************** CASE 6 : both are singlesharded & colocated ******************************* +CREATE TABLE source ( + id bigint, + doc text +); +CREATE TABLE target ( + id bigint, + doc text +); +SELECT create_distributed_table('source', null); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('target', null, colocate_with=>'source'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO source (id, doc) VALUES (1, '{"a" : 1}'), (1, '{"a" : 2}'); +-- insert +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); +SELECT * FROM target; + id | doc +--------------------------------------------------------------------- + 2 | {"a" : 1} + 2 | {"a" : 2} +(2 rows) + +-- update +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); +SELECT * FROM target; + id | doc +--------------------------------------------------------------------- + 2 | {"b" : 1} + 2 | {"b" : 
1} +(2 rows) + +-- Explain +EXPLAIN (costs off, timing off, summary off) MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) +WHEN MATCHED THEN DO NOTHING; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (Citus Adaptive) + Task Count: 1 + Tasks Shown: All + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Merge on target_4000029 target + -> Nested Loop + -> Seq Scan on source_4000028 source + -> Materialize + -> Seq Scan on target_4000029 target + Filter: ('2'::bigint = id) +(11 rows) + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; +-- Bug Fix Test as part of this PR +-- Test 1 +CREATE TABLE source ( + id int, + age int, + salary int +); +CREATE TABLE target ( + id int, + age int, + salary int +); +SELECT create_distributed_table('source', 'id', colocate_with=>'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('target', 'id', colocate_with=>'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO source (id, age, salary) VALUES (1,30, 100000); +MERGE INTO ONLY target USING source ON (source.id = target.id) +WHEN NOT MATCHED THEN +INSERT (id, salary) VALUES (source.id, source.salary); +SELECT * FROM TARGET; + id | age | salary +--------------------------------------------------------------------- + 1 | | 100000 +(1 row) + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; +-- Test 2 +CREATE TABLE source ( + id int, + age int, + salary int +); +CREATE TABLE target ( + id int, + age int, + salary int +); +SELECT create_distributed_table('source', 'id', colocate_with=>'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('target', 'id', 
colocate_with=>'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO source (id, age, salary) VALUES (1,30, 100000); +MERGE INTO ONLY target USING source ON (source.id = target.id) +WHEN NOT MATCHED THEN +INSERT (salary, id) VALUES (source.salary, source.id); +SELECT * FROM TARGET; + id | age | salary +--------------------------------------------------------------------- + 1 | | 100000 +(1 row) + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; +-- Test 3 +CREATE TABLE source ( + id int, + age int, + salary int +); +CREATE TABLE target ( + id int, + age int, + salary int +); +SELECT create_distributed_table('source', 'id', colocate_with=>'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('target', 'id', colocate_with=>'none'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO source (id, age, salary) VALUES (1,30, 100000); +MERGE INTO ONLY target USING source ON (source.id = target.id) +WHEN NOT MATCHED THEN +INSERT (salary, id, age) VALUES (source.age, source.id, source.salary); +SELECT * FROM TARGET; + id | age | salary +--------------------------------------------------------------------- + 1 | 100000 | 30 +(1 row) + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; +DROP SCHEMA IF EXISTS merge_vcore_schema CASCADE; diff --git a/src/test/regress/expected/merge_vcore_0.out b/src/test/regress/expected/merge_vcore_0.out new file mode 100644 index 00000000000..a7e3fbf2062 --- /dev/null +++ b/src/test/regress/expected/merge_vcore_0.out @@ -0,0 +1,6 @@ +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 +\gset +\if :server_version_ge_15 +\else +\q diff --git a/src/test/regress/expected/multi_limit_clause.out 
b/src/test/regress/expected/multi_limit_clause.out index 65304b777e8..83cd0583703 100644 --- a/src/test/regress/expected/multi_limit_clause.out +++ b/src/test/regress/expected/multi_limit_clause.out @@ -521,6 +521,86 @@ SELECT 1 | 1 (1 row) +-- check if we can correctly push the limit when it is null +SELECT l_orderkey FROM lineitem WHERE l_orderkey < 3 ORDER BY l_orderkey LIMIT null; +DEBUG: push down of limit count: ALL + l_orderkey +--------------------------------------------------------------------- + 1 + 1 + 1 + 1 + 1 + 1 + 2 +(7 rows) + +SELECT l_orderkey FROM lineitem WHERE l_orderkey < 3 ORDER BY l_orderkey OFFSET 1 LIMIT null; +DEBUG: push down of limit count: ALL + l_orderkey +--------------------------------------------------------------------- + 1 + 1 + 1 + 1 + 1 + 2 +(6 rows) + +SELECT count(*) FROM lineitem LIMIT null; +DEBUG: push down of limit count: ALL + count +--------------------------------------------------------------------- + 12000 +(1 row) + +SELECT count(*) FROM lineitem OFFSET 0 LIMIT null; +DEBUG: push down of limit count: ALL + count +--------------------------------------------------------------------- + 12000 +(1 row) + +-- check if we push the right limit when both offset and limit are given +SELECT l_orderkey FROM lineitem WHERE l_orderkey < 3 ORDER BY l_orderkey OFFSET 1 LIMIT 3; +DEBUG: push down of limit count: 4 + l_orderkey +--------------------------------------------------------------------- + 1 + 1 + 1 +(3 rows) + +SELECT l_orderkey FROM lineitem WHERE l_orderkey < 3 ORDER BY l_orderkey OFFSET null LIMIT 1; +DEBUG: push down of limit count: 1 + l_orderkey +--------------------------------------------------------------------- + 1 +(1 row) + +-- check if we can correctly push the limit when it is all +SELECT l_orderkey FROM lineitem WHERE l_orderkey < 2 LIMIT all; +DEBUG: push down of limit count: ALL + l_orderkey +--------------------------------------------------------------------- + 1 + 1 + 1 + 1 + 1 + 1 +(6 rows) + 
+SELECT l_orderkey FROM lineitem WHERE l_orderkey < 2 OFFSET 2 LIMIT all; +DEBUG: push down of limit count: ALL + l_orderkey +--------------------------------------------------------------------- + 1 + 1 + 1 + 1 +(4 rows) + SET client_min_messages TO NOTICE; -- non constants should not push down CREATE OR REPLACE FUNCTION my_limit() diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule index 7f0c7ca57f6..bbb4047a950 100644 --- a/src/test/regress/multi_schedule +++ b/src/test/regress/multi_schedule @@ -103,7 +103,7 @@ test: multi_dropped_column_aliases foreign_key_restriction_enforcement test: binary_protocol test: alter_table_set_access_method test: alter_distributed_table -test: issue_5248 issue_5099 issue_5763 issue_6543 issue_6758 issue_7477 +test: issue_5248 issue_5099 issue_5763 issue_6543 issue_6758 issue_7477 issue_7705 test: object_propagation_debug test: undistribute_table test: run_command_on_all_nodes @@ -120,6 +120,7 @@ test: merge pgmerge test: merge_repartition2 test: merge_repartition1 merge_schema_sharding test: merge_partition_tables +test: merge_vcore # --------- # test that no tests leaked intermediate results. This should always be last diff --git a/src/test/regress/sql/issue_7705.sql b/src/test/regress/sql/issue_7705.sql new file mode 100644 index 00000000000..950933017ad --- /dev/null +++ b/src/test/regress/sql/issue_7705.sql @@ -0,0 +1,72 @@ +--- Test for verifying that column references (var nodes) in targets that cannot be pushed down +--- do not cause issues for the postgres planner, in particular postgres versions 16+, where the +--- varnullingrels field of a VAR node may contain relids of join relations that can make the var +--- NULL; in a rewritten distributed query without a join such relids do not have a meaning. 
+--- Issue #7705: [SEGFAULT] Querying distributed tables with window partition causes segmentation fault +--- https://github.com/citusdata/citus/issues/7705 + +CREATE SCHEMA issue_7705; +SET search_path to 'issue_7705'; +SET citus.next_shard_id TO 30070000; +SET citus.shard_replication_factor TO 1; +SET citus.enable_local_execution TO ON; + +CREATE TABLE t1 (id INT PRIMARY KEY); +INSERT INTO t1 VALUES (1), (2); + +CREATE TABLE t2 (id INT, account_id INT, a2 INT, PRIMARY KEY(id, account_id)); +INSERT INTO t2 VALUES (3, 1, 10), (4, 2, 20), (5, 1, NULL); + +SELECT create_distributed_table('t1', 'id'); +SELECT create_distributed_table('t2', 'account_id'); + +-- Test the issue seen in #7705; a target expression with +-- a window function that cannot be pushed down because the +-- partion by is not on the distribution column also includes +-- a column from the inner side of a left outer join, which +-- produces a non-empty varnullingrels set in PG 16 (and higher) +SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id) +FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id; +EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF) +SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id) +FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id; + +SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id) +FROM t2 RIGHT OUTER JOIN t1 ON t1.id = t2.account_id; +EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF) +SELECT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id) +FROM t2 RIGHT OUTER JOIN t1 ON t1.id = t2.account_id; + +SELECT DISTINCT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id) +FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id; +EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF) +SELECT DISTINCT t1.id, MAX(t2.a2) OVER (PARTITION BY t2.id) +FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id; + +CREATE SEQUENCE test_seq START 101; +CREATE OR REPLACE FUNCTION TEST_F(int) returns INT language sql stable as $$ select $1 + 42; $$ ; + +-- Issue #7705 also occurs if a target expression includes a column +-- of a distributed table that 
is on the inner side of a left outer +-- join and a call to nextval(), because nextval() cannot be pushed +-- down, and must be run on the coordinator +SELECT t1.id, TEST_F(t2.a2 + nextval('test_seq') :: int) +FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id +ORDER BY t1.id; +EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF) +SELECT t1.id, TEST_F(t2.a2 + nextval('test_seq') :: int) +FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id +ORDER BY t1.id; + +SELECT t1.id, CASE nextval('test_seq') % 2 = 0 WHEN true THEN t2.a2 ELSE 1 END +FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id +ORDER BY t1.id; +EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF) +SELECT t1.id, CASE nextval('test_seq') %2 = 0 WHEN true THEN t2.a2 ELSE 1 END +FROM t1 LEFT OUTER JOIN t2 ON t1.id = t2.account_id +ORDER BY t1.id; + +--- cleanup +\set VERBOSITY TERSE +DROP SCHEMA issue_7705 CASCADE; +RESET all; diff --git a/src/test/regress/sql/merge_vcore.sql b/src/test/regress/sql/merge_vcore.sql new file mode 100644 index 00000000000..2ab95e874ae --- /dev/null +++ b/src/test/regress/sql/merge_vcore.sql @@ -0,0 +1,403 @@ +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 +\gset +\if :server_version_ge_15 +\else +\q +\endif + +-- MERGE command performs a join from data_source to target_table_name +DROP SCHEMA IF EXISTS merge_vcore_schema CASCADE; +--MERGE INTO target +--USING source +--WHEN NOT MATCHED +--WHEN MATCHED AND +--WHEN MATCHED + +CREATE SCHEMA merge_vcore_schema; +SET search_path TO merge_vcore_schema; +SET citus.shard_count TO 4; +SET citus.next_shard_id TO 4000000; +SET citus.explain_all_tasks TO true; +SET citus.shard_replication_factor TO 1; +SET citus.max_adaptive_executor_pool_size TO 1; +SET client_min_messages = warning; +SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0); +RESET client_min_messages; + + +-- ****************************************** CASE 1 : Both are singleSharded*************************************** 
+CREATE TABLE source ( + id bigint, + doc text +); + +CREATE TABLE target ( + id bigint, + doc text +); + +SELECT create_distributed_table('source', null, colocate_with=>'none'); +SELECT create_distributed_table('target', null, colocate_with=>'none'); + +INSERT INTO source (id, doc) VALUES (1, '{"a" : 1}'), (1, '{"a" : 2}'); + +-- insert +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); + +SELECT * FROM target; + +-- update +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); + +SELECT * FROM target; + +-- Explain +EXPLAIN (costs off, timing off, summary off) MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) +WHEN MATCHED THEN DO NOTHING; + + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; + + +-- *************** CASE 2 : source is single sharded and target is distributed ******************************* +CREATE TABLE source ( + id bigint, + doc text +); + +CREATE TABLE target ( + id bigint, + doc text +); + +SELECT create_distributed_table('source', null, colocate_with=>'none'); +SELECT create_distributed_table('target', 'id'); + +INSERT INTO source (id, doc) VALUES (1, '{"a" : 1}'), (1, '{"a" : 2}'); + +-- insert +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); + +SELECT * FROM target; + +-- update +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc 
+WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); + +SELECT * FROM target; + +-- Explain +EXPLAIN (costs off, timing off, summary off) MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) +WHEN MATCHED THEN DO NOTHING; + + + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; + + +-- *************** CASE 3 : source is distributed and target is single sharded ******************************* +CREATE TABLE source ( + id bigint, + doc text +); + +CREATE TABLE target ( + id bigint, + doc text +); + +SELECT create_distributed_table('source', 'id'); +SELECT create_distributed_table('target', null); + +INSERT INTO source (id, doc) VALUES (1, '{"a" : 1}'), (1, '{"a" : 2}'); + +-- insert +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); + +SELECT * FROM target; + +-- update +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); + +SELECT * FROM target; + +-- Explain +EXPLAIN (costs off, timing off, summary off) MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) +WHEN MATCHED THEN DO NOTHING; + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; + + +-- *************** CASE 4 : both are distributed ******************************* +CREATE TABLE source ( + id bigint, + doc text +); + +CREATE TABLE target ( + id bigint, + doc text +); + +SELECT create_distributed_table('source', 'id'); +SELECT create_distributed_table('target', 'id'); + +INSERT INTO source (id, doc) VALUES (1, '{"a" : 1}'), (1, '{"a" : 2}'); + +-- 
insert +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); + +SELECT * FROM target; + +-- update +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); + +SELECT * FROM target; + +-- Explain +EXPLAIN (costs off, timing off, summary off) MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) +WHEN MATCHED THEN DO NOTHING; + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; + + +-- *************** CASE 5 : both are distributed & colocated ******************************* + +CREATE TABLE source ( + id bigint, + doc text +); + +CREATE TABLE target ( + id bigint, + doc text +); + +SELECT create_distributed_table('source', 'id'); +SELECT create_distributed_table('target', 'id', colocate_with=>'source'); + +INSERT INTO source (id, doc) VALUES (1, '{"a" : 1}'), (1, '{"a" : 2}'); + +-- insert +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); + +SELECT * FROM target; + +-- update +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); + +SELECT * FROM target; + +-- Explain +EXPLAIN (costs off, timing off, summary off) MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) +WHEN MATCHED THEN DO NOTHING; + +DROP TABLE 
IF EXISTS source; +DROP TABLE IF EXISTS target; + + +-- *************** CASE 6 : both are singlesharded & colocated ******************************* + +CREATE TABLE source ( + id bigint, + doc text +); + +CREATE TABLE target ( + id bigint, + doc text +); + +SELECT create_distributed_table('source', null); +SELECT create_distributed_table('target', null, colocate_with=>'source'); + +INSERT INTO source (id, doc) VALUES (1, '{"a" : 1}'), (1, '{"a" : 2}'); + +-- insert +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); + +SELECT * FROM target; + +-- update +MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) AND src.doc = target.doc +WHEN MATCHED THEN +UPDATE SET doc = '{"b" : 1}' +WHEN NOT MATCHED THEN +INSERT (id, doc) +VALUES (src.t_id, doc); + +SELECT * FROM target; + +-- Explain +EXPLAIN (costs off, timing off, summary off) MERGE INTO ONLY target USING (SELECT 2::bigint AS t_id, doc FROM source) src +ON (src.t_id = target.id) +WHEN MATCHED THEN DO NOTHING; + +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; + + +-- Bug Fix Test as part of this PR +-- Test 1 +CREATE TABLE source ( + id int, + age int, + salary int +); + +CREATE TABLE target ( + id int, + age int, + salary int +); + +SELECT create_distributed_table('source', 'id', colocate_with=>'none'); +SELECT create_distributed_table('target', 'id', colocate_with=>'none'); + +INSERT INTO source (id, age, salary) VALUES (1,30, 100000); + +MERGE INTO ONLY target USING source ON (source.id = target.id) +WHEN NOT MATCHED THEN +INSERT (id, salary) VALUES (source.id, source.salary); + +SELECT * FROM TARGET; +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; + + +-- Test 2 +CREATE TABLE source ( + id int, + age int, + salary int +); + +CREATE TABLE target ( + 
id int, + age int, + salary int +); + +SELECT create_distributed_table('source', 'id', colocate_with=>'none'); +SELECT create_distributed_table('target', 'id', colocate_with=>'none'); + +INSERT INTO source (id, age, salary) VALUES (1,30, 100000); + +MERGE INTO ONLY target USING source ON (source.id = target.id) +WHEN NOT MATCHED THEN +INSERT (salary, id) VALUES (source.salary, source.id); + +SELECT * FROM TARGET; +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; + + +-- Test 3 +CREATE TABLE source ( + id int, + age int, + salary int +); + +CREATE TABLE target ( + id int, + age int, + salary int +); + +SELECT create_distributed_table('source', 'id', colocate_with=>'none'); +SELECT create_distributed_table('target', 'id', colocate_with=>'none'); + +INSERT INTO source (id, age, salary) VALUES (1,30, 100000); + +MERGE INTO ONLY target USING source ON (source.id = target.id) +WHEN NOT MATCHED THEN +INSERT (salary, id, age) VALUES (source.age, source.id, source.salary); + +SELECT * FROM TARGET; +DROP TABLE IF EXISTS source; +DROP TABLE IF EXISTS target; + + + +DROP SCHEMA IF EXISTS merge_vcore_schema CASCADE; + + + + diff --git a/src/test/regress/sql/multi_limit_clause.sql b/src/test/regress/sql/multi_limit_clause.sql index 8d14bbbc8ed..5e3b3e3deb2 100644 --- a/src/test/regress/sql/multi_limit_clause.sql +++ b/src/test/regress/sql/multi_limit_clause.sql @@ -222,6 +222,25 @@ SELECT ORDER BY 2 DESC, 1 LIMIT 5; +-- check if we can correctly push the limit when it is null +SELECT l_orderkey FROM lineitem WHERE l_orderkey < 3 ORDER BY l_orderkey LIMIT null; + +SELECT l_orderkey FROM lineitem WHERE l_orderkey < 3 ORDER BY l_orderkey OFFSET 1 LIMIT null; + +SELECT count(*) FROM lineitem LIMIT null; + +SELECT count(*) FROM lineitem OFFSET 0 LIMIT null; + +-- check if we push the right limit when both offset and limit are given +SELECT l_orderkey FROM lineitem WHERE l_orderkey < 3 ORDER BY l_orderkey OFFSET 1 LIMIT 3; + +SELECT l_orderkey FROM lineitem WHERE l_orderkey 
< 3 ORDER BY l_orderkey OFFSET null LIMIT 1; + +-- check if we can correctly push the limit when it is all +SELECT l_orderkey FROM lineitem WHERE l_orderkey < 2 LIMIT all; + +SELECT l_orderkey FROM lineitem WHERE l_orderkey < 2 OFFSET 2 LIMIT all; + SET client_min_messages TO NOTICE; -- non constants should not push down