diff --git a/.github/workflows/assign_milestone.yml b/.github/workflows/assign_milestone.yml
index 443d28e80d6..d0ec06ffa5e 100644
--- a/.github/workflows/assign_milestone.yml
+++ b/.github/workflows/assign_milestone.yml
@@ -12,7 +12,7 @@ env:
jobs:
build:
name: Assign Milestone
- runs-on: ubuntu-22.04
+ runs-on: ubuntu-latest
permissions:
pull-requests: write
@@ -20,7 +20,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Checkout code
uses: actions/checkout@v4
diff --git a/.github/workflows/check_make_vtadmin_authz_testgen.yml b/.github/workflows/check_make_vtadmin_authz_testgen.yml
index 08104997714..172df34d3a8 100644
--- a/.github/workflows/check_make_vtadmin_authz_testgen.yml
+++ b/.github/workflows/check_make_vtadmin_authz_testgen.yml
@@ -8,7 +8,7 @@ env:
jobs:
build:
name: Check Make vtadmin_authz_testgen
- runs-on: ubuntu-22.04
+ runs-on: ubuntu-latest
steps:
- name: Skip CI
run: |
@@ -49,7 +49,7 @@ jobs:
uses: actions/setup-go@v5
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.vtadmin_changes == 'true'
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.vtadmin_changes == 'true'
diff --git a/.github/workflows/check_make_vtadmin_web_proto.yml b/.github/workflows/check_make_vtadmin_web_proto.yml
index f45b9455b00..94a85ed063e 100644
--- a/.github/workflows/check_make_vtadmin_web_proto.yml
+++ b/.github/workflows/check_make_vtadmin_web_proto.yml
@@ -6,7 +6,7 @@ permissions: read-all
jobs:
build:
name: Check Make VTAdmin Web Proto
- runs-on: ubuntu-22.04
+ runs-on: ubuntu-latest
steps:
- name: Skip CI
run: |
@@ -49,7 +49,7 @@ jobs:
uses: actions/setup-go@v5
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.proto_changes == 'true'
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.proto_changes == 'true'
diff --git a/.github/workflows/close_stale_pull_requests.yml b/.github/workflows/close_stale_pull_requests.yml
index e0201c0104b..7b994d7fff2 100644
--- a/.github/workflows/close_stale_pull_requests.yml
+++ b/.github/workflows/close_stale_pull_requests.yml
@@ -9,7 +9,7 @@ permissions: read-all
jobs:
close_stale_pull_requests:
- runs-on: ubuntu-22.04
+ runs-on: ubuntu-latest
permissions:
pull-requests: write
diff --git a/.github/workflows/cluster_endtoend_12.yml b/.github/workflows/cluster_endtoend_12.yml
index 3713a571b13..bc48adddf2d 100644
--- a/.github/workflows/cluster_endtoend_12.yml
+++ b/.github/workflows/cluster_endtoend_12.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_13.yml b/.github/workflows/cluster_endtoend_13.yml
index d4b81e06957..b3ed66ab3bd 100644
--- a/.github/workflows/cluster_endtoend_13.yml
+++ b/.github/workflows/cluster_endtoend_13.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_15.yml b/.github/workflows/cluster_endtoend_15.yml
index 3785fdcc28e..bf67bcb3b51 100644
--- a/.github/workflows/cluster_endtoend_15.yml
+++ b/.github/workflows/cluster_endtoend_15.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_18.yml b/.github/workflows/cluster_endtoend_18.yml
index d68f88430fc..c1cce2f4c9a 100644
--- a/.github/workflows/cluster_endtoend_18.yml
+++ b/.github/workflows/cluster_endtoend_18.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_21.yml b/.github/workflows/cluster_endtoend_21.yml
index a06b7d72940..c5af6cfb722 100644
--- a/.github/workflows/cluster_endtoend_21.yml
+++ b/.github/workflows/cluster_endtoend_21.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_22.yml b/.github/workflows/cluster_endtoend_22.yml
index 62dfc6fe059..619b5d4c9bb 100644
--- a/.github/workflows/cluster_endtoend_22.yml
+++ b/.github/workflows/cluster_endtoend_22.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_backup_pitr.yml b/.github/workflows/cluster_endtoend_backup_pitr.yml
index f7e08685b97..4c7273d5b25 100644
--- a/.github/workflows/cluster_endtoend_backup_pitr.yml
+++ b/.github/workflows/cluster_endtoend_backup_pitr.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml b/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml
index e92a4270a74..943e4755c99 100644
--- a/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml
+++ b/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml b/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml
index a6703085c33..e9c90a8bc00 100644
--- a/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml
+++ b/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_mysql80.yml b/.github/workflows/cluster_endtoend_mysql80.yml
index 78375ea549e..cb202be9689 100644
--- a/.github/workflows/cluster_endtoend_mysql80.yml
+++ b/.github/workflows/cluster_endtoend_mysql80.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_mysql_server_vault.yml b/.github/workflows/cluster_endtoend_mysql_server_vault.yml
index 16974e3d159..dc3d117052e 100644
--- a/.github/workflows/cluster_endtoend_mysql_server_vault.yml
+++ b/.github/workflows/cluster_endtoend_mysql_server_vault.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_onlineddl_ghost.yml b/.github/workflows/cluster_endtoend_onlineddl_ghost.yml
index 1fe69e60be8..1a2bb4bd971 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_ghost.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_ghost.yml
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_onlineddl_revert.yml b/.github/workflows/cluster_endtoend_onlineddl_revert.yml
index 115b59efa0a..2ba54745d1c 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_revert.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_revert.yml
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml b/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml
index fba4bf7e009..8abe2cfd34e 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml
index 3612cb498c5..8570c789026 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml
index cdd5217a3f4..995a1dab4c2 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml
index 7daab66a4aa..f7aaedd4bb8 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml
index 7329b0e0892..d160f627fd6 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml b/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml
index 2aba8197130..35986e24bfb 100644
--- a/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml
+++ b/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_tabletmanager_consul.yml b/.github/workflows/cluster_endtoend_tabletmanager_consul.yml
index a14a5c3e7aa..78be71909c1 100644
--- a/.github/workflows/cluster_endtoend_tabletmanager_consul.yml
+++ b/.github/workflows/cluster_endtoend_tabletmanager_consul.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml b/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml
index afc57695e83..544fd10d761 100644
--- a/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml
+++ b/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml b/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml
index 55e52394db3..f3108f83bb6 100644
--- a/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml
+++ b/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_topo_connection_cache.yml b/.github/workflows/cluster_endtoend_topo_connection_cache.yml
index f3aa40dbb18..e8ad14cd785 100644
--- a/.github/workflows/cluster_endtoend_topo_connection_cache.yml
+++ b/.github/workflows/cluster_endtoend_topo_connection_cache.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml b/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml
index e5c88b13147..6e23c655e30 100644
--- a/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vreplication_basic.yml b/.github/workflows/cluster_endtoend_vreplication_basic.yml
index 86691286bdc..2ca48c7f323 100644
--- a/.github/workflows/cluster_endtoend_vreplication_basic.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_basic.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vreplication_cellalias.yml b/.github/workflows/cluster_endtoend_vreplication_cellalias.yml
index 671b27d8c3e..9365d562d85 100644
--- a/.github/workflows/cluster_endtoend_vreplication_cellalias.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_cellalias.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vreplication_foreign_key_stress.yml b/.github/workflows/cluster_endtoend_vreplication_foreign_key_stress.yml
index c61bddf2e10..125504487e4 100644
--- a/.github/workflows/cluster_endtoend_vreplication_foreign_key_stress.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_foreign_key_stress.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml b/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml
index 958b9a6c2b8..f7033345537 100644
--- a/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml
index c928b08ea9a..6bfaeca4c89 100644
--- a/.github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vreplication_v2.yml b/.github/workflows/cluster_endtoend_vreplication_v2.yml
index e391e698d8e..8f1b29b2a92 100644
--- a/.github/workflows/cluster_endtoend_vreplication_v2.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_v2.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vstream.yml b/.github/workflows/cluster_endtoend_vstream.yml
index 3946614f674..528c1037f9f 100644
--- a/.github/workflows/cluster_endtoend_vstream.yml
+++ b/.github/workflows/cluster_endtoend_vstream.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtbackup.yml b/.github/workflows/cluster_endtoend_vtbackup.yml
index 4dea0079881..02f0ea9dbae 100644
--- a/.github/workflows/cluster_endtoend_vtbackup.yml
+++ b/.github/workflows/cluster_endtoend_vtbackup.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml b/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml
index 1cbe2be67e3..3faad56f42e 100644
--- a/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml
+++ b/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml b/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml
index 2a7ba3c8d7f..9c462e46231 100644
--- a/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_foreignkey_stress.yml b/.github/workflows/cluster_endtoend_vtgate_foreignkey_stress.yml
index 87765c30955..d1bc22107cf 100644
--- a/.github/workflows/cluster_endtoend_vtgate_foreignkey_stress.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_foreignkey_stress.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_gen4.yml b/.github/workflows/cluster_endtoend_vtgate_gen4.yml
index 84c0cc1c850..e0fde354a59 100644
--- a/.github/workflows/cluster_endtoend_vtgate_gen4.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_gen4.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml b/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml
index 302204aa91b..c58159ed76a 100644
--- a/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_godriver.yml b/.github/workflows/cluster_endtoend_vtgate_godriver.yml
index 33a080768ef..03af7c4220f 100644
--- a/.github/workflows/cluster_endtoend_vtgate_godriver.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_godriver.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml b/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml
index 24585039482..0e68f404dea 100644
--- a/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_queries.yml b/.github/workflows/cluster_endtoend_vtgate_queries.yml
index 196eb5d9804..452ecdce3a0 100644
--- a/.github/workflows/cluster_endtoend_vtgate_queries.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_queries.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml b/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml
index bc9923e4421..cdc91abf538 100644
--- a/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml b/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml
index dfc3658e405..4d046b10b2d 100644
--- a/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_schema.yml b/.github/workflows/cluster_endtoend_vtgate_schema.yml
index 3f51b9a8a72..731deafe05a 100644
--- a/.github/workflows/cluster_endtoend_vtgate_schema.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_schema.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml b/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml
index abc2ab056a6..f5474a37a76 100644
--- a/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml b/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml
index c3d5787e76a..26280690ba0 100644
--- a/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_topo.yml b/.github/workflows/cluster_endtoend_vtgate_topo.yml
index 4bb6142fc27..9be71b3b0d3 100644
--- a/.github/workflows/cluster_endtoend_vtgate_topo.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_topo.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml b/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml
index ece3d5e8fee..515a13ab94d 100644
--- a/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml b/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml
index 9210541ecba..9bd92a184ea 100644
--- a/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_transaction.yml b/.github/workflows/cluster_endtoend_vtgate_transaction.yml
index 84d6eba4b80..6d5f56ab7f6 100644
--- a/.github/workflows/cluster_endtoend_vtgate_transaction.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_transaction.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_unsharded.yml b/.github/workflows/cluster_endtoend_vtgate_unsharded.yml
index 5ba68f749bb..5b5b77ad25f 100644
--- a/.github/workflows/cluster_endtoend_vtgate_unsharded.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_unsharded.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml b/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml
index 2a0d2713f63..c9d51f8775d 100644
--- a/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtgate_vschema.yml b/.github/workflows/cluster_endtoend_vtgate_vschema.yml
index e9882112a8b..735d6c0e7f7 100644
--- a/.github/workflows/cluster_endtoend_vtgate_vschema.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_vschema.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vtorc.yml b/.github/workflows/cluster_endtoend_vtorc.yml
index eb391f3999d..99d3e82892d 100644
--- a/.github/workflows/cluster_endtoend_vtorc.yml
+++ b/.github/workflows/cluster_endtoend_vtorc.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml b/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml
index d2a4d099925..c0320e0d202 100644
--- a/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml
+++ b/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_xb_backup.yml b/.github/workflows/cluster_endtoend_xb_backup.yml
index 0ea9a48f175..02eef909707 100644
--- a/.github/workflows/cluster_endtoend_xb_backup.yml
+++ b/.github/workflows/cluster_endtoend_xb_backup.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/cluster_endtoend_xb_recovery.yml b/.github/workflows/cluster_endtoend_xb_recovery.yml
index cbb5d8630fa..ac068d29619 100644
--- a/.github/workflows/cluster_endtoend_xb_recovery.yml
+++ b/.github/workflows/cluster_endtoend_xb_recovery.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/codeql_analysis.yml b/.github/workflows/codeql_analysis.yml
index 5270c01b63b..a9444e9d823 100644
--- a/.github/workflows/codeql_analysis.yml
+++ b/.github/workflows/codeql_analysis.yml
@@ -14,7 +14,7 @@ permissions: read-all
jobs:
analyze:
name: Analyze
- runs-on: ubuntu-22.04
+ runs-on: ubuntu-latest
permissions:
actions: read
contents: read
@@ -32,7 +32,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
@@ -75,13 +75,6 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
- sudo apt-get install -y gnupg2
- sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
- sudo percona-release enable-only pxb-24
- sudo apt-get update
- sudo apt-get install -y percona-xtrabackup-24
-
- name: Building binaries
timeout-minutes: 30
run: |
diff --git a/.github/workflows/docker_test_cluster_10.yml b/.github/workflows/docker_test_cluster_10.yml
index 5803929123f..08351dc677f 100644
--- a/.github/workflows/docker_test_cluster_10.yml
+++ b/.github/workflows/docker_test_cluster_10.yml
@@ -55,7 +55,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/docker_test_cluster_25.yml b/.github/workflows/docker_test_cluster_25.yml
index 51f2baca2d2..e2590f343f5 100644
--- a/.github/workflows/docker_test_cluster_25.yml
+++ b/.github/workflows/docker_test_cluster_25.yml
@@ -55,7 +55,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/e2e_race.yml b/.github/workflows/e2e_race.yml
index c0282dc4f9e..a99cbb5217f 100644
--- a/.github/workflows/e2e_race.yml
+++ b/.github/workflows/e2e_race.yml
@@ -54,7 +54,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/endtoend.yml b/.github/workflows/endtoend.yml
index a70449558bc..e4c81b7dad1 100644
--- a/.github/workflows/endtoend.yml
+++ b/.github/workflows/endtoend.yml
@@ -53,7 +53,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/local_example.yml b/.github/workflows/local_example.yml
index 14d6e0f0779..63656e70d65 100644
--- a/.github/workflows/local_example.yml
+++ b/.github/workflows/local_example.yml
@@ -58,7 +58,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
diff --git a/.github/workflows/region_example.yml b/.github/workflows/region_example.yml
index d99a132bf8e..a2f67b871ad 100644
--- a/.github/workflows/region_example.yml
+++ b/.github/workflows/region_example.yml
@@ -58,7 +58,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
diff --git a/.github/workflows/static_checks_etc.yml b/.github/workflows/static_checks_etc.yml
index da15edbb2d3..a7b0efb17ab 100644
--- a/.github/workflows/static_checks_etc.yml
+++ b/.github/workflows/static_checks_etc.yml
@@ -119,7 +119,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && (steps.changes.outputs.go_files == 'true' || steps.changes.outputs.parser_changes == 'true' || steps.changes.outputs.proto_changes == 'true')
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && (steps.changes.outputs.go_files == 'true' || steps.changes.outputs.parser_changes == 'true' || steps.changes.outputs.proto_changes == 'true')
diff --git a/.github/workflows/unit_race.yml b/.github/workflows/unit_race.yml
index 7384fe24147..7aa1504094a 100644
--- a/.github/workflows/unit_race.yml
+++ b/.github/workflows/unit_race.yml
@@ -59,7 +59,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
diff --git a/.github/workflows/unit_test_mysql80.yml b/.github/workflows/unit_test_mysql80.yml
index e40ae045bd3..d0e883ef812 100644
--- a/.github/workflows/unit_test_mysql80.yml
+++ b/.github/workflows/unit_test_mysql80.yml
@@ -71,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
diff --git a/.github/workflows/update_golang_dependencies.yml b/.github/workflows/update_golang_dependencies.yml
index b416c09f949..1e24edc93a4 100644
--- a/.github/workflows/update_golang_dependencies.yml
+++ b/.github/workflows/update_golang_dependencies.yml
@@ -19,7 +19,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Check out code
uses: actions/checkout@v4
diff --git a/.github/workflows/update_golang_version.yml b/.github/workflows/update_golang_version.yml
index 519fac82482..312486c273c 100644
--- a/.github/workflows/update_golang_version.yml
+++ b/.github/workflows/update_golang_version.yml
@@ -22,7 +22,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Check out code
uses: actions/checkout@v4
diff --git a/.github/workflows/upgrade_downgrade_test_backups_e2e.yml b/.github/workflows/upgrade_downgrade_test_backups_e2e.yml
index d7e0e213037..b1a69765a31 100644
--- a/.github/workflows/upgrade_downgrade_test_backups_e2e.yml
+++ b/.github/workflows/upgrade_downgrade_test_backups_e2e.yml
@@ -73,7 +73,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml b/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml
index 326233a710f..a021421a5dc 100644
--- a/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml
+++ b/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml
@@ -72,7 +72,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/upgrade_downgrade_test_backups_manual.yml b/.github/workflows/upgrade_downgrade_test_backups_manual.yml
index 0c006625510..1ca174232be 100644
--- a/.github/workflows/upgrade_downgrade_test_backups_manual.yml
+++ b/.github/workflows/upgrade_downgrade_test_backups_manual.yml
@@ -77,7 +77,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml b/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml
index 7a14447608a..0bee50d9c24 100644
--- a/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml
+++ b/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml
@@ -75,7 +75,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml
index 93daba76d6d..4947ec95c50 100644
--- a/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml
+++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml
@@ -76,7 +76,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -121,63 +121,55 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo percona-release enable-only pxb-24
sudo apt-get update
sudo apt-get install -y percona-xtrabackup-24
-
- # Checkout to the last release of Vitess
- - name: Check out other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }})
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v4
- with:
- ref: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }}
-
- - name: Get dependencies for the last release
+ # Build current commit's binaries
+ - name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
go mod download
- - name: Building last release's binaries
+ - name: Building the binaries for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
timeout-minutes: 10
run: |
source build.env
NOVTADMINBUILD=1 make build
- mkdir -p /tmp/vitess-build-other/
- cp -R bin /tmp/vitess-build-other/
+ mkdir -p /tmp/vitess-build-current/
+ cp -R bin /tmp/vitess-build-current/
rm -Rf bin/*
- # Checkout to this build's commit
- - name: Check out commit's code
+ # Checkout to the last release of Vitess
+ - name: Check out other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/checkout@v4
+ with:
+ ref: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }}
- - name: Get dependencies for this commit
+ - name: Get dependencies for the last release
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
go mod download
- - name: Building the binaries for this commit
+ - name: Building last release's binaries
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
timeout-minutes: 10
run: |
source build.env
NOVTADMINBUILD=1 make build
- mkdir -p /tmp/vitess-build-current/
- cp -R bin /tmp/vitess-build-current/
+ mkdir -p /tmp/vitess-build-other/
+ cp -R bin /tmp/vitess-build-other/
+ rm -Rf bin/*
- # Running a test with vtgate and vttablet using version n
- - name: Run query serving tests (vtgate=N, vttablet=N)
+ - name: Convert ErrorContains checks to Error checks
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- rm -rf /tmp/vtdataroot
- mkdir -p /tmp/vtdataroot
-
- source build.env
- eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
+ find ./go/test/endtoend -name '*.go' -exec sed -i 's/ErrorContains/Error/g' {} +
+ find ./go/test/endtoend -name '*.go' -exec sed -i 's/EqualError/Error/g' {} +
# Swap the binaries in the bin. Use vtgate version n-1 and keep vttablet at version n
- name: Use last release's VTGate
@@ -185,12 +180,13 @@ jobs:
run: |
source build.env
+ cp -r /tmp/vitess-build-current/bin/* $PWD/bin/
rm -f $PWD/bin/vtgate
cp /tmp/vitess-build-other/bin/vtgate $PWD/bin/vtgate
vtgate --version
- # Running a test with vtgate at version n-1 and vttablet at version n
- - name: Run query serving tests (vtgate=N-1, vttablet=N)
+ # Running a test with vtgate at version n-1 and vttablet/vtctld at version n
+ - name: Run query serving tests (vtgate=N-1, vttablet=N, vtctld=N)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
rm -rf /tmp/vtdataroot
@@ -199,22 +195,38 @@ jobs:
source build.env
eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
- # Swap the binaries again. This time, vtgate will be at version n, and vttablet will be at version n-1
- - name: Use current version VTGate, and other version VTTablet
+ - name: Check out commit's code
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ # Swap the binaries again. This time, vtgate will be at version n, and vttablet/vtctld will be at version n-1
+ - name: Use current version VTGate, and other version VTTablet/VTctld
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
source build.env
+
+ rm -Rf bin/*
+ cp -r /tmp/vitess-build-current/bin/* $PWD/bin/
+
+ rm -f $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld
- rm -f $PWD/bin/vtgate $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld
- cp /tmp/vitess-build-current/bin/vtgate $PWD/bin/vtgate
cp /tmp/vitess-build-other/bin/vttablet $PWD/bin/vttablet
cp /tmp/vitess-build-other/bin/mysqlctl $PWD/bin/mysqlctl
cp /tmp/vitess-build-other/bin/mysqlctld $PWD/bin/mysqlctld
+
+ cp /tmp/vitess-build-other/bin/vtctld $PWD/bin/vtctld
+ cp /tmp/vitess-build-other/bin/vtctldclient $PWD/bin/vtctldclient
+ cp /tmp/vitess-build-other/bin/vtctl $PWD/bin/vtctl
+ cp /tmp/vitess-build-other/bin/vtctlclient $PWD/bin/vtctlclient
+
vtgate --version
vttablet --version
+ vtctl --version
- # Running a test with vtgate at version n and vttablet at version n-1
- - name: Run query serving tests (vtgate=N, vttablet=N-1)
+ # Running a test with vtgate at version n and vttablet/vtctld at version n-1
+ - name: Run query serving tests (vtgate=N, vttablet=N-1, vtctld=N-1)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
rm -rf /tmp/vtdataroot
diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml
index a666e7a90fd..95232b001e2 100644
--- a/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml
+++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -115,63 +115,55 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
+<<<<<<< HEAD
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo percona-release enable-only pxb-24
sudo apt-get update
sudo apt-get install -y percona-xtrabackup-24
-
- # Checkout to the next release of Vitess
- - name: Check out other version's code (${{ steps.output-next-release-ref.outputs.next_release_ref }})
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
- uses: actions/checkout@v4
- with:
- ref: ${{ steps.output-next-release-ref.outputs.next_release_ref }}
-
- - name: Get dependencies for the next release
+=======
+ # Build current commit's binaries
+ - name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
go mod download
- - name: Building next release's binaries
+ - name: Building the binaries for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
timeout-minutes: 10
run: |
source build.env
NOVTADMINBUILD=1 make build
- mkdir -p /tmp/vitess-build-other/
- cp -R bin /tmp/vitess-build-other/
+ mkdir -p /tmp/vitess-build-current/
+ cp -R bin /tmp/vitess-build-current/
rm -Rf bin/*
+>>>>>>> v19.0.6
- # Checkout to this build's commit
- - name: Check out commit's code
+ # Checkout to the next release of Vitess
+ - name: Check out other version's code (${{ steps.output-next-release-ref.outputs.next_release_ref }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/checkout@v4
+ with:
+ ref: ${{ steps.output-next-release-ref.outputs.next_release_ref }}
- - name: Get dependencies for this commit
+ - name: Get dependencies for the next release
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
go mod download
- - name: Building the binaries for this commit
+ - name: Building next release's binaries
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
timeout-minutes: 10
run: |
source build.env
NOVTADMINBUILD=1 make build
- mkdir -p /tmp/vitess-build-current/
- cp -R bin /tmp/vitess-build-current/
+ mkdir -p /tmp/vitess-build-other/
+ cp -R bin /tmp/vitess-build-other/
+ rm -Rf bin/*
- # Running a test with vtgate and vttablet using version n
- - name: Run query serving tests (vtgate=N, vttablet=N)
+ - name: Convert ErrorContains checks to Error checks
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- rm -rf /tmp/vtdataroot
- mkdir -p /tmp/vtdataroot
-
- source build.env
- eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
+ find ./go/test/endtoend -name '*.go' -exec sed -i 's/ErrorContains/Error/g' {} +
+ find ./go/test/endtoend -name '*.go' -exec sed -i 's/EqualError/Error/g' {} +
# Swap the binaries in the bin. Use vtgate version n+1 and keep vttablet at version n
- name: Use next release's VTGate
@@ -179,6 +174,7 @@ jobs:
run: |
source build.env
+ cp -r /tmp/vitess-build-current/bin/* $PWD/bin/
rm -f $PWD/bin/vtgate
cp /tmp/vitess-build-other/bin/vtgate $PWD/bin/vtgate
vtgate --version
@@ -193,28 +189,38 @@ jobs:
source build.env
eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries
+ - name: Check out commit's code
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
# Swap the binaries again. This time, vtgate will be at version n, and vttablet will be at version n+1
- - name: Use current version VTGate, and other version VTTablet
+ - name: Use current version VTGate, and other version VTTablet/VTctld
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
source build.env
- rm -f $PWD/bin/vtgate $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld
- cp /tmp/vitess-build-current/bin/vtgate $PWD/bin/vtgate
+ rm -Rf bin/*
+ cp -r /tmp/vitess-build-current/bin/* $PWD/bin/
- cp /tmp/vitess-build-other/bin/vtctld $PWD/bin
- cp /tmp/vitess-build-other/bin/vtctldclient $PWD/bin
- cp /tmp/vitess-build-other/bin/vtctl $PWD/bin
- cp /tmp/vitess-build-other/bin/vtctlclient $PWD/bin
+ rm -f $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld
cp /tmp/vitess-build-other/bin/vttablet $PWD/bin/vttablet
cp /tmp/vitess-build-other/bin/mysqlctl $PWD/bin/mysqlctl
cp /tmp/vitess-build-other/bin/mysqlctld $PWD/bin/mysqlctld
+
+ cp /tmp/vitess-build-other/bin/vtctld $PWD/bin/vtctld
+ cp /tmp/vitess-build-other/bin/vtctldclient $PWD/bin/vtctldclient
+ cp /tmp/vitess-build-other/bin/vtctl $PWD/bin/vtctl
+ cp /tmp/vitess-build-other/bin/vtctlclient $PWD/bin/vtctlclient
+
vtgate --version
vttablet --version
+ vtctl --version
# Running a test with vtgate at version n and vttablet at version n+1
- - name: Run query serving tests (vtgate=N, vttablet=N+1)
+ - name: Run query serving tests (vtgate=N, vttablet=N+1, vtctld=N+1)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
rm -rf /tmp/vtdataroot
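
The new "Convert ErrorContains checks to Error checks" step above rewrites assertions in the checked-out end-to-end tests before they run against mixed binary versions. A hedged illustration of what the `sed 's/ErrorContains/Error/g'` rewrite does to a testify assertion — a hypothetical test, not code from the repo:

```go
package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestSuperReadOnly(t *testing.T) {
	err := errors.New("The MySQL server is running with the --super-read-only option so it cannot execute this statement")

	// Before the workflow's sed rewrite: the assertion pins the exact error text.
	require.ErrorContains(t, err, "--super-read-only")

	// After `sed 's/ErrorContains/Error/g'`: the substring becomes a mere
	// failure message, so only "an error occurred" is asserted. This keeps the
	// test stable when the other binary version words its errors differently.
	require.Error(t, err, "--super-read-only")
}
```
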
diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml b/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml
index af0084315a9..bfd54a29bd0 100644
--- a/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml
+++ b/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml
@@ -76,7 +76,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -121,6 +121,7 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
+<<<<<<< HEAD
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
@@ -128,6 +129,8 @@ jobs:
sudo apt-get update
sudo apt-get install -y percona-xtrabackup-24
+=======
+>>>>>>> v19.0.6
# Checkout to the last release of Vitess
- name: Check out other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -169,6 +172,12 @@ jobs:
mkdir -p /tmp/vitess-build-current/
cp -R bin /tmp/vitess-build-current/
+ - name: Convert ErrorContains checks to Error checks
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ run: |
+ find ./go/test/endtoend -name '*.go' -exec sed -i 's/ErrorContains/Error/g' {} +
+ find ./go/test/endtoend -name '*.go' -exec sed -i 's/EqualError/Error/g' {} +
+
# Running a test with vtgate and vttablet using version n
- name: Run query serving tests (vtgate=N, vttablet=N)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml b/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml
index 6c30b4acb05..398f5c7a1a1 100644
--- a/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml
+++ b/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -115,6 +115,7 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
+<<<<<<< HEAD
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
@@ -122,6 +123,8 @@ jobs:
sudo apt-get update
sudo apt-get install -y percona-xtrabackup-24
+=======
+>>>>>>> v19.0.6
# Checkout to the next release of Vitess
- name: Check out other version's code (${{ steps.output-next-release-ref.outputs.next_release_ref }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -163,6 +166,12 @@ jobs:
mkdir -p /tmp/vitess-build-current/
cp -R bin /tmp/vitess-build-current/
+ - name: Convert ErrorContains checks to Error checks
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ run: |
+ find ./go/test/endtoend -name '*.go' -exec sed -i 's/ErrorContains/Error/g' {} +
+ find ./go/test/endtoend -name '*.go' -exec sed -i 's/EqualError/Error/g' {} +
+
# Running a test with vtgate and vttablet using version n
- name: Run query serving tests (vtgate=N, vttablet=N)
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml b/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml
index e9b33cdce90..1e07d274812 100644
--- a/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml
+++ b/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -115,6 +115,7 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
+<<<<<<< HEAD
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
@@ -122,6 +123,8 @@ jobs:
sudo apt-get update
sudo apt-get install -y percona-xtrabackup-24
+=======
+>>>>>>> v19.0.6
# Checkout to the next release of Vitess
- name: Check out other version's code (${{ steps.output-next-release-ref.outputs.next_release_ref }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml b/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml
index f67a3214d24..80d02d138fd 100644
--- a/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml
+++ b/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml b/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml
index b05631a4862..98db591ac53 100644
--- a/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml
+++ b/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml
@@ -76,7 +76,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -121,6 +121,7 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
+<<<<<<< HEAD
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
@@ -128,6 +129,8 @@ jobs:
sudo apt-get update
sudo apt-get install -y percona-xtrabackup-24
+=======
+>>>>>>> v19.0.6
# Checkout to the last release of Vitess
- name: Check out other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml b/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml
index 372f223f06a..0fce01dd939 100644
--- a/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml
+++ b/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml
@@ -76,7 +76,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -121,6 +121,7 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
+<<<<<<< HEAD
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
@@ -128,6 +129,8 @@ jobs:
sudo apt-get update
sudo apt-get install -y percona-xtrabackup-24
+=======
+>>>>>>> v19.0.6
# Checkout to the last release of Vitess
- name: Check out other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/vitess_tester_vtgate.yml b/.github/workflows/vitess_tester_vtgate.yml
index 63d7332fc39..af1345ff4b1 100644
--- a/.github/workflows/vitess_tester_vtgate.yml
+++ b/.github/workflows/vitess_tester_vtgate.yml
@@ -60,7 +60,7 @@ jobs:
end_to_end:
- 'go/**/*.go'
- 'go/vt/sidecardb/**/*.sql'
- - 'go/test/endtoend/onlineddl/vrepl_suite/**'
+ - 'go/test/endtoend/vtgate/vitess_tester/**'
- 'test.go'
- 'Makefile'
- 'build.env'
@@ -76,7 +76,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -119,7 +119,11 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
# install vitess tester
+<<<<<<< HEAD
go install github.com/vitessio/vitess-tester@eb953122baba163ed8ccaa6642458ee984f5d7e4
+=======
+ go install github.com/vitessio/vitess-tester@89dd933a9ea0e15f69ca58b9c8ea09a358762cca
+>>>>>>> v19.0.6
- name: Setup launchable dependencies
if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
@@ -150,9 +154,15 @@ jobs:
# We go over all the directories in the given path.
# If there is a vschema file there, we use it, otherwise we let vitess-tester autogenerate it.
if [ -f $dir/vschema.json ]; then
+<<<<<<< HEAD
vitess-tester --sharded --xunit --test-dir $dir --vschema "$dir"vschema.json
else
vitess-tester --sharded --xunit --test-dir $dir
+=======
+ vitess-tester --xunit --vschema "$dir"vschema.json $dir/*.test
+ else
+ vitess-tester --sharded --xunit $dir/*.test
+>>>>>>> v19.0.6
fi
# Number the reports by changing their file names.
mv report.xml report"$i".xml
diff --git a/.github/workflows/vtadmin_web_build.yml b/.github/workflows/vtadmin_web_build.yml
index 61cb452ec6d..e8bb24052c3 100644
--- a/.github/workflows/vtadmin_web_build.yml
+++ b/.github/workflows/vtadmin_web_build.yml
@@ -16,8 +16,12 @@ permissions: read-all
jobs:
build:
+<<<<<<< HEAD
runs-on:
group: vitess-ubuntu20
+=======
+ runs-on: ubuntu-latest
+>>>>>>> v19.0.6
steps:
- name: Skip CI
run: |
diff --git a/.github/workflows/vtadmin_web_lint.yml b/.github/workflows/vtadmin_web_lint.yml
index 035850a3c9b..37ca4bbcfcd 100644
--- a/.github/workflows/vtadmin_web_lint.yml
+++ b/.github/workflows/vtadmin_web_lint.yml
@@ -16,7 +16,7 @@ permissions: read-all
jobs:
lint:
- runs-on: ubuntu-22.04
+ runs-on: ubuntu-latest
steps:
- name: Skip CI
run: |
diff --git a/.github/workflows/vtadmin_web_unit_tests.yml b/.github/workflows/vtadmin_web_unit_tests.yml
index c789e2fdc65..66f714b7fa5 100644
--- a/.github/workflows/vtadmin_web_unit_tests.yml
+++ b/.github/workflows/vtadmin_web_unit_tests.yml
@@ -16,8 +16,12 @@ permissions: read-all
jobs:
unit-tests:
+<<<<<<< HEAD
runs-on:
group: vitess-ubuntu20
+=======
+ runs-on: ubuntu-latest
+>>>>>>> v19.0.6
steps:
- name: Skip CI
run: |
diff --git a/Makefile b/Makefile
index 5b84184f5a9..5801788335c 100644
--- a/Makefile
+++ b/Makefile
@@ -280,9 +280,9 @@ $(PROTO_GO_OUTS): minimaltools install_protoc-gen-go proto/*.proto
# Please read docker/README.md to understand the different available images.
# This rule builds the bootstrap images for all flavors.
-DOCKER_IMAGES_FOR_TEST = mysql57 mysql80 percona57 percona80
+DOCKER_IMAGES_FOR_TEST = mysql80 percona80
DOCKER_IMAGES = common $(DOCKER_IMAGES_FOR_TEST)
-BOOTSTRAP_VERSION=27.5
+BOOTSTRAP_VERSION=27.7
ensure_bootstrap_version:
find docker/ -type f -exec sed -i "s/^\(ARG bootstrap_version\)=.*/\1=${BOOTSTRAP_VERSION}/" {} \;
sed -i 's/\(^.*flag.String(\"bootstrap-version\",\) *\"[^\"]\+\"/\1 \"${BOOTSTRAP_VERSION}\"/' test.go
diff --git a/build.env b/build.env
index da683559425..ccb03ca3ed6 100755
--- a/build.env
+++ b/build.env
@@ -17,7 +17,7 @@
source ./tools/shell_functions.inc
go version >/dev/null 2>&1 || fail "Go is not installed or is not in \$PATH. See https://vitess.io/contributing/build-from-source for install instructions."
-goversion_min 1.22.5 || echo "Go version reported: `go version`. Version 1.22.5+ recommended. See https://vitess.io/contributing/build-from-source for install instructions."
+goversion_min 1.22.7 || echo "Go version reported: `go version`. Version 1.22.7+ recommended. See https://vitess.io/contributing/build-from-source for install instructions."
mkdir -p dist
mkdir -p bin
@@ -49,7 +49,7 @@ git config core.hooksPath .git/hooks
export EXTRA_BIN=$PWD/test/bin
# support private github.com/slackhq/vitess-addons repo
+export GOPRIVATE=github.com/slackhq/vitess-addons
if [[ -n "${GH_ACCESS_TOKEN}" ]]; then
- export GOPRIVATE=github.com/slackhq/vitess-addons
git config --global url.https://${GH_ACCESS_TOKEN}@github.com/.insteadOf https://github.com/
fi
diff --git a/changelog/19.0/19.0.6/changelog.md b/changelog/19.0/19.0.6/changelog.md
new file mode 100644
index 00000000000..3a3506ac1ff
--- /dev/null
+++ b/changelog/19.0/19.0.6/changelog.md
@@ -0,0 +1,48 @@
+# Changelog of Vitess v19.0.6
+
+### Bug fixes
+#### Query Serving
+ * [release-19.0] bugfix: don't treat join predicates as filter predicates (#16472) [#16474](https://github.com/vitessio/vitess/pull/16474)
+ * [release-19.0] fix: reference table join merge (#16488) [#16496](https://github.com/vitessio/vitess/pull/16496)
+ * [release-19.0] simplify merging logic (#16525) [#16532](https://github.com/vitessio/vitess/pull/16532)
+ * [release-19.0] Fix: Offset planning in hash joins (#16540) [#16551](https://github.com/vitessio/vitess/pull/16551)
+ * [release-19.0] Fix query plan cache misses metric (#16562) [#16627](https://github.com/vitessio/vitess/pull/16627)
+ * [release-19.0] JSON Encoding: Use Type_RAW for marshalling json (#16637) [#16681](https://github.com/vitessio/vitess/pull/16681)
+#### Throttler
+ * v19 backport: Throttler/vreplication: fix app name used by VPlayer (#16578) [#16580](https://github.com/vitessio/vitess/pull/16580)
+#### VReplication
+ * [release-19.0] VStream API: validate that last PK has fields defined (#16478) [#16486](https://github.com/vitessio/vitess/pull/16486)
+#### VTAdmin
+ * [release-19.0] VTAdmin: Upgrade websockets js package (#16504) [#16512](https://github.com/vitessio/vitess/pull/16512)
+#### VTGate
+ * [release-19.0] Fix `RemoveTablet` during `TabletExternallyReparented` causing connection issues (#16371) [#16567](https://github.com/vitessio/vitess/pull/16567)
+#### VTorc
+ * [release-19.0] FindErrantGTIDs: superset is not an errant GTID situation (#16725) [#16728](https://github.com/vitessio/vitess/pull/16728)
+### CI/Build
+#### General
+ * [release-19.0] Upgrade the Golang version to `go1.22.6` [#16543](https://github.com/vitessio/vitess/pull/16543)
+#### VTAdmin
+ * [release-19.0] Update micromatch to 4.0.8 (#16660) [#16666](https://github.com/vitessio/vitess/pull/16666)
+### Enhancement
+#### Build/CI
+ * [release-19.0] Improve the queries upgrade/downgrade CI workflow by using same test code version as binary (#16494) [#16501](https://github.com/vitessio/vitess/pull/16501)
+#### Online DDL
+ * [release-19.0] VReplication workflows: retry "wrong tablet type" errors (#16645) [#16652](https://github.com/vitessio/vitess/pull/16652)
+### Internal Cleanup
+#### Build/CI
+ * [release-19.0] Move from 4-cores larger runners to `ubuntu-latest` (#16714) [#16717](https://github.com/vitessio/vitess/pull/16717)
+#### Docker
+ * [release-19.0] Remove mysql57/percona57 bootstrap images (#16620) [#16622](https://github.com/vitessio/vitess/pull/16622)
+### Performance
+#### Online DDL
+ * v19 backport: Online DDL: avoid SQL's `CONVERT(...)`, convert programmatically if needed [#16603](https://github.com/vitessio/vitess/pull/16603)
+### Regression
+#### Query Serving
+ * [release-19.0] bugfix: Allow cross-keyspace joins (#16520) [#16523](https://github.com/vitessio/vitess/pull/16523)
+### Release
+#### General
+ * [release-19.0] Bump to `v19.0.6-SNAPSHOT` after the `v19.0.5` release [#16456](https://github.com/vitessio/vitess/pull/16456)
+### Testing
+#### Query Serving
+ * [release-19.0] Replace ErrorContains checks with Error checks before running upgrade downgrade [#16700](https://github.com/vitessio/vitess/pull/16700)
+
diff --git a/changelog/19.0/19.0.6/release_notes.md b/changelog/19.0/19.0.6/release_notes.md
new file mode 100644
index 00000000000..422bb50d1eb
--- /dev/null
+++ b/changelog/19.0/19.0.6/release_notes.md
@@ -0,0 +1,7 @@
+# Release of Vitess v19.0.6
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/19.0/19.0.6/changelog.md).
+
+The release includes 21 merged Pull Requests.
+
+Thanks to all our contributors: @GuptaManan100, @app/vitess-bot, @shlomi-noach, @systay, @vitess-bot
+
diff --git a/changelog/19.0/README.md b/changelog/19.0/README.md
index ae90ef2df1b..cd43ee9bbd3 100644
--- a/changelog/19.0/README.md
+++ b/changelog/19.0/README.md
@@ -1,4 +1,8 @@
## v19.0
+* **[19.0.6](19.0.6)**
+ * [Changelog](19.0.6/changelog.md)
+ * [Release Notes](19.0.6/release_notes.md)
+
* **[19.0.5](19.0.5)**
* [Changelog](19.0.5/changelog.md)
* [Release Notes](19.0.5/release_notes.md)
diff --git a/docker/base/Dockerfile b/docker/base/Dockerfile
index 1db25591f71..d01f1268111 100644
--- a/docker/base/Dockerfile
+++ b/docker/base/Dockerfile
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.7
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}"
diff --git a/docker/base/Dockerfile.mysql57 b/docker/base/Dockerfile.mysql57
index 8d66a1e3604..c5cd2464326 100644
--- a/docker/base/Dockerfile.mysql57
+++ b/docker/base/Dockerfile.mysql57
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.7
ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
FROM "${image}"
diff --git a/docker/base/Dockerfile.percona57 b/docker/base/Dockerfile.percona57
index d73eed5b917..30d2d75b5f9 100644
--- a/docker/base/Dockerfile.percona57
+++ b/docker/base/Dockerfile.percona57
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.7
ARG image="vitess/bootstrap:${bootstrap_version}-percona57"
FROM "${image}"
diff --git a/docker/base/Dockerfile.percona80 b/docker/base/Dockerfile.percona80
index 597979e05ea..9bc3497d356 100644
--- a/docker/base/Dockerfile.percona80
+++ b/docker/base/Dockerfile.percona80
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.7
ARG image="vitess/bootstrap:${bootstrap_version}-percona80"
FROM "${image}"
diff --git a/docker/bootstrap/CHANGELOG.md b/docker/bootstrap/CHANGELOG.md
index d739c55b80b..47e356be08a 100644
--- a/docker/bootstrap/CHANGELOG.md
+++ b/docker/bootstrap/CHANGELOG.md
@@ -129,3 +129,12 @@ List of changes between bootstrap image versions.
## [27.5] - 2024-07-02
### Changes
- Update build to golang 1.22.5
+
+## [27.6] - 2024-08-07
+### Changes
+- Update build to golang 1.22.6
+- MySQL57 and Percona57 tags will be removed thereafter
+
+## [27.7] - 2024-09-05
+### Changes
+- Update build to golang 1.22.7
diff --git a/docker/bootstrap/Dockerfile.common b/docker/bootstrap/Dockerfile.common
index ac7859c0a1e..96e47030a00 100644
--- a/docker/bootstrap/Dockerfile.common
+++ b/docker/bootstrap/Dockerfile.common
@@ -1,4 +1,4 @@
-FROM --platform=linux/amd64 golang:1.22.5-bullseye
+FROM --platform=linux/amd64 golang:1.22.7-bullseye
# Install Vitess build dependencies
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
diff --git a/docker/bootstrap/Dockerfile.mysql57 b/docker/bootstrap/Dockerfile.mysql57
deleted file mode 100644
index d523241f499..00000000000
--- a/docker/bootstrap/Dockerfile.mysql57
+++ /dev/null
@@ -1,26 +0,0 @@
-ARG bootstrap_version
-ARG image="vitess/bootstrap:${bootstrap_version}-common"
-
-FROM --platform=linux/amd64 "${image}"
-
-# Install MySQL 5.7
-RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends gnupg dirmngr ca-certificates && \
- for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com A8D3785C && break; done && \
- add-apt-repository 'deb http://repo.mysql.com/apt/debian/ buster mysql-5.7' && \
- for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done && \
- echo 'deb http://repo.percona.com/apt buster main' > /etc/apt/sources.list.d/percona.list && \
- { \
- echo debconf debconf/frontend select Noninteractive; \
- echo percona-server-server-5.7 percona-server-server/root_password password 'unused'; \
- echo percona-server-server-5.7 percona-server-server/root_password_again password 'unused'; \
- } | debconf-set-selections && \
- percona-release enable-only tools \
- apt-get update -y && \
- DEBIAN_FRONTEND=noninteractive apt-get install -y mysql-server libmysqlclient-dev libdbd-mysql-perl rsync libev4 percona-xtrabackup-24 && \
- rm -rf /var/lib/apt/lists/*
-
-# Bootstrap Vitess
-WORKDIR /vt/src/vitess.io/vitess
-
-USER vitess
-RUN ./bootstrap.sh
diff --git a/docker/bootstrap/Dockerfile.percona57 b/docker/bootstrap/Dockerfile.percona57
deleted file mode 100644
index f43c655b3d7..00000000000
--- a/docker/bootstrap/Dockerfile.percona57
+++ /dev/null
@@ -1,24 +0,0 @@
-ARG bootstrap_version
-ARG image="vitess/bootstrap:${bootstrap_version}-common"
-
-FROM --platform=linux/amd64 "${image}"
-
-# Install Percona 5.7
-RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done && \
- add-apt-repository 'deb http://repo.percona.com/apt bullseye main' && \
- { \
- echo debconf debconf/frontend select Noninteractive; \
- echo percona-server-server-5.7 percona-server-server/root_password password 'unused'; \
- echo percona-server-server-5.7 percona-server-server/root_password_again password 'unused'; \
- } | debconf-set-selections && \
- percona-release enable-only tools \
- apt-get update && \
- apt-get install -y --no-install-recommends percona-server-server-5.7 && \
- apt-get install -y --no-install-recommends libperconaserverclient20-dev percona-xtrabackup-24 && \
- rm -rf /var/lib/apt/lists/*
-
-# Bootstrap Vitess
-WORKDIR /vt/src/vitess.io/vitess
-
-USER vitess
-RUN ./bootstrap.sh
diff --git a/docker/bootstrap/README.md b/docker/bootstrap/README.md
index 717f4336442..b273305d6b9 100644
--- a/docker/bootstrap/README.md
+++ b/docker/bootstrap/README.md
@@ -6,9 +6,7 @@ after successfully running `bootstrap.sh` and `dev.env`.
The `vitess/bootstrap` image comes in different flavors:
* `vitess/bootstrap:common` - dependencies that are common to all flavors
-* `vitess/bootstrap:mysql57` - bootstrap image for MySQL 5.7
* `vitess/bootstrap:mysql80` - bootstrap image for MySQL 8.0
-* `vitess/bootstrap:percona57` - bootstrap image for Percona Server 5.7
* `vitess/bootstrap:percona80` - bootstrap image for Percona Server 8.0
**NOTE: Unlike the base image that builds Vitess itself, this bootstrap image
diff --git a/docker/lite/Dockerfile.mysql57 b/docker/lite/Dockerfile.mysql57
index d8f38f32496..ab8d2403cf5 100644
--- a/docker/lite/Dockerfile.mysql57
+++ b/docker/lite/Dockerfile.mysql57
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.7
ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.mysql80 b/docker/lite/Dockerfile.mysql80
index bb73c0a8ff5..5161e1a2b2f 100644
--- a/docker/lite/Dockerfile.mysql80
+++ b/docker/lite/Dockerfile.mysql80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.7
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.percona57 b/docker/lite/Dockerfile.percona57
index da5798bf228..e3d2bfc6062 100644
--- a/docker/lite/Dockerfile.percona57
+++ b/docker/lite/Dockerfile.percona57
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.7
ARG image="vitess/bootstrap:${bootstrap_version}-percona57"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.percona80 b/docker/lite/Dockerfile.percona80
index 66478659c20..96bbef7a587 100644
--- a/docker/lite/Dockerfile.percona80
+++ b/docker/lite/Dockerfile.percona80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.7
ARG image="vitess/bootstrap:${bootstrap_version}-percona80"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.testing b/docker/lite/Dockerfile.testing
index 254502a08c1..892468cbe46 100644
--- a/docker/lite/Dockerfile.testing
+++ b/docker/lite/Dockerfile.testing
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.7
ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.ubi7.mysql57 b/docker/lite/Dockerfile.ubi7.mysql57
index 6625fe3cf53..e68104838ce 100644
--- a/docker/lite/Dockerfile.ubi7.mysql57
+++ b/docker/lite/Dockerfile.ubi7.mysql57
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.7
ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.ubi7.mysql80 b/docker/lite/Dockerfile.ubi7.mysql80
index 3807e67c230..6d10bfdda98 100644
--- a/docker/lite/Dockerfile.ubi7.mysql80
+++ b/docker/lite/Dockerfile.ubi7.mysql80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.7
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.ubi7.percona57 b/docker/lite/Dockerfile.ubi7.percona57
index 86fa1ca2038..c7717d96bf2 100644
--- a/docker/lite/Dockerfile.ubi7.percona57
+++ b/docker/lite/Dockerfile.ubi7.percona57
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.7
ARG image="vitess/bootstrap:${bootstrap_version}-percona57"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.ubi7.percona80 b/docker/lite/Dockerfile.ubi7.percona80
index aff6af97cb2..c9092d648ca 100644
--- a/docker/lite/Dockerfile.ubi7.percona80
+++ b/docker/lite/Dockerfile.ubi7.percona80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.7
ARG image="vitess/bootstrap:${bootstrap_version}-percona80"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.ubi8.arm64.mysql80 b/docker/lite/Dockerfile.ubi8.arm64.mysql80
index f4bde08d2b3..3dcaf9a6e73 100644
--- a/docker/lite/Dockerfile.ubi8.arm64.mysql80
+++ b/docker/lite/Dockerfile.ubi8.arm64.mysql80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.7
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.ubi8.mysql80 b/docker/lite/Dockerfile.ubi8.mysql80
index 2bc5d9ddd07..b779835603c 100644
--- a/docker/lite/Dockerfile.ubi8.mysql80
+++ b/docker/lite/Dockerfile.ubi8.mysql80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.7
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}" AS builder
diff --git a/docker/local/Dockerfile b/docker/local/Dockerfile
index 8d74247bce7..8529d26e42a 100644
--- a/docker/local/Dockerfile
+++ b/docker/local/Dockerfile
@@ -1,4 +1,4 @@
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.7
ARG image="vitess/bootstrap:${bootstrap_version}-common"
FROM "${image}"
diff --git a/docker/vttestserver/Dockerfile.mysql57 b/docker/vttestserver/Dockerfile.mysql57
index 444df680f12..5abbfc5ee15 100644
--- a/docker/vttestserver/Dockerfile.mysql57
+++ b/docker/vttestserver/Dockerfile.mysql57
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.7
ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
FROM "${image}" AS builder
diff --git a/docker/vttestserver/Dockerfile.mysql80 b/docker/vttestserver/Dockerfile.mysql80
index f6ca717180a..b4d9d5b0969 100644
--- a/docker/vttestserver/Dockerfile.mysql80
+++ b/docker/vttestserver/Dockerfile.mysql80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.7
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}" AS builder
diff --git a/examples/compose/docker-compose.beginners.yml b/examples/compose/docker-compose.beginners.yml
index 2e816d6a1c1..8af25f3b173 100644
--- a/examples/compose/docker-compose.beginners.yml
+++ b/examples/compose/docker-compose.beginners.yml
@@ -58,7 +58,7 @@ services:
- "3306"
vtctld:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- "15000:$WEB_PORT"
- "$GRPC_PORT"
@@ -81,7 +81,7 @@ services:
condition: service_healthy
vtgate:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- "15099:$WEB_PORT"
- "$GRPC_PORT"
@@ -111,7 +111,7 @@ services:
condition: service_healthy
schemaload:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
command:
- sh
- -c
@@ -144,12 +144,12 @@ services:
environment:
- KEYSPACES=$KEYSPACE
- GRPC_PORT=15999
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
volumes:
- .:/script
vttablet100:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- "15100:$WEB_PORT"
- "$GRPC_PORT"
@@ -181,7 +181,7 @@ services:
retries: 15
vttablet101:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- "15101:$WEB_PORT"
- "$GRPC_PORT"
@@ -213,7 +213,7 @@ services:
retries: 15
vttablet102:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- "15102:$WEB_PORT"
- "$GRPC_PORT"
@@ -245,7 +245,7 @@ services:
retries: 15
vttablet103:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- "15103:$WEB_PORT"
- "$GRPC_PORT"
@@ -277,7 +277,7 @@ services:
retries: 15
vtorc:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
command: ["sh", "-c", "/script/vtorc-up.sh"]
depends_on:
- vtctld
@@ -307,7 +307,7 @@ services:
retries: 15
vreplication:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
volumes:
- ".:/script"
environment:
diff --git a/examples/compose/docker-compose.yml b/examples/compose/docker-compose.yml
index 8626e6f3c85..f4944ae6b97 100644
--- a/examples/compose/docker-compose.yml
+++ b/examples/compose/docker-compose.yml
@@ -75,7 +75,7 @@ services:
- SCHEMA_FILES=lookup_keyspace_schema_file.sql
- POST_LOAD_FILE=
- EXTERNAL_DB=0
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
volumes:
- .:/script
schemaload_test_keyspace:
@@ -101,7 +101,7 @@ services:
- SCHEMA_FILES=test_keyspace_schema_file.sql
- POST_LOAD_FILE=
- EXTERNAL_DB=0
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
volumes:
- .:/script
set_keyspace_durability_policy:
@@ -115,7 +115,7 @@ services:
environment:
- KEYSPACES=test_keyspace lookup_keyspace
- GRPC_PORT=15999
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
volumes:
- .:/script
vreplication:
@@ -129,7 +129,7 @@ services:
- TOPOLOGY_FLAGS=--topo_implementation consul --topo_global_server_address consul1:8500
--topo_global_root vitess/global
- EXTERNAL_DB=0
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
volumes:
- .:/script
vtctld:
@@ -143,7 +143,7 @@ services:
depends_on:
external_db_host:
condition: service_healthy
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- 15000:8080
- "15999"
@@ -160,7 +160,7 @@ services:
--normalize_queries=true '
depends_on:
- vtctld
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- 15099:8080
- "15999"
@@ -182,7 +182,7 @@ services:
- EXTERNAL_DB=0
- DB_USER=
- DB_PASS=
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- 13000:8080
volumes:
@@ -217,7 +217,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- 15101:8080
- "15999"
@@ -254,7 +254,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- 15102:8080
- "15999"
@@ -291,7 +291,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- 15201:8080
- "15999"
@@ -328,7 +328,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- 15202:8080
- "15999"
@@ -365,7 +365,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- 15301:8080
- "15999"
@@ -402,7 +402,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- 15302:8080
- "15999"
diff --git a/examples/compose/vtcompose/docker-compose.test.yml b/examples/compose/vtcompose/docker-compose.test.yml
index f4abaad543c..8f88a2d08ae 100644
--- a/examples/compose/vtcompose/docker-compose.test.yml
+++ b/examples/compose/vtcompose/docker-compose.test.yml
@@ -79,7 +79,7 @@ services:
- SCHEMA_FILES=test_keyspace_schema_file.sql
- POST_LOAD_FILE=
- EXTERNAL_DB=0
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
volumes:
- .:/script
schemaload_unsharded_keyspace:
@@ -103,7 +103,7 @@ services:
- SCHEMA_FILES=unsharded_keyspace_schema_file.sql
- POST_LOAD_FILE=
- EXTERNAL_DB=0
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
volumes:
- .:/script
set_keyspace_durability_policy_test_keyspace:
@@ -117,7 +117,7 @@ services:
environment:
- GRPC_PORT=15999
- KEYSPACES=test_keyspace
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
volumes:
- .:/script
set_keyspace_durability_policy_unsharded_keyspace:
@@ -130,7 +130,7 @@ services:
environment:
- GRPC_PORT=15999
- KEYSPACES=unsharded_keyspace
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
volumes:
- .:/script
vreplication:
@@ -144,7 +144,7 @@ services:
- TOPOLOGY_FLAGS=--topo_implementation consul --topo_global_server_address consul1:8500
--topo_global_root vitess/global
- EXTERNAL_DB=0
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
volumes:
- .:/script
vtctld:
@@ -159,7 +159,7 @@ services:
depends_on:
external_db_host:
condition: service_healthy
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- 15000:8080
- "15999"
@@ -176,7 +176,7 @@ services:
''grpc-vtgateservice'' --normalize_queries=true '
depends_on:
- vtctld
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- 15099:8080
- "15999"
@@ -199,7 +199,7 @@ services:
- EXTERNAL_DB=0
- DB_USER=
- DB_PASS=
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- 13000:8080
volumes:
@@ -234,7 +234,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- 15101:8080
- "15999"
@@ -271,7 +271,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- 15102:8080
- "15999"
@@ -308,7 +308,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- 15201:8080
- "15999"
@@ -345,7 +345,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- 15202:8080
- "15999"
@@ -382,7 +382,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- 15301:8080
- "15999"
diff --git a/examples/compose/vtcompose/vtcompose.go b/examples/compose/vtcompose/vtcompose.go
index 25a1a19bce5..ce04a876759 100644
--- a/examples/compose/vtcompose/vtcompose.go
+++ b/examples/compose/vtcompose/vtcompose.go
@@ -533,7 +533,7 @@ func generateDefaultShard(tabAlias int, shard string, keyspaceData keyspaceInfo,
- op: add
path: /services/init_shard_primary%[2]d
value:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
command: ["sh", "-c", "/vt/bin/vtctldclient %[5]s InitShardPrimary --force %[4]s/%[3]s %[6]s-%[2]d "]
%[1]s
`, dependsOn, aliases[0], shard, keyspaceData.keyspace, opts.topologyFlags, opts.cell)
@@ -565,7 +565,7 @@ func generateExternalPrimary(
- op: add
path: /services/vttablet%[1]d
value:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- "15%[1]d:%[3]d"
- "%[4]d"
@@ -627,7 +627,7 @@ func generateDefaultTablet(tabAlias int, shard, role, keyspace string, dbInfo ex
- op: add
path: /services/vttablet%[1]d
value:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- "15%[1]d:%[4]d"
- "%[5]d"
@@ -665,7 +665,7 @@ func generateVtctld(opts vtOptions) string {
- op: add
path: /services/vtctld
value:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- "15000:%[1]d"
- "%[2]d"
@@ -696,7 +696,7 @@ func generateVtgate(opts vtOptions) string {
- op: add
path: /services/vtgate
value:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
ports:
- "15099:%[1]d"
- "%[2]d"
@@ -738,7 +738,7 @@ func generateVTOrc(dbInfo externalDbInfo, keyspaceInfoMap map[string]keyspaceInf
- op: add
path: /services/vtorc
value:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
volumes:
- ".:/script"
environment:
@@ -763,7 +763,7 @@ func generateVreplication(dbInfo externalDbInfo, opts vtOptions) string {
- op: add
path: /services/vreplication
value:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
volumes:
- ".:/script"
environment:
@@ -791,7 +791,7 @@ func generateSetKeyspaceDurabilityPolicy(
- op: add
path: /services/set_keyspace_durability_policy_%[3]s
value:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
volumes:
- ".:/script"
environment:
@@ -828,7 +828,7 @@ func generateSchemaload(
- op: add
path: /services/schemaload_%[7]s
value:
- image: vitess/lite:v19.0.5
+ image: vitess/lite:v19.0.6
volumes:
- ".:/script"
environment:
diff --git a/examples/operator/101_initial_cluster.yaml b/examples/operator/101_initial_cluster.yaml
index 4c4d92f1f1f..9fdb58641a3 100644
--- a/examples/operator/101_initial_cluster.yaml
+++ b/examples/operator/101_initial_cluster.yaml
@@ -8,14 +8,14 @@ metadata:
name: example
spec:
images:
- vtctld: vitess/lite:v19.0.5
- vtadmin: vitess/vtadmin:v19.0.5
- vtgate: vitess/lite:v19.0.5
- vttablet: vitess/lite:v19.0.5
- vtbackup: vitess/lite:v19.0.5
- vtorc: vitess/lite:v19.0.5
+ vtctld: vitess/lite:v19.0.6
+ vtadmin: vitess/vtadmin:v19.0.6
+ vtgate: vitess/lite:v19.0.6
+ vttablet: vitess/lite:v19.0.6
+ vtbackup: vitess/lite:v19.0.6
+ vtorc: vitess/lite:v19.0.6
mysqld:
- mysql80Compatible: vitess/lite:v19.0.5
+ mysql80Compatible: vitess/lite:v19.0.6
mysqldExporter: prom/mysqld-exporter:v0.11.0
cells:
- name: zone1
diff --git a/examples/operator/201_customer_tablets.yaml b/examples/operator/201_customer_tablets.yaml
index d49cec49120..e61eba759bd 100644
--- a/examples/operator/201_customer_tablets.yaml
+++ b/examples/operator/201_customer_tablets.yaml
@@ -4,14 +4,14 @@ metadata:
name: example
spec:
images:
- vtctld: vitess/lite:v19.0.5
- vtadmin: vitess/vtadmin:v19.0.5
- vtgate: vitess/lite:v19.0.5
- vttablet: vitess/lite:v19.0.5
- vtbackup: vitess/lite:v19.0.5
- vtorc: vitess/lite:v19.0.5
+ vtctld: vitess/lite:v19.0.6
+ vtadmin: vitess/vtadmin:v19.0.6
+ vtgate: vitess/lite:v19.0.6
+ vttablet: vitess/lite:v19.0.6
+ vtbackup: vitess/lite:v19.0.6
+ vtorc: vitess/lite:v19.0.6
mysqld:
- mysql80Compatible: vitess/lite:v19.0.5
+ mysql80Compatible: vitess/lite:v19.0.6
mysqldExporter: prom/mysqld-exporter:v0.11.0
cells:
- name: zone1
diff --git a/examples/operator/302_new_shards.yaml b/examples/operator/302_new_shards.yaml
index 5a0e8e141d1..0690f11c9d7 100644
--- a/examples/operator/302_new_shards.yaml
+++ b/examples/operator/302_new_shards.yaml
@@ -4,14 +4,14 @@ metadata:
name: example
spec:
images:
- vtctld: vitess/lite:v19.0.5
- vtadmin: vitess/vtadmin:v19.0.5
- vtgate: vitess/lite:v19.0.5
- vttablet: vitess/lite:v19.0.5
- vtbackup: vitess/lite:v19.0.5
- vtorc: vitess/lite:v19.0.5
+ vtctld: vitess/lite:v19.0.6
+ vtadmin: vitess/vtadmin:v19.0.6
+ vtgate: vitess/lite:v19.0.6
+ vttablet: vitess/lite:v19.0.6
+ vtbackup: vitess/lite:v19.0.6
+ vtorc: vitess/lite:v19.0.6
mysqld:
- mysql80Compatible: vitess/lite:v19.0.5
+ mysql80Compatible: vitess/lite:v19.0.6
mysqldExporter: prom/mysqld-exporter:v0.11.0
cells:
- name: zone1
diff --git a/examples/operator/306_down_shard_0.yaml b/examples/operator/306_down_shard_0.yaml
index 1b28fe76bc6..4491776cdc2 100644
--- a/examples/operator/306_down_shard_0.yaml
+++ b/examples/operator/306_down_shard_0.yaml
@@ -4,14 +4,14 @@ metadata:
name: example
spec:
images:
- vtctld: vitess/lite:v19.0.5
- vtadmin: vitess/vtadmin:v19.0.5
- vtgate: vitess/lite:v19.0.5
- vttablet: vitess/lite:v19.0.5
- vtbackup: vitess/lite:v19.0.5
- vtorc: vitess/lite:v19.0.5
+ vtctld: vitess/lite:v19.0.6
+ vtadmin: vitess/vtadmin:v19.0.6
+ vtgate: vitess/lite:v19.0.6
+ vttablet: vitess/lite:v19.0.6
+ vtbackup: vitess/lite:v19.0.6
+ vtorc: vitess/lite:v19.0.6
mysqld:
- mysql80Compatible: vitess/lite:v19.0.5
+ mysql80Compatible: vitess/lite:v19.0.6
mysqldExporter: prom/mysqld-exporter:v0.11.0
cells:
- name: zone1
diff --git a/go.mod b/go.mod
index 40f2b19e449..e6b2cb1453a 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
module vitess.io/vitess
-go 1.22.5
+go 1.22.7
require (
cloud.google.com/go/storage v1.39.0
diff --git a/go/mysql/json/marshal.go b/go/mysql/json/marshal.go
index d1a0072ccbb..97d14a336c8 100644
--- a/go/mysql/json/marshal.go
+++ b/go/mysql/json/marshal.go
@@ -175,6 +175,6 @@ func MarshalSQLValue(buf []byte) (*sqltypes.Value, error) {
return nil, err
}
- newVal := sqltypes.MakeTrusted(querypb.Type_JSON, jsonVal.MarshalSQLTo(nil))
+ newVal := sqltypes.MakeTrusted(querypb.Type_RAW, jsonVal.MarshalSQLTo(nil))
return &newVal, nil
}
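
The `marshal.go` change above tags the marshalled JSON as `Type_RAW` instead of `Type_JSON`, so the already-serialized SQL text is passed through as-is rather than re-encoded downstream. A minimal sketch of the effect, assuming the `vitess.io/vitess/go/sqltypes` and query proto packages referenced in the hunk (illustrative only, not taken from the repo):

```go
package main

import (
	"fmt"

	"vitess.io/vitess/go/sqltypes"
	querypb "vitess.io/vitess/go/vt/proto/query"
)

func main() {
	// A JSON document already marshalled to its SQL text form.
	payload := []byte(`{"a": 1}`)

	// Tagging it RAW means consumers treat the bytes verbatim instead of
	// applying JSON-specific re-encoding.
	v := sqltypes.MakeTrusted(querypb.Type_RAW, payload)
	fmt.Println(v.Type(), v.ToString())
}
```
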
diff --git a/go/mysql/replication/replication_status.go b/go/mysql/replication/replication_status.go
index 6b3d1bf2214..0b8ba0f785f 100644
--- a/go/mysql/replication/replication_status.go
+++ b/go/mysql/replication/replication_status.go
@@ -201,6 +201,14 @@ func (s *ReplicationStatus) FindErrantGTIDs(otherReplicaStatuses []*ReplicationS
otherSets = append(otherSets, otherSet)
}
+ if len(otherSets) == 1 {
+ // If there is only one replica to compare against, and one is a subset of the other, then we consider them not to be errant.
+ // It simply means that one replica might be behind on replication.
+ if relayLogSet.Contains(otherSets[0]) || otherSets[0].Contains(relayLogSet) {
+ return nil, nil
+ }
+ }
+
// Copy set for final diffSet so we don't mutate receiver.
diffSet := make(Mysql56GTIDSet, len(relayLogSet))
for sid, intervals := range relayLogSet {
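
The `FindErrantGTIDs` change above short-circuits when there is exactly one peer to compare against and one GTID set contains the other: that situation is treated as replication lag, not errant GTIDs. A simplified, self-contained sketch of that containment rule, using toy types rather than the real `Mysql56GTIDSet`:

```go
package main

import "fmt"

// gtidSet is a simplified stand-in for a GTID set: server UUID -> highest
// transaction id seen (real GTID sets track interval lists per UUID).
type gtidSet map[string]int64

// contains reports whether super covers every transaction in sub.
func contains(super, sub gtidSet) bool {
	for sid, upTo := range sub {
		if super[sid] < upTo {
			return false
		}
	}
	return true
}

func main() {
	replicaA := gtidSet{"source-uuid": 100} // caught up
	replicaB := gtidSet{"source-uuid": 90}  // merely lagging

	// With a single peer, one set being a subset of the other is not an
	// errant-GTID situation; one replica is simply behind.
	if contains(replicaA, replicaB) || contains(replicaB, replicaA) {
		fmt.Println("no errant GTIDs: one replica is simply behind")
	}
}
```
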
diff --git a/go/mysql/replication/replication_status_test.go b/go/mysql/replication/replication_status_test.go
index c1f5991f253..a88cb1570f7 100644
--- a/go/mysql/replication/replication_status_test.go
+++ b/go/mysql/replication/replication_status_test.go
@@ -105,6 +105,16 @@ func TestFindErrantGTIDs(t *testing.T) {
otherRepStatuses: []*ReplicationStatus{{SourceUUID: sid1, RelayLogPosition: Position{GTIDSet: set1}}},
// servers with the same GTID sets should not be diagnosed with errant GTIDs
want: nil,
+ }, {
+ mainRepStatus: &ReplicationStatus{SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set2}},
+ otherRepStatuses: []*ReplicationStatus{{SourceUUID: sid1, RelayLogPosition: Position{GTIDSet: set3}}},
+ // set2 is a strict subset of set3
+ want: nil,
+ }, {
+ mainRepStatus: &ReplicationStatus{SourceUUID: sourceSID, RelayLogPosition: Position{GTIDSet: set3}},
+ otherRepStatuses: []*ReplicationStatus{{SourceUUID: sid1, RelayLogPosition: Position{GTIDSet: set2}}},
+ // set3 is a strict superset of set2
+ want: nil,
}}
for _, testcase := range testcases {
diff --git a/go/test/endtoend/backup/vtbackup/backup_only_test.go b/go/test/endtoend/backup/vtbackup/backup_only_test.go
index 33474ec5ff8..ebb0767a6a7 100644
--- a/go/test/endtoend/backup/vtbackup/backup_only_test.go
+++ b/go/test/endtoend/backup/vtbackup/backup_only_test.go
@@ -69,15 +69,10 @@ func TestTabletInitialBackup(t *testing.T) {
// Initialize the tablets
initTablets(t, false, false)
- vtTabletVersion, err := cluster.GetMajorVersion("vttablet")
- require.NoError(t, err)
- // For all version at or above v17.0.0, each replica will start in super_read_only mode. Let's verify that is working correctly.
- if vtTabletVersion >= 17 {
- err := primary.VttabletProcess.CreateDB("testDB")
- require.ErrorContains(t, err, "The MySQL server is running with the --super-read-only option so it cannot execute this statement")
- err = replica1.VttabletProcess.CreateDB("testDB")
- require.ErrorContains(t, err, "The MySQL server is running with the --super-read-only option so it cannot execute this statement")
- }
+ err := primary.VttabletProcess.CreateDB("testDB")
+ require.ErrorContains(t, err, "The MySQL server is running with the --super-read-only option so it cannot execute this statement")
+ err = replica1.VttabletProcess.CreateDB("testDB")
+ require.ErrorContains(t, err, "The MySQL server is running with the --super-read-only option so it cannot execute this statement")
// Restore the Tablet
restore(t, primary, "replica", "NOT_SERVING")
@@ -172,7 +167,7 @@ func firstBackupTest(t *testing.T, tabletType string) {
restore(t, replica2, "replica", "SERVING")
// Replica2 takes time to serve. Sleeping for 5 sec.
time.Sleep(5 * time.Second)
- //check the new replica has the data
+ // check the new replica has the data
cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2)
removeBackups(t)
diff --git a/go/test/endtoend/cluster/vtctld_process.go b/go/test/endtoend/cluster/vtctld_process.go
index d87427af9b9..6ac6ed5d2b0 100644
--- a/go/test/endtoend/cluster/vtctld_process.go
+++ b/go/test/endtoend/cluster/vtctld_process.go
@@ -65,15 +65,10 @@ func (vtctld *VtctldProcess) Setup(cell string, extraArgs ...string) (err error)
"--log_dir", vtctld.LogDir,
"--port", fmt.Sprintf("%d", vtctld.Port),
"--grpc_port", fmt.Sprintf("%d", vtctld.GrpcPort),
+ "--bind-address", "127.0.0.1",
+ "--grpc_bind_address", "127.0.0.1",
)
- if v, err := GetMajorVersion("vtctld"); err != nil {
- return err
- } else if v >= 18 {
- vtctld.proc.Args = append(vtctld.proc.Args, "--bind-address", "127.0.0.1")
- vtctld.proc.Args = append(vtctld.proc.Args, "--grpc_bind_address", "127.0.0.1")
- }
-
if *isCoverage {
vtctld.proc.Args = append(vtctld.proc.Args, "--test.coverprofile="+getCoveragePath("vtctld.out"))
}
diff --git a/go/test/endtoend/cluster/vtgate_process.go b/go/test/endtoend/cluster/vtgate_process.go
index d1877fb89bb..cec137bfabe 100644
--- a/go/test/endtoend/cluster/vtgate_process.go
+++ b/go/test/endtoend/cluster/vtgate_process.go
@@ -85,12 +85,8 @@ func (vtgate *VtgateProcess) Setup() (err error) {
"--tablet_types_to_wait", vtgate.TabletTypesToWait,
"--service_map", vtgate.ServiceMap,
"--mysql_auth_server_impl", vtgate.MySQLAuthServerImpl,
- }
- if v, err := GetMajorVersion("vtgate"); err != nil {
- return err
- } else if v >= 18 {
- args = append(args, "--bind-address", "127.0.0.1")
- args = append(args, "--grpc_bind_address", "127.0.0.1")
+ "--bind-address", "127.0.0.1",
+ "--grpc_bind_address", "127.0.0.1",
}
// If no explicit mysql_server_version has been specified then we autodetect
// the MySQL version that will be used for the test and base the vtgate's
diff --git a/go/test/endtoend/cluster/vtorc_process.go b/go/test/endtoend/cluster/vtorc_process.go
index 25bbb74c36c..c6ab9c5471a 100644
--- a/go/test/endtoend/cluster/vtorc_process.go
+++ b/go/test/endtoend/cluster/vtorc_process.go
@@ -126,14 +126,9 @@ func (orc *VTOrcProcess) Setup() (err error) {
"--instance-poll-time", "1s",
// Faster topo information refresh speeds up the tests. This doesn't add any significant load either
"--topo-information-refresh-duration", "3s",
+ "--bind-address", "127.0.0.1",
)
- if v, err := GetMajorVersion("vtorc"); err != nil {
- return err
- } else if v >= 18 {
- orc.proc.Args = append(orc.proc.Args, "--bind-address", "127.0.0.1")
- }
-
if *isCoverage {
orc.proc.Args = append(orc.proc.Args, "--test.coverprofile="+getCoveragePath("orc.out"))
}
diff --git a/go/test/endtoend/cluster/vttablet_process.go b/go/test/endtoend/cluster/vttablet_process.go
index 69f1cd4bb88..f5b19094195 100644
--- a/go/test/endtoend/cluster/vttablet_process.go
+++ b/go/test/endtoend/cluster/vttablet_process.go
@@ -110,13 +110,9 @@ func (vttablet *VttabletProcess) Setup() (err error) {
"--file_backup_storage_root", vttablet.FileBackupStorageRoot,
"--service_map", vttablet.ServiceMap,
"--db_charset", vttablet.Charset,
+ "--bind-address", "127.0.0.1",
+ "--grpc_bind_address", "127.0.0.1",
)
- if v, err := GetMajorVersion("vttablet"); err != nil {
- return err
- } else if v >= 18 {
- vttablet.proc.Args = append(vttablet.proc.Args, "--bind-address", "127.0.0.1")
- vttablet.proc.Args = append(vttablet.proc.Args, "--grpc_bind_address", "127.0.0.1")
- }
if *isCoverage {
vttablet.proc.Args = append(vttablet.proc.Args, "--test.coverprofile="+getCoveragePath("vttablet.out"))
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80-vcopier/allow_schemadiff_normalization b/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80-vcopier/allow_schemadiff_normalization
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80-vcopier/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80-vcopier/alter
new file mode 100644
index 00000000000..b5ec82b1a8b
--- /dev/null
+++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80-vcopier/alter
@@ -0,0 +1 @@
+MODIFY `t1` varchar(128) CHARACTER SET utf8mb4 NOT NULL, MODIFY `t2` varchar(128) CHARACTER SET latin2 NOT NULL, MODIFY `tutf8` varchar(128) CHARACTER SET latin1 NOT NULL
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80-vcopier/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80-vcopier/create.sql
new file mode 100644
index 00000000000..79e8fda23ee
--- /dev/null
+++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80-vcopier/create.sql
@@ -0,0 +1,19 @@
+drop table if exists onlineddl_test;
+create table onlineddl_test (
+ id int auto_increment,
+ t1 varchar(128) charset latin1 collate latin1_swedish_ci,
+ t2 varchar(128) charset latin1 collate latin1_swedish_ci,
+ tutf8 varchar(128) charset utf8,
+ tutf8mb4 varchar(128) charset utf8mb4,
+ tlatin1 varchar(128) charset latin1 collate latin1_swedish_ci,
+ primary key(id)
+) auto_increment=1;
+
+insert into onlineddl_test values (null, md5(rand()), md5(rand()), md5(rand()), md5(rand()), md5(rand()));
+insert into onlineddl_test values (null, 'átesting', 'átesting', 'átesting', 'átesting', 'átesting');
+insert into onlineddl_test values (null, 'testátest', 'testátest', 'testátest', '🍻😀', 'átesting');
+insert into onlineddl_test values (null, 'átesting-binlog', 'átesting-binlog', 'átesting-binlog', 'átesting-binlog', 'átesting-binlog');
+insert into onlineddl_test values (null, 'testátest-binlog', 'testátest-binlog', 'testátest-binlog', '🍻😀', 'átesting-binlog');
+insert into onlineddl_test values (null, 'átesting-bnull', 'átesting-bnull', 'átesting-bnull', null, null);
+
+drop event if exists onlineddl_test;
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80-vcopier/ignore_versions b/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80-vcopier/ignore_versions
new file mode 100644
index 00000000000..0790a1e68fd
--- /dev/null
+++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80-vcopier/ignore_versions
@@ -0,0 +1 @@
+(5.5|5.6|5.7)
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/non-utf8-charset-pk/allow_schemadiff_normalization b/go/test/endtoend/onlineddl/vrepl_suite/testdata/non-utf8-charset-pk/allow_schemadiff_normalization
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/non-utf8-charset-pk/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/non-utf8-charset-pk/create.sql
new file mode 100644
index 00000000000..c0313e62c8d
--- /dev/null
+++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/non-utf8-charset-pk/create.sql
@@ -0,0 +1,30 @@
+drop table if exists onlineddl_test;
+create table onlineddl_test (
+ id varchar(128) charset latin1 collate latin1_swedish_ci,
+ t1 varchar(128) charset latin1 collate latin1_swedish_ci,
+ t2 varchar(128) charset latin1 collate latin1_swedish_ci,
+ tutf8 varchar(128) charset utf8,
+ tutf8mb4 varchar(128) charset utf8mb4,
+ tlatin1 varchar(128) charset latin1 collate latin1_swedish_ci,
+ primary key(id)
+) auto_increment=1;
+
+insert into onlineddl_test values (concat('átesting-', md5(rand())), md5(rand()), md5(rand()), md5(rand()), md5(rand()), md5(rand()));
+insert into onlineddl_test values (concat('átesting-', md5(rand())), 'átesting', 'átesting', 'átesting', 'átesting', 'átesting');
+insert into onlineddl_test values (concat('átesting-', md5(rand())), 'testátest', 'testátest', 'testátest', '🍻😀', 'átesting');
+
+drop event if exists onlineddl_test;
+delimiter ;;
+create event onlineddl_test
+ on schedule every 1 second
+ starts current_timestamp
+ ends current_timestamp + interval 60 second
+ on completion not preserve
+ enable
+ do
+begin
+ insert into onlineddl_test values (concat('átesting-', md5(rand())), md5(rand()), md5(rand()), md5(rand()), md5(rand()), md5(rand()));
+ insert into onlineddl_test values (concat('átesting-', md5(rand())), 'átesting-binlog', 'átesting-binlog', 'átesting-binlog', 'átesting-binlog', 'átesting-binlog');
+ insert into onlineddl_test values (concat('átesting-', md5(rand())), 'testátest-binlog', 'testátest-binlog', 'testátest-binlog', '🍻😀', 'átesting-binlog');
+ insert into onlineddl_test values (concat('átesting-', md5(rand())), 'átesting-bnull', 'átesting-bnull', 'átesting-bnull', null, null);
+end ;;
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/non-utf8-charset-pk/ignore_versions b/go/test/endtoend/onlineddl/vrepl_suite/testdata/non-utf8-charset-pk/ignore_versions
new file mode 100644
index 00000000000..0790a1e68fd
--- /dev/null
+++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/non-utf8-charset-pk/ignore_versions
@@ -0,0 +1 @@
+(5.5|5.6|5.7)
diff --git a/go/test/endtoend/reparent/plannedreparent/reparent_test.go b/go/test/endtoend/reparent/plannedreparent/reparent_test.go
index 1f6f4da0a78..38e872f0f2b 100644
--- a/go/test/endtoend/reparent/plannedreparent/reparent_test.go
+++ b/go/test/endtoend/reparent/plannedreparent/reparent_test.go
@@ -199,13 +199,13 @@ func TestReparentFromOutsideWithNoPrimary(t *testing.T) {
}
func reparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProcessCluster, downPrimary bool) {
- //This test will start a primary and 3 replicas.
- //Then:
- //- one replica will be the new primary
- //- one replica will be reparented to that new primary
- //- one replica will be busted and dead in the water and we'll call TabletExternallyReparented.
- //Args:
- //downPrimary: kills the old primary first
+ // This test will start a primary and 3 replicas.
+ // Then:
+ // - one replica will be the new primary
+ // - one replica will be reparented to that new primary
+ // - one replica will be busted and dead in the water and we'll call TabletExternallyReparented.
+ // Args:
+ // downPrimary: kills the old primary first
ctx := context.Background()
tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets
@@ -218,7 +218,7 @@ func reparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProcessClus
demoteCommands := "SET GLOBAL read_only = ON; FLUSH TABLES WITH READ LOCK; UNLOCK TABLES"
utils.RunSQL(ctx, t, demoteCommands, tablets[0])
- //Get the position of the old primary and wait for the new one to catch up.
+ // Get the position of the old primary and wait for the new one to catch up.
err := utils.WaitForReplicationPosition(t, tablets[0], tablets[1])
require.NoError(t, err)
}
@@ -453,14 +453,7 @@ func TestFullStatus(t *testing.T) {
assert.Contains(t, primaryStatus.PrimaryStatus.String(), "vt-0000000101-bin")
assert.Equal(t, primaryStatus.GtidPurged, "MySQL56/")
assert.False(t, primaryStatus.ReadOnly)
- vtTabletVersion, err := cluster.GetMajorVersion("vttablet")
- require.NoError(t, err)
- vtcltlVersion, err := cluster.GetMajorVersion("vtctl")
- require.NoError(t, err)
- // For all version at or above v17.0.0, each replica will start in super_read_only mode.
- if vtTabletVersion >= 17 && vtcltlVersion >= 17 {
- assert.False(t, primaryStatus.SuperReadOnly)
- }
+ assert.False(t, primaryStatus.SuperReadOnly)
assert.True(t, primaryStatus.SemiSyncPrimaryEnabled)
assert.True(t, primaryStatus.SemiSyncReplicaEnabled)
assert.True(t, primaryStatus.SemiSyncPrimaryStatus)
@@ -514,10 +507,7 @@ func TestFullStatus(t *testing.T) {
assert.Contains(t, replicaStatus.PrimaryStatus.String(), "vt-0000000102-bin")
assert.Equal(t, replicaStatus.GtidPurged, "MySQL56/")
assert.True(t, replicaStatus.ReadOnly)
- // For all version at or above v17.0.0, each replica will start in super_read_only mode.
- if vtTabletVersion >= 17 && vtcltlVersion >= 17 {
- assert.True(t, replicaStatus.SuperReadOnly)
- }
+ assert.True(t, replicaStatus.SuperReadOnly)
assert.False(t, replicaStatus.SemiSyncPrimaryEnabled)
assert.True(t, replicaStatus.SemiSyncReplicaEnabled)
assert.False(t, replicaStatus.SemiSyncPrimaryStatus)
diff --git a/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go b/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go
index 83840a78516..d4035ebe5d6 100644
--- a/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go
+++ b/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go
@@ -71,11 +71,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) {
}
func TestAggrWithLimit(t *testing.T) {
- version, err := cluster.GetMajorVersion("vtgate")
- require.NoError(t, err)
- if version != 19 {
- t.Skip("Test requires VTGate version 18")
- }
mcmp, closer := start(t)
defer closer()
@@ -105,7 +100,6 @@ func TestAggregateTypes(t *testing.T) {
mcmp.AssertMatches("select val1 as a, count(*) from aggr_test group by a order by 2, a", `[[VARCHAR("b") INT64(1)] [VARCHAR("d") INT64(1)] [VARCHAR("a") INT64(2)] [VARCHAR("c") INT64(2)] [VARCHAR("e") INT64(2)]]`)
mcmp.AssertMatches("select sum(val1) from aggr_test", `[[FLOAT64(0)]]`)
mcmp.Run("Average for sharded keyspaces", func(mcmp *utils.MySQLCompare) {
- mcmp.SkipIfBinaryIsBelowVersion(19, "vtgate")
mcmp.AssertMatches("select avg(val1) from aggr_test", `[[FLOAT64(0)]]`)
})
}
@@ -209,7 +203,6 @@ func TestAggrOnJoin(t *testing.T) {
`[[VARCHAR("a")]]`)
mcmp.Run("Average in join for sharded", func(mcmp *utils.MySQLCompare) {
- mcmp.SkipIfBinaryIsBelowVersion(19, "vtgate")
mcmp.AssertMatches(`select avg(a1.val2), avg(a2.val2) from aggr_test a1 join aggr_test a2 on a1.val2 = a2.id join t3 t on a2.val2 = t.id7`,
"[[DECIMAL(1.5000) DECIMAL(1.0000)]]")
@@ -367,7 +360,6 @@ func TestAggOnTopOfLimit(t *testing.T) {
mcmp.AssertMatches("select val1, count(*) from (select id, val1 from aggr_test where val2 < 4 order by val1 limit 2) as x group by val1", `[[NULL INT64(1)] [VARCHAR("a") INT64(1)]]`)
mcmp.AssertMatchesNoOrder("select val1, count(val2) from (select val1, val2 from aggr_test limit 8) as x group by val1", `[[NULL INT64(1)] [VARCHAR("a") INT64(2)] [VARCHAR("b") INT64(1)] [VARCHAR("c") INT64(2)]]`)
mcmp.Run("Average in sharded query", func(mcmp *utils.MySQLCompare) {
- mcmp.SkipIfBinaryIsBelowVersion(19, "vtgate")
mcmp.AssertMatches("select avg(val2) from (select id, val2 from aggr_test where val2 is null limit 2) as x", "[[NULL]]")
mcmp.AssertMatchesNoOrder("select val1, avg(val2) from (select val1, val2 from aggr_test limit 8) as x group by val1", `[[NULL DECIMAL(2.0000)] [VARCHAR("a") DECIMAL(3.5000)] [VARCHAR("b") DECIMAL(1.0000)] [VARCHAR("c") DECIMAL(3.5000)]]`)
})
@@ -379,7 +371,6 @@ func TestAggOnTopOfLimit(t *testing.T) {
mcmp.AssertMatches("select count(val2), sum(val2) from (select id, val2 from aggr_test where val2 is null limit 2) as x", "[[INT64(0) NULL]]")
mcmp.AssertMatches("select val1, count(*), sum(id) from (select id, val1 from aggr_test where val2 < 4 order by val1 limit 2) as x group by val1", `[[NULL INT64(1) DECIMAL(7)] [VARCHAR("a") INT64(1) DECIMAL(2)]]`)
mcmp.Run("Average in sharded query", func(mcmp *utils.MySQLCompare) {
- mcmp.SkipIfBinaryIsBelowVersion(19, "vtgate")
mcmp.AssertMatches("select count(*), sum(val1), avg(val1) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2) FLOAT64(0) FLOAT64(0)]]")
mcmp.AssertMatches("select count(val1), sum(id), avg(id) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2) DECIMAL(7) DECIMAL(3.5000)]]")
mcmp.AssertMatchesNoOrder("select val1, count(val2), sum(val2), avg(val2) from (select val1, val2 from aggr_test limit 8) as x group by val1",
@@ -401,7 +392,6 @@ func TestEmptyTableAggr(t *testing.T) {
mcmp.AssertMatches(" select t1.`name`, count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]")
mcmp.AssertMatches(" select t1.`name`, count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]")
mcmp.Run("Average in sharded query", func(mcmp *utils.MySQLCompare) {
- mcmp.SkipIfBinaryIsBelowVersion(19, "vtgate")
mcmp.AssertMatches(" select count(t1.value) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]")
mcmp.AssertMatches(" select avg(t1.value) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[NULL]]")
})
@@ -417,7 +407,6 @@ func TestEmptyTableAggr(t *testing.T) {
mcmp.AssertMatches(" select count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]")
mcmp.AssertMatches(" select t1.`name`, count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]")
mcmp.Run("Average in sharded query", func(mcmp *utils.MySQLCompare) {
- mcmp.SkipIfBinaryIsBelowVersion(19, "vtgate")
mcmp.AssertMatches(" select count(t1.value) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]")
mcmp.AssertMatches(" select avg(t1.value) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[NULL]]")
mcmp.AssertMatches(" select t1.`name`, count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]")
@@ -435,12 +424,8 @@ func TestOrderByCount(t *testing.T) {
mcmp.Exec("insert into t1(t1_id, `name`, `value`, shardkey) values(1,'a1','foo',100), (2,'b1','foo',200), (3,'c1','foo',300), (4,'a1','foo',100), (5,'b1','bar',200)")
mcmp.Exec("SELECT t9.id2 FROM t9 GROUP BY t9.id2 ORDER BY COUNT(t9.id2) DESC")
- version, err := cluster.GetMajorVersion("vtgate")
- require.NoError(t, err)
- if version == 19 {
- mcmp.Exec("select COUNT(*) from (select 1 as one FROM t9 WHERE id3 = 3 ORDER BY id1 DESC LIMIT 3 OFFSET 0) subquery_for_count")
- mcmp.Exec("select t.id1, t1.name, t.leCount from (select id1, count(*) as leCount from t9 group by 1 order by 2 desc limit 20) t join t1 on t.id1 = t1.t1_id")
- }
+ mcmp.Exec("select COUNT(*) from (select 1 as one FROM t9 WHERE id3 = 3 ORDER BY id1 DESC LIMIT 3 OFFSET 0) subquery_for_count")
+ mcmp.Exec("select t.id1, t1.name, t.leCount from (select id1, count(*) as leCount from t9 group by 1 order by 2 desc limit 20) t join t1 on t.id1 = t1.t1_id")
}
func TestAggregateAnyValue(t *testing.T) {
@@ -473,7 +458,6 @@ func TestAggregateLeftJoin(t *testing.T) {
mcmp.AssertMatches("SELECT count(*) FROM t2 LEFT JOIN t1 ON t1.t1_id = t2.id WHERE IFNULL(t1.name, 'NOTSET') = 'r'", `[[INT64(1)]]`)
mcmp.Run("Average in sharded query", func(mcmp *utils.MySQLCompare) {
- mcmp.SkipIfBinaryIsBelowVersion(19, "vtgate")
mcmp.AssertMatches("SELECT avg(t1.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[DECIMAL(0.5000)]]`)
mcmp.AssertMatches("SELECT avg(t2.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[DECIMAL(1.0000)]]`)
aggregations := []string{
@@ -530,7 +514,6 @@ func TestScalarAggregate(t *testing.T) {
mcmp.Exec("insert into aggr_test(id, val1, val2) values(1,'a',1), (2,'A',1), (3,'b',1), (4,'c',3), (5,'c',4)")
mcmp.AssertMatches("select count(distinct val1) from aggr_test", `[[INT64(3)]]`)
mcmp.Run("Average in sharded query", func(mcmp *utils.MySQLCompare) {
- mcmp.SkipIfBinaryIsBelowVersion(19, "vtgate")
mcmp.AssertMatches("select avg(val1) from aggr_test", `[[FLOAT64(0)]]`)
})
}
@@ -590,15 +573,11 @@ func TestComplexAggregation(t *testing.T) {
mcmp.Exec(`SELECT name+COUNT(t1_id)+1 FROM t1 GROUP BY name`)
mcmp.Exec(`SELECT COUNT(*)+shardkey+MIN(t1_id)+1+MAX(t1_id)*SUM(t1_id)+1+name FROM t1 GROUP BY shardkey, name`)
mcmp.Run("Average in sharded query", func(mcmp *utils.MySQLCompare) {
- mcmp.SkipIfBinaryIsBelowVersion(19, "vtgate")
mcmp.Exec(`SELECT COUNT(t1_id)+MAX(shardkey)+AVG(t1_id) FROM t1`)
})
}
func TestJoinAggregation(t *testing.T) {
- // This is new functionality in Vitess 20
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
-
mcmp, closer := start(t)
defer closer()
@@ -713,7 +692,6 @@ func TestDistinctAggregation(t *testing.T) {
}
func TestHavingQueries(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
mcmp, closer := start(t)
defer closer()
diff --git a/go/test/endtoend/vtgate/queries/derived/cte_test.go b/go/test/endtoend/vtgate/queries/derived/cte_test.go
index 61ddf5d6661..677a5dba653 100644
--- a/go/test/endtoend/vtgate/queries/derived/cte_test.go
+++ b/go/test/endtoend/vtgate/queries/derived/cte_test.go
@@ -18,12 +18,9 @@ package misc
import (
"testing"
-
- "vitess.io/vitess/go/test/endtoend/utils"
)
func TestCTEWithOrderByLimit(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
mcmp, closer := start(t)
defer closer()
@@ -31,7 +28,6 @@ func TestCTEWithOrderByLimit(t *testing.T) {
}
func TestCTEAggregationOnRHS(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
mcmp, closer := start(t)
defer closer()
@@ -40,7 +36,6 @@ func TestCTEAggregationOnRHS(t *testing.T) {
}
func TestCTERemoveInnerOrderBy(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
mcmp, closer := start(t)
defer closer()
@@ -48,7 +43,6 @@ func TestCTERemoveInnerOrderBy(t *testing.T) {
}
func TestCTEWithHaving(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
mcmp, closer := start(t)
defer closer()
@@ -59,7 +53,6 @@ func TestCTEWithHaving(t *testing.T) {
}
func TestCTEColumns(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
mcmp, closer := start(t)
defer closer()
diff --git a/go/test/endtoend/vtgate/queries/derived/derived_test.go b/go/test/endtoend/vtgate/queries/derived/derived_test.go
index 6eb7ee914cd..cb106564b2f 100644
--- a/go/test/endtoend/vtgate/queries/derived/derived_test.go
+++ b/go/test/endtoend/vtgate/queries/derived/derived_test.go
@@ -92,7 +92,6 @@ func TestDerivedTableColumns(t *testing.T) {
// We do this by not using the apply join we usually use, and instead use the hash join engine primitive
// These tests exercise these situations
func TestDerivedTablesWithLimit(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
// We need full type info before planning this, so we wait for the schema tracker
require.NoError(t,
utils.WaitForAuthoritative(t, keyspaceName, "user", clusterInstance.VtgateProcess.ReadVSchema))
@@ -116,7 +115,6 @@ func TestDerivedTablesWithLimit(t *testing.T) {
// TestDerivedTableColumnAliasWithJoin tests the derived table having alias column and using it in the join condition
func TestDerivedTableColumnAliasWithJoin(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
mcmp, closer := start(t)
defer closer()
diff --git a/go/test/endtoend/vtgate/queries/dml/dml_test.go b/go/test/endtoend/vtgate/queries/dml/dml_test.go
index 9d060e99881..c3d1acdec4d 100644
--- a/go/test/endtoend/vtgate/queries/dml/dml_test.go
+++ b/go/test/endtoend/vtgate/queries/dml/dml_test.go
@@ -21,8 +21,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
-
- "vitess.io/vitess/go/test/endtoend/utils"
)
func TestMultiEqual(t *testing.T) {
@@ -45,8 +43,6 @@ func TestMultiEqual(t *testing.T) {
// TestMultiTableDelete executed multi-table delete queries
func TestMultiTableDelete(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
-
mcmp, closer := start(t)
defer closer()
@@ -82,8 +78,6 @@ func TestMultiTableDelete(t *testing.T) {
// TestDeleteWithLimit executed delete queries with limit
func TestDeleteWithLimit(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
-
mcmp, closer := start(t)
defer closer()
diff --git a/go/test/endtoend/vtgate/queries/dml/insert_test.go b/go/test/endtoend/vtgate/queries/dml/insert_test.go
index dfb5961d887..b8c67f31ce3 100644
--- a/go/test/endtoend/vtgate/queries/dml/insert_test.go
+++ b/go/test/endtoend/vtgate/queries/dml/insert_test.go
@@ -21,9 +21,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- "vitess.io/vitess/go/test/endtoend/cluster"
"vitess.io/vitess/go/test/endtoend/utils"
)
@@ -56,8 +54,6 @@ func TestSimpleInsertSelect(t *testing.T) {
// TestInsertOnDup test the insert on duplicate key update feature with argument and list argument.
func TestInsertOnDup(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
-
mcmp, closer := start(t)
defer closer()
@@ -92,19 +88,10 @@ func TestFailureInsertSelect(t *testing.T) {
// primary key same
mcmp.AssertContainsError("insert into s_tbl(id, num) select id, num*20 from s_tbl where id = 1", `AlreadyExists desc = Duplicate entry '1' for key`)
// lookup key same (does not fail on MySQL as there is no lookup, and we have not put unique constraint on num column)
- vtgateVersion, err := cluster.GetMajorVersion("vtgate")
- require.NoError(t, err)
- if vtgateVersion >= 19 {
- utils.AssertContainsError(t, mcmp.VtConn, "insert into s_tbl(id, num) select id*20, num from s_tbl where id = 1", `(errno 1062) (sqlstate 23000)`)
- // mismatch column count
- mcmp.AssertContainsError("insert into s_tbl(id, num) select 100,200,300", `column count does not match value count with the row`)
- mcmp.AssertContainsError("insert into s_tbl(id, num) select 100", `column count does not match value count with the row`)
- } else {
- utils.AssertContainsError(t, mcmp.VtConn, "insert into s_tbl(id, num) select id*20, num from s_tbl where id = 1", `lookup.Create: Code: ALREADY_EXISTS`)
- // mismatch column count
- mcmp.AssertContainsError("insert into s_tbl(id, num) select 100,200,300", `column count does not match value count at row 1`)
- mcmp.AssertContainsError("insert into s_tbl(id, num) select 100", `column count does not match value count at row 1`)
- }
+ utils.AssertContainsError(t, mcmp.VtConn, "insert into s_tbl(id, num) select id*20, num from s_tbl where id = 1", `(errno 1062) (sqlstate 23000)`)
+ // mismatch column count
+ mcmp.AssertContainsError("insert into s_tbl(id, num) select 100,200,300", `column count does not match value count with the row`)
+ mcmp.AssertContainsError("insert into s_tbl(id, num) select 100", `column count does not match value count with the row`)
})
}
}
@@ -483,3 +470,29 @@ func TestMixedCases(t *testing.T) {
// final check count on the lookup vindex table.
utils.AssertMatches(t, mcmp.VtConn, "select count(*) from lkp_mixed_idx", "[[INT64(12)]]")
}
+
+// TestInsertJson tests that selected json values are encoded correctly.
+func TestInsertJson(t *testing.T) {
+ utils.SkipIfBinaryIsBelowVersion(t, 21, "vtgate")
+ utils.SkipIfBinaryIsBelowVersion(t, 21, "vttablet")
+
+ mcmp, closer := start(t)
+ defer closer()
+
+ mcmp.Exec(`insert into j_tbl(id, jdoc) values (1, '{}'), (2, '{"a": 1, "b": 2}')`)
+ mcmp.Exec(`select * from j_tbl order by id`)
+
+ mcmp.Exec(`insert into j_tbl(id, jdoc) select 3, json_object("k", "a")`)
+ mcmp.Exec(`select * from j_tbl order by id`)
+
+ mcmp.Exec(`insert into j_tbl(id, jdoc) select 4,JSON_OBJECT(
+ 'date', CAST(1629849600 AS UNSIGNED),
+ 'keywordSourceId', CAST(930701976723823 AS UNSIGNED),
+ 'keywordSourceVersionId', CAST(210825230433 AS UNSIGNED)
+ )`)
+ mcmp.Exec(`select * from j_tbl order by id`)
+
+ utils.Exec(t, mcmp.VtConn, `insert into uks.j_utbl(id, jdoc) select * from sks.j_tbl`)
+ utils.AssertMatches(t, mcmp.VtConn, `select * from uks.j_utbl order by id`,
+ `[[INT64(1) JSON("{}")] [INT64(2) JSON("{\"a\": 1, \"b\": 2}")] [INT64(3) JSON("{\"k\": \"a\"}")] [INT64(4) JSON("{\"date\": 1629849600, \"keywordSourceId\": 930701976723823, \"keywordSourceVersionId\": 210825230433}")]]`)
+}
diff --git a/go/test/endtoend/vtgate/queries/dml/main_test.go b/go/test/endtoend/vtgate/queries/dml/main_test.go
index c00e27fe3a0..0c4d58aa614 100644
--- a/go/test/endtoend/vtgate/queries/dml/main_test.go
+++ b/go/test/endtoend/vtgate/queries/dml/main_test.go
@@ -133,7 +133,7 @@ func start(t *testing.T) (utils.MySQLCompare, func()) {
tables := []string{
"s_tbl", "num_vdx_tbl", "user_tbl", "order_tbl", "oevent_tbl", "oextra_tbl",
- "auto_tbl", "oid_vdx_tbl", "unq_idx", "nonunq_idx", "u_tbl", "mixed_tbl", "lkp_map_idx",
+ "auto_tbl", "oid_vdx_tbl", "unq_idx", "nonunq_idx", "u_tbl", "mixed_tbl", "lkp_map_idx", "j_tbl", "j_utbl",
}
for _, table := range tables {
// TODO (@frouioui): following assertions produce different results between MySQL and Vitess
diff --git a/go/test/endtoend/vtgate/queries/dml/sharded_schema.sql b/go/test/endtoend/vtgate/queries/dml/sharded_schema.sql
index 3310724d420..cc24737a0fa 100644
--- a/go/test/endtoend/vtgate/queries/dml/sharded_schema.sql
+++ b/go/test/endtoend/vtgate/queries/dml/sharded_schema.sql
@@ -86,3 +86,10 @@ create table lkp_mixed_idx
keyspace_id varbinary(20),
primary key (lkp_key)
) Engine = InnoDB;
+
+create table j_tbl
+(
+ id bigint,
+ jdoc json,
+ primary key (id)
+) Engine = InnoDB;
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/queries/dml/unsharded_schema.sql b/go/test/endtoend/vtgate/queries/dml/unsharded_schema.sql
index 4d2ad06618a..cd64605ad20 100644
--- a/go/test/endtoend/vtgate/queries/dml/unsharded_schema.sql
+++ b/go/test/endtoend/vtgate/queries/dml/unsharded_schema.sql
@@ -34,4 +34,11 @@ values (0, 1, 1000);
insert into auto_seq(id, next_id, cache)
values (0, 666, 1000);
insert into mixed_seq(id, next_id, cache)
-values (0, 1, 1000);
\ No newline at end of file
+values (0, 1, 1000);
+
+create table j_utbl
+(
+ id bigint,
+ jdoc json,
+ primary key (id)
+) Engine = InnoDB;
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/queries/dml/vschema.json b/go/test/endtoend/vtgate/queries/dml/vschema.json
index a42a93d7403..72a949a49e4 100644
--- a/go/test/endtoend/vtgate/queries/dml/vschema.json
+++ b/go/test/endtoend/vtgate/queries/dml/vschema.json
@@ -188,6 +188,14 @@
"name": "hash"
}
]
+ },
+ "j_tbl": {
+ "column_vindexes": [
+ {
+ "column": "id",
+ "name": "hash"
+ }
+ ]
}
}
}
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go b/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go
index 5ba9877bf5f..a1ef2711499 100644
--- a/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go
+++ b/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go
@@ -221,9 +221,7 @@ func TestInfrSchemaAndUnionAll(t *testing.T) {
}
func TestTypeORMQuery(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
// This test checks that we can run queries similar to the ones that the TypeORM framework uses
-
require.NoError(t,
utils.WaitForAuthoritative(t, "ks", "t1", clusterInstance.VtgateProcess.ReadVSchema))
@@ -270,7 +268,6 @@ WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 't2';
}
func TestJoinWithSingleShardQueryOnRHS(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
// This test checks that we can run queries like this, where the RHS is a single shard query
mcmp, closer := start(t)
defer closer()
diff --git a/go/test/endtoend/vtgate/queries/misc/misc_test.go b/go/test/endtoend/vtgate/queries/misc/misc_test.go
index 2d861b1a625..408b32a7969 100644
--- a/go/test/endtoend/vtgate/queries/misc/misc_test.go
+++ b/go/test/endtoend/vtgate/queries/misc/misc_test.go
@@ -37,7 +37,7 @@ func start(t *testing.T) (utils.MySQLCompare, func()) {
require.NoError(t, err)
deleteAll := func() {
- tables := []string{"t1", "uks.unsharded"}
+ tables := []string{"t1", "uks.unsharded", "tbl"}
for _, table := range tables {
_, _ = mcmp.ExecAndIgnore("delete from " + table)
}
@@ -60,15 +60,8 @@ func TestBitVals(t *testing.T) {
mcmp.AssertMatches(`select b'1001', 0x9, B'010011011010'`, `[[VARBINARY("\t") VARBINARY("\t") VARBINARY("\x04\xda")]]`)
mcmp.AssertMatches(`select b'1001', 0x9, B'010011011010' from t1`, `[[VARBINARY("\t") VARBINARY("\t") VARBINARY("\x04\xda")]]`)
- vtgateVersion, err := cluster.GetMajorVersion("vtgate")
- require.NoError(t, err)
- if vtgateVersion >= 19 {
- mcmp.AssertMatchesNoCompare(`select 1 + b'1001', 2 + 0x9, 3 + B'010011011010'`, `[[INT64(10) UINT64(11) INT64(1245)]]`, `[[INT64(10) UINT64(11) INT64(1245)]]`)
- mcmp.AssertMatchesNoCompare(`select 1 + b'1001', 2 + 0x9, 3 + B'010011011010' from t1`, `[[INT64(10) UINT64(11) INT64(1245)]]`, `[[INT64(10) UINT64(11) INT64(1245)]]`)
- } else {
- mcmp.AssertMatchesNoCompare(`select 1 + b'1001', 2 + 0x9, 3 + B'010011011010'`, `[[INT64(10) UINT64(11) INT64(1245)]]`, `[[UINT64(10) UINT64(11) UINT64(1245)]]`)
- mcmp.AssertMatchesNoCompare(`select 1 + b'1001', 2 + 0x9, 3 + B'010011011010' from t1`, `[[INT64(10) UINT64(11) INT64(1245)]]`, `[[UINT64(10) UINT64(11) UINT64(1245)]]`)
- }
+ mcmp.AssertMatchesNoCompare(`select 1 + b'1001', 2 + 0x9, 3 + B'010011011010'`, `[[INT64(10) UINT64(11) INT64(1245)]]`, `[[INT64(10) UINT64(11) INT64(1245)]]`)
+ mcmp.AssertMatchesNoCompare(`select 1 + b'1001', 2 + 0x9, 3 + B'010011011010' from t1`, `[[INT64(10) UINT64(11) INT64(1245)]]`, `[[INT64(10) UINT64(11) INT64(1245)]]`)
}
// TestTimeFunctionWithPrecision tests that inserting data with NOW(1) works as intended.
@@ -115,12 +108,6 @@ func TestInvalidDateTimeTimestampVals(t *testing.T) {
}
func TestJoinWithThreeTables(t *testing.T) {
- version, err := cluster.GetMajorVersion("vtgate")
- require.NoError(t, err)
- if version != 19 {
- t.Skip("cannot run upgrade/downgrade test")
- }
-
mcmp, closer := start(t)
defer closer()
@@ -303,8 +290,6 @@ func TestAnalyze(t *testing.T) {
// TestTransactionModeVar executes SELECT on `transaction_mode` variable
func TestTransactionModeVar(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
-
mcmp, closer := start(t)
defer closer()
@@ -334,8 +319,18 @@ func TestTransactionModeVar(t *testing.T) {
}
}
+// TestAliasesInOuterJoinQueries tests that aliases work in queries that have outer join clauses.
+func TestAliasesInOuterJoinQueries(t *testing.T) {
+ mcmp, closer := start(t)
+ defer closer()
+
+ // Insert data into the 2 tables
+ mcmp.Exec("insert into t1(id1, id2) values (1,2), (42,5), (5, 42)")
+ mcmp.Exec("insert into tbl(id, unq_col, nonunq_col) values (1,2,3), (2,5,3), (3, 42, 2)")
+ mcmp.ExecWithColumnCompare("select * from t1 t left join tbl on t.id1 = 666 and t.id2 = tbl.id")
+}
+
func TestAlterTableWithView(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
mcmp, closer := start(t)
defer closer()
@@ -388,7 +383,6 @@ func TestAlterTableWithView(t *testing.T) {
}
func TestHandleNullableColumn(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 21, "vtgate")
require.NoError(t,
utils.WaitForAuthoritative(t, keyspaceName, "tbl", clusterInstance.VtgateProcess.ReadVSchema))
mcmp, closer := start(t)
diff --git a/go/test/endtoend/vtgate/queries/misc/schema.sql b/go/test/endtoend/vtgate/queries/misc/schema.sql
index f87d7c19078..c1d9a7ed8b3 100644
--- a/go/test/endtoend/vtgate/queries/misc/schema.sql
+++ b/go/test/endtoend/vtgate/queries/misc/schema.sql
@@ -9,7 +9,7 @@ create table tbl
(
id bigint,
unq_col bigint,
- nonunq_col bigint,
+ nonunq_col bigint not null,
primary key (id),
unique (unq_col)
) Engine = InnoDB;
diff --git a/go/test/endtoend/vtgate/queries/normalize/normalize_test.go b/go/test/endtoend/vtgate/queries/normalize/normalize_test.go
index a3637ef5230..b6495443a8e 100644
--- a/go/test/endtoend/vtgate/queries/normalize/normalize_test.go
+++ b/go/test/endtoend/vtgate/queries/normalize/normalize_test.go
@@ -28,7 +28,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "vitess.io/vitess/go/test/endtoend/cluster"
"vitess.io/vitess/go/test/endtoend/utils"
"vitess.io/vitess/go/mysql"
@@ -40,16 +39,7 @@ func TestNormalizeAllFields(t *testing.T) {
defer conn.Close()
insertQuery := `insert into t1 values (1, "chars", "variable chars", x'73757265', 0x676F, 0.33, 9.99, 1, "1976-06-08", "small", "b", "{\"key\":\"value\"}", point(1,5), b'011', 0b0101)`
-
- normalizedInsertQuery := `insert into t1 values (:vtg1 /* INT64 */, :vtg2 /* VARCHAR */, :vtg3 /* VARCHAR */, :vtg4 /* HEXVAL */, :vtg5 /* HEXNUM */, :vtg6 /* DECIMAL(3,2) */, :vtg7 /* DECIMAL(3,2) */, :vtg8 /* INT64 */, :vtg9 /* VARCHAR */, :vtg10 /* VARCHAR */, :vtg11 /* VARCHAR */, :vtg12 /* VARCHAR */, point(:vtg13 /* INT64 */, :vtg14 /* INT64 */), :vtg15 /* BITNUM */, :vtg16 /* BITNUM */)`
- vtgateVersion, err := cluster.GetMajorVersion("vtgate")
- require.NoError(t, err)
- if vtgateVersion < 20 {
- normalizedInsertQuery = `insert into t1 values (:vtg1 /* INT64 */, :vtg2 /* VARCHAR */, :vtg3 /* VARCHAR */, :vtg4 /* HEXVAL */, :vtg5 /* HEXNUM */, :vtg6 /* DECIMAL */, :vtg7 /* DECIMAL */, :vtg8 /* INT64 */, :vtg9 /* VARCHAR */, :vtg10 /* VARCHAR */, :vtg11 /* VARCHAR */, :vtg12 /* VARCHAR */, point(:vtg13 /* INT64 */, :vtg14 /* INT64 */), :vtg15 /* BITNUM */, :vtg16 /* BITNUM */)`
- }
- if vtgateVersion < 19 {
- normalizedInsertQuery = `insert into t1 values (:vtg1 /* INT64 */, :vtg2 /* VARCHAR */, :vtg3 /* VARCHAR */, :vtg4 /* HEXVAL */, :vtg5 /* HEXNUM */, :vtg6 /* DECIMAL */, :vtg7 /* DECIMAL */, :vtg8 /* INT64 */, :vtg9 /* VARCHAR */, :vtg10 /* VARCHAR */, :vtg11 /* VARCHAR */, :vtg12 /* VARCHAR */, point(:vtg13 /* INT64 */, :vtg14 /* INT64 */), :vtg15 /* HEXNUM */, :vtg16 /* HEXNUM */)`
- }
+ normalizedInsertQuery := `insert into t1 values (:vtg1 /* INT64 */, :vtg2 /* VARCHAR */, :vtg3 /* VARCHAR */, :vtg4 /* HEXVAL */, :vtg5 /* HEXNUM */, :vtg6 /* DECIMAL */, :vtg7 /* DECIMAL */, :vtg8 /* INT64 */, :vtg9 /* VARCHAR */, :vtg10 /* VARCHAR */, :vtg11 /* VARCHAR */, :vtg12 /* VARCHAR */, point(:vtg13 /* INT64 */, :vtg14 /* INT64 */), :vtg15 /* BITNUM */, :vtg16 /* BITNUM */)`
selectQuery := "select * from t1"
utils.Exec(t, conn, insertQuery)
qr := utils.Exec(t, conn, selectQuery)
diff --git a/go/test/endtoend/vtgate/queries/orderby/orderby_test.go b/go/test/endtoend/vtgate/queries/orderby/orderby_test.go
index 1d2ee7db795..f6c52cab2ac 100644
--- a/go/test/endtoend/vtgate/queries/orderby/orderby_test.go
+++ b/go/test/endtoend/vtgate/queries/orderby/orderby_test.go
@@ -85,9 +85,6 @@ func TestOrderBy(t *testing.T) {
}
func TestOrderByComplex(t *testing.T) {
- // tests written to try to trick the ORDER BY engine and planner
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
-
mcmp, closer := start(t)
defer closer()
diff --git a/go/test/endtoend/vtgate/queries/reference/main_test.go b/go/test/endtoend/vtgate/queries/reference/main_test.go
index 4c9440ca4ff..c350038bf6e 100644
--- a/go/test/endtoend/vtgate/queries/reference/main_test.go
+++ b/go/test/endtoend/vtgate/queries/reference/main_test.go
@@ -18,6 +18,7 @@ package reference
import (
"context"
+ _ "embed"
"flag"
"fmt"
"os"
@@ -39,68 +40,16 @@ var (
vtParams mysql.ConnParams
unshardedKeyspaceName = "uks"
- unshardedSQLSchema = `
- CREATE TABLE IF NOT EXISTS zip(
- id BIGINT NOT NULL AUTO_INCREMENT,
- code5 INT(5) NOT NULL,
- PRIMARY KEY(id)
- ) ENGINE=InnoDB;
+ //go:embed uschema.sql
+ unshardedSQLSchema string
+ //go:embed uvschema.json
+ unshardedVSchema string
- INSERT INTO zip(id, code5)
- VALUES (1, 47107),
- (2, 82845),
- (3, 11237);
-
- CREATE TABLE IF NOT EXISTS zip_detail(
- id BIGINT NOT NULL AUTO_INCREMENT,
- zip_id BIGINT NOT NULL,
- discontinued_at DATE,
- PRIMARY KEY(id)
- ) ENGINE=InnoDB;
-
- `
- unshardedVSchema = `
- {
- "sharded":false,
- "tables": {
- "zip": {},
- "zip_detail": {}
- }
- }
- `
shardedKeyspaceName = "sks"
- shardedSQLSchema = `
- CREATE TABLE IF NOT EXISTS delivery_failure (
- id BIGINT NOT NULL,
- zip_detail_id BIGINT NOT NULL,
- reason VARCHAR(255),
- PRIMARY KEY(id)
- ) ENGINE=InnoDB;
- `
- shardedVSchema = `
- {
- "sharded": true,
- "vindexes": {
- "hash": {
- "type": "hash"
- }
- },
- "tables": {
- "delivery_failure": {
- "columnVindexes": [
- {
- "column": "id",
- "name": "hash"
- }
- ]
- },
- "zip_detail": {
- "type": "reference",
- "source": "` + unshardedKeyspaceName + `.zip_detail"
- }
- }
- }
- `
+ //go:embed sschema.sql
+ shardedSQLSchema string
+ //go:embed svschema.json
+ shardedVSchema string
)
func TestMain(m *testing.M) {
diff --git a/go/test/endtoend/vtgate/queries/reference/reference_test.go b/go/test/endtoend/vtgate/queries/reference/reference_test.go
index ae7319a52e3..08e9cbe13b1 100644
--- a/go/test/endtoend/vtgate/queries/reference/reference_test.go
+++ b/go/test/endtoend/vtgate/queries/reference/reference_test.go
@@ -84,20 +84,19 @@ func TestReferenceRouting(t *testing.T) {
)
t.Run("Complex reference query", func(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
// Verify a complex query using reference tables with a left join having a derived table with an order by clause works as intended.
utils.AssertMatches(
t,
conn,
`SELECT t.id FROM (
- SELECT zd.id, zd.zip_id
- FROM `+shardedKeyspaceName+`.zip_detail AS zd
- WHERE zd.id IN (2)
- ORDER BY zd.discontinued_at
- LIMIT 1
- ) AS t
- LEFT JOIN `+shardedKeyspaceName+`.zip_detail AS t0 ON t.zip_id = t0.zip_id
- ORDER BY t.id`,
+ SELECT zd.id, zd.zip_id
+ FROM `+shardedKeyspaceName+`.zip_detail AS zd
+ WHERE zd.id IN (2)
+ ORDER BY zd.discontinued_at
+ LIMIT 1
+ ) AS t
+ LEFT JOIN `+shardedKeyspaceName+`.zip_detail AS t0 ON t.zip_id = t0.zip_id
+ ORDER BY t.id`,
`[[INT64(2)]]`,
)
})
@@ -156,3 +155,19 @@ func TestReferenceRouting(t *testing.T) {
`[[INT64(2)]]`,
)
}
+
+// TestMultiReferenceQuery tests that a query joining multiple reference tables across an unsharded and a sharded keyspace works.
+func TestMultiReferenceQuery(t *testing.T) {
+ utils.SkipIfBinaryIsBelowVersion(t, 21, "vtgate")
+ conn, closer := start(t)
+ defer closer()
+
+ query :=
+ `select 1
+ from delivery_failure df1
+ join delivery_failure df2 on df1.id = df2.id
+ join uks.zip_detail zd1 on df1.zip_detail_id = zd1.zip_id
+ join uks.zip_detail zd2 on zd1.zip_id = zd2.zip_id`
+
+ utils.Exec(t, conn, query)
+}
diff --git a/go/test/endtoend/vtgate/queries/reference/sschema.sql b/go/test/endtoend/vtgate/queries/reference/sschema.sql
new file mode 100644
index 00000000000..0fcaf63a422
--- /dev/null
+++ b/go/test/endtoend/vtgate/queries/reference/sschema.sql
@@ -0,0 +1,6 @@
+CREATE TABLE IF NOT EXISTS delivery_failure (
+ id BIGINT NOT NULL,
+ zip_detail_id BIGINT NOT NULL,
+ reason VARCHAR(255),
+ PRIMARY KEY(id)
+) ENGINE=InnoDB;
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/queries/reference/svschema.json b/go/test/endtoend/vtgate/queries/reference/svschema.json
new file mode 100644
index 00000000000..815e0e8d21c
--- /dev/null
+++ b/go/test/endtoend/vtgate/queries/reference/svschema.json
@@ -0,0 +1,22 @@
+{
+ "sharded": true,
+ "vindexes": {
+ "hash": {
+ "type": "hash"
+ }
+ },
+ "tables": {
+ "delivery_failure": {
+ "columnVindexes": [
+ {
+ "column": "id",
+ "name": "hash"
+ }
+ ]
+ },
+ "zip_detail": {
+ "type": "reference",
+ "source": "uks.zip_detail"
+ }
+ }
+}
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/queries/reference/uschema.sql b/go/test/endtoend/vtgate/queries/reference/uschema.sql
new file mode 100644
index 00000000000..52737928469
--- /dev/null
+++ b/go/test/endtoend/vtgate/queries/reference/uschema.sql
@@ -0,0 +1,17 @@
+CREATE TABLE IF NOT EXISTS zip(
+ id BIGINT NOT NULL AUTO_INCREMENT,
+ code5 INT(5) NOT NULL,
+ PRIMARY KEY(id)
+) ENGINE=InnoDB;
+
+INSERT INTO zip(id, code5)
+VALUES (1, 47107),
+ (2, 82845),
+ (3, 11237);
+
+CREATE TABLE IF NOT EXISTS zip_detail(
+ id BIGINT NOT NULL AUTO_INCREMENT,
+ zip_id BIGINT NOT NULL,
+ discontinued_at DATE,
+ PRIMARY KEY(id)
+) ENGINE=InnoDB;
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/queries/reference/uvschema.json b/go/test/endtoend/vtgate/queries/reference/uvschema.json
new file mode 100644
index 00000000000..fdcfca0d7a9
--- /dev/null
+++ b/go/test/endtoend/vtgate/queries/reference/uvschema.json
@@ -0,0 +1,6 @@
+{
+ "tables": {
+ "zip": {},
+ "zip_detail": {}
+ }
+}
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/queries/subquery/subquery_test.go b/go/test/endtoend/vtgate/queries/subquery/subquery_test.go
index e3f3cc52a5b..50d6f02f3f4 100644
--- a/go/test/endtoend/vtgate/queries/subquery/subquery_test.go
+++ b/go/test/endtoend/vtgate/queries/subquery/subquery_test.go
@@ -80,7 +80,6 @@ func TestNotINQueries(t *testing.T) {
// Test only supported in >= v16.0.0
func TestSubqueriesExists(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 16, "vtgate")
mcmp, closer := start(t)
defer closer()
@@ -162,7 +161,6 @@ func TestSubqueryInReference(t *testing.T) {
// TestSubqueryInAggregation validates that subquery work inside aggregation functions.
func TestSubqueryInAggregation(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
mcmp, closer := start(t)
defer closer()
@@ -180,7 +178,6 @@ func TestSubqueryInAggregation(t *testing.T) {
// TestSubqueryInDerivedTable tests that subqueries and derived tables
// are handled correctly when there are joins inside the derived table
func TestSubqueryInDerivedTable(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
mcmp, closer := start(t)
defer closer()
@@ -194,7 +191,6 @@ func TestSubqueries(t *testing.T) {
// This method tests many types of subqueries. The queries should move to a vitess-tester test file once we have a way to run them.
// The commented out queries are failing because of wrong types being returned.
// The tests are commented out until the issue is fixed.
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
mcmp, closer := start(t)
defer closer()
queries := []string{
diff --git a/go/test/endtoend/vtgate/queries/timeout/timeout_test.go b/go/test/endtoend/vtgate/queries/timeout/timeout_test.go
index 9c81a6c5822..25a7f57b3bc 100644
--- a/go/test/endtoend/vtgate/queries/timeout/timeout_test.go
+++ b/go/test/endtoend/vtgate/queries/timeout/timeout_test.go
@@ -96,5 +96,9 @@ func TestQueryTimeoutWithTables(t *testing.T) {
_, err = utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ QUERY_TIMEOUT_MS=20 */ sleep(0.1) from t1 where id1 = 1")
require.Error(t, err)
assert.Contains(t, err.Error(), "context deadline exceeded")
- assert.Contains(t, err.Error(), "(errno 1317) (sqlstate 70100)")
+ vttabletVersion, err2 := cluster.GetMajorVersion("vttablet")
+ require.NoError(t, err2)
+ if vttabletVersion <= 19 {
+ require.Contains(t, err.Error(), "(errno 1317) (sqlstate 70100)")
+ }
}
diff --git a/go/test/endtoend/vtgate/queries/tpch/tpch_test.go b/go/test/endtoend/vtgate/queries/tpch/tpch_test.go
index c0d8c798273..ec33bd0ae9d 100644
--- a/go/test/endtoend/vtgate/queries/tpch/tpch_test.go
+++ b/go/test/endtoend/vtgate/queries/tpch/tpch_test.go
@@ -48,7 +48,6 @@ func start(t *testing.T) (utils.MySQLCompare, func()) {
}
func TestTPCHQueries(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate")
mcmp, closer := start(t)
defer closer()
err := utils.WaitForColumn(t, clusterInstance.VtgateProcess, keyspaceName, "region", `R_COMMENT`)
diff --git a/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go b/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go
index 8f8050bebe1..c6f3d8469cc 100644
--- a/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go
+++ b/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go
@@ -178,13 +178,7 @@ func TestInitAndUpdate(t *testing.T) {
require.NoError(t, err)
defer conn.Close()
- vtgateVersion, err := cluster.GetMajorVersion("vtgate")
- require.NoError(t, err)
-
- expected := `[[VARCHAR("dual")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`
- if vtgateVersion >= 17 {
- expected = `[[VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`
- }
+ expected := `[[VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`
utils.AssertMatchesWithTimeout(t, conn,
"SHOW VSCHEMA TABLES",
expected,
@@ -192,6 +186,8 @@ func TestInitAndUpdate(t *testing.T) {
30*time.Second,
"initial table list not complete")
+ vtgateVersion, err := cluster.GetMajorVersion("vtgate")
+ require.NoError(t, err)
if vtgateVersion >= 19 {
utils.AssertMatches(t, conn,
"SHOW VSCHEMA KEYSPACES",
@@ -200,10 +196,7 @@ func TestInitAndUpdate(t *testing.T) {
// Init
_ = utils.Exec(t, conn, "create table test_sc (id bigint primary key)")
- expected = `[[VARCHAR("dual")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")] [VARCHAR("test_sc")]]`
- if vtgateVersion >= 17 {
- expected = `[[VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")] [VARCHAR("test_sc")]]`
- }
+ expected = `[[VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")] [VARCHAR("test_sc")]]`
utils.AssertMatchesWithTimeout(t, conn,
"SHOW VSCHEMA TABLES",
expected,
@@ -213,10 +206,7 @@ func TestInitAndUpdate(t *testing.T) {
// Tables Update via health check.
_ = utils.Exec(t, conn, "create table test_sc1 (id bigint primary key)")
- expected = `[[VARCHAR("dual")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")] [VARCHAR("test_sc")] [VARCHAR("test_sc1")]]`
- if vtgateVersion >= 17 {
- expected = `[[VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")] [VARCHAR("test_sc")] [VARCHAR("test_sc1")]]`
- }
+ expected = `[[VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")] [VARCHAR("test_sc")] [VARCHAR("test_sc1")]]`
utils.AssertMatchesWithTimeout(t, conn,
"SHOW VSCHEMA TABLES",
expected,
@@ -225,10 +215,7 @@ func TestInitAndUpdate(t *testing.T) {
"test_sc1 not in vschema tables")
_ = utils.Exec(t, conn, "drop table test_sc, test_sc1")
- expected = `[[VARCHAR("dual")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`
- if vtgateVersion >= 17 {
- expected = `[[VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`
- }
+ expected = `[[VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`
utils.AssertMatchesWithTimeout(t, conn,
"SHOW VSCHEMA TABLES",
expected,
@@ -247,12 +234,7 @@ func TestDMLOnNewTable(t *testing.T) {
// create a new table which is not part of the VSchema
utils.Exec(t, conn, `create table new_table_tracked(id bigint, name varchar(100), primary key(id)) Engine=InnoDB`)
- vtgateVersion, err := cluster.GetMajorVersion("vtgate")
- require.NoError(t, err)
- expected := `[[VARCHAR("dual")] [VARCHAR("new_table_tracked")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`
- if vtgateVersion >= 17 {
- expected = `[[VARCHAR("new_table_tracked")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`
- }
+ expected := `[[VARCHAR("new_table_tracked")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`
// wait for vttablet's schema reload interval to pass
utils.AssertMatchesWithTimeout(t, conn,
"SHOW VSCHEMA TABLES",
@@ -296,9 +278,6 @@ func TestDMLOnNewTable(t *testing.T) {
// TestNewView validates that view tracking works as expected.
func TestNewView(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 16, "vtgate")
- utils.SkipIfBinaryIsBelowVersion(t, 16, "vttablet")
-
ctx := context.Background()
conn, err := mysql.Connect(ctx, &vtParams)
require.NoError(t, err)
@@ -321,9 +300,6 @@ func TestNewView(t *testing.T) {
// TestViewAndTable validates that new column added in table is present in the view definition
func TestViewAndTable(t *testing.T) {
- utils.SkipIfBinaryIsBelowVersion(t, 16, "vtgate")
- utils.SkipIfBinaryIsBelowVersion(t, 16, "vttablet")
-
ctx := context.Background()
conn, err := mysql.Connect(ctx, &vtParams)
require.NoError(t, err)
diff --git a/go/test/endtoend/vtgate/vitess_tester/join/join.test b/go/test/endtoend/vtgate/vitess_tester/join/join.test
new file mode 100644
index 00000000000..72d79a1206e
--- /dev/null
+++ b/go/test/endtoend/vtgate/vitess_tester/join/join.test
@@ -0,0 +1,79 @@
+CREATE TABLE `t1`
+(
+ `id` int unsigned NOT NULL AUTO_INCREMENT,
+ `name` varchar(191) NOT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE InnoDB,
+ CHARSET utf8mb4,
+ COLLATE utf8mb4_unicode_ci;
+
+CREATE TABLE `t2`
+(
+ `id` bigint unsigned NOT NULL AUTO_INCREMENT,
+ `t1_id` int unsigned NOT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE InnoDB,
+ CHARSET utf8mb4,
+ COLLATE utf8mb4_unicode_ci;
+
+CREATE TABLE `t3`
+(
+ `id` bigint unsigned NOT NULL AUTO_INCREMENT,
+ `name` varchar(191) NOT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE InnoDB,
+ CHARSET utf8mb4,
+ COLLATE utf8mb4_unicode_ci;
+
+CREATE TABLE `t4`
+(
+ `id` bigint unsigned NOT NULL AUTO_INCREMENT,
+ `col` int unsigned NOT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE InnoDB,
+ CHARSET utf8mb4,
+ COLLATE utf8mb4_unicode_ci;
+
+insert into t1 (id, name)
+values (1, 'A'),
+ (2, 'B'),
+ (3, 'C'),
+ (4, 'D');
+
+insert into t2 (id, t1_id)
+values (1, 1),
+ (2, 2),
+ (3, 3);
+
+insert into t3 (id, name)
+values (1, 'A'),
+ (2, 'B'),
+ (3, 'B'),
+ (4, 'B'),
+ (5, 'B');
+
+insert into t4 (id, col)
+values (1, 1),
+ (2, 2),
+ (3, 3);
+
+-- wait_authoritative t1
+-- wait_authoritative t2
+-- wait_authoritative t3
+select 42
+from t1
+ join t2 on t1.id = t2.t1_id
+ join t3 on t1.id = t3.id
+where t1.name
+ or t2.id
+ or t3.name;
+
+# Complex query that requires hash join underneath a memory sort and ordered aggregate
+select 1
+from t1
+ join t2 on t1.id = t2.t1_id
+ join t4 on t4.col = t2.id
+ left join (select t4.col, count(*) as count from t4 group by t4.col) t3 on t3.col = t2.id
+where t1.id IN (1, 2)
+group by t2.id, t4.col;
+
diff --git a/go/test/endtoend/vtgate/vitess_tester/join/vschema.json b/go/test/endtoend/vtgate/vitess_tester/join/vschema.json
new file mode 100644
index 00000000000..1105b951e61
--- /dev/null
+++ b/go/test/endtoend/vtgate/vitess_tester/join/vschema.json
@@ -0,0 +1,46 @@
+{
+ "keyspaces": {
+ "joinks": {
+ "sharded": true,
+ "vindexes": {
+ "hash": {
+ "type": "hash"
+ }
+ },
+ "tables": {
+ "t1": {
+ "column_vindexes": [
+ {
+ "column": "id",
+ "name": "hash"
+ }
+ ]
+ },
+ "t2": {
+ "column_vindexes": [
+ {
+ "column": "t1_id",
+ "name": "hash"
+ }
+ ]
+ },
+ "t3": {
+ "column_vindexes": [
+ {
+ "column": "id",
+ "name": "hash"
+ }
+ ]
+ },
+ "t4": {
+ "column_vindexes": [
+ {
+ "column": "id",
+ "name": "hash"
+ }
+ ]
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/vitess_tester/two_sharded_keyspaces/queries.test b/go/test/endtoend/vtgate/vitess_tester/two_sharded_keyspaces/queries.test
new file mode 100644
index 00000000000..28c55e559c9
--- /dev/null
+++ b/go/test/endtoend/vtgate/vitess_tester/two_sharded_keyspaces/queries.test
@@ -0,0 +1,39 @@
+use customer;
+create table if not exists customer
+(
+ customer_id bigint not null,
+ email varbinary(128),
+ primary key (customer_id)
+) ENGINE = InnoDB;
+
+insert into customer.customer(customer_id, email)
+values (1, 'alice@domain.com'),
+       (2, 'bob@domain.com'),
+       (3, 'charlie@domain.com'),
+       (4, 'dan@domain.com'),
+       (5, 'eve@domain.com');
+use corder;
+create table if not exists corder
+(
+ order_id bigint not null,
+ customer_id bigint,
+ sku varbinary(128),
+ price bigint,
+ primary key (order_id)
+) ENGINE = InnoDB;
+insert into corder.corder(order_id, customer_id, sku, price)
+values (1, 1, 'SKU-1001', 100),
+ (2, 2, 'SKU-1002', 30),
+ (3, 3, 'SKU-1002', 30),
+ (4, 4, 'SKU-1002', 30),
+ (5, 5, 'SKU-1002', 30);
+
+select co.order_id, co.customer_id, co.price
+from corder.corder co
+ left join customer.customer cu on co.customer_id = cu.customer_id
+where cu.customer_id = 1;
+
+# This query was accidentally disallowed by https://github.com/vitessio/vitess/pull/16520
+select 1
+from customer.customer
+where customer_id in (select customer_id from corder.corder where price > 50);
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/vitess_tester/two_sharded_keyspaces/vschema.json b/go/test/endtoend/vtgate/vitess_tester/two_sharded_keyspaces/vschema.json
new file mode 100644
index 00000000000..5672042bace
--- /dev/null
+++ b/go/test/endtoend/vtgate/vitess_tester/two_sharded_keyspaces/vschema.json
@@ -0,0 +1,72 @@
+{
+ "keyspaces": {
+ "customer": {
+ "sharded": true,
+ "vindexes": {
+ "hash": {
+ "type": "hash",
+ "params": {},
+ "owner": ""
+ }
+ },
+ "tables": {
+ "customer": {
+ "type": "",
+ "column_vindexes": [
+ {
+ "column": "customer_id",
+ "name": "hash",
+ "columns": []
+ }
+ ],
+ "columns": [],
+ "pinned": "",
+ "column_list_authoritative": false,
+ "source": ""
+ }
+ },
+ "require_explicit_routing": false,
+ "foreign_key_mode": 0,
+ "multi_tenant_spec": null
+ },
+ "corder": {
+ "sharded": true,
+ "vindexes": {
+ "hash": {
+ "type": "hash",
+ "params": {},
+ "owner": ""
+ }
+ },
+ "tables": {
+ "corder": {
+ "type": "",
+ "column_vindexes": [
+ {
+ "column": "customer_id",
+ "name": "hash",
+ "columns": []
+ }
+ ],
+ "columns": [],
+ "pinned": "",
+ "column_list_authoritative": false,
+ "source": ""
+ }
+ },
+ "require_explicit_routing": false,
+ "foreign_key_mode": 0,
+ "multi_tenant_spec": null
+ }
+ },
+ "routing_rules": {
+ "rules": []
+ },
+ "shard_routing_rules": {
+ "rules": []
+ },
+ "keyspace_routing_rules": null,
+ "mirror_rules": {
+ "rules": []
+ }
+}
\ No newline at end of file
diff --git a/go/test/endtoend/vtgate/vschema/vschema_test.go b/go/test/endtoend/vtgate/vschema/vschema_test.go
index 92863ff7dc8..eec54f8f47f 100644
--- a/go/test/endtoend/vtgate/vschema/vschema_test.go
+++ b/go/test/endtoend/vtgate/vschema/vschema_test.go
@@ -110,16 +110,7 @@ func TestVSchema(t *testing.T) {
`[[INT64(1) VARCHAR("test1")] [INT64(2) VARCHAR("test2")] [INT64(3) VARCHAR("test3")] [INT64(4) VARCHAR("test4")]]`)
utils.AssertMatches(t, conn, "delete from vt_user", `[]`)
-
- vtgateVersion, err := cluster.GetMajorVersion("vtgate")
- require.NoError(t, err)
-
- // Test empty vschema
- if vtgateVersion >= 17 {
- utils.AssertMatches(t, conn, "SHOW VSCHEMA TABLES", `[]`)
- } else {
- utils.AssertMatches(t, conn, "SHOW VSCHEMA TABLES", `[[VARCHAR("dual")]]`)
- }
+ utils.AssertMatches(t, conn, "SHOW VSCHEMA TABLES", `[]`)
// Use the DDL to create an unsharded vschema and test again
@@ -135,11 +126,7 @@ func TestVSchema(t *testing.T) {
utils.Exec(t, conn, "commit")
// Test Showing Tables
- if vtgateVersion >= 17 {
- utils.AssertMatches(t, conn, "SHOW VSCHEMA TABLES", `[[VARCHAR("main")] [VARCHAR("vt_user")]]`)
- } else {
- utils.AssertMatches(t, conn, "SHOW VSCHEMA TABLES", `[[VARCHAR("dual")] [VARCHAR("main")] [VARCHAR("vt_user")]]`)
- }
+ utils.AssertMatches(t, conn, "SHOW VSCHEMA TABLES", `[[VARCHAR("main")] [VARCHAR("vt_user")]]`)
// Test Showing Vindexes
utils.AssertMatches(t, conn, "SHOW VSCHEMA VINDEXES", `[]`)
diff --git a/go/vt/discovery/healthcheck.go b/go/vt/discovery/healthcheck.go
index 95e08290d53..355bc09d761 100644
--- a/go/vt/discovery/healthcheck.go
+++ b/go/vt/discovery/healthcheck.go
@@ -470,7 +470,20 @@ func (hc *HealthCheckImpl) deleteTablet(tablet *topodata.Tablet) {
// delete from healthy list
healthy, ok := hc.healthy[key]
if ok && len(healthy) > 0 {
- hc.recomputeHealthy(key)
+ if tabletType == topodata.TabletType_PRIMARY {
+ // If the deleted tablet was a primary,
+ // and it matches what we think is the current active primary,
+ // clear the healthy list for the primary.
+ //
+ // See the logic in `updateHealth` for more details.
+ alias := tabletAliasString(topoproto.TabletAliasString(healthy[0].Tablet.Alias))
+ if alias == tabletAlias {
+ hc.healthy[key] = []*TabletHealth{}
+ }
+ } else {
+ // Simply recompute the list of healthy tablets for all other tablet types.
+ hc.recomputeHealthy(key)
+ }
}
}
}()
@@ -586,6 +599,13 @@ func (hc *HealthCheckImpl) updateHealth(th *TabletHealth, prevTarget *query.Targ
hc.broadcast(th)
}
+// recomputeHealthy recomputes the healthy tablets for the given key.
+//
+// This filters out tablets that might be healthy, but are not part of the current
+// cell or cell alias. It also performs filtering of tablets based on replication lag,
+// if configured to do so.
+//
+// This should not be called for primary tablets.
func (hc *HealthCheckImpl) recomputeHealthy(key KeyspaceShardTabletType) {
all := hc.healthData[key]
allArray := make([]*TabletHealth, 0, len(all))
diff --git a/go/vt/discovery/healthcheck_test.go b/go/vt/discovery/healthcheck_test.go
index 31376bd8c7d..8576ef060ba 100644
--- a/go/vt/discovery/healthcheck_test.go
+++ b/go/vt/discovery/healthcheck_test.go
@@ -784,6 +784,127 @@ func TestRemoveTablet(t *testing.T) {
assert.Empty(t, a, "wrong result, expected empty list")
}
+// When an external primary failover is performed,
+// the demoted primary will advertise itself as a `PRIMARY`
+// tablet until it recognizes that it was demoted,
+// and until all in-flight operations have finished, either
+// successfully or unsuccessfully (see the `--shutdown_grace_period` flag).
+//
+// During this time, operations like `RemoveTablet` should not lead
+// to multiple tablets becoming valid targets for `PRIMARY`.
+func TestRemoveTabletDuringExternalReparenting(t *testing.T) {
+ ctx := utils.LeakCheckContext(t)
+
+ // reset error counters
+ hcErrorCounters.ResetAll()
+ ts := memorytopo.NewServer(ctx, "cell")
+ defer ts.Close()
+ hc := createTestHc(ctx, ts)
+ // close healthcheck
+ defer hc.Close()
+
+ firstTablet := createTestTablet(0, "cell", "a")
+ firstTablet.Type = topodatapb.TabletType_PRIMARY
+
+ secondTablet := createTestTablet(1, "cell", "b")
+ secondTablet.Type = topodatapb.TabletType_REPLICA
+
+ thirdTablet := createTestTablet(2, "cell", "c")
+ thirdTablet.Type = topodatapb.TabletType_REPLICA
+
+ firstTabletHealthStream := make(chan *querypb.StreamHealthResponse)
+ firstTabletConn := createFakeConn(firstTablet, firstTabletHealthStream)
+ firstTabletConn.errCh = make(chan error)
+
+ secondTabletHealthStream := make(chan *querypb.StreamHealthResponse)
+ secondTabletConn := createFakeConn(secondTablet, secondTabletHealthStream)
+ secondTabletConn.errCh = make(chan error)
+
+ thirdTabletHealthStream := make(chan *querypb.StreamHealthResponse)
+ thirdTabletConn := createFakeConn(thirdTablet, thirdTabletHealthStream)
+ thirdTabletConn.errCh = make(chan error)
+
+ resultChan := hc.Subscribe()
+
+ hc.AddTablet(firstTablet)
+ <-resultChan
+
+ hc.AddTablet(secondTablet)
+ <-resultChan
+
+ hc.AddTablet(thirdTablet)
+ <-resultChan
+
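+	// Give the original primary a primary term start time that is older than the one the promoted tablet will report later.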
+ firstTabletPrimaryTermStartTimestamp := time.Now().Unix() - 10
+
+ firstTabletHealthStream <- &querypb.StreamHealthResponse{
+ TabletAlias: firstTablet.Alias,
+ Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY},
+ Serving: true,
+
+ PrimaryTermStartTimestamp: firstTabletPrimaryTermStartTimestamp,
+ RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.5},
+ }
+ <-resultChan
+
+ secondTabletHealthStream <- &querypb.StreamHealthResponse{
+ TabletAlias: secondTablet.Alias,
+ Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA},
+ Serving: true,
+
+ PrimaryTermStartTimestamp: 0,
+ RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.5},
+ }
+ <-resultChan
+
+ thirdTabletHealthStream <- &querypb.StreamHealthResponse{
+ TabletAlias: thirdTablet.Alias,
+ Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA},
+ Serving: true,
+
+ PrimaryTermStartTimestamp: 0,
+ RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 1, CpuUsage: 0.5},
+ }
+ <-resultChan
+
+ secondTabletPrimaryTermStartTimestamp := time.Now().Unix()
+
+ // Simulate a failover
+ firstTabletHealthStream <- &querypb.StreamHealthResponse{
+ TabletAlias: firstTablet.Alias,
+ Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY},
+ Serving: true,
+
+ PrimaryTermStartTimestamp: firstTabletPrimaryTermStartTimestamp,
+ RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.5},
+ }
+ <-resultChan
+
+ secondTabletHealthStream <- &querypb.StreamHealthResponse{
+ TabletAlias: secondTablet.Alias,
+ Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY},
+ Serving: true,
+
+ PrimaryTermStartTimestamp: secondTabletPrimaryTermStartTimestamp,
+ RealtimeStats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.5},
+ }
+ <-resultChan
+
+ hc.RemoveTablet(thirdTablet)
+
+ // `secondTablet` should be the primary now
+ expectedTabletStats := []*TabletHealth{{
+ Tablet: secondTablet,
+ Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY},
+ Serving: true,
+ Stats: &querypb.RealtimeStats{ReplicationLagSeconds: 0, CpuUsage: 0.5},
+ PrimaryTermStartTime: secondTabletPrimaryTermStartTimestamp,
+ }}
+
+ actualTabletStats := hc.GetHealthyTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_PRIMARY})
+ mustMatch(t, expectedTabletStats, actualTabletStats, "unexpected result")
+}
+
// TestGetHealthyTablets tests the functionality of GetHealthyTabletStats.
func TestGetHealthyTablets(t *testing.T) {
ctx := utils.LeakCheckContext(t)
diff --git a/go/vt/proto/query/query.pb.go b/go/vt/proto/query/query.pb.go
index 098ccad1032..b5e72b6a1f3 100644
--- a/go/vt/proto/query/query.pb.go
+++ b/go/vt/proto/query/query.pb.go
@@ -315,6 +315,8 @@ const (
// BITNUM specifies a base 2 binary type (unquoted varbinary).
// Properties: 34, IsText.
Type_BITNUM Type = 4130
+	// RAW specifies a type which won't be quoted; the value is used as-is while encoding.
+ Type_RAW Type = 2084
)
// Enum value maps for Type.
@@ -355,6 +357,7 @@ var (
4128: "HEXNUM",
4129: "HEXVAL",
4130: "BITNUM",
+ 2084: "RAW",
}
Type_value = map[string]int32{
"NULL_TYPE": 0,
@@ -392,6 +395,7 @@ var (
"HEXNUM": 4128,
"HEXVAL": 4129,
"BITNUM": 4130,
+ "RAW": 2084,
}
)
@@ -6510,7 +6514,7 @@ var file_query_proto_rawDesc = []byte{
0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x80, 0x08, 0x12, 0x0d, 0x0a, 0x08, 0x49, 0x53, 0x51, 0x55,
0x4f, 0x54, 0x45, 0x44, 0x10, 0x80, 0x10, 0x12, 0x0b, 0x0a, 0x06, 0x49, 0x53, 0x54, 0x45, 0x58,
0x54, 0x10, 0x80, 0x20, 0x12, 0x0d, 0x0a, 0x08, 0x49, 0x53, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59,
- 0x10, 0x80, 0x40, 0x2a, 0xc0, 0x03, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09,
+ 0x10, 0x80, 0x40, 0x2a, 0xca, 0x03, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09,
0x4e, 0x55, 0x4c, 0x4c, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x04, 0x49,
0x4e, 0x54, 0x38, 0x10, 0x81, 0x02, 0x12, 0x0a, 0x0a, 0x05, 0x55, 0x49, 0x4e, 0x54, 0x38, 0x10,
0x82, 0x06, 0x12, 0x0a, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x31, 0x36, 0x10, 0x83, 0x02, 0x12, 0x0b,
@@ -6538,18 +6542,19 @@ var file_query_proto_rawDesc = []byte{
0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x52, 0x45, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x1f, 0x12,
0x0b, 0x0a, 0x06, 0x48, 0x45, 0x58, 0x4e, 0x55, 0x4d, 0x10, 0xa0, 0x20, 0x12, 0x0b, 0x0a, 0x06,
0x48, 0x45, 0x58, 0x56, 0x41, 0x4c, 0x10, 0xa1, 0x20, 0x12, 0x0b, 0x0a, 0x06, 0x42, 0x49, 0x54,
- 0x4e, 0x55, 0x4d, 0x10, 0xa2, 0x20, 0x2a, 0x46, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61,
- 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e,
- 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x45, 0x50, 0x41,
- 0x52, 0x45, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x02,
- 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x03, 0x2a, 0x31,
- 0x0a, 0x0f, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x79, 0x70,
- 0x65, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x49, 0x45, 0x57, 0x53, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06,
- 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10,
- 0x02, 0x42, 0x35, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x22, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f,
- 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x4e, 0x55, 0x4d, 0x10, 0xa2, 0x20, 0x12, 0x08, 0x0a, 0x03, 0x52, 0x41, 0x57, 0x10, 0xa4, 0x10,
+ 0x2a, 0x46, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53,
+ 0x74, 0x61, 0x74, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
+ 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x45, 0x50, 0x41, 0x52, 0x45, 0x10, 0x01, 0x12, 0x0a,
+ 0x0a, 0x06, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f,
+ 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x03, 0x2a, 0x31, 0x0a, 0x0f, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x56,
+ 0x49, 0x45, 0x57, 0x53, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53,
+ 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x02, 0x42, 0x35, 0x0a, 0x0f, 0x69,
+ 0x6f, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x22,
+ 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73,
+ 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x71, 0x75, 0x65,
+ 0x72, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/go/vt/servenv/version.go b/go/vt/servenv/version.go
index 61c606c65ea..d022e4df5e7 100644
--- a/go/vt/servenv/version.go
+++ b/go/vt/servenv/version.go
@@ -19,4 +19,4 @@ package servenv
// DO NOT EDIT
// THIS FILE IS AUTO-GENERATED DURING NEW RELEASES BY THE VITESS-RELEASER
-const versionName = "19.0.5"
+const versionName = "19.0.6"
diff --git a/go/vt/sqlparser/normalizer_test.go b/go/vt/sqlparser/normalizer_test.go
index de1fdc868ad..15d24d9d3be 100644
--- a/go/vt/sqlparser/normalizer_test.go
+++ b/go/vt/sqlparser/normalizer_test.go
@@ -209,6 +209,23 @@ func TestNormalize(t *testing.T) {
outbv: map[string]*querypb.BindVariable{
"v1": sqltypes.BitNumBindVariable([]byte("0b11")),
},
+ }, {
+ // json value in insert
+ in: "insert into t values ('{\"k\", \"v\"}')",
+ outstmt: "insert into t values (:bv1 /* VARCHAR */)",
+ outbv: map[string]*querypb.BindVariable{
+ "bv1": sqltypes.StringBindVariable("{\"k\", \"v\"}"),
+ },
+ }, {
+ // json function in insert
+ in: "insert into t values (JSON_OBJECT('_id', 27, 'name', 'carrot'))",
+ outstmt: "insert into t values (json_object(:bv1 /* VARCHAR */, :bv2 /* INT64 */, :bv3 /* VARCHAR */, :bv4 /* VARCHAR */))",
+ outbv: map[string]*querypb.BindVariable{
+ "bv1": sqltypes.StringBindVariable("_id"),
+ "bv2": sqltypes.Int64BindVariable(27),
+ "bv3": sqltypes.StringBindVariable("name"),
+ "bv4": sqltypes.StringBindVariable("carrot"),
+ },
}, {
// ORDER BY column_position
in: "select a, b from t order by 1 asc",
diff --git a/go/vt/sqlparser/parsed_query.go b/go/vt/sqlparser/parsed_query.go
index a612e555ee8..491e7400988 100644
--- a/go/vt/sqlparser/parsed_query.go
+++ b/go/vt/sqlparser/parsed_query.go
@@ -101,7 +101,7 @@ func EncodeValue(buf *strings.Builder, value *querypb.BindVariable) {
sqltypes.ProtoToValue(bv).EncodeSQLStringBuilder(buf)
}
buf.WriteByte(')')
- case querypb.Type_JSON:
+ case querypb.Type_RAW:
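+		// RAW bind values are written into the query verbatim, without quoting or escaping.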
v, _ := sqltypes.BindVariableToValue(value)
buf.Write(v.Raw())
default:
diff --git a/go/vt/sqlparser/parsed_query_test.go b/go/vt/sqlparser/parsed_query_test.go
index ef59676883f..2e01655c644 100644
--- a/go/vt/sqlparser/parsed_query_test.go
+++ b/go/vt/sqlparser/parsed_query_test.go
@@ -20,6 +20,8 @@ import (
"reflect"
"testing"
+ "github.com/stretchr/testify/require"
+
"vitess.io/vitess/go/sqltypes"
querypb "vitess.io/vitess/go/vt/proto/query"
@@ -80,6 +82,14 @@ func TestGenerateQuery(t *testing.T) {
"vals": sqltypes.TestBindVariable([]any{1, "aa"}),
},
output: "select * from a where id in (1, 'aa')",
+ }, {
+ desc: "json bindvar and raw bindvar",
+ query: "insert into t values (:v1, :v2)",
+ bindVars: map[string]*querypb.BindVariable{
+ "v1": sqltypes.ValueBindVariable(sqltypes.MakeTrusted(querypb.Type_JSON, []byte(`{"key": "value"}`))),
+ "v2": sqltypes.ValueBindVariable(sqltypes.MakeTrusted(querypb.Type_RAW, []byte(`json_object("k", "v")`))),
+ },
+ output: `insert into t values ('{\"key\": \"value\"}', json_object("k", "v"))`,
}, {
desc: "list bind vars 0 arguments",
query: "select * from a where id in ::vals",
@@ -138,20 +148,19 @@ func TestGenerateQuery(t *testing.T) {
parser := NewTestParser()
for _, tcase := range tcases {
- tree, err := parser.Parse(tcase.query)
- if err != nil {
- t.Errorf("parse failed for %s: %v", tcase.desc, err)
- continue
- }
- buf := NewTrackedBuffer(nil)
- buf.Myprintf("%v", tree)
- pq := buf.ParsedQuery()
- bytes, err := pq.GenerateQuery(tcase.bindVars, tcase.extras)
- if err != nil {
- assert.Equal(t, tcase.output, err.Error())
- } else {
- assert.Equal(t, tcase.output, string(bytes))
- }
+ t.Run(tcase.query, func(t *testing.T) {
+ tree, err := parser.Parse(tcase.query)
+ require.NoError(t, err)
+ buf := NewTrackedBuffer(nil)
+ buf.Myprintf("%v", tree)
+ pq := buf.ParsedQuery()
+ bytes, err := pq.GenerateQuery(tcase.bindVars, tcase.extras)
+ if err != nil {
+ assert.Equal(t, tcase.output, err.Error())
+ } else {
+ assert.Equal(t, tcase.output, bytes)
+ }
+ })
}
}
diff --git a/go/vt/vtctl/reparentutil/replication.go b/go/vt/vtctl/reparentutil/replication.go
index 9b33a5b0536..7c39befded1 100644
--- a/go/vt/vtctl/reparentutil/replication.go
+++ b/go/vt/vtctl/reparentutil/replication.go
@@ -123,7 +123,7 @@ func FindValidEmergencyReparentCandidates(
case len(errantGTIDs) != 0:
// This tablet has errant GTIDs. It's not a valid candidate for
// reparent, so don't insert it into the final mapping.
- log.Errorf("skipping %v because we detected errant GTIDs - %v", alias, errantGTIDs)
+ log.Errorf("skipping %v with GTIDSet:%v because we detected errant GTIDs - %v", alias, relayLogGTIDSet, errantGTIDs)
continue
}
diff --git a/go/vt/vtctl/reparentutil/replication_test.go b/go/vt/vtctl/reparentutil/replication_test.go
index b7a2bcb07e7..eb4602f7095 100644
--- a/go/vt/vtctl/reparentutil/replication_test.go
+++ b/go/vt/vtctl/reparentutil/replication_test.go
@@ -161,7 +161,7 @@ func TestFindValidEmergencyReparentCandidates(t *testing.T) {
shouldErr: false,
},
{
- name: "tablet with errant GTIDs is excluded",
+ name: "tablet with superset GTIDs is included",
statusMap: map[string]*replicationdatapb.StopReplicationStatus{
"r1": {
After: &replicationdatapb.Status{
@@ -169,19 +169,33 @@ func TestFindValidEmergencyReparentCandidates(t *testing.T) {
RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5",
},
},
- "errant": {
+ "r2": {
After: &replicationdatapb.Status{
SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562",
RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5,AAAAAAAA-71CA-11E1-9E33-C80AA9429562:1",
},
},
},
- primaryStatusMap: map[string]*replicationdatapb.PrimaryStatus{
- "p1": {
- Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5",
+ expected: []string{"r1", "r2"},
+ shouldErr: false,
+ },
+ {
+ name: "tablets with errant GTIDs are excluded",
+ statusMap: map[string]*replicationdatapb.StopReplicationStatus{
+ "r1": {
+ After: &replicationdatapb.Status{
+ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562",
+ RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5,AAAAAAAA-71CA-11E1-9E33-C80AA9429562:1",
+ },
+ },
+ "r2": {
+ After: &replicationdatapb.Status{
+ SourceUuid: "3E11FA47-71CA-11E1-9E33-C80AA9429562",
+ RelayLogPosition: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5,AAAAAAAA-71CA-11E1-9E33-C80AA9429562:2-3",
+ },
},
},
- expected: []string{"r1", "p1"},
+ expected: []string{},
shouldErr: false,
},
{
diff --git a/go/vt/vtgate/executor.go b/go/vt/vtgate/executor.go
index d1e555bf990..b99873ced02 100644
--- a/go/vt/vtgate/executor.go
+++ b/go/vt/vtgate/executor.go
@@ -202,7 +202,7 @@ func NewExecutor(
return e.plans.Metrics.Hits()
})
stats.NewCounterFunc("QueryPlanCacheMisses", "Query plan cache misses", func() int64 {
- return e.plans.Metrics.Hits()
+ return e.plans.Metrics.Misses()
})
servenv.HTTPHandle(pathQueryPlans, e)
servenv.HTTPHandle(pathScatterStats, e)
diff --git a/go/vt/vtgate/planbuilder/operators/apply_join.go b/go/vt/vtgate/planbuilder/operators/apply_join.go
index 402a2ae19ba..a54c71646ec 100644
--- a/go/vt/vtgate/planbuilder/operators/apply_join.go
+++ b/go/vt/vtgate/planbuilder/operators/apply_join.go
@@ -277,6 +277,10 @@ func (aj *ApplyJoin) ShortDescription() string {
}
firstPart := fmt.Sprintf("on %s columns: %s", fn(aj.JoinPredicates), fn(aj.JoinColumns))
+ if aj.LeftJoin {
+ firstPart = "LEFT JOIN " + firstPart
+ }
+
if len(aj.ExtraLHSVars) == 0 {
return firstPart
}
diff --git a/go/vt/vtgate/planbuilder/operators/hash_join.go b/go/vt/vtgate/planbuilder/operators/hash_join.go
index f997ed5205d..135fda276b5 100644
--- a/go/vt/vtgate/planbuilder/operators/hash_join.go
+++ b/go/vt/vtgate/planbuilder/operators/hash_join.go
@@ -300,20 +300,9 @@ func (hj *HashJoin) addColumn(ctx *plancontext.PlanningContext, in sqlparser.Exp
inOffset = op.AddColumn(ctx, false, false, aeWrap(expr))
}
- // we turn the
+ // we have to turn the incoming offset to an outgoing offset of the columns this operator is exposing
internalOffset := offsetter(inOffset)
-
- // ok, we have an offset from the input operator. Let's check if we already have it
- // in our list of incoming columns
-
- for idx, offset := range hj.ColumnOffsets {
- if internalOffset == offset {
- return idx
- }
- }
-
hj.ColumnOffsets = append(hj.ColumnOffsets, internalOffset)
-
return len(hj.ColumnOffsets) - 1
}
@@ -408,17 +397,7 @@ func (hj *HashJoin) addSingleSidedColumn(
// we have to turn the incoming offset to an outgoing offset of the columns this operator is exposing
internalOffset := offsetter(inOffset)
-
- // ok, we have an offset from the input operator. Let's check if we already have it
- // in our list of incoming columns
- for idx, offset := range hj.ColumnOffsets {
- if internalOffset == offset {
- return idx
- }
- }
-
hj.ColumnOffsets = append(hj.ColumnOffsets, internalOffset)
-
return len(hj.ColumnOffsets) - 1
}
diff --git a/go/vt/vtgate/planbuilder/operators/join_merging.go b/go/vt/vtgate/planbuilder/operators/join_merging.go
index 0cc5da9121f..0042994bda3 100644
--- a/go/vt/vtgate/planbuilder/operators/join_merging.go
+++ b/go/vt/vtgate/planbuilder/operators/join_merging.go
@@ -22,12 +22,13 @@ import (
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+ "vitess.io/vitess/go/vt/vtgate/vindexes"
)
// mergeJoinInputs checks whether two operators can be merged into a single one.
// If they can be merged, a new operator with the merged routing is returned
// If they cannot be merged, nil is returned.
-func mergeJoinInputs(ctx *plancontext.PlanningContext, lhs, rhs Operator, joinPredicates []sqlparser.Expr, m merger) *Route {
+func mergeJoinInputs(ctx *plancontext.PlanningContext, lhs, rhs Operator, joinPredicates []sqlparser.Expr, m *joinMerger) *Route {
lhsRoute, rhsRoute, routingA, routingB, a, b, sameKeyspace := prepareInputRoutes(lhs, rhs)
if lhsRoute == nil {
return nil
@@ -40,6 +41,11 @@ func mergeJoinInputs(ctx *plancontext.PlanningContext, lhs, rhs Operator, joinPr
case b == dual:
return m.merge(ctx, lhsRoute, rhsRoute, routingA)
+	// As both are reference routes, we need to merge the alternates as well.
+ case a == anyShard && b == anyShard && sameKeyspace:
+ newrouting := mergeAnyShardRoutings(ctx, routingA.(*AnyShardRouting), routingB.(*AnyShardRouting), joinPredicates, m.innerJoin)
+ return m.merge(ctx, lhsRoute, rhsRoute, newrouting)
+
// an unsharded/reference route can be merged with anything going to that keyspace
case a == anyShard && sameKeyspace:
return m.merge(ctx, lhsRoute, rhsRoute, routingB)
@@ -58,13 +64,33 @@ func mergeJoinInputs(ctx *plancontext.PlanningContext, lhs, rhs Operator, joinPr
// sharded routing is complex, so we handle it in a separate method
case a == sharded && b == sharded:
- return tryMergeJoinShardedRouting(ctx, lhsRoute, rhsRoute, m, joinPredicates)
+ return tryMergeShardedRouting(ctx, lhsRoute, rhsRoute, m, joinPredicates)
default:
return nil
}
}
+func mergeAnyShardRoutings(ctx *plancontext.PlanningContext, a, b *AnyShardRouting, joinPredicates []sqlparser.Expr, innerJoin bool) *AnyShardRouting {
+ alternates := make(map[*vindexes.Keyspace]*Route)
+ for ak, av := range a.Alternates {
+ for bk, bv := range b.Alternates {
+ // only same keyspace alternates can be merged.
+ if ak != bk {
+ continue
+ }
+ op, _ := mergeOrJoin(ctx, av, bv, joinPredicates, innerJoin)
+ if r, ok := op.(*Route); ok {
+ alternates[ak] = r
+ }
+ }
+ }
+ return &AnyShardRouting{
+ keyspace: a.keyspace,
+ Alternates: alternates,
+ }
+}
+
func prepareInputRoutes(lhs Operator, rhs Operator) (*Route, *Route, Routing, Routing, routingType, routingType, bool) {
lhsRoute, rhsRoute := operatorsToRoutes(lhs, rhs)
if lhsRoute == nil || rhsRoute == nil {
@@ -176,7 +202,7 @@ func getRoutingType(r Routing) routingType {
panic(fmt.Sprintf("switch should be exhaustive, got %T", r))
}
-func newJoinMerge(predicates []sqlparser.Expr, innerJoin bool) merger {
+func newJoinMerge(predicates []sqlparser.Expr, innerJoin bool) *joinMerger {
return &joinMerger{
predicates: predicates,
innerJoin: innerJoin,
diff --git a/go/vt/vtgate/planbuilder/operators/joins.go b/go/vt/vtgate/planbuilder/operators/joins.go
index 266b9b8288f..b819c4a1f5f 100644
--- a/go/vt/vtgate/planbuilder/operators/joins.go
+++ b/go/vt/vtgate/planbuilder/operators/joins.go
@@ -17,7 +17,10 @@ limitations under the License.
package operators
import (
+ "fmt"
+
"vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
"vitess.io/vitess/go/vt/vtgate/semantics"
)
@@ -82,7 +85,7 @@ func AddPredicate(
return join
}
- return nil
+ panic(vterrors.VT13001(fmt.Sprintf("pushed wrong predicate to the join: %s", sqlparser.String(expr))))
}
// we are looking for predicates like `tbl.col = <>` or `<> = tbl.col`,
diff --git a/go/vt/vtgate/planbuilder/operators/offset_planning.go b/go/vt/vtgate/planbuilder/operators/offset_planning.go
index 638d3d80907..712cc8ee5ad 100644
--- a/go/vt/vtgate/planbuilder/operators/offset_planning.go
+++ b/go/vt/vtgate/planbuilder/operators/offset_planning.go
@@ -38,7 +38,6 @@ func planOffsets(ctx *plancontext.PlanningContext, root Operator) Operator {
panic(vterrors.VT13001(fmt.Sprintf("should not see %T here", in)))
case offsettable:
newOp := op.planOffsets(ctx)
-
if newOp == nil {
newOp = op
}
@@ -47,7 +46,13 @@ func planOffsets(ctx *plancontext.PlanningContext, root Operator) Operator {
fmt.Println("Planned offsets for:")
fmt.Println(ToTree(newOp))
}
- return newOp, nil
+
+ if newOp == op {
+ return newOp, nil
+ } else {
+		// We got a new operator from planOffsets, so report that something has changed.
+ return newOp, Rewrote("planning offsets introduced a new operator")
+ }
}
return in, NoRewrite
}
diff --git a/go/vt/vtgate/planbuilder/operators/route_planning.go b/go/vt/vtgate/planbuilder/operators/route_planning.go
index c58340291ff..d0b32e37200 100644
--- a/go/vt/vtgate/planbuilder/operators/route_planning.go
+++ b/go/vt/vtgate/planbuilder/operators/route_planning.go
@@ -358,13 +358,18 @@ func mergeOrJoin(ctx *plancontext.PlanningContext, lhs, rhs Operator, joinPredic
}
join := NewApplyJoin(ctx, Clone(rhs), Clone(lhs), nil, !inner)
- newOp := pushJoinPredicates(ctx, joinPredicates, join)
- return newOp, Rewrote("logical join to applyJoin, switching side because LIMIT")
+ for _, pred := range joinPredicates {
+ join.AddJoinPredicate(ctx, pred)
+ }
+ return join, Rewrote("logical join to applyJoin, switching side because LIMIT")
}
join := NewApplyJoin(ctx, Clone(lhs), Clone(rhs), nil, !inner)
- newOp := pushJoinPredicates(ctx, joinPredicates, join)
- return newOp, Rewrote("logical join to applyJoin ")
+ for _, pred := range joinPredicates {
+ join.AddJoinPredicate(ctx, pred)
+ }
+
+ return join, Rewrote("logical join to applyJoin ")
}
func operatorsToRoutes(a, b Operator) (*Route, *Route) {
@@ -583,15 +588,3 @@ func hexEqual(a, b *sqlparser.Literal) bool {
}
return false
}
-
-func pushJoinPredicates(ctx *plancontext.PlanningContext, exprs []sqlparser.Expr, op *ApplyJoin) Operator {
- if len(exprs) == 0 {
- return op
- }
-
- for _, expr := range exprs {
- AddPredicate(ctx, op, expr, true, newFilterSinglePredicate)
- }
-
- return op
-}
diff --git a/go/vt/vtgate/planbuilder/operators/sharded_routing.go b/go/vt/vtgate/planbuilder/operators/sharded_routing.go
index 6818311c0dd..61046e4da67 100644
--- a/go/vt/vtgate/planbuilder/operators/sharded_routing.go
+++ b/go/vt/vtgate/planbuilder/operators/sharded_routing.go
@@ -23,7 +23,6 @@ import (
"vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/slice"
"vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/engine"
"vitess.io/vitess/go/vt/vtgate/evalengine"
"vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
@@ -601,13 +600,15 @@ func (tr *ShardedRouting) extraInfo() string {
)
}
-func tryMergeJoinShardedRouting(
+func tryMergeShardedRouting(
ctx *plancontext.PlanningContext,
routeA, routeB *Route,
m merger,
joinPredicates []sqlparser.Expr,
) *Route {
- sameKeyspace := routeA.Routing.Keyspace() == routeB.Routing.Keyspace()
+ if routeA.Routing.Keyspace() != routeB.Routing.Keyspace() {
+ return nil
+ }
tblA := routeA.Routing.(*ShardedRouting)
tblB := routeB.Routing.(*ShardedRouting)
@@ -636,10 +637,6 @@ func tryMergeJoinShardedRouting(
return nil
}
- if !sameKeyspace {
- panic(vterrors.VT12001("cross-shard correlated subquery"))
- }
-
canMerge := canMergeOnFilters(ctx, routeA, routeB, joinPredicates)
if !canMerge {
return nil
diff --git a/go/vt/vtgate/planbuilder/operators/subquery_planning.go b/go/vt/vtgate/planbuilder/operators/subquery_planning.go
index a85829bab6d..fb8db06f312 100644
--- a/go/vt/vtgate/planbuilder/operators/subquery_planning.go
+++ b/go/vt/vtgate/planbuilder/operators/subquery_planning.go
@@ -722,7 +722,7 @@ func mergeSubqueryInputs(ctx *plancontext.PlanningContext, in, out Operator, joi
// sharded routing is complex, so we handle it in a separate method
case inner == sharded && outer == sharded:
- return tryMergeJoinShardedRouting(ctx, inRoute, outRoute, m, joinPredicates)
+ return tryMergeShardedRouting(ctx, inRoute, outRoute, m, joinPredicates)
default:
return nil
diff --git a/go/vt/vtgate/planbuilder/testdata/aggr_cases.json b/go/vt/vtgate/planbuilder/testdata/aggr_cases.json
index f1555686230..4296c72a6e6 100644
--- a/go/vt/vtgate/planbuilder/testdata/aggr_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/aggr_cases.json
@@ -6507,59 +6507,74 @@
"OrderBy": "(4|6) ASC, (5|7) ASC",
"Inputs": [
{
- "OperatorType": "Join",
- "Variant": "HashLeftJoin",
- "Collation": "binary",
- "ComparisonType": "INT16",
- "JoinColumnIndexes": "-1,1,-2,2,-3,3",
- "Predicate": "`user`.col = ue.col",
- "TableName": "`user`_user_extra",
+ "OperatorType": "Projection",
+ "Expressions": [
+ "count(*) as count(*)",
+ "count(*) as count(*)",
+ "`user`.col as col",
+ "ue.col as col",
+ "`user`.foo as foo",
+ "ue.bar as bar",
+ "weight_string(`user`.foo) as weight_string(`user`.foo)",
+ "weight_string(ue.bar) as weight_string(ue.bar)"
+ ],
"Inputs": [
{
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select count(*), `user`.col, `user`.foo from `user` where 1 != 1 group by `user`.col, `user`.foo",
- "Query": "select count(*), `user`.col, `user`.foo from `user` group by `user`.col, `user`.foo",
- "Table": "`user`"
- },
- {
- "OperatorType": "Aggregate",
- "Variant": "Ordered",
- "Aggregates": "count_star(0)",
- "GroupBy": "1, (2|3)",
+ "OperatorType": "Join",
+ "Variant": "HashLeftJoin",
+ "Collation": "binary",
+ "ComparisonType": "INT16",
+ "JoinColumnIndexes": "-1,1,-2,2,-3,3,-3,3",
+ "Predicate": "`user`.col = ue.col",
+ "TableName": "`user`_user_extra",
"Inputs": [
{
- "OperatorType": "SimpleProjection",
- "Columns": [
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select count(*), `user`.col, `user`.foo from `user` where 1 != 1 group by `user`.col, `user`.foo",
+ "Query": "select count(*), `user`.col, `user`.foo from `user` group by `user`.col, `user`.foo",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "count_star(0)",
+ "GroupBy": "1, (2|3)",
+ "Inputs": [
+ {
+ "OperatorType": "SimpleProjection",
+ "Columns": [
2,
0,
1,
3
],
- "Inputs": [
- {
- "OperatorType": "Sort",
- "Variant": "Memory",
- "OrderBy": "0 ASC, (1|3) ASC",
"Inputs": [
{
- "OperatorType": "Limit",
- "Count": "10",
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "0 ASC, (1|3) ASC",
"Inputs": [
{
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select ue.col, ue.bar, 1, weight_string(ue.bar) from (select col, bar from user_extra where 1 != 1) as ue where 1 != 1",
- "Query": "select ue.col, ue.bar, 1, weight_string(ue.bar) from (select col, bar from user_extra) as ue limit :__upper_limit",
- "Table": "user_extra"
+ "OperatorType": "Limit",
+ "Count": "10",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ue.col, ue.bar, 1, weight_string(ue.bar) from (select col, bar from user_extra where 1 != 1) as ue where 1 != 1",
+ "Query": "select ue.col, ue.bar, 1, weight_string(ue.bar) from (select col, bar from user_extra) as ue limit :__upper_limit",
+ "Table": "user_extra"
+ }
+ ]
}
]
}
diff --git a/go/vt/vtgate/planbuilder/testdata/from_cases.json b/go/vt/vtgate/planbuilder/testdata/from_cases.json
index 86753825e42..18f9f376810 100644
--- a/go/vt/vtgate/planbuilder/testdata/from_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/from_cases.json
@@ -744,6 +744,111 @@
]
}
},
+ {
+ "comment": "Complex query that has hash left join underneath a memory sort and ordered aggregation",
+ "query": "select 1 from user join user_extra on user.id = user_extra.user_id join music on music.intcol = user_extra.col left join (select user_metadata.col, count(*) as count from user_metadata group by user_metadata.col) um on um.col = user_extra.col where user.id IN (103) group by user_extra.col, music.intcol",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user join user_extra on user.id = user_extra.user_id join music on music.intcol = user_extra.col left join (select user_metadata.col, count(*) as count from user_metadata group by user_metadata.col) um on um.col = user_extra.col where user.id IN (103) group by user_extra.col, music.intcol",
+ "Instructions": {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "any_value(0) AS 1",
+ "GroupBy": "1, 4",
+ "ResultColumns": 1,
+ "Inputs": [
+ {
+ "OperatorType": "Sort",
+ "Variant": "Memory",
+ "OrderBy": "1 ASC, 4 ASC",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "HashLeftJoin",
+ "Collation": "binary",
+ "ComparisonType": "INT16",
+ "JoinColumnIndexes": "-1,-2,1,-2,-4,-1",
+ "Predicate": "user_extra.col = um.col",
+ "TableName": "music_`user`, user_extra_user_metadata",
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0,R:0,R:0,L:1",
+ "JoinVars": {
+ "music_intcol": 1
+ },
+ "TableName": "music_`user`, user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, music.intcol from music where 1 != 1 group by music.intcol",
+ "Query": "select 1, music.intcol from music group by music.intcol",
+ "Table": "music"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_extra.col, user_extra.col from `user`, user_extra where 1 != 1 group by user_extra.col, user_extra.col",
+ "Query": "select user_extra.col, user_extra.col from `user`, user_extra where `user`.id in (103) and user_extra.col = :music_intcol and `user`.id = user_extra.user_id group by user_extra.col, user_extra.col",
+ "Table": "`user`, user_extra",
+ "Values": [
+ "103"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Aggregate",
+ "Variant": "Ordered",
+ "Aggregates": "sum_count_star(1) AS count",
+ "GroupBy": "0",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select user_metadata.col, count(*) as `count` from user_metadata where 1 != 1 group by user_metadata.col",
+ "OrderBy": "0 ASC",
+ "Query": "select user_metadata.col, count(*) as `count` from user_metadata group by user_metadata.col order by user_metadata.col asc",
+ "Table": "user_metadata"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.music",
+ "user.user",
+ "user.user_extra",
+ "user.user_metadata"
+ ]
+ }
+ },
{
"comment": "Straight-join (ignores the straight_join hint)",
"query": "select m1.col from unsharded as m1 straight_join unsharded as m2",
@@ -808,6 +913,59 @@
]
}
},
+ {
+ "comment": "Outer join with join predicates that only depend on the inner side",
+ "query": "select 1 from user left join user_extra on user.foo = 42 and user.bar = user_extra.bar",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user left join user_extra on user.foo = 42 and user.bar = user_extra.bar",
+ "Instructions": {
+ "OperatorType": "Projection",
+ "Expressions": [
+ "1 as 1"
+ ],
+ "Inputs": [
+ {
+ "OperatorType": "Join",
+ "Variant": "LeftJoin",
+ "JoinVars": {
+ "user_bar": 1,
+ "user_foo": 0
+ },
+ "TableName": "`user`_user_extra",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select `user`.foo, `user`.bar from `user` where 1 != 1",
+ "Query": "select `user`.foo, `user`.bar from `user`",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from user_extra where 1 != 1",
+ "Query": "select 1 from user_extra where user_extra.bar = :user_bar and :user_foo = 42",
+ "Table": "user_extra"
+ }
+ ]
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "user.user_extra"
+ ]
+ }
+ },
{
"comment": "Parenthesized, single chunk",
"query": "select user.col from user join (unsharded as m1 join unsharded as m2)",
@@ -4247,28 +4405,22 @@
"ResultColumns": 2,
"Inputs": [
{
- "OperatorType": "Join",
- "Variant": "HashLeftJoin",
- "Collation": "binary",
- "ComparisonType": "INT16",
- "JoinColumnIndexes": "-1,2",
- "Predicate": "u.col = ue.col",
- "TableName": "`user`_user_extra",
+ "OperatorType": "Projection",
+ "Expressions": [
+ "id as id",
+ "user_id as user_id",
+ "weight_string(id) as weight_string(id)",
+ "weight_string(user_id) as weight_string(user_id)"
+ ],
"Inputs": [
{
- "OperatorType": "Route",
- "Variant": "Scatter",
- "Keyspace": {
- "Name": "user",
- "Sharded": true
- },
- "FieldQuery": "select u.id, u.col from (select id, col from `user` where 1 != 1) as u where 1 != 1",
- "Query": "select distinct u.id, u.col from (select id, col from `user`) as u",
- "Table": "`user`"
- },
- {
- "OperatorType": "Limit",
- "Count": "10",
+ "OperatorType": "Join",
+ "Variant": "HashLeftJoin",
+ "Collation": "binary",
+ "ComparisonType": "INT16",
+ "JoinColumnIndexes": "-1,2,-1,2",
+ "Predicate": "u.col = ue.col",
+ "TableName": "`user`_user_extra",
"Inputs": [
{
"OperatorType": "Route",
@@ -4277,9 +4429,26 @@
"Name": "user",
"Sharded": true
},
- "FieldQuery": "select ue.col, ue.user_id from (select col, user_id from user_extra where 1 != 1) as ue where 1 != 1",
- "Query": "select ue.col, ue.user_id from (select col, user_id from user_extra) as ue limit :__upper_limit",
- "Table": "user_extra"
+ "FieldQuery": "select u.id, u.col from (select id, col from `user` where 1 != 1) as u where 1 != 1",
+ "Query": "select distinct u.id, u.col from (select id, col from `user`) as u",
+ "Table": "`user`"
+ },
+ {
+ "OperatorType": "Limit",
+ "Count": "10",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select ue.col, ue.user_id from (select col, user_id from user_extra where 1 != 1) as ue where 1 != 1",
+ "Query": "select ue.col, ue.user_id from (select col, user_id from user_extra) as ue limit :__upper_limit",
+ "Table": "user_extra"
+ }
+ ]
}
]
}
@@ -4388,5 +4557,54 @@
"user.user_extra"
]
}
+ },
+ {
+ "comment": "Cross keyspace join",
+ "query": "select 1 from user join t1 on user.id = t1.id",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user join t1 on user.id = t1.id",
+ "Instructions": {
+ "OperatorType": "Join",
+ "Variant": "Join",
+ "JoinColumnIndexes": "L:0",
+ "JoinVars": {
+ "t1_id": 1
+ },
+ "TableName": "t1_`user`",
+ "Inputs": [
+ {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "zlookup_unique",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1, t1.id from t1 where 1 != 1",
+ "Query": "select 1, t1.id from t1",
+ "Table": "t1"
+ },
+ {
+ "OperatorType": "Route",
+ "Variant": "EqualUnique",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` where 1 != 1",
+ "Query": "select 1 from `user` where `user`.id = :t1_id",
+ "Table": "`user`",
+ "Values": [
+ ":t1_id"
+ ],
+ "Vindex": "user_index"
+ }
+ ]
+ },
+ "TablesUsed": [
+ "user.user",
+ "zlookup_unique.t1"
+ ]
+ }
}
]
diff --git a/go/vt/vtgate/planbuilder/testdata/reference_cases.json b/go/vt/vtgate/planbuilder/testdata/reference_cases.json
index a89fa103923..6aa01355934 100644
--- a/go/vt/vtgate/planbuilder/testdata/reference_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/reference_cases.json
@@ -746,5 +746,30 @@
"user.user"
]
}
+ },
+ {
+ "comment": "two sharded and two unsharded reference table join - all should be merged into one route",
+ "query": "select 1 from user u join user_extra ue on u.id = ue.user_id join main.source_of_ref sr on sr.foo = ue.foo join main.rerouted_ref rr on rr.bar = sr.bar",
+ "plan": {
+ "QueryType": "SELECT",
+ "Original": "select 1 from user u join user_extra ue on u.id = ue.user_id join main.source_of_ref sr on sr.foo = ue.foo join main.rerouted_ref rr on rr.bar = sr.bar",
+ "Instructions": {
+ "OperatorType": "Route",
+ "Variant": "Scatter",
+ "Keyspace": {
+ "Name": "user",
+ "Sharded": true
+ },
+ "FieldQuery": "select 1 from `user` as u, user_extra as ue, ref_with_source as sr, ref as rr where 1 != 1",
+ "Query": "select 1 from `user` as u, user_extra as ue, ref_with_source as sr, ref as rr where rr.bar = sr.bar and u.id = ue.user_id and sr.foo = ue.foo",
+ "Table": "`user`, ref, ref_with_source, user_extra"
+ },
+ "TablesUsed": [
+ "user.ref",
+ "user.ref_with_source",
+ "user.user",
+ "user.user_extra"
+ ]
+ }
}
]
diff --git a/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json
index b66ddd79ad5..f055fe6bb2c 100644
--- a/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json
+++ b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json
@@ -329,6 +329,11 @@
"query": "select 1 from music union (select id from user union all select name from unsharded)",
"plan": "VT12001: unsupported: nesting of UNIONs on the right-hand side"
},
+ {
+ "comment": "Cross keyspace query with subquery",
+ "query": "select 1 from user where id = (select id from t1 where user.foo = t1.bar)",
+ "plan": "VT12001: unsupported: correlated subquery is only supported for EXISTS"
+ },
{
"comment": "multi-shard union",
"query": "select 1 from music union (select id from user union select name from unsharded)",
diff --git a/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json b/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json
index 7aaa2648388..d28a9f97482 100644
--- a/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json
+++ b/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json
@@ -252,6 +252,12 @@
"column": "non_planable",
"name": "non_planable_user_map"
}
+ ],
+ "columns": [
+ {
+ "name": "col",
+ "type": "INT16"
+ }
]
},
"user_extra": {
@@ -282,6 +288,12 @@
"column": "id",
"name": "music_user_map"
}
+ ],
+ "columns": [
+ {
+ "name": "intcol",
+ "type": "INT16"
+ }
]
},
"authoritative": {
diff --git a/go/vt/vttablet/onlineddl/vrepl.go b/go/vt/vttablet/onlineddl/vrepl.go
index 847e40e3fbc..fe6d2bd9141 100644
--- a/go/vt/vttablet/onlineddl/vrepl.go
+++ b/go/vt/vttablet/onlineddl/vrepl.go
@@ -571,6 +571,9 @@ func (v *VRepl) generateFilterQuery(ctx context.Context) error {
sb.WriteString(fmt.Sprintf("CONCAT(%s)", escapeName(name)))
case sourceCol.Type == vrepl.JSONColumnType:
sb.WriteString(fmt.Sprintf("convert(%s using utf8mb4)", escapeName(name)))
+ case targetCol.Type == vrepl.JSONColumnType:
+ // Convert any type to JSON: encode the type as utf8mb4 text
+ sb.WriteString(fmt.Sprintf("convert(%s using utf8mb4)", escapeName(name)))
case sourceCol.Type == vrepl.StringColumnType:
// Check source and target charset/encoding. If needed, create
// a binlogdatapb.CharsetConversion entry (later written to vreplication)
@@ -583,19 +586,19 @@ func (v *VRepl) generateFilterQuery(ctx context.Context) error {
if targetCol.Type == vrepl.StringColumnType && toCollation == collations.Unknown {
return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Character set %s not supported for column %s", targetCol.Charset, targetCol.Name)
}
-
- if trivialCharset(fromCollation) && trivialCharset(toCollation) && targetCol.Type != vrepl.JSONColumnType {
+ if trivialCharset(fromCollation) && trivialCharset(toCollation) {
+ sb.WriteString(escapeName(name))
+ } else if fromCollation == toCollation {
+ // No need for charset conversions as both have the same collation.
sb.WriteString(escapeName(name))
} else {
+ // Charset conversion required:
v.convertCharset[targetName] = &binlogdatapb.CharsetConversion{
FromCharset: sourceCol.Charset,
ToCharset: targetCol.Charset,
}
- sb.WriteString(fmt.Sprintf("convert(%s using utf8mb4)", escapeName(name)))
+ sb.WriteString(escapeName(name))
}
- case targetCol.Type == vrepl.JSONColumnType && sourceCol.Type != vrepl.JSONColumnType:
- // Convert any type to JSON: encode the type as utf8mb4 text
- sb.WriteString(fmt.Sprintf("convert(%s using utf8mb4)", escapeName(name)))
default:
sb.WriteString(escapeName(name))
}
diff --git a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go
index 424daad4871..dce61436295 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go
@@ -27,6 +27,7 @@ import (
"vitess.io/vitess/go/mysql/collations/charset"
"vitess.io/vitess/go/mysql/collations/colldata"
vjson "vitess.io/vitess/go/mysql/json"
+ "vitess.io/vitess/go/mysql/sqlerror"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/binlog/binlogplayer"
"vitess.io/vitess/go/vt/sqlparser"
@@ -258,7 +259,7 @@ func (tp *TablePlan) applyBulkInsert(sqlbuffer *bytes2.Buffer, rows []*querypb.R
if i > 0 {
sqlbuffer.WriteString(", ")
}
- if err := appendFromRow(tp.BulkInsertValues, sqlbuffer, tp.Fields, row, tp.FieldsToSkip); err != nil {
+ if err := tp.appendFromRow(sqlbuffer, row); err != nil {
return nil, err
}
}
@@ -313,6 +314,30 @@ func (tp *TablePlan) isOutsidePKRange(bindvars map[string]*querypb.BindVariable,
return false
}
+// convertStringCharset does a charset conversion given raw data and an applicable conversion rule.
+// In case of a conversion error, it returns an equivalent of MySQL error 1366, which is what you'd
+// get in a failed `CONVERT()` function, e.g.:
+//
+// > create table tascii(v varchar(100) charset ascii);
+// > insert into tascii values ('€');
+// ERROR 1366 (HY000): Incorrect string value: '\xE2\x82\xAC' for column 'v' at row 1
+func (tp *TablePlan) convertStringCharset(raw []byte, conversion *binlogdatapb.CharsetConversion, fieldName string) ([]byte, error) {
+ fromCollation := tp.CollationEnv.DefaultCollationForCharset(conversion.FromCharset)
+ if fromCollation == collations.Unknown {
+ return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "character set %s not supported for column %s", conversion.FromCharset, fieldName)
+ }
+ toCollation := tp.CollationEnv.DefaultCollationForCharset(conversion.ToCharset)
+ if toCollation == collations.Unknown {
+ return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "character set %s not supported for column %s", conversion.ToCharset, fieldName)
+ }
+
+ out, err := charset.Convert(nil, colldata.Lookup(toCollation).Charset(), raw, colldata.Lookup(fromCollation).Charset())
+ if err != nil {
+ return nil, sqlerror.NewSQLError(sqlerror.ERTruncatedWrongValueForField, sqlerror.SSUnknownSQLState, "Incorrect string value: %s", err.Error())
+ }
+ return out, nil
+}
+
// bindFieldVal returns a bind variable based on given field and value.
// Most values will just bind directly. But some values may need manipulation:
// - text values with charset conversion
@@ -321,11 +346,7 @@ func (tp *TablePlan) isOutsidePKRange(bindvars map[string]*querypb.BindVariable,
func (tp *TablePlan) bindFieldVal(field *querypb.Field, val *sqltypes.Value) (*querypb.BindVariable, error) {
if conversion, ok := tp.ConvertCharset[field.Name]; ok && !val.IsNull() {
// Non-null string value, for which we have a charset conversion instruction
- fromCollation := tp.CollationEnv.DefaultCollationForCharset(conversion.FromCharset)
- if fromCollation == collations.Unknown {
- return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Character set %s not supported for column %s", conversion.FromCharset, field.Name)
- }
- out, err := charset.Convert(nil, charset.Charset_utf8mb4{}, val.Raw(), colldata.Lookup(fromCollation).Charset())
+ out, err := tp.convertStringCharset(val.Raw(), conversion, field.Name)
if err != nil {
return nil, err
}
@@ -619,28 +640,30 @@ func valsEqual(v1, v2 sqltypes.Value) bool {
// note: there can be more fields than bind locations since extra columns might be requested from the source if not all
// primary keys columns are present in the target table, for example. Also some values in the row may not correspond for
// values from the database on the source: sum/count for aggregation queries, for example
-func appendFromRow(pq *sqlparser.ParsedQuery, buf *bytes2.Buffer, fields []*querypb.Field, row *querypb.Row, skipFields map[string]bool) error {
- bindLocations := pq.BindLocations()
- if len(fields) < len(bindLocations) {
+func (tp *TablePlan) appendFromRow(buf *bytes2.Buffer, row *querypb.Row) error {
+ bindLocations := tp.BulkInsertValues.BindLocations()
+ if len(tp.Fields) < len(bindLocations) {
return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "wrong number of fields: got %d fields for %d bind locations ",
- len(fields), len(bindLocations))
+ len(tp.Fields), len(bindLocations))
}
type colInfo struct {
typ querypb.Type
length int64
offset int64
+ field *querypb.Field
}
rowInfo := make([]*colInfo, 0)
offset := int64(0)
- for i, field := range fields { // collect info required for fields to be bound
+ for i, field := range tp.Fields { // collect info required for fields to be bound
length := row.Lengths[i]
- if !skipFields[strings.ToLower(field.Name)] {
+ if !tp.FieldsToSkip[strings.ToLower(field.Name)] {
rowInfo = append(rowInfo, &colInfo{
typ: field.Type,
length: length,
offset: offset,
+ field: field,
})
}
if length > 0 {
@@ -652,7 +675,7 @@ func appendFromRow(pq *sqlparser.ParsedQuery, buf *bytes2.Buffer, fields []*quer
var offsetQuery int
for i, loc := range bindLocations {
col := rowInfo[i]
- buf.WriteString(pq.Query[offsetQuery:loc.Offset])
+ buf.WriteString(tp.BulkInsertValues.Query[offsetQuery:loc.Offset])
typ := col.typ
switch typ {
@@ -674,12 +697,25 @@ func appendFromRow(pq *sqlparser.ParsedQuery, buf *bytes2.Buffer, fields []*quer
// -1 means a null variable; serialize it directly
buf.WriteString(sqltypes.NullStr)
} else {
- vv := sqltypes.MakeTrusted(typ, row.Values[col.offset:col.offset+col.length])
+ raw := row.Values[col.offset : col.offset+col.length]
+ var vv sqltypes.Value
+
+ if conversion, ok := tp.ConvertCharset[col.field.Name]; ok && col.length > 0 {
+ // Non-null string value, for which we have a charset conversion instruction
+ out, err := tp.convertStringCharset(raw, conversion, col.field.Name)
+ if err != nil {
+ return err
+ }
+ vv = sqltypes.MakeTrusted(typ, out)
+ } else {
+ vv = sqltypes.MakeTrusted(typ, raw)
+ }
+
vv.EncodeSQLBytes2(buf)
}
}
offsetQuery = loc.Offset + loc.Length
}
- buf.WriteString(pq.Query[offsetQuery:])
+ buf.WriteString(tp.BulkInsertValues.Query[offsetQuery:])
return nil
}
diff --git a/go/vt/vttablet/tabletmanager/vreplication/utils.go b/go/vt/vttablet/tabletmanager/vreplication/utils.go
index 21c3a61c9f1..9e3ebb42f62 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/utils.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/utils.go
@@ -126,7 +126,12 @@ func isUnrecoverableError(err error) bool {
if err == nil {
return false
}
- if vterrors.Code(err) == vtrpcpb.Code_FAILED_PRECONDITION {
+ switch vterrors.Code(err) {
+ case vtrpcpb.Code_FAILED_PRECONDITION:
+ if vterrors.RxWrongTablet.MatchString(err.Error()) {
+		// If the chosen tablet type changes, say due to PRS/ERS, we should retry.
+ return false
+ }
return true
}
sqlErr, isSQLErr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError)
diff --git a/go/vt/vttablet/tabletmanager/vreplication/utils_test.go b/go/vt/vttablet/tabletmanager/vreplication/utils_test.go
new file mode 100644
index 00000000000..c00ed34a4d6
--- /dev/null
+++ b/go/vt/vttablet/tabletmanager/vreplication/utils_test.go
@@ -0,0 +1,86 @@
+/*
+Copyright 2024 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vreplication
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/mysql/sqlerror"
+ "vitess.io/vitess/go/vt/vterrors"
+
+ vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+)
+
+// TestIsUnrecoverableError tests the different error cases for isUnrecoverableError().
+func TestIsUnrecoverableError(t *testing.T) {
+ if runNoBlobTest {
+ t.Skip()
+ }
+
+ type testCase struct {
+ name string
+ err error
+ expected bool
+ }
+
+ testCases := []testCase{
+ {
+ name: "Nil error",
+ err: nil,
+ expected: false,
+ },
+ {
+ name: "vterrors.Code_FAILED_PRECONDITION",
+ err: vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "test error"),
+ expected: true,
+ },
+ {
+ name: "vterrors.Code_FAILED_PRECONDITION, WrongTablet",
+ err: vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "%s: %v, want: %v or %v", vterrors.WrongTablet, "PRIMARY", "REPLICA", nil),
+ expected: false,
+ },
+ {
+ name: "Non-SQL error",
+ err: errors.New("non-SQL error"),
+ expected: false,
+ },
+ {
+ name: "SQL error with ERUnknownError",
+ err: sqlerror.NewSQLError(sqlerror.ERUnknownError, "test SQL error", "test"),
+ expected: false,
+ },
+ {
+ name: "SQL error with ERAccessDeniedError",
+ err: sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, "access denied", "test"),
+ expected: true,
+ },
+ {
+ name: "SQL error with ERDataOutOfRange",
+ err: sqlerror.NewSQLError(sqlerror.ERDataOutOfRange, "data out of range", "test"),
+ expected: true,
+ },
+ }
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ result := isUnrecoverableError(tc.err)
+ require.Equal(t, tc.expected, result)
+ })
+ }
+}
diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go
index f2cb0a96e71..992618ed3eb 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go
@@ -163,7 +163,7 @@ func newVPlayer(vr *vreplicator, settings binlogplayer.VRSettings, copyState map
timeLastSaved: time.Now(),
tablePlans: make(map[string]*TablePlan),
phase: phase,
- throttlerAppName: throttlerapp.VCopierName.ConcatenateString(vr.throttlerAppName()),
+ throttlerAppName: throttlerapp.VPlayerName.ConcatenateString(vr.throttlerAppName()),
query: queryFunc,
commit: commitFunc,
batchMode: batchMode,
diff --git a/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go b/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go
index 3be0525dc88..4586cc761e8 100644
--- a/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go
+++ b/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go
@@ -31,6 +31,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "vitess.io/vitess/go/mysql/replication"
"vitess.io/vitess/go/vt/binlog/binlogplayer"
"vitess.io/vitess/go/vt/dbconfigs"
"vitess.io/vitess/go/vt/mysqlctl"
@@ -810,3 +811,59 @@ func waitForQueryResult(t *testing.T, dbc binlogplayer.DBClient, query, val stri
}
}
}
+
+func TestThrottlerAppNames(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ tablet := addTablet(100)
+ defer deleteTablet(tablet)
+ filter := &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{{
+ Match: "t1",
+ }},
+ }
+ bls := &binlogdatapb.BinlogSource{
+ Keyspace: env.KeyspaceName,
+ Shard: env.ShardName,
+ Filter: filter,
+ }
+ id := int32(1)
+ vsclient := newTabletConnector(tablet)
+ stats := binlogplayer.NewStats()
+ defer stats.Stop()
+ dbClient := playerEngine.dbClientFactoryFiltered()
+ err := dbClient.Connect()
+ require.NoError(t, err)
+ defer dbClient.Close()
+ dbName := dbClient.DBName()
+ // Ensure there's a dummy vreplication workflow record
+ _, err = dbClient.ExecuteFetch(fmt.Sprintf("insert into _vt.vreplication (id, workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name) values (%d, 'test_workflow', '', '', 99999, 99999, 0, 0, 'Running', '%s') on duplicate key update workflow='test', source='', pos='', max_tps=99999, max_replication_lag=99999, time_updated=0, transaction_timestamp=0, state='Running', db_name='%s'",
+ id, dbName, dbName), 1)
+ require.NoError(t, err)
+ defer func() {
+ _, err = dbClient.ExecuteFetch(fmt.Sprintf("delete from _vt.vreplication where id = %d", id), 1)
+ require.NoError(t, err)
+ }()
+ vr := newVReplicator(id, bls, vsclient, stats, dbClient, env.Mysqld, playerEngine)
+ settings, _, err := vr.loadSettings(ctx, newVDBClient(dbClient, stats))
+ require.NoError(t, err)
+
+ throttlerAppName := vr.throttlerAppName()
+ assert.Contains(t, throttlerAppName, "test_workflow")
+ assert.Contains(t, throttlerAppName, "vreplication")
+ assert.NotContains(t, throttlerAppName, "vcopier")
+ assert.NotContains(t, throttlerAppName, "vplayer")
+
+ vp := newVPlayer(vr, settings, nil, replication.Position{}, "")
+ assert.Contains(t, vp.throttlerAppName, "test_workflow")
+ assert.Contains(t, vp.throttlerAppName, "vreplication")
+ assert.Contains(t, vp.throttlerAppName, "vplayer")
+ assert.NotContains(t, vp.throttlerAppName, "vcopier")
+
+ vc := newVCopier(vr)
+ assert.Contains(t, vc.throttlerAppName, "test_workflow")
+ assert.Contains(t, vc.throttlerAppName, "vreplication")
+ assert.Contains(t, vc.throttlerAppName, "vcopier")
+ assert.NotContains(t, vc.throttlerAppName, "vplayer")
+}
diff --git a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go
index 2b770c1d4f4..854157b1546 100644
--- a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go
+++ b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer.go
@@ -88,7 +88,7 @@ type uvstreamer struct {
config *uvstreamerConfig
- vs *vstreamer //last vstreamer created in uvstreamer
+ vs *vstreamer // last vstreamer created in uvstreamer
}
type uvstreamerConfig struct {
@@ -138,6 +138,9 @@ func (uvs *uvstreamer) buildTablePlan() error {
uvs.plans = make(map[string]*tablePlan)
tableLastPKs := make(map[string]*binlogdatapb.TableLastPK)
for _, tablePK := range uvs.inTablePKs {
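+		// Fail early if a lastpk is provided without its fields spec; it cannot be interpreted correctly.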
+ if tablePK != nil && tablePK.Lastpk != nil && len(tablePK.Lastpk.Fields) == 0 {
+ return fmt.Errorf("lastpk for table %s has no fields defined", tablePK.TableName)
+ }
tableLastPKs[tablePK.TableName] = tablePK
}
tables := uvs.se.GetSchema()
@@ -313,7 +316,6 @@ func (uvs *uvstreamer) send2(evs []*binlogdatapb.VEvent) error {
}
behind := time.Now().UnixNano() - uvs.lastTimestampNs
uvs.setReplicationLagSeconds(behind / 1e9)
- //log.Infof("sbm set to %d", uvs.ReplicationLagSeconds)
var evs2 []*binlogdatapb.VEvent
if len(uvs.plans) > 0 {
evs2 = uvs.filterEvents(evs)
diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer_flaky_test.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_flaky_test.go
index 0eda0d6c52e..0fb9a841a7c 100644
--- a/go/vt/vttablet/tabletserver/vstreamer/vstreamer_flaky_test.go
+++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_flaky_test.go
@@ -83,6 +83,42 @@ func (tfe *TestFieldEvent) String() string {
return s
}
+// TestVStreamMissingFieldsInLastPK tests that we error out if the lastpk for a table is missing the fields spec.
+func TestVStreamMissingFieldsInLastPK(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ oldEngine := engine
+ engine = nil
+ oldEnv := env
+ env = nil
+ newEngine(t, ctx, "noblob")
+ defer func() {
+ engine = oldEngine
+ env = oldEnv
+ }()
+ execStatements(t, []string{
+ "create table t1(id int, blb blob, val varchar(4), primary key(id))",
+ })
+ defer execStatements(t, []string{
+ "drop table t1",
+ })
+ engine.se.Reload(context.Background())
+ var tablePKs []*binlogdatapb.TableLastPK
+ tablePKs = append(tablePKs, getTablePK("t1", 1))
+ for _, tpk := range tablePKs {
+ tpk.Lastpk.Fields = nil
+ }
+ filter := &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{{
+ Match: "t1",
+ Filter: "select * from t1",
+ }},
+ }
+ ch := make(chan []*binlogdatapb.VEvent)
+ err := vstream(ctx, t, "", tablePKs, filter, ch)
+ require.ErrorContains(t, err, "lastpk for table t1 has no fields defined")
+}
+
// TestNoBlob sets up a new environment with MySQL running with binlog_row_image set to noblob. It confirms that
// the VEvents created are correct: that they don't contain the missing columns and that the DataColumns bitmap is sent.
func TestNoBlob(t *testing.T) {
diff --git a/java/client/pom.xml b/java/client/pom.xml
index 0b40e76be7b..3de4ca2278d 100644
--- a/java/client/pom.xml
+++ b/java/client/pom.xml
@@ -5,7 +5,7 @@
io.vitess
vitess-parent
- 19.0.5
+ 19.0.6
vitess-client
diff --git a/java/example/pom.xml b/java/example/pom.xml
index efb327f363f..5ad445a2898 100644
--- a/java/example/pom.xml
+++ b/java/example/pom.xml
@@ -5,7 +5,7 @@
io.vitess
vitess-parent
- 19.0.5
+ 19.0.6
vitess-example
diff --git a/java/grpc-client/pom.xml b/java/grpc-client/pom.xml
index 5114fd48def..bced8e4ff54 100644
--- a/java/grpc-client/pom.xml
+++ b/java/grpc-client/pom.xml
@@ -5,7 +5,7 @@
io.vitess
vitess-parent
- 19.0.5
+ 19.0.6
vitess-grpc-client
diff --git a/java/jdbc/pom.xml b/java/jdbc/pom.xml
index eee476ea4df..310be0cd86d 100644
--- a/java/jdbc/pom.xml
+++ b/java/jdbc/pom.xml
@@ -5,7 +5,7 @@
io.vitess
vitess-parent
- 19.0.5
+ 19.0.6
vitess-jdbc
diff --git a/java/jdbc/src/test/java/io/vitess/jdbc/FieldWithMetadataTest.java b/java/jdbc/src/test/java/io/vitess/jdbc/FieldWithMetadataTest.java
index bcadc49d33a..26ad5fd11b3 100644
--- a/java/jdbc/src/test/java/io/vitess/jdbc/FieldWithMetadataTest.java
+++ b/java/jdbc/src/test/java/io/vitess/jdbc/FieldWithMetadataTest.java
@@ -16,6 +16,9 @@
package io.vitess.jdbc;
+import java.util.Set;
+import java.util.EnumSet;
+
import io.vitess.proto.Query;
import io.vitess.util.MysqlDefs;
import io.vitess.util.charset.CharsetMapping;
@@ -274,6 +277,16 @@ public void testNumericAndDateTimeEncoding() throws SQLException {
}
}
+ // Define the types to skip
+ Set<Query.Type> typesToSkip = EnumSet.of(
+ Query.Type.UNRECOGNIZED,
+ Query.Type.EXPRESSION,
+ Query.Type.HEXVAL,
+ Query.Type.HEXNUM,
+ Query.Type.BITNUM,
+ Query.Type.RAW
+ );
+
@Test
public void testPrecisionAdjustFactor() throws SQLException {
VitessConnection conn = getVitessConnection();
@@ -294,7 +307,8 @@ public void testPrecisionAdjustFactor() throws SQLException {
conn.setIncludedFields(Query.ExecuteOptions.IncludedFields.TYPE_AND_NAME);
for (Query.Type type : Query.Type.values()) {
- if (type == Query.Type.UNRECOGNIZED || type == Query.Type.EXPRESSION || type == Query.Type.HEXVAL || type == Query.Type.HEXNUM || type == Query.Type.BITNUM) {
+ // Skip if the type is in the set
+ if (typesToSkip.contains(type)) {
continue;
}
diff --git a/java/pom.xml b/java/pom.xml
index 9f1d57cfede..39f7a405643 100644
--- a/java/pom.xml
+++ b/java/pom.xml
@@ -11,7 +11,7 @@
io.vitess
vitess-parent
- 19.0.5
+ 19.0.6
pom
Vitess Java Client libraries [Parent]
diff --git a/proto/query.proto b/proto/query.proto
index 4d94fcb2c83..6ba19dc6691 100644
--- a/proto/query.proto
+++ b/proto/query.proto
@@ -215,6 +215,8 @@ enum Type {
// BITNUM specifies a base 2 binary type (unquoted varbinary).
// Properties: 34, IsText.
BITNUM = 4130;
+ // RAW specifies a type which won't be quoted: the value is used as-is while encoding.
+ RAW = 2084;
}
// Value represents a typed value.
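Note: per the new comment, a RAW value is meant to be passed through as-is rather than quoted when it is encoded back into SQL. A hedged Go sketch of constructing such a value (this assumes the Go protobufs have been regenerated so that querypb.Type_RAW exists; the SQL fragment is illustrative):

package main

import (
	"fmt"

	"vitess.io/vitess/go/sqltypes"
	querypb "vitess.io/vitess/go/vt/proto/query"
)

func main() {
	// MakeTrusted wraps the bytes without validation; with Type_RAW the stated
	// intent is that encoders emit these bytes unquoted.
	v := sqltypes.MakeTrusted(querypb.Type_RAW, []byte("CURRENT_TIMESTAMP"))
	fmt.Println(v.Type(), v.ToString())
}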
diff --git a/test.go b/test.go
index 2f8851e73a4..aca5e901bc9 100755
--- a/test.go
+++ b/test.go
@@ -77,7 +77,7 @@ For example:
// Flags
var (
flavor = flag.String("flavor", "mysql80", "comma-separated bootstrap flavor(s) to run against (when using Docker mode). Available flavors: all,"+flavors)
- bootstrapVersion = flag.String("bootstrap-version", "27.5", "the version identifier to use for the docker images")
+ bootstrapVersion = flag.String("bootstrap-version", "27.7", "the version identifier to use for the docker images")
runCount = flag.Int("runs", 1, "run each test this many times")
retryMax = flag.Int("retry", 3, "max number of retries, to detect flaky tests")
logPass = flag.Bool("log-pass", false, "log test output even if it passes")
@@ -111,7 +111,7 @@ const (
configFileName = "test/config.json"
// List of flavors for which a bootstrap Docker image is available.
- flavors = "mysql57,mysql80,percona,percona57,percona80"
+ flavors = "mysql80,percona80"
)
// Config is the overall object serialized in test/config.json.
diff --git a/test/templates/cluster_endtoend_test.tpl b/test/templates/cluster_endtoend_test.tpl
index 8abb7f1e4c6..4ef7f5d6835 100644
--- a/test/templates/cluster_endtoend_test.tpl
+++ b/test/templates/cluster_endtoend_test.tpl
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/test/templates/cluster_endtoend_test_docker.tpl b/test/templates/cluster_endtoend_test_docker.tpl
index 5b170db25e5..ddd9984126f 100644
--- a/test/templates/cluster_endtoend_test_docker.tpl
+++ b/test/templates/cluster_endtoend_test_docker.tpl
@@ -58,7 +58,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/test/templates/cluster_endtoend_test_mysql57.tpl b/test/templates/cluster_endtoend_test_mysql57.tpl
index 6ac17b2c395..1efeb7e9869 100644
--- a/test/templates/cluster_endtoend_test_mysql57.tpl
+++ b/test/templates/cluster_endtoend_test_mysql57.tpl
@@ -79,7 +79,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/test/templates/cluster_vitess_tester.tpl b/test/templates/cluster_vitess_tester.tpl
index 2b6ecddb730..32fca822b3a 100644
--- a/test/templates/cluster_vitess_tester.tpl
+++ b/test/templates/cluster_vitess_tester.tpl
@@ -58,7 +58,7 @@ jobs:
end_to_end:
- 'go/**/*.go'
- 'go/vt/sidecardb/**/*.sql'
- - 'go/test/endtoend/onlineddl/vrepl_suite/**'
+ - 'go/test/endtoend/vtgate/vitess_tester/**'
- 'test.go'
- 'Makefile'
- 'build.env'
@@ -74,7 +74,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-addons access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -117,7 +117,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
# install vitess tester
- go install github.com/vitessio/vitess-tester@eb953122baba163ed8ccaa6642458ee984f5d7e4
+ go install github.com/vitessio/vitess-tester@89dd933a9ea0e15f69ca58b9c8ea09a358762cca
- name: Setup launchable dependencies
if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
@@ -148,9 +148,9 @@ jobs:
# We go over all the directories in the given path.
# If there is a vschema file there, we use it, otherwise we let vitess-tester autogenerate it.
if [ -f $dir/vschema.json ]; then
- vitess-tester --sharded --xunit --test-dir $dir --vschema "$dir"vschema.json
+ vitess-tester --xunit --vschema "$dir"vschema.json $dir/*.test
else
- vitess-tester --sharded --xunit --test-dir $dir
+ vitess-tester --sharded --xunit $dir/*.test
fi
# Number the reports by changing their file names.
mv report.xml report"$i".xml
diff --git a/test/templates/dockerfile.tpl b/test/templates/dockerfile.tpl
index a31ccbe3103..5fa0ea144f6 100644
--- a/test/templates/dockerfile.tpl
+++ b/test/templates/dockerfile.tpl
@@ -1,4 +1,4 @@
-ARG bootstrap_version=27.5
+ARG bootstrap_version=27.7
ARG image="vitess/bootstrap:${bootstrap_version}-{{.Platform}}"
FROM "${image}"
@@ -15,7 +15,7 @@ RUN wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_
RUN apt-get update
RUN apt-get install -y gnupg2
RUN dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
-RUN percona-release enable-only pxb-24
+RUN percona-release enable-only tools
RUN apt-get update
RUN apt-get install -y percona-xtrabackup-24
{{end}}
diff --git a/test/templates/unit_test.tpl b/test/templates/unit_test.tpl
index 2beb8fac9ad..6445683ffd1 100644
--- a/test/templates/unit_test.tpl
+++ b/test/templates/unit_test.tpl
@@ -69,7 +69,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
uses: actions/setup-go@v5
with:
- go-version: 1.22.5
+ go-version: 1.22.7
- name: Setup github.com/slackhq/vitess-additions access token
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
diff --git a/vitess-mixin/go.mod b/vitess-mixin/go.mod
index d38b8bc4d80..fcb2de67107 100644
--- a/vitess-mixin/go.mod
+++ b/vitess-mixin/go.mod
@@ -1,6 +1,6 @@
module vitess-mixin
-go 1.13
+go 1.22.7
require (
github.com/Azure/go-autorest/autorest v0.11.1 // indirect
diff --git a/web/vtadmin/package-lock.json b/web/vtadmin/package-lock.json
index 7f5755e5cc4..ce44946a616 100644
--- a/web/vtadmin/package-lock.json
+++ b/web/vtadmin/package-lock.json
@@ -9588,7 +9588,7 @@
"whatwg-encoding": "^2.0.0",
"whatwg-mimetype": "^3.0.0",
"whatwg-url": "^12.0.1",
- "ws": "^8.13.0",
+ "ws": "^8.17.1",
"xml-name-validator": "^4.0.0"
},
"engines": {
@@ -10009,11 +10009,11 @@
}
},
"node_modules/micromatch": {
- "version": "4.0.5",
- "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz",
- "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==",
+ "version": "4.0.8",
+ "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz",
+ "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==",
"dependencies": {
- "braces": "^3.0.2",
+ "braces": "^3.0.3",
"picomatch": "^2.3.1"
},
"engines": {
@@ -17478,9 +17478,9 @@
}
},
"node_modules/ws": {
- "version": "8.13.0",
- "resolved": "https://registry.npmjs.org/ws/-/ws-8.13.0.tgz",
- "integrity": "sha512-x9vcZYTrFPC7aSIbj7sRCYo7L/Xb8Iy+pW0ng0wt2vCJv7M9HOMy0UoN3rr+IFC7hb7vXoqS+P9ktyLLLhO+LA==",
+ "version": "8.18.0",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.0.tgz",
+ "integrity": "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==",
"dev": true,
"engines": {
"node": ">=10.0.0"
@@ -24315,7 +24315,7 @@
"whatwg-encoding": "^2.0.0",
"whatwg-mimetype": "^3.0.0",
"whatwg-url": "^12.0.1",
- "ws": "^8.13.0",
+ "ws": "^8.17.1",
"xml-name-validator": "^4.0.0"
}
},
@@ -24641,11 +24641,11 @@
"dev": true
},
"micromatch": {
- "version": "4.0.5",
- "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz",
- "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==",
+ "version": "4.0.8",
+ "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz",
+ "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==",
"requires": {
- "braces": "^3.0.2",
+ "braces": "^3.0.3",
"picomatch": "^2.3.1"
}
},
@@ -29804,9 +29804,9 @@
}
},
"ws": {
- "version": "8.13.0",
- "resolved": "https://registry.npmjs.org/ws/-/ws-8.13.0.tgz",
- "integrity": "sha512-x9vcZYTrFPC7aSIbj7sRCYo7L/Xb8Iy+pW0ng0wt2vCJv7M9HOMy0UoN3rr+IFC7hb7vXoqS+P9ktyLLLhO+LA==",
+ "version": "8.18.0",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.0.tgz",
+ "integrity": "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==",
"dev": true,
"requires": {}
},
diff --git a/web/vtadmin/src/proto/vtadmin.d.ts b/web/vtadmin/src/proto/vtadmin.d.ts
index 69f7891e271..97a1deeb1ee 100644
--- a/web/vtadmin/src/proto/vtadmin.d.ts
+++ b/web/vtadmin/src/proto/vtadmin.d.ts
@@ -33979,7 +33979,8 @@ export namespace query {
EXPRESSION = 31,
HEXNUM = 4128,
HEXVAL = 4129,
- BITNUM = 4130
+ BITNUM = 4130,
+ RAW = 2084
}
/** Properties of a Value. */
diff --git a/web/vtadmin/src/proto/vtadmin.js b/web/vtadmin/src/proto/vtadmin.js
index 3263bc3eb51..94537540930 100644
--- a/web/vtadmin/src/proto/vtadmin.js
+++ b/web/vtadmin/src/proto/vtadmin.js
@@ -80140,6 +80140,7 @@ export const query = $root.query = (() => {
* @property {number} HEXNUM=4128 HEXNUM value
* @property {number} HEXVAL=4129 HEXVAL value
* @property {number} BITNUM=4130 BITNUM value
+ * @property {number} RAW=2084 RAW value
*/
query.Type = (function() {
const valuesById = {}, values = Object.create(valuesById);
@@ -80178,6 +80179,7 @@ export const query = $root.query = (() => {
values[valuesById[4128] = "HEXNUM"] = 4128;
values[valuesById[4129] = "HEXVAL"] = 4129;
values[valuesById[4130] = "BITNUM"] = 4130;
+ values[valuesById[2084] = "RAW"] = 2084;
return values;
})();
@@ -80366,6 +80368,7 @@ export const query = $root.query = (() => {
case 4128:
case 4129:
case 4130:
+ case 2084:
break;
}
if (message.value != null && message.hasOwnProperty("value"))
@@ -80533,6 +80536,10 @@ export const query = $root.query = (() => {
case 4130:
message.type = 4130;
break;
+ case "RAW":
+ case 2084:
+ message.type = 2084;
+ break;
}
if (object.value != null)
if (typeof object.value === "string")
@@ -80805,6 +80812,7 @@ export const query = $root.query = (() => {
case 4128:
case 4129:
case 4130:
+ case 2084:
break;
}
if (message.value != null && message.hasOwnProperty("value"))
@@ -80981,6 +80989,10 @@ export const query = $root.query = (() => {
case 4130:
message.type = 4130;
break;
+ case "RAW":
+ case 2084:
+ message.type = 2084;
+ break;
}
if (object.value != null)
if (typeof object.value === "string")
@@ -82471,6 +82483,7 @@ export const query = $root.query = (() => {
case 4128:
case 4129:
case 4130:
+ case 2084:
break;
}
if (message.table != null && message.hasOwnProperty("table"))
@@ -82664,6 +82677,10 @@ export const query = $root.query = (() => {
case 4130:
message.type = 4130;
break;
+ case "RAW":
+ case 2084:
+ message.type = 2084;
+ break;
}
if (object.table != null)
message.table = String(object.table);
@@ -103539,6 +103556,7 @@ export const vschema = $root.vschema = (() => {
case 4128:
case 4129:
case 4130:
+ case 2084:
break;
}
if (message.invisible != null && message.hasOwnProperty("invisible"))
@@ -103732,6 +103750,10 @@ export const vschema = $root.vschema = (() => {
case 4130:
message.type = 4130;
break;
+ case "RAW":
+ case 2084:
+ message.type = 2084;
+ break;
}
if (object.invisible != null)
message.invisible = Boolean(object.invisible);