From 0d724e37b36bf276aa095d4571213cd035eafad0 Mon Sep 17 00:00:00 2001 From: Jan Piotrowski Date: Mon, 4 Dec 2023 21:17:30 +0100 Subject: [PATCH 1/3] chore: rename distinctOn preview feature to nativeDistinct (#4518) --- psl/psl-core/src/common/preview_features.rs | 8 ++++---- psl/psl/tests/config/generators.rs | 2 +- query-engine/query-structure/src/query_arguments.rs | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/psl/psl-core/src/common/preview_features.rs b/psl/psl-core/src/common/preview_features.rs index ca9da322933..84caa7138e8 100644 --- a/psl/psl-core/src/common/preview_features.rs +++ b/psl/psl-core/src/common/preview_features.rs @@ -34,7 +34,7 @@ macro_rules! features { }; } -// (Usually) Append-only list of features. +// (Usually) Append-only list of features. (alphabetically sorted) features!( AggregateApi, AtomicNumberOperations, @@ -43,7 +43,6 @@ features!( ConnectOrCreate, CreateMany, DataProxy, - DistinctOn, Deno, Distinct, DriverAdapters, @@ -65,6 +64,7 @@ features!( MultiSchema, NamedConstraints, NApi, + NativeDistinct, NativeTypes, OrderByAggregateGroup, OrderByNulls, @@ -79,16 +79,16 @@ features!( Views, ); -/// Generator preview features +/// Generator preview features (alphabetically sorted) pub const ALL_PREVIEW_FEATURES: FeatureMap = FeatureMap { active: enumflags2::make_bitflags!(PreviewFeature::{ Deno - | DistinctOn | DriverAdapters | FullTextIndex | FullTextSearch | Metrics | MultiSchema + | NativeDistinct | PostgresqlExtensions | Tracing | Views diff --git a/psl/psl/tests/config/generators.rs b/psl/psl/tests/config/generators.rs index 2f0708b13dc..c5380c271bf 100644 --- a/psl/psl/tests/config/generators.rs +++ b/psl/psl/tests/config/generators.rs @@ -258,7 +258,7 @@ fn nice_error_for_unknown_generator_preview_feature() { .unwrap_err(); let expectation = expect![[r#" - error: The preview feature "foo" is not known. Expected one of: distinctOn, deno, driverAdapters, fullTextIndex, fullTextSearch, metrics, multiSchema, postgresqlExtensions, tracing, views + error: The preview feature "foo" is not known. Expected one of: deno, driverAdapters, fullTextIndex, fullTextSearch, metrics, multiSchema, nativeDistinct, postgresqlExtensions, tracing, views --> schema.prisma:3  |   2 |  provider = "prisma-client-js" diff --git a/query-engine/query-structure/src/query_arguments.rs b/query-engine/query-structure/src/query_arguments.rs index 16eaa189dd8..1d75866db67 100644 --- a/query-engine/query-structure/src/query_arguments.rs +++ b/query-engine/query-structure/src/query_arguments.rs @@ -86,7 +86,7 @@ impl QueryArguments { .schema .configuration .preview_features() - .contains(PreviewFeature::DistinctOn); + .contains(PreviewFeature::NativeDistinct); let connector_can_distinct_in_db = self .model() From a26fa4c08aabe624756a915fddecc8897d3296e3 Mon Sep 17 00:00:00 2001 From: Alexey Orlenko Date: Tue, 5 Dec 2023 17:30:54 +0100 Subject: [PATCH 2/3] ci: fix wasm build script (#4519) The build script had an invalid `sed` command with an extra `''` argument that caused it to fail with ``` sed: can't read s/name = "query_engine_wasm"/name = "query_engine"/g: No such file or directory ``` Example: https://github.com/prisma/prisma-engines/actions/runs/7090582268/job/19297872413 This is reproducible both on CI and locally for me. Perhaps it was written for BSD sed and doesn't work with GNU sed (so it always fails on Linux and also fails on macOS inside prisma-engines Nix flake but maybe it works on macOS without Nix)? 
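For illustration only (not part of the original patch), here is how the two `sed` dialects differ, following the BSD-vs-GNU hypothesis above:

```
# BSD/macOS sed: the backup suffix for -i is a separate argument, so an
# in-place edit with no backup is written with an explicit empty string:
sed -i '' 's/name = "query_engine_wasm"/name = "query_engine"/g' Cargo.toml

# GNU sed: the suffix must be attached to -i (or omitted entirely). A standalone ''
# is parsed as the sed script, and the real script is then treated as an input file:
#   sed: can't read s/name = "query_engine_wasm"/name = "query_engine"/g: No such file or directory
sed -i 's/name = "query_engine_wasm"/name = "query_engine"/g' Cargo.toml
```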
Because of this, a broken package was published from CI. The commit fixes the `sed` command and adds `set -e` so that errors like this would fail CI instead of silently continuing and doing wrong things. --- query-engine/query-engine-wasm/build.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/query-engine/query-engine-wasm/build.sh b/query-engine/query-engine-wasm/build.sh index 184e4baf354..a5b4859e82e 100755 --- a/query-engine/query-engine-wasm/build.sh +++ b/query-engine/query-engine-wasm/build.sh @@ -1,5 +1,7 @@ #!/bin/bash +set -e + # Call this script as `./build.sh ` OUT_VERSION="$1" @@ -12,7 +14,7 @@ OUT_NPM_NAME="@prisma/query-engine-wasm" # to avoid conflicts with libquery's `name = "query_engine"` library name declaration. # This little `sed -i` trick below is a hack to publish "@prisma/query-engine-wasm" # with the same binding filenames currently expected by the Prisma Client. -sed -i '' 's/name = "query_engine_wasm"/name = "query_engine"/g' Cargo.toml +sed -i 's/name = "query_engine_wasm"/name = "query_engine"/g' Cargo.toml # use `wasm-pack build --release` on CI only if [[ -z "$BUILDKITE" ]] && [[ -z "$GITHUB_ACTIONS" ]]; then @@ -23,7 +25,7 @@ fi wasm-pack build $BUILD_PROFILE --target $OUT_TARGET -sed -i '' 's/name = "query_engine"/name = "query_engine_wasm"/g' Cargo.toml +sed -i 's/name = "query_engine"/name = "query_engine_wasm"/g' Cargo.toml sleep 1 From 1964a5cbd8c8ab62f2ef25627fbcd8489aa2eca2 Mon Sep 17 00:00:00 2001 From: Flavian Desverne Date: Tue, 5 Dec 2023 17:35:18 +0100 Subject: [PATCH 3/3] feat: resolve relations with lateral joins (#4509) --- .envrc | 2 +- Cargo.lock | 1 + libs/prisma-value/src/lib.rs | 8 + .../src/cockroach_datamodel_connector.rs | 3 +- .../src/postgres_datamodel_connector.rs | 3 +- psl/psl-core/src/common/preview_features.rs | 2 + .../src/datamodel_connector/capabilities.rs | 3 +- psl/psl/tests/config/generators.rs | 2 +- quaint/src/ast/function.rs | 10 +- quaint/src/ast/function/json_array_agg.rs | 18 + quaint/src/ast/function/json_build_obj.rs | 15 + quaint/src/ast/join.rs | 9 + quaint/src/ast/select.rs | 9 + quaint/src/ast/table.rs | 9 + quaint/src/visitor.rs | 41 ++ .../tests/new/multi_schema.rs | 2 +- .../tests/new/regressions/prisma_7434.rs | 6 +- .../queries/batch/in_selection_batching.rs | 18 +- .../tests/queries/data_types/mod.rs | 1 + .../queries/data_types/through_relation.rs | 326 ++++++++++++++ .../filters/field_reference/enum_filter.rs | 77 ++++ .../queries/filters/field_reference/mod.rs | 1 + .../tests/queries/filters/one_relation.rs | 5 + .../nested_multi_order_pagination.rs | 8 +- .../order_and_pagination/nested_pagination.rs | 32 +- .../order_by_aggregation.rs | 18 +- .../order_by_dependent.rs | 10 +- .../tests/queries/simple/m2m.rs | 43 ++ .../tests/queries/simple/mod.rs | 1 + .../tests/queries/simple/one2m.rs | 126 ++++++ .../nested_update_many_inside_update.rs | 49 ++- .../nested_atomic_number_ops.rs | 4 +- .../nested_update_inside_update.rs | 4 +- .../mongodb-query-connector/src/error.rs | 1 + .../src/interface/connection.rs | 4 +- .../src/interface/transaction.rs | 4 +- .../src/output_meta.rs | 1 + .../mongodb-query-connector/src/projection.rs | 1 + .../mongodb-query-connector/src/value.rs | 1 + .../query-connector/src/interface.rs | 2 + .../query-connector/src/write_args.rs | 1 + .../connectors/sql-query-connector/Cargo.toml | 1 + .../src/database/connection.rs | 11 +- .../src/database/operations/coerce.rs | 186 ++++++++ .../src/database/operations/mod.rs | 1 + 
.../src/database/operations/read.rs | 166 ++++++- .../src/database/operations/update.rs | 15 +- .../src/database/transaction.rs | 10 +- .../sql-query-connector/src/filter/alias.rs | 6 +- .../sql-query-connector/src/filter/mod.rs | 4 +- .../sql-query-connector/src/filter/visitor.rs | 7 +- .../src/model_extensions/selection_result.rs | 7 +- .../sql-query-connector/src/ordering.rs | 31 +- .../src/query_builder/mod.rs | 1 + .../src/query_builder/select.rs | 407 ++++++++++++++++++ .../core/src/interpreter/interpreter_impl.rs | 7 + .../query_interpreters/nested_read.rs | 2 + .../interpreter/query_interpreters/read.rs | 76 +++- query-engine/core/src/query_ast/read.rs | 53 ++- query-engine/core/src/query_graph/mod.rs | 1 + .../core/src/query_graph_builder/builder.rs | 10 +- .../src/query_graph_builder/read/first.rs | 17 +- .../core/src/query_graph_builder/read/many.rs | 33 +- .../core/src/query_graph_builder/read/one.rs | 27 +- .../src/query_graph_builder/read/related.rs | 6 +- .../src/query_graph_builder/read/utils.rs | 109 ++++- .../src/query_graph_builder/write/create.rs | 2 +- .../src/query_graph_builder/write/delete.rs | 2 +- .../src/query_graph_builder/write/update.rs | 2 +- .../src/query_graph_builder/write/upsert.rs | 2 +- .../src/query_graph_builder/write/utils.rs | 1 + query-engine/core/src/response_ir/internal.rs | 286 +++++++++--- query-engine/core/src/result_ast/mod.rs | 37 +- query-engine/query-structure/src/field/mod.rs | 5 + .../query-structure/src/field_selection.rs | 77 +++- .../query-structure/src/filter/into_filter.rs | 3 +- .../src/projections/model_projection.rs | 18 +- .../query-structure/src/query_arguments.rs | 13 +- .../query-structure/src/selection_result.rs | 1 + query-engine/schema/src/query_schema.rs | 2 +- 80 files changed, 2307 insertions(+), 219 deletions(-) create mode 100644 quaint/src/ast/function/json_array_agg.rs create mode 100644 quaint/src/ast/function/json_build_obj.rs create mode 100644 query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/data_types/through_relation.rs create mode 100644 query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/field_reference/enum_filter.rs create mode 100644 query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/simple/one2m.rs create mode 100644 query-engine/connectors/sql-query-connector/src/database/operations/coerce.rs create mode 100644 query-engine/connectors/sql-query-connector/src/query_builder/select.rs diff --git a/.envrc b/.envrc index 5488da9e10e..431c45be687 100644 --- a/.envrc +++ b/.envrc @@ -20,7 +20,7 @@ export SIMPLE_TEST_MODE="yes" # Reduces the amount of generated `relation_link_t ### QE specific logging vars ### export QE_LOG_LEVEL=debug # Set it to "trace" to enable query-graph debugging logs # export PRISMA_RENDER_DOT_FILE=1 # Uncomment to enable rendering a dot file of the Query Graph from an executed query. -# export FMT_SQL=1 # Uncomment it to enable logging formatted SQL queries +export FMT_SQL=1 # Uncomment it to enable logging formatted SQL queries ### Uncomment to run driver adapters tests. See query-engine-driver-adapters.yml workflow for how tests run in CI. 
# export EXTERNAL_TEST_EXECUTOR="napi" diff --git a/Cargo.lock b/Cargo.lock index 74f0b840d4f..93d70a3bae4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4939,6 +4939,7 @@ dependencies = [ "chrono", "cuid", "futures", + "hex", "itertools", "once_cell", "opentelemetry", diff --git a/libs/prisma-value/src/lib.rs b/libs/prisma-value/src/lib.rs index d797605eccc..43aff0156dd 100644 --- a/libs/prisma-value/src/lib.rs +++ b/libs/prisma-value/src/lib.rs @@ -304,6 +304,14 @@ impl PrismaValue { _ => None, } } + + pub fn as_json(&self) -> Option<&String> { + if let Self::Json(v) = self { + Some(v) + } else { + None + } + } } impl fmt::Display for PrismaValue { diff --git a/psl/builtin-connectors/src/cockroach_datamodel_connector.rs b/psl/builtin-connectors/src/cockroach_datamodel_connector.rs index 5456deb59df..4ab77cf4563 100644 --- a/psl/builtin-connectors/src/cockroach_datamodel_connector.rs +++ b/psl/builtin-connectors/src/cockroach_datamodel_connector.rs @@ -58,7 +58,8 @@ const CAPABILITIES: ConnectorCapabilities = enumflags2::make_bitflags!(Connector FilteredInlineChildNestedToOneDisconnect | InsertReturning | UpdateReturning | - RowIn + RowIn | + LateralJoin }); const SCALAR_TYPE_DEFAULTS: &[(ScalarType, CockroachType)] = &[ diff --git a/psl/builtin-connectors/src/postgres_datamodel_connector.rs b/psl/builtin-connectors/src/postgres_datamodel_connector.rs index a1bb73f8bd1..697b4b9c12b 100644 --- a/psl/builtin-connectors/src/postgres_datamodel_connector.rs +++ b/psl/builtin-connectors/src/postgres_datamodel_connector.rs @@ -66,7 +66,8 @@ const CAPABILITIES: ConnectorCapabilities = enumflags2::make_bitflags!(Connector InsertReturning | UpdateReturning | RowIn | - DistinctOn + DistinctOn | + LateralJoin }); pub struct PostgresDatamodelConnector; diff --git a/psl/psl-core/src/common/preview_features.rs b/psl/psl-core/src/common/preview_features.rs index 84caa7138e8..5b6349aa2b9 100644 --- a/psl/psl-core/src/common/preview_features.rs +++ b/psl/psl-core/src/common/preview_features.rs @@ -77,6 +77,7 @@ features!( TransactionApi, UncheckedScalarInputs, Views, + RelationJoins ); /// Generator preview features (alphabetically sorted) @@ -92,6 +93,7 @@ pub const ALL_PREVIEW_FEATURES: FeatureMap = FeatureMap { | PostgresqlExtensions | Tracing | Views + | RelationJoins }), deprecated: enumflags2::make_bitflags!(PreviewFeature::{ AtomicNumberOperations diff --git a/psl/psl-core/src/datamodel_connector/capabilities.rs b/psl/psl-core/src/datamodel_connector/capabilities.rs index 3c0c1bbe47e..67c8a56c957 100644 --- a/psl/psl-core/src/datamodel_connector/capabilities.rs +++ b/psl/psl-core/src/datamodel_connector/capabilities.rs @@ -104,7 +104,8 @@ capabilities!( InsertReturning, UpdateReturning, RowIn, // Connector supports (a, b) IN (c, d) expression. - DistinctOn // Connector supports DB-level distinct (e.g. postgres) + DistinctOn, // Connector supports DB-level distinct (e.g. postgres) + LateralJoin, ); /// Contains all capabilities that the connector is able to serve. diff --git a/psl/psl/tests/config/generators.rs b/psl/psl/tests/config/generators.rs index c5380c271bf..fd003fe3489 100644 --- a/psl/psl/tests/config/generators.rs +++ b/psl/psl/tests/config/generators.rs @@ -258,7 +258,7 @@ fn nice_error_for_unknown_generator_preview_feature() { .unwrap_err(); let expectation = expect![[r#" - error: The preview feature "foo" is not known. 
Expected one of: deno, driverAdapters, fullTextIndex, fullTextSearch, metrics, multiSchema, nativeDistinct, postgresqlExtensions, tracing, views + error: The preview feature "foo" is not known. Expected one of: distinctOn, deno, driverAdapters, fullTextIndex, fullTextSearch, metrics, multiSchema, postgresqlExtensions, tracing, views, relationJoins --> schema.prisma:3  |   2 |  provider = "prisma-client-js" diff --git a/quaint/src/ast/function.rs b/quaint/src/ast/function.rs index 5b637379548..3bcc24c4b07 100644 --- a/quaint/src/ast/function.rs +++ b/quaint/src/ast/function.rs @@ -3,6 +3,8 @@ mod average; mod coalesce; mod concat; mod count; +mod json_array_agg; +mod json_build_obj; #[cfg(any(feature = "postgresql", feature = "mysql"))] mod json_extract; #[cfg(any(feature = "postgresql", feature = "mysql"))] @@ -28,6 +30,8 @@ pub use average::*; pub use coalesce::*; pub use concat::*; pub use count::*; +pub use json_array_agg::*; +pub use json_build_obj::*; #[cfg(any(feature = "postgresql", feature = "mysql"))] pub use json_extract::*; #[cfg(any(feature = "postgresql", feature = "mysql"))] @@ -98,6 +102,8 @@ pub(crate) enum FunctionType<'a> { JsonExtractFirstArrayElem(JsonExtractFirstArrayElem<'a>), #[cfg(any(feature = "postgresql", feature = "mysql"))] JsonUnquote(JsonUnquote<'a>), + JsonArrayAgg(JsonArrayAgg<'a>), + JsonBuildObject(JsonBuildObject<'a>), #[cfg(any(feature = "postgresql", feature = "mysql"))] TextSearch(TextSearch<'a>), #[cfg(any(feature = "postgresql", feature = "mysql"))] @@ -154,5 +160,7 @@ function!( Minimum, Maximum, Coalesce, - Concat + Concat, + JsonArrayAgg, + JsonBuildObject ); diff --git a/quaint/src/ast/function/json_array_agg.rs b/quaint/src/ast/function/json_array_agg.rs new file mode 100644 index 00000000000..ed7c3fd6422 --- /dev/null +++ b/quaint/src/ast/function/json_array_agg.rs @@ -0,0 +1,18 @@ +use crate::prelude::*; + +#[derive(Debug, Clone, PartialEq)] +pub struct JsonArrayAgg<'a> { + pub(crate) expr: Box>, +} + +/// Builds a JSON array out of a list of values. +pub fn json_array_agg<'a, E>(expr: E) -> Function<'a> +where + E: Into>, +{ + let fun = JsonArrayAgg { + expr: Box::new(expr.into()), + }; + + fun.into() +} diff --git a/quaint/src/ast/function/json_build_obj.rs b/quaint/src/ast/function/json_build_obj.rs new file mode 100644 index 00000000000..0578d63e7c5 --- /dev/null +++ b/quaint/src/ast/function/json_build_obj.rs @@ -0,0 +1,15 @@ +use std::borrow::Cow; + +use crate::prelude::*; + +#[derive(Debug, Clone, PartialEq)] +pub struct JsonBuildObject<'a> { + pub(crate) exprs: Vec<(Cow<'a, str>, Expression<'a>)>, +} + +/// Builds a JSON object out of a list of key-value pairs. 
+pub fn json_build_object<'a>(exprs: Vec<(Cow<'a, str>, Expression<'a>)>) -> Function<'a> { + let fun = JsonBuildObject { exprs }; + + fun.into() +} diff --git a/quaint/src/ast/join.rs b/quaint/src/ast/join.rs index 7387250f870..158b39a0e4c 100644 --- a/quaint/src/ast/join.rs +++ b/quaint/src/ast/join.rs @@ -5,6 +5,7 @@ use crate::ast::{ConditionTree, Table}; pub struct JoinData<'a> { pub(crate) table: Table<'a>, pub(crate) conditions: ConditionTree<'a>, + pub(crate) lateral: bool, } impl<'a> JoinData<'a> { @@ -13,8 +14,14 @@ impl<'a> JoinData<'a> { Self { table: table.into(), conditions: ConditionTree::NoCondition, + lateral: false, } } + + pub fn lateral(mut self) -> Self { + self.lateral = true; + self + } } impl<'a, T> From for JoinData<'a> @@ -73,6 +80,7 @@ where JoinData { table: self.into(), conditions: conditions.into(), + lateral: false, } } } @@ -90,6 +98,7 @@ impl<'a> Joinable<'a> for JoinData<'a> { JoinData { table: self.table, conditions, + lateral: false, } } } diff --git a/quaint/src/ast/select.rs b/quaint/src/ast/select.rs index 0c798d5227a..f6ba4059975 100644 --- a/quaint/src/ast/select.rs +++ b/quaint/src/ast/select.rs @@ -402,6 +402,15 @@ impl<'a> Select<'a> { self } + pub fn left_join_lateral(self, join: J) -> Self + where + J: Into>, + { + let join_data: JoinData = join.into(); + + self.left_join(join_data.lateral()) + } + /// Adds `RIGHT JOIN` clause to the query. /// /// ```rust diff --git a/quaint/src/ast/table.rs b/quaint/src/ast/table.rs index 4eca73f27bc..d09c7ecdfef 100644 --- a/quaint/src/ast/table.rs +++ b/quaint/src/ast/table.rs @@ -204,6 +204,15 @@ impl<'a> Table<'a> { self } + pub fn left_join_lateral(self, join: J) -> Self + where + J: Into>, + { + let join_data: JoinData = join.into(); + + self.left_join(join_data.lateral()) + } + /// Adds an `INNER JOIN` clause to the query, specifically for that table. /// Useful to positionally add a JOIN clause in case you are selecting from multiple tables. 
/// diff --git a/quaint/src/visitor.rs b/quaint/src/visitor.rs index 18dee697722..57159bd1e9c 100644 --- a/quaint/src/visitor.rs +++ b/quaint/src/visitor.rs @@ -188,18 +188,38 @@ pub trait Visitor<'a> { match j { Join::Inner(data) => { self.write(" INNER JOIN ")?; + + if data.lateral { + self.write("LATERAL ")?; + } + self.visit_join_data(data)?; } Join::Left(data) => { self.write(" LEFT JOIN ")?; + + if data.lateral { + self.write("LATERAL ")?; + } + self.visit_join_data(data)?; } Join::Right(data) => { self.write(" RIGHT JOIN ")?; + + if data.lateral { + self.write("LATERAL ")?; + } + self.visit_join_data(data)?; } Join::Full(data) => { self.write(" FULL JOIN ")?; + + if data.lateral { + self.write("LATERAL ")?; + } + self.visit_join_data(data)?; } } @@ -1112,6 +1132,27 @@ pub trait Visitor<'a> { FunctionType::Concat(concat) => { self.visit_concat(concat)?; } + FunctionType::JsonArrayAgg(array_agg) => { + self.write("JSON_AGG")?; + self.surround_with("(", ")", |s| s.visit_expression(*array_agg.expr))?; + } + FunctionType::JsonBuildObject(build_obj) => { + let len = build_obj.exprs.len(); + + self.write("JSON_BUILD_OBJECT")?; + self.surround_with("(", ")", |s| { + for (i, (name, expr)) in build_obj.exprs.into_iter().enumerate() { + s.visit_raw_value(Value::text(name))?; + s.write(", ")?; + s.visit_expression(expr)?; + if i < (len - 1) { + s.write(", ")?; + } + } + + Ok(()) + })?; + } }; if let Some(alias) = fun.alias { diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/multi_schema.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/multi_schema.rs index db0f020e029..29c93689f54 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/multi_schema.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/multi_schema.rs @@ -434,7 +434,7 @@ mod multi_schema { insta::assert_snapshot!( run_query!(&runner, r#" query { - findManyCategoriesOnPosts(where: {postId: {gt: 0}}) { + findManyCategoriesOnPosts(orderBy: [{ postId: asc }, { categoryId: asc }], where: {postId: {gt: 0}}) { category { name }, diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/prisma_7434.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/prisma_7434.rs index 3471f0c2d72..e5fa8388d66 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/prisma_7434.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/prisma_7434.rs @@ -11,8 +11,7 @@ mod not_in_batching { assert_error!( runner, "query { findManyTestModel(where: { id: { notIn: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] } }) { id }}", - 2029, - "Parameter limits for this database provider require this query to be split into multiple queries, but the negation filters used prevent the query from being split. Please reduce the used values in the query." + 2029 // QueryParameterLimitExceeded ); Ok(()) @@ -30,8 +29,7 @@ mod not_in_batching_cockroachdb { assert_error!( runner, "query { findManyTestModel(where: { id: { notIn: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] } }) { id }}", - 2029, - "Parameter limits for this database provider require this query to be split into multiple queries, but the negation filters used prevent the query from being split. Please reduce the used values in the query." 
+ 2029 // QueryParameterLimitExceeded ); Ok(()) diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/batch/in_selection_batching.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/batch/in_selection_batching.rs index e2b21fc215e..f5e7face676 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/batch/in_selection_batching.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/batch/in_selection_batching.rs @@ -35,7 +35,9 @@ mod isb { } // "batching of IN queries" should "work when having more than the specified amount of items" - #[connector_test] + // TODO(joins): Excluded because we have no support for batched queries with joins. In practice, it should happen under much less circumstances + // TODO(joins): than with the query-based strategy, because we don't issue `WHERE IN (parent_ids)` queries anymore to resolve relations. + #[connector_test(exclude_features("relationJoins"))] async fn in_more_items(runner: Runner) -> TestResult<()> { create_test_data(&runner).await?; @@ -51,7 +53,9 @@ mod isb { } // "ascending ordering of batched IN queries" should "work when having more than the specified amount of items" - #[connector_test] + // TODO(joins): Excluded because we have no support for batched queries with joins. In practice, it should happen under much less circumstances + // TODO(joins): than with the query-based strategy, because we don't issue `WHERE IN (parent_ids)` queries anymore to resolve relations. + #[connector_test(exclude_features("relationJoins"))] async fn asc_in_ordering(runner: Runner) -> TestResult<()> { create_test_data(&runner).await?; @@ -67,7 +71,9 @@ mod isb { } // "ascending ordering of batched IN queries" should "work when having more than the specified amount of items" - #[connector_test] + // TODO(joins): Excluded because we have no support for batched queries with joins. In practice, it should happen under much less circumstances + // TODO(joins): than with the query-based strategy, because we don't issue `WHERE IN (parent_ids)` queries anymore to resolve relations. + #[connector_test(exclude_features("relationJoins"))] async fn desc_in_ordering(runner: Runner) -> TestResult<()> { create_test_data(&runner).await?; @@ -91,8 +97,7 @@ mod isb { r#"query { findManyA(where: {id: { in: [5,4,3,2,1,1,1,2,3,4,5,6,7,6,5,4,3,2,1,2,3,4,5,6] }}, orderBy: { b: { as: { _count: asc } } }) { id } }"#, - 2029, - "Your query cannot be split into multiple queries because of the order by aggregation or relevance." + 2029 // QueryParameterLimitExceeded ); Ok(()) @@ -107,8 +112,7 @@ mod isb { r#"query { findManyA(where: {id: { in: [5,4,3,2,1,1,1,2,3,4,5,6,7,6,5,4,3,2,1,2,3,4,5,6] }}, orderBy: { _relevance: { fields: text, search: "something", sort: asc } }) { id } }"#, - 2029, - "Your query cannot be split into multiple queries because of the order by aggregation or relevance." 
+ 2029 // QueryParameterLimitExceeded ); Ok(()) diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/data_types/mod.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/data_types/mod.rs index ae1fe75b883..09ed6668f61 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/data_types/mod.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/data_types/mod.rs @@ -8,3 +8,4 @@ mod float; mod int; mod json; mod string; +mod through_relation; diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/data_types/through_relation.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/data_types/through_relation.rs new file mode 100644 index 00000000000..b2af72ab955 --- /dev/null +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/data_types/through_relation.rs @@ -0,0 +1,326 @@ +use indoc::indoc; +use query_engine_tests::*; + +#[test_suite] +mod scalar_relations { + fn schema_common() -> String { + let schema = indoc! { + r#"model Parent { + #id(id, Int, @id) + + children Child[] + } + + model Child { + #id(childId, Int, @id) + + parentId Int? + parent Parent? @relation(fields: [parentId], references: [id]) + + string String + int Int + bInt BigInt + float Float + bytes Bytes + bool Boolean + dt DateTime + } + "# + }; + + schema.to_owned() + } + + // TODO: fix https://github.com/prisma/team-orm/issues/684, https://github.com/prisma/team-orm/issues/685 and unexclude DAs + #[connector_test( + schema(schema_common), + exclude(Postgres("pg.js", "neon.js"), Vitess("planetscale.js")) + )] + async fn common_types(runner: Runner) -> TestResult<()> { + create_common_children(&runner).await?; + + insta::assert_snapshot!( + run_query!(&runner, r#"{ findManyParent { id children { childId string int bInt float bytes bool dt } } }"#), + @r###"{"data":{"findManyParent":[{"id":1,"children":[{"childId":1,"string":"abc","int":1,"bInt":"1","float":1.5,"bytes":"AQID","bool":false,"dt":"1900-10-10T01:10:10.001Z"},{"childId":2,"string":"def","int":-4234234,"bInt":"14324324234324","float":-2.54367,"bytes":"FDSF","bool":true,"dt":"1999-12-12T21:12:12.121Z"}]}]}}"### + ); + + insta::assert_snapshot!( + run_query!(&runner, r#"{ findUniqueParent(where: { id: 1 }) { id children { childId string int bInt float bytes bool dt } } }"#), + @r###"{"data":{"findUniqueParent":{"id":1,"children":[{"childId":1,"string":"abc","int":1,"bInt":"1","float":1.5,"bytes":"AQID","bool":false,"dt":"1900-10-10T01:10:10.001Z"},{"childId":2,"string":"def","int":-4234234,"bInt":"14324324234324","float":-2.54367,"bytes":"FDSF","bool":true,"dt":"1999-12-12T21:12:12.121Z"}]}}}"### + ); + + insta::assert_snapshot!( + run_query!(&runner, r#"{ findUniqueParent(where: { id: 2 }) { id children { childId string int bInt float bytes bool dt } } }"#), + @r###"{"data":{"findUniqueParent":null}}"### + ); + + Ok(()) + } + + fn schema_json() -> String { + let schema = indoc! { + r#"model Parent { + #id(id, Int, @id) + + children Child[] + } + + model Child { + #id(childId, Int, @id) + + parentId Int? + parent Parent? 
@relation(fields: [parentId], references: [id]) + + json Json + } + "# + }; + + schema.to_owned() + } + + #[connector_test(schema(schema_json), capabilities(Json), exclude(Mysql(5.6)))] + async fn json_type(runner: Runner) -> TestResult<()> { + create_child(&runner, r#"{ childId: 1, json: "1" }"#).await?; + create_child(&runner, r#"{ childId: 2, json: "{}" }"#).await?; + create_child(&runner, r#"{ childId: 3, json: "{\"a\": \"b\"}" }"#).await?; + create_child(&runner, r#"{ childId: 4, json: "[]" }"#).await?; + create_child(&runner, r#"{ childId: 5, json: "[1, -1, true, {\"a\": \"b\"}]" }"#).await?; + create_parent( + &runner, + r#"{ id: 1, children: { connect: [{ childId: 1 }, { childId: 2 }, { childId: 3 }, { childId: 4 }, { childId: 5 }] } }"#, + ) + .await?; + + insta::assert_snapshot!( + run_query!(&runner, r#"{ findManyParent(orderBy: { id: asc }) { id children { childId json } } }"#), + @r###"{"data":{"findManyParent":[{"id":1,"children":[{"childId":1,"json":"1"},{"childId":2,"json":"{}"},{"childId":3,"json":"{\"a\":\"b\"}"},{"childId":4,"json":"[]"},{"childId":5,"json":"[1,-1,true,{\"a\":\"b\"}]"}]}]}}"### + ); + + insta::assert_snapshot!( + run_query!(&runner, r#"{ findUniqueParent(where: { id: 1 }) { id children { childId json } } }"#), + @r###"{"data":{"findUniqueParent":{"id":1,"children":[{"childId":1,"json":"1"},{"childId":2,"json":"{}"},{"childId":3,"json":"{\"a\":\"b\"}"},{"childId":4,"json":"[]"},{"childId":5,"json":"[1,-1,true,{\"a\":\"b\"}]"}]}}}"### + ); + + Ok(()) + } + + fn schema_enum() -> String { + let schema = indoc! { + r#"model Parent { + #id(id, Int, @id) + + children Child[] + } + + model Child { + #id(childId, Int, @id) + + parentId Int? + parent Parent? @relation(fields: [parentId], references: [id]) + + enum Color + } + + enum Color { + Red + Green + Blue + } + "# + }; + + schema.to_owned() + } + + #[connector_test(schema(schema_enum), capabilities(Enums))] + async fn enum_type(runner: Runner) -> TestResult<()> { + create_child(&runner, r#"{ childId: 1, enum: Red }"#).await?; + create_child(&runner, r#"{ childId: 2, enum: Green }"#).await?; + create_child(&runner, r#"{ childId: 3, enum: Blue }"#).await?; + create_parent( + &runner, + r#"{ id: 1, children: { connect: [{ childId: 1 }, { childId: 2 }, { childId: 3 }] } }"#, + ) + .await?; + + insta::assert_snapshot!( + run_query!(&runner, r#"{ findManyParent(orderBy: { id :asc }) { id children { childId enum } } }"#), + @r###"{"data":{"findManyParent":[{"id":1,"children":[{"childId":1,"enum":"Red"},{"childId":2,"enum":"Green"},{"childId":3,"enum":"Blue"}]}]}}"### + ); + + insta::assert_snapshot!( + run_query!(&runner, r#"{ findUniqueParent(where: { id: 1 }) { id children { childId enum } } }"#), + @r###"{"data":{"findUniqueParent":{"id":1,"children":[{"childId":1,"enum":"Red"},{"childId":2,"enum":"Green"},{"childId":3,"enum":"Blue"}]}}}"### + ); + + Ok(()) + } + + fn schema_decimal() -> String { + let schema = indoc! { + r#"model Parent { + #id(id, Int, @id) + + children Child[] + } + + model Child { + #id(childId, Int, @id) + + parentId Int? + parent Parent? 
@relation(fields: [parentId], references: [id]) + + dec Decimal + } + "# + }; + + schema.to_owned() + } + + #[connector_test(schema(schema_decimal), capabilities(DecimalType))] + async fn decimal_type(runner: Runner) -> TestResult<()> { + create_child(&runner, r#"{ childId: 1, dec: "1" }"#).await?; + create_child(&runner, r#"{ childId: 2, dec: "-1" }"#).await?; + create_child(&runner, r#"{ childId: 3, dec: "123.45678910" }"#).await?; + create_parent( + &runner, + r#"{ id: 1, children: { connect: [{ childId: 1 }, { childId: 2 }, { childId: 3 }] } }"#, + ) + .await?; + + insta::assert_snapshot!( + run_query!(&runner, r#"{ findManyParent(orderBy: { id: asc }) { id children { childId dec } } }"#), + @r###"{"data":{"findManyParent":[{"id":1,"children":[{"childId":1,"dec":"1"},{"childId":2,"dec":"-1"},{"childId":3,"dec":"123.4567891"}]}]}}"### + ); + + insta::assert_snapshot!( + run_query!(&runner, r#"{ findUniqueParent(where: { id: 1 }) { id children { childId dec } } }"#), + @r###"{"data":{"findUniqueParent":{"id":1,"children":[{"childId":1,"dec":"1"},{"childId":2,"dec":"-1"},{"childId":3,"dec":"123.4567891"}]}}}"### + ); + + Ok(()) + } + + fn schema_scalar_lists() -> String { + let schema = indoc! { + r#"model Parent { + #id(id, Int, @id) + + children Child[] + } + + model Child { + #id(childId, Int, @id) + + parentId Int? + parent Parent? @relation(fields: [parentId], references: [id]) + + string String[] + int Int[] + bInt BigInt[] + float Float[] + bytes Bytes[] + bool Boolean[] + dt DateTime[] + } + "# + }; + + schema.to_owned() + } + + // TODO: fix https://github.com/prisma/team-orm/issues/684 and unexclude DAs + + #[connector_test( + schema(schema_scalar_lists), + capabilities(ScalarLists), + exclude(Postgres("pg.js", "neon.js")) + )] + async fn scalar_lists(runner: Runner) -> TestResult<()> { + create_child( + &runner, + r#"{ + childId: 1, + string: ["abc", "def"], + int: [1, -1, 1234567], + bInt: [1, -1, 9223372036854775807, -9223372036854775807], + float: [1.5, -1.5, 1.234567], + bytes: ["AQID", "Qk9OSk9VUg=="], + bool: [false, true], + dt: ["1900-10-10T01:10:10.001Z", "1999-12-12T21:12:12.121Z"], + }"#, + ) + .await?; + create_parent(&runner, r#"{ id: 1, children: { connect: [{ childId: 1 }] } }"#).await?; + + insta::assert_snapshot!( + run_query!(&runner, r#"{ findManyParent { id children { childId string int bInt float bytes bool dt } } }"#), + @r###"{"data":{"findManyParent":[{"id":1,"children":[{"childId":1,"string":["abc","def"],"int":[1,-1,1234567],"bInt":["1","-1","9223372036854775807","-9223372036854775807"],"float":[1.5,-1.5,1.234567],"bytes":["AQID","Qk9OSk9VUg=="],"bool":[false,true],"dt":["1900-10-10T01:10:10.001Z","1999-12-12T21:12:12.121Z"]}]}]}}"### + ); + + insta::assert_snapshot!( + run_query!(&runner, r#"{ findUniqueParent(where: { id: 1 }) { id children { childId string int bInt float bytes bool dt } } }"#), + @r###"{"data":{"findUniqueParent":{"id":1,"children":[{"childId":1,"string":["abc","def"],"int":[1,-1,1234567],"bInt":["1","-1","9223372036854775807","-9223372036854775807"],"float":[1.5,-1.5,1.234567],"bytes":["AQID","Qk9OSk9VUg=="],"bool":[false,true],"dt":["1900-10-10T01:10:10.001Z","1999-12-12T21:12:12.121Z"]}]}}}"### + ); + + Ok(()) + } + + async fn create_common_children(runner: &Runner) -> TestResult<()> { + create_child( + &runner, + r#"{ + childId: 1, + string: "abc", + int: 1, + bInt: 1, + float: 1.5, + bytes: "AQID", + bool: false, + dt: "1900-10-10T01:10:10.001Z", + }"#, + ) + .await?; + + create_child( + &runner, + r#"{ + childId: 2, + string: 
"def", + int: -4234234, + bInt: 14324324234324, + float: -2.54367, + bytes: "FDSF", + bool: true, + dt: "1999-12-12T21:12:12.121Z", + }"#, + ) + .await?; + + create_parent( + &runner, + r#"{ id: 1, children: { connect: [{ childId: 1 }, { childId: 2 }] } }"#, + ) + .await?; + + Ok(()) + } + + async fn create_child(runner: &Runner, data: &str) -> TestResult<()> { + runner + .query(format!("mutation {{ createOneChild(data: {}) {{ childId }} }}", data)) + .await? + .assert_success(); + Ok(()) + } + + async fn create_parent(runner: &Runner, data: &str) -> TestResult<()> { + runner + .query(format!("mutation {{ createOneParent(data: {}) {{ id }} }}", data)) + .await? + .assert_success(); + Ok(()) + } +} diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/field_reference/enum_filter.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/field_reference/enum_filter.rs new file mode 100644 index 00000000000..5f8d8182a59 --- /dev/null +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/field_reference/enum_filter.rs @@ -0,0 +1,77 @@ +use query_engine_tests::*; + +#[test_suite(schema(schema))] +mod enum_filter { + use query_engine_tests::run_query; + + fn schema() -> String { + let schema = indoc! { + r#"model TestModel { + #id(id, Int, @id) + + enum TestEnum? + enum2 TestEnum[] + } + + enum TestEnum { + a + b + c + } + "# + }; + + schema.to_owned() + } + + #[connector_test(capabilities(Enums, ScalarLists))] + async fn inclusion_filter(runner: Runner) -> TestResult<()> { + test_data(&runner).await?; + + insta::assert_snapshot!( + run_query!(&runner, r#"query { findManyTestModel(where: { enum: { in: { _ref: "enum2", _container: "TestModel" } } }) { id enum enum2 }}"#), + @r###"{"data":{"findManyTestModel":[{"id":1,"enum":"a","enum2":["a","b"]}]}}"### + ); + + insta::assert_snapshot!( + run_query!(&runner, r#"query { findManyTestModel(where: { enum: { notIn: { _ref: "enum2", _container: "TestModel" } } }) { id }}"#), + @r###"{"data":{"findManyTestModel":[{"id":2}]}}"### + ); + + insta::assert_snapshot!( + run_query!(&runner, r#"query { findManyTestModel(where: { enum: { not: { in: { _ref: "enum2", _container: "TestModel" } } } }) { id }}"#), + @r###"{"data":{"findManyTestModel":[{"id":2}]}}"### + ); + + Ok(()) + } + + pub async fn test_data(runner: &Runner) -> TestResult<()> { + runner + .query(indoc! { r#" + mutation { createOneTestModel(data: { + id: 1, + enum: a, + enum2: [a, b] + }) { id }}"# }) + .await? + .assert_success(); + + runner + .query(indoc! { r#" + mutation { createOneTestModel(data: { + id: 2, + enum: b, + enum2: [a, c] + }) { id }}"# }) + .await? + .assert_success(); + + runner + .query(indoc! { r#"mutation { createOneTestModel(data: { id: 3 }) { id }}"# }) + .await? 
+ .assert_success(); + + Ok(()) + } +} diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/field_reference/mod.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/field_reference/mod.rs index 8cee074b88d..32a8200484b 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/field_reference/mod.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/field_reference/mod.rs @@ -7,6 +7,7 @@ pub mod bytes_filter; pub mod composite_filter; pub mod datetime_filter; pub mod decimal_filter; +pub mod enum_filter; pub mod float_filter; pub mod having_filter; pub mod int_filter; diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/one_relation.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/one_relation.rs index cca380f8113..020de727d49 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/one_relation.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/filters/one_relation.rs @@ -183,6 +183,11 @@ mod one_relation { @r###"{"data":{"findManyBlog":[{"name":"blog 1","post":{"title":"post 1","comment":{"text":"comment 1"}}},{"name":"blog 2","post":null},{"name":"blog 3","post":null}]}}"### ); + insta::assert_snapshot!( + run_query!(&runner, r#"query { findManyBlog { name, post(where: { title: "post 1", comment: { is: { text: "comment 1" } } }) { title } }}"#), + @r###"{"data":{"findManyBlog":[{"name":"blog 1","post":{"title":"post 1"}},{"name":"blog 2","post":null},{"name":"blog 3","post":null}]}}"### + ); + Ok(()) } diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/nested_multi_order_pagination.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/nested_multi_order_pagination.rs index cf14f3e8bb4..ccbdc693b55 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/nested_multi_order_pagination.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/nested_multi_order_pagination.rs @@ -46,7 +46,7 @@ mod paging_one2m_stable_order { // Makes: [1 => 2, 2 => 3, 3 => 5] insta::assert_snapshot!( run_query!(&runner, r#"query { - findManyTestModel { + findManyTestModel(orderBy: { id: asc }) { id related(take: 1, orderBy: [{ fieldA: desc }, {fieldB: asc }, { fieldC: asc }, { fieldD: desc }]) { id @@ -73,7 +73,7 @@ mod paging_one2m_stable_order { // Makes: [1 => 1, 2 => 4, 3 => 6] insta::assert_snapshot!( run_query!(&runner, r#"query { - findManyTestModel { + findManyTestModel(orderBy: { id: asc}) { id related(take: -1, orderBy: [{ fieldA: desc }, { fieldB: asc }, { fieldC: asc }, { fieldD: desc }]) { id @@ -185,7 +185,7 @@ mod paging_one2m_unstable_order { run_query!( &runner, r#"query { - findManyTestModel { + findManyTestModel(orderBy: { id: asc }) { id related(take: 1, orderBy: [{ fieldA: desc }, {fieldB: asc }, { fieldC: asc }, { fieldD: desc }]) { id @@ -214,7 +214,7 @@ mod paging_one2m_unstable_order { run_query!( &runner, r#"query { - findManyTestModel { + findManyTestModel(orderBy: { id: asc }) { id related(take: -1, orderBy: [{ fieldA: desc }, { fieldB: asc }, { fieldC: asc }, { fieldD: desc }]) { id diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/nested_pagination.rs 
b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/nested_pagination.rs index 27c04241288..6a67b87d56b 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/nested_pagination.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/nested_pagination.rs @@ -357,7 +357,7 @@ mod nested_pagination { insta::assert_snapshot!( run_query!(&runner, r#"{ - findManyTop{t, middles(take: -1, orderBy: { id: asc }){m}} + findManyTop(orderBy: {t: asc}){t, middles(take: -1, orderBy: { id: asc }){m}} }"#), @r###"{"data":{"findManyTop":[{"t":"T1","middles":[{"m":"M13"}]},{"t":"T2","middles":[{"m":"M23"}]},{"t":"T3","middles":[{"m":"M33"}]}]}}"### ); @@ -372,7 +372,7 @@ mod nested_pagination { insta::assert_snapshot!( run_query!(&runner, r#"{ - findManyTop{t, middles(take: -3, orderBy: { id: asc }) {m}} + findManyTop(orderBy: {t: asc}){t, middles(take: -3, orderBy: { id: asc }) {m}} }"#), @r###"{"data":{"findManyTop":[{"t":"T1","middles":[{"m":"M11"},{"m":"M12"},{"m":"M13"}]},{"t":"T2","middles":[{"m":"M21"},{"m":"M22"},{"m":"M23"}]},{"t":"T3","middles":[{"m":"M31"},{"m":"M32"},{"m":"M33"}]}]}}"### ); @@ -387,7 +387,7 @@ mod nested_pagination { insta::assert_snapshot!( run_query!(&runner, r#"{ - findManyTop{t, middles(take: -4, orderBy: { id: asc }) {m}} + findManyTop(orderBy: {t: asc}){t, middles(take: -4, orderBy: { id: asc }) {m}} }"#), @r###"{"data":{"findManyTop":[{"t":"T1","middles":[{"m":"M11"},{"m":"M12"},{"m":"M13"}]},{"t":"T2","middles":[{"m":"M21"},{"m":"M22"},{"m":"M23"}]},{"t":"T3","middles":[{"m":"M31"},{"m":"M32"},{"m":"M33"}]}]}}"### ); @@ -402,7 +402,7 @@ mod nested_pagination { insta::assert_snapshot!( run_query!(&runner, r#"{ - findManyTop{middles{bottoms(take: -1, orderBy: { id: asc }){b}}} + findManyTop{middles(orderBy: {m: asc}){bottoms(take: -1, orderBy: { id: asc }){b}}} }"#), @r###"{"data":{"findManyTop":[{"middles":[{"bottoms":[{"b":"B113"}]},{"bottoms":[{"b":"B123"}]},{"bottoms":[{"b":"B133"}]}]},{"middles":[{"bottoms":[{"b":"B213"}]},{"bottoms":[{"b":"B223"}]},{"bottoms":[{"b":"B233"}]}]},{"middles":[{"bottoms":[{"b":"B313"}]},{"bottoms":[{"b":"B323"}]},{"bottoms":[{"b":"B333"}]}]}]}}"### ); @@ -417,7 +417,7 @@ mod nested_pagination { insta::assert_snapshot!( run_query!(&runner, r#"{ - findManyTop{middles{bottoms(take: -3, orderBy: { id: asc }){b}}} + findManyTop{middles(orderBy: {m: asc}){bottoms(take: -3, orderBy: { id: asc }){b}}} }"#), @r###"{"data":{"findManyTop":[{"middles":[{"bottoms":[{"b":"B111"},{"b":"B112"},{"b":"B113"}]},{"bottoms":[{"b":"B121"},{"b":"B122"},{"b":"B123"}]},{"bottoms":[{"b":"B131"},{"b":"B132"},{"b":"B133"}]}]},{"middles":[{"bottoms":[{"b":"B211"},{"b":"B212"},{"b":"B213"}]},{"bottoms":[{"b":"B221"},{"b":"B222"},{"b":"B223"}]},{"bottoms":[{"b":"B231"},{"b":"B232"},{"b":"B233"}]}]},{"middles":[{"bottoms":[{"b":"B311"},{"b":"B312"},{"b":"B313"}]},{"bottoms":[{"b":"B321"},{"b":"B322"},{"b":"B323"}]},{"bottoms":[{"b":"B331"},{"b":"B332"},{"b":"B333"}]}]}]}}"### ); @@ -432,7 +432,7 @@ mod nested_pagination { insta::assert_snapshot!( run_query!(&runner, r#"{ - findManyTop{middles{bottoms(take: -4, orderBy: { id: asc }){b}}} + findManyTop{middles(orderBy: {m: asc}){bottoms(take: -4, orderBy: { id: asc }){b}}} }"#), 
@r###"{"data":{"findManyTop":[{"middles":[{"bottoms":[{"b":"B111"},{"b":"B112"},{"b":"B113"}]},{"bottoms":[{"b":"B121"},{"b":"B122"},{"b":"B123"}]},{"bottoms":[{"b":"B131"},{"b":"B132"},{"b":"B133"}]}]},{"middles":[{"bottoms":[{"b":"B211"},{"b":"B212"},{"b":"B213"}]},{"bottoms":[{"b":"B221"},{"b":"B222"},{"b":"B223"}]},{"bottoms":[{"b":"B231"},{"b":"B232"},{"b":"B233"}]}]},{"middles":[{"bottoms":[{"b":"B311"},{"b":"B312"},{"b":"B313"}]},{"bottoms":[{"b":"B321"},{"b":"B322"},{"b":"B323"}]},{"bottoms":[{"b":"B331"},{"b":"B332"},{"b":"B333"}]}]}]}}"### ); @@ -451,7 +451,7 @@ mod nested_pagination { insta::assert_snapshot!( run_query!(&runner, r#"{ - findManyTop(skip: 1, take: 1){t, middles{m}} + findManyTop(skip: 1, take: 1){t, middles(orderBy: { m: asc }){m}} }"#), @r###"{"data":{"findManyTop":[{"t":"T2","middles":[{"m":"M21"},{"m":"M22"},{"m":"M23"}]}]}}"### ); @@ -466,7 +466,7 @@ mod nested_pagination { insta::assert_snapshot!( run_query!(&runner, r#"{ - findManyTop(skip: 1, take: 3){t, middles{m}} + findManyTop(skip: 1, take: 3){t, middles(orderBy: { m: asc }){m}} }"#), @r###"{"data":{"findManyTop":[{"t":"T2","middles":[{"m":"M21"},{"m":"M22"},{"m":"M23"}]},{"t":"T3","middles":[{"m":"M31"},{"m":"M32"},{"m":"M33"}]}]}}"### ); @@ -511,7 +511,7 @@ mod nested_pagination { insta::assert_snapshot!( run_query!(&runner, r#"{ - findManyTop(skip: 1, take: -3, orderBy: { id: asc }){t, middles{m}} + findManyTop(skip: 1, take: -3, orderBy: { id: asc }){t, middles(orderBy: { m: asc }){m}} }"#), @r###"{"data":{"findManyTop":[{"t":"T1","middles":[{"m":"M11"},{"m":"M12"},{"m":"M13"}]},{"t":"T2","middles":[{"m":"M21"},{"m":"M22"},{"m":"M23"}]}]}}"### ); @@ -526,7 +526,7 @@ mod nested_pagination { insta::assert_snapshot!( run_query!(&runner, r#"{ - findManyTop{t, middles(skip: 1, take: -1, orderBy: { id: asc }){m}} + findManyTop(orderBy: { t: asc }){t, middles(skip: 1, take: -1, orderBy: { id: asc }){m}} }"#), @r###"{"data":{"findManyTop":[{"t":"T1","middles":[{"m":"M12"}]},{"t":"T2","middles":[{"m":"M22"}]},{"t":"T3","middles":[{"m":"M32"}]}]}}"### ); @@ -541,7 +541,7 @@ mod nested_pagination { insta::assert_snapshot!( run_query!(&runner, r#"{ - findManyTop{t, middles(skip: 1, take: -3, orderBy: { id: asc }){m}} + findManyTop(orderBy: { t: asc }){t, middles(skip: 1, take: -3, orderBy: { id: asc }){m}} }"#), @r###"{"data":{"findManyTop":[{"t":"T1","middles":[{"m":"M11"},{"m":"M12"}]},{"t":"T2","middles":[{"m":"M21"},{"m":"M22"}]},{"t":"T3","middles":[{"m":"M31"},{"m":"M32"}]}]}}"### ); @@ -560,7 +560,7 @@ mod nested_pagination { insta::assert_snapshot!( run_query!(&runner, r#"{ - findManyTop{t, middles(orderBy: { m: desc }, take: 1){m}} + findManyTop(orderBy: { t: asc }){t, middles(orderBy: { m: desc }, take: 1){m}} }"#), @r###"{"data":{"findManyTop":[{"t":"T1","middles":[{"m":"M13"}]},{"t":"T2","middles":[{"m":"M23"}]},{"t":"T3","middles":[{"m":"M33"}]}]}}"### ); @@ -575,7 +575,7 @@ mod nested_pagination { insta::assert_snapshot!( run_query!(&runner, r#"{ - findManyTop{t, middles(orderBy: { m: desc }, take: 3){m}} + findManyTop(orderBy: { t: asc }){t, middles(orderBy: { m: desc }, take: 3){m}} }"#), @r###"{"data":{"findManyTop":[{"t":"T1","middles":[{"m":"M13"},{"m":"M12"},{"m":"M11"}]},{"t":"T2","middles":[{"m":"M23"},{"m":"M22"},{"m":"M21"}]},{"t":"T3","middles":[{"m":"M33"},{"m":"M32"},{"m":"M31"}]}]}}"### ); @@ -885,7 +885,7 @@ mod nested_pagination { insta::assert_snapshot!( run_query!(&runner, r#"{ - findManyModelA { + findManyModelA(orderBy: { id: asc }) { id manyB(skip: 1) { id @@ -909,7 +909,7 @@ 
mod nested_pagination { insta::assert_snapshot!( run_query!(&runner, r#"{ - findManyModelA { + findManyModelA(orderBy: { id: asc }) { id manyB(skip: 1, take: 2) { id @@ -933,7 +933,7 @@ mod nested_pagination { insta::assert_snapshot!( run_query!(&runner, r#"{ - findManyModelA { + findManyModelA(orderBy: { id: asc }) { id manyB(skip: 1, take: -2, orderBy: { id: asc }) { id diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/order_by_aggregation.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/order_by_aggregation.rs index c2151782330..52e9fcaf8cc 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/order_by_aggregation.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/order_by_aggregation.rs @@ -79,12 +79,12 @@ mod order_by_aggr { run_query!(&runner, r#"{ findManyPost(orderBy: { categories: { _count: asc } }) { title - categories { + categories(orderBy: { name: asc }) { name } } }"#), - @r###"{"data":{"findManyPost":[{"title":"bob_post_1","categories":[{"name":"Finance"}]},{"title":"alice_post_1","categories":[{"name":"News"},{"name":"Society"}]},{"title":"bob_post_2","categories":[{"name":"History"},{"name":"Gaming"},{"name":"Hacking"}]}]}}"### + @r###"{"data":{"findManyPost":[{"title":"bob_post_1","categories":[{"name":"Finance"}]},{"title":"alice_post_1","categories":[{"name":"News"},{"name":"Society"}]},{"title":"bob_post_2","categories":[{"name":"Gaming"},{"name":"Hacking"},{"name":"History"}]}]}}"### ); Ok(()) @@ -98,12 +98,12 @@ mod order_by_aggr { run_query!(&runner, r#"{ findManyPost(orderBy: { categories: { _count: desc } }) { title - categories { + categories(orderBy: { name :asc }) { name } } }"#), - @r###"{"data":{"findManyPost":[{"title":"bob_post_2","categories":[{"name":"History"},{"name":"Gaming"},{"name":"Hacking"}]},{"title":"alice_post_1","categories":[{"name":"News"},{"name":"Society"}]},{"title":"bob_post_1","categories":[{"name":"Finance"}]}]}}"### + @r###"{"data":{"findManyPost":[{"title":"bob_post_2","categories":[{"name":"Gaming"},{"name":"Hacking"},{"name":"History"}]},{"title":"alice_post_1","categories":[{"name":"News"},{"name":"Society"}]},{"title":"bob_post_1","categories":[{"name":"Finance"}]}]}}"### ); Ok(()) @@ -159,12 +159,12 @@ mod order_by_aggr { run_query!(&runner, r#"{ findManyPost(orderBy: [{ categories: { _count: asc } }, { title: asc }]) { title - categories { + categories(orderBy: { name: asc }) { name } } }"#), - @r###"{"data":{"findManyPost":[{"title":"bob_post_1","categories":[{"name":"Finance"}]},{"title":"alice_post_1","categories":[{"name":"News"},{"name":"Society"}]},{"title":"bob_post_2","categories":[{"name":"History"},{"name":"Gaming"},{"name":"Hacking"}]}]}}"### + @r###"{"data":{"findManyPost":[{"title":"bob_post_1","categories":[{"name":"Finance"}]},{"title":"alice_post_1","categories":[{"name":"News"},{"name":"Society"}]},{"title":"bob_post_2","categories":[{"name":"Gaming"},{"name":"Hacking"},{"name":"History"}]}]}}"### ); Ok(()) @@ -181,12 +181,12 @@ mod order_by_aggr { user { name } - categories { + categories(orderBy: { name: asc }) { name } } }"#), - @r###"{"data":{"findManyPost":[{"user":{"name":"Alice"},"categories":[{"name":"News"},{"name":"Society"}]},{"user":{"name":"Bob"},"categories":[{"name":"History"},{"name":"Gaming"},{"name":"Hacking"}]},{"user":{"name":"Bob"},"categories":[{"name":"Finance"}]}]}}"### + 
@r###"{"data":{"findManyPost":[{"user":{"name":"Alice"},"categories":[{"name":"News"},{"name":"Society"}]},{"user":{"name":"Bob"},"categories":[{"name":"Gaming"},{"name":"Hacking"},{"name":"History"}]},{"user":{"name":"Bob"},"categories":[{"name":"Finance"}]}]}}"### ); Ok(()) @@ -571,7 +571,7 @@ mod order_by_aggr { findManyPost(orderBy: [{ categories: { _count: asc } }, { title: asc }], cursor: { id: 2 }, take: 2) { id title - categories { + categories(orderBy: { name: asc }) { name } } diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/order_by_dependent.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/order_by_dependent.rs index b4c6e7b5ef3..c8f7429451a 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/order_by_dependent.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/order_and_pagination/order_by_dependent.rs @@ -248,7 +248,10 @@ mod order_by_dependent { r#"{"data":{"findManyModelA":[{"id":4,"b":null},{"id":3,"b":null},{"id":1,"b":{"c":{"a":{"id":3}}}},{"id":2,"b":{"c":{"a":{"id":4}}}}]}}"#, r#"{"data":{"findManyModelA":[{"id":3,"b":null},{"id":4,"b":null},{"id":1,"b":{"c":{"a":{"id":3}}}},{"id":2,"b":{"c":{"a":{"id":4}}}}]}}"#, ], - _ => vec![r#"{"data":{"findManyModelA":[{"id":1,"b":{"c":{"a":{"id":3}}}},{"id":2,"b":{"c":{"a":{"id":4}}}},{"id":3,"b":null},{"id":4,"b":null}]}}"#] + _ => vec![ + r#"{"data":{"findManyModelA":[{"id":1,"b":{"c":{"a":{"id":3}}}},{"id":2,"b":{"c":{"a":{"id":4}}}},{"id":3,"b":null},{"id":4,"b":null}]}}"#, + r#"{"data":{"findManyModelA":[{"id":1,"b":{"c":{"a":{"id":3}}}},{"id":2,"b":{"c":{"a":{"id":4}}}},{"id":4,"b":null},{"id":3,"b":null}]}}"# + ] ); Ok(()) @@ -280,7 +283,10 @@ mod order_by_dependent { r#"{"data":{"findManyModelA":[{"id":2,"b":{"c":{"a":{"id":4}}}},{"id":1,"b":{"c":{"a":{"id":3}}}},{"id":4,"b":null},{"id":3,"b":null}]}}"#, r#"{"data":{"findManyModelA":[{"id":2,"b":{"c":{"a":{"id":4}}}},{"id":1,"b":{"c":{"a":{"id":3}}}},{"id":3,"b":null},{"id":4,"b":null}]}}"#, ], - _ => vec![r#"{"data":{"findManyModelA":[{"id":3,"b":null},{"id":4,"b":null},{"id":2,"b":{"c":{"a":{"id":4}}}},{"id":1,"b":{"c":{"a":{"id":3}}}}]}}"#] + _ => vec![ + r#"{"data":{"findManyModelA":[{"id":3,"b":null},{"id":4,"b":null},{"id":2,"b":{"c":{"a":{"id":4}}}},{"id":1,"b":{"c":{"a":{"id":3}}}}]}}"#, + r#"{"data":{"findManyModelA":[{"id":4,"b":null},{"id":3,"b":null},{"id":2,"b":{"c":{"a":{"id":4}}}},{"id":1,"b":{"c":{"a":{"id":3}}}}]}}"# + ] ); Ok(()) } diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/simple/m2m.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/simple/m2m.rs index 556e5f21f3f..bf8da606a81 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/simple/m2m.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/simple/m2m.rs @@ -69,6 +69,49 @@ mod m2m { Ok(()) } + fn schema() -> String { + let schema = indoc! { + r#"model Item { + id Int @id @default(autoincrement()) + categories Category[] + createdAt DateTime @default(now()) + updatedAt DateTime? @updatedAt + } + + model Category { + id Int @id @default(autoincrement()) + items Item[] + createdAt DateTime @default(now()) + updatedAt DateTime? 
@updatedAt + }"# + }; + + schema.to_owned() + } + + // https://github.com/prisma/prisma/issues/16390 + #[connector_test(schema(schema), relation_mode = "prisma", only(Postgres))] + async fn repro_16390(runner: Runner) -> TestResult<()> { + run_query!(&runner, r#"mutation { createOneCategory(data: {}) { id } }"#); + run_query!( + &runner, + r#"mutation { createOneItem(data: { categories: { connect: { id: 1 } } }) { id } }"# + ); + run_query!(&runner, r#"mutation { deleteOneItem(where: { id: 1 }) { id } }"#); + + insta::assert_snapshot!( + run_query!(&runner, r#"{ findUniqueItem(where: { id: 1 }) { id categories { id } } }"#), + @r###"{"data":{"findUniqueItem":null}}"### + ); + + insta::assert_snapshot!( + run_query!(&runner, r#"{ findUniqueCategory(where: { id: 1 }) { id items { id } } }"#), + @r###"{"data":{"findUniqueCategory":{"id":1,"items":[]}}}"### + ); + + Ok(()) + } + async fn test_data(runner: &Runner) -> TestResult<()> { runner .query( diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/simple/mod.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/simple/mod.rs index 9e9cf195116..939468590e4 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/simple/mod.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/simple/mod.rs @@ -8,4 +8,5 @@ mod json_result; mod m2m; mod mongo_incorrect_fields; mod multi_field_unique; +mod one2m; mod raw_mongo; diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/simple/one2m.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/simple/one2m.rs new file mode 100644 index 00000000000..97f41f5f438 --- /dev/null +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/queries/simple/one2m.rs @@ -0,0 +1,126 @@ +use indoc::indoc; +use query_engine_tests::*; + +#[test_suite(schema(schema))] +mod simple { + fn schema() -> String { + let schema = indoc! { + r#"model Parent { + #id(id, Int, @id) + name String + + children Child[] + } + + model Child { + #id(id, Int, @id) + name String + + parentId Int? + parent Parent? @relation(fields: [parentId], references: [id]) + } + "# + }; + + schema.to_owned() + } + + #[connector_test] + async fn simple(runner: Runner) -> TestResult<()> { + test_data(&runner).await?; + + insta::assert_snapshot!( + run_query!(runner, r#"{ findManyParent { id name children { id name } } }"#), + @r###"{"data":{"findManyParent":[{"id":1,"name":"Bob","children":[{"id":1,"name":"Hello!"},{"id":2,"name":"World!"}]}]}}"### + ); + + Ok(()) + } + + async fn test_data(runner: &Runner) -> TestResult<()> { + create_row( + runner, + r#"{ id: 1, name: "Bob", children: { create: [{ id: 1, name: "Hello!" }, { id: 2, name: "World!" }] } }"#, + ) + .await?; + + Ok(()) + } + + async fn create_row(runner: &Runner, data: &str) -> TestResult<()> { + runner + .query(format!("mutation {{ createOneParent(data: {}) {{ id }} }}", data)) + .await? + .assert_success(); + Ok(()) + } +} + +#[test_suite(schema(schema))] +mod nested { + fn schema() -> String { + let schema = indoc! { + r#"model Parent { + #id(parentId, Int, @id) + + children Child[] + } + + model Child { + #id(childId, Int, @id) + + parentId Int? + parent Parent? @relation(fields: [parentId], references: [parentId]) + + children GrandChild[] + } + + model GrandChild { + #id(grandChildId, Int, @id) + + parentId Int? + parent Child? 
@relation(fields: [parentId], references: [childId]) + }"# + }; + + schema.to_owned() + } + + #[connector_test] + async fn vanilla(runner: Runner) -> TestResult<()> { + create_test_data(&runner).await?; + + insta::assert_snapshot!( + run_query!(runner, r#"{ findManyParent { parentId children { childId children { grandChildId } } } }"#), + @r###"{"data":{"findManyParent":[{"parentId":1,"children":[{"childId":1,"children":[{"grandChildId":1},{"grandChildId":2}]},{"childId":2,"children":[{"grandChildId":3}]}]}]}}"### + ); + + Ok(()) + } + + async fn create_test_data(runner: &Runner) -> TestResult<()> { + create_row( + runner, + r#"{ + parentId: 1, + children: { + create: [ + { childId: 1, children: { create: [{ grandChildId: 1 }, { grandChildId: 2 }] }}, + { childId: 2, children: { create: [{ grandChildId: 3 }] } } + ] + } + }"#, + ) + .await?; + + Ok(()) + } + + async fn create_row(runner: &Runner, data: &str) -> TestResult<()> { + runner + .query(format!("mutation {{ createOneParent(data: {}) {{ parentId }} }}", data)) + .await? + .assert_success(); + Ok(()) + } +} diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/writes/nested_mutations/already_converted/nested_update_many_inside_update.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/writes/nested_mutations/already_converted/nested_update_many_inside_update.rs index 4a42911d989..05931d16084 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/writes/nested_mutations/already_converted/nested_update_many_inside_update.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/writes/nested_mutations/already_converted/nested_update_many_inside_update.rs @@ -55,7 +55,12 @@ mod um_inside_update { } // "a PM to C1! relation" should "work" - #[relation_link_test(on_parent = "ToMany", on_child = "ToOneReq")] + // TODO: fix https://github.com/prisma/team-orm/issues/683 and then unexclude DAs + #[relation_link_test( + on_parent = "ToMany", + on_child = "ToOneReq", + exclude(Postgres("pg.js", "neon.js"), Vitess("planetscale.js")) + )] async fn pm_c1_req_should_work(runner: &Runner, t: &DatamodelWithParams) -> TestResult<()> { let parent = setup_data(runner, t).await?; @@ -89,7 +94,12 @@ mod um_inside_update { } // "a PM to C1 relation " should "work" - #[relation_link_test(on_parent = "ToMany", on_child = "ToOneOpt")] + // TODO: fix https://github.com/prisma/team-orm/issues/683 and then unexclude DAs + #[relation_link_test( + on_parent = "ToMany", + on_child = "ToOneOpt", + exclude(Postgres("pg.js", "neon.js"), Vitess("planetscale.js")) + )] async fn pm_c1_should_work(runner: &Runner, t: &DatamodelWithParams) -> TestResult<()> { let parent = setup_data(runner, t).await?; @@ -123,7 +133,12 @@ mod um_inside_update { } // "a PM to CM relation " should "work" - #[relation_link_test(on_parent = "ToMany", on_child = "ToMany")] + // TODO: fix https://github.com/prisma/team-orm/issues/683 and then unexclude DAs + #[relation_link_test( + on_parent = "ToMany", + on_child = "ToMany", + exclude(Postgres("pg.js", "neon.js"), Vitess("planetscale.js")) + )] async fn pm_cm_should_work(runner: &Runner, t: &DatamodelWithParams) -> TestResult<()> { let parent = setup_data(runner, t).await?; @@ -157,7 +172,12 @@ mod um_inside_update { } // "a PM to C1! 
relation " should "work with several updateManys" - #[relation_link_test(on_parent = "ToMany", on_child = "ToOneReq")] + // TODO: fix https://github.com/prisma/team-orm/issues/683 and then unexclude DAs + #[relation_link_test( + on_parent = "ToMany", + on_child = "ToOneReq", + exclude(Postgres("pg.js", "neon.js"), Vitess("planetscale.js")) + )] async fn pm_c1_req_many_ums(runner: &Runner, t: &DatamodelWithParams) -> TestResult<()> { let parent = setup_data(runner, t).await?; @@ -197,7 +217,12 @@ mod um_inside_update { } // "a PM to C1! relation " should "work with empty Filter" - #[relation_link_test(on_parent = "ToMany", on_child = "ToOneReq")] + // TODO: fix https://github.com/prisma/team-orm/issues/683 and then unexclude DAs + #[relation_link_test( + on_parent = "ToMany", + on_child = "ToOneReq", + exclude(Postgres("pg.js", "neon.js"), Vitess("planetscale.js")) + )] async fn pm_c1_req_empty_filter(runner: &Runner, t: &DatamodelWithParams) -> TestResult<()> { let parent = setup_data(runner, t).await?; @@ -233,7 +258,12 @@ mod um_inside_update { } // "a PM to C1! relation " should "not change anything when there is no hit" - #[relation_link_test(on_parent = "ToMany", on_child = "ToOneReq")] + // TODO: fix https://github.com/prisma/team-orm/issues/683 and then unexclude DAs + #[relation_link_test( + on_parent = "ToMany", + on_child = "ToOneReq", + exclude(Postgres("pg.js", "neon.js"), Vitess("planetscale.js")) + )] async fn pm_c1_req_noop_no_hit(runner: &Runner, t: &DatamodelWithParams) -> TestResult<()> { let parent = setup_data(runner, t).await?; @@ -275,7 +305,12 @@ mod um_inside_update { // optional ordering // "a PM to C1! relation " should "work when multiple filters hit" - #[relation_link_test(on_parent = "ToMany", on_child = "ToOneReq")] + // TODO: fix https://github.com/prisma/team-orm/issues/683 and then unexclude DAs + #[relation_link_test( + on_parent = "ToMany", + on_child = "ToOneReq", + exclude(Postgres("pg.js", "neon.js"), Vitess("planetscale.js")) + )] async fn pm_c1_req_many_filters(runner: &Runner, t: &DatamodelWithParams) -> TestResult<()> { let parent = setup_data(runner, t).await?; diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/writes/nested_mutations/nested_atomic_number_ops.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/writes/nested_mutations/nested_atomic_number_ops.rs index c325fccb6d6..014c921705a 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/writes/nested_mutations/nested_atomic_number_ops.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/writes/nested_mutations/nested_atomic_number_ops.rs @@ -195,7 +195,7 @@ mod atomic_number_ops { } // "A nested updateOne mutation" should "correctly apply all number operations for Int" - #[connector_test(schema(schema_3), exclude(CockroachDb))] + #[connector_test(schema(schema_3), exclude(CockroachDb, Postgres("pg.js", "neon.js")))] async fn nested_update_int_ops(runner: Runner) -> TestResult<()> { create_test_model(&runner, 1, None, None).await?; create_test_model(&runner, 2, Some(3), None).await?; @@ -324,7 +324,7 @@ mod atomic_number_ops { } // "A nested updateOne mutation" should "correctly apply all number operations for Int" - #[connector_test(schema(schema_3), exclude(MongoDb))] + #[connector_test(schema(schema_3), exclude(MongoDb, Postgres("pg.js", "neon.js")))] async fn nested_update_float_ops(runner: Runner) -> TestResult<()> { create_test_model(&runner, 1, None, None).await?; create_test_model(&runner, 2, None, 
Some("5.5")).await?; diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/writes/nested_mutations/not_using_schema_base/nested_update_inside_update.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/writes/nested_mutations/not_using_schema_base/nested_update_inside_update.rs index a543ba7b8f5..1cf206d347a 100644 --- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/writes/nested_mutations/not_using_schema_base/nested_update_inside_update.rs +++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/writes/nested_mutations/not_using_schema_base/nested_update_inside_update.rs @@ -191,7 +191,7 @@ mod update_inside_update { // ---------------------------------- // "A PM to C1 relation relation" should "work" - #[relation_link_test(on_parent = "ToMany", on_child = "ToOneOpt")] + #[relation_link_test(on_parent = "ToMany", on_child = "ToOneOpt", exclude(Postgres("pg.js", "neon.js")))] async fn pm_c1_should_work(runner: &Runner, t: &DatamodelWithParams) -> TestResult<()> { let res = run_query_json!( runner, @@ -384,7 +384,7 @@ mod update_inside_update { // ---------------------------------- // "A PM to CM relation relation" should "work" - #[relation_link_test(on_parent = "ToMany", on_child = "ToMany")] + #[relation_link_test(on_parent = "ToMany", on_child = "ToMany", exclude(Postgres("pg.js", "neon.js")))] async fn pm_cm_should_work(runner: &Runner, t: &DatamodelWithParams) -> TestResult<()> { let res = run_query_json!( runner, diff --git a/query-engine/connectors/mongodb-query-connector/src/error.rs b/query-engine/connectors/mongodb-query-connector/src/error.rs index f32ff78e29c..3355f0020f3 100644 --- a/query-engine/connectors/mongodb-query-connector/src/error.rs +++ b/query-engine/connectors/mongodb-query-connector/src/error.rs @@ -278,6 +278,7 @@ impl DecorateErrorWithFieldInformationExtension for crate::Result { match selected_field { SelectedField::Scalar(sf) => self.decorate_with_scalar_field_info(sf), SelectedField::Composite(composite_sel) => self.decorate_with_composite_field_info(&composite_sel.field), + SelectedField::Relation(_) => unreachable!(), } } diff --git a/query-engine/connectors/mongodb-query-connector/src/interface/connection.rs b/query-engine/connectors/mongodb-query-connector/src/interface/connection.rs index e10c0e1f5b3..9f825241939 100644 --- a/query-engine/connectors/mongodb-query-connector/src/interface/connection.rs +++ b/query-engine/connectors/mongodb-query-connector/src/interface/connection.rs @@ -10,7 +10,7 @@ use connector_interface::{ WriteOperations, }; use mongodb::{ClientSession, Database}; -use query_structure::{prelude::*, SelectionResult}; +use query_structure::{prelude::*, RelationLoadStrategy, SelectionResult}; use std::collections::HashMap; pub struct MongoDbConnection { @@ -190,6 +190,7 @@ impl ReadOperations for MongoDbConnection { filter: &query_structure::Filter, selected_fields: &FieldSelection, aggr_selections: &[RelAggregationSelection], + _relation_load_strategy: RelationLoadStrategy, _trace_id: Option, ) -> connector_interface::Result> { catch(async move { @@ -212,6 +213,7 @@ impl ReadOperations for MongoDbConnection { query_arguments: query_structure::QueryArguments, selected_fields: &FieldSelection, aggregation_selections: &[RelAggregationSelection], + _relation_load_strategy: RelationLoadStrategy, _trace_id: Option, ) -> connector_interface::Result { catch(async move { diff --git a/query-engine/connectors/mongodb-query-connector/src/interface/transaction.rs 
b/query-engine/connectors/mongodb-query-connector/src/interface/transaction.rs index 1de0bb8c750..6e15d126212 100644 --- a/query-engine/connectors/mongodb-query-connector/src/interface/transaction.rs +++ b/query-engine/connectors/mongodb-query-connector/src/interface/transaction.rs @@ -8,7 +8,7 @@ use connector_interface::{ }; use mongodb::options::{Acknowledgment, ReadConcern, TransactionOptions, WriteConcern}; use query_engine_metrics::{decrement_gauge, increment_gauge, metrics, PRISMA_CLIENT_QUERIES_ACTIVE}; -use query_structure::SelectionResult; +use query_structure::{RelationLoadStrategy, SelectionResult}; use std::collections::HashMap; pub struct MongoDbTransaction<'conn> { @@ -255,6 +255,7 @@ impl<'conn> ReadOperations for MongoDbTransaction<'conn> { filter: &query_structure::Filter, selected_fields: &FieldSelection, aggr_selections: &[RelAggregationSelection], + _relation_load_strategy: RelationLoadStrategy, _trace_id: Option, ) -> connector_interface::Result> { catch(async move { @@ -277,6 +278,7 @@ impl<'conn> ReadOperations for MongoDbTransaction<'conn> { query_arguments: query_structure::QueryArguments, selected_fields: &FieldSelection, aggregation_selections: &[RelAggregationSelection], + _relation_load_strategy: RelationLoadStrategy, _trace_id: Option, ) -> connector_interface::Result { catch(async move { diff --git a/query-engine/connectors/mongodb-query-connector/src/output_meta.rs b/query-engine/connectors/mongodb-query-connector/src/output_meta.rs index 081672f9d6e..1100c9d436a 100644 --- a/query-engine/connectors/mongodb-query-connector/src/output_meta.rs +++ b/query-engine/connectors/mongodb-query-connector/src/output_meta.rs @@ -80,6 +80,7 @@ pub fn from_selections( }), ); } + SelectedField::Relation(_) => unreachable!(), } } diff --git a/query-engine/connectors/mongodb-query-connector/src/projection.rs b/query-engine/connectors/mongodb-query-connector/src/projection.rs index 80a6a3e792e..cbb16e0a097 100644 --- a/query-engine/connectors/mongodb-query-connector/src/projection.rs +++ b/query-engine/connectors/mongodb-query-connector/src/projection.rs @@ -26,6 +26,7 @@ fn path_prefixed_selection(doc: &mut Document, parent_paths: Vec, select parent_paths.push(cs.field.db_name().to_owned()); path_prefixed_selection(doc, parent_paths, cs.selections); } + query_structure::SelectedField::Relation(_) => unreachable!(), } } } diff --git a/query-engine/connectors/mongodb-query-connector/src/value.rs b/query-engine/connectors/mongodb-query-connector/src/value.rs index cf6812d59b6..9faecaa13f4 100644 --- a/query-engine/connectors/mongodb-query-connector/src/value.rs +++ b/query-engine/connectors/mongodb-query-connector/src/value.rs @@ -23,6 +23,7 @@ impl IntoBson for (&SelectedField, PrismaValue) { match selection { SelectedField::Scalar(sf) => (sf, value).into_bson(), SelectedField::Composite(_) => todo!(), // [Composites] todo + SelectedField::Relation(_) => unreachable!(), } } } diff --git a/query-engine/connectors/query-connector/src/interface.rs b/query-engine/connectors/query-connector/src/interface.rs index 942edd1868f..518f4356d54 100644 --- a/query-engine/connectors/query-connector/src/interface.rs +++ b/query-engine/connectors/query-connector/src/interface.rs @@ -231,6 +231,7 @@ pub trait ReadOperations { filter: &Filter, selected_fields: &FieldSelection, aggregation_selections: &[RelAggregationSelection], + relation_load_strategy: RelationLoadStrategy, trace_id: Option, ) -> crate::Result>; @@ -246,6 +247,7 @@ pub trait ReadOperations { query_arguments: QueryArguments, 
selected_fields: &FieldSelection, aggregation_selections: &[RelAggregationSelection], + relation_load_strategy: RelationLoadStrategy, trace_id: Option, ) -> crate::Result; diff --git a/query-engine/connectors/query-connector/src/write_args.rs b/query-engine/connectors/query-connector/src/write_args.rs index e0b03097504..c89f4e51514 100644 --- a/query-engine/connectors/query-connector/src/write_args.rs +++ b/query-engine/connectors/query-connector/src/write_args.rs @@ -327,6 +327,7 @@ impl From<(&SelectedField, PrismaValue)> for WriteOperation { match selection { SelectedField::Scalar(sf) => (sf, pv).into(), SelectedField::Composite(cs) => (&cs.field, pv).into(), + SelectedField::Relation(_) => todo!(), } } } diff --git a/query-engine/connectors/sql-query-connector/Cargo.toml b/query-engine/connectors/sql-query-connector/Cargo.toml index fbe04850164..9ba23da469c 100644 --- a/query-engine/connectors/sql-query-connector/Cargo.toml +++ b/query-engine/connectors/sql-query-connector/Cargo.toml @@ -27,6 +27,7 @@ uuid.workspace = true opentelemetry = { version = "0.17", features = ["tokio"] } tracing-opentelemetry = "0.17.3" cuid = { git = "https://github.com/prisma/cuid-rust", branch = "wasm32-support" } +hex = "0.4" [target.'cfg(not(target_arch = "wasm32"))'.dependencies] quaint.workspace = true diff --git a/query-engine/connectors/sql-query-connector/src/database/connection.rs b/query-engine/connectors/sql-query-connector/src/database/connection.rs index cb7c0a9b312..2bdabe57b2e 100644 --- a/query-engine/connectors/sql-query-connector/src/database/connection.rs +++ b/query-engine/connectors/sql-query-connector/src/database/connection.rs @@ -13,7 +13,7 @@ use quaint::{ connector::{IsolationLevel, TransactionCapable}, prelude::{ConnectionInfo, Queryable}, }; -use query_structure::{prelude::*, Filter, QueryArguments, SelectionResult}; +use query_structure::{prelude::*, Filter, QueryArguments, RelationLoadStrategy, SelectionResult}; use std::{collections::HashMap, str::FromStr}; pub(crate) struct SqlConnection { @@ -87,6 +87,7 @@ where filter: &Filter, selected_fields: &FieldSelection, aggr_selections: &[RelAggregationSelection], + relation_load_strategy: RelationLoadStrategy, trace_id: Option, ) -> connector::Result> { // [Composites] todo: FieldSelection -> ModelProjection conversion @@ -96,8 +97,9 @@ where &self.inner, model, filter, - &selected_fields.into(), + selected_fields, aggr_selections, + relation_load_strategy, &ctx, ) .await @@ -111,16 +113,19 @@ where query_arguments: QueryArguments, selected_fields: &FieldSelection, aggr_selections: &[RelAggregationSelection], + relation_load_strategy: RelationLoadStrategy, trace_id: Option, ) -> connector::Result { catch(self.connection_info.clone(), async move { let ctx = Context::new(&self.connection_info, trace_id.as_deref()); + read::get_many_records( &self.inner, model, query_arguments, - &selected_fields.into(), + selected_fields, aggr_selections, + relation_load_strategy, &ctx, ) .await diff --git a/query-engine/connectors/sql-query-connector/src/database/operations/coerce.rs b/query-engine/connectors/sql-query-connector/src/database/operations/coerce.rs new file mode 100644 index 00000000000..61390bc5fa0 --- /dev/null +++ b/query-engine/connectors/sql-query-connector/src/database/operations/coerce.rs @@ -0,0 +1,186 @@ +use std::io; + +use bigdecimal::{BigDecimal, FromPrimitive}; +use itertools::{Either, Itertools}; +use query_structure::*; + +use crate::{query_arguments_ext::QueryArgumentsExt, SqlError}; + +/// Coerces relations resolved as 
JSON to PrismaValues.
+/// Note: Some in-memory processing is baked into this function too for performance reasons.
+pub(crate) fn coerce_record_with_json_relation(
+    record: &mut Record,
+    rs_indexes: Vec<(usize, &RelationSelection)>,
+) -> crate::Result<()> {
+    for (val_idx, rs) in rs_indexes {
+        let val = record.values.get_mut(val_idx).unwrap();
+        // TODO(perf): Find ways to avoid serializing and deserializing multiple times.
+        let json_val: serde_json::Value = serde_json::from_str(val.as_json().unwrap()).unwrap();
+
+        *val = coerce_json_relation_to_pv(json_val, rs)?;
+    }
+
+    Ok(())
+}
+
+fn coerce_json_relation_to_pv(value: serde_json::Value, rs: &RelationSelection) -> crate::Result<PrismaValue> {
+    let relations = rs.relations().collect_vec();
+
+    match value {
+        // one-to-many
+        serde_json::Value::Array(values) if rs.field.is_list() => {
+            let iter = values.into_iter().filter_map(|value| {
+                // FIXME: In the case of m2m relations, the aggregation produces null values if the B side of the m2m table points to a record that doesn't exist.
+                // FIXME: This only seems to happen because of a bug with `relationMode=prisma` which doesn't clean up the relation table properly when deleting records that belong to an m2m relation.
+                // FIXME: This hack filters out the null values from the array, but we should fix the root cause instead, if possible.
+                // FIXME: In theory, the aggregated array should only contain objects, which are the joined rows.
+                // FIXME: See m2m.rs::repro_16390 for a reproduction.
+                if value.is_null() && rs.field.relation().is_many_to_many() {
+                    None
+                } else {
+                    Some(coerce_json_relation_to_pv(value, rs))
+                }
+            });
+
+            // Reverses order when using negative take.
+            let iter = match rs.args.needs_reversed_order() {
+                true => Either::Left(iter.rev()),
+                false => Either::Right(iter),
+            };
+
+            Ok(PrismaValue::List(iter.collect::<crate::Result<Vec<_>>>()?))
+        }
+        // to-one
+        serde_json::Value::Array(values) => {
+            let coerced = values
+                .into_iter()
+                .next()
+                .map(|value| coerce_json_relation_to_pv(value, rs));
+
+            // TODO(HACK): We probably want to update the sql builder instead to not aggregate to-one relations as an array.
+            // If the array is empty, it means there's no relation, so we coerce it to null.
+            if let Some(val) = coerced {
+                val
+            } else {
+                Ok(PrismaValue::Null)
+            }
+        }
+        serde_json::Value::Object(obj) => {
+            let mut map: Vec<(String, PrismaValue)> = Vec::with_capacity(obj.len());
+            let related_model = rs.field.related_model();
+
+            for (key, value) in obj {
+                match related_model.fields().all().find(|f| f.db_name() == key).unwrap() {
+                    Field::Scalar(sf) => {
+                        map.push((key, coerce_json_scalar_to_pv(value, &sf)?));
+                    }
+                    Field::Relation(rf) => {
+                        // TODO: optimize this
+                        if let Some(nested_selection) = relations.iter().find(|rs| rs.field == rf) {
+                            map.push((key, coerce_json_relation_to_pv(value, nested_selection)?));
+                        }
+                    }
+                    _ => (),
+                }
+            }
+
+            Ok(PrismaValue::Object(map))
+        }
+        x => unreachable!("Unexpected value when deserializing JSON relation data: {x:?}"),
+    }
+}
+
+pub(crate) fn coerce_json_scalar_to_pv(value: serde_json::Value, sf: &ScalarField) -> crate::Result<PrismaValue> {
+    if sf.type_identifier().is_json() {
+        return Ok(PrismaValue::Json(serde_json::to_string(&value)?));
+    }
+
+    match value {
+        serde_json::Value::Null => Ok(PrismaValue::Null),
+        serde_json::Value::Bool(b) => Ok(PrismaValue::Boolean(b)),
+        serde_json::Value::Number(n) => match sf.type_identifier() {
+            TypeIdentifier::Int => Ok(PrismaValue::Int(n.as_i64().ok_or_else(|| {
+                build_conversion_error(&format!("Number({n})"),
&format!("{:?}", sf.type_identifier())) + })?)), + TypeIdentifier::BigInt => Ok(PrismaValue::BigInt(n.as_i64().ok_or_else(|| { + build_conversion_error(&format!("Number({n})"), &format!("{:?}", sf.type_identifier())) + })?)), + TypeIdentifier::Float | TypeIdentifier::Decimal => { + let bd = n + .as_f64() + .and_then(BigDecimal::from_f64) + .map(|bd| bd.normalized()) + .ok_or_else(|| { + build_conversion_error(&format!("Number({n})"), &format!("{:?}", sf.type_identifier())) + })?; + + Ok(PrismaValue::Float(bd)) + } + _ => Err(build_conversion_error( + &format!("Number({n})"), + &format!("{:?}", sf.type_identifier()), + )), + }, + serde_json::Value::String(s) => match sf.type_identifier() { + TypeIdentifier::String => Ok(PrismaValue::String(s)), + TypeIdentifier::Enum(_) => Ok(PrismaValue::Enum(s)), + TypeIdentifier::DateTime => Ok(PrismaValue::DateTime(parse_datetime(&format!("{s}Z")).map_err( + |err| { + build_conversion_error_with_reason( + &format!("String({s})"), + &format!("{:?}", sf.type_identifier()), + &err.to_string(), + ) + }, + )?)), + TypeIdentifier::UUID => Ok(PrismaValue::Uuid(uuid::Uuid::parse_str(&s).map_err(|err| { + build_conversion_error_with_reason( + &format!("String({s})"), + &format!("{:?}", sf.type_identifier()), + &err.to_string(), + ) + })?)), + TypeIdentifier::Bytes => { + // We skip the first two characters because they are the \x prefix. + let bytes = hex::decode(&s[2..]).map_err(|err| { + build_conversion_error_with_reason( + &format!("String({s})"), + &format!("{:?}", sf.type_identifier()), + &err.to_string(), + ) + })?; + + Ok(PrismaValue::Bytes(bytes)) + } + _ => Err(build_conversion_error( + &format!("String({s})"), + &format!("{:?}", sf.type_identifier()), + )), + }, + serde_json::Value::Array(values) => Ok(PrismaValue::List( + values + .into_iter() + .map(|v| coerce_json_scalar_to_pv(v, sf)) + .collect::>>()?, + )), + serde_json::Value::Object(_) => unreachable!("Objects should be caught by the json catch-all above."), + } +} + +fn build_conversion_error(from: &str, to: &str) -> SqlError { + let error = io::Error::new( + io::ErrorKind::InvalidData, + format!("Unexpected conversion failure from {from} to {to}."), + ); + + SqlError::ConversionError(error.into()) +} + +fn build_conversion_error_with_reason(from: &str, to: &str, reason: &str) -> SqlError { + let error = io::Error::new( + io::ErrorKind::InvalidData, + format!("Unexpected conversion failure from {from} to {to}. 
Reason: ${reason}"), + ); + + SqlError::ConversionError(error.into()) +} diff --git a/query-engine/connectors/sql-query-connector/src/database/operations/mod.rs b/query-engine/connectors/sql-query-connector/src/database/operations/mod.rs index 5a10395a788..b4eadcceb22 100644 --- a/query-engine/connectors/sql-query-connector/src/database/operations/mod.rs +++ b/query-engine/connectors/sql-query-connector/src/database/operations/mod.rs @@ -1,3 +1,4 @@ +pub mod coerce; pub mod read; pub(crate) mod update; pub mod upsert; diff --git a/query-engine/connectors/sql-query-connector/src/database/operations/read.rs b/query-engine/connectors/sql-query-connector/src/database/operations/read.rs index 4d33fe3d2ff..a561c61e6f3 100644 --- a/query-engine/connectors/sql-query-connector/src/database/operations/read.rs +++ b/query-engine/connectors/sql-query-connector/src/database/operations/read.rs @@ -1,3 +1,4 @@ +use super::coerce::coerce_record_with_json_relation; use crate::{ column_metadata, model_extensions::*, @@ -5,12 +6,64 @@ use crate::{ query_builder::{self, read}, Context, QueryExt, Queryable, SqlError, }; + use connector_interface::*; use futures::stream::{FuturesUnordered, StreamExt}; use quaint::ast::*; use query_structure::*; pub(crate) async fn get_single_record( + conn: &dyn Queryable, + model: &Model, + filter: &Filter, + selected_fields: &FieldSelection, + aggr_selections: &[RelAggregationSelection], + relation_load_strategy: RelationLoadStrategy, + ctx: &Context<'_>, +) -> crate::Result> { + match relation_load_strategy { + RelationLoadStrategy::Join => get_single_record_joins(conn, model, filter, selected_fields, ctx).await, + RelationLoadStrategy::Query => { + get_single_record_wo_joins( + conn, + model, + filter, + &ModelProjection::from(selected_fields), + aggr_selections, + ctx, + ) + .await + } + } +} + +pub(crate) async fn get_single_record_joins( + conn: &dyn Queryable, + model: &Model, + filter: &Filter, + selected_fields: &FieldSelection, + ctx: &Context<'_>, +) -> crate::Result> { + let field_names: Vec<_> = selected_fields.db_names().collect(); + let idents = selected_fields.type_identifiers_with_arities(); + let rs_indexes = get_relation_selection_indexes(selected_fields.relations().collect(), &field_names); + + let query = query_builder::select::SelectBuilder::default().build( + QueryArguments::from((model.clone(), filter.clone())), + selected_fields, + ctx, + ); + + let mut record = execute_find_one(conn, query, &idents, &field_names, ctx).await?; + + if let Some(record) = record.as_mut() { + coerce_record_with_json_relation(record, rs_indexes)?; + }; + + Ok(record.map(|record| SingleRecord { record, field_names })) +} + +pub(crate) async fn get_single_record_wo_joins( conn: &dyn Queryable, model: &Model, filter: &Filter, @@ -39,21 +92,108 @@ pub(crate) async fn get_single_record( idents.append(&mut aggr_idents); - let meta = column_metadata::create(field_names.as_slice(), idents.as_slice()); + let record = execute_find_one(conn, query, &idents, &field_names, ctx) + .await? 
+ .map(|record| SingleRecord { record, field_names }); + + Ok(record) +} + +async fn execute_find_one( + conn: &dyn Queryable, + query: Select<'_>, + idents: &[(TypeIdentifier, FieldArity)], + field_names: &[String], + ctx: &Context<'_>, +) -> crate::Result> { + let meta = column_metadata::create(field_names, idents); - let record = (match conn.find(query, meta.as_slice(), ctx).await { + let row = (match conn.find(query, meta.as_slice(), ctx).await { Ok(result) => Ok(Some(result)), Err(_e @ SqlError::RecordNotFoundForWhere(_)) => Ok(None), Err(_e @ SqlError::RecordDoesNotExist) => Ok(None), Err(e) => Err(e), })? - .map(Record::from) - .map(|record| SingleRecord { record, field_names }); + .map(Record::from); - Ok(record) + Ok(row) } pub(crate) async fn get_many_records( + conn: &dyn Queryable, + model: &Model, + query_arguments: QueryArguments, + selected_fields: &FieldSelection, + aggr_selections: &[RelAggregationSelection], + relation_load_strategy: RelationLoadStrategy, + ctx: &Context<'_>, +) -> crate::Result { + match relation_load_strategy { + RelationLoadStrategy::Join => { + get_many_records_joins(conn, model, query_arguments, selected_fields, aggr_selections, ctx).await + } + RelationLoadStrategy::Query => { + get_many_records_wo_joins( + conn, + model, + query_arguments, + &ModelProjection::from(selected_fields), + aggr_selections, + ctx, + ) + .await + } + } +} + +pub(crate) async fn get_many_records_joins( + conn: &dyn Queryable, + _model: &Model, + query_arguments: QueryArguments, + selected_fields: &FieldSelection, + _aggr_selections: &[RelAggregationSelection], + ctx: &Context<'_>, +) -> crate::Result { + let field_names: Vec<_> = selected_fields.db_names().collect(); + let idents = selected_fields.type_identifiers_with_arities(); + let meta = column_metadata::create(field_names.as_slice(), idents.as_slice()); + let rs_indexes = get_relation_selection_indexes(selected_fields.relations().collect(), &field_names); + + let mut records = ManyRecords::new(field_names.clone()); + + if let Some(0) = query_arguments.take { + return Ok(records); + }; + + match ctx.max_bind_values { + Some(chunk_size) if query_arguments.should_batch(chunk_size) => { + return Err(SqlError::QueryParameterLimitExceeded( + "Joined queries cannot be split into multiple queries.".to_string(), + )); + } + _ => (), + }; + + let query = query_builder::select::SelectBuilder::default().build(query_arguments.clone(), selected_fields, ctx); + + for item in conn.filter(query.into(), meta.as_slice(), ctx).await?.into_iter() { + let mut record = Record::from(item); + + // Coerces json values to prisma values + coerce_record_with_json_relation(&mut record, rs_indexes.clone())?; + + records.push(record) + } + + // Reverses order when using negative take + if query_arguments.needs_reversed_order() { + records.reverse(); + } + + Ok(records) +} + +pub(crate) async fn get_many_records_wo_joins( conn: &dyn Queryable, model: &Model, mut query_arguments: QueryArguments, @@ -290,3 +430,19 @@ async fn group_by_aggregate( .map(|row| row.into_aggregation_results(&selections)) .collect()) } + +/// Find the indexes of the relation records to traverse a set of records faster when coercing JSON values +fn get_relation_selection_indexes<'a>( + selections: Vec<&'a RelationSelection>, + field_names: &[String], +) -> Vec<(usize, &'a RelationSelection)> { + let mut output: Vec<(usize, &RelationSelection)> = Vec::new(); + + for (idx, field_name) in field_names.iter().enumerate() { + if let Some(rs) = selections.iter().find(|rq| 
rq.field.name() == *field_name) { + output.push((idx, rs)); + } + } + + output +} diff --git a/query-engine/connectors/sql-query-connector/src/database/operations/update.rs b/query-engine/connectors/sql-query-connector/src/database/operations/update.rs index 617e02455ab..40ca5ce84fc 100644 --- a/query-engine/connectors/sql-query-connector/src/database/operations/update.rs +++ b/query-engine/connectors/sql-query-connector/src/database/operations/update.rs @@ -21,16 +21,25 @@ pub(crate) async fn update_one_with_selection( selected_fields: FieldSelection, ctx: &Context<'_>, ) -> crate::Result> { - let selected_fields = ModelProjection::from(selected_fields); - // If there's nothing to update, just read the record. // TODO(perf): Technically, if the selectors are fulfilling the field selection, there's no need to perform an additional read. if args.args.is_empty() { let filter = build_update_one_filter(record_filter); - return get_single_record(conn, model, &filter, &selected_fields, &[], ctx).await; + return get_single_record( + conn, + model, + &filter, + &selected_fields, + &[], + RelationLoadStrategy::Query, + ctx, + ) + .await; } + let selected_fields = ModelProjection::from(selected_fields); + let cond = FilterBuilder::without_top_level_joins().visit_filter(build_update_one_filter(record_filter), ctx); let update = build_update_and_set_query(model, args, Some(&selected_fields), ctx).so_that(cond); diff --git a/query-engine/connectors/sql-query-connector/src/database/transaction.rs b/query-engine/connectors/sql-query-connector/src/database/transaction.rs index 7fa9aaf3b5b..35adddb52ab 100644 --- a/query-engine/connectors/sql-query-connector/src/database/transaction.rs +++ b/query-engine/connectors/sql-query-connector/src/database/transaction.rs @@ -8,7 +8,7 @@ use connector_interface::{ }; use prisma_value::PrismaValue; use quaint::prelude::ConnectionInfo; -use query_structure::{prelude::*, Filter, QueryArguments, SelectionResult}; +use query_structure::{prelude::*, Filter, QueryArguments, RelationLoadStrategy, SelectionResult}; use std::collections::HashMap; pub struct SqlConnectorTransaction<'tx> { @@ -69,6 +69,7 @@ impl<'tx> ReadOperations for SqlConnectorTransaction<'tx> { filter: &Filter, selected_fields: &FieldSelection, aggr_selections: &[RelAggregationSelection], + relation_load_strategy: RelationLoadStrategy, trace_id: Option, ) -> connector::Result> { catch(self.connection_info.clone(), async move { @@ -77,8 +78,9 @@ impl<'tx> ReadOperations for SqlConnectorTransaction<'tx> { self.inner.as_queryable(), model, filter, - &selected_fields.into(), + selected_fields, aggr_selections, + relation_load_strategy, &ctx, ) .await @@ -92,6 +94,7 @@ impl<'tx> ReadOperations for SqlConnectorTransaction<'tx> { query_arguments: QueryArguments, selected_fields: &FieldSelection, aggr_selections: &[RelAggregationSelection], + relation_load_strategy: RelationLoadStrategy, trace_id: Option, ) -> connector::Result { catch(self.connection_info.clone(), async move { @@ -100,8 +103,9 @@ impl<'tx> ReadOperations for SqlConnectorTransaction<'tx> { self.inner.as_queryable(), model, query_arguments, - &selected_fields.into(), + selected_fields, aggr_selections, + relation_load_strategy, &ctx, ) .await diff --git a/query-engine/connectors/sql-query-connector/src/filter/alias.rs b/query-engine/connectors/sql-query-connector/src/filter/alias.rs index c7a62bba02a..af3ad932748 100644 --- a/query-engine/connectors/sql-query-connector/src/filter/alias.rs +++ 
b/query-engine/connectors/sql-query-connector/src/filter/alias.rs @@ -16,7 +16,7 @@ pub enum AliasMode { #[derive(Clone, Copy, Debug, Default)] /// Aliasing tool to count the nesting level to help with heavily nested /// self-related queries. -pub(crate) struct Alias { +pub struct Alias { counter: usize, mode: AliasMode, } @@ -49,6 +49,10 @@ impl Alias { AliasMode::Join => format!("j{}", self.counter), } } + + pub fn to_table_string(&self) -> String { + self.to_string(Some(AliasMode::Table)) + } } pub(crate) trait AliasedColumn { diff --git a/query-engine/connectors/sql-query-connector/src/filter/mod.rs b/query-engine/connectors/sql-query-connector/src/filter/mod.rs index b9ae856ef65..573024845b4 100644 --- a/query-engine/connectors/sql-query-connector/src/filter/mod.rs +++ b/query-engine/connectors/sql-query-connector/src/filter/mod.rs @@ -1,9 +1,9 @@ -mod alias; +pub mod alias; mod visitor; use quaint::prelude::*; use query_structure::Filter; -use visitor::*; +pub use visitor::*; use crate::{context::Context, join_utils::AliasedJoin}; diff --git a/query-engine/connectors/sql-query-connector/src/filter/visitor.rs b/query-engine/connectors/sql-query-connector/src/filter/visitor.rs index 1a71cdd824a..b27ab539e60 100644 --- a/query-engine/connectors/sql-query-connector/src/filter/visitor.rs +++ b/query-engine/connectors/sql-query-connector/src/filter/visitor.rs @@ -27,7 +27,7 @@ pub(crate) trait FilterVisitorExt { } #[derive(Debug, Clone, Default)] -pub(crate) struct FilterVisitor { +pub struct FilterVisitor { /// The last alias that's been rendered. last_alias: Option, /// The parent alias, used when rendering nested filters so that a child filter can refer to its join. @@ -68,6 +68,11 @@ impl FilterVisitor { self.parent_alias } + pub fn set_parent_alias_opt(mut self, alias: Option) -> Self { + self.parent_alias = alias; + self + } + /// A top-level join can be rendered if we're explicitly allowing it or if we're in a nested visitor. 
fn can_render_join(&self) -> bool { self.with_top_level_joins || self.is_nested diff --git a/query-engine/connectors/sql-query-connector/src/model_extensions/selection_result.rs b/query-engine/connectors/sql-query-connector/src/model_extensions/selection_result.rs index 51eb7768d06..9cf94e06d42 100644 --- a/query-engine/connectors/sql-query-connector/src/model_extensions/selection_result.rs +++ b/query-engine/connectors/sql-query-connector/src/model_extensions/selection_result.rs @@ -34,9 +34,10 @@ impl SelectionResultExt for SelectionResult { fn db_values<'a>(&self, ctx: &Context<'_>) -> Vec> { self.pairs .iter() - .map(|(selection, v)| match selection { - SelectedField::Scalar(sf) => sf.value(v.clone(), ctx), - SelectedField::Composite(_cf) => todo!(), // [Composites] todo + .filter_map(|(selection, v)| match selection { + SelectedField::Scalar(sf) => Some(sf.value(v.clone(), ctx)), + SelectedField::Composite(_) => None, + SelectedField::Relation(_) => None, }) .collect() } diff --git a/query-engine/connectors/sql-query-connector/src/ordering.rs b/query-engine/connectors/sql-query-connector/src/ordering.rs index 5f61d0c3a90..ade10aaa716 100644 --- a/query-engine/connectors/sql-query-connector/src/ordering.rs +++ b/query-engine/connectors/sql-query-connector/src/ordering.rs @@ -18,16 +18,23 @@ pub(crate) struct OrderByDefinition { #[derive(Debug, Default)] pub(crate) struct OrderByBuilder { + parent_alias: Option, // Used to generate unique join alias join_counter: usize, } +impl OrderByBuilder { + pub(crate) fn with_parent_alias(mut self, alias: Option) -> Self { + self.parent_alias = alias; + self + } +} + impl OrderByBuilder { /// Builds all expressions for an `ORDER BY` clause based on the query arguments. pub(crate) fn build(&mut self, query_arguments: &QueryArguments, ctx: &Context<'_>) -> Vec { let needs_reversed_order = query_arguments.needs_reversed_order(); - // The index is used to differentiate potentially separate relations to the same model. query_arguments .order_by .iter() @@ -69,7 +76,11 @@ impl OrderByBuilder { needs_reversed_order: bool, ctx: &Context<'_>, ) -> OrderByDefinition { - let columns: Vec = order_by.fields.iter().map(|sf| sf.as_column(ctx).into()).collect(); + let columns: Vec = order_by + .fields + .iter() + .map(|sf| sf.as_column(ctx).opt_table(self.parent_alias.clone()).into()) + .collect(); let order_column: Expression = text_search_relevance(&columns, order_by.search.clone()).into(); let order: Option = Some(into_order(&order_by.sort_order, None, needs_reversed_order)); let order_definition: OrderDefinition = (order_column.clone(), order); @@ -143,9 +154,12 @@ impl OrderByBuilder { // Unwraps are safe because the SQL connector doesn't yet support any other type of orderBy hop but the relation hop. 
let mut joins: Vec = vec![]; + let parent_alias = self.parent_alias.clone(); + for (i, hop) in rest_hops.iter().enumerate() { let previous_join = if i > 0 { joins.get(i - 1) } else { None }; - let previous_alias = previous_join.map(|j| j.alias.as_str()); + + let previous_alias = previous_join.map(|j| j.alias.as_str()).or(parent_alias.as_deref()); let join = compute_one2m_join(hop.as_relation_hop().unwrap(), &self.join_prefix(), previous_alias, ctx); joins.push(join); @@ -156,7 +170,7 @@ impl OrderByBuilder { _ => unreachable!("Order by relation aggregation other than count are not supported"), }; - let previous_alias = joins.last().map(|j| j.alias.as_str()); + let previous_alias = joins.last().map(|j| j.alias.as_str()).or(parent_alias.as_deref()); // We perform the aggregation on the last join let last_aggr_join = compute_aggr_join( @@ -185,9 +199,14 @@ impl OrderByBuilder { ) -> (Vec, Column<'static>) { let mut joins: Vec = vec![]; + let parent_alias = self.parent_alias.clone(); + for (i, hop) in order_by.path.iter().enumerate() { let previous_join = if i > 0 { joins.get(i - 1) } else { None }; - let previous_alias = previous_join.map(|j| j.alias.as_str()); + let previous_alias = previous_join + .map(|j| &j.alias) + .or(parent_alias.as_ref()) + .map(|alias| alias.as_str()); let join = compute_one2m_join(hop.as_relation_hop().unwrap(), &self.join_prefix(), previous_alias, ctx); joins.push(join); @@ -201,7 +220,7 @@ impl OrderByBuilder { let order_by_column = if let Some(last_join) = joins.last() { Column::from((last_join.alias.to_owned(), order_by.field.db_name().to_owned())) } else { - order_by.field.as_column(ctx) + order_by.field.as_column(ctx).opt_table(self.parent_alias.clone()) }; (joins, order_by_column) diff --git a/query-engine/connectors/sql-query-connector/src/query_builder/mod.rs b/query-engine/connectors/sql-query-connector/src/query_builder/mod.rs index b605d076eed..7f16b84f95f 100644 --- a/query-engine/connectors/sql-query-connector/src/query_builder/mod.rs +++ b/query-engine/connectors/sql-query-connector/src/query_builder/mod.rs @@ -1,4 +1,5 @@ pub(crate) mod read; +pub(crate) mod select; pub(crate) mod write; use crate::context::Context; diff --git a/query-engine/connectors/sql-query-connector/src/query_builder/select.rs b/query-engine/connectors/sql-query-connector/src/query_builder/select.rs new file mode 100644 index 00000000000..d2ef3e62b34 --- /dev/null +++ b/query-engine/connectors/sql-query-connector/src/query_builder/select.rs @@ -0,0 +1,407 @@ +use std::borrow::Cow; +use tracing::Span; + +use crate::{ + context::Context, + filter::alias::{Alias, AliasMode}, + model_extensions::{AsColumn, AsColumns, AsTable, ColumnIterator, RelationFieldExt}, + ordering::OrderByBuilder, + sql_trace::SqlTraceComment, +}; + +use quaint::prelude::*; +use query_structure::*; + +pub const JSON_AGG_IDENT: &str = "__prisma_data__"; + +#[derive(Debug, Default)] +pub(crate) struct SelectBuilder { + alias: Alias, +} + +impl SelectBuilder { + pub(crate) fn next_alias(&mut self) -> Alias { + self.alias = self.alias.inc(AliasMode::Table); + self.alias + } + + pub(crate) fn build( + &mut self, + args: QueryArguments, + selected_fields: &FieldSelection, + ctx: &Context<'_>, + ) -> Select<'static> { + let table_alias = self.next_alias(); + let table = args.model().as_table(ctx).alias(table_alias.to_table_string()); + + // SELECT ... 
FROM Table "t1" + let select = Select::from_table(table) + .with_selection(selected_fields, table_alias, ctx) + .with_ordering(&args, Some(table_alias.to_table_string()), ctx) + .with_pagination(args.take_abs(), args.skip) + .with_filters(args.filter, Some(table_alias), ctx) + .append_trace(&Span::current()) + .add_trace_id(ctx.trace_id); + + // Adds joins for relations + self.with_related_queries(select, selected_fields.relations(), table_alias, ctx) + } + + fn with_related_queries<'a, 'b>( + &mut self, + input: Select<'a>, + relation_selections: impl Iterator, + parent_alias: Alias, + ctx: &Context<'_>, + ) -> Select<'a> { + relation_selections.fold(input, |acc, rs| self.with_related_query(acc, rs, parent_alias, ctx)) + } + + fn with_related_query<'a>( + &mut self, + select: Select<'a>, + rs: &RelationSelection, + parent_alias: Alias, + ctx: &Context<'_>, + ) -> Select<'a> { + if rs.field.relation().is_many_to_many() { + // m2m relations need to left join on the relation table first + let m2m_join = self.build_m2m_join(rs, parent_alias, ctx); + + select.left_join(m2m_join) + } else { + let join_table_alias = join_alias_name(&rs.field); + let join_table = + Table::from(self.build_related_query_select(rs, parent_alias, ctx)).alias(join_table_alias); + + // LEFT JOIN LATERAL ( ) AS ON TRUE + select.left_join(join_table.on(ConditionTree::single(true.raw())).lateral()) + } + } + + fn build_related_query_select( + &mut self, + rs: &RelationSelection, + parent_alias: Alias, + ctx: &Context<'_>, + ) -> Select<'static> { + let inner_root_table_alias = self.next_alias(); + let root_alias = self.next_alias(); + let inner_alias = self.next_alias(); + let middle_alias = self.next_alias(); + + let related_table = rs + .related_model() + .as_table(ctx) + .alias(inner_root_table_alias.to_table_string()); + + // SELECT * FROM "Table" as WHERE parent.id = child.parent_id + let root = Select::from_table(related_table) + .with_join_conditions(&rs.field, parent_alias, inner_root_table_alias, ctx) + .comment("root select"); + + // SELECT JSON_BUILD_OBJECT() FROM ( ) + let inner = Select::from_table(Table::from(root).alias(root_alias.to_table_string())) + .value(build_json_obj_fn(rs, ctx, root_alias).alias(JSON_AGG_IDENT)); + + // LEFT JOIN LATERAL () AS ON TRUE + let inner = self.with_related_queries(inner, rs.relations(), root_alias, ctx); + + let linking_fields = rs.field.related_field().linking_fields(); + + if rs.field.relation().is_many_to_many() { + let selection: Vec> = FieldSelection::union(vec![order_by_selection(rs), linking_fields]) + .into_projection() + .as_columns(ctx) + .map(|c| c.table(root_alias.to_table_string())) + .collect(); + + // SELECT , + inner.with_columns(selection.into()) + } else { + // select ordering, filtering & join fields from child selections to order, filter & join them on the outer query + let inner_selection: Vec> = FieldSelection::union(vec![ + order_by_selection(rs), + filtering_selection(rs), + relation_selection(rs), + ]) + .into_projection() + .as_columns(ctx) + .map(|c| c.table(root_alias.to_table_string())) + .collect(); + + let inner = inner.with_columns(inner_selection.into()).comment("inner select"); + + let middle = Select::from_table(Table::from(inner).alias(inner_alias.to_table_string())) + // SELECT . + .column(Column::from((inner_alias.to_table_string(), JSON_AGG_IDENT))) + // ORDER BY ... + .with_ordering(&rs.args, Some(inner_alias.to_table_string()), ctx) + // WHERE ... 
+ .with_filters(rs.args.filter.clone(), Some(inner_alias), ctx) + // LIMIT $1 OFFSET $2 + .with_pagination(rs.args.take_abs(), rs.args.skip) + .comment("middle select"); + + // SELECT COALESCE(JSON_AGG(), '[]') AS FROM ( ) as + Select::from_table(Table::from(middle).alias(middle_alias.to_table_string())) + .value(json_agg()) + .comment("outer select") + } + } + + fn build_m2m_join<'a>(&mut self, rs: &RelationSelection, parent_alias: Alias, ctx: &Context<'_>) -> JoinData<'a> { + let rf = rs.field.clone(); + let m2m_alias = m2m_join_alias_name(&rf); + let m2m_table_alias = self.next_alias(); + + let left_columns = rf.related_field().m2m_columns(ctx); + let right_columns = ModelProjection::from(rf.model().primary_identifier()).as_columns(ctx); + + let join_conditions = left_columns + .into_iter() + .zip(right_columns) + .fold(None::, |acc, (a, b)| { + let a = a.table(m2m_table_alias.to_table_string()); + let b = b.table(parent_alias.to_table_string()); + let condition = a.equals(b); + + match acc { + Some(acc) => Some(acc.and(condition)), + None => Some(condition.into()), + } + }) + .unwrap(); + + let m2m_join_data = Table::from(self.build_related_query_select(rs, m2m_table_alias, ctx)) + .alias(join_alias_name(&rf)) + .on(ConditionTree::single(true.raw())) + .lateral(); + + let child_table = rf.as_table(ctx).alias(m2m_table_alias.to_table_string()); + + let inner = Select::from_table(child_table) + .value(Column::from((join_alias_name(&rf), JSON_AGG_IDENT))) + .left_join(m2m_join_data) // join m2m table + .and_where(join_conditions) // adds join condition to the child table + .with_ordering(&rs.args, Some(join_alias_name(&rs.field)), ctx) // adds ordering stmts + .with_filters(rs.args.filter.clone(), None, ctx) // adds query filters // TODO: avoid clone filter + .with_pagination(rs.args.take_abs(), rs.args.skip); // adds pagination + + let outer = Select::from_table(Table::from(inner).alias(format!("{}_1", m2m_alias))).value(json_agg()); + + Table::from(outer) + .alias(m2m_alias) + .on(ConditionTree::single(true.raw())) + .lateral() + } +} + +trait SelectBuilderExt<'a> { + fn with_filters(self, filter: Option, parent_alias: Option, ctx: &Context<'_>) -> Select<'a>; + fn with_pagination(self, take: Option, skip: Option) -> Select<'a>; + fn with_ordering(self, args: &QueryArguments, parent_alias: Option, ctx: &Context<'_>) -> Select<'a>; + fn with_join_conditions( + self, + rf: &RelationField, + parent_alias: Alias, + child_alias: Alias, + ctx: &Context<'_>, + ) -> Select<'a>; + fn with_selection(self, selected_fields: &FieldSelection, table_alias: Alias, ctx: &Context<'_>) -> Select<'a>; + fn with_columns(self, columns: ColumnIterator) -> Select<'a>; +} + +impl<'a> SelectBuilderExt<'a> for Select<'a> { + fn with_filters(self, filter: Option, parent_alias: Option, ctx: &Context<'_>) -> Select<'a> { + use crate::filter::*; + + if let Some(filter) = filter { + let mut visitor = crate::filter::FilterVisitor::with_top_level_joins().set_parent_alias_opt(parent_alias); + let (filter, joins) = visitor.visit_filter(filter, ctx); + let select = self.and_where(filter); + + match joins { + Some(joins) => joins.into_iter().fold(select, |acc, join| acc.join(join.data)), + None => select, + } + } else { + self + } + } + + fn with_pagination(self, take: Option, skip: Option) -> Select<'a> { + let select = match take { + Some(take) => self.limit(take as usize), + None => self, + }; + + let select = match skip { + Some(skip) => select.offset(skip as usize), + None => select, + }; + + select + } + + fn 
with_ordering(self, args: &QueryArguments, parent_alias: Option, ctx: &Context<'_>) -> Select<'a> { + let order_by_definitions = OrderByBuilder::default() + .with_parent_alias(parent_alias) + .build(args, ctx); + + let select = order_by_definitions + .iter() + .flat_map(|j| &j.joins) + .fold(self, |acc, join| acc.join(join.clone().data)); + + order_by_definitions + .iter() + .fold(select, |acc, o| acc.order_by(o.order_definition.clone())) + } + + fn with_join_conditions( + self, + rf: &RelationField, + parent_alias: Alias, + child_alias: Alias, + ctx: &Context<'_>, + ) -> Select<'a> { + let join_columns = rf.join_columns(ctx); + let related_join_columns = ModelProjection::from(rf.related_field().linking_fields()).as_columns(ctx); + + // WHERE Parent.id = Child.id + let conditions = join_columns + .zip(related_join_columns) + .fold(None::, |acc, (a, b)| { + let a = a.table(parent_alias.to_table_string()); + let b = b.table(child_alias.to_table_string()); + let condition = a.equals(b); + + match acc { + Some(acc) => Some(acc.and(condition)), + None => Some(condition.into()), + } + }) + .unwrap(); + + self.and_where(conditions) + } + + fn with_selection(self, selected_fields: &FieldSelection, table_alias: Alias, ctx: &Context<'_>) -> Select<'a> { + selected_fields + .selections() + .fold(self, |acc, selection| match selection { + SelectedField::Scalar(sf) => acc.column( + sf.as_column(ctx) + .table(table_alias.to_table_string()) + .set_is_selected(true), + ), + SelectedField::Relation(rs) => { + let table_name = match rs.field.relation().is_many_to_many() { + true => m2m_join_alias_name(&rs.field), + false => join_alias_name(&rs.field), + }; + + acc.value(Column::from((table_name, JSON_AGG_IDENT)).alias(rs.field.name().to_owned())) + } + _ => acc, + }) + } + + fn with_columns(self, columns: ColumnIterator) -> Select<'a> { + columns.into_iter().fold(self, |select, col| select.column(col)) + } +} + +fn build_json_obj_fn(rs: &RelationSelection, ctx: &Context<'_>, root_alias: Alias) -> Function<'static> { + let build_obj_params = rs + .selections + .iter() + .filter_map(|f| match f { + SelectedField::Scalar(sf) => Some(( + Cow::from(sf.db_name().to_owned()), + Expression::from(sf.as_column(ctx).table(root_alias.to_table_string())), + )), + SelectedField::Relation(rs) => { + let table_name = match rs.field.relation().is_many_to_many() { + true => m2m_join_alias_name(&rs.field), + false => join_alias_name(&rs.field), + }; + + Some(( + Cow::from(rs.field.name().to_owned()), + Expression::from(Column::from((table_name, JSON_AGG_IDENT))), + )) + } + _ => None, + }) + .collect(); + + json_build_object(build_obj_params) +} + +fn order_by_selection(rs: &RelationSelection) -> FieldSelection { + let selection: Vec<_> = rs + .args + .order_by + .iter() + .flat_map(|order_by| match order_by { + OrderBy::Scalar(x) if x.path.is_empty() => vec![x.field.clone()], + OrderBy::Relevance(x) => x.fields.clone(), + _ => Vec::new(), + }) + .collect(); + + FieldSelection::from(selection) +} + +fn relation_selection(rs: &RelationSelection) -> FieldSelection { + let relation_fields = rs.relations().flat_map(|rs| join_fields(&rs.field)).collect::>(); + + FieldSelection::from(relation_fields) +} + +fn filtering_selection(rs: &RelationSelection) -> FieldSelection { + if let Some(filter) = &rs.args.filter { + FieldSelection::from(extract_filter_scalars(filter)) + } else { + FieldSelection::default() + } +} + +fn extract_filter_scalars(f: &Filter) -> Vec { + match f { + Filter::And(x) => 
x.iter().flat_map(extract_filter_scalars).collect(), + Filter::Or(x) => x.iter().flat_map(extract_filter_scalars).collect(), + Filter::Not(x) => x.iter().flat_map(extract_filter_scalars).collect(), + Filter::Scalar(x) => x.scalar_fields().into_iter().map(ToOwned::to_owned).collect(), + Filter::ScalarList(x) => vec![x.field.clone()], + Filter::OneRelationIsNull(x) => join_fields(&x.field), + Filter::Relation(x) => join_fields(&x.field), + _ => Vec::new(), + } +} + +fn join_fields(rf: &RelationField) -> Vec { + if rf.is_inlined_on_enclosing_model() { + rf.scalar_fields() + } else { + rf.related_field().referenced_fields() + } +} + +fn join_alias_name(rf: &RelationField) -> String { + format!("{}_{}", rf.model().name(), rf.name()) +} + +fn m2m_join_alias_name(rf: &RelationField) -> String { + format!("{}_{}_m2m", rf.model().name(), rf.name()) +} + +fn json_agg() -> Function<'static> { + coalesce(vec![ + json_array_agg(Column::from(JSON_AGG_IDENT)).into(), + Expression::from("[]".raw()), + ]) + .alias(JSON_AGG_IDENT) +} diff --git a/query-engine/core/src/interpreter/interpreter_impl.rs b/query-engine/core/src/interpreter/interpreter_impl.rs index 8aa3d77ae76..f1011b13f8f 100644 --- a/query-engine/core/src/interpreter/interpreter_impl.rs +++ b/query-engine/core/src/interpreter/interpreter_impl.rs @@ -72,6 +72,13 @@ impl ExpressionResult { .into_iter() .collect(), ), + QueryResult::RecordSelectionWithRelations(rsr) => Some( + rsr.records + .extract_selection_results(field_selection) + .expect("Expected record selection to contain required model ID fields.") + .into_iter() + .collect(), + ), QueryResult::RecordSelection(None) => Some(vec![]), _ => None, diff --git a/query-engine/core/src/interpreter/query_interpreters/nested_read.rs b/query-engine/core/src/interpreter/query_interpreters/nested_read.rs index 8573e2853c4..fd2e28ad555 100644 --- a/query-engine/core/src/interpreter/query_interpreters/nested_read.rs +++ b/query-engine/core/src/interpreter/query_interpreters/nested_read.rs @@ -69,6 +69,7 @@ pub(crate) async fn m2m( args, &query.selected_fields, &query.aggregation_selections, + RelationLoadStrategy::Query, trace_id.clone(), ) .await? @@ -210,6 +211,7 @@ pub async fn one2m( args, selected_fields, &aggr_selections, + RelationLoadStrategy::Query, trace_id, ) .await? 
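For readers following the new `SelectBuilder` above, the overall query it assembles for a plain one-to-many selection looks roughly like the sketch below. This is illustrative only: the `Parent`/`Child` tables, their columns, the pagination values and the `t1`..`t5` aliases are invented for the example, and the PostgreSQL spellings of the JSON functions are assumed; only the `__prisma_data__` identifier (JSON_AGG_IDENT), the `ON TRUE` lateral join and the Model_field join alias scheme (here `Parent_children`) are taken from the code itself.

    SELECT
        "t1"."id", "t1"."name",
        "Parent_children"."__prisma_data__" AS "children"
    FROM "Parent" AS "t1"
    LEFT JOIN LATERAL (
        -- outer select: aggregate all child rows into one JSON array, defaulting to '[]'
        SELECT COALESCE(JSON_AGG("__prisma_data__"), '[]') AS "__prisma_data__"
        FROM (
            -- middle select: ordering, filtering and pagination of the child rows
            SELECT "t4"."__prisma_data__"
            FROM (
                -- inner select: build one JSON object per child row,
                -- carrying the ordering/linking columns along
                SELECT JSON_BUILD_OBJECT('id', "t3"."id", 'name', "t3"."name") AS "__prisma_data__",
                       "t3"."id"
                FROM (
                    -- root select: correlate the child rows with the current parent row
                    SELECT "t2".* FROM "Child" AS "t2" WHERE "t1"."id" = "t2"."parentId"
                ) AS "t3"
            ) AS "t4"
            ORDER BY "t4"."id"
            LIMIT 10 OFFSET 0
        ) AS "t5"
    ) AS "Parent_children" ON TRUE;

Each relation therefore comes back as a single JSON column per parent row, which `coerce_record_with_json_relation` then converts back into `PrismaValue`s.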
diff --git a/query-engine/core/src/interpreter/query_interpreters/read.rs b/query-engine/core/src/interpreter/query_interpreters/read.rs index cac53ef5900..8fc0b10d67b 100644 --- a/query-engine/core/src/interpreter/query_interpreters/read.rs +++ b/query-engine/core/src/interpreter/query_interpreters/read.rs @@ -2,7 +2,7 @@ use super::{inmemory_record_processor::InMemoryRecordProcessor, *}; use crate::{interpreter::InterpretationResult, query_ast::*, result_ast::*}; use connector::{self, error::ConnectorError, ConnectionLike, RelAggregationRow, RelAggregationSelection}; use futures::future::{BoxFuture, FutureExt}; -use query_structure::ManyRecords; +use query_structure::{ManyRecords, RelationLoadStrategy, RelationSelection}; use std::collections::HashMap; use user_facing_errors::KnownError; @@ -39,12 +39,13 @@ fn read_one( &filter, &query.selected_fields, &query.aggregation_selections, + query.relation_load_strategy, trace_id, ) .await?; match scalars { - Some(record) => { + Some(record) if query.relation_load_strategy.is_query() => { let scalars: ManyRecords = record.into(); let (scalars, aggregation_rows) = extract_aggregation_rows_from_scalars(scalars, query.aggregation_selections); @@ -60,6 +61,18 @@ fn read_one( } .into()) } + Some(record) => { + let records: ManyRecords = record.into(); + + Ok(RecordSelectionWithRelations { + name: query.name, + model, + fields: query.selection_order, + records, + nested: build_relation_record_selection(query.selected_fields.relations()), + } + .into()) + } None if query.options.contains(QueryOption::ThrowOnEmpty) => record_not_found(), @@ -84,6 +97,17 @@ fn read_one( /// are distinct by definition if a unique is in the selection set. /// -> Unstable cursors can't reliably be fetched by the underlying datasource, so we need to process part of it in-memory. 
 fn read_many(
+    tx: &mut dyn ConnectionLike,
+    query: ManyRecordsQuery,
+    trace_id: Option<String>,
+) -> BoxFuture<'_, InterpretationResult<QueryResult>> {
+    match query.relation_load_strategy {
+        RelationLoadStrategy::Join => read_many_by_joins(tx, query, trace_id),
+        RelationLoadStrategy::Query => read_many_by_queries(tx, query, trace_id),
+    }
+}
+
+fn read_many_by_queries(
     tx: &mut dyn ConnectionLike,
     mut query: ManyRecordsQuery,
     trace_id: Option<String>,
@@ -101,6 +125,7 @@ fn read_many(
             query.args.clone(),
             &query.selected_fields,
             &query.aggregation_selections,
+            query.relation_load_strategy,
             trace_id,
         )
         .await?;
@@ -132,6 +157,53 @@ fn read_many(
     fut.boxed()
 }
 
+fn read_many_by_joins(
+    tx: &mut dyn ConnectionLike,
+    query: ManyRecordsQuery,
+    trace_id: Option<String>,
+) -> BoxFuture<'_, InterpretationResult<QueryResult>> {
+    let fut = async move {
+        let result = tx
+            .get_many_records(
+                &query.model,
+                query.args.clone(),
+                &query.selected_fields,
+                &query.aggregation_selections,
+                query.relation_load_strategy,
+                trace_id,
+            )
+            .await?;
+
+        if result.records.is_empty() && query.options.contains(QueryOption::ThrowOnEmpty) {
+            record_not_found()
+        } else {
+            Ok(RecordSelectionWithRelations {
+                name: query.name,
+                fields: query.selection_order,
+                records: result,
+                nested: build_relation_record_selection(query.selected_fields.relations()),
+                model: query.model,
+            }
+            .into())
+        }
+    };
+
+    fut.boxed()
+}
+
+fn build_relation_record_selection<'a>(
+    selections: impl Iterator<Item = &'a RelationSelection>,
+) -> Vec<RelationRecordSelection> {
+    selections
+        .map(|rq| RelationRecordSelection {
+            name: rq.field.name().to_owned(),
+            fields: rq.result_fields.clone(),
+            model: rq.field.related_model(),
+            nested: build_relation_record_selection(rq.relations()),
+        })
+        .collect()
+}
+
 /// Queries related records for a set of parent IDs.
 fn read_related<'conn>(
     tx: &'conn mut dyn ConnectionLike,
diff --git a/query-engine/core/src/query_ast/read.rs b/query-engine/core/src/query_ast/read.rs
index 271ff44e388..a01fce4d953 100644
--- a/query-engine/core/src/query_ast/read.rs
+++ b/query-engine/core/src/query_ast/read.rs
@@ -3,12 +3,12 @@ use super::FilteredQuery;
 use crate::ToGraphviz;
 use connector::{AggregationSelection, RelAggregationSelection};
 use enumflags2::BitFlags;
-use query_structure::{prelude::*, Filter, QueryArguments};
+use query_structure::{prelude::*, Filter, QueryArguments, RelationLoadStrategy};
 use std::fmt::Display;
 
 #[allow(clippy::enum_variant_names)]
 #[derive(Debug, Clone)]
-pub(crate) enum ReadQuery {
+pub enum ReadQuery {
     RecordQuery(RecordQuery),
     ManyRecordsQuery(ManyRecordsQuery),
     RelatedRecordsQuery(RelatedRecordsQuery),
@@ -55,6 +55,37 @@ impl ReadQuery {
             ReadQuery::AggregateRecordsQuery(x) => x.model.clone(),
         }
     }
+
+    pub(crate) fn has_cursor(&self) -> bool {
+        match self {
+            ReadQuery::RecordQuery(_) => false,
+            ReadQuery::ManyRecordsQuery(q) => q.args.cursor.is_some() || q.nested.iter().any(|q| q.has_cursor()),
+            ReadQuery::RelatedRecordsQuery(q) => q.args.cursor.is_some() || q.nested.iter().any(|q| q.has_cursor()),
+            ReadQuery::AggregateRecordsQuery(_) => false,
+        }
+    }
+
+    pub(crate) fn has_distinct(&self) -> bool {
+        match self {
+            ReadQuery::RecordQuery(_) => false,
+            ReadQuery::ManyRecordsQuery(q) => q.args.distinct.is_some() || q.nested.iter().any(|q| q.has_distinct()),
+            ReadQuery::RelatedRecordsQuery(q) => q.args.distinct.is_some() || q.nested.iter().any(|q| q.has_distinct()),
+            ReadQuery::AggregateRecordsQuery(_) => false,
+        }
+    }
+
+    pub(crate) fn has_aggregation_selections(&self) -> bool {
+        fn has_aggregations(selections: &[RelAggregationSelection], nested:
&[ReadQuery]) -> bool { + !selections.is_empty() || nested.iter().any(|q| q.has_aggregation_selections()) + } + + match self { + ReadQuery::RecordQuery(q) => has_aggregations(&q.aggregation_selections, &q.nested), + ReadQuery::ManyRecordsQuery(q) => has_aggregations(&q.aggregation_selections, &q.nested), + ReadQuery::RelatedRecordsQuery(q) => has_aggregations(&q.aggregation_selections, &q.nested), + ReadQuery::AggregateRecordsQuery(_) => false, + } + } } impl FilteredQuery for ReadQuery { @@ -172,6 +203,7 @@ pub struct RecordQuery { pub selection_order: Vec, pub aggregation_selections: Vec, pub options: QueryOptions, + pub relation_load_strategy: RelationLoadStrategy, } #[derive(Debug, Clone)] @@ -185,6 +217,7 @@ pub struct ManyRecordsQuery { pub selection_order: Vec, pub aggregation_selections: Vec, pub options: QueryOptions, + pub relation_load_strategy: RelationLoadStrategy, } #[derive(Debug, Clone)] @@ -194,7 +227,7 @@ pub struct RelatedRecordsQuery { pub parent_field: RelationFieldRef, pub args: QueryArguments, pub selected_fields: FieldSelection, - pub(crate) nested: Vec, + pub nested: Vec, pub selection_order: Vec, pub aggregation_selections: Vec, @@ -203,6 +236,20 @@ pub struct RelatedRecordsQuery { pub parent_results: Option>, } +impl RelatedRecordsQuery { + pub fn has_cursor(&self) -> bool { + self.args.cursor.is_some() || self.nested.iter().any(|q| q.has_cursor()) + } + + pub fn has_distinct(&self) -> bool { + self.args.distinct.is_some() || self.nested.iter().any(|q| q.has_distinct()) + } + + pub fn has_aggregation_selections(&self) -> bool { + !self.aggregation_selections.is_empty() || self.nested.iter().any(|q| q.has_aggregation_selections()) + } +} + #[derive(Debug, Clone)] pub struct AggregateRecordsQuery { pub name: String, diff --git a/query-engine/core/src/query_graph/mod.rs b/query-engine/core/src/query_graph/mod.rs index 6086fa24333..f1d5896d695 100644 --- a/query-engine/core/src/query_graph/mod.rs +++ b/query-engine/core/src/query_graph/mod.rs @@ -797,6 +797,7 @@ impl QueryGraph { selection_order: vec![], aggregation_selections: vec![], options: QueryOptions::none(), + relation_load_strategy: query_structure::RelationLoadStrategy::Query, }); let reload_query = Query::Read(read_query); diff --git a/query-engine/core/src/query_graph_builder/builder.rs b/query-engine/core/src/query_graph_builder/builder.rs index b7851baf451..1152c4974a4 100644 --- a/query-engine/core/src/query_graph_builder/builder.rs +++ b/query-engine/core/src/query_graph_builder/builder.rs @@ -74,11 +74,11 @@ impl<'a> QueryGraphBuilder<'a> { let query_schema = self.query_schema; let mut graph = match (&query_info.tag, query_info.model.map(|id| self.query_schema.internal_data_model.clone().zip(id))) { - (QueryTag::FindUnique, Some(m)) => read::find_unique(parsed_field, m).map(Into::into), - (QueryTag::FindUniqueOrThrow, Some(m)) => read::find_unique_or_throw(parsed_field, m).map(Into::into), - (QueryTag::FindFirst, Some(m)) => read::find_first(parsed_field, m).map(Into::into), - (QueryTag::FindFirstOrThrow, Some(m)) => read::find_first_or_throw(parsed_field, m).map(Into::into), - (QueryTag::FindMany, Some(m)) => read::find_many(parsed_field, m).map(Into::into), + (QueryTag::FindUnique, Some(m)) => read::find_unique(parsed_field, m, query_schema).map(Into::into), + (QueryTag::FindUniqueOrThrow, Some(m)) => read::find_unique_or_throw(parsed_field, m, query_schema).map(Into::into), + (QueryTag::FindFirst, Some(m)) => read::find_first(parsed_field, m, query_schema).map(Into::into), + 
(QueryTag::FindFirstOrThrow, Some(m)) => read::find_first_or_throw(parsed_field, m, query_schema).map(Into::into), + (QueryTag::FindMany, Some(m)) => read::find_many(parsed_field, m, query_schema).map(Into::into), (QueryTag::Aggregate, Some(m)) => read::aggregate(parsed_field, m).map(Into::into), (QueryTag::GroupBy, Some(m)) => read::group_by(parsed_field, m).map(Into::into), (QueryTag::CreateOne, Some(m)) => QueryGraph::root(|g| write::create_record(g, query_schema, m, parsed_field)), diff --git a/query-engine/core/src/query_graph_builder/read/first.rs b/query-engine/core/src/query_graph_builder/read/first.rs index 84c90016858..1d1b22dc43c 100644 --- a/query-engine/core/src/query_graph_builder/read/first.rs +++ b/query-engine/core/src/query_graph_builder/read/first.rs @@ -1,15 +1,24 @@ use query_structure::Model; +use schema::QuerySchema; use super::*; use crate::ParsedField; -pub(crate) fn find_first(field: ParsedField<'_>, model: Model) -> QueryGraphBuilderResult { - let many_query = many::find_many(field, model)?; +pub(crate) fn find_first( + field: ParsedField<'_>, + model: Model, + query_schema: &QuerySchema, +) -> QueryGraphBuilderResult { + let many_query = many::find_many(field, model, query_schema)?; try_limit_to_one(many_query) } -pub(crate) fn find_first_or_throw(field: ParsedField<'_>, model: Model) -> QueryGraphBuilderResult { - let many_query = many::find_many_or_throw(field, model)?; +pub(crate) fn find_first_or_throw( + field: ParsedField<'_>, + model: Model, + query_schema: &QuerySchema, +) -> QueryGraphBuilderResult { + let many_query = many::find_many_or_throw(field, model, query_schema)?; try_limit_to_one(many_query) } diff --git a/query-engine/core/src/query_graph_builder/read/many.rs b/query-engine/core/src/query_graph_builder/read/many.rs index 6c9242330a8..fe8009681c6 100644 --- a/query-engine/core/src/query_graph_builder/read/many.rs +++ b/query-engine/core/src/query_graph_builder/read/many.rs @@ -1,13 +1,22 @@ -use super::*; +use super::{utils::get_relation_load_strategy, *}; use crate::{query_document::ParsedField, ManyRecordsQuery, QueryOption, QueryOptions, ReadQuery}; use query_structure::Model; +use schema::QuerySchema; -pub(crate) fn find_many(field: ParsedField<'_>, model: Model) -> QueryGraphBuilderResult { - find_many_with_options(field, model, QueryOptions::none()) +pub(crate) fn find_many( + field: ParsedField<'_>, + model: Model, + query_schema: &QuerySchema, +) -> QueryGraphBuilderResult { + find_many_with_options(field, model, QueryOptions::none(), query_schema) } -pub(crate) fn find_many_or_throw(field: ParsedField<'_>, model: Model) -> QueryGraphBuilderResult { - find_many_with_options(field, model, QueryOption::ThrowOnEmpty.into()) +pub(crate) fn find_many_or_throw( + field: ParsedField<'_>, + model: Model, + query_schema: &QuerySchema, +) -> QueryGraphBuilderResult { + find_many_with_options(field, model, QueryOption::ThrowOnEmpty.into(), query_schema) } #[inline] @@ -15,6 +24,7 @@ fn find_many_with_options( field: ParsedField<'_>, model: Model, options: QueryOptions, + query_schema: &QuerySchema, ) -> QueryGraphBuilderResult { let args = extractors::extract_query_args(field.arguments, &model)?; let name = field.name; @@ -23,13 +33,21 @@ fn find_many_with_options( let (aggr_fields_pairs, nested_fields) = extractors::extract_nested_rel_aggr_selections(nested_fields); let aggregation_selections = utils::collect_relation_aggr_selections(aggr_fields_pairs, &model)?; let selection_order: Vec = utils::collect_selection_order(&nested_fields); - let 
selected_fields = utils::collect_selected_fields(&nested_fields, args.distinct.clone(), &model); - let nested = utils::collect_nested_queries(nested_fields, &model)?; + let selected_fields = utils::collect_selected_fields(&nested_fields, args.distinct.clone(), &model, query_schema)?; + let nested = utils::collect_nested_queries(nested_fields, &model, query_schema)?; let model = model; let selected_fields = utils::merge_relation_selections(selected_fields, None, &nested); let selected_fields = utils::merge_cursor_fields(selected_fields, &args.cursor); + let relation_load_strategy = get_relation_load_strategy( + args.cursor.as_ref(), + args.distinct.as_ref(), + &nested, + &aggregation_selections, + query_schema, + ); + Ok(ReadQuery::ManyRecordsQuery(ManyRecordsQuery { name, alias, @@ -40,5 +58,6 @@ fn find_many_with_options( selection_order, aggregation_selections, options, + relation_load_strategy, })) } diff --git a/query-engine/core/src/query_graph_builder/read/one.rs b/query-engine/core/src/query_graph_builder/read/one.rs index d71c2535bb2..e2b6d2b4b94 100644 --- a/query-engine/core/src/query_graph_builder/read/one.rs +++ b/query-engine/core/src/query_graph_builder/read/one.rs @@ -1,15 +1,23 @@ -use super::*; +use super::{utils::get_relation_load_strategy, *}; use crate::{query_document::*, QueryOption, QueryOptions, ReadQuery, RecordQuery}; use query_structure::Model; -use schema::constants::args; +use schema::{constants::args, QuerySchema}; use std::convert::TryInto; -pub(crate) fn find_unique(field: ParsedField<'_>, model: Model) -> QueryGraphBuilderResult { - find_unique_with_options(field, model, QueryOptions::none()) +pub(crate) fn find_unique( + field: ParsedField<'_>, + model: Model, + query_schema: &QuerySchema, +) -> QueryGraphBuilderResult { + find_unique_with_options(field, model, QueryOptions::none(), query_schema) } -pub(crate) fn find_unique_or_throw(field: ParsedField<'_>, model: Model) -> QueryGraphBuilderResult { - find_unique_with_options(field, model, QueryOption::ThrowOnEmpty.into()) +pub(crate) fn find_unique_or_throw( + field: ParsedField<'_>, + model: Model, + query_schema: &QuerySchema, +) -> QueryGraphBuilderResult { + find_unique_with_options(field, model, QueryOption::ThrowOnEmpty.into(), query_schema) } /// Builds a read query from a parsed incoming read query field. 
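The read builders above now receive `query_schema` so each built query can record a relation load strategy. As a rough sketch of the gate this enables (the helper name below is hypothetical; the actual decision is made in `get_relation_load_strategy` further down in this patch):

```rust
use psl::{datamodel_connector::ConnectorCapability, PreviewFeature};
use schema::QuerySchema;

// Illustrative helper (hypothetical name): the join-based strategy is only ever
// considered when the relationJoins preview feature is enabled AND the connector
// reports the LateralJoin capability. Everything else falls back to the existing
// query-based strategy.
fn join_strategy_available(query_schema: &QuerySchema) -> bool {
    query_schema.has_feature(PreviewFeature::RelationJoins)
        && query_schema.has_capability(ConnectorCapability::LateralJoin)
}
```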
@@ -18,6 +26,7 @@ fn find_unique_with_options( mut field: ParsedField<'_>, model: Model, options: QueryOptions, + query_schema: &QuerySchema, ) -> QueryGraphBuilderResult { let filter = match field.arguments.lookup(args::WHERE) { Some(where_arg) => { @@ -34,9 +43,10 @@ fn find_unique_with_options( let (aggr_fields_pairs, nested_fields) = extractors::extract_nested_rel_aggr_selections(nested_fields); let aggregation_selections = utils::collect_relation_aggr_selections(aggr_fields_pairs, &model)?; let selection_order: Vec = utils::collect_selection_order(&nested_fields); - let selected_fields = utils::collect_selected_fields(&nested_fields, None, &model); - let nested = utils::collect_nested_queries(nested_fields, &model)?; + let selected_fields = utils::collect_selected_fields(&nested_fields, None, &model, query_schema)?; + let nested = utils::collect_nested_queries(nested_fields, &model, query_schema)?; let selected_fields = utils::merge_relation_selections(selected_fields, None, &nested); + let relation_load_strategy = get_relation_load_strategy(None, None, &nested, &aggregation_selections, query_schema); Ok(ReadQuery::RecordQuery(RecordQuery { name, @@ -48,5 +58,6 @@ fn find_unique_with_options( selection_order, aggregation_selections, options, + relation_load_strategy, })) } diff --git a/query-engine/core/src/query_graph_builder/read/related.rs b/query-engine/core/src/query_graph_builder/read/related.rs index 9c73699b047..7ebed8a7a06 100644 --- a/query-engine/core/src/query_graph_builder/read/related.rs +++ b/query-engine/core/src/query_graph_builder/read/related.rs @@ -1,11 +1,13 @@ use super::*; use crate::{query_document::ParsedField, ReadQuery, RelatedRecordsQuery}; use query_structure::{Model, RelationFieldRef}; +use schema::QuerySchema; pub(crate) fn find_related( field: ParsedField<'_>, parent: RelationFieldRef, model: Model, + query_schema: &QuerySchema, ) -> QueryGraphBuilderResult { let args = extractors::extract_query_args(field.arguments, &model)?; let name = field.name; @@ -14,8 +16,8 @@ pub(crate) fn find_related( let (aggr_fields_pairs, sub_selections) = extractors::extract_nested_rel_aggr_selections(sub_selections); let aggregation_selections = utils::collect_relation_aggr_selections(aggr_fields_pairs, &model)?; let selection_order: Vec = utils::collect_selection_order(&sub_selections); - let selected_fields = utils::collect_selected_fields(&sub_selections, args.distinct.clone(), &model); - let nested = utils::collect_nested_queries(sub_selections, &model)?; + let selected_fields = utils::collect_selected_fields(&sub_selections, args.distinct.clone(), &model, query_schema)?; + let nested = utils::collect_nested_queries(sub_selections, &model, query_schema)?; let parent_field = parent; let selected_fields = utils::merge_relation_selections(selected_fields, Some(parent_field.clone()), &nested); diff --git a/query-engine/core/src/query_graph_builder/read/utils.rs b/query-engine/core/src/query_graph_builder/read/utils.rs index 545393ba3d1..fc569b94c83 100644 --- a/query-engine/core/src/query_graph_builder/read/utils.rs +++ b/query-engine/core/src/query_graph_builder/read/utils.rs @@ -1,8 +1,12 @@ use super::*; use crate::{ArgumentListLookup, FieldPair, ParsedField, ReadQuery}; use connector::RelAggregationSelection; -use query_structure::prelude::*; -use schema::constants::{aggregations::*, args}; +use psl::{datamodel_connector::ConnectorCapability, PreviewFeature}; +use query_structure::{prelude::*, RelationLoadStrategy}; +use schema::{ + constants::{aggregations::*, args}, 
+ QuerySchema, +}; pub fn collect_selection_order(from: &[FieldPair<'_>]) -> Vec { from.iter() @@ -21,18 +25,19 @@ pub fn collect_selected_fields( from_pairs: &[FieldPair<'_>], distinct: Option, model: &Model, -) -> FieldSelection { + query_schema: &QuerySchema, +) -> QueryGraphBuilderResult { let model_id = model.primary_identifier(); - let selected_fields = pairs_to_selections(model, from_pairs); + let selected_fields = pairs_to_selections(model, from_pairs, query_schema)?; let selection = FieldSelection::new(selected_fields); let selection = model_id.merge(selection); // Distinct fields are always selected because we are processing them in-memory if let Some(distinct) = distinct { - selection.merge(distinct) + Ok(selection.merge(distinct)) } else { - selection + Ok(selection) } } @@ -60,12 +65,20 @@ where .collect() } -fn pairs_to_selections(parent: T, pairs: &[FieldPair<'_>]) -> Vec +fn pairs_to_selections( + parent: T, + pairs: &[FieldPair<'_>], + query_schema: &QuerySchema, +) -> QueryGraphBuilderResult> where T: Into, { + let should_collect_relation_selection = query_schema.has_capability(ConnectorCapability::LateralJoin) + && query_schema.has_feature(PreviewFeature::RelationJoins); + let parent = parent.into(); - pairs + + let selected_fields = pairs .iter() .filter_map(|pair| { parent @@ -73,29 +86,68 @@ where .map(|field| (pair.parsed_field.clone(), field)) }) .flat_map(|field| match field { - (_, Field::Relation(rf)) => rf.scalar_fields().into_iter().map(Into::into).collect(), - (_, Field::Scalar(sf)) => vec![sf.into()], - (pf, Field::Composite(cf)) => vec![extract_composite_selection(pf, cf)], + (pf, Field::Relation(rf)) => { + let mut fields: Vec> = rf + .scalar_fields() + .into_iter() + .map(SelectedField::from) + .map(Ok) + .collect(); + + if should_collect_relation_selection { + fields.push(extract_relation_selection(pf, rf, query_schema)); + } + + fields + } + (_, Field::Scalar(sf)) => vec![Ok(sf.into())], + (pf, Field::Composite(cf)) => vec![extract_composite_selection(pf, cf, query_schema)], }) - .collect() + .collect::, _>>()?; + + Ok(selected_fields) } -fn extract_composite_selection(pf: ParsedField<'_>, cf: CompositeFieldRef) -> SelectedField { +fn extract_composite_selection( + pf: ParsedField<'_>, + cf: CompositeFieldRef, + query_schema: &QuerySchema, +) -> QueryGraphBuilderResult { let object = pf .nested_fields .expect("Invalid composite query shape: Composite field selected without sub-selection."); let typ = cf.typ(); - SelectedField::Composite(CompositeSelection { + Ok(SelectedField::Composite(CompositeSelection { field: cf, - selections: pairs_to_selections(typ, &object.fields), - }) + selections: pairs_to_selections(typ, &object.fields, query_schema)?, + })) +} + +fn extract_relation_selection( + pf: ParsedField<'_>, + rf: RelationFieldRef, + query_schema: &QuerySchema, +) -> QueryGraphBuilderResult { + let object = pf + .nested_fields + .expect("Invalid relation query shape: Relation field selected without sub-selection."); + + let related_model = rf.related_model(); + + Ok(SelectedField::Relation(RelationSelection { + field: rf, + args: extract_query_args(pf.arguments, &related_model)?, + result_fields: collect_selection_order(&object.fields), + selections: pairs_to_selections(related_model, &object.fields, query_schema)?, + })) } pub(crate) fn collect_nested_queries( from: Vec>, model: &Model, + query_schema: &QuerySchema, ) -> QueryGraphBuilderResult> { from.into_iter() .filter_map(|pair| { @@ -112,7 +164,7 @@ pub(crate) fn collect_nested_queries( let 
model = rf.related_model(); let parent = rf.clone(); - Some(related::find_related(pair.parsed_field, parent, model)) + Some(related::find_related(pair.parsed_field, parent, model, query_schema)) } } }) @@ -189,3 +241,26 @@ pub fn collect_relation_aggr_selections( Ok(selections) } + +pub(crate) fn get_relation_load_strategy( + cursor: Option<&SelectionResult>, + distinct: Option<&FieldSelection>, + nested_queries: &[ReadQuery], + aggregation_selections: &[RelAggregationSelection], + query_schema: &QuerySchema, +) -> RelationLoadStrategy { + if query_schema.has_feature(PreviewFeature::RelationJoins) + && query_schema.has_capability(ConnectorCapability::LateralJoin) + && cursor.is_none() + && distinct.is_none() + && aggregation_selections.is_empty() + && !nested_queries.iter().any(|q| match q { + ReadQuery::RelatedRecordsQuery(q) => q.has_cursor() || q.has_distinct() || q.has_aggregation_selections(), + _ => false, + }) + { + RelationLoadStrategy::Join + } else { + RelationLoadStrategy::Query + } +} diff --git a/query-engine/core/src/query_graph_builder/write/create.rs b/query-engine/core/src/query_graph_builder/write/create.rs index 59661c6c16b..dc6ac8dd820 100644 --- a/query-engine/core/src/query_graph_builder/write/create.rs +++ b/query-engine/core/src/query_graph_builder/write/create.rs @@ -32,7 +32,7 @@ pub(crate) fn create_record( let create_node = create::create_record_node(graph, query_schema, model.clone(), data_map)?; // Follow-up read query on the write - let read_query = read::find_unique(field, model.clone())?; + let read_query = read::find_unique(field, model.clone(), query_schema)?; let read_node = graph.create_node(Query::Read(read_query)); graph.add_result_node(&read_node); diff --git a/query-engine/core/src/query_graph_builder/write/delete.rs b/query-engine/core/src/query_graph_builder/write/delete.rs index df6a6643602..c6a7c28e2d8 100644 --- a/query-engine/core/src/query_graph_builder/write/delete.rs +++ b/query-engine/core/src/query_graph_builder/write/delete.rs @@ -21,7 +21,7 @@ pub(crate) fn delete_record( let filter = extract_unique_filter(where_arg.value.try_into()?, &model)?; // Prefetch read query for the delete - let mut read_query = read::find_unique(field, model.clone())?; + let mut read_query = read::find_unique(field, model.clone(), query_schema)?; read_query.add_filter(filter.clone()); let read_node = graph.create_node(Query::Read(read_query)); diff --git a/query-engine/core/src/query_graph_builder/write/update.rs b/query-engine/core/src/query_graph_builder/write/update.rs index 63cf88a3d29..5e275ebc9e8 100644 --- a/query-engine/core/src/query_graph_builder/write/update.rs +++ b/query-engine/core/src/query_graph_builder/write/update.rs @@ -88,7 +88,7 @@ pub(crate) fn update_record( } else { graph.flag_transactional(); - let read_query = read::find_unique(field, model.clone())?; + let read_query = read::find_unique(field, model.clone(), query_schema)?; let read_node = graph.create_node(Query::Read(read_query)); graph.add_result_node(&read_node); diff --git a/query-engine/core/src/query_graph_builder/write/upsert.rs b/query-engine/core/src/query_graph_builder/write/upsert.rs index 92fcd6d12ef..5cba49c5153 100644 --- a/query-engine/core/src/query_graph_builder/write/upsert.rs +++ b/query-engine/core/src/query_graph_builder/write/upsert.rs @@ -70,7 +70,7 @@ pub(crate) fn upsert_record( ); let filter = extract_unique_filter(where_argument, &model)?; - let read_query = read::find_unique(field.clone(), model.clone())?; + let read_query = 
read::find_unique(field.clone(), model.clone(), query_schema)?; if can_use_native_upsert { if let ReadQuery::RecordQuery(read) = read_query { diff --git a/query-engine/core/src/query_graph_builder/write/utils.rs b/query-engine/core/src/query_graph_builder/write/utils.rs index 2f2e736aeda..6feccd91203 100644 --- a/query-engine/core/src/query_graph_builder/write/utils.rs +++ b/query-engine/core/src/query_graph_builder/write/utils.rs @@ -44,6 +44,7 @@ where selection_order: vec![], aggregation_selections: vec![], options: QueryOptions::none(), + relation_load_strategy: query_structure::RelationLoadStrategy::Query, }); Query::Read(read_query) diff --git a/query-engine/core/src/response_ir/internal.rs b/query-engine/core/src/response_ir/internal.rs index 7becb19e768..47385692b38 100644 --- a/query-engine/core/src/response_ir/internal.rs +++ b/query-engine/core/src/response_ir/internal.rs @@ -1,6 +1,9 @@ use super::*; use crate::{ - constants::custom_types, protocol::EngineProtocol, CoreError, QueryResult, RecordAggregations, RecordSelection, + constants::custom_types, + protocol::EngineProtocol, + result_ast::{RecordSelectionWithRelations, RelationRecordSelection}, + CoreError, QueryResult, RecordAggregations, RecordSelection, }; use connector::{AggregationResult, RelAggregationResult, RelAggregationRow}; use indexmap::IndexMap; @@ -10,7 +13,7 @@ use schema::{ constants::{aggregations::*, output_fields::*}, *, }; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; /// A grouping of items to their parent record. /// The item implicitly holds the information of the type of item contained. @@ -46,6 +49,9 @@ pub(crate) fn serialize_internal( QueryResult::RecordSelection(Some(rs)) => { serialize_record_selection(*rs, field, field.field_type(), is_list, query_schema) } + QueryResult::RecordSelectionWithRelations(rs) => { + serialize_record_selection_with_relations(*rs, field, field.field_type(), is_list) + } QueryResult::RecordAggregations(ras) => serialize_aggregations(field, ras), QueryResult::Count(c) => { // Todo needs a real implementation or needs to move to RecordAggregation @@ -216,6 +222,31 @@ fn coerce_non_numeric(value: PrismaValue, output: &OutputType<'_>) -> PrismaValu } } +fn serialize_record_selection_with_relations( + record_selection: RecordSelectionWithRelations, + field: &OutputField<'_>, + typ: &OutputType<'_>, // We additionally pass the type to allow recursing into nested type definitions of a field. + is_list: bool, +) -> crate::Result { + let name = record_selection.name.clone(); + + match &typ.inner { + inner if typ.is_list() => serialize_record_selection_with_relations( + record_selection, + field, + &OutputType::non_list(inner.clone()), + true, + ), + InnerOutputType::Object(obj) => { + let result = serialize_objects_with_relation(record_selection, obj)?; + + finalize_objects(field, is_list, result, name) + } + // We always serialize record selections into objects or lists on the top levels. Scalars and enums are handled separately. + _ => unreachable!(), + } +} + fn serialize_record_selection( record_selection: RecordSelection, field: &OutputField<'_>, @@ -235,54 +266,197 @@ fn serialize_record_selection( ), InnerOutputType::Object(obj) => { let result = serialize_objects(record_selection, obj, query_schema)?; - let is_optional = field.is_nullable; - // Items will be ref'ed on the top level to allow cheap clones in nested scenarios. 
- match (is_list, is_optional) { - // List(Opt(_)) | List(_) - (true, opt) => { - result - .into_iter() - .map(|(parent, items)| { - if !opt { - // Check that all items are non-null - if items.iter().any(|item| matches!(item, Item::Value(PrismaValue::Null))) { - return Err(CoreError::null_serialization_error(&name)); - } - } - - Ok((parent, Item::Ref(ItemRef::new(Item::list(items))))) - }) - .collect() + finalize_objects(field, is_list, result, name) + } + + _ => unreachable!(), // We always serialize record selections into objects or lists on the top levels. Scalars and enums are handled separately. + } +} + +fn finalize_objects( + field: &OutputField<'_>, + is_list: bool, + result: IndexMap, Vec>, + name: String, +) -> Result, Item>, CoreError> { + let is_optional = field.is_nullable; + + // Items will be ref'ed on the top level to allow cheap clones in nested scenarios. + match (is_list, is_optional) { + // List(Opt(_)) | List(_) + (true, opt) => { + result + .into_iter() + .map(|(parent, items)| { + if !opt { + // Check that all items are non-null + if items.iter().any(|item| matches!(item, Item::Value(PrismaValue::Null))) { + return Err(CoreError::null_serialization_error(&name)); + } + } + + Ok((parent, Item::Ref(ItemRef::new(Item::list(items))))) + }) + .collect() + } + + // Opt(_) + (false, opt) => { + result + .into_iter() + .map(|(parent, mut items)| { + // As it's not a list, we require a single result + if items.len() > 1 { + items.reverse(); + let first = items.pop().unwrap(); + + // Simple return the first record in the list. + Ok((parent, Item::Ref(ItemRef::new(first)))) + } else if items.is_empty() && opt { + Ok((parent, Item::Ref(ItemRef::new(Item::Value(PrismaValue::Null))))) + } else if items.is_empty() && opt { + Err(CoreError::null_serialization_error(&name)) + } else { + Ok((parent, Item::Ref(ItemRef::new(items.pop().unwrap())))) + } + }) + .collect() + } + } +} + +// TODO: Handle errors properly +fn serialize_objects_with_relation( + result: RecordSelectionWithRelations, + typ: &ObjectType<'_>, +) -> crate::Result { + let mut object_mapping = UncheckedItemsWithParents::with_capacity(result.records.records.len()); + + let model = result.model; + let db_field_names = result.records.field_names; + let nested = result.nested; + + let fields: Vec<_> = db_field_names + .iter() + .filter_map(|f| model.fields().all().find(|field| field.db_name() == f)) + .collect(); + + // Hack: we convert it to a hashset to support contains with &str as input + // because Vec::contains(&str) doesn't work and we don't want to allocate a string record value + let selected_db_field_names: HashSet = result.fields.clone().into_iter().collect(); + + for record in result.records.records.into_iter() { + if !object_mapping.contains_key(&record.parent_id) { + object_mapping.insert(record.parent_id.clone(), Vec::new()); + } + + let values = record.values; + let mut object = HashMap::with_capacity(values.len()); + + for (val, field) in values.into_iter().zip(fields.iter()) { + // Skip fields that aren't part of the selection set + if !selected_db_field_names.contains(field.name()) { + continue; + } + + let out_field = typ.find_field(field.name()).unwrap(); + + match field { + Field::Scalar(_) if !out_field.field_type().is_object() => { + object.insert(field.name().to_owned(), serialize_scalar(out_field, val)?); } + Field::Relation(_) if out_field.field_type().is_list() => { + let inner_typ = out_field.field_type.as_object_type().unwrap(); + let rrs = nested.iter().find(|rrs| rrs.name == 
field.name()).unwrap(); - // Opt(_) - (false, opt) => { - result + let items = val + .into_list() + .unwrap() .into_iter() - .map(|(parent, mut items)| { - // As it's not a list, we require a single result - if items.len() > 1 { - items.reverse(); - let first = items.pop().unwrap(); - - // Simple return the first record in the list. - Ok((parent, Item::Ref(ItemRef::new(first)))) - } else if items.is_empty() && opt { - Ok((parent, Item::Ref(ItemRef::new(Item::Value(PrismaValue::Null))))) - } else if items.is_empty() && opt { - Err(CoreError::null_serialization_error(&name)) - } else { - Ok((parent, Item::Ref(ItemRef::new(items.pop().unwrap())))) - } - }) - .collect() + .map(|value| serialize_relation_selection(rrs, value, inner_typ)) + .collect::>>()?; + + object.insert(field.name().to_owned(), Item::list(items)); + } + Field::Relation(_) => { + let inner_typ = out_field.field_type.as_object_type().unwrap(); + let rrs = nested.iter().find(|rrs| rrs.name == field.name()).unwrap(); + + object.insert( + field.name().to_owned(), + serialize_relation_selection(rrs, val, inner_typ)?, + ); } + _ => panic!("unexpected field"), } } - _ => unreachable!(), // We always serialize record selections into objects or lists on the top levels. Scalars and enums are handled separately. + let map = reorder_object_with_selection_order(result.fields.clone(), object); + + let result = Item::Map(map); + + object_mapping.get_mut(&record.parent_id).unwrap().push(result); } + + Ok(object_mapping) +} + +fn serialize_relation_selection( + rrs: &RelationRecordSelection, + value: PrismaValue, + // parent_id: Option, + typ: &ObjectType<'_>, +) -> crate::Result { + if value.is_null() { + return Ok(Item::Value(PrismaValue::Null)); + } + + let mut map = Map::new(); + + // TODO: better handle errors + let mut value_obj: HashMap = HashMap::from_iter(value.into_object().unwrap()); + let db_field_names = &rrs.fields; + let fields: Vec<_> = db_field_names + .iter() + .filter_map(|f| rrs.model.fields().all().find(|field| field.name() == f)) + .collect(); + + for field in fields { + let out_field = typ.find_field(field.name()).unwrap(); + let value = value_obj.remove(field.db_name()).unwrap(); + + match field { + Field::Scalar(_) if !out_field.field_type().is_object() => { + map.insert(field.name().to_owned(), serialize_scalar(out_field, value)?); + } + Field::Relation(_) if out_field.field_type().is_list() => { + let inner_typ = out_field.field_type.as_object_type().unwrap(); + let inner_rrs = rrs.nested.iter().find(|rrs| rrs.name == field.name()).unwrap(); + + let items = value + .into_list() + .unwrap() + .into_iter() + .map(|value| serialize_relation_selection(inner_rrs, value, inner_typ)) + .collect::>>()?; + + map.insert(field.name().to_owned(), Item::list(items)); + } + Field::Relation(_) => { + let inner_typ = out_field.field_type.as_object_type().unwrap(); + let inner_rrs = rrs.nested.iter().find(|rrs| rrs.name == field.name()).unwrap(); + + map.insert( + field.name().to_owned(), + serialize_relation_selection(inner_rrs, value, inner_typ)?, + ); + } + _ => (), + } + } + + Ok(Item::Map(map)) } /// Serializes the given result into objects of given type. 
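Whichever strategy is chosen, the serialized response must come out identical. A hedged, test-style sketch of that expectation, following the conventions of the existing query-engine-tests suite (the model, fields, and exact snapshot below are made up for illustration and are not part of this patch):

```rust
// Assuming a User <-> Post one-to-many relation seeded with a single user and post,
// the join-based path must serialize the nested relation the same way as before.
insta::assert_snapshot!(
    run_query!(&runner, r#"{ findManyUser { id posts { id title } } }"#),
    @r###"{"data":{"findManyUser":[{"id":1,"posts":[{"id":1,"title":"Hello"}]}]}}"###
);
```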
@@ -365,28 +539,26 @@ fn serialize_objects( let mut all_fields = result.fields.clone(); all_fields.append(&mut aggr_fields); - let map = all_fields - .iter() - .fold(Map::with_capacity(all_fields.len()), |mut acc, field_name| { - acc.insert(field_name.to_owned(), object.remove(field_name).unwrap()); - acc - }); + let map = reorder_object_with_selection_order(all_fields, object); - // TODO: Find out how to easily determine when a result is null. - // If the object is null or completely empty, coerce into null instead. - let result = Item::Map(map); - // let result = if result.is_null_or_empty() { - // Item::Value(PrismaValue::Null) - // } else { - // result - // }; - - object_mapping.get_mut(&record.parent_id).unwrap().push(result); + object_mapping.get_mut(&record.parent_id).unwrap().push(Item::Map(map)); } Ok(object_mapping) } +fn reorder_object_with_selection_order( + selection_order: Vec, + mut object: HashMap, +) -> IndexMap { + selection_order + .iter() + .fold(Map::with_capacity(selection_order.len()), |mut acc, field_name| { + acc.insert(field_name.to_owned(), object.remove(field_name).unwrap()); + acc + }) +} + /// Unwraps are safe due to query validation. fn write_nested_items( record_id: &Option, diff --git a/query-engine/core/src/result_ast/mod.rs b/query-engine/core/src/result_ast/mod.rs index 91c58f8551a..a54f333c90a 100644 --- a/query-engine/core/src/result_ast/mod.rs +++ b/query-engine/core/src/result_ast/mod.rs @@ -6,12 +6,47 @@ pub(crate) enum QueryResult { Id(Option), Count(usize), RecordSelection(Option>), + RecordSelectionWithRelations(Box), Json(serde_json::Value), RecordAggregations(RecordAggregations), Unit, } -// Todo: In theory, much of this info can go into the serializer as soon as the read results are resolved in a flat tree. +#[derive(Debug, Clone)] +pub struct RecordSelectionWithRelations { + /// Name of the query. + pub(crate) name: String, + + /// Holds an ordered list of selected field names for each contained record. + pub(crate) fields: Vec, + + /// Selection results + pub(crate) records: ManyRecords, + + pub(crate) nested: Vec, + + /// The model of the contained records. + pub(crate) model: Model, +} + +impl From for QueryResult { + fn from(value: RecordSelectionWithRelations) -> Self { + QueryResult::RecordSelectionWithRelations(Box::new(value)) + } +} + +#[derive(Debug, Clone)] +pub struct RelationRecordSelection { + /// Name of the relation. + pub name: String, + /// Holds an ordered list of selected field names for each contained record. + pub fields: Vec, + /// The model of the contained records. + pub model: Model, + /// Nested relation selections + pub nested: Vec, +} + #[derive(Debug, Clone)] pub struct RecordSelection { /// Name of the query. diff --git a/query-engine/query-structure/src/field/mod.rs b/query-engine/query-structure/src/field/mod.rs index 45d529c56ab..39e43f186c1 100644 --- a/query-engine/query-structure/src/field/mod.rs +++ b/query-engine/query-structure/src/field/mod.rs @@ -183,6 +183,11 @@ impl TypeIdentifier { pub fn is_enum(&self) -> bool { matches!(self, Self::Enum(..)) } + + /// Returns `true` if the type identifier is [`Json`]. 
+ pub fn is_json(&self) -> bool { + matches!(self, Self::Json) + } } #[derive(Clone, Debug, PartialEq, Eq, Hash)] diff --git a/query-engine/query-structure/src/field_selection.rs b/query-engine/query-structure/src/field_selection.rs index b44529793a5..5254ccb20cb 100644 --- a/query-engine/query-structure/src/field_selection.rs +++ b/query-engine/query-structure/src/field_selection.rs @@ -1,9 +1,11 @@ use crate::{ parent_container::ParentContainer, prisma_value_ext::PrismaValueExtensions, CompositeFieldRef, DomainError, Field, - ScalarFieldRef, SelectionResult, + Model, ModelProjection, QueryArguments, RelationField, ScalarField, ScalarFieldRef, SelectionResult, + TypeIdentifier, }; use itertools::Itertools; use prisma_value::PrismaValue; +use psl::schema_ast::ast::FieldArity; use std::fmt::Display; /// A selection of fields from a model. @@ -31,6 +33,8 @@ impl FieldSelection { .and_then(|selection| selection.as_composite()) .map(|cs| cs.is_superset_of(other_cs)) .unwrap_or(false), + // TODO: Relation selections are ignored for now to prevent breaking the existing query-based strategy to resolve relations. + SelectedField::Relation(_) => true, }) } @@ -64,6 +68,7 @@ impl FieldSelection { .map(|selection| match selection { SelectedField::Scalar(sf) => sf.clone().into(), SelectedField::Composite(cf) => cf.field.clone().into(), + SelectedField::Relation(rs) => rs.field.clone().into(), }) .collect() } @@ -76,6 +81,7 @@ impl FieldSelection { .filter_map(|selection| match selection { SelectedField::Scalar(sf) => Some(sf.clone()), SelectedField::Composite(_) => None, + SelectedField::Relation(_) => None, }) .collect::>(); @@ -139,6 +145,28 @@ impl FieldSelection { FieldSelection { selections } } + + pub fn type_identifiers_with_arities(&self) -> Vec<(TypeIdentifier, FieldArity)> { + self.selections() + .filter_map(|selection| match selection { + SelectedField::Scalar(sf) => Some(sf.type_identifier_with_arity()), + SelectedField::Relation(rf) if rf.field.is_list() => Some((TypeIdentifier::Json, FieldArity::Required)), + SelectedField::Relation(rf) => Some((TypeIdentifier::Json, rf.field.arity())), + SelectedField::Composite(_) => None, + }) + .collect() + } + + pub fn relations(&self) -> impl Iterator { + self.selections().filter_map(|selection| match selection { + SelectedField::Relation(rs) => Some(rs), + _ => None, + }) + } + + pub fn into_projection(self) -> ModelProjection { + self.into() + } } /// A selected field. Can be contained on a model or composite type. 
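Since `type_identifiers_with_arities` maps every relation selection to `TypeIdentifier::Json`, a to-many relation fetched through a lateral join arrives as a single JSON value per parent row. A minimal sketch of the coercion that implies (the helper below is hypothetical; the real coercion logic lives in the SQL connector and is not shown here):

```rust
use serde_json::Value as Json;

// A to-many relation aggregated in the database (e.g. via json_agg) comes back as a
// JSON array of objects; a parent with no children may come back as NULL from the
// left lateral join, which should read as an empty list.
fn coerce_to_many_relation(value: Json) -> Vec<Json> {
    match value {
        Json::Array(items) => items,
        Json::Null => Vec::new(),
        other => vec![other],
    }
}
```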
@@ -147,6 +175,37 @@ impl FieldSelection { pub enum SelectedField { Scalar(ScalarFieldRef), Composite(CompositeSelection), + Relation(RelationSelection), +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct RelationSelection { + pub field: RelationField, + pub args: QueryArguments, + /// Field names that will eventually be serialized + pub result_fields: Vec, + // Fields that will be queried by the connectors + pub selections: Vec, +} + +impl RelationSelection { + pub fn scalars(&self) -> impl Iterator { + self.selections.iter().filter_map(|selection| match selection { + SelectedField::Scalar(sf) => Some(sf), + _ => None, + }) + } + + pub fn relations(&self) -> impl Iterator { + self.selections.iter().filter_map(|selection| match selection { + SelectedField::Relation(rs) => Some(rs), + _ => None, + }) + } + + pub fn related_model(&self) -> Model { + self.field.related_model() + } } impl SelectedField { @@ -154,6 +213,7 @@ impl SelectedField { match self { SelectedField::Scalar(sf) => sf.name(), SelectedField::Composite(cf) => cf.field.name(), + SelectedField::Relation(rs) => rs.field.name(), } } @@ -161,6 +221,7 @@ impl SelectedField { match self { SelectedField::Scalar(sf) => sf.db_name(), SelectedField::Composite(cs) => cs.field.db_name(), + SelectedField::Relation(rs) => rs.field.name(), } } @@ -175,6 +236,7 @@ impl SelectedField { match self { SelectedField::Scalar(sf) => sf.container(), SelectedField::Composite(cs) => cs.field.container(), + SelectedField::Relation(rs) => ParentContainer::from(rs.field.model()), } } @@ -183,8 +245,14 @@ impl SelectedField { match self { SelectedField::Scalar(sf) => value.coerce(&sf.type_identifier()), SelectedField::Composite(cs) => cs.coerce_value(value), + SelectedField::Relation(_) => todo!(), } } + + /// Returns `true` if the selected field is [`Scalar`]. + pub fn is_scalar(&self) -> bool { + matches!(self, Self::Scalar(..)) + } } #[derive(Debug, Clone, PartialEq, Eq, Hash)] @@ -203,6 +271,7 @@ impl CompositeSelection { .and_then(|selection| selection.as_composite()) .map(|cs| cs.is_superset_of(other_cs)) .unwrap_or(false), + SelectedField::Relation(_) => true, // A composite selection cannot hold relations. 
}) } @@ -278,6 +347,12 @@ impl Display for SelectedField { cs.field, cs.selections.iter().map(|selection| format!("{selection}")).join(", ") ), + SelectedField::Relation(rs) => write!( + f, + "{} {{ {} }}", + rs.field, + rs.selections.iter().map(|selection| format!("{selection}")).join(", ") + ), } } } diff --git a/query-engine/query-structure/src/filter/into_filter.rs b/query-engine/query-structure/src/filter/into_filter.rs index b180b3b80c4..eaf4711628f 100644 --- a/query-engine/query-structure/src/filter/into_filter.rs +++ b/query-engine/query-structure/src/filter/into_filter.rs @@ -14,7 +14,8 @@ impl IntoFilter for SelectionResult { .into_iter() .map(|(selection, value)| match selection { SelectedField::Scalar(sf) => sf.equals(value), - SelectedField::Composite(_) => todo!(), // [Composites] todo + SelectedField::Composite(_) => unreachable!(), // [Composites] todo + SelectedField::Relation(_) => unreachable!(), }) .collect(); diff --git a/query-engine/query-structure/src/projections/model_projection.rs b/query-engine/query-structure/src/projections/model_projection.rs index e17cafc896d..0d1a8f4b517 100644 --- a/query-engine/query-structure/src/projections/model_projection.rs +++ b/query-engine/query-structure/src/projections/model_projection.rs @@ -1,4 +1,4 @@ -use crate::{Field, FieldSelection, ScalarFieldRef, SelectedField, SelectionResult, TypeIdentifier}; +use crate::{Field, FieldSelection, ScalarFieldRef, SelectedField, TypeIdentifier}; use itertools::Itertools; use psl::schema_ast::ast::FieldArity; @@ -30,6 +30,7 @@ impl From<&FieldSelection> for ModelProjection { .filter_map(|selected| match selected { SelectedField::Scalar(sf) => Some(sf.clone().into()), SelectedField::Composite(_cf) => None, + SelectedField::Relation(_) => None, }) .collect(), } @@ -103,18 +104,3 @@ impl IntoIterator for ModelProjection { self.fields.into_iter() } } - -impl From<&SelectionResult> for ModelProjection { - fn from(p: &SelectionResult) -> Self { - let fields = p - .pairs - .iter() - .map(|(field_selection, _)| match field_selection { - SelectedField::Scalar(sf) => sf.clone().into(), - SelectedField::Composite(cf) => cf.field.clone().into(), - }) - .collect::>(); - - Self::new(fields) - } -} diff --git a/query-engine/query-structure/src/query_arguments.rs b/query-engine/query-structure/src/query_arguments.rs index 1d75866db67..76b1d9274c9 100644 --- a/query-engine/query-structure/src/query_arguments.rs +++ b/query-engine/query-structure/src/query_arguments.rs @@ -14,7 +14,7 @@ use crate::*; /// A query argument struct is always valid over a single model only, meaning that all /// data referenced in a single query argument instance is always refering to data of /// a single model (e.g. the cursor projection, distinct projection, orderby, ...). 
-#[derive(Clone)] +#[derive(Clone, PartialEq, Eq, Hash)] pub struct QueryArguments { pub model: Model, pub cursor: Option, @@ -27,6 +27,17 @@ pub struct QueryArguments { pub ignore_take: bool, } +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub enum RelationLoadStrategy { + Join, + Query, +} +impl RelationLoadStrategy { + pub fn is_query(&self) -> bool { + matches!(self, RelationLoadStrategy::Query) + } +} + impl std::fmt::Debug for QueryArguments { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("QueryArguments") diff --git a/query-engine/query-structure/src/selection_result.rs b/query-engine/query-structure/src/selection_result.rs index d6e1ef46349..6f87ec74f6c 100644 --- a/query-engine/query-structure/src/selection_result.rs +++ b/query-engine/query-structure/src/selection_result.rs @@ -94,6 +94,7 @@ impl SelectionResult { .filter_map(|(selection, _)| match selection { SelectedField::Scalar(sf) => Some(sf.clone()), SelectedField::Composite(_) => None, + SelectedField::Relation(_) => None, }) .collect(); diff --git a/query-engine/schema/src/query_schema.rs b/query-engine/schema/src/query_schema.rs index 0324896aea0..3098a96f159 100644 --- a/query-engine/schema/src/query_schema.rs +++ b/query-engine/schema/src/query_schema.rs @@ -96,7 +96,7 @@ impl QuerySchema { || self.has_capability(ConnectorCapability::FullTextSearchWithIndex)) } - pub(crate) fn has_feature(&self, feature: PreviewFeature) -> bool { + pub fn has_feature(&self, feature: PreviewFeature) -> bool { self.preview_features.contains(feature) }
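Taken together, when `RelationLoadStrategy::Join` is selected the connector can resolve a to-many relation in a single database roundtrip. Roughly the SQL shape this targets on PostgreSQL, written out as a plain string for readability (the real statement is assembled through quaint's JSON aggregation helpers and differs in aliases, quoting, and null handling; table and column names here are hypothetical):

```rust
// Illustrative only: one parent table joined laterally to an aggregated JSON array
// of its children, so parents and their relations come back in one result set.
const EXAMPLE_LATERAL_JOIN: &str = r#"
SELECT "User"."id",
       "posts"."data" AS "posts"
FROM "User"
LEFT JOIN LATERAL (
    SELECT COALESCE(JSON_AGG(JSON_BUILD_OBJECT('id', "Post"."id", 'title', "Post"."title")), '[]') AS "data"
    FROM "Post"
    WHERE "Post"."userId" = "User"."id"
) AS "posts" ON TRUE
"#;
```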