From 96f20d9708e206998e24cf78c7cfac4d7506308e Mon Sep 17 00:00:00 2001 From: Flavian Desverne Date: Wed, 12 Jul 2023 18:32:35 +0200 Subject: [PATCH] perf: avoid read after create when possible (#4041) --- .../src/cockroach_datamodel_connector.rs | 3 +- .../src/postgres_datamodel_connector.rs | 3 +- .../src/sqlite_datamodel_connector.rs | 3 + .../src/datamodel_connector/capabilities.rs | 3 +- .../query-engine-tests/tests/new/metrics.rs | 2 +- .../tests/new/regressions/prisma_15581.rs | 15 +- .../src/interface/connection.rs | 4 +- .../src/interface/transaction.rs | 4 +- .../src/root_queries/write.rs | 7 +- .../query-connector/src/interface.rs | 3 +- .../query-connector/src/write_args.rs | 2 +- .../src/database/connection.rs | 13 +- .../src/database/operations/upsert.rs | 8 +- .../src/database/operations/write.rs | 55 ++++--- .../src/database/transaction.rs | 13 +- .../src/query_builder/write.rs | 9 +- .../interpreter/query_interpreters/write.rs | 11 +- query-engine/core/src/query_ast/write.rs | 3 + .../core/src/query_document/parse_ast.rs | 12 ++ .../core/src/query_graph_builder/read/mod.rs | 2 +- .../src/query_graph_builder/read/utils.rs | 29 +++- .../src/query_graph_builder/write/create.rs | 144 ++++++++++++++---- .../write/write_args_parser.rs | 6 + query-engine/prisma-models/src/field/mod.rs | 7 - .../prisma-models/src/parent_container.rs | 1 - query-engine/prisma-models/src/record.rs | 6 + .../prisma-models/src/selection_result.rs | 4 + query-engine/schema/src/query_schema.rs | 2 +- 28 files changed, 281 insertions(+), 93 deletions(-) diff --git a/psl/builtin-connectors/src/cockroach_datamodel_connector.rs b/psl/builtin-connectors/src/cockroach_datamodel_connector.rs index e41a6c599dc..7807a2f335e 100644 --- a/psl/builtin-connectors/src/cockroach_datamodel_connector.rs +++ b/psl/builtin-connectors/src/cockroach_datamodel_connector.rs @@ -55,7 +55,8 @@ const CAPABILITIES: ConnectorCapabilities = enumflags2::make_bitflags!(Connector SupportsTxIsolationSerializable | NativeUpsert | MultiSchema | - FilteredInlineChildNestedToOneDisconnect + FilteredInlineChildNestedToOneDisconnect | + InsertReturning }); const SCALAR_TYPE_DEFAULTS: &[(ScalarType, CockroachType)] = &[ diff --git a/psl/builtin-connectors/src/postgres_datamodel_connector.rs b/psl/builtin-connectors/src/postgres_datamodel_connector.rs index 80a40f5f554..66e89e2e0a0 100644 --- a/psl/builtin-connectors/src/postgres_datamodel_connector.rs +++ b/psl/builtin-connectors/src/postgres_datamodel_connector.rs @@ -62,7 +62,8 @@ const CAPABILITIES: ConnectorCapabilities = enumflags2::make_bitflags!(Connector SupportsTxIsolationReadCommitted | SupportsTxIsolationRepeatableRead | SupportsTxIsolationSerializable | - NativeUpsert + NativeUpsert | + InsertReturning }); pub struct PostgresDatamodelConnector; diff --git a/psl/builtin-connectors/src/sqlite_datamodel_connector.rs b/psl/builtin-connectors/src/sqlite_datamodel_connector.rs index 0fb3b253136..c3c383b8ad1 100644 --- a/psl/builtin-connectors/src/sqlite_datamodel_connector.rs +++ b/psl/builtin-connectors/src/sqlite_datamodel_connector.rs @@ -25,6 +25,9 @@ const CAPABILITIES: ConnectorCapabilities = enumflags2::make_bitflags!(Connector SupportsTxIsolationSerializable | NativeUpsert | FilteredInlineChildNestedToOneDisconnect + // InsertReturning - While SQLite does support RETURNING, it does not return column information on the way back from the database. 
+    // This column type information is necessary to preserve consistency for some data types such as int, where values could otherwise overflow.
+    // Since we want to stay consistent with reads, the capability is not enabled.
 });
 
 pub struct SqliteDatamodelConnector;
diff --git a/psl/psl-core/src/datamodel_connector/capabilities.rs b/psl/psl-core/src/datamodel_connector/capabilities.rs
index 1f7bd040fb3..7aeab14e2d8 100644
--- a/psl/psl-core/src/datamodel_connector/capabilities.rs
+++ b/psl/psl-core/src/datamodel_connector/capabilities.rs
@@ -100,7 +100,8 @@ capabilities!(
     SupportsTxIsolationRepeatableRead,
     SupportsTxIsolationSerializable,
     SupportsTxIsolationSnapshot,
-    NativeUpsert
+    NativeUpsert,
+    InsertReturning
 );
 
 /// Contains all capabilities that the connector is able to serve.
diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/metrics.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/metrics.rs
index d01b70fbe55..49ae8465817 100644
--- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/metrics.rs
+++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/metrics.rs
@@ -33,7 +33,7 @@ mod metrics {
             CockroachDb => (), // not deterministic
             MySql(_) => assert_eq!(total_queries, 12),
             Vitess(_) => assert_eq!(total_queries, 11),
-            Postgres(_) => assert_eq!(total_queries, 14),
+            Postgres(_) => assert_eq!(total_queries, 11),
         }
 
         assert_eq!(total_operations, 2);
diff --git a/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/prisma_15581.rs b/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/prisma_15581.rs
index 581ab22c422..e042eb8c3d4 100644
--- a/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/prisma_15581.rs
+++ b/query-engine/connector-test-kit-rs/query-engine-tests/tests/new/regressions/prisma_15581.rs
@@ -36,6 +36,7 @@ mod prisma_15581 {
             runner,
             r#"mutation { createOnetest(data: { reference: 3 }) { reference created_at other } }"#
         );
+
         Ok(())
     }
 
@@ -45,6 +46,7 @@ mod prisma_15581 {
             runner,
             r#"mutation { createOnetest2(data: { reference: 3 }) { reference updated_at other } }"#
         );
+
         Ok(())
     }
 
@@ -63,16 +65,11 @@ mod prisma_15581 {
 
     #[connector_test(only(Postgres), schema(pg_schema))]
     async fn create_one_model_with_low_precision_datetime_in_id(runner: Runner) -> TestResult<()> {
-        let result = runner
-            .query(r#"mutation { createOnetest(data: { reference: 3 }) { reference created_at other } }"#)
-            .await?
-            .to_string_pretty();
-
-        // This is a test that confirms the current behaviour. Ideally, the create mutation above
-        // should work.
-        assert!(
-            result.contains("\"error\": \"Query createOnetest is required to return data, but found no record(s).\"")
+        run_query!(
+            runner,
+            r#"mutation { createOnetest(data: { reference: 3 }) { reference created_at other } }"#
         );
+
        Ok(())
     }
 
diff --git a/query-engine/connectors/mongodb-query-connector/src/interface/connection.rs b/query-engine/connectors/mongodb-query-connector/src/interface/connection.rs
index a238cd18d24..eac2c8963aa 100644
--- a/query-engine/connectors/mongodb-query-connector/src/interface/connection.rs
+++ b/query-engine/connectors/mongodb-query-connector/src/interface/connection.rs
@@ -52,8 +52,10 @@ impl WriteOperations for MongoDbConnection {
         &mut self,
         model: &Model,
         args: WriteArgs,
+        // The field selection on a create is never used on MongoDB as it cannot return more than the ID.
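+        // For instance, the driver's `insert_one` acknowledges the write with nothing
+        // but the generated `_id`, so any broader selection would still need a read.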
+        _selected_fields: FieldSelection,
         _trace_id: Option<String>,
-    ) -> connector_interface::Result<SelectionResult> {
+    ) -> connector_interface::Result<SingleRecord> {
         catch(async move { write::create_record(&self.database, &mut self.session, model, args).await }).await
     }
 
diff --git a/query-engine/connectors/mongodb-query-connector/src/interface/transaction.rs b/query-engine/connectors/mongodb-query-connector/src/interface/transaction.rs
index 9e985c42319..e5255245cde 100644
--- a/query-engine/connectors/mongodb-query-connector/src/interface/transaction.rs
+++ b/query-engine/connectors/mongodb-query-connector/src/interface/transaction.rs
@@ -73,8 +73,10 @@ impl<'conn> WriteOperations for MongoDbTransaction<'conn> {
         &mut self,
         model: &Model,
         args: connector_interface::WriteArgs,
+        // The field selection on a create is never used on MongoDB as it cannot return more than the ID.
+        _selected_fields: FieldSelection,
         _trace_id: Option<String>,
-    ) -> connector_interface::Result<SelectionResult> {
+    ) -> connector_interface::Result<SingleRecord> {
         catch(async move {
             write::create_record(&self.connection.database, &mut self.connection.session, model, args).await
         })
diff --git a/query-engine/connectors/mongodb-query-connector/src/root_queries/write.rs b/query-engine/connectors/mongodb-query-connector/src/root_queries/write.rs
index 19af930f3ec..f5aa24c6b5d 100644
--- a/query-engine/connectors/mongodb-query-connector/src/root_queries/write.rs
+++ b/query-engine/connectors/mongodb-query-connector/src/root_queries/write.rs
@@ -27,7 +27,7 @@ pub async fn create_record<'conn>(
     session: &mut ClientSession,
     model: &Model,
     mut args: WriteArgs,
-) -> crate::Result<SelectionResult> {
+) -> crate::Result<SingleRecord> {
     let coll = database.collection::<Document>(model.db_name());
 
     let span = info_span!(
@@ -72,7 +72,10 @@ pub async fn create_record<'conn>(
         .await?;
 
     let id_value = value_from_bson(insert_result.inserted_id, &id_meta)?;
-    Ok(SelectionResult::from((id_field, id_value)))
+    Ok(SingleRecord {
+        record: Record::new(vec![id_value]),
+        field_names: vec![id_field.db_name().to_owned()],
+    })
 }
 
 pub async fn create_records<'conn>(
diff --git a/query-engine/connectors/query-connector/src/interface.rs b/query-engine/connectors/query-connector/src/interface.rs
index c50d1497335..1649db2fd4c 100644
--- a/query-engine/connectors/query-connector/src/interface.rs
+++ b/query-engine/connectors/query-connector/src/interface.rs
@@ -286,8 +286,9 @@ pub trait WriteOperations {
         &mut self,
         model: &Model,
         args: WriteArgs,
+        selected_fields: FieldSelection,
         trace_id: Option<String>,
-    ) -> crate::Result<SelectionResult>;
+    ) -> crate::Result<SingleRecord>;
 
     /// Inserts many records at once into the database.
    async fn create_records(
diff --git a/query-engine/connectors/query-connector/src/write_args.rs b/query-engine/connectors/query-connector/src/write_args.rs
index d3a1a21fa4d..60ec69a1f46 100644
--- a/query-engine/connectors/query-connector/src/write_args.rs
+++ b/query-engine/connectors/query-connector/src/write_args.rs
@@ -409,7 +409,7 @@ impl WriteArgs {
         }
     }
 
-    pub fn as_record_projection(&self, model_projection: ModelProjection) -> Option<SelectionResult> {
+    pub fn as_selection_result(&self, model_projection: ModelProjection) -> Option<SelectionResult> {
         let pairs: Vec<_> = model_projection
             .scalar_fields()
             .map(|field| {
diff --git a/query-engine/connectors/sql-query-connector/src/database/connection.rs b/query-engine/connectors/sql-query-connector/src/database/connection.rs
index 39871249ccd..c466b5ad743 100644
--- a/query-engine/connectors/sql-query-connector/src/database/connection.rs
+++ b/query-engine/connectors/sql-query-connector/src/database/connection.rs
@@ -165,11 +165,20 @@ where
         &mut self,
         model: &Model,
         args: WriteArgs,
+        selected_fields: FieldSelection,
         trace_id: Option<String>,
-    ) -> connector::Result<SelectionResult> {
+    ) -> connector::Result<SingleRecord> {
         catch(self.connection_info.clone(), async move {
             let ctx = Context::new(&self.connection_info, trace_id.as_deref());
-            write::create_record(&self.inner, &self.connection_info.sql_family(), model, args, &ctx).await
+            write::create_record(
+                &self.inner,
+                &self.connection_info.sql_family(),
+                model,
+                args,
+                selected_fields,
+                &ctx,
+            )
+            .await
         })
         .await
     }
diff --git a/query-engine/connectors/sql-query-connector/src/database/operations/upsert.rs b/query-engine/connectors/sql-query-connector/src/database/operations/upsert.rs
index 31effbd4886..a4b6982eee3 100644
--- a/query-engine/connectors/sql-query-connector/src/database/operations/upsert.rs
+++ b/query-engine/connectors/sql-query-connector/src/database/operations/upsert.rs
@@ -24,17 +24,15 @@ pub(crate) async fn native_upsert(
     let where_condition = upsert.filter().aliased_condition_from(None, false, ctx);
     let update = build_update_and_set_query(upsert.model(), upsert.update().clone(), ctx).so_that(where_condition);
 
-    let insert = create_record(upsert.model(), upsert.create().clone(), ctx);
+    let insert = create_record(upsert.model(), upsert.create().clone(), &selected_fields, ctx);
 
     let constraints: Vec<_> = upsert.unique_constraints().as_columns(ctx).collect();
-    let query: Query = insert
-        .on_conflict(OnConflict::Update(update, constraints))
-        .returning(selected_fields.as_columns(ctx))
-        .into();
+    let query: Query = insert.on_conflict(OnConflict::Update(update, constraints)).into();
 
     let result_set = conn.query(query).await?;
 
     let row = result_set.into_single()?;
     let record = Record::from(row.to_sql_row(&meta)?);
+
     Ok(SingleRecord { record, field_names })
 }
diff --git a/query-engine/connectors/sql-query-connector/src/database/operations/write.rs b/query-engine/connectors/sql-query-connector/src/database/operations/write.rs
index 7470ec4e069..43a8745dd5b 100644
--- a/query-engine/connectors/sql-query-connector/src/database/operations/write.rs
+++ b/query-engine/connectors/sql-query-connector/src/database/operations/write.rs
@@ -1,5 +1,7 @@
+use crate::column_metadata;
 use crate::filter_conversion::AliasedCondition;
 use crate::query_builder::write::{build_update_and_set_query, chunk_update_with_ids};
+use crate::row::ToSqlRow;
 use crate::{
     error::SqlError, model_extensions::*, query_builder::write, sql_trace::SqlTraceComment, Context, QueryExt,
     Queryable,
};
@@ -21,15 +23,15 @@ use user_facing_errors::query_engine::DatabaseConstraint;
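+// MySQL has no usable `INSERT ... RETURNING`, so id values that the database would
+// generate (e.g. `dbgenerated()` defaults such as `(uuid())`) are selected up front
+// and spliced into the write args. Roughly (illustrative statement only):
+//
+//     SELECT (uuid()) AS `id`;
+//
+// This keeps the id handed back to the caller consistent with what was inserted.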
 async fn generate_id(
     conn: &dyn Queryable,
-    primary_key: &FieldSelection,
+    id_field: &FieldSelection,
     args: &WriteArgs,
     ctx: &Context<'_>,
 ) -> crate::Result<Option<SelectionResult>> {
     // Go through all the values and generate a select statement with the correct MySQL function
-    let (pk_select, need_select) = primary_key
+    let (id_select, need_select) = id_field
         .selections()
         .filter_map(|field| match field {
-            SelectedField::Scalar(x) if x.default_value().is_some() && !args.has_arg_for(x.db_name()) => x
+            SelectedField::Scalar(sf) if sf.default_value().is_some() && !args.has_arg_for(sf.db_name()) => sf
                 .default_value()
                 .unwrap()
                 .to_dbgenerated_func()
@@ -50,9 +52,9 @@ async fn generate_id(
 
     // db generate values only if needed
     if need_select {
-        let pk_select = pk_select.add_trace_id(ctx.trace_id);
+        let pk_select = id_select.add_trace_id(ctx.trace_id);
         let pk_result = conn.query(pk_select.into()).await?;
-        let result = try_convert(&(primary_key.into()), pk_result)?;
+        let result = try_convert(&(id_field.into()), pk_result)?;
 
         Ok(Some(result))
     } else {
@@ -67,18 +69,19 @@ pub(crate) async fn create_record(
     sql_family: &SqlFamily,
     model: &Model,
     mut args: WriteArgs,
+    selected_fields: FieldSelection,
     ctx: &Context<'_>,
-) -> crate::Result<SelectionResult> {
-    let pk = model.primary_identifier();
+) -> crate::Result<SingleRecord> {
+    let id_field: FieldSelection = model.primary_identifier();
 
     let returned_id = if *sql_family == SqlFamily::Mysql {
-        generate_id(conn, &pk, &args, ctx).await?
+        generate_id(conn, &id_field, &args, ctx)
+            .await?
+            .or_else(|| args.as_selection_result(ModelProjection::from(id_field)))
     } else {
-        args.as_record_projection(pk.clone().into())
+        args.as_selection_result(ModelProjection::from(id_field))
     };
 
-    let returned_id = returned_id.or_else(|| args.as_record_projection(pk.clone().into()));
-
     let args = match returned_id {
         Some(ref pk) if *sql_family == SqlFamily::Mysql => {
             for (field, value) in pk.pairs.iter() {
@@ -91,7 +94,7 @@ pub(crate) async fn create_record(
         _ => args,
     };
 
-    let insert = write::create_record(model, args, ctx);
+    let insert = write::create_record(model, args, &ModelProjection::from(&selected_fields), ctx);
 
     let result_set = match conn.insert(insert).await {
         Ok(id) => id,
@@ -137,16 +140,34 @@ pub(crate) async fn create_record(
     };
 
     match (returned_id, result_set.len(), result_set.last_insert_id()) {
-        // All values provided in the write arrghs
-        (Some(identifier), _, _) if !identifier.misses_autogen_value() => Ok(identifier),
-
         // with a working RETURNING statement
-        (_, n, _) if n > 0 => Ok(try_convert(&model.primary_identifier().into(), result_set)?),
+        (_, n, _) if n > 0 => {
+            let row = result_set.into_single()?;
+            let field_names: Vec<_> = selected_fields.db_names().collect();
+            let idents = ModelProjection::from(&selected_fields).type_identifiers_with_arities();
+            let meta = column_metadata::create(&field_names, &idents);
+            let sql_row = row.to_sql_row(&meta)?;
+            let record = Record::from(sql_row);
+
+            Ok(SingleRecord { record, field_names })
+        }
+
+        // All values provided in the write args
+        (Some(identifier), _, _) if !identifier.misses_autogen_value() => {
+            let field_names = identifier.db_names().map(ToOwned::to_owned).collect();
+            let record = Record::from(identifier);
+
+            Ok(SingleRecord { record, field_names })
+        }
 
         // We have an auto-incremented id that we got from MySQL or SQLite
         (Some(mut identifier), _, Some(num)) if identifier.misses_autogen_value() => {
             identifier.add_autogen_value(num as i64);
-            Ok(identifier)
+
+            let field_names = identifier.db_names().map(ToOwned::to_owned).collect();
+            let record = Record::from(identifier);
+
+            Ok(SingleRecord { record, field_names })
         }
 
         (_, _, _) => panic!("Could not figure out an ID in create"),
diff --git a/query-engine/connectors/sql-query-connector/src/database/transaction.rs b/query-engine/connectors/sql-query-connector/src/database/transaction.rs
index 4df795d337d..26ec8212745 100644
--- a/query-engine/connectors/sql-query-connector/src/database/transaction.rs
+++ b/query-engine/connectors/sql-query-connector/src/database/transaction.rs
@@ -145,11 +145,20 @@ impl<'tx> WriteOperations for SqlConnectorTransaction<'tx> {
         &mut self,
         model: &Model,
         args: WriteArgs,
+        selected_fields: FieldSelection,
         trace_id: Option<String>,
-    ) -> connector::Result<SelectionResult> {
+    ) -> connector::Result<SingleRecord> {
         catch(self.connection_info.clone(), async move {
             let ctx = Context::new(&self.connection_info, trace_id.as_deref());
-            write::create_record(&self.inner, &self.connection_info.sql_family(), model, args, &ctx).await
+            write::create_record(
+                &self.inner,
+                &self.connection_info.sql_family(),
+                model,
+                args,
+                selected_fields,
+                &ctx,
+            )
+            .await
         })
         .await
     }
diff --git a/query-engine/connectors/sql-query-connector/src/query_builder/write.rs b/query-engine/connectors/sql-query-connector/src/query_builder/write.rs
index b29b1d93be1..e726cb81f73 100644
--- a/query-engine/connectors/sql-query-connector/src/query_builder/write.rs
+++ b/query-engine/connectors/sql-query-connector/src/query_builder/write.rs
@@ -7,7 +7,12 @@ use tracing::Span;
 
 /// `INSERT` a new record to the database. Resulting an `INSERT` ast and an
 /// optional `RecordProjection` if available from the arguments or model.
-pub(crate) fn create_record(model: &Model, mut args: WriteArgs, ctx: &Context<'_>) -> Insert<'static> {
+pub(crate) fn create_record(
+    model: &Model,
+    mut args: WriteArgs,
+    selected_fields: &ModelProjection,
+    ctx: &Context<'_>,
+) -> Insert<'static> {
     let fields: Vec<_> = model
         .fields()
         .scalar()
@@ -27,7 +32,7 @@ pub(crate) fn create_record(model: &Model, mut args: WriteArgs, ctx: &Context<'_
     });
 
     Insert::from(insert)
-        .returning(ModelProjection::from(model.primary_identifier()).as_columns(ctx))
+        .returning(selected_fields.as_columns(ctx))
         .append_trace(&Span::current())
         .add_trace_id(ctx.trace_id)
 }
diff --git a/query-engine/core/src/interpreter/query_interpreters/write.rs b/query-engine/core/src/interpreter/query_interpreters/write.rs
index ed3002f5b0c..2f2a5cd1fa0 100644
--- a/query-engine/core/src/interpreter/query_interpreters/write.rs
+++ b/query-engine/core/src/interpreter/query_interpreters/write.rs
@@ -43,9 +43,16 @@ async fn create_one(
     q: CreateRecord,
     trace_id: Option<String>,
 ) -> InterpretationResult<QueryResult> {
-    let res = tx.create_record(&q.model, q.args, trace_id).await?;
+    let res = tx.create_record(&q.model, q.args, q.selected_fields, trace_id).await?;
 
-    Ok(QueryResult::Id(Some(res)))
+    Ok(QueryResult::RecordSelection(Box::new(RecordSelection {
+        name: q.name,
+        fields: q.selection_order,
+        aggregation_rows: None,
+        model: q.model,
+        scalars: res.into(),
+        nested: vec![],
+    })))
 }
 
 async fn create_many(
diff --git a/query-engine/core/src/query_ast/write.rs b/query-engine/core/src/query_ast/write.rs
index e4e927a0a5e..5d3b4446cd9 100644
--- a/query-engine/core/src/query_ast/write.rs
+++ b/query-engine/core/src/query_ast/write.rs
@@ -180,8 +180,11 @@ impl ToGraphviz for WriteQuery {
 
 #[derive(Debug, Clone)]
 pub struct CreateRecord {
+    pub name: String,
     pub model: Model,
     pub args: WriteArgs,
+    pub selected_fields: FieldSelection,
+    pub selection_order: Vec<String>,
 }
 
 #[derive(Debug, Clone)]
diff --git a/query-engine/core/src/query_document/parse_ast.rs b/query-engine/core/src/query_document/parse_ast.rs
index 74621de5710..b9d1e8f6816 100644
--- a/query-engine/core/src/query_document/parse_ast.rs
+++ b/query-engine/core/src/query_document/parse_ast.rs
@@ -108,6 +108,18 @@ impl<'a> ParsedField<'a> {
         self.look_arg("update")
     }
 
+    pub(crate) fn has_nested_selection(&self) -> bool {
+        self.nested_fields
+            .as_ref()
+            .map(|nested_field| {
+                nested_field
+                    .fields
+                    .iter()
+                    .any(|field| field.parsed_field.nested_fields.is_some())
+            })
+            .unwrap_or(false)
+    }
+
     fn look_arg(&mut self, arg_name: &str) -> QueryParserResult<Option<ParsedInputMap<'a>>> {
         self.arguments
             .lookup(arg_name)
diff --git a/query-engine/core/src/query_graph_builder/read/mod.rs b/query-engine/core/src/query_graph_builder/read/mod.rs
index 7efa3215f45..b101973f1d8 100644
--- a/query-engine/core/src/query_graph_builder/read/mod.rs
+++ b/query-engine/core/src/query_graph_builder/read/mod.rs
@@ -5,7 +5,7 @@ mod first;
 mod many;
 mod one;
 mod related;
-mod utils;
+pub(crate) mod utils;
 
 pub(crate) use aggregations::*;
 pub(crate) use first::*;
diff --git a/query-engine/core/src/query_graph_builder/read/utils.rs b/query-engine/core/src/query_graph_builder/read/utils.rs
index 9be42ddfa5c..234a50d6f09 100644
--- a/query-engine/core/src/query_graph_builder/read/utils.rs
+++ b/query-engine/core/src/query_graph_builder/read/utils.rs
@@ -4,7 +4,7 @@
 use connector::RelAggregationSelection;
 use prisma_models::prelude::*;
 use schema::constants::{aggregations::*, args};
 
-pub(crate) fn collect_selection_order(from: &[FieldPair<'_>]) -> Vec<String> {
+pub fn collect_selection_order(from: &[FieldPair<'_>]) -> Vec<String> {
     from.iter()
         .map(|pair| {
             pair.parsed_field
@@ -17,8 +17,7 @@ pub(crate) fn collect_selection_order(from: &[FieldPair<'_>]) -> Vec<String> {
 
 /// Creates a `FieldSelection` from a query selection.
 /// Automatically adds model IDs to the selected fields as well.
-/// Unwraps are safe due to query validation.
-pub(crate) fn collect_selected_fields(
+pub fn collect_selected_fields(
     from_pairs: &[FieldPair<'_>],
     distinct: Option<FieldSelection>,
     model: &Model,
@@ -37,6 +36,30 @@ pub(crate) fn collect_selected_fields(
     }
 }
 
+/// Creates a `FieldSelection` from a query selection, keeping only scalar fields.
+/// Automatically adds model IDs to the selected fields as well.
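+///
+/// For example (illustrative names): a selection `{ id, title, author }` on a `Post`
+/// model, where `author` is a relation, collapses to the scalar selection `[id, title]`.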
+pub fn collect_selected_scalars(from_pairs: &[FieldPair<'_>], model: &Model) -> FieldSelection {
+    let model_id = model.primary_identifier();
+    let selected_fields = pairs_to_scalar_selections(model, from_pairs);
+    let selection = FieldSelection::new(selected_fields);
+
+    model_id.merge(selection)
+}
+
+fn pairs_to_scalar_selections<T>(parent: T, pairs: &[FieldPair<'_>]) -> Vec<SelectedField>
+where
+    T: Into<ParentContainer>,
+{
+    let parent: ParentContainer = parent.into();
+
+    pairs
+        .iter()
+        .filter_map(|pair| parent.find_field(&pair.parsed_field.name))
+        .filter_map(|field| field.into_scalar())
+        .map(SelectedField::from)
+        .collect()
+}
+
 fn pairs_to_selections<T>(parent: T, pairs: &[FieldPair<'_>]) -> Vec<SelectedField>
 where
     T: Into<ParentContainer>,
diff --git a/query-engine/core/src/query_graph_builder/write/create.rs b/query-engine/core/src/query_graph_builder/write/create.rs
index 3d12421dad0..c8b9cea0c4b 100644
--- a/query-engine/core/src/query_graph_builder/write/create.rs
+++ b/query-engine/core/src/query_graph_builder/write/create.rs
@@ -6,6 +6,7 @@ use crate::{
 };
 use connector::IntoFilter;
 use prisma_models::Model;
+use psl::datamodel_connector::ConnectorCapability;
 use schema::{constants::args, QuerySchema};
 use std::convert::TryInto;
 use write_args_parser::*;
@@ -17,41 +18,47 @@ pub(crate) fn create_record(
     model: Model,
     mut field: ParsedField<'_>,
 ) -> QueryGraphBuilderResult<()> {
-    graph.flag_transactional();
-
     let data_map = match field.arguments.lookup(args::DATA) {
         Some(data) => data.value.try_into()?,
         None => ParsedInputMap::default(),
     };
 
-    let create_node = create::create_record_node(graph, query_schema, model.clone(), data_map)?;
-
-    // Follow-up read query on the write
-    let read_query = read::find_unique(field, model.clone())?;
-    let read_node = graph.create_node(Query::Read(read_query));
-
-    graph.add_result_node(&read_node);
-    graph.create_edge(
-        &create_node,
-        &read_node,
-        QueryGraphDependency::ProjectedDataDependency(
-            model.primary_identifier(),
-            Box::new(move |mut read_node, mut parent_ids| {
-                let parent_id = match parent_ids.pop() {
-                    Some(pid) => Ok(pid),
-                    None => Err(QueryGraphBuilderError::AssertionError(
-                        "Expected a valid parent ID to be present for create follow-up read query.".to_string(),
-                    )),
-                }?;
-
-                if let Node::Query(Query::Read(ReadQuery::RecordQuery(ref mut rq))) = read_node {
-                    rq.add_filter(parent_id.filter());
-                };
-
-                Ok(read_node)
-            }),
-        ),
-    )?;
+    if can_use_atomic_create(query_schema, &model, &data_map, &field) {
+        let create_node = create::atomic_create_record_node(graph, query_schema, model, data_map, field)?;
+
+        graph.add_result_node(&create_node);
+    } else {
+        graph.flag_transactional();
+
+        let create_node = create::create_record_node(graph, query_schema, model.clone(), data_map)?;
+
+        // Follow-up read query on the write
+        let read_query = read::find_unique(field, model.clone())?;
+        let read_node = graph.create_node(Query::Read(read_query));
+
+        graph.add_result_node(&read_node);
+        graph.create_edge(
+            &create_node,
+            &read_node,
+            QueryGraphDependency::ProjectedDataDependency(
+                model.primary_identifier(),
+                Box::new(move |mut read_node, mut parent_ids| {
+                    let parent_id = match parent_ids.pop() {
+                        Some(pid) => Ok(pid),
+                        None => Err(QueryGraphBuilderError::AssertionError(
+                            "Expected a valid parent ID to be present for create follow-up read query.".to_string(),
+                        )),
+                    }?;
+
+                    if let Node::Query(Query::Read(ReadQuery::RecordQuery(ref mut rq))) = read_node {
+                        rq.add_filter(parent_id.filter());
+                    };
+
+                    Ok(read_node)
+                }),
+            ),
+        )?;
+    }
 
     Ok(())
 }
@@ -107,7 +114,82 @@ pub fn create_record_node(
     args.add_datetimes(&model);
 
-    let cr = CreateRecord { model, args };
+    let selected_fields = model.primary_identifier();
+    let selection_order = selected_fields.db_names().collect();
+
+    let cr = CreateRecord {
+        // A regular create record is never used as a result node. Therefore, it's never serialized, so we don't need a name.
+        name: String::new(),
+        model,
+        args,
+        selected_fields,
+        selection_order,
+    };
+
     let create_node = graph.create_node(Query::Write(WriteQuery::CreateRecord(cr)));
 
     for (relation_field, data_map) in create_args.nested {
         nested::connect_nested_query(graph, query_schema, create_node, relation_field, data_map)?;
     }
 
     Ok(create_node)
 }
+
+/// An atomic create is a create performed in a single operation.
+/// It uses `INSERT ... RETURNING` when the connector supports it.
+/// We only perform such a create when:
+/// 1. There are no nested operations
+/// 2. The selection set contains no relations
+fn can_use_atomic_create(
+    query_schema: &QuerySchema,
+    model: &Model,
+    data_map: &ParsedInputMap<'_>,
+    field: &ParsedField<'_>,
+) -> bool {
+    // If the connector does not support RETURNING at all
+    if !query_schema.has_capability(ConnectorCapability::InsertReturning) {
+        return false;
+    }
+
+    // If the operation has nested operations
+    if WriteArgsParser::has_nested_operation(model, data_map) {
+        return false;
+    }
+
+    // If the operation has nested selection sets
+    if field.has_nested_selection() {
+        return false;
+    }
+
+    true
+}
+
+/// Creates a create record query that's done in a single operation and adds it to the query graph.
+/// Translates to an `INSERT ... RETURNING` under the hood.
+fn atomic_create_record_node(
+    graph: &mut QueryGraph,
+    query_schema: &QuerySchema,
+    model: Model,
+    data_map: ParsedInputMap<'_>,
+    field: ParsedField<'_>,
+) -> QueryGraphBuilderResult<NodeRef> {
+    let create_args = WriteArgsParser::from(&model, data_map)?;
+    let mut args = create_args.args;
+
+    let nested_fields = field.nested_fields.unwrap().fields;
+    let selection_order: Vec<String> = read::utils::collect_selection_order(&nested_fields);
+    let selected_fields = read::utils::collect_selected_scalars(&nested_fields, &model);
+
+    args.add_datetimes(&model);
+
+    let cr = CreateRecord {
+        name: field.name.clone(),
+        model,
+        args,
+        selected_fields,
+        selection_order,
+    };
+
+    let create_node = graph.create_node(Query::Write(WriteQuery::CreateRecord(cr)));
+
+    for (relation_field, data_map) in create_args.nested {
+        nested::connect_nested_query(graph, query_schema, create_node, relation_field, data_map)?;
+    }
+
+    Ok(create_node)
+}
diff --git a/query-engine/core/src/query_graph_builder/write/write_args_parser.rs b/query-engine/core/src/query_graph_builder/write/write_args_parser.rs
index 251ed33f90c..c5473065ac6 100644
--- a/query-engine/core/src/query_graph_builder/write/write_args_parser.rs
+++ b/query-engine/core/src/query_graph_builder/write/write_args_parser.rs
@@ -51,6 +51,12 @@ impl<'a> WriteArgsParser<'a> {
             },
         )
     }
+
+    pub(crate) fn has_nested_operation(model: &Model, data_map: &ParsedInputMap<'a>) -> bool {
+        data_map
+            .iter()
+            .any(|(field_name, _)| model.fields().find_from_relation_fields(field_name).is_ok())
+    }
 }
 
 fn parse_scalar(sf: &ScalarFieldRef, v: ParsedInputValue<'_>) -> Result<PrismaValue, QueryGraphBuilderError> {
diff --git a/query-engine/prisma-models/src/field/mod.rs b/query-engine/prisma-models/src/field/mod.rs
index f895b620216..d05b6d7d636 100644
--- a/query-engine/prisma-models/src/field/mod.rs
+++ b/query-engine/prisma-models/src/field/mod.rs
@@ -77,13 +77,6 @@ impl Field {
         }
     }
 
-    pub fn try_into_scalar(self) -> Option<ScalarFieldRef> {
-        match self {
-            Field::Scalar(scalar) => Some(scalar),
-            _ => None,
-        }
-    }
-
     pub fn is_required(&self) -> bool {
         match self {
             Field::Scalar(ref sf) => sf.is_required(),
diff --git a/query-engine/prisma-models/src/parent_container.rs b/query-engine/prisma-models/src/parent_container.rs
index d1b97550e24..e1ada0875f9 100644
--- a/query-engine/prisma-models/src/parent_container.rs
+++ b/query-engine/prisma-models/src/parent_container.rs
@@ -48,7 +48,6 @@ impl ParentContainer {
     pub fn find_field(&self, prisma_name: &str) -> Option<Field> {
         match self {
             ParentContainer::Model(model) => model.fields().find_from_all(prisma_name).ok(),
-
             ParentContainer::CompositeType(ct) => ct.fields().find(|field| field.name() == prisma_name),
         }
     }
diff --git a/query-engine/prisma-models/src/record.rs b/query-engine/prisma-models/src/record.rs
index 19842fbf4b5..757fec74f87 100644
--- a/query-engine/prisma-models/src/record.rs
+++ b/query-engine/prisma-models/src/record.rs
@@ -143,6 +143,12 @@ pub struct Record {
     pub parent_id: Option<SelectionResult>,
 }
 
+impl From<SelectionResult> for Record {
+    fn from(selection_result: SelectionResult) -> Self {
+        Record::new(selection_result.values().collect())
+    }
+}
+
 impl Record {
     pub fn new(values: Vec<PrismaValue>) -> Record {
         Record {
diff --git a/query-engine/prisma-models/src/selection_result.rs b/query-engine/prisma-models/src/selection_result.rs
index 681bf54d223..d6e1ef46349 100644
--- a/query-engine/prisma-models/src/selection_result.rs
+++ b/query-engine/prisma-models/src/selection_result.rs
@@ -61,6 +61,10 @@ impl SelectionResult {
         self.len() == 0
     }
 
+    pub fn db_names(&self) -> impl Iterator<Item = &str> + '_ {
+        self.pairs.iter().map(|(field, _)| field.db_name())
+    }
+
     /// Consumes this `SelectionResult` and splits it into a set of `SelectionResult`s based on the passed
     /// `FieldSelection`s. Assumes that the transformation can be done.
     pub fn split_into(self, field_selections: &[FieldSelection]) -> Vec<SelectionResult> {
diff --git a/query-engine/schema/src/query_schema.rs b/query-engine/schema/src/query_schema.rs
index 72f2fb1bfa5..b8b6a0e7440 100644
--- a/query-engine/schema/src/query_schema.rs
+++ b/query-engine/schema/src/query_schema.rs
@@ -100,7 +100,7 @@ impl QuerySchema {
         self.preview_features.contains(feature)
     }
 
-    pub(crate) fn has_capability(&self, capability: ConnectorCapability) -> bool {
+    pub fn has_capability(&self, capability: ConnectorCapability) -> bool {
         self.connector.has_capability(capability)
     }
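
Net effect, sketched against an assumed Postgres model with a generated `id` and a
`title` column (names illustrative, not taken from the test schemas): a `createOne`
used to be planned as a transactional insert plus a follow-up read,

    INSERT INTO "Post" ("title") VALUES ($1) RETURNING "id";
    SELECT "id", "title" FROM "Post" WHERE "id" = $1;

whereas with the `InsertReturning` capability set and the `can_use_atomic_create`
checks passing, the same operation now executes as a single statement:

    INSERT INTO "Post" ("title") VALUES ($1) RETURNING "id", "title";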