perf: use db pagination for one2m related fetch
Database pagination is currently only used when we're fetching related records from a single parent.
Weakky committed Jul 28, 2023
1 parent 2a6ced8 commit 3314cd4
Showing 4 changed files with 71 additions and 17 deletions.
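
The idea behind the change, stated in the commit message above, is that pagination can be pushed down to the database whenever related records are fetched for exactly one parent; with several parents, take/skip must still be applied per parent after the fetch. Below is a minimal, self-contained Rust sketch of that decision — the names RelatedFetchPlan and pick_plan are invented for illustration and are not part of the engine's API.

#[derive(Debug, PartialEq)]
enum RelatedFetchPlan {
    // skip/take are translated to OFFSET/LIMIT in the SQL sent to the database.
    DatabasePagination,
    // all related rows are fetched and skip/take is applied per parent in memory.
    InMemoryPagination,
}

fn pick_plan(parent_count: usize, needs_inmemory_processing: bool) -> RelatedFetchPlan {
    // With a single parent there is only one window of related records,
    // so a plain OFFSET/LIMIT returns exactly the requested page.
    if parent_count == 1 && !needs_inmemory_processing {
        RelatedFetchPlan::DatabasePagination
    } else {
        // With several parents we need N records *per parent*; one global
        // OFFSET/LIMIT would cut across parents, so rows are post-processed instead.
        RelatedFetchPlan::InMemoryPagination
    }
}

fn main() {
    assert_eq!(pick_plan(1, false), RelatedFetchPlan::DatabasePagination);
    assert_eq!(pick_plan(3, false), RelatedFetchPlan::InMemoryPagination);
}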
@@ -65,6 +65,13 @@ mod nested_pagination {
@r###"{"data":{"findManyTop":[{"t":"T1","middles":[]},{"t":"T2","middles":[{"m":"M22"},{"m":"M23"}]},{"t":"T3","middles":[]}]}}"###
);

insta::assert_snapshot!(
run_query!(&runner, r#"{
findManyTop(skip: 1, take: 1){t, middles(cursor: { m: "M22" }, orderBy: { id: asc }){ m }}
}"#),
@r###"{"data":{"findManyTop":[{"t":"T2","middles":[{"m":"M22"},{"m":"M23"}]}]}}"###
);

Ok(())
}

@@ -84,6 +91,13 @@ mod nested_pagination {
@r###"{"data":{"findManyTop":[{"t":"T1","middles":[{"m":"M12"},{"m":"M13"}]},{"t":"T2","middles":[{"m":"M22"},{"m":"M23"}]},{"t":"T3","middles":[{"m":"M32"},{"m":"M33"}]}]}}"###
);

insta::assert_snapshot!(
run_query!(&runner, r#"{
findManyTop(take: 1){t, middles(skip: 1){m}}
}"#),
@r###"{"data":{"findManyTop":[{"t":"T1","middles":[{"m":"M12"},{"m":"M13"}]}]}}"###
);

Ok(())
}

@@ -99,6 +113,13 @@ mod nested_pagination {
@r###"{"data":{"findManyTop":[{"t":"T1","middles":[]},{"t":"T2","middles":[]},{"t":"T3","middles":[]}]}}"###
);

insta::assert_snapshot!(
run_query!(&runner, r#"{
findManyTop(take: 1){t, middles(skip: 3){m}}
}"#),
@r###"{"data":{"findManyTop":[{"t":"T1","middles":[]}]}}"###
);

Ok(())
}

@@ -114,6 +135,13 @@ mod nested_pagination {
@r###"{"data":{"findManyTop":[{"t":"T1","middles":[]},{"t":"T2","middles":[]},{"t":"T3","middles":[]}]}}"###
);

insta::assert_snapshot!(
run_query!(&runner, r#"{
findManyTop(take: 1){t, middles(skip: 4){m}}
}"#),
@r###"{"data":{"findManyTop":[{"t":"T1","middles":[]}]}}"###
);

Ok(())
}

@@ -193,6 +221,13 @@ mod nested_pagination {
@r###"{"data":{"findManyTop":[{"t":"T1","middles":[]},{"t":"T2","middles":[]},{"t":"T3","middles":[]}]}}"###
);

insta::assert_snapshot!(
run_query!(&runner, r#"{
findManyTop(take: 1){t, middles(take: 0){m}}
}"#),
@r###"{"data":{"findManyTop":[{"t":"T1","middles":[]}]}}"###
);

Ok(())
}

@@ -208,6 +243,13 @@ mod nested_pagination {
@r###"{"data":{"findManyTop":[{"t":"T1","middles":[{"m":"M11"}]},{"t":"T2","middles":[{"m":"M21"}]},{"t":"T3","middles":[{"m":"M31"}]}]}}"###
);

insta::assert_snapshot!(
run_query!(&runner, r#"{
findManyTop(take: 1){t, middles(take: 1){m}}
}"#),
@r###"{"data":{"findManyTop":[{"t":"T1","middles":[{"m":"M11"}]}]}}"###
);

Ok(())
}

@@ -223,6 +265,13 @@ mod nested_pagination {
@r###"{"data":{"findManyTop":[{"t":"T1","middles":[{"m":"M11"},{"m":"M12"},{"m":"M13"}]},{"t":"T2","middles":[{"m":"M21"},{"m":"M22"},{"m":"M23"}]},{"t":"T3","middles":[{"m":"M31"},{"m":"M32"},{"m":"M33"}]}]}}"###
);

insta::assert_snapshot!(
run_query!(&runner, r#"{
findManyTop(take: 1){t, middles(take: 3){m}}
}"#),
@r###"{"data":{"findManyTop":[{"t":"T1","middles":[{"m":"M11"},{"m":"M12"},{"m":"M13"}]}]}}"###
);

Ok(())
}

12 changes: 3 additions & 9 deletions query-engine/connectors/query-connector/src/query_arguments.rs
@@ -1,11 +1,5 @@
use crate::filter::Filter;
use prisma_models::{ast::FieldArity, *};

#[derive(Debug, Clone, Copy, PartialEq)]
pub struct SkipAndLimit {
pub skip: usize,
pub limit: Option<usize>,
}
use prisma_models::*;

/// `QueryArguments` define various constraints queried data should fulfill:
/// - `cursor`, `take`, `skip` page through the data.
@@ -159,12 +153,12 @@ impl QueryArguments {

let has_optional_hop = on_relation.iter().any(|o| {
o.path.iter().any(|hop| match hop {
OrderByHop::Relation(rf) => rf.arity() == FieldArity::Optional,
OrderByHop::Relation(rf) => rf.arity().is_optional(),
OrderByHop::Composite(cf) => !cf.is_required(),
})
});

// [Dom] I'm not entirely sure why we're doing this, but I assume that optionals introduce NULLs that make the ordering inherently unstable?
// Optional hops introduce NULLs that make the ordering inherently unstable.
if has_optional_hop {
return false;
}
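
The rewritten comment in query_arguments.rs above captures why ordering through an optional relation hop forces in-memory processing: parents without a related record contribute NULL order keys, and rows that tie on the key have no deterministic relative order, so a paginated window can shift from one query to the next. The following is a small standalone Rust illustration of that effect, using Option values as stand-ins for nullable keys; it is purely illustrative and not engine code.

fn main() {
    // Two result sets, both validly ordered by a nullable key: rows that tie
    // on None (NULL) may come back in either relative order.
    let run_a = vec![(1, None::<i32>), (2, None), (3, Some(10))];
    let run_b = vec![(2, None::<i32>), (1, None), (3, Some(10))];

    // Each run is sorted by the key (None sorts before Some here).
    assert!(run_a.windows(2).all(|w| w[0].1 <= w[1].1));
    assert!(run_b.windows(2).all(|w| w[0].1 <= w[1].1));

    // Yet the first page (take: 2) differs between the two runs,
    // which is the instability the optional-hop check guards against.
    let page_a: Vec<i32> = run_a.iter().take(2).map(|(id, _)| *id).collect();
    let page_b: Vec<i32> = run_b.iter().take(2).map(|(id, _)| *id).collect();
    assert_ne!(page_a, page_b);
}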
@@ -9,11 +9,11 @@ use std::collections::HashMap;

pub(crate) async fn m2m(
tx: &mut dyn ConnectionLike,
query: &RelatedRecordsQuery,
query: &mut RelatedRecordsQuery,
parent_result: Option<&ManyRecords>,
processor: InMemoryRecordProcessor,
trace_id: Option<String>,
) -> InterpretationResult<(ManyRecords, Option<Vec<RelAggregationRow>>)> {
let processor = InMemoryRecordProcessor::new_from_query_args(&mut query.args);
let parent_field = &query.parent_field;
let child_link_id = parent_field.related_field().linking_fields();

@@ -138,10 +138,9 @@ pub async fn one2m(
parent_field: &RelationFieldRef,
parent_selections: Option<Vec<SelectionResult>>,
parent_result: Option<&ManyRecords>,
query_args: QueryArguments,
mut query_args: QueryArguments,
selected_fields: &FieldSelection,
aggr_selections: Vec<RelAggregationSelection>,
processor: InMemoryRecordProcessor,
trace_id: Option<String>,
) -> InterpretationResult<(ManyRecords, Option<Vec<RelAggregationRow>>)> {
let parent_model_id = parent_field.model().primary_identifier();
@@ -190,6 +189,15 @@
return Ok((ManyRecords::empty(selected_fields), None));
}

// If we're fetching related records from a single parent, then we can apply normal pagination.
// If we're fetching related records from multiple parents though, we can't just apply a LIMIT/OFFSET,
// as we need N related records PER parent. The only known viable solution is the use of `ROW_NUMBER()`.
let processor = if uniq_selections.len() == 1 && !query_args.requires_inmemory_processing() {
None
} else {
Some(InMemoryRecordProcessor::new_from_query_args(&mut query_args))
};

let mut scalars = {
let filter = child_link_id.is_in(ConditionListValue::list(uniq_selections));
let mut args = query_args;
@@ -198,6 +206,7 @@
Some(existing_filter) => Some(Filter::and(vec![existing_filter, filter])),
None => Some(filter),
};

tx.get_many_records(
&parent_field.related_model(),
args,
@@ -255,7 +264,11 @@
);
}

let scalars = processor.apply(scalars);
let scalars = if let Some(processor) = processor {
processor.apply(scalars)
} else {
scalars
};
let (scalars, aggregation_rows) = read::extract_aggregation_rows_from_scalars(scalars, aggr_selections);

Ok((scalars, aggregation_rows))
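
For the multi-parent path, the new comment in one2m names ROW_NUMBER() as the only known viable way to get N related records per parent, since a single LIMIT/OFFSET would cut across parents. Here is a hedged sketch of what such a query could look like, embedded as a string in Rust; the table and column names (Middle, topId, id) are invented for this sketch and this is not the SQL the engine actually emits.

fn main() {
    // Hypothetical shape of a per-parent paginated fetch (skip: 1, take: 2)
    // for the children of several parents at once.
    let sql = r#"
SELECT *
FROM (
    SELECT m.*,
           ROW_NUMBER() OVER (PARTITION BY m."topId" ORDER BY m."id") AS rn
    FROM "Middle" AS m
    WHERE m."topId" IN ($1, $2, $3)
) AS numbered
WHERE numbered.rn > 1 AND numbered.rn <= 3
"#;
    println!("{sql}");
}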
4 changes: 1 addition & 3 deletions query-engine/core/src/interpreter/query_interpreters/read.rs
@@ -142,10 +142,9 @@ fn read_related<'conn>(
) -> BoxFuture<'conn, InterpretationResult<QueryResult>> {
let fut = async move {
let relation = query.parent_field.relation();
let processor = InMemoryRecordProcessor::new_from_query_args(&mut query.args);

let (scalars, aggregation_rows) = if relation.is_many_to_many() {
nested_read::m2m(tx, &query, parent_result, processor, trace_id).await?
nested_read::m2m(tx, &mut query, parent_result, trace_id).await?
} else {
nested_read::one2m(
tx,
@@ -155,7 +154,6 @@
query.args.clone(),
&query.selected_fields,
query.aggregation_selections,
processor,
trace_id,
)
.await?
