diff --git a/sqlakeyset/__init__.py b/sqlakeyset/__init__.py
index 322327c0..47737356 100644
--- a/sqlakeyset/__init__.py
+++ b/sqlakeyset/__init__.py
@@ -1,4 +1,12 @@
-from .paging import get_page, select_page, InvalidPage
+from .paging import (
+    get_homogeneous_pages,
+    get_page,
+    select_homogeneous_pages,
+    select_page,
+    InvalidPage,
+    OrmPageRequest,
+    PageRequest,
+)
 from .results import (
     Page,
     Paging,
@@ -15,11 +23,15 @@ from .types import Keyset, Marker
 
 __all__ = [
+    "get_homogeneous_pages",
     "get_page",
+    "select_homogeneous_pages",
     "select_page",
     "serialize_bookmark",
     "unserialize_bookmark",
     "Page",
+    "PageRequest",
+    "OrmPageRequest",
     "Paging",
     "Keyset",
     "Marker",
diff --git a/sqlakeyset/columns.py b/sqlakeyset/columns.py
index 4295ce01..fa621131 100644
--- a/sqlakeyset/columns.py
+++ b/sqlakeyset/columns.py
@@ -366,9 +366,8 @@ def derive_order_key(ocol, desc, index):
     else:
         return None
 
-    entity = desc["entity"]
     expr = desc["expr"]
-
+    entity = desc.get("entity")
     if isinstance(expr, Bundle):
         for key, col in dict(expr.columns).items():
             if strip_labels(col).compare(ocol.comparable_value):
@@ -409,7 +408,7 @@ def derive_order_key(ocol, desc, index):
 
     # is an attribute with label
     try:
-        if ocol.quoted_full_name == OC(expr).full_name:
+        if ocol.quoted_full_name == OC(expr).quoted_full_name:
             return DirectColumn(ocol, index)
     except sqlalchemy.exc.ArgumentError:
         pass
diff --git a/sqlakeyset/paging.py b/sqlakeyset/paging.py
index c1ed1d33..3b3a1d5c 100644
--- a/sqlakeyset/paging.py
+++ b/sqlakeyset/paging.py
@@ -3,8 +3,11 @@
 from __future__ import annotations
 
 from functools import partial
+from dataclasses import dataclass, field
 from typing import (
     Any,
+    Callable,
+    Generic,
     List,
     NamedTuple,
     Optional,
@@ -16,12 +19,12 @@
 )
 from typing_extensions import Literal  # to keep python 3.7 support
 
-from sqlalchemy import tuple_, and_, or_
+from sqlalchemy import tuple_, and_, or_, func, text
 from sqlalchemy.engine import Connection
 from sqlalchemy.engine.interfaces import Dialect
 from sqlalchemy.orm import Session
 from sqlalchemy.orm.query import Query
-from sqlalchemy.sql.expression import ColumnElement
+from sqlalchemy.sql.expression import ColumnElement, literal, select, union_all
 from sqlalchemy.sql.selectable import Select
 
 from .columns import OC, MappedOrderColumn, find_order_key, parse_ob_clause
@@ -152,6 +155,7 @@ def prepare_paging(
     backwards: bool,
     orm: Literal[True],
     dialect: Dialect,
+    page_identifier: Optional[int] = None,
 ) -> _PagingQuery:
     ...
 
@@ -164,6 +168,7 @@ def prepare_paging(
     backwards: bool,
     orm: Literal[False],
     dialect: Dialect,
+    page_identifier: Optional[int] = None,
 ) -> _PagingSelect:
     ...
 
@@ -175,6 +180,7 @@ def prepare_paging(
     backwards: bool,
     orm: bool,
     dialect: Dialect,
+    page_identifier: Optional[int] = None,
 ) -> Union[_PagingQuery, _PagingSelect]:
     if orm:
         if not isinstance(q, Query):
@@ -203,12 +209,24 @@ def prepare_paging(
     extra_columns = [
         col.extra_column for col in mapped_ocols if col.extra_column is not None
     ]
+
     if hasattr(q, "add_columns"):  # ORM or SQLAlchemy 1.4+
         q = q.add_columns(*extra_columns)
     else:
         for col in extra_columns:  # SQLAlchemy Core <1.4
             q = q.column(col)  # type: ignore
 
+    q = _apply_where_and_limit(q, selectable, per_page, place, dialect, order_cols, orm)
+
+    if orm:
+        assert isinstance(q, Query)
+        return _PagingQuery(q, order_cols, mapped_ocols, extra_columns)
+    else:
+        assert not isinstance(q, Query)
+        return _PagingSelect(q, order_cols, mapped_ocols, extra_columns)
+
+
+def _apply_where_and_limit(q, selectable, per_page, place, dialect, order_cols, orm):
     if place:
         condition = where_condition_for_page(order_cols, place, dialect)
         # For aggregate queries, paging condition is applied *after*
@@ -223,12 +241,7 @@
             q = q.where(condition)
 
     q = q.limit(per_page + 1)  # 1 extra to check if there's a further page
-    if orm:
-        assert isinstance(q, Query)
-        return _PagingQuery(q, order_cols, mapped_ocols, extra_columns)
-    else:
-        assert not isinstance(q, Query)
-        return _PagingSelect(q, order_cols, mapped_ocols, extra_columns)
+    return q
 
 
 def orm_get_page(
@@ -276,11 +289,13 @@ def core_get_page(
     :param backwards: If ``True``, reverse pagination direction.
     :returns: :class:`Page`
     """
-    # We need the result schema for the *original* query in order to properly
-    # trim off our extra_columns. As far as I can tell, this is the only
+    # In SQLAlchemy 1.3, we need the result schema for the *original* query in order
+    # to properly trim off our extra_columns. As far as I can tell, this is the only
     # way to get it without copy-pasting chunks of the sqlalchemy internals.
     # LIMIT 0 to minimize database load (though the fact that a round trip to
     # the DB has to happen at all is regrettable).
+
+    # Thankfully this is obsolete in 1.4+
     result_type = core_result_type(selectable, s)
     sel = prepare_paging(
         q=selectable,
@@ -438,3 +453,289 @@ def get_page(
     place, backwards = process_args(after, before, page)
 
     return orm_get_page(query, per_page, place, backwards)
+
+
+@dataclass
+class OrmPageRequest(Generic[_TP]):
+    """See ``get_page()`` documentation for parameter explanations."""
+    query: Query[_TP]
+    per_page: int = PER_PAGE_DEFAULT
+    after: OptionalKeyset = None
+    before: OptionalKeyset = None
+    page: Optional[Union[MarkerLike, str]] = None
+
+
+@dataclass
+class PageRequest(Generic[_TP]):
+    """See ``select_page()`` documentation for parameter explanations."""
+    selectable: Select[_TP]
+    per_page: int = PER_PAGE_DEFAULT
+    after: OptionalKeyset = None
+    before: OptionalKeyset = None
+    page: Optional[Union[MarkerLike, str]] = None
+
+
+def get_homogeneous_pages(requests: list[OrmPageRequest[_TP]]) -> list[Page[Row[_TP]]]:
+    """Get multiple pages of results for homogeneous legacy ORM queries.
+
+    This only involves a single round trip to the database. To do that, under the
+    hood it generates a UNION ALL. That means each query must select exactly the
+    same columns. They may have different filters or ordering, but must result in
+    selecting the same columns with the same names.
+
+    Note: This requires the underlying database to support ORDER BY and LIMIT
+    statements in components of a compound select, which SQLite does not.
+
+    Resulting pages are returned in the same order as the original page requests.
+    """
+    if not requests:
+        return []
+
+    # Because UNION ALL requires identical SELECT statements, but we allow different
+    # order_bys which could result in different extra columns for order keys, we need
+    # to first find the superset of extra columns and then add those to every single
+    # selectable.
+    ordering_infos = _get_ordering_infos(requests, orm=True)
+    prepared_queries = [
+        _orm_prepare_homogeneous_page(request, ordering_infos[i], i)
+        for i, request in enumerate(requests)
+    ]
+
+    query = prepared_queries[0].paging_query.query
+    query = query.union_all(
+        *[p.paging_query.query for p in prepared_queries[1:]]
+    ).order_by(text("_page_identifier"), text("_row_number"))
+
+    results = query.all()
+
+    # We need to make sure there's an entry for every page in case some return
+    # empty.
+    page_to_rows = {i: list() for i in range(len(requests))}
+    for row in results:
+        page_to_rows[row._page_identifier].append(row)
+
+    pages = []
+    for i in range(len(requests)):
+        rows = page_to_rows[i]
+        pages.append(prepared_queries[i].page_from_rows(rows))
+    return pages
+
+
+def select_homogeneous_pages(
+    requests: list[PageRequest[_TP]], s: Union[Session, Connection]
+) -> list[Page[Row[_TP]]]:
+    """Get multiple pages of results for homogeneous 2.0 style queries.
+
+    This only involves a single round trip to the database. To do that, under the
+    hood it generates a UNION ALL. That means each query must select exactly the
+    same columns. They may have different filters or ordering, but must result in
+    selecting the same columns with the same names.
+
+    Note: This requires the underlying database to support ORDER BY and LIMIT
+    statements in components of a compound select, which SQLite does not.
+
+    Resulting pages are returned in the same order as the original page requests.
+
+    Only supported in SQLAlchemy version 1.4+
+    """
+    if not requests:
+        return []
+
+    if len(requests) == 1:
+        # Handling 1 request is annoying because of its effect on union_all,
+        # so it's easier to just farm it out.
+        request = requests[0]
+        return [
+            select_page(
+                s,
+                request.selectable,
+                per_page=request.per_page,
+                after=request.after,
+                before=request.before,
+                page=request.page
+            )
+        ]
+
+    # Because UNION ALL requires identical SELECT statements, but we allow different
+    # order_bys which could result in different extra columns for order keys, we need
+    # to first find the superset of extra columns and then add those to every single
+    # selectable.
+    ordering_infos = _get_ordering_infos(requests, orm=False)
+
+    prepared_queries = [
+        _core_prepare_homogeneous_page(request, s, ordering_infos[i], i)
+        for i, request in enumerate(requests)
+    ]
+
+    selectable = union_all(
+        *[p.paging_query.select for p in prepared_queries]
+    ).order_by(text("_page_identifier"), text("_row_number"))
+
+    columns = prepared_queries[0].paging_query.select._raw_columns
+    selectable = select(*columns).from_statement(selectable)
+
+    selected = s.execute(selectable)
+    results = selected.fetchall()
+
+    # We need to make sure there's an entry for every page in case some return
+    # empty.
+    page_to_rows = {i: list() for i in range(len(requests))}
+    for row in results:
+        page_to_rows[row._page_identifier].append(row)
+
+    pages = []
+
+    keys = list(selected.keys())
+    N = len(keys) - len(prepared_queries[0].paging_query.extra_columns)
+    keys = keys[:N]
+
+    for i in range(len(requests)):
+        rows = page_to_rows[i]
+        pages.append(prepared_queries[i].page_from_rows(rows, keys))
+    return pages
+
+
+@dataclass
+class _OrderingInfo:
+    order_cols: list[OC] = field(default_factory=list)
+    mapped_ocols: list[MappedOrderColumn] = field(default_factory=list)
+    extra_columns: list[ColumnElement] = field(default_factory=list)
+
+
+def _get_ordering_infos(requests, orm) -> list[_OrderingInfo]:
+    infos = []
+    extra_column_mappers: dict[str, MappedOrderColumn] = {}
+
+    for request in requests:
+        info = _OrderingInfo()
+        infos.append(info)
+        if orm:
+            if not isinstance(request, OrmPageRequest):
+                raise ValueError("If orm=True then requests must be OrmPageRequests")
+            selectable = orm_to_selectable(request.query)
+            column_descriptions = request.query.column_descriptions
+        else:
+            if isinstance(request, OrmPageRequest):
+                raise ValueError("If orm=False then requests cannot be OrmPageRequests")
+            selectable = request.selectable
+            try:
+                column_descriptions = selectable.column_descriptions
+            except Exception:
+                column_descriptions = selectable._raw_columns  # type: ignore
+
+        order_cols = parse_ob_clause(selectable)
+        place, backwards = process_args(request.after, request.before, request.page)
+        if backwards:
+            order_cols = [c.reversed for c in order_cols]
+        info.order_cols = order_cols
+
+        mapped_ocols = [find_order_key(ocol, column_descriptions) for ocol in order_cols]
+        for i, col in enumerate(list(mapped_ocols)):
+            if col.extra_column is None:
+                continue
+            name = OC(col.extra_column).quoted_full_name
+            if name in extra_column_mappers:
+                mapped_ocols[i] = extra_column_mappers[name]
+                # Since we cache these mappers across different selects, we need
+                # to fix up any ordering here.
+                if mapped_ocols[i].oc.is_ascending != order_cols[i].is_ascending:
+                    mapped_ocols[i] = mapped_ocols[i].reversed
+            else:
+                extra_column_mappers[name] = col
+
+        info.mapped_ocols = mapped_ocols
+
+    extra_columns = [col.extra_column for col in extra_column_mappers.values()]
+    for i, info in enumerate(infos):
+        info.extra_columns = list(extra_columns) + [
+            literal(i).label("_page_identifier"),
+            func.ROW_NUMBER().over(
+                order_by=[c.uo for c in info.order_cols]
+            ).label("_row_number"),
+        ]
+
+    return infos
+
+
+@dataclass
+class _PreparedQuery:
+    paging_query: Union[_PagingQuery, _PagingSelect]
+    page_from_rows: Callable[..., Page[Row[_TP]]]
+
+
+def _core_prepare_homogeneous_page(
+    request: PageRequest[_TP],
+    s: Union[Session, Connection],
+    info: _OrderingInfo,
+    page_identifier: int
+) -> _PreparedQuery:
+    place, backwards = process_args(request.after, request.before, request.page)
+
+    selectable = request.selectable
+    result_type = core_result_type(selectable, s)
+
+    clauses = [col.ob_clause for col in info.mapped_ocols]
+    selectable = selectable.order_by(None).order_by(*clauses)
+
+    selectable = selectable.add_columns(*info.extra_columns)
+    selectable = _apply_where_and_limit(
+        selectable,
+        selectable,
+        request.per_page,
+        place,
+        get_bind(q=selectable, s=s).dialect,
+        info.order_cols,
+        orm=False
+    )
+    sel = _PagingSelect(selectable, info.order_cols, info.mapped_ocols, info.extra_columns)
+
+    def page_from_rows(rows, keys):
+        page = core_page_from_rows(
+            sel,
+            rows,
+            keys,
+            result_type,
+            request.per_page,
+            backwards,
+            current_place=place,
+        )
+        return page
+
+    return _PreparedQuery(paging_query=sel, page_from_rows=page_from_rows)
+
+
+def _orm_prepare_homogeneous_page(
+    request: OrmPageRequest[_TP], info: _OrderingInfo, page_identifier: int
+) -> _PreparedQuery:
+    place, backwards = process_args(request.after, request.before, request.page)
+
+    query = request.query
+    result_type = orm_result_type(query)
+    keys = orm_query_keys(query)
+
+    clauses = [col.ob_clause for col in info.mapped_ocols]
+    query = query.order_by(None).order_by(*clauses)
+
+    if hasattr(query, "add_columns"):  # ORM or SQLAlchemy 1.4+
+        query = query.add_columns(*info.extra_columns)
+    else:
+        for col in info.extra_columns:  # SQLAlchemy Core <1.4
+            query = query.column(col)  # type: ignore
+
+    query = _apply_where_and_limit(
+        query,
+        orm_to_selectable(query),
+        request.per_page,
+        place,
+        query.session.get_bind().dialect,
+        info.order_cols,
+        orm=True
+    )
+    paging_query = _PagingQuery(query, info.order_cols, info.mapped_ocols, info.extra_columns)
+
+    def page_from_rows(rows):
+        return orm_page_from_rows(
+            paging_query, rows, keys, result_type, request.per_page, backwards, current_place=place
+        )
+
+    return _PreparedQuery(paging_query=paging_query, page_from_rows=page_from_rows)
diff --git a/tests/conftest.py b/tests/conftest.py
index 37421080..4d264386 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -246,6 +246,7 @@ def _dburl(request):
 
 dburl = pytest.fixture(params=SUPPORTED_ENGINES)(_dburl)
 no_mysql_dburl = pytest.fixture(params=["sqlite", "postgresql"])(_dburl)
+no_sqlite_dburl = pytest.fixture(params=["mysql", "postgresql"])(_dburl)
 pg_only_dburl = pytest.fixture(params=["postgresql"])(_dburl)
 
 
diff --git a/tests/test_paging.py b/tests/test_paging.py
index 721224f5..fc1830e2 100644
--- a/tests/test_paging.py
+++ b/tests/test_paging.py
@@ -13,24 +13,36 @@ docker containers.
 (Available python versions are 3.7, 3.8, 3.9, 3.10, 3.11 and valid sqlalchemy versions are 1.3.0, 1.4.0, 2.0.0.)"""
 import warnings
+from collections import deque
+from dataclasses import dataclass
 
 from packaging import version
+from typing import Any, Optional, Tuple, Union
 
 import pytest
 import sqlalchemy
-from sqlalchemy.orm import sessionmaker, aliased, Bundle
+from sqlalchemy.orm import sessionmaker, aliased, Bundle, Query
 from sqlalchemy import (
     desc,
     func,
 )
+from sqlalchemy.sql.expression import literal
+from sqlalchemy.sql.selectable import Select
 
 from sqlakeyset import (
+    get_homogeneous_pages,
     get_page,
+    select_homogeneous_pages,
     select_page,
     serialize_bookmark,
     unserialize_bookmark,
     InvalidPage,
+    OrmPageRequest,
+    PageRequest,
 )
 from sqlakeyset.paging import process_args
+from sqlakeyset.results import Page
+from sqlakeyset.sqla import SQLA_VERSION
+from sqlakeyset.types import MarkerLike
 from conftest import (
     Book,
     Author,
@@ -49,6 +61,48 @@
 warnings.simplefilter("error")
 
 
+@dataclass
+class _PageTracker:
+    query: Union[Query, Select]
+    unpaged: list
+    gathered: deque
+    backwards: bool
+    page: Tuple[Union[MarkerLike, str], bool]
+    selected: Any = None
+    page_with_paging: Optional[Page] = None
+
+
+def assert_paging_orm(page_with_paging, gathered, backwards, unpaged, page, per_page):
+    """Returns the marker for the next page, or None if there are no further pages.
+
+    Modifies ``gathered`` in place.
+    """
+    paging = page_with_paging.paging
+
+    assert paging.current == page
+
+    if backwards:
+        gathered.extendleft(reversed(page_with_paging))
+    else:
+        gathered.extend(page_with_paging)
+
+    if len(gathered) < len(unpaged):
+        # Ensure each page is the correct size
+        assert len(page_with_paging) == per_page
+        assert paging.has_further
+    else:
+        assert not paging.has_further
+
+    if not page_with_paging:
+        assert not paging.has_further
+        assert paging.further == paging.current
+        assert paging.current_opposite == (None, not paging.backwards)
+        # paging.further is still a bookmark here (== paging.current), so return None explicitly to tell the caller to stop.
+        return None
+
+    return paging.further
+
+
 def check_paging_orm(q):
     item_counts = range(1, 12)
 
@@ -56,7 +110,7 @@
 
     for backwards in [False, True]:
         for per_page in item_counts:
-            gathered = []
+            gathered = deque()
 
             page = None, backwards
 
@@ -65,32 +119,68 @@
                 page = unserialize_bookmark(serialized_page)
 
                 page_with_paging = get_page(q, per_page=per_page, page=serialized_page)
-                paging = page_with_paging.paging
+                page = assert_paging_orm(page_with_paging, gathered, backwards, unpaged, page, per_page)
+                if page is None:
+                    break
 
-                assert paging.current == page
+            # Ensure union of pages is original q.all()
+            assert list(gathered) == unpaged
 
-                if backwards:
-                    gathered = page_with_paging + gathered
-                else:
-                    gathered = gathered + page_with_paging
 
-                page = paging.further
+def check_multiple_paging_orm(qs):
+    page_trackers = [
+        _PageTracker(
+            query=q,
+            gathered=deque(),
+            backwards=(i % 2 == 0),
+            page=(None, i % 2 == 0),
+            unpaged=q.all(),
+        )
+        for i, q in enumerate(qs)
+    ]
+    while True:
+        for t in page_trackers:
+            t.page = unserialize_bookmark(serialize_bookmark(t.page))
 
-                if len(gathered) < len(unpaged):
-                    # Ensure each page is the correct size
-                    assert paging.has_further
-                    assert len(page_with_paging) == per_page
-                else:
-                    assert not paging.has_further
+        page_requests = [
+            OrmPageRequest(query=t.query, per_page=i + 1, page=t.page) for i, t in enumerate(page_trackers)
+        ]
+        pages_with_paging = get_homogeneous_pages(page_requests)
+        for p, t in zip(pages_with_paging, page_trackers):
+            t.page_with_paging = p
 
-                if not page_with_paging:
-                    assert not paging.has_further
-                    assert paging.further == paging.current
-                    assert paging.current_opposite == (None, not paging.backwards)
-                    break
+        for i, t in enumerate(list(page_trackers)):
+            page = assert_paging_orm(t.page_with_paging, t.gathered, t.backwards, t.unpaged, t.page, i + 1)
+            if page is None:
+                # Ensure union of pages is original q.all()
+                assert list(t.gathered) == t.unpaged
+                page_trackers.remove(t)
+                continue
 
-            # Ensure union of pages is original q.all()
-            assert gathered == unpaged
+            t.page = page
+
+        if not page_trackers:
+            break
+
+
+def assert_paging_core(page_with_paging, gathered, backwards, result, page, per_page):
+    paging = page_with_paging.paging
+
+    assert paging.current == page
+    assert page_with_paging.keys() == result.keys()
+
+    if backwards:
+        gathered.extendleft(reversed(page_with_paging))
+    else:
+        gathered.extend(page_with_paging)
+
+    if not page_with_paging:
+        assert not paging.has_further
+        assert paging.further == paging.current
+        assert paging.current_opposite == (None, not paging.backwards)
+        return None
+
+    return paging.further
 
 
 def check_paging_core(selectable, s):
@@ -101,7 +191,7 @@
 
     for backwards in [False, True]:
         for per_page in item_counts:
-            gathered = []
+            gathered = deque()
 
             page = None, backwards
 
@@ -112,25 +202,48 @@
                 page_with_paging = select_page(
                     s, selectable, per_page=per_page, page=serialized_page
                 )
-                paging = page_with_paging.paging
+                page = assert_paging_core(page_with_paging, gathered, backwards, result, page, per_page)
+                if page is None:
+                    break
 
-                assert paging.current == page
-                assert page_with_paging.keys() == result.keys()
+            assert list(gathered) == unpaged
 
-                if backwards:
-                    gathered = page_with_paging + gathered
-                else:
-                    gathered = gathered + page_with_paging
 
-                page = paging.further
+def check_multiple_paging_core(qs, s):
+    page_trackers = [
+        _PageTracker(
+            query=q,
+            gathered=deque(),
+            backwards=(i % 2 == 0),
+            page=(None, i % 2 == 0),
+            selected=s.execute(q),
+            unpaged=s.execute(q).fetchall(),
+        )
+        for i, q in enumerate(qs)
+    ]
+    while True:
+        for t in page_trackers:
+            t.page = unserialize_bookmark(serialize_bookmark(t.page))
 
-                if not page_with_paging:
-                    assert not paging.has_further
-                    assert paging.further == paging.current
-                    assert paging.current_opposite == (None, not paging.backwards)
-                    break
+        page_requests = [
+            PageRequest(selectable=t.query, per_page=i + 1, page=t.page) for i, t in enumerate(page_trackers)
+        ]
+        pages_with_paging = select_homogeneous_pages(page_requests, s)
+        for p, t in zip(pages_with_paging, page_trackers):
+            t.page_with_paging = p
+
+        for i, t in enumerate(list(page_trackers)):
+            page = assert_paging_core(t.page_with_paging, t.gathered, t.backwards, t.selected, t.page, i + 1)
+            if page is None:
+                # Ensure union of pages matches the original full result set
+                assert list(t.gathered) == t.unpaged, f"Different elements for tracker {t}"
+                page_trackers.remove(t)
+                continue
+
+            t.page = page
 
-            assert gathered == unpaged
+        if not page_trackers:
+            break
 
 
 def test_orm_query1(dburl):
@@ -361,6 +474,124 @@ def test_orm_joined_inheritance(joined_inheritance_dburl):
         check_paging_orm(q=q)
 
 
+def test_orm_multiple_pages(no_sqlite_dburl):
+    with S(no_sqlite_dburl, echo=ECHO) as s:
+        qs = [
+            s.query(Book).order_by(Book.name, Book.id),
+            s.query(Book).filter(Book.author_id == 1).order_by(Book.id),
+            s.query(Book).order_by(Book.name, Book.id.desc()),
+        ]
+        check_multiple_paging_orm(qs=qs)
+
+
+def test_orm_multiple_pages_select_columns(no_sqlite_dburl):
+    with S(no_sqlite_dburl, echo=ECHO) as s:
+        qs = [
+            s.query(Book.name, Book.author_id, Book.id).order_by(Book.name, Book.id),
+            s.query(Book.name, Book.author_id, Book.id).filter(Book.author_id == 1).order_by(Book.id),
+            s.query(Book.name, Book.author_id, Book.id).order_by(Book.name, Book.id.desc()),
+        ]
+        check_multiple_paging_orm(qs=qs)
+
+
+@pytest.mark.skipif(
+    SQLA_VERSION < version.parse("1.4.0b1"),
+    reason="Not supported in 1.3."
+)
+def test_orm_multiple_pages_different_extra_columns(no_sqlite_dburl):
+    with S(no_sqlite_dburl, echo=ECHO) as s:
+        qs = [
+            s.query(Book.name).order_by(Book.b, Book.id),
+            s.query(Book.name).order_by(Book.id),
+            s.query(Book.name).order_by(Book.c, Book.id),
+        ]
+        check_multiple_paging_orm(qs=qs)
+
+
+def test_orm_multiple_pages_one_query(no_sqlite_dburl):
+    with S(no_sqlite_dburl, echo=ECHO) as s:
+        qs = [
+            s.query(Book).order_by(Book.id),
+        ]
+        check_multiple_paging_orm(qs=qs)
+
+
+def test_orm_multiple_pages_empty_queries():
+    assert get_homogeneous_pages([]) == []
+
+
+@pytest.mark.skipif(
+    SQLA_VERSION < version.parse("1.4.0b1"),
+    reason="Not supported in 1.3."
+)
+def test_core_multiple_pages(no_sqlite_dburl):
+    # TODO: Add a test with an order by that adds an extra column.
+    with S(no_sqlite_dburl, echo=ECHO) as s:
+        qs = [
+            select(Book).order_by(Book.name, Book.id),
+            select(Book).where(Book.author_id == 1).order_by(Book.id),
+            select(Book).order_by(Book.name, Book.id.desc()),
+        ]
+        check_multiple_paging_core(qs=qs, s=s)
+
+
+@pytest.mark.skipif(
+    SQLA_VERSION < version.parse("1.4.0b1"),
+    reason="Not supported in 1.3."
+)
+def test_core_multiple_pages_select_columns(no_sqlite_dburl):
+    with S(no_sqlite_dburl, echo=ECHO) as s:
+        qs = [
+            select(Book.name, Book.author_id, Book.id).order_by(Book.name, Book.id),
+            select(Book.name, Book.author_id, Book.id).where(Book.author_id == 1).order_by(Book.id),
+            select(Book.name, Book.author_id, Book.id).order_by(Book.name, Book.id.desc()),
+        ]
+        check_multiple_paging_core(qs=qs, s=s)
+
+
+@pytest.mark.skipif(
+    SQLA_VERSION < version.parse("1.4.0b1"),
+    reason="Not supported in 1.3."
+)
+def test_core_multiple_pages_different_extra_columns(no_sqlite_dburl):
+    with S(no_sqlite_dburl, echo=ECHO) as s:
+        qs = [
+            select(Book.name).order_by(Book.b, Book.id),
+            select(Book.name).order_by(Book.id),
+            select(Book.name).order_by(Book.c, Book.id),
+        ]
+        check_multiple_paging_core(qs=qs, s=s)
+
+
+@pytest.mark.skipif(
+    SQLA_VERSION < version.parse("1.4.0b1"),
+    reason="Not supported in 1.3."
+)
+def test_core_multiple_pages_one_query(no_sqlite_dburl):
+    with S(no_sqlite_dburl, echo=ECHO) as s:
+        qs = [
+            select(Book.a, Book.b).order_by(Book.id),
+        ]
+        check_multiple_paging_core(qs=qs, s=s)
+
+
+@pytest.mark.skipif(
+    SQLA_VERSION < version.parse("1.4.0b1"),
+    reason="Not supported in 1.3."
+)
+def test_core_multiple_pages_one_query_whole_model(no_sqlite_dburl):
+    with S(no_sqlite_dburl, echo=ECHO) as s:
+        qs = [
+            select(Book).order_by(Book.id),
+        ]
+        check_multiple_paging_core(qs=qs, s=s)
+
+
+def test_core_multiple_pages_empty_queries(no_sqlite_dburl):
+    with S(no_sqlite_dburl, echo=ECHO) as s:
+        assert select_homogeneous_pages([], s) == []
+
+
 def test_core(dburl):
     selectable = (
         select(Book.b, Book.d, Book.id, Book.c)
@@ -376,6 +607,21 @@
         check_paging_core(selectable=selectable, s=s.connection())
 
 
+@pytest.mark.skipif(
+    SQLA_VERSION < version.parse("1.4.0b1"),
+    reason="Broken in 1.3."
+)
+def test_core_whole_model_plus_other_columns(dburl):
+    selectable = (
+        select(Book, literal(0))
+        .where(Book.id < 10)
+        .order_by(Book.id)
+    )
+
+    with S(dburl, echo=ECHO) as s:
+        check_paging_core(selectable=selectable, s=s)
+
+
def test_core2(dburl):
    with S(dburl, echo=ECHO) as s:
        sel = select(Book.score).order_by(Book.id)
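Usage note (not part of the patch): a minimal sketch of the new batched ORM API. It assumes an open Session `s` and the `Book` model from tests/conftest.py, with per_page values chosen purely for illustration; the entry points are `OrmPageRequest` and `get_homogeneous_pages` exactly as added above.

    # Hypothetical usage sketch -- `s` and `Book` come from the test fixtures.
    from sqlakeyset import OrmPageRequest, get_homogeneous_pages

    requests = [
        OrmPageRequest(query=s.query(Book).order_by(Book.name, Book.id), per_page=10),
        OrmPageRequest(
            query=s.query(Book).filter(Book.author_id == 1).order_by(Book.id),
            per_page=5,
        ),
    ]
    # One round trip to the database; pages come back in request order.
    by_name, by_author = get_homogeneous_pages(requests)

    # Each element is an ordinary sqlakeyset Page, so keyset continuation works
    # as usual: feed paging.further (or a serialized bookmark) back in as page=.
    if by_name.paging.has_further:
        next_request = OrmPageRequest(
            query=s.query(Book).order_by(Book.name, Book.id),
            per_page=10,
            page=by_name.paging.further,
        )

Both queries select the same columns (whole `Book` rows), which is what the homogeneity requirement in the docstrings refers to.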
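The same flow with the 2.0-style API, again as a sketch under the same assumptions (SQLAlchemy 1.4+ only, and not SQLite, since the compound select needs per-branch ORDER BY/LIMIT; `s` may be a Session or Connection):

    from sqlalchemy import select
    from sqlakeyset import PageRequest, select_homogeneous_pages

    requests = [
        PageRequest(selectable=select(Book).order_by(Book.name, Book.id), per_page=10),
        PageRequest(
            selectable=select(Book).where(Book.author_id == 1).order_by(Book.id),
            per_page=5,
        ),
    ]
    page_a, page_b = select_homogeneous_pages(requests, s)

    # Page through both lists in lockstep by rebuilding the requests with each
    # page's further marker; a serialized bookmark string works too, since the
    # page field is typed Optional[Union[MarkerLike, str]].
    requests = [
        PageRequest(
            selectable=select(Book).order_by(Book.name, Book.id),
            per_page=10,
            page=page_a.paging.further,
        ),
        PageRequest(
            selectable=select(Book).where(Book.author_id == 1).order_by(Book.id),
            per_page=5,
            page=page_b.paging.further,
        ),
    ]
    page_a, page_b = select_homogeneous_pages(requests, s)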
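For reviewers, roughly the statement shape the helpers assemble, hand-written here for two requests. The `_page_identifier`/`_row_number` labels and the ROW_NUMBER/UNION ALL construction mirror `_get_ordering_infos` and `select_homogeneous_pages` above, but this is an illustrative approximation, not the exact generated SQL:

    from sqlalchemy import func, literal, select, text, union_all

    q0 = select(Book).order_by(Book.name, Book.id).add_columns(
        literal(0).label("_page_identifier"),
        func.ROW_NUMBER().over(order_by=(Book.name, Book.id)).label("_row_number"),
    ).limit(10 + 1)  # per_page + 1, so has_further can be detected
    q1 = select(Book).where(Book.author_id == 1).order_by(Book.id).add_columns(
        literal(1).label("_page_identifier"),
        func.ROW_NUMBER().over(order_by=(Book.id,)).label("_row_number"),
    ).limit(5 + 1)

    combined = union_all(q0, q1).order_by(
        text("_page_identifier"), text("_row_number")
    )
    # The fetched rows are then split back into per-request pages by
    # _page_identifier, which is why only one round trip is needed.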