Commit
Merge pull request #976 from lsst/tickets/DM-43191
DM-43191: Drop 'offset' parameter from new query interface.
TallJimbo committed Mar 8, 2024
2 parents 6ad781b + 703ef3c commit 9d79d6e
Showing 4 changed files with 14 additions and 43 deletions.
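The net effect for callers: `limit()` now takes only an upper bound, and the `offset` argument is gone everywhere. A minimal before/after sketch, based on the method chains that appear in the diffs below (how the `query` object is obtained is elided here, and the dimensions and order key are illustrative):

```python
# Before this change, results could be sliced with both values:
#     query.data_ids(["patch", "band"]).order_by("patch.cell_x").limit(10, offset=20)

# After this change, only a limit can be applied:
result = (
    query.data_ids(["patch", "band"])
    .order_by("patch.cell_x")
    .limit(10)
)
```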
17 changes: 3 additions & 14 deletions python/lsst/daf/butler/_butler.py
@@ -1440,7 +1440,6 @@ def _query_data_ids(
         with_dimension_records: bool = False,
         order_by: Iterable[str] | str | None = None,
         limit: int | None = None,
-        offset: int = 0,
         explain: bool = True,
         **kwargs: Any,
     ) -> list[DataCoordinate]:
@@ -1476,10 +1475,6 @@ def _query_data_ids(
             descending ordering.
         limit : `int`, optional
             Upper limit on the number of returned records.
-        offset : `int`, optional
-            The number of records to skip before returning at most ``limit``
-            records. If ``offset`` is specified then ``limit`` must be
-            specified as well.
         explain : `bool`, optional
             If `True` (default) then `EmptyQueryResultError` exception is
             raised when resulting list is empty. The exception contains
@@ -1509,8 +1504,7 @@ def _query_data_ids(
             Raised when query generates empty result and ``explain`` is set to
             `True`.
         TypeError
-            Raised when the arguments are incompatible, e.g. ``offset`` is
-            specified, but ``limit`` is not.
+            Raised when the arguments are incompatible.
         """
         if data_id is None:
             data_id = DataCoordinate.make_empty(self.dimensions)
@@ -1519,7 +1513,7 @@
             query.where(data_id, where, bind=bind, **kwargs)
             .data_ids(dimensions)
             .order_by(*ensure_iterable(order_by))
-            .limit(limit, offset)
+            .limit(limit)
         )
         if with_dimension_records:
             result = result.with_dimension_records()
@@ -1641,7 +1635,6 @@ def _query_dimension_records(
         bind: Mapping[str, Any] | None = None,
         order_by: Iterable[str] | str | None = None,
         limit: int | None = None,
-        offset: int = 0,
         explain: bool = True,
         **kwargs: Any,
     ) -> list[DimensionRecord]:
@@ -1670,10 +1663,6 @@ def _query_dimension_records(
             descending ordering.
         limit : `int`, optional
             Upper limit on the number of returned records.
-        offset : `int`, optional
-            The number of records to skip before returning at most ``limit``
-            records. If ``offset`` is specified then ``limit`` must be
-            specified as well.
         explain : `bool`, optional
             If `True` (default) then `EmptyQueryResultError` exception is
             raised when resulting list is empty. The exception contains
@@ -1713,7 +1702,7 @@ def _query_dimension_records(
             query.where(data_id, where, bind=bind, **kwargs)
             .dimension_records(element)
             .order_by(*ensure_iterable(order_by))
-            .limit(limit, offset)
+            .limit(limit)
         )
         dimension_records = list(result)
         if explain and not dimension_records:
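The docstrings above describe the `explain` flag; a short sketch of how a caller might handle it (the repository path and dimensions are illustrative, and `_query_data_ids` is a private method shown only because it is the subject of this diff):

```python
from lsst.daf.butler import Butler, EmptyQueryResultError

butler = Butler("repo")  # hypothetical repository path
try:
    data_ids = butler._query_data_ids(["patch", "band"], explain=True)
except EmptyQueryResultError as err:
    # Raised instead of returning an empty list when explain=True (default).
    print(err)
```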
7 changes: 2 additions & 5 deletions python/lsst/daf/butler/queries/_base.py
@@ -203,17 +203,14 @@ def order_by(self, *args: str | OrderExpression | ExpressionProxy) -> Self:
             self._tree, order_by=convert_order_by_args(self.dimensions, self._get_datasets(), *args)
         )
 
-    def limit(self, limit: int | None = None, offset: int = 0) -> Self:
+    def limit(self, limit: int | None = None) -> Self:
         """Return a new query that slices its result rows positionally.
 
         Parameters
         ----------
         limit : `int` or `None`, optional
             Upper limit on the number of returned records. `None` (default)
             means no limit.
-        offset : `int`, optional
-            The number of records to skip before returning at most ``limit``
-            records.
 
         Returns
         -------
@@ -226,7 +223,7 @@ def limit(self, limit: int | None = None, offset: int = 0) -> Self:
         replace the old ones. Slicing always occurs after sorting, even if
         `limit` is called before `order_by`.
         """
-        return self._copy(self._tree, limit=limit, offset=offset)
+        return self._copy(self._tree, limit=limit)
 
     def where(
         self,
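The Notes section of `limit()` above makes two guarantees worth spelling out: a repeated call replaces the previous value rather than composing with it, and slicing is always applied after sorting regardless of call order. A sketch (any query-results object with `order_by()`/`limit()` will do; `results` and the `"band"` order key are illustrative):

```python
a = results.limit(5).limit(2)          # later call wins: at most 2 rows
b = results.limit(None)                # None removes any existing limit
c = results.limit(2).order_by("band")  # sorted first, then sliced...
d = results.order_by("band").limit(2)  # ...so c and d are equivalent
```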
3 changes: 0 additions & 3 deletions python/lsst/daf/butler/queries/result_specs.py
@@ -57,9 +57,6 @@ class ResultSpecBase(pydantic.BaseModel, ABC):
     order_by: tuple[OrderExpression, ...] = ()
     """Expressions to sort the rows by."""
 
-    offset: int = 0
-    """Index of the first row to return."""
-
     limit: int | None = None
     """Maximum number of rows to return, or `None` for no bound."""
 
30 changes: 9 additions & 21 deletions tests/test_query_interface.py
@@ -1416,7 +1416,7 @@ def test_dimension_record_results(self) -> None:
         - joining against uploaded data coordinates;
         - counting result rows;
         - expanding dimensions as needed for 'where' conditions;
-        - order_by, limit, and offset.
+        - order_by and limit.
 
         It does not include the iteration methods of
         DimensionRecordQueryResults, since those require a different mock
@@ -1443,9 +1443,8 @@ def check(
             results: DimensionRecordQueryResults,
             order_by: Any = (),
             limit: int | None = None,
-            offset: int = 0,
         ) -> list[str]:
-            results = results.order_by(*order_by).limit(limit, offset=offset)
+            results = results.order_by(*order_by).limit(limit)
             self.assertEqual(results.element.name, "patch")
             with self.assertRaises(_TestQueryExecution) as cm:
                 list(results)
@@ -1461,7 +1460,6 @@ def check(
             self.assertEqual(result_spec.result_type, "dimension_record")
             self.assertEqual(result_spec.element, self.universe["patch"])
             self.assertEqual(result_spec.limit, limit)
-            self.assertEqual(result_spec.offset, offset)
             for exact, discard in itertools.permutations([False, True], r=2):
                 with self.assertRaises(_TestQueryCount) as cm:
                     results.count(exact=exact, discard=discard)
@@ -1473,11 +1471,9 @@
         # Run the closure's tests on variants of the base query.
         self.assertEqual(check(results), [])
         self.assertEqual(check(results, limit=2), [])
-        self.assertEqual(check(results, offset=1), [])
-        self.assertEqual(check(results, limit=3, offset=3), [])
         self.assertEqual(check(results, order_by=[x.patch.cell_x]), ["patch.cell_x"])
         self.assertEqual(
-            check(results, order_by=[x.patch.cell_x, x.patch.cell_y.desc], offset=2),
+            check(results, order_by=[x.patch.cell_x, x.patch.cell_y.desc]),
             ["patch.cell_x", "patch.cell_y DESC"],
         )
         with self.assertRaises(qt.InvalidQueryError):
@@ -1521,7 +1517,7 @@ def test_data_coordinate_results(self) -> None:
         - counting result rows;
         - expanding dimensions as needed for 'where' conditions;
-        - order_by, limit, and offset.
+        - order_by and limit.
 
         It does not include the iteration methods of
         DataCoordinateQueryResults, since those require a different mock
@@ -1542,10 +1538,9 @@ def check(
             results: DataCoordinateQueryResults,
             order_by: Any = (),
             limit: int | None = None,
-            offset: int = 0,
             include_dimension_records: bool = False,
         ) -> list[str]:
-            results = results.order_by(*order_by).limit(limit, offset=offset)
+            results = results.order_by(*order_by).limit(limit)
             self.assertEqual(results.dimensions, self.universe.conform(["patch", "band"]))
             with self.assertRaises(_TestQueryExecution) as cm:
                 list(results)
@@ -1560,7 +1555,6 @@ def check(
             self.assertEqual(result_spec.dimensions, self.universe.conform(["patch", "band"]))
             self.assertEqual(result_spec.include_dimension_records, include_dimension_records)
             self.assertEqual(result_spec.limit, limit)
-            self.assertEqual(result_spec.offset, offset)
             self.assertIsNone(result_spec.find_first_dataset)
             for exact, discard in itertools.permutations([False, True], r=2):
                 with self.assertRaises(_TestQueryCount) as cm:
@@ -1578,15 +1572,13 @@
             [],
         )
         self.assertEqual(check(results, limit=2), [])
-        self.assertEqual(check(results, offset=1), [])
-        self.assertEqual(check(results, limit=3, offset=3), [])
         self.assertEqual(check(results, order_by=[x.patch.cell_x]), ["patch.cell_x"])
         self.assertEqual(
-            check(results, order_by=[x.patch.cell_x, x.patch.cell_y.desc], offset=2),
+            check(results, order_by=[x.patch.cell_x, x.patch.cell_y.desc]),
             ["patch.cell_x", "patch.cell_y DESC"],
         )
         self.assertEqual(
-            check(results, order_by=["patch.cell_x", "-cell_y"], offset=2),
+            check(results, order_by=["patch.cell_x", "-cell_y"]),
             ["patch.cell_x", "patch.cell_y DESC"],
         )

@@ -1612,7 +1604,7 @@ def test_dataset_results(self) -> None:
         - counting result rows;
         - expanding dimensions as needed for 'where' conditions;
         - different ways of passing a data ID to 'where' methods;
-        - order_by, limit, and offset.
+        - order_by and limit.
 
         It does not include the iteration methods of the DatasetRefQueryResults
         classes, since those require a different mock driver setup (see
@@ -1636,10 +1628,9 @@ def check(
             results: DatasetRefQueryResults,
             order_by: Any = (),
             limit: int | None = None,
-            offset: int = 0,
             include_dimension_records: bool = False,
         ) -> list[str]:
-            results = results.order_by(*order_by).limit(limit, offset=offset)
+            results = results.order_by(*order_by).limit(limit)
             with self.assertRaises(_TestQueryExecution) as cm:
                 list(results)
             tree = cm.exception.tree
@@ -1660,7 +1651,6 @@ def check(
             self.assertEqual(result_spec.result_type, "dataset_ref")
             self.assertEqual(result_spec.include_dimension_records, include_dimension_records)
             self.assertEqual(result_spec.limit, limit)
-            self.assertEqual(result_spec.offset, offset)
             self.assertEqual(result_spec.find_first_dataset, result_spec.dataset_type_name)
             for exact, discard in itertools.permutations([False, True], r=2):
                 with self.assertRaises(_TestQueryCount) as cm:
@@ -1690,8 +1680,6 @@
             [],
         )
         self.assertEqual(check(results1, limit=2), [])
-        self.assertEqual(check(results1, offset=1), [])
-        self.assertEqual(check(results1, limit=3, offset=3), [])
         self.assertEqual(check(results1, order_by=["raw.timespan.begin"]), ["raw.timespan.begin"])
         self.assertEqual(check(results1, order_by=["detector"]), ["detector"])
         self.assertEqual(check(results1, order_by=["ingest_date"]), ["raw.ingest_date"])
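With `offset` removed from the interface, a caller that still needs to skip rows can do so on the client side. A purely illustrative workaround, not part of this change (`take_with_offset` is a hypothetical helper; query results are iterable, as the tests above rely on):

```python
import itertools


def take_with_offset(results, limit: int, offset: int) -> list:
    """Emulate the removed (limit, offset) slicing by skipping rows client-side.

    The skipped rows are still produced by the query, so this only makes
    sense for small offsets.
    """
    return list(itertools.islice(results, offset, offset + limit))
```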
