Commit

add BlockCursor::row_array_size
pacman82 committed Oct 8, 2023
1 parent 3d3ff84 commit 24a121a
Showing 6 changed files with 38 additions and 3 deletions.
2 changes: 1 addition & 1 deletion Cargo.lock

Some generated files are not rendered by default.

4 changes: 4 additions & 0 deletions Changelog.md
@@ -1,5 +1,9 @@
 # Changelog
 
+## 2.2.0
+
+* Adds `BlockCursor::row_array_size` in order to infer the maximum batch size without unbinding the row set buffer first. This allows the downstream `arrow-odbc` crate a faster implementation when creating a double buffered concurrent stream of batches directly from a block cursor.
+
 ## 2.1.0
 
 It is now possible to use `odbc-api` with multithreading in completely safe code.
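
To illustrate the changelog entry above: a minimal sketch of how downstream code could size a second row set buffer from a cursor that already has one bound. The helper `matching_buffer` and its `descs` parameter are invented for the example; `row_array_size` and `from_descs` are the actual `odbc-api` calls.

```rust
use odbc_api::{
    buffers::{BufferDesc, ColumnarAnyBuffer},
    handles::AsStatementRef,
    BlockCursor,
};

/// Creates a second buffer with the same capacity as the one currently bound
/// to `block_cursor`. `descs` must describe the same column layout the first
/// buffer was created with.
fn matching_buffer<C: AsStatementRef>(
    block_cursor: &BlockCursor<C, ColumnarAnyBuffer>,
    descs: &[BufferDesc],
) -> ColumnarAnyBuffer {
    // New in 2.2.0: read the capacity without unbinding the buffer first.
    let capacity = block_cursor.row_array_size();
    ColumnarAnyBuffer::from_descs(capacity, descs.iter().copied())
}
```
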
2 changes: 1 addition & 1 deletion odbc-api/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "odbc-api"
-version = "2.1.0"
+version = "2.2.0"
 authors = ["Markus Klein"]
 edition = "2021"
 license = "MIT"
7 changes: 7 additions & 0 deletions odbc-api/src/cursor.rs
@@ -541,6 +541,13 @@ where
     }
 }
 
+impl<C, B> BlockCursor<C, B> where B: RowSetBuffer, C: AsStatementRef {
+    /// Maximum number of rows fetched from the database in the next call to `fetch`.
+    pub fn row_array_size(&self) -> usize {
+        self.buffer.row_array_size()
+    }
+}
+
 impl<C, B> Drop for BlockCursor<C, B>
 where
     C: AsStatementRef,
24 changes: 24 additions & 0 deletions odbc-api/tests/integration.rs
@@ -3880,6 +3880,30 @@ fn cursor_get_text_from_text(profile: &Profile) {
assert_eq!("€".repeat(300), actual);
}

/// If we want to use two buffers alternating to fetch data (like in the concurrent use case in
/// the arrow-odbc downstream crate) we may want to generate a second row set buffer from an
/// existing one. For this it is useful if we can infer the capacity of the block cursor, without
/// unbinding it first.
#[test_case(MSSQL; "Microsoft SQL Server")]
#[test_case(MARIADB; "Maria DB")]
#[test_case(SQLITE_3; "SQLite 3")]
#[test_case(POSTGRES; "PostgreSQL")]
fn row_arrary_size_from_block_cursor(profile: &Profile) {
// Given a table
let table_name = table_name!();
let (conn, table) = profile.given(&table_name, &["INTEGER"]).unwrap();

// When
let capacity_used_to_create_buffer = 42;
let cursor = conn.execute(&table.sql_all_ordered_by_id(), ()).unwrap().unwrap();
let buffer = ColumnarAnyBuffer::from_descs(capacity_used_to_create_buffer, [BufferDesc::I32 { nullable: true }]);
let block_cursor = cursor.bind_buffer(buffer).unwrap();
let capacity_reported_by_block_cursor = block_cursor.row_array_size();

// Then
assert_eq!(capacity_used_to_create_buffer, capacity_reported_by_block_cursor);
}

 #[test_case(MSSQL; "Microsoft SQL Server")]
 #[test_case(MARIADB; "Maria DB")]
 #[test_case(SQLITE_3; "SQLite 3")]
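
The test above only checks that the capacity round-trips. A rough sketch of the alternating fetch loop its doc comment alludes to could look as follows; `fetch_alternating`, the `example` table, and its single nullable INTEGER column are invented for illustration, while `bind_buffer`, `fetch`, `unbind`, and the new `row_array_size` are existing `odbc-api` calls.

```rust
use odbc_api::{
    buffers::{BufferDesc, ColumnarAnyBuffer},
    Connection, Cursor, Error,
};

/// Fetches a result set while alternating between two buffers of identical
/// capacity.
fn fetch_alternating(conn: &Connection<'_>) -> Result<(), Error> {
    let descs = [BufferDesc::I32 { nullable: true }];
    let cursor = conn
        .execute("SELECT a FROM example", ())?
        .expect("SELECT must yield a result set");
    let mut block_cursor = cursor.bind_buffer(ColumnarAnyBuffer::from_descs(1000, descs))?;
    // Size the spare buffer from the bound one, without unbinding it.
    let mut spare = ColumnarAnyBuffer::from_descs(block_cursor.row_array_size(), descs);
    while block_cursor.fetch()?.is_some() {
        // Swap buffers. In the real concurrent use case the filled buffer
        // would now be handed to another thread for processing.
        let (cursor, filled) = block_cursor.unbind()?;
        block_cursor = cursor.bind_buffer(spare)?;
        spare = filled;
    }
    Ok(())
}
```

In the real concurrent implementation the filled buffer is processed on a second thread while the next fetch runs, which is exactly why sizing the spare buffer without unbinding the first one matters.
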
2 changes: 1 addition & 1 deletion odbcsv/Cargo.toml
@@ -29,7 +29,7 @@ readme = "Readme.md"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-odbc-api = { version = "2.1.0", path = "../odbc-api" }
+odbc-api = { version = "2.2.0", path = "../odbc-api" }
 csv = "1.3.0"
 anyhow = "1.0.75"
 stderrlog = "0.5.4"
