Index: .efiles ================================================================== --- .efiles +++ .efiles @@ -7,5 +7,6 @@ src/wrconn.rs src/rawhook.rs src/changehook.rs src/utils.rs examples/simple.rs +examples/rotest.rs Index: Cargo.toml ================================================================== --- Cargo.toml +++ Cargo.toml @@ -1,23 +1,25 @@ [package] name = "sqlsrv" -version = "0.5.0" -edition = "2021" +version = "0.10.0" +edition = "2024" license = "0BSD" # https://crates.io/category_slugs categories = [ "database" ] keywords = [ "sqlite", "server" ] repository = "https://repos.qrnch.tech/pub/sqlsrv" description = "Utility functions for managing SQLite connections in a server application." -rust-version = "1.56" +rust-version = "1.85" exclude = [ ".fossil-settings", ".efiles", ".fslckout", + "bacon.toml", "build_docs.sh", "examples", "www", + "bacon.toml", "rustfmt.toml" ] # https://doc.rust-lang.org/cargo/reference/manifest.html#the-badges-section [badges] @@ -25,26 +27,34 @@ [features] tpool = ["dep:swctx", "dep:threadpool"] [dependencies] -parking_lot = { version = "0.12.3" } +parking_lot = { version = "0.12.4" } r2d2 = { version = "0.8.10" } -r2d2_sqlite = { version = "0.25.0" } +r2d2_sqlite = { version = "0.29.0" } # Need to add the `hooks` feature. Unfortunately the version needs to be # specified here. It would be much more convenient if it could use the version # from r2d2_sqlite. Allegedely one can use the version "*", which apparently # does not mean "latest", but have not yet confirmed this. 
-rusqlite = { version = "0.32.1", features = ["hooks"] } -swctx = { version = "0.2.2", optional = true } +rusqlite = { version = "0.36.0", features = ["hooks"] } +swctx = { version = "0.3.0", optional = true } threadpool = { version = "1.8.1", optional = true } [dev-dependencies] hex = { version = "0.4.3" } -rand = { version = "0.8.5" } -rusqlite = { version = "0.32.1", features = ["functions"] } -sha2 = { version = "0.10.8" } +rand = { version = "0.9.1" } +rusqlite = { version = "0.36.0", features = ["functions"] } +sha2 = { version = "0.10.9" } [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs", "--generate-link-to-definition"] +[lints.clippy] +all = { level = "warn", priority = -1 } +pedantic = { level = "warn", priority = -1 } +nursery = { level = "warn", priority = -1 } +cargo = { level = "warn", priority = -1 } + +multiple_crate_versions = "allow" + ADDED bacon.toml Index: bacon.toml ================================================================== --- /dev/null +++ bacon.toml @@ -0,0 +1,106 @@ +# This is a configuration file for the bacon tool +# +# Bacon repository: https://github.com/Canop/bacon +# Complete help on configuration: https://dystroy.org/bacon/config/ +# You can also check bacon's own bacon.toml file +# as an example: https://github.com/Canop/bacon/blob/main/bacon.toml + +# For information about clippy lints, see: +# https://github.com/rust-lang/rust-clippy/blob/master/README.md + +#default_job = "check" +default_job = "clippy-all" + +[jobs.check] +command = ["cargo", "check", "--color", "always"] +need_stdout = false + +[jobs.check-all] +command = ["cargo", "check", "--all-targets", "--color", "always"] +need_stdout = false + +# Run clippy on the default target +[jobs.clippy] +command = [ + "cargo", "clippy", + "--color", "always", +] +need_stdout = false + +# Run clippy on all targets +# To disable some lints, you may change the job this way: +# [jobs.clippy-all] +# command = [ +# "cargo", "clippy", +# 
"--all-targets", +# "--color", "always", +# "--", +# "-A", "clippy::bool_to_int_with_if", +# "-A", "clippy::collapsible_if", +# "-A", "clippy::derive_partial_eq_without_eq", +# ] +# need_stdout = false +[jobs.clippy-all] +command = [ + "cargo", "clippy", + "--all-targets", + "--all-features", + "--color", "always", +] +need_stdout = false + +# This job lets you run +# - all tests: bacon test +# - a specific test: bacon test -- config::test_default_files +# - the tests of a package: bacon test -- -- -p config +[jobs.test] +command = [ + "cargo", "test", "--color", "always", + "--", "--color", "always", # see https://github.com/Canop/bacon/issues/124 +] +need_stdout = true + +[jobs.doc] +command = ["cargo", "doc", "--color", "always", "--no-deps"] +need_stdout = false + +# If the doc compiles, then it opens in your browser and bacon switches +# to the previous job +[jobs.doc-open] +command = ["cargo", "doc", "--color", "always", "--no-deps", "--open"] +need_stdout = false +on_success = "back" # so that we don't open the browser at each change + +# You can run your application and have the result displayed in bacon, +# *if* it makes sense for this crate. +# Don't forget the `--color always` part or the errors won't be +# properly parsed. +# If your program never stops (eg a server), you may set `background` +# to false to have the cargo run output immediately displayed instead +# of waiting for program's end. +[jobs.run] +command = [ + "cargo", "run", + "--color", "always", + # put launch parameters for your program behind a `--` separator +] +need_stdout = true +allow_warnings = true +background = true + +# This parameterized job runs the example of your choice, as soon +# as the code compiles. 
+# Call it as +# bacon ex -- my-example +[jobs.ex] +command = ["cargo", "run", "--color", "always", "--example"] +need_stdout = true +allow_warnings = true + +# You may define here keybindings that would be specific to +# a project, for example a shortcut to launch a specific job. +# Shortcuts to internal functions (scrolling, toggling, etc.) +# should go in your personal global prefs.toml file instead. +[keybindings] +# alt-m = "job:my-job" +c = "job:clippy-all" # comment this to have 'c' run clippy on only the default target ADDED examples/rotest.rs Index: examples/rotest.rs ================================================================== --- /dev/null +++ examples/rotest.rs @@ -0,0 +1,66 @@ +use rusqlite::{Connection, ToSql, params}; + +use sqlsrv::SchemaMgr; + +#[derive(Debug)] +#[allow(dead_code)] +enum MyError { + Sqlite(rusqlite::Error), + R2D2(r2d2::Error) +} + +impl From for MyError { + fn from(err: r2d2::Error) -> Self { + Self::R2D2(err) + } +} + +impl From for MyError { + fn from(err: rusqlite::Error) -> Self { + Self::Sqlite(err) + } +} + + +struct Schema {} + +impl SchemaMgr for Schema { + fn init( + &self, + conn: &mut Connection, + _newdb: bool + ) -> Result<(), sqlsrv::Error> { + conn.execute( + "CREATE TABLE IF NOT EXISTS agents ( + id INTEGER PRIMARY KEY, + name TEXT UNIQUE NOT NULL, + age INTEGER NOT NULL +);", + &[] as &[&dyn ToSql] + )?; + + Ok(()) + } +} + + +#[allow(clippy::too_many_lines)] +fn main() { + let schema = Box::new(Schema {}); + + let bldr = sqlsrv::Builder::new(schema); + + let cpool = bldr.build("test.sqlite").unwrap(); + + let res: Result<(), MyError> = cpool.run_ro(|conn| { + const SQL: &str = "INSERT INTO agents (name, age) VALUES ('frank', 42);"; + let mut stmt = conn.prepare_cached(SQL)?; + stmt.execute(params![])?; + Ok(()) + }); + + println!("Should panic with an read-only error:"); + res.unwrap(); +} + +// vim: set ft=rust et sw=2 ts=2 sts=2 cinoptions=2 tw=79 : Index: examples/simple.rs 
================================================================== --- examples/simple.rs +++ examples/simple.rs @@ -50,11 +50,15 @@ Ok(()) } } +#[allow(clippy::too_many_lines)] fn main() { + const SQL_LOOKUP_TABLE: &str = "SELECT EXISTS(SELECT 1 FROM sqlite_master \ + WHERE type='table' AND name=?);"; + let schema = Box::new(Schema {}); #[allow(unused_mut)] let mut bldr = sqlsrv::Builder::new(schema) .incremental_autoclean(10, None) .reg_scalar_fn(RegOn::Both(register_genuid)) @@ -127,13 +131,10 @@ ) .unwrap(); } - const SQL_LOOKUP_TABLE: &str = "SELECT EXISTS(SELECT 1 FROM sqlite_master \ - WHERE type='table' AND name=?);"; - { let rconn = connpool.reader().unwrap(); let mut stmt = rconn.prepare_cached(SQL_LOOKUP_TABLE).unwrap(); let have = stmt .query_row(params!("stuff"), |row| row.get::(0)) @@ -191,11 +192,11 @@ #[cfg(feature = "tpool")] tpool.join(); } -/// Register a pwhash() SQL function which returns a hex-encoded version of +/// Register a `pwhash()` SQL function that returns a hex-encoded version of /// the SHA256 hash of the input string. fn register_pwhash(db: &Connection) -> Result<(), rusqlite::Error> { db.create_scalar_function( "pwhash", 1, @@ -211,10 +212,13 @@ Ok(hex::encode(&result[..])) } ) } +const CHARSET: &[u8] = + b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_"; + /// Register an SQL function called `genuid` that will generate a random /// (hopefully unique) identifier of a requested length. 
fn register_genuid(db: &Connection) -> Result<(), rusqlite::Error> { db.create_scalar_function( @@ -223,19 +227,16 @@ FunctionFlags::SQLITE_UTF8 | FunctionFlags::SQLITE_DETERMINISTIC, move |ctx| { assert_eq!(ctx.len(), 1, "called with unexpected number of arguments"); let len = ctx.get::(0)?; - const CHARSET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\ - abcdefghijklmnopqrstuvwxyz\ - 0123456789-_"; - let mut rng = rand::thread_rng(); + let mut rng = rand::rng(); let id: String = (0..len) .map(|_| { - let idx = rng.gen_range(0..CHARSET.len()); + let idx = rng.random_range(0..CHARSET.len()); CHARSET[idx] as char }) .collect(); Ok(id) Index: rustfmt.toml ================================================================== --- rustfmt.toml +++ rustfmt.toml @@ -1,8 +1,8 @@ blank_lines_upper_bound = 2 comment_width = 79 -edition = "2021" +edition = "2024" format_strings = true max_width = 79 match_block_trailing_comma = false # merge_imports = true newline_style = "Unix" Index: src/err.rs ================================================================== --- src/err.rs +++ src/err.rs @@ -8,50 +8,51 @@ R2D2(r2d2::Error), Sqlite(rusqlite::Error) } impl Error { - pub fn bad_format(s: S) -> Self { - Error::BadFormat(s.to_string()) + #[allow(clippy::needless_pass_by_value)] + pub fn bad_format(s: impl ToString) -> Self { + Self::BadFormat(s.to_string()) } } impl std::error::Error for Error {} impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - Error::BadFormat(s) => { - write!(f, "Bad format error; {}", s) - } - Error::IO(s) => { - write!(f, "I/O error; {}", s) - } - Error::R2D2(ref err) => { - write!(f, "r2d2 error; {}", err) - } - Error::Sqlite(ref err) => { - write!(f, "Sqlite error; {}", err) + Self::BadFormat(s) => { + write!(f, "Bad format error; {s}") + } + Self::IO(s) => { + write!(f, "I/O error; {s}") + } + Self::R2D2(err) => { + write!(f, "r2d2 error; {err}") + } + Self::Sqlite(err) => { + write!(f, "Sqlite error; {err}") } } 
} } impl From for Error { fn from(err: io::Error) -> Self { - Error::IO(err.to_string()) + Self::IO(err.to_string()) } } impl From for Error { fn from(err: r2d2::Error) -> Self { - Error::R2D2(err) + Self::R2D2(err) } } impl From for Error { fn from(err: rusqlite::Error) -> Self { - Error::Sqlite(err) + Self::Sqlite(err) } } // vim: set ft=rust et sw=2 ts=2 sts=2 cinoptions=2 tw=79 : Index: src/lib.rs ================================================================== --- src/lib.rs +++ src/lib.rs @@ -1,5 +1,7 @@ +#![allow(clippy::doc_markdown)] + //! A library for implementing an in-process SQLite database server. //! //! # Connection pooling //! sqlsrv implements connection pooling that reflects the concurrency model //! of SQLite: It supports multiple parallel readers, but only one writer. @@ -40,16 +42,16 @@ use parking_lot::{Condvar, Mutex}; use r2d2::{CustomizeConnection, PooledConnection}; -use r2d2_sqlite::SqliteConnectionManager; +pub use r2d2_sqlite::SqliteConnectionManager; pub use r2d2; pub use rusqlite; -use rusqlite::{params, Connection, OpenFlags}; +use rusqlite::{Connection, OpenFlags, params}; #[cfg(feature = "tpool")] use threadpool::ThreadPool; pub use changehook::ChangeLogHook; @@ -60,11 +62,11 @@ /// Wrapper around a SQL functions registration callback used to select which /// connection types to perform registrations on. pub enum RegOn where - F: Fn(&Connection) -> Result<(), rusqlite::Error> + Sync + Sync + F: Fn(&Connection) -> Result<(), rusqlite::Error> { /// This registration callback should only be called for read-only /// connections. RO(F), @@ -99,19 +101,25 @@ /// While this method can be used to perform schema upgrades, there are two /// specialized methods (`need_upgrade()` and `upgrade()`) that can be used /// for this purpose instead. /// /// The default implementation does nothing but returns `Ok(())`. + /// + /// # Errors + /// Application-specific error. 
#[allow(unused_variables)] fn init(&self, conn: &mut Connection, newdb: bool) -> Result<(), Error> { Ok(()) } /// Application callback used to determine if the database schema is out of /// date and needs to be updated. /// /// The default implementation does nothing but returns `Ok(false)`. + /// + /// # Errors + /// Application-specific error. #[allow(unused_variables)] fn need_upgrade(&self, conn: &Connection) -> Result { Ok(false) } @@ -118,10 +126,13 @@ /// Upgrade the database schema. /// /// This is called if [`SchemaMgr::need_upgrade()`] returns `Ok(true)`. /// /// The default implementation does nothing but returns `Ok(())`. + /// + /// # Errors + /// Application-specific error. #[allow(unused_variables)] fn upgrade(&self, conn: &mut Connection) -> Result<(), Error> { Ok(()) } } @@ -163,14 +174,14 @@ ) -> Result<(), rusqlite::Error> { conn.pragma_update(None, "foreign_keys", "ON")?; for rf in &self.regfuncs { match rf { - CbType::Ro(ref f) | CbType::Both(ref f) => { + CbType::Ro(f) | CbType::Both(f) => { f(conn)?; } - _ => {} + CbType::Rw(_) => {} } } Ok(()) } @@ -207,11 +218,14 @@ Ok(conn) } /// Run a full vacuum. - fn full_vacuum(&self, conn: &Connection) -> Result<(), rusqlite::Error> { + /// + /// This is an internal function that may be called by `build()` if a full + /// vacuum has been requested. + fn full_vacuum(conn: &Connection) -> Result<(), rusqlite::Error> { conn.execute("VACUUM;", params![])?; Ok(()) } fn create_ro_pool( @@ -236,10 +250,11 @@ /// Create a new `Builder` for constructing a [`ConnPool`] object. /// /// Default to not run a full vacuum of the database on initialization and /// create 2 read-only connections for the pool. /// No workers thread pool will be used. + #[must_use] pub fn new(schmgr: Box) -> Self { Self { schmgr, full_vacuum: false, max_readers: 2, @@ -252,42 +267,45 @@ } /// Trigger a full vacuum when initializing the connection pool. /// /// Operates on an owned `Builder` object. 
- pub fn init_vacuum(mut self) -> Self { + #[must_use] + pub const fn init_vacuum(mut self) -> Self { self.full_vacuum = true; self } /// Trigger a full vacuum when initializing the connection pool. /// /// Operates on a borrowed `Builder` object. - pub fn init_vacuum_r(&mut self) -> &mut Self { + pub const fn init_vacuum_r(&mut self) -> &mut Self { self.full_vacuum = true; self } /// Set maximum number of readers in the connection pool. /// /// Operates on an owned `Builder` object. - pub fn max_readers(mut self, n: usize) -> Self { + #[must_use] + pub const fn max_readers(mut self, n: usize) -> Self { self.max_readers = n; self } /// Set maximum number of readers in the connection pool. /// /// Operates on a borrowed `Builder` object. - pub fn max_readers_r(&mut self, n: usize) -> &mut Self { + pub const fn max_readers_r(&mut self, n: usize) -> &mut Self { self.max_readers = n; self } /// Request that a "raw" update hook be added to the writer connection. /// /// Operates on an owned `Builder` object. + #[must_use] pub fn hook(mut self, hook: Arc) -> Self { self.hook = Some(hook); self } @@ -302,11 +320,12 @@ /// Enable incremental autovacuum. /// /// `dirt_watermark` is used to set what amount of "dirt" is required in /// order to trigger an autoclean. `nfree` is the number of blocks in the /// freelists to process each time the autoclean is run. - pub fn incremental_autoclean( + #[must_use] + pub const fn incremental_autoclean( mut self, dirt_watermark: usize, npages: Option ) -> Self { self.autoclean = Some(AutoClean { @@ -320,10 +339,11 @@ /// /// The closure should be wrapped in a `RegOn::RO()` if the function should /// only be registered on read-only connections. `RegOn::RW()` is used to /// register the function on the read/write connection. Use `RegOn::Both()` /// to register in both read-only and the read/write connection. 
+ #[must_use] pub fn reg_scalar_fn(mut self, r: RegOn) -> Self where F: Fn(&Connection) -> Result<(), rusqlite::Error> + Send + Sync + 'static { self.reg_scalar_fn_r(r); @@ -360,10 +380,11 @@ } self } #[cfg(feature = "tpool")] + #[must_use] pub fn thread_pool(mut self, tpool: Arc) -> Self { self.tpool = Some(tpool); self } @@ -372,10 +393,13 @@ self.tpool = Some(tpool); self } /// Construct a connection pool. + /// + /// # Errors + /// [`Error::Sqlite`] will be returned if a database error occurred. pub fn build

(mut self, fname: P) -> Result where P: AsRef { // ToDo: Use std::path::absolute() once stabilized @@ -398,14 +422,14 @@ let regfuncs = self.regfuncs.take().unwrap_or_default(); // Call SQL function registration callbacks for read/write connection. for rf in ®funcs { match rf { - CbType::Rw(ref f) | CbType::Both(ref f) => { + CbType::Rw(f) | CbType::Both(f) => { f(&conn)?; } - _ => {} + CbType::Ro(_) => {} } } // // Perform schema initialization. @@ -421,18 +445,18 @@ // // Perform a full vacuum if requested to do so. // if self.full_vacuum { - self.full_vacuum(&conn)?; + Self::full_vacuum(&conn)?; } // // Register a callback hook // if let Some(ref hook) = self.hook { - rawhook::hook(&conn, Arc::clone(hook)); + rawhook::hook(&conn, hook); } // // Set up connection pool for read-only connections. // @@ -448,12 +472,12 @@ signal: Condvar::new(), autoclean: self.autoclean.clone() }); Ok(ConnPool { - sh, rpool, + sh, #[cfg(feature = "tpool")] tpool: self.tpool }) } @@ -464,11 +488,14 @@ /// writer as well. /// /// This method should not be called if the application has requested to add /// a raw update hook. /// - /// # Panic + /// # Errors + /// [`Error::Sqlite`] is returned for database errors. + /// + /// # Panics /// This method will panic if a hook has been added to the Builder. pub fn build_with_changelog_hook( mut self, fname: P, hook: Box + Send> @@ -476,15 +503,14 @@ where P: AsRef, D: FromStr + Send + Sized + 'static, T: FromStr + Send + Sized + 'static { - if self.hook.is_some() { - panic!( - "Can't build a connection pool with both a raw and changelog hook" - ); - } + assert!( + self.hook.is_none(), + "Can't build a connection pool with both a raw and changelog hook" + ); // ToDo: Use std::path::absolute() once stabilized let fname = fname.as_ref(); let db_exists = fname.exists(); @@ -504,14 +530,14 @@ let regfuncs = self.regfuncs.take().unwrap_or_default(); // Call SQL function registration callbacks for read/write connection. 
for rf in ®funcs { match rf { - CbType::Rw(ref f) | CbType::Both(ref f) => { + CbType::Rw(f) | CbType::Both(f) => { f(&conn)?; } - _ => {} + CbType::Ro(_) => {} } } // @@ -528,11 +554,11 @@ // // Perform a full vacuum if requested to do so. // if self.full_vacuum { - self.full_vacuum(&conn)?; + Self::full_vacuum(&conn)?; } // // Register a callback hook // @@ -553,12 +579,12 @@ signal: Condvar::new(), autoclean: self.autoclean.clone() }); Ok(ConnPool { - sh, rpool, + sh, #[cfg(feature = "tpool")] tpool: self.tpool }) } } @@ -596,27 +622,39 @@ /// SQLite connection pool. /// /// This is a specialized connection pool that is defined specifically for /// sqlite, and only allows a single writer but multiple readers. +// Note: In Rust the drop order of struct fields is in the order of +// declaration. If the writer is dropped before the readers, sqlite +// will not clean up its wal files when the writet closes (presumably +// because the readers are keeping the files locked). Therefore it is +// important that the r2d2 connection pool is declared before the +// `Shared` buffer (since it contains the writer). +#[derive(Clone)] pub struct ConnPool { - sh: Arc, rpool: r2d2::Pool, + sh: Arc, #[cfg(feature = "tpool")] tpool: Option> } impl ConnPool { /// Return the pool size. /// /// In effect, this is the size of the read-only pool plus one (for the /// read/write connection). + #[must_use] pub fn size(&self) -> usize { (self.rpool.max_size() + 1) as usize } /// Acquire a read-only connection. + /// + /// # Errors + /// [`r2d2::Error`] will be returned if a read-only connection could not be + /// acquired. pub fn reader( &self ) -> Result, r2d2::Error> { self.rpool.get() } @@ -623,18 +661,19 @@ /// Acquire the read/write connection. /// /// If the writer is already taken, then block and wait for it to become /// available. 
+ #[must_use] + #[allow(clippy::significant_drop_tightening)] pub fn writer(&self) -> WrConn { let mut g = self.sh.inner.lock(); let conn = loop { if let Some(conn) = g.conn.take() { break conn; - } else { - self.sh.signal.wait(&mut g); } + self.sh.signal.wait(&mut g); }; WrConn { sh: Arc::clone(&self.sh), inner: ManuallyDrop::new(conn) @@ -643,14 +682,13 @@ /// Attempt to acquire the writer connection. /// /// Returns `Some(conn)` if the writer connection was available at the time /// of the request. Returns `None` if the writer has already been taken. + #[must_use] pub fn try_writer(&self) -> Option { - let mut g = self.sh.inner.lock(); - let conn = g.conn.take()?; - + let conn = self.sh.inner.lock().conn.take()?; Some(WrConn { sh: Arc::clone(&self.sh), inner: ManuallyDrop::new(conn) }) } @@ -658,10 +696,15 @@ /// Special queries. impl ConnPool { /// Return the number of unused pages. + /// + /// # Errors + /// [`Error::R2D2`] indicates that it wasn't possible to acquire a read-only + /// connection from the connection pool. [`Error::Sqlite`] means it was not + /// possible to query the free page list count. pub fn freelist_count(&self) -> Result { Ok(self.reader()?.query_row_and_then( "PRAGMA freelist_count;'", [], |row| row.get(0) @@ -668,38 +711,37 @@ )?) } } -pub enum RunError { - R2D2(r2d2::Error), - App(E) -} - /// Read-only connection processing. impl ConnPool { /// Run a read-only database operation. /// /// # Errors - /// If a connection could not be acquired from the connection pool, - /// `Err(RunError::R2D2(r2d2::Error))` will be returned. If the application - /// callback fails, this function will return `Err(RunError::App(E))`. - pub fn run_ro(&self, f: F) -> Result> + /// The error type `E` is used to return application-defined errors, though + /// it must be possible to convert a `r2d2::Error` into `E` using the `From` + /// trait. 
+ pub fn run_ro(&self, f: F) -> Result where T: Send + 'static, - E: fmt::Debug + Send + 'static, - F: FnOnce(&Connection) -> Result + Send + 'static + F: FnOnce(&Connection) -> Result + Send + 'static, + E: From { // Acquire a read-only connection from the pool - let conn = self.reader().map_err(|e| RunError::R2D2(e))?; + let conn = self.reader()?; - // Run caller-provided closure. On error map error to RunError::App(). - f(&conn).map_err(|e| RunError::App(e)) + // Run caller-provided closure. + f(&conn) } /// Run a read-only database operation on a thread. /// + /// # Errors + /// [`r2d2::Error`] is returned if it wasn't possible to acquire a read-only + /// connection from the connection pool. + /// /// # Panics /// A thread pool must be associated with the [`ConnPool`] or this method /// will panic. #[cfg(feature = "tpool")] #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))] @@ -726,10 +768,14 @@ /// /// The supplied closure in `f` should return a `Result` where the `Ok` /// case will be passed as a "set" value through the `swctx` channel, and the /// `Err` case will be passed as a "fail" value. /// + /// # Errors + /// [`r2d2::Error`] is returned if it wasn't possible to acquire a read-only + /// connection from the connection pool. + /// /// # Panics /// A thread pool must be associated with the [`ConnPool`] or this method /// will panic. #[cfg(feature = "tpool")] #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))] @@ -748,22 +794,30 @@ let conn = self.reader()?; let (sctx, wctx) = swctx::mkpair(); + // Ignore errors relating to pass the results back tpool.execute(move || match f(&conn) { - Ok(t) => sctx.set(t), - Err(e) => sctx.fail(e) + Ok(t) => { + let _ = sctx.set(t); + } + Err(e) => { + let _ = sctx.fail(e); + } }); Ok(wctx) } } /// Read/Write connection processing. impl ConnPool { /// Run a read/write database operation. + /// + /// # Errors + /// Returns an application-specific type `E` on error. 
pub fn run_rw(&self, f: F) -> Result where T: Send + 'static, E: fmt::Debug + Send + 'static, F: FnOnce(&mut WrConn) -> Result + Send + 'static @@ -826,12 +880,16 @@ let mut conn = self.writer(); let (sctx, wctx) = swctx::mkpair(); tpool.execute(move || match f(&mut conn) { - Ok(t) => sctx.set(t), - Err(e) => sctx.fail(e) + Ok(t) => { + let _ = sctx.set(t); + } + Err(e) => { + let _ = sctx.fail(e); + } }); wctx } } @@ -838,14 +896,15 @@ impl ConnPool { #[cfg(feature = "tpool")] #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))] + #[must_use] pub fn incremental_vacuum( &self, n: Option ) -> swctx::WaitCtx<(), (), rusqlite::Error> { self.run_rw_thrd_result(move |conn| conn.incremental_vacuum(n)) } } // vim: set ft=rust et sw=2 ts=2 sts=2 cinoptions=2 tw=79 : Index: src/rawhook.rs ================================================================== --- src/rawhook.rs +++ src/rawhook.rs @@ -13,13 +13,13 @@ impl TryFrom for Action { type Error = (); fn try_from(action: rusqlite::hooks::Action) -> Result { match action { - rusqlite::hooks::Action::SQLITE_INSERT => Ok(Action::Insert), - rusqlite::hooks::Action::SQLITE_UPDATE => Ok(Action::Update), - rusqlite::hooks::Action::SQLITE_DELETE => Ok(Action::Delete), + rusqlite::hooks::Action::SQLITE_INSERT => Ok(Self::Insert), + rusqlite::hooks::Action::SQLITE_UPDATE => Ok(Self::Update), + rusqlite::hooks::Action::SQLITE_DELETE => Ok(Self::Delete), _ => Err(()) } } } @@ -34,20 +34,20 @@ fn commit(&self) -> bool; fn rollback(&self); } -pub fn hook(conn: &Connection, cb: Arc) { - let cb2 = Arc::clone(&cb); +pub fn hook(conn: &Connection, cb: &Arc) { + let cb2 = Arc::clone(cb); conn.commit_hook(Some(move || cb2.commit())); - let cb2 = Arc::clone(&cb); + let cb2 = Arc::clone(cb); conn.rollback_hook(Some(move || { cb2.rollback(); })); - let cb2 = Arc::clone(&cb); + let cb2 = Arc::clone(cb); conn.update_hook(Some(move |action, dbname: &str, table: &str, rowid| { let Ok(action) = Action::try_from(action) else { // Just ignore unknown 
actions return; }; Index: src/utils.rs ================================================================== --- src/utils.rs +++ src/utils.rs @@ -7,18 +7,25 @@ #[cfg(feature = "tpool")] use super::ConnPool; /// Return the number of pages in the freelist. +/// +/// # Errors +/// Returns [`rusqlite::Error`]. pub fn freelist_count(conn: &Connection) -> Result { conn.query_row_and_then("PRAGMA freelist_count;'", [], |row| row.get(0)) } /// Run an incremental vacuum. /// /// If `n` is `None` the entrire list of free pages will be processed. If it /// is `Some(n)` then only up to `n` pages will be processed. +/// +/// # Errors +/// Returns [`rusqlite::Error`]. +#[allow(clippy::option_if_let_else)] pub fn incremental_vacuum( conn: &Connection, n: Option ) -> Result<(), rusqlite::Error> { if let Some(n) = n { @@ -29,23 +36,30 @@ .map(|_| ()) } #[cfg(feature = "tpool")] #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))] +#[must_use] pub fn pooled_incremental_vacuum( cpool: &ConnPool, tpool: &ThreadPool, n: Option ) -> swctx::WaitCtx<(), (), rusqlite::Error> { let (sctx, wctx) = swctx::mkpair(); let conn = cpool.writer(); + // Kick off incremental vacuum on the thread pool. Ignore any errors caused + // by returning the results. tpool.execute(move || match conn.incremental_vacuum(n) { - Ok(_) => sctx.set(()), - Err(e) => sctx.fail(e) + Ok(()) => { + let _ = sctx.set(()); + } + Err(e) => { + let _ = sctx.fail(e); + } }); wctx } // vim: set ft=rust et sw=2 ts=2 sts=2 cinoptions=2 tw=79 : Index: src/wrconn.rs ================================================================== --- src/wrconn.rs +++ src/wrconn.rs @@ -27,46 +27,53 @@ pub(super) inner: ManuallyDrop } impl WrConn { /// Add dirt to the writer connection. + // ToDo: broken-lint + #[allow(clippy::missing_const_for_fn)] pub fn add_dirt(&mut self, weight: usize) { self.inner.dirt = self.inner.dirt.saturating_add(weight); } } impl WrConn { + /// Run incremental vacuum. 
+ /// + /// # Errors + /// On error `rusqlite::Error` is returned. pub fn incremental_vacuum( &self, n: Option ) -> Result<(), rusqlite::Error> { - if let Some(n) = n { - self - .inner - .conn - .execute("PRAGMA incremental_vacuum(?);", params![n]) - } else { - self - .inner - .conn - .execute("PRAGMA incremental_vacuum;", params![]) - } + n.map_or_else( + || { + self + .inner + .conn + .execute("PRAGMA incremental_vacuum;", params![]) + }, + |n| { + self + .inner + .conn + .execute("PRAGMA incremental_vacuum(?);", params![n]) + } + ) .map(|_| ()) } } impl Deref for WrConn { type Target = Connection; - #[inline(always)] fn deref(&self) -> &Connection { &self.inner.conn } } impl DerefMut for WrConn { - #[inline(always)] fn deref_mut(&mut self) -> &mut Connection { &mut self.inner.conn } } Index: www/changelog.md ================================================================== --- www/changelog.md +++ www/changelog.md @@ -1,16 +1,99 @@ # Change Log +⚠️ indicates a breaking change. + ## [Unreleased] -[Details](/vdiff?from=sqlsrv-0.5.0&to=trunk) +[Details](/vdiff?from=sqlsrv-0.10.0&to=trunk) + +### Added + +### Changed + +### Removed + +--- + +## [0.10.0] - 2025-06-06 + +[Details](/vdiff?from=sqlsrv-0.9.2&to=sqlsrv-0.10.0) + +### Changed + +- ⚠️ Updated `rusqlite` to `0.36.0` +- ⚠️ Updated `r2d2_sqlite` to `0.29.0` + +--- + +## [0.9.2] - 2025-05-20 + +[Details](/vdiff?from=sqlsrv-0.9.1&to=sqlsrv-0.9.2) ### Added +- Re-export `r2d2_sqlite::SqliteConnectionManager`. + +--- + +## [0.9.1] - 2025-05-18 + +[Details](/vdiff?from=sqlsrv-0.9.0&to=sqlsrv-0.9.1) + +### Changed + +- Move read-only connection pool above the read/write connection in internal + struct, so that sqlite will clean up its wal files on drop. + +--- + +## [0.9.0] - 2025-05-07 + +[Details](/vdiff?from=sqlsrv-0.8.0&to=sqlsrv-0.9.0) + ### Changed + +- `ConnPool` is `Clone`:able. ### Removed + +- ⚠️ Removed `RunError`. `ConnPool:run_ro()` now uses a `From` + bound on `E` instead. 
+ +--- + +## [0.8.0] - 2025-04-25 + +[Details](/vdiff?from=sqlsrv-0.7.0&to=sqlsrv-0.8.0) + +### Changed + +- ⚠️ Updated `rusqlite` to `0.35.0` +- ⚠️ Updated `r2d2_sqlite` to `0.28.0` +- Edition 2024 + +--- + +## [0.7.0] - 2025-03-13 + +[Details](/vdiff?from=sqlsrv-0.6.0&to=sqlsrv-0.7.0) + +### Changed + +- ⚠️ Updated `rusqlite` to version `0.34.0`. +- ⚠️ Updated `r2d2_sqlite` to version `0.27.0`. + +--- + +## [0.6.0] - 2025-02-09 + +[Details](/vdiff?from=sqlsrv-0.5.0&to=sqlsrv-0.6.0) + +### Changed + +- ⚠️ Updated `rusqlite` to version `0.33.0`. +- ⚠️ Updated `r2d2_sqlite` to version `0.26.0`. --- ## [0.5.0] - 2024-08-06 Index: www/index.md ================================================================== --- www/index.md +++ www/index.md @@ -24,7 +24,61 @@ maintained [Change Log](./changelog.md). ## Project status -This library is in _very_ early stages. +This crate is being actively developed and maintained. + +_sqlsrv_ is a _tracking crate_, meaning it tracks and re-exports other crates: +- `rusqlite` +- `r2d2` +- `r2d2_sqlite` + +This also means it is unlikely to reach `1.0.0`. + +### Version compatibility + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
sqlsrvrusqliter2d2r2d2_sqlite
0.5.x0.32.10.8.100.25.0
0.6.x0.33.00.8.100.26.0
0.7.x0.34.00.8.100.27.0
0.8.x0.35.00.8.100.28.0
0.9.x0.35.00.8.100.28.0
0.10.x0.36.00.8.100.29.0