Index: Cargo.toml ================================================================== --- Cargo.toml +++ Cargo.toml @@ -11,13 +11,14 @@ rust-version = "1.56" exclude = [ ".fossil-settings", ".efiles", ".fslckout", + "bacon.toml", "build_docs.sh", "examples", "www", "rustfmt.toml" ] # https://doc.rust-lang.org/cargo/reference/manifest.html#the-badges-section [badges] @@ -33,11 +34,11 @@ # Need to add the `hooks` feature. Unfortunately the version needs to be # specified here. It would be much more convenient if it could use the version # from r2d2_sqlite. Allegedely one can use the version "*", which apparently # does not mean "latest", but have not yet confirmed this. rusqlite = { version = "0.32.1", features = ["hooks"] } -swctx = { version = "0.2.2", optional = true } +swctx = { version = "0.3.0", optional = true } threadpool = { version = "1.8.1", optional = true } [dev-dependencies] hex = { version = "0.4.3" } rand = { version = "0.8.5" } @@ -46,5 +47,11 @@ [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs", "--generate-link-to-definition"] +[lints.clippy] +all = { level = "deny", priority = -1 } +pedantic = { level = "warn", priority = -1 } +nursery = { level = "warn", priority = -1 } +cargo = { level = "warn", priority = -1 } + ADDED bacon.toml Index: bacon.toml ================================================================== --- /dev/null +++ bacon.toml @@ -0,0 +1,105 @@ +# This is a configuration file for the bacon tool +# +# Bacon repository: https://github.com/Canop/bacon +# Complete help on configuration: https://dystroy.org/bacon/config/ +# You can also check bacon's own bacon.toml file +# as an example: https://github.com/Canop/bacon/blob/main/bacon.toml + +# For information about clippy lints, see: +# https://github.com/rust-lang/rust-clippy/blob/master/README.md + +#default_job = "check" +default_job = "clippy-all" + +[jobs.check] +command = ["cargo", "check", "--color", "always"] +need_stdout = false + 
+[jobs.check-all] +command = ["cargo", "check", "--all-targets", "--color", "always"] +need_stdout = false + +# Run clippy on the default target +[jobs.clippy] +command = [ + "cargo", "clippy", + "--color", "always", +] +need_stdout = false + +# Run clippy on all targets +# To disable some lints, you may change the job this way: +# [jobs.clippy-all] +# command = [ +# "cargo", "clippy", +# "--all-targets", +# "--color", "always", +# "--", +# "-A", "clippy::bool_to_int_with_if", +# "-A", "clippy::collapsible_if", +# "-A", "clippy::derive_partial_eq_without_eq", +# ] +# need_stdout = false +[jobs.clippy-all] +command = [ + "cargo", "clippy", + "--all-targets", + "--color", "always", +] +need_stdout = false + +# This job lets you run +# - all tests: bacon test +# - a specific test: bacon test -- config::test_default_files +# - the tests of a package: bacon test -- -- -p config +[jobs.test] +command = [ + "cargo", "test", "--color", "always", + "--", "--color", "always", # see https://github.com/Canop/bacon/issues/124 +] +need_stdout = true + +[jobs.doc] +command = ["cargo", "doc", "--color", "always", "--no-deps"] +need_stdout = false + +# If the doc compiles, then it opens in your browser and bacon switches +# to the previous job +[jobs.doc-open] +command = ["cargo", "doc", "--color", "always", "--no-deps", "--open"] +need_stdout = false +on_success = "back" # so that we don't open the browser at each change + +# You can run your application and have the result displayed in bacon, +# *if* it makes sense for this crate. +# Don't forget the `--color always` part or the errors won't be +# properly parsed. +# If your program never stops (eg a server), you may set `background` +# to false to have the cargo run output immediately displayed instead +# of waiting for program's end. 
+[jobs.run] +command = [ + "cargo", "run", + "--color", "always", + # put launch parameters for your program behind a `--` separator +] +need_stdout = true +allow_warnings = true +background = true + +# This parameterized job runs the example of your choice, as soon +# as the code compiles. +# Call it as +# bacon ex -- my-example +[jobs.ex] +command = ["cargo", "run", "--color", "always", "--example"] +need_stdout = true +allow_warnings = true + +# You may define here keybindings that would be specific to +# a project, for example a shortcut to launch a specific job. +# Shortcuts to internal functions (scrolling, toggling, etc.) +# should go in your personal global prefs.toml file instead. +[keybindings] +# alt-m = "job:my-job" +c = "job:clippy-all" # comment this to have 'c' run clippy on only the default target Index: examples/simple.rs ================================================================== --- examples/simple.rs +++ examples/simple.rs @@ -50,11 +50,15 @@ Ok(()) } } +#[allow(clippy::too_many_lines)] fn main() { + const SQL_LOOKUP_TABLE: &str = "SELECT EXISTS(SELECT 1 FROM sqlite_master \ + WHERE type='table' AND name=?);"; + let schema = Box::new(Schema {}); #[allow(unused_mut)] let mut bldr = sqlsrv::Builder::new(schema) .incremental_autoclean(10, None) .reg_scalar_fn(RegOn::Both(register_genuid)) @@ -127,13 +131,10 @@ ) .unwrap(); } - const SQL_LOOKUP_TABLE: &str = "SELECT EXISTS(SELECT 1 FROM sqlite_master \ - WHERE type='table' AND name=?);"; - { let rconn = connpool.reader().unwrap(); let mut stmt = rconn.prepare_cached(SQL_LOOKUP_TABLE).unwrap(); let have = stmt .query_row(params!("stuff"), |row| row.get::(0)) @@ -191,11 +192,11 @@ #[cfg(feature = "tpool")] tpool.join(); } -/// Register a pwhash() SQL function which returns a hex-encoded version of +/// Register a `pwhash()` SQL function that returns a hex-encoded version of /// the SHA256 hash of the input string. 
fn register_pwhash(db: &Connection) -> Result<(), rusqlite::Error> { db.create_scalar_function( "pwhash", 1, @@ -211,10 +212,13 @@ Ok(hex::encode(&result[..])) } ) } +const CHARSET: &[u8] = + b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_"; + /// Register an SQL function called `genuid` that will generate a random /// (hopefully unique) identifier of a requested length. fn register_genuid(db: &Connection) -> Result<(), rusqlite::Error> { db.create_scalar_function( @@ -223,13 +227,10 @@ FunctionFlags::SQLITE_UTF8 | FunctionFlags::SQLITE_DETERMINISTIC, move |ctx| { assert_eq!(ctx.len(), 1, "called with unexpected number of arguments"); let len = ctx.get::(0)?; - const CHARSET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\ - abcdefghijklmnopqrstuvwxyz\ - 0123456789-_"; let mut rng = rand::thread_rng(); let id: String = (0..len) .map(|_| { Index: src/err.rs ================================================================== --- src/err.rs +++ src/err.rs @@ -8,50 +8,51 @@ R2D2(r2d2::Error), Sqlite(rusqlite::Error) } impl Error { - pub fn bad_format(s: S) -> Self { - Error::BadFormat(s.to_string()) + #[allow(clippy::needless_pass_by_value)] + pub fn bad_format(s: impl ToString) -> Self { + Self::BadFormat(s.to_string()) } } impl std::error::Error for Error {} impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { - Error::BadFormat(s) => { + Self::BadFormat(s) => { write!(f, "Bad format error; {}", s) } - Error::IO(s) => { + Self::IO(s) => { write!(f, "I/O error; {}", s) } - Error::R2D2(ref err) => { + Self::R2D2(ref err) => { write!(f, "r2d2 error; {}", err) } - Error::Sqlite(ref err) => { + Self::Sqlite(ref err) => { write!(f, "Sqlite error; {}", err) } } } } impl From for Error { fn from(err: io::Error) -> Self { - Error::IO(err.to_string()) + Self::IO(err.to_string()) } } impl From for Error { fn from(err: r2d2::Error) -> Self { - Error::R2D2(err) + Self::R2D2(err) } } impl From for Error { fn from(err: 
rusqlite::Error) -> Self { - Error::Sqlite(err) + Self::Sqlite(err) } } // vim: set ft=rust et sw=2 ts=2 sts=2 cinoptions=2 tw=79 : Index: src/lib.rs ================================================================== --- src/lib.rs +++ src/lib.rs @@ -1,5 +1,7 @@ +#![allow(clippy::doc_markdown)] + //! A library for implementing an in-process SQLite database server. //! //! # Connection pooling //! sqlsrv implements connection pooling that reflects the concurrency model //! of SQLite: It supports multiple parallel readers, but only one writer. @@ -60,11 +62,11 @@ /// Wrapper around a SQL functions registration callback used to select which /// connection types to perform registrations on. pub enum RegOn where - F: Fn(&Connection) -> Result<(), rusqlite::Error> + Sync + Sync + F: Fn(&Connection) -> Result<(), rusqlite::Error> { /// This registration callback should only be called for read-only /// connections. RO(F), @@ -99,19 +101,25 @@ /// While this method can be used to perform schema upgrades, there are two /// specialized methods (`need_upgrade()` and `upgrade()`) that can be used /// for this purpose instead. /// /// The default implementation does nothing but returns `Ok(())`. + /// + /// # Errors + /// Application-specific error. #[allow(unused_variables)] fn init(&self, conn: &mut Connection, newdb: bool) -> Result<(), Error> { Ok(()) } /// Application callback used to determine if the database schema is out of /// date and needs to be updated. /// /// The default implementation does nothing but returns `Ok(false)`. + /// + /// # Errors + /// Application-specific error. #[allow(unused_variables)] fn need_upgrade(&self, conn: &Connection) -> Result { Ok(false) } @@ -118,10 +126,13 @@ /// Upgrade the database schema. /// /// This is called if [`SchemaMgr::need_upgrade()`] returns `Ok(true)`. /// /// The default implementation does nothing but returns `Ok(())`. + /// + /// # Errors + /// Application-specific error. 
#[allow(unused_variables)] fn upgrade(&self, conn: &mut Connection) -> Result<(), Error> { Ok(()) } } @@ -166,11 +177,11 @@ for rf in &self.regfuncs { match rf { CbType::Ro(ref f) | CbType::Both(ref f) => { f(conn)?; } - _ => {} + CbType::Rw(_) => {} } } Ok(()) } @@ -207,11 +218,14 @@ Ok(conn) } /// Run a full vacuum. - fn full_vacuum(&self, conn: &Connection) -> Result<(), rusqlite::Error> { + /// + /// This is an internal function that may be called by `build()` if a full + /// vacuum has been requested. + fn full_vacuum(conn: &Connection) -> Result<(), rusqlite::Error> { conn.execute("VACUUM;", params![])?; Ok(()) } fn create_ro_pool( @@ -236,10 +250,11 @@ /// Create a new `Builder` for constructing a [`ConnPool`] object. /// /// Default to not run a full vacuum of the database on initialization and /// create 2 read-only connections for the pool. /// No workers thread pool will be used. + #[must_use] pub fn new(schmgr: Box) -> Self { Self { schmgr, full_vacuum: false, max_readers: 2, @@ -252,11 +267,12 @@ } /// Trigger a full vacuum when initializing the connection pool. /// /// Operates on an owned `Builder` object. - pub fn init_vacuum(mut self) -> Self { + #[must_use] + pub const fn init_vacuum(mut self) -> Self { self.full_vacuum = true; self } /// Trigger a full vacuum when initializing the connection pool. @@ -268,11 +284,12 @@ } /// Set maximum number of readers in the connection pool. /// /// Operates on an owned `Builder` object. - pub fn max_readers(mut self, n: usize) -> Self { + #[must_use] + pub const fn max_readers(mut self, n: usize) -> Self { self.max_readers = n; self } /// Set maximum number of readers in the connection pool. @@ -284,10 +301,11 @@ } /// Request that a "raw" update hook be added to the writer connection. /// /// Operates on an owned `Builder` object. + #[must_use] pub fn hook(mut self, hook: Arc) -> Self { self.hook = Some(hook); self } @@ -302,11 +320,12 @@ /// Enable incremental autovacuum. 
/// /// `dirt_watermark` is used to set what amount of "dirt" is required in /// order to trigger an autoclean. `nfree` is the number of blocks in the /// freelists to process each time the autoclean is run. - pub fn incremental_autoclean( + #[must_use] + pub const fn incremental_autoclean( mut self, dirt_watermark: usize, npages: Option ) -> Self { self.autoclean = Some(AutoClean { @@ -320,10 +339,11 @@ /// /// The closure should be wrapped in a `RegOn::RO()` if the function should /// only be registered on read-only connections. `RegOn::RW()` is used to /// register the function on the read/write connection. Use `RegOn::Both()` /// to register in both read-only and the read/write connection. + #[must_use] pub fn reg_scalar_fn(mut self, r: RegOn) -> Self where F: Fn(&Connection) -> Result<(), rusqlite::Error> + Send + Sync + 'static { self.reg_scalar_fn_r(r); @@ -360,10 +380,11 @@ } self } #[cfg(feature = "tpool")] + #[must_use] pub fn thread_pool(mut self, tpool: Arc) -> Self { self.tpool = Some(tpool); self } @@ -372,10 +393,13 @@ self.tpool = Some(tpool); self } /// Construct a connection pool. + /// + /// # Errors + /// [`Error::Sqlite`] will be returned if a database error occurred. pub fn build

(mut self, fname: P) -> Result where P: AsRef { // ToDo: Use std::path::absolute() once stabilized @@ -401,11 +425,11 @@ for rf in &regfuncs { match rf { CbType::Rw(ref f) | CbType::Both(ref f) => { f(&conn)?; } - _ => {} + CbType::Ro(_) => {} } } // // Perform schema initialization. @@ -421,18 +445,18 @@ // // Perform a full vacuum if requested to do so. // if self.full_vacuum { - self.full_vacuum(&conn)?; + Self::full_vacuum(&conn)?; } // // Register a callback hook // if let Some(ref hook) = self.hook { - rawhook::hook(&conn, Arc::clone(hook)); + rawhook::hook(&conn, hook); } // // Set up connection pool for read-only connections. // @@ -464,11 +488,14 @@ /// writer as well. /// /// This method should not be called if the application has requested to add /// a raw update hook. /// - /// # Panic + /// # Errors + /// [`Error::Sqlite`] is returned for database errors. + /// + /// # Panics /// This method will panic if a hook has been added to the Builder. pub fn build_with_changelog_hook( mut self, fname: P, hook: Box + Send> @@ -476,15 +503,14 @@ where P: AsRef, D: FromStr + Send + Sized + 'static, T: FromStr + Send + Sized + 'static { - if self.hook.is_some() { - panic!( - "Can't build a connection pool with both a raw and changelog hook" - ); - } + assert!( + self.hook.is_none(), + "Can't build a connection pool with both a raw and changelog hook" + ); // ToDo: Use std::path::absolute() once stabilized let fname = fname.as_ref(); let db_exists = fname.exists(); @@ -507,11 +533,11 @@ for rf in &regfuncs { match rf { CbType::Rw(ref f) | CbType::Both(ref f) => { f(&conn)?; } - _ => {} + CbType::Ro(_) => {} } } // @@ -528,11 +554,11 @@ // // Perform a full vacuum if requested to do so. // if self.full_vacuum { - self.full_vacuum(&conn)?; + Self::full_vacuum(&conn)?; } // // Register a callback hook // @@ -608,15 +634,20 @@ impl ConnPool { /// Return the pool size. /// /// In effect, this is the size of the read-only pool plus one (for the /// read/write connection).
+ #[must_use] pub fn size(&self) -> usize { (self.rpool.max_size() + 1) as usize } /// Acquire a read-only connection. + /// + /// # Errors + /// [`r2d2::Error`] will be returned if a read-only connection could not be + /// acquired. pub fn reader( &self ) -> Result, r2d2::Error> { self.rpool.get() } @@ -623,18 +654,19 @@ /// Acquire the read/write connection. /// /// If the writer is already taken, then block and wait for it to become /// available. + #[must_use] + #[allow(clippy::significant_drop_tightening)] pub fn writer(&self) -> WrConn { let mut g = self.sh.inner.lock(); let conn = loop { if let Some(conn) = g.conn.take() { break conn; - } else { - self.sh.signal.wait(&mut g); } + self.sh.signal.wait(&mut g); }; WrConn { sh: Arc::clone(&self.sh), inner: ManuallyDrop::new(conn) @@ -643,14 +675,13 @@ /// Attempt to acquire the writer connection. /// /// Returns `Some(conn)` if the writer connection was available at the time /// of the request. Returns `None` if the writer has already been taken. + #[must_use] pub fn try_writer(&self) -> Option { - let mut g = self.sh.inner.lock(); - let conn = g.conn.take()?; - + let conn = self.sh.inner.lock().conn.take()?; Some(WrConn { sh: Arc::clone(&self.sh), inner: ManuallyDrop::new(conn) }) } @@ -658,10 +689,15 @@ /// Special queries. impl ConnPool { /// Return the number of unused pages. + /// + /// # Errors + /// [`Error::R2D2`] indicates that it wasn't possible to acquire a read-only + /// connection from the connection pool. [`Error::Sqlite`] means it was not + /// possible to query the free page list count. pub fn freelist_count(&self) -> Result { Ok(self.reader()?.query_row_and_then( "PRAGMA freelist_count;'", [], |row| row.get(0) @@ -696,10 +732,14 @@ f(&conn).map_err(|e| RunError::App(e)) } /// Run a read-only database operation on a thread. /// + /// # Errors + /// [`r2d2::Error`] is returned if it wasn't possible to acquire a read-only + /// connection from the connection pool.
+ /// /// # Panics /// A thread pool must be associated with the [`ConnPool`] or this method /// will panic. #[cfg(feature = "tpool")] #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))] @@ -726,10 +766,14 @@ /// /// The supplied closure in `f` should return a `Result` where the `Ok` /// case will be passed as a "set" value through the `swctx` channel, and the /// `Err` case will be passed as a "fail" value. /// + /// # Errors + /// [`r2d2::Error`] is returned if it wasn't possible to acquire a read-only + /// connection from the connection pool. + /// /// # Panics /// A thread pool must be associated with the [`ConnPool`] or this method /// will panic. #[cfg(feature = "tpool")] #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))] @@ -748,22 +792,30 @@ let conn = self.reader()?; let (sctx, wctx) = swctx::mkpair(); + // Ignore errors relating to pass the results back tpool.execute(move || match f(&conn) { - Ok(t) => sctx.set(t), - Err(e) => sctx.fail(e) + Ok(t) => { + let _ = sctx.set(t); + } + Err(e) => { + let _ = sctx.fail(e); + } }); Ok(wctx) } } /// Read/Write connection processing. impl ConnPool { /// Run a read/write database operation. + /// + /// # Errors + /// Returns an application-specific type `E` on error. 
pub fn run_rw(&self, f: F) -> Result where T: Send + 'static, E: fmt::Debug + Send + 'static, F: FnOnce(&mut WrConn) -> Result + Send + 'static @@ -826,12 +878,16 @@ let mut conn = self.writer(); let (sctx, wctx) = swctx::mkpair(); tpool.execute(move || match f(&mut conn) { - Ok(t) => sctx.set(t), - Err(e) => sctx.fail(e) + Ok(t) => { + let _ = sctx.set(t); + } + Err(e) => { + let _ = sctx.fail(e); + } }); wctx } } @@ -838,14 +894,15 @@ impl ConnPool { #[cfg(feature = "tpool")] #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))] + #[must_use] pub fn incremental_vacuum( &self, n: Option ) -> swctx::WaitCtx<(), (), rusqlite::Error> { self.run_rw_thrd_result(move |conn| conn.incremental_vacuum(n)) } } // vim: set ft=rust et sw=2 ts=2 sts=2 cinoptions=2 tw=79 : Index: src/rawhook.rs ================================================================== --- src/rawhook.rs +++ src/rawhook.rs @@ -13,13 +13,13 @@ impl TryFrom for Action { type Error = (); fn try_from(action: rusqlite::hooks::Action) -> Result { match action { - rusqlite::hooks::Action::SQLITE_INSERT => Ok(Action::Insert), - rusqlite::hooks::Action::SQLITE_UPDATE => Ok(Action::Update), - rusqlite::hooks::Action::SQLITE_DELETE => Ok(Action::Delete), + rusqlite::hooks::Action::SQLITE_INSERT => Ok(Self::Insert), + rusqlite::hooks::Action::SQLITE_UPDATE => Ok(Self::Update), + rusqlite::hooks::Action::SQLITE_DELETE => Ok(Self::Delete), _ => Err(()) } } } @@ -34,20 +34,20 @@ fn commit(&self) -> bool; fn rollback(&self); } -pub fn hook(conn: &Connection, cb: Arc) { - let cb2 = Arc::clone(&cb); +pub fn hook(conn: &Connection, cb: &Arc) { + let cb2 = Arc::clone(cb); conn.commit_hook(Some(move || cb2.commit())); - let cb2 = Arc::clone(&cb); + let cb2 = Arc::clone(cb); conn.rollback_hook(Some(move || { cb2.rollback(); })); - let cb2 = Arc::clone(&cb); + let cb2 = Arc::clone(cb); conn.update_hook(Some(move |action, dbname: &str, table: &str, rowid| { let Ok(action) = Action::try_from(action) else { // Just ignore unknown 
actions return; }; Index: src/utils.rs ================================================================== --- src/utils.rs +++ src/utils.rs @@ -7,18 +7,25 @@ #[cfg(feature = "tpool")] use super::ConnPool; /// Return the number of pages in the freelist. +/// +/// # Errors +/// Returns [`rusqlite::Error`]. pub fn freelist_count(conn: &Connection) -> Result { conn.query_row_and_then("PRAGMA freelist_count;'", [], |row| row.get(0)) } /// Run an incremental vacuum. /// /// If `n` is `None` the entrire list of free pages will be processed. If it /// is `Some(n)` then only up to `n` pages will be processed. +/// +/// # Errors +/// Returns [`rusqlite::Error`]. +#[allow(clippy::option_if_let_else)] pub fn incremental_vacuum( conn: &Connection, n: Option ) -> Result<(), rusqlite::Error> { if let Some(n) = n { @@ -29,23 +36,30 @@ .map(|_| ()) } #[cfg(feature = "tpool")] #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))] +#[must_use] pub fn pooled_incremental_vacuum( cpool: &ConnPool, tpool: &ThreadPool, n: Option ) -> swctx::WaitCtx<(), (), rusqlite::Error> { let (sctx, wctx) = swctx::mkpair(); let conn = cpool.writer(); + // Kick off incremental vacuum on the thread pool. Ignore any errors caused + // by returning the results. tpool.execute(move || match conn.incremental_vacuum(n) { - Ok(_) => sctx.set(()), - Err(e) => sctx.fail(e) + Ok(()) => { + let _ = sctx.set(()); + } + Err(e) => { + let _ = sctx.fail(e); + } }); wctx } // vim: set ft=rust et sw=2 ts=2 sts=2 cinoptions=2 tw=79 : Index: src/wrconn.rs ================================================================== --- src/wrconn.rs +++ src/wrconn.rs @@ -33,40 +33,45 @@ self.inner.dirt = self.inner.dirt.saturating_add(weight); } } impl WrConn { + /// Run incremental vacuum. + /// + /// # Errors + /// On error `rusqlite::Error` is returned. 
pub fn incremental_vacuum( &self, n: Option ) -> Result<(), rusqlite::Error> { - if let Some(n) = n { - self - .inner - .conn - .execute("PRAGMA incremental_vacuum(?);", params![n]) - } else { - self - .inner - .conn - .execute("PRAGMA incremental_vacuum;", params![]) - } + n.map_or_else( + || { + self + .inner + .conn + .execute("PRAGMA incremental_vacuum;", params![]) + }, + |n| { + self + .inner + .conn + .execute("PRAGMA incremental_vacuum(?);", params![n]) + } + ) .map(|_| ()) } } impl Deref for WrConn { type Target = Connection; - #[inline(always)] fn deref(&self) -> &Connection { &self.inner.conn } } impl DerefMut for WrConn { - #[inline(always)] fn deref_mut(&mut self) -> &mut Connection { &mut self.inner.conn } } Index: www/changelog.md ================================================================== --- www/changelog.md +++ www/changelog.md @@ -1,6 +1,8 @@ # Change Log + +⚠️ indicates a breaking change. ## [Unreleased] [Details](/vdiff?from=sqlsrv-0.5.0&to=trunk)