sqlsrv

Check-in Differences
Login

Check-in Differences

Many hyperlinks are disabled.
Use anonymous login to enable hyperlinks.

Difference From sqlsrv-0.5.0 To trunk

2025-06-05
23:36
Release maintenance. Leaf check-in: 30224ab8e1 user: jan tags: sqlsrv-0.10.0, trunk
23:33
Update rusqlite to 0.36 and r2d2_sqlite to 0.29. check-in: a9021ba9cd user: jan tags: trunk
2024-09-10
02:45
Updated swctx. Pedantic clippy updates. check-in: a561ae41d5 user: jan tags: trunk
2024-08-06
16:49
Release maintenance. check-in: f6bf902f10 user: jan tags: sqlsrv-0.5.0, trunk
16:46
Update changelog. check-in: 9355c6e401 user: jan tags: trunk

Changes to .efiles.
1
2
3
4
5
6
7
8
9
10
11

Cargo.toml
README.md
www/index.md
www/changelog.md
src/err.rs
src/lib.rs
src/wrconn.rs
src/rawhook.rs
src/changehook.rs
src/utils.rs
examples/simple.rs












>
1
2
3
4
5
6
7
8
9
10
11
12
Cargo.toml
README.md
www/index.md
www/changelog.md
src/err.rs
src/lib.rs
src/wrconn.rs
src/rawhook.rs
src/changehook.rs
src/utils.rs
examples/simple.rs
examples/rotest.rs
Changes to Cargo.toml.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15

16
17
18

19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50








[package]
name = "sqlsrv"
version = "0.5.0"
edition = "2021"
license = "0BSD"
# https://crates.io/category_slugs
categories = [ "database" ]
keywords = [ "sqlite", "server" ]
repository = "https://repos.qrnch.tech/pub/sqlsrv"
description = "Utility functions for managing SQLite connections in a server application."
rust-version = "1.56"
exclude = [
  ".fossil-settings",
  ".efiles",
  ".fslckout",

  "build_docs.sh",
  "examples",
  "www",

  "rustfmt.toml"
]

# https://doc.rust-lang.org/cargo/reference/manifest.html#the-badges-section
[badges]
maintenance = { status = "experimental" }

[features]
tpool = ["dep:swctx", "dep:threadpool"]

[dependencies]
parking_lot = { version = "0.12.3" }
r2d2 = { version = "0.8.10" }
r2d2_sqlite = { version = "0.25.0" }
# Need to add the `hooks` feature.  Unfortunately the version needs to be
# specified here.  It would be much more convenient if it could use the version
# from r2d2_sqlite.  Allegedly one can use the version "*", which apparently
# does not mean "latest", but have not yet confirmed this.
rusqlite = { version = "0.32.1", features = ["hooks"] }
swctx = { version = "0.2.2", optional = true }
threadpool = { version = "1.8.1", optional = true }

[dev-dependencies]
hex = { version = "0.4.3" }
rand = { version = "0.8.5" }
rusqlite = { version = "0.32.1", features = ["functions"] }
sha2 = { version = "0.10.8" }

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs", "--generate-link-to-definition"]











|
|






|




>



>











|

|




|
|




|
|
|





>
>
>
>
>
>
>
>
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
[package]
name = "sqlsrv"
version = "0.10.0"
edition = "2024"
license = "0BSD"
# https://crates.io/category_slugs
categories = [ "database" ]
keywords = [ "sqlite", "server" ]
repository = "https://repos.qrnch.tech/pub/sqlsrv"
description = "Utility functions for managing SQLite connections in a server application."
rust-version = "1.85"
# Files excluded from the published crate package.
# Note: "bacon.toml" was listed twice; the duplicate entry has been removed.
exclude = [
  ".fossil-settings",
  ".efiles",
  ".fslckout",
  "bacon.toml",
  "build_docs.sh",
  "examples",
  "www",
  "rustfmt.toml"
]

# https://doc.rust-lang.org/cargo/reference/manifest.html#the-badges-section
[badges]
maintenance = { status = "experimental" }

[features]
tpool = ["dep:swctx", "dep:threadpool"]

[dependencies]
parking_lot = { version = "0.12.4" }
r2d2 = { version = "0.8.10" }
r2d2_sqlite = { version = "0.29.0" }
# Need to add the `hooks` feature.  Unfortunately the version needs to be
# specified here.  It would be much more convenient if it could use the version
# from r2d2_sqlite.  Allegedly one can use the version "*", which apparently
# does not mean "latest", but have not yet confirmed this.
rusqlite = { version = "0.36.0", features = ["hooks"] }
swctx = { version = "0.3.0", optional = true }
threadpool = { version = "1.8.1", optional = true }

[dev-dependencies]
hex = { version = "0.4.3" }
rand = { version = "0.9.1" }
rusqlite = { version = "0.36.0", features = ["functions"] }
sha2 = { version = "0.10.9" }

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs", "--generate-link-to-definition"]

[lints.clippy]
all = { level = "warn", priority = -1 }
pedantic = { level = "warn", priority = -1 }
nursery = { level = "warn", priority = -1 }
cargo = { level = "warn", priority = -1 }

multiple_crate_versions = "allow"

Added bacon.toml.




















































































































































































































>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
# This is a configuration file for the bacon tool
#
# Bacon repository: https://github.com/Canop/bacon
# Complete help on configuration: https://dystroy.org/bacon/config/
# You can also check bacon's own bacon.toml file
#  as an example: https://github.com/Canop/bacon/blob/main/bacon.toml

# For information about clippy lints, see:
# https://github.com/rust-lang/rust-clippy/blob/master/README.md

#default_job = "check"
default_job = "clippy-all"

[jobs.check]
command = ["cargo", "check", "--color", "always"]
need_stdout = false

[jobs.check-all]
command = ["cargo", "check", "--all-targets", "--color", "always"]
need_stdout = false

# Run clippy on the default target
[jobs.clippy]
command = [
    "cargo", "clippy",
    "--color", "always",
]
need_stdout = false

# Run clippy on all targets
# To disable some lints, you may change the job this way:
#    [jobs.clippy-all]
#    command = [
#        "cargo", "clippy",
#        "--all-targets",
#        "--color", "always",
#    	 "--",
#    	 "-A", "clippy::bool_to_int_with_if",
#    	 "-A", "clippy::collapsible_if",
#    	 "-A", "clippy::derive_partial_eq_without_eq",
#    ]
# need_stdout = false
[jobs.clippy-all]
command = [
    "cargo", "clippy",
    "--all-targets",
    "--all-features",
    "--color", "always",
]
need_stdout = false

# This job lets you run
# - all tests: bacon test
# - a specific test: bacon test -- config::test_default_files
# - the tests of a package: bacon test -- -- -p config
[jobs.test]
command = [
    "cargo", "test", "--color", "always",
    "--", "--color", "always", # see https://github.com/Canop/bacon/issues/124
]
need_stdout = true

[jobs.doc]
command = ["cargo", "doc", "--color", "always", "--no-deps"]
need_stdout = false

# If the doc compiles, then it opens in your browser and bacon switches
# to the previous job
[jobs.doc-open]
command = ["cargo", "doc", "--color", "always", "--no-deps", "--open"]
need_stdout = false
on_success = "back" # so that we don't open the browser at each change

# You can run your application and have the result displayed in bacon,
# *if* it makes sense for this crate.
# Don't forget the `--color always` part or the errors won't be
# properly parsed.
# If your program never stops (eg a server), you may set `background`
# to false to have the cargo run output immediately displayed instead
# of waiting for program's end.
[jobs.run]
command = [
    "cargo", "run",
    "--color", "always",
    # put launch parameters for your program behind a `--` separator
]
need_stdout = true
allow_warnings = true
background = true

# This parameterized job runs the example of your choice, as soon
# as the code compiles.
# Call it as
#    bacon ex -- my-example
[jobs.ex]
command = ["cargo", "run", "--color", "always", "--example"]
need_stdout = true
allow_warnings = true

# You may define here keybindings that would be specific to
# a project, for example a shortcut to launch a specific job.
# Shortcuts to internal functions (scrolling, toggling, etc.)
# should go in your personal global prefs.toml file instead.
[keybindings]
# alt-m = "job:my-job"
c = "job:clippy-all" # comment this to have 'c' run clippy on only the default target
Added examples/rotest.rs.




































































































































>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
use rusqlite::{Connection, ToSql, params};

use sqlsrv::SchemaMgr;

/// Example-local error type aggregating the error kinds this example can
/// encounter: rusqlite errors and r2d2 pool errors.
#[derive(Debug)]
#[allow(dead_code)]
enum MyError {
  Sqlite(rusqlite::Error),
  R2D2(r2d2::Error)
}

impl From<r2d2::Error> for MyError {
  fn from(err: r2d2::Error) -> Self {
    Self::R2D2(err)
  }
}

impl From<rusqlite::Error> for MyError {
  fn from(err: rusqlite::Error) -> Self {
    Self::Sqlite(err)
  }
}


/// Schema manager that creates a single `agents` table when the database is
/// initialized.
struct Schema {}

impl SchemaMgr for Schema {
  /// Create the `agents` table if it does not already exist.  Runs for both
  /// new and existing databases (`_newdb` is ignored).
  fn init(
    &self,
    conn: &mut Connection,
    _newdb: bool
  ) -> Result<(), sqlsrv::Error> {
    conn.execute(
      "CREATE TABLE IF NOT EXISTS agents (
  id   INTEGER  PRIMARY KEY,
  name TEXT     UNIQUE NOT NULL,
  age  INTEGER  NOT NULL
);",
      &[] as &[&dyn ToSql]
    )?;

    Ok(())
  }
}


#[allow(clippy::too_many_lines)]
fn main() {
  let schema = Box::new(Schema {});

  let bldr = sqlsrv::Builder::new(schema);

  let cpool = bldr.build("test.sqlite").unwrap();

  // Attempt an INSERT through a read-only connection.  As the println below
  // states, this is expected to fail; the example demonstrates that writes
  // over `run_ro()` are rejected.
  let res: Result<(), MyError> = cpool.run_ro(|conn| {
    const SQL: &str = "INSERT INTO agents (name, age) VALUES ('frank', 42);";
    let mut stmt = conn.prepare_cached(SQL)?;
    stmt.execute(params![])?;
    Ok(())
  });

  println!("Should panic with an read-only error:");
  res.unwrap();
}

// vim: set ft=rust et sw=2 ts=2 sts=2 cinoptions=2 tw=79 :
Changes to examples/simple.rs.
48
49
50
51
52
53
54

55



56
57
58
59
60
61
62
    }

    Ok(())
  }
}



fn main() {



  let schema = Box::new(Schema {});
  #[allow(unused_mut)]
  let mut bldr = sqlsrv::Builder::new(schema)
    .incremental_autoclean(10, None)
    .reg_scalar_fn(RegOn::Both(register_genuid))
    .reg_scalar_fn(RegOn::RO(register_pwhash));








>

>
>
>







48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
    }

    Ok(())
  }
}


#[allow(clippy::too_many_lines)]
fn main() {
  const SQL_LOOKUP_TABLE: &str = "SELECT EXISTS(SELECT 1 FROM sqlite_master \
                                  WHERE type='table' AND name=?);";

  let schema = Box::new(Schema {});
  #[allow(unused_mut)]
  let mut bldr = sqlsrv::Builder::new(schema)
    .incremental_autoclean(10, None)
    .reg_scalar_fn(RegOn::Both(register_genuid))
    .reg_scalar_fn(RegOn::RO(register_pwhash));

125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
);",
        &[] as &[&dyn ToSql]
      )
      .unwrap();
  }


  const SQL_LOOKUP_TABLE: &str = "SELECT EXISTS(SELECT 1 FROM sqlite_master \
                                  WHERE type='table' AND name=?);";

  {
    let rconn = connpool.reader().unwrap();
    let mut stmt = rconn.prepare_cached(SQL_LOOKUP_TABLE).unwrap();
    let have = stmt
      .query_row(params!("stuff"), |row| row.get::<usize, bool>(0))
      .unwrap();
    assert!(have);







<
<
<







129
130
131
132
133
134
135



136
137
138
139
140
141
142
);",
        &[] as &[&dyn ToSql]
      )
      .unwrap();
  }





  {
    let rconn = connpool.reader().unwrap();
    let mut stmt = rconn.prepare_cached(SQL_LOOKUP_TABLE).unwrap();
    let have = stmt
      .query_row(params!("stuff"), |row| row.get::<usize, bool>(0))
      .unwrap();
    assert!(have);
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215



216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246


  #[cfg(feature = "tpool")]
  tpool.join();
}


/// Register a pwhash() SQL function which returns a hex-encoded version of
/// the SHA256 hash of the input string.
fn register_pwhash(db: &Connection) -> Result<(), rusqlite::Error> {
  db.create_scalar_function(
    "pwhash",
    1,
    FunctionFlags::SQLITE_UTF8 | FunctionFlags::SQLITE_DETERMINISTIC,
    move |ctx| {
      assert_eq!(ctx.len(), 1, "called with unexpected number of arguments");
      let s = ctx.get::<String>(0)?;

      let mut hasher = Sha256::new();
      hasher.update(s.as_bytes());
      let result = hasher.finalize();

      Ok(hex::encode(&result[..]))
    }
  )
}





/// Register an SQL function called `genuid` that will generate a random
/// (hopefully unique) identifier of a requested length.
fn register_genuid(db: &Connection) -> Result<(), rusqlite::Error> {
  db.create_scalar_function(
    "genuid",
    1,
    FunctionFlags::SQLITE_UTF8 | FunctionFlags::SQLITE_DETERMINISTIC,
    move |ctx| {
      assert_eq!(ctx.len(), 1, "called with unexpected number of arguments");
      let len = ctx.get::<usize>(0)?;

      const CHARSET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\
                            abcdefghijklmnopqrstuvwxyz\
                            0123456789-_";

      let mut rng = rand::thread_rng();

      let id: String = (0..len)
        .map(|_| {
          let idx = rng.gen_range(0..CHARSET.len());
          CHARSET[idx] as char
        })
        .collect();

      Ok(id)
    }
  )
}

// vim: set ft=rust et sw=2 ts=2 sts=2 cinoptions=2 tw=79 :







|



















>
>
>












<
<
<

|



|










190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231



232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247


  #[cfg(feature = "tpool")]
  tpool.join();
}


/// Register a `pwhash()` SQL function that returns a hex-encoded version of
/// the SHA256 hash of the input string.
fn register_pwhash(db: &Connection) -> Result<(), rusqlite::Error> {
  db.create_scalar_function(
    "pwhash",
    1,
    FunctionFlags::SQLITE_UTF8 | FunctionFlags::SQLITE_DETERMINISTIC,
    move |ctx| {
      assert_eq!(ctx.len(), 1, "called with unexpected number of arguments");
      let s = ctx.get::<String>(0)?;

      let mut hasher = Sha256::new();
      hasher.update(s.as_bytes());
      let result = hasher.finalize();

      Ok(hex::encode(&result[..]))
    }
  )
}

const CHARSET: &[u8] =
  b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";


/// Register an SQL function called `genuid` that will generate a random
/// (hopefully unique) identifier of a requested length.
fn register_genuid(db: &Connection) -> Result<(), rusqlite::Error> {
  db.create_scalar_function(
    "genuid",
    1,
    FunctionFlags::SQLITE_UTF8 | FunctionFlags::SQLITE_DETERMINISTIC,
    move |ctx| {
      assert_eq!(ctx.len(), 1, "called with unexpected number of arguments");
      let len = ctx.get::<usize>(0)?;





      let mut rng = rand::rng();

      let id: String = (0..len)
        .map(|_| {
          let idx = rng.random_range(0..CHARSET.len());
          CHARSET[idx] as char
        })
        .collect();

      Ok(id)
    }
  )
}

// vim: set ft=rust et sw=2 ts=2 sts=2 cinoptions=2 tw=79 :
Changes to rustfmt.toml.
1
2
3
4
5
6
7
8
9
10
blank_lines_upper_bound = 2
comment_width = 79
edition = "2021"
format_strings = true
max_width = 79
match_block_trailing_comma = false
# merge_imports = true
newline_style = "Unix"
tab_spaces = 2
trailing_comma = "Never"


|







1
2
3
4
5
6
7
8
9
10
blank_lines_upper_bound = 2
comment_width = 79
edition = "2024"
format_strings = true
max_width = 79
match_block_trailing_comma = false
# merge_imports = true
newline_style = "Unix"
tab_spaces = 2
trailing_comma = "Never"
Changes to src/err.rs.
1
2
3
4
5
6
7
8
9
10
11
12

13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
use std::{fmt, io};

/// Errors that are returned by sqlsrv.
#[derive(Debug)]
pub enum Error {
  BadFormat(String),
  IO(String),
  R2D2(r2d2::Error),
  Sqlite(rusqlite::Error)
}

impl Error {

  pub fn bad_format<S: ToString>(s: S) -> Self {
    Error::BadFormat(s.to_string())
  }
}

impl std::error::Error for Error {}

impl fmt::Display for Error {
  fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    match self {
      Error::BadFormat(s) => {
        write!(f, "Bad format error; {}", s)
      }
      Error::IO(s) => {
        write!(f, "I/O error; {}", s)
      }
      Error::R2D2(ref err) => {
        write!(f, "r2d2 error; {}", err)
      }
      Error::Sqlite(ref err) => {
        write!(f, "Sqlite error; {}", err)
      }
    }
  }
}

impl From<io::Error> for Error {
  fn from(err: io::Error) -> Self {
    Error::IO(err.to_string())
  }
}

impl From<r2d2::Error> for Error {
  fn from(err: r2d2::Error) -> Self {
    Error::R2D2(err)
  }
}

impl From<rusqlite::Error> for Error {
  fn from(err: rusqlite::Error) -> Self {
    Error::Sqlite(err)
  }
}

// vim: set ft=rust et sw=2 ts=2 sts=2 cinoptions=2 tw=79 :












>
|
|








|
|

|
|

|
|

|
|







|





|





|




1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
use std::{fmt, io};

/// Errors that are returned by sqlsrv.
#[derive(Debug)]
pub enum Error {
  /// Input did not have the expected format; carries a description.
  BadFormat(String),
  /// I/O failure, stored as the stringified `std::io::Error`.
  IO(String),
  /// Error propagated from the r2d2 connection pool.
  R2D2(r2d2::Error),
  /// Error propagated from rusqlite.
  Sqlite(rusqlite::Error)
}

impl Error {
  /// Construct an [`Error::BadFormat`] from anything that can be converted
  /// to a `String`.
  #[allow(clippy::needless_pass_by_value)]
  pub fn bad_format(s: impl ToString) -> Self {
    Self::BadFormat(s.to_string())
  }
}

impl std::error::Error for Error {}

// Human-readable rendering; each variant is prefixed with its category.
impl fmt::Display for Error {
  fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    match self {
      Self::BadFormat(s) => {
        write!(f, "Bad format error; {s}")
      }
      Self::IO(s) => {
        write!(f, "I/O error; {s}")
      }
      Self::R2D2(err) => {
        write!(f, "r2d2 error; {err}")
      }
      Self::Sqlite(err) => {
        write!(f, "Sqlite error; {err}")
      }
    }
  }
}

// Note: the io::Error is flattened to a String rather than stored, so the
// original error object (and its `source()`) is not recoverable.
impl From<io::Error> for Error {
  fn from(err: io::Error) -> Self {
    Self::IO(err.to_string())
  }
}

impl From<r2d2::Error> for Error {
  fn from(err: r2d2::Error) -> Self {
    Self::R2D2(err)
  }
}

impl From<rusqlite::Error> for Error {
  fn from(err: rusqlite::Error) -> Self {
    Self::Sqlite(err)
  }
}

// vim: set ft=rust et sw=2 ts=2 sts=2 cinoptions=2 tw=79 :
Changes to src/lib.rs.


1
2
3
4
5
6
7


//! A library for implementing an in-process SQLite database server.
//!
//! # Connection pooling
//! sqlsrv implements connection pooling that reflects the concurrency model
//! of SQLite:  It supports multiple parallel readers, but only one writer.
//!
//! # Thread pooling
>
>







1
2
3
4
5
6
7
8
9
#![allow(clippy::doc_markdown)]

//! A library for implementing an in-process SQLite database server.
//!
//! # Connection pooling
//! sqlsrv implements connection pooling that reflects the concurrency model
//! of SQLite:  It supports multiple parallel readers, but only one writer.
//!
//! # Thread pooling
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
  sync::Arc
};

use parking_lot::{Condvar, Mutex};

use r2d2::{CustomizeConnection, PooledConnection};

use r2d2_sqlite::SqliteConnectionManager;

pub use r2d2;
pub use rusqlite;

use rusqlite::{params, Connection, OpenFlags};

#[cfg(feature = "tpool")]
use threadpool::ThreadPool;

pub use changehook::ChangeLogHook;
pub use err::Error;
pub use rawhook::{Action, Hook};
pub use wrconn::WrConn;


/// Wrapper around a SQL functions registration callback used to select which
/// connection types to perform registrations on.
pub enum RegOn<F>
where
  F: Fn(&Connection) -> Result<(), rusqlite::Error> + Sync + Sync
{
  /// This registration callback should only be called for read-only
  /// connections.
  RO(F),

  /// This registration callback should only be called for the read/write
  /// connections.







|




|














|







40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
  sync::Arc
};

use parking_lot::{Condvar, Mutex};

use r2d2::{CustomizeConnection, PooledConnection};

pub use r2d2_sqlite::SqliteConnectionManager;

pub use r2d2;
pub use rusqlite;

use rusqlite::{Connection, OpenFlags, params};

#[cfg(feature = "tpool")]
use threadpool::ThreadPool;

pub use changehook::ChangeLogHook;
pub use err::Error;
pub use rawhook::{Action, Hook};
pub use wrconn::WrConn;


/// Wrapper around a SQL functions registration callback used to select which
/// connection types to perform registrations on.
pub enum RegOn<F>
where
  F: Fn(&Connection) -> Result<(), rusqlite::Error>
{
  /// This registration callback should only be called for read-only
  /// connections.
  RO(F),

  /// This registration callback should only be called for the read/write
  /// connections.
97
98
99
100
101
102
103



104
105
106
107
108
109
110
111
112



113
114
115
116
117
118
119
120
121
122



123
124
125
126
127
128
129
  /// initialization.
  ///
  /// While this method can be used to perform schema upgrades, there are two
  /// specialized methods (`need_upgrade()` and `upgrade()`) that can be used
  /// for this purpose instead.
  ///
  /// The default implementation does nothing but returns `Ok(())`.



  #[allow(unused_variables)]
  fn init(&self, conn: &mut Connection, newdb: bool) -> Result<(), Error> {
    Ok(())
  }

  /// Application callback used to determine if the database schema is out of
  /// date and needs to be updated.
  ///
  /// The default implementation does nothing but returns `Ok(false)`.



  #[allow(unused_variables)]
  fn need_upgrade(&self, conn: &Connection) -> Result<bool, Error> {
    Ok(false)
  }

  /// Upgrade the database schema.
  ///
  /// This is called if [`SchemaMgr::need_upgrade()`] returns `Ok(true)`.
  ///
  /// The default implementation does nothing but returns `Ok(())`.



  #[allow(unused_variables)]
  fn upgrade(&self, conn: &mut Connection) -> Result<(), Error> {
    Ok(())
  }
}









>
>
>









>
>
>










>
>
>







99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
  /// initialization.
  ///
  /// While this method can be used to perform schema upgrades, there are two
  /// specialized methods (`need_upgrade()` and `upgrade()`) that can be used
  /// for this purpose instead.
  ///
  /// The default implementation does nothing but returns `Ok(())`.
  ///
  /// # Errors
  /// Application-specific error.
  #[allow(unused_variables)]
  fn init(&self, conn: &mut Connection, newdb: bool) -> Result<(), Error> {
    Ok(())
  }

  /// Application callback used to determine if the database schema is out of
  /// date and needs to be updated.
  ///
  /// The default implementation does nothing but returns `Ok(false)`.
  ///
  /// # Errors
  /// Application-specific error.
  #[allow(unused_variables)]
  fn need_upgrade(&self, conn: &Connection) -> Result<bool, Error> {
    Ok(false)
  }

  /// Upgrade the database schema.
  ///
  /// This is called if [`SchemaMgr::need_upgrade()`] returns `Ok(true)`.
  ///
  /// The default implementation does nothing but returns `Ok(())`.
  ///
  /// # Errors
  /// Application-specific error.
  #[allow(unused_variables)]
  fn upgrade(&self, conn: &mut Connection) -> Result<(), Error> {
    Ok(())
  }
}


161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
    &self,
    conn: &mut rusqlite::Connection
  ) -> Result<(), rusqlite::Error> {
    conn.pragma_update(None, "foreign_keys", "ON")?;

    for rf in &self.regfuncs {
      match rf {
        CbType::Ro(ref f) | CbType::Both(ref f) => {
          f(conn)?;
        }
        _ => {}
      }
    }

    Ok(())
  }

  fn on_release(&self, _conn: rusqlite::Connection) {}







|


|







172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
    &self,
    conn: &mut rusqlite::Connection
  ) -> Result<(), rusqlite::Error> {
    conn.pragma_update(None, "foreign_keys", "ON")?;

    for rf in &self.regfuncs {
      match rf {
        CbType::Ro(f) | CbType::Both(f) => {
          f(conn)?;
        }
        CbType::Rw(_) => {}
      }
    }

    Ok(())
  }

  fn on_release(&self, _conn: rusqlite::Connection) {}
205
206
207
208
209
210
211



212
213
214
215
216
217
218
219
      conn.pragma_update(None, "auto_vacuum", "INCREMENTAL")?;
    }

    Ok(conn)
  }

  /// Run a full vacuum.



  fn full_vacuum(&self, conn: &Connection) -> Result<(), rusqlite::Error> {
    conn.execute("VACUUM;", params![])?;
    Ok(())
  }

  fn create_ro_pool(
    &self,
    fname: &Path,







>
>
>
|







216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
      conn.pragma_update(None, "auto_vacuum", "INCREMENTAL")?;
    }

    Ok(conn)
  }

  /// Run a full vacuum.
  ///
  /// This is an internal function that may be called by `build()` if a full
  /// vacuum has been requested.
  fn full_vacuum(conn: &Connection) -> Result<(), rusqlite::Error> {
    conn.execute("VACUUM;", params![])?;
    Ok(())
  }

  fn create_ro_pool(
    &self,
    fname: &Path,
234
235
236
237
238
239
240

241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256

257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272

273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288

289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306

307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324

325
326
327
328
329
330
331

impl Builder {
  /// Create a new `Builder` for constructing a [`ConnPool`] object.
  ///
  /// Default to not run a full vacuum of the database on initialization and
  /// create 2 read-only connections for the pool.
  /// No workers thread pool will be used.

  pub fn new(schmgr: Box<dyn SchemaMgr>) -> Self {
    Self {
      schmgr,
      full_vacuum: false,
      max_readers: 2,
      autoclean: None,
      hook: None,
      regfuncs: None,
      #[cfg(feature = "tpool")]
      tpool: None
    }
  }

  /// Trigger a full vacuum when initializing the connection pool.
  ///
  /// Operates on an owned `Builder` object.

  pub fn init_vacuum(mut self) -> Self {
    self.full_vacuum = true;
    self
  }

  /// Trigger a full vacuum when initializing the connection pool.
  ///
  /// Operates on a borrowed `Builder` object.
  pub fn init_vacuum_r(&mut self) -> &mut Self {
    self.full_vacuum = true;
    self
  }

  /// Set maximum number of readers in the connection pool.
  ///
  /// Operates on an owned `Builder` object.

  pub fn max_readers(mut self, n: usize) -> Self {
    self.max_readers = n;
    self
  }

  /// Set maximum number of readers in the connection pool.
  ///
  /// Operates on a borrowed `Builder` object.
  pub fn max_readers_r(&mut self, n: usize) -> &mut Self {
    self.max_readers = n;
    self
  }

  /// Request that a "raw" update hook be added to the writer connection.
  ///
  /// Operates on an owned `Builder` object.

  pub fn hook(mut self, hook: Arc<dyn Hook + Send + Sync>) -> Self {
    self.hook = Some(hook);
    self
  }

  /// Request that a "raw" update hook be added to the writer connection.
  ///
  /// Operates on a borrowed `Builder` object.
  pub fn hook_r(&mut self, hook: Arc<dyn Hook + Send + Sync>) -> &mut Self {
    self.hook = Some(hook);
    self
  }

  /// Enable incremental autovacuum.
  ///
  /// `dirt_watermark` is used to set what amount of "dirt" is required in
  /// order to trigger an autoclean.  `nfree` is the number of blocks in the
  /// freelists to process each time the autoclean is run.

  pub fn incremental_autoclean(
    mut self,
    dirt_watermark: usize,
    npages: Option<NonZeroUsize>
  ) -> Self {
    self.autoclean = Some(AutoClean {
      dirt_threshold: dirt_watermark,
      npages
    });
    self
  }

  /// Add a callback to register one or more scalar SQL functions.
  ///
  /// The closure should be wrapped in a `RegOn::RO()` if the function should
  /// only be registered on read-only connections.  `RegOn::RW()` is used to
  /// register the function on the read/write connection.  Use `RegOn::Both()`
  /// to register in both read-only and the read/write connection.

  pub fn reg_scalar_fn<F>(mut self, r: RegOn<F>) -> Self
  where
    F: Fn(&Connection) -> Result<(), rusqlite::Error> + Send + Sync + 'static
  {
    self.reg_scalar_fn_r(r);
    self
  }







>
















>
|







|







>
|







|







>


















>
|

















>







248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351

impl Builder {
  /// Create a new `Builder` for constructing a [`ConnPool`] object.
  ///
  /// Default to not run a full vacuum of the database on initialization and
  /// create 2 read-only connections for the pool.
  /// No workers thread pool will be used.
  #[must_use]
  pub fn new(schmgr: Box<dyn SchemaMgr>) -> Self {
    Self {
      schmgr,
      full_vacuum: false,
      max_readers: 2,
      autoclean: None,
      hook: None,
      regfuncs: None,
      #[cfg(feature = "tpool")]
      tpool: None
    }
  }

  /// Trigger a full vacuum when initializing the connection pool.
  ///
  /// Operates on an owned `Builder` object.
  #[must_use]
  pub const fn init_vacuum(mut self) -> Self {
    self.full_vacuum = true;
    self
  }

  /// Trigger a full vacuum when initializing the connection pool.
  ///
  /// Operates on a borrowed `Builder` object.
  pub const fn init_vacuum_r(&mut self) -> &mut Self {
    self.full_vacuum = true;
    self
  }

  /// Set maximum number of readers in the connection pool.
  ///
  /// Operates on an owned `Builder` object.
  #[must_use]
  pub const fn max_readers(mut self, n: usize) -> Self {
    self.max_readers = n;
    self
  }

  /// Set maximum number of readers in the connection pool.
  ///
  /// Operates on a borrowed `Builder` object.
  pub const fn max_readers_r(&mut self, n: usize) -> &mut Self {
    self.max_readers = n;
    self
  }

  /// Request that a "raw" update hook be added to the writer connection.
  ///
  /// Operates on an owned `Builder` object.
  #[must_use]
  pub fn hook(mut self, hook: Arc<dyn Hook + Send + Sync>) -> Self {
    self.hook = Some(hook);
    self
  }

  /// Request that a "raw" update hook be added to the writer connection.
  ///
  /// Operates on a borrowed `Builder` object.
  pub fn hook_r(&mut self, hook: Arc<dyn Hook + Send + Sync>) -> &mut Self {
    self.hook = Some(hook);
    self
  }

  /// Enable incremental autovacuum.
  ///
  /// `dirt_watermark` is used to set what amount of "dirt" is required in
  /// order to trigger an autoclean.  `nfree` is the number of blocks in the
  /// freelists to process each time the autoclean is run.
  #[must_use]
  pub const fn incremental_autoclean(
    mut self,
    dirt_watermark: usize,
    npages: Option<NonZeroUsize>
  ) -> Self {
    self.autoclean = Some(AutoClean {
      dirt_threshold: dirt_watermark,
      npages
    });
    self
  }

  /// Add a callback to register one or more scalar SQL functions.
  ///
  /// The closure should be wrapped in a `RegOn::RO()` if the function should
  /// only be registered on read-only connections.  `RegOn::RW()` is used to
  /// register the function on the read/write connection.  Use `RegOn::Both()`
  /// to register in both read-only and the read/write connection.
  #[must_use]
  pub fn reg_scalar_fn<F>(mut self, r: RegOn<F>) -> Self
  where
    F: Fn(&Connection) -> Result<(), rusqlite::Error> + Send + Sync + 'static
  {
    self.reg_scalar_fn_r(r);
    self
  }
358
359
360
361
362
363
364

365
366
367
368
369
370
371
372
373
374
375
376



377
378
379
380
381
382
383
          .push(CbType::Both(Box::new(f)));
      }
    }
    self
  }

  #[cfg(feature = "tpool")]

  pub fn thread_pool(mut self, tpool: Arc<ThreadPool>) -> Self {
    self.tpool = Some(tpool);
    self
  }

  #[cfg(feature = "tpool")]
  pub fn thread_pool_r(&mut self, tpool: Arc<ThreadPool>) -> &mut Self {
    self.tpool = Some(tpool);
    self
  }

  /// Construct a connection pool.



  pub fn build<P>(mut self, fname: P) -> Result<ConnPool, Error>
  where
    P: AsRef<Path>
  {
    // ToDo: Use std::path::absolute() once stabilized
    let fname = fname.as_ref();
    let db_exists = fname.exists();







>












>
>
>







378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
          .push(CbType::Both(Box::new(f)));
      }
    }
    self
  }

  #[cfg(feature = "tpool")]
  #[must_use]
  pub fn thread_pool(mut self, tpool: Arc<ThreadPool>) -> Self {
    self.tpool = Some(tpool);
    self
  }

  #[cfg(feature = "tpool")]
  pub fn thread_pool_r(&mut self, tpool: Arc<ThreadPool>) -> &mut Self {
    self.tpool = Some(tpool);
    self
  }

  /// Construct a connection pool.
  ///
  /// # Errors
  /// [`Error::Sqlite`] will be returned if a database error occurred.
  pub fn build<P>(mut self, fname: P) -> Result<ConnPool, Error>
  where
    P: AsRef<Path>
  {
    // ToDo: Use std::path::absolute() once stabilized
    let fname = fname.as_ref();
    let db_exists = fname.exists();
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454

455
456
457
458
459
460
461
462
463
464
465
466
467
468



469
470
471
472
473
474
475
476
477
478
479
480

481
482
483
484
485
486
487
488
489
490
491
492

    // Option<Vec<T>>  -->  Vec<T>
    let regfuncs = self.regfuncs.take().unwrap_or_default();

    // Call SQL function registration callbacks for read/write connection.
    for rf in &regfuncs {
      match rf {
        CbType::Rw(ref f) | CbType::Both(ref f) => {
          f(&conn)?;
        }
        _ => {}
      }
    }

    //
    // Perform schema initialization.
    //
    // This must be done after auto_vacuum is set, because auto_vacuum requires
    // configuration before any tables have been created.
    // See: https://www.sqlite.org/pragma.html#pragma_auto_vacuum
    //
    self.schmgr.init(&mut conn, !db_exists)?;
    if self.schmgr.need_upgrade(&conn)? {
      self.schmgr.upgrade(&mut conn)?;
    }

    //
    // Perform a full vacuum if requested to do so.
    //
    if self.full_vacuum {
      self.full_vacuum(&conn)?;
    }

    //
    // Register a callback hook
    //
    if let Some(ref hook) = self.hook {
      rawhook::hook(&conn, Arc::clone(hook));
    }

    //
    // Set up connection pool for read-only connections.
    //
    let rpool = self.create_ro_pool(fname, regfuncs)?;

    //
    // Prepare shared data
    //
    let iconn = InnerWrConn { conn, dirt: 0 };
    let inner = Inner { conn: Some(iconn) };
    let sh = Arc::new(Shared {
      inner: Mutex::new(inner),
      signal: Condvar::new(),
      autoclean: self.autoclean.clone()
    });

    Ok(ConnPool {
      sh,
      rpool,

      #[cfg(feature = "tpool")]
      tpool: self.tpool
    })
  }


  /// Construct a connection pool.
  ///
  /// Same as [`Builder::build()`], but register a change log callback on the
  /// writer as well.
  ///
  /// This method should not be called if the application has requested to add
  /// a raw update hook.
  ///



  /// # Panic
  /// This method will panic if a hook has been added to the Builder.
  pub fn build_with_changelog_hook<P, D, T>(
    mut self,
    fname: P,
    hook: Box<dyn ChangeLogHook<Database = D, Table = T> + Send>
  ) -> Result<ConnPool, Error>
  where
    P: AsRef<Path>,
    D: FromStr + Send + Sized + 'static,
    T: FromStr + Send + Sized + 'static
  {

    if self.hook.is_some() {
      panic!(
        "Can't build a connection pool with both a raw and changelog hook"
      );
    }

    // ToDo: Use std::path::absolute() once stabilized
    let fname = fname.as_ref();
    let db_exists = fname.exists();

    //
    // Set up the read/write connection







|


|



















|






|



















<

>














>
>
>
|











>
|
<
|
|
<







420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476

477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509

510
511

512
513
514
515
516
517
518

    // Option<Vec<T>>  -->  Vec<T>
    let regfuncs = self.regfuncs.take().unwrap_or_default();

    // Call SQL function registration callbacks for read/write connection.
    for rf in &regfuncs {
      match rf {
        CbType::Rw(f) | CbType::Both(f) => {
          f(&conn)?;
        }
        CbType::Ro(_) => {}
      }
    }

    //
    // Perform schema initialization.
    //
    // This must be done after auto_vacuum is set, because auto_vacuum requires
    // configuration before any tables have been created.
    // See: https://www.sqlite.org/pragma.html#pragma_auto_vacuum
    //
    self.schmgr.init(&mut conn, !db_exists)?;
    if self.schmgr.need_upgrade(&conn)? {
      self.schmgr.upgrade(&mut conn)?;
    }

    //
    // Perform a full vacuum if requested to do so.
    //
    if self.full_vacuum {
      Self::full_vacuum(&conn)?;
    }

    //
    // Register a callback hook
    //
    if let Some(ref hook) = self.hook {
      rawhook::hook(&conn, hook);
    }

    //
    // Set up connection pool for read-only connections.
    //
    let rpool = self.create_ro_pool(fname, regfuncs)?;

    //
    // Prepare shared data
    //
    let iconn = InnerWrConn { conn, dirt: 0 };
    let inner = Inner { conn: Some(iconn) };
    let sh = Arc::new(Shared {
      inner: Mutex::new(inner),
      signal: Condvar::new(),
      autoclean: self.autoclean.clone()
    });

    Ok(ConnPool {

      rpool,
      sh,
      #[cfg(feature = "tpool")]
      tpool: self.tpool
    })
  }


  /// Construct a connection pool.
  ///
  /// Same as [`Builder::build()`], but register a change log callback on the
  /// writer as well.
  ///
  /// This method should not be called if the application has requested to add
  /// a raw update hook.
  ///
  /// # Errors
  /// [`Error::Sqlite`] is returned for database errors.
  ///
  /// # Panics
  /// This method will panic if a hook has been added to the Builder.
  pub fn build_with_changelog_hook<P, D, T>(
    mut self,
    fname: P,
    hook: Box<dyn ChangeLogHook<Database = D, Table = T> + Send>
  ) -> Result<ConnPool, Error>
  where
    P: AsRef<Path>,
    D: FromStr + Send + Sized + 'static,
    T: FromStr + Send + Sized + 'static
  {
    assert!(
      self.hook.is_some(),

      "Can't build a connection pool with both a raw and changelog hook"
    );


    // ToDo: Use std::path::absolute() once stabilized
    let fname = fname.as_ref();
    let db_exists = fname.exists();

    //
    // Set up the read/write connection
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540

    // Option<Vec<T>>  -->  Vec<T>
    let regfuncs = self.regfuncs.take().unwrap_or_default();

    // Call SQL function registration callbacks for read/write connection.
    for rf in &regfuncs {
      match rf {
        CbType::Rw(ref f) | CbType::Both(ref f) => {
          f(&conn)?;
        }
        _ => {}
      }
    }


    //
    // Perform schema initialization.
    //
    // This must be done after auto_vacuum is set, because auto_vacuum requires
    // configuration before any tables have been created.
    // See: https://www.sqlite.org/pragma.html#pragma_auto_vacuum
    //
    self.schmgr.init(&mut conn, !db_exists)?;
    if self.schmgr.need_upgrade(&conn)? {
      self.schmgr.upgrade(&mut conn)?;
    }

    //
    // Perform a full vacuum if requested to do so.
    //
    if self.full_vacuum {
      self.full_vacuum(&conn)?;
    }

    //
    // Register a callback hook
    //
    changehook::hook(&conn, hook);








|


|




















|







528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566

    // Option<Vec<T>>  -->  Vec<T>
    let regfuncs = self.regfuncs.take().unwrap_or_default();

    // Call SQL function registration callbacks for read/write connection.
    for rf in &regfuncs {
      match rf {
        CbType::Rw(f) | CbType::Both(f) => {
          f(&conn)?;
        }
        CbType::Ro(_) => {}
      }
    }


    //
    // Perform schema initialization.
    //
    // This must be done after auto_vacuum is set, because auto_vacuum requires
    // configuration before any tables have been created.
    // See: https://www.sqlite.org/pragma.html#pragma_auto_vacuum
    //
    self.schmgr.init(&mut conn, !db_exists)?;
    if self.schmgr.need_upgrade(&conn)? {
      self.schmgr.upgrade(&mut conn)?;
    }

    //
    // Perform a full vacuum if requested to do so.
    //
    if self.full_vacuum {
      Self::full_vacuum(&conn)?;
    }

    //
    // Register a callback hook
    //
    changehook::hook(&conn, hook);

551
552
553
554
555
556
557
558
559

560
561
562
563
564
565
566
    let sh = Arc::new(Shared {
      inner: Mutex::new(inner),
      signal: Condvar::new(),
      autoclean: self.autoclean.clone()
    });

    Ok(ConnPool {
      sh,
      rpool,

      #[cfg(feature = "tpool")]
      tpool: self.tpool
    })
  }
}









<

>







577
578
579
580
581
582
583

584
585
586
587
588
589
590
591
592
    let sh = Arc::new(Shared {
      inner: Mutex::new(inner),
      signal: Condvar::new(),
      autoclean: self.autoclean.clone()
    });

    Ok(ConnPool {

      rpool,
      sh,
      #[cfg(feature = "tpool")]
      tpool: self.tpool
    })
  }
}


594
595
596
597
598
599
600







601
602
603

604
605
606
607
608
609
610
611
612

613
614
615
616
617




618
619
620
621
622
623
624
625
626
627


628
629
630
631
632
633
634
635

636
637
638
639
640
641
642
643
644
645
646
647

648
649
650
651
652
653
654
655
656
657
658
659
660
661
662





663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690

691
692
693
694
695
696
697
698
699
700




701
702
703
704
705
706
707
}


/// SQLite connection pool.
///
/// This is a specialized connection pool that is defined specifically for
/// sqlite, and only allows a single writer but multiple readers.







pub struct ConnPool {
  sh: Arc<Shared>,
  rpool: r2d2::Pool<SqliteConnectionManager>,

  #[cfg(feature = "tpool")]
  tpool: Option<Arc<ThreadPool>>
}

impl ConnPool {
  /// Return the pool size.
  ///
  /// In effect, this is the size of the read-only pool plus one (for the
  /// read/write connection).

  pub fn size(&self) -> usize {
    (self.rpool.max_size() + 1) as usize
  }

  /// Acquire a read-only connection.




  pub fn reader(
    &self
  ) -> Result<PooledConnection<SqliteConnectionManager>, r2d2::Error> {
    self.rpool.get()
  }

  /// Acquire the read/write connection.
  ///
  /// If the writer is already taken, then block and wait for it to become
  /// available.


  pub fn writer(&self) -> WrConn {
    let mut g = self.sh.inner.lock();
    let conn = loop {
      if let Some(conn) = g.conn.take() {
        break conn;
      } else {
        self.sh.signal.wait(&mut g);
      }

    };

    WrConn {
      sh: Arc::clone(&self.sh),
      inner: ManuallyDrop::new(conn)
    }
  }

  /// Attempt to acquire the writer connection.
  ///
  /// Returns `Some(conn)` if the writer connection was available at the time
  /// of the request.  Returns `None` if the writer has already been taken.

  pub fn try_writer(&self) -> Option<WrConn> {
    let mut g = self.sh.inner.lock();
    let conn = g.conn.take()?;

    Some(WrConn {
      sh: Arc::clone(&self.sh),
      inner: ManuallyDrop::new(conn)
    })
  }
}


/// Special queries.
impl ConnPool {
  /// Return the number of unused pages.





  pub fn freelist_count(&self) -> Result<usize, Error> {
    Ok(self.reader()?.query_row_and_then(
      "PRAGMA freelist_count;'",
      [],
      |row| row.get(0)
    )?)
  }
}


pub enum RunError<E> {
  R2D2(r2d2::Error),
  App(E)
}

/// Read-only connection processing.
impl ConnPool {
  /// Run a read-only database operation.
  ///
  /// # Errors
  /// If a connection could not be acquired from the connection pool,
  /// `Err(RunError::R2D2(r2d2::Error))` will be returned.  If the application
  /// callback fails, this function will return `Err(RunError::App(E))`.
  pub fn run_ro<T, E, F>(&self, f: F) -> Result<T, RunError<E>>
  where
    T: Send + 'static,
    E: fmt::Debug + Send + 'static,
    F: FnOnce(&Connection) -> Result<T, E> + Send + 'static

  {
    // Acquire a read-only connection from the pool
    let conn = self.reader().map_err(|e| RunError::R2D2(e))?;

    // Run caller-provided closure.  On error map error to RunError::App().
    f(&conn).map_err(|e| RunError::App(e))
  }

  /// Run a read-only database operation on a thread.
  ///




  /// # Panics
  /// A thread pool must be associated with the [`ConnPool`] or this method
  /// will panic.
  #[cfg(feature = "tpool")]
  #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))]
  pub fn run_ro_thrd<F>(&self, f: F) -> Result<(), r2d2::Error>
  where







>
>
>
>
>
>
>

<

>









>





>
>
>
>










>
>





<
<

>












>

|
<
<











>
>
>
>
>










<
<
<
<
<





|
|
|
|


<
|
>


|

|
|




>
>
>
>







620
621
622
623
624
625
626
627
628
629
630
631
632
633
634

635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672


673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689


690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715





716
717
718
719
720
721
722
723
724
725
726

727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
}


/// SQLite connection pool.
///
/// This is a specialized connection pool that is defined specifically for
/// sqlite, and only allows a single writer but multiple readers.
// Note:  In Rust the drop order of struct fields is in the order of
//        declaration.  If the writer is dropped before the readers, sqlite
//        will not clean up its wal files when the writet closes (presumably
//        because the readers are keeping the files locked).  Therefore it is
//        important that the r2d2 connection pool is declared before the
//        `Shared` buffer (since it contains the writer).
#[derive(Clone)]
pub struct ConnPool {

  rpool: r2d2::Pool<SqliteConnectionManager>,
  sh: Arc<Shared>,
  #[cfg(feature = "tpool")]
  tpool: Option<Arc<ThreadPool>>
}

impl ConnPool {
  /// Return the pool size.
  ///
  /// In effect, this is the size of the read-only pool plus one (for the
  /// read/write connection).
  #[must_use]
  pub fn size(&self) -> usize {
    (self.rpool.max_size() + 1) as usize
  }

  /// Acquire a read-only connection.
  ///
  /// # Errors
  /// [`r2d2::Error`] will be returned if a read-only connection could not be
  /// acquired.
  pub fn reader(
    &self
  ) -> Result<PooledConnection<SqliteConnectionManager>, r2d2::Error> {
    self.rpool.get()
  }

  /// Acquire the read/write connection.
  ///
  /// If the writer is already taken, then block and wait for it to become
  /// available.
  #[must_use]
  #[allow(clippy::significant_drop_tightening)]
  pub fn writer(&self) -> WrConn {
    let mut g = self.sh.inner.lock();
    let conn = loop {
      if let Some(conn) = g.conn.take() {
        break conn;


      }
      self.sh.signal.wait(&mut g);
    };

    WrConn {
      sh: Arc::clone(&self.sh),
      inner: ManuallyDrop::new(conn)
    }
  }

  /// Attempt to acquire the writer connection.
  ///
  /// Returns `Some(conn)` if the writer connection was available at the time
  /// of the request.  Returns `None` if the writer has already been taken.
  #[must_use]
  pub fn try_writer(&self) -> Option<WrConn> {
    let conn = self.sh.inner.lock().conn.take()?;


    Some(WrConn {
      sh: Arc::clone(&self.sh),
      inner: ManuallyDrop::new(conn)
    })
  }
}


/// Special queries.
impl ConnPool {
  /// Return the number of unused pages.
  ///
  /// # Errors
  /// [`Error::R2D2`] indicates that it wasn't possible to acquire a read-only
  /// connection from the connection pool.  [`Error::Sqlite`] means it was not
  /// possible to query the free page list count.
  pub fn freelist_count(&self) -> Result<usize, Error> {
    Ok(self.reader()?.query_row_and_then(
      "PRAGMA freelist_count;'",
      [],
      |row| row.get(0)
    )?)
  }
}







/// Read-only connection processing.
impl ConnPool {
  /// Run a read-only database operation.
  ///
  /// # Errors
  /// The error type `E` is used to return application-defined errors, though
  /// it must be possible to convert a `r2d2::Error` into `E` using the `From`
  /// trait.
  pub fn run_ro<T, F, E>(&self, f: F) -> Result<T, E>
  where
    T: Send + 'static,

    F: FnOnce(&Connection) -> Result<T, E> + Send + 'static,
    E: From<r2d2::Error>
  {
    // Acquire a read-only connection from the pool
    let conn = self.reader()?;

    // Run caller-provided closure.
    f(&conn)
  }

  /// Run a read-only database operation on a thread.
  ///
  /// # Errors
  /// [`r2d2::Error`] is returned if it wasn't possible to acquire a read-only
  /// connection from the connection pool.
  ///
  /// # Panics
  /// A thread pool must be associated with the [`ConnPool`] or this method
  /// will panic.
  #[cfg(feature = "tpool")]
  #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))]
  pub fn run_ro_thrd<F>(&self, f: F) -> Result<(), r2d2::Error>
  where
724
725
726
727
728
729
730




731
732
733
734
735
736
737
  /// receive the `Result<T, E>` of the supplied closure using a
  /// one-shot channel.
  ///
  /// The supplied closure in `f` should return a `Result<T, E>` where the `Ok`
  /// case will be passed as a "set" value through the `swctx` channel, and the
  /// `Err` case will be passed as a "fail" value.
  ///




  /// # Panics
  /// A thread pool must be associated with the [`ConnPool`] or this method
  /// will panic.
  #[cfg(feature = "tpool")]
  #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))]
  pub fn run_ro_thrd_result<T, E, F>(
    &self,







>
>
>
>







766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
  /// receive the `Result<T, E>` of the supplied closure using a
  /// one-shot channel.
  ///
  /// The supplied closure in `f` should return a `Result<T, E>` where the `Ok`
  /// case will be passed as a "set" value through the `swctx` channel, and the
  /// `Err` case will be passed as a "fail" value.
  ///
  /// # Errors
  /// [`r2d2::Error`] is returned if it wasn't possible to acquire a read-only
  /// connection from the connection pool.
  ///
  /// # Panics
  /// A thread pool must be associated with the [`ConnPool`] or this method
  /// will panic.
  #[cfg(feature = "tpool")]
  #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))]
  pub fn run_ro_thrd_result<T, E, F>(
    &self,
746
747
748
749
750
751
752

753
754


755


756
757
758
759
760
761
762
763
764



765
766
767
768
769
770
771
      panic!("ConnPool does to have a thread pool");
    };

    let conn = self.reader()?;

    let (sctx, wctx) = swctx::mkpair();


    tpool.execute(move || match f(&conn) {
      Ok(t) => sctx.set(t),


      Err(e) => sctx.fail(e)


    });

    Ok(wctx)
  }
}

/// Read/Write connection processing.
impl ConnPool {
  /// Run a read/write database operation.



  pub fn run_rw<T, E, F>(&self, f: F) -> Result<T, E>
  where
    T: Send + 'static,
    E: fmt::Debug + Send + 'static,
    F: FnOnce(&mut WrConn) -> Result<T, E> + Send + 'static
  {
    let mut conn = self.writer();







>

|
>
>
|
>
>









>
>
>







792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
      panic!("ConnPool does to have a thread pool");
    };

    let conn = self.reader()?;

    let (sctx, wctx) = swctx::mkpair();

    // Ignore errors relating to pass the results back
    tpool.execute(move || match f(&conn) {
      Ok(t) => {
        let _ = sctx.set(t);
      }
      Err(e) => {
        let _ = sctx.fail(e);
      }
    });

    Ok(wctx)
  }
}

/// Read/Write connection processing.
impl ConnPool {
  /// Run a read/write database operation.
  ///
  /// # Errors
  /// Returns an application-specific type `E` on error.
  pub fn run_rw<T, E, F>(&self, f: F) -> Result<T, E>
  where
    T: Send + 'static,
    E: fmt::Debug + Send + 'static,
    F: FnOnce(&mut WrConn) -> Result<T, E> + Send + 'static
  {
    let mut conn = self.writer();
824
825
826
827
828
829
830
831


832


833
834
835
836
837
838
839
840
841
842

843
844
845
846
847
848
849
850
851
    };

    let mut conn = self.writer();

    let (sctx, wctx) = swctx::mkpair();

    tpool.execute(move || match f(&mut conn) {
      Ok(t) => sctx.set(t),


      Err(e) => sctx.fail(e)


    });

    wctx
  }
}


impl ConnPool {
  #[cfg(feature = "tpool")]
  #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))]

  pub fn incremental_vacuum(
    &self,
    n: Option<usize>
  ) -> swctx::WaitCtx<(), (), rusqlite::Error> {
    self.run_rw_thrd_result(move |conn| conn.incremental_vacuum(n))
  }
}

// vim: set ft=rust et sw=2 ts=2 sts=2 cinoptions=2 tw=79 :







|
>
>
|
>
>










>









878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
    };

    let mut conn = self.writer();

    let (sctx, wctx) = swctx::mkpair();

    tpool.execute(move || match f(&mut conn) {
      Ok(t) => {
        let _ = sctx.set(t);
      }
      Err(e) => {
        let _ = sctx.fail(e);
      }
    });

    wctx
  }
}


impl ConnPool {
  #[cfg(feature = "tpool")]
  #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))]
  #[must_use]
  pub fn incremental_vacuum(
    &self,
    n: Option<usize>
  ) -> swctx::WaitCtx<(), (), rusqlite::Error> {
    self.run_rw_thrd_result(move |conn| conn.incremental_vacuum(n))
  }
}

// vim: set ft=rust et sw=2 ts=2 sts=2 cinoptions=2 tw=79 :
Changes to src/rawhook.rs.
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
}

impl TryFrom<rusqlite::hooks::Action> for Action {
  type Error = ();

  fn try_from(action: rusqlite::hooks::Action) -> Result<Self, Self::Error> {
    match action {
      rusqlite::hooks::Action::SQLITE_INSERT => Ok(Action::Insert),
      rusqlite::hooks::Action::SQLITE_UPDATE => Ok(Action::Update),
      rusqlite::hooks::Action::SQLITE_DELETE => Ok(Action::Delete),
      _ => Err(())
    }
  }
}


/// Application callback used to process changes to the database.
pub trait Hook {
  /// Called whenever a database change has been made within a transaction.
  ///
  /// At the point this method is called the change is not persistent yet.
  fn update(&self, action: Action, db: &str, table: &str, rowid: i64);

  fn commit(&self) -> bool;

  fn rollback(&self);
}

pub fn hook(conn: &Connection, cb: Arc<dyn Hook + Send + Sync>) {
  let cb2 = Arc::clone(&cb);
  conn.commit_hook(Some(move || cb2.commit()));

  let cb2 = Arc::clone(&cb);
  conn.rollback_hook(Some(move || {
    cb2.rollback();
  }));

  let cb2 = Arc::clone(&cb);
  conn.update_hook(Some(move |action, dbname: &str, table: &str, rowid| {
    let Ok(action) = Action::try_from(action) else {
      // Just ignore unknown actions
      return;
    };
    cb2.update(action, dbname, table, rowid);
  }));
}

// vim: set ft=rust et sw=2 ts=2 sts=2 cinoptions=2 tw=79 :







|
|
|


















|
|


|




|










11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
}

impl TryFrom<rusqlite::hooks::Action> for Action {
  type Error = ();

  fn try_from(action: rusqlite::hooks::Action) -> Result<Self, Self::Error> {
    match action {
      rusqlite::hooks::Action::SQLITE_INSERT => Ok(Self::Insert),
      rusqlite::hooks::Action::SQLITE_UPDATE => Ok(Self::Update),
      rusqlite::hooks::Action::SQLITE_DELETE => Ok(Self::Delete),
      _ => Err(())
    }
  }
}


/// Application callback used to process changes to the database.
pub trait Hook {
  /// Called whenever a database change has been made within a transaction.
  ///
  /// At the point this method is called the change is not persistent yet.
  fn update(&self, action: Action, db: &str, table: &str, rowid: i64);

  fn commit(&self) -> bool;

  fn rollback(&self);
}

pub fn hook(conn: &Connection, cb: &Arc<dyn Hook + Send + Sync>) {
  let cb2 = Arc::clone(cb);
  conn.commit_hook(Some(move || cb2.commit()));

  let cb2 = Arc::clone(cb);
  conn.rollback_hook(Some(move || {
    cb2.rollback();
  }));

  let cb2 = Arc::clone(cb);
  conn.update_hook(Some(move |action, dbname: &str, table: &str, rowid| {
    let Ok(action) = Action::try_from(action) else {
      // Just ignore unknown actions
      return;
    };
    cb2.update(action, dbname, table, rowid);
  }));
}

// vim: set ft=rust et sw=2 ts=2 sts=2 cinoptions=2 tw=79 :
Changes to src/utils.rs.
1
2
3
4
5
6
7
8
9
10
11



12
13
14
15
16
17
18
19




20
21
22
23
24
25
26
27
28
29
30
31
32
33

34
35
36
37
38
39
40
41
42


43

44

45


46
47
48
49
50
51
//! Utility functions around SQL commands.

use rusqlite::{params, Connection};

#[cfg(feature = "tpool")]
use threadpool::ThreadPool;

#[cfg(feature = "tpool")]
use super::ConnPool;

/// Return the number of pages in the freelist.



pub fn freelist_count(conn: &Connection) -> Result<usize, rusqlite::Error> {
  conn.query_row_and_then("PRAGMA freelist_count;'", [], |row| row.get(0))
}

/// Run an incremental vacuum.
///
/// If `n` is `None` the entrire list of free pages will be processed.  If it
/// is `Some(n)` then only up to `n` pages will be processed.




pub fn incremental_vacuum(
  conn: &Connection,
  n: Option<usize>
) -> Result<(), rusqlite::Error> {
  if let Some(n) = n {
    conn.execute("PRAGMA incremental_vacuum(?);", params![n])
  } else {
    conn.execute("PRAGMA incremental_vacuum;", params![])
  }
  .map(|_| ())
}

#[cfg(feature = "tpool")]
#[cfg_attr(docsrs, doc(cfg(feature = "tpool")))]

pub fn pooled_incremental_vacuum(
  cpool: &ConnPool,
  tpool: &ThreadPool,
  n: Option<usize>
) -> swctx::WaitCtx<(), (), rusqlite::Error> {
  let (sctx, wctx) = swctx::mkpair();

  let conn = cpool.writer();



  tpool.execute(move || match conn.incremental_vacuum(n) {

    Ok(_) => sctx.set(()),

    Err(e) => sctx.fail(e)


  });

  wctx
}

// vim: set ft=rust et sw=2 ts=2 sts=2 cinoptions=2 tw=79 :











>
>
>








>
>
>
>














>









>
>

>
|
>
|
>
>






1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
//! Utility functions around SQL commands.

use rusqlite::{params, Connection};

#[cfg(feature = "tpool")]
use threadpool::ThreadPool;

#[cfg(feature = "tpool")]
use super::ConnPool;

/// Return the number of pages in the freelist.
///
/// # Errors
/// Returns [`rusqlite::Error`].
pub fn freelist_count(conn: &Connection) -> Result<usize, rusqlite::Error> {
  conn.query_row_and_then("PRAGMA freelist_count;'", [], |row| row.get(0))
}

/// Run an incremental vacuum.
///
/// If `n` is `None` the entrire list of free pages will be processed.  If it
/// is `Some(n)` then only up to `n` pages will be processed.
///
/// # Errors
/// Returns [`rusqlite::Error`].
#[allow(clippy::option_if_let_else)]
pub fn incremental_vacuum(
  conn: &Connection,
  n: Option<usize>
) -> Result<(), rusqlite::Error> {
  if let Some(n) = n {
    conn.execute("PRAGMA incremental_vacuum(?);", params![n])
  } else {
    conn.execute("PRAGMA incremental_vacuum;", params![])
  }
  .map(|_| ())
}

#[cfg(feature = "tpool")]
#[cfg_attr(docsrs, doc(cfg(feature = "tpool")))]
#[must_use]
pub fn pooled_incremental_vacuum(
  cpool: &ConnPool,
  tpool: &ThreadPool,
  n: Option<usize>
) -> swctx::WaitCtx<(), (), rusqlite::Error> {
  let (sctx, wctx) = swctx::mkpair();

  let conn = cpool.writer();

  // Kick off incremental vacuum on the thread pool.  Ignore any errors caused
  // by returning the results.
  tpool.execute(move || match conn.incremental_vacuum(n) {
    Ok(()) => {
      let _ = sctx.set(());
    }
    Err(e) => {
      let _ = sctx.fail(e);
    }
  });

  wctx
}

// vim: set ft=rust et sw=2 ts=2 sts=2 cinoptions=2 tw=79 :
Changes to src/wrconn.rs.
25
26
27
28
29
30
31


32
33
34
35
36
37




38
39
40
41
42

43
44
45
46
47

48
49
50
51
52

53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
  /// when it is dropped by [`Shared`] (which is why it must also hold a
  /// strong reference to `Shared`).
  pub(super) inner: ManuallyDrop<InnerWrConn>
}

impl WrConn {
  /// Add dirt to the writer connection.


  pub fn add_dirt(&mut self, weight: usize) {
    self.inner.dirt = self.inner.dirt.saturating_add(weight);
  }
}

impl WrConn {




  pub fn incremental_vacuum(
    &self,
    n: Option<usize>
  ) -> Result<(), rusqlite::Error> {
    if let Some(n) = n {

      self
        .inner
        .conn
        .execute("PRAGMA incremental_vacuum(?);", params![n])
    } else {

      self
        .inner
        .conn
        .execute("PRAGMA incremental_vacuum;", params![])
    }

    .map(|_| ())
  }
}

impl Deref for WrConn {
  type Target = Connection;

  #[inline(always)]
  fn deref(&self) -> &Connection {
    &self.inner.conn
  }
}

impl DerefMut for WrConn {
  #[inline(always)]
  fn deref_mut(&mut self) -> &mut Connection {
    &mut self.inner.conn
  }
}

impl Drop for WrConn {
  /// Return the write connection to the connection pool.







>
>






>
>
>
>




|
>
|
|
|
|
|
>
|
|
|
|
|
>







<






<







25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68

69
70
71
72
73
74

75
76
77
78
79
80
81
  /// when it is dropped by [`Shared`] (which is why it must also hold a
  /// strong reference to `Shared`).
  pub(super) inner: ManuallyDrop<InnerWrConn>
}

impl WrConn {
  /// Add dirt to the writer connection.
  // ToDo: broken-lint
  #[allow(clippy::missing_const_for_fn)]
  pub fn add_dirt(&mut self, weight: usize) {
    self.inner.dirt = self.inner.dirt.saturating_add(weight);
  }
}

impl WrConn {
  /// Run incremental vacuum.
  ///
  /// # Errors
  /// On error `rusqlite::Error` is returned.
  pub fn incremental_vacuum(
    &self,
    n: Option<usize>
  ) -> Result<(), rusqlite::Error> {
    n.map_or_else(
      || {
        self
          .inner
          .conn
          .execute("PRAGMA incremental_vacuum;", params![])
      },
      |n| {
        self
          .inner
          .conn
          .execute("PRAGMA incremental_vacuum(?);", params![n])
      }
    )
    .map(|_| ())
  }
}

impl Deref for WrConn {
  type Target = Connection;


  fn deref(&self) -> &Connection {
    &self.inner.conn
  }
}

impl DerefMut for WrConn {

  fn deref_mut(&mut self) -> &mut Connection {
    &mut self.inner.conn
  }
}

impl Drop for WrConn {
  /// Return the write connection to the connection pool.
Changes to www/changelog.md.
1
2


3
4












5
6











7
8








9
10













11





































12
13
14
15
16
17
18
# Change Log



## [Unreleased]













[Details](/vdiff?from=sqlsrv-0.5.0&to=trunk)












### Added









### Changed














### Removed






































---

## [0.5.0] - 2024-08-06

[Details](/vdiff?from=sqlsrv-0.4.0&to=sqlsrv-0.5.0)



>
>


>
>
>
>
>
>
>
>
>
>
>
>
|

>
>
>
>
>
>
>
>
>
>
>


>
>
>
>
>
>
>
>


>
>
>
>
>
>
>
>
>
>
>
>
>

>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>







1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
# Change Log

⚠️  indicates a breaking change.

## [Unreleased]

[Details](/vdiff?from=sqlsrv-0.10.0&to=trunk)

### Added

### Changed

### Removed

---

## [0.10.0] - 2025-06-06

[Details](/vdiff?from=sqlsrv-0.9.2&to=sqlsrv-0.10.0)

### Changed

- ⚠️ Updated `rusqlite` to `0.36.0`
- ⚠️ Updated `r2d2_sqlite` to `0.29.0`

---

## [0.9.2] - 2025-05-20

[Details](/vdiff?from=sqlsrv-0.9.1&to=sqlsrv-0.9.2)

### Added

- Re-export `r2d2_sqlite::SqliteConnectionManager`.

---

## [0.9.1] - 2025-05-18

[Details](/vdiff?from=sqlsrv-0.9.0&to=sqlsrv-0.9.1)

### Changed

- Move read-only connection pool above the read/write connection in internal
  struct, so that sqlite will clean up its wal files on drop.

---

## [0.9.0] - 2025-05-07

[Details](/vdiff?from=sqlsrv-0.8.0&to=sqlsrv-0.9.0)

### Changed

- `ConnPool` is `Clone`:able.

### Removed

- ⚠️ Removed `RunError`.  `ConnPool:run_ro()` now uses a `From<r2d2::Error>`
  bound on `E` instead.

---

## [0.8.0] - 2025-04-25

[Details](/vdiff?from=sqlsrv-0.7.0&to=sqlsrv-0.8.0)

### Changed

- ⚠️ Updated `rusqlite` to `0.35.0`
- ⚠️ Updated `r2d2_sqlite` to `0.28.0`
- Edition 2024

---

## [0.7.0] - 2025-03-13

[Details](/vdiff?from=sqlsrv-0.6.0&to=sqlsrv-0.7.0)

### Changed

- ⚠️ Updated `rusqlite` to version `0.34.0`.
- ⚠️ Updated `r2d2_sqlite` to version `0.27.0`.

---

## [0.6.0] - 2025-02-09

[Details](/vdiff?from=sqlsrv-0.5.0&to=sqlsrv-0.6.0)

### Changed

- ⚠️ Updated `rusqlite` to version `0.33.0`.
- ⚠️ Updated `r2d2_sqlite` to version `0.26.0`.

---

## [0.5.0] - 2024-08-06

[Details](/vdiff?from=sqlsrv-0.4.0&to=sqlsrv-0.5.0)

Changes to www/index.md.
22
23
24
25
26
27
28

29





















































30
The details of changes can always be found in the timeline, but for a
high-level view of changes between released versions there's a manually
maintained [Change Log](./changelog.md).


## Project status


This library is in _very_ early stages.





























































>
|
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>
>

22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
The details of changes can always be found in the timeline, but for a
high-level view of changes between released versions there's a manually
maintained [Change Log](./changelog.md).


## Project status

This crate is being actively developed and maintained.

_sqlsrv_ is a _tracking crate_, meaning it tracks and re-exports other crates:
- `rusqlite`
- `r2d2`
- `r2d2_sqlite`

This also means it is unlikely to reach `1.0.0`.

### Version compatibility
<table>
  <thead>
    <tr>
      <th>sqlsrv</th><th>rusqlite</th><th>r2d2</th><th>r2d2_sqlite</th>
    </tr>
  </thead>
  <tbody>
    <tr>
      <td>0.5.x</td>
      <td>0.32.1</td>
      <td>0.8.10</td>
      <td>0.25.0</td>
    </tr>
    <tr>
      <td>0.6.x</td>
      <td>0.33.0</td>
      <td>0.8.10</td>
      <td>0.26.0</td>
    </tr>
    <tr>
      <td>0.7.x</td>
      <td>0.34.0</td>
      <td>0.8.10</td>
      <td>0.27.0</td>
    </tr>
    <tr>
      <td>0.8.x</td>
      <td>0.35.0</td>
      <td>0.8.10</td>
      <td>0.28.0</td>
    </tr>
    <tr>
      <td>0.9.x</td>
      <td>0.35.0</td>
      <td>0.8.10</td>
      <td>0.28.0</td>
    </tr>
    <tr>
      <td>0.10.x</td>
      <td>0.36.0</td>
      <td>0.8.10</td>
      <td>0.29.0</td>
    </tr>
  </tbody>
</table>