sqlsrv

Check-in Differences
Login

Check-in Differences

Many hyperlinks are disabled.
Use anonymous login to enable hyperlinks.

Difference From sqlsrv-0.5.0 To trunk

2024-09-18
09:45
Move clippy settings to Cargo.toml and add a bacon.toml config. Leaf check-in: d778cb71d8 user: jan tags: trunk
2024-09-10
02:45
Updated swctx. Pedantic clippy updates. check-in: a561ae41d5 user: jan tags: trunk
2024-08-06
16:49
Release maintenance. check-in: f6bf902f10 user: jan tags: sqlsrv-0.5.0, trunk
16:46
Update changelog. check-in: 9355c6e401 user: jan tags: trunk

Changes to Cargo.toml.

9
10
11
12
13
14
15

16
17
18

19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38

39
40
41
42
43
44
45
46
47
48
49
50






9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39

40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58







+



+



















-
+












+
+
+
+
+
+
repository = "https://repos.qrnch.tech/pub/sqlsrv"
description = "Utility functions for managing SQLite connections in a server application."
rust-version = "1.56"
exclude = [
  ".fossil-settings",
  ".efiles",
  ".fslckout",
  "bacon.toml",
  "build_docs.sh",
  "examples",
  "www",
  "bacon.toml",
  "rustfmt.toml"
]

# https://doc.rust-lang.org/cargo/reference/manifest.html#the-badges-section
[badges]
maintenance = { status = "experimental" }

[features]
tpool = ["dep:swctx", "dep:threadpool"]

[dependencies]
parking_lot = { version = "0.12.3" }
r2d2 = { version = "0.8.10" }
r2d2_sqlite = { version = "0.25.0" }
# Need to add the `hooks` feature.  Unfortunately the version needs to be
# specified here.  It would be much more convenient if it could use the version
# from r2d2_sqlite.  Allegedly one can use the version "*", which apparently
# does not mean "latest", but have not yet confirmed this.
rusqlite = { version = "0.32.1", features = ["hooks"] }
swctx = { version = "0.2.2", optional = true }
swctx = { version = "0.3.0", optional = true }
threadpool = { version = "1.8.1", optional = true }

[dev-dependencies]
hex = { version = "0.4.3" }
rand = { version = "0.8.5" }
rusqlite = { version = "0.32.1", features = ["functions"] }
sha2 = { version = "0.10.8" }

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs", "--generate-link-to-definition"]

[lints.clippy]
all = { level = "deny", priority = -1 }
pedantic = { level = "warn", priority = -1 }
nursery = { level = "warn", priority = -1 }
cargo = { level = "warn", priority = -1 }

Added bacon.toml.










































































































1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
# This is a configuration file for the bacon tool
#
# Bacon repository: https://github.com/Canop/bacon
# Complete help on configuration: https://dystroy.org/bacon/config/
# You can also check bacon's own bacon.toml file
#  as an example: https://github.com/Canop/bacon/blob/main/bacon.toml

# For information about clippy lints, see:
# https://github.com/rust-lang/rust-clippy/blob/master/README.md

#default_job = "check"
default_job = "clippy-all"

[jobs.check]
command = ["cargo", "check", "--color", "always"]
need_stdout = false

[jobs.check-all]
command = ["cargo", "check", "--all-targets", "--color", "always"]
need_stdout = false

# Run clippy on the default target
[jobs.clippy]
command = [
    "cargo", "clippy",
    "--color", "always",
]
need_stdout = false

# Run clippy on all targets
# To disable some lints, you may change the job this way:
#    [jobs.clippy-all]
#    command = [
#        "cargo", "clippy",
#        "--all-targets",
#        "--color", "always",
#    	 "--",
#    	 "-A", "clippy::bool_to_int_with_if",
#    	 "-A", "clippy::collapsible_if",
#    	 "-A", "clippy::derive_partial_eq_without_eq",
#    ]
# need_stdout = false
[jobs.clippy-all]
command = [
    "cargo", "clippy",
    "--all-targets",
    "--color", "always",
]
need_stdout = false

# This job lets you run
# - all tests: bacon test
# - a specific test: bacon test -- config::test_default_files
# - the tests of a package: bacon test -- -- -p config
[jobs.test]
command = [
    "cargo", "test", "--color", "always",
    "--", "--color", "always", # see https://github.com/Canop/bacon/issues/124
]
need_stdout = true

[jobs.doc]
command = ["cargo", "doc", "--color", "always", "--no-deps"]
need_stdout = false

# If the doc compiles, then it opens in your browser and bacon switches
# to the previous job
[jobs.doc-open]
command = ["cargo", "doc", "--color", "always", "--no-deps", "--open"]
need_stdout = false
on_success = "back" # so that we don't open the browser at each change

# You can run your application and have the result displayed in bacon,
# *if* it makes sense for this crate.
# Don't forget the `--color always` part or the errors won't be
# properly parsed.
# If your program never stops (eg a server), you may set `background`
# to false to have the cargo run output immediately displayed instead
# of waiting for program's end.
[jobs.run]
command = [
    "cargo", "run",
    "--color", "always",
    # put launch parameters for your program behind a `--` separator
]
need_stdout = true
allow_warnings = true
background = true

# This parameterized job runs the example of your choice, as soon
# as the code compiles.
# Call it as
#    bacon ex -- my-example
[jobs.ex]
command = ["cargo", "run", "--color", "always", "--example"]
need_stdout = true
allow_warnings = true

# You may define here keybindings that would be specific to
# a project, for example a shortcut to launch a specific job.
# Shortcuts to internal functions (scrolling, toggling, etc.)
# should go in your personal global prefs.toml file instead.
[keybindings]
# alt-m = "job:my-job"
c = "job:clippy-all" # comment this to have 'c' run clippy on only the default target

Changes to examples/simple.rs.

48
49
50
51
52
53
54

55



56
57
58
59
60
61
62
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66







+

+
+
+







    }

    Ok(())
  }
}


#[allow(clippy::too_many_lines)]
fn main() {
  const SQL_LOOKUP_TABLE: &str = "SELECT EXISTS(SELECT 1 FROM sqlite_master \
                                  WHERE type='table' AND name=?);";

  let schema = Box::new(Schema {});
  #[allow(unused_mut)]
  let mut bldr = sqlsrv::Builder::new(schema)
    .incremental_autoclean(10, None)
    .reg_scalar_fn(RegOn::Both(register_genuid))
    .reg_scalar_fn(RegOn::RO(register_pwhash));

125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
129
130
131
132
133
134
135



136
137
138
139
140
141
142







-
-
-







);",
        &[] as &[&dyn ToSql]
      )
      .unwrap();
  }


  const SQL_LOOKUP_TABLE: &str = "SELECT EXISTS(SELECT 1 FROM sqlite_master \
                                  WHERE type='table' AND name=?);";

  {
    let rconn = connpool.reader().unwrap();
    let mut stmt = rconn.prepare_cached(SQL_LOOKUP_TABLE).unwrap();
    let have = stmt
      .query_row(params!("stuff"), |row| row.get::<usize, bool>(0))
      .unwrap();
    assert!(have);
189
190
191
192
193
194
195
196

197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215



216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
190
191
192
193
194
195
196

197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231



232
233
234
235
236
237
238







-
+



















+
+
+












-
-
-









  #[cfg(feature = "tpool")]
  tpool.join();
}


/// Register a pwhash() SQL function which returns a hex-encoded version of
/// Register a `pwhash()` SQL function that returns a hex-encoded version of
/// the SHA256 hash of the input string.
fn register_pwhash(db: &Connection) -> Result<(), rusqlite::Error> {
  db.create_scalar_function(
    "pwhash",
    1,
    FunctionFlags::SQLITE_UTF8 | FunctionFlags::SQLITE_DETERMINISTIC,
    move |ctx| {
      assert_eq!(ctx.len(), 1, "called with unexpected number of arguments");
      let s = ctx.get::<String>(0)?;

      let mut hasher = Sha256::new();
      hasher.update(s.as_bytes());
      let result = hasher.finalize();

      Ok(hex::encode(&result[..]))
    }
  )
}

const CHARSET: &[u8] =
  b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";


/// Register an SQL function called `genuid` that will generate a random
/// (hopefully unique) identifier of a requested length.
fn register_genuid(db: &Connection) -> Result<(), rusqlite::Error> {
  db.create_scalar_function(
    "genuid",
    1,
    FunctionFlags::SQLITE_UTF8 | FunctionFlags::SQLITE_DETERMINISTIC,
    move |ctx| {
      assert_eq!(ctx.len(), 1, "called with unexpected number of arguments");
      let len = ctx.get::<usize>(0)?;

      const CHARSET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\
                            abcdefghijklmnopqrstuvwxyz\
                            0123456789-_";

      let mut rng = rand::thread_rng();

      let id: String = (0..len)
        .map(|_| {
          let idx = rng.gen_range(0..CHARSET.len());
          CHARSET[idx] as char

Changes to src/err.rs.

1
2
3
4
5
6
7
8
9
10
11
12

13
14


15
16
17
18
19
20
21
22
23

24
25
26

27
28
29

30
31
32

33
34
35
36
37
38
39
40
41

42
43
44
45
46
47

48
49
50
51
52
53

54
55
56
57
1
2
3
4
5
6
7
8
9
10
11
12
13


14
15
16
17
18
19
20
21
22
23

24
25
26

27
28
29

30
31
32

33
34
35
36
37
38
39
40
41

42
43
44
45
46
47

48
49
50
51
52
53

54
55
56
57
58












+
-
-
+
+








-
+


-
+


-
+


-
+








-
+





-
+





-
+




use std::{fmt, io};

/// Errors that are returned by sqlsrv.
#[derive(Debug)]
pub enum Error {
  BadFormat(String),
  IO(String),
  R2D2(r2d2::Error),
  Sqlite(rusqlite::Error)
}

impl Error {
  #[allow(clippy::needless_pass_by_value)]
  pub fn bad_format<S: ToString>(s: S) -> Self {
    Error::BadFormat(s.to_string())
  pub fn bad_format(s: impl ToString) -> Self {
    Self::BadFormat(s.to_string())
  }
}

impl std::error::Error for Error {}

impl fmt::Display for Error {
  fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    match self {
      Error::BadFormat(s) => {
      Self::BadFormat(s) => {
        write!(f, "Bad format error; {}", s)
      }
      Error::IO(s) => {
      Self::IO(s) => {
        write!(f, "I/O error; {}", s)
      }
      Error::R2D2(ref err) => {
      Self::R2D2(ref err) => {
        write!(f, "r2d2 error; {}", err)
      }
      Error::Sqlite(ref err) => {
      Self::Sqlite(ref err) => {
        write!(f, "Sqlite error; {}", err)
      }
    }
  }
}

impl From<io::Error> for Error {
  fn from(err: io::Error) -> Self {
    Error::IO(err.to_string())
    Self::IO(err.to_string())
  }
}

impl From<r2d2::Error> for Error {
  fn from(err: r2d2::Error) -> Self {
    Error::R2D2(err)
    Self::R2D2(err)
  }
}

impl From<rusqlite::Error> for Error {
  fn from(err: rusqlite::Error) -> Self {
    Error::Sqlite(err)
    Self::Sqlite(err)
  }
}

// vim: set ft=rust et sw=2 ts=2 sts=2 cinoptions=2 tw=79 :

Changes to src/lib.rs.



1
2
3
4
5
6
7
1
2
3
4
5
6
7
8
9
+
+







#![allow(clippy::doc_markdown)]

//! A library for implementing an in-process SQLite database server.
//!
//! # Connection pooling
//! sqlsrv implements connection pooling that reflects the concurrency model
//! of SQLite:  It supports multiple parallel readers, but only one writer.
//!
//! # Thread pooling
58
59
60
61
62
63
64
65

66
67
68
69
70
71
72
60
61
62
63
64
65
66

67
68
69
70
71
72
73
74







-
+







pub use wrconn::WrConn;


/// Wrapper around a SQL functions registration callback used to select which
/// connection types to perform registrations on.
pub enum RegOn<F>
where
  F: Fn(&Connection) -> Result<(), rusqlite::Error> + Sync + Sync
  F: Fn(&Connection) -> Result<(), rusqlite::Error>
{
  /// This registration callback should only be called for read-only
  /// connections.
  RO(F),

  /// This registration callback should only be called for the read/write
  /// connections.
97
98
99
100
101
102
103



104
105
106
107
108
109
110
111
112



113
114
115
116
117
118
119
120
121
122



123
124
125
126
127
128
129
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140







+
+
+









+
+
+










+
+
+







  /// initialization.
  ///
  /// While this method can be used to perform schema upgrades, there are two
  /// specialized methods (`need_upgrade()` and `upgrade()`) that can be used
  /// for this purpose instead.
  ///
  /// The default implementation does nothing but returns `Ok(())`.
  ///
  /// # Errors
  /// Application-specific error.
  #[allow(unused_variables)]
  fn init(&self, conn: &mut Connection, newdb: bool) -> Result<(), Error> {
    Ok(())
  }

  /// Application callback used to determine if the database schema is out of
  /// date and needs to be updated.
  ///
  /// The default implementation does nothing but returns `Ok(false)`.
  ///
  /// # Errors
  /// Application-specific error.
  #[allow(unused_variables)]
  fn need_upgrade(&self, conn: &Connection) -> Result<bool, Error> {
    Ok(false)
  }

  /// Upgrade the database schema.
  ///
  /// This is called if [`SchemaMgr::need_upgrade()`] returns `Ok(true)`.
  ///
  /// The default implementation does nothing but returns `Ok(())`.
  ///
  /// # Errors
  /// Application-specific error.
  #[allow(unused_variables)]
  fn upgrade(&self, conn: &mut Connection) -> Result<(), Error> {
    Ok(())
  }
}


164
165
166
167
168
169
170
171

172
173
174
175
176
177
178
175
176
177
178
179
180
181

182
183
184
185
186
187
188
189







-
+







    conn.pragma_update(None, "foreign_keys", "ON")?;

    for rf in &self.regfuncs {
      match rf {
        CbType::Ro(ref f) | CbType::Both(ref f) => {
          f(conn)?;
        }
        _ => {}
        CbType::Rw(_) => {}
      }
    }

    Ok(())
  }

  fn on_release(&self, _conn: rusqlite::Connection) {}
205
206
207
208
209
210
211



212

213
214
215
216
217
218
219
216
217
218
219
220
221
222
223
224
225

226
227
228
229
230
231
232
233







+
+
+
-
+







      conn.pragma_update(None, "auto_vacuum", "INCREMENTAL")?;
    }

    Ok(conn)
  }

  /// Run a full vacuum.
  ///
  /// This is an internal function that may be called by `build()` if a full
  /// vacuum has been requested.
  fn full_vacuum(&self, conn: &Connection) -> Result<(), rusqlite::Error> {
  fn full_vacuum(conn: &Connection) -> Result<(), rusqlite::Error> {
    conn.execute("VACUUM;", params![])?;
    Ok(())
  }

  fn create_ro_pool(
    &self,
    fname: &Path,
234
235
236
237
238
239
240

241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256

257

258
259
260
261
262
263
264
265
266
267
268
269
270
271
272

273

274
275
276
277
278
279
280
281
282
283
284
285
286
287
288

289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306

307

308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324

325
326
327
328
329
330
331
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272

273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289

290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325

326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351







+
















+
-
+















+
-
+















+


















+
-
+

















+








impl Builder {
  /// Create a new `Builder` for constructing a [`ConnPool`] object.
  ///
  /// Default to not run a full vacuum of the database on initialization and
  /// create 2 read-only connections for the pool.
  /// No worker thread pool will be used.
  #[must_use]
  pub fn new(schmgr: Box<dyn SchemaMgr>) -> Self {
    Self {
      schmgr,
      full_vacuum: false,
      max_readers: 2,
      autoclean: None,
      hook: None,
      regfuncs: None,
      #[cfg(feature = "tpool")]
      tpool: None
    }
  }

  /// Trigger a full vacuum when initializing the connection pool.
  ///
  /// Operates on an owned `Builder` object.
  #[must_use]
  pub fn init_vacuum(mut self) -> Self {
  pub const fn init_vacuum(mut self) -> Self {
    self.full_vacuum = true;
    self
  }

  /// Trigger a full vacuum when initializing the connection pool.
  ///
  /// Operates on a borrowed `Builder` object.
  pub fn init_vacuum_r(&mut self) -> &mut Self {
    self.full_vacuum = true;
    self
  }

  /// Set maximum number of readers in the connection pool.
  ///
  /// Operates on an owned `Builder` object.
  #[must_use]
  pub fn max_readers(mut self, n: usize) -> Self {
  pub const fn max_readers(mut self, n: usize) -> Self {
    self.max_readers = n;
    self
  }

  /// Set maximum number of readers in the connection pool.
  ///
  /// Operates on a borrowed `Builder` object.
  pub fn max_readers_r(&mut self, n: usize) -> &mut Self {
    self.max_readers = n;
    self
  }

  /// Request that a "raw" update hook be added to the writer connection.
  ///
  /// Operates on an owned `Builder` object.
  #[must_use]
  pub fn hook(mut self, hook: Arc<dyn Hook + Send + Sync>) -> Self {
    self.hook = Some(hook);
    self
  }

  /// Request that a "raw" update hook be added to the writer connection.
  ///
  /// Operates on a borrowed `Builder` object.
  pub fn hook_r(&mut self, hook: Arc<dyn Hook + Send + Sync>) -> &mut Self {
    self.hook = Some(hook);
    self
  }

  /// Enable incremental autovacuum.
  ///
  /// `dirt_watermark` is used to set what amount of "dirt" is required in
  /// order to trigger an autoclean.  `nfree` is the number of blocks in the
  /// freelists to process each time the autoclean is run.
  #[must_use]
  pub fn incremental_autoclean(
  pub const fn incremental_autoclean(
    mut self,
    dirt_watermark: usize,
    npages: Option<NonZeroUsize>
  ) -> Self {
    self.autoclean = Some(AutoClean {
      dirt_threshold: dirt_watermark,
      npages
    });
    self
  }

  /// Add a callback to register one or more scalar SQL functions.
  ///
  /// The closure should be wrapped in a `RegOn::RO()` if the function should
  /// only be registered on read-only connections.  `RegOn::RW()` is used to
  /// register the function on the read/write connection.  Use `RegOn::Both()`
  /// to register in both read-only and the read/write connection.
  #[must_use]
  pub fn reg_scalar_fn<F>(mut self, r: RegOn<F>) -> Self
  where
    F: Fn(&Connection) -> Result<(), rusqlite::Error> + Send + Sync + 'static
  {
    self.reg_scalar_fn_r(r);
    self
  }
358
359
360
361
362
363
364

365
366
367
368
369
370
371
372
373
374
375
376



377
378
379
380
381
382
383
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407







+












+
+
+







          .push(CbType::Both(Box::new(f)));
      }
    }
    self
  }

  #[cfg(feature = "tpool")]
  #[must_use]
  pub fn thread_pool(mut self, tpool: Arc<ThreadPool>) -> Self {
    self.tpool = Some(tpool);
    self
  }

  #[cfg(feature = "tpool")]
  pub fn thread_pool_r(&mut self, tpool: Arc<ThreadPool>) -> &mut Self {
    self.tpool = Some(tpool);
    self
  }

  /// Construct a connection pool.
  ///
  /// # Errors
  /// [`Errors::Sqlite`] will be returned if a database error occurred.
  pub fn build<P>(mut self, fname: P) -> Result<ConnPool, Error>
  where
    P: AsRef<Path>
  {
    // ToDo: Use std::path::absolute() once stabilized
    let fname = fname.as_ref();
    let db_exists = fname.exists();
399
400
401
402
403
404
405
406

407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426

427
428
429
430
431
432
433

434
435
436
437
438
439
440
423
424
425
426
427
428
429

430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449

450
451
452
453
454
455
456

457
458
459
460
461
462
463
464







-
+



















-
+






-
+








    // Call SQL function registration callbacks for read/write connection.
    for rf in &regfuncs {
      match rf {
        CbType::Rw(ref f) | CbType::Both(ref f) => {
          f(&conn)?;
        }
        _ => {}
        CbType::Ro(_) => {}
      }
    }

    //
    // Perform schema initialization.
    //
    // This must be done after auto_vacuum is set, because auto_vacuum requires
    // configuration before any tables have been created.
    // See: https://www.sqlite.org/pragma.html#pragma_auto_vacuum
    //
    self.schmgr.init(&mut conn, !db_exists)?;
    if self.schmgr.need_upgrade(&conn)? {
      self.schmgr.upgrade(&mut conn)?;
    }

    //
    // Perform a full vacuum if requested to do so.
    //
    if self.full_vacuum {
      self.full_vacuum(&conn)?;
      Self::full_vacuum(&conn)?;
    }

    //
    // Register a callback hook
    //
    if let Some(ref hook) = self.hook {
      rawhook::hook(&conn, Arc::clone(hook));
      rawhook::hook(&conn, hook);
    }

    //
    // Set up connection pool for read-only connections.
    //
    let rpool = self.create_ro_pool(fname, regfuncs)?;

462
463
464
465
466
467
468



469

470
471
472
473
474
475
476
477
478
479
480

481

482
483
484


485
486
487
488
489
490
491
492
486
487
488
489
490
491
492
493
494
495

496
497
498
499
500
501
502
503
504
505
506
507
508

509



510
511

512
513
514
515
516
517
518







+
+
+
-
+











+
-
+
-
-
-
+
+
-







  ///
  /// Same as [`Builder::build()`], but register a change log callback on the
  /// writer as well.
  ///
  /// This method should not be called if the application has requested to add
  /// a raw update hook.
  ///
  /// # Errors
  /// [`Errors::Sqlite`] is returned for database errors.
  ///
  /// # Panic
  /// # Panics
  /// This method will panic if a hook has been added to the Builder.
  pub fn build_with_changelog_hook<P, D, T>(
    mut self,
    fname: P,
    hook: Box<dyn ChangeLogHook<Database = D, Table = T> + Send>
  ) -> Result<ConnPool, Error>
  where
    P: AsRef<Path>,
    D: FromStr + Send + Sized + 'static,
    T: FromStr + Send + Sized + 'static
  {
    assert!(
    if self.hook.is_some() {
      self.hook.is_some(),
      panic!(
        "Can't build a connection pool with both a raw and changelog hook"
      );
      "Can't build a connection pool with both a raw and changelog hook"
    );
    }

    // ToDo: Use std::path::absolute() once stabilized
    let fname = fname.as_ref();
    let db_exists = fname.exists();

    //
    // Set up the read/write connection
505
506
507
508
509
510
511
512

513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533

534
535
536
537
538
539
540
531
532
533
534
535
536
537

538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558

559
560
561
562
563
564
565
566







-
+




















-
+








    // Call SQL function registration callbacks for read/write connection.
    for rf in &regfuncs {
      match rf {
        CbType::Rw(ref f) | CbType::Both(ref f) => {
          f(&conn)?;
        }
        _ => {}
        CbType::Ro(_) => {}
      }
    }


    //
    // Perform schema initialization.
    //
    // This must be done after auto_vacuum is set, because auto_vacuum requires
    // configuration before any tables have been created.
    // See: https://www.sqlite.org/pragma.html#pragma_auto_vacuum
    //
    self.schmgr.init(&mut conn, !db_exists)?;
    if self.schmgr.need_upgrade(&conn)? {
      self.schmgr.upgrade(&mut conn)?;
    }

    //
    // Perform a full vacuum if requested to do so.
    //
    if self.full_vacuum {
      self.full_vacuum(&conn)?;
      Self::full_vacuum(&conn)?;
    }

    //
    // Register a callback hook
    //
    changehook::hook(&conn, hook);

606
607
608
609
610
611
612

613
614
615
616
617




618
619
620
621
622
623
624
625
626
627


628
629
630
631
632
633
634
635

636
637
638
639
640
641
642
643
644
645
646
647

648
649

650
651
652
653
654
655
656
657
658
659
660
661
662





663
664
665
666
667
668
669
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665


666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681

682


683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705







+





+
+
+
+










+
+





-
-

+












+

-
+
-
-











+
+
+
+
+







}

impl ConnPool {
  /// Return the pool size.
  ///
  /// In effect, this is the size of the read-only pool plus one (for the
  /// read/write connection).
  #[must_use]
  pub fn size(&self) -> usize {
    (self.rpool.max_size() + 1) as usize
  }

  /// Acquire a read-only connection.
  ///
  /// # Errors
  /// [`r2d2::Errors`] will be returned if a read-only connection could not be
  /// acquired.
  pub fn reader(
    &self
  ) -> Result<PooledConnection<SqliteConnectionManager>, r2d2::Error> {
    self.rpool.get()
  }

  /// Acquire the read/write connection.
  ///
  /// If the writer is already taken, then block and wait for it to become
  /// available.
  #[must_use]
  #[allow(clippy::significant_drop_tightening)]
  pub fn writer(&self) -> WrConn {
    let mut g = self.sh.inner.lock();
    let conn = loop {
      if let Some(conn) = g.conn.take() {
        break conn;
      } else {
        self.sh.signal.wait(&mut g);
      }
      self.sh.signal.wait(&mut g);
    };

    WrConn {
      sh: Arc::clone(&self.sh),
      inner: ManuallyDrop::new(conn)
    }
  }

  /// Attempt to acquire the writer connection.
  ///
  /// Returns `Some(conn)` if the writer connection was available at the time
  /// of the request.  Returns `None` if the writer has already been taken.
  #[must_use]
  pub fn try_writer(&self) -> Option<WrConn> {
    let mut g = self.sh.inner.lock();
    let conn = self.sh.inner.lock().conn.take()?;
    let conn = g.conn.take()?;

    Some(WrConn {
      sh: Arc::clone(&self.sh),
      inner: ManuallyDrop::new(conn)
    })
  }
}


/// Special queries.
impl ConnPool {
  /// Return the number of unused pages.
  ///
  /// # Errors
  /// [`Error::R2D2`] indicates that it wasn't possible to acquire a read-only
  /// connection from the connection pool.  [`Error::Sqlite`] means it was not
  /// possible to query the free page list count.
  pub fn freelist_count(&self) -> Result<usize, Error> {
    Ok(self.reader()?.query_row_and_then(
      "PRAGMA freelist_count;'",
      [],
      |row| row.get(0)
    )?)
  }
694
695
696
697
698
699
700




701
702
703
704
705
706
707
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747







+
+
+
+








    // Run caller-provided closure.  On error map error to RunError::App().
    f(&conn).map_err(|e| RunError::App(e))
  }

  /// Run a read-only database operation on a thread.
  ///
  /// # Errors
  /// [`r2d2::Error`] is returned if it wasn't possible to acquire a read-only
  /// connection from the connection pool.
  ///
  /// # Panics
  /// A thread pool must be associated with the [`ConnPool`] or this method
  /// will panic.
  #[cfg(feature = "tpool")]
  #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))]
  pub fn run_ro_thrd<F>(&self, f: F) -> Result<(), r2d2::Error>
  where
724
725
726
727
728
729
730




731
732
733
734
735
736
737
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781







+
+
+
+







  /// receive the `Result<T, E>` of the supplied closure using a
  /// one-shot channel.
  ///
  /// The supplied closure in `f` should return a `Result<T, E>` where the `Ok`
  /// case will be passed as a "set" value through the `swctx` channel, and the
  /// `Err` case will be passed as a "fail" value.
  ///
  /// # Errors
  /// [`r2d2::Error`] is returned if it wasn't possible to acquire a read-only
  /// connection from the connection pool.
  ///
  /// # Panics
  /// A thread pool must be associated with the [`ConnPool`] or this method
  /// will panic.
  #[cfg(feature = "tpool")]
  #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))]
  pub fn run_ro_thrd_result<T, E, F>(
    &self,
746
747
748
749
750
751
752

753
754
755






756
757
758
759
760
761
762
763
764



765
766
767
768
769
770
771
790
791
792
793
794
795
796
797
798


799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823







+

-
-
+
+
+
+
+
+









+
+
+







      panic!("ConnPool does to have a thread pool");
    };

    let conn = self.reader()?;

    let (sctx, wctx) = swctx::mkpair();

    // Ignore errors relating to pass the results back
    tpool.execute(move || match f(&conn) {
      Ok(t) => sctx.set(t),
      Err(e) => sctx.fail(e)
      Ok(t) => {
        let _ = sctx.set(t);
      }
      Err(e) => {
        let _ = sctx.fail(e);
      }
    });

    Ok(wctx)
  }
}

/// Read/Write connection processing.
impl ConnPool {
  /// Run a read/write database operation.
  ///
  /// # Errors
  /// Returns an application-specific type `E` on error.
  pub fn run_rw<T, E, F>(&self, f: F) -> Result<T, E>
  where
    T: Send + 'static,
    E: fmt::Debug + Send + 'static,
    F: FnOnce(&mut WrConn) -> Result<T, E> + Send + 'static
  {
    let mut conn = self.writer();
824
825
826
827
828
829
830
831
832






833
834
835
836
837
838
839
840
841
842

843
844
845
846
847
848
849
850
851
876
877
878
879
880
881
882


883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908







-
-
+
+
+
+
+
+










+









    };

    let mut conn = self.writer();

    let (sctx, wctx) = swctx::mkpair();

    tpool.execute(move || match f(&mut conn) {
      Ok(t) => sctx.set(t),
      Err(e) => sctx.fail(e)
      Ok(t) => {
        let _ = sctx.set(t);
      }
      Err(e) => {
        let _ = sctx.fail(e);
      }
    });

    wctx
  }
}


impl ConnPool {
  #[cfg(feature = "tpool")]
  #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))]
  #[must_use]
  pub fn incremental_vacuum(
    &self,
    n: Option<usize>
  ) -> swctx::WaitCtx<(), (), rusqlite::Error> {
    self.run_rw_thrd_result(move |conn| conn.incremental_vacuum(n))
  }
}

// vim: set ft=rust et sw=2 ts=2 sts=2 cinoptions=2 tw=79 :

Changes to src/rawhook.rs.

11
12
13
14
15
16
17
18
19
20



21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40


41
42
43

44
45
46
47
48

49
50
51
52
53
54
55
56
57
58
11
12
13
14
15
16
17



18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38


39
40
41
42

43
44
45
46
47

48
49
50
51
52
53
54
55
56
57
58







-
-
-
+
+
+


















-
-
+
+


-
+




-
+










}

impl TryFrom<rusqlite::hooks::Action> for Action {
  type Error = ();

  fn try_from(action: rusqlite::hooks::Action) -> Result<Self, Self::Error> {
    match action {
      rusqlite::hooks::Action::SQLITE_INSERT => Ok(Action::Insert),
      rusqlite::hooks::Action::SQLITE_UPDATE => Ok(Action::Update),
      rusqlite::hooks::Action::SQLITE_DELETE => Ok(Action::Delete),
      rusqlite::hooks::Action::SQLITE_INSERT => Ok(Self::Insert),
      rusqlite::hooks::Action::SQLITE_UPDATE => Ok(Self::Update),
      rusqlite::hooks::Action::SQLITE_DELETE => Ok(Self::Delete),
      _ => Err(())
    }
  }
}


/// Application callback used to process changes to the database.
pub trait Hook {
  /// Called whenever a database change has been made within a transaction.
  ///
  /// At the point this method is called the change is not persistent yet.
  fn update(&self, action: Action, db: &str, table: &str, rowid: i64);

  fn commit(&self) -> bool;

  fn rollback(&self);
}

pub fn hook(conn: &Connection, cb: Arc<dyn Hook + Send + Sync>) {
  let cb2 = Arc::clone(&cb);
pub fn hook(conn: &Connection, cb: &Arc<dyn Hook + Send + Sync>) {
  let cb2 = Arc::clone(cb);
  conn.commit_hook(Some(move || cb2.commit()));

  let cb2 = Arc::clone(&cb);
  let cb2 = Arc::clone(cb);
  conn.rollback_hook(Some(move || {
    cb2.rollback();
  }));

  let cb2 = Arc::clone(&cb);
  let cb2 = Arc::clone(cb);
  conn.update_hook(Some(move |action, dbname: &str, table: &str, rowid| {
    let Ok(action) = Action::try_from(action) else {
      // Just ignore unknown actions
      return;
    };
    cb2.update(action, dbname, table, rowid);
  }));
}

// vim: set ft=rust et sw=2 ts=2 sts=2 cinoptions=2 tw=79 :

Changes to src/utils.rs.

1
2
3
4
5
6
7
8
9
10
11



12
13
14
15
16
17
18
19




20
21
22
23
24
25
26
27
28
29
30
31
32
33

34
35
36
37
38
39
40
41
42


43

44
45





46
47
48
49
50
51
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54


55
56
57
58
59
60
61
62
63
64
65











+
+
+








+
+
+
+














+









+
+

+
-
-
+
+
+
+
+






//! Utility functions around SQL commands.

use rusqlite::{params, Connection};

#[cfg(feature = "tpool")]
use threadpool::ThreadPool;

#[cfg(feature = "tpool")]
use super::ConnPool;

/// Return the number of pages in the freelist.
///
/// # Errors
/// Returns [`rusqlite::Error`].
pub fn freelist_count(conn: &Connection) -> Result<usize, rusqlite::Error> {
  conn.query_row_and_then("PRAGMA freelist_count;'", [], |row| row.get(0))
}

/// Run an incremental vacuum.
///
/// If `n` is `None` the entrire list of free pages will be processed.  If it
/// is `Some(n)` then only up to `n` pages will be processed.
///
/// # Errors
/// Returns [`rusqlite::Error`].
#[allow(clippy::option_if_let_else)]
pub fn incremental_vacuum(
  conn: &Connection,
  n: Option<usize>
) -> Result<(), rusqlite::Error> {
  if let Some(n) = n {
    conn.execute("PRAGMA incremental_vacuum(?);", params![n])
  } else {
    conn.execute("PRAGMA incremental_vacuum;", params![])
  }
  .map(|_| ())
}

#[cfg(feature = "tpool")]
#[cfg_attr(docsrs, doc(cfg(feature = "tpool")))]
#[must_use]
pub fn pooled_incremental_vacuum(
  cpool: &ConnPool,
  tpool: &ThreadPool,
  n: Option<usize>
) -> swctx::WaitCtx<(), (), rusqlite::Error> {
  let (sctx, wctx) = swctx::mkpair();

  let conn = cpool.writer();

  // Kick off incremental vacuum on the thread pool.  Ignore any errors caused
  // by returning the results.
  tpool.execute(move || match conn.incremental_vacuum(n) {
    Ok(()) => {
    Ok(_) => sctx.set(()),
    Err(e) => sctx.fail(e)
      let _ = sctx.set(());
    }
    Err(e) => {
      let _ = sctx.fail(e);
    }
  });

  wctx
}

// vim: set ft=rust et sw=2 ts=2 sts=2 cinoptions=2 tw=79 :

Changes to src/wrconn.rs.

31
32
33
34
35
36
37




38
39
40
41
42
43
44
45
46
47
48
49
50
51
52














53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45











46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66

67
68
69
70
71
72

73
74
75
76
77
78
79







+
+
+
+




-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+







-






-







  /// Add dirt to the writer connection.
  pub fn add_dirt(&mut self, weight: usize) {
    self.inner.dirt = self.inner.dirt.saturating_add(weight);
  }
}

impl WrConn {
  /// Run incremental vacuum.
  ///
  /// # Errors
  /// On error `rusqlite::Error` is returned.
  pub fn incremental_vacuum(
    &self,
    n: Option<usize>
  ) -> Result<(), rusqlite::Error> {
    if let Some(n) = n {
      self
        .inner
        .conn
        .execute("PRAGMA incremental_vacuum(?);", params![n])
    } else {
      self
        .inner
        .conn
        .execute("PRAGMA incremental_vacuum;", params![])
    }
    n.map_or_else(
      || {
        self
          .inner
          .conn
          .execute("PRAGMA incremental_vacuum;", params![])
      },
      |n| {
        self
          .inner
          .conn
          .execute("PRAGMA incremental_vacuum(?);", params![n])
      }
    )
    .map(|_| ())
  }
}

impl Deref for WrConn {
  type Target = Connection;

  #[inline(always)]
  fn deref(&self) -> &Connection {
    &self.inner.conn
  }
}

impl DerefMut for WrConn {
  #[inline(always)]
  fn deref_mut(&mut self) -> &mut Connection {
    &mut self.inner.conn
  }
}

impl Drop for WrConn {
  /// Return the write connection to the connection pool.

Changes to www/changelog.md.

1


2
3
4
5
6
7
8
1
2
3
4
5
6
7
8
9
10

+
+







# Change Log

⚠️  indicates a breaking change.

## [Unreleased]

[Details](/vdiff?from=sqlsrv-0.5.0&to=trunk)

### Added