sqlsrv

Check-in Differences

Difference From sqlsrv-0.2.0 To sqlsrv-0.3.0

2024-02-20 16:27  Updated rusqlite and r2d2_sqlite. check-in: f7cfffaac0 user: jan tags: sqlsrv-0.4.0, trunk
2024-02-10 14:36  Release maintenance. check-in: 5bc22c4333 user: jan tags: sqlsrv-0.3.0, trunk
2024-02-10 14:18  Rework thread pooling to allow using a shared thread pool. Also reworked the database operation wrappers. check-in: 46faa310c8 user: jan tags: trunk
2024-01-28 19:22  Document RegOn. check-in: 120fc8ace0 user: jan tags: trunk
2024-01-28 19:06  Release maintenance. check-in: 8a3250a1c4 user: jan tags: sqlsrv-0.2.0, trunk
2024-01-28 18:58  Document features and how to build docs. check-in: 7c6bc3a949 user: jan tags: trunk

Changes to .efiles.

sqlsrv-0.2.0 (lines 1-10):
Cargo.toml
README.md
www/index.md
www/changelog.md
src/err.rs
src/lib.rs
src/wrconn.rs
src/rawhook.rs
src/changehook.rs

examples/simple.rs

sqlsrv-0.3.0 (lines 1-11):
Cargo.toml
README.md
www/index.md
www/changelog.md
src/err.rs
src/lib.rs
src/wrconn.rs
src/rawhook.rs
src/changehook.rs
src/utils.rs
examples/simple.rs

Changes to Cargo.toml.

sqlsrv-0.2.0 (lines 1-29):
[package]
name = "sqlsrv"
version = "0.2.0"
edition = "2021"
license = "0BSD"
categories = [ "database" ]
keywords = [ "sqlite", "server" ]
repository = "https://repos.qrnch.tech/pub/sqlsrv"
description = "Utility functions for managing SQLite connections in a server application."
rust-version = "1.56"
exclude = [
  ".fossil-settings",
  ".efiles",
  ".fslckout",
  "build_docs.sh",
  "examples",
  "www",
  "rustfmt.toml"
]

[features]
default = ["tpool"]
tpool = ["dep:swctx", "dep:threadpool"]

[dependencies]
parking_lot = { version = "0.12.1" }
r2d2 = { version = "0.8.10" }
r2d2_sqlite = { version = "0.23.0" }
rusqlite = { version = "0.30.0", features = ["hooks"] }


sqlsrv-0.3.0 (lines 1-28):
[package]
name = "sqlsrv"
version = "0.3.0"
edition = "2021"
license = "0BSD"
categories = [ "database" ]
keywords = [ "sqlite", "server" ]
repository = "https://repos.qrnch.tech/pub/sqlsrv"
description = "Utility functions for managing SQLite connections in a server application."
rust-version = "1.56"
exclude = [
  ".fossil-settings",
  ".efiles",
  ".fslckout",
  "build_docs.sh",
  "examples",
  "www",
  "rustfmt.toml"
]

[features]

tpool = ["dep:swctx", "dep:threadpool"]

[dependencies]
parking_lot = { version = "0.12.1" }
r2d2 = { version = "0.8.10" }
r2d2_sqlite = { version = "0.23.0" }
rusqlite = { version = "0.30.0", features = ["hooks"] }

Changes to examples/simple.rs.

sqlsrv-0.2.0 (lines 1-7):
use rusqlite::{functions::FunctionFlags, params, Connection, ToSql};

use rand::Rng;

use sha2::{Digest, Sha256};

use sqlsrv::{RegOn, SchemaMgr};

sqlsrv-0.3.0 (lines 1-10):
#[cfg(feature = "tpool")]
use std::sync::Arc;

use rusqlite::{functions::FunctionFlags, params, Connection, ToSql};

use rand::Rng;

use sha2::{Digest, Sha256};

use sqlsrv::{RegOn, SchemaMgr};

sqlsrv-0.2.0 (lines 56-70):
    .incremental_autoclean(10, None)
    .reg_scalar_fn(RegOn::Both(register_genuid))
    .reg_scalar_fn(RegOn::RO(register_pwhash));

  // If 'tpool' is enabled, then create a thread pool with as many workers as
  // there are connections in the pool.
  #[cfg(feature = "tpool")]


  bldr.worker_threads_r(None);



  let connpool = bldr.build("test.sqlite").unwrap();

  // Acquire the writer connection and use it to conditionally create the
  // `stuff` table.
  {
    let wconn = connpool.writer();

sqlsrv-0.3.0 (lines 59-77):
    .incremental_autoclean(10, None)
    .reg_scalar_fn(RegOn::Both(register_genuid))
    .reg_scalar_fn(RegOn::RO(register_pwhash));

  // If 'tpool' is enabled, then create a thread pool with as many workers as
  // there are connections in the pool.
  #[cfg(feature = "tpool")]
  let tpool = {
    let tpool = Arc::new(threadpool::Builder::new().build());
    bldr.thread_pool_r(Arc::clone(&tpool));
    tpool
  };

  let connpool = bldr.build("test.sqlite").unwrap();

  // Acquire the writer connection and use it to conditionally create the
  // `stuff` table.
  {
    let wconn = connpool.writer();

sqlsrv-0.2.0 (lines 141-192):
      .query_row(params!("stuff"), |row| row.get::<usize, bool>(0))
      .unwrap();
    assert!(have);
  }

  #[cfg(feature = "tpool")]
  connpool
    .ro_run(|conn| {
      const SQL: &str = "SELECT * FROM snarks;";
      let mut _stmt = conn.prepare_cached(SQL).unwrap();
    })
    .unwrap();

  #[cfg(feature = "tpool")]
  connpool.rw_run(|conn| {
    const SQL: &str = "INSERT INTO whoop (name) VALUES (?);";
    let mut stmt = conn.prepare_cached(SQL).unwrap();
    stmt.execute(params!["test"]).unwrap();
    Some(1)
  });


  #[cfg(feature = "tpool")]
  connpool.rw_run(|conn| {
    const SQL: &str = "INSERT INTO uids (uid) VALUES (genuid(8));";
    let mut stmt = conn.prepare_cached(SQL).unwrap();
    stmt.execute(params![]).unwrap();
    Some(1)
  });

  #[cfg(feature = "tpool")]
  {
    let wctx: swctx::WaitCtx<_, _, rusqlite::Error> =
      connpool.rw_run_result(|conn| {
        const SQL: &str = "INSERT INTO uids (uid) VALUES (genuid(8));";
        let mut stmt = conn.prepare_cached(SQL)?;
        stmt.execute([])?;
        Ok(())
      });
    wctx.wait().unwrap();
  }


  #[cfg(feature = "tpool")]
  connpool.shutdown();
}


/// Register a pwhash() SQL function which returns a hex-encoded version of
/// the SHA256 hash of the input string.
fn register_pwhash(db: &Connection) -> Result<(), rusqlite::Error> {
  db.create_scalar_function(

sqlsrv-0.3.0 (lines 148-199):
      .query_row(params!("stuff"), |row| row.get::<usize, bool>(0))
      .unwrap();
    assert!(have);
  }

  #[cfg(feature = "tpool")]
  connpool
    .run_ro_thrd(|conn| {
      const SQL: &str = "SELECT * FROM snarks;";
      let mut _stmt = conn.prepare_cached(SQL).unwrap();
    })
    .unwrap();

  #[cfg(feature = "tpool")]
  connpool.run_rw_thrd(|conn| {
    const SQL: &str = "INSERT INTO whoop (name) VALUES (?);";
    let mut stmt = conn.prepare_cached(SQL).unwrap();
    stmt.execute(params!["test"]).unwrap();
    Some(1)
  });


  #[cfg(feature = "tpool")]
  connpool.run_rw_thrd(|conn| {
    const SQL: &str = "INSERT INTO uids (uid) VALUES (genuid(8));";
    let mut stmt = conn.prepare_cached(SQL).unwrap();
    stmt.execute(params![]).unwrap();
    Some(1)
  });

  #[cfg(feature = "tpool")]
  {
    let wctx: swctx::WaitCtx<_, _, rusqlite::Error> = connpool
      .run_rw_thrd_result(|conn| {
        const SQL: &str = "INSERT INTO uids (uid) VALUES (genuid(8));";
        let mut stmt = conn.prepare_cached(SQL)?;
        stmt.execute([])?;
        Ok(())
      });
    wctx.wait().unwrap();
  }


  #[cfg(feature = "tpool")]
  tpool.join();
}


/// Register a pwhash() SQL function which returns a hex-encoded version of
/// the SHA256 hash of the input string.
fn register_pwhash(db: &Connection) -> Result<(), rusqlite::Error> {
  db.create_scalar_function(

Changes to src/lib.rs.

sqlsrv-0.2.0 (lines 1-73):
//! A library for implementing an in-process SQLite database server.
//!
//! # Connection pooling
//! sqlsrv implements connection pooling that reflects the concurrency model
//! for SQLite:  It supports only one writer but multiple readers.
//!
//! # Connection task pooling
//! In addition to pooling connections, the library can pool threads to run
//! database operations on.
//!
//! # Incremental auto-clean
//! The connection pool has built-in support for setting up incremental
//! autovacuum, and can be configured to implicitly run incremental vacuuming.
//!
//! To use this feature, a "maximum dirt" value is configured on the connection
//! pool.  Whenever the writer connection performs changes to the database it
//! can add "dirt" to the connection.  When the writer connection is returned
//! to the connection pool it checks to see if the amount of dirt is equal to
//! or greater than the configured "maximum dirt" threshold.  If the threshold
//! has been reached, an incremental autovacuum is performed.
//!
//! # Features
//! | Feature  | Function
//! |----------|----------
//! | `tpool`  | Enable internal thread pool.
//!
//! The `tpool` feature is enabled by default, and will allow the connection
//! pool to dispatch database work to threads within that pool.

#![cfg_attr(docsrs, feature(doc_cfg))]

mod changehook;
mod err;
mod rawhook;
mod wrconn;



use std::{
  fmt, mem::ManuallyDrop, num::NonZeroUsize, path::Path, str::FromStr,
  sync::Arc
};

use parking_lot::{Condvar, Mutex};

use r2d2::{CustomizeConnection, PooledConnection};

use r2d2_sqlite::SqliteConnectionManager;


pub use rusqlite;

use rusqlite::{params, Connection, OpenFlags};

#[cfg(feature = "tpool")]
use threadpool::ThreadPool;

pub use changehook::ChangeLogHook;
pub use err::Error;
pub use rawhook::{Action, Hook};
pub use wrconn::WrConn;




pub enum RegOn<F>
where
  F: Fn(&Connection) -> Result<(), rusqlite::Error> + Sync + Sync
{


  RO(F),



  RW(F),



  Both(F)
}


type RegCb = dyn Fn(&Connection) -> Result<(), rusqlite::Error> + Send + Sync;

enum CbType {

sqlsrv-0.3.0 (lines 1-83):
//! A library for implementing an in-process SQLite database server.
//!
//! # Connection pooling
//! sqlsrv implements connection pooling that reflects the concurrency model
//! of SQLite:  It supports multiple parallel readers, but only one writer.
//!
//! # Thread pooling
//! In addition to pooling connections, the library supports optionally using
//! a thread pool for dispatching database operations onto threads.
//!
//! # Incremental auto-clean
//! The connection pool has built-in support for setting up incremental
//! autovacuum, and can be configured to implicitly run incremental vacuuming.
//!
//! To use this feature, a "maximum dirt" value is configured on the connection
//! pool.  Whenever the writer connection performs changes to the database it
//! can add "dirt" to the connection.  When the writer connection is returned
//! to the connection pool it checks to see if the amount of dirt is equal to
//! or greater than the configured "maximum dirt" threshold.  If the threshold
//! has been reached, an incremental autovacuum is performed.
//!
//! # Features
//! | Feature  | Function
//! |----------|----------
//! | `tpool`  | Enable functions/methods that use a thread pool.




#![cfg_attr(docsrs, feature(doc_cfg))]

mod changehook;
mod err;
mod rawhook;
mod wrconn;

pub mod utils;

use std::{
  fmt, mem::ManuallyDrop, num::NonZeroUsize, path::Path, str::FromStr,
  sync::Arc
};

use parking_lot::{Condvar, Mutex};

use r2d2::{CustomizeConnection, PooledConnection};

use r2d2_sqlite::SqliteConnectionManager;

pub use r2d2;
pub use rusqlite;

use rusqlite::{params, Connection, OpenFlags};

#[cfg(feature = "tpool")]
use threadpool::ThreadPool;

pub use changehook::ChangeLogHook;
pub use err::Error;
pub use rawhook::{Action, Hook};
pub use wrconn::WrConn;


/// Wrapper around a SQL functions registration callback used to select which
/// connection types to perform registrations on.
pub enum RegOn<F>
where
  F: Fn(&Connection) -> Result<(), rusqlite::Error> + Sync + Sync
{
  /// This registration callback should only be called for read-only
  /// connections.
  RO(F),

  /// This registration callback should only be called for the read/write
  /// connections.
  RW(F),

  /// This registration callback should be called for both the read-only and
  /// read/write connections.
  Both(F)
}


type RegCb = dyn Fn(&Connection) -> Result<(), rusqlite::Error> + Send + Sync;

enum CbType {
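
The auto-clean mechanism described in the crate docs above can be illustrated with a short sketch. The schema manager, table name and threshold below are made up; only the sqlsrv calls follow the signatures shown in this diff:

    // Run an incremental vacuum once ten or more units of "dirt" have
    // accumulated on the writer connection.
    let bldr = sqlsrv::Builder::new(Box::new(MySchemaMgr {}))
      .incremental_autoclean(10, None);
    let connpool = bldr.build("test.sqlite").unwrap();

    // Report one unit of dirt per deleted row; when the writer is returned to
    // the pool with dirt at or above the threshold, an autovacuum is performed.
    let mut wconn = connpool.writer();
    let removed = wconn
      .execute("DELETE FROM stuff WHERE obsolete = 1;", [])
      .unwrap();
    wconn.add_dirt(removed);
    drop(wconn); // hand the writer back to the pool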

sqlsrv-0.2.0 (lines 165-198):
    Ok(())
  }

  fn on_release(&self, _conn: rusqlite::Connection) {}
}


#[cfg(feature = "tpool")]
#[derive(Default)]
enum ThrdPool {
  #[default]
  Disable,
  Enable {
    nthreads: Option<usize>
  }
}

/// Builder for constructing a [`ConnPool`] object.
pub struct Builder {
  schmgr: Box<dyn SchemaMgr>,
  full_vacuum: bool,
  max_readers: usize,
  #[cfg(feature = "tpool")]
  thrdpool: ThrdPool,
  autoclean: Option<AutoClean>,
  hook: Option<Arc<dyn Hook + Send + Sync>>,
  regfuncs: Option<Vec<CbType>>


}

/// Internal methods.
impl Builder {
  /// Open the writer connection.
  fn open_writer(&self, fname: &Path) -> Result<Connection, rusqlite::Error> {
    let conn = Connection::open(fname)?;







<
<
<
<
<
<
<
<
<
<





<
<


|
>
>







175
176
177
178
179
180
181










182
183
184
185
186


187
188
189
190
191
192
193
194
195
196
197
198
    Ok(())
  }

  fn on_release(&self, _conn: rusqlite::Connection) {}
}

/// Builder for constructing a [`ConnPool`] object.
pub struct Builder {
  schmgr: Box<dyn SchemaMgr>,
  full_vacuum: bool,
  max_readers: usize,


  autoclean: Option<AutoClean>,
  hook: Option<Arc<dyn Hook + Send + Sync>>,
  regfuncs: Option<Vec<CbType>>,
  #[cfg(feature = "tpool")]
  tpool: Option<Arc<ThreadPool>>
}

/// Internal methods.
impl Builder {
  /// Open the writer connection.
  fn open_writer(&self, fname: &Path) -> Result<Connection, rusqlite::Error> {
    let conn = Connection::open(fname)?;

sqlsrv-0.2.0 (lines 225-273):
    let roconn_initterm = RoConn { regfuncs };
    let max_readers = u32::try_from(self.max_readers).unwrap();
    r2d2::Pool::builder()
      .max_size(max_readers)
      .connection_customizer(Box::new(roconn_initterm))
      .build(manager)
  }

  #[cfg(feature = "tpool")]
  fn init_tpool(&self) -> Option<ThreadPool> {
    match self.thrdpool {
      ThrdPool::Disable => None,
      ThrdPool::Enable { nthreads } => {
        let nthreads = if let Some(nthreads) = nthreads {
          nthreads
        } else {
          self.max_readers + 1
        };
        let tpool = ThreadPool::new(nthreads);
        Some(tpool)
      }
    }
  }
}


impl Builder {
  /// Create a new `Builder` for constructing a [`ConnPool`] object.
  ///
  /// Default to not run a full vacuum of the database on initialization and
  /// create 2 read-only connections for the pool.
  /// No workers thread pool will be used.
  pub fn new(schmgr: Box<dyn SchemaMgr>) -> Self {
    Self {
      schmgr,
      full_vacuum: false,
      max_readers: 2,
      #[cfg(feature = "tpool")]
      thrdpool: ThrdPool::default(),
      autoclean: None,
      hook: None,
      regfuncs: None


    }
  }

  /// Trigger a full vacuum when initializing the connection pool.
  ///
  /// Operates on an owned `Builder` object.
  pub fn init_vacuum(mut self) -> Self {

sqlsrv-0.3.0 (lines 225-257):
    let roconn_initterm = RoConn { regfuncs };
    let max_readers = u32::try_from(self.max_readers).unwrap();
    r2d2::Pool::builder()
      .max_size(max_readers)
      .connection_customizer(Box::new(roconn_initterm))
      .build(manager)
  }

}


impl Builder {
  /// Create a new `Builder` for constructing a [`ConnPool`] object.
  ///
  /// Default to not run a full vacuum of the database on initialization and
  /// create 2 read-only connections for the pool.
  /// No workers thread pool will be used.
  pub fn new(schmgr: Box<dyn SchemaMgr>) -> Self {
    Self {
      schmgr,
      full_vacuum: false,
      max_readers: 2,


      autoclean: None,
      hook: None,
      regfuncs: None,
      #[cfg(feature = "tpool")]
      tpool: None
    }
  }

  /// Trigger a full vacuum when initializing the connection pool.
  ///
  /// Operates on an owned `Builder` object.
  pub fn init_vacuum(mut self) -> Self {

sqlsrv-0.2.0 (lines 295-339):
  ///
  /// Operates on a borrowed `Builder` object.
  pub fn max_readers_r(&mut self, n: usize) -> &mut Self {
    self.max_readers = n;
    self
  }

  /// Enable a thread pool for running connection tasks on.
  ///
  /// Unless this is called, no thread pool will be allocated and the
  /// [`ConnPool::ro_run()`] and [`ConnPool::rw_run()`] (and associated
  /// methods) will panic.
  ///
  /// The `nthreads` can be used to specify the number of worker threads to
  /// allocate.  If this is `None`, the number of threads will default to the
  /// number of reader connections plus one (for the writer).
  ///
  /// # Panic
  /// Panics if `nthreads` is set to `Some(0)`.
  #[cfg(feature = "tpool")]
  #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))]
  pub fn worker_threads(mut self, nthreads: Option<usize>) -> Self {
    self.worker_threads_r(nthreads);
    self
  }

  /// Enable a thread pool for running connection tasks on.
  ///
  /// This does the same as [`Builder::worker_threads()`], but operates on a
  /// borrowed object.
  #[cfg(feature = "tpool")]
  #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))]
  pub fn worker_threads_r(&mut self, nthreads: Option<usize>) -> &mut Self {
    assert_ne!(nthreads, Some(0));
    self.thrdpool = ThrdPool::Enable { nthreads };
    self
  }

  /// Request that a "raw" update hook be added to the writer connection.
  ///
  /// Operates on an owned `Builder` object.
  pub fn hook(mut self, hook: Arc<dyn Hook + Send + Sync>) -> Self {
    self.hook = Some(hook);
    self
  }

sqlsrv-0.3.0 (lines 279-292):
  ///
  /// Operates on a borrowed `Builder` object.
  pub fn max_readers_r(&mut self, n: usize) -> &mut Self {
    self.max_readers = n;
    self
  }

  /// Request that a "raw" update hook be added to the writer connection.
  ///
  /// Operates on an owned `Builder` object.
  pub fn hook(mut self, hook: Arc<dyn Hook + Send + Sync>) -> Self {
    self.hook = Some(hook);
    self
  }

sqlsrv-0.2.0 (lines 404-417):
          .get_or_insert(Vec::new())
          .push(CbType::Both(Box::new(f)));
      }
    }
    self
  }

  /// Construct a connection pool.
  pub fn build<P>(mut self, fname: P) -> Result<ConnPool, Error>
  where
    P: AsRef<Path>
  {
    // ToDo: Use std::path::absolute() once stabilized

sqlsrv-0.3.0 (lines 357-381):
          .get_or_insert(Vec::new())
          .push(CbType::Both(Box::new(f)));
      }
    }
    self
  }

  #[cfg(feature = "tpool")]
  pub fn thread_pool(mut self, tpool: Arc<ThreadPool>) -> Self {
    self.tpool = Some(tpool);
    self
  }

  #[cfg(feature = "tpool")]
  pub fn thread_pool_r(&mut self, tpool: Arc<ThreadPool>) -> &mut Self {
    self.tpool = Some(tpool);
    self
  }

  /// Construct a connection pool.
  pub fn build<P>(mut self, fname: P) -> Result<ConnPool, Error>
  where
    P: AsRef<Path>
  {
    // ToDo: Use std::path::absolute() once stabilized
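
The new `thread_pool()` / `thread_pool_r()` methods take an `Arc<ThreadPool>` instead of building a pool internally, which is what makes sharing possible. A minimal sketch of that pattern, mirroring examples/simple.rs above (the schema manager and the housekeeping job are made up):

    use std::sync::Arc;

    // One shared thread pool: a clone of the Arc goes to the builder, the
    // original stays usable for unrelated work.
    let tpool = Arc::new(threadpool::Builder::new().build());

    let mut bldr = sqlsrv::Builder::new(Box::new(MySchemaMgr {}));
    bldr.thread_pool_r(Arc::clone(&tpool));
    let connpool = bldr.build("test.sqlite").unwrap();

    // Non-database jobs can run on the same workers ...
    tpool.execute(|| println!("housekeeping"));

    // ... and shutdown is now the owner's responsibility, not ConnPool's.
    drop(connpool);
    tpool.join();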

sqlsrv-0.2.0 (lines 465-505):
    //
    // Register a callback hook
    //
    if let Some(ref hook) = self.hook {
      rawhook::hook(&conn, Arc::clone(hook));
    }

    //
    // Create thread pool if requested to do so
    //
    #[cfg(feature = "tpool")]
    let tpool = self.init_tpool();

    //
    // Set up connection pool for read-only connections.
    //
    let rpool = self.create_ro_pool(fname, regfuncs)?;

    //
    // Prepare shared data
    //
    let iconn = InnerWrConn { conn, dirt: 0 };
    let inner = Inner { conn: Some(iconn) };
    let sh = Arc::new(Shared {
      inner: Mutex::new(inner),
      signal: Condvar::new(),
      autoclean: self.autoclean.clone()
    });

    Ok(ConnPool {
      rpool,
      sh,

      #[cfg(feature = "tpool")]
      tpool
    })
  }


  /// Construct a connection pool.
  ///
  /// Same as [`Builder::build()`], but register a change log callback on the

sqlsrv-0.3.0 (lines 429-463):
    //
    // Register a callback hook
    //
    if let Some(ref hook) = self.hook {
      rawhook::hook(&conn, Arc::clone(hook));
    }

    //
    // Set up connection pool for read-only connections.
    //
    let rpool = self.create_ro_pool(fname, regfuncs)?;

    //
    // Prepare shared data
    //
    let iconn = InnerWrConn { conn, dirt: 0 };
    let inner = Inner { conn: Some(iconn) };
    let sh = Arc::new(Shared {
      inner: Mutex::new(inner),
      signal: Condvar::new(),
      autoclean: self.autoclean.clone()
    });

    Ok(ConnPool {

      sh,
      rpool,
      #[cfg(feature = "tpool")]
      tpool: self.tpool
    })
  }


  /// Construct a connection pool.
  ///
  /// Same as [`Builder::build()`], but register a change log callback on the

sqlsrv-0.2.0 (lines 576-616):
    }

    //
    // Register a callback hook
    //
    changehook::hook(&conn, hook);

    //
    // Create thread pool if requested to do so
    //
    #[cfg(feature = "tpool")]
    let tpool = self.init_tpool();

    //
    // Set up connection pool for read-only connections.
    //
    let rpool = self.create_ro_pool(fname, regfuncs)?;

    //
    // Prepare shared data
    //
    let iconn = InnerWrConn { conn, dirt: 0 };
    let inner = Inner { conn: Some(iconn) };
    let sh = Arc::new(Shared {
      inner: Mutex::new(inner),
      signal: Condvar::new(),
      autoclean: self.autoclean.clone()
    });

    Ok(ConnPool {
      rpool,
      sh,

      #[cfg(feature = "tpool")]
      tpool
    })
  }
}


/// Inner writer connection object.
///

sqlsrv-0.3.0 (lines 534-568):
    }

    //
    // Register a callback hook
    //
    changehook::hook(&conn, hook);

    //
    // Set up connection pool for read-only connections.
    //
    let rpool = self.create_ro_pool(fname, regfuncs)?;

    //
    // Prepare shared data
    //
    let iconn = InnerWrConn { conn, dirt: 0 };
    let inner = Inner { conn: Some(iconn) };
    let sh = Arc::new(Shared {
      inner: Mutex::new(inner),
      signal: Condvar::new(),
      autoclean: self.autoclean.clone()
    });

    Ok(ConnPool {

      sh,
      rpool,
      #[cfg(feature = "tpool")]
      tpool: self.tpool
    })
  }
}


/// Inner writer connection object.
///

sqlsrv-0.2.0 (lines 640-663):
  signal: Condvar,
  autoclean: Option<AutoClean>
}


/// SQLite connection pool.
///
/// This is a somewhat specialized connection pool that only allows a single
/// writer but multiple readers.
pub struct ConnPool {
  sh: Arc<Shared>,
  rpool: r2d2::Pool<SqliteConnectionManager>,
  #[cfg(feature = "tpool")]
  tpool: Option<ThreadPool>
}

impl ConnPool {
  /// Acquire a read-only connection.
  pub fn reader(
    &self
  ) -> Result<PooledConnection<SqliteConnectionManager>, r2d2::Error> {
    self.rpool.get()
  }

sqlsrv-0.3.0 (lines 592-623):
  signal: Condvar,
  autoclean: Option<AutoClean>
}


/// SQLite connection pool.
///
/// This is a specialized connection pool that is defined specifically for
/// sqlite, and only allows a single writer but multiple readers.
pub struct ConnPool {
  sh: Arc<Shared>,
  rpool: r2d2::Pool<SqliteConnectionManager>,
  #[cfg(feature = "tpool")]
  tpool: Option<Arc<ThreadPool>>
}

impl ConnPool {
  /// Return the pool size.
  ///
  /// In effect, this is the size of the read-only pool plus one (for the
  /// read/write connection).
  pub fn size(&self) -> usize {
    (self.rpool.max_size() + 1) as usize
  }

  /// Acquire a read-only connection.
  pub fn reader(
    &self
  ) -> Result<PooledConnection<SqliteConnectionManager>, r2d2::Error> {
    self.rpool.get()
  }
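
The new `size()` and `freelist_count()` accessors lend themselves to a simple health check; a small sketch (the page threshold is arbitrary and `connpool` is assumed to exist):

    // `size()` is the read-only pool size plus one for the writer.
    println!("pool holds {} connections", connpool.size());

    // `freelist_count()` reports unused pages, e.g. to decide whether an
    // incremental vacuum is worthwhile.
    if let Ok(free_pages) = connpool.freelist_count() {
      if free_pages > 1000 {
        // ... schedule a vacuum (see incremental_vacuum() further down) ...
      }
    }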

sqlsrv-0.2.0 (lines 683-840):
  /// Attempt to acquire the writer connection.
  ///
  /// Returns `Some(conn)` if the writer connection was available at the time
  /// of the request.  Returns `None` if the writer has already been taken.
  pub fn try_writer(&self) -> Option<WrConn> {
    let mut g = self.sh.inner.lock();
    let Some(conn) = g.conn.take() else {
      return None;
    };

    Some(WrConn {
      sh: Arc::clone(&self.sh),
      inner: ManuallyDrop::new(conn)
    })
  }

  /// Run a closure with a read-only connection


  ///
  /// # Panic











  /// Panics if the connection pool was not configured to use a thread pool.









  #[cfg(feature = "tpool")]
  #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))]
  pub fn ro_run<F>(&self, f: F) -> Result<(), r2d2::Error>
  where
    F: FnOnce(&Connection) + Send + 'static
  {
    let Some(ref tpool) = self.tpool else {
      panic!("Connection pool does not have a thread pool");
    };



    let roconn = self.reader()?;

    tpool.execute(move || f(&roconn));


    Ok(())
  }



  /// Run a closure with read-only connection, returning a channel end-point
  /// for retrieving the result.



  ///
  /// # Panic
  /// Panics if the connection pool was not configured to use a thread pool.

  #[cfg(feature = "tpool")]
  #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))]
  pub fn ro_run_result<F, T, E>(
    &self,
    f: F
  ) -> Result<swctx::WaitCtx<T, (), E>, r2d2::Error>
  where
    F: FnOnce(&Connection) -> Result<T, E> + Send + 'static,
    T: Send + 'static,
    E: Send + 'static

  {
    let Some(ref tpool) = self.tpool else {
      panic!("Connection pool does not have a thread pool");
    };

    let roconn = self.reader()?;

    let (sctx, wctx) = swctx::mkpair();

    tpool.execute(move || match f(&roconn) {
      Ok(t) => sctx.set(t),
      Err(e) => sctx.fail(e)
    });

    Ok(wctx)
  }


  /// Run a closure with read/write connection













  ///




  /// # Panic
  /// Panics if the connection pool was not configured to use a thread pool.

  #[cfg(feature = "tpool")]
  #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))]
  pub fn rw_run<F>(&self, f: F)
  where
    F: FnOnce(&mut Connection) -> Option<usize> + Send + 'static
  {
    let Some(ref tpool) = self.tpool else {
      panic!("Connection pool does not have a thread pool");
    };

    let mut rwconn = self.writer();

    tpool.execute(move || {
      let dirt = f(&mut rwconn);
      if let Some(dirt) = dirt {
        rwconn.add_dirt(dirt);
      }
    });
  }

  /// Run a closure with read/write connection, returning a channel end-point


  /// for retrieving the result.



  ///
  /// # Panic
  /// Panics if the connection pool was not configured to use a thread pool.

  #[cfg(feature = "tpool")]
  #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))]
  pub fn rw_run_result<F, T, E>(&self, f: F) -> swctx::WaitCtx<T, (), E>
  where
    F: FnOnce(&mut WrConn) -> Result<T, E> + Send + 'static,
    T: Send + 'static,
    E: Send + 'static

  {
    let Some(ref tpool) = self.tpool else {
      panic!("Connection pool does not have a thread pool");
    };

    let mut rwconn = self.writer();

    let (sctx, wctx) = swctx::mkpair();

    tpool.execute(move || match f(&mut rwconn) {
      Ok(t) => sctx.set(t),
      Err(e) => sctx.fail(e)
    });

    wctx
  }

  /// Perform an incremental vacuum.
  ///
  /// `n` is the number of freelist nodes to reclaim.  If `None` all nodes will
  /// be reclaimed.
  ///
  /// # Panic
  /// Panics if the connection pool was not configured to use a thread pool.
  #[cfg(feature = "tpool")]
  #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))]
  pub fn incremental_vacuum(
    &self,
    n: Option<usize>
  ) -> swctx::WaitCtx<(), (), rusqlite::Error> {
    self.rw_run_result(move |wconn| {
      if let Some(n) = n {
        wconn.execute("PRAGMA incremental_vacuum(?);", params![n])
      } else {
        wconn.execute("PRAGMA incremental_vacuum;", params![])
      }
      .map(|_| ())
    })
  }

  /// Consume self and wait for all threads in thread pool to complete.
  #[cfg(feature = "tpool")]
  #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))]
  pub fn shutdown(self) {
    if let Some(tpool) = self.tpool {
      tpool.join();
    }
  }
}

// vim: set ft=rust et sw=2 ts=2 sts=2 cinoptions=2 tw=79 :

sqlsrv-0.3.0 (lines 643-851):
  /// Attempt to acquire the writer connection.
  ///
  /// Returns `Some(conn)` if the writer connection was available at the time
  /// of the request.  Returns `None` if the writer has already been taken.
  pub fn try_writer(&self) -> Option<WrConn> {
    let mut g = self.sh.inner.lock();
    let conn = g.conn.take()?;



    Some(WrConn {
      sh: Arc::clone(&self.sh),
      inner: ManuallyDrop::new(conn)
    })
  }
}


/// Special queries.
impl ConnPool {
  /// Return the number of unused pages.
  pub fn freelist_count(&self) -> Result<usize, Error> {
    Ok(self.reader()?.query_row_and_then(
      "PRAGMA freelist_count;'",
      [],
      |row| row.get(0)
    )?)
  }
}


pub enum RunError<E> {
  R2D2(r2d2::Error),
  App(E)
}

/// Read-only connection processing.
impl ConnPool {
  /// Run a read-only database operation.
  ///
  /// # Errors
  /// If a connection could not be acquired from the connection pool,
  /// `Err(RunError::R2D2(r2d2::Error))` will be returned.  If the application
  /// callback fails, this function will return `Err(RunError::App(E))`.
  pub fn run_ro<T, E, F>(&self, f: F) -> Result<T, RunError<E>>
  where
    T: Send + 'static,
    E: fmt::Debug + Send + 'static,
    F: FnOnce(&Connection) -> Result<T, E> + Send + 'static
  {
    // Acquire a read-only connection from the pool
    let conn = self.reader().map_err(|e| RunError::R2D2(e))?;

    // Run caller-provided closure.  On error map error to RunError::App().
    f(&conn).map_err(|e| RunError::App(e))
  }

  /// Run a read-only database operation on a thread.
  ///
  /// # Panics
  /// A thread pool must be associated with the [`ConnPool`] or this method
  /// will panic.
  #[cfg(feature = "tpool")]
  #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))]
  pub fn run_ro_thrd<F>(&self, f: F) -> Result<(), r2d2::Error>
  where
    F: FnOnce(&Connection) + Send + 'static
  {
    let Some(ref tpool) = self.tpool else {
      panic!("ConnPool does not have a thread pool");
    };

    // Acquire a read-only connection from the pool and then run the provided
    // closure on a thread from the thread pool.
    let conn = self.reader()?;

    tpool.execute(move || {
      f(&conn);
    });
    Ok(())
  }

  /// Run a read-only database operation on a thread, allowing the caller to
  /// receive the `Result<T, E>` of the supplied closure using a
  /// one-shot channel.
  ///
  /// The supplied closure in `f` should return a `Result<T, E>` where the `Ok`
  /// case will be passed as a "set" value through the `swctx` channel, and the
  /// `Err` case will be passed as a "fail" value.
  ///
  /// # Panics
  /// A thread pool must be associated with the [`ConnPool`] or this method
  /// will panic.
  #[cfg(feature = "tpool")]
  #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))]
  pub fn run_ro_thrd_result<T, E, F>(
    &self,
    f: F
  ) -> Result<swctx::WaitCtx<T, (), E>, r2d2::Error>
  where

    T: Send + 'static,
    E: fmt::Debug + Send + 'static,
    F: FnOnce(&Connection) -> Result<T, E> + Send + 'static
  {
    let Some(ref tpool) = self.tpool else {
      panic!("ConnPool does not have a thread pool");
    };

    let conn = self.reader()?;

    let (sctx, wctx) = swctx::mkpair();

    tpool.execute(move || match f(&conn) {
      Ok(t) => sctx.set(t),
      Err(e) => sctx.fail(e)
    });

    Ok(wctx)
  }
}

/// Read/Write connection processing.
impl ConnPool {
  /// Run a read/write database operation.
  pub fn run_rw<T, E, F>(&self, f: F) -> Result<T, E>
  where
    T: Send + 'static,
    E: fmt::Debug + Send + 'static,
    F: FnOnce(&mut WrConn) -> Result<T, E> + Send + 'static
  {
    let mut conn = self.writer();
    f(&mut conn)
  }

  /// Run a read/write database operation on a thread.
  ///
  /// The supplied closure should return an `Option<usize>`, where the `Some()`
  /// case denotes the specified amount of "dirt" should be added to the write
  /// connection.  `None` means no dirt should be added.
  ///
  /// # Panics
  /// A thread pool must be associated with the [`ConnPool`] or this method
  /// will panic.
  #[cfg(feature = "tpool")]
  #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))]
  pub fn run_rw_thrd<F>(&self, f: F)
  where
    F: FnOnce(&mut WrConn) -> Option<usize> + Send + 'static
  {
    let Some(ref tpool) = self.tpool else {
      panic!("ConnPool does not have a thread pool");
    };

    let mut conn = self.writer();

    tpool.execute(move || {
      let dirt = f(&mut conn);
      if let Some(dirt) = dirt {
        conn.add_dirt(dirt);
      }
    });
  }

  /// Run a read/write database operation on a thread, allowing the
  /// caller to receive the `Result<T, E>` of the supplied closure using a
  /// one-shot channel.
  ///
  /// The supplied closure in `f` should return a `Result<T, E>` where the `Ok`
  /// case will be passed as a "set" value through the `swctx` channel, and the
  /// `Err` case will be passed as a "fail" value.
  ///
  /// # Panics
  /// A thread pool must be associated with the [`ConnPool`] or this method
  /// will panic.
  #[cfg(feature = "tpool")]
  #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))]
  pub fn run_rw_thrd_result<T, E, F>(&self, f: F) -> swctx::WaitCtx<T, (), E>
  where

    T: Send + 'static,
    E: fmt::Debug + Send + 'static,
    F: FnOnce(&mut WrConn) -> Result<T, E> + Send + 'static
  {
    let Some(ref tpool) = self.tpool else {
      panic!("ConnPool does not have a thread pool");
    };

    let mut conn = self.writer();

    let (sctx, wctx) = swctx::mkpair();

    tpool.execute(move || match f(&mut conn) {
      Ok(t) => sctx.set(t),
      Err(e) => sctx.fail(e)
    });

    wctx
  }
}



impl ConnPool {



  #[cfg(feature = "tpool")]
  #[cfg_attr(docsrs, doc(cfg(feature = "tpool")))]
  pub fn incremental_vacuum(
    &self,
    n: Option<usize>
  ) -> swctx::WaitCtx<(), (), rusqlite::Error> {
    self.run_rw_thrd_result(move |conn| conn.incremental_vacuum(n))
  }
}

// vim: set ft=rust et sw=2 ts=2 sts=2 cinoptions=2 tw=79 :
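
Tying the reworked wrappers together, a usage sketch (the `stuff` table and the inserted value are made up; `connpool` is assumed to have been built with a thread pool as in examples/simple.rs):

    // Read on the calling thread; pool errors and closure errors are kept
    // apart by RunError::R2D2 and RunError::App.
    let nrows: Result<usize, sqlsrv::RunError<rusqlite::Error>> =
      connpool.run_ro(|conn| {
        conn.query_row("SELECT count(*) FROM stuff;", [], |row| row.get(0))
      });

    // Write on a worker thread and wait for the closure's Result via swctx.
    #[cfg(feature = "tpool")]
    {
      let wctx = connpool.run_rw_thrd_result(|conn| {
        conn.execute(
          "INSERT INTO stuff (name) VALUES (?);",
          rusqlite::params!["example"]
        )
      });
      let _inserted = wctx.wait().unwrap();
    }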

Added src/utils.rs.

sqlsrv-0.3.0 (lines 1-51):
//! Utility functions around SQL commands.

use rusqlite::{params, Connection};

#[cfg(feature = "tpool")]
use threadpool::ThreadPool;

#[cfg(feature = "tpool")]
use super::ConnPool;

/// Return the number of pages in the freelist.
pub fn freelist_count(conn: &Connection) -> Result<usize, rusqlite::Error> {
  conn.query_row_and_then("PRAGMA freelist_count;'", [], |row| row.get(0))
}

/// Run an incremental vacuum.
///
/// If `n` is `None` the entire list of free pages will be processed.  If it
/// is `Some(n)` then only up to `n` pages will be processed.
pub fn incremental_vacuum(
  conn: &Connection,
  n: Option<usize>
) -> Result<(), rusqlite::Error> {
  if let Some(n) = n {
    conn.execute("PRAGMA incremental_vacuum(?);", params![n])
  } else {
    conn.execute("PRAGMA incremental_vacuum;", params![])
  }
  .map(|_| ())
}

#[cfg(feature = "tpool")]
#[cfg_attr(docsrs, doc(cfg(feature = "tpool")))]
pub fn pooled_incremental_vacuum(
  cpool: &ConnPool,
  tpool: &ThreadPool,
  n: Option<usize>
) -> swctx::WaitCtx<(), (), rusqlite::Error> {
  let (sctx, wctx) = swctx::mkpair();

  let conn = cpool.writer();

  tpool.execute(move || match conn.incremental_vacuum(n) {
    Ok(_) => sctx.set(()),
    Err(e) => sctx.fail(e)
  });

  wctx
}

// vim: set ft=rust et sw=2 ts=2 sts=2 cinoptions=2 tw=79 :
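
A short sketch of the new helpers in use (the thresholds are arbitrary and `connpool` is assumed to exist):

    use sqlsrv::utils;

    // Inspect the freelist through the writer connection and reclaim at most
    // 32 pages per pass so the write lock is only held briefly.
    let wconn = connpool.writer();
    if let Ok(free) = utils::freelist_count(&wconn) {
      if free > 128 {
        utils::incremental_vacuum(&wconn, Some(32)).ok();
      }
    }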

Changes to src/wrconn.rs.

sqlsrv-0.2.0 (lines 29-42):

impl WrConn {
  /// Add dirt to the writer connection.
  pub fn add_dirt(&mut self, weight: usize) {
    self.inner.dirt = self.inner.dirt.saturating_add(weight);
  }
}

impl Deref for WrConn {
  type Target = Connection;

  #[inline(always)]
  fn deref(&self) -> &Connection {
    &self.inner.conn

sqlsrv-0.3.0 (lines 29-62):

impl WrConn {
  /// Add dirt to the writer connection.
  pub fn add_dirt(&mut self, weight: usize) {
    self.inner.dirt = self.inner.dirt.saturating_add(weight);
  }
}

impl WrConn {
  pub fn incremental_vacuum(
    &self,
    n: Option<usize>
  ) -> Result<(), rusqlite::Error> {
    if let Some(n) = n {
      self
        .inner
        .conn
        .execute("PRAGMA incremental_vacuum(?);", params![n])
    } else {
      self
        .inner
        .conn
        .execute("PRAGMA incremental_vacuum;", params![])
    }
    .map(|_| ())
  }
}

impl Deref for WrConn {
  type Target = Connection;

  #[inline(always)]
  fn deref(&self) -> &Connection {
    &self.inner.conn

Changes to www/changelog.md.

sqlsrv-0.2.0 (lines 1-18):
# Change Log

## [Unreleased]

[Details](/vdiff?from=sqlsrv-0.2.0&to=trunk)

### Added

### Changed

### Removed

---

## [0.2.0] - 2024-01-28

[Details](/vdiff?from=sqlsrv-0.1.1&to=sqlsrv-0.2.0)

sqlsrv-0.3.0 (lines 1-46):
# Change Log

## [Unreleased]

[Details](/vdiff?from=sqlsrv-0.3.0&to=trunk)

### Added

### Changed

### Removed

---

## [0.3.0] - 2024-02-10

[Details](/vdiff?from=sqlsrv-0.2.0&to=sqlsrv-0.3.0)

### Added

- Re-export `r2d2`.
- Add `ConnPool::size()` for getting number of connections in connection pool.
- Add `ConnPool::freelist_count()` for getting number of unused pages in the
  database.
- `WrConn::incremental_vacuum()` added.
- Add a `utils` submodule for collecting SQL command wrappers.
- Add several new `ConnPool` wrappers for running closures with either
  read-only or read/write connections.

### Changed

- `tpool` feature is no longer the default.
- Redesigned thread pooling to support sharing a thread pool with others.

### Removed

- Removed support for the thread pool in the `ConnPool` in favor of providing
  functions/methods that take in a reference to a `threadpool::ThreadPool`
  instead.

---

## [0.2.0] - 2024-01-28

[Details](/vdiff?from=sqlsrv-0.1.1&to=sqlsrv-0.2.0)