Index: Cargo.toml
==================================================================
--- Cargo.toml
+++ Cargo.toml
@@ -1,8 +1,8 @@
 [package]
 name = "qsu"
-version = "0.6.2"
+version = "0.7.0"
 edition = "2021"
 license = "0BSD"
 # https://crates.io/category_slugs
 categories = [ "os" ]
 keywords = [ "service", "systemd", "winsvc" ]

Index: src/argp.rs
==================================================================
--- src/argp.rs
+++ src/argp.rs
@@ -641,11 +641,14 @@
   /// arguments `--name <name>` will be added.
   ///
   /// # Errors
   /// Application-defined error will be returned as `CbErr::App` to the
   /// original caller.
-  pub fn proc(mut self) -> Result<(), CbErr<ApEr>> {
+  pub fn proc(mut self) -> Result<(), CbErr<ApEr>>
+  where
+    ApEr: std::fmt::Debug
+  {
     // Give application the opportunity to modify root Command
     self.cli = self
       .cb
       .conf_cmd(Cmd::Root, self.cli)
       .map_err(|ae| CbErr::App(ae))?;

Index: src/err.rs
==================================================================
--- src/err.rs
+++ src/err.rs
@@ -245,10 +245,17 @@
 impl From<windows_service::Error> for Error {
   fn from(err: windows_service::Error) -> Self {
     Self::SubSystem(err.to_string())
   }
 }
+
+#[cfg(windows)]
+impl<ApEr> From<windows_service::Error> for CbErr<ApEr> {
+  fn from(err: windows_service::Error) -> Self {
+    Self::Lib(Error::SubSystem(err.to_string()))
+  }
+}

 /*
 impl From<AppErr> for Error {
   /// Wrap an [`AppErr`] in an [`Error`].

Index: src/rt.rs
==================================================================
--- src/rt.rs
+++ src/rt.rs
@@ -187,10 +187,18 @@
   ///
   /// For foreground processes and services that do not support startup state
   /// notifications this method has no effect.
   pub fn report(&self, status: Option) {
     let checkpoint = self.cnt.fetch_add(1, Ordering::SeqCst);
+    if let Some(ref msg) = status {
+      tracing::trace!(
+        "Reached init checkpoint {checkpoint}; {}",
+        msg.as_ref()
+      );
+    } else {
+      tracing::trace!("Reached init checkpoint {checkpoint}");
+    }
     self.sr.starting(checkpoint, status);
   }
 }

@@ -219,10 +227,18 @@
   ///
   /// For foreground processes and services that do not support shutdown state
   /// notifications this method has no effect.
   pub fn report(&self, status: Option) {
     let checkpoint = self.cnt.fetch_add(1, Ordering::SeqCst);
+    if let Some(ref msg) = status {
+      tracing::trace!(
+        "Reached term checkpoint {checkpoint}; {}",
+        msg.as_ref()
+      );
+    } else {
+      tracing::trace!("Reached term checkpoint {checkpoint}");
+    }
     self.sr.stopping(checkpoint, status);
   }
 }

@@ -437,11 +453,11 @@
 impl RunCtx {
   /// Run as a systemd service.
   #[cfg(all(target_os = "linux", feature = "systemd"))]
   fn systemd<ApEr>(self, st: SrvAppRt<ApEr>) -> Result<(), CbErr<ApEr>>
   where
-    ApEr: Send
+    ApEr: Send + std::fmt::Debug
   {
     LumberJack::default().set_init(self.log_init).init()?;

     tracing::debug!("Running service '{}'", self.svcname);

@@ -491,21 +507,21 @@

   /// Run as a Windows service.
   #[cfg(windows)]
   fn winsvc<ApEr>(self, st: SrvAppRt<ApEr>) -> Result<(), CbErr<ApEr>>
   where
-    ApEr: Send + 'static
+    ApEr: Send + 'static + std::fmt::Debug
   {
     winsvc::run(&self.svcname, st)?;

     Ok(())
   }

   /// Run as a foreground server
   fn foreground<ApEr>(self, st: SrvAppRt<ApEr>) -> Result<(), CbErr<ApEr>>
   where
-    ApEr: Send
+    ApEr: Send + std::fmt::Debug
   {
     LumberJack::default().set_init(self.log_init).init()?;

     tracing::debug!("Running service '{}'", self.svcname);

@@ -633,11 +649,11 @@
   /// [`CbErr::App`] is returned, containing an application-specific error, if
   /// an application callback returned an error. [`CbErr::Lib`] indicates that
   /// an error occurred in the qsu runtime.
   pub fn run<ApEr>(self, st: SrvAppRt<ApEr>) -> Result<(), CbErr<ApEr>>
   where
-    ApEr: Send + 'static
+    ApEr: Send + 'static + std::fmt::Debug
   {
     if self.service {
       let _ = RUNAS.set(RunAs::SvcSubsys);

       #[cfg(all(target_os = "linux", feature = "systemd"))]

@@ -667,11 +683,11 @@
     self,
     svcevt_handler: Box,
     rt_handler: Box + Send>
   ) -> Result<(), CbErr<ApEr>>
   where
-    ApEr: Send + 'static
+    ApEr: Send + 'static + std::fmt::Debug
   {
     self.run(SrvAppRt::Sync {
       svcevt_handler,
       rt_handler
     })

@@ -686,11 +702,11 @@
     rtbldr: Option,
     svcevt_handler: Box,
     rt_handler: Box + Send>
   ) -> Result<(), CbErr<ApEr>>
   where
-    ApEr: Send + 'static
+    ApEr: Send + 'static + std::fmt::Debug
   {
     self.run(SrvAppRt::Tokio {
       rtbldr,
       svcevt_handler,
       rt_handler

@@ -705,11 +721,11 @@
     self,
     svcevt_handler: Box,
     rt_handler: Box + Send>
   ) -> Result<(), CbErr<ApEr>>
   where
-    ApEr: Send + 'static
+    ApEr: Send + 'static + std::fmt::Debug
   {
     self.run(SrvAppRt::Rocket {
       svcevt_handler,
       rt_handler
     })

Index: src/rt/rttype.rs
==================================================================
--- src/rt/rttype.rs
+++ src/rt/rttype.rs
@@ -10,10 +10,13 @@
 mod tokio;

 #[cfg(feature = "rocket")]
 #[cfg_attr(docsrs, doc(cfg(feature = "rocket")))]
 mod rocket;
+
+const SVCAPP_INIT_MSG: &str = "Begin service application initialization";
+const SVCAPP_TERM_MSG: &str = "Begin service application termination";

 pub use sync::{main as sync_main, MainParams as SyncMainParams};

 #[cfg(feature = "tokio")]
 #[cfg_attr(docsrs, doc(cfg(feature = "tokio")))]

Index: src/rt/rttype/rocket.rs
==================================================================
--- src/rt/rttype/rocket.rs
+++ src/rt/rttype/rocket.rs
@@ -83,17 +83,17 @@
     (tx_svcevt, rx_svcevt)
   } else {
     init_svc_channels(&ks)
   };

-  // Call application's init() method.
   let ictx = InitCtx {
     re: re.clone(),
     sr: Arc::clone(&sr),
-    cnt: Arc::new(AtomicU32::new(2)) // 1 is used by the runtime, so start at 2
+    cnt: Arc::new(AtomicU32::new(1))
   };
+  ictx.report(Some(super::SVCAPP_INIT_MSG.into()));

   let (rockets, init_apperr) = match rt_handler.init(ictx).await {
     Ok(rockets) => (rockets, None),
     Err(e) => (Vec::new(), Some(e))
   };

@@ -182,11 +182,11 @@
   };

   // Always send the first shutdown checkpoint here. Either init() failed or
   // run returned. Either way, we're shutting down.
-  sr.stopping(1, None);
+  sr.stopping(1, Some(super::SVCAPP_TERM_MSG.into()));

   // Now that the main application has terminated kill off any remaining
   // auxiliary tasks (read: signal waiters)
   ks.trigger();
   if (ks.finalize().await).is_err() {

@@ -195,12 +195,13 @@
   // Call the application's shutdown() function.
   let tctx = TermCtx {
     re,
     sr: Arc::clone(&sr),
-    cnt: Arc::new(AtomicU32::new(2)) // 1 is used by the runtime, so start at 2
+    cnt: Arc::new(AtomicU32::new(2)) // 1 used above, so start at 2
   };
+  //tctx.report(Some(super::SVCAPP_TERM_MSG.into()));

   let term_apperr = rt_handler.shutdown(tctx).await.err();

   // Inform the service subsystem that the shutdown is complete
   sr.stopped();

Index: src/rt/rttype/sync.rs
==================================================================
--- src/rt/rttype/sync.rs
+++ src/rt/rttype/sync.rs
@@ -94,18 +94,20 @@
     signals::sync_kill_to_event(tx2, test_mode)?;

     (tx, rx)
   };

+  //sr.starting(1, Some(super::SVCAPP_INIT_MSG.into()));

   // Call server application's init() method, passing along a startup state
   // reporter object.
   let ictx = InitCtx {
     re: re.clone(),
     sr: Arc::clone(&sr),
-    cnt: Arc::new(AtomicU32::new(2)) // 1 is used by the runtime, so start at 2
+    cnt: Arc::new(AtomicU32::new(1))
   };
+  ictx.report(Some(super::SVCAPP_INIT_MSG.into()));

   let init_apperr = rt_handler.init(ictx).err();

   // If init() was successful, set the service's state to "started" and then
   // call the server application's run() method.
   let run_apperr = if init_apperr.is_none() {

@@ -137,20 +139,21 @@
   } else {
     None
   };

   // Always send the first shutdown checkpoint here. Either init() failed or
-  // run retuned. Either way, we're shutting down.
-  sr.stopping(1, None);
+  // run returned. Either way, we're shutting down.
+  //sr.stopping(1, Some(super::SVCAPP_TERM_MSG.into()));

   // Call the application's shutdown() function, passing along a shutdown state
   // reporter object.
   let tctx = TermCtx {
     re,
     sr: Arc::clone(&sr),
-    cnt: Arc::new(AtomicU32::new(2)) // 1 is used by the runtime, so start at 2
+    cnt: Arc::new(AtomicU32::new(1))
   };
+  tctx.report(Some(super::SVCAPP_TERM_MSG.into()));

   let term_apperr = rt_handler.shutdown(tctx).err();

   // Inform the service subsystem that the shutdown is complete
   sr.stopped();

Index: src/rt/rttype/tokio.rs
==================================================================
--- src/rt/rttype/tokio.rs
+++ src/rt/rttype/tokio.rs
@@ -34,11 +34,11 @@
 pub fn main<ApEr>(
   rtbldr: Option,
   params: MainParams
 ) -> Result<(), CbErr<ApEr>>
 where
-  ApEr: Send
+  ApEr: Send + std::fmt::Debug
 {
   let rt = if let Some(mut bldr) = rtbldr {
     bldr.build()?
   } else {
     tokio::runtime::Runtime::new()?

@@ -61,11 +61,11 @@
     sr,
     svcevt_ch
   }: MainParams
 ) -> Result<(), CbErr<ApEr>>
 where
-  ApEr: Send
+  ApEr: Send + std::fmt::Debug
 {
   let ks = KillSwitch::new();

   // If a SvcEvt receiver end-point was handed to us, then use it. Otherwise
   // create our own and spawn the monitoring tasks that will generate events

@@ -79,13 +79,18 @@

   // Call application's init() method.
   let ictx = InitCtx {
     re: re.clone(),
     sr: Arc::clone(&sr),
-    cnt: Arc::new(AtomicU32::new(2)) // 1 is used by the runtime, so start at 2
+    cnt: Arc::new(AtomicU32::new(1))
   };
+  ictx.report(Some(super::SVCAPP_INIT_MSG.into()));

   let init_apperr = rt_handler.init(ictx).await.err();
+
+  if let Some(ref e) = init_apperr {
+    tracing::error!("Service handler init() failed; {e:?}");
+  }

   let run_apperr = if init_apperr.is_none() {
     sr.started();

     // Kick off service event monitoring thread before running main app

@@ -97,10 +102,15 @@

     // Run the main service application callback.
     //
     // This is basically the service application's "main()".
     let ret = rt_handler.run(&re).await.err();
+
+    if let Some(ref e) = ret {
+      tracing::error!("Service handler run() failed; {e:?}");
+    }

     // Shut down svcevent thread
     //
     // Tell it that an (implicit) shutdown event has occurred.
     // Duplicates don't matter, because once the first one is processed the

@@ -115,11 +125,11 @@
     None
   };

   // Always send the first shutdown checkpoint here. Either init() failed or
   // run returned. Either way, we're shutting down.
-  sr.stopping(1, None);
+  sr.stopping(1, Some(super::SVCAPP_TERM_MSG.into()));

   // Now that the main application has terminated kill off any remaining
   // auxiliary tasks (read: signal waiters)
   ks.trigger();

@@ -129,13 +139,18 @@

   // Call the application's shutdown() function.
   let tctx = TermCtx {
     re,
     sr: Arc::clone(&sr),
-    cnt: Arc::new(AtomicU32::new(2)) // 1 is used by the runtime, so start at 2
+    cnt: Arc::new(AtomicU32::new(2)) // 1 used above, so start at 2
   };
+  //tctx.report(Some(super::SVCAPP_TERM_MSG.into()));

   let term_apperr = rt_handler.shutdown(tctx).await.err();
+
+  if let Some(ref e) = term_apperr {
+    tracing::error!("Service handler shutdown() failed; {e:?}");
+  }

   // Inform the service subsystem that the shutdown is complete
   sr.stopped();

   // There can be multiple failures, and we don't want to lose information

Index: src/rt/winsvc.rs
==================================================================
--- src/rt/winsvc.rs
+++ src/rt/winsvc.rs
@@ -113,11 +113,10 @@
   fn started(&self) {
     if let Err(e) = self.tx.send(ToSvcMsg::Started) {
       log::error!("Unable to send Started message; {}", e);
     }
-    log::trace!("Started");
   }

   fn stopping(&self, checkpoint: u32, status: Option) {
     if let Err(e) = self.tx.send(ToSvcMsg::Stopping(checkpoint)) {
       log::error!("Unable to send Stopping message; {}", e);

@@ -129,11 +128,10 @@
   fn stopped(&self) {
     if let Err(e) = self.tx.send(ToSvcMsg::Stopped) {
       log::error!("Unable to send Stopped message; {}", e);
     }
-    log::trace!("Stopped");
   }
 }

 /// Run a service application under the Windows service subsystem.
@@ -140,13 +138,13 @@
 ///
 /// # Errors
 /// `Error::SubSystem` means the service could not be started. `Error::IO`
 /// means the internal worker could not be launched.
 #[allow(clippy::missing_panics_doc)]
-pub fn run<ApEr>(svcname: &str, st: SrvAppRt<ApEr>) -> Result<(), Error>
+pub fn run<ApEr>(svcname: &str, st: SrvAppRt<ApEr>) -> Result<(), CbErr<ApEr>>
 where
-  ApEr: Send + 'static
+  ApEr: Send + 'static + std::fmt::Debug
 {
   #[cfg(feature = "wait-for-debugger")]
   {
     debugger::wait_for_then_break();
     debugger::output("Hello, debugger");

@@ -182,14 +180,22 @@
   service_dispatcher::start(svcname, ffi_service_main)?;

   // The return value should be hard-coded to `Result<(), Error>`, so this
   // unwrap should be okay.
   match jh.join() {
-    Ok(_) => Ok(()),
-    Err(e) => *e
-      .downcast::<Result<(), Error>>()
-      .expect("Unable to downcast error from svcapp thread")
+    Ok(res) => {
+      tracing::trace!("srvapp_thread::join res={res:?}");
+      match res {
+        Ok(()) => Ok(()),
+        Err(be) => Err(be)
+      }
+    }
+    Err(e) => {
+      tracing::error!("srvapp_thread() could not be joined; {e:?}");
+      let msg = format!("Unable to join srvapp_thread(); {e:?}");
+      Err(CbErr::Lib(Error::Internal(msg)))
+    }
   }
 }

 /// Internal server application wrapper thread.
@@ -197,11 +203,11 @@
   st: SrvAppRt<ApEr>,
   svcname: String,
   rx_fromsvc: oneshot::Receiver>
 ) -> Result<(), CbErr<ApEr>>
 where
-  ApEr: Send
+  ApEr: Send + std::fmt::Debug
 {
   // Wait for the service subsystem to report that it has initialized.
   // It passes along a channel end-point that can be used to send events to
   // the service manager.
   let Ok(res) = rx_fromsvc.blocking_recv() else {

@@ -247,11 +253,10 @@
       rt_handler,
       sr,
       svcevt_ch: Some((tx_svcevt, rx_svcevt))
     }
   ),
-
   #[cfg(feature = "rocket")]
   SrvAppRt::Rocket {
     svcevt_handler,
     rt_handler
   } => rttype::rocket_main(rttype::RocketMainParams {

Index: www/changelog.md
==================================================================
--- www/changelog.md
+++ www/changelog.md
@@ -2,18 +2,30 @@

 ⚠️ indicates a breaking change.

 ## [Unreleased]

-[Details](/vdiff?from=qsu-0.6.2&to=trunk)
+[Details](/vdiff?from=qsu-0.7.0&to=trunk)

 ### Added

 ### Changed

 ### Removed

+---
+
+## [0.7.0] - 2024-10-17
+
+[Details](/vdiff?from=qsu-0.6.2&to=qsu-0.7.0)
+
+### Changed
+
+- Fix a bug in the winsvc rt implementation that caused errors to get lost.
+- ⚠️ Add `std::fmt::Debug` bounds to application errors at certain locations to
+  assist in debugging.
+
 ---

 ## [0.6.2] - 2024-10-17

 [Details](/vdiff?from=qsu-0.6.1&to=qsu-0.6.2)
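---

Note on the ⚠️ breaking change above: the runtime now logs application errors with `{e:?}` when `init()`, `run()`, or `shutdown()` fails, which is why `ApEr` picked up a `std::fmt::Debug` bound alongside the existing `Send + 'static` bounds. A minimal sketch of what this means for downstream code; the error type `MyAppErr` and the bound-checking helper are illustrative only, not part of qsu's API:

```rust
// Hypothetical application error type. Deriving Debug is what the
// new bounds require of downstream crates.
#[allow(dead_code)]
#[derive(Debug)]
enum MyAppErr {
  Config(String),
  Io(std::io::Error)
}

// Stand-in for the bounds qsu 0.7.0 places on `ApEr` in the runtime
// entry points: `ApEr: Send + 'static + std::fmt::Debug`.
fn requires_qsu_bounds<ApEr: Send + 'static + std::fmt::Debug>(e: ApEr) {
  // The Debug bound is what lets the runtime log a failed callback's
  // error instead of silently dropping it.
  println!("Service handler failed; {e:?}");
}

fn main() {
  requires_qsu_bounds(MyAppErr::Config("missing key".into()));
}
```

An application error type that does not implement `Debug` will now fail to compile against these entry points until `Debug` is derived or implemented manually.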