Difference From qsu-0.6.2 To qsu-0.7.0
2024-11-18

  01:20  Add crate version to clap. (check-in: 0bf34bcb4f, user: jan, tags: trunk)

2024-10-17

  14:57  Release maintenance. (check-in: bdeb55f63b, user: jan, tags: qsu-0.7.0, trunk)
  14:52  Add missing bound. (check-in: 150898a4e2, user: jan, tags: trunk)
  11:47  Implicitly trace log when status reporter reports service state changes. (check-in: 1bfa789ca3, user: jan, tags: trunk)
  06:11  Release maintenance. (check-in: 0e755c10e7, user: jan, tags: qsu-0.6.2, trunk)
  06:10  Crate maintenance. (check-in: 7ec935e407, user: jan, tags: trunk)
Changes to Cargo.toml.
The `[package]` table now reads (the version is bumped from 0.6.2 to 0.7.0):

```toml
[package]
name = "qsu"
version = "0.7.0"
edition = "2021"
license = "0BSD"
# https://crates.io/category_slugs
categories = [ "os" ]
keywords = [ "service", "systemd", "winsvc" ]
repository = "https://repos.qrnch.tech/pub/qsu"
description = "Service subsystem utilities and runtime wrapper."
```
︙
Changes to src/argp.rs.
︙
```diff
   /// name (determined by
   /// [`default_service_name()`](crate::default_service_name), then the
   /// aguments `--name <service name>` will be added.
   ///
   /// # Errors
   /// Application-defined error will be returned as `CbErr::Aop` to the
   /// original caller.
-  pub fn proc(mut self) -> Result<(), CbErr<ApEr>> {
+  pub fn proc(mut self) -> Result<(), CbErr<ApEr>>
+  where
+    ApEr: std::fmt::Debug
+  {
     // Give application the opportunity to modify root Command
     self.cli = self
       .cb
       .conf_cmd(Cmd::Root, self.cli)
       .map_err(|ae| CbErr::App(ae))?;
 
     // Create registration subcommand and give application the opportunity to
```
︙
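The practical effect of the new bound is that the application-defined error type carried in `CbErr<ApEr>` must now implement `Debug`. A minimal sketch of a conforming error type (the type and its variants are hypothetical):

```rust
// Hypothetical application error type. As of qsu 0.7.0 it must implement
// `std::fmt::Debug` wherever the new `ApEr: std::fmt::Debug` bounds apply;
// deriving it is normally sufficient.
#[derive(Debug)]
enum MyAppErr {
  Config(String),
  Io(std::io::Error)
}
```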
Changes to src/err.rs.
︙
```diff
 #[cfg(windows)]
 impl From<windows_service::Error> for Error {
   fn from(err: windows_service::Error) -> Self {
     Self::SubSystem(err.to_string())
   }
 }
 
+#[cfg(windows)]
+impl<ApEr> From<windows_service::Error> for CbErr<ApEr> {
+  fn from(err: windows_service::Error) -> Self {
+    Self::Lib(Error::SubSystem(err.to_string()))
+  }
+}
+
 /*
 impl<ApEr> From<ApEr> for Error<ApEr> {
   /// Wrap an [`AppErr`] in an [`Error`].
   fn from(err: ApEr) -> Self {
     Error::App(err)
```
︙
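With this `From` impl in place, code that returns `CbErr<ApEr>` can propagate `windows_service` errors with the `?` operator, which wraps them as `CbErr::Lib(Error::SubSystem(..))`. A rough sketch, assuming `CbErr` and `Error` are imported from qsu's `err` module (the exact re-export path may differ) and the function name is invented:

```rust
#[cfg(windows)]
fn connect_to_scm<ApEr>() -> Result<(), CbErr<ApEr>> {
  use windows_service::service_manager::{
    ServiceManager, ServiceManagerAccess
  };

  // A `windows_service::Error` raised here is converted into
  // `CbErr::Lib(Error::SubSystem(..))` by the new `From` impl.
  let _scm = ServiceManager::local_computer(
    None::<&str>,
    ServiceManagerAccess::CONNECT
  )?;
  Ok(())
}
```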
Changes to src/rt.rs.
︙
```diff
   /// Report startup state to the system service manager.
   ///
   /// For foreground processes and services that do not support startup state
   /// notifications this method has no effect.
   pub fn report(&self, status: Option<StateMsg>) {
     let checkpoint = self.cnt.fetch_add(1, Ordering::SeqCst);
+    if let Some(ref msg) = status {
+      tracing::trace!(
+        "Reached init checkpoint {checkpoint}; {}",
+        msg.as_ref()
+      );
+    } else {
+      tracing::trace!("Reached init checkpoint {checkpoint}");
+    }
     self.sr.starting(checkpoint, status);
   }
 }
 
 /// Context passed to `term()` service application callback.
 ///
```
︙
```diff
   /// Report shutdown state to the system service manager.
   ///
   /// For foreground processes and services that do not support shutdown state
   /// notifications this method has no effect.
   pub fn report(&self, status: Option<StateMsg>) {
     let checkpoint = self.cnt.fetch_add(1, Ordering::SeqCst);
+    if let Some(ref msg) = status {
+      tracing::trace!(
+        "Reached term checkpoint {checkpoint}; {}",
+        msg.as_ref()
+      );
+    } else {
+      tracing::trace!("Reached term checkpoint {checkpoint}");
+    }
     self.sr.stopping(checkpoint, status);
   }
 }
 
 /// "Synchronous" (non-`async`) server application.
 ///
```
︙
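For service authors, the upshot is that every `report()` call now leaves a trace-level breadcrumb with no extra logging code. A sketch of an `init()` callback using it (the handler signature is abbreviated and `MyAppErr` is hypothetical; `StateMsg` converts from `&str` via `.into()`, as in the runtime code above):

```rust
fn init(&mut self, ictx: InitCtx) -> Result<(), MyAppErr> {
  // Each call now also emits a trace event of the form
  // "Reached init checkpoint <n>; <message>".
  ictx.report(Some("loading configuration".into()));

  // ... perform startup work ...

  ictx.report(Some("binding listeners".into()));
  Ok(())
}
```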
```diff
 }
 
 impl RunCtx {
   /// Run as a systemd service.
   #[cfg(all(target_os = "linux", feature = "systemd"))]
   fn systemd<ApEr>(self, st: SrvAppRt<ApEr>) -> Result<(), CbErr<ApEr>>
   where
-    ApEr: Send
+    ApEr: Send + std::fmt::Debug
   {
     LumberJack::default().set_init(self.log_init).init()?;
 
     tracing::debug!("Running service '{}'", self.svcname);
 
     let sr = Arc::new(systemd::ServiceReporter {});
```
︙
```diff
     }
   }
 
   /// Run as a Windows service.
   #[cfg(windows)]
   fn winsvc<ApEr>(self, st: SrvAppRt<ApEr>) -> Result<(), CbErr<ApEr>>
   where
-    ApEr: Send + 'static
+    ApEr: Send + 'static + std::fmt::Debug
   {
     winsvc::run(&self.svcname, st)?;
     Ok(())
   }
 
   /// Run as a foreground server
   fn foreground<ApEr>(self, st: SrvAppRt<ApEr>) -> Result<(), CbErr<ApEr>>
   where
-    ApEr: Send
+    ApEr: Send + std::fmt::Debug
   {
     LumberJack::default().set_init(self.log_init).init()?;
 
     tracing::debug!("Running service '{}'", self.svcname);
 
     let sr = Arc::new(nosvc::ServiceReporter {});
```
︙
```diff
   ///
   /// # Errors
   /// [`CbErr::App`] is returned, containing application-specific error, if n
   /// application callback returned an error.  [`CbErr::Lib`] indicates that an
   /// error occurred in the qsu runtime.
   pub fn run<ApEr>(self, st: SrvAppRt<ApEr>) -> Result<(), CbErr<ApEr>>
   where
-    ApEr: Send + 'static
+    ApEr: Send + 'static + std::fmt::Debug
   {
     if self.service {
       let _ = RUNAS.set(RunAs::SvcSubsys);
 
       #[cfg(all(target_os = "linux", feature = "systemd"))]
       self.systemd(st)?;
```
︙
```diff
   #[allow(clippy::missing_errors_doc)]
   pub fn run_sync<ApEr>(
     self,
     svcevt_handler: Box<dyn FnMut(SvcEvt) + Send>,
     rt_handler: Box<dyn ServiceHandler<AppErr = ApEr> + Send>
   ) -> Result<(), CbErr<ApEr>>
   where
-    ApEr: Send + 'static
+    ApEr: Send + 'static + std::fmt::Debug
   {
     self.run(SrvAppRt::Sync {
       svcevt_handler,
       rt_handler
     })
   }
 
   /// Convenience method around [`Self::run()`] using [`SrvAppRt::Tokio`].
   #[cfg(feature = "tokio")]
   #[cfg_attr(docsrs, doc(cfg(feature = "tokio")))]
   #[allow(clippy::missing_errors_doc)]
   pub fn run_tokio<ApEr>(
     self,
     rtbldr: Option<runtime::Builder>,
     svcevt_handler: Box<dyn FnMut(SvcEvt) + Send>,
     rt_handler: Box<dyn TokioServiceHandler<AppErr = ApEr> + Send>
   ) -> Result<(), CbErr<ApEr>>
   where
-    ApEr: Send + 'static
+    ApEr: Send + 'static + std::fmt::Debug
   {
     self.run(SrvAppRt::Tokio {
       rtbldr,
       svcevt_handler,
       rt_handler
     })
   }
 
   /// Convenience method around [`Self::run()`] using [`SrvAppRt::Rocket`].
   #[cfg(feature = "rocket")]
   #[cfg_attr(docsrs, doc(cfg(feature = "rocket")))]
   #[allow(clippy::missing_errors_doc)]
   pub fn run_rocket<ApEr>(
     self,
     svcevt_handler: Box<dyn FnMut(SvcEvt) + Send>,
     rt_handler: Box<dyn RocketServiceHandler<AppErr = ApEr> + Send>
   ) -> Result<(), CbErr<ApEr>>
   where
-    ApEr: Send + 'static
+    ApEr: Send + 'static + std::fmt::Debug
   {
     self.run(SrvAppRt::Rocket {
       svcevt_handler,
       rt_handler
     })
   }
 }
```
︙
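At the call site, the stricter bounds surface as a compile error when the handler's `AppErr` type lacks `Debug`; the bound exists so the runtime can log callback failures with the `{e:?}` formatter (see the `tracing::error!` calls in the runtime changes below). A hypothetical invocation, with the constructor and handler names invented for illustration:

```rust
// `MyHandler::AppErr` must now satisfy `Send + 'static + std::fmt::Debug`.
let rctx = RunCtx::new("myservice");
rctx.run_sync(
  Box::new(|_evt: SvcEvt| { /* react to service events */ }),
  Box::new(MyHandler::default())
)?;
```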
Changes to src/rt/rttype.rs.
︙
```diff
 #[cfg(feature = "tokio")]
 #[cfg_attr(docsrs, doc(cfg(feature = "tokio")))]
 mod tokio;
 
 #[cfg(feature = "rocket")]
 #[cfg_attr(docsrs, doc(cfg(feature = "rocket")))]
 mod rocket;
 
+const SVCAPP_INIT_MSG: &str = "Begin service application initialization";
+const SVCAPP_TERM_MSG: &str = "Begin service application termination";
+
 pub use sync::{main as sync_main, MainParams as SyncMainParams};
 
 #[cfg(feature = "tokio")]
 #[cfg_attr(docsrs, doc(cfg(feature = "tokio")))]
 pub use tokio::{main as tokio_main, MainParams as TokioMainParams};
```
︙
Changes to src/rt/rttype/rocket.rs.
︙

The init path now reads; the `ictx.report(…)` checkpoint call is new:

```rust
  let (tx_svcevt, rx_svcevt) =
    if let Some((tx_svcevt, rx_svcevt)) = svcevt_ch {
      (tx_svcevt, rx_svcevt)
    } else {
      init_svc_channels(&ks)
    };

  // Call application's init() method.
  let ictx = InitCtx {
    re: re.clone(),
    sr: Arc::clone(&sr),
    cnt: Arc::new(AtomicU32::new(1))
  };
  ictx.report(Some(super::SVCAPP_INIT_MSG.into()));
  let (rockets, init_apperr) = match rt_handler.init(ictx).await {
    Ok(rockets) => (rockets, None),
    Err(e) => (Vec::new(), Some(e))
  };

  // Ignite rockets so we can get Shutdown contexts for each of the instances.
  // There are two cases where the rockets vector will be empty:
```
︙
The shutdown path now reads; the first stopping checkpoint now carries a status message, and the `TermCtx` counter starts at 2 to account for it:

```rust
  } else {
    None
  };

  // Always send the first shutdown checkpoint here.  Either init() failed or
  // run retuned.  Either way, we're shutting down.
  sr.stopping(1, Some(super::SVCAPP_TERM_MSG.into()));

  // Now that the main application has terminated kill off any remaining
  // auxiliary tasks (read: signal waiters)
  ks.trigger();
  if (ks.finalize().await).is_err() {
    log::warn!("Attempted to finalize KillSwitch that wasn't triggered yet");
  }

  // Call the application's shutdown() function.
  let tctx = TermCtx {
    re,
    sr: Arc::clone(&sr),
    cnt: Arc::new(AtomicU32::new(2)) // 1 used above, so start at 2
  };
  //tctx.report(Some(super::SVCAPP_TERM_MSG.into()));
  let term_apperr = rt_handler.shutdown(tctx).await.err();

  // Inform the service subsystem that the the shutdown is complete
  sr.stopped();

  // There can be multiple failures, and we don't want to lose information
  // about what went wrong, so return an error context that can contain all
```
︙
Changes to src/rt/rttype/sync.rs.
︙

The init path now reads; the `ictx.report(…)` call, which both trace-logs and reports the first startup checkpoint, is new:

```rust
    // register a Ctrl+C handler.
    #[cfg(windows)]
    signals::sync_kill_to_event(tx2, test_mode)?;

    (tx, rx)
  };

  //sr.starting(1, Some(super::SVCAPP_INIT_MSG.into()));

  // Call server application's init() method, passing along a startup state
  // reporter object.
  let ictx = InitCtx {
    re: re.clone(),
    sr: Arc::clone(&sr),
    cnt: Arc::new(AtomicU32::new(1))
  };
  ictx.report(Some(super::SVCAPP_INIT_MSG.into()));
  let init_apperr = rt_handler.init(ictx).err();

  // If init() was successful, set the service's state to "started" and then
  // call the server application's run() method.
  let run_apperr = if init_apperr.is_none() {
    sr.started();
```
︙
The shutdown path now reads; the explicit first `sr.stopping(…)` call is commented out in favor of `tctx.report(…)`, which logs and reports in one step:

```rust
    ret
  } else {
    None
  };

  // Always send the first shutdown checkpoint here.  Either init() failed or
  // run returned.  Either way, we're shutting down.
  //sr.stopping(1, Some(super::SVCAPP_TERM_MSG.into()));

  // Call the application's shutdown() function, passing along a shutdown state
  // reporter object.
  let tctx = TermCtx {
    re,
    sr: Arc::clone(&sr),
    cnt: Arc::new(AtomicU32::new(1))
  };
  tctx.report(Some(super::SVCAPP_TERM_MSG.into()));
  let term_apperr = rt_handler.shutdown(tctx).err();

  // Inform the service subsystem that the the shutdown is complete
  sr.stopped();

  // There can be multiple failures, and we don't want to lose information
```
︙
Changes to src/rt/rttype/tokio.rs.
︙
```diff
 /// Internal `main()`-like routine for server applications that run the tokio
 /// runtime for `async` code.
 pub fn main<ApEr>(
   rtbldr: Option<runtime::Builder>,
   params: MainParams<ApEr>
 ) -> Result<(), CbErr<ApEr>>
 where
-  ApEr: Send
+  ApEr: Send + std::fmt::Debug
 {
   let rt = if let Some(mut bldr) = rtbldr {
     bldr.build()?
   } else {
     tokio::runtime::Runtime::new()?
   };
   rt.block_on(async_main(params))?;
```
︙
The async main body now reads. New in this revision: the `std::fmt::Debug` bound, the `ictx.report(…)` init checkpoint, error logging when `init()`/`run()`/`shutdown()` fail, and a status message on the first shutdown checkpoint:

```rust
    svcevt_handler,
    mut rt_handler,
    sr,
    svcevt_ch
  }: MainParams<ApEr>
) -> Result<(), CbErr<ApEr>>
where
  ApEr: Send + std::fmt::Debug
{
  let ks = KillSwitch::new();

  // If a SvcEvt receiver end-point was handed to us, then use it.  Otherwise
  // create our own and spawn the monitoring tasks that will generate events
  // for it.
  let (tx_svcevt, rx_svcevt) =
    if let Some((tx_svcevt, rx_svcevt)) = svcevt_ch {
      (tx_svcevt, rx_svcevt)
    } else {
      init_svc_channels(&ks)
    };

  // Call application's init() method.
  let ictx = InitCtx {
    re: re.clone(),
    sr: Arc::clone(&sr),
    cnt: Arc::new(AtomicU32::new(1))
  };
  ictx.report(Some(super::SVCAPP_INIT_MSG.into()));
  let init_apperr = rt_handler.init(ictx).await.err();
  if let Some(ref e) = init_apperr {
    tracing::error!("Service handler init() failed; {e:?}");
  }

  let run_apperr = if init_apperr.is_none() {
    sr.started();

    // Kick off service event monitoring thread before running main app
    // callback
    let jh = thread::Builder::new()
      .name("svcevt".into())
      .spawn(|| crate::rt::svcevt_thread(rx_svcevt, svcevt_handler))
      .unwrap();

    // Run the main service application callback.
    //
    // This is basically the service application's "main()".
    let ret = rt_handler.run(&re).await.err();
    if let Some(ref e) = ret {
      tracing::error!("Service handler run() failed; {e:?}");
    }

    // Shut down svcevent thread
    //
    // Tell it that an (implicit) shutdown event has occurred.
    // Duplicates don't matter, because once the first one is processed the
    // thread will terminate.
    let _ = tx_svcevt.send(SvcEvt::Shutdown(Demise::ReachedEnd));

    // Wait for service event monitoring thread to terminate
    let _ = task::spawn_blocking(|| jh.join()).await;

    ret
  } else {
    None
  };

  // Always send the first shutdown checkpoint here.  Either init() failed or
  // run retuned.  Either way, we're shutting down.
  sr.stopping(1, Some(super::SVCAPP_TERM_MSG.into()));

  // Now that the main application has terminated kill off any remaining
  // auxiliary tasks (read: signal waiters)
  ks.trigger();
  if (ks.finalize().await).is_err() {
    log::warn!("Attempted to finalize KillSwitch that wasn't triggered yet");
  }

  // Call the application's shutdown() function.
  let tctx = TermCtx {
    re,
    sr: Arc::clone(&sr),
    cnt: Arc::new(AtomicU32::new(2)) // 1 used above, so start at 2
  };
  //tctx.report(Some(super::SVCAPP_TERM_MSG.into()));
  let term_apperr = rt_handler.shutdown(tctx).await.err();
  if let Some(ref e) = term_apperr {
    tracing::error!("Service handler shutdown() failed; {e:?}");
  }

  // Inform the service subsystem that the the shutdown is complete
  sr.stopped();

  // There can be multiple failures, and we don't want to lose information
  // about what went wrong, so return an error context that can contain all
  // callback errors.
```
︙
Changes to src/rt/winsvc.rs.
︙

The affected region now reads:

```rust
    }
  }

  fn started(&self) {
    if let Err(e) = self.tx.send(ToSvcMsg::Started) {
      log::error!("Unable to send Started message; {}", e);
    }
  }

  fn stopping(&self, checkpoint: u32, status: Option<StateMsg>) {
    if let Err(e) = self.tx.send(ToSvcMsg::Stopping(checkpoint)) {
      log::error!("Unable to send Stopping message; {}", e);
    }
    if let Some(status) = status {
      log::trace!("Starting[{}] {}", checkpoint, status.as_ref());
    }
  }

  fn stopped(&self) {
    if let Err(e) = self.tx.send(ToSvcMsg::Stopped) {
      log::error!("Unable to send Stopped message; {}", e);
    }
  }
}

/// Run a service application under the Windows service subsystem.
///
/// # Errors
/// `Error::SubSystem` menas the service could not be started.  `Error::IO`
/// means the internal worker could not be launched.
#[allow(clippy::missing_panics_doc)]
pub fn run<ApEr>(svcname: &str, st: SrvAppRt<ApEr>) -> Result<(), CbErr<ApEr>>
where
  ApEr: Send + 'static + std::fmt::Debug
{
  #[cfg(feature = "wait-for-debugger")]
  {
    debugger::wait_for_then_break();
    debugger::output("Hello, debugger");
  }
```
︙
The join logic now inspects and propagates the service application thread's result — previously the error could get lost — and `srvapp_thread()` gains the `Debug` bound:

```rust
  // Register generated `ffi_service_main` with the system and start the
  // service, blocking this thread until the service is stopped.
  service_dispatcher::start(svcname, ffi_service_main)?;

  // The return value should be hard-coded to `Result<(), Error>`, so this
  // unwrap should be okay.
  match jh.join() {
    Ok(res) => {
      tracing::trace!("srvapp_thread::join res={res:?}");
      match res {
        Ok(()) => Ok(()),
        Err(be) => Err(be)
      }
    }
    Err(e) => {
      tracing::error!("srvapp_thread() could not be joined; {e:?}");
      let msg = format!("Unable to join srvapp_thread(); {e:?}");
      Err(CbErr::Lib(Error::Internal(msg)))
    }
  }
}

/// Internal server application wrapper thread.
fn srvapp_thread<ApEr>(
  st: SrvAppRt<ApEr>,
  svcname: String,
  rx_fromsvc: oneshot::Receiver<Result<HandshakeMsg, Error>>
) -> Result<(), CbErr<ApEr>>
where
  ApEr: Send + std::fmt::Debug
{
  // Wait for the service subsystem to report that it has initialized.
  // It passes along a channel end-point that can be used to send events to
  // the service manager.
  let Ok(res) = rx_fromsvc.blocking_recv() else {
    panic!("Unable to receive handshake");
  };
```
︙
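The effect of the join fix is observable at the `run()` boundary: a failure inside the service application thread now surfaces to the caller instead of being silently dropped. A sketch of what a caller can now rely on (`app` stands in for a constructed `SrvAppRt`, the error type is hypothetical, and `Error`'s `Display` output is assumed):

```rust
match winsvc::run("myservice", app) {
  Ok(()) => {}
  // Application callback errors are preserved across the thread join...
  Err(CbErr::App(e)) => eprintln!("service application failed: {e:?}"),
  // ...as are runtime/subsystem errors.
  Err(CbErr::Lib(e)) => eprintln!("qsu runtime error: {e}"),
}
```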
One line (apparently a blank separator) was removed between the match arms:

```rust
        re,
        svcevt_handler,
        rt_handler,
        sr,
        svcevt_ch: Some((tx_svcevt, rx_svcevt))
      }
    ),
    #[cfg(feature = "rocket")]
    SrvAppRt::Rocket {
      svcevt_handler,
      rt_handler
    } => rttype::rocket_main(rttype::RocketMainParams {
      re,
      svcevt_handler,
```
︙
Changes to www/changelog.md.
The changelog gains an empty [Unreleased] section and the 0.7.0 entry:

```markdown
# Change log

⚠️ indicates a breaking change.

## [Unreleased]

[Details](/vdiff?from=qsu-0.7.0&to=trunk)

### Added

### Changed

### Removed

---

## [0.7.0] - 2024-10-17

[Details](/vdiff?from=qsu-0.6.2&to=qsu-0.7.0)

### Changed

- Fix bug in winsvc rt implementation that caused error to get lost.
- ⚠️ Add `std::fmt::Debug` bounds to application errors at certain locations
  to assist in debugging.

---

## [0.6.2] - 2024-10-17

[Details](/vdiff?from=qsu-0.6.1&to=qsu-0.6.2)

### Added
```
︙
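For the ⚠️ entry, migration is usually a one-line `#[derive(Debug)]`. When an error type cannot simply derive it — or its debug output should not leak secrets into the runtime's new failure logs — a manual impl satisfies the bound just as well. A hypothetical example:

```rust
struct AppError {
  kind: &'static str,
  api_key: String // must not appear in logs
}

// Manual `Debug` impl: satisfies the new `ApEr: std::fmt::Debug` bounds
// while redacting the sensitive field from `{:?}` output.
impl std::fmt::Debug for AppError {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    f.debug_struct("AppError")
      .field("kind", &self.kind)
      .field("api_key", &"<redacted>")
      .finish()
  }
}
```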