diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f4510882dc..c69a6cb11c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -84,6 +84,22 @@ pub trait X // <- this trait have 3 generics, A, B, and C fn do_other_stuff(&self, a: A, b: B); // <- this is not ideal because it does not have C. } ``` +- Generic naming should be consistent. Do NOT use multiple names for the same generic; it just makes things more confusing. Do +```rust +pub struct X { + phantom: PhantomData, +} + +impl X {} +``` +But not, +```rust +pub struct X { + phantom: PhantomData, +} + +impl X {} // <- Do NOT do that, use A instead of B +``` - Always alphabetically order the type generics. Therefore, ```rust pub struct X {}; // <- Generics are alphabetically ordered diff --git a/libafl/src/events/broker_hooks/centralized.rs b/libafl/src/events/broker_hooks/centralized.rs index 5fbb437719..28381279b0 100644 --- a/libafl/src/events/broker_hooks/centralized.rs +++ b/libafl/src/events/broker_hooks/centralized.rs @@ -5,7 +5,6 @@ use core::{fmt::Debug, marker::PhantomData}; use libafl_bolts::{compress::GzipCompressor, llmp::LLMP_FLAG_COMPRESSED}; use libafl_bolts::{ llmp::{Flags, LlmpBrokerInner, LlmpHook, LlmpMsgHookResult, Tag}, - shmem::ShMemProvider, ClientId, Error, }; use serde::de::DeserializeOwned; @@ -21,14 +20,13 @@ pub struct CentralizedLlmpHook { phantom: PhantomData, } -impl LlmpHook for CentralizedLlmpHook +impl LlmpHook for CentralizedLlmpHook where I: DeserializeOwned, - SP: ShMemProvider, { fn on_new_message( &mut self, - _broker_inner: &mut LlmpBrokerInner, + _broker_inner: &mut LlmpBrokerInner, client_id: ClientId, msg_tag: &mut Tag, _msg_flags: &mut Flags, diff --git a/libafl/src/events/broker_hooks/centralized_multi_machine.rs b/libafl/src/events/broker_hooks/centralized_multi_machine.rs index e522c62831..08fdf2f7bb 100644 --- a/libafl/src/events/broker_hooks/centralized_multi_machine.rs +++ b/libafl/src/events/broker_hooks/centralized_multi_machine.rs @@ -11,7 +11,6 @@ use
libafl_bolts::llmp::LLMP_FLAG_COMPRESSED; use libafl_bolts::{ llmp::{Flags, LlmpBrokerInner, LlmpHook, LlmpMsgHookResult, Tag, LLMP_FLAG_FROM_MM}, ownedref::OwnedRef, - shmem::ShMemProvider, ClientId, Error, }; use serde::Serialize; @@ -149,16 +148,15 @@ where } } -impl LlmpHook for TcpMultiMachineLlmpSenderHook +impl LlmpHook for TcpMultiMachineLlmpSenderHook where I: Input, A: Clone + Display + ToSocketAddrs + Send + Sync + 'static, - SP: ShMemProvider, { /// check for received messages, and forward them alongside the incoming message to inner. fn on_new_message( &mut self, - _broker_inner: &mut LlmpBrokerInner, + _broker_inner: &mut LlmpBrokerInner, _client_id: ClientId, _msg_tag: &mut Tag, _msg_flags: &mut Flags, @@ -211,16 +209,15 @@ where } } -impl LlmpHook for TcpMultiMachineLlmpReceiverHook +impl LlmpHook for TcpMultiMachineLlmpReceiverHook where I: Input, A: Clone + Display + ToSocketAddrs + Send + Sync + 'static, - SP: ShMemProvider, { /// check for received messages, and forward them alongside the incoming message to inner. 
fn on_new_message( &mut self, - _broker_inner: &mut LlmpBrokerInner, + _broker_inner: &mut LlmpBrokerInner, _client_id: ClientId, _msg_tag: &mut Tag, _msg_flags: &mut Flags, diff --git a/libafl/src/events/broker_hooks/mod.rs b/libafl/src/events/broker_hooks/mod.rs index baac20f950..f65f1c131d 100644 --- a/libafl/src/events/broker_hooks/mod.rs +++ b/libafl/src/events/broker_hooks/mod.rs @@ -6,7 +6,6 @@ use core::marker::PhantomData; use libafl_bolts::{compress::GzipCompressor, llmp::LLMP_FLAG_COMPRESSED}; use libafl_bolts::{ llmp::{Flags, LlmpBrokerInner, LlmpHook, LlmpMsgHookResult, Tag}, - shmem::ShMemProvider, ClientId, }; use serde::de::DeserializeOwned; @@ -40,15 +39,14 @@ pub struct StdLlmpEventHook { phantom: PhantomData, } -impl LlmpHook for StdLlmpEventHook +impl LlmpHook for StdLlmpEventHook where I: DeserializeOwned, - SP: ShMemProvider, MT: Monitor, { fn on_new_message( &mut self, - _broker_inner: &mut LlmpBrokerInner, + _broker_inner: &mut LlmpBrokerInner, client_id: ClientId, msg_tag: &mut Tag, #[cfg(feature = "llmp_compression")] msg_flags: &mut Flags, diff --git a/libafl/src/events/centralized.rs b/libafl/src/events/centralized.rs index bc636b6bf9..0784d08c4f 100644 --- a/libafl/src/events/centralized.rs +++ b/libafl/src/events/centralized.rs @@ -18,7 +18,7 @@ use libafl_bolts::{ }; use libafl_bolts::{ llmp::{LlmpClient, LlmpClientDescription, Tag}, - shmem::{NopShMemProvider, ShMemProvider}, + shmem::{NopShMem, NopShMemProvider, ShMem, ShMemProvider}, tuples::{Handle, MatchNameRef}, ClientId, }; @@ -46,13 +46,10 @@ pub(crate) const _LLMP_TAG_TO_MAIN: Tag = Tag(0x3453453); /// A wrapper manager to implement a main-secondary architecture with another broker #[derive(Debug)] -pub struct CentralizedEventManager -where - SP: ShMemProvider, -{ +pub struct CentralizedEventManager { inner: EM, /// The centralized LLMP client for inter process communication - client: LlmpClient, + client: LlmpClient, #[cfg(feature = "llmp_compression")] compressor: 
GzipCompressor, time_ref: Option>, @@ -61,7 +58,16 @@ where phantom: PhantomData<(I, S)>, } -impl CentralizedEventManager, NopShMemProvider> { +impl + CentralizedEventManager< + NopEventManager, + (), + NopInput, + NopState, + NopShMem, + NopShMemProvider, + > +{ /// Creates a builder for [`CentralizedEventManager`] #[must_use] pub fn builder() -> CentralizedEventManagerBuilder { @@ -95,16 +101,13 @@ impl CentralizedEventManagerBuilder { } /// Creates a new [`CentralizedEventManager`]. - pub fn build_from_client( + pub fn build_from_client( self, inner: EM, hooks: EMH, - client: LlmpClient, + client: LlmpClient, time_obs: Option>, - ) -> Result, Error> - where - SP: ShMemProvider, - { + ) -> Result, Error> { Ok(CentralizedEventManager { inner, hooks, @@ -121,16 +124,17 @@ impl CentralizedEventManagerBuilder { /// /// If the port is not yet bound, it will act as a broker; otherwise, it /// will act as a client. - pub fn build_on_port( + pub fn build_on_port( self, inner: EM, hooks: EMH, shmem_provider: SP, port: u16, time_obs: Option>, - ) -> Result, Error> + ) -> Result, Error> where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { let client = LlmpClient::create_attach_to_tcp(shmem_provider, port)?; Self::build_from_client(self, inner, hooks, client, time_obs) @@ -138,42 +142,43 @@ impl CentralizedEventManagerBuilder { /// If a client respawns, it may reuse the existing connection, previously /// stored by [`LlmpClient::to_env()`]. 
- pub fn build_existing_client_from_env( + pub fn build_existing_client_from_env( self, inner: EM, hooks: EMH, shmem_provider: SP, env_name: &str, time_obs: Option>, - ) -> Result, Error> + ) -> Result, Error> where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { let client = LlmpClient::on_existing_from_env(shmem_provider, env_name)?; Self::build_from_client(self, inner, hooks, client, time_obs) } /// Create an existing client from description - pub fn existing_client_from_description( + pub fn existing_client_from_description( self, inner: EM, hooks: EMH, shmem_provider: SP, description: &LlmpClientDescription, time_obs: Option>, - ) -> Result, Error> + ) -> Result, Error> where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { let client = LlmpClient::existing_client_from_description(shmem_provider, description)?; Self::build_from_client(self, inner, hooks, client, time_obs) } } -impl AdaptiveSerializer for CentralizedEventManager +impl AdaptiveSerializer for CentralizedEventManager where EM: AdaptiveSerializer, - SP: ShMemProvider, { fn serialization_time(&self) -> Duration { self.inner.serialization_time() @@ -206,13 +211,14 @@ where } } -impl EventFirer for CentralizedEventManager +impl EventFirer for CentralizedEventManager where EM: HasEventManagerId + EventFirer, EMH: EventManagerHooksTuple, - SP: ShMemProvider, S: Stoppable, I: Input, + SHM: ShMem, + SP: ShMemProvider, { fn should_send(&self) -> bool { self.inner.should_send() @@ -262,10 +268,11 @@ where } } -impl EventRestarter for CentralizedEventManager +impl EventRestarter for CentralizedEventManager where - SP: ShMemProvider, EM: EventRestarter, + SHM: ShMem, + SP: ShMemProvider, { #[inline] fn on_restart(&mut self, state: &mut S) -> Result<(), Error> { @@ -275,10 +282,10 @@ where } } -impl CanSerializeObserver for CentralizedEventManager +impl CanSerializeObserver + for CentralizedEventManager where EM: AdaptiveSerializer, - SP: ShMemProvider, OT: Serialize + MatchNameRef, { fn 
serialize_observers(&mut self, observers: &OT) -> Result>, Error> { @@ -291,10 +298,11 @@ where } } -impl ManagerExit for CentralizedEventManager +impl ManagerExit for CentralizedEventManager where EM: ManagerExit, - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { fn send_exiting(&mut self) -> Result<(), Error> { self.client.sender_mut().send_exiting()?; @@ -308,15 +316,17 @@ where } } -impl EventProcessor for CentralizedEventManager +impl EventProcessor + for CentralizedEventManager where E: HasObservers, E::Observers: DeserializeOwned, EM: EventProcessor + HasEventManagerId + EventFirer, EMH: EventManagerHooksTuple, - S: Stoppable, I: Input, - SP: ShMemProvider, + S: Stoppable, + SHM: ShMem, + SP: ShMemProvider, Z: ExecutionProcessor + EvaluatorObservers, { fn process(&mut self, fuzzer: &mut Z, state: &mut S, executor: &mut E) -> Result { @@ -336,13 +346,14 @@ where } } -impl ProgressReporter for CentralizedEventManager +impl ProgressReporter for CentralizedEventManager where EM: EventFirer + HasEventManagerId, EMH: EventManagerHooksTuple, - S: HasExecutions + HasMetadata + HasLastReportTime + Stoppable + MaybeHasClientPerfMonitor, I: Input, - SP: ShMemProvider, + S: HasExecutions + HasMetadata + HasLastReportTime + Stoppable + MaybeHasClientPerfMonitor, + SHM: ShMem, + SP: ShMemProvider, { fn maybe_report_progress( &mut self, @@ -357,19 +368,19 @@ where } } -impl HasEventManagerId for CentralizedEventManager +impl HasEventManagerId for CentralizedEventManager where EM: HasEventManagerId, - SP: ShMemProvider, { fn mgr_id(&self) -> EventManagerId { self.inner.mgr_id() } } -impl CentralizedEventManager +impl CentralizedEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Describe the client event manager's LLMP parts in a restorable fashion pub fn describe(&self) -> Result { @@ -388,13 +399,14 @@ where } } -impl CentralizedEventManager +impl CentralizedEventManager where EM: HasEventManagerId + EventFirer, EMH: 
EventManagerHooksTuple, - S: Stoppable, I: Input, - SP: ShMemProvider, + S: Stoppable, + SHM: ShMem, + SP: ShMemProvider, { #[cfg(feature = "llmp_compression")] fn forward_to_main(&mut self, event: &Event) -> Result<(), Error> { diff --git a/libafl/src/events/launcher.rs b/libafl/src/events/launcher.rs index 5fb4695bfa..f3ff7f38ad 100644 --- a/libafl/src/events/launcher.rs +++ b/libafl/src/events/launcher.rs @@ -14,6 +14,7 @@ use core::{ fmt::{self, Debug, Formatter}, + marker::PhantomData, num::NonZeroUsize, time::Duration, }; @@ -21,7 +22,7 @@ use std::{net::SocketAddr, string::String}; use libafl_bolts::{ core_affinity::{CoreId, Cores}, - shmem::ShMemProvider, + shmem::{ShMem, ShMemProvider}, tuples::{tuple_list, Handle}, }; use serde::{de::DeserializeOwned, Deserialize, Serialize}; @@ -129,7 +130,7 @@ impl ClientDescription { /// /// Will hide child output, unless the settings indicate otherwise, or the `LIBAFL_DEBUG_OUTPUT` env variable is set. #[derive(TypedBuilder)] -pub struct Launcher<'a, CF, MT, SP> { +pub struct Launcher<'a, CF, MT, SHM, SP> { /// The `ShmemProvider` to use shmem_provider: SP, /// The monitor instance to use @@ -183,9 +184,11 @@ pub struct Launcher<'a, CF, MT, SP> { /// Tell the manager to serialize or not the state on restart #[builder(default = LlmpShouldSaveState::OnRestart)] serialize_state: LlmpShouldSaveState, + #[builder(default = PhantomData)] + phantom: PhantomData, } -impl Debug for Launcher<'_, CF, MT, SP> { +impl Debug for Launcher<'_, CF, MT, SHM, SP> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { let mut dbg_struct = f.debug_struct("Launcher"); dbg_struct @@ -205,10 +208,9 @@ impl Debug for Launcher<'_, CF, MT, SP> { } } -impl Launcher<'_, CF, MT, SP> +impl Launcher<'_, CF, MT, SHM, SP> where MT: Monitor + Clone, - SP: ShMemProvider, { /// Launch the broker and the clients and fuzz #[cfg(any(windows, not(feature = "fork"), all(unix, feature = "fork")))] @@ -216,20 +218,23 @@ where where CF: FnOnce( Option, - 
LlmpRestartingEventManager<(), I, S, SP>, + LlmpRestartingEventManager<(), I, S, SHM, SP>, ClientDescription, ) -> Result<(), Error>, I: DeserializeOwned, S: DeserializeOwned + Serialize, + SHM: ShMem, + SP: ShMemProvider, { Self::launch_with_hooks(self, tuple_list!()) } } -impl Launcher<'_, CF, MT, SP> +impl Launcher<'_, CF, MT, SHM, SP> where MT: Monitor + Clone, - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Launch the broker and the clients and fuzz with a user-supplied hook #[cfg(all(unix, feature = "fork"))] @@ -240,7 +245,7 @@ where EMH: EventManagerHooksTuple + Clone + Copy, CF: FnOnce( Option, - LlmpRestartingEventManager, + LlmpRestartingEventManager, ClientDescription, ) -> Result<(), Error>, { @@ -312,7 +317,7 @@ where ClientDescription::new(index, overcommit_id, bind_to); // Fuzzer client. keeps retrying the connection to broker till the broker starts - let builder = RestartingMgr::::builder() + let builder = RestartingMgr::::builder() .shmem_provider(self.shmem_provider.clone()) .broker_port(self.broker_port) .kind(ManagerKind::Client { @@ -339,7 +344,7 @@ where log::info!("I am broker!!."); // TODO we don't want always a broker here, think about using different laucher process to spawn different configurations - let builder = RestartingMgr::::builder() + let builder = RestartingMgr::::builder() .shmem_provider(self.shmem_provider.clone()) .monitor(Some(self.monitor.clone())) .broker_port(self.broker_port) @@ -385,7 +390,7 @@ where where CF: FnOnce( Option, - LlmpRestartingEventManager, + LlmpRestartingEventManager, ClientDescription, ) -> Result<(), Error>, EMH: EventManagerHooksTuple + Clone + Copy, @@ -401,7 +406,7 @@ where let client_description = ClientDescription::from_safe_string(&core_conf); // the actual client. 
do the fuzzing - let builder = RestartingMgr::::builder() + let builder = RestartingMgr::::builder() .shmem_provider(self.shmem_provider.clone()) .broker_port(self.broker_port) .kind(ManagerKind::Client { @@ -502,7 +507,7 @@ where if self.spawn_broker { log::info!("I am broker!!."); - let builder = RestartingMgr::::builder() + let builder = RestartingMgr::::builder() .shmem_provider(self.shmem_provider.clone()) .monitor(Some(self.monitor.clone())) .broker_port(self.broker_port) @@ -541,7 +546,7 @@ where /// This is for centralized, the 4th argument of the closure should mean if this is the main node. #[cfg(all(unix, feature = "fork"))] #[derive(TypedBuilder)] -pub struct CentralizedLauncher<'a, CF, MF, MT, SP> { +pub struct CentralizedLauncher<'a, CF, MF, MT, SHM, SP> { /// The `ShmemProvider` to use shmem_provider: SP, /// The monitor instance to use @@ -601,10 +606,12 @@ pub struct CentralizedLauncher<'a, CF, MF, MT, SP> { /// Tell the manager to serialize or not the state on restart #[builder(default = LlmpShouldSaveState::OnRestart)] serialize_state: LlmpShouldSaveState, + #[builder(default = PhantomData)] + phantom: PhantomData, } #[cfg(all(unix, feature = "fork"))] -impl Debug for CentralizedLauncher<'_, CF, MF, MT, SP> { +impl Debug for CentralizedLauncher<'_, CF, MF, MT, SHM, SP> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("Launcher") .field("configuration", &self.configuration) @@ -620,13 +626,14 @@ impl Debug for CentralizedLauncher<'_, CF, MF, MT, SP> { } /// The standard inner manager of centralized -pub type StdCentralizedInnerMgr = LlmpRestartingEventManager<(), I, S, SP>; +pub type StdCentralizedInnerMgr = LlmpRestartingEventManager<(), I, S, SHM, SP>; #[cfg(all(unix, feature = "fork"))] -impl CentralizedLauncher<'_, CF, MF, MT, SP> +impl CentralizedLauncher<'_, CF, MF, MT, SHM, SP> where MT: Monitor + Clone + 'static, - SP: ShMemProvider + 'static, + SHM: ShMem + 'static, + SP: ShMemProvider + 'static, { /// Launch a standard Centralized-based fuzzer pub fn
launch(&mut self) -> Result<(), Error> @@ -635,19 +642,19 @@ where I: DeserializeOwned + Input + Send + Sync + 'static, CF: FnOnce( Option, - CentralizedEventManager, (), I, S, SP>, + CentralizedEventManager, (), I, S, SHM, SP>, ClientDescription, ) -> Result<(), Error>, MF: FnOnce( Option, - CentralizedEventManager, (), I, S, SP>, + CentralizedEventManager, (), I, S, SHM, SP>, ClientDescription, ) -> Result<(), Error>, { let restarting_mgr_builder = |centralized_launcher: &Self, client_description: ClientDescription| { // Fuzzer client. keeps retrying the connection to broker till the broker starts - let builder = RestartingMgr::<(), I, MT, S, SP>::builder() + let builder = RestartingMgr::<(), I, MT, S, SHM, SP>::builder() .shmem_provider(centralized_launcher.shmem_provider.clone()) .broker_port(centralized_launcher.broker_port) .kind(ManagerKind::Client { client_description }) @@ -665,10 +672,11 @@ where } #[cfg(all(unix, feature = "fork"))] -impl CentralizedLauncher<'_, CF, MF, MT, SP> +impl CentralizedLauncher<'_, CF, MF, MT, SHM, SP> where MT: Monitor + Clone + 'static, - SP: ShMemProvider + 'static, + SHM: ShMem + 'static, + SP: ShMemProvider + 'static, { /// Launch a Centralized-based fuzzer. /// - `main_inner_mgr_builder` will be called to build the inner manager of the main node. 
@@ -682,13 +690,13 @@ where I: Input + Send + Sync + 'static, CF: FnOnce( Option, - CentralizedEventManager, + CentralizedEventManager, ClientDescription, ) -> Result<(), Error>, EMB: FnOnce(&Self, ClientDescription) -> Result<(Option, EM), Error>, MF: FnOnce( Option, - CentralizedEventManager, // No broker_hooks for centralized EM + CentralizedEventManager, // No broker_hooks for centralized EM ClientDescription, ) -> Result<(), Error>, { diff --git a/libafl/src/events/llmp/mgr.rs b/libafl/src/events/llmp/mgr.rs index 8cf5379f47..c7eeceff95 100644 --- a/libafl/src/events/llmp/mgr.rs +++ b/libafl/src/events/llmp/mgr.rs @@ -4,7 +4,7 @@ #[cfg(feature = "std")] use alloc::string::ToString; use alloc::vec::Vec; -use core::{marker::PhantomData, time::Duration}; +use core::{fmt::Debug, marker::PhantomData, time::Duration}; #[cfg(feature = "std")] use std::net::TcpStream; @@ -18,7 +18,7 @@ use libafl_bolts::{ use libafl_bolts::{ current_time, llmp::{LlmpClient, LlmpClientDescription, LLMP_FLAG_FROM_MM}, - shmem::{NopShMemProvider, ShMemProvider}, + shmem::{NopShMem, NopShMemProvider, ShMem, ShMemProvider}, tuples::Handle, ClientId, }; @@ -57,9 +57,10 @@ const INITIAL_EVENT_BUFFER_SIZE: usize = 1024 * 4; /// An `EventManager` that forwards all events to other attached fuzzers on shared maps or via tcp, /// using low-level message passing, `llmp`. -pub struct LlmpEventManager +pub struct LlmpEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// We only send 1 testcase for every `throttle` second pub(crate) throttle: Option, @@ -67,7 +68,7 @@ where last_sent: Duration, hooks: EMH, /// The LLMP client for inter process communication - llmp: LlmpClient, + llmp: LlmpClient, #[cfg(feature = "llmp_compression")] compressor: GzipCompressor, /// The configuration defines this specific fuzzer. 
@@ -83,7 +84,7 @@ where event_buffer: Vec, } -impl LlmpEventManager<(), NopState, NopInput, NopShMemProvider> { +impl LlmpEventManager<(), NopState, NopInput, NopShMem, NopShMemProvider> { /// Creates a builder for [`LlmpEventManager`] #[must_use] pub fn builder() -> LlmpEventManagerBuilder<()> { @@ -132,14 +133,15 @@ impl LlmpEventManagerBuilder { } /// Create a manager from a raw LLMP client - pub fn build_from_client( + pub fn build_from_client( self, - llmp: LlmpClient, + llmp: LlmpClient, configuration: EventConfig, time_ref: Option>, - ) -> Result, Error> + ) -> Result, Error> where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { Ok(LlmpEventManager { throttle: self.throttle, @@ -162,15 +164,16 @@ impl LlmpEventManagerBuilder { /// Create an LLMP event manager on a port. /// It expects a broker to exist on this port. #[cfg(feature = "std")] - pub fn build_on_port( + pub fn build_on_port( self, shmem_provider: SP, port: u16, configuration: EventConfig, time_ref: Option>, - ) -> Result, Error> + ) -> Result, Error> where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { let llmp = LlmpClient::create_attach_to_tcp(shmem_provider, port)?; Self::build_from_client(self, llmp, configuration, time_ref) @@ -179,30 +182,32 @@ impl LlmpEventManagerBuilder { /// If a client respawns, it may reuse the existing connection, previously /// stored by [`LlmpClient::to_env()`]. 
#[cfg(feature = "std")] - pub fn build_existing_client_from_env( + pub fn build_existing_client_from_env( self, shmem_provider: SP, env_name: &str, configuration: EventConfig, time_ref: Option>, - ) -> Result, Error> + ) -> Result, Error> where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { let llmp = LlmpClient::on_existing_from_env(shmem_provider, env_name)?; Self::build_from_client(self, llmp, configuration, time_ref) } /// Create an existing client from description - pub fn build_existing_client_from_description( + pub fn build_existing_client_from_description( self, shmem_provider: SP, description: &LlmpClientDescription, configuration: EventConfig, time_ref: Option>, - ) -> Result, Error> + ) -> Result, Error> where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { let llmp = LlmpClient::existing_client_from_description(shmem_provider, description)?; Self::build_from_client(self, llmp, configuration, time_ref) @@ -210,19 +215,21 @@ impl LlmpEventManagerBuilder { } #[cfg(feature = "std")] -impl CanSerializeObserver for LlmpEventManager +impl CanSerializeObserver for LlmpEventManager where - SP: ShMemProvider, OT: Serialize + MatchNameRef, + SHM: ShMem, + SP: ShMemProvider, { fn serialize_observers(&mut self, observers: &OT) -> Result>, Error> { serialize_observers_adaptive::(self, observers, 2, 80) } } -impl AdaptiveSerializer for LlmpEventManager +impl AdaptiveSerializer for LlmpEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { fn serialization_time(&self) -> Duration { self.serialization_time @@ -255,9 +262,10 @@ where } } -impl core::fmt::Debug for LlmpEventManager +impl Debug for LlmpEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { let mut debug_struct = f.debug_struct("LlmpEventManager"); @@ -272,9 +280,10 @@ where } } -impl Drop for LlmpEventManager +impl Drop for LlmpEventManager where - SP: ShMemProvider, + 
SHM: ShMem, + SP: ShMemProvider, { /// LLMP clients will have to wait until their pages are mapped by somebody. fn drop(&mut self) { @@ -282,9 +291,10 @@ where } } -impl LlmpEventManager +impl LlmpEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Calling this function will tell the llmp broker that this client is exiting /// This should be called from the restarter not from the actual fuzzer client @@ -332,9 +342,10 @@ where } } -impl LlmpEventManager +impl LlmpEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { // Handle arriving events in the client fn handle_in_client( @@ -407,7 +418,11 @@ where } } -impl LlmpEventManager { +impl LlmpEventManager +where + SHM: ShMem, + SP: ShMemProvider, +{ /// Send information that this client is exiting. /// The other side may free up all allocated memory. /// We are no longer allowed to send anything afterwards. @@ -416,18 +431,12 @@ impl LlmpEventManager { } } -impl EventFirer for LlmpEventManager +impl EventFirer for LlmpEventManager where I: Serialize, - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { - fn should_send(&self) -> bool { - if let Some(throttle) = self.throttle { - current_time() - self.last_sent > throttle - } else { - true - } - } fn fire(&mut self, _state: &mut S, event: Event) -> Result<(), Error> { #[cfg(feature = "llmp_compression")] let flags = LLMP_FLAG_INITIALIZED; @@ -474,46 +483,56 @@ where self.last_sent = current_time(); Ok(()) } - fn configuration(&self) -> EventConfig { self.configuration } + + fn should_send(&self) -> bool { + if let Some(throttle) = self.throttle { + current_time() - self.last_sent > throttle + } else { + true + } + } } -impl EventRestarter for LlmpEventManager +impl EventRestarter for LlmpEventManager where - SP: ShMemProvider, S: HasCurrentStageId, + SHM: ShMem, + SP: ShMemProvider, { fn on_restart(&mut self, state: &mut S) -> Result<(), Error> { std_on_restart(self, state) } } -impl ManagerExit for 
LlmpEventManager +impl ManagerExit for LlmpEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { + fn send_exiting(&mut self) -> Result<(), Error> { + self.llmp.sender_mut().send_exiting() + } + /// The LLMP client needs to wait until a broker has mapped all pages before shutting down. /// Otherwise, the OS may already have removed the shared maps. fn await_restart_safe(&mut self) { // wait until we can drop the message safely. self.llmp.await_safe_to_unmap_blocking(); } - - fn send_exiting(&mut self) -> Result<(), Error> { - self.llmp.sender_mut().send_exiting() - } } -impl EventProcessor for LlmpEventManager +impl EventProcessor for LlmpEventManager where E: HasObservers, E::Observers: DeserializeOwned, - S: HasImported + Stoppable, EMH: EventManagerHooksTuple, I: DeserializeOwned + Input, - SP: ShMemProvider, + S: HasImported + Stoppable, + SHM: ShMem, + SP: ShMemProvider, Z: ExecutionProcessor + EvaluatorObservers, { fn process(&mut self, fuzzer: &mut Z, state: &mut S, executor: &mut E) -> Result { @@ -521,14 +541,15 @@ where let self_id = self.llmp.sender().id(); let mut count = 0; while let Some((client_id, tag, flags, msg)) = self.llmp.recv_buf_with_flags()? { - assert!( - tag != _LLMP_TAG_EVENT_TO_BROKER, + assert_ne!( + tag, _LLMP_TAG_EVENT_TO_BROKER, "EVENT_TO_BROKER parcel should not have arrived in the client!" 
); if client_id == self_id { continue; } + #[cfg(not(feature = "llmp_compression"))] let event_bytes = msg; #[cfg(feature = "llmp_compression")] @@ -540,6 +561,7 @@ where } else { msg }; + let event: Event = postcard::from_bytes(event_bytes)?; log::debug!("Received event in normal llmp {}", event.name_detailed()); @@ -560,11 +582,12 @@ where } } -impl ProgressReporter for LlmpEventManager +impl ProgressReporter for LlmpEventManager where - S: HasExecutions + HasLastReportTime + HasMetadata + MaybeHasClientPerfMonitor, - SP: ShMemProvider, I: Serialize, + S: HasExecutions + HasLastReportTime + HasMetadata + MaybeHasClientPerfMonitor, + SHM: ShMem, + SP: ShMemProvider, { fn maybe_report_progress( &mut self, @@ -579,9 +602,10 @@ where } } -impl HasEventManagerId for LlmpEventManager +impl HasEventManagerId for LlmpEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Gets the id assigned to this staterestorer. fn mgr_id(&self) -> EventManagerId { diff --git a/libafl/src/events/llmp/mod.rs b/libafl/src/events/llmp/mod.rs index 2d76ba52dd..879e3803a2 100644 --- a/libafl/src/events/llmp/mod.rs +++ b/libafl/src/events/llmp/mod.rs @@ -9,7 +9,7 @@ use libafl_bolts::{ }; use libafl_bolts::{ llmp::{LlmpClient, LlmpClientDescription, Tag}, - shmem::{NopShMemProvider, ShMemProvider}, + shmem::{NopShMem, NopShMemProvider, ShMem, ShMemProvider}, ClientId, }; use serde::{de::DeserializeOwned, Serialize}; @@ -82,12 +82,9 @@ impl LlmpShouldSaveState { } /// A manager-like llmp client that converts between input types -pub struct LlmpEventConverter -where - SP: ShMemProvider, -{ +pub struct LlmpEventConverter { throttle: Option, - llmp: LlmpClient, + llmp: LlmpClient, last_sent: Duration, #[cfg(feature = "llmp_compression")] compressor: GzipCompressor, @@ -102,6 +99,7 @@ impl NopInputConverter, NopInputConverter, NopState, + NopShMem, NopShMemProvider, > { @@ -134,15 +132,12 @@ impl LlmpEventConverterBuilder { } /// Create a event converter from a raw llmp 
client - pub fn build_from_client( + pub fn build_from_client( self, - llmp: LlmpClient, + llmp: LlmpClient, converter: Option, converter_back: Option, - ) -> Result, Error> - where - SP: ShMemProvider, - { + ) -> Result, Error> { Ok(LlmpEventConverter { throttle: self.throttle, last_sent: Duration::from_secs(0), @@ -157,15 +152,16 @@ impl LlmpEventConverterBuilder { /// Create a client from port and the input converters #[cfg(feature = "std")] - pub fn build_on_port( + pub fn build_on_port( self, shmem_provider: SP, port: u16, converter: Option, converter_back: Option, - ) -> Result, Error> + ) -> Result, Error> where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { let llmp = LlmpClient::create_attach_to_tcp(shmem_provider, port)?; Ok(LlmpEventConverter { @@ -182,15 +178,16 @@ impl LlmpEventConverterBuilder { /// If a client respawns, it may reuse the existing connection, previously stored by [`LlmpClient::to_env()`]. #[cfg(feature = "std")] - pub fn build_existing_client_from_env( + pub fn build_existing_client_from_env( self, shmem_provider: SP, env_name: &str, converter: Option, converter_back: Option, - ) -> Result, Error> + ) -> Result, Error> where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { let llmp = LlmpClient::on_existing_from_env(shmem_provider, env_name)?; Ok(LlmpEventConverter { @@ -206,11 +203,12 @@ impl LlmpEventConverterBuilder { } } -impl Debug for LlmpEventConverter +impl Debug for LlmpEventConverter where - SP: ShMemProvider, IC: Debug, ICB: Debug, + SHM: Debug, + SP: Debug, { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { let mut debug_struct = f.debug_struct("LlmpEventConverter"); @@ -226,9 +224,10 @@ where } } -impl LlmpEventConverter +impl LlmpEventConverter where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { // TODO other new_* routines @@ -315,8 +314,8 @@ where let self_id = self.llmp.sender().id(); let mut count = 0; while let Some((client_id, tag, _flags, msg)) = 
self.llmp.recv_buf_with_flags()? { - assert!( - tag != _LLMP_TAG_EVENT_TO_BROKER, + assert_ne!( + tag, _LLMP_TAG_EVENT_TO_BROKER, "EVENT_TO_BROKER parcel should not have arrived in the client!" ); @@ -344,11 +343,12 @@ where } } -impl EventFirer for LlmpEventConverter +impl EventFirer for LlmpEventConverter where IC: InputConverter, - SP: ShMemProvider, IC::To: Serialize, + SHM: ShMem, + SP: ShMemProvider, { fn should_send(&self) -> bool { if let Some(throttle) = self.throttle { diff --git a/libafl/src/events/llmp/restarting.rs b/libafl/src/events/llmp/restarting.rs index 8c8d5f31b3..e439f1b23d 100644 --- a/libafl/src/events/llmp/restarting.rs +++ b/libafl/src/events/llmp/restarting.rs @@ -22,7 +22,7 @@ use libafl_bolts::{ core_affinity::CoreId, llmp::{Broker, LlmpBroker, LlmpConnection}, os::CTRL_C_EXIT, - shmem::{ShMemProvider, StdShMemProvider}, + shmem::{ShMem, ShMemProvider, StdShMem, StdShMemProvider}, staterestore::StateRestorer, tuples::{tuple_list, Handle, MatchNameRef}, }; @@ -52,21 +52,23 @@ use crate::{ /// A manager that can restart on the fly, storing states in-between (in `on_restart`) #[derive(Debug)] -pub struct LlmpRestartingEventManager +pub struct LlmpRestartingEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// The embedded LLMP event manager - llmp_mgr: LlmpEventManager, + llmp_mgr: LlmpEventManager, /// The staterestorer to serialize the state for the next runner - staterestorer: StateRestorer, + staterestorer: StateRestorer, /// Decide if the state restorer must save the serialized state save_state: LlmpShouldSaveState, } -impl AdaptiveSerializer for LlmpRestartingEventManager +impl AdaptiveSerializer for LlmpRestartingEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { fn serialization_time(&self) -> Duration { self.llmp_mgr.serialization_time() @@ -99,11 +101,12 @@ where } } -impl ProgressReporter for LlmpRestartingEventManager +impl ProgressReporter for LlmpRestartingEventManager where 
- S: HasExecutions + HasLastReportTime + HasMetadata + Serialize + MaybeHasClientPerfMonitor, - SP: ShMemProvider, I: Serialize, + S: HasExecutions + HasLastReportTime + HasMetadata + Serialize + MaybeHasClientPerfMonitor, + SHM: ShMem, + SP: ShMemProvider, { fn maybe_report_progress( &mut self, @@ -118,16 +121,13 @@ where } } -impl EventFirer for LlmpRestartingEventManager +impl EventFirer for LlmpRestartingEventManager where I: Serialize, S: Serialize, - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { - fn should_send(&self) -> bool { - as EventFirer>::should_send(&self.llmp_mgr) - } - fn fire(&mut self, state: &mut S, event: Event) -> Result<(), Error> { // Check if we are going to crash in the event, in which case we store our current state for the next runner self.llmp_mgr.fire(state, event)?; @@ -136,25 +136,32 @@ where } fn configuration(&self) -> EventConfig { - as EventFirer>::configuration(&self.llmp_mgr) + as EventFirer>::configuration(&self.llmp_mgr) + } + + fn should_send(&self) -> bool { + as EventFirer>::should_send(&self.llmp_mgr) } } #[cfg(feature = "std")] -impl CanSerializeObserver for LlmpRestartingEventManager +impl CanSerializeObserver + for LlmpRestartingEventManager where - SP: ShMemProvider, OT: Serialize + MatchNameRef, + SHM: ShMem, + SP: ShMemProvider, { fn serialize_observers(&mut self, observers: &OT) -> Result>, Error> { serialize_observers_adaptive::(self, observers, 2, 80) } } -impl EventRestarter for LlmpRestartingEventManager +impl EventRestarter for LlmpRestartingEventManager where - SP: ShMemProvider, S: Serialize + HasCurrentStageId, + SHM: ShMem, + SP: ShMemProvider, { /// Reset the single page (we reuse it over and over from pos 0), then send the current state to the next runner. 
fn on_restart(&mut self, state: &mut S) -> Result<(), Error> { @@ -177,9 +184,10 @@ where } } -impl ManagerExit for LlmpRestartingEventManager +impl ManagerExit for LlmpRestartingEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { fn send_exiting(&mut self) -> Result<(), Error> { self.staterestorer.send_exiting(); @@ -196,16 +204,18 @@ where } } -impl EventProcessor for LlmpRestartingEventManager +impl EventProcessor + for LlmpRestartingEventManager where - EMH: EventManagerHooksTuple, E: HasObservers, E::Observers: DeserializeOwned, - S: HasImported + Stoppable + Serialize, + EMH: EventManagerHooksTuple, I: DeserializeOwned + Input, - SP: ShMemProvider, - Z: ExecutionProcessor, I, E::Observers, S> - + EvaluatorObservers, I, S>, + S: HasImported + Stoppable + Serialize, + SHM: ShMem, + SP: ShMemProvider, + Z: ExecutionProcessor, I, E::Observers, S> + + EvaluatorObservers, I, S>, { fn process(&mut self, fuzzer: &mut Z, state: &mut S, executor: &mut E) -> Result { let res = self.llmp_mgr.process(fuzzer, state, executor)?; @@ -218,9 +228,10 @@ where } } -impl HasEventManagerId for LlmpRestartingEventManager +impl HasEventManagerId for LlmpRestartingEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { fn mgr_id(&self) -> EventManagerId { self.llmp_mgr.mgr_id() @@ -233,15 +244,16 @@ const _ENV_FUZZER_RECEIVER: &str = "_AFL_ENV_FUZZER_RECEIVER"; /// The llmp (2 way) connection from a fuzzer to the broker (broadcasting all other fuzzer messages) const _ENV_FUZZER_BROKER_CLIENT_INITIAL: &str = "_AFL_ENV_FUZZER_BROKER_CLIENT"; -impl LlmpRestartingEventManager +impl LlmpRestartingEventManager where - SP: ShMemProvider, S: Serialize, + SHM: ShMem, + SP: ShMemProvider, { /// Create a new runner, the executed child doing the actual fuzzing. 
pub fn new( - llmp_mgr: LlmpEventManager, - staterestorer: StateRestorer, + llmp_mgr: LlmpEventManager, + staterestorer: StateRestorer, ) -> Self { Self { llmp_mgr, @@ -252,8 +264,8 @@ where /// Create a new runner specifying if it must save the serialized state on restart. pub fn with_save_state( - llmp_mgr: LlmpEventManager, - staterestorer: StateRestorer, + llmp_mgr: LlmpEventManager, + staterestorer: StateRestorer, save_state: LlmpShouldSaveState, ) -> Self { Self { @@ -264,12 +276,12 @@ where } /// Get the staterestorer - pub fn staterestorer(&self) -> &StateRestorer { + pub fn staterestorer(&self) -> &StateRestorer { &self.staterestorer } /// Get the staterestorer (mutable) - pub fn staterestorer_mut(&mut self) -> &mut StateRestorer { + pub fn staterestorer_mut(&mut self) -> &mut StateRestorer { &mut self.staterestorer } @@ -311,7 +323,7 @@ pub fn setup_restarting_mgr_std( ) -> Result< ( Option, - LlmpRestartingEventManager<(), I, S, StdShMemProvider>, + LlmpRestartingEventManager<(), I, S, StdShMem, StdShMemProvider>, ), Error, > @@ -344,7 +356,7 @@ pub fn setup_restarting_mgr_std_adaptive( ) -> Result< ( Option, - LlmpRestartingEventManager<(), I, S, StdShMemProvider>, + LlmpRestartingEventManager<(), I, S, StdShMem, StdShMemProvider>, ), Error, > @@ -370,7 +382,7 @@ where /// `restarter` and `runner`, that can be used on systems both with and without `fork` support. The /// `restarter` will start a new process each time the child crashes or times out. #[derive(TypedBuilder, Debug)] -pub struct RestartingMgr { +pub struct RestartingMgr { /// The shared memory provider to use for the broker or client spawned by the restarting /// manager. 
shmem_provider: SP, @@ -404,27 +416,28 @@ pub struct RestartingMgr { #[builder(default = None)] time_ref: Option>, #[builder(setter(skip), default = PhantomData)] - phantom_data: PhantomData<(EMH, I, S)>, + phantom_data: PhantomData<(EMH, I, S, SHM)>, } #[expect(clippy::type_complexity, clippy::too_many_lines)] -impl RestartingMgr +impl RestartingMgr where EMH: EventManagerHooksTuple + Copy + Clone, - SP: ShMemProvider, - S: Serialize + DeserializeOwned, I: DeserializeOwned, MT: Monitor + Clone, + S: Serialize + DeserializeOwned, + SHM: ShMem, + SP: ShMemProvider, { /// Launch the broker and the clients and fuzz pub fn launch( &mut self, - ) -> Result<(Option, LlmpRestartingEventManager), Error> { + ) -> Result<(Option, LlmpRestartingEventManager), Error> { // We start ourselves as child process to actually fuzz let (staterestorer, new_shmem_provider, core_id) = if std::env::var(_ENV_FUZZER_SENDER) .is_err() { - let broker_things = |mut broker: LlmpBroker<_, SP>, remote_broker_addr| { + let broker_things = |mut broker: LlmpBroker<_, SHM, SP>, remote_broker_addr| { if let Some(remote_broker_addr) = remote_broker_addr { log::info!("B2b: Connecting to {:?}", &remote_broker_addr); broker.inner_mut().connect_b2b(remote_broker_addr)?; @@ -464,13 +477,14 @@ where return Err(Error::shutting_down()); } LlmpConnection::IsClient { client } => { - let mgr: LlmpEventManager = LlmpEventManager::builder() - .hooks(self.hooks) - .build_from_client( - client, - self.configuration, - self.time_ref.clone(), - )?; + let mgr: LlmpEventManager = + LlmpEventManager::builder() + .hooks(self.hooks) + .build_from_client( + client, + self.configuration, + self.time_ref.clone(), + )?; (mgr, None) } } @@ -513,11 +527,11 @@ where // First, create a channel from the current fuzzer to the next to store state between restarts. 
#[cfg(unix)] - let staterestorer: StateRestorer = + let staterestorer: StateRestorer = StateRestorer::new(self.shmem_provider.new_shmem(256 * 1024 * 1024)?); #[cfg(not(unix))] - let staterestorer: StateRestorer = + let staterestorer: StateRestorer = StateRestorer::new(self.shmem_provider.new_shmem(256 * 1024 * 1024)?); // Store the information to a map. staterestorer.write_to_env(_ENV_FUZZER_SENDER)?; @@ -680,7 +694,7 @@ mod tests { use libafl_bolts::{ llmp::{LlmpClient, LlmpSharedMap}, rands::StdRand, - shmem::{ShMemProvider, StdShMemProvider}, + shmem::{ShMemProvider, StdShMem, StdShMemProvider}, staterestore::StateRestorer, tuples::{tuple_list, Handled}, ClientId, @@ -769,7 +783,7 @@ mod tests { let mut stages = tuple_list!(StdMutationalStage::new(mutator)); // First, create a channel from the current fuzzer to the next to store state between restarts. - let mut staterestorer = StateRestorer::::new( + let mut staterestorer = StateRestorer::::new( shmem_provider.new_shmem(256 * 1024 * 1024).unwrap(), ); diff --git a/libafl/src/events/simple.rs b/libafl/src/events/simple.rs index 475517be4b..dde1305bec 100644 --- a/libafl/src/events/simple.rs +++ b/libafl/src/events/simple.rs @@ -11,9 +11,9 @@ use libafl_bolts::os::startable_self; use libafl_bolts::os::unix_signals::setup_signal_handler; #[cfg(all(feature = "std", feature = "fork", unix))] use libafl_bolts::os::{fork, ForkResult}; -use libafl_bolts::ClientId; #[cfg(feature = "std")] use libafl_bolts::{os::CTRL_C_EXIT, shmem::ShMemProvider, staterestore::StateRestorer}; +use libafl_bolts::{shmem::ShMem, ClientId}; #[cfg(feature = "std")] use serde::de::DeserializeOwned; use serde::Serialize; @@ -280,23 +280,19 @@ where /// `restarter` will start a new process each time the child crashes or times out. 
#[cfg(feature = "std")] #[derive(Debug)] -pub struct SimpleRestartingEventManager -where - SP: ShMemProvider, -{ +pub struct SimpleRestartingEventManager { /// The actual simple event mgr inner: SimpleEventManager, /// [`StateRestorer`] for restarts - staterestorer: StateRestorer, + staterestorer: StateRestorer, } #[cfg(feature = "std")] -impl EventFirer for SimpleRestartingEventManager +impl EventFirer for SimpleRestartingEventManager where I: Debug, MT: Monitor, S: Stoppable, - SP: ShMemProvider, { fn should_send(&self) -> bool { true @@ -308,9 +304,10 @@ where } #[cfg(feature = "std")] -impl EventRestarter for SimpleRestartingEventManager +impl EventRestarter for SimpleRestartingEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, S: HasCurrentStageId + Serialize, MT: Monitor, { @@ -329,9 +326,9 @@ where } #[cfg(feature = "std")] -impl CanSerializeObserver for SimpleRestartingEventManager +impl CanSerializeObserver + for SimpleRestartingEventManager where - SP: ShMemProvider, OT: Serialize, { fn serialize_observers(&mut self, observers: &OT) -> Result>, Error> { @@ -340,9 +337,10 @@ where } #[cfg(feature = "std")] -impl ManagerExit for SimpleRestartingEventManager +impl ManagerExit for SimpleRestartingEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { fn send_exiting(&mut self) -> Result<(), Error> { self.staterestorer.send_exiting(); @@ -354,12 +352,14 @@ where } #[cfg(feature = "std")] -impl EventProcessor for SimpleRestartingEventManager +impl EventProcessor + for SimpleRestartingEventManager where I: Debug, MT: Monitor, - SP: ShMemProvider, S: Stoppable, + SHM: ShMem, + SP: ShMemProvider, { fn process(&mut self, fuzzer: &mut Z, state: &mut S, executor: &mut E) -> Result { self.inner.process(fuzzer, state, executor) @@ -371,11 +371,10 @@ where } #[cfg(feature = "std")] -impl ProgressReporter for SimpleRestartingEventManager +impl ProgressReporter for SimpleRestartingEventManager where I: Debug, MT: Monitor, - 
SP: ShMemProvider, S: HasExecutions + HasMetadata + HasLastReportTime + Stoppable + MaybeHasClientPerfMonitor, { fn maybe_report_progress( @@ -392,25 +391,23 @@ where } #[cfg(feature = "std")] -impl HasEventManagerId for SimpleRestartingEventManager -where - SP: ShMemProvider, -{ +impl HasEventManagerId for SimpleRestartingEventManager { fn mgr_id(&self) -> EventManagerId { self.inner.mgr_id() } } #[cfg(feature = "std")] -impl SimpleRestartingEventManager +impl SimpleRestartingEventManager where I: Debug, MT: Monitor, S: Stoppable, - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Creates a new [`SimpleEventManager`]. - fn launched(monitor: MT, staterestorer: StateRestorer) -> Self { + fn launched(monitor: MT, staterestorer: StateRestorer) -> Self { Self { staterestorer, inner: SimpleEventManager::new(monitor), @@ -429,10 +426,10 @@ where let mut staterestorer = if std::env::var(_ENV_FUZZER_SENDER).is_err() { // First, create a place to store state in, for restarts. #[cfg(unix)] - let staterestorer: StateRestorer = + let staterestorer: StateRestorer = StateRestorer::new(shmem_provider.new_shmem(256 * 1024 * 1024)?); #[cfg(not(unix))] - let staterestorer: StateRestorer = + let staterestorer: StateRestorer = StateRestorer::new(shmem_provider.new_shmem(256 * 1024 * 1024)?); //let staterestorer = { LlmpSender::new(shmem_provider.clone(), 0, false)? 
}; diff --git a/libafl/src/events/tcp.rs b/libafl/src/events/tcp.rs index cbb7b4e932..0e60d2d6ae 100644 --- a/libafl/src/events/tcp.rs +++ b/libafl/src/events/tcp.rs @@ -23,9 +23,10 @@ use libafl_bolts::os::unix_signals::setup_signal_handler; #[cfg(all(feature = "fork", unix))] use libafl_bolts::os::{fork, ForkResult}; use libafl_bolts::{ + bolts_prelude::ShMem, core_affinity::CoreId, os::CTRL_C_EXIT, - shmem::{ShMemProvider, StdShMemProvider}, + shmem::{ShMemProvider, StdShMem, StdShMemProvider}, staterestore::StateRestorer, tuples::tuple_list, ClientId, @@ -790,24 +791,20 @@ impl HasEventManagerId for TcpEventManager { /// A manager that can restart on the fly, storing states in-between (in `on_restart`) #[derive(Debug)] -pub struct TcpRestartingEventManager -where - SP: ShMemProvider, -{ +pub struct TcpRestartingEventManager { /// The embedded TCP event manager tcp_mgr: TcpEventManager, /// The staterestorer to serialize the state for the next runner - staterestorer: StateRestorer, + staterestorer: StateRestorer, /// Decide if the state restorer must save the serialized state save_state: bool, } -impl ProgressReporter for TcpRestartingEventManager +impl ProgressReporter for TcpRestartingEventManager where EMH: EventManagerHooksTuple, S: HasMetadata + HasExecutions + HasLastReportTime + MaybeHasClientPerfMonitor, I: Serialize, - SP: ShMemProvider, { fn maybe_report_progress( &mut self, @@ -822,11 +819,10 @@ where } } -impl EventFirer for TcpRestartingEventManager +impl EventFirer for TcpRestartingEventManager where EMH: EventManagerHooksTuple, I: Serialize, - SP: ShMemProvider, { fn should_send(&self) -> bool { self.tcp_mgr.should_send() @@ -842,30 +838,32 @@ where } } -impl ManagerExit for TcpRestartingEventManager +impl ManagerExit for TcpRestartingEventManager where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { - /// The tcp client needs to wait until a broker mapped all pages, before shutting down. 
- /// Otherwise, the OS may already have removed the shared maps, - #[inline] - fn await_restart_safe(&mut self) { - self.tcp_mgr.await_restart_safe(); - } - fn send_exiting(&mut self) -> Result<(), Error> { self.staterestorer.send_exiting(); // Also inform the broker that we are about to exit. // This way, the broker can clean up the pages, and eventually exit. self.tcp_mgr.send_exiting() } + + /// The tcp client needs to wait until a broker mapped all pages, before shutting down. + /// Otherwise, the OS may already have removed the shared maps, + #[inline] + fn await_restart_safe(&mut self) { + self.tcp_mgr.await_restart_safe(); + } } -impl EventRestarter for TcpRestartingEventManager +impl EventRestarter for TcpRestartingEventManager where EMH: EventManagerHooksTuple, S: HasExecutions + HasCurrentStageId + Serialize, - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Reset the single page (we reuse it over and over from pos 0), then send the current state to the next runner. 
fn on_restart(&mut self, state: &mut S) -> Result<(), Error> { @@ -884,7 +882,8 @@ where } } -impl EventProcessor for TcpRestartingEventManager +impl EventProcessor + for TcpRestartingEventManager where E: HasObservers + Executor, I, S, Z>, for<'a> E::Observers: Deserialize<'a>, @@ -892,7 +891,8 @@ where EMH: EventManagerHooksTuple, I: DeserializeOwned, S: HasExecutions + HasMetadata + HasImported + Stoppable, - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, Z: ExecutionProcessor, I, E::Observers, S> + EvaluatorObservers, I, S>, { @@ -905,10 +905,7 @@ where } } -impl HasEventManagerId for TcpRestartingEventManager -where - SP: ShMemProvider, -{ +impl HasEventManagerId for TcpRestartingEventManager { fn mgr_id(&self) -> EventManagerId { self.tcp_mgr.mgr_id() } @@ -920,13 +917,12 @@ const _ENV_FUZZER_RECEIVER: &str = "_AFL_ENV_FUZZER_RECEIVER"; /// The tcp (2 way) connection from a fuzzer to the broker (broadcasting all other fuzzer messages) const _ENV_FUZZER_BROKER_CLIENT_INITIAL: &str = "_AFL_ENV_FUZZER_BROKER_CLIENT"; -impl TcpRestartingEventManager +impl TcpRestartingEventManager where EMH: EventManagerHooksTuple, - SP: ShMemProvider, { /// Create a new runner, the executed child doing the actual fuzzing. - pub fn new(tcp_mgr: TcpEventManager, staterestorer: StateRestorer) -> Self { + pub fn new(tcp_mgr: TcpEventManager, staterestorer: StateRestorer) -> Self { Self { tcp_mgr, staterestorer, @@ -937,7 +933,7 @@ where /// Create a new runner specifying if it must save the serialized state on restart. 
pub fn with_save_state( tcp_mgr: TcpEventManager, - staterestorer: StateRestorer, + staterestorer: StateRestorer, save_state: bool, ) -> Self { Self { @@ -948,12 +944,12 @@ where } /// Get the staterestorer - pub fn staterestorer(&self) -> &StateRestorer { + pub fn staterestorer(&self) -> &StateRestorer { &self.staterestorer } /// Get the staterestorer (mutable) - pub fn staterestorer_mut(&mut self) -> &mut StateRestorer { + pub fn staterestorer_mut(&mut self) -> &mut StateRestorer { &mut self.staterestorer } } @@ -984,7 +980,7 @@ pub fn setup_restarting_mgr_tcp( ) -> Result< ( Option, - TcpRestartingEventManager<(), I, S, StdShMemProvider>, + TcpRestartingEventManager<(), I, S, StdShMem, StdShMemProvider>, ), Error, > @@ -1009,12 +1005,7 @@ where /// `restarter` and `runner`, that can be used on systems both with and without `fork` support. The /// `restarter` will start a new process each time the child crashes or times out. #[derive(TypedBuilder, Debug)] -pub struct TcpRestartingMgr -where - MT: Monitor, - S: DeserializeOwned, - SP: ShMemProvider + 'static, -{ +pub struct TcpRestartingMgr { /// The shared memory provider to use for the broker or client spawned by the restarting /// manager. 
shmem_provider: SP, @@ -1046,22 +1037,23 @@ where /// The hooks for `handle_in_client` hooks: EMH, #[builder(setter(skip), default = PhantomData)] - phantom_data: PhantomData<(I, S)>, + phantom_data: PhantomData<(I, S, SHM)>, } #[expect(clippy::type_complexity, clippy::too_many_lines)] -impl TcpRestartingMgr +impl TcpRestartingMgr where EMH: EventManagerHooksTuple + Copy + Clone, I: Input, MT: Monitor + Clone, - SP: ShMemProvider, S: HasExecutions + HasMetadata + HasImported + DeserializeOwned + Stoppable, + SHM: ShMem, + SP: ShMemProvider, { /// Launch the restarting manager pub fn launch( &mut self, - ) -> Result<(Option, TcpRestartingEventManager), Error> { + ) -> Result<(Option, TcpRestartingEventManager), Error> { // We start ourself as child process to actually fuzz let (staterestorer, _new_shmem_provider, core_id) = if env::var(_ENV_FUZZER_SENDER).is_err() { @@ -1139,7 +1131,7 @@ where // First, create a channel from the current fuzzer to the next to store state between restarts. #[cfg(unix)] - let staterestorer: StateRestorer = + let staterestorer: StateRestorer = StateRestorer::new(self.shmem_provider.new_shmem(256 * 1024 * 1024)?); #[cfg(not(unix))] diff --git a/libafl/src/executors/forkserver.rs b/libafl/src/executors/forkserver.rs index 825990a940..a5eb7ad189 100644 --- a/libafl/src/executors/forkserver.rs +++ b/libafl/src/executors/forkserver.rs @@ -22,7 +22,7 @@ use libafl_bolts::{ fs::{get_unique_std_input_file, InputFile}, os::{dup2, pipes::Pipe}, ownedref::OwnedSlice, - shmem::{ShMem, ShMemProvider, UnixShMemProvider}, + shmem::{ShMem, ShMemProvider, UnixShMem, UnixShMemProvider}, tuples::{Handle, Handled, MatchNameRef, Prepend, RefIndexable}, AsSlice, AsSliceMut, Truncate, }; @@ -606,10 +606,7 @@ impl Forkserver { /// /// Shared memory feature is also available, but you have to set things up in your code. /// Please refer to AFL++'s docs. 
-pub struct ForkserverExecutor -where - SP: ShMemProvider, -{ +pub struct ForkserverExecutor { target: OsString, args: Vec, input_file: InputFile, @@ -617,7 +614,7 @@ where uses_shmem_testcase: bool, forkserver: Forkserver, observers: OT, - map: Option, + map: Option, phantom: PhantomData<(I, S)>, map_size: Option, min_input_size: usize, @@ -628,11 +625,11 @@ where crash_exitcode: Option, } -impl Debug for ForkserverExecutor +impl Debug for ForkserverExecutor where TC: Debug, OT: Debug, - SP: ShMemProvider, + SHM: Debug, { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("ForkserverExecutor") @@ -648,21 +645,24 @@ where } } -impl ForkserverExecutor<(), (), (), UnixShMemProvider, ()> { +impl ForkserverExecutor<(), (), (), UnixShMem, ()> { /// Builder for `ForkserverExecutor` #[must_use] - pub fn builder( - ) -> ForkserverExecutorBuilder<'static, NopTargetBytesConverter, UnixShMemProvider> - { + pub fn builder() -> ForkserverExecutorBuilder< + 'static, + NopTargetBytesConverter, + UnixShMem, + UnixShMemProvider, + > { ForkserverExecutorBuilder::new() } } -impl ForkserverExecutor +impl ForkserverExecutor where OT: ObserversTuple, - SP: ShMemProvider, TC: TargetBytesConverter, + SHM: ShMem, { /// The `target` binary that's going to run. pub fn target(&self) -> &OsString { @@ -804,7 +804,7 @@ where /// The builder for `ForkserverExecutor` #[derive(Debug)] #[expect(clippy::struct_excessive_bools)] -pub struct ForkserverExecutorBuilder<'a, TC, SP> { +pub struct ForkserverExecutorBuilder<'a, TC, SHM, SP> { program: Option, arguments: Vec, envs: Vec<(OsString, OsString)>, @@ -825,11 +825,13 @@ pub struct ForkserverExecutorBuilder<'a, TC, SP> { asan_obs: Option>, crash_exitcode: Option, target_bytes_converter: TC, + phantom: PhantomData, } -impl<'a, TC, SP> ForkserverExecutorBuilder<'a, TC, SP> +impl<'a, TC, SHM, SP> ForkserverExecutorBuilder<'a, TC, SHM, SP> where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Builds `ForkserverExecutor`. 
/// This Forkserver will attempt to provide inputs over shared mem when `shmem_provider` is given. @@ -840,10 +842,9 @@ where pub fn build( mut self, observers: OT, - ) -> Result, Error> + ) -> Result, Error> where OT: ObserversTuple, - SP: ShMemProvider, TC: TargetBytesConverter, { let (forkserver, input_file, map) = self.build_helper()?; @@ -905,13 +906,12 @@ where mut self, mut map_observer: A, other_observers: OT, - ) -> Result, Error> + ) -> Result, Error> where A: Observer + AsMut, I: Input + HasTargetBytes, MO: MapObserver + Truncate, // TODO maybe enforce Entry = u8 for the cov map OT: ObserversTuple + Prepend, - SP: ShMemProvider, { let (forkserver, input_file, map) = self.build_helper()?; @@ -965,10 +965,7 @@ where } #[expect(clippy::pedantic)] - fn build_helper(&mut self) -> Result<(Forkserver, InputFile, Option), Error> - where - SP: ShMemProvider, - { + fn build_helper(&mut self) -> Result<(Forkserver, InputFile, Option), Error> { let input_filename = match &self.input_filename { Some(name) => name.clone(), None => { @@ -1042,7 +1039,7 @@ where fn initialize_forkserver( &mut self, status: i32, - map: Option<&SP::ShMem>, + map: Option<&SHM>, forkserver: &mut Forkserver, ) -> Result<(), Error> { let keep = status; @@ -1140,7 +1137,7 @@ where fn initialize_old_forkserver( &mut self, status: i32, - map: Option<&SP::ShMem>, + map: Option<&SHM>, forkserver: &mut Forkserver, ) -> Result<(), Error> { if status & FS_OPT_ENABLED == FS_OPT_ENABLED && status & FS_OPT_MAPSIZE == FS_OPT_MAPSIZE { @@ -1468,7 +1465,9 @@ where } } -impl<'a> ForkserverExecutorBuilder<'a, NopTargetBytesConverter, UnixShMemProvider> { +impl<'a> + ForkserverExecutorBuilder<'a, NopTargetBytesConverter, UnixShMem, UnixShMemProvider> +{ /// Creates a new `AFL`-style [`ForkserverExecutor`] with the given target, arguments and observers. /// This is the builder for `ForkserverExecutor` /// This Forkserver will attempt to provide inputs over shared mem when `shmem_provider` is given. 
@@ -1476,8 +1475,12 @@ impl<'a> ForkserverExecutorBuilder<'a, NopTargetBytesConverter, Unix /// in case no input file is specified. /// If `debug_child` is set, the child will print to `stdout`/`stderr`. #[must_use] - pub fn new( - ) -> ForkserverExecutorBuilder<'a, NopTargetBytesConverter, UnixShMemProvider> { + pub fn new() -> ForkserverExecutorBuilder< + 'a, + NopTargetBytesConverter, + UnixShMem, + UnixShMemProvider, + > { ForkserverExecutorBuilder { program: None, arguments: vec![], @@ -1499,16 +1502,17 @@ impl<'a> ForkserverExecutorBuilder<'a, NopTargetBytesConverter, Unix asan_obs: None, crash_exitcode: None, target_bytes_converter: NopTargetBytesConverter::new(), + phantom: PhantomData, } } } -impl<'a, TC> ForkserverExecutorBuilder<'a, TC, UnixShMemProvider> { +impl<'a, TC> ForkserverExecutorBuilder<'a, TC, UnixShMem, UnixShMemProvider> { /// Shmem provider for forkserver's shared memory testcase feature. - pub fn shmem_provider( + pub fn shmem_provider( self, shmem_provider: &'a mut SP, - ) -> ForkserverExecutorBuilder<'a, TC, SP> { + ) -> ForkserverExecutorBuilder<'a, TC, SHM, SP> { ForkserverExecutorBuilder { // Set the new provider shmem_provider: Some(shmem_provider), @@ -1532,16 +1536,17 @@ impl<'a, TC> ForkserverExecutorBuilder<'a, TC, UnixShMemProvider> { asan_obs: self.asan_obs, crash_exitcode: self.crash_exitcode, target_bytes_converter: self.target_bytes_converter, + phantom: PhantomData, } } } -impl<'a, TC, SP> ForkserverExecutorBuilder<'a, TC, SP> { +impl<'a, TC, SHM, SP> ForkserverExecutorBuilder<'a, TC, SHM, SP> { /// Shmem provider for forkserver's shared memory testcase feature. 
pub fn target_bytes_converter>( self, target_bytes_converter: TC2, - ) -> ForkserverExecutorBuilder<'a, TC2, SP> { + ) -> ForkserverExecutorBuilder<'a, TC2, SHM, SP> { ForkserverExecutorBuilder { // Set the new provider shmem_provider: self.shmem_provider, @@ -1565,24 +1570,30 @@ impl<'a, TC, SP> ForkserverExecutorBuilder<'a, TC, SP> { asan_obs: self.asan_obs, crash_exitcode: self.crash_exitcode, target_bytes_converter, + phantom: PhantomData, } } } impl Default - for ForkserverExecutorBuilder<'_, NopTargetBytesConverter, UnixShMemProvider> + for ForkserverExecutorBuilder< + '_, + NopTargetBytesConverter, + UnixShMem, + UnixShMemProvider, + > { fn default() -> Self { Self::new() } } -impl Executor for ForkserverExecutor +impl Executor for ForkserverExecutor where OT: ObserversTuple, - SP: ShMemProvider, S: HasExecutions, TC: TargetBytesConverter, + SHM: ShMem, { #[inline] fn run_target( @@ -1596,10 +1607,7 @@ where } } -impl HasTimeout for ForkserverExecutor -where - SP: ShMemProvider, -{ +impl HasTimeout for ForkserverExecutor { #[inline] fn set_timeout(&mut self, timeout: Duration) { self.timeout = TimeSpec::from_duration(timeout); @@ -1611,10 +1619,9 @@ where } } -impl HasObservers for ForkserverExecutor +impl HasObservers for ForkserverExecutor where OT: ObserversTuple, - SP: ShMemProvider, { type Observers = OT; diff --git a/libafl/src/executors/inprocess/inner.rs b/libafl/src/executors/inprocess/inner.rs index f5297ba65a..f94317e0cd 100644 --- a/libafl/src/executors/inprocess/inner.rs +++ b/libafl/src/executors/inprocess/inner.rs @@ -74,6 +74,7 @@ where /// # Safety /// This function sets a bunch of raw pointers in global variables, reused in other parts of /// the code. 
+ // TODO: Remove EM and Z from function bound and add it to struct instead to avoid possible type confusion #[inline] pub unsafe fn enter_target( &mut self, diff --git a/libafl/src/executors/inprocess/stateful.rs b/libafl/src/executors/inprocess/stateful.rs index c2f347952a..943afffcae 100644 --- a/libafl/src/executors/inprocess/stateful.rs +++ b/libafl/src/executors/inprocess/stateful.rs @@ -27,8 +27,8 @@ use crate::{ /// The process executor simply calls a target function, as mutable reference to a closure /// The internal state of the executor is made available to the harness. -pub type StatefulInProcessExecutor<'a, H, I, OT, S, ES> = - StatefulGenericInProcessExecutor; +pub type StatefulInProcessExecutor<'a, ES, H, I, OT, S> = + StatefulGenericInProcessExecutor; /// The process executor simply calls a target function, as boxed `FnMut` trait object /// The internal state of the executor is made available to the harness. @@ -44,7 +44,7 @@ pub type OwnedInProcessExecutor = StatefulGenericInProcessExecutor /// The inmem executor simply calls a target function, then returns afterwards. /// The harness can access the internal state of the executor. 
-pub struct StatefulGenericInProcessExecutor { +pub struct StatefulGenericInProcessExecutor { /// The harness function, being executed for each fuzzing loop execution harness_fn: HB, /// The state used as argument of the harness @@ -54,7 +54,7 @@ pub struct StatefulGenericInProcessExecutor { phantom: PhantomData<(ES, *const H)>, } -impl Debug for StatefulGenericInProcessExecutor +impl Debug for StatefulGenericInProcessExecutor where OT: Debug, { @@ -67,7 +67,7 @@ where } impl Executor - for StatefulGenericInProcessExecutor + for StatefulGenericInProcessExecutor where H: FnMut(&mut ES, &mut S, &I) -> ExitKind + Sized, HB: BorrowMut, @@ -99,7 +99,7 @@ where } impl HasObservers - for StatefulGenericInProcessExecutor + for StatefulGenericInProcessExecutor where H: FnMut(&mut ES, &mut S, &I) -> ExitKind + Sized, HB: BorrowMut, @@ -118,7 +118,7 @@ where } } -impl<'a, H, I, OT, S, ES> StatefulInProcessExecutor<'a, H, I, OT, S, ES> +impl<'a, H, I, OT, S, ES> StatefulInProcessExecutor<'a, ES, H, I, OT, S> where H: FnMut(&mut ES, &mut S, &I) -> ExitKind + Sized, OT: ObserversTuple, @@ -224,7 +224,7 @@ where } } -impl StatefulGenericInProcessExecutor { +impl StatefulGenericInProcessExecutor { /// The executor state given to the harness pub fn exposed_executor_state(&self) -> &ES { &self.exposed_executor_state @@ -236,7 +236,7 @@ impl StatefulGenericInProcessExecutor StatefulGenericInProcessExecutor +impl StatefulGenericInProcessExecutor where H: FnMut(&mut ES, &mut S, &I) -> ExitKind + Sized, HB: BorrowMut, @@ -364,7 +364,7 @@ where } impl HasInProcessHooks - for StatefulGenericInProcessExecutor + for StatefulGenericInProcessExecutor { /// the timeout handler #[inline] diff --git a/libafl/src/executors/inprocess_fork/inner.rs b/libafl/src/executors/inprocess_fork/inner.rs index b585bac18b..e5ca44b785 100644 --- a/libafl/src/executors/inprocess_fork/inner.rs +++ b/libafl/src/executors/inprocess_fork/inner.rs @@ -32,7 +32,7 @@ use crate::{ }; /// Inner state of 
GenericInProcessExecutor-like structures. -pub struct GenericInProcessForkExecutorInner { +pub struct GenericInProcessForkExecutorInner { pub(super) hooks: (InChildProcessHooks, HT), pub(super) shmem_provider: SP, pub(super) observers: OT, @@ -40,10 +40,11 @@ pub struct GenericInProcessForkExecutorInner { pub(super) itimerspec: libc::itimerspec, #[cfg(all(unix, not(target_os = "linux")))] pub(super) itimerval: Itimerval, - pub(super) phantom: PhantomData<(I, S, EM, Z)>, + pub(super) phantom: PhantomData<(EM, I, S, SHM, Z)>, } -impl Debug for GenericInProcessForkExecutorInner +impl Debug + for GenericInProcessForkExecutorInner where HT: Debug, OT: Debug, @@ -104,11 +105,11 @@ fn parse_itimerval(timeout: Duration) -> Itimerval { } } -impl GenericInProcessForkExecutorInner +impl GenericInProcessForkExecutorInner where HT: ExecutorHooksTuple, - SP: ShMemProvider, OT: ObserversTuple, + SP: ShMemProvider, { pub(super) unsafe fn pre_run_target_child( &mut self, @@ -195,7 +196,7 @@ where } } -impl GenericInProcessForkExecutorInner +impl GenericInProcessForkExecutorInner where HT: ExecutorHooksTuple, OT: ObserversTuple, @@ -284,8 +285,8 @@ where } } -impl HasObservers - for GenericInProcessForkExecutorInner +impl HasObservers + for GenericInProcessForkExecutorInner { type Observers = OT; diff --git a/libafl/src/executors/inprocess_fork/mod.rs b/libafl/src/executors/inprocess_fork/mod.rs index e0968334af..8601232d1e 100644 --- a/libafl/src/executors/inprocess_fork/mod.rs +++ b/libafl/src/executors/inprocess_fork/mod.rs @@ -6,7 +6,7 @@ use core::{ use libafl_bolts::{ os::unix_signals::{ucontext_t, Signal}, - shmem::ShMemProvider, + shmem::{ShMem, ShMemProvider}, tuples::{tuple_list, RefIndexable}, }; use libc::siginfo_t; @@ -39,10 +39,10 @@ pub mod stateful; /// /// On Linux, when fuzzing a Rust target, set `panic = "abort"` in your `Cargo.toml` (see [Cargo documentation](https://doc.rust-lang.org/cargo/reference/profiles.html#panic)). 
/// Else panics can not be caught by `LibAFL`. -pub type InProcessForkExecutor<'a, H, I, OT, S, SP, EM, Z> = - GenericInProcessForkExecutor<'a, H, (), I, OT, S, SP, EM, Z>; +pub type InProcessForkExecutor<'a, EM, H, I, OT, S, SHM, SP, Z> = + GenericInProcessForkExecutor<'a, EM, H, (), I, OT, S, SHM, SP, Z>; -impl<'a, H, I, OT, S, SP, EM, Z> InProcessForkExecutor<'a, H, I, OT, S, SP, EM, Z> +impl<'a, H, I, OT, S, SHM, SP, EM, Z> InProcessForkExecutor<'a, EM, H, I, OT, S, SHM, SP, Z> where OT: ObserversTuple, { @@ -73,13 +73,13 @@ where /// /// On Linux, when fuzzing a Rust target, set `panic = "abort"` in your `Cargo.toml` (see [Cargo documentation](https://doc.rust-lang.org/cargo/reference/profiles.html#panic)). /// Else panics can not be caught by `LibAFL`. -pub struct GenericInProcessForkExecutor<'a, H, HT, I, OT, S, SP, EM, Z> { +pub struct GenericInProcessForkExecutor<'a, EM, H, HT, I, OT, S, SHM, SP, Z> { harness_fn: &'a mut H, - inner: GenericInProcessForkExecutorInner, + inner: GenericInProcessForkExecutorInner, } -impl Debug - for GenericInProcessForkExecutor<'_, H, HT, I, OT, S, SP, EM, Z> +impl Debug + for GenericInProcessForkExecutor<'_, EM, H, HT, I, OT, S, SHM, SP, Z> where HT: Debug, OT: Debug, @@ -102,12 +102,13 @@ where } } -impl Executor - for GenericInProcessForkExecutor<'_, H, HT, I, OT, S, SP, EM, Z> +impl Executor + for GenericInProcessForkExecutor<'_, EM, H, HT, I, OT, S, SHM, SP, Z> where H: FnMut(&I) -> ExitKind + Sized, S: HasExecutions, - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, HT: ExecutorHooksTuple, OT: ObserversTuple, { @@ -141,7 +142,8 @@ where } } -impl<'a, H, HT, I, OT, S, SP, EM, Z> GenericInProcessForkExecutor<'a, H, HT, I, OT, S, SP, EM, Z> +impl<'a, H, HT, I, OT, S, SHM, SP, EM, Z> + GenericInProcessForkExecutor<'a, EM, H, HT, I, OT, S, SHM, SP, Z> where HT: ExecutorHooksTuple, OT: ObserversTuple, @@ -186,8 +188,8 @@ where { } } -impl HasObservers - for GenericInProcessForkExecutor<'_, H, HT, I, OT, S, SP, EM, Z> 
+impl HasObservers + for GenericInProcessForkExecutor<'_, EM, H, HT, I, OT, S, SHM, SP, Z> { type Observers = OT; #[inline] diff --git a/libafl/src/executors/inprocess_fork/stateful.rs b/libafl/src/executors/inprocess_fork/stateful.rs index 0a1f1df141..bbb6cb68c7 100644 --- a/libafl/src/executors/inprocess_fork/stateful.rs +++ b/libafl/src/executors/inprocess_fork/stateful.rs @@ -4,12 +4,11 @@ //! The harness can access internal state. use core::{ fmt::{self, Debug, Formatter}, - marker::PhantomData, time::Duration, }; use libafl_bolts::{ - shmem::ShMemProvider, + shmem::{ShMem, ShMemProvider}, tuples::{tuple_list, RefIndexable}, }; use nix::unistd::{fork, ForkResult}; @@ -25,12 +24,15 @@ use crate::{ }; /// The `StatefulInProcessForkExecutor` with no user hooks -pub type StatefulInProcessForkExecutor<'a, H, I, OT, S, SP, ES, EM, Z> = - StatefulGenericInProcessForkExecutor<'a, H, (), I, OT, S, SP, ES, EM, Z>; +pub type StatefulInProcessForkExecutor<'a, EM, ES, H, I, OT, S, SHM, SP, Z> = + StatefulGenericInProcessForkExecutor<'a, EM, ES, H, (), I, OT, S, SHM, SP, Z>; -impl<'a, H, I, OT, S, SP, ES, EM, Z> StatefulInProcessForkExecutor<'a, H, I, OT, S, SP, ES, EM, Z> +impl<'a, H, I, OT, S, SHM, SP, ES, EM, Z> + StatefulInProcessForkExecutor<'a, EM, ES, H, I, OT, S, SHM, SP, Z> where OT: ObserversTuple, + SHM: ShMem, + SP: ShMemProvider, { #[expect(clippy::too_many_arguments)] /// The constructor for `InProcessForkExecutor` @@ -59,18 +61,17 @@ where } /// [`StatefulGenericInProcessForkExecutor`] is an executor that forks the current process before each execution. Harness can access some internal state. 
-pub struct StatefulGenericInProcessForkExecutor<'a, H, HT, I, OT, S, SP, ES, EM, Z> { +pub struct StatefulGenericInProcessForkExecutor<'a, EM, ES, H, HT, I, OT, S, SHM, SP, Z> { /// The harness function, being executed for each fuzzing loop execution harness_fn: &'a mut H, /// The state used as argument of the harness pub exposed_executor_state: ES, /// Inner state of the executor - pub inner: GenericInProcessForkExecutorInner, - phantom: PhantomData, + pub inner: GenericInProcessForkExecutorInner, } -impl Debug - for StatefulGenericInProcessForkExecutor<'_, H, HT, I, OT, S, SP, ES, EM, Z> +impl Debug + for StatefulGenericInProcessForkExecutor<'_, EM, ES, H, HT, I, OT, S, SHM, SP, Z> where HT: Debug, OT: Debug, @@ -93,13 +94,14 @@ where } } -impl Executor - for StatefulGenericInProcessForkExecutor<'_, H, HT, I, OT, S, SP, ES, EM, Z> +impl Executor + for StatefulGenericInProcessForkExecutor<'_, EM, ES, H, HT, I, OT, S, SHM, SP, Z> where H: FnMut(&mut ES, &I) -> ExitKind + Sized, HT: ExecutorHooksTuple, S: HasExecutions, - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, OT: ObserversTuple, { #[inline] @@ -132,8 +134,8 @@ where } } -impl<'a, H, HT, I, OT, S, SP, ES, EM, Z> - StatefulGenericInProcessForkExecutor<'a, H, HT, I, OT, S, SP, ES, EM, Z> +impl<'a, H, HT, I, OT, S, SHM, SP, ES, EM, Z> + StatefulGenericInProcessForkExecutor<'a, EM, ES, H, HT, I, OT, S, SHM, SP, Z> where HT: ExecutorHooksTuple, OT: ObserversTuple, @@ -163,7 +165,6 @@ where timeout, shmem_provider, )?, - phantom: PhantomData, }) } @@ -180,8 +181,8 @@ where } } -impl HasObservers - for StatefulGenericInProcessForkExecutor<'_, H, HT, I, OT, S, SP, ES, EM, Z> +impl HasObservers + for StatefulGenericInProcessForkExecutor<'_, EM, ES, H, HT, I, OT, S, SHM, SP, Z> { type Observers = OT; diff --git a/libafl/src/observers/concolic/serialization_format.rs b/libafl/src/observers/concolic/serialization_format.rs index 9c7bcba2ad..3718d4eb4c 100644 --- 
a/libafl/src/observers/concolic/serialization_format.rs +++ b/libafl/src/observers/concolic/serialization_format.rs @@ -219,7 +219,7 @@ impl MessageFileReader { /// A `MessageFileWriter` writes a stream of [`SymExpr`] to any [`Write`]. For each written expression, it returns /// a [`SymExprRef`] which should be used to refer back to it. -pub struct MessageFileWriter { +pub struct MessageFileWriter { id_counter: usize, writer: W, writer_start_position: u64, @@ -396,7 +396,7 @@ impl MessageFileWriter { } } -use libafl_bolts::shmem::{ShMem, ShMemCursor, ShMemProvider, StdShMemProvider}; +use libafl_bolts::shmem::{ShMem, ShMemCursor, ShMemProvider, StdShMem, StdShMemProvider}; /// The default environment variable name to use for the shared memory used by the concolic tracing pub const DEFAULT_ENV_NAME: &str = "SHARED_MEMORY_MESSAGES"; @@ -439,14 +439,17 @@ impl<'buffer> MessageFileReader> { } } -impl MessageFileWriter> { +impl MessageFileWriter> +where + SHM: ShMem, +{ /// Creates a new `MessageFileWriter` from the given [`ShMemCursor`]. - pub fn from_shmem(shmem: T) -> io::Result { + pub fn from_shmem(shmem: SHM) -> io::Result { Self::from_writer(ShMemCursor::new(shmem)) } } -impl MessageFileWriter::ShMem>> { +impl MessageFileWriter> { /// Creates a new `MessageFileWriter` by reading a [`ShMem`] from the given environment variable. pub fn from_stdshmem_env_with_name(env_name: impl AsRef) -> io::Result { Self::from_shmem( @@ -464,8 +467,7 @@ impl MessageFileWriter::ShMem>> } /// A writer that will write messages to a shared memory buffer. 
-pub type StdShMemMessageFileWriter = - MessageFileWriter::ShMem>>; +pub type StdShMemMessageFileWriter = MessageFileWriter>; #[cfg(test)] mod serialization_tests { diff --git a/libafl/src/stages/sync.rs b/libafl/src/stages/sync.rs index 9d5c7033d0..6e694f6242 100644 --- a/libafl/src/stages/sync.rs +++ b/libafl/src/stages/sync.rs @@ -7,7 +7,12 @@ use alloc::{ use core::{marker::PhantomData, time::Duration}; use std::path::{Path, PathBuf}; -use libafl_bolts::{current_time, fs::find_new_files_rec, shmem::ShMemProvider, Named}; +use libafl_bolts::{ + current_time, + fs::find_new_files_rec, + shmem::{ShMem, ShMemProvider}, + Named, +}; use serde::{Deserialize, Serialize}; use crate::{ @@ -216,14 +221,12 @@ impl SyncFromBrokerMetadata { /// A stage that loads testcases from disk to sync with other fuzzers such as AFL++ #[derive(Debug)] -pub struct SyncFromBrokerStage -where - SP: ShMemProvider, -{ - client: LlmpEventConverter, +pub struct SyncFromBrokerStage { + client: LlmpEventConverter, } -impl Stage for SyncFromBrokerStage +impl Stage + for SyncFromBrokerStage where DI: Input, EM: EventFirer, @@ -233,7 +236,8 @@ where IC: InputConverter, ICB: InputConverter, S: HasExecutions + HasCorpus + HasRand + HasMetadata + Stoppable + MaybeHasClientPerfMonitor, - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, Z: EvaluatorObservers + ExecutionProcessor, { #[inline] @@ -307,13 +311,10 @@ where } } -impl SyncFromBrokerStage -where - SP: ShMemProvider, -{ +impl SyncFromBrokerStage { /// Creates a new [`SyncFromBrokerStage`] #[must_use] - pub fn new(client: LlmpEventConverter) -> Self { + pub fn new(client: LlmpEventConverter) -> Self { Self { client } } } diff --git a/libafl_bolts/examples/llmp_test/main.rs b/libafl_bolts/examples/llmp_test/main.rs index 50762a536c..7a75a342ff 100644 --- a/libafl_bolts/examples/llmp_test/main.rs +++ b/libafl_bolts/examples/llmp_test/main.rs @@ -113,13 +113,13 @@ impl Default for LlmpExampleHook { } #[cfg(all(feature = "std", not(target_os = 
"haiku")))] -impl LlmpHook for LlmpExampleHook +impl LlmpHook for LlmpExampleHook where - SP: ShMemProvider + 'static, + SP: ShMemProvider + 'static, { fn on_new_message( &mut self, - _broker_inner: &mut LlmpBrokerInner, + _broker_inner: &mut LlmpBrokerInner, client_id: ClientId, msg_tag: &mut Tag, _msg_flags: &mut Flags, diff --git a/libafl_bolts/src/llmp.rs b/libafl_bolts/src/llmp.rs index acf3ef28ac..bce423a20d 100644 --- a/libafl_bolts/src/llmp.rs +++ b/libafl_bolts/src/llmp.rs @@ -707,25 +707,23 @@ impl LlmpMsg { /// An Llmp instance #[derive(Debug)] -pub enum LlmpConnection -where - SP: ShMemProvider, -{ +pub enum LlmpConnection { /// A broker and a thread using this tcp background thread IsBroker { /// The [`LlmpBroker`] of this [`LlmpConnection`]. - broker: LlmpBroker, + broker: LlmpBroker, }, /// A client, connected to the port IsClient { /// The [`LlmpClient`] of this [`LlmpConnection`]. - client: LlmpClient, + client: LlmpClient, }, } -impl LlmpConnection<(), SP> +impl LlmpConnection<(), SHM, SP> where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { #[cfg(feature = "std")] /// Creates either a broker, if the tcp port is not bound, or a client, connected to this port. @@ -776,10 +774,11 @@ where } } -impl LlmpConnection +impl LlmpConnection where - MT: LlmpHookTuple, - SP: ShMemProvider, + MT: LlmpHookTuple, + SHM: ShMem, + SP: ShMemProvider, { /// Describe this in a reproducible fashion, if it's a client pub fn describe(&self) -> Result { @@ -793,7 +792,7 @@ where pub fn existing_client_from_description( shmem_provider: SP, description: &LlmpClientDescription, - ) -> Result, Error> { + ) -> Result, Error> { Ok(LlmpConnection::IsClient { client: LlmpClient::existing_client_from_description(shmem_provider, description)?, }) @@ -891,23 +890,20 @@ struct LlmpClientExitInfo { /// Sending end on a (unidirectional) sharedmap channel #[derive(Debug)] -pub struct LlmpSender -where - SP: ShMemProvider, -{ +pub struct LlmpSender { /// ID of this sender. 
id: ClientId, /// Ref to the last message this sender sent on the last page. /// If null, a new page (just) started. last_msg_sent: *const LlmpMsg, /// A vec of page wrappers, each containing an initialized [`ShMem`] - out_shmems: Vec>, + out_shmems: Vec>, /// A vec of pages that we previously used, but that have served its purpose /// (no potential receivers are left). /// Instead of freeing them, we keep them around to potentially reuse them later, /// if they are still large enough. /// This way, the OS doesn't have to spend time zeroing pages, and getting rid of our old pages - unused_shmem_cache: Vec>, + unused_shmem_cache: Vec>, /// If true, pages will never be pruned. /// The broker uses this feature. /// By keeping the message history around, @@ -920,9 +916,10 @@ where } /// An actor on the sending part of the shared map -impl LlmpSender +impl LlmpSender where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Create a new [`LlmpSender`] using a given [`ShMemProvider`], and `id`. /// If `keep_pages_forever` is `true`, `ShMem` will never be freed. @@ -1068,7 +1065,7 @@ where /// else reattach will get a new, empty page, from the OS, or fail. pub fn on_existing_shmem( shmem_provider: SP, - current_out_shmem: SP::ShMem, + current_out_shmem: SHM, last_msg_sent_offset: Option, ) -> Result { let mut out_shmem = LlmpSharedMap::existing(current_out_shmem); @@ -1307,7 +1304,7 @@ where &mut self, sender_id: ClientId, next_min_shmem_size: usize, - ) -> Result::ShMem>, Error> { + ) -> Result, Error> { // Find a shared map that has been released to reuse, from which all receivers left / finished reading. 
let cached_shmem = self .unused_shmem_cache @@ -1586,10 +1583,7 @@ where /// Receiving end on a (unidirectional) sharedmap channel #[derive(Debug)] -pub struct LlmpReceiver -where - SP: ShMemProvider, -{ +pub struct LlmpReceiver { /// Client Id of this receiver id: ClientId, /// Pointer to the last message received @@ -1600,15 +1594,16 @@ where /// The shmem provider shmem_provider: SP, /// current page. After EOP, this gets replaced with the new one - current_recv_shmem: LlmpSharedMap, + current_recv_shmem: LlmpSharedMap, /// Caches the highest msg id we've seen so far highest_msg_id: MessageId, } /// Receiving end of an llmp channel -impl LlmpReceiver +impl LlmpReceiver where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Reattach to a vacant `recv_shmem`, to with a previous sender stored the information in an env before. #[cfg(feature = "std")] @@ -1634,7 +1629,7 @@ where /// else reattach will get a new, empty page, from the OS, or fail. pub fn on_existing_shmem( shmem_provider: SP, - current_sender_shmem: SP::ShMem, + current_sender_shmem: SHM, last_msg_recvd_offset: Option, ) -> Result { let mut current_recv_shmem = LlmpSharedMap::existing(current_sender_shmem); @@ -1897,10 +1892,7 @@ where /// A page wrapper #[derive(Clone, Debug)] -pub struct LlmpSharedMap -where - SHM: ShMem, -{ +pub struct LlmpSharedMap { /// Shmem containg the actual (unsafe) page, /// shared between one `LlmpSender` and one `LlmpReceiver` shmem: SHM, @@ -2050,18 +2042,15 @@ where /// The inner state of [`LlmpBroker`] #[derive(Debug)] -pub struct LlmpBrokerInner -where - SP: ShMemProvider, -{ +pub struct LlmpBrokerInner { /// Broadcast map from broker to all clients - llmp_out: LlmpSender, + llmp_out: LlmpSender, /// Users of Llmp can add message handlers in the broker. /// This allows us to intercept messages right in the broker. /// This keeps the out map clean. 
/// The backing values of `llmp_clients` [`ClientId`]s will always be sorted (but not gapless) /// Make sure to always increase `num_clients_seen` when pushing a new [`LlmpReceiver`] to `llmp_clients`! - llmp_clients: Vec>, + llmp_clients: Vec>, /// The own listeners we spawned via `launch_listener` or `crate_attach_to_tcp`. /// Listeners will be ignored for `exit_cleanly_after` and they are never considered to have timed out. listeners: Vec, @@ -2078,12 +2067,9 @@ where /// The broker (node 0) #[derive(Debug)] -pub struct LlmpBroker -where - SP: ShMemProvider, -{ +pub struct LlmpBroker { /// The broker - inner: LlmpBrokerInner, + inner: LlmpBrokerInner, /// Llmp hooks hooks: HT, } @@ -2118,10 +2104,11 @@ pub trait Broker { fn nb_listeners(&self) -> usize; } -impl Broker for LlmpBroker +impl Broker for LlmpBroker where - HT: LlmpHookTuple, - SP: ShMemProvider, + HT: LlmpHookTuple, + SHM: ShMem, + SP: ShMemProvider, { fn is_shutting_down(&self) -> bool { self.inner.is_shutting_down() @@ -2215,15 +2202,12 @@ impl CtrlHandler for LlmpShutdownSignalHandler { } /// Llmp hooks -pub trait LlmpHook -where - SP: ShMemProvider, -{ +pub trait LlmpHook { /// Hook called whenever a new message is received. It receives an llmp message as input, does /// something with it (read, transform, forward, etc...) and decides to discard it or not. fn on_new_message( &mut self, - broker_inner: &mut LlmpBrokerInner, + broker_inner: &mut LlmpBrokerInner, client_id: ClientId, msg_tag: &mut Tag, msg_flags: &mut Flags, @@ -2238,14 +2222,11 @@ where } /// A tuple of Llmp hooks. They are evaluated sequentially, and returns if one decides to filter out the evaluated message. -pub trait LlmpHookTuple -where - SP: ShMemProvider, -{ +pub trait LlmpHookTuple { /// Call all hook callbacks on new message. 
fn on_new_message_all( &mut self, - inner: &mut LlmpBrokerInner, + inner: &mut LlmpBrokerInner, client_id: ClientId, msg_tag: &mut Tag, msg_flags: &mut Flags, @@ -2257,13 +2238,10 @@ where fn on_timeout_all(&mut self) -> Result<(), Error>; } -impl LlmpHookTuple for () -where - SP: ShMemProvider, -{ +impl LlmpHookTuple for () { fn on_new_message_all( &mut self, - _inner: &mut LlmpBrokerInner, + _inner: &mut LlmpBrokerInner, _client_id: ClientId, _msg_tag: &mut Tag, _msg_flags: &mut Flags, @@ -2278,15 +2256,14 @@ where } } -impl LlmpHookTuple for (Head, Tail) +impl LlmpHookTuple for (Head, Tail) where - Head: LlmpHook, - Tail: LlmpHookTuple, - SP: ShMemProvider, + Head: LlmpHook, + Tail: LlmpHookTuple, { fn on_new_message_all( &mut self, - inner: &mut LlmpBrokerInner, + inner: &mut LlmpBrokerInner, client_id: ClientId, msg_tag: &mut Tag, msg_flags: &mut Flags, @@ -2315,15 +2292,12 @@ where } } -impl LlmpBroker<(), SP> -where - SP: ShMemProvider, -{ +impl LlmpBroker<(), SHM, SP> { /// Add hooks to a hookless [`LlmpBroker`]. /// We do not support replacing hooks for now. - pub fn add_hooks(self, hooks: HT) -> LlmpBroker + pub fn add_hooks(self, hooks: HT) -> LlmpBroker where - HT: LlmpHookTuple, + HT: LlmpHookTuple, { LlmpBroker { inner: self.inner, @@ -2446,10 +2420,11 @@ impl Brokers { } } -impl LlmpBroker +impl LlmpBroker where - HT: LlmpHookTuple, - SP: ShMemProvider, + HT: LlmpHookTuple, + SHM: ShMem, + SP: ShMemProvider, { /// Create and initialize a new [`LlmpBroker`], associated with some hooks. 
pub fn new(shmem_provider: SP, hooks: HT) -> Result { @@ -2496,12 +2471,12 @@ where } /// Get the inner state of the broker - pub fn inner(&self) -> &LlmpBrokerInner { + pub fn inner(&self) -> &LlmpBrokerInner { &self.inner } /// Get the inner mutable state of the broker - pub fn inner_mut(&mut self) -> &mut LlmpBrokerInner { + pub fn inner_mut(&mut self) -> &mut LlmpBrokerInner { &mut self.inner } @@ -2829,9 +2804,10 @@ where /// The broker forwards all messages to its own bus-like broadcast map. /// It may intercept messages passing through. -impl LlmpBrokerInner +impl LlmpBrokerInner where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Create and initialize a new [`LlmpBrokerInner`], associated with some hooks. pub fn new(shmem_provider: SP) -> Result { @@ -2917,7 +2893,7 @@ where /// Will increase `num_clients_seen`. /// The backing values of `llmp_clients` [`ClientId`]s will always be sorted (but not gapless) /// returns the [`ClientId`] of the new client. - pub fn add_client(&mut self, mut client_receiver: LlmpReceiver) -> ClientId { + pub fn add_client(&mut self, mut client_receiver: LlmpReceiver) -> ClientId { let id = self.peek_next_client_id(); client_receiver.id = id; self.llmp_clients.push(client_receiver); @@ -2932,7 +2908,7 @@ where /// Registers a new client for the given sharedmap str and size. /// Returns the id of the new client in [`broker.client_shmem`] - pub fn register_client(&mut self, mut client_page: LlmpSharedMap) -> ClientId { + pub fn register_client(&mut self, mut client_page: LlmpSharedMap) -> ClientId { // Tell the client it may unmap its initial allocated shmem page now. // Since we now have a handle to it, it won't be umapped too early (only after we also unmap it) client_page.mark_safe_to_unmap(); @@ -3090,7 +3066,7 @@ where /// Upon receiving this message, the broker should map the announced page and start tracking it for new messages. 
#[cfg(feature = "std")] fn announce_new_client( - sender: &mut LlmpSender, + sender: &mut LlmpSender, shmem_description: &ShMemDescription, ) -> Result<(), Error> { unsafe { @@ -3108,7 +3084,7 @@ where /// Tell the broker to disconnect this client from it. #[cfg(feature = "std")] - fn announce_client_exit(sender: &mut LlmpSender, client_id: u32) -> Result<(), Error> { + fn announce_client_exit(sender: &mut LlmpSender, client_id: u32) -> Result<(), Error> { // # Safety // No user-provided potentially unsafe parameters. unsafe { @@ -3280,7 +3256,7 @@ where mut stream: TcpStream, request: &TcpRequest, current_client_id: &mut ClientId, - sender: &mut LlmpSender, + sender: &mut LlmpSender, broker_shmem_description: &ShMemDescription, ) { match request { @@ -3451,21 +3427,19 @@ pub struct LlmpClientDescription { /// Client side of LLMP #[derive(Debug)] -pub struct LlmpClient -where - SP: ShMemProvider, -{ +pub struct LlmpClient { /// Outgoing channel to the broker - sender: LlmpSender, + sender: LlmpSender, /// Incoming (broker) broadcast map - receiver: LlmpReceiver, + receiver: LlmpReceiver, } /// `n` clients connect to a broker. They share an outgoing map with the broker, /// and get incoming messages from the shared broker bus -impl LlmpClient +impl LlmpClient where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Reattach to a vacant client map. 
/// It is essential, that the broker (or someone else) kept a pointer to the `out_shmem` @@ -3473,9 +3447,9 @@ where #[allow(clippy::needless_pass_by_value)] // no longer necessary on nightly pub fn on_existing_shmem( shmem_provider: SP, - _current_out_shmem: SP::ShMem, + _current_out_shmem: SHM, _last_msg_sent_offset: Option, - current_broker_shmem: SP::ShMem, + current_broker_shmem: SHM, last_msg_recvd_offset: Option, ) -> Result { Ok(Self { @@ -3542,25 +3516,25 @@ where /// Outgoing channel to the broker #[must_use] - pub fn sender(&self) -> &LlmpSender { + pub fn sender(&self) -> &LlmpSender { &self.sender } /// Outgoing channel to the broker (mut) #[must_use] - pub fn sender_mut(&mut self) -> &mut LlmpSender { + pub fn sender_mut(&mut self) -> &mut LlmpSender { &mut self.sender } /// Incoming (broker) broadcast map #[must_use] - pub fn receiver(&self) -> &LlmpReceiver { + pub fn receiver(&self) -> &LlmpReceiver { &self.receiver } /// Incoming (broker) broadcast map (mut) #[must_use] - pub fn receiver_mut(&mut self) -> &mut LlmpReceiver { + pub fn receiver_mut(&mut self) -> &mut LlmpReceiver { &mut self.receiver } @@ -3588,7 +3562,7 @@ where /// Creates a new [`LlmpClient`] pub fn new( mut shmem_provider: SP, - initial_broker_shmem: LlmpSharedMap, + initial_broker_shmem: LlmpSharedMap, sender_id: ClientId, ) -> Result { Ok(Self { diff --git a/libafl_bolts/src/os/unix_shmem_server.rs b/libafl_bolts/src/os/unix_shmem_server.rs index 2fef01ab84..3f440f1990 100644 --- a/libafl_bolts/src/os/unix_shmem_server.rs +++ b/libafl_bolts/src/os/unix_shmem_server.rs @@ -11,6 +11,7 @@ use alloc::{ vec::Vec, }; use core::{ + fmt::Debug, mem::ManuallyDrop, ops::{Deref, DerefMut}, }; @@ -60,33 +61,27 @@ const AFL_SHMEM_SERVICE_STARTED: &str = "AFL_SHMEM_SERVICE_STARTED"; /// s out served shared maps, as used on Android. 
#[derive(Debug)] -pub struct ServedShMemProvider -where - SP: ShMemProvider, -{ +pub struct ServedShMemProvider { stream: UnixStream, inner: SP, id: i32, /// A referencde to the [`ShMemService`] backing this provider. /// It will be started only once for all processes and providers. - service: ShMemService, + service: ShMemService, about_to_restart: bool, } /// [`ShMem`] that got served from a [`ShMemService`] via domain sockets and can now be used in this program. /// It works around Android's lack of "proper" shared maps. #[derive(Clone, Debug)] -pub struct ServedShMem -where - SH: ShMem, -{ - inner: ManuallyDrop, +pub struct ServedShMem { + inner: ManuallyDrop, server_fd: i32, } -impl Deref for ServedShMem +impl Deref for ServedShMem where - SH: ShMem, + SHM: Deref, { type Target = [u8]; @@ -95,18 +90,18 @@ where } } -impl DerefMut for ServedShMem +impl DerefMut for ServedShMem where - SH: ShMem, + SHM: DerefMut, { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } -impl ShMem for ServedShMem +impl ShMem for ServedShMem where - SH: ShMem, + SHM: ShMem, { fn id(&self) -> ShMemId { let client_id = self.inner.id(); @@ -114,10 +109,7 @@ where } } -impl ServedShMemProvider -where - SP: ShMemProvider, -{ +impl ServedShMemProvider { /// Send a request to the server, and wait for a response #[expect(clippy::similar_names)] // id and fd fn send_receive(&mut self, request: ServedShMemRequest) -> Result<(i32, i32), Error> { @@ -152,18 +144,20 @@ where } } -impl Default for ServedShMemProvider +impl Default for ServedShMemProvider where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { fn default() -> Self { Self::new().unwrap() } } -impl Clone for ServedShMemProvider +impl Clone for ServedShMemProvider where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { fn clone(&self) -> Self { let mut cloned = Self::new().unwrap(); @@ -172,17 +166,16 @@ where } } -impl ShMemProvider for ServedShMemProvider +impl ShMemProvider> for ServedShMemProvider 
where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { - type ShMem = ServedShMem; - /// Connect to the server and return a new [`ServedShMemProvider`] /// Will try to spawn a [`ShMemService`]. This will only work for the first try. fn new() -> Result { // Needed for `MacOS` and Android to get sharedmaps working. - let service = ShMemService::::start(); + let service = ShMemService::::start(); let mut res = Self { stream: UnixStream::connect_to_unix_addr(&UnixSocketAddr::new(UNIX_SERVER_NAME)?).map_err(|err| Error::illegal_state(if cfg!(target_vendor = "apple") { @@ -200,7 +193,7 @@ where Ok(res) } - fn new_shmem(&mut self, map_size: usize) -> Result { + fn new_shmem(&mut self, map_size: usize) -> Result, Error> { let (server_fd, client_fd) = self.send_receive(ServedShMemRequest::NewMap(map_size))?; Ok(ServedShMem { @@ -214,7 +207,11 @@ where }) } - fn shmem_from_id_and_size(&mut self, id: ShMemId, size: usize) -> Result { + fn shmem_from_id_and_size( + &mut self, + id: ShMemId, + size: usize, + ) -> Result, Error> { let parts = id.as_str().split(':').collect::>(); let server_id_str = parts.first().unwrap(); let (server_fd, client_fd) = self.send_receive(ServedShMemRequest::ExistingMap( @@ -252,7 +249,7 @@ where Ok(()) } - fn release_shmem(&mut self, map: &mut Self::ShMem) { + fn release_shmem(&mut self, map: &mut ServedShMem) { if self.about_to_restart { return; } @@ -290,17 +287,14 @@ pub enum ServedShMemRequest { /// Client side communicating with the [`ShMemServer`] #[derive(Debug)] -struct SharedShMemClient -where - SH: ShMem, -{ +struct SharedShMemClient { stream: UnixStream, - maps: HashMap>>>, + maps: HashMap>>>, } -impl SharedShMemClient +impl SharedShMemClient where - SH: ShMem, + SHM: ShMem, { fn new(stream: UnixStream) -> Self { Self { @@ -312,11 +306,8 @@ where /// Response from Server to Client #[derive(Debug)] -enum ServedShMemResponse -where - SP: ShMemProvider, -{ - Mapping(Rc>), +enum ServedShMemResponse { + Mapping(Rc>), Id(i32), 
RefCount(u32), } @@ -332,23 +323,20 @@ enum ShMemServiceStatus { /// The [`ShMemService`] is a service handing out [`ShMem`] pages via unix domain sockets. /// It is mainly used and needed on Android. #[derive(Debug, Clone)] -pub enum ShMemService -where - SP: ShMemProvider, -{ +pub enum ShMemService { /// A started service Started { /// The background thread bg_thread: Arc>, /// The pantom data - phantom: PhantomData, + phantom: PhantomData<(SHM, SP)>, }, /// A failed service Failed { /// The error message err_msg: String, /// The pantom data - phantom: PhantomData, + phantom: PhantomData<(SHM, SP)>, }, } @@ -393,9 +381,10 @@ impl Drop for ShMemServiceThread { } } -impl ShMemService +impl ShMemService where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Create a new [`ShMemService`], then listen and service incoming connections in a new thread. /// Returns [`ShMemService::Failed`] on error. @@ -412,7 +401,7 @@ where let syncpair = Arc::new((Mutex::new(ShMemServiceStatus::Starting), Condvar::new())); let childsyncpair = Arc::clone(&syncpair); let join_handle = thread::spawn(move || { - let mut worker = match ServedShMemServiceWorker::::new() { + let mut worker = match ServedShMemServiceWorker::::new() { Ok(worker) => worker, Err(e) => { // Make sure the parent processes can continue @@ -472,20 +461,18 @@ where /// The struct for the worker, handling incoming requests for [`ShMem`]. #[expect(clippy::type_complexity)] -struct ServedShMemServiceWorker -where - SP: ShMemProvider, -{ +struct ServedShMemServiceWorker { provider: SP, - clients: HashMap>, + clients: HashMap>, /// Maps from a pre-fork (parent) client id to its cloned maps. 
- forking_clients: HashMap>>>>, - all_shmems: HashMap>>, + forking_clients: HashMap>>>>, + all_shmems: HashMap>>, } -impl ServedShMemServiceWorker +impl ServedShMemServiceWorker where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Create a new [`ShMemService`] fn new() -> Result { @@ -497,7 +484,7 @@ where }) } - fn upgrade_shmem_with_id(&mut self, description_id: i32) -> Rc> { + fn upgrade_shmem_with_id(&mut self, description_id: i32) -> Rc> { self.all_shmems .get_mut(&description_id) .unwrap() @@ -507,7 +494,7 @@ where } /// Read and handle the client request, send the answer over unix fd. - fn handle_request(&mut self, client_id: RawFd) -> Result, Error> { + fn handle_request(&mut self, client_id: RawFd) -> Result, Error> { let request = self.read_request(client_id)?; // log::trace!("got ashmem client: {}, request:{:?}", client_id, request); diff --git a/libafl_bolts/src/ownedref.rs b/libafl_bolts/src/ownedref.rs index c271dba53e..d15046d352 100644 --- a/libafl_bolts/src/ownedref.rs +++ b/libafl_bolts/src/ownedref.rs @@ -190,7 +190,7 @@ where /// # Safety /// The shared memory needs to start with a valid object of type `T`. /// Any use of this [`OwnedRef`] will dereference a pointer to the shared memory accordingly. - pub unsafe fn from_shmem(shmem: &mut S) -> Self { + pub unsafe fn from_shmem(shmem: &mut SHM) -> Self { Self::from_ptr(shmem.as_mut_ptr_of().unwrap()) } @@ -325,7 +325,7 @@ where /// # Safety /// The shared memory needs to start with a valid object of type `T`. /// Any use of this [`OwnedRefMut`] will dereference a pointer to the shared memory accordingly. 
- pub unsafe fn from_shmem(shmem: &mut S) -> Self { + pub unsafe fn from_shmem(shmem: &mut SHM) -> Self { Self::from_mut_ptr(shmem.as_mut_ptr_of().unwrap()) } diff --git a/libafl_bolts/src/shmem.rs b/libafl_bolts/src/shmem.rs index 55a587b0ab..744a8a6600 100644 --- a/libafl_bolts/src/shmem.rs +++ b/libafl_bolts/src/shmem.rs @@ -7,6 +7,7 @@ use alloc::{rc::Rc, string::ToString, vec::Vec}; use core::{cell::RefCell, fmt, fmt::Display, mem::ManuallyDrop}; use core::{ fmt::Debug, + marker::PhantomData, mem::size_of, ops::{Deref, DerefMut}, }; @@ -38,19 +39,44 @@ use crate::Error; /// The standard sharedmem provider #[cfg(all(windows, feature = "std"))] pub type StdShMemProvider = Win32ShMemProvider; +/// The standard sharedmem +#[cfg(all(windows, feature = "std"))] +pub type StdShMem = Win32ShMem; + +/// The standard sharedmem +#[cfg(all(target_os = "android", feature = "std"))] +pub type StdShMem = unix_shmem::ashmem::AshmemShMem; + /// The standard sharedmem provider #[cfg(all(target_os = "android", feature = "std"))] -pub type StdShMemProvider = - RcShMemProvider>; +pub type StdShMemProvider = RcShMemProvider< + unix_shmem::ashmem::AshmemShMem, + ServedShMemProvider, +>; + /// The standard sharedmem service #[cfg(all(target_os = "android", feature = "std"))] -pub type StdShMemService = ShMemService; +pub type StdShMemService = + ShMemService; + +/// The standard sharedmem +#[cfg(all(feature = "std", target_vendor = "apple"))] +pub type StdShMem = MmapShMem; + /// The standard sharedmem provider #[cfg(all(feature = "std", target_vendor = "apple"))] -pub type StdShMemProvider = RcShMemProvider>; +pub type StdShMemProvider = RcShMemProvider>; #[cfg(all(feature = "std", target_vendor = "apple"))] /// The standard sharedmem service -pub type StdShMemService = ShMemService; +pub type StdShMemService = ShMemService; + +/// The default [`ShMem`]. 
+#[cfg(all( + feature = "std", + unix, + not(any(target_os = "android", target_vendor = "apple", target_os = "haiku")) +))] +pub type StdShMem = UnixShMem; /// The default [`ShMemProvider`] for this os. #[cfg(all( feature = "std", @@ -79,7 +105,8 @@ pub type StdServedShMemProvider = RcShMemProvider>; +pub type StdServedShMemProvider = + RcShMemProvider>; /// Description of a shared map. /// May be used to restore the map by id. @@ -245,21 +272,21 @@ pub trait ShMem: Sized + Debug + Clone + DerefMut { /// /// They are the backbone of [`crate::llmp`] for inter-process communication. /// All you need for scaling on a new target is to implement this interface, as well as the respective [`ShMem`]. -pub trait ShMemProvider: Clone + Default + Debug { - /// The actual shared map handed out by this [`ShMemProvider`]. - type ShMem: ShMem; - +pub trait ShMemProvider: Clone + Default + Debug { /// Create a new instance of the provider fn new() -> Result; /// Create a new shared memory mapping - fn new_shmem(&mut self, map_size: usize) -> Result; + fn new_shmem(&mut self, map_size: usize) -> Result; /// Get a mapping given its id and size - fn shmem_from_id_and_size(&mut self, id: ShMemId, size: usize) -> Result; + fn shmem_from_id_and_size(&mut self, id: ShMemId, size: usize) -> Result; /// Create a new shared memory mapping to hold an object of the given type, and initializes it with the given value. - fn new_on_shmem(&mut self, value: T) -> Result { + fn new_on_shmem(&mut self, value: T) -> Result + where + SHM: ShMem, + { self.uninit_on_shmem::().map(|mut shmem| { // # Safety // The map has been created at this point in time, and is large enough. @@ -270,26 +297,26 @@ pub trait ShMemProvider: Clone + Default + Debug { } /// Create a new shared memory mapping to hold an object of the given type, and initializes it with the given value. 
- fn uninit_on_shmem(&mut self) -> Result { + fn uninit_on_shmem(&mut self) -> Result { self.new_shmem(size_of::()) } /// Get a mapping given a description - fn shmem_from_description( - &mut self, - description: ShMemDescription, - ) -> Result { + fn shmem_from_description(&mut self, description: ShMemDescription) -> Result { self.shmem_from_id_and_size(description.id, description.size) } /// Create a new sharedmap reference from an existing `id` and `len` - fn clone_ref(&mut self, mapping: &Self::ShMem) -> Result { + fn clone_ref(&mut self, mapping: &SHM) -> Result + where + SHM: ShMem, + { self.shmem_from_id_and_size(mapping.id(), mapping.len()) } /// Reads an existing map config from env vars, then maps it #[cfg(feature = "std")] - fn existing_from_env(&mut self, env_name: &str) -> Result { + fn existing_from_env(&mut self, env_name: &str) -> Result { let map_shm_str = env::var(env_name)?; let map_size = str::parse::(&env::var(format!("{env_name}_SIZE"))?)?; self.shmem_from_description(ShMemDescription::from_string_and_size( @@ -315,7 +342,7 @@ pub trait ShMemProvider: Clone + Default + Debug { } /// Release the resources associated with the given [`ShMem`] - fn release_shmem(&mut self, _shmem: &mut Self::ShMem) { + fn release_shmem(&mut self, _shmem: &mut SHM) { // do nothing } } @@ -333,22 +360,16 @@ pub trait ShMemProvider: Clone + Default + Debug { pub struct NopShMemProvider; #[cfg(feature = "alloc")] -impl ShMemProvider for NopShMemProvider { - type ShMem = NopShMem; - +impl ShMemProvider for NopShMemProvider { fn new() -> Result { Ok(Self) } - fn new_shmem(&mut self, map_size: usize) -> Result { + fn new_shmem(&mut self, map_size: usize) -> Result { self.shmem_from_id_and_size(ShMemId::default(), map_size) } - fn shmem_from_id_and_size( - &mut self, - id: ShMemId, - map_size: usize, - ) -> Result { + fn shmem_from_id_and_size(&mut self, id: ShMemId, map_size: usize) -> Result { Ok(NopShMem { id, buf: vec![0; map_size], @@ -392,15 +413,19 @@ impl Deref 
for NopShMem { /// Useful if the `ShMemProvider` needs to keep local state. #[cfg(feature = "alloc")] #[derive(Debug, Clone, Default)] -pub struct RcShMem { - internal: ManuallyDrop, - provider: Rc>, +pub struct RcShMem +where + SP: ShMemProvider, +{ + internal: ManuallyDrop, + provider: Rc>, } #[cfg(feature = "alloc")] -impl ShMem for RcShMem +impl ShMem for RcShMem where - T: ShMemProvider + Debug, + SHM: ShMem, + SP: ShMemProvider, { fn id(&self) -> ShMemId { self.internal.id() @@ -408,9 +433,10 @@ where } #[cfg(feature = "alloc")] -impl Deref for RcShMem +impl Deref for RcShMem where - T: ShMemProvider + Debug, + SHM: Deref, + SP: ShMemProvider, { type Target = [u8]; @@ -420,9 +446,10 @@ where } #[cfg(feature = "alloc")] -impl DerefMut for RcShMem +impl DerefMut for RcShMem where - T: ShMemProvider + Debug, + SHM: DerefMut, + SP: ShMemProvider, { fn deref_mut(&mut self) -> &mut [u8] { &mut self.internal @@ -430,7 +457,10 @@ where } #[cfg(feature = "alloc")] -impl Drop for RcShMem { +impl Drop for RcShMem +where + SP: ShMemProvider, +{ fn drop(&mut self) { self.provider.borrow_mut().release_shmem(&mut self.internal); } @@ -441,10 +471,7 @@ impl Drop for RcShMem { /// Useful if the `ShMemProvider` needs to keep local state. #[derive(Debug, Clone)] #[cfg(all(unix, feature = "std", not(target_os = "haiku")))] -pub struct RcShMemProvider -where - SP: ShMemProvider, -{ +pub struct RcShMemProvider { /// The wrapped [`ShMemProvider`]. internal: Rc>, /// A pipe the child uses to communicate progress to the parent after fork. @@ -455,35 +482,37 @@ where /// A pipe the parent uses to communicate progress to the child after fork. /// This prevents a potential race condition when using the [`ShMemService`]. 
parent_child_pipe: Option, + phantom: PhantomData, } -//#[cfg(all(unix, feature = "std"))] -//unsafe impl Send for RcShMemProvider {} - #[cfg(all(unix, feature = "std", not(target_os = "haiku")))] -impl ShMemProvider for RcShMemProvider +impl ShMemProvider> for RcShMemProvider where - SP: ShMemProvider + Debug, + SHM: ShMem, + SP: ShMemProvider, { - type ShMem = RcShMem; - fn new() -> Result { Ok(Self { internal: Rc::new(RefCell::new(SP::new()?)), child_parent_pipe: None, parent_child_pipe: None, + phantom: PhantomData, }) } - fn new_shmem(&mut self, map_size: usize) -> Result { - Ok(Self::ShMem { + fn new_shmem(&mut self, map_size: usize) -> Result, Error> { + Ok(RcShMem { internal: ManuallyDrop::new(self.internal.borrow_mut().new_shmem(map_size)?), provider: self.internal.clone(), }) } - fn shmem_from_id_and_size(&mut self, id: ShMemId, size: usize) -> Result { - Ok(Self::ShMem { + fn shmem_from_id_and_size( + &mut self, + id: ShMemId, + size: usize, + ) -> Result, Error> { + Ok(RcShMem { internal: ManuallyDrop::new( self.internal .borrow_mut() @@ -493,12 +522,12 @@ where }) } - fn release_shmem(&mut self, map: &mut Self::ShMem) { + fn release_shmem(&mut self, map: &mut RcShMem) { self.internal.borrow_mut().release_shmem(&mut map.internal); } - fn clone_ref(&mut self, mapping: &Self::ShMem) -> Result { - Ok(Self::ShMem { + fn clone_ref(&mut self, mapping: &RcShMem) -> Result, Error> { + Ok(RcShMem { internal: ManuallyDrop::new(self.internal.borrow_mut().clone_ref(&mapping.internal)?), provider: self.internal.clone(), }) @@ -535,10 +564,7 @@ where } #[cfg(all(unix, feature = "std", not(target_os = "haiku")))] -impl RcShMemProvider -where - SP: ShMemProvider, -{ +impl RcShMemProvider { /// "set" the "latch" /// (we abuse `pipes` as `semaphores`, as they don't need an additional shared mem region.) 
fn pipe_set(pipe: &mut Option) -> Result<(), Error> { @@ -597,9 +623,10 @@ where } #[cfg(all(unix, feature = "std", not(target_os = "haiku")))] -impl Default for RcShMemProvider +impl Default for RcShMemProvider where - SP: ShMemProvider + Debug, + SHM: ShMem, + SP: ShMemProvider, { fn default() -> Self { Self::new().unwrap() @@ -607,10 +634,7 @@ where } #[cfg(all(unix, feature = "std", not(target_os = "haiku")))] -impl RcShMemProvider> -where - SP: ShMemProvider + Debug, -{ +impl RcShMemProvider> { /// Forward to `ServedShMemProvider::on_restart` pub fn on_restart(&mut self) { self.internal.borrow_mut().on_restart(); @@ -965,14 +989,12 @@ pub mod unix_shmem { /// Implement [`ShMemProvider`] for [`MmapShMemProvider`]. #[cfg(unix)] - impl ShMemProvider for MmapShMemProvider { - type ShMem = MmapShMem; - + impl ShMemProvider for MmapShMemProvider { fn new() -> Result { Ok(Self {}) } - fn new_shmem(&mut self, map_size: usize) -> Result { + fn new_shmem(&mut self, map_size: usize) -> Result { let mut rand = StdRand::with_seed(crate::rands::random_seed()); let id = rand.next() as u32; let mut full_file_name = format!("libafl_{}_{}", process::id(), id); @@ -985,11 +1007,11 @@ pub mod unix_shmem { &mut self, id: ShMemId, size: usize, - ) -> Result { + ) -> Result { MmapShMem::shmem_from_id_and_size(id, size) } - fn release_shmem(&mut self, shmem: &mut Self::ShMem) { + fn release_shmem(&mut self, shmem: &mut MmapShMem) { let fd = CStr::from_bytes_until_nul(shmem.id().as_array()) .unwrap() .to_str() @@ -1113,13 +1135,11 @@ pub mod unix_shmem { /// Implement [`ShMemProvider`] for [`UnixShMemProvider`]. 
#[cfg(unix)] - impl ShMemProvider for CommonUnixShMemProvider { - type ShMem = CommonUnixShMem; - + impl ShMemProvider for CommonUnixShMemProvider { fn new() -> Result { Ok(Self {}) } - fn new_shmem(&mut self, map_size: usize) -> Result { + fn new_shmem(&mut self, map_size: usize) -> Result { CommonUnixShMem::new(map_size) } @@ -1127,7 +1147,7 @@ pub mod unix_shmem { &mut self, id: ShMemId, size: usize, - ) -> Result { + ) -> Result { CommonUnixShMem::shmem_from_id_and_size(id, size) } } @@ -1327,14 +1347,12 @@ pub mod unix_shmem { } /// Implement [`ShMemProvider`] for [`AshmemShMemProvider`], for the Android `ShMem`. - impl ShMemProvider for AshmemShMemProvider { - type ShMem = AshmemShMem; - + impl ShMemProvider for AshmemShMemProvider { fn new() -> Result { Ok(Self {}) } - fn new_shmem(&mut self, map_size: usize) -> Result { + fn new_shmem(&mut self, map_size: usize) -> Result { let mapping = AshmemShMem::new(map_size)?; Ok(mapping) } @@ -1343,7 +1361,7 @@ pub mod unix_shmem { &mut self, id: ShMemId, size: usize, - ) -> Result { + ) -> Result { AshmemShMem::shmem_from_id_and_size(id, size) } } @@ -1508,14 +1526,12 @@ pub mod unix_shmem { /// Implement [`ShMemProvider`] for [`MemfdShMemProvider`] #[cfg(unix)] - impl ShMemProvider for MemfdShMemProvider { - type ShMem = MemfdShMem; - + impl ShMemProvider for MemfdShMemProvider { fn new() -> Result { Ok(Self {}) } - fn new_shmem(&mut self, map_size: usize) -> Result { + fn new_shmem(&mut self, map_size: usize) -> Result { let mapping = MemfdShMem::new(map_size)?; Ok(mapping) } @@ -1524,7 +1540,7 @@ pub mod unix_shmem { &mut self, id: ShMemId, size: usize, - ) -> Result { + ) -> Result { MemfdShMem::shmem_from_id_and_size(id, size) } } @@ -1692,9 +1708,7 @@ pub mod win32_shmem { } /// Implement [`ShMemProvider`] for [`Win32ShMemProvider`] - impl ShMemProvider for Win32ShMemProvider { - type ShMem = Win32ShMem; - + impl ShMemProvider for Win32ShMemProvider { fn new() -> Result { Ok(Self {}) } @@ -1729,15 +1743,15 @@ 
impl DummyShMemService { /// A cursor around [`ShMem`] that immitates [`std::io::Cursor`]. Notably, this implements [`Write`] for [`ShMem`] in std environments. #[cfg(feature = "std")] #[derive(Debug)] -pub struct ShMemCursor { - inner: T, +pub struct ShMemCursor { + inner: SHM, pos: usize, } #[cfg(all(feature = "std", not(target_os = "haiku")))] -impl ShMemCursor { +impl ShMemCursor { /// Create a new [`ShMemCursor`] around [`ShMem`] - pub fn new(shmem: T) -> Self { + pub fn new(shmem: SHM) -> Self { Self { inner: shmem, pos: 0, @@ -1745,14 +1759,20 @@ impl ShMemCursor { } /// Slice from the current location on this map to the end, mutable - fn empty_slice_mut(&mut self) -> &mut [u8] { + fn empty_slice_mut(&mut self) -> &mut [u8] + where + SHM: DerefMut, + { use crate::AsSliceMut; &mut (self.inner.as_slice_mut()[self.pos..]) } } #[cfg(all(feature = "std", not(target_os = "haiku")))] -impl Write for ShMemCursor { +impl Write for ShMemCursor +where + SHM: DerefMut, +{ fn write(&mut self, buf: &[u8]) -> std::io::Result { match self.empty_slice_mut().write(buf) { Ok(w) => { @@ -1789,7 +1809,10 @@ impl Write for ShMemCursor { } #[cfg(feature = "std")] -impl std::io::Seek for ShMemCursor { +impl std::io::Seek for ShMemCursor +where + SHM: DerefMut, +{ fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result { let effective_new_pos = match pos { std::io::SeekFrom::Start(s) => s, diff --git a/libafl_bolts/src/staterestore.rs b/libafl_bolts/src/staterestore.rs index fbe27c492e..127bfcd488 100644 --- a/libafl_bolts/src/staterestore.rs +++ b/libafl_bolts/src/staterestore.rs @@ -65,17 +65,15 @@ impl StateShMemContent { /// it will instead write to disk, and store the file name into the map. /// Writing to [`StateRestorer`] multiple times is not allowed. 
#[derive(Debug, Clone)] -pub struct StateRestorer -where - SP: ShMemProvider, -{ - shmem: SP::ShMem, +pub struct StateRestorer { + shmem: SHM, phantom: PhantomData<*const SP>, } -impl StateRestorer +impl StateRestorer where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Get the map size backing this [`StateRestorer`]. pub fn mapsize(&self) -> usize { @@ -96,7 +94,7 @@ where } /// Create a new [`StateRestorer`]. - pub fn new(shmem: SP::ShMem) -> Self { + pub fn new(shmem: SHM) -> Self { let mut ret = Self { shmem, phantom: PhantomData, @@ -296,7 +294,7 @@ mod tests { }; use crate::{ - shmem::{ShMemProvider, StdShMemProvider}, + shmem::{ShMemProvider, StdShMem, StdShMemProvider}, staterestore::StateRestorer, }; @@ -304,7 +302,7 @@ mod tests { let mut shmem_provider = StdShMemProvider::new().unwrap(); let shmem = shmem_provider.new_shmem(TESTMAP_SIZE).unwrap(); - let mut state_restorer = StateRestorer::::new(shmem); + let mut state_restorer = StateRestorer::::new(shmem); let state = "hello world".to_string(); diff --git a/libafl_concolic/symcc_runtime/src/lib.rs b/libafl_concolic/symcc_runtime/src/lib.rs index 7746364cb6..00c02e94b4 100644 --- a/libafl_concolic/symcc_runtime/src/lib.rs +++ b/libafl_concolic/symcc_runtime/src/lib.rs @@ -46,6 +46,7 @@ pub mod cpp_runtime { #[doc(hidden)] pub use ctor::ctor; use libafl::observers::concolic; +pub use libafl_bolts::shmem::StdShMem; #[doc(hidden)] pub use libc::atexit; #[doc(hidden)] diff --git a/libafl_concolic/symcc_runtime/src/tracing.rs b/libafl_concolic/symcc_runtime/src/tracing.rs index d456407340..5b0cdf3113 100644 --- a/libafl_concolic/symcc_runtime/src/tracing.rs +++ b/libafl_concolic/symcc_runtime/src/tracing.rs @@ -2,23 +2,30 @@ pub use libafl::observers::concolic::serialization_format::StdShMemMessageFileWriter; use libafl::observers::concolic::SymExpr; +use libafl_bolts::shmem::ShMem; use crate::{RSymExpr, Runtime}; /// Traces the expressions according to the format described in 
[`libafl::observers::concolic::serialization_format`]. /// /// The format can be read from elsewhere to perform processing of the expressions outside of the runtime. -pub struct TracingRuntime { - writer: StdShMemMessageFileWriter, +pub struct TracingRuntime +where + SHM: ShMem, +{ + writer: StdShMemMessageFileWriter, trace_locations: bool, } -impl TracingRuntime { +impl TracingRuntime +where + SHM: ShMem, +{ /// Creates the runtime, tracing using the given writer. /// When `trace_locations` is true, location information for calls, returns and basic blocks will also be part of the trace. /// Tracing location information can drastically increase trace size. It is therefore recommended to not active this if not needed. #[must_use] - pub fn new(writer: StdShMemMessageFileWriter, trace_locations: bool) -> Self { + pub fn new(writer: StdShMemMessageFileWriter, trace_locations: bool) -> Self { Self { writer, trace_locations, @@ -62,7 +69,10 @@ macro_rules! binary_expression_builder { }; } -impl Runtime for TracingRuntime { +impl Runtime for TracingRuntime +where + SHM: ShMem, +{ #[no_mangle] fn build_integer_from_buffer( &mut self, @@ -201,7 +211,10 @@ impl Runtime for TracingRuntime { } } -impl Drop for TracingRuntime { +impl Drop for TracingRuntime +where + SHM: ShMem, +{ fn drop(&mut self) { // manually end the writer to update the length prefix self.writer diff --git a/libafl_concolic/test/runtime_test/src/lib.rs b/libafl_concolic/test/runtime_test/src/lib.rs index 56c1ce003c..d6ec636d97 100644 --- a/libafl_concolic/test/runtime_test/src/lib.rs +++ b/libafl_concolic/test/runtime_test/src/lib.rs @@ -7,9 +7,11 @@ use symcc_runtime::{ export_runtime, filter::NoFloat, tracing::{self, StdShMemMessageFileWriter}, - Runtime, + Runtime, StdShMem, }; +// use libafl_bolts::StdShmem; + export_runtime!( NoFloat => NoFloat; tracing::TracingRuntime::new( @@ -17,5 +19,5 @@ export_runtime!( .expect("unable to construct tracing runtime writer. 
(missing env?)"), false ) - => tracing::TracingRuntime + => tracing::TracingRuntime ); diff --git a/libafl_qemu/src/executor.rs b/libafl_qemu/src/executor.rs index 08f133f279..6e861957fa 100644 --- a/libafl_qemu/src/executor.rs +++ b/libafl_qemu/src/executor.rs @@ -28,6 +28,7 @@ use libafl::{ use libafl_bolts::shmem::ShMemProvider; use libafl_bolts::{ os::unix_signals::{ucontext_t, Signal}, + shmem::ShMem, tuples::RefIndexable, }; #[cfg(feature = "systemmode")] @@ -43,7 +44,7 @@ use crate::Qemu; use crate::{command::CommandManager, modules::EmulatorModuleTuple, Emulator, EmulatorDriver}; type EmulatorInProcessExecutor<'a, C, CM, ED, ET, H, I, OT, S, SM> = - StatefulInProcessExecutor<'a, H, I, OT, S, Emulator>; + StatefulInProcessExecutor<'a, Emulator, H, I, OT, S>; pub struct QemuExecutor<'a, C, CM, ED, ET, H, I, OT, S, SM> { inner: EmulatorInProcessExecutor<'a, C, CM, ED, ET, H, I, OT, S, SM>, @@ -207,7 +208,7 @@ where } inner.inprocess_hooks_mut().timeout_handler = inproc_qemu_timeout_handler::< - StatefulInProcessExecutor<'a, H, I, OT, S, Emulator>, + StatefulInProcessExecutor<'a, Emulator, H, I, OT, S>, EM, ET, I, @@ -298,27 +299,41 @@ where } } -pub type QemuInProcessForkExecutor<'a, C, CM, ED, EM, ET, H, I, OT, S, SM, SP, Z> = - StatefulInProcessForkExecutor<'a, H, I, OT, S, SP, Emulator, EM, Z>; +pub type QemuInProcessForkExecutor<'a, C, CM, ED, EM, ET, H, I, OT, S, SHM, SM, SP, Z> = + StatefulInProcessForkExecutor< + 'a, + EM, + Emulator, + H, + I, + OT, + S, + SHM, + SP, + Z, + >; #[cfg(feature = "fork")] -pub struct QemuForkExecutor<'a, C, CM, ED, EM, ET, H, I, OT, S, SM, SP, Z> { - inner: QemuInProcessForkExecutor<'a, C, CM, ED, EM, ET, H, I, OT, S, SM, SP, Z>, +#[expect(clippy::type_complexity)] +pub struct QemuForkExecutor<'a, C, CM, ED, EM, ET, H, I, OT, S, SHM, SM, SP, Z> { + inner: QemuInProcessForkExecutor<'a, C, CM, ED, EM, ET, H, I, OT, S, SHM, SM, SP, Z>, } #[cfg(feature = "fork")] -impl Debug - for QemuForkExecutor<'_, C, CM, ED, EM, ET, H, I, 
OT, S, SM, SP, Z> +impl Debug + for QemuForkExecutor<'_, C, CM, ED, EM, ET, H, I, OT, S, SHM, SM, SP, Z> where C: Debug, CM: Debug, ED: Debug, + EM: Debug, ET: EmulatorModuleTuple + Debug, OT: ObserversTuple + Debug, I: Debug, S: Debug, SM: Debug, - SP: ShMemProvider, + SHM: Debug, + SP: Debug, { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("QemuForkExecutor") @@ -329,14 +344,15 @@ where } #[cfg(feature = "fork")] -impl<'a, C, CM, ED, EM, ET, H, I, OT, S, SM, SP, Z> - QemuForkExecutor<'a, C, CM, ED, EM, ET, H, I, OT, S, SM, SP, Z> +impl<'a, C, CM, ED, EM, ET, H, I, OT, S, SHM, SM, SP, Z> + QemuForkExecutor<'a, C, CM, ED, EM, ET, H, I, OT, S, SHM, SM, SP, Z> where EM: EventFirer + EventRestarter, ET: EmulatorModuleTuple, OT: ObserversTuple, S: HasSolutions, - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, Z: HasObjective, Z::Objective: Feedback, { @@ -370,14 +386,14 @@ where #[allow(clippy::type_complexity)] pub fn inner( &self, - ) -> &QemuInProcessForkExecutor<'a, C, CM, ED, EM, ET, H, I, OT, S, SM, SP, Z> { + ) -> &QemuInProcessForkExecutor<'a, C, CM, ED, EM, ET, H, I, OT, S, SHM, SM, SP, Z> { &self.inner } #[allow(clippy::type_complexity)] pub fn inner_mut( &mut self, - ) -> &mut QemuInProcessForkExecutor<'a, C, CM, ED, EM, ET, H, I, OT, S, SM, SP, Z> { + ) -> &mut QemuInProcessForkExecutor<'a, C, CM, ED, EM, ET, H, I, OT, S, SHM, SM, SP, Z> { &mut self.inner } @@ -391,8 +407,8 @@ where } #[cfg(feature = "fork")] -impl Executor - for QemuForkExecutor<'_, C, CM, ED, EM, ET, H, I, OT, S, SM, SP, Z> +impl Executor + for QemuForkExecutor<'_, C, CM, ED, EM, ET, H, I, OT, S, SHM, SM, SP, Z> where C: Clone, CM: CommandManager, @@ -404,7 +420,8 @@ where OT: ObserversTuple + Debug, I: Input + Unpin, S: HasExecutions + Unpin, - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, Z: HasObjective, { fn run_target( @@ -432,12 +449,11 @@ where } #[cfg(feature = "fork")] -impl HasObservers - for QemuForkExecutor<'_, C, CM, ED, EM, ET, H, I, OT, 
S, SM, SP, Z> +impl HasObservers + for QemuForkExecutor<'_, C, CM, ED, EM, ET, H, I, OT, S, SHM, SM, SP, Z> where ET: EmulatorModuleTuple, OT: ObserversTuple, - SP: ShMemProvider, { type Observers = OT; #[inline] diff --git a/libafl_sugar/src/forkserver.rs b/libafl_sugar/src/forkserver.rs index e21a78d9c5..2e285897ee 100644 --- a/libafl_sugar/src/forkserver.rs +++ b/libafl_sugar/src/forkserver.rs @@ -119,7 +119,7 @@ impl ForkserverBytesCoverageSugar<'_> { let time_ref = time_observer.handle(); let mut run_client = |state: Option<_>, - mut mgr: LlmpRestartingEventManager<_, _, _, _>, + mut mgr: LlmpRestartingEventManager<_, _, _, _, _>, _core_id| { let time_observer = time_observer.clone(); diff --git a/libafl_sugar/src/inmemory.rs b/libafl_sugar/src/inmemory.rs index 2a9b41ad7b..e26765a26d 100644 --- a/libafl_sugar/src/inmemory.rs +++ b/libafl_sugar/src/inmemory.rs @@ -147,7 +147,7 @@ where let time_ref = time_observer.handle(); let mut run_client = |state: Option<_>, - mut mgr: LlmpRestartingEventManager<_, _, _, _>, + mut mgr: LlmpRestartingEventManager<_, _, _, _, _>, _core_id| { let time_observer = time_observer.clone(); diff --git a/libafl_sugar/src/qemu.rs b/libafl_sugar/src/qemu.rs index fa4be32f8a..50c8d3a284 100644 --- a/libafl_sugar/src/qemu.rs +++ b/libafl_sugar/src/qemu.rs @@ -150,7 +150,7 @@ where let time_ref = time_observer.handle(); let mut run_client = |state: Option<_>, - mut mgr: LlmpRestartingEventManager<_, _, _, _>, + mut mgr: LlmpRestartingEventManager<_, _, _, _, _>, _core_id| { let time_observer = time_observer.clone(); diff --git a/libafl_tinyinst/src/executor.rs b/libafl_tinyinst/src/executor.rs index 03381bd172..bf50c1b327 100644 --- a/libafl_tinyinst/src/executor.rs +++ b/libafl_tinyinst/src/executor.rs @@ -1,4 +1,5 @@ use core::{marker::PhantomData, ptr, time::Duration}; +use std::fmt::{Debug, Formatter}; use libafl::{ executors::{Executor, ExitKind, HasObservers}, @@ -8,50 +9,44 @@ use libafl::{ }; use libafl_bolts::{ fs::{InputFile, 
INPUTFILE_STD}, - shmem::{NopShMemProvider, ShMem, ShMemProvider}, + shmem::{NopShMem, NopShMemProvider, ShMem, ShMemProvider}, tuples::RefIndexable, AsSlice, AsSliceMut, }; use tinyinst::tinyinst::{litecov::RunResult, TinyInst}; /// [`TinyInst`](https://github.com/googleprojectzero/TinyInst) executor -pub struct TinyInstExecutor -where - SP: ShMemProvider, -{ +pub struct TinyInstExecutor { tinyinst: TinyInst, coverage_ptr: *mut Vec, timeout: Duration, observers: OT, phantom: PhantomData, cur_input: InputFile, - map: Option<::ShMem>, + map: Option, } -impl TinyInstExecutor<(), NopShMemProvider, ()> { +impl TinyInstExecutor<(), NopShMem, ()> { /// Create a builder for [`TinyInstExecutor`] #[must_use] - pub fn builder<'a>() -> TinyInstExecutorBuilder<'a, NopShMemProvider> { + pub fn builder<'a>() -> TinyInstExecutorBuilder<'a, NopShMem, NopShMemProvider> { TinyInstExecutorBuilder::new() } } -impl std::fmt::Debug for TinyInstExecutor -where - SP: ShMemProvider, -{ - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl Debug for TinyInstExecutor { + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { f.debug_struct("TinyInstExecutor") .field("timeout", &self.timeout) .finish_non_exhaustive() } } -impl Executor for TinyInstExecutor +impl Executor for TinyInstExecutor where S: HasExecutions, I: HasTargetBytes, - SP: ShMemProvider, + SHM: ShMem, { #[inline] fn run_target( @@ -101,55 +96,59 @@ where /// Builder for `TinyInstExecutor` #[derive(Debug)] -pub struct TinyInstExecutorBuilder<'a, SP> { +pub struct TinyInstExecutorBuilder<'a, SHM, SP> { tinyinst_args: Vec, program_args: Vec, timeout: Duration, coverage_ptr: *mut Vec, shmem_provider: Option<&'a mut SP>, + phantom: PhantomData, } const MAX_FILE: usize = 1024 * 1024; const SHMEM_FUZZ_HDR_SIZE: usize = 4; -impl Default for TinyInstExecutorBuilder<'_, NopShMemProvider> { +impl Default for TinyInstExecutorBuilder<'_, NopShMem, NopShMemProvider> { fn default() -> Self { Self::new() 
} } -impl<'a> TinyInstExecutorBuilder<'a, NopShMemProvider> { +impl<'a> TinyInstExecutorBuilder<'a, NopShMem, NopShMemProvider> { /// Constructor #[must_use] - pub fn new() -> TinyInstExecutorBuilder<'a, NopShMemProvider> { + pub fn new() -> TinyInstExecutorBuilder<'a, NopShMem, NopShMemProvider> { Self { tinyinst_args: vec![], program_args: vec![], timeout: Duration::new(3, 0), shmem_provider: None, coverage_ptr: ptr::null_mut(), + phantom: PhantomData, } } /// Use this to enable shmem testcase passing. #[must_use] - pub fn shmem_provider( + pub fn shmem_provider( self, shmem_provider: &'a mut SP, - ) -> TinyInstExecutorBuilder<'a, SP> { + ) -> TinyInstExecutorBuilder<'a, SHM, SP> { TinyInstExecutorBuilder { tinyinst_args: self.tinyinst_args, program_args: self.program_args, timeout: self.timeout, shmem_provider: Some(shmem_provider), coverage_ptr: ptr::null_mut(), + phantom: PhantomData, } } } -impl TinyInstExecutorBuilder<'_, SP> +impl TinyInstExecutorBuilder<'_, SHM, SP> where - SP: ShMemProvider, + SHM: ShMem, + SP: ShMemProvider, { /// Argument for tinyinst instrumentation #[must_use] @@ -246,7 +245,7 @@ where } /// Build [`TinyInst`](https://github.com/googleprojectzero/TinyInst) executor - pub fn build(&mut self, observers: OT) -> Result, Error> { + pub fn build(&mut self, observers: OT) -> Result, Error> { if self.coverage_ptr.is_null() { return Err(Error::illegal_argument("Coverage pointer may not be null.")); } @@ -313,10 +312,7 @@ where } } -impl HasObservers for TinyInstExecutor -where - SP: ShMemProvider, -{ +impl HasObservers for TinyInstExecutor { type Observers = OT; fn observers(&self) -> RefIndexable<&Self::Observers, Self::Observers> {