From 4e0a28df696589a039061aed0b7179ba3c9989f5 Mon Sep 17 00:00:00 2001 From: Dimitris Sarlis Date: Mon, 13 Jan 2025 17:38:48 +0200 Subject: [PATCH 01/33] fix: Account instructions for bytes accessed and dirty pages on system subnets (#3396) This is another change that aims to make system subnets behave closer to application subnets. --- rs/config/src/subnet_config.rs | 3 +- .../tests/wasmtime_random_memory_writes.rs | 184 +++++++----------- .../src/execution/update/tests.rs | 10 +- rs/system_api/src/lib.rs | 7 +- 4 files changed, 78 insertions(+), 126 deletions(-) diff --git a/rs/config/src/subnet_config.rs b/rs/config/src/subnet_config.rs index 3724a39fa5d..0bbefe812c5 100644 --- a/rs/config/src/subnet_config.rs +++ b/rs/config/src/subnet_config.rs @@ -140,7 +140,6 @@ const DEFAULT_REFERENCE_SUBNET_SIZE: usize = 13; /// Costs for each newly created dirty page in stable memory. const DEFAULT_DIRTY_PAGE_OVERHEAD: NumInstructions = NumInstructions::new(1_000); -const SYSTEM_SUBNET_DIRTY_PAGE_OVERHEAD: NumInstructions = NumInstructions::new(0); /// Accumulated priority reset interval, rounds. /// @@ -341,7 +340,7 @@ impl SchedulerConfig { // This limit should be high enough (1000T) to effectively disable // rate-limiting for the system subnets. install_code_rate_limit: NumInstructions::from(1_000_000_000_000_000), - dirty_page_overhead: SYSTEM_SUBNET_DIRTY_PAGE_OVERHEAD, + dirty_page_overhead: DEFAULT_DIRTY_PAGE_OVERHEAD, accumulated_priority_reset_interval: ACCUMULATED_PRIORITY_RESET_INTERVAL, upload_wasm_chunk_instructions: NumInstructions::from(0), canister_snapshot_baseline_instructions: NumInstructions::from(0), diff --git a/rs/embedders/tests/wasmtime_random_memory_writes.rs b/rs/embedders/tests/wasmtime_random_memory_writes.rs index 14eb3700fa0..beb4733dbcb 100644 --- a/rs/embedders/tests/wasmtime_random_memory_writes.rs +++ b/rs/embedders/tests/wasmtime_random_memory_writes.rs @@ -980,126 +980,90 @@ mod tests { #[test] fn test_proportional_instructions_consumption_to_data_size() { - with_test_replica_logger(|log| { - let subnet_type = SubnetType::Application; - let dst: u32 = 0; - - let dirty_heap_cost = match EmbeddersConfig::default().metering_type { - ic_config::embedders::MeteringType::New => SchedulerConfig::application_subnet() - .dirty_page_overhead - .get(), - _ => 0, - }; + for subnet_type in [ + SubnetType::Application, + SubnetType::VerifiedApplication, + SubnetType::System, + ] { + with_test_replica_logger(|log| { + let dst: u32 = 0; + + let dirty_heap_cost = match EmbeddersConfig::default().metering_type { + ic_config::embedders::MeteringType::New => match subnet_type { + SubnetType::System => { + SchedulerConfig::system_subnet().dirty_page_overhead.get() + } + SubnetType::Application => SchedulerConfig::application_subnet() + .dirty_page_overhead + .get(), + SubnetType::VerifiedApplication => { + SchedulerConfig::verified_application_subnet() + .dirty_page_overhead + .get() + } + }, + _ => 0, + }; - let mut payload: Vec = dst.to_le_bytes().to_vec(); - payload.extend(random_payload()); - let payload_size = payload.len() - 4; + let mut payload: Vec = dst.to_le_bytes().to_vec(); + payload.extend(random_payload()); + let payload_size = payload.len() - 4; - let mut double_size_payload: Vec = payload.clone(); - double_size_payload.extend(random_payload()); + let mut double_size_payload: Vec = payload.clone(); + double_size_payload.extend(random_payload()); - let (instructions_consumed_without_data, dry_run_stats) = run_and_get_stats( - log.clone(), - "write_bytes", - 
dst.to_le_bytes().to_vec(), - MAX_NUM_INSTRUCTIONS, - subnet_type, - ) - .unwrap(); - let dry_run_dirty_heap = dry_run_stats.wasm_dirty_pages.len() as u64; - - { - // Number of instructions consumed only for copying the payload. - let (consumed_instructions, run_stats) = run_and_get_stats( + let (instructions_consumed_without_data, dry_run_stats) = run_and_get_stats( log.clone(), "write_bytes", - payload, - MAX_NUM_INSTRUCTIONS, - subnet_type, - ) - .unwrap(); - let dirty_heap = run_stats.wasm_dirty_pages.len() as u64; - let consumed_instructions = - consumed_instructions - instructions_consumed_without_data; - assert_eq!( - (consumed_instructions.get() - dirty_heap * dirty_heap_cost) as usize, - (payload_size / BYTES_PER_INSTRUCTION) - - (dry_run_dirty_heap * dirty_heap_cost) as usize, - ); - } - - { - // Number of instructions consumed increased with the size of the data. - let (consumed_instructions, run_stats) = run_and_get_stats( - log, - "write_bytes", - double_size_payload, + dst.to_le_bytes().to_vec(), MAX_NUM_INSTRUCTIONS, subnet_type, ) .unwrap(); - let dirty_heap = run_stats.wasm_dirty_pages.len() as u64; - let consumed_instructions = - consumed_instructions - instructions_consumed_without_data; - - assert_eq!( - (consumed_instructions.get() - dirty_heap * dirty_heap_cost) as usize, - (2 * payload_size / BYTES_PER_INSTRUCTION) - - (dry_run_dirty_heap * dirty_heap_cost) as usize - ); - } - }) - } - - #[test] - fn test_no_instructions_consumption_based_on_data_size_on_system_subnet() { - with_test_replica_logger(|log| { - let subnet_type = SubnetType::System; - let dst: u32 = 0; - - let mut payload: Vec = dst.to_le_bytes().to_vec(); - payload.extend(random_payload()); - - let mut double_size_payload: Vec = payload.clone(); - double_size_payload.extend(random_payload()); - - let instructions_consumed_without_data = get_num_instructions_consumed( - log.clone(), - "write_bytes", - dst.to_le_bytes().to_vec(), - MAX_NUM_INSTRUCTIONS, - subnet_type, - ) - .unwrap(); - - { - // Number of instructions consumed for copying the payload is zero. - let consumed_instructions = get_num_instructions_consumed( - log.clone(), - "write_bytes", - payload, - MAX_NUM_INSTRUCTIONS, - subnet_type, - ) - .unwrap() - - instructions_consumed_without_data; - assert_eq!(consumed_instructions.get(), 0); - } + let dry_run_dirty_heap = dry_run_stats.wasm_dirty_pages.len() as u64; + + { + // Number of instructions consumed only for copying the payload. + let (consumed_instructions, run_stats) = run_and_get_stats( + log.clone(), + "write_bytes", + payload, + MAX_NUM_INSTRUCTIONS, + subnet_type, + ) + .unwrap(); + let dirty_heap = run_stats.wasm_dirty_pages.len() as u64; + let consumed_instructions = + consumed_instructions - instructions_consumed_without_data; + assert_eq!( + (consumed_instructions.get() - dirty_heap * dirty_heap_cost) as usize, + (payload_size / BYTES_PER_INSTRUCTION) + - (dry_run_dirty_heap * dirty_heap_cost) as usize, + ); + } - { - // Number of instructions consumed for copying the payload is zero. - let consumed_instructions = get_num_instructions_consumed( - log, - "write_bytes", - double_size_payload, - MAX_NUM_INSTRUCTIONS, - subnet_type, - ) - .unwrap() - - instructions_consumed_without_data; - assert_eq!(consumed_instructions.get(), 0); - } - }) + { + // Number of instructions consumed increased with the size of the data. 
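+                    // Sketch of the accounting the assertion below relies on
+                    // (an inference from this test's setup, not a documented
+                    // formula):
+                    //   consumed ~= bytes_copied / BYTES_PER_INSTRUCTION
+                    //               + dirty_pages * dirty_heap_cost.
+                    // Subtracting the dirty-page terms from both sides leaves
+                    // the per-byte copy cost, which should double along with
+                    // the payload size.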
+ let (consumed_instructions, run_stats) = run_and_get_stats( + log, + "write_bytes", + double_size_payload, + MAX_NUM_INSTRUCTIONS, + subnet_type, + ) + .unwrap(); + let dirty_heap = run_stats.wasm_dirty_pages.len() as u64; + let consumed_instructions = + consumed_instructions - instructions_consumed_without_data; + + assert_eq!( + (consumed_instructions.get() - dirty_heap * dirty_heap_cost) as usize, + (2 * payload_size / BYTES_PER_INSTRUCTION) + - (dry_run_dirty_heap * dirty_heap_cost) as usize + ); + } + }) + } } fn run_and_get_stats( diff --git a/rs/execution_environment/src/execution/update/tests.rs b/rs/execution_environment/src/execution/update/tests.rs index f8673ace1e6..2cd76a57e7e 100644 --- a/rs/execution_environment/src/execution/update/tests.rs +++ b/rs/execution_environment/src/execution/update/tests.rs @@ -3,7 +3,6 @@ use std::time::Duration; use assert_matches::assert_matches; use ic_base_types::NumSeconds; -use ic_config::subnet_config::SchedulerConfig; use ic_error_types::ErrorCode; use ic_interfaces::execution_environment::SubnetAvailableMemory; use ic_registry_subnet_type::SubnetType; @@ -352,7 +351,7 @@ fn dts_update_concurrent_cycles_change_fails() { } #[test] -fn dirty_pages_are_free_on_system_subnet() { +fn dirty_pages_cost_the_same_on_app_and_system_subnets() { fn instructions_to_write_stable_byte(mut test: ExecutionTest) -> NumInstructions { let initial_cycles = Cycles::new(1_000_000_000_000); let a_id = test.universal_canister_with_cycles(initial_cycles).unwrap(); @@ -376,12 +375,7 @@ fn dirty_pages_are_free_on_system_subnet() { .build(); let app_instructions = instructions_to_write_stable_byte(app_test); - // Can't check for equality because there are other charges that are omitted - // on system subnets. - assert!( - app_instructions - > system_instructions + SchedulerConfig::application_subnet().dirty_page_overhead - ); + assert_eq!(app_instructions, system_instructions); } #[test] diff --git a/rs/system_api/src/lib.rs b/rs/system_api/src/lib.rs index 4a0c6575bc3..4925252e6f7 100644 --- a/rs/system_api/src/lib.rs +++ b/rs/system_api/src/lib.rs @@ -1639,12 +1639,7 @@ impl SystemApi for SystemApiImpl { } fn get_num_instructions_from_bytes(&self, num_bytes: NumBytes) -> NumInstructions { - match self.sandbox_safe_system_state.subnet_type { - SubnetType::System => NumInstructions::from(0), - SubnetType::VerifiedApplication | SubnetType::Application => { - NumInstructions::from(num_bytes.get()) - } - } + NumInstructions::from(num_bytes.get()) } fn stable_memory_dirty_pages(&self) -> Vec<(PageIndex, &PageBytes)> { From a358756a6fef81b8d3d6bf70c56f2886097825a9 Mon Sep 17 00:00:00 2001 From: Andre Popovitch Date: Mon, 13 Jan 2025 11:00:45 -0600 Subject: [PATCH 02/33] refactor: clean up the SNS Governance API type definitions (#3392) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The SNS Governance API types are for manual editing, but still have a bunch of stuff left over from when they were autogenerated. 1. Replaced `::core::option::Option` with `Option` and similar 2. Removed `prost` derives. API types aren't ever serialized to `proto`. Why not get rid of the `prost` derives? They make things really confusing and hard to edit, and lead to us accidentally serializing the wrong types (see the previous PR). I also removed other derives like `comparable::Comparable` which I don't think we normally see on our types. 3. Added additional derives for things like Debug and Default (which previously `prost` was giving us). 4. 
Remove the `// This file is @generated by prost-build.` comment These changes were pretty much done all mechanically. Sorry for not splitting them up into separate commits Now we should be able to start evolving our API types and our internal types independently! [← Previous PR](https://github.com/dfinity/ic/pull/3391) --- .../api/src/ic_sns_governance.pb.v1.rs | 2367 +++-------------- 1 file changed, 442 insertions(+), 1925 deletions(-) diff --git a/rs/sns/governance/api/src/ic_sns_governance.pb.v1.rs b/rs/sns/governance/api/src/ic_sns_governance.pb.v1.rs index da87417e25f..09f66dafdbe 100644 --- a/rs/sns/governance/api/src/ic_sns_governance.pb.v1.rs +++ b/rs/sns/governance/api/src/ic_sns_governance.pb.v1.rs @@ -1,90 +1,57 @@ -// This file is @generated by prost-build. +use std::collections::BTreeMap; + /// A principal with a particular set of permissions over a neuron. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct NeuronPermission { /// The principal that has the permissions. - #[prost(message, optional, tag = "1")] - pub principal: ::core::option::Option<::ic_base_types::PrincipalId>, + pub principal: Option<::ic_base_types::PrincipalId>, /// The list of permissions that this principal has. - #[prost(enumeration = "NeuronPermissionType", repeated, tag = "2")] - pub permission_type: ::prost::alloc::vec::Vec, + pub permission_type: Vec, } /// The id of a specific neuron, which equals the neuron's subaccount on the ledger canister /// (the account that holds the neuron's staked tokens). #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Eq, - std::hash::Hash, - Clone, - PartialEq, - ::prost::Message, + Default, candid::CandidType, candid::Deserialize, Debug, Eq, std::hash::Hash, Clone, PartialEq, )] pub struct NeuronId { - #[prost(bytes = "vec", tag = "1")] #[serde(with = "serde_bytes")] - pub id: ::prost::alloc::vec::Vec, + pub id: Vec, } /// A sequence of NeuronIds, which is used to get prost to generate a type isomorphic to Option>. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct NeuronIds { - #[prost(message, repeated, tag = "1")] - pub neuron_ids: ::prost::alloc::vec::Vec, + pub neuron_ids: Vec, } /// The id of a specific proposal. -#[derive(candid::CandidType, candid::Deserialize, comparable::Comparable, serde::Serialize)] -#[self_describing] -#[derive(Clone, Copy, PartialEq, ::prost::Message)] -pub struct ProposalId { - #[prost(uint64, tag = "1")] - pub id: u64, -} #[derive( + Default, candid::CandidType, candid::Deserialize, - comparable::Comparable, + Debug, + serde::Serialize, Clone, + Copy, PartialEq, - ::prost::Message, )] +pub struct ProposalId { + pub id: u64, +} +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct DisburseMaturityInProgress { /// This field is the quantity of maturity in e8s that has been decremented from a Neuron to /// be modulated and disbursed as SNS tokens. 
- #[prost(uint64, tag = "1")] pub amount_e8s: u64, - #[prost(uint64, tag = "2")] pub timestamp_of_disbursement_seconds: u64, - #[prost(message, optional, tag = "3")] - pub account_to_disburse_to: ::core::option::Option, - #[prost(uint64, optional, tag = "4")] - pub finalize_disbursement_timestamp_seconds: ::core::option::Option, + pub account_to_disburse_to: Option, + pub finalize_disbursement_timestamp_seconds: Option, } /// A neuron in the governance system. -#[derive(candid::CandidType, candid::Deserialize, comparable::Comparable)] -#[compare_default] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct Neuron { /// The unique id of this neuron. - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, + pub id: Option, /// The principal or list of principals with a particular set of permissions over a neuron. - #[prost(message, repeated, tag = "2")] - pub permissions: ::prost::alloc::vec::Vec, + pub permissions: Vec, /// The cached record of the neuron's staked governance tokens, measured in /// fractions of 10E-8 of a governance token. /// @@ -92,7 +59,6 @@ pub struct Neuron { /// that can be set by each SNS. Neurons that are created by claiming a neuron, spawning a neuron, /// or splitting a neuron must have at least that stake (in the case of splitting both the parent neuron /// and the new neuron must have at least that stake). - #[prost(uint64, tag = "3")] pub cached_neuron_stake_e8s: u64, /// TODO NNS1-1052 - Update if this ticket is done and fees are burned / minted instead of tracked in this attribute. /// @@ -100,10 +66,8 @@ pub struct Neuron { /// due to making proposals that were subsequently rejected. /// Must be smaller than 'cached_neuron_stake_e8s'. When a neuron is /// disbursed, these governance tokens will be burned. - #[prost(uint64, tag = "4")] pub neuron_fees_e8s: u64, /// The timestamp, in seconds from the Unix epoch, when the neuron was created. - #[prost(uint64, tag = "5")] pub created_timestamp_seconds: u64, /// The timestamp, in seconds from the Unix epoch, when this neuron has entered /// the non-dissolving state. This is either the creation time or the last time at @@ -112,30 +76,25 @@ pub struct Neuron { /// This value is meaningless when the neuron is dissolving, since a /// dissolving neurons always has age zero. The canonical value of /// this field for a dissolving neuron is `u64::MAX`. - #[prost(uint64, tag = "6")] pub aging_since_timestamp_seconds: u64, /// The neuron's followees, specified as a map of proposal functions IDs to followees neuron IDs. /// The map's keys are represented by integers as Protobuf does not support enum keys in maps. - #[prost(btree_map = "uint64, message", tag = "11")] - pub followees: ::prost::alloc::collections::BTreeMap, + pub followees: BTreeMap, /// The accumulated unstaked maturity of the neuron, measured in "e8s equivalent", i.e., in equivalent of /// 10E-8 of a governance token. /// /// The unit is "equivalent" to insist that, while this quantity is on the /// same scale as the governance token, maturity is not directly convertible to /// governance tokens: conversion requires a minting event and the conversion rate is variable. - #[prost(uint64, tag = "12")] pub maturity_e8s_equivalent: u64, /// A percentage multiplier to be applied when calculating the voting power of a neuron. /// The multiplier's unit is a integer percentage in the range of 0 to 100. 
The /// voting_power_percentage_multiplier can only be less than 100 for a developer neuron /// that is created at SNS initialization. - #[prost(uint64, tag = "13")] pub voting_power_percentage_multiplier: u64, /// The ID of the NNS neuron whose Community Fund participation resulted in the /// creation of this SNS neuron. - #[prost(uint64, optional, tag = "14")] - pub source_nns_neuron_id: ::core::option::Option, + pub source_nns_neuron_id: Option, /// The accumulated staked maturity of the neuron, in "e8s equivalent" (see /// "maturity_e8s_equivalent"). Staked maturity becomes regular maturity once /// the neuron is dissolved. @@ -145,12 +104,10 @@ pub struct Neuron { /// and rewards. Once the neuron is dissolved, this maturity will be "moved" /// to 'maturity_e8s_equivalent' and will be able to be spawned (with maturity /// modulation). - #[prost(uint64, optional, tag = "15")] - pub staked_maturity_e8s_equivalent: ::core::option::Option, + pub staked_maturity_e8s_equivalent: Option, /// If set and true the maturity rewarded to this neuron for voting will be /// automatically staked and will contribute to the neuron's voting power. - #[prost(bool, optional, tag = "16")] - pub auto_stake_maturity: ::core::option::Option, + pub auto_stake_maturity: Option, /// The duration that this neuron is vesting. /// /// A neuron that is vesting is non-dissolving and cannot start dissolving until the vesting duration has elapsed. @@ -159,14 +116,12 @@ pub struct Neuron { /// for a particular SNS instance might be 1 year, but the devs of the project may set their vesting duration to 3 /// years and dissolve delay to 1 year in order to prove that they are making a minimum 4 year commitment to the /// project. - #[prost(uint64, optional, tag = "17")] - pub vesting_period_seconds: ::core::option::Option, + pub vesting_period_seconds: Option, /// Disburse maturity operations that are currently underway. /// The entries are sorted by `timestamp_of_disbursement_seconds`-values, /// with the oldest entries first, i.e. it holds for all i that: /// entry\[i\].timestamp_of_disbursement_seconds <= entry\[i+1\].timestamp_of_disbursement_seconds - #[prost(message, repeated, tag = "18")] - pub disburse_maturity_in_progress: ::prost::alloc::vec::Vec, + pub disburse_maturity_in_progress: Vec, /// The neuron's dissolve state, specifying whether the neuron is dissolving, /// non-dissolving, or dissolved. /// @@ -182,23 +137,14 @@ pub struct Neuron { /// `Dissolved`. All other states represent the dissolved /// state. That is, (a) `when_dissolved_timestamp_seconds` is set and in the past, /// (b) `when_dissolved_timestamp_seconds` is set to zero, (c) neither value is set. - #[prost(oneof = "neuron::DissolveState", tags = "7, 8")] - pub dissolve_state: ::core::option::Option, + pub dissolve_state: Option, } /// Nested message and enum types in `Neuron`. pub mod neuron { /// A list of a neuron's followees for a specific function. - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct Followees { - #[prost(message, repeated, tag = "1")] - pub followees: ::prost::alloc::vec::Vec, + pub followees: Vec, } /// The neuron's dissolve state, specifying whether the neuron is dissolving, /// non-dissolving, or dissolved. @@ -215,15 +161,7 @@ pub mod neuron { /// `Dissolved`. All other states represent the dissolved /// state. 
That is, (a) `when_dissolved_timestamp_seconds` is set and in the past, /// (b) `when_dissolved_timestamp_seconds` is set to zero, (c) neither value is set. - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Oneof, - )] + #[derive(candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub enum DissolveState { /// When the dissolve timer is running, this stores the timestamp, /// in seconds from the Unix epoch, at which the neuron is dissolved. @@ -232,7 +170,6 @@ pub mod neuron { /// may pause dissolving, in which case `dissolve_delay_seconds` /// will get assigned to: `when_dissolved_timestamp_seconds - /// `. - #[prost(uint64, tag = "7")] WhenDissolvedTimestampSeconds(u64), /// When the dissolve timer is stopped, this stores how much time, /// in seconds, the dissolve timer will be started with if the neuron is set back to 'Dissolving'. @@ -241,7 +178,6 @@ pub mod neuron { /// dissolving, in which case `when_dissolved_timestamp_seconds` /// will get assigned to: ` + /// dissolve_delay_seconds`. - #[prost(uint64, tag = "8")] DissolveDelaySeconds(u64), } } @@ -257,77 +193,46 @@ pub mod neuron { /// /// Note that the target, validator and rendering methods can all coexist in /// the same canister or be on different canisters. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct NervousSystemFunction { /// The unique id of this function. /// /// Ids 0-999 are reserved for native governance proposals and can't /// be used by generic NervousSystemFunction's. - #[prost(uint64, tag = "1")] pub id: u64, /// A short (<256 chars) description of the NervousSystemFunction. - #[prost(string, tag = "2")] - pub name: ::prost::alloc::string::String, + pub name: String, /// An optional description of what the NervousSystemFunction does. - #[prost(string, optional, tag = "3")] - pub description: ::core::option::Option<::prost::alloc::string::String>, - #[prost(oneof = "nervous_system_function::FunctionType", tags = "4, 5")] - pub function_type: ::core::option::Option, + pub description: Option, + pub function_type: Option, } /// Nested message and enum types in `NervousSystemFunction`. pub mod nervous_system_function { - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct GenericNervousSystemFunction { /// The id of the target canister that will be called to execute the proposal. - #[prost(message, optional, tag = "2")] - pub target_canister_id: ::core::option::Option<::ic_base_types::PrincipalId>, + pub target_canister_id: Option<::ic_base_types::PrincipalId>, /// The name of the method that will be called to execute the proposal. /// The signature of the method must be equivalent to the following: /// (proposal_data: ProposalData) -> Result<(), String>. - #[prost(string, optional, tag = "3")] - pub target_method_name: ::core::option::Option<::prost::alloc::string::String>, + pub target_method_name: Option, /// The id of the canister that will be called to validate the proposal before /// it is put up for a vote. 
- #[prost(message, optional, tag = "4")] - pub validator_canister_id: ::core::option::Option<::ic_base_types::PrincipalId>, + pub validator_canister_id: Option<::ic_base_types::PrincipalId>, /// The name of the method that will be called to validate the proposal /// before it is put up for a vote. /// The signature of the method must be equivalent to the following: /// (proposal_data: ProposalData) -> Result - #[prost(string, optional, tag = "5")] - pub validator_method_name: ::core::option::Option<::prost::alloc::string::String>, + pub validator_method_name: Option, } - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Oneof, - )] + #[derive(candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub enum FunctionType { /// Whether this is a native function (i.e. a Action::Motion or /// Action::UpgradeSnsControlledCanister) or one of user-defined /// NervousSystemFunctions. - #[prost(message, tag = "4")] NativeNervousSystemFunction(super::Empty), /// Whether this is a GenericNervousSystemFunction which can call /// any canister. - #[prost(message, tag = "5")] GenericNervousSystemFunction(GenericNervousSystemFunction), } } @@ -335,92 +240,53 @@ pub mod nervous_system_function { /// that is not build into the standard SNS and calls a canister outside /// the SNS for execution. /// The canister and method to call are derived from the `function_id`. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct ExecuteGenericNervousSystemFunction { /// This enum value determines what canister to call and what /// function to call on that canister. /// /// 'function_id` must be in the range `\[1000--u64:MAX\]` as this /// can't be used to execute native functions. - #[prost(uint64, tag = "1")] pub function_id: u64, /// The payload of the nervous system function's payload. - #[prost(bytes = "vec", tag = "2")] #[serde(with = "serde_bytes")] - pub payload: ::prost::alloc::vec::Vec, + pub payload: Vec, } /// A proposal function that should guide the future strategy of the SNS's /// ecosystem but does not have immediate effect in the sense that a method is executed. -#[derive(candid::CandidType, candid::Deserialize, comparable::Comparable)] -#[self_describing] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct Motion { /// The text of the motion, which can at most be 100kib. - #[prost(string, tag = "1")] - pub motion_text: ::prost::alloc::string::String, + pub motion_text: String, } /// A proposal function that upgrades a canister that is controlled by the /// SNS governance canister. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct UpgradeSnsControlledCanister { /// The id of the canister that is upgraded. - #[prost(message, optional, tag = "1")] - pub canister_id: ::core::option::Option<::ic_base_types::PrincipalId>, + pub canister_id: Option<::ic_base_types::PrincipalId>, /// The new wasm module that the canister is upgraded to. 
- #[prost(bytes = "vec", tag = "2")] #[serde(with = "serde_bytes")] - pub new_canister_wasm: ::prost::alloc::vec::Vec, + pub new_canister_wasm: Vec, /// Arguments passed to the post-upgrade method of the new wasm module. - #[prost(bytes = "vec", optional, tag = "3")] #[serde(deserialize_with = "ic_utils::deserialize::deserialize_option_blob")] - pub canister_upgrade_arg: ::core::option::Option<::prost::alloc::vec::Vec>, + pub canister_upgrade_arg: Option>, /// Canister install_code mode. - #[prost( - enumeration = "::ic_protobuf::types::v1::CanisterInstallMode", - optional, - tag = "4" - )] - pub mode: ::core::option::Option, + pub mode: Option, } /// A proposal to transfer SNS treasury funds to (optionally a Subaccount of) the /// target principal. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct TransferSnsTreasuryFunds { - #[prost(enumeration = "transfer_sns_treasury_funds::TransferFrom", tag = "1")] pub from_treasury: i32, /// The amount to transfer, in e8s. - #[prost(uint64, tag = "2")] pub amount_e8s: u64, /// An optional memo to use for the transfer. - #[prost(uint64, optional, tag = "3")] - pub memo: ::core::option::Option, + pub memo: Option, /// The principal to transfer the funds to. - #[prost(message, optional, tag = "4")] - pub to_principal: ::core::option::Option<::ic_base_types::PrincipalId>, + pub to_principal: Option<::ic_base_types::PrincipalId>, /// An (optional) Subaccount of the principal to transfer the funds to. - #[prost(message, optional, tag = "5")] - pub to_subaccount: ::core::option::Option, + pub to_subaccount: Option, } /// Nested message and enum types in `TransferSnsTreasuryFunds`. pub mod transfer_sns_treasury_funds { @@ -429,16 +295,14 @@ pub mod transfer_sns_treasury_funds { #[derive( candid::CandidType, candid::Deserialize, - comparable::Comparable, + Debug, Clone, Copy, - Debug, PartialEq, Eq, Hash, PartialOrd, Ord, - ::prost::Enumeration, )] #[repr(i32)] pub enum TransferFrom { @@ -459,7 +323,7 @@ pub mod transfer_sns_treasury_funds { } } /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { + pub fn from_str_name(value: &str) -> Option { match value { "TRANSFER_FROM_UNSPECIFIED" => Some(Self::Unspecified), "TRANSFER_FROM_ICP_TREASURY" => Some(Self::IcpTreasury), @@ -471,212 +335,113 @@ pub mod transfer_sns_treasury_funds { } /// A proposal function that changes the ledger's parameters. /// Fields with None values will remain unchanged. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct ManageLedgerParameters { - #[prost(uint64, optional, tag = "1")] - pub transfer_fee: ::core::option::Option, - #[prost(string, optional, tag = "2")] - pub token_name: ::core::option::Option<::prost::alloc::string::String>, - #[prost(string, optional, tag = "3")] - pub token_symbol: ::core::option::Option<::prost::alloc::string::String>, - #[prost(string, optional, tag = "4")] - pub token_logo: ::core::option::Option<::prost::alloc::string::String>, + pub transfer_fee: Option, + pub token_name: Option, + pub token_symbol: Option, + pub token_logo: Option, } /// A proposal to mint SNS tokens to (optionally a Subaccount of) the /// target principal. 
-#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct MintSnsTokens { /// The amount to transfer, in e8s. - #[prost(uint64, optional, tag = "1")] - pub amount_e8s: ::core::option::Option, + pub amount_e8s: Option, /// An optional memo to use for the transfer. - #[prost(uint64, optional, tag = "2")] - pub memo: ::core::option::Option, + pub memo: Option, /// The principal to transfer the funds to. - #[prost(message, optional, tag = "3")] - pub to_principal: ::core::option::Option<::ic_base_types::PrincipalId>, + pub to_principal: Option<::ic_base_types::PrincipalId>, /// An (optional) Subaccount of the principal to transfer the funds to. - #[prost(message, optional, tag = "4")] - pub to_subaccount: ::core::option::Option, + pub to_subaccount: Option, } /// A proposal function to change the values of SNS metadata. /// Fields with None values will remain unchanged. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct ManageSnsMetadata { /// Base64 representation of the logo. Max length is 341334 characters, roughly 256 Kb. - #[prost(string, optional, tag = "1")] - pub logo: ::core::option::Option<::prost::alloc::string::String>, + pub logo: Option, /// Url string, must be between 10 and 256 characters. - #[prost(string, optional, tag = "2")] - pub url: ::core::option::Option<::prost::alloc::string::String>, + pub url: Option, /// Name string, must be between 4 and 255 characters. - #[prost(string, optional, tag = "3")] - pub name: ::core::option::Option<::prost::alloc::string::String>, + pub name: Option, /// Description string, must be between 10 and 10000 characters. - #[prost(string, optional, tag = "4")] - pub description: ::core::option::Option<::prost::alloc::string::String>, + pub description: Option, } /// A proposal function to upgrade the SNS to the next version. The versions are such that only /// one kind of canister will update at the same time. /// This returns an error if the canister cannot be upgraded or no upgrades are available. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct UpgradeSnsToNextVersion {} /// A proposal to register a list of dapps in the root canister. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct RegisterDappCanisters { /// The canister IDs to be registered (i.e. under the management of the SNS). /// The canisters must be already controlled by the SNS root canister before /// making this proposal. Any controllers besides the root canister will be /// removed when the proposal is executed. /// At least one canister ID is required. 
- #[prost(message, repeated, tag = "1")] - pub canister_ids: ::prost::alloc::vec::Vec<::ic_base_types::PrincipalId>, + pub canister_ids: Vec<::ic_base_types::PrincipalId>, } /// A proposal to remove a list of dapps from the SNS and assign them to new controllers -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct DeregisterDappCanisters { /// The canister IDs to be deregistered (i.e. removed from the management of the SNS). - #[prost(message, repeated, tag = "1")] - pub canister_ids: ::prost::alloc::vec::Vec<::ic_base_types::PrincipalId>, + pub canister_ids: Vec<::ic_base_types::PrincipalId>, /// The new controllers for the deregistered canisters. - #[prost(message, repeated, tag = "2")] - pub new_controllers: ::prost::alloc::vec::Vec<::ic_base_types::PrincipalId>, + pub new_controllers: Vec<::ic_base_types::PrincipalId>, } /// A proposal to manage the settings of one or more dapp canisters. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct ManageDappCanisterSettings { /// The canister IDs of the dapp canisters to be modified. - #[prost(message, repeated, tag = "1")] - pub canister_ids: ::prost::alloc::vec::Vec<::ic_base_types::PrincipalId>, + pub canister_ids: Vec<::ic_base_types::PrincipalId>, /// Below are fields under CanisterSettings defined at /// - #[prost(uint64, optional, tag = "2")] - pub compute_allocation: ::core::option::Option, - #[prost(uint64, optional, tag = "3")] - pub memory_allocation: ::core::option::Option, - #[prost(uint64, optional, tag = "4")] - pub freezing_threshold: ::core::option::Option, - #[prost(uint64, optional, tag = "5")] - pub reserved_cycles_limit: ::core::option::Option, - #[prost(enumeration = "LogVisibility", optional, tag = "6")] - pub log_visibility: ::core::option::Option, - #[prost(uint64, optional, tag = "7")] - pub wasm_memory_limit: ::core::option::Option, - #[prost(uint64, optional, tag = "8")] - pub wasm_memory_threshold: ::core::option::Option, + pub compute_allocation: Option, + pub memory_allocation: Option, + pub freezing_threshold: Option, + pub reserved_cycles_limit: Option, + pub log_visibility: Option, + pub wasm_memory_limit: Option, + pub wasm_memory_threshold: Option, } /// Unlike `Governance.Version`, this message has optional fields and is the recommended one /// to use in APIs that can evolve. For example, the SNS Governance could eventually support /// a shorthand notation for SNS versions, enabling clients to specify SNS versions without having /// to set each individual SNS framework canister's WASM hash. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct SnsVersion { /// The hash of the Governance canister WASM. - #[prost(bytes = "vec", optional, tag = "1")] - pub governance_wasm_hash: ::core::option::Option<::prost::alloc::vec::Vec>, + pub governance_wasm_hash: Option>, /// The hash of the Swap canister WASM. - #[prost(bytes = "vec", optional, tag = "2")] - pub swap_wasm_hash: ::core::option::Option<::prost::alloc::vec::Vec>, + pub swap_wasm_hash: Option>, /// The hash of the Root canister WASM. 
- #[prost(bytes = "vec", optional, tag = "3")] - pub root_wasm_hash: ::core::option::Option<::prost::alloc::vec::Vec>, + pub root_wasm_hash: Option>, /// The hash of the Index canister WASM. - #[prost(bytes = "vec", optional, tag = "4")] - pub index_wasm_hash: ::core::option::Option<::prost::alloc::vec::Vec>, + pub index_wasm_hash: Option>, /// The hash of the Ledger canister WASM. - #[prost(bytes = "vec", optional, tag = "5")] - pub ledger_wasm_hash: ::core::option::Option<::prost::alloc::vec::Vec>, + pub ledger_wasm_hash: Option>, /// The hash of the Ledger Archive canister WASM. - #[prost(bytes = "vec", optional, tag = "6")] - pub archive_wasm_hash: ::core::option::Option<::prost::alloc::vec::Vec>, + pub archive_wasm_hash: Option>, } -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct AdvanceSnsTargetVersion { /// If not specified, the target will advance to the latest SNS version known to this SNS. - #[prost(message, optional, tag = "1")] - pub new_target: ::core::option::Option, + pub new_target: Option, } /// A proposal is the immutable input of a proposal submission. -#[derive(candid::CandidType, candid::Deserialize, comparable::Comparable)] -#[compare_default] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct Proposal { /// The proposal's title as a text, which can be at most 256 bytes. - #[prost(string, tag = "1")] - pub title: ::prost::alloc::string::String, + pub title: String, /// The description of the proposal which is a short text, composed /// using a maximum of 30000 bytes of characters. - #[prost(string, tag = "2")] - pub summary: ::prost::alloc::string::String, + pub summary: String, /// The web address of additional content required to evaluate the /// proposal, specified using HTTPS. The URL string must not be longer than /// 2000 bytes. - #[prost(string, tag = "3")] - pub url: ::prost::alloc::string::String, + pub url: String, /// The action that the proposal proposes to take on adoption. /// /// Each action is associated with an function id that can be used for following. @@ -686,11 +451,7 @@ pub struct Proposal { /// /// See `impl From<&Action> for u64` in src/types.rs for the implementation /// of this mapping. - #[prost( - oneof = "proposal::Action", - tags = "4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19" - )] - pub action: ::core::option::Option, + pub action: Option, } /// Nested message and enum types in `Proposal`. pub mod proposal { @@ -703,24 +464,20 @@ pub mod proposal { /// /// See `impl From<&Action> for u64` in src/types.rs for the implementation /// of this mapping. - #[derive( - candid::CandidType, candid::Deserialize, comparable::Comparable, strum_macros::EnumIter, - )] + #[derive(candid::CandidType, candid::Deserialize, Debug)] #[allow(clippy::large_enum_variant)] - #[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq)] pub enum Action { /// The `Unspecified` action is used as a fallback when /// following. That is, if no followees are specified for a given /// action, the followees for this action are used instead. /// /// Id = 0. - #[prost(message, tag = "4")] Unspecified(super::Empty), /// A motion that should guide the future strategy of the SNS's ecosystem /// but does not have immediate effect in the sense that a method is executed. /// /// Id = 1. 
- #[prost(message, tag = "5")] Motion(super::Motion), /// Change the nervous system's parameters. /// Note that a change of a parameter will only affect future actions where @@ -732,98 +489,78 @@ pub mod proposal { /// neurons created before this change may have less stake. /// /// Id = 2. - #[prost(message, tag = "6")] ManageNervousSystemParameters(super::NervousSystemParameters), /// Upgrade a canister that is controlled by the SNS governance canister. /// /// Id = 3. - #[prost(message, tag = "7")] UpgradeSnsControlledCanister(super::UpgradeSnsControlledCanister), /// Add a new NervousSystemFunction, of generic type, to be executable by proposal. /// /// Id = 4. - #[prost(message, tag = "8")] AddGenericNervousSystemFunction(super::NervousSystemFunction), /// Remove a NervousSystemFunction, of generic type, from being executable by proposal. /// /// Id = 5. - #[prost(uint64, tag = "9")] RemoveGenericNervousSystemFunction(u64), /// Execute a method outside the SNS canisters. /// /// Ids \in \[1000, u64::MAX\]. - #[prost(message, tag = "10")] ExecuteGenericNervousSystemFunction(super::ExecuteGenericNervousSystemFunction), /// Execute an upgrade to next version on the blessed SNS upgrade path. /// /// Id = 7. - #[prost(message, tag = "11")] UpgradeSnsToNextVersion(super::UpgradeSnsToNextVersion), /// Modify values of SnsMetadata. /// /// Id = 8. - #[prost(message, tag = "12")] ManageSnsMetadata(super::ManageSnsMetadata), /// Transfer SNS treasury funds (ICP or SNS token) to an account. /// Id = 9. - #[prost(message, tag = "13")] TransferSnsTreasuryFunds(super::TransferSnsTreasuryFunds), /// Register one or more dapp canister(s) in the SNS root canister. /// /// Id = 10. - #[prost(message, tag = "14")] RegisterDappCanisters(super::RegisterDappCanisters), /// Deregister one or more dapp canister(s) in the SNS root canister. /// /// Id = 11. - #[prost(message, tag = "15")] DeregisterDappCanisters(super::DeregisterDappCanisters), /// Mint SNS tokens to an account. /// /// Id = 12. - #[prost(message, tag = "16")] MintSnsTokens(super::MintSnsTokens), /// Change some parameters on the ledger. /// /// Id = 13. - #[prost(message, tag = "17")] ManageLedgerParameters(super::ManageLedgerParameters), /// Change canister settings for one or more dapp canister(s). /// /// Id = 14. - #[prost(message, tag = "18")] ManageDappCanisterSettings(super::ManageDappCanisterSettings), /// Advance SNS target version. /// /// Id = 15. - #[prost(message, tag = "19")] AdvanceSnsTargetVersion(super::AdvanceSnsTargetVersion), } } -#[derive(candid::CandidType, candid::Deserialize, comparable::Comparable)] -#[compare_default] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct GovernanceError { - #[prost(enumeration = "governance_error::ErrorType", tag = "1")] pub error_type: i32, - #[prost(string, tag = "2")] - pub error_message: ::prost::alloc::string::String, + pub error_message: String, } /// Nested message and enum types in `GovernanceError`. pub mod governance_error { #[derive( candid::CandidType, candid::Deserialize, - comparable::Comparable, + Debug, Clone, Copy, - Debug, PartialEq, Eq, Hash, PartialOrd, Ord, - ::prost::Enumeration, )] #[repr(i32)] pub enum ErrorType { @@ -911,7 +648,7 @@ pub mod governance_error { } } /// Creates an enum from field names used in the ProtoBuf definition. 
- pub fn from_str_name(value: &str) -> ::core::option::Option { + pub fn from_str_name(value: &str) -> Option { match value { "ERROR_TYPE_UNSPECIFIED" => Some(Self::Unspecified), "ERROR_TYPE_UNAVAILABLE" => Some(Self::Unavailable), @@ -942,73 +679,46 @@ pub mod governance_error { /// automatically caused by a neuron following other neurons. /// /// Once a ballot's vote is set it cannot be changed. -#[derive(candid::CandidType, candid::Deserialize, comparable::Comparable)] -#[self_describing] -#[derive(Clone, Copy, PartialEq, ::prost::Message)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct Ballot { /// The ballot's vote. - #[prost(enumeration = "Vote", tag = "1")] pub vote: i32, /// The voting power associated with the ballot. The voting power of a ballot /// associated with a neuron and a proposal is set at the proposal's creation /// time to the neuron's voting power at that time. - #[prost(uint64, tag = "2")] pub voting_power: u64, /// The time when the ballot's vote was populated with a decision (YES or NO, not /// UNDECIDED) in seconds since the UNIX epoch. This is only meaningful once a /// decision has been made and set to zero when the proposal associated with the /// ballot is created. - #[prost(uint64, tag = "3")] pub cast_timestamp_seconds: u64, } /// A tally of votes associated with a proposal. -#[derive(candid::CandidType, candid::Deserialize, comparable::Comparable)] -#[self_describing] -#[derive(Clone, Copy, PartialEq, ::prost::Message)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct Tally { /// The time when this tally was made, in seconds from the Unix epoch. - #[prost(uint64, tag = "1")] pub timestamp_seconds: u64, /// The number of yes votes, in voting power unit. - #[prost(uint64, tag = "2")] pub yes: u64, /// The number of no votes, in voting power unit. - #[prost(uint64, tag = "3")] pub no: u64, /// The total voting power unit of eligible neurons that can vote /// on the proposal that this tally is associated with (i.e., the sum /// of the voting power of yes, no, and undecided votes). /// This should always be greater than or equal to yes + no. - #[prost(uint64, tag = "4")] pub total: u64, } /// The wait-for-quiet state associated with a proposal, storing the /// data relevant to the "wait-for-quiet" implementation. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct WaitForQuietState { /// The current deadline of the proposal associated with this /// WaitForQuietState, in seconds from the Unix epoch. - #[prost(uint64, tag = "1")] pub current_deadline_timestamp_seconds: u64, } /// The ProposalData that contains everything related to a proposal: /// the proposal itself (immutable), as well as mutable data such as ballots. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct ProposalData { /// The proposal's action. /// Types 0-999 are reserved for current (and future) core governance @@ -1031,61 +741,49 @@ pub struct ProposalData { /// Id 13 - ManageLedgerParameters proposals. /// Id 14 - ManageDappCanisterSettings proposals. /// Id 15 - AdvanceSnsTargetVersion proposals. 
- #[prost(uint64, tag = "1")] pub action: u64, /// This is stored here temporarily. It is also stored on the map /// that contains proposals. /// /// The unique id for this proposal. - #[prost(message, optional, tag = "2")] - pub id: ::core::option::Option, + pub id: Option, /// The NeuronId of the Neuron that made this proposal. - #[prost(message, optional, tag = "3")] - pub proposer: ::core::option::Option, + pub proposer: Option, /// The amount of governance tokens in e8s to be /// charged to the proposer if the proposal is rejected. - #[prost(uint64, tag = "4")] pub reject_cost_e8s: u64, /// The proposal originally submitted. - #[prost(message, optional, tag = "5")] - pub proposal: ::core::option::Option, + pub proposal: Option, /// The timestamp, in seconds from the Unix epoch, /// when this proposal was made. - #[prost(uint64, tag = "6")] pub proposal_creation_timestamp_seconds: u64, /// The ballots associated with a proposal, given as a map which /// maps the neurons' NeuronId to the neurons' ballots. This is /// only present as long as the proposal is not settled with /// respect to rewards. - #[prost(btree_map = "string, message", tag = "7")] - pub ballots: ::prost::alloc::collections::BTreeMap<::prost::alloc::string::String, Ballot>, + pub ballots: BTreeMap, /// The latest tally. The tally is computed only for open proposals when /// they are processed. Once a proposal is decided, i.e., /// ProposalDecisionStatus isn't open anymore, the tally never changes /// again. (But the ballots may still change as neurons may vote after /// the proposal has been decided.) - #[prost(message, optional, tag = "8")] - pub latest_tally: ::core::option::Option, + pub latest_tally: Option, /// The timestamp, in seconds since the Unix epoch, when this proposal /// was adopted or rejected. If not specified, the proposal is still 'open'. - #[prost(uint64, tag = "9")] pub decided_timestamp_seconds: u64, /// The timestamp, in seconds since the Unix epoch, when the (previously /// adopted) proposal has been executed. If not specified (i.e., still has /// the default value zero), the proposal has not (yet) been executed /// successfully. - #[prost(uint64, tag = "10")] pub executed_timestamp_seconds: u64, /// The timestamp, in seconds since the Unix epoch, when the (previously /// adopted) proposal has failed to be executed. If not specified (i.e., /// still has the default value zero), the proposal has not (yet) failed /// to execute. - #[prost(uint64, tag = "11")] pub failed_timestamp_seconds: u64, /// The reason why the (previously adopted) proposal has failed to execute. /// If not specified, the proposal has not (yet) failed to execute. - #[prost(message, optional, tag = "12")] - pub failure_reason: ::core::option::Option, + pub failure_reason: Option, /// OBSOLETE: Superseded by reward_event_end_timestamp_seconds. However, old /// proposals use this (old) field, not the new one, since they predate the new /// field. Therefore, to correctly detect whether a proposal has been rewarded, @@ -1099,11 +797,9 @@ pub struct ProposalData { /// no reward event taking this proposal into consideration happened yet. /// /// This field matches field round in RewardEvent. - #[prost(uint64, tag = "13")] pub reward_event_round: u64, /// The proposal's wait-for-quiet state. This needs to be saved in stable memory. 
- #[prost(message, optional, tag = "14")] - pub wait_for_quiet_state: ::core::option::Option, + pub wait_for_quiet_state: Option, /// The proposal's payload rendered as text, for display in text/UI frontends. /// This is set if the proposal is considered valid at time of submission. /// @@ -1113,8 +809,7 @@ pub struct ProposalData { /// Proposals with action of type GenericNervousSystemFunction provide no /// guarantee on the style of rendering as this is performed by the /// GenericNervousSystemFunction validator_canister. - #[prost(string, optional, tag = "15")] - pub payload_text_rendering: ::core::option::Option<::prost::alloc::string::String>, + pub payload_text_rendering: Option, /// Deprecated. From now on, this field will be set to true when new proposals /// are created. However, there ARE old proposals where this is set to false. /// @@ -1122,149 +817,83 @@ pub struct ProposalData { /// directly to Settled /// /// TODO(NNS1-2731): Delete this. - #[prost(bool, tag = "16")] pub is_eligible_for_rewards: bool, /// The initial voting period of the proposal, identical in meaning to the one in /// NervousSystemParameters, and duplicated here so the parameters can be changed /// without affecting existing proposals. - #[prost(uint64, tag = "17")] pub initial_voting_period_seconds: u64, /// The wait_for_quiet_deadline_increase_seconds of the proposal, identical in /// meaning to the one in NervousSystemParameters, and duplicated here so the /// parameters can be changed without affecting existing proposals. - #[prost(uint64, tag = "18")] pub wait_for_quiet_deadline_increase_seconds: u64, /// If populated, then the proposal is considered "settled" in terms of voting /// rewards. Prior to distribution of rewards, but after votes are no longer /// accepted, it is considered "ready to settle". - #[prost(uint64, optional, tag = "19")] - pub reward_event_end_timestamp_seconds: ::core::option::Option, + pub reward_event_end_timestamp_seconds: Option, /// Minimum "yes" votes needed for proposal adoption, as a fraction of the /// total voting power. Example: 300 basis points represents a requirement that /// 3% of the total voting power votes to adopt the proposal. - #[prost(message, optional, tag = "20")] - pub minimum_yes_proportion_of_total: - ::core::option::Option<::ic_nervous_system_proto::pb::v1::Percentage>, + pub minimum_yes_proportion_of_total: Option<::ic_nervous_system_proto::pb::v1::Percentage>, /// Minimum "yes" votes needed for proposal adoption, as a fraction of the /// exercised voting power. Example: 50_000 basis points represents a /// requirement that 50% of the exercised voting power votes to adopt the /// proposal. - #[prost(message, optional, tag = "21")] - pub minimum_yes_proportion_of_exercised: - ::core::option::Option<::ic_nervous_system_proto::pb::v1::Percentage>, + pub minimum_yes_proportion_of_exercised: Option<::ic_nervous_system_proto::pb::v1::Percentage>, /// In general, this holds data retrieved at proposal submission/creation time and used later /// during execution. This varies based on the action of the proposal. - #[prost(oneof = "proposal_data::ActionAuxiliary", tags = "22, 23, 24")] - pub action_auxiliary: ::core::option::Option, + pub action_auxiliary: Option, } /// Nested message and enum types in `ProposalData`. 
pub mod proposal_data { - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct TransferSnsTreasuryFundsActionAuxiliary { - #[prost(message, optional, tag = "1")] - pub valuation: ::core::option::Option, + pub valuation: Option, } - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct MintSnsTokensActionAuxiliary { - #[prost(message, optional, tag = "1")] - pub valuation: ::core::option::Option, + pub valuation: Option, } - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct AdvanceSnsTargetVersionActionAuxiliary { /// Corresponds to the Some(target_version) from an AdvanceSnsTargetVersion proposal, or /// to the last SNS version known to this SNS at the time of AdvanceSnsTargetVersion creation. - #[prost(message, optional, tag = "1")] - pub target_version: ::core::option::Option, + pub target_version: Option, } /// In general, this holds data retrieved at proposal submission/creation time and used later /// during execution. This varies based on the action of the proposal. - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Oneof, - )] + #[derive(candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub enum ActionAuxiliary { - #[prost(message, tag = "22")] TransferSnsTreasuryFunds(TransferSnsTreasuryFundsActionAuxiliary), - #[prost(message, tag = "23")] MintSnsTokens(MintSnsTokensActionAuxiliary), - #[prost(message, tag = "24")] AdvanceSnsTargetVersion(AdvanceSnsTargetVersionActionAuxiliary), } } -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct Valuation { - #[prost(enumeration = "valuation::Token", optional, tag = "1")] - pub token: ::core::option::Option, - #[prost(message, optional, tag = "2")] - pub account: ::core::option::Option, - #[prost(uint64, optional, tag = "3")] - pub timestamp_seconds: ::core::option::Option, - #[prost(message, optional, tag = "4")] - pub valuation_factors: ::core::option::Option, + pub token: Option, + pub account: Option, + pub timestamp_seconds: Option, + pub valuation_factors: Option, } /// Nested message and enum types in `Valuation`. 
pub mod valuation { - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct ValuationFactors { - #[prost(message, optional, tag = "1")] - pub tokens: ::core::option::Option<::ic_nervous_system_proto::pb::v1::Tokens>, - #[prost(message, optional, tag = "2")] - pub icps_per_token: ::core::option::Option<::ic_nervous_system_proto::pb::v1::Decimal>, - #[prost(message, optional, tag = "3")] - pub xdrs_per_icp: ::core::option::Option<::ic_nervous_system_proto::pb::v1::Decimal>, + pub tokens: Option<::ic_nervous_system_proto::pb::v1::Tokens>, + pub icps_per_token: Option<::ic_nervous_system_proto::pb::v1::Decimal>, + pub xdrs_per_icp: Option<::ic_nervous_system_proto::pb::v1::Decimal>, } #[derive( candid::CandidType, candid::Deserialize, - comparable::Comparable, + Debug, Clone, Copy, - Debug, PartialEq, Eq, Hash, PartialOrd, Ord, - ::prost::Enumeration, )] #[repr(i32)] pub enum Token { @@ -1285,7 +914,7 @@ pub mod valuation { } } /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { + pub fn from_str_name(value: &str) -> Option { match value { "TOKEN_UNSPECIFIED" => Some(Self::Unspecified), "TOKEN_ICP" => Some(Self::Icp), @@ -1304,29 +933,19 @@ pub mod valuation { /// on the subnet). /// /// Required invariant: the canister code assumes that all system parameters are always set. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct NervousSystemParameters { /// The number of e8s (10E-8 of a token) that a rejected /// proposal costs the proposer. - #[prost(uint64, optional, tag = "1")] - pub reject_cost_e8s: ::core::option::Option, + pub reject_cost_e8s: Option, /// The minimum number of e8s (10E-8 of a token) that can be staked in a neuron. /// /// To ensure that staking and disbursing of the neuron work, the chosen value /// must be larger than the transaction_fee_e8s. - #[prost(uint64, optional, tag = "2")] - pub neuron_minimum_stake_e8s: ::core::option::Option, + pub neuron_minimum_stake_e8s: Option, /// The transaction fee that must be paid for ledger transactions (except /// minting and burning governance tokens). - #[prost(uint64, optional, tag = "3")] - pub transaction_fee_e8s: ::core::option::Option, + pub transaction_fee_e8s: Option, /// The maximum number of proposals to keep, per action. When the /// total number of proposals for a given action is greater than this /// number, the oldest proposals that have reached final decision state @@ -1335,8 +954,7 @@ pub struct NervousSystemParameters { /// /// The number must be larger than zero and at most be as large as the /// defined ceiling MAX_PROPOSALS_TO_KEEP_PER_ACTION_CEILING. - #[prost(uint32, optional, tag = "4")] - pub max_proposals_to_keep_per_action: ::core::option::Option, + pub max_proposals_to_keep_per_action: Option, /// The initial voting period of a newly created proposal. /// A proposal's voting period may then be further increased during /// a proposal's lifecycle due to the wait-for-quiet algorithm. 
@@ -1344,8 +962,7 @@ pub struct NervousSystemParameters { /// The voting period must be between (inclusive) the defined floor /// INITIAL_VOTING_PERIOD_SECONDS_FLOOR and ceiling /// INITIAL_VOTING_PERIOD_SECONDS_CEILING. - #[prost(uint64, optional, tag = "5")] - pub initial_voting_period_seconds: ::core::option::Option, + pub initial_voting_period_seconds: Option, /// The wait for quiet algorithm extends the voting period of a proposal when /// there is a flip in the majority vote during the proposal's voting period. /// This parameter determines the maximum time period that the voting period @@ -1356,8 +973,7 @@ pub struct NervousSystemParameters { /// The maximum total voting period extension is 2 * wait_for_quiet_deadline_increase_seconds. /// For more information, see the wiki page on the wait-for-quiet algorithm: /// - #[prost(uint64, optional, tag = "18")] - pub wait_for_quiet_deadline_increase_seconds: ::core::option::Option, + pub wait_for_quiet_deadline_increase_seconds: Option, /// TODO NNS1-2169: This field currently has no effect. /// TODO NNS1-2169: Design and implement this feature. /// @@ -1367,34 +983,28 @@ pub struct NervousSystemParameters { /// If unset, neurons will have no followees by default. /// The set of followees for each function can be at most of size /// max_followees_per_function. - #[prost(message, optional, tag = "6")] - pub default_followees: ::core::option::Option, + pub default_followees: Option, /// The maximum number of allowed neurons. When this maximum is reached, no new /// neurons will be created until some are removed. /// /// This number must be larger than zero and at most as large as the defined /// ceiling MAX_NUMBER_OF_NEURONS_CEILING. - #[prost(uint64, optional, tag = "7")] - pub max_number_of_neurons: ::core::option::Option, + pub max_number_of_neurons: Option, /// The minimum dissolve delay a neuron must have to be eligible to vote. /// /// The chosen value must be smaller than max_dissolve_delay_seconds. - #[prost(uint64, optional, tag = "8")] - pub neuron_minimum_dissolve_delay_to_vote_seconds: ::core::option::Option, + pub neuron_minimum_dissolve_delay_to_vote_seconds: Option, /// The maximum number of followees each neuron can establish for each nervous system function. /// /// This number can be at most as large as the defined ceiling /// MAX_FOLLOWEES_PER_FUNCTION_CEILING. - #[prost(uint64, optional, tag = "9")] - pub max_followees_per_function: ::core::option::Option, + pub max_followees_per_function: Option, /// The maximum dissolve delay that a neuron can have. That is, the maximum /// that a neuron's dissolve delay can be increased to. The maximum is also enforced /// when saturating the dissolve delay bonus in the voting power computation. - #[prost(uint64, optional, tag = "10")] - pub max_dissolve_delay_seconds: ::core::option::Option, + pub max_dissolve_delay_seconds: Option, /// The age of a neuron that saturates the age bonus for the voting power computation. - #[prost(uint64, optional, tag = "12")] - pub max_neuron_age_for_age_bonus: ::core::option::Option, + pub max_neuron_age_for_age_bonus: Option, /// The max number of proposals for which ballots are still stored, i.e., /// unsettled proposals. If this number of proposals is reached, new proposals /// can only be added in exceptional cases (for few proposals it is defined @@ -1403,26 +1013,21 @@ pub struct NervousSystemParameters { /// /// This number must be larger than zero and at most as large as the defined /// ceiling MAX_NUMBER_OF_PROPOSALS_WITH_BALLOTS_CEILING. 
- #[prost(uint64, optional, tag = "14")] - pub max_number_of_proposals_with_ballots: ::core::option::Option, + pub max_number_of_proposals_with_ballots: Option, /// The default set of neuron permissions granted to the principal claiming a neuron. - #[prost(message, optional, tag = "15")] - pub neuron_claimer_permissions: ::core::option::Option, + pub neuron_claimer_permissions: Option, /// The superset of neuron permissions a principal with permission /// `NeuronPermissionType::ManagePrincipals` for a given neuron can grant to another /// principal for this same neuron. /// If this set changes via a ManageNervousSystemParameters proposal, previous /// neurons' permissions will be unchanged and only newly granted permissions will be affected. - #[prost(message, optional, tag = "16")] - pub neuron_grantable_permissions: ::core::option::Option, + pub neuron_grantable_permissions: Option, /// The maximum number of principals that can have permissions for a neuron - #[prost(uint64, optional, tag = "17")] - pub max_number_of_principals_per_neuron: ::core::option::Option, + pub max_number_of_principals_per_neuron: Option, /// When this field is not populated, voting rewards are "disabled". Once this /// is set, it probably should not be changed, because the results would /// probably be pretty confusing. - #[prost(message, optional, tag = "19")] - pub voting_rewards_parameters: ::core::option::Option, + pub voting_rewards_parameters: Option, /// E.g. if a large dissolve delay can double the voting power of a neuron, /// then this field would have a value of 100, indicating a maximum of /// 100% additional voting power. @@ -1430,14 +1035,12 @@ pub struct NervousSystemParameters { /// For no bonus, this should be set to 0. /// /// To achieve functionality equivalent to NNS, this should be set to 100. - #[prost(uint64, optional, tag = "20")] - pub max_dissolve_delay_bonus_percentage: ::core::option::Option, + pub max_dissolve_delay_bonus_percentage: Option, /// Analogous to the previous field (see the previous comment), /// but this one relates to neuron age instead of dissolve delay. /// /// To achieve functionality equivalent to NNS, this should be set to 25. - #[prost(uint64, optional, tag = "21")] - pub max_age_bonus_percentage: ::core::option::Option, + pub max_age_bonus_percentage: Option, /// By default, maturity modulation is enabled; however, an SNS can use this /// field to disable it. When disabled, this canister will still poll the /// Cycles Minting Canister (CMC), and store the value received therefrom. @@ -1446,18 +1049,9 @@ pub struct NervousSystemParameters { /// The reason we call this "disabled" instead of (positive) "enabled" is so /// that the PB default (bool fields are false) and our application default /// (enabled) agree. - #[prost(bool, optional, tag = "22")] - pub maturity_modulation_disabled: ::core::option::Option, + pub maturity_modulation_disabled: Option, } -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct VotingRewardsParameters { /// The amount of time between reward events. /// @@ -1473,14 +1067,12 @@ pub struct VotingRewardsParameters { /// within a few seconds of this. /// /// This supersedes super.reward_distribution_period_seconds. 
-    #[prost(uint64, optional, tag = "1")]
-    pub round_duration_seconds: ::core::option::Option<u64>,
+    pub round_duration_seconds: Option<u64>,
    /// The amount of time that the growth rate changes (presumably, decreases)
    /// from the initial growth rate to the final growth rate. (See the two
    /// *_reward_rate_basis_points fields below.) The transition is quadratic, and
    /// levels out at the end of the growth rate transition period.
-    #[prost(uint64, optional, tag = "3")]
-    pub reward_rate_transition_duration_seconds: ::core::option::Option<u64>,
+    pub reward_rate_transition_duration_seconds: Option<u64>,
    /// The amount of rewards is proportional to token_supply * current_rate. In
    /// turn, current_rate is somewhere between `initial_reward_rate_basis_points`
    /// and `final_reward_rate_basis_points`. In the first reward period, it is the
@@ -1490,37 +1082,19 @@
    /// quadratic, and levels out at the end of the growth rate transition period.
    ///
    /// (A basis point is one in ten thousand.)
-    #[prost(uint64, optional, tag = "4")]
-    pub initial_reward_rate_basis_points: ::core::option::Option<u64>,
-    #[prost(uint64, optional, tag = "5")]
-    pub final_reward_rate_basis_points: ::core::option::Option<u64>,
+    pub initial_reward_rate_basis_points: Option<u64>,
+    pub final_reward_rate_basis_points: Option<u64>,
}
/// The set of default followees that every newly created neuron will follow per function.
/// This is specified as a mapping of proposal functions to followees for that function.
-#[derive(
-    candid::CandidType,
-    candid::Deserialize,
-    comparable::Comparable,
-    Clone,
-    PartialEq,
-    ::prost::Message,
-)]
+#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)]
pub struct DefaultFollowees {
-    #[prost(btree_map = "uint64, message", tag = "1")]
-    pub followees: ::prost::alloc::collections::BTreeMap<u64, neuron::Followees>,
+    pub followees: BTreeMap<u64, neuron::Followees>,
}
/// A wrapper for a list of neuron permissions.
-#[derive(
-    candid::CandidType,
-    candid::Deserialize,
-    comparable::Comparable,
-    Clone,
-    PartialEq,
-    ::prost::Message,
-)]
+#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)]
pub struct NeuronPermissionList {
-    #[prost(enumeration = "NeuronPermissionType", repeated, tag = "1")]
-    pub permissions: ::prost::alloc::vec::Vec<i32>,
+    pub permissions: Vec<i32>,
}
/// A record of when voting rewards were determined, and neuron maturity
/// increased for participation in voting on proposals.
@@ -1531,14 +1105,7 @@ pub struct NeuronPermissionList {
/// To make it a little easier to eventually deduplicate NNS and SNS governance
/// code, tags should be chosen so that it is new to BOTH this and the NNS
/// RewardEvent. (This also applies to other message definitions.)
-#[derive(
-    candid::CandidType,
-    candid::Deserialize,
-    comparable::Comparable,
-    Clone,
-    PartialEq,
-    ::prost::Message,
-)]
+#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)]
pub struct RewardEvent {
    /// DEPRECATED: Use end_timestamp_seconds instead.
    ///
    /// Rewards are (calculated and) distributed periodically in "rounds". Round 1
    /// begins at start_time and ends at start_time + 1 * round_duration. Round 2
    /// begins at the end of round 1, and so on.
    ///
    /// In the sequence of reward events, this is the first element where
    /// it was not possible to process a reward event for a while. This means that
    /// successive values in this field might not be consecutive, but they usually
    /// are.
-    #[prost(uint64, tag = "1")]
    pub round: u64,
    /// Not to be confused with round_end_timestamp_seconds. This is just used to
    /// record when the calculation (of voting rewards) was performed, not the time
    /// range/events (i.e. proposals) that was operated on.
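One schedule consistent with the quadratic transition described for `VotingRewardsParameters` above (a sketch only; the exact formula is inferred from the comments, not taken from this change):

fn reward_rate_basis_points(
    seconds_since_genesis: u64,
    transition_duration_seconds: u64,
    initial_bp: u64,
    final_bp: u64,
) -> u64 {
    // Starts at initial_bp, decays quadratically, and levels out (zero slope)
    // at final_bp once the transition period has elapsed.
    if transition_duration_seconds == 0 || seconds_since_genesis >= transition_duration_seconds {
        return final_bp;
    }
    let remaining = (transition_duration_seconds - seconds_since_genesis) as u128;
    let duration = transition_duration_seconds as u128;
    let decaying = (initial_bp.saturating_sub(final_bp) as u128) * remaining * remaining
        / (duration * duration);
    final_bp + decaying as u64
}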
- #[prost(uint64, tag = "2")] pub actual_timestamp_seconds: u64, /// The list of proposals that were taken into account during /// this reward event. - #[prost(message, repeated, tag = "3")] - pub settled_proposals: ::prost::alloc::vec::Vec, + pub settled_proposals: Vec, /// The total amount of reward that was distributed during this reward event. /// /// The unit is "e8s equivalent" to insist that, while this quantity is on /// the same scale as governance tokens, maturity is not directly convertible /// to governance tokens: conversion requires a minting event. - #[prost(uint64, tag = "4")] pub distributed_e8s_equivalent: u64, /// All proposals that were "ready to settle" up to this time were /// considered. @@ -1587,8 +1150,7 @@ pub struct RewardEvent { /// Being able to change round duration does not exist in NNS (yet), and there /// is (currently) no intention to add that feature, but it could be done by /// making similar changes. - #[prost(uint64, optional, tag = "5")] - pub end_timestamp_seconds: ::core::option::Option, + pub end_timestamp_seconds: Option, /// In some cases, the rewards that would have been distributed in one round are /// "rolled over" into the next reward event. This field keeps track of how many /// rounds have passed since the last time rewards were distributed (rather @@ -1607,8 +1169,7 @@ pub struct RewardEvent { /// settled to distribute rewards for. /// /// In both of these cases, the rewards purse rolls over into the next round. - #[prost(uint64, optional, tag = "6")] - pub rounds_since_last_distribution: ::core::option::Option, + pub rounds_since_last_distribution: Option, /// The total amount of rewards that was available during the reward event. /// /// The e8s_equivalent_to_be_rolled_over method returns this when @@ -1619,32 +1180,25 @@ pub struct RewardEvent { /// Warning: There is a field with the same name in NNS, but different tags are /// used. Also, this uses the `optional` keyword (whereas, the NNS analog does /// not). - #[prost(uint64, optional, tag = "8")] - pub total_available_e8s_equivalent: ::core::option::Option, + pub total_available_e8s_equivalent: Option, } /// The representation of the whole governance system, containing all /// information about the governance system that must be kept /// across upgrades of the governance system, i.e. kept in stable memory. -#[derive(candid::CandidType, candid::Deserialize, comparable::Comparable)] -#[compare_default] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct Governance { /// The current set of neurons registered in governance as a map from /// neuron IDs to neurons. - #[prost(btree_map = "string, message", tag = "1")] - pub neurons: ::prost::alloc::collections::BTreeMap<::prost::alloc::string::String, Neuron>, + pub neurons: BTreeMap, /// The current set of proposals registered in governance as a map /// from proposal IDs to the proposals' data. - #[prost(btree_map = "uint64, message", tag = "2")] - pub proposals: ::prost::alloc::collections::BTreeMap, + pub proposals: BTreeMap, /// The nervous system parameters that define and can be set by /// each nervous system. - #[prost(message, optional, tag = "8")] - pub parameters: ::core::option::Option, + pub parameters: Option, /// TODO IC-1168: update when rewards are introduced /// The latest reward event. 
- #[prost(message, optional, tag = "9")] - pub latest_reward_event: ::core::option::Option, + pub latest_reward_event: Option, /// The in-flight neuron ledger commands as a map from neuron IDs /// to commands. /// @@ -1666,87 +1220,54 @@ pub struct Governance { /// Because we know exactly what was going on, we should have the /// information necessary to reconcile the state, using custom code /// added on upgrade, if necessary. - #[prost(btree_map = "string, message", tag = "10")] - pub in_flight_commands: ::prost::alloc::collections::BTreeMap< - ::prost::alloc::string::String, - governance::NeuronInFlightCommand, - >, + pub in_flight_commands: BTreeMap, /// The timestamp that is considered genesis for the governance /// system, in seconds since the Unix epoch. That is, the time /// at which `canister_init` was run for the governance canister. - #[prost(uint64, tag = "11")] pub genesis_timestamp_seconds: u64, - #[prost(message, optional, tag = "13")] - pub metrics: ::core::option::Option, + pub metrics: Option, /// The canister ID of the ledger canister. - #[prost(message, optional, tag = "16")] - pub ledger_canister_id: ::core::option::Option<::ic_base_types::PrincipalId>, + pub ledger_canister_id: Option<::ic_base_types::PrincipalId>, /// The canister ID of the root canister. - #[prost(message, optional, tag = "17")] - pub root_canister_id: ::core::option::Option<::ic_base_types::PrincipalId>, + pub root_canister_id: Option<::ic_base_types::PrincipalId>, /// ID to NervousSystemFunction (which has an id field). - #[prost(btree_map = "uint64, message", tag = "18")] - pub id_to_nervous_system_functions: - ::prost::alloc::collections::BTreeMap, - #[prost(enumeration = "governance::Mode", tag = "19")] + pub id_to_nervous_system_functions: BTreeMap, pub mode: i32, /// The canister ID of the swap canister. /// /// When this is unpopulated, mode should be Normal, and when this is /// populated, mode should be PreInitializationSwap. - #[prost(message, optional, tag = "20")] - pub swap_canister_id: ::core::option::Option<::ic_base_types::PrincipalId>, - #[prost(message, optional, tag = "21")] - pub sns_metadata: ::core::option::Option, + pub swap_canister_id: Option<::ic_base_types::PrincipalId>, + pub sns_metadata: Option, /// The initialization parameters used to spawn an SNS - #[prost(string, tag = "22")] - pub sns_initialization_parameters: ::prost::alloc::string::String, + pub sns_initialization_parameters: String, /// Current version that this SNS is running. - #[prost(message, optional, tag = "23")] - pub deployed_version: ::core::option::Option, + pub deployed_version: Option, /// Version SNS is in process of upgrading to. - #[prost(message, optional, tag = "24")] - pub pending_version: ::core::option::Option, - #[prost(message, optional, tag = "30")] - pub target_version: ::core::option::Option, + pub pending_version: Option, + pub target_version: Option, /// True if the run_periodic_tasks function is currently finalizing disburse maturity, meaning /// that it should finish before being called again. - #[prost(bool, optional, tag = "25")] - pub is_finalizing_disburse_maturity: ::core::option::Option, - #[prost(message, optional, tag = "26")] - pub maturity_modulation: ::core::option::Option, - #[prost(message, optional, tag = "29")] - pub cached_upgrade_steps: ::core::option::Option, + pub is_finalizing_disburse_maturity: Option, + pub maturity_modulation: Option, + pub cached_upgrade_steps: Option, /// Information about the timers that perform periodic tasks of this Governance canister. 
- #[prost(message, optional, tag = "31")] - pub timers: ::core::option::Option<::ic_nervous_system_proto::pb::v1::Timers>, - #[prost(message, optional, tag = "32")] - pub upgrade_journal: ::core::option::Option, + pub timers: Option<::ic_nervous_system_proto::pb::v1::Timers>, + pub upgrade_journal: Option, } /// Nested message and enum types in `Governance`. pub mod governance { + use super::*; use crate::format_full_hash; use serde::ser::SerializeStruct; /// The commands that require a neuron lock. - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct NeuronInFlightCommand { /// The timestamp at which the command was issued, for debugging /// purposes. - #[prost(uint64, tag = "1")] pub timestamp: u64, - #[prost( - oneof = "neuron_in_flight_command::Command", - tags = "2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 20" - )] - pub command: ::core::option::Option, + pub command: Option, } /// Nested message and enum types in `NeuronInFlightCommand`. pub mod neuron_in_flight_command { @@ -1758,67 +1279,36 @@ pub mod governance { /// no value in actually storing the command itself, and this placeholder /// can generally be used in all sync cases. #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, + Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq, )] pub struct SyncCommand {} - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Oneof, - )] + #[derive(candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub enum Command { - #[prost(message, tag = "2")] Disburse(super::super::manage_neuron::Disburse), - #[prost(message, tag = "3")] Split(super::super::manage_neuron::Split), - #[prost(message, tag = "4")] MergeMaturity(super::super::manage_neuron::MergeMaturity), - #[prost(message, tag = "5")] DisburseMaturity(super::super::manage_neuron::DisburseMaturity), - #[prost(message, tag = "6")] ClaimOrRefreshNeuron(super::super::manage_neuron::ClaimOrRefresh), - #[prost(message, tag = "7")] AddNeuronPermissions(super::super::manage_neuron::AddNeuronPermissions), - #[prost(message, tag = "8")] RemoveNeuronPermissions(super::super::manage_neuron::RemoveNeuronPermissions), - #[prost(message, tag = "9")] Configure(super::super::manage_neuron::Configure), - #[prost(message, tag = "10")] Follow(super::super::manage_neuron::Follow), - #[prost(message, tag = "11")] MakeProposal(super::super::Proposal), - #[prost(message, tag = "12")] RegisterVote(super::super::manage_neuron::RegisterVote), - #[prost(message, tag = "13")] FinalizeDisburseMaturity(super::super::manage_neuron::FinalizeDisburseMaturity), - #[prost(message, tag = "20")] SyncCommand(SyncCommand), } } /// Metrics that are too costly to compute each time when they are /// requested. - #[derive(candid::CandidType, candid::Deserialize, comparable::Comparable)] - #[compare_default] - #[derive(Clone, PartialEq, ::prost::Message)] + #[derive(candid::CandidType, candid::Deserialize, Debug, Default, Clone, PartialEq)] pub struct GovernanceCachedMetrics { /// The timestamp when these metrics were computed, as seconds since /// Unix epoch. - #[prost(uint64, tag = "1")] pub timestamp_seconds: u64, /// The total supply of governance tokens in the ledger canister. 
-        #[prost(uint64, tag = "2")]
        pub total_supply_governance_tokens: u64,
        /// The number of dissolving neurons (i.e., in NeuronState::Dissolving).
-        #[prost(uint64, tag = "3")]
        pub dissolving_neurons_count: u64,
        /// The number of staked governance tokens in dissolving neurons
        /// (i.e., in NeuronState::Dissolving) grouped by the neurons' dissolve delay
        /// rounded to years.
        /// This is given as a map from dissolve delays (rounded to years)
        /// to the sum of staked tokens in the dissolving neurons that have this
        /// dissolve delay.
-        #[prost(btree_map = "uint64, double", tag = "4")]
-        pub dissolving_neurons_e8s_buckets: ::prost::alloc::collections::BTreeMap<u64, f64>,
+        pub dissolving_neurons_e8s_buckets: BTreeMap<u64, f64>,
        /// The number of dissolving neurons (i.e., in NeuronState::Dissolving)
        /// grouped by their dissolve delay rounded to years.
        /// This is given as a map from dissolve delays (rounded to years) to
        /// the number of dissolving neurons that have this dissolve delay.
-        #[prost(btree_map = "uint64, uint64", tag = "5")]
-        pub dissolving_neurons_count_buckets: ::prost::alloc::collections::BTreeMap<u64, u64>,
+        pub dissolving_neurons_count_buckets: BTreeMap<u64, u64>,
        /// The number of non-dissolving neurons (i.e., in NeuronState::NotDissolving).
-        #[prost(uint64, tag = "6")]
        pub not_dissolving_neurons_count: u64,
        /// The number of staked governance tokens in non-dissolving neurons
        /// (i.e., in NeuronState::NotDissolving) grouped by the neurons' dissolve delay
        /// rounded to years.
        /// This is given as a map from dissolve delays (rounded to years)
        /// to the sum of staked tokens in the non-dissolving neurons that have this
        /// dissolve delay.
-        #[prost(btree_map = "uint64, double", tag = "7")]
-        pub not_dissolving_neurons_e8s_buckets: ::prost::alloc::collections::BTreeMap<u64, f64>,
+        pub not_dissolving_neurons_e8s_buckets: BTreeMap<u64, f64>,
        /// The number of non-dissolving neurons (i.e., in NeuronState::NotDissolving)
        /// grouped by their dissolve delay rounded to years.
        /// This is given as a map from dissolve delays (rounded to years) to
        /// the number of non-dissolving neurons that have this dissolve delay.
-        #[prost(btree_map = "uint64, uint64", tag = "8")]
-        pub not_dissolving_neurons_count_buckets: ::prost::alloc::collections::BTreeMap<u64, u64>,
+        pub not_dissolving_neurons_count_buckets: BTreeMap<u64, u64>,
        /// The number of dissolved neurons (i.e., in NeuronState::Dissolved).
-        #[prost(uint64, tag = "9")]
        pub dissolved_neurons_count: u64,
        /// The number of staked governance tokens in dissolved neurons
        /// (i.e., in NeuronState::Dissolved).
-        #[prost(uint64, tag = "10")]
        pub dissolved_neurons_e8s: u64,
        /// The number of neurons that are garbage collectable, i.e., that
        /// have a cached stake smaller than the ledger transaction fee.
-        #[prost(uint64, tag = "11")]
        pub garbage_collectable_neurons_count: u64,
        /// The number of neurons that have an invalid stake, i.e., that
        /// have a cached stake that is larger than zero but smaller than the
        /// minimum neuron stake defined in the nervous system parameters.
-        #[prost(uint64, tag = "12")]
        pub neurons_with_invalid_stake_count: u64,
        /// The total amount of governance tokens that are staked in neurons,
        /// measured in fractions of 10E-8 of a governance token.
-        #[prost(uint64, tag = "13")]
        pub total_staked_e8s: u64,
        /// TODO: rather than taking six months, it would be more interesting to take the respective SNS's eligibility border here.
        /// The number of neurons with a dissolve delay of less than six months.
- #[prost(uint64, tag = "14")] pub neurons_with_less_than_6_months_dissolve_delay_count: u64, /// The number of governance tokens in neurons with a dissolve delay of /// less than six months. - #[prost(uint64, tag = "15")] pub neurons_with_less_than_6_months_dissolve_delay_e8s: u64, } /// Metadata about this SNS. - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct SnsMetadata { /// The logo for the SNS project represented as a base64 encoded string. - #[prost(string, optional, tag = "1")] - pub logo: ::core::option::Option<::prost::alloc::string::String>, + pub logo: Option, /// Url to the dapp controlled by the SNS project. - #[prost(string, optional, tag = "2")] - pub url: ::core::option::Option<::prost::alloc::string::String>, + pub url: Option, /// Name of the SNS project. This may differ from the name of the associated token. - #[prost(string, optional, tag = "3")] - pub name: ::core::option::Option<::prost::alloc::string::String>, + pub name: Option, /// Description of the SNS project. - #[prost(string, optional, tag = "4")] - pub description: ::core::option::Option<::prost::alloc::string::String>, + pub description: Option, } impl serde::Serialize for Version { @@ -1931,87 +1398,48 @@ pub mod governance { /// A version of the SNS defined by the WASM hashes of its canisters. #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Eq, - std::hash::Hash, - Clone, - PartialEq, - ::prost::Message, + candid::CandidType, candid::Deserialize, Debug, Eq, std::hash::Hash, Clone, PartialEq, )] pub struct Version { /// The hash of the Root canister WASM. - #[prost(bytes = "vec", tag = "1")] #[serde(with = "serde_bytes")] - pub root_wasm_hash: ::prost::alloc::vec::Vec, + pub root_wasm_hash: Vec, /// The hash of the Governance canister WASM. - #[prost(bytes = "vec", tag = "2")] #[serde(with = "serde_bytes")] - pub governance_wasm_hash: ::prost::alloc::vec::Vec, + pub governance_wasm_hash: Vec, /// The hash of the Ledger canister WASM. - #[prost(bytes = "vec", tag = "3")] #[serde(with = "serde_bytes")] - pub ledger_wasm_hash: ::prost::alloc::vec::Vec, + pub ledger_wasm_hash: Vec, /// The hash of the Swap canister WASM. - #[prost(bytes = "vec", tag = "4")] #[serde(with = "serde_bytes")] - pub swap_wasm_hash: ::prost::alloc::vec::Vec, + pub swap_wasm_hash: Vec, /// The hash of the Ledger Archive canister WASM. - #[prost(bytes = "vec", tag = "5")] #[serde(with = "serde_bytes")] - pub archive_wasm_hash: ::prost::alloc::vec::Vec, + pub archive_wasm_hash: Vec, /// The hash of the Index canister WASM. - #[prost(bytes = "vec", tag = "6")] #[serde(with = "serde_bytes")] - pub index_wasm_hash: ::prost::alloc::vec::Vec, + pub index_wasm_hash: Vec, } #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - serde::Serialize, - Clone, - PartialEq, - ::prost::Message, + candid::CandidType, candid::Deserialize, Debug, serde::Serialize, Clone, PartialEq, )] pub struct Versions { - #[prost(message, repeated, tag = "1")] - pub versions: ::prost::alloc::vec::Vec, + pub versions: Vec, } /// An upgrade in progress, defined as a version target and a time at which it is considered failed. 
-    #[derive(
-        candid::CandidType,
-        candid::Deserialize,
-        comparable::Comparable,
-        Clone,
-        PartialEq,
-        ::prost::Message,
-    )]
+    #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)]
    pub struct PendingVersion {
        /// Version to be upgraded to
-        #[prost(message, optional, tag = "1")]
-        pub target_version: ::core::option::Option<Version>,
+        pub target_version: Option<Version>,
        /// Seconds since UNIX epoch to mark this as a failed version if not in sync with current version
-        #[prost(uint64, tag = "2")]
        pub mark_failed_at_seconds: u64,
        /// Lock to avoid checking over and over again. Also, it is a counter for how many times we have attempted to check,
        /// allowing us to fail in case we otherwise have gotten stuck.
-        #[prost(uint64, tag = "3")]
        pub checking_upgrade_lock: u64,
        /// The proposal that initiated this upgrade
-        #[prost(uint64, optional, tag = "4")]
-        pub proposal_id: ::core::option::Option<u64>,
+        pub proposal_id: Option<u64>,
    }
-    #[derive(
-        candid::CandidType,
-        candid::Deserialize,
-        comparable::Comparable,
-        Clone,
-        Copy,
-        PartialEq,
-        ::prost::Message,
-    )]
+    #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)]
    pub struct MaturityModulation {
        /// When X maturity is disbursed, the amount that goes to the destination
        /// account is X * (1 + y) where y = current_basis_points / 10_000.
        ///
        /// Of course, maturity modulation could be negative.
        ///
        /// There is a positive relationship between the price of ICP (in XDR) and
        /// this value.
-        #[prost(int32, optional, tag = "1")]
-        pub current_basis_points: ::core::option::Option<i32>,
+        pub current_basis_points: Option<i32>,
        /// When current_basis_points was last updated (seconds since UNIX epoch).
-        #[prost(uint64, optional, tag = "2")]
-        pub updated_at_timestamp_seconds: ::core::option::Option<u64>,
+        pub updated_at_timestamp_seconds: Option<u64>,
    }
    /// The SNS's local cache of the upgrade steps received from SNS-W.
-    #[derive(
-        candid::CandidType,
-        candid::Deserialize,
-        comparable::Comparable,
-        Clone,
-        PartialEq,
-        ::prost::Message,
-    )]
+    #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)]
    pub struct CachedUpgradeSteps {
        /// The upgrade steps that have been returned from SNS-W the last time we
        /// called list_upgrade_steps.
-        #[prost(message, optional, tag = "1")]
-        pub upgrade_steps: ::core::option::Option<Versions>,
+        pub upgrade_steps: Option<Versions>,
        /// The timestamp of the request we sent to list_upgrade_steps.
        /// It's possible that this is greater than the response_timestamp_seconds, because
        /// we update it as soon as we send the request, and only update the
        /// response_timestamp and the upgrade_steps when we receive the response.
        /// The primary use of this is that we can avoid calling list_upgrade_steps
        /// more frequently than necessary.
-        #[prost(uint64, optional, tag = "2")]
-        pub requested_timestamp_seconds: ::core::option::Option<u64>,
+        pub requested_timestamp_seconds: Option<u64>,
        /// The timestamp of the response we received from list_upgrade_steps (stored in upgrade_steps).
-        #[prost(uint64, optional, tag = "3")]
-        pub response_timestamp_seconds: ::core::option::Option<u64>,
+        pub response_timestamp_seconds: Option<u64>,
    }
    #[derive(
        candid::CandidType,
        candid::Deserialize,
-        comparable::Comparable,
-        strum_macros::EnumIter,
+        Debug,
        Clone,
        Copy,
-        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
-        ::prost::Enumeration,
    )]
    #[repr(i32)]
    pub enum Mode {
@@ -2090,7 +1503,7 @@ pub mod governance {
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
- pub fn from_str_name(value: &str) -> ::core::option::Option { + pub fn from_str_name(value: &str) -> Option { match value { "MODE_UNSPECIFIED" => Some(Self::Unspecified), "MODE_NORMAL" => Some(Self::Normal), @@ -2101,114 +1514,50 @@ pub mod governance { } } /// Request message for 'get_metadata'. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct GetMetadataRequest {} /// Response message for 'get_metadata'. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct GetMetadataResponse { - #[prost(string, optional, tag = "1")] - pub logo: ::core::option::Option<::prost::alloc::string::String>, - #[prost(string, optional, tag = "2")] - pub url: ::core::option::Option<::prost::alloc::string::String>, - #[prost(string, optional, tag = "3")] - pub name: ::core::option::Option<::prost::alloc::string::String>, - #[prost(string, optional, tag = "4")] - pub description: ::core::option::Option<::prost::alloc::string::String>, + pub logo: Option, + pub url: Option, + pub name: Option, + pub description: Option, } /// Request message for 'get_sns_initialization_parameters' -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct GetSnsInitializationParametersRequest {} /// Response message for 'get_sns_initialization_parameters' -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct GetSnsInitializationParametersResponse { - #[prost(string, tag = "1")] - pub sns_initialization_parameters: ::prost::alloc::string::String, + pub sns_initialization_parameters: String, } /// Request for the SNS's currently running version. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct GetRunningSnsVersionRequest {} /// Response with the SNS's currently running version and any upgrades /// that are in progress. /// GetUpgradeJournal is a superior API to this one that should -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct GetRunningSnsVersionResponse { /// The currently deployed version of the SNS. - #[prost(message, optional, tag = "1")] - pub deployed_version: ::core::option::Option, + pub deployed_version: Option, /// The upgrade in progress, if any. - #[prost(message, optional, tag = "2")] - pub pending_version: - ::core::option::Option, + pub pending_version: Option, } /// Nested message and enum types in `GetRunningSnsVersionResponse`. pub mod get_running_sns_version_response { /// The same as PendingVersion (stored in the governance proto). They are separated to make it easy to change one without changing the other. 
- #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct UpgradeInProgress { /// Version to be upgraded to - #[prost(message, optional, tag = "1")] - pub target_version: ::core::option::Option, + pub target_version: Option, /// Seconds since UNIX epoch to mark this as a failed version if not in sync with current version - #[prost(uint64, tag = "2")] pub mark_failed_at_seconds: u64, /// Lock to avoid checking over and over again. Also, it is a counter for how many times we have attempted to check, /// allowing us to fail in case we otherwise have gotten stuck. - #[prost(uint64, tag = "3")] pub checking_upgrade_lock: u64, /// The proposal that initiated this upgrade - #[prost(uint64, tag = "4")] pub proposal_id: u64, } } @@ -2216,174 +1565,74 @@ pub mod get_running_sns_version_response { /// Failed if it is past the time when it should have been marked as failed. /// This is useful in the case where the asynchronous process may have failed to /// complete -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct FailStuckUpgradeInProgressRequest {} /// Response to FailStuckUpgradeInProgressRequest -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct FailStuckUpgradeInProgressResponse {} /// Empty message to use in oneof fields that represent empty /// enums. #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - serde::Serialize, - Clone, - Copy, - PartialEq, - ::prost::Message, + candid::CandidType, candid::Deserialize, Debug, serde::Serialize, Clone, Copy, PartialEq, )] pub struct Empty {} /// An operation that modifies a neuron. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct ManageNeuron { /// The modified neuron's subaccount which also serves as the neuron's ID. - #[prost(bytes = "vec", tag = "1")] #[serde(with = "serde_bytes")] - pub subaccount: ::prost::alloc::vec::Vec, - #[prost( - oneof = "manage_neuron::Command", - tags = "2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13" - )] - pub command: ::core::option::Option, + pub subaccount: Vec, + pub command: Option, } /// Nested message and enum types in `ManageNeuron`. pub mod manage_neuron { /// The operation that increases a neuron's dissolve delay. It can be /// increased up to a maximum defined in the nervous system parameters. - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct IncreaseDissolveDelay { /// The additional dissolve delay that should be added to the neuron's /// current dissolve delay. - #[prost(uint32, tag = "1")] pub additional_dissolve_delay_seconds: u32, } /// The operation that starts dissolving a neuron, i.e., changes a neuron's /// state such that it is dissolving. 
- #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct StartDissolving {} /// The operation that stops dissolving a neuron, i.e., changes a neuron's /// state such that it is non-dissolving. - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct StopDissolving {} /// An (idempotent) alternative to IncreaseDissolveDelay where the dissolve delay /// is passed as an absolute timestamp in seconds since the Unix epoch. - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct SetDissolveTimestamp { /// The time when the neuron (newly) should become dissolved, in seconds /// since the Unix epoch. - #[prost(uint64, tag = "1")] pub dissolve_timestamp_seconds: u64, } /// Changes auto-stake maturity for this Neuron. While on, auto-stake /// maturity will cause all the maturity generated by voting rewards /// to this neuron to be automatically staked and contribute to the /// voting power of the neuron. - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct ChangeAutoStakeMaturity { - #[prost(bool, tag = "1")] pub requested_setting_for_auto_stake_maturity: bool, } /// Commands that only configure a given neuron, but do not interact /// with the outside world. They all require the caller to have /// `NeuronPermissionType::ConfigureDissolveState` for the neuron. - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct Configure { - #[prost(oneof = "configure::Operation", tags = "1, 2, 3, 4, 5")] - pub operation: ::core::option::Option, + pub operation: Option, } /// Nested message and enum types in `Configure`. pub mod configure { - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Oneof, - )] + #[derive(candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub enum Operation { - #[prost(message, tag = "1")] IncreaseDissolveDelay(super::IncreaseDissolveDelay), - #[prost(message, tag = "2")] StartDissolving(super::StartDissolving), - #[prost(message, tag = "3")] StopDissolving(super::StopDissolving), - #[prost(message, tag = "4")] SetDissolveTimestamp(super::SetDissolveTimestamp), - #[prost(message, tag = "5")] ChangeAutoStakeMaturity(super::ChangeAutoStakeMaturity), } } @@ -2392,36 +1641,20 @@ pub mod manage_neuron { /// Thereby, the neuron's accumulated fees are burned and (if relevant in /// the given nervous system) the token equivalent of the neuron's accumulated /// maturity are minted and also transferred to the specified account. 
-    #[derive(
-        candid::CandidType,
-        candid::Deserialize,
-        comparable::Comparable,
-        Clone,
-        PartialEq,
-        ::prost::Message,
-    )]
+    #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)]
    pub struct Disburse {
        /// The (optional) amount to disburse out of the neuron. If not specified the cached
        /// stake is used.
-        #[prost(message, optional, tag = "1")]
-        pub amount: ::core::option::Option<disburse::Amount>,
+        pub amount: Option<disburse::Amount>,
        /// The ledger account to which the disbursed tokens are transferred.
-        #[prost(message, optional, tag = "2")]
-        pub to_account: ::core::option::Option<super::Account>,
+        pub to_account: Option<super::Account>,
    }
    /// Nested message and enum types in `Disburse`.
    pub mod disburse {
        #[derive(
-            candid::CandidType,
-            candid::Deserialize,
-            comparable::Comparable,
-            Clone,
-            Copy,
-            PartialEq,
-            ::prost::Message,
+            Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq,
        )]
        pub struct Amount {
-            #[prost(uint64, tag = "1")]
            pub e8s: u64,
        }
    }
@@ -2434,97 +1667,51 @@ pub mod manage_neuron {
    /// the dissolve state. The parent neuron's fees and maturity (if applicable in the given
    /// nervous system) remain in the parent neuron and the child neuron's fees and maturity
    /// are initialized to be zero.
-    #[derive(
-        candid::CandidType,
-        candid::Deserialize,
-        comparable::Comparable,
-        Clone,
-        Copy,
-        PartialEq,
-        ::prost::Message,
-    )]
+    #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)]
    pub struct Split {
        /// The amount of governance tokens (measured in fractions of 10E-8 of
        /// a governance token) to be split to the child neuron.
-        #[prost(uint64, tag = "1")]
        pub amount_e8s: u64,
        /// The nonce that is used to compute the child neuron's
        /// subaccount which also serves as the child neuron's ID. This nonce
        /// is also used as the memo field in the ledger transfer that transfers
        /// the stake from the parent to the child neuron.
-        #[prost(uint64, tag = "2")]
        pub memo: u64,
    }
    /// The operation that merges a given percentage of a neuron's maturity (if applicable
    /// to the nervous system) to the neuron's stake.
-    #[derive(
-        candid::CandidType,
-        candid::Deserialize,
-        comparable::Comparable,
-        Clone,
-        Copy,
-        PartialEq,
-        ::prost::Message,
-    )]
+    #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)]
    pub struct MergeMaturity {
        /// The percentage of maturity to merge, from 1 to 100.
-        #[prost(uint32, tag = "1")]
        pub percentage_to_merge: u32,
    }
    /// Stake the maturity of a neuron.
    /// The caller can choose a percentage of the current maturity to stake.
    /// If 'percentage_to_stake' is not provided, all of the neuron's current
    /// maturity will be staked.
-    #[derive(
-        candid::CandidType,
-        candid::Deserialize,
-        comparable::Comparable,
-        Clone,
-        Copy,
-        PartialEq,
-        ::prost::Message,
-    )]
+    #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)]
    pub struct StakeMaturity {
        /// The percentage of maturity to stake, from 1 to 100 (inclusive).
-        #[prost(uint32, optional, tag = "1")]
-        pub percentage_to_stake: ::core::option::Option<u32>,
+        pub percentage_to_stake: Option<u32>,
    }
    /// Disburse the maturity of a neuron to any ledger account. If an account
    /// is not specified, the caller's account will be used. The caller can choose
    /// a percentage of the current maturity to disburse to the ledger account. The
    /// resulting amount to disburse must be greater than or equal to the
    /// transaction fee.
- #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct DisburseMaturity { /// The percentage to disburse, from 1 to 100 - #[prost(uint32, tag = "1")] pub percentage_to_disburse: u32, /// The (optional) principal to which to transfer the stake. - #[prost(message, optional, tag = "2")] - pub to_account: ::core::option::Option, + pub to_account: Option, } - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct FinalizeDisburseMaturity { /// The amount to be disbursed in e8s of the governance token. - #[prost(uint64, tag = "1")] pub amount_to_be_disbursed_e8s: u64, /// The principal to which to transfer the stake (required). - #[prost(message, optional, tag = "2")] - pub to_account: ::core::option::Option, + pub to_account: Option, } /// The operation that adds a new follow relation to a neuron, specifying /// that it follows a set of followee neurons for a given proposal function. @@ -2546,85 +1733,42 @@ pub mod manage_neuron { /// then it becomes a catch-all follow rule, which will be used to vote /// automatically on proposals with actions for which no /// specific rule has been specified. - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct Follow { /// The function id of the proposal function defining for which proposals /// this follow relation is relevant. - #[prost(uint64, tag = "1")] pub function_id: u64, /// The list of followee neurons, specified by their neuron ID. - #[prost(message, repeated, tag = "2")] - pub followees: ::prost::alloc::vec::Vec, + pub followees: Vec, } /// The operation that registers a given vote from the neuron for a given /// proposal (a directly cast vote as opposed to a vote that is cast as /// a result of a follow relation). - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct RegisterVote { /// The ID of the proposal that the vote is cast for. - #[prost(message, optional, tag = "1")] - pub proposal: ::core::option::Option, + pub proposal: Option, /// The vote that is cast to adopt or reject the proposal. - #[prost(enumeration = "super::Vote", tag = "2")] pub vote: i32, } /// The operation that claims a new neuron (if it does not exist yet) or /// refreshes the stake of the neuron (if it already exists). - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct ClaimOrRefresh { - #[prost(oneof = "claim_or_refresh::By", tags = "2, 3")] - pub by: ::core::option::Option, + pub by: Option, } /// Nested message and enum types in `ClaimOrRefresh`. 
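The memo/subaccount relationship that `ClaimOrRefresh` relies on (see `MemoAndController` below) can be sketched as follows. This assumes the staking-subaccount convention used elsewhere in the IC codebase (SHA-256 over a length-prefixed "neuron-stake" domain separator, the controller, and the big-endian memo, via the sha2 crate); that convention is not defined by this change, so treat the details as an assumption:

use ic_base_types::PrincipalId;
use sha2::{Digest, Sha256};

fn staking_subaccount(controller: &PrincipalId, memo: u64) -> [u8; 32] {
    let mut hasher = Sha256::new();
    hasher.update([0x0c]); // length prefix of the domain separator (assumed)
    hasher.update(b"neuron-stake");
    hasher.update(controller.as_slice());
    hasher.update(memo.to_be_bytes());
    hasher.finalize().into()
}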
pub mod claim_or_refresh {
        /// (see MemoAndController below)
-        #[derive(
-            candid::CandidType,
-            candid::Deserialize,
-            comparable::Comparable,
-            Clone,
-            PartialEq,
-            ::prost::Message,
-        )]
+        #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)]
        pub struct MemoAndController {
            /// The memo (nonce) that is used to compute the neuron's subaccount
            /// (where the tokens were staked to).
-            #[prost(uint64, tag = "1")]
            pub memo: u64,
            /// The principal for which the neuron should be claimed.
-            #[prost(message, optional, tag = "2")]
-            pub controller: ::core::option::Option<::ic_base_types::PrincipalId>,
+            pub controller: Option<::ic_base_types::PrincipalId>,
        }
-        #[derive(
-            candid::CandidType,
-            candid::Deserialize,
-            comparable::Comparable,
-            Clone,
-            PartialEq,
-            ::prost::Oneof,
-        )]
+        #[derive(candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)]
        pub enum By {
            /// The memo and principal used to define the neuron to be claimed
            /// or refreshed. Specifically, the memo (nonce) and the given principal
            /// are used to compute the subaccount of the SNS governance canister's
            /// ledger account where the tokens to be staked or used for
            /// refreshing a neuron were transferred to.
            /// If 'controller' is omitted, the id of the principal who calls this
            /// operation will be used.
-            #[prost(message, tag = "2")]
            MemoAndController(MemoAndController),
            /// The neuron ID of a neuron that should be refreshed. This just serves
            /// as an alternative way to specify a neuron to be refreshed, but cannot
            /// be used to claim new neurons.
-            #[prost(message, tag = "3")]
            NeuronId(super::super::Empty),
        }
    }
@@ -2647,386 +1789,177 @@ pub mod manage_neuron {
    /// If the PrincipalId doesn't have existing permissions, a new entry will be added for it
    /// with the provided permissions. If a principalId already has permissions for the neuron,
    /// the new permissions will be added to the existing set.
-    #[derive(
-        candid::CandidType,
-        candid::Deserialize,
-        comparable::Comparable,
-        Clone,
-        PartialEq,
-        ::prost::Message,
-    )]
+    #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)]
    pub struct AddNeuronPermissions {
        /// The PrincipalId that the permissions will be granted to.
-        #[prost(message, optional, tag = "1")]
-        pub principal_id: ::core::option::Option<::ic_base_types::PrincipalId>,
+        pub principal_id: Option<::ic_base_types::PrincipalId>,
        /// The set of permissions that will be granted to the PrincipalId.
-        #[prost(message, optional, tag = "2")]
-        pub permissions_to_add: ::core::option::Option<super::NeuronPermissionList>,
+        pub permissions_to_add: Option<super::NeuronPermissionList>,
    }
    /// Remove a set of permissions from the Neuron for the given PrincipalId. If a PrincipalId has all of
    /// its permissions removed, it will be removed from the neuron's permissions list. This is a dangerous
    /// operation as it's possible to remove all permissions for a neuron and no longer be able to modify
    /// its state, i.e. disbursing the neuron back into the governance token.
-    #[derive(
-        candid::CandidType,
-        candid::Deserialize,
-        comparable::Comparable,
-        Clone,
-        PartialEq,
-        ::prost::Message,
-    )]
+    #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)]
    pub struct RemoveNeuronPermissions {
        /// The PrincipalId that the permissions will be revoked from.
-        #[prost(message, optional, tag = "1")]
-        pub principal_id: ::core::option::Option<::ic_base_types::PrincipalId>,
+        pub principal_id: Option<::ic_base_types::PrincipalId>,
        /// The set of permissions that will be revoked from the PrincipalId.
- #[prost(message, optional, tag = "2")] - pub permissions_to_remove: ::core::option::Option, + pub permissions_to_remove: Option, } - #[derive(candid::CandidType, candid::Deserialize, comparable::Comparable)] + #[derive(candid::CandidType, candid::Deserialize, Debug)] #[allow(clippy::large_enum_variant)] - #[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq)] pub enum Command { - #[prost(message, tag = "2")] Configure(Configure), - #[prost(message, tag = "3")] Disburse(Disburse), - #[prost(message, tag = "4")] Follow(Follow), /// Making a proposal is defined by a proposal, which contains the proposer neuron. /// Making a proposal will implicitly cast a yes vote for the proposing neuron. - #[prost(message, tag = "5")] MakeProposal(super::Proposal), - #[prost(message, tag = "6")] RegisterVote(RegisterVote), - #[prost(message, tag = "7")] Split(Split), - #[prost(message, tag = "8")] ClaimOrRefresh(ClaimOrRefresh), - #[prost(message, tag = "9")] MergeMaturity(MergeMaturity), - #[prost(message, tag = "10")] DisburseMaturity(DisburseMaturity), - #[prost(message, tag = "11")] AddNeuronPermissions(AddNeuronPermissions), - #[prost(message, tag = "12")] RemoveNeuronPermissions(RemoveNeuronPermissions), - #[prost(message, tag = "13")] StakeMaturity(StakeMaturity), } } /// The response of a ManageNeuron command. /// There is a dedicated response type for each `ManageNeuron.command` field. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct ManageNeuronResponse { - #[prost( - oneof = "manage_neuron_response::Command", - tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13" - )] - pub command: ::core::option::Option, + pub command: Option, } /// Nested message and enum types in `ManageNeuronResponse`. pub mod manage_neuron_response { /// The response to the ManageNeuron command 'configure'. - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct ConfigureResponse {} /// The response to the ManageNeuron command 'disburse'. - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct DisburseResponse { /// The block height of the ledger where the tokens were disbursed to the /// given account. - #[prost(uint64, tag = "1")] pub transfer_block_height: u64, } /// The response to the ManageNeuron command 'merge_maturity'. - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct MergeMaturityResponse { /// The maturity that was merged in fractions of /// 10E-8 of a governance token. - #[prost(uint64, tag = "1")] pub merged_maturity_e8s: u64, /// The resulting cached stake of the modified neuron /// in fractions of 10E-8 of a governance token. 
- #[prost(uint64, tag = "2")] pub new_stake_e8s: u64, } - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct DisburseMaturityResponse { /// This field is deprecated and is populated with the same value as `amount_deducted_e8s`. - #[prost(uint64, tag = "2")] pub amount_disbursed_e8s: u64, /// The amount of maturity in e8s of the governance token deducted from the Neuron. /// This amount will undergo maturity modulation if enabled, and may be increased or /// decreased at the time of disbursement. - #[prost(uint64, optional, tag = "3")] - pub amount_deducted_e8s: ::core::option::Option, + pub amount_deducted_e8s: Option, } - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct StakeMaturityResponse { - #[prost(uint64, tag = "1")] pub maturity_e8s: u64, - #[prost(uint64, tag = "2")] pub staked_maturity_e8s: u64, } /// The response to the ManageNeuron command 'follow'. - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct FollowResponse {} /// The response to the ManageNeuron command 'make_proposal'. - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct MakeProposalResponse { /// The ID of the created proposal. - #[prost(message, optional, tag = "1")] - pub proposal_id: ::core::option::Option, + pub proposal_id: Option, } /// The response to the ManageNeuron command 'register_vote'. - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct RegisterVoteResponse {} /// The response to the ManageNeuron command 'split'. - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct SplitResponse { /// The ID of the 'child neuron' that was newly created. - #[prost(message, optional, tag = "1")] - pub created_neuron_id: ::core::option::Option, + pub created_neuron_id: Option, } /// The response to the ManageNeuron command 'claim_or_refresh'. - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct ClaimOrRefreshResponse { /// The neuron ID of the neuron that was newly claimed or /// refreshed. - #[prost(message, optional, tag = "1")] - pub refreshed_neuron_id: ::core::option::Option, + pub refreshed_neuron_id: Option, } /// The response to the ManageNeuron command 'add_neuron_permissions'. 
- #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct AddNeuronPermissionsResponse {} /// The response to the ManageNeuron command 'remove_neuron_permissions'. - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct RemoveNeuronPermissionsResponse {} - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Oneof, - )] + #[derive(candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub enum Command { - #[prost(message, tag = "1")] Error(super::GovernanceError), - #[prost(message, tag = "2")] Configure(ConfigureResponse), - #[prost(message, tag = "3")] Disburse(DisburseResponse), - #[prost(message, tag = "4")] Follow(FollowResponse), - #[prost(message, tag = "5")] MakeProposal(MakeProposalResponse), - #[prost(message, tag = "6")] RegisterVote(RegisterVoteResponse), - #[prost(message, tag = "7")] Split(SplitResponse), - #[prost(message, tag = "8")] ClaimOrRefresh(ClaimOrRefreshResponse), - #[prost(message, tag = "9")] MergeMaturity(MergeMaturityResponse), - #[prost(message, tag = "10")] DisburseMaturity(DisburseMaturityResponse), - #[prost(message, tag = "11")] AddNeuronPermission(AddNeuronPermissionsResponse), - #[prost(message, tag = "12")] RemoveNeuronPermission(RemoveNeuronPermissionsResponse), - #[prost(message, tag = "13")] StakeMaturity(StakeMaturityResponse), } } /// An operation that attempts to get a neuron by a given neuron ID. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct GetNeuron { - #[prost(message, optional, tag = "1")] - pub neuron_id: ::core::option::Option, + pub neuron_id: Option, } /// A response to the GetNeuron command. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct GetNeuronResponse { /// The response to a GetNeuron command is either an error or /// the requested neuron. - #[prost(oneof = "get_neuron_response::Result", tags = "1, 2")] - pub result: ::core::option::Option, + pub result: Option, } /// Nested message and enum types in `GetNeuronResponse`. pub mod get_neuron_response { /// The response to a GetNeuron command is either an error or /// the requested neuron. - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Oneof, - )] + #[derive(candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub enum Result { - #[prost(message, tag = "1")] Error(super::GovernanceError), - #[prost(message, tag = "2")] Neuron(super::Neuron), } } -/// An operation that attempts to get a proposal by a given proposal ID. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, -)] +/// An operation that attempts to get a proposal by a given proposal ID. 
+#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct GetProposal { - #[prost(message, optional, tag = "1")] - pub proposal_id: ::core::option::Option, + pub proposal_id: Option, } /// A response to the GetProposal command. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct GetProposalResponse { /// The response to a GetProposal command is either an error or /// the proposal data corresponding to the requested proposal. - #[prost(oneof = "get_proposal_response::Result", tags = "1, 2")] - pub result: ::core::option::Option, + pub result: Option, } /// Nested message and enum types in `GetProposalResponse`. pub mod get_proposal_response { /// The response to a GetProposal command is either an error or /// the proposal data corresponding to the requested proposal. - #[derive(candid::CandidType, candid::Deserialize, comparable::Comparable)] + #[derive(candid::CandidType, candid::Deserialize, Debug)] #[allow(clippy::large_enum_variant)] - #[derive(Clone, PartialEq, ::prost::Oneof)] + #[derive(Clone, PartialEq)] pub enum Result { - #[prost(message, tag = "1")] Error(super::GovernanceError), - #[prost(message, tag = "2")] Proposal(super::ProposalData), } } @@ -3037,18 +1970,10 @@ pub mod get_proposal_response { /// Proposals are stored using an increasing id where the most recent proposals /// have the highest ids. ListProposals reverses the list and paginates backwards /// using `before_proposal`, so the first element returned is the latest proposal. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct ListProposals { /// Limit the number of Proposals returned in each page, from 1 to 100. /// If a value outside of this range is provided, 100 will be used. - #[prost(uint32, tag = "1")] pub limit: u32, /// The proposal ID specifying which proposals to return. /// This should be set to the last proposal of the previously returned page and @@ -3056,12 +1981,10 @@ pub struct ListProposals { /// If this is specified, then only the proposals that have a proposal ID strictly /// lower than the specified one are returned. If this is not specified /// then the list of proposals starts with the most recent proposal's ID. - #[prost(message, optional, tag = "2")] - pub before_proposal: ::core::option::Option, + pub before_proposal: Option, /// A list of proposal types, specifying that proposals of the given /// types should be excluded in this list. - #[prost(uint64, repeated, tag = "3")] - pub exclude_type: ::prost::alloc::vec::Vec, + pub exclude_type: Vec, /// A list of proposal reward statuses, specifying that only proposals that /// that have one of the define reward statuses should be included /// in the list. @@ -3070,48 +1993,29 @@ pub struct ListProposals { /// Example: If users are only interested in proposals for which they can /// receive voting rewards they can use this to filter for proposals /// with reward status PROPOSAL_REWARD_STATUS_ACCEPT_VOTES. 
- #[prost(enumeration = "ProposalRewardStatus", repeated, tag = "4")] - pub include_reward_status: ::prost::alloc::vec::Vec, + pub include_reward_status: Vec, /// A list of proposal decision statuses, specifying that only proposals that /// that have one of the define decision statuses should be included /// in the list. /// If this list is empty, no restriction is applied. - #[prost(enumeration = "ProposalDecisionStatus", repeated, tag = "5")] - pub include_status: ::prost::alloc::vec::Vec, + pub include_status: Vec, } /// A response to the ListProposals command. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct ListProposalsResponse { /// The returned list of proposals' ProposalData. - #[prost(message, repeated, tag = "1")] - pub proposals: ::prost::alloc::vec::Vec, + pub proposals: Vec, /// Whether ballots cast by the caller are included in the returned proposals. - #[prost(bool, optional, tag = "2")] - pub include_ballots_by_caller: ::core::option::Option, + pub include_ballots_by_caller: Option, } /// An operation that lists all neurons tracked in the Governance state in a /// paginated fashion. /// Listing of all neurons can be accomplished using `limit` and `start_page_at`. /// To only list neurons associated with a given principal, use `of_principal`. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct ListNeurons { /// Limit the number of Neurons returned in each page, from 1 to 100. /// If a value outside of this range is provided, 100 will be used. - #[prost(uint32, tag = "1")] pub limit: u32, /// Used to indicate where the next page of Neurons should start. Should be /// set to the last neuron of the previously returned page and will not be @@ -3119,708 +2023,333 @@ pub struct ListNeurons { /// size limit starting at the "0th" Neuron. Neurons are not kept in any specific /// order, but their ordering is deterministic, so this can be used to return all /// the neurons one page at a time. - #[prost(message, optional, tag = "2")] - pub start_page_at: ::core::option::Option, + pub start_page_at: Option, /// A principal ID, specifying that only neurons for which this principal has /// any permissions should be included in the list. /// If this is not specified, no restriction is applied. - #[prost(message, optional, tag = "3")] - pub of_principal: ::core::option::Option<::ic_base_types::PrincipalId>, + pub of_principal: Option<::ic_base_types::PrincipalId>, } /// A response to the ListNeurons command. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct ListNeuronsResponse { /// The returned list of neurons. - #[prost(message, repeated, tag = "1")] - pub neurons: ::prost::alloc::vec::Vec, + pub neurons: Vec, } /// The response to the list_nervous_system_functions query. 
-#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct ListNervousSystemFunctionsResponse { /// Current set of nervous system function, both native and user-defined, /// that can be executed by proposal. - #[prost(message, repeated, tag = "1")] - pub functions: ::prost::alloc::vec::Vec, + pub functions: Vec, /// Set of nervous system function ids that are reserved and cannot be /// used to add new NervousSystemFunctions. - #[prost(uint64, repeated, tag = "2")] - pub reserved_ids: ::prost::alloc::vec::Vec, + pub reserved_ids: Vec, } -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct SetMode { - #[prost(enumeration = "governance::Mode", tag = "1")] pub mode: i32, } -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct SetModeResponse {} -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct GetMode {} -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct GetModeResponse { - #[prost(enumeration = "governance::Mode", optional, tag = "1")] - pub mode: ::core::option::Option, + pub mode: Option, } /// The request for the `claim_swap_neurons` method. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct ClaimSwapNeuronsRequest { /// The set of parameters that define the neurons created in `claim_swap_neurons`. For /// each NeuronRecipe, one neuron will be created. - #[prost(message, optional, tag = "2")] - pub neuron_recipes: ::core::option::Option, + pub neuron_recipes: Option, } /// Nested message and enum types in `ClaimSwapNeuronsRequest`. pub mod claim_swap_neurons_request { /// Replacement for NeuronParameters. Contains the information needed to set up /// a neuron for a swap participant. - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct NeuronRecipe { /// The principal that should be the controller of the SNS neuron - #[prost(message, optional, tag = "1")] - pub controller: ::core::option::Option<::ic_base_types::PrincipalId>, + pub controller: Option<::ic_base_types::PrincipalId>, /// The ID of the SNS neuron - #[prost(message, optional, tag = "2")] - pub neuron_id: ::core::option::Option, + pub neuron_id: Option, /// The SNS neuron's stake in e8s (10E-8 of a token) - #[prost(uint64, optional, tag = "3")] - pub stake_e8s: ::core::option::Option, + pub stake_e8s: Option, /// The duration in seconds that the neuron's dissolve delay will be set to. 
- #[prost(uint64, optional, tag = "4")] - pub dissolve_delay_seconds: ::core::option::Option, + pub dissolve_delay_seconds: Option, /// The neurons this neuron should follow - #[prost(message, optional, tag = "5")] - pub followees: ::core::option::Option, - #[prost(oneof = "neuron_recipe::Participant", tags = "6, 7")] - pub participant: ::core::option::Option, + pub followees: Option, + pub participant: Option, } /// Nested message and enum types in `NeuronRecipe`. pub mod neuron_recipe { /// The info that for a participant in the Neurons' Fund - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct NeuronsFund { /// The neuron ID of the NNS neuron that participated in the Neurons' Fund. - #[prost(uint64, optional, tag = "1")] - pub nns_neuron_id: ::core::option::Option, + pub nns_neuron_id: Option, /// The controller of the NNS neuron that participated in the Neurons' Fund. - #[prost(message, optional, tag = "2")] - pub nns_neuron_controller: ::core::option::Option<::ic_base_types::PrincipalId>, + pub nns_neuron_controller: Option<::ic_base_types::PrincipalId>, /// The hotkeys of the NNS neuron that participated in the Neurons' Fund. - #[prost(message, optional, tag = "3")] - pub nns_neuron_hotkeys: - ::core::option::Option<::ic_nervous_system_proto::pb::v1::Principals>, + pub nns_neuron_hotkeys: Option<::ic_nervous_system_proto::pb::v1::Principals>, } /// The info that for a direct participant #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, + Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq, )] pub struct Direct {} - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Oneof, - )] + #[derive(candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub enum Participant { - #[prost(message, tag = "6")] Direct(Direct), - #[prost(message, tag = "7")] NeuronsFund(NeuronsFund), } } /// Needed to cause prost to generate a type isomorphic to /// Optional>. - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct NeuronRecipes { - #[prost(message, repeated, tag = "1")] - pub neuron_recipes: ::prost::alloc::vec::Vec, + pub neuron_recipes: Vec, } } /// The response for the `claim_swap_neurons` method. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct ClaimSwapNeuronsResponse { /// ClaimSwapNeurons will either return an error, in which /// no requested neurons were claimed, or a vector with /// various neuron statuses for the requested neuron ids. - #[prost( - oneof = "claim_swap_neurons_response::ClaimSwapNeuronsResult", - tags = "4, 5" - )] - pub claim_swap_neurons_result: - ::core::option::Option, + pub claim_swap_neurons_result: Option, } /// Nested message and enum types in `ClaimSwapNeuronsResponse`. pub mod claim_swap_neurons_response { /// The ok result from `claim_swap_neurons. 
For every requested neuron, /// a SwapNeuron message is returned, and should equal the count of /// `ClaimSwapNeuronsRequest.neuron_recipes`. - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct ClaimedSwapNeurons { - #[prost(message, repeated, tag = "1")] - pub swap_neurons: ::prost::alloc::vec::Vec, + pub swap_neurons: Vec, } /// SwapNeuron associates the status of a neuron attempting to be /// claimed with a NeuronId. The `id` field will correspond with a /// `ClaimSwapNeuronsRequest.neuron_recipes.neuron_id` field in /// the request object used in `claim_swap_neurons`. - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, - )] + #[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct SwapNeuron { - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, + pub id: Option, /// The status of claiming of a requested Sale neuron. - #[prost(enumeration = "super::ClaimedSwapNeuronStatus", tag = "2")] pub status: i32, } /// ClaimSwapNeurons will either return an error, in which /// no requested neurons were claimed, or a vector with /// various neuron statuses for the requested neuron ids. - #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Oneof, - )] + #[derive(candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub enum ClaimSwapNeuronsResult { - #[prost(message, tag = "4")] Ok(ClaimedSwapNeurons), - #[prost(enumeration = "super::ClaimSwapNeuronsError", tag = "5")] Err(i32), } } -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct GetMaturityModulationRequest {} -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct GetMaturityModulationResponse { - #[prost(message, optional, tag = "1")] - pub maturity_modulation: ::core::option::Option, + pub maturity_modulation: Option, } /// A request to add maturity to a neuron. The associated endpoint is only /// available when governance is compiled with the `test` feature enabled. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct AddMaturityRequest { - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, - #[prost(uint64, optional, tag = "2")] - pub amount_e8s: ::core::option::Option, + pub id: Option, + pub amount_e8s: Option, } /// The response to a request to add maturity to a neuron. The associated endpoint is only /// available when governance is compiled with the `test` feature enabled. 
-#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct AddMaturityResponse { - #[prost(uint64, optional, tag = "1")] - pub new_maturity_e8s: ::core::option::Option, + pub new_maturity_e8s: Option, } /// A test-only API that advances the target version of the SNS. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct AdvanceTargetVersionRequest { - #[prost(message, optional, tag = "1")] - pub target_version: ::core::option::Option, + pub target_version: Option, } /// The response to a request to advance the target version of the SNS. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct AdvanceTargetVersionResponse {} /// A test-only API that refreshes the cached upgrade steps. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct RefreshCachedUpgradeStepsRequest {} /// The response to a request to refresh the cached upgrade steps. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct RefreshCachedUpgradeStepsResponse {} /// Represents a single entry in the upgrade journal. #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - serde::Serialize, - Clone, - PartialEq, - ::prost::Message, + Default, candid::CandidType, candid::Deserialize, Debug, serde::Serialize, Clone, PartialEq, )] pub struct UpgradeJournalEntry { - #[prost(uint64, optional, tag = "6")] - pub timestamp_seconds: ::core::option::Option, - #[prost(oneof = "upgrade_journal_entry::Event", tags = "1, 7, 2, 3, 4, 5")] - pub event: ::core::option::Option, + pub timestamp_seconds: Option, + pub event: Option, } /// Nested message and enum types in `UpgradeJournalEntry`. 
pub mod upgrade_journal_entry { #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - serde::Serialize, - Clone, - PartialEq, - ::prost::Message, + candid::CandidType, candid::Deserialize, Debug, serde::Serialize, Clone, PartialEq, )] pub struct UpgradeStepsRefreshed { - #[prost(message, optional, tag = "2")] - pub upgrade_steps: ::core::option::Option, + pub upgrade_steps: Option, } #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - serde::Serialize, - Clone, - PartialEq, - ::prost::Message, + candid::CandidType, candid::Deserialize, Debug, serde::Serialize, Clone, PartialEq, )] pub struct UpgradeStepsReset { - #[prost(string, optional, tag = "1")] - pub human_readable: ::core::option::Option<::prost::alloc::string::String>, - #[prost(message, optional, tag = "2")] - pub upgrade_steps: ::core::option::Option, + pub human_readable: Option, + pub upgrade_steps: Option, } #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - serde::Serialize, - Clone, - PartialEq, - ::prost::Message, + candid::CandidType, candid::Deserialize, Debug, serde::Serialize, Clone, PartialEq, )] pub struct TargetVersionSet { - #[prost(message, optional, tag = "1")] - pub old_target_version: ::core::option::Option, - #[prost(message, optional, tag = "2")] - pub new_target_version: ::core::option::Option, + pub old_target_version: Option, + pub new_target_version: Option, } #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - serde::Serialize, - Clone, - PartialEq, - ::prost::Message, + candid::CandidType, candid::Deserialize, Debug, serde::Serialize, Clone, PartialEq, )] pub struct TargetVersionReset { - #[prost(message, optional, tag = "1")] - pub old_target_version: ::core::option::Option, - #[prost(message, optional, tag = "2")] - pub new_target_version: ::core::option::Option, - #[prost(string, optional, tag = "3")] - pub human_readable: ::core::option::Option<::prost::alloc::string::String>, + pub old_target_version: Option, + pub new_target_version: Option, + pub human_readable: Option, } #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - serde::Serialize, - Clone, - PartialEq, - ::prost::Message, + candid::CandidType, candid::Deserialize, Debug, serde::Serialize, Clone, PartialEq, )] pub struct UpgradeStarted { - #[prost(message, optional, tag = "1")] - pub current_version: ::core::option::Option, - #[prost(message, optional, tag = "2")] - pub expected_version: ::core::option::Option, - #[prost(oneof = "upgrade_started::Reason", tags = "3, 4")] - pub reason: ::core::option::Option, + pub current_version: Option, + pub expected_version: Option, + pub reason: Option, } /// Nested message and enum types in `UpgradeStarted`. 
pub mod upgrade_started { #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - serde::Serialize, - Clone, - Copy, - PartialEq, - ::prost::Oneof, + candid::CandidType, candid::Deserialize, Debug, serde::Serialize, Clone, Copy, PartialEq, )] pub enum Reason { - #[prost(message, tag = "3")] UpgradeSnsToNextVersionProposal(super::super::ProposalId), - #[prost(message, tag = "4")] BehindTargetVersion(super::super::Empty), } } #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - serde::Serialize, - Clone, - PartialEq, - ::prost::Message, + candid::CandidType, candid::Deserialize, Debug, serde::Serialize, Clone, PartialEq, )] pub struct UpgradeOutcome { - #[prost(string, optional, tag = "1")] - pub human_readable: ::core::option::Option<::prost::alloc::string::String>, - #[prost(oneof = "upgrade_outcome::Status", tags = "2, 3, 4, 5")] - pub status: ::core::option::Option, + pub human_readable: Option, + pub status: Option, } /// Nested message and enum types in `UpgradeOutcome`. pub mod upgrade_outcome { #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - serde::Serialize, - Clone, - PartialEq, - ::prost::Message, + candid::CandidType, candid::Deserialize, Debug, serde::Serialize, Clone, PartialEq, )] pub struct InvalidState { - #[prost(message, optional, tag = "1")] - pub version: ::core::option::Option, + pub version: Option, } #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - serde::Serialize, - Clone, - PartialEq, - ::prost::Oneof, + candid::CandidType, candid::Deserialize, Debug, serde::Serialize, Clone, PartialEq, )] pub enum Status { - #[prost(message, tag = "2")] Success(super::super::Empty), - #[prost(message, tag = "3")] Timeout(super::super::Empty), /// The SNS ended up being upgraded to a version that was not the expected one. - #[prost(message, tag = "4")] InvalidState(InvalidState), - #[prost(message, tag = "5")] ExternalFailure(super::super::Empty), } } #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - serde::Serialize, - Clone, - PartialEq, - ::prost::Oneof, + candid::CandidType, candid::Deserialize, Debug, serde::Serialize, Clone, PartialEq, )] pub enum Event { - #[prost(message, tag = "1")] UpgradeStepsRefreshed(UpgradeStepsRefreshed), - #[prost(message, tag = "7")] UpgradeStepsReset(UpgradeStepsReset), - #[prost(message, tag = "2")] TargetVersionSet(TargetVersionSet), - #[prost(message, tag = "3")] TargetVersionReset(TargetVersionReset), - #[prost(message, tag = "4")] UpgradeStarted(UpgradeStarted), - #[prost(message, tag = "5")] UpgradeOutcome(UpgradeOutcome), } } /// Needed to cause prost to generate a type isomorphic to Option>. #[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - serde::Serialize, - Clone, - PartialEq, - ::prost::Message, + Default, candid::CandidType, candid::Deserialize, Debug, serde::Serialize, Clone, PartialEq, )] pub struct UpgradeJournal { /// The entries in the upgrade journal. - #[prost(message, repeated, tag = "1")] - pub entries: ::prost::alloc::vec::Vec, + pub entries: Vec, } /// The upgrade journal contains all the information neede to audit previous SNS upgrades and understand its current state. /// It is being implemented as part of the "effortless SNS upgrade" feature. 
-#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct GetUpgradeJournalRequest { /// Maximum number of journal entries to return. /// If not specified, defaults to 100. Values larger than 100 will be capped at 100. - #[prost(uint64, optional, tag = "1")] - pub limit: ::core::option::Option, + pub limit: Option, /// The starting index from which to return entries, counting from the oldest entry (0). /// If not specified, return the most recent entries. - #[prost(uint64, optional, tag = "2")] - pub offset: ::core::option::Option, + pub offset: Option, } -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct GetUpgradeJournalResponse { - #[prost(message, optional, tag = "1")] - pub upgrade_steps: ::core::option::Option, - #[prost(uint64, optional, tag = "2")] - pub response_timestamp_seconds: ::core::option::Option, + pub upgrade_steps: Option, + pub response_timestamp_seconds: Option, /// The target version that the SNS will be upgraded to. /// Currently, this field is always None, but in the "effortless SNS upgrade" /// feature, it reflect the version of the SNS that the community has decided to upgrade to. - #[prost(message, optional, tag = "3")] - pub target_version: ::core::option::Option, - #[prost(message, optional, tag = "5")] - pub deployed_version: ::core::option::Option, - #[prost(message, optional, tag = "4")] - pub upgrade_journal: ::core::option::Option, - #[prost(uint64, optional, tag = "6")] - pub upgrade_journal_entry_count: ::core::option::Option, + pub target_version: Option, + pub deployed_version: Option, + pub upgrade_journal: Option, + pub upgrade_journal_entry_count: Option, } /// A request to mint tokens for a particular principal. The associated endpoint /// is only available on SNS governance, and only then when SNS governance is /// compiled with the `test` feature enabled. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct MintTokensRequest { - #[prost(message, optional, tag = "1")] - pub recipient: ::core::option::Option, - #[prost(uint64, optional, tag = "2")] - pub amount_e8s: ::core::option::Option, + pub recipient: Option, + pub amount_e8s: Option, } /// The response to a request to mint tokens for a particular principal. The /// associated endpoint is only available on SNS governance, and only then when /// SNS governance is compiled with the `test` feature enabled. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - Copy, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, Copy, PartialEq)] pub struct MintTokensResponse {} /// A Ledger subaccount. 
-#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct Subaccount { - #[prost(bytes = "vec", tag = "1")] #[serde(with = "serde_bytes")] - pub subaccount: ::prost::alloc::vec::Vec, + pub subaccount: Vec, } /// A Ledger account identified by the owner of the account `of` and /// the `subaccount`. If the `subaccount` is not specified then the default /// one is used. -#[derive( - candid::CandidType, - candid::Deserialize, - comparable::Comparable, - Clone, - PartialEq, - ::prost::Message, -)] +#[derive(Default, candid::CandidType, candid::Deserialize, Debug, Clone, PartialEq)] pub struct Account { /// The owner of the account. - #[prost(message, optional, tag = "1")] - pub owner: ::core::option::Option<::ic_base_types::PrincipalId>, + pub owner: Option<::ic_base_types::PrincipalId>, /// The subaccount of the account. If not set then the default /// subaccount (all bytes set to 0) is used. - #[prost(message, optional, tag = "2")] - pub subaccount: ::core::option::Option, + pub subaccount: Option, } /// The different types of neuron permissions, i.e., privileges to modify a neuron, /// that principals can have. #[derive( candid::CandidType, candid::Deserialize, - comparable::Comparable, + Debug, clap::ValueEnum, - strum_macros::EnumIter, Clone, Copy, - Debug, PartialEq, Eq, Hash, PartialOrd, Ord, - ::prost::Enumeration, )] #[repr(i32)] pub enum NeuronPermissionType { @@ -3877,7 +2406,7 @@ impl NeuronPermissionType { } } /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { + pub fn from_str_name(value: &str) -> Option { match value { "NEURON_PERMISSION_TYPE_UNSPECIFIED" => Some(Self::Unspecified), "NEURON_PERMISSION_TYPE_CONFIGURE_DISSOLVE_STATE" => Some(Self::ConfigureDissolveState), @@ -3898,16 +2427,14 @@ impl NeuronPermissionType { #[derive( candid::CandidType, candid::Deserialize, - comparable::Comparable, + Debug, Clone, Copy, - Debug, PartialEq, Eq, Hash, PartialOrd, Ord, - ::prost::Enumeration, )] #[repr(i32)] pub enum Vote { @@ -3933,7 +2460,7 @@ impl Vote { } } /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { + pub fn from_str_name(value: &str) -> Option { match value { "VOTE_UNSPECIFIED" => Some(Self::Unspecified), "VOTE_YES" => Some(Self::Yes), @@ -3945,16 +2472,14 @@ impl Vote { #[derive( candid::CandidType, candid::Deserialize, - comparable::Comparable, + Debug, Clone, Copy, - Debug, PartialEq, Eq, Hash, PartialOrd, Ord, - ::prost::Enumeration, )] #[repr(i32)] pub enum LogVisibility { @@ -3977,7 +2502,7 @@ impl LogVisibility { } } /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { + pub fn from_str_name(value: &str) -> Option { match value { "LOG_VISIBILITY_UNSPECIFIED" => Some(Self::Unspecified), "LOG_VISIBILITY_CONTROLLERS" => Some(Self::Controllers), @@ -3989,16 +2514,14 @@ impl LogVisibility { #[derive( candid::CandidType, candid::Deserialize, - comparable::Comparable, + Debug, Clone, Copy, - Debug, PartialEq, Eq, Hash, PartialOrd, Ord, - ::prost::Enumeration, )] #[repr(i32)] pub enum ProposalDecisionStatus { @@ -4031,7 +2554,7 @@ impl ProposalDecisionStatus { } } /// Creates an enum from field names used in the ProtoBuf definition. 
- pub fn from_str_name(value: &str) -> ::core::option::Option { + pub fn from_str_name(value: &str) -> Option { match value { "PROPOSAL_DECISION_STATUS_UNSPECIFIED" => Some(Self::Unspecified), "PROPOSAL_DECISION_STATUS_OPEN" => Some(Self::Open), @@ -4047,16 +2570,14 @@ impl ProposalDecisionStatus { #[derive( candid::CandidType, candid::Deserialize, - comparable::Comparable, + Debug, Clone, Copy, - Debug, PartialEq, Eq, Hash, PartialOrd, Ord, - ::prost::Enumeration, )] #[repr(i32)] pub enum ProposalRewardStatus { @@ -4088,7 +2609,7 @@ impl ProposalRewardStatus { } } /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { + pub fn from_str_name(value: &str) -> Option { match value { "PROPOSAL_REWARD_STATUS_UNSPECIFIED" => Some(Self::Unspecified), "PROPOSAL_REWARD_STATUS_ACCEPT_VOTES" => Some(Self::AcceptVotes), @@ -4105,16 +2626,14 @@ impl ProposalRewardStatus { #[derive( candid::CandidType, candid::Deserialize, - comparable::Comparable, + Debug, Clone, Copy, - Debug, PartialEq, Eq, Hash, PartialOrd, Ord, - ::prost::Enumeration, )] #[repr(i32)] pub enum ClaimedSwapNeuronStatus { @@ -4154,7 +2673,7 @@ impl ClaimedSwapNeuronStatus { } } /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { + pub fn from_str_name(value: &str) -> Option { match value { "CLAIMED_SWAP_NEURON_STATUS_UNSPECIFIED" => Some(Self::Unspecified), "CLAIMED_SWAP_NEURON_STATUS_SUCCESS" => Some(Self::Success), @@ -4170,16 +2689,14 @@ impl ClaimedSwapNeuronStatus { #[derive( candid::CandidType, candid::Deserialize, - comparable::Comparable, + Debug, Clone, Copy, - Debug, PartialEq, Eq, Hash, PartialOrd, Ord, - ::prost::Enumeration, )] #[repr(i32)] pub enum ClaimSwapNeuronsError { @@ -4206,7 +2723,7 @@ impl ClaimSwapNeuronsError { } } /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { + pub fn from_str_name(value: &str) -> Option { match value { "CLAIM_SWAP_NEURONS_ERROR_UNSPECIFIED" => Some(Self::Unspecified), "CLAIM_SWAP_NEURONS_ERROR_UNAUTHORIZED" => Some(Self::Unauthorized), From cf53688f96ac8d79c4a81dc8260c066d08d78017 Mon Sep 17 00:00:00 2001 From: "pr-automation-bot-public[bot]" <189003650+pr-automation-bot-public[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 14:54:40 -0800 Subject: [PATCH 03/33] chore: Update Mainnet IC revisions canisters file (#3423) Update mainnet system canisters revisions file to include the latest WASM version released on the mainnet. 
This PR is created automatically using [`mainnet_revisions.py`](https://github.com/dfinity/ic/blob/master/ci/src/mainnet_revisions/mainnet_revisions.py) Co-authored-by: CI Automation --- mainnet-canister-revisions.json | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/mainnet-canister-revisions.json b/mainnet-canister-revisions.json index f940ecf64b1..f9d79c4935f 100644 --- a/mainnet-canister-revisions.json +++ b/mainnet-canister-revisions.json @@ -44,16 +44,16 @@ "sha256": "98a7b7391608dc4a554d6964bad24157b6aaf890a05bbaad3fcc92033d9c7b02" }, "cycles-minting": { - "rev": "ee52ab3056cf5f39b09b08de70bdd20485c8b2dc", - "sha256": "bbb8995cb749ba9e2c721ff507f5e5313f32e69b1adf3df20e3901ed56a70b42" + "rev": "b5192581ccd35b67fe5a1f795ead9cbcd25956d6", + "sha256": "11c8dedd11741f05990498c90f925e9e37ad60647a65ef47caa59cdba234be6f" }, "genesis-token": { "rev": "4bed17bfc82cddc5691743db6228992cdc2740f4", "sha256": "fd25a4e2e283b498c3be1aaf63cc9b2726264d78a12b12f43ad453ceeb575e7c" }, "governance": { - "rev": "ee52ab3056cf5f39b09b08de70bdd20485c8b2dc", - "sha256": "a23918c2c5d1302e5d1149f557b0fb913ab65931c1bce3ffc94a48e3d14ecbac" + "rev": "b5192581ccd35b67fe5a1f795ead9cbcd25956d6", + "sha256": "5b67e1d273afb691a74ff29e0a495fb2ce7ee31196af58d801a8ce86a7dc4320" }, "index": { "rev": "7c6309cb5bec7ab28ed657ac7672af08a59fc1ba", @@ -64,16 +64,16 @@ "sha256": "a9ed1cb9dda555e0fc1038825eb7b3a6b366f17aa4b88575184c7537e864e551" }, "lifeline": { - "rev": "a0207146be211cdff83321c99e9e70baa62733c7", - "sha256": "76978515223287ece643bc7ca087eb310412b737e2382a73b8ae55fcb458da5b" + "rev": "b5192581ccd35b67fe5a1f795ead9cbcd25956d6", + "sha256": "8c8eb285de53ca5609abd7dc41ba3ec8eeb67708b81469311fd670e6738d7d0a" }, "registry": { - "rev": "86229594d61b433c39fc5331ab818ccb6c6aa6a7", - "sha256": "b0b2a7f37e76fcbab20a861fdf65c34d7ac2ca84a5190d204dfe5e1c50fb383e" + "rev": "b5192581ccd35b67fe5a1f795ead9cbcd25956d6", + "sha256": "771041412d2af4eb681262ca525bce1a87c199b631e17b55e1d7f9abb2cde3e6" }, "root": { - "rev": "c494c2af8bfc70a6501448dc73bf806477388738", - "sha256": "657010591182ce758c86f020d1eade5f7a188072cf0de9c41e2f9d577849c964" + "rev": "b5192581ccd35b67fe5a1f795ead9cbcd25956d6", + "sha256": "d3c702648ca4fb232f349bad7533c400c474a528abf62c05d4b100b4cdb91ce2" }, "sns-wasm": { "rev": "25c1bb0227d9970f5673b908817d7c4962b29911", @@ -84,8 +84,8 @@ "sha256": "f94cf1db965b7042197e5894fef54f5f413bb2ebc607ff0fb59c9d4dfd3babea" }, "sns_governance": { - "rev": "25c1bb0227d9970f5673b908817d7c4962b29911", - "sha256": "51fd3d1a529f3f7bad808b19074e761ce3538282ac8189bd7067b4156360c279" + "rev": "df7d443e6219c462b305152b63ca265171feb6ee", + "sha256": "bd936ef6bb878df87856a0b0c46034a242a88b7f1eeff5439daf6278febca6b7" }, "sns_index": { "rev": "2190613d3b5bcd9b74c382b22d151580b8ac271a", From 66ff2341e84649a79c5276521908fe1ccdccfb4e Mon Sep 17 00:00:00 2001 From: max-dfinity <100170574+max-dfinity@users.noreply.github.com> Date: Mon, 13 Jan 2025 15:18:29 -0800 Subject: [PATCH 04/33] feat(nns-tools): nns_claim_or_refresh helper (#3424) Add a simple way to claim or refresh a neuron (for reference) --- testnet/tools/nns-tools/lib/include.sh | 1 + testnet/tools/nns-tools/lib/nns_neurons.sh | 32 ++++++++++++++++++++++ 2 files changed, 33 insertions(+) create mode 100644 testnet/tools/nns-tools/lib/nns_neurons.sh diff --git a/testnet/tools/nns-tools/lib/include.sh b/testnet/tools/nns-tools/lib/include.sh index 3ecb2f7aecc..242fdff03c4 100644 --- a/testnet/tools/nns-tools/lib/include.sh +++ 
b/testnet/tools/nns-tools/lib/include.sh
@@ -24,6 +24,7 @@ source "$LIB_DIR/canisters.sh"
 source "$LIB_DIR/constants.sh"
 source "$LIB_DIR/functions.sh"
 source "$LIB_DIR/installers.sh"
+source "$LIB_DIR/nns_neurons.sh"
 source "$LIB_DIR/proposals.sh"
 source "$LIB_DIR/sns_upgrades.sh"
 source "$LIB_DIR/topology.sh"
diff --git a/testnet/tools/nns-tools/lib/nns_neurons.sh b/testnet/tools/nns-tools/lib/nns_neurons.sh
new file mode 100644
index 00000000000..f73edf268cb
--- /dev/null
+++ b/testnet/tools/nns-tools/lib/nns_neurons.sh
@@ -0,0 +1,32 @@
+##: nns_claim_or_refresh
+## Usage: $1 <NETWORK> <NEURON_ID>
+## Claim or refresh an NNS neuron with a particular ID
+## NETWORK: The network to use.
+## NEURON_ID: The neuron id to claim or refresh.
+## Example: nns_claim_or_refresh ic 1234567890
+nns_claim_or_refresh() {
+    local network=$1
+    local neuron_id=$2
+
+    local IC=$(repo_root)
+    local GOV_DID="$IC/rs/nns/governance/canister/governance.did"
+
+    dfx canister \
+        --network "$network" \
+        call \
+        --candid "$GOV_DID" \
+        rrkah-fqaaa-aaaaa-aaaaq-cai \
+        manage_neuron "(
+            record {
+                id = opt record { id = ${neuron_id}: nat64 };
+                command = opt variant {
+                    ClaimOrRefresh = record {
+                        controller = null;
+                        by = opt variant {
+                            NeuronIdOrSubaccount = record { }
+                        }
+                    }
+                }
+            }
+        )"
+}

From 16ee8b23a6af62a6ccbe9fbb37dfb8a0743be8bf Mon Sep 17 00:00:00 2001
From: jasonz-dfinity <133917836+jasonz-dfinity@users.noreply.github.com>
Date: Mon, 13 Jan 2025 17:06:41 -0800
Subject: [PATCH 05/33] chore(nns): Add a comment on
 MAX_NEURONS_FUND_PARTICIPANTS about its effect on instructions (#3426)

In the analysis of the safety of migrating neurons from heap to stable
memory, one risk is that the
`draw_maturity_from_neurons_fund`/`refund_maturity_to_neurons_fund` calls can
take more instructions, since reading from stable structures is more
expensive. Therefore, the safety of such operations starts to depend on how
many neurons are involved in those operations. Adding a comment should help
prevent any increase of this constant without considering its implications on
the instruction cost.

---
 rs/nns/governance/src/governance.rs           | 12 ++++++++----
 rs/nns/governance/src/neuron_store/benches.rs |  3 +++
 2 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/rs/nns/governance/src/governance.rs b/rs/nns/governance/src/governance.rs
index c05c981f953..48a951b8349 100644
--- a/rs/nns/governance/src/governance.rs
+++ b/rs/nns/governance/src/governance.rs
@@ -251,10 +251,14 @@ const NODE_PROVIDER_REWARD_PERIOD_SECONDS: u64 = 2629800;
 
 const VALID_MATURITY_MODULATION_BASIS_POINTS_RANGE: RangeInclusive<i32> = -500..=500;
 
-/// Maximum allowed number of Neurons' Fund participants that may participate in an SNS swap.
-/// Given the maximum number of SNS neurons per swap participant (a.k.a. neuron basket count),
-/// this constant can be used to obtain an upper bound for the number of SNS neurons created
-/// for the Neurons' Fund participants. See also `MAX_SNS_NEURONS_PER_BASKET`.
+/// Maximum allowed number of Neurons' Fund participants that may participate in an SNS swap. Given
+/// the maximum number of SNS neurons per swap participant (a.k.a. neuron basket count), this
+/// constant can be used to obtain an upper bound for the number of SNS neurons created for the
+/// Neurons' Fund participants. See also `MAX_SNS_NEURONS_PER_BASKET`. In addition, this constant
+/// also affects the upper bound of instructions needed to draw/refund maturity from/to the Neurons'
+/// Fund, so before increasing this constant, the impact on the instructions used by
+/// `CreateServiceNervousSystem` proposal execution also needs to be evaluated (currently, each
+/// neuron takes ~120K instructions to draw/refund maturity, so the total is ~600M).
 pub const MAX_NEURONS_FUND_PARTICIPANTS: u64 = 5_000;
 
 impl NetworkEconomics {
diff --git a/rs/nns/governance/src/neuron_store/benches.rs b/rs/nns/governance/src/neuron_store/benches.rs
index 95e2da2699b..ca93870f5c1 100644
--- a/rs/nns/governance/src/neuron_store/benches.rs
+++ b/rs/nns/governance/src/neuron_store/benches.rs
@@ -404,6 +404,9 @@ fn draw_maturity_from_neurons_fund_stable() -> BenchResult {
     let mut rng = new_rng();
     let mut neuron_store = NeuronStore::new(BTreeMap::new());
     let mut neurons_fund_neurons = BTreeSet::new();
+    // When extrapolating to `MAX_NEURONS_FUND_PARTICIPANTS` (5K) neurons, the current performance
+    // of 12M instructions (as of the time of writing) becomes 600M instructions. This is relatively
+    // small compared to the instruction limit of 50B (or the 40B limit for application subnets).
     for _ in 0..100 {
         let neuron = new_neuron_builder(&mut rng, NeuronLocation::Heap, NeuronSize::Typical)
             .with_maturity_e8s_equivalent(2_000_000_000)

From 8bfa3c4f897bb6136363bcdaae5d4544265f29e9 Mon Sep 17 00:00:00 2001
From: Andre Popovitch
Date: Tue, 14 Jan 2025 00:26:05 -0600
Subject: [PATCH 06/33] feat(sns): add release runscript to replace runbook in
 notion (#3430)

This PR introduces a simple interactive "runscript" version of our NNS release
runbook. A runscript is what I call a program that walks you through a
procedure step-by-step, waiting for confirmation before showing the next step.

Runscripts have one big advantage over runbooks: They're easy to automate
incrementally. Just replace "press enter to continue" with actual automation
for a specific step.

This first version is intentionally simple - it just shows the steps and waits
for the user to press enter. But it provides a foundation we can build on to
gradually automate pieces of the release process.
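To make the incremental-automation idea concrete, here is a rough sketch of
where this could go. It is not part of this change, and the `automate` field
and helper names are hypothetical: a step carries an optional automation hook,
and the press-enter prompt is the fallback whenever the hook is absent or
fails.

```rust
use std::io::{self, Write};

struct Step {
    title: &'static str,
    description: &'static str,
    // `None` keeps today's behavior (show text, wait for Enter);
    // `Some(..)` means the step has been automated.
    automate: Option<fn() -> Result<(), String>>,
}

fn run_step(step: &Step) {
    println!("Step: {}", step.title);
    println!("{}", step.description);
    match step.automate {
        Some(automation) => match automation() {
            Ok(()) => println!("(step completed automatically)"),
            Err(e) => println!("(automation failed: {e}; finish this step manually)"),
        },
        None => {
            print!("Press Enter to continue...");
            io::stdout().flush().unwrap();
            let mut line = String::new();
            io::stdin().read_line(&mut line).unwrap();
        }
    }
}

fn main() {
    let steps = [
        Step {
            title: "A still-manual step",
            description: "Follow the runbook text.",
            automate: None,
        },
        Step {
            title: "An automated step",
            description: "No human needed anymore.",
            automate: Some(|| Ok(())),
        },
    ];
    for step in &steps {
        run_step(step);
    }
}
```

Under this sketch, steps could be migrated one at a time from `automate: None`
to real implementations without changing the runscript's overall flow.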
---
 Cargo.lock                                    |  22 ++
 Cargo.toml                                    |   1 +
 .../tools/release-runscript/BUILD.bazel       |  31 +++
 .../tools/release-runscript/Cargo.toml        |  29 +++
 .../tools/release-runscript/src/main.rs       | 210 ++++++++++++++++++
 5 files changed, 293 insertions(+)
 create mode 100644 rs/nervous_system/tools/release-runscript/BUILD.bazel
 create mode 100644 rs/nervous_system/tools/release-runscript/Cargo.toml
 create mode 100644 rs/nervous_system/tools/release-runscript/src/main.rs

diff --git a/Cargo.lock b/Cargo.lock
index ea77ce2b3f1..330163c5345 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -18429,6 +18429,28 @@ version = "1.9.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2"
 
+[[package]]
+name = "release-runscript"
+version = "0.9.0"
+dependencies = [
+ "anyhow",
+ "candid",
+ "colored",
+ "futures",
+ "ic-agent",
+ "ic-base-types",
+ "ic-nervous-system-agent",
+ "ic-nervous-system-clients",
+ "ic-nervous-system-common-test-keys",
+ "ic-nns-common",
+ "ic-nns-constants",
+ "rgb",
+ "serde",
+ "serde_json",
+ "tempfile",
+ "tokio",
+]
+
 [[package]]
 name = "rend"
 version = "0.4.2"
diff --git a/Cargo.toml b/Cargo.toml
index 521019dae7b..de382d1e46c 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -197,6 +197,7 @@ members = [
     "rs/nervous_system/runtime",
     "rs/nervous_system/string",
     "rs/nervous_system/temporary",
+    "rs/nervous_system/tools/release-runscript",
     "rs/nervous_system/tools/sync-with-released-nervous-system-wasms",
     "rs/nns/constants",
     "rs/nns/common",
diff --git a/rs/nervous_system/tools/release-runscript/BUILD.bazel b/rs/nervous_system/tools/release-runscript/BUILD.bazel
new file mode 100644
index 00000000000..dcdefb981ec
--- /dev/null
+++ b/rs/nervous_system/tools/release-runscript/BUILD.bazel
@@ -0,0 +1,31 @@
+load("@rules_rust//rust:defs.bzl", "rust_binary")
+
+package(default_visibility = ["//visibility:public"])
+
+# See rs/nervous_system/feature_test.md
+DEPENDENCIES = [
+    # Keep sorted.
+ "//rs/nervous_system/agent", + "//rs/nervous_system/clients", + "//rs/nns/common", + "//rs/nns/constants", + "//rs/types/base_types", + "@crate_index//:anyhow", + "@crate_index//:candid", + "@crate_index//:colored", + "@crate_index//:futures", + "@crate_index//:ic-agent", + "@crate_index//:rgb", + "@crate_index//:serde", + "@crate_index//:serde_json", + "@crate_index//:tempfile", + "@crate_index//:tokio", +] + +rust_binary( + name = "release-runscript", + srcs = [ + "src/main.rs", + ], + deps = DEPENDENCIES, +) diff --git a/rs/nervous_system/tools/release-runscript/Cargo.toml b/rs/nervous_system/tools/release-runscript/Cargo.toml new file mode 100644 index 00000000000..e603ef296ee --- /dev/null +++ b/rs/nervous_system/tools/release-runscript/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "release-runscript" +version.workspace = true +authors.workspace = true +edition.workspace = true +description.workspace = true +documentation.workspace = true + +[[bin]] +name = "release-runscript" +path = "src/main.rs" + +[dependencies] +anyhow = { workspace = true } +candid = { workspace = true } +colored = "2.0.0" +futures = { workspace = true } +ic-agent = { workspace = true } +ic-base-types = { path = "../../../types/base_types" } +ic-nervous-system-agent = { path = "../../agent" } +ic-nervous-system-clients = { path = "../../clients" } +ic-nervous-system-common-test-keys = { path = "../../common/test_keys" } +ic-nns-common = { path = "../../../nns/common" } +ic-nns-constants = { path = "../../../nns/constants" } +rgb = "0.8.37" +serde = { workspace = true } +serde_json = { workspace = true } +tempfile = { workspace = true } +tokio = { workspace = true } diff --git a/rs/nervous_system/tools/release-runscript/src/main.rs b/rs/nervous_system/tools/release-runscript/src/main.rs new file mode 100644 index 00000000000..952e4a9945d --- /dev/null +++ b/rs/nervous_system/tools/release-runscript/src/main.rs @@ -0,0 +1,210 @@ +use colored::*; +use std::io::{self, Write}; + +struct Step { + title: &'static str, + description: &'static str, +} + +fn main() { + let steps = vec![ + Step { + title: "Pick Release Candidate Commit", + description: "Run `./testnet/tools/nns-tools/cmd.sh latest_commit_with_prebuilt_artifacts`. +If you would like to pick a different commit, follow these steps: +2. Go to https://github.com/dfinity/ic/actions/workflows/ci-main.yml?query=branch%3Amaster+event%3Apush+is%3Asuccess +3. Find a recent commit with passing CI Main in the master branch +4. Record this commit (e.g., post to Slack) + +Pre-built artifacts check: +- Install aws tool if needed +- List available files: + aws s3 ls --no-sign-request s3://dfinity-download-public/ic/${COMMIT}/canisters/ +- Note: Our tools download from the analogous https://download.dfinity.systems/... URL", + }, + Step { + title: "Determine Upgrade Targets", + description: "Determine which NNS canisters and/or SNS WASMs need to be upgraded/published. +Only those with 'interesting' changes need to be released. + +Required checks: +1. Run: ./testnet/tools/nns-tools/list-new-commits.sh +2. 
+   https://docs.google.com/document/d/1CPM1RlMz6UMSUQzqvdP7EDiLMomK4YeuEV7UnxQ9DAE/edit
+
+For SNS ledger suite (ledger, archive, and index canisters):
+- Consult Financial Integrations team
+- FI team should contact NNS team Friday morning about significant changes
+- FI team should provide the 'Features' section of proposals
+- This agreement is new - you may need to remind them
+- This applies to ledger, archive, and index canisters",
+        },
+        Step {
+            title: "Run NNS Upgrade Tests",
+            description: "Verify the commit you chose at the previous step has a green check on this page: https://github.com/dfinity/ic/actions/workflows/ci-main.yml?query=branch:master+event:push+is:success
+
+If not, you can also run the upgrade tests manually:
+    - Follow instructions in: testnet/tools/nns-tools/README.md#upgrade-testing-via-bazel
+
+SNS Testing Note:
+    - No manual testing needed for SNS
+    - Covered by sns_release_qualification in CI
+    - Example: Test at rs/nervous_system/integration_tests/tests/sns_release_qualification.rs",
+        },
+        Step {
+            title: "Create Proposal Texts",
+            description: "Create proposal text for each canister to be upgraded.
+This can be done in parallel with the previous testing step.
+
+Instructions:
+1. Follow format in: testnet/tools/nns-tools/README.md#nnssns-canister-upgrade-proposal-process
+2. Name conventions:
+   - NNS proposals: nns-*.md
+   - SNS proposals: sns-*.md
+3. Organization:
+   - Put all proposal files in a dedicated directory
+   - Keep directory clean (nothing else in there)
+   - This will help with forum post generation later",
+        },
+        Step {
+            title: "Submit Proposals",
+            description: "Submit the proposals on Friday
+
+Follow detailed instructions at:
+testnet/tools/nns-tools/README.md#submit-the-proposals",
+        },
+        Step {
+            title: "Create Forum Post",
+            description: "Create a forum post with the following specifications:
+
+1. Title Format:
+   'NNS Updates (: )'
+
+2. Category:
+   Governance > NNS proposal discussion
+   Reference: https://forum.dfinity.org/t/nns-proposal-discussions/34492
+
+3. Tags:
+   - Protocol-canister-management / Service-nervous-system-management
+   - nns / sns
+
+4. Content:
+   - Link to proposals in IC Dashboard
+   - Include all proposal texts
+   - Use six consecutive backticks (``````) to wrap proposal text
+   - Call out any 'interesting' changes, breaking changes, or required actions
+
+5. Generate Forum Content:
+   If your proposals are in a dedicated directory:
+
+   For NNS upgrades:
+   ```bash
+   ./testnet/tools/nns-tools/cmd.sh \\
+       generate_forum_post_nns_upgrades \\
+       $PROPOSALS_DIR/nns-*.md \\
+       | pbcopy
+   ```
+
+   For SNS WASM publishing:
+   ```bash
+   ./testnet/tools/nns-tools/cmd.sh \\
+       generate_forum_post_sns_wasm_publish \\
+       $PROPOSALS_DIR/sns-*.md \\
+       | pbcopy
+   ```
+
+6. Required Follow-ups:
+   - Reply to NNS Updates Aggregation Thread (https://forum.dfinity.org/t/nns-updates-aggregation-thread/23551)
+   - If SNS canister WASMs were published, update SNS Upgrades Aggregation Thread
+     (https://forum.dfinity.org/t/sns-upgrade-aggregation-thread/24259/2)",
+        },
+        Step {
+            title: "Schedule Trusted Neurons Vote",
+            description: "Schedule calendar event for Trusted Neurons to vote the following Monday.
+
+Calendar Event Setup:
+1. Duplicate a past event from:
+   https://calendar.google.com/calendar/u/0/r/eventedit/duplicate/MjJvMTdva2xtdGJuZDhoYjRjN2poZzNwM2ogY182NGYwZDdmZDYzYjNlMDYxZjE1Zjk2MTU1NWYzMmFiN2EyZmY3M2NjMWJmM2Q3ZTRkNGI3NGVjYjk1ZWVhM2M0QGc
+
+2. Use 'NNS Upgrades' calendar
+
+3. Timing:
+   - Usually scheduled at 6 pm Central European Time
+   - For multiple proposals, schedule separate sequential events
+
+4. Required Fields:
+   - Date and Time
+   - Title: Include canister name and proposal ID
+   - Description: Link to the proposal
+
+5. Actions:
+   - Click 'Save' to create event
+   - Send email invitations when prompted
+   - If people don't respond, ping @trusted-neurons in #eng-release channel",
+        },
+        Step {
+            title: "Update Mainnet Canisters",
+            description: "After proposal execution, update mainnet-canisters.json:
+
+1. Run the sync command:
+   bazel run //rs/nervous_system/tools/sync-with-released-nevous-system-wasms
+
+   Note: If you encounter problems, try adding --config=local
+
+2. Purpose of these changes:
+   - Tells bazel what versions are running in production
+   - Used by tests to verify upgrade compatibility
+   - Maintains build hermeticity
+
+3. Note on automation:
+   - There was a ticket for automating this (NNS1-2201)
+   - Currently marked as won't do",
+        },
+        Step {
+            title: "Update Changelog",
+            description: "Update CHANGELOG.md file(s) for each proposal:
+
+1. For each proposal ID:
+   ```bash
+   PROPOSAL_IDS=...
+
+   for PROPOSAL_ID in $PROPOSAL_IDS; do
+       ./testnet/tools/nns-tools/add-release-to-changelog.sh \\
+           $PROPOSAL_ID
+   done
+   ```
+
+2. Best Practice:
+   - Combine this change with the mainnet-canisters.json update in the same PR",
+        },
+    ];
+
+    println!("{}", "\nNNS Release Runscript".bright_green().bold());
+    println!("{}", "===================".bright_green());
+    println!("This script will guide you through the NNS release process.\n");
+
+    for (index, step) in steps.iter().enumerate() {
+        print_step(index + 1, step);
+
+        print!("\nPress Enter to continue to next step...");
+        io::stdout().flush().unwrap();
+        let mut input = String::new();
+        io::stdin().read_line(&mut input).unwrap();
+
+        // Clear screen for next step
+        print!("\x1B[2J\x1B[1;1H");
+    }
+
+    println!("{}", "\nRelease process complete!".bright_green().bold());
+    println!("Please verify that all steps were completed successfully.");
+}
+
+fn print_step(number: usize, step: &Step) {
+    println!(
+        "{} {}",
+        format!("Step {}:", number).bright_blue().bold(),
+        step.title.white().bold()
+    );
+    println!("{}", "---".bright_blue());
+    println!("{}\n", step.description);
+}
From d6bb598cfcfa5565f9dcdaa6891669e394d2c08b Mon Sep 17 00:00:00 2001
From: maciejdfinity <122265298+maciejdfinity@users.noreply.github.com>
Date: Tue, 14 Jan 2025 09:20:23 +0100
Subject: [PATCH 07/33] test(ICRC_Ledger): canbench benchmarks for
 icrc2_approve, icrc2_transfer_from and icrc3_get_blocks (#3400)

Scopes for `icrc2_approve`, `icrc2_transfer_from` and `icrc3_get_blocks` were
added. Scope `before_upgrade` was renamed to `icrc1_transfer`. Since we now
test with more approvals, balances and blocks, the benchmark files need to be
completely regenerated.
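As background for reading the regenerated YAML files: canbench attributes instruction counts to named scopes, and the pattern used by the updated benchmarks looks roughly like the sketch below. This is a minimal illustration assuming the `canbench_rs` API as used in this patch; `my_operation` is a hypothetical stand-in for the ledger endpoints.

```rust
use canbench_rs::{bench, BenchResult};

// Hypothetical stand-in for icrc1_transfer / icrc2_approve / icrc2_transfer_from.
fn my_operation(i: u32) {
    std::hint::black_box(i);
}

#[bench(raw)]
fn bench_my_operations() -> BenchResult {
    canbench_rs::bench_fn(|| {
        {
            // Instructions executed while `_p` is alive are reported under the
            // "my_operation" scope in the canbench YAML results.
            let _p = canbench_rs::bench_scope("my_operation");
            for i in 0..10_000 {
                my_operation(i);
            }
        }
        // Work outside any scope still counts toward the benchmark total.
    })
}
```

Renaming the `before_upgrade` scope to `icrc1_transfer` therefore only changes the key under which those instructions are reported; the larger totals in the files below come from the newly added approve, transfer-from, and get-blocks work.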
--- .../ledger/canbench_results/canbench_u256.yml | 42 +++++--- .../ledger/canbench_results/canbench_u64.yml | 42 +++++--- .../icrc1/ledger/src/benches/benches_u256.rs | 96 ++++++++++++++----- .../icrc1/ledger/src/benches/benches_u64.rs | 93 ++++++++++++++---- .../icrc1/ledger/src/benches/mod.rs | 22 ++++- rs/ledger_suite/icrc1/ledger/src/main.rs | 16 +++- 6 files changed, 230 insertions(+), 81 deletions(-) diff --git a/rs/ledger_suite/icrc1/ledger/canbench_results/canbench_u256.yml b/rs/ledger_suite/icrc1/ledger/canbench_results/canbench_u256.yml index 4fe24655c4a..27a1b10cf2e 100644 --- a/rs/ledger_suite/icrc1/ledger/canbench_results/canbench_u256.yml +++ b/rs/ledger_suite/icrc1/ledger/canbench_results/canbench_u256.yml @@ -1,42 +1,54 @@ benches: bench_icrc1_transfers: total: - instructions: 13798815361 - heap_increase: 172 - stable_memory_increase: 128 + instructions: 58832813772 + heap_increase: 271 + stable_memory_increase: 256 scopes: - before_upgrade: - instructions: 13630531777 + icrc1_transfer: + instructions: 13635001695 heap_increase: 43 stable_memory_increase: 0 - post_upgrade: - instructions: 118601062 + icrc2_approve: + instructions: 20413485760 + heap_increase: 41 + stable_memory_increase: 128 + icrc2_transfer_from: + instructions: 24042619927 + heap_increase: 5 + stable_memory_increase: 0 + icrc3_get_blocks: + instructions: 7877258 heap_increase: 0 stable_memory_increase: 0 + post_upgrade: + instructions: 354777092 + heap_increase: 53 + stable_memory_increase: 0 pre_upgrade: - instructions: 49679478 + instructions: 149556765 heap_increase: 129 stable_memory_increase: 128 upgrade: - instructions: 168282130 - heap_increase: 129 + instructions: 504335921 + heap_increase: 182 stable_memory_increase: 128 bench_upgrade_baseline: total: - instructions: 8684974 + instructions: 8684182 heap_increase: 258 stable_memory_increase: 128 scopes: post_upgrade: - instructions: 8606422 + instructions: 8605997 heap_increase: 129 stable_memory_increase: 0 pre_upgrade: - instructions: 76145 + instructions: 75778 heap_increase: 129 stable_memory_increase: 128 upgrade: - instructions: 8684285 + instructions: 8683493 heap_increase: 258 stable_memory_increase: 128 -version: 0.1.7 +version: 0.1.8 diff --git a/rs/ledger_suite/icrc1/ledger/canbench_results/canbench_u64.yml b/rs/ledger_suite/icrc1/ledger/canbench_results/canbench_u64.yml index 7e7164f34e5..fd17caa5c7c 100644 --- a/rs/ledger_suite/icrc1/ledger/canbench_results/canbench_u64.yml +++ b/rs/ledger_suite/icrc1/ledger/canbench_results/canbench_u64.yml @@ -1,42 +1,54 @@ benches: bench_icrc1_transfers: total: - instructions: 13237283790 - heap_increase: 171 - stable_memory_increase: 128 + instructions: 56837535933 + heap_increase: 271 + stable_memory_increase: 256 scopes: - before_upgrade: - instructions: 13068913917 + icrc1_transfer: + instructions: 13071791984 heap_increase: 42 stable_memory_increase: 0 - post_upgrade: - instructions: 118797275 + icrc2_approve: + instructions: 19627513485 + heap_increase: 29 + stable_memory_increase: 128 + icrc2_transfer_from: + instructions: 23404082941 + heap_increase: 18 + stable_memory_increase: 0 + icrc3_get_blocks: + instructions: 7540214 heap_increase: 0 stable_memory_increase: 0 + post_upgrade: + instructions: 353134588 + heap_increase: 53 + stable_memory_increase: 0 pre_upgrade: - instructions: 49569466 + instructions: 149315334 heap_increase: 129 stable_memory_increase: 128 upgrade: - instructions: 168368331 - heap_increase: 129 + instructions: 502451986 + heap_increase: 182 stable_memory_increase: 
128 bench_upgrade_baseline: total: - instructions: 8686052 + instructions: 8683414 heap_increase: 258 stable_memory_increase: 128 scopes: post_upgrade: - instructions: 8606533 + instructions: 8604304 heap_increase: 129 stable_memory_increase: 0 pre_upgrade: - instructions: 77112 + instructions: 76703 heap_increase: 129 stable_memory_increase: 128 upgrade: - instructions: 8685363 + instructions: 8682725 heap_increase: 258 stable_memory_increase: 128 -version: 0.1.7 +version: 0.1.8 diff --git a/rs/ledger_suite/icrc1/ledger/src/benches/benches_u256.rs b/rs/ledger_suite/icrc1/ledger/src/benches/benches_u256.rs index c142d92fcc8..2291972e9bd 100644 --- a/rs/ledger_suite/icrc1/ledger/src/benches/benches_u256.rs +++ b/rs/ledger_suite/icrc1/ledger/src/benches/benches_u256.rs @@ -1,16 +1,18 @@ use crate::{ benches::{ - assert_has_num_balances, emulate_archive_blocks, icrc1_transfer, max_length_principal, - mint_tokens, upgrade, NUM_TRANSFERS, + assert_has_num_balances, emulate_archive_blocks, icrc_transfer, mint_tokens, test_account, + test_account_offset, upgrade, NUM_GET_BLOCKS, NUM_OPERATIONS, }, - init_state, Access, Account, LOG, + icrc2_approve_not_async, icrc3_get_blocks, init_state, Access, Account, LOG, }; use assert_matches::assert_matches; use canbench_rs::{bench, BenchResult}; -use candid::Principal; +use candid::{Nat, Principal}; use ic_icrc1_ledger::{FeatureFlags, InitArgs, InitArgsBuilder}; use ic_ledger_canister_core::archive::ArchiveOptions; use icrc_ledger_types::icrc1::transfer::TransferArg; +use icrc_ledger_types::icrc2::approve::ApproveArgs; +use icrc_ledger_types::icrc3::blocks::GetBlocksRequest; const MINTER_PRINCIPAL: Principal = Principal::from_slice(&[0_u8, 0, 0, 0, 2, 48, 0, 156, 1, 1]); @@ -31,22 +33,74 @@ fn bench_icrc1_transfers() -> BenchResult { canbench_rs::bench_fn(|| { { - let _p = canbench_rs::bench_scope("before_upgrade"); - for i in 0..NUM_TRANSFERS { + let _p = canbench_rs::bench_scope("icrc1_transfer"); + for i in 0..NUM_OPERATIONS { let transfer = TransferArg { from_subaccount: account_with_tokens.subaccount, - to: Account { - owner: max_length_principal(i), - subaccount: Some([11_u8; 32]), - }, + to: test_account(i), created_at_time: Some(start_time + i as u64), ..cketh_transfer() }; - let result = icrc1_transfer(account_with_tokens.owner, transfer.clone()); + let result = icrc_transfer(account_with_tokens.owner, None, transfer.clone()); assert_matches!(result, Ok(_)); emulate_archive_blocks::(&LOG); } - assert_has_num_balances(NUM_TRANSFERS + 2); + assert_has_num_balances(NUM_OPERATIONS + 2); + } + { + let _p = canbench_rs::bench_scope("icrc2_approve"); + for i in 0..NUM_OPERATIONS { + let approve = ApproveArgs { + from_subaccount: account_with_tokens.subaccount, + spender: test_account(i), + created_at_time: Some(start_time + i as u64), + amount: u128::MAX.into(), + expected_allowance: Some(0u64.into()), + expires_at: Some(u64::MAX), + fee: None, + memo: Some(MEMO.to_vec().into()), + }; + let result = icrc2_approve_not_async(account_with_tokens.owner, approve.clone()); + assert_matches!(result, Ok(_)); + emulate_archive_blocks::(&LOG); + } + } + { + let _p = canbench_rs::bench_scope("icrc2_transfer_from"); + for i in 0..NUM_OPERATIONS { + let spender = test_account(i); + let transfer = TransferArg { + from_subaccount: account_with_tokens.subaccount, + to: test_account_offset(i), + created_at_time: Some(start_time + i as u64), + ..cketh_transfer() + }; + let result = + icrc_transfer(account_with_tokens.owner, Some(spender), transfer.clone()); + 
assert_matches!(result, Ok(_)); + emulate_archive_blocks::(&LOG); + } + assert_has_num_balances(2 * NUM_OPERATIONS + 2); + } + for i in 0..NUM_GET_BLOCKS { + let spender = test_account(i); + let transfer = TransferArg { + from_subaccount: account_with_tokens.subaccount, + to: test_account_offset(i), + created_at_time: Some(1_000_000_000 + start_time + i as u64), + ..cketh_transfer() + }; + let result = icrc_transfer(account_with_tokens.owner, Some(spender), transfer.clone()); + assert_matches!(result, Ok(_)); + } + { + let req = GetBlocksRequest { + start: Nat::from(3 * NUM_OPERATIONS), + length: Nat::from(NUM_GET_BLOCKS), + }; + let _p = canbench_rs::bench_scope("icrc3_get_blocks"); + let blocks_res = icrc3_get_blocks(vec![req]); + assert_eq!(blocks_res.blocks.len(), NUM_GET_BLOCKS as usize); } upgrade(); }) @@ -85,6 +139,13 @@ fn cketh_ledger_init_args_with_archive() -> InitArgs { .build() } +const MEMO: [u8; 61] = [ + 0x82_u8, 0x00, 0x83, 0x54, 0x04, 0xc5, 0x63, 0x84, 0x17, 0x78, 0xc9, 0x3f, 0x41, 0xdc, 0x1a, + 0x89, 0x82, 0x1a, 0xe1, 0xc6, 0x75, 0xbb, 0xe8, 0x15, 0x58, 0x20, 0xb5, 0xa1, 0x01, 0xfb, 0x96, + 0xc5, 0xcf, 0x22, 0x4d, 0xf0, 0xd5, 0x02, 0x9b, 0x56, 0xbe, 0x81, 0xfc, 0x65, 0xce, 0x61, 0xf8, + 0x99, 0x11, 0xb7, 0x71, 0x23, 0x27, 0x8a, 0xe7, 0xf4, 0x67, 0xb7, 0x19, 0x01, 0x2c, +]; + /// ckETH ledger transaction 495542 fn cketh_transfer() -> TransferArg { TransferArg { @@ -98,16 +159,7 @@ fn cketh_transfer() -> TransferArg { }, fee: None, created_at_time: None, - memo: Some( - vec![ - 0x82_u8, 0x00, 0x83, 0x54, 0x04, 0xc5, 0x63, 0x84, 0x17, 0x78, 0xc9, 0x3f, 0x41, - 0xdc, 0x1a, 0x89, 0x82, 0x1a, 0xe1, 0xc6, 0x75, 0xbb, 0xe8, 0x15, 0x58, 0x20, 0xb5, - 0xa1, 0x01, 0xfb, 0x96, 0xc5, 0xcf, 0x22, 0x4d, 0xf0, 0xd5, 0x02, 0x9b, 0x56, 0xbe, - 0x81, 0xfc, 0x65, 0xce, 0x61, 0xf8, 0x99, 0x11, 0xb7, 0x71, 0x23, 0x27, 0x8a, 0xe7, - 0xf4, 0x67, 0xb7, 0x19, 0x01, 0x2c, - ] - .into(), - ), + memo: Some(MEMO.to_vec().into()), amount: 19_998_200_000_000_000_000_u128.into(), } } diff --git a/rs/ledger_suite/icrc1/ledger/src/benches/benches_u64.rs b/rs/ledger_suite/icrc1/ledger/src/benches/benches_u64.rs index b3fbce97763..183b545f168 100644 --- a/rs/ledger_suite/icrc1/ledger/src/benches/benches_u64.rs +++ b/rs/ledger_suite/icrc1/ledger/src/benches/benches_u64.rs @@ -1,15 +1,17 @@ use crate::benches::{ - assert_has_num_balances, emulate_archive_blocks, icrc1_transfer, max_length_principal, - mint_tokens, upgrade, NUM_TRANSFERS, + assert_has_num_balances, emulate_archive_blocks, icrc_transfer, mint_tokens, test_account, + test_account_offset, upgrade, NUM_GET_BLOCKS, NUM_OPERATIONS, }; -use crate::{init_state, Access, LOG}; +use crate::{icrc2_approve_not_async, icrc3_get_blocks, init_state, Access, LOG}; use assert_matches::assert_matches; use canbench_rs::{bench, BenchResult}; -use candid::Principal; +use candid::{Nat, Principal}; use ic_icrc1_ledger::{FeatureFlags, InitArgs, InitArgsBuilder}; use ic_ledger_canister_core::archive::ArchiveOptions; use icrc_ledger_types::icrc1::account::Account; use icrc_ledger_types::icrc1::transfer::TransferArg; +use icrc_ledger_types::icrc2::approve::ApproveArgs; +use icrc_ledger_types::icrc3::blocks::GetBlocksRequest; const MINTER_PRINCIPAL: Principal = Principal::from_slice(&[0_u8, 0, 0, 0, 2, 48, 0, 7, 1, 1]); @@ -30,22 +32,74 @@ fn bench_icrc1_transfers() -> BenchResult { canbench_rs::bench_fn(|| { { - let _p = canbench_rs::bench_scope("before_upgrade"); - for i in 0..NUM_TRANSFERS { + let _p = canbench_rs::bench_scope("icrc1_transfer"); + for i in 
0..NUM_OPERATIONS { let transfer = TransferArg { from_subaccount: account_with_tokens.subaccount, - to: Account { - owner: max_length_principal(i), - subaccount: Some([11_u8; 32]), - }, + to: test_account(i), created_at_time: Some(start_time + i as u64), ..ckbtc_transfer() }; - let result = icrc1_transfer(account_with_tokens.owner, transfer.clone()); + let result = icrc_transfer(account_with_tokens.owner, None, transfer.clone()); assert_matches!(result, Ok(_)); emulate_archive_blocks::(&LOG); } - assert_has_num_balances(NUM_TRANSFERS + 2); + assert_has_num_balances(NUM_OPERATIONS + 2); + } + { + let _p = canbench_rs::bench_scope("icrc2_approve"); + for i in 0..NUM_OPERATIONS { + let approve = ApproveArgs { + from_subaccount: account_with_tokens.subaccount, + spender: test_account(i), + created_at_time: Some(start_time + i as u64), + amount: u64::MAX.into(), + expected_allowance: Some(0u64.into()), + expires_at: Some(u64::MAX), + fee: None, + memo: Some(MEMO.to_vec().into()), + }; + let result = icrc2_approve_not_async(account_with_tokens.owner, approve.clone()); + assert_matches!(result, Ok(_)); + emulate_archive_blocks::(&LOG); + } + } + { + let _p = canbench_rs::bench_scope("icrc2_transfer_from"); + for i in 0..NUM_OPERATIONS { + let spender = test_account(i); + let transfer = TransferArg { + from_subaccount: account_with_tokens.subaccount, + to: test_account_offset(i), + created_at_time: Some(start_time + i as u64), + ..ckbtc_transfer() + }; + let result = + icrc_transfer(account_with_tokens.owner, Some(spender), transfer.clone()); + assert_matches!(result, Ok(_)); + emulate_archive_blocks::(&LOG); + } + assert_has_num_balances(2 * NUM_OPERATIONS + 2); + } + for i in 0..NUM_GET_BLOCKS { + let spender = test_account(i); + let transfer = TransferArg { + from_subaccount: account_with_tokens.subaccount, + to: test_account_offset(i), + created_at_time: Some(1_000_000_000 + start_time + i as u64), + ..ckbtc_transfer() + }; + let result = icrc_transfer(account_with_tokens.owner, Some(spender), transfer.clone()); + assert_matches!(result, Ok(_)); + } + { + let req = GetBlocksRequest { + start: Nat::from(3 * NUM_OPERATIONS), + length: Nat::from(NUM_GET_BLOCKS), + }; + let _p = canbench_rs::bench_scope("icrc3_get_blocks"); + let blocks_res = icrc3_get_blocks(vec![req]); + assert_eq!(blocks_res.blocks.len(), NUM_GET_BLOCKS as usize); } upgrade(); }) @@ -84,6 +138,12 @@ fn ckbtc_ledger_init_args_with_archive() -> InitArgs { .build() } +const MEMO: [u8; 41] = [ + 0x82_u8, 0x00, 0x83, 0x58, 0x20, 0x18, 0x19, 0xcc, 0xd2, 0x28, 0xad, 0x2e, 0x83, 0xc6, 0xc8, + 0x63, 0x99, 0xa0, 0xd7, 0xd0, 0x2e, 0xe9, 0x75, 0x96, 0x95, 0x86, 0xf3, 0x47, 0x85, 0xf6, 0xaf, + 0x99, 0x00, 0x1e, 0x08, 0x8b, 0xa0, 0x02, 0x19, 0x07, 0xd0, +]; + /// ckBTC ledger transaction 1604556 fn ckbtc_transfer() -> TransferArg { TransferArg { @@ -97,14 +157,7 @@ fn ckbtc_transfer() -> TransferArg { }, fee: None, created_at_time: None, - memo: Some( - vec![ - 0x82_u8, 0x00, 0x83, 0x58, 0x20, 0x18, 0x19, 0xcc, 0xd2, 0x28, 0xad, 0x2e, 0x83, - 0xc6, 0xc8, 0x63, 0x99, 0xa0, 0xd7, 0xd0, 0x2e, 0xe9, 0x75, 0x96, 0x95, 0x86, 0xf3, - 0x47, 0x85, 0xf6, 0xaf, 0x99, 0x00, 0x1e, 0x08, 0x8b, 0xa0, 0x02, 0x19, 0x07, 0xd0, - ] - .into(), - ), + memo: Some(MEMO.to_vec().into()), amount: 167_708_u32.into(), } } diff --git a/rs/ledger_suite/icrc1/ledger/src/benches/mod.rs b/rs/ledger_suite/icrc1/ledger/src/benches/mod.rs index 68d36334239..bd27b926857 100644 --- a/rs/ledger_suite/icrc1/ledger/src/benches/mod.rs +++ 
b/rs/ledger_suite/icrc1/ledger/src/benches/mod.rs @@ -12,7 +12,8 @@ mod benches_u256; #[cfg(not(feature = "u256-tokens"))] mod benches_u64; -pub const NUM_TRANSFERS: u32 = 10_000; +pub const NUM_OPERATIONS: u32 = 10_000; +pub const NUM_GET_BLOCKS: u32 = 100; pub fn upgrade() { let _p = canbench_rs::bench_scope("upgrade"); @@ -20,8 +21,9 @@ pub fn upgrade() { post_upgrade(None); } -pub fn icrc1_transfer( +pub fn icrc_transfer( from: Principal, + spender: Option, arg: TransferArg, ) -> Result> { let from_account = Account { @@ -31,7 +33,7 @@ pub fn icrc1_transfer( execute_transfer_not_async( from_account, arg.to, - None, + spender, arg.fee, arg.amount, arg.memo, @@ -52,14 +54,26 @@ pub fn max_length_principal(index: u32) -> Principal { Principal::from_slice(&principal) } +pub fn test_account(i: u32) -> Account { + Account { + owner: max_length_principal(i), + subaccount: Some([11_u8; 32]), + } +} + +pub fn test_account_offset(i: u32) -> Account { + test_account(1_000_000_000 + i) +} + fn mint_tokens>(minter: Principal, amount: T) -> Account { let account_with_tokens = Account { owner: max_length_principal(u32::MAX), subaccount: Some([255_u8; 32]), }; assert_matches!( - icrc1_transfer( + icrc_transfer( minter, + None, TransferArg { from_subaccount: None, to: account_with_tokens, diff --git a/rs/ledger_suite/icrc1/ledger/src/main.rs b/rs/ledger_suite/icrc1/ledger/src/main.rs index 18643abf59e..154819b4249 100644 --- a/rs/ledger_suite/icrc1/ledger/src/main.rs +++ b/rs/ledger_suite/icrc1/ledger/src/main.rs @@ -1,8 +1,8 @@ #[cfg(feature = "canbench-rs")] mod benches; -use candid::candid_method; use candid::types::number::Nat; +use candid::{candid_method, Principal}; use ic_canister_log::{declare_log_buffer, export, log}; use ic_canisters_http_types::{HttpRequest, HttpResponse, HttpResponseBuilder}; use ic_cdk::api::stable::StableReader; @@ -810,15 +810,13 @@ fn get_data_certificate() -> DataCertificate { } } -#[update] -#[candid_method(update)] -async fn icrc2_approve(arg: ApproveArgs) -> Result { +fn icrc2_approve_not_async(caller: Principal, arg: ApproveArgs) -> Result { panic_if_not_ready(); let block_idx = Access::with_ledger_mut(|ledger| { let now = TimeStamp::from_nanos_since_unix_epoch(ic_cdk::api::time()); let from_account = Account { - owner: ic_cdk::api::caller(), + owner: caller, subaccount: arg.from_subaccount, }; if from_account.owner == arg.spender.owner { @@ -881,6 +879,14 @@ async fn icrc2_approve(arg: ApproveArgs) -> Result { Ok(block_idx) })?; + Ok(block_idx) +} + +#[update] +#[candid_method(update)] +async fn icrc2_approve(arg: ApproveArgs) -> Result { + let block_idx = icrc2_approve_not_async(ic_cdk::api::caller(), arg)?; + // NB. we need to set the certified data before the first async call to make sure that the // blockchain state agrees with the certificate while archiving is in progress. ic_cdk::api::set_certified_data(&Access::with_ledger(Ledger::root_hash)); From 6b7b92b24a33bb024902cb97cbfc3296441c4020 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mathias=20Bj=C3=B6rkqvist?= Date: Tue, 14 Jan 2025 10:11:22 +0100 Subject: [PATCH 08/33] test(ICRC_Ledger): FI-1043: Verify ICRC ledger and archive block equality (#3404) Verify that the candid `Block` type returned by the ICRC ledger and archive canisters are the same. 
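One detail worth noting in the test below: the `Block` type pulled out of archive.did refers to type names defined in that file, so it must first be merged into the ledger's type environment before a structural comparison can resolve both sides. A minimal, self-contained sketch of the same pattern, using inline candid instead of .did files (the type names here are made up):

```rust
use candid::types::subtype::equal;
use candid_parser::utils::CandidSource;

fn main() {
    let (mut ledger_env, _) = CandidSource::Text(
        "type Value = variant { Nat : nat; Text : text };
         type Block = record { value : Value };",
    )
    .load()
    .unwrap();
    let (archive_env, _) = CandidSource::Text(
        "type Value = variant { Nat : nat; Text : text };
         type Block = record { value : Value };",
    )
    .load()
    .unwrap();

    let ledger_block = ledger_env.find_type("Block").unwrap().to_owned();
    let archive_block = archive_env.find_type("Block").unwrap().to_owned();

    // Rebind the archive's `Block` against the ledger's environment so that
    // its reference to `Value` can be resolved during the comparison.
    let archive_block = ledger_env.merge_type(archive_env, archive_block);

    let mut gamma = std::collections::HashSet::new();
    equal(&mut gamma, &ledger_env, &ledger_block, &archive_block)
        .expect("block types are structurally different");
}
```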
--- rs/ledger_suite/icrc1/archive/BUILD.bazel | 1 + rs/ledger_suite/icrc1/archive/src/main.rs | 28 +++++++++++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/rs/ledger_suite/icrc1/archive/BUILD.bazel b/rs/ledger_suite/icrc1/archive/BUILD.bazel index d21998270ec..b6db17976c0 100644 --- a/rs/ledger_suite/icrc1/archive/BUILD.bazel +++ b/rs/ledger_suite/icrc1/archive/BUILD.bazel @@ -56,6 +56,7 @@ rust_test( crate = ":_wasm_archive_canister", data = [ ":archive.did", + "//rs/ledger_suite/icrc1/ledger:ledger.did", ], env = { "CARGO_MANIFEST_DIR": "rs/ledger_suite/icrc1/archive", diff --git a/rs/ledger_suite/icrc1/archive/src/main.rs b/rs/ledger_suite/icrc1/archive/src/main.rs index a8d8eb3c5e1..f629929c889 100644 --- a/rs/ledger_suite/icrc1/archive/src/main.rs +++ b/rs/ledger_suite/icrc1/archive/src/main.rs @@ -436,3 +436,31 @@ fn check_candid_interface() { ) .expect("the ledger interface is not compatible with archive.did"); } + +#[test] +fn check_archive_and_ledger_block_equality() { + // check that ledger.did and archive.did agree on the block format + let manifest_dir = std::path::PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").unwrap()); + let ledger_did_file = manifest_dir.join("../ledger/ledger.did"); + let archive_did_file = manifest_dir.join("archive.did"); + let mut ledger_env = candid_parser::utils::CandidSource::File(ledger_did_file.as_path()) + .load() + .unwrap() + .0; + let archive_env = candid_parser::utils::CandidSource::File(archive_did_file.as_path()) + .load() + .unwrap() + .0; + let ledger_block_type = ledger_env.find_type("Block").unwrap().to_owned(); + let archive_block_type = archive_env.find_type("Block").unwrap().to_owned(); + + let mut gamma = std::collections::HashSet::new(); + let archive_block_type = ledger_env.merge_type(archive_env, archive_block_type.clone()); + candid::types::subtype::equal( + &mut gamma, + &ledger_env, + &ledger_block_type, + &archive_block_type, + ) + .expect("Ledger and Archive block types are different"); +} From cc125603960c419b0d22ffdbdbf288fa11fc5fb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mathias=20Bj=C3=B6rkqvist?= Date: Tue, 14 Jan 2025 10:42:22 +0100 Subject: [PATCH 09/33] test(ICRC_Index): FI-1042: Verify ICRC ledger and index block equality (#3403) Verify that the candid `Block` type returned by the ICRC ledger and index canisters are the same. --- rs/ledger_suite/icrc1/index-ng/BUILD.bazel | 5 +++- rs/ledger_suite/icrc1/index-ng/src/main.rs | 28 ++++++++++++++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/rs/ledger_suite/icrc1/index-ng/BUILD.bazel b/rs/ledger_suite/icrc1/index-ng/BUILD.bazel index 832eb1a3c60..1c80cd0f10c 100644 --- a/rs/ledger_suite/icrc1/index-ng/BUILD.bazel +++ b/rs/ledger_suite/icrc1/index-ng/BUILD.bazel @@ -79,7 +79,10 @@ rust_library( rust_test( name = "index_ng_unit_test", crate = ":_wasm_index_ng_canister", - data = [":index-ng.did"], + data = [ + ":index-ng.did", + "//rs/ledger_suite/icrc1/ledger:ledger.did", + ], deps = [ # Keep sorted. 
"//rs/ledger_suite/icrc1/test_utils", diff --git a/rs/ledger_suite/icrc1/index-ng/src/main.rs b/rs/ledger_suite/icrc1/index-ng/src/main.rs index 644cd22bdaf..66cd8a876ee 100644 --- a/rs/ledger_suite/icrc1/index-ng/src/main.rs +++ b/rs/ledger_suite/icrc1/index-ng/src/main.rs @@ -1204,3 +1204,31 @@ fn check_candid_interface() { ) }); } + +#[test] +fn check_index_and_ledger_block_equality() { + // check that ledger.did and index-ng.did agree on the block format + let manifest_dir = std::path::PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").unwrap()); + let ledger_did_file = manifest_dir.join("../ledger/ledger.did"); + let index_did_file = manifest_dir.join("index-ng.did"); + let mut ledger_env = candid_parser::utils::CandidSource::File(ledger_did_file.as_path()) + .load() + .unwrap() + .0; + let index_env = candid_parser::utils::CandidSource::File(index_did_file.as_path()) + .load() + .unwrap() + .0; + let ledger_block_type = ledger_env.find_type("Block").unwrap().to_owned(); + let index_block_type = index_env.find_type("Block").unwrap().to_owned(); + + let mut gamma = std::collections::HashSet::new(); + let index_block_type = ledger_env.merge_type(index_env, index_block_type.clone()); + candid::types::subtype::equal( + &mut gamma, + &ledger_env, + &ledger_block_type, + &index_block_type, + ) + .expect("Ledger and Index block types are different"); +} From eb32930c0118638130eb548cd18f332f5225028d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mathias=20Bj=C3=B6rkqvist?= Date: Tue, 14 Jan 2025 10:55:00 +0100 Subject: [PATCH 10/33] test(ICP_Ledger): FI-1036: Add ICP ledger and index encoded block response compatibility (#3398) Add a test that verifies the ICP ledger and index encoded block response type compatibility. Since the response types are not exactly the same, the best we can do is a Candid subtype check. 
---
 rs/ledger_suite/icp/index/BUILD.bazel |  5 +++-
 rs/ledger_suite/icp/index/src/main.rs | 35 +++++++++++++++++++++++++++
 2 files changed, 39 insertions(+), 1 deletion(-)

diff --git a/rs/ledger_suite/icp/index/BUILD.bazel b/rs/ledger_suite/icp/index/BUILD.bazel
index c9acf1f5001..7c11c0c3a05 100644
--- a/rs/ledger_suite/icp/index/BUILD.bazel
+++ b/rs/ledger_suite/icp/index/BUILD.bazel
@@ -79,7 +79,10 @@ rust_test(
 rust_test(
     name = "ic_icp_index_canister_test",
     crate = ":_wasm_ic-icp-index-canister",
-    data = [":index.did"],
+    data = [
+        ":index.did",
+        "//rs/ledger_suite/icp:ledger.did",
+    ],
     env = {
         "CARGO_MANIFEST_DIR": "rs/ledger_suite/icp/index",
     },
diff --git a/rs/ledger_suite/icp/index/src/main.rs b/rs/ledger_suite/icp/index/src/main.rs
index 0867d6ab041..a14dee429f9 100644
--- a/rs/ledger_suite/icp/index/src/main.rs
+++ b/rs/ledger_suite/icp/index/src/main.rs
@@ -751,3 +751,38 @@ fn check_candid_interface_compatibility() {
     )
     .unwrap();
 }
+
+#[test]
+fn check_index_and_ledger_encoded_block_compatibility() {
+    // check that ledger.did and index.did agree on the encoded block format
+    let manifest_dir = std::path::PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").unwrap());
+    let ledger_did_file = manifest_dir.join("../ledger.did");
+    let index_did_file = manifest_dir.join("./index.did");
+    let mut ledger_env = candid_parser::utils::CandidSource::File(ledger_did_file.as_path())
+        .load()
+        .unwrap()
+        .0;
+    let index_env = candid_parser::utils::CandidSource::File(index_did_file.as_path())
+        .load()
+        .unwrap()
+        .0;
+    let ledger_encoded_block_response_type = ledger_env
+        .find_type("QueryEncodedBlocksResponse")
+        .unwrap()
+        .to_owned();
+    let index_encoded_block_response_type =
+        index_env.find_type("GetBlocksResponse").unwrap().to_owned();
+
+    let mut gamma = std::collections::HashSet::new();
+    let index_encoded_block_response_type =
+        ledger_env.merge_type(index_env, index_encoded_block_response_type.clone());
+    // Check if the ledger `query_encoded_blocks` response <: the index `get_blocks` response,
+    // i.e., if the ledger response type is a subtype of the index response type.
+ candid::types::subtype::subtype( + &mut gamma, + &ledger_env, + &ledger_encoded_block_response_type, + &index_encoded_block_response_type, + ) + .expect("Ledger and Index encoded block types are different"); +} From a1421407941a002610b4e55a23a5b47f3a850f34 Mon Sep 17 00:00:00 2001 From: Oleksandr Tkachenko <108659113+altkdf@users.noreply.github.com> Date: Tue, 14 Jan 2025 11:02:56 +0100 Subject: [PATCH 11/33] test(PocketIC): add Schnorr `aux` types and tests (#3422) --- Cargo.lock | 1 + MODULE.bazel | 4 +- packages/pocket-ic/BUILD.bazel | 1 + packages/pocket-ic/Cargo.toml | 1 + packages/pocket-ic/src/management_canister.rs | 23 +++ .../pocket-ic/test_canister/src/canister.rs | 14 ++ .../pocket-ic/tests/management_canister.rs | 5 + packages/pocket-ic/tests/tests.rs | 133 ++++++++++++------ 8 files changed, 135 insertions(+), 47 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 330163c5345..1e410db98ce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17125,6 +17125,7 @@ version = "6.0.0" dependencies = [ "backoff", "base64 0.13.1", + "bitcoin 0.28.2", "candid", "candid_parser", "ed25519-dalek", diff --git a/MODULE.bazel b/MODULE.bazel index 9a67389dc26..d0e9fbe3a2d 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -863,8 +863,8 @@ http_file( http_file( name = "management_canister_did", downloaded_file_path = "ic.did", - sha256 = "31d4654d60b364420a2e52f546f06b2255dc78ac8c2d768271f004b8946e92cb", - url = "https://raw.githubusercontent.com/dfinity/portal/407ec5b92d06618c4df9f52e98514c5f4f44313e/docs/references/_attachments/ic.did", + sha256 = "0e92d8b9c2cf3d3fca166b76b2d3b8a2464d9b2b61117d8b2f63222b388d8dd1", + url = "https://raw.githubusercontent.com/dfinity/portal/78c93aa37ef17dc67484079d1a4bf58a10a63106/docs/references/_attachments/ic.did", ) # Mozilla CA certificate store in PEM format diff --git a/packages/pocket-ic/BUILD.bazel b/packages/pocket-ic/BUILD.bazel index 56cbb3fed7e..57e445d9270 100644 --- a/packages/pocket-ic/BUILD.bazel +++ b/packages/pocket-ic/BUILD.bazel @@ -33,6 +33,7 @@ MACRO_DEPENDENCIES = [ TEST_DEPENDENCIES = [ # Keep sorted. 
"//rs/types/error_types", + "@crate_index//:bitcoin", "@crate_index//:candid_parser", "@crate_index//:ed25519-dalek", "@crate_index//:flate2", diff --git a/packages/pocket-ic/Cargo.toml b/packages/pocket-ic/Cargo.toml index 01e3ad0f479..fbf1a1555bf 100644 --- a/packages/pocket-ic/Cargo.toml +++ b/packages/pocket-ic/Cargo.toml @@ -46,6 +46,7 @@ tracing-subscriber = { workspace = true } wslpath = "0.0.2" [dev-dependencies] +bitcoin = { workspace = true } candid_parser = { workspace = true } ed25519-dalek = { workspace = true } flate2 = { workspace = true } diff --git a/packages/pocket-ic/src/management_canister.rs b/packages/pocket-ic/src/management_canister.rs index 1878e6728e2..e76660c5237 100644 --- a/packages/pocket-ic/src/management_canister.rs +++ b/packages/pocket-ic/src/management_canister.rs @@ -1,6 +1,7 @@ use candid::{CandidType, Deserialize, Principal}; pub type CanisterId = Principal; +pub type SubnetId = Principal; #[derive(CandidType, Deserialize, Debug, Clone)] pub struct CanisterIdRecord { @@ -234,6 +235,11 @@ pub struct CanisterInfoArgs { pub num_requested_changes: Option, } +#[derive(CandidType, Deserialize, Debug, Clone)] +pub struct SubnetInfoArgs { + pub subnet_id: SubnetId, +} + #[derive(CandidType, Deserialize, Debug, Clone)] pub enum ChangeOrigin { #[serde(rename = "from_user")] @@ -292,6 +298,11 @@ pub struct CanisterInfoResult { pub total_num_changes: u64, } +#[derive(CandidType, Deserialize, Debug, Clone)] +pub struct SubnetInfoResult { + pub replica_version: String, +} + // raw randomness pub type RawRandResult = Vec; @@ -455,6 +466,18 @@ pub struct SignWithSchnorrArgs { pub key_id: SignWithSchnorrArgsKeyId, pub derivation_path: Vec>, pub message: Vec, + pub aux: Option, +} + +#[derive(CandidType, Deserialize, Debug, Clone)] +pub enum SignWithSchnorrAux { + #[serde(rename = "bip341")] + Bip341(SignWithBip341Aux), +} + +#[derive(CandidType, Deserialize, Debug, Clone)] +pub struct SignWithBip341Aux { + pub merkle_root_hash: Vec, } #[derive(CandidType, Deserialize, Debug, Clone)] diff --git a/packages/pocket-ic/test_canister/src/canister.rs b/packages/pocket-ic/test_canister/src/canister.rs index 53cd4ffad6c..8f8fe0b6a2d 100644 --- a/packages/pocket-ic/test_canister/src/canister.rs +++ b/packages/pocket-ic/test_canister/src/canister.rs @@ -109,6 +109,18 @@ struct SignWithSchnorrArgument { pub message: Vec, pub derivation_path: Vec>, pub key_id: SchnorrKeyId, + pub aux: Option, +} + +#[derive(CandidType, Serialize, Deserialize, Debug)] +pub enum SignWithSchnorrAux { + #[serde(rename = "bip341")] + Bip341(SignWithBip341Aux), +} + +#[derive(CandidType, Serialize, Deserialize, Debug)] +pub struct SignWithBip341Aux { + pub merkle_root_hash: ByteBuf, } #[derive(CandidType, Deserialize, Debug)] @@ -144,11 +156,13 @@ async fn sign_with_schnorr( message: Vec, derivation_path: Vec>, key_id: SchnorrKeyId, + aux: Option, ) -> Result, String> { let internal_request = SignWithSchnorrArgument { message, derivation_path, key_id, + aux, }; let (internal_reply,): (SignWithSchnorrResponse,) = ic_cdk::api::call::call_with_payment( diff --git a/packages/pocket-ic/tests/management_canister.rs b/packages/pocket-ic/tests/management_canister.rs index b6e813729eb..fe1c68e261e 100644 --- a/packages/pocket-ic/tests/management_canister.rs +++ b/packages/pocket-ic/tests/management_canister.rs @@ -61,6 +61,11 @@ fn canister_info(_: CanisterInfoArgs) -> CanisterInfoResult { unreachable!() } +#[update] +fn subnet_info(_: SubnetInfoArgs) -> SubnetInfoResult { + unreachable!() +} + #[update] fn 
delete_canister(_: CanisterIdRecord) { unreachable!() diff --git a/packages/pocket-ic/tests/tests.rs b/packages/pocket-ic/tests/tests.rs index 295ed91d115..8b9d39c40d5 100644 --- a/packages/pocket-ic/tests/tests.rs +++ b/packages/pocket-ic/tests/tests.rs @@ -5,7 +5,7 @@ use ic_transport_types::EnvelopeContent::ReadState; use pocket_ic::management_canister::{ CanisterId, CanisterIdRecord, CanisterInstallMode, CanisterSettings, EcdsaPublicKeyResult, HttpRequestResult, ProvisionalCreateCanisterWithCyclesArgs, SchnorrAlgorithm, - SchnorrPublicKeyArgsKeyId, SchnorrPublicKeyResult, + SchnorrPublicKeyArgsKeyId, SchnorrPublicKeyResult, SignWithBip341Aux, SignWithSchnorrAux, }; use pocket_ic::{ common::rest::{ @@ -946,53 +946,96 @@ fn test_schnorr() { // We define the message, derivation path, and ECDSA key ID to use in this test. let message = b"Hello, world!==================="; // must be of length 32 bytes for BIP340 let derivation_path = vec!["my message".as_bytes().to_vec()]; + let some_aux: Option = + Some(SignWithSchnorrAux::Bip341(SignWithBip341Aux { + merkle_root_hash: b"Hello, aux!=====================".to_vec(), + })); for algorithm in [SchnorrAlgorithm::Bip340Secp256K1, SchnorrAlgorithm::Ed25519] { for name in ["key_1", "test_key_1", "dfx_test_key"] { - let key_id = SchnorrPublicKeyArgsKeyId { - algorithm: algorithm.clone(), - name: name.to_string(), - }; - - // We get the Schnorr public key and signature via update calls to the test canister. - let schnorr_public_key = update_candid::< - (Option, _, _), - (Result,), - >( - &pic, - canister, - "schnorr_public_key", - (None, derivation_path.clone(), key_id.clone()), - ) - .unwrap() - .0 - .unwrap(); - let schnorr_signature = update_candid::<_, (Result, String>,)>( - &pic, - canister, - "sign_with_schnorr", - (message, derivation_path.clone(), key_id.clone()), - ) - .unwrap() - .0 - .unwrap(); + for aux in [None, some_aux.clone()] { + let key_id = SchnorrPublicKeyArgsKeyId { + algorithm: algorithm.clone(), + name: name.to_string(), + }; - // We verify the Schnorr signature. - match key_id.algorithm { - SchnorrAlgorithm::Bip340Secp256K1 => { - use k256::ecdsa::signature::hazmat::PrehashVerifier; - use k256::schnorr::{Signature, VerifyingKey}; - let vk = VerifyingKey::from_bytes(&schnorr_public_key.public_key[1..]).unwrap(); - let sig = Signature::try_from(schnorr_signature.as_slice()).unwrap(); - vk.verify_prehash(message, &sig).unwrap(); - } - SchnorrAlgorithm::Ed25519 => { - use ed25519_dalek::{Signature, Verifier, VerifyingKey}; - let pk: [u8; 32] = schnorr_public_key.public_key.try_into().unwrap(); - let vk = VerifyingKey::from_bytes(&pk).unwrap(); - let signature = Signature::from_slice(&schnorr_signature).unwrap(); - vk.verify(message, &signature).unwrap(); - } - }; + // We get the Schnorr public key and signature via update calls to the test canister. + let schnorr_public_key = update_candid::< + (Option, _, _), + (Result,), + >( + &pic, + canister, + "schnorr_public_key", + (None, derivation_path.clone(), key_id.clone()), + ) + .unwrap() + .0 + .unwrap(); + let schnorr_signature_result = update_candid::<_, (Result, String>,)>( + &pic, + canister, + "sign_with_schnorr", + ( + message, + derivation_path.clone(), + key_id.clone(), + aux.clone(), + ), + ) + .unwrap() + .0; + + // We verify the Schnorr signature. 
+                match key_id.algorithm {
+                    SchnorrAlgorithm::Bip340Secp256K1 => {
+                        use k256::ecdsa::signature::hazmat::PrehashVerifier;
+                        use k256::schnorr::{Signature, VerifyingKey};
+                        let bip340_public_key = schnorr_public_key.public_key[1..].to_vec();
+                        let public_key = match aux {
+                            None => bip340_public_key,
+                            Some(SignWithSchnorrAux::Bip341(bip341_aux)) => {
+                                use bitcoin::hashes::Hash;
+                                use bitcoin::schnorr::TapTweak;
+                                let xonly = bitcoin::util::key::XOnlyPublicKey::from_slice(
+                                    bip340_public_key.as_slice(),
+                                )
+                                .unwrap();
+                                let merkle_root =
+                                    bitcoin::util::taproot::TapBranchHash::from_slice(
+                                        &bip341_aux.merkle_root_hash,
+                                    )
+                                    .unwrap();
+                                let secp256k1_engine = bitcoin::secp256k1::Secp256k1::new();
+                                xonly
+                                    .tap_tweak(&secp256k1_engine, Some(merkle_root))
+                                    .0
+                                    .to_inner()
+                                    .serialize()
+                                    .to_vec()
+                            }
+                        };
+                        let vk = VerifyingKey::from_bytes(&public_key).unwrap();
+                        let sig = Signature::try_from(schnorr_signature_result.unwrap().as_slice())
+                            .unwrap();
+
+                        vk.verify_prehash(message, &sig).unwrap();
+                    }
+                    SchnorrAlgorithm::Ed25519 => {
+                        use ed25519_dalek::{Signature, Verifier, VerifyingKey};
+                        let pk: [u8; 32] = schnorr_public_key.public_key.try_into().unwrap();
+                        let vk = VerifyingKey::from_bytes(&pk).unwrap();
+                        let verification_result = schnorr_signature_result.map(|signature| {
+                            let s = Signature::from_slice(&signature).unwrap();
+                            vk.verify(message, &s).unwrap();
+                        });
+                        assert!(
+                            verification_result.is_ok() == aux.is_none(),
+                            "{:?}",
+                            verification_result
+                        );
+                    }
+                };
+            }
         }
     }
 }
From d7192c042c00a485726ed6643cb593927fd92a35 Mon Sep 17 00:00:00 2001
From: Aleksandr Pakhomov <111274088+pakhomov-dfinity@users.noreply.github.com>
Date: Tue, 14 Jan 2025 13:35:08 +0100
Subject: [PATCH 12/33] test: [EXC-1819] Fix flakiness of
 state_manager_integration_tests (random_canister_input_lsmt) (#3200)

The test has been flaky since sandbox threads started being freed in commit
`4a622c0` (`RUN-1014`); that change was later modified in `EXC-1681`
(`82c76c1`), but the flakiness was not resolved. On top of that, `a1e516f`
introduced more flakiness.

One reason is the `StateManager` outliving the `StateMachine` because
something holds the `Arc` containing it, yet we remove & recreate the tempdir
using the same name, ending up with two `StateManagers` accessing the same
dir. This is addressed by reordering the fields of `StateMachine` to ensure we
destruct the `StateManager` before the state dir in `StateMachine::drop`, and
by waiting for the `StateManager` to drop in `StateMachine::into_components`.

Another issue is communication with the sandbox; it was solved by using fork
in `ProptestConfig`.
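The `into_components` change in the diff below waits for other components to release their `Arc` clones by polling a `Weak` handle. The pattern in isolation (a self-contained toy example, not the actual `StateMachine` code):

```rust
use std::sync::{Arc, Weak};
use std::thread;
use std::time::Duration;

fn main() {
    let resource = Arc::new(String::from("owns the state directory"));
    let watcher: Weak<String> = Arc::downgrade(&resource);

    // Some background component still holds a clone of the Arc for a while.
    let held = Arc::clone(&resource);
    let worker = thread::spawn(move || {
        thread::sleep(Duration::from_millis(100));
        drop(held);
    });

    drop(resource);

    // `upgrade()` only succeeds while strong references remain, so this loop
    // returns once every holder has released the resource (a bounded retry
    // count, as in the patch, avoids hanging forever on a leak).
    while watcher.upgrade().is_some() {
        thread::sleep(Duration::from_millis(10));
    }
    worker.join().unwrap();
    println!("safe to reuse the directory now");
}
```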
---
 rs/state_machine_tests/src/lib.rs       | 19 ++++++++++++++++++-
 rs/state_manager/tests/state_manager.rs |  9 +++++++--
 2 files changed, 25 insertions(+), 3 deletions(-)

diff --git a/rs/state_machine_tests/src/lib.rs b/rs/state_machine_tests/src/lib.rs
index 87eb8cf6e70..0d5df069cc4 100644
--- a/rs/state_machine_tests/src/lib.rs
+++ b/rs/state_machine_tests/src/lib.rs
@@ -1805,7 +1805,7 @@ impl StateMachine {
         }
     }
 
-    fn into_components(self) -> (Box, u64, Time, u64) {
+    fn into_components_inner(self) -> (Box, u64, Time, u64) {
         (
             self.state_dir,
             self.nonce.into_inner(),
@@ -1814,6 +1814,23 @@ impl StateMachine {
         )
     }
 
+    fn into_components(self) -> (Box, u64, Time, u64) {
+        let state_manager = Arc::downgrade(&self.state_manager);
+        let result = self.into_components_inner();
+        let mut i = 0i32;
+        // StateManager is owned by an Arc, that is cloned into multiple components and different
+        // threads. If we return before all the asynchronous components release the Arc, we may
+        // end up with two StateManagers writing to the same directory, resulting in a crash.
+        while state_manager.upgrade().is_some() {
+            std::thread::sleep(std::time::Duration::from_millis(50));
+            i += 1;
+            if i >= 100 {
+                panic!("Failed to wait for StateManager drop");
+            }
+        }
+        result
+    }
+
     /// Emulates a node restart, including checkpoint recovery.
     pub fn restart_node(self) -> Self {
         // We must drop self before setup_form_dir so that we don't have two StateManagers pointing
diff --git a/rs/state_manager/tests/state_manager.rs b/rs/state_manager/tests/state_manager.rs
index 078e82a6d7f..b82e7b2aceb 100644
--- a/rs/state_manager/tests/state_manager.rs
+++ b/rs/state_manager/tests/state_manager.rs
@@ -6201,6 +6201,7 @@ fn can_merge_unexpected_number_of_files() {
         .vmemory_0();
     let existing_overlays = pm_layout.existing_overlays().unwrap();
     assert_eq!(existing_overlays.len(), NUM_PAGES); // single page per shard
+    state_manager.flush_tip_channel();
 
     // Copy each shard for heights 1..HEIGHT; now each file is beyond the hard limit,
     // triggering forced merge for all shards back to one overlay.
@@ -7519,8 +7520,12 @@ fn arbitrary_test_canister_op() -> impl Strategy {
 }
 
 proptest! {
-// We go for fewer, but longer runs
-#![proptest_config(ProptestConfig::with_cases(5))]
+#![proptest_config(ProptestConfig {
+    // Fork to prevent flaky timeouts due to closed sandbox fds
+    fork: true,
+    // We go for fewer, but longer runs
+    ..ProptestConfig::with_cases(5)
+})]
 
 #[test]
 fn random_canister_input_lsmt(ops in proptest::collection::vec(arbitrary_test_canister_op(), 1..50)) {
From ad6df00098cf25d8df29a469c0a4504979a411c1 Mon Sep 17 00:00:00 2001
From: Igor Novgorodov
Date: Tue, 14 Jan 2025 14:18:31 +0100
Subject: [PATCH 13/33] fix: ic-boundary: use numeric ip family in logs (#3437)

---
 rs/boundary_node/ic_boundary/src/metrics.rs | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/rs/boundary_node/ic_boundary/src/metrics.rs b/rs/boundary_node/ic_boundary/src/metrics.rs
index d4d363f9788..6b8c1701295 100644
--- a/rs/boundary_node/ic_boundary/src/metrics.rs
+++ b/rs/boundary_node/ic_boundary/src/metrics.rs
@@ -500,8 +500,17 @@ pub async fn metrics_middleware(
     let ip_family = request
         .extensions()
         .get::>()
-        .map(|x| x.remote_addr.family())
-        .unwrap_or("0");
+        .map(|x| {
+            let f = x.remote_addr.family();
+            if f == "v4" {
+                4
+            } else if f == "v6" {
+                6
+            } else {
+                0
+            }
+        })
+        .unwrap_or(0);
 
     let remote_addr = request
         .extensions()
From 38a497106eda4577231b1d796b9533ce0921ad12 Mon Sep 17 00:00:00 2001
From: Adam Bratschi-Kaye
Date: Tue, 14 Jan 2025 15:38:42 +0100
Subject: [PATCH 14/33] fix(EXC-1834): Ignore empty data segment pages (#3435)

EXC-1834

Avoid triggering our critical error alert in the case where a module has
empty data segments.

---------

Co-authored-by: Venkkatesh Sekar
---
 rs/embedders/src/wasm_utils.rs                |  3 ++
 .../tests/execution_test.rs                   | 34 +++++++++++++++++++
 2 files changed, 37 insertions(+)

diff --git a/rs/embedders/src/wasm_utils.rs b/rs/embedders/src/wasm_utils.rs
index 31e8b4ce0c5..771a5d328f3 100644
--- a/rs/embedders/src/wasm_utils.rs
+++ b/rs/embedders/src/wasm_utils.rs
@@ -133,6 +133,9 @@ impl Segments {
             // them into a map page_num -> page. Whenever we map a chunk into its page,
             // we simply copy its bytes to the right place inside the page.
             .fold(HashMap::new(), |mut acc, (offset, bytes)| {
+                if bytes.is_empty() {
+                    return acc;
+                }
                 let page_num = offset / PAGE_SIZE;
                 let list = acc
                     .entry(PageIndex::new(page_num as u64))
diff --git a/rs/execution_environment/tests/execution_test.rs b/rs/execution_environment/tests/execution_test.rs
index 68031280b6f..f8cae43c080 100644
--- a/rs/execution_environment/tests/execution_test.rs
+++ b/rs/execution_environment/tests/execution_test.rs
@@ -1,5 +1,6 @@
 use assert_matches::assert_matches;
 use candid::Encode;
+use canister_test::CanisterInstallMode;
 use ic_base_types::PrincipalId;
 use ic_config::{
     execution_environment::{Config as HypervisorConfig, DEFAULT_WASM_MEMORY_LIMIT},
@@ -2773,3 +2774,36 @@ fn do_not_initialize_wasm_memory_limit_if_it_is_not_empty() {
     let wasm_memory_limit = fetch_wasm_memory_limit(&env, canister_id);
     assert_eq!(wasm_memory_limit, NumBytes::new(10_000_000_000));
 }
+
+/// Even if a Wasm module has initial memory size 0, it is allowed to have data
+/// segments of length 0 inserted at address 0. This test checks that such data
+/// segments don't trigger any of our critical errors.
+#[test]
+fn no_critical_error_on_empty_data_segment() {
+    let env = StateMachine::new();
+    let wat: &str = r#"
+        (module
+            (memory (;0;) i64 0)
+            (data (;0;) (i64.const 0) "")
+        )
+    "#;
+    let _id = env.install_canister_wat(wat, vec![], None);
+
+    // A module with an empty data segment outside of memory should fail to
+    // install, but not trigger any critical errors.
+    let wat: &str = r#"
+        (module
+            (memory (;0;) i64 0)
+            (data (;0;) (i64.const 1) "")
+        )
+    "#;
+    let wasm = wat::parse_str(wat).unwrap();
+    let id = env.create_canister(None);
+    let error = env
+        .install_wasm_in_mode(id, CanisterInstallMode::Install, wasm, vec![])
+        .unwrap_err();
+    error.assert_contains(
+        ErrorCode::CanisterInvalidWasm,
+        "Wasm module has invalid data segment of 0 bytes at 1.",
+    );
+}
From ba0b355f59df7d7a683b58b5600339c298911480 Mon Sep 17 00:00:00 2001
From: "pr-automation-bot-public[bot]"
 <189003650+pr-automation-bot-public[bot]@users.noreply.github.com>
Date: Tue, 14 Jan 2025 14:43:50 +0000
Subject: [PATCH 15/33] chore: Update Mainnet IC revisions subnets file (#3436)

Update mainnet revisions file to include the latest version released on the
mainnet.

This PR is created automatically using
[`mainnet_revisions.py`](https://github.com/dfinity/ic/blob/master/ci/src/mainnet_revisions/mainnet_revisions.py)

Co-authored-by: CI Automation
---
 mainnet-subnet-revisions.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mainnet-subnet-revisions.json b/mainnet-subnet-revisions.json
index 02ebe42e34c..2c5e960f9d8 100644
--- a/mainnet-subnet-revisions.json
+++ b/mainnet-subnet-revisions.json
@@ -1,6 +1,6 @@
 {
     "subnets": {
         "tdb26-jop6k-aogll-7ltgs-eruif-6kk7m-qpktf-gdiqx-mxtrf-vb5e6-eqe": "43670245ed6919790e7858813c7e838c6fbcedf5",
-        "io67a-2jmkw-zup3h-snbwi-g6a5n-rm5dn-b6png-lvdpl-nqnto-yih6l-gqe": "43670245ed6919790e7858813c7e838c6fbcedf5"
+        "io67a-2jmkw-zup3h-snbwi-g6a5n-rm5dn-b6png-lvdpl-nqnto-yih6l-gqe": "aa705aaa621c2e0d4f146f3a1de801edcb0fa0d5"
     }
 }
\ No newline at end of file
From c4739e9ad8a15c78058c1e32a9b29e3c828a4aa6 Mon Sep 17 00:00:00 2001
From: Adam Bratschi-Kaye
Date: Tue, 14 Jan 2025 15:51:51 +0100
Subject: [PATCH 16/33] feat(EXC-1800): Use Wasmtime `deserialize_open_file`.
 (#3412)

EXC-1800

When using the on-disk compilation cache, switch to Wasmtime's new
`deserialize_open_file` API which allows sandboxes to mmap the existing file
so that canister code doesn't take up resident memory in the sandbox.
---
 rs/embedders/src/wasm_executor.rs     | 27 +++++++++++++++---
 rs/embedders/src/wasmtime_embedder.rs | 40 ++++++++-------------------
 2 files changed, 35 insertions(+), 32 deletions(-)

diff --git a/rs/embedders/src/wasm_executor.rs b/rs/embedders/src/wasm_executor.rs
index ec2de9f11be..e920683064e 100644
--- a/rs/embedders/src/wasm_executor.rs
+++ b/rs/embedders/src/wasm_executor.rs
@@ -385,6 +385,29 @@ impl WasmExecutorImpl {
             })
         } else {
             match compilation_cache.get(&wasm_binary.binary) {
+                Some(Ok(StoredCompilation::Disk(on_disk_serialized_module))) => {
+                    // This path is only used when sandboxing is disabled.
+                    // Otherwise the fd is implicitly duplicated when passed to
+                    // the sandbox process over the unix socket.
+                    let instance_pre = self.wasm_embedder.read_file_and_pre_instantiate(
+                        on_disk_serialized_module
+                            .bytes
+                            .try_clone()
+                            .expect("Unable to duplicate serialized module file descriptor."),
+                    );
+                    let cache = EmbedderCache::new(instance_pre.clone());
+                    *guard = Some(cache.clone());
+                    match instance_pre {
+                        Ok(_) => Ok(CacheLookup {
+                            cache,
+                            serialized_module: Some(StoredCompilation::Disk(
+                                on_disk_serialized_module,
+                            )),
+                            compilation_result: None,
+                        }),
+                        Err(err) => Err(err),
+                    }
+                }
                 Some(Ok(StoredCompilation::Memory(serialized_module))) => {
                     let instance_pre = self
                         .wasm_embedder
@@ -400,10 +423,6 @@ impl WasmExecutorImpl {
                         Err(err) => Err(err),
                     }
                 }
-                Some(Ok(StoredCompilation::Disk(_serialized_module))) => {
-                    // TODO(EXC-1780)
-                    panic!("On disk compilation cache not yet supported");
-                }
                 Some(Err(err)) => {
                     let cache: HypervisorResult = Err(err.clone());
                     *guard = Some(EmbedderCache::new(cache));
diff --git a/rs/embedders/src/wasmtime_embedder.rs b/rs/embedders/src/wasmtime_embedder.rs
index b9c8dca7577..113ee053de4 100644
--- a/rs/embedders/src/wasmtime_embedder.rs
+++ b/rs/embedders/src/wasmtime_embedder.rs
@@ -11,8 +11,6 @@ use std::{
     convert::TryFrom,
     fs::File,
     mem::size_of,
-    os::fd::{AsRawFd, IntoRawFd},
-    os::unix::fs::MetadataExt,
     sync::{atomic::Ordering, Arc, Mutex},
 };
 
@@ -39,7 +37,6 @@ use ic_types::{
 };
 use ic_wasm_types::{BinaryEncodedWasm, WasmEngineError};
 use memory_tracker::{DirtyPageTracking, PageBitmap, SigsegvMemoryTracker};
-use nix::sys::mman::{mmap, MapFlags, ProtFlags};
 use signal_stack::WasmtimeSignalStack;
 
 use crate::wasm_utils::instrumentation::{
@@ -336,28 +333,18 @@ impl WasmtimeEmbedder {
         self.pre_instantiate(&module)
     }
 
-    /// TODO(EXC-1800): Replace this with `wasmtime::Module::deserialize_open_file`.
-    fn deserialize_from_file(&self, serialized_module: &File) -> HypervisorResult {
-        let mmap_size = serialized_module.metadata().unwrap().size() as usize;
-        let mmap_ptr = unsafe {
-            mmap(
-                std::ptr::null_mut(),
-                mmap_size,
-                ProtFlags::PROT_READ,
-                MapFlags::MAP_PRIVATE,
-                serialized_module.as_raw_fd(),
-                0,
-            )
-        }
-        .unwrap_or_else(|err| panic!("Module deserialization failed: {:?}", err))
-            as *mut u8;
-        let bytes = unsafe { std::slice::from_raw_parts(mmap_ptr, mmap_size) };
+    fn deserialize_from_file(&self, serialized_module: File) -> HypervisorResult {
+        // SAFETY: The compilation cache setup guarantees that this file is a
+        // valid serialized module and will not be modified after initial
+        // creation.
unsafe { - Module::deserialize(&self.create_engine()?, bytes).map_err(|err| { - HypervisorError::WasmEngineError(WasmEngineError::FailedToDeserializeModule( - format!("{:?}", err), - )) - }) + Module::deserialize_open_file(&self.create_engine()?, serialized_module).map_err( + |err| { + HypervisorError::WasmEngineError(WasmEngineError::FailedToDeserializeModule( + format!("{:?}", err), + )) + }, + ) } } @@ -365,10 +352,7 @@ impl WasmtimeEmbedder { &self, serialized_module: File, ) -> HypervisorResult> { - // TODO(EXC-1800): Switch to new wasmtime API and remove leaking the - // file. - let module = self.deserialize_from_file(&serialized_module)?; - let _ = serialized_module.into_raw_fd(); + let module = self.deserialize_from_file(serialized_module)?; self.pre_instantiate(&module) } From f9f2491d302ce74634f487e8971105c81d82ae54 Mon Sep 17 00:00:00 2001 From: mraszyk <31483726+mraszyk@users.noreply.github.com> Date: Tue, 14 Jan 2025 17:16:27 +0100 Subject: [PATCH 17/33] fix: mocked xnet in PocketIC (#3376) This PR fixes an assertion failure in the mocked xnet of PocketIC, reproduced in a new PocketIC test sending 500 inter-canister calls with ~10KB payloads: ``` 2025-01-09T12:14:35.158543Z INFO pocket_ic_server: The PocketIC server is listening on port 43473 thread 'tokio-runtime-worker' panicked at rs/xnet/payload_builder/src/lib.rs:862:25: Slice from 6a7lo-edqtc-nflgn-rbyzy-msh45-ifbmo-xs5bc-p3l2c-my7nk-i47kz-gae has packed byte size 3995437, unpacked byte size 3984588, limit was 3984588 ``` The fix proceeds by reusing the production `XNetSlicePoolImpl` and only mocking the code fetching XNet slices from remote subnets. --- packages/pocket-ic/tests/tests.rs | 32 ++++++ rs/state_machine_tests/src/lib.rs | 123 ++++++++++++----------- rs/xnet/payload_builder/src/lib.rs | 153 +++++++++++++++++------------ 3 files changed, 186 insertions(+), 122 deletions(-) diff --git a/packages/pocket-ic/tests/tests.rs b/packages/pocket-ic/tests/tests.rs index 8b9d39c40d5..196a11777e1 100644 --- a/packages/pocket-ic/tests/tests.rs +++ b/packages/pocket-ic/tests/tests.rs @@ -2171,3 +2171,35 @@ fn await_call_no_ticks() { }; assert_eq!(principal, canister_id.to_string()); } + +#[test] +fn many_intersubnet_calls() { + let pic = PocketIcBuilder::new() + .with_application_subnet() + .with_application_subnet() + .build(); + let canister_1 = pic.create_canister_on_subnet(None, None, pic.topology().get_app_subnets()[0]); + pic.add_cycles(canister_1, 100_000_000_000_000_000); + pic.install_canister(canister_1, test_canister_wasm(), vec![], None); + let canister_2 = pic.create_canister_on_subnet(None, None, pic.topology().get_app_subnets()[1]); + pic.add_cycles(canister_2, 100_000_000_000_000_000); + pic.install_canister(canister_2, test_canister_wasm(), vec![], None); + + let mut msg_ids = vec![]; + let num_msgs: usize = 500; + let msg_size: usize = 10000; + for _ in 0..num_msgs { + let msg_id = pic + .submit_call( + canister_1, + Principal::anonymous(), + "call_with_large_blob", + Encode!(&canister_2, &msg_size).unwrap(), + ) + .unwrap(); + msg_ids.push(msg_id); + } + for msg_id in msg_ids { + pic.await_call(msg_id).unwrap(); + } +} diff --git a/rs/state_machine_tests/src/lib.rs b/rs/state_machine_tests/src/lib.rs index 0d5df069cc4..5031290b05a 100644 --- a/rs/state_machine_tests/src/lib.rs +++ b/rs/state_machine_tests/src/lib.rs @@ -153,9 +153,8 @@ use ic_types::{ CanisterId, CryptoHashOfState, Cycles, NumBytes, PrincipalId, SubnetId, UserId, }; use ic_xnet_payload_builder::{ - 
certified_slice_pool::{certified_slice_count_bytes, CertifiedSliceError}, - ExpectedIndices, RefillTaskHandle, XNetPayloadBuilderImpl, XNetPayloadBuilderMetrics, - XNetSlicePool, + certified_slice_pool::CertifiedSlicePool, refill_stream_slice_indices, RefillTaskHandle, + XNetPayloadBuilderImpl, XNetPayloadBuilderMetrics, XNetSlicePoolImpl, }; use rcgen::{CertificateParams, KeyPair}; use serde::Deserialize; @@ -599,72 +598,64 @@ pub trait Subnets: Send + Sync { fn get(&self, subnet_id: SubnetId) -> Option>; } -/// Struct mocking the pool of XNet messages required for -/// instantiating `XNetPayloadBuilderImpl` in `StateMachine`. -struct PocketXNetSlicePoolImpl { - /// Pool of `StateMachine`s from which the XNet messages are fetched. +/// Struct mocking the XNet layer. +struct PocketXNetImpl { + /// Pool of `StateMachine`s from which XNet messages are fetched. subnets: Arc, - /// Subnet ID of the `StateMachine` containing the pool. + /// The certified slice pool of the `StateMachine` for which the XNet layer is mocked. + pool: Arc>, + /// The subnet ID of the `StateMachine` for which the XNet layer is mocked. own_subnet_id: SubnetId, } -impl PocketXNetSlicePoolImpl { - fn new(subnets: Arc, own_subnet_id: SubnetId) -> Self { +impl PocketXNetImpl { + fn new( + subnets: Arc, + pool: Arc>, + own_subnet_id: SubnetId, + ) -> Self { Self { subnets, + pool, own_subnet_id, } } -} -impl XNetSlicePool for PocketXNetSlicePoolImpl { - /// Obtains a certified slice of a stream from a `StateMachine` - /// corresponding to a given subnet ID. - fn take_slice( - &self, - subnet_id: SubnetId, - begin: Option<&ExpectedIndices>, - msg_limit: Option, - byte_limit: Option, - ) -> Result, CertifiedSliceError> { - let sm = self.subnets.get(subnet_id).unwrap(); - let msg_begin = begin.map(|idx| idx.message_index); - // We set `witness_begin` equal to `msg_begin` since all states are certified. - let certified_stream = sm.generate_certified_stream_slice( - self.own_subnet_id, - msg_begin, - msg_begin, - msg_limit, - byte_limit, - ); - Ok(certified_stream - .map(|certified_stream| { - let mut num_bytes = certified_slice_count_bytes(&certified_stream).unwrap(); - // Because `StateMachine::generate_certified_stream_slice` only uses a size estimate - // when constructing a slice (this estimate can be off by at most a few KB), - // we fake the reported slice size if it exceeds the specified size limit to make sure the payload builder will accept the slice as valid and include it into the block. - // This is fine since we don't actually validate the payload in the context of Pocket IC, and so blocks containing - // a XNet slice exceeding the byte limit won't be rejected as invalid. - if let Some(byte_limit) = byte_limit { - if num_bytes > byte_limit { - num_bytes = byte_limit; + fn refill(&self, registry_version: RegistryVersion, log: ReplicaLogger) { + let refill_stream_slice_indices = + refill_stream_slice_indices(self.pool.clone(), self.own_subnet_id); + + for (subnet_id, indices) in refill_stream_slice_indices { + let sm = self.subnets.get(subnet_id).unwrap(); + match sm.generate_certified_stream_slice( + self.own_subnet_id, + Some(indices.witness_begin), + Some(indices.msg_begin), + None, + Some(indices.byte_limit), + ) { + Ok(slice) => { + if indices.witness_begin != indices.msg_begin { + // Pulled a stream suffix, append to pooled slice. + self.pool + .lock() + .unwrap() + .append(subnet_id, slice, registry_version, log.clone()) + .unwrap(); + } else { + // Pulled a complete stream, replace pooled slice (if any). 
+ self.pool + .lock() + .unwrap() + .put(subnet_id, slice, registry_version, log.clone()) + .unwrap(); } } - (certified_stream, num_bytes) - }) - .ok()) + Err(EncodeStreamError::NoStreamForSubnet(_)) => (), + Err(err) => panic!("Unexpected XNetClient error: {}", err), + } + } } - - /// We do not collect any metrics here. - fn observe_pool_size_bytes(&self) {} - - /// We do not cache XNet messages in this mock implementation - /// and thus there is no need for garbage collection. - fn garbage_collect(&self, _new_stream_positions: BTreeMap) {} - - /// We do not cache XNet messages in this mock implementation - /// and thus there is no need for garbage collection. - fn garbage_collect_slice(&self, _subnet_id: SubnetId, _stream_position: ExpectedIndices) {} } /// A custom `QueryStatsPayloadBuilderImpl` that uses a single @@ -825,6 +816,7 @@ pub struct StateMachine { ingress_pool: Arc>, ingress_manager: Arc, pub ingress_filter: Arc>, + pocket_xnet: Arc>>, payload_builder: Arc>>, message_routing: SyncMessageRouting, pub metrics_registry: MetricsRegistry, @@ -1224,7 +1216,12 @@ impl StateMachineBuilder { // Instantiate a `XNetPayloadBuilderImpl`. // We need to use a deterministic PRNG - so we use an arbitrary fixed seed, e.g., 42. let rng = Arc::new(Some(Mutex::new(StdRng::seed_from_u64(42)))); - let xnet_slice_pool_impl = Box::new(PocketXNetSlicePoolImpl::new(subnets, subnet_id)); + let certified_stream_store: Arc = sm.state_manager.clone(); + let certified_slice_pool = Arc::new(Mutex::new(CertifiedSlicePool::new( + certified_stream_store, + &sm.metrics_registry, + ))); + let xnet_slice_pool_impl = Box::new(XNetSlicePoolImpl::new(certified_slice_pool.clone())); let metrics = Arc::new(XNetPayloadBuilderMetrics::new(&sm.metrics_registry)); let xnet_payload_builder = Arc::new(XNetPayloadBuilderImpl::new_from_components( sm.state_manager.clone(), @@ -1262,6 +1259,10 @@ impl StateMachineBuilder { sm.replica_logger.clone(), )); + // Put `PocketXNetImpl` into `StateMachine` + // which contains no `PocketXNetImpl` after creation. + let pocket_xnet_impl = PocketXNetImpl::new(subnets, certified_slice_pool, subnet_id); + *sm.pocket_xnet.write().unwrap() = Some(pocket_xnet_impl); // Instantiate a `PayloadBuilderImpl` and put it into `StateMachine` // which contains no `PayloadBuilderImpl` after creation. *sm.payload_builder.write().unwrap() = Some(PayloadBuilderImpl::new( @@ -1305,6 +1306,7 @@ impl StateMachine { /// because the payload builder contains an `Arc` of this `StateMachine` /// which creates a circular dependency preventing this `StateMachine`s from being dropped. 
pub fn drop_payload_builder(&self) { + self.pocket_xnet.write().unwrap().take(); self.payload_builder.write().unwrap().take(); } @@ -1349,6 +1351,12 @@ impl StateMachine { membership_version: subnet_record.clone(), context_version: subnet_record, }; + self.pocket_xnet + .read() + .unwrap() + .as_ref() + .unwrap() + .refill(registry_version, self.replica_logger.clone()); let payload_builder = self.payload_builder.read().unwrap(); let payload_builder = payload_builder.as_ref().unwrap(); let batch_payload = payload_builder.get_payload( @@ -1775,6 +1783,7 @@ impl StateMachine { ingress_pool, ingress_manager: ingress_manager.clone(), ingress_filter: Arc::new(Mutex::new(execution_services.ingress_filter)), + pocket_xnet: Arc::new(RwLock::new(None)), // set by `StateMachineBuilder::build_with_subnets` payload_builder: Arc::new(RwLock::new(None)), // set by `StateMachineBuilder::build_with_subnets` ingress_history_reader: execution_services.ingress_history_reader, message_routing, diff --git a/rs/xnet/payload_builder/src/lib.rs b/rs/xnet/payload_builder/src/lib.rs index c54d937a3e6..bbaf916a747 100644 --- a/rs/xnet/payload_builder/src/lib.rs +++ b/rs/xnet/payload_builder/src/lib.rs @@ -1195,6 +1195,85 @@ pub const POOL_SLICE_BYTE_SIZE_MAX: usize = 4 << 20; /// the payload once we're this close to the payload size limit. pub const SLICE_BYTE_SIZE_MIN: usize = 1 << 10; +/// This struct stores stream indices and byte limit used for refilling +/// stream slices of a subnet in a certified slice pool. +pub struct RefillStreamSliceIndices { + pub witness_begin: StreamIndex, + pub msg_begin: StreamIndex, + pub byte_limit: usize, +} + +/// Computes `RefillStreamSliceIndices` for every subnet whose stream slices should be refilled +/// in the given certified slice pool owned by the given subnet. +pub fn refill_stream_slice_indices( + pool_lock: Arc>, + own_subnet_id: SubnetId, +) -> impl Iterator { + let mut result: BTreeMap = BTreeMap::new(); + + let pool_slice_stats = { + let pool = pool_lock.lock().unwrap(); + + if pool.byte_size() > POOL_BYTE_SIZE_SOFT_CAP { + // Abort if pool is already full. + return result.into_iter(); + } + + pool.peers() + // Skip our own subnet, the loopback stream is routed separately. + .filter(|&&subnet_id| subnet_id != own_subnet_id) + .map(|&subnet_id| (subnet_id, pool.slice_stats(subnet_id))) + .collect::>() + }; + + for (subnet_id, slice_stats) in pool_slice_stats { + let (stream_position, messages_begin, msg_count, byte_size) = match slice_stats { + // Have a cached stream position. + (Some(stream_position), messages_begin, msg_count, byte_size) => { + (stream_position, messages_begin, msg_count, byte_size) + } + + // No cached stream position, no pooling / refill necessary. + (None, _, _, _) => continue, + }; + + let (witness_begin, msg_begin, slice_byte_limit) = match messages_begin { + // Existing pooled stream, pull partial slice and append. + Some(messages_begin) if messages_begin == stream_position.message_index => ( + stream_position.message_index, + stream_position.message_index + (msg_count as u64).into(), + POOL_SLICE_BYTE_SIZE_MAX.saturating_sub(byte_size), + ), + + // No pooled stream, or pooled stream does not begin at cached stream position, pull + // complete slice from cached stream position. + _ => ( + stream_position.message_index, + stream_position.message_index, + POOL_SLICE_BYTE_SIZE_MAX, + ), + }; + + if slice_byte_limit < SLICE_BYTE_SIZE_MIN { + // No more space left in the pool for this slice, bail out. 
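+            // (`SLICE_BYTE_SIZE_MIN` is 1 KiB; see its doc comment above for
+            // why smaller slices are not worth pulling.)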
+            continue;
+        }
+
+        result.insert(
+            subnet_id,
+            RefillStreamSliceIndices {
+                witness_begin,
+                msg_begin,
+                // XNetEndpoint only counts message bytes, allow some overhead (measured: 350
+                // bytes for certification plus base witness, 2% for large payloads).
+                byte_limit: (slice_byte_limit.saturating_sub(350)) * 98 / 100,
+            },
+        );
+    }
+
+    result.into_iter()
+}
+
 /// An async task that refills the slice pool.
 pub struct PoolRefillTask {
     /// A pool of slices, filled in the background by an async task.
@@ -1235,12 +1314,7 @@ impl PoolRefillTask {
 
         runtime_handle.spawn(async move {
            while let Some(registry_version) = refill_receiver.recv().await {
-                task.refill_pool(
-                    POOL_BYTE_SIZE_SOFT_CAP,
-                    POOL_SLICE_BYTE_SIZE_MAX,
-                    registry_version,
-                )
-                .await;
+                task.refill_pool(registry_version).await;
             }
         });
 
@@ -1249,68 +1323,17 @@ impl PoolRefillTask {
 
     /// Queries all subnets for new slices and puts / appends them to the pool after
     /// validation against the given registry version.
-    async fn refill_pool(
-        &self,
-        pool_byte_size_soft_cap: usize,
-        slice_byte_size_max: usize,
-        registry_version: RegistryVersion,
-    ) {
-        let pool_slice_stats = {
-            let pool = self.pool.lock().unwrap();
-
-            if pool.byte_size() > pool_byte_size_soft_cap {
-                // Abort if pool is already full.
-                return;
-            }
-
-            pool.peers()
-                // Skip our own subnet, the loopback stream is routed separately.
-                .filter(|&&subnet_id| subnet_id != self.endpoint_resolver.subnet_id)
-                .map(|&subnet_id| (subnet_id, pool.slice_stats(subnet_id)))
-                .collect::<BTreeMap<_, _>>()
-        };
-
-        for (subnet_id, slice_stats) in pool_slice_stats {
-            let (stream_position, messages_begin, msg_count, byte_size) = match slice_stats {
-                // Have a cached stream position.
-                (Some(stream_position), messages_begin, msg_count, byte_size) => {
-                    (stream_position, messages_begin, msg_count, byte_size)
-                }
-
-                // No cached stream position, no pooling / refill necessary.
-                (None, _, _, _) => continue,
-            };
-
-            let (witness_begin, msg_begin, slice_byte_limit) = match messages_begin {
-                // Existing pooled stream, pull partial slice and append.
-                Some(messages_begin) if messages_begin == stream_position.message_index => (
-                    stream_position.message_index,
-                    stream_position.message_index + (msg_count as u64).into(),
-                    slice_byte_size_max.saturating_sub(byte_size),
-                ),
-
-                // No pooled stream, or pooled stream does not begin at cached stream position, pull
-                // complete slice from cached stream position.
-                _ => (
-                    stream_position.message_index,
-                    stream_position.message_index,
-                    slice_byte_size_max,
-                ),
-            };
-
-            if slice_byte_limit < SLICE_BYTE_SIZE_MIN {
-                // No more space left in the pool for this slice, bail out.
-                continue;
-            }
+    async fn refill_pool(&self, registry_version: RegistryVersion) {
+        let refill_stream_slice_indices =
+            refill_stream_slice_indices(self.pool.clone(), self.endpoint_resolver.subnet_id);
 
+        for (subnet_id, indices) in refill_stream_slice_indices {
             // `XNetEndpoint` URL of a node on `subnet_id`.
             let endpoint_locator = match self.endpoint_resolver.xnet_endpoint_url(
                 subnet_id,
-                witness_begin,
-                msg_begin,
-                // XNetEndpoint only counts message bytes, allow some overhead (measuread: 350
-                // bytes for certification plus base witness, 2% for large payloads).
-                (slice_byte_limit.saturating_sub(350)) * 98 / 100,
+                indices.witness_begin,
+                indices.msg_begin,
+                indices.byte_limit,
             ) {
                 Ok(endpoint_locator) => endpoint_locator,
                 Err(e) => {
@@ -1336,7 +1359,7 @@ impl PoolRefillTask {
                     Ok(slice) => {
                         let logger = log.clone();
                         let res = tokio::task::spawn_blocking(move || {
-                            if witness_begin != msg_begin {
+                            if indices.witness_begin != indices.msg_begin {
                                 // Pulled a stream suffix, append to pooled slice.
                                 pool.lock()
                                     .unwrap()

From 56015b7c3cf290bd132d98eb7695a65cbad4232b Mon Sep 17 00:00:00 2001
From: Daniel Wong <97631336+daniel-wong-dfinity-org@users.noreply.github.com>
Date: Tue, 14 Jan 2025 17:27:03 +0100
Subject: [PATCH 18/33] feat(governance-tools): Support creating entries in SNS CHANGELOG.md files. (#3416)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

# How

The main difference is that SNS proposals are worded differently. With
these changes, the script can also parse SNS proposal titles. Since the
format of SNS titles is similar to that of NNS titles, this change is
straightforward.

# References

[👈 Previous PR][prev]

[prev]: https://github.com/dfinity/ic/pull/3332
---
 .../nns-tools/add-release-to-changelog.sh     | 32 ++++++++++++++-----
 1 file changed, 24 insertions(+), 8 deletions(-)

diff --git a/testnet/tools/nns-tools/add-release-to-changelog.sh b/testnet/tools/nns-tools/add-release-to-changelog.sh
index e3ca0d7d033..47d94487252 100755
--- a/testnet/tools/nns-tools/add-release-to-changelog.sh
+++ b/testnet/tools/nns-tools/add-release-to-changelog.sh
@@ -59,13 +59,28 @@ EXECUTED_ON=$(
 print_purple "Proposal ${PROPOSAL_ID} was executed ${SECONDS_AGO} seconds ago." >&2
 
 # Extract which canister was upgraded, and to what commit.
-TITLE=$(echo "${PROPOSAL_INFO}" | jq -r '.proposal[0].summary' | head -n 1)
-CANISTER_NAME=$(
-    echo "${TITLE}" \
-        | sed 's/# Upgrade the //' | sed 's/ Canister to Commit .*//' \
-        | tr '[:upper:]' '[:lower:]'
-)
-DESTINATION_COMMIT_ID=$(echo "${TITLE}" | sed 's/# Upgrade the .* Canister to Commit //')
+TITLE=$(echo "${PROPOSAL_INFO}" | jq -r '.proposal[0].title[0]')
+if grep 'Upgrade the .* Canister to Commit .*' <<<"${TITLE}" &>/dev/null; then
+    GOVERNANCE_TYPE='NNS'
+    CANISTER_NAME=$(
+        echo "${TITLE}" \
+            | sed 's/Upgrade the //' | sed 's/ Canister to Commit .*//' \
+            | tr '[:upper:]' '[:lower:]'
+    )
+    DESTINATION_COMMIT_ID=$(echo "${TITLE}" | sed 's/Upgrade the .* Canister to Commit //')
+elif grep 'Publish SNS .* WASM Built at Commit .*' <<<"${TITLE}" &>/dev/null; then
+    GOVERNANCE_TYPE='SNS'
+    CANISTER_NAME=$(
+        echo "${TITLE}" \
+            | sed 's/Publish SNS //' | sed 's/ WASM Built at Commit .*//' \
+            | tr '[:upper:]' '[:lower:]'
+    )
+    DESTINATION_COMMIT_ID=$(echo "${TITLE}" | sed 's/Publish SNS .* WASM Built at Commit //')
+else
+    print_red "💀 Unable to parse proposal title: ${TITLE}" >&2
+    print_red "(In particular, unable to determine which canister and commit.)" >&2
+    exit 1
+fi
 
 # Fail if the proposal's commit is not checked out.
 if [[ $(git rev-parse HEAD) != $DESTINATION_COMMIT_ID* ]]; then
@@ -78,7 +93,8 @@ fi
 
 # cd to the canister's primary code path.
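+# NB: with GOVERNANCE_TYPE set to 'NNS' or 'SNS' above, the call below expands
+# to get_nns_canister_code_location or get_sns_canister_code_location (the
+# latter is assumed to be defined alongside the former in the sourced helpers).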
 CANISTER_CODE_PATH=$(
-    get_nns_canister_code_location "${CANISTER_NAME}" \
+    get_"$(echo "${GOVERNANCE_TYPE}" | tr '[:upper:]' '[:lower:]')"_canister_code_location \
+        "${CANISTER_NAME}" \
         | sed "s^${PWD}^.^g" \
         | cut -d' ' -f1
 )

From e4479ba9d49923759e672ed83e742cd2a371bb61 Mon Sep 17 00:00:00 2001
From: Nicolas Mattia
Date: Tue, 14 Jan 2025 17:41:24 +0100
Subject: [PATCH 19/33] chore(IDX): Use repository_ctx.getenv (#3442)

This ensures that the repository rule gets re-evaluated when the env var
changes without having to specify it as a rule input.
---
 bazel/sanitizers_enabled_env/defs.bzl | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/bazel/sanitizers_enabled_env/defs.bzl b/bazel/sanitizers_enabled_env/defs.bzl
index 091354e0076..a6e65bef9a2 100644
--- a/bazel/sanitizers_enabled_env/defs.bzl
+++ b/bazel/sanitizers_enabled_env/defs.bzl
@@ -8,7 +8,7 @@ def _impl(repository_ctx):
     )
     repository_ctx.file(
         "defs.bzl",
-        content = "SANITIZERS_ENABLED=" + repository_ctx.os.environ.get("SANITIZERS_ENABLED", "0") + "\n",
+        content = "SANITIZERS_ENABLED=" + repository_ctx.getenv("SANITIZERS_ENABLED", "0") + "\n",
        executable = False,
     )
 
@@ -16,6 +16,5 @@ def sanitizers_enabled_env(name = None):
     rule = repository_rule(
         implementation = _impl,
         local = True,
-        environ = ["SANITIZERS_ENABLED"],
     )
     rule(name = name)

From f72bd1cdb20e307e1c928f951b168cf5940b4801 Mon Sep 17 00:00:00 2001
From: Daniel Wong <97631336+daniel-wong-dfinity-org@users.noreply.github.com>
Date: Tue, 14 Jan 2025 17:59:10 +0100
Subject: [PATCH 20/33] feat(governance-tools): Pretty add-release-to-changelog.sh output. (#3417)

# More Precisely, What

Added a success message.

Added emojis.

Changed informational messages from purple to cyan. I think dark grey
would be best, but we don't (yet) have a function for that; cyan seems
like the next best thing.

# Why

Silence is a highly ambiguous way to communicate.

> Explicit is better than implicit.
>
> --Zen of Python
---
 .../nns-tools/add-release-to-changelog.sh     | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/testnet/tools/nns-tools/add-release-to-changelog.sh b/testnet/tools/nns-tools/add-release-to-changelog.sh
index 47d94487252..296d6d841bd 100755
--- a/testnet/tools/nns-tools/add-release-to-changelog.sh
+++ b/testnet/tools/nns-tools/add-release-to-changelog.sh
@@ -25,7 +25,7 @@ cd "$(repo_root)"
 PWD="$(pwd)"
 
 # Fetch the proposal.
-print_purple "Fetching proposal ${PROPOSAL_ID}..." >&2
+print_cyan "⏳ Fetching proposal ${PROPOSAL_ID}..." >&2
 PROPOSAL_INFO=$(
     __dfx --quiet \
         canister call \
@@ -38,7 +38,7 @@ PROPOSAL_INFO=$(
 # Unwrap.
 LEN=$(echo "${PROPOSAL_INFO}" | jq '. | length')
 if [[ "${LEN}" -ne 1 ]]; then
-    print_red "Unexpected result from the get_proposal_info method:" >&2
+    print_red "💀 Unexpected result from the get_proposal_info method:" >&2
     print_red "Should have one element, but has ${LEN}" >&2
     exit 1
 fi
@@ -47,7 +47,7 @@ PROPOSAL_INFO=$(echo "${PROPOSAL_INFO}" | jq '.[0]')
 # Assert was executed.
 EXECUTED_TIMESTAMP_SECONDS=$(echo "${PROPOSAL_INFO}" | jq '.executed_timestamp_seconds | tonumber')
 if [[ "${EXECUTED_TIMESTAMP_SECONDS}" -eq 0 ]]; then
-    print_red "Proposal ${PROPOSAL_ID} exists, but was not successfully executed." >&2
+    print_red "💀 Proposal ${PROPOSAL_ID} exists, but was not successfully executed."
>&2
     exit 1
 fi
 SECONDS_AGO=$(($(date +%s) - "${EXECUTED_TIMESTAMP_SECONDS}"))
@@ -56,7 +56,7 @@ EXECUTED_ON=$(
         --date=@"${EXECUTED_TIMESTAMP_SECONDS}" \
         --iso-8601
 )
-print_purple "Proposal ${PROPOSAL_ID} was executed ${SECONDS_AGO} seconds ago." >&2
+print_cyan "🗳️ Proposal ${PROPOSAL_ID} was executed ${SECONDS_AGO} seconds ago." >&2
 
 # Extract which canister was upgraded, and to what commit.
 TITLE=$(echo "${PROPOSAL_INFO}" | jq -r '.proposal[0].title[0]')
@@ -85,7 +85,7 @@ fi
 # Fail if the proposal's commit is not checked out.
 if [[ $(git rev-parse HEAD) != $DESTINATION_COMMIT_ID* ]]; then
     echo >&2
-    print_red "You currently have $(git rev-parse HEAD)" >&2
+    print_red "💀 You currently have $(git rev-parse HEAD)" >&2
     print_red "checked out, but this command only supports being run when" >&2
     print_red "the proposal's commit (${DESTINATION_COMMIT_ID}) is checked out." >&2
     exit 1
@@ -103,7 +103,7 @@ cd "${CANISTER_CODE_PATH}"
 # Assert that there is a CHANGELOG.md file.
 if [[ ! -e CHANGELOG.md ]]; then
     echo >&2
-    print_red "${CANISTER_NAME} has no CHANGELOG.md file." >&2
+    print_red "💀 ${CANISTER_NAME} has no CHANGELOG.md file." >&2
     exit 1
 fi
 # TODO: Also verify that unreleased_changelog.md exists.
@@ -117,7 +117,7 @@ NEW_FEATURES_AND_FIXES=$(
 )
 if [[ -z "${NEW_FEATURES_AND_FIXES}" ]]; then
     echo >&2
-    print_red "The ${CANISTER_NAME} canister has no information in its unreleased_changelog.md." >&2
+    print_red "💀 The ${CANISTER_NAME} canister has no information in its unreleased_changelog.md." >&2
     exit 1
 fi
 NEW_ENTRY="# ${EXECUTED_ON}: Proposal ${PROPOSAL_ID}
@@ -160,3 +160,8 @@ echo -n "${UNRELEASED_CHANGELOG_INTRODUCTION}
 ## Security
 """ \
     >unreleased_changelog.md
+
+echo >&2
+print_green '🎉 Success! Added new entry to CHANGELOG.md.' >&2
+print_cyan '💡 Run `git diff` to see the changes. If you are pleased, commit,' >&2
+print_cyan 'push, request review, and merge them into master, per usual.' >&2

From fe42b708759d3d76b543e7ad4699d0136c63e96d Mon Sep 17 00:00:00 2001
From: kpop-dfinity <125868903+kpop-dfinity@users.noreply.github.com>
Date: Tue, 14 Jan 2025 18:32:45 +0100
Subject: [PATCH 21/33] test(consensus): modify the test cases in ingress payload serialization/deserialization benchmarks (#3447)

Currently we only measure how long it takes to deserialize/serialize
ingress payloads with 2000/4000/6000/8000 messages, each of size 1KB.
With the changes in this PR we will instead measure the following test
cases:
1. 1000 messages, 4KB each
2. 2000 messages, 4KB each
3. 1 message, 4MB
4. 1 message, 8MB

which resemble the current limits on the ingress payload more closely,
at both extremes: the number of messages and the message size.

Also, in the deserialization benchmark we moved the `clone()` out of the
measured code, as it affects the measurements significantly.
---
 rs/consensus/benches/validate_payload.rs | 58 ++++++++++++++----------
 1 file changed, 35 insertions(+), 23 deletions(-)

diff --git a/rs/consensus/benches/validate_payload.rs b/rs/consensus/benches/validate_payload.rs
index 00d463268f0..db2b14d1507 100644
--- a/rs/consensus/benches/validate_payload.rs
+++ b/rs/consensus/benches/validate_payload.rs
@@ -10,7 +10,7 @@
 //!    in the past payloads, and the user signature is checked eventually, and
the message validates successfully -use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion}; use dkg::DkgDataPayload; use ic_artifact_pool::{consensus_pool::ConsensusPoolImpl, ingress_pool::IngressPoolImpl}; use ic_config::state_manager::Config as StateManagerConfig; @@ -80,7 +80,7 @@ const PAST_PAYLOAD_HEIGHT: u64 = 4; /// Ingress history size: 5 min worth of messages at 1000/sec = 300K. const INGRESS_HISTORY_SIZE: usize = 300_000; -fn run_test(_test_name: &str, test_fn: T) +fn run_test(test_fn: T) where T: FnOnce(Time, &mut ConsensusPoolImpl, &dyn PayloadBuilder), { @@ -233,13 +233,18 @@ fn setup_ingress_state(now: Time, state_manager: &mut StateManagerImpl) { /// Prepares the ingress payload which has 1K x specified number of /// SignedIngress messages. The payload is filled with the specified 'seed' /// bytes -fn prepare_ingress_payload(now: Time, message_count: usize, seed: u8) -> IngressPayload { +fn prepare_ingress_payload( + now: Time, + message_count: usize, + message_size: usize, + seed: u8, +) -> IngressPayload { let mut ingress_msgs = Vec::new(); let expiry = std::time::Duration::from_secs(MAX_INGRESS_TTL.as_secs() - 1); for i in 0..message_count { let ingress = SignedIngressBuilder::new() .method_name("provisional_create_canister_with_cycles") - .method_payload(vec![seed; INGRESS_MESSAGE_SIZE]) + .method_payload(vec![seed; message_size]) .nonce(i as u64) .expiry_time(now + expiry) .canister_id(IC_00) @@ -269,7 +274,7 @@ fn add_past_blocks( for i in 1..=to_add { let mut block = Block::from_parent(&parent); block.rank = Rank(i); - let ingress = prepare_ingress_payload(now, message_count, i as u8); + let ingress = prepare_ingress_payload(now, message_count, INGRESS_MESSAGE_SIZE, i as u8); block.payload = Payload::new( ic_types::crypto::crypto_hash, BlockPayload::Data(DataPayload { @@ -336,7 +341,6 @@ fn validate_payload_benchmark(criterion: &mut Criterion) { for message_count in (50..=850).step_by(50) { run_test( - "validate_payload_benchmark", |now: Time, consensus_pool: &mut ConsensusPoolImpl, payload_builder: &dyn PayloadBuilder| { @@ -344,7 +348,8 @@ fn validate_payload_benchmark(criterion: &mut Criterion) { let pool_reader = PoolReader::new(consensus_pool); let seed = CERTIFIED_HEIGHT + PAST_PAYLOAD_HEIGHT + 10; - let ingress = prepare_ingress_payload(now, message_count, seed as u8); + let ingress = + prepare_ingress_payload(now, message_count, INGRESS_MESSAGE_SIZE, seed as u8); let payload = Payload::new( ic_types::crypto::crypto_hash, BlockPayload::Data(DataPayload { @@ -359,8 +364,7 @@ fn validate_payload_benchmark(criterion: &mut Criterion) { }), ); - let name = format!("validate_payload_{}", message_count); - group.bench_function(&name, |bench| { + group.bench_function(format!("validate_payload_{}", message_count), |bench| { bench.iter(|| { validate_payload(now, &payload, &pool_reader, &tip, payload_builder) .expect("Invalid payload") @@ -372,31 +376,39 @@ fn validate_payload_benchmark(criterion: &mut Criterion) { } fn serialization_benchmark(criterion: &mut Criterion) { - let mut group = criterion.benchmark_group("serialization"); - group.sample_size(30); + let mut group = criterion.benchmark_group("ingress_payload_serialization_deserialization"); + group.sample_size(50); group.measurement_time(std::time::Duration::from_secs(10)); - for message_count in (2000..=8000).step_by(2000) { + for (message_count, message_size_kb, tag) in [ + (1_000, 4_000, "1000x4KB"), + (2_000, 
4_000, "2000x4KB"), + (1, 4_000_000, "1x4MB"), + (1, 8_000_000, "1x8MB"), + ] { run_test( - "serialization_benchmark", |now: Time, _: &mut ConsensusPoolImpl, _: &dyn PayloadBuilder| { let seed = CERTIFIED_HEIGHT + PAST_PAYLOAD_HEIGHT + 10; - let ingress = prepare_ingress_payload(now, message_count, seed as u8); - let name = format!("serialization_{}_kb_payload", message_count); - group.bench_function(&name, |bench| { + let ingress = + prepare_ingress_payload(now, message_count, message_size_kb, seed as u8); + + group.bench_function(format!("serialization_{tag}"), |bench| { bench.iter(|| { let proto: pb::IngressPayload = (&ingress).into(); black_box(proto); }) }); - let name = format!("deserialization_{}_kb_payload", message_count); - group.bench_function(&name, |bench| { + + group.bench_function(format!("deserialization_{tag}"), |bench| { let p: pb::IngressPayload = (&ingress).into(); - bench.iter(|| { - let proto = p.clone(); - let deser: IngressPayload = proto.try_into().unwrap(); - black_box(deser); - }) + bench.iter_batched( + || p.clone(), + |proto| { + let deser: IngressPayload = proto.try_into().unwrap(); + black_box(deser); + }, + BatchSize::LargeInput, + ) }); }, ) From 02cba760482130780672553020297cfa0b16cb64 Mon Sep 17 00:00:00 2001 From: Andre Popovitch Date: Tue, 14 Jan 2025 12:00:03 -0600 Subject: [PATCH 22/33] fix(nervous-system-agent): propagate candid encode errors (#3448) Previously we were panicking, even though there was already an error case we could be using to propagate these errors. Pointed out by @aterga --- rs/nervous_system/agent/src/agent_impl.rs | 2 +- rs/nervous_system/agent/src/lib.rs | 6 +++--- rs/nervous_system/agent/src/null_request.rs | 4 ++-- rs/nervous_system/agent/src/pocketic_impl.rs | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/rs/nervous_system/agent/src/agent_impl.rs b/rs/nervous_system/agent/src/agent_impl.rs index c70472a01af..586c7c2cbf5 100644 --- a/rs/nervous_system/agent/src/agent_impl.rs +++ b/rs/nervous_system/agent/src/agent_impl.rs @@ -25,7 +25,7 @@ impl CallCanisters for Agent { request: R, ) -> Result { let canister_id = canister_id.into(); - let request_bytes = request.payload(); + let request_bytes = request.payload().map_err(AgentCallError::CandidEncode)?; let response = if request.update() { let request = self .update(&canister_id, request.method()) diff --git a/rs/nervous_system/agent/src/lib.rs b/rs/nervous_system/agent/src/lib.rs index 2308ccd5adb..7bd9d86b2f9 100644 --- a/rs/nervous_system/agent/src/lib.rs +++ b/rs/nervous_system/agent/src/lib.rs @@ -22,7 +22,7 @@ mod sealed { pub trait Request: Send { fn method(&self) -> &'static str; fn update(&self) -> bool; - fn payload(&self) -> Vec; + fn payload(&self) -> Result, candid::Error>; type Response: CandidType + DeserializeOwned; } @@ -33,8 +33,8 @@ impl Request for R { fn update(&self) -> bool { Self::UPDATE } - fn payload(&self) -> Vec { - candid::encode_one(self).unwrap() + fn payload(&self) -> Result, candid::Error> { + candid::encode_one(self) } type Response = ::Response; diff --git a/rs/nervous_system/agent/src/null_request.rs b/rs/nervous_system/agent/src/null_request.rs index 93588d2bbe0..bb288477e8c 100644 --- a/rs/nervous_system/agent/src/null_request.rs +++ b/rs/nervous_system/agent/src/null_request.rs @@ -27,8 +27,8 @@ impl Request for NullRequest { fn update(&self) -> bool { self.update } - fn payload(&self) -> Vec { - Encode!().unwrap() + fn payload(&self) -> Result, candid::Error> { + Encode!() } type Response = T; diff --git 
a/rs/nervous_system/agent/src/pocketic_impl.rs b/rs/nervous_system/agent/src/pocketic_impl.rs index 44399fffdd6..d9e8b814408 100644 --- a/rs/nervous_system/agent/src/pocketic_impl.rs +++ b/rs/nervous_system/agent/src/pocketic_impl.rs @@ -27,7 +27,7 @@ impl CallCanisters for PocketIc { request: R, ) -> Result { let canister_id = canister_id.into(); - let request_bytes = request.payload(); + let request_bytes = request.payload().map_err(PocketIcCallError::CandidEncode)?; let response = if request.update() { self.update_call( canister_id, From dce918ac820c6f2faba1d28eacf60e1c037a7523 Mon Sep 17 00:00:00 2001 From: Nicolas Mattia Date: Tue, 14 Jan 2025 19:01:41 +0100 Subject: [PATCH 23/33] chore(IDX): bump rules_rust (#3449) This bumps to 0.54.1 which allows us to remove our backported fixes. --- Cargo.Bazel.Fuzzing.json.lock | 576 +++++++++++++++++++--------------- Cargo.Bazel.json.lock | 565 +++++++++++++++++++-------------- WORKSPACE.bazel | 7 +- bazel/rules_rust.patch | 45 --- 4 files changed, 656 insertions(+), 537 deletions(-) delete mode 100644 bazel/rules_rust.patch diff --git a/Cargo.Bazel.Fuzzing.json.lock b/Cargo.Bazel.Fuzzing.json.lock index bcbb23035a8..08fd98a11dc 100644 --- a/Cargo.Bazel.Fuzzing.json.lock +++ b/Cargo.Bazel.Fuzzing.json.lock @@ -1,5 +1,5 @@ { - "checksum": "8fd8fccec5a57eefbac6e6bc61cde6077b347dc896dd713bf649cb70bb4778b1", + "checksum": "befed3db2258ef5e97774c44951feb5c8ca098af84913123d4a8945afeac827c", "crates": { "abnf 0.12.0": { "name": "abnf", @@ -11122,19 +11122,19 @@ "target": "crossterm" } ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ { "id": "crossterm 0.27.0", "target": "crossterm" } ], - "aarch64-linux-android": [ + "aarch64-pc-windows-msvc": [ { "id": "crossterm 0.27.0", "target": "crossterm" } ], - "aarch64-pc-windows-msvc": [ + "aarch64-unknown-fuchsia": [ { "id": "crossterm 0.27.0", "target": "crossterm" @@ -11268,6 +11268,16 @@ "target": "wasm_bindgen" } ], + "wasm32-wasip1": [ + { + "id": "serde-wasm-bindgen 0.5.0", + "target": "serde_wasm_bindgen" + }, + { + "id": "wasm-bindgen 0.2.95", + "target": "wasm_bindgen" + } + ], "x86_64-apple-darwin": [ { "id": "crossterm 0.27.0", @@ -11280,25 +11290,25 @@ "target": "crossterm" } ], - "x86_64-fuchsia": [ + "x86_64-linux-android": [ { "id": "crossterm 0.27.0", "target": "crossterm" } ], - "x86_64-linux-android": [ + "x86_64-pc-windows-msvc": [ { "id": "crossterm 0.27.0", "target": "crossterm" } ], - "x86_64-pc-windows-msvc": [ + "x86_64-unknown-freebsd": [ { "id": "crossterm 0.27.0", "target": "crossterm" } ], - "x86_64-unknown-freebsd": [ + "x86_64-unknown-fuchsia": [ { "id": "crossterm 0.27.0", "target": "crossterm" @@ -11761,12 +11771,6 @@ "target": "iana_time_zone" } ], - "aarch64-fuchsia": [ - { - "id": "iana-time-zone 0.1.59", - "target": "iana_time_zone" - } - ], "aarch64-linux-android": [ { "id": "android-tzdata 0.1.1", @@ -11783,6 +11787,12 @@ "target": "windows_targets" } ], + "aarch64-unknown-fuchsia": [ + { + "id": "iana-time-zone 0.1.59", + "target": "iana_time_zone" + } + ], "aarch64-unknown-linux-gnu": [ { "id": "iana-time-zone 0.1.59", @@ -11891,12 +11901,6 @@ "target": "iana_time_zone" } ], - "x86_64-fuchsia": [ - { - "id": "iana-time-zone 0.1.59", - "target": "iana_time_zone" - } - ], "x86_64-linux-android": [ { "id": "android-tzdata 0.1.1", @@ -11919,6 +11923,12 @@ "target": "iana_time_zone" } ], + "x86_64-unknown-fuchsia": [ + { + "id": "iana-time-zone 0.1.59", + "target": "iana_time_zone" + } + ], "x86_64-unknown-linux-gnu": [ { "id": "iana-time-zone 0.1.59", @@ -15758,7 
+15768,7 @@ "target": "signal_hook_mio" } ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ { "id": "mio 0.8.10", "target": "mio" @@ -15772,7 +15782,17 @@ "target": "signal_hook_mio" } ], - "aarch64-linux-android": [ + "aarch64-pc-windows-msvc": [ + { + "id": "crossterm_winapi 0.9.1", + "target": "crossterm_winapi" + }, + { + "id": "winapi 0.3.9", + "target": "winapi" + } + ], + "aarch64-unknown-fuchsia": [ { "id": "mio 0.8.10", "target": "mio" @@ -15786,16 +15806,6 @@ "target": "signal_hook_mio" } ], - "aarch64-pc-windows-msvc": [ - { - "id": "crossterm_winapi 0.9.1", - "target": "crossterm_winapi" - }, - { - "id": "winapi 0.3.9", - "target": "winapi" - } - ], "aarch64-unknown-linux-gnu": [ { "id": "mio 0.8.10", @@ -16008,7 +16018,7 @@ "target": "signal_hook_mio" } ], - "x86_64-fuchsia": [ + "x86_64-linux-android": [ { "id": "mio 0.8.10", "target": "mio" @@ -16022,7 +16032,17 @@ "target": "signal_hook_mio" } ], - "x86_64-linux-android": [ + "x86_64-pc-windows-msvc": [ + { + "id": "crossterm_winapi 0.9.1", + "target": "crossterm_winapi" + }, + { + "id": "winapi 0.3.9", + "target": "winapi" + } + ], + "x86_64-unknown-freebsd": [ { "id": "mio 0.8.10", "target": "mio" @@ -16036,17 +16056,7 @@ "target": "signal_hook_mio" } ], - "x86_64-pc-windows-msvc": [ - { - "id": "crossterm_winapi 0.9.1", - "target": "crossterm_winapi" - }, - { - "id": "winapi 0.3.9", - "target": "winapi" - } - ], - "x86_64-unknown-freebsd": [ + "x86_64-unknown-fuchsia": [ { "id": "mio 0.8.10", "target": "mio" @@ -22607,19 +22617,19 @@ "target": "parking" } ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ { "id": "parking 2.1.1", "target": "parking" } ], - "aarch64-linux-android": [ + "aarch64-pc-windows-msvc": [ { "id": "parking 2.1.1", "target": "parking" } ], - "aarch64-pc-windows-msvc": [ + "aarch64-unknown-fuchsia": [ { "id": "parking 2.1.1", "target": "parking" @@ -22739,25 +22749,25 @@ "target": "parking" } ], - "x86_64-fuchsia": [ + "x86_64-linux-android": [ { "id": "parking 2.1.1", "target": "parking" } ], - "x86_64-linux-android": [ + "x86_64-pc-windows-msvc": [ { "id": "parking 2.1.1", "target": "parking" } ], - "x86_64-pc-windows-msvc": [ + "x86_64-unknown-freebsd": [ { "id": "parking 2.1.1", "target": "parking" } ], - "x86_64-unknown-freebsd": [ + "x86_64-unknown-fuchsia": [ { "id": "parking 2.1.1", "target": "parking" @@ -22860,19 +22870,19 @@ "target": "parking" } ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ { "id": "parking 2.1.1", "target": "parking" } ], - "aarch64-linux-android": [ + "aarch64-pc-windows-msvc": [ { "id": "parking 2.1.1", "target": "parking" } ], - "aarch64-pc-windows-msvc": [ + "aarch64-unknown-fuchsia": [ { "id": "parking 2.1.1", "target": "parking" @@ -22992,25 +23002,25 @@ "target": "parking" } ], - "x86_64-fuchsia": [ + "x86_64-linux-android": [ { "id": "parking 2.1.1", "target": "parking" } ], - "x86_64-linux-android": [ + "x86_64-pc-windows-msvc": [ { "id": "parking 2.1.1", "target": "parking" } ], - "x86_64-pc-windows-msvc": [ + "x86_64-unknown-freebsd": [ { "id": "parking 2.1.1", "target": "parking" } ], - "x86_64-unknown-freebsd": [ + "x86_64-unknown-fuchsia": [ { "id": "parking 2.1.1", "target": "parking" @@ -29105,15 +29115,15 @@ "webpki-roots", "webpki-tokio" ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ "webpki-roots", "webpki-tokio" ], - "aarch64-linux-android": [ + "aarch64-pc-windows-msvc": [ "webpki-roots", "webpki-tokio" ], - "aarch64-pc-windows-msvc": [ + "aarch64-unknown-fuchsia": [ "webpki-roots", "webpki-tokio" ], @@ -29193,10 +29203,6 
@@ "webpki-roots", "webpki-tokio" ], - "x86_64-fuchsia": [ - "webpki-roots", - "webpki-tokio" - ], "x86_64-linux-android": [ "webpki-roots", "webpki-tokio" @@ -29209,6 +29215,10 @@ "webpki-roots", "webpki-tokio" ], + "x86_64-unknown-fuchsia": [ + "webpki-roots", + "webpki-tokio" + ], "x86_64-unknown-linux-gnu": [ "webpki-roots", "webpki-tokio" @@ -29290,19 +29300,19 @@ "target": "webpki_roots" } ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ { "id": "webpki-roots 0.26.1", "target": "webpki_roots" } ], - "aarch64-linux-android": [ + "aarch64-pc-windows-msvc": [ { "id": "webpki-roots 0.26.1", "target": "webpki_roots" } ], - "aarch64-pc-windows-msvc": [ + "aarch64-unknown-fuchsia": [ { "id": "webpki-roots 0.26.1", "target": "webpki_roots" @@ -29422,25 +29432,25 @@ "target": "webpki_roots" } ], - "x86_64-fuchsia": [ + "x86_64-linux-android": [ { "id": "webpki-roots 0.26.1", "target": "webpki_roots" } ], - "x86_64-linux-android": [ + "x86_64-pc-windows-msvc": [ { "id": "webpki-roots 0.26.1", "target": "webpki_roots" } ], - "x86_64-pc-windows-msvc": [ + "x86_64-unknown-freebsd": [ { "id": "webpki-roots 0.26.1", "target": "webpki_roots" } ], - "x86_64-unknown-freebsd": [ + "x86_64-unknown-fuchsia": [ { "id": "webpki-roots 0.26.1", "target": "webpki_roots" @@ -33041,7 +33051,9 @@ "version": "1.5.0" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "icu_locid 1.5.0": { @@ -33113,7 +33125,9 @@ "version": "1.5.0" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "icu_locid_transform 1.5.0": { @@ -33189,7 +33203,9 @@ "version": "1.5.0" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "icu_locid_transform_data 1.5.0": { @@ -33225,7 +33241,9 @@ "version": "1.5.0" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "icu_normalizer 1.5.0": { @@ -33318,7 +33336,9 @@ "version": "1.5.0" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "icu_normalizer_data 1.5.0": { @@ -33354,7 +33374,9 @@ "version": "1.5.0" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "icu_properties 1.5.1": { @@ -33435,7 +33457,9 @@ "version": "1.5.1" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "icu_properties_data 1.5.0": { @@ -33471,7 +33495,9 @@ "version": "1.5.0" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "icu_provider 1.5.0": { @@ -33559,7 +33585,9 @@ "version": "1.5.0" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "icu_provider_macros 1.5.0": { @@ -33612,7 +33640,9 @@ "version": "1.5.0" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "id-arena 2.2.1": { @@ -39107,7 +39137,9 @@ "version": "0.7.3" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "little-loadshedder 0.2.0": { @@ -41709,10 +41741,10 @@ "aarch64-apple-ios-sim": [ "os-ext" ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ "os-ext" ], - "aarch64-linux-android": [ + "aarch64-unknown-fuchsia": [ 
"os-ext" ], "aarch64-unknown-linux-gnu": [ @@ -41757,15 +41789,15 @@ "x86_64-apple-ios": [ "os-ext" ], - "x86_64-fuchsia": [ - "os-ext" - ], "x86_64-linux-android": [ "os-ext" ], "x86_64-unknown-freebsd": [ "os-ext" ], + "x86_64-unknown-fuchsia": [ + "os-ext" + ], "x86_64-unknown-linux-gnu": [ "os-ext" ], @@ -53715,13 +53747,13 @@ "target": "libc" } ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ { "id": "libc 0.2.158", "target": "libc" } ], - "aarch64-linux-android": [ + "aarch64-unknown-fuchsia": [ { "id": "libc 0.2.158", "target": "libc" @@ -53811,19 +53843,19 @@ "target": "libc" } ], - "x86_64-fuchsia": [ + "x86_64-linux-android": [ { "id": "libc 0.2.158", "target": "libc" } ], - "x86_64-linux-android": [ + "x86_64-unknown-freebsd": [ { "id": "libc 0.2.158", "target": "libc" } ], - "x86_64-unknown-freebsd": [ + "x86_64-unknown-fuchsia": [ { "id": "libc 0.2.158", "target": "libc" @@ -56068,7 +56100,7 @@ "target": "webpki_roots" } ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ { "id": "async-compression 0.4.4", "target": "async_compression" @@ -56098,7 +56130,7 @@ "target": "webpki_roots" } ], - "aarch64-linux-android": [ + "aarch64-pc-windows-msvc": [ { "id": "async-compression 0.4.4", "target": "async_compression" @@ -56128,7 +56160,7 @@ "target": "webpki_roots" } ], - "aarch64-pc-windows-msvc": [ + "aarch64-unknown-fuchsia": [ { "id": "async-compression 0.4.4", "target": "async_compression" @@ -56804,7 +56836,7 @@ "target": "webpki_roots" } ], - "x86_64-fuchsia": [ + "x86_64-linux-android": [ { "id": "async-compression 0.4.4", "target": "async_compression" @@ -56834,7 +56866,7 @@ "target": "webpki_roots" } ], - "x86_64-linux-android": [ + "x86_64-pc-windows-msvc": [ { "id": "async-compression 0.4.4", "target": "async_compression" @@ -56864,7 +56896,7 @@ "target": "webpki_roots" } ], - "x86_64-pc-windows-msvc": [ + "x86_64-unknown-freebsd": [ { "id": "async-compression 0.4.4", "target": "async_compression" @@ -56894,7 +56926,7 @@ "target": "webpki_roots" } ], - "x86_64-unknown-freebsd": [ + "x86_64-unknown-fuchsia": [ { "id": "async-compression 0.4.4", "target": "async_compression" @@ -57278,7 +57310,7 @@ "target": "webpki_roots" } ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ { "id": "futures-channel 0.3.31", "target": "futures_channel" @@ -57328,7 +57360,7 @@ "target": "webpki_roots" } ], - "aarch64-linux-android": [ + "aarch64-pc-windows-msvc": [ { "id": "futures-channel 0.3.31", "target": "futures_channel" @@ -57378,7 +57410,7 @@ "target": "webpki_roots" } ], - "aarch64-pc-windows-msvc": [ + "aarch64-unknown-fuchsia": [ { "id": "futures-channel 0.3.31", "target": "futures_channel" @@ -58360,6 +58392,12 @@ "target": "wasm_streams" } ], + "wasm32-wasip1": [ + { + "id": "wasm-streams 0.4.0", + "target": "wasm_streams" + } + ], "x86_64-apple-darwin": [ { "id": "futures-channel 0.3.31", @@ -58460,7 +58498,7 @@ "target": "webpki_roots" } ], - "x86_64-fuchsia": [ + "x86_64-linux-android": [ { "id": "futures-channel 0.3.31", "target": "futures_channel" @@ -58510,7 +58548,7 @@ "target": "webpki_roots" } ], - "x86_64-linux-android": [ + "x86_64-pc-windows-msvc": [ { "id": "futures-channel 0.3.31", "target": "futures_channel" @@ -58560,7 +58598,7 @@ "target": "webpki_roots" } ], - "x86_64-pc-windows-msvc": [ + "x86_64-unknown-freebsd": [ { "id": "futures-channel 0.3.31", "target": "futures_channel" @@ -58610,7 +58648,7 @@ "target": "webpki_roots" } ], - "x86_64-unknown-freebsd": [ + "x86_64-unknown-fuchsia": [ { "id": "futures-channel 0.3.31", "target": 
"futures_channel" @@ -60728,7 +60766,7 @@ "time", "use-libc-auxv" ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ "default", "event", "mm", @@ -60738,7 +60776,7 @@ "time", "use-libc-auxv" ], - "aarch64-linux-android": [ + "aarch64-unknown-fuchsia": [ "default", "event", "mm", @@ -60914,6 +60952,11 @@ "termios", "use-libc-auxv" ], + "wasm32-wasip1": [ + "default", + "termios", + "use-libc-auxv" + ], "x86_64-apple-darwin": [ "default", "event", @@ -60934,7 +60977,7 @@ "time", "use-libc-auxv" ], - "x86_64-fuchsia": [ + "x86_64-linux-android": [ "default", "event", "mm", @@ -60944,7 +60987,7 @@ "time", "use-libc-auxv" ], - "x86_64-linux-android": [ + "x86_64-unknown-freebsd": [ "default", "event", "mm", @@ -60954,7 +60997,7 @@ "time", "use-libc-auxv" ], - "x86_64-unknown-freebsd": [ + "x86_64-unknown-fuchsia": [ "default", "event", "mm", @@ -61042,7 +61085,7 @@ "target": "libc" } ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ { "id": "errno 0.3.8", "target": "errno", @@ -61053,22 +61096,22 @@ "target": "libc" } ], - "aarch64-linux-android": [ + "aarch64-pc-windows-msvc": [ { "id": "errno 0.3.8", "target": "errno", "alias": "libc_errno" - }, - { - "id": "libc 0.2.158", - "target": "libc" } ], - "aarch64-pc-windows-msvc": [ + "aarch64-unknown-fuchsia": [ { "id": "errno 0.3.8", "target": "errno", "alias": "libc_errno" + }, + { + "id": "libc 0.2.158", + "target": "libc" } ], "aarch64-unknown-nto-qnx710": [ @@ -61255,7 +61298,7 @@ "target": "libc" } ], - "x86_64-apple-darwin": [ + "wasm32-wasip1": [ { "id": "errno 0.3.8", "target": "errno", @@ -61266,7 +61309,7 @@ "target": "libc" } ], - "x86_64-apple-ios": [ + "x86_64-apple-darwin": [ { "id": "errno 0.3.8", "target": "errno", @@ -61277,7 +61320,7 @@ "target": "libc" } ], - "x86_64-fuchsia": [ + "x86_64-apple-ios": [ { "id": "errno 0.3.8", "target": "errno", @@ -61317,6 +61360,17 @@ "target": "libc" } ], + "x86_64-unknown-fuchsia": [ + { + "id": "errno 0.3.8", + "target": "errno", + "alias": "libc_errno" + }, + { + "id": "libc 0.2.158", + "target": "libc" + } + ], "x86_64-unknown-none": [ { "id": "errno 0.3.8", @@ -67814,15 +67868,15 @@ "aarch64-apple-ios-sim": [ "once" ], - "aarch64-fuchsia": [ - "once" - ], "aarch64-linux-android": [ "once" ], "aarch64-pc-windows-msvc": [ "once" ], + "aarch64-unknown-fuchsia": [ + "once" + ], "aarch64-unknown-linux-gnu": [ "once" ], @@ -67868,9 +67922,6 @@ "x86_64-apple-ios": [ "once" ], - "x86_64-fuchsia": [ - "once" - ], "x86_64-linux-android": [ "once" ], @@ -67880,6 +67931,9 @@ "x86_64-unknown-freebsd": [ "once" ], + "x86_64-unknown-fuchsia": [ + "once" + ], "x86_64-unknown-linux-gnu": [ "once" ], @@ -70030,13 +70084,13 @@ "target": "xattr" } ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ { "id": "xattr 0.2.3", "target": "xattr" } ], - "aarch64-linux-android": [ + "aarch64-unknown-fuchsia": [ { "id": "xattr 0.2.3", "target": "xattr" @@ -70132,19 +70186,19 @@ "target": "xattr" } ], - "x86_64-fuchsia": [ + "x86_64-linux-android": [ { "id": "xattr 0.2.3", "target": "xattr" } ], - "x86_64-linux-android": [ + "x86_64-unknown-freebsd": [ { "id": "xattr 0.2.3", "target": "xattr" } ], - "x86_64-unknown-freebsd": [ + "x86_64-unknown-fuchsia": [ { "id": "xattr 0.2.3", "target": "xattr" @@ -71828,7 +71882,7 @@ "target": "num_threads" } ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ { "id": "libc 0.2.158", "target": "libc" @@ -71838,7 +71892,7 @@ "target": "num_threads" } ], - "aarch64-linux-android": [ + "aarch64-unknown-fuchsia": [ { "id": "libc 0.2.158", "target": "libc" @@ -71988,7 
+72042,7 @@ "target": "num_threads" } ], - "x86_64-fuchsia": [ + "x86_64-linux-android": [ { "id": "libc 0.2.158", "target": "libc" @@ -71998,7 +72052,7 @@ "target": "num_threads" } ], - "x86_64-linux-android": [ + "x86_64-unknown-freebsd": [ { "id": "libc 0.2.158", "target": "libc" @@ -72008,7 +72062,7 @@ "target": "num_threads" } ], - "x86_64-unknown-freebsd": [ + "x86_64-unknown-fuchsia": [ { "id": "libc 0.2.158", "target": "libc" @@ -72336,7 +72390,9 @@ "version": "0.7.6" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "tinytemplate 1.2.1": { @@ -72719,7 +72775,7 @@ "target": "socket2" } ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ { "id": "libc 0.2.158", "target": "libc" @@ -72733,7 +72789,17 @@ "target": "socket2" } ], - "aarch64-linux-android": [ + "aarch64-pc-windows-msvc": [ + { + "id": "socket2 0.5.7", + "target": "socket2" + }, + { + "id": "windows-sys 0.52.0", + "target": "windows_sys" + } + ], + "aarch64-unknown-fuchsia": [ { "id": "libc 0.2.158", "target": "libc" @@ -72747,16 +72813,6 @@ "target": "socket2" } ], - "aarch64-pc-windows-msvc": [ - { - "id": "socket2 0.5.7", - "target": "socket2" - }, - { - "id": "windows-sys 0.52.0", - "target": "windows_sys" - } - ], "aarch64-unknown-linux-gnu": [ { "id": "libc 0.2.158", @@ -72993,7 +73049,7 @@ "target": "socket2" } ], - "x86_64-fuchsia": [ + "x86_64-linux-android": [ { "id": "libc 0.2.158", "target": "libc" @@ -73007,7 +73063,17 @@ "target": "socket2" } ], - "x86_64-linux-android": [ + "x86_64-pc-windows-msvc": [ + { + "id": "socket2 0.5.7", + "target": "socket2" + }, + { + "id": "windows-sys 0.52.0", + "target": "windows_sys" + } + ], + "x86_64-unknown-freebsd": [ { "id": "libc 0.2.158", "target": "libc" @@ -73021,17 +73087,7 @@ "target": "socket2" } ], - "x86_64-pc-windows-msvc": [ - { - "id": "socket2 0.5.7", - "target": "socket2" - }, - { - "id": "windows-sys 0.52.0", - "target": "windows_sys" - } - ], - "x86_64-unknown-freebsd": [ + "x86_64-unknown-fuchsia": [ { "id": "libc 0.2.158", "target": "libc" @@ -79939,12 +79995,6 @@ "target": "rustix" } ], - "aarch64-fuchsia": [ - { - "id": "rustix 0.38.32", - "target": "rustix" - } - ], "aarch64-linux-android": [ { "id": "rustix 0.38.32", @@ -79957,6 +80007,12 @@ "target": "windows_sys" } ], + "aarch64-unknown-fuchsia": [ + { + "id": "rustix 0.38.32", + "target": "rustix" + } + ], "aarch64-unknown-linux-gnu": [ { "id": "memfd 0.6.4", @@ -80087,12 +80143,6 @@ "target": "rustix" } ], - "x86_64-fuchsia": [ - { - "id": "rustix 0.38.32", - "target": "rustix" - } - ], "x86_64-linux-android": [ { "id": "rustix 0.38.32", @@ -80111,6 +80161,12 @@ "target": "rustix" } ], + "x86_64-unknown-fuchsia": [ + { + "id": "rustix 0.38.32", + "target": "rustix" + } + ], "x86_64-unknown-linux-gnu": [ { "id": "memfd 0.6.4", @@ -84734,7 +84790,9 @@ "version": "0.5.5" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "wsl 0.1.0": { @@ -85397,7 +85455,9 @@ "version": "0.7.4" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "yoke-derive 0.7.4": { @@ -85454,7 +85514,9 @@ "version": "0.7.4" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "zerocopy 0.7.32": { @@ -85620,7 +85682,9 @@ "version": "0.1.4" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, 
"zerofrom-derive 0.1.4": { @@ -85677,7 +85741,9 @@ "version": "0.1.4" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "zeroize 1.8.1": { @@ -85855,7 +85921,9 @@ "version": "0.10.4" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "zerovec-derive 0.10.3": { @@ -85908,7 +85976,9 @@ "version": "0.10.3" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "zstd 0.13.2": { @@ -86166,9 +86236,6 @@ "aarch64-apple-ios-sim": [ "aarch64-apple-ios-sim" ], - "aarch64-fuchsia": [ - "aarch64-fuchsia" - ], "aarch64-linux-android": [ "aarch64-linux-android" ], @@ -86176,6 +86243,9 @@ "aarch64-pc-windows-msvc": [ "aarch64-pc-windows-msvc" ], + "aarch64-unknown-fuchsia": [ + "aarch64-unknown-fuchsia" + ], "aarch64-unknown-linux-gnu": [ "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu" @@ -86218,10 +86288,10 @@ "cfg(all(not(curve25519_dalek_backend = \"fiat\"), not(curve25519_dalek_backend = \"serial\"), target_arch = \"x86_64\"))": [ "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-pc-windows-msvc", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86239,8 +86309,8 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", + "aarch64-unknown-fuchsia", "aarch64-unknown-nto-qnx710", "armv7-linux-androideabi", "i686-apple-darwin", @@ -86254,11 +86324,12 @@ "thumbv8m.main-none-eabi", "wasm32-unknown-unknown", "wasm32-wasi", + "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-none" ], "cfg(all(target_arch = \"aarch64\", target_env = \"msvc\", not(windows_raw_dylib)))": [ @@ -86283,7 +86354,8 @@ "wasm32-unknown-unknown" ], "cfg(all(target_arch = \"wasm32\", target_os = \"wasi\"))": [ - "wasm32-wasi" + "wasm32-wasi", + "wasm32-wasip1" ], "cfg(all(target_arch = \"wasm32\", target_vendor = \"unknown\", target_os = \"unknown\", target_env = \"\"))": [ "wasm32-unknown-unknown" @@ -86311,8 +86383,8 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86327,14 +86399,14 @@ "s390x-unknown-linux-gnu", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu" ], "cfg(all(unix, not(target_os = \"android\"), not(target_vendor = \"apple\"), not(target_arch = \"wasm32\")))": [ - "aarch64-fuchsia", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86344,16 +86416,16 @@ "i686-unknown-linux-gnu", "powerpc-unknown-linux-gnu", "s390x-unknown-linux-gnu", - "x86_64-fuchsia", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu" ], "cfg(all(unix, not(target_os = \"macos\")))": [ "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", 
"aarch64-unknown-nto-qnx710", @@ -86366,9 +86438,9 @@ "powerpc-unknown-linux-gnu", "s390x-unknown-linux-gnu", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu" ], @@ -86377,9 +86449,9 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", "aarch64-pc-windows-msvc", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86395,10 +86467,10 @@ "thumbv8m.main-none-eabi", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-pc-windows-msvc", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86407,9 +86479,9 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", "aarch64-pc-windows-msvc", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86420,10 +86492,10 @@ "i686-unknown-linux-gnu", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-pc-windows-msvc", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86432,9 +86504,9 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", "aarch64-pc-windows-msvc", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86445,10 +86517,10 @@ "i686-unknown-linux-gnu", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-pc-windows-msvc", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86465,17 +86537,17 @@ "i686-unknown-linux-gnu", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-pc-windows-msvc", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" ], "cfg(any(target_arch = \"x86\", target_arch = \"x86_64\", all(any(target_arch = \"aarch64\", target_arch = \"arm\"), any(target_os = \"android\", target_os = \"fuchsia\", target_os = \"linux\"))))": [ - "aarch64-fuchsia", "aarch64-linux-android", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "arm-unknown-linux-gnueabi", @@ -86488,10 +86560,10 @@ "i686-unknown-linux-gnu", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-pc-windows-msvc", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86504,10 +86576,10 @@ "i686-unknown-linux-gnu", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-pc-windows-msvc", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86550,9 +86622,9 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", "aarch64-pc-windows-msvc", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", 
"arm-unknown-linux-gnueabi", @@ -86566,12 +86638,13 @@ "powerpc-unknown-linux-gnu", "s390x-unknown-linux-gnu", "wasm32-wasi", + "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-pc-windows-msvc", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu" ], @@ -86625,8 +86698,8 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86641,9 +86714,9 @@ "s390x-unknown-linux-gnu", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu" ], @@ -86651,8 +86724,8 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86667,9 +86740,9 @@ "s390x-unknown-linux-gnu", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu" ], @@ -86677,8 +86750,8 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86692,11 +86765,12 @@ "powerpc-unknown-linux-gnu", "s390x-unknown-linux-gnu", "wasm32-wasi", + "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu" ], @@ -86704,9 +86778,9 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", "aarch64-pc-windows-msvc", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86722,10 +86796,10 @@ "s390x-unknown-linux-gnu", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-pc-windows-msvc", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu" ], @@ -86734,9 +86808,9 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", "aarch64-pc-windows-msvc", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86754,12 +86828,13 @@ "s390x-unknown-linux-gnu", "wasm32-unknown-unknown", "wasm32-wasi", + "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-pc-windows-msvc", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86768,8 +86843,8 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86788,18 +86863,19 @@ "thumbv8m.main-none-eabi", "wasm32-unknown-unknown", "wasm32-wasi", + "wasm32-wasip1", "x86_64-apple-darwin", 
"x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" ], "cfg(not(any(target_os = \"macos\", target_os = \"ios\", target_os = \"windows\", target_arch = \"wasm32\")))": [ - "aarch64-fuchsia", "aarch64-linux-android", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86815,9 +86891,9 @@ "s390x-unknown-linux-gnu", "thumbv7em-none-eabi", "thumbv8m.main-none-eabi", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86826,8 +86902,8 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86846,9 +86922,9 @@ "thumbv8m.main-none-eabi", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86857,8 +86933,8 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86876,11 +86952,12 @@ "thumbv7em-none-eabi", "thumbv8m.main-none-eabi", "wasm32-wasi", + "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86889,9 +86966,9 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", "aarch64-pc-windows-msvc", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86911,10 +86988,10 @@ "thumbv8m.main-none-eabi", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-pc-windows-msvc", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86923,9 +87000,9 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", "aarch64-pc-windows-msvc", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86945,10 +87022,10 @@ "thumbv8m.main-none-eabi", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-pc-windows-msvc", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86957,8 +87034,8 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86977,11 +87054,12 @@ "thumbv8m.main-none-eabi", "wasm32-unknown-unknown", "wasm32-wasi", + "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", 
"x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86990,9 +87068,9 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", "aarch64-pc-windows-msvc", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -87012,12 +87090,13 @@ "thumbv8m.main-none-eabi", "wasm32-unknown-unknown", "wasm32-wasi", + "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-pc-windows-msvc", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -87027,16 +87106,17 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", "aarch64-pc-windows-msvc", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710" ], "cfg(target_arch = \"wasm32\")": [ "wasm32-unknown-unknown", - "wasm32-wasi" + "wasm32-wasi", + "wasm32-wasip1" ], "cfg(target_arch = \"x86\")": [ "i686-apple-darwin", @@ -87048,10 +87128,10 @@ "cfg(target_arch = \"x86_64\")": [ "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-pc-windows-msvc", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -87072,8 +87152,8 @@ "cfg(target_os = \"cloudabi\")": [], "cfg(target_os = \"dragonfly\")": [], "cfg(target_os = \"fuchsia\")": [ - "aarch64-fuchsia", - "x86_64-fuchsia" + "aarch64-unknown-fuchsia", + "x86_64-unknown-fuchsia" ], "cfg(target_os = \"haiku\")": [], "cfg(target_os = \"hermit\")": [], @@ -87100,7 +87180,8 @@ ], "cfg(target_os = \"redox\")": [], "cfg(target_os = \"wasi\")": [ - "wasm32-wasi" + "wasm32-wasi", + "wasm32-wasip1" ], "cfg(target_os = \"windows\")": [ "aarch64-pc-windows-msvc", @@ -87112,8 +87193,8 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -87128,9 +87209,9 @@ "s390x-unknown-linux-gnu", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu" ], @@ -87182,15 +87263,15 @@ "wasm32-wasi": [ "wasm32-wasi" ], + "wasm32-wasip1": [ + "wasm32-wasip1" + ], "x86_64-apple-darwin": [ "x86_64-apple-darwin" ], "x86_64-apple-ios": [ "x86_64-apple-ios" ], - "x86_64-fuchsia": [ - "x86_64-fuchsia" - ], "x86_64-linux-android": [ "x86_64-linux-android" ], @@ -87202,6 +87283,9 @@ "x86_64-unknown-freebsd": [ "x86_64-unknown-freebsd" ], + "x86_64-unknown-fuchsia": [ + "x86_64-unknown-fuchsia" + ], "x86_64-unknown-linux-gnu": [ "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu" diff --git a/Cargo.Bazel.json.lock b/Cargo.Bazel.json.lock index d5c0c188242..b5e8dae02b1 100644 --- a/Cargo.Bazel.json.lock +++ b/Cargo.Bazel.json.lock @@ -1,5 +1,5 @@ { - "checksum": "194342fc37fdddbf3dceb1e96cdf4ce10a63ff4ec17867d1a241e3e034c1ad0c", + "checksum": "16c350a57ca08666035e4f0e31f17c9715860d84ed6755b026870a84de503a09", "crates": { "abnf 0.12.0": { "name": "abnf", @@ -11018,19 +11018,19 @@ "target": "crossterm" } ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ { "id": "crossterm 0.27.0", "target": "crossterm" 
} ], - "aarch64-linux-android": [ + "aarch64-pc-windows-msvc": [ { "id": "crossterm 0.27.0", "target": "crossterm" } ], - "aarch64-pc-windows-msvc": [ + "aarch64-unknown-fuchsia": [ { "id": "crossterm 0.27.0", "target": "crossterm" @@ -11164,6 +11164,16 @@ "target": "wasm_bindgen" } ], + "wasm32-wasip1": [ + { + "id": "serde-wasm-bindgen 0.5.0", + "target": "serde_wasm_bindgen" + }, + { + "id": "wasm-bindgen 0.2.95", + "target": "wasm_bindgen" + } + ], "x86_64-apple-darwin": [ { "id": "crossterm 0.27.0", @@ -11176,25 +11186,25 @@ "target": "crossterm" } ], - "x86_64-fuchsia": [ + "x86_64-linux-android": [ { "id": "crossterm 0.27.0", "target": "crossterm" } ], - "x86_64-linux-android": [ + "x86_64-pc-windows-msvc": [ { "id": "crossterm 0.27.0", "target": "crossterm" } ], - "x86_64-pc-windows-msvc": [ + "x86_64-unknown-freebsd": [ { "id": "crossterm 0.27.0", "target": "crossterm" } ], - "x86_64-unknown-freebsd": [ + "x86_64-unknown-fuchsia": [ { "id": "crossterm 0.27.0", "target": "crossterm" @@ -11657,12 +11667,6 @@ "target": "iana_time_zone" } ], - "aarch64-fuchsia": [ - { - "id": "iana-time-zone 0.1.59", - "target": "iana_time_zone" - } - ], "aarch64-linux-android": [ { "id": "android-tzdata 0.1.1", @@ -11679,6 +11683,12 @@ "target": "windows_targets" } ], + "aarch64-unknown-fuchsia": [ + { + "id": "iana-time-zone 0.1.59", + "target": "iana_time_zone" + } + ], "aarch64-unknown-linux-gnu": [ { "id": "iana-time-zone 0.1.59", @@ -11787,12 +11797,6 @@ "target": "iana_time_zone" } ], - "x86_64-fuchsia": [ - { - "id": "iana-time-zone 0.1.59", - "target": "iana_time_zone" - } - ], "x86_64-linux-android": [ { "id": "android-tzdata 0.1.1", @@ -11815,6 +11819,12 @@ "target": "iana_time_zone" } ], + "x86_64-unknown-fuchsia": [ + { + "id": "iana-time-zone 0.1.59", + "target": "iana_time_zone" + } + ], "x86_64-unknown-linux-gnu": [ { "id": "iana-time-zone 0.1.59", @@ -15586,7 +15596,7 @@ "target": "signal_hook_mio" } ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ { "id": "mio 0.8.10", "target": "mio" @@ -15600,7 +15610,17 @@ "target": "signal_hook_mio" } ], - "aarch64-linux-android": [ + "aarch64-pc-windows-msvc": [ + { + "id": "crossterm_winapi 0.9.1", + "target": "crossterm_winapi" + }, + { + "id": "winapi 0.3.9", + "target": "winapi" + } + ], + "aarch64-unknown-fuchsia": [ { "id": "mio 0.8.10", "target": "mio" @@ -15614,16 +15634,6 @@ "target": "signal_hook_mio" } ], - "aarch64-pc-windows-msvc": [ - { - "id": "crossterm_winapi 0.9.1", - "target": "crossterm_winapi" - }, - { - "id": "winapi 0.3.9", - "target": "winapi" - } - ], "aarch64-unknown-linux-gnu": [ { "id": "mio 0.8.10", @@ -15836,7 +15846,7 @@ "target": "signal_hook_mio" } ], - "x86_64-fuchsia": [ + "x86_64-linux-android": [ { "id": "mio 0.8.10", "target": "mio" @@ -15850,7 +15860,17 @@ "target": "signal_hook_mio" } ], - "x86_64-linux-android": [ + "x86_64-pc-windows-msvc": [ + { + "id": "crossterm_winapi 0.9.1", + "target": "crossterm_winapi" + }, + { + "id": "winapi 0.3.9", + "target": "winapi" + } + ], + "x86_64-unknown-freebsd": [ { "id": "mio 0.8.10", "target": "mio" @@ -15864,17 +15884,7 @@ "target": "signal_hook_mio" } ], - "x86_64-pc-windows-msvc": [ - { - "id": "crossterm_winapi 0.9.1", - "target": "crossterm_winapi" - }, - { - "id": "winapi 0.3.9", - "target": "winapi" - } - ], - "x86_64-unknown-freebsd": [ + "x86_64-unknown-fuchsia": [ { "id": "mio 0.8.10", "target": "mio" @@ -22458,19 +22468,19 @@ "target": "parking" } ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ { "id": "parking 2.1.0", "target": "parking" } 
], - "aarch64-linux-android": [ + "aarch64-pc-windows-msvc": [ { "id": "parking 2.1.0", "target": "parking" } ], - "aarch64-pc-windows-msvc": [ + "aarch64-unknown-fuchsia": [ { "id": "parking 2.1.0", "target": "parking" @@ -22590,25 +22600,25 @@ "target": "parking" } ], - "x86_64-fuchsia": [ + "x86_64-linux-android": [ { "id": "parking 2.1.0", "target": "parking" } ], - "x86_64-linux-android": [ + "x86_64-pc-windows-msvc": [ { "id": "parking 2.1.0", "target": "parking" } ], - "x86_64-pc-windows-msvc": [ + "x86_64-unknown-freebsd": [ { "id": "parking 2.1.0", "target": "parking" } ], - "x86_64-unknown-freebsd": [ + "x86_64-unknown-fuchsia": [ { "id": "parking 2.1.0", "target": "parking" @@ -22711,19 +22721,19 @@ "target": "parking" } ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ { "id": "parking 2.1.0", "target": "parking" } ], - "aarch64-linux-android": [ + "aarch64-pc-windows-msvc": [ { "id": "parking 2.1.0", "target": "parking" } ], - "aarch64-pc-windows-msvc": [ + "aarch64-unknown-fuchsia": [ { "id": "parking 2.1.0", "target": "parking" @@ -22843,25 +22853,25 @@ "target": "parking" } ], - "x86_64-fuchsia": [ + "x86_64-linux-android": [ { "id": "parking 2.1.0", "target": "parking" } ], - "x86_64-linux-android": [ + "x86_64-pc-windows-msvc": [ { "id": "parking 2.1.0", "target": "parking" } ], - "x86_64-pc-windows-msvc": [ + "x86_64-unknown-freebsd": [ { "id": "parking 2.1.0", "target": "parking" } ], - "x86_64-unknown-freebsd": [ + "x86_64-unknown-fuchsia": [ { "id": "parking 2.1.0", "target": "parking" @@ -28960,15 +28970,15 @@ "webpki-roots", "webpki-tokio" ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ "webpki-roots", "webpki-tokio" ], - "aarch64-linux-android": [ + "aarch64-pc-windows-msvc": [ "webpki-roots", "webpki-tokio" ], - "aarch64-pc-windows-msvc": [ + "aarch64-unknown-fuchsia": [ "webpki-roots", "webpki-tokio" ], @@ -29048,10 +29058,6 @@ "webpki-roots", "webpki-tokio" ], - "x86_64-fuchsia": [ - "webpki-roots", - "webpki-tokio" - ], "x86_64-linux-android": [ "webpki-roots", "webpki-tokio" @@ -29064,6 +29070,10 @@ "webpki-roots", "webpki-tokio" ], + "x86_64-unknown-fuchsia": [ + "webpki-roots", + "webpki-tokio" + ], "x86_64-unknown-linux-gnu": [ "webpki-roots", "webpki-tokio" @@ -29145,19 +29155,19 @@ "target": "webpki_roots" } ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ { "id": "webpki-roots 0.26.1", "target": "webpki_roots" } ], - "aarch64-linux-android": [ + "aarch64-pc-windows-msvc": [ { "id": "webpki-roots 0.26.1", "target": "webpki_roots" } ], - "aarch64-pc-windows-msvc": [ + "aarch64-unknown-fuchsia": [ { "id": "webpki-roots 0.26.1", "target": "webpki_roots" @@ -29277,25 +29287,25 @@ "target": "webpki_roots" } ], - "x86_64-fuchsia": [ + "x86_64-linux-android": [ { "id": "webpki-roots 0.26.1", "target": "webpki_roots" } ], - "x86_64-linux-android": [ + "x86_64-pc-windows-msvc": [ { "id": "webpki-roots 0.26.1", "target": "webpki_roots" } ], - "x86_64-pc-windows-msvc": [ + "x86_64-unknown-freebsd": [ { "id": "webpki-roots 0.26.1", "target": "webpki_roots" } ], - "x86_64-unknown-freebsd": [ + "x86_64-unknown-fuchsia": [ { "id": "webpki-roots 0.26.1", "target": "webpki_roots" @@ -32875,7 +32885,9 @@ "version": "1.5.0" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "icu_locid 1.5.0": { @@ -32947,7 +32959,9 @@ "version": "1.5.0" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "icu_locid_transform 1.5.0": { @@ 
-33023,7 +33037,9 @@ "version": "1.5.0" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "icu_locid_transform_data 1.5.0": { @@ -33059,7 +33075,9 @@ "version": "1.5.0" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "icu_normalizer 1.5.0": { @@ -33152,7 +33170,9 @@ "version": "1.5.0" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "icu_normalizer_data 1.5.0": { @@ -33188,7 +33208,9 @@ "version": "1.5.0" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "icu_properties 1.5.1": { @@ -33269,7 +33291,9 @@ "version": "1.5.1" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "icu_properties_data 1.5.0": { @@ -33305,7 +33329,9 @@ "version": "1.5.0" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "icu_provider 1.5.0": { @@ -33393,7 +33419,9 @@ "version": "1.5.0" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "icu_provider_macros 1.5.0": { @@ -33446,7 +33474,9 @@ "version": "1.5.0" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "id-arena 2.2.1": { @@ -38944,7 +38974,9 @@ "version": "0.7.3" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "little-loadshedder 0.2.0": { @@ -41549,10 +41581,10 @@ "aarch64-apple-ios-sim": [ "os-ext" ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ "os-ext" ], - "aarch64-linux-android": [ + "aarch64-unknown-fuchsia": [ "os-ext" ], "aarch64-unknown-linux-gnu": [ @@ -41597,15 +41629,15 @@ "x86_64-apple-ios": [ "os-ext" ], - "x86_64-fuchsia": [ - "os-ext" - ], "x86_64-linux-android": [ "os-ext" ], "x86_64-unknown-freebsd": [ "os-ext" ], + "x86_64-unknown-fuchsia": [ + "os-ext" + ], "x86_64-unknown-linux-gnu": [ "os-ext" ], @@ -53517,13 +53549,13 @@ "target": "libc" } ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ { "id": "libc 0.2.158", "target": "libc" } ], - "aarch64-linux-android": [ + "aarch64-unknown-fuchsia": [ { "id": "libc 0.2.158", "target": "libc" @@ -53613,19 +53645,19 @@ "target": "libc" } ], - "x86_64-fuchsia": [ + "x86_64-linux-android": [ { "id": "libc 0.2.158", "target": "libc" } ], - "x86_64-linux-android": [ + "x86_64-unknown-freebsd": [ { "id": "libc 0.2.158", "target": "libc" } ], - "x86_64-unknown-freebsd": [ + "x86_64-unknown-fuchsia": [ { "id": "libc 0.2.158", "target": "libc" @@ -55914,7 +55946,7 @@ "target": "webpki_roots" } ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ { "id": "async-compression 0.4.3", "target": "async_compression" @@ -55944,7 +55976,7 @@ "target": "webpki_roots" } ], - "aarch64-linux-android": [ + "aarch64-pc-windows-msvc": [ { "id": "async-compression 0.4.3", "target": "async_compression" @@ -55974,7 +56006,7 @@ "target": "webpki_roots" } ], - "aarch64-pc-windows-msvc": [ + "aarch64-unknown-fuchsia": [ { "id": "async-compression 0.4.3", "target": "async_compression" @@ -56650,7 +56682,7 @@ "target": "webpki_roots" } ], - "x86_64-fuchsia": [ + "x86_64-linux-android": [ { "id": "async-compression 0.4.3", "target": "async_compression" @@ -56680,7 +56712,7 @@ "target": "webpki_roots" } 
], - "x86_64-linux-android": [ + "x86_64-pc-windows-msvc": [ { "id": "async-compression 0.4.3", "target": "async_compression" @@ -56710,7 +56742,7 @@ "target": "webpki_roots" } ], - "x86_64-pc-windows-msvc": [ + "x86_64-unknown-freebsd": [ { "id": "async-compression 0.4.3", "target": "async_compression" @@ -56740,7 +56772,7 @@ "target": "webpki_roots" } ], - "x86_64-unknown-freebsd": [ + "x86_64-unknown-fuchsia": [ { "id": "async-compression 0.4.3", "target": "async_compression" @@ -57124,7 +57156,7 @@ "target": "webpki_roots" } ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ { "id": "futures-channel 0.3.31", "target": "futures_channel" @@ -57174,7 +57206,7 @@ "target": "webpki_roots" } ], - "aarch64-linux-android": [ + "aarch64-pc-windows-msvc": [ { "id": "futures-channel 0.3.31", "target": "futures_channel" @@ -57224,7 +57256,7 @@ "target": "webpki_roots" } ], - "aarch64-pc-windows-msvc": [ + "aarch64-unknown-fuchsia": [ { "id": "futures-channel 0.3.31", "target": "futures_channel" @@ -58206,6 +58238,12 @@ "target": "wasm_streams" } ], + "wasm32-wasip1": [ + { + "id": "wasm-streams 0.4.0", + "target": "wasm_streams" + } + ], "x86_64-apple-darwin": [ { "id": "futures-channel 0.3.31", @@ -58306,7 +58344,7 @@ "target": "webpki_roots" } ], - "x86_64-fuchsia": [ + "x86_64-linux-android": [ { "id": "futures-channel 0.3.31", "target": "futures_channel" @@ -58356,7 +58394,7 @@ "target": "webpki_roots" } ], - "x86_64-linux-android": [ + "x86_64-pc-windows-msvc": [ { "id": "futures-channel 0.3.31", "target": "futures_channel" @@ -58406,7 +58444,7 @@ "target": "webpki_roots" } ], - "x86_64-pc-windows-msvc": [ + "x86_64-unknown-freebsd": [ { "id": "futures-channel 0.3.31", "target": "futures_channel" @@ -58456,7 +58494,7 @@ "target": "webpki_roots" } ], - "x86_64-unknown-freebsd": [ + "x86_64-unknown-fuchsia": [ { "id": "futures-channel 0.3.31", "target": "futures_channel" @@ -60574,7 +60612,7 @@ "time", "use-libc-auxv" ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ "default", "event", "mm", @@ -60584,7 +60622,7 @@ "time", "use-libc-auxv" ], - "aarch64-linux-android": [ + "aarch64-unknown-fuchsia": [ "default", "event", "mm", @@ -60760,6 +60798,11 @@ "termios", "use-libc-auxv" ], + "wasm32-wasip1": [ + "default", + "termios", + "use-libc-auxv" + ], "x86_64-apple-darwin": [ "default", "event", @@ -60780,7 +60823,7 @@ "time", "use-libc-auxv" ], - "x86_64-fuchsia": [ + "x86_64-linux-android": [ "default", "event", "mm", @@ -60790,7 +60833,7 @@ "time", "use-libc-auxv" ], - "x86_64-linux-android": [ + "x86_64-unknown-freebsd": [ "default", "event", "mm", @@ -60800,7 +60843,7 @@ "time", "use-libc-auxv" ], - "x86_64-unknown-freebsd": [ + "x86_64-unknown-fuchsia": [ "default", "event", "mm", @@ -60888,7 +60931,7 @@ "target": "libc" } ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ { "id": "errno 0.3.8", "target": "errno", @@ -60899,22 +60942,22 @@ "target": "libc" } ], - "aarch64-linux-android": [ + "aarch64-pc-windows-msvc": [ { "id": "errno 0.3.8", "target": "errno", "alias": "libc_errno" - }, - { - "id": "libc 0.2.158", - "target": "libc" } ], - "aarch64-pc-windows-msvc": [ + "aarch64-unknown-fuchsia": [ { "id": "errno 0.3.8", "target": "errno", "alias": "libc_errno" + }, + { + "id": "libc 0.2.158", + "target": "libc" } ], "aarch64-unknown-nto-qnx710": [ @@ -61101,7 +61144,7 @@ "target": "libc" } ], - "x86_64-apple-darwin": [ + "wasm32-wasip1": [ { "id": "errno 0.3.8", "target": "errno", @@ -61112,7 +61155,7 @@ "target": "libc" } ], - "x86_64-apple-ios": [ + "x86_64-apple-darwin": [ 
{ "id": "errno 0.3.8", "target": "errno", @@ -61123,7 +61166,7 @@ "target": "libc" } ], - "x86_64-fuchsia": [ + "x86_64-apple-ios": [ { "id": "errno 0.3.8", "target": "errno", @@ -61163,6 +61206,17 @@ "target": "libc" } ], + "x86_64-unknown-fuchsia": [ + { + "id": "errno 0.3.8", + "target": "errno", + "alias": "libc_errno" + }, + { + "id": "libc 0.2.158", + "target": "libc" + } + ], "x86_64-unknown-none": [ { "id": "errno 0.3.8", @@ -67660,15 +67714,15 @@ "aarch64-apple-ios-sim": [ "once" ], - "aarch64-fuchsia": [ - "once" - ], "aarch64-linux-android": [ "once" ], "aarch64-pc-windows-msvc": [ "once" ], + "aarch64-unknown-fuchsia": [ + "once" + ], "aarch64-unknown-linux-gnu": [ "once" ], @@ -67714,9 +67768,6 @@ "x86_64-apple-ios": [ "once" ], - "x86_64-fuchsia": [ - "once" - ], "x86_64-linux-android": [ "once" ], @@ -67726,6 +67777,9 @@ "x86_64-unknown-freebsd": [ "once" ], + "x86_64-unknown-fuchsia": [ + "once" + ], "x86_64-unknown-linux-gnu": [ "once" ], @@ -69876,13 +69930,13 @@ "target": "xattr" } ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ { "id": "xattr 0.2.3", "target": "xattr" } ], - "aarch64-linux-android": [ + "aarch64-unknown-fuchsia": [ { "id": "xattr 0.2.3", "target": "xattr" @@ -69978,19 +70032,19 @@ "target": "xattr" } ], - "x86_64-fuchsia": [ + "x86_64-linux-android": [ { "id": "xattr 0.2.3", "target": "xattr" } ], - "x86_64-linux-android": [ + "x86_64-unknown-freebsd": [ { "id": "xattr 0.2.3", "target": "xattr" } ], - "x86_64-unknown-freebsd": [ + "x86_64-unknown-fuchsia": [ { "id": "xattr 0.2.3", "target": "xattr" @@ -71674,7 +71728,7 @@ "target": "num_threads" } ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ { "id": "libc 0.2.158", "target": "libc" @@ -71684,7 +71738,7 @@ "target": "num_threads" } ], - "aarch64-linux-android": [ + "aarch64-unknown-fuchsia": [ { "id": "libc 0.2.158", "target": "libc" @@ -71834,7 +71888,7 @@ "target": "num_threads" } ], - "x86_64-fuchsia": [ + "x86_64-linux-android": [ { "id": "libc 0.2.158", "target": "libc" @@ -71844,7 +71898,7 @@ "target": "num_threads" } ], - "x86_64-linux-android": [ + "x86_64-unknown-freebsd": [ { "id": "libc 0.2.158", "target": "libc" @@ -71854,7 +71908,7 @@ "target": "num_threads" } ], - "x86_64-unknown-freebsd": [ + "x86_64-unknown-fuchsia": [ { "id": "libc 0.2.158", "target": "libc" @@ -72182,7 +72236,9 @@ "version": "0.7.6" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "tinytemplate 1.2.1": { @@ -72565,7 +72621,7 @@ "target": "socket2" } ], - "aarch64-fuchsia": [ + "aarch64-linux-android": [ { "id": "libc 0.2.158", "target": "libc" @@ -72579,7 +72635,17 @@ "target": "socket2" } ], - "aarch64-linux-android": [ + "aarch64-pc-windows-msvc": [ + { + "id": "socket2 0.5.7", + "target": "socket2" + }, + { + "id": "windows-sys 0.52.0", + "target": "windows_sys" + } + ], + "aarch64-unknown-fuchsia": [ { "id": "libc 0.2.158", "target": "libc" @@ -72593,16 +72659,6 @@ "target": "socket2" } ], - "aarch64-pc-windows-msvc": [ - { - "id": "socket2 0.5.7", - "target": "socket2" - }, - { - "id": "windows-sys 0.52.0", - "target": "windows_sys" - } - ], "aarch64-unknown-linux-gnu": [ { "id": "libc 0.2.158", @@ -72839,7 +72895,7 @@ "target": "socket2" } ], - "x86_64-fuchsia": [ + "x86_64-linux-android": [ { "id": "libc 0.2.158", "target": "libc" @@ -72853,7 +72909,17 @@ "target": "socket2" } ], - "x86_64-linux-android": [ + "x86_64-pc-windows-msvc": [ + { + "id": "socket2 0.5.7", + "target": "socket2" + }, + { + "id": "windows-sys 0.52.0", + 
"target": "windows_sys" + } + ], + "x86_64-unknown-freebsd": [ { "id": "libc 0.2.158", "target": "libc" @@ -72867,17 +72933,7 @@ "target": "socket2" } ], - "x86_64-pc-windows-msvc": [ - { - "id": "socket2 0.5.7", - "target": "socket2" - }, - { - "id": "windows-sys 0.52.0", - "target": "windows_sys" - } - ], - "x86_64-unknown-freebsd": [ + "x86_64-unknown-fuchsia": [ { "id": "libc 0.2.158", "target": "libc" @@ -79785,12 +79841,6 @@ "target": "rustix" } ], - "aarch64-fuchsia": [ - { - "id": "rustix 0.38.32", - "target": "rustix" - } - ], "aarch64-linux-android": [ { "id": "rustix 0.38.32", @@ -79803,6 +79853,12 @@ "target": "windows_sys" } ], + "aarch64-unknown-fuchsia": [ + { + "id": "rustix 0.38.32", + "target": "rustix" + } + ], "aarch64-unknown-linux-gnu": [ { "id": "memfd 0.6.4", @@ -79933,12 +79989,6 @@ "target": "rustix" } ], - "x86_64-fuchsia": [ - { - "id": "rustix 0.38.32", - "target": "rustix" - } - ], "x86_64-linux-android": [ { "id": "rustix 0.38.32", @@ -79957,6 +80007,12 @@ "target": "rustix" } ], + "x86_64-unknown-fuchsia": [ + { + "id": "rustix 0.38.32", + "target": "rustix" + } + ], "x86_64-unknown-linux-gnu": [ { "id": "memfd 0.6.4", @@ -84554,7 +84610,9 @@ "version": "0.5.5" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "wsl 0.1.0": { @@ -85217,7 +85275,9 @@ "version": "0.7.4" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "yoke-derive 0.7.4": { @@ -85274,7 +85334,9 @@ "version": "0.7.4" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "zerocopy 0.7.32": { @@ -85440,7 +85502,9 @@ "version": "0.1.4" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "zerofrom-derive 0.1.4": { @@ -85497,7 +85561,9 @@ "version": "0.1.4" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "zeroize 1.8.1": { @@ -85675,7 +85741,9 @@ "version": "0.10.4" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "zerovec-derive 0.10.3": { @@ -85728,7 +85796,9 @@ "version": "0.10.3" }, "license": "Unicode-3.0", - "license_ids": [], + "license_ids": [ + "Unicode-3.0" + ], "license_file": "LICENSE" }, "zstd 0.12.4": { @@ -86124,9 +86194,6 @@ "aarch64-apple-ios-sim": [ "aarch64-apple-ios-sim" ], - "aarch64-fuchsia": [ - "aarch64-fuchsia" - ], "aarch64-linux-android": [ "aarch64-linux-android" ], @@ -86134,6 +86201,9 @@ "aarch64-pc-windows-msvc": [ "aarch64-pc-windows-msvc" ], + "aarch64-unknown-fuchsia": [ + "aarch64-unknown-fuchsia" + ], "aarch64-unknown-linux-gnu": [ "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu" @@ -86176,10 +86246,10 @@ "cfg(all(not(curve25519_dalek_backend = \"fiat\"), not(curve25519_dalek_backend = \"serial\"), target_arch = \"x86_64\"))": [ "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-pc-windows-msvc", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86197,8 +86267,8 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", + "aarch64-unknown-fuchsia", "aarch64-unknown-nto-qnx710", "armv7-linux-androideabi", "i686-apple-darwin", @@ -86212,11 +86282,12 @@ 
"thumbv8m.main-none-eabi", "wasm32-unknown-unknown", "wasm32-wasi", + "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-none" ], "cfg(all(target_arch = \"aarch64\", target_env = \"msvc\", not(windows_raw_dylib)))": [ @@ -86241,7 +86312,8 @@ "wasm32-unknown-unknown" ], "cfg(all(target_arch = \"wasm32\", target_os = \"wasi\"))": [ - "wasm32-wasi" + "wasm32-wasi", + "wasm32-wasip1" ], "cfg(all(target_arch = \"wasm32\", target_vendor = \"unknown\", target_os = \"unknown\", target_env = \"\"))": [ "wasm32-unknown-unknown" @@ -86269,8 +86341,8 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86285,14 +86357,14 @@ "s390x-unknown-linux-gnu", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu" ], "cfg(all(unix, not(target_os = \"android\"), not(target_vendor = \"apple\"), not(target_arch = \"wasm32\")))": [ - "aarch64-fuchsia", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86302,16 +86374,16 @@ "i686-unknown-linux-gnu", "powerpc-unknown-linux-gnu", "s390x-unknown-linux-gnu", - "x86_64-fuchsia", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu" ], "cfg(all(unix, not(target_os = \"macos\")))": [ "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86324,9 +86396,9 @@ "powerpc-unknown-linux-gnu", "s390x-unknown-linux-gnu", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu" ], @@ -86335,9 +86407,9 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", "aarch64-pc-windows-msvc", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86353,10 +86425,10 @@ "thumbv8m.main-none-eabi", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-pc-windows-msvc", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86365,9 +86437,9 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", "aarch64-pc-windows-msvc", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86378,10 +86450,10 @@ "i686-unknown-linux-gnu", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-pc-windows-msvc", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86390,9 +86462,9 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", "aarch64-pc-windows-msvc", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", 
"aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86403,10 +86475,10 @@ "i686-unknown-linux-gnu", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-pc-windows-msvc", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86416,8 +86488,8 @@ "s390x-unknown-linux-gnu" ], "cfg(any(target_arch = \"x86\", target_arch = \"x86_64\", all(any(target_arch = \"aarch64\", target_arch = \"arm\"), any(target_os = \"android\", target_os = \"fuchsia\", target_os = \"linux\"))))": [ - "aarch64-fuchsia", "aarch64-linux-android", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "arm-unknown-linux-gnueabi", @@ -86430,10 +86502,10 @@ "i686-unknown-linux-gnu", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-pc-windows-msvc", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86446,10 +86518,10 @@ "i686-unknown-linux-gnu", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-pc-windows-msvc", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86492,9 +86564,9 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", "aarch64-pc-windows-msvc", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "arm-unknown-linux-gnueabi", @@ -86508,12 +86580,13 @@ "powerpc-unknown-linux-gnu", "s390x-unknown-linux-gnu", "wasm32-wasi", + "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-pc-windows-msvc", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu" ], @@ -86567,8 +86640,8 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86583,9 +86656,9 @@ "s390x-unknown-linux-gnu", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu" ], @@ -86593,8 +86666,8 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86609,9 +86682,9 @@ "s390x-unknown-linux-gnu", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu" ], @@ -86619,8 +86692,8 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86634,11 +86707,12 @@ "powerpc-unknown-linux-gnu", "s390x-unknown-linux-gnu", "wasm32-wasi", + "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", 
"x86_64-unknown-nixos-gnu" ], @@ -86647,9 +86721,9 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", "aarch64-pc-windows-msvc", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86667,19 +86741,20 @@ "s390x-unknown-linux-gnu", "wasm32-unknown-unknown", "wasm32-wasi", + "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-pc-windows-msvc", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" ], "cfg(not(any(target_os = \"macos\", target_os = \"ios\", target_os = \"windows\", target_arch = \"wasm32\")))": [ - "aarch64-fuchsia", "aarch64-linux-android", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86695,9 +86770,9 @@ "s390x-unknown-linux-gnu", "thumbv7em-none-eabi", "thumbv8m.main-none-eabi", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86706,8 +86781,8 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86726,9 +86801,9 @@ "thumbv8m.main-none-eabi", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86737,8 +86812,8 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86756,11 +86831,12 @@ "thumbv7em-none-eabi", "thumbv8m.main-none-eabi", "wasm32-wasi", + "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86769,9 +86845,9 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", "aarch64-pc-windows-msvc", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86791,10 +86867,10 @@ "thumbv8m.main-none-eabi", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-pc-windows-msvc", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86803,9 +86879,9 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", "aarch64-pc-windows-msvc", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86825,10 +86901,10 @@ "thumbv8m.main-none-eabi", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-pc-windows-msvc", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86837,8 +86913,8 @@ "aarch64-apple-darwin", 
"aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86857,11 +86933,12 @@ "thumbv8m.main-none-eabi", "wasm32-unknown-unknown", "wasm32-wasi", + "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86870,9 +86947,9 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", "aarch64-pc-windows-msvc", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -86892,12 +86969,13 @@ "thumbv8m.main-none-eabi", "wasm32-unknown-unknown", "wasm32-wasi", + "wasm32-wasip1", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-pc-windows-msvc", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86907,16 +86985,17 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", "aarch64-pc-windows-msvc", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710" ], "cfg(target_arch = \"wasm32\")": [ "wasm32-unknown-unknown", - "wasm32-wasi" + "wasm32-wasi", + "wasm32-wasip1" ], "cfg(target_arch = \"x86\")": [ "i686-apple-darwin", @@ -86928,10 +87007,10 @@ "cfg(target_arch = \"x86_64\")": [ "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-pc-windows-msvc", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu", "x86_64-unknown-none" @@ -86952,8 +87031,8 @@ "cfg(target_os = \"cloudabi\")": [], "cfg(target_os = \"dragonfly\")": [], "cfg(target_os = \"fuchsia\")": [ - "aarch64-fuchsia", - "x86_64-fuchsia" + "aarch64-unknown-fuchsia", + "x86_64-unknown-fuchsia" ], "cfg(target_os = \"haiku\")": [], "cfg(target_os = \"hermit\")": [], @@ -86980,7 +87059,8 @@ ], "cfg(target_os = \"redox\")": [], "cfg(target_os = \"wasi\")": [ - "wasm32-wasi" + "wasm32-wasi", + "wasm32-wasip1" ], "cfg(target_os = \"windows\")": [ "aarch64-pc-windows-msvc", @@ -86992,8 +87072,8 @@ "aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", - "aarch64-fuchsia", "aarch64-linux-android", + "aarch64-unknown-fuchsia", "aarch64-unknown-linux-gnu", "aarch64-unknown-nixos-gnu", "aarch64-unknown-nto-qnx710", @@ -87008,9 +87088,9 @@ "s390x-unknown-linux-gnu", "x86_64-apple-darwin", "x86_64-apple-ios", - "x86_64-fuchsia", "x86_64-linux-android", "x86_64-unknown-freebsd", + "x86_64-unknown-fuchsia", "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu" ], @@ -87062,15 +87142,15 @@ "wasm32-wasi": [ "wasm32-wasi" ], + "wasm32-wasip1": [ + "wasm32-wasip1" + ], "x86_64-apple-darwin": [ "x86_64-apple-darwin" ], "x86_64-apple-ios": [ "x86_64-apple-ios" ], - "x86_64-fuchsia": [ - "x86_64-fuchsia" - ], "x86_64-linux-android": [ "x86_64-linux-android" ], @@ -87082,6 +87162,9 @@ "x86_64-unknown-freebsd": [ "x86_64-unknown-freebsd" ], + "x86_64-unknown-fuchsia": [ + "x86_64-unknown-fuchsia" + ], "x86_64-unknown-linux-gnu": [ "x86_64-unknown-linux-gnu", "x86_64-unknown-nixos-gnu" diff --git a/WORKSPACE.bazel b/WORKSPACE.bazel index 
09c62d8bd34..10c29ef4de2 100644 --- a/WORKSPACE.bazel +++ b/WORKSPACE.bazel @@ -96,11 +96,8 @@ sol_register_toolchains( http_archive( name = "rules_rust", - # Back-ported fix: https://github.com/bazelbuild/rules_rust/pull/2981 - patch_args = ["-p1"], - patches = ["//bazel:rules_rust.patch"], - sha256 = "85e2013727ab26fb22abdffe4b2ac0c27a2d5b6296167ba63d8f6e13140f51f9", - urls = ["https://github.com/bazelbuild/rules_rust/releases/download/0.53.0/rules_rust-v0.53.0.tar.gz"], + sha256 = "af4f56caae50a99a68bfce39b141b509dd68548c8204b98ab7a1cafc94d5bb02", + urls = ["https://github.com/bazelbuild/rules_rust/releases/download/0.54.1/rules_rust-v0.54.1.tar.gz"], ) load("@rules_rust//rust:repositories.bzl", "rules_rust_dependencies", "rust_register_toolchains") diff --git a/bazel/rules_rust.patch b/bazel/rules_rust.patch deleted file mode 100644 index d9252428a0b..00000000000 --- a/bazel/rules_rust.patch +++ /dev/null @@ -1,45 +0,0 @@ -# Backports for https://github.com/bazelbuild/rules_rust/issues/2974 and https://github.com/bazelbuild/rules_rust/pull/2981 -diff --git a/cargo/cargo_build_script_runner/bin.rs b/cargo/cargo_build_script_runner/bin.rs -index 2dab3578..b5bb4fca 100644 ---- a/cargo/cargo_build_script_runner/bin.rs -+++ b/cargo/cargo_build_script_runner/bin.rs -@@ -187,9 +187,9 @@ fn run_buildrs() -> Result<(), String> { - .as_bytes(), - ) - .unwrap_or_else(|_| panic!("Unable to write file {:?}", output_dep_env_path)); -- write(&stdout_path, process_output.stdout) -+ write(&stdout_path, "") - .unwrap_or_else(|_| panic!("Unable to write file {:?}", stdout_path)); -- write(&stderr_path, process_output.stderr) -+ write(&stderr_path, "") - .unwrap_or_else(|_| panic!("Unable to write file {:?}", stderr_path)); - - let CompileAndLinkFlags { -diff --git a/crate_universe/private/crate.bzl b/crate_universe/private/crate.bzl -index c493e9a6..ad317abf 100644 ---- a/crate_universe/private/crate.bzl -+++ b/crate_universe/private/crate.bzl -@@ -230,7 +230,22 @@ def _stringify_label(value): - def _stringify_list(values): - if not values: - return values -- return [str(x) for x in values] -+ -+ if type(values) == "list": -+ return [str(x) for x in values] -+ -+ -+ -+ -+ if type(values) == "struct" and type(values.selects) != "NoneType": -+ new_selects = {} -+ -+ for k, v in values.selects.items(): -+ new_selects[k] = [str(x) for x in values.selects[k]] -+ -+ return struct(common = [str(x) for x in values.common], selects = new_selects) -+ -+ fail("Cannot stringify unknown type for list '{}'".format(values)) - - def _select(common, selects): - """A Starlark Select for `crate.annotation()`. From 323b72f6ffc201b04f28b49d037fa3b10f13149f Mon Sep 17 00:00:00 2001 From: Andriy Berestovskyy Date: Tue, 14 Jan 2025 19:58:42 +0100 Subject: [PATCH 24/33] chore: EXC-1818: Run all benchmarks faster (#3357) This change avoids unnecessary benchmark repetitions when performance remains unchanged compared to the baseline. It also reduces the number of repetitions from 9 to 3. 
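The control flow behind this is a retry loop with an early exit: run the benchmark up to REPEAT times, and stop as soon as the summary step reports no change against the baseline. A minimal Rust sketch of the idea (`run_with_early_exit` and its closure arguments are illustrative names, not part of this patch; in the scripts below, the "changed" signal is the non-zero exit status of summarize-results.sh):

    // Sketch of the early-exit loop added to run-all-benchmarks.sh: run the
    // benchmark up to `repeat` times, but stop as soon as the summary step
    // reports no change against the baseline.
    fn run_with_early_exit<B, S>(repeat: u32, mut run_benchmark: B, mut summarize: S)
    where
        B: FnMut(),
        S: FnMut() -> bool, // true = results differ from the baseline
    {
        for _ in 0..repeat {
            run_benchmark();
            if !summarize() {
                // No diff: further repetitions would only reproduce the same
                // numbers, so stop here (the script achieves the same effect
                // by writing REPEAT into the counter file).
                break;
            }
        }
    }

    fn main() {
        let mut runs = 0;
        // With unchanged results, a single run is enough even though REPEAT is 3.
        run_with_early_exit(3, || runs += 1, || false);
        assert_eq!(runs, 1);
    }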
--- rs/execution_environment/benches/run-all-benchmarks.sh | 7 ++++++- rs/execution_environment/benches/summarize-results.sh | 5 ++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/rs/execution_environment/benches/run-all-benchmarks.sh b/rs/execution_environment/benches/run-all-benchmarks.sh index 3e72b0c219d..e13cb38258d 100755 --- a/rs/execution_environment/benches/run-all-benchmarks.sh +++ b/rs/execution_environment/benches/run-all-benchmarks.sh @@ -11,7 +11,7 @@ set -ue ## printf "%-12s := %s\n" \ - "REPEAT" "${REPEAT:=9}" >&2 + "REPEAT" "${REPEAT:=3}" >&2 RUN_BENCHMARK="${0%/*}/run-benchmark.sh" [ -x "${RUN_BENCHMARK}" ] || (echo "Error accessing script: ${RUN_BENCHMARK}" >&2 && exit 1) @@ -39,7 +39,12 @@ run() { # Summarize results if the benchmark was executed or if it's the final iteration. if [ "${counter}" -lt "${i}" -o "${i}" = "${REPEAT}" ]; then echo "==> Summarizing ${name} results:" >&2 + set +e NAME="${name}" MIN_FILE="${min_file}" "${SUMMARIZE_RESULTS}" + local ret="${?}" + set -e + # Stop repeating the benchmark if there are no changes. + [ "${ret}" -eq 0 ] && echo "${REPEAT}" >"${counter_file}" fi } diff --git a/rs/execution_environment/benches/summarize-results.sh b/rs/execution_environment/benches/summarize-results.sh index 8e94a73e1dc..cbd7fb5c20e 100755 --- a/rs/execution_environment/benches/summarize-results.sh +++ b/rs/execution_environment/benches/summarize-results.sh @@ -71,4 +71,7 @@ if [ "${total_diff}" != "0" ]; then echo " - ${name} time improved by ${diff}%" done fi -# rm -f "${TMP_FILE}" +rm -f "${TMP_FILE}" + +# Return an error if there are changes, so the calling script can retry or report an error. +[ "${total_diff}" != "0" ] From 572afbcdbf9507406742007d10f55b8173e4cfa9 Mon Sep 17 00:00:00 2001 From: Andrew Battat <113942931+andrewbattat@users.noreply.github.com> Date: Tue, 14 Jan 2025 16:06:32 -0600 Subject: [PATCH 25/33] feat(node): add fstrim datadir observability (#3322) NODE-1537 Node decommissioning stage 2 was rolled out in a hurry so that we could make the holiday-release cutoff: https://github.com/dfinity/ic/pull/2953 These changes add the missing observability logic.
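At its core, the new accounting is a second, independent copy of the three fstrim metrics (runs total, last-run success, last-run duration), keyed to the datadir run. A minimal Rust sketch of that shape, with field names shortened here for brevity (the real FsTrimMetrics struct, its defaults, and the metrics-file persistence are in the diffs below):

    use std::time::Duration;

    // Simplified sketch of the split introduced below: regular fstrim runs and
    // datadir runs are tracked by parallel fields and updated through separate
    // methods, so one run's outcome never overwrites the other's.
    #[derive(Default)]
    struct Metrics {
        total_runs: f64,
        last_run_success: bool,
        last_duration_ms: f64,
        total_runs_datadir: f64,
        last_run_success_datadir: bool,
        last_duration_ms_datadir: f64,
    }

    impl Metrics {
        fn update(&mut self, success: bool, elapsed: Duration) {
            self.total_runs += 1.0;
            self.last_run_success = success;
            self.last_duration_ms = elapsed.as_millis() as f64;
        }

        fn update_datadir(&mut self, success: bool, elapsed: Duration) {
            self.total_runs_datadir += 1.0;
            self.last_run_success_datadir = success;
            self.last_duration_ms_datadir = elapsed.as_millis() as f64;
        }
    }

    fn main() {
        let mut m = Metrics::default();
        m.update(true, Duration::from_millis(6));
        m.update_datadir(false, Duration::from_millis(123));
        // The regular counters are untouched by the datadir update.
        assert_eq!(m.total_runs, 1.0);
        assert!(m.last_run_success);
        assert!(!m.last_run_success_datadir);
    }

Keeping the two triples separate means a failed datadir trim surfaces in fstrim_datadir_last_run_success without clobbering the status of the regular run.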
Main commits: - [Add datadir metrics](https://github.com/dfinity/ic/pull/3322/commits/a1e6dbb99e60c8397d5ff9b7b8b1b839a5d0c0e2) - [Update unit tests](https://github.com/dfinity/ic/pull/3322/commits/513ba80c20709d74fd7631fe3566ba6f92bd8679) Screenshot of metrics: (image omitted) --- Cargo.lock | 1 + rs/ic_os/fstrim_tool/BUILD.bazel | 1 + rs/ic_os/fstrim_tool/Cargo.toml | 1 + rs/ic_os/fstrim_tool/src/lib.rs | 24 +- rs/ic_os/fstrim_tool/src/metrics/mod.rs | 88 ++++- rs/ic_os/fstrim_tool/src/metrics/tests.rs | 107 +++++- rs/ic_os/fstrim_tool/src/tests.rs | 323 +++++++++++------- .../fstrim_tool/tests/integration_tests.rs | 180 +++++----- 8 files changed, 481 insertions(+), 244 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1e410db98ce..098b9048d54 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8394,6 +8394,7 @@ dependencies = [ "maplit", "predicates", "rand 0.8.5", + "regex", "tempfile", ] diff --git a/rs/ic_os/fstrim_tool/BUILD.bazel b/rs/ic_os/fstrim_tool/BUILD.bazel index f387d516321..38abf36e4ec 100644 --- a/rs/ic_os/fstrim_tool/BUILD.bazel +++ b/rs/ic_os/fstrim_tool/BUILD.bazel @@ -17,6 +17,7 @@ DEV_DEPENDENCIES = [ "@crate_index//:assert_matches", "@crate_index//:predicates", "@crate_index//:rand", + "@crate_index//:regex", "@crate_index//:tempfile", ] diff --git a/rs/ic_os/fstrim_tool/Cargo.toml b/rs/ic_os/fstrim_tool/Cargo.toml index 56348b47f9f..d97b8a61d13 100644 --- a/rs/ic_os/fstrim_tool/Cargo.toml +++ b/rs/ic_os/fstrim_tool/Cargo.toml @@ -19,4 +19,5 @@ assert_matches = { workspace = true } ic-crypto-test-utils-reproducible-rng = { path = "../../crypto/test_utils/reproducible_rng" } predicates = { workspace = true } rand = { workspace = true } +regex = { workspace = true } tempfile = { workspace = true } diff --git a/rs/ic_os/fstrim_tool/src/lib.rs b/rs/ic_os/fstrim_tool/src/lib.rs index af39d6c6f5d..1bbe877bf5e 100644 --- a/rs/ic_os/fstrim_tool/src/lib.rs +++ b/rs/ic_os/fstrim_tool/src/lib.rs @@ -61,7 +61,12 @@ fn write_metrics_using_tmp_file(metrics: &FsTrimMetrics, metrics_filename: &str) .context("Failed to write metrics to file") } -fn update_metrics(elapsed: Duration, is_success: bool, metrics_filename: &str) -> Result<()> { +fn update_metrics( + elapsed: Duration, + is_success: bool, + metrics_filename: &str, + is_datadir: bool, +) -> Result<()> { let mut metrics = parse_existing_metrics_from_file(metrics_filename) .unwrap_or_else(|e| { eprintln!("error parsing existing metrics: {}", e); @@ -71,7 +76,13 @@ fn update_metrics(elapsed: Duration, is_success: bool, metrics_filename: &str) - eprintln!("no existing metrics found"); FsTrimMetrics::default() }); - metrics.update(is_success, elapsed)?; + + if is_datadir { + metrics.update_datadir(is_success, elapsed)?; + } else { + metrics.update(is_success, elapsed)?; + } + write_metrics_using_tmp_file(&metrics, metrics_filename) } @@ -101,14 +112,13 @@ pub fn fstrim_tool( let start = std::time::Instant::now(); let res_target = run_command(command, &target); let elapsed_target = start.elapsed(); - update_metrics(elapsed_target, res_target.is_ok(), &metrics_filename)?; + update_metrics(elapsed_target, res_target.is_ok(), &metrics_filename, false)?; if !datadir_target.is_empty() && !is_node_assigned() { - // TODO observability changes needed, expand the metrics logic - // let start_datadir = std::time::Instant::now(); + let start = std::time::Instant::now(); let res_datadir = run_command(command, &datadir_target); - // let elapsed_datadir = start_datadir.elapsed(); - // update_metrics(elapsed_datadir, res_datadir.is_ok(),
&metrics_filename)?; + let elapsed = start.elapsed(); + update_metrics(elapsed, res_datadir.is_ok(), &metrics_filename, true)?; res_target.and(res_datadir) } else { res_target diff --git a/rs/ic_os/fstrim_tool/src/metrics/mod.rs b/rs/ic_os/fstrim_tool/src/metrics/mod.rs index 5e42bce8d83..dfa68037edb 100644 --- a/rs/ic_os/fstrim_tool/src/metrics/mod.rs +++ b/rs/ic_os/fstrim_tool/src/metrics/mod.rs @@ -8,11 +8,20 @@ const METRICS_LAST_RUN_DURATION_MILLISECONDS: &str = "fstrim_last_run_duration_m const METRICS_LAST_RUN_SUCCESS: &str = "fstrim_last_run_success"; const METRICS_RUNS_TOTAL: &str = "fstrim_runs_total"; +const METRICS_LAST_RUN_DURATION_MILLISECONDS_DATADIR: &str = + "fstrim_datadir_last_run_duration_milliseconds"; +const METRICS_LAST_RUN_SUCCESS_DATADIR: &str = "fstrim_datadir_last_run_success"; +const METRICS_RUNS_TOTAL_DATADIR: &str = "fstrim_datadir_runs_total"; + #[derive(Debug)] pub struct FsTrimMetrics { pub last_duration_milliseconds: f64, pub last_run_success: bool, pub total_runs: f64, + + pub last_duration_milliseconds_datadir: f64, + pub last_run_success_datadir: bool, + pub total_runs_datadir: f64, } impl Default for FsTrimMetrics { @@ -21,6 +30,10 @@ impl Default for FsTrimMetrics { last_duration_milliseconds: 0f64, last_run_success: true, total_runs: 0f64, + + last_duration_milliseconds_datadir: 0f64, + last_run_success_datadir: true, + total_runs_datadir: 0f64, } } } @@ -33,26 +46,54 @@ impl FsTrimMetrics { Ok(()) } + pub(crate) fn update_datadir(&mut self, success: bool, duration: Duration) -> Result<()> { + self.total_runs_datadir += 1f64; + self.last_run_success_datadir = success; + self.last_duration_milliseconds_datadir = duration.as_millis() as f64; + Ok(()) + } + pub fn to_p8s_metrics_string(&self) -> String { + let fstrim_last_run_duration_milliseconds = to_go_f64(self.last_duration_milliseconds); + let fstrim_last_run_success = if self.last_run_success { "1" } else { "0" }; + let fstrim_runs_total = to_go_f64(self.total_runs); + + let fstrim_datadir_last_run_duration_milliseconds = + to_go_f64(self.last_duration_milliseconds_datadir); + let fstrim_datadir_last_run_success = if self.last_run_success_datadir { + "1" + } else { + "0" + }; + let fstrim_datadir_runs_total = to_go_f64(self.total_runs_datadir); + format!( "# HELP fstrim_last_run_duration_milliseconds Duration of last run of fstrim in milliseconds\n\ # TYPE fstrim_last_run_duration_milliseconds gauge\n\ - fstrim_last_run_duration_milliseconds {}\n\ + fstrim_last_run_duration_milliseconds {fstrim_last_run_duration_milliseconds}\n\ # HELP fstrim_last_run_success Success status of last run of fstrim (success: 1, failure: 0)\n\ # TYPE fstrim_last_run_success gauge\n\ - fstrim_last_run_success {}\n\ + fstrim_last_run_success {fstrim_last_run_success}\n\ # HELP fstrim_runs_total Total number of runs of fstrim\n\ # TYPE fstrim_runs_total counter\n\ - fstrim_runs_total {}\n", - to_go_f64(self.last_duration_milliseconds), - if self.last_run_success { "1" } else { "0" }, - to_go_f64(self.total_runs), - ).to_string() + fstrim_runs_total {fstrim_runs_total}\n\ + # HELP fstrim_datadir_last_run_duration_milliseconds Duration of last run of fstrim on datadir in milliseconds\n\ + # TYPE fstrim_datadir_last_run_duration_milliseconds gauge\n\ + fstrim_datadir_last_run_duration_milliseconds {fstrim_datadir_last_run_duration_milliseconds}\n\ + # HELP fstrim_datadir_last_run_success Success status of last run of fstrim on datadir (success: 1, failure: 0)\n\ + # TYPE fstrim_datadir_last_run_success gauge\n\ + 
fstrim_datadir_last_run_success {fstrim_datadir_last_run_success}\n\ + # HELP fstrim_datadir_runs_total Total number of runs of fstrim on datadir\n\ + # TYPE fstrim_datadir_runs_total counter\n\ + fstrim_datadir_runs_total {fstrim_datadir_runs_total}\n" + ) } fn are_valid(&self) -> bool { is_f64_finite_and_0_or_larger(self.total_runs) && is_f64_finite_and_0_or_larger(self.last_duration_milliseconds) + && is_f64_finite_and_0_or_larger(self.total_runs_datadir) + && is_f64_finite_and_0_or_larger(self.last_duration_milliseconds_datadir) } } @@ -102,27 +143,41 @@ where let mut last_duration_milliseconds: Option = None; let mut last_run_success: Option = None; let mut total_runs: Option = None; + + // Default datadir fields (we treat them as optional in the metrics file) + let mut datadir_last_duration_milliseconds: f64 = 0f64; + let mut datadir_last_run_success: bool = true; + let mut datadir_total_runs: f64 = 0f64; + for line_or_err in lines { let line = line_or_err.map_err(|e| format_err!("failed to read line: {}", e))?; match line.split(' ').collect::>()[..] { ["#", ..] => continue, [key, value] => match key { METRICS_LAST_RUN_DURATION_MILLISECONDS => { - let _ = last_duration_milliseconds - .get_or_insert(parse_metrics_value(key, value)?); + last_duration_milliseconds.get_or_insert(parse_metrics_value(key, value)?); } METRICS_LAST_RUN_SUCCESS => { - let _ = - last_run_success.get_or_insert(parse_metrics_value(key, value)? > 0f64); + last_run_success.get_or_insert(parse_metrics_value(key, value)? > 0f64); } METRICS_RUNS_TOTAL => { - let _ = total_runs.get_or_insert(parse_metrics_value(key, value)?); + total_runs.get_or_insert(parse_metrics_value(key, value)?); + } + METRICS_LAST_RUN_DURATION_MILLISECONDS_DATADIR => { + datadir_last_duration_milliseconds = parse_metrics_value(key, value)?; + } + METRICS_LAST_RUN_SUCCESS_DATADIR => { + datadir_last_run_success = parse_metrics_value(key, value)? 
> 0f64; + } + METRICS_RUNS_TOTAL_DATADIR => { + datadir_total_runs = parse_metrics_value(key, value)?; } _ => return Err(format_err!("unknown metric key: {}", key)), }, _ => return Err(format_err!("invalid metric line: {:?}", line)), } } + let metrics = FsTrimMetrics { last_duration_milliseconds: last_duration_milliseconds.ok_or(format_err!( "missing metric: {}", @@ -131,6 +186,9 @@ where last_run_success: last_run_success .ok_or(format_err!("missing metric: {}", METRICS_LAST_RUN_SUCCESS))?, total_runs: total_runs.ok_or(format_err!("missing metric: {}", METRICS_RUNS_TOTAL))?, + last_duration_milliseconds_datadir: datadir_last_duration_milliseconds, + last_run_success_datadir: datadir_last_run_success, + total_runs_datadir: datadir_total_runs, }; if !metrics.are_valid() { return Err(format_err!("parsed metrics are invalid")); @@ -148,6 +206,12 @@ impl PartialEq for FsTrimMetrics { other.last_duration_milliseconds, ) && (self.last_run_success == other.last_run_success) + && f64_approx_eq( + self.last_duration_milliseconds_datadir, + other.last_duration_milliseconds_datadir, + ) + && (self.last_run_success_datadir == other.last_run_success_datadir) + && f64_approx_eq(self.total_runs_datadir, other.total_runs_datadir) } } diff --git a/rs/ic_os/fstrim_tool/src/metrics/tests.rs b/rs/ic_os/fstrim_tool/src/metrics/tests.rs index bca2d1e79b4..b21a61f3723 100644 --- a/rs/ic_os/fstrim_tool/src/metrics/tests.rs +++ b/rs/ic_os/fstrim_tool/src/metrics/tests.rs @@ -3,9 +3,11 @@ use assert_matches::assert_matches; use ic_crypto_test_utils_reproducible_rng::reproducible_rng; use rand::Rng; use std::fs::write; +use std::time::Duration; +use tempfile::tempdir; #[test] -fn should_compare_f64() { +fn compare_f64() { assert!(f64_approx_eq(f64::NAN, f64::NAN)); assert!(f64_approx_eq(f64::INFINITY, f64::INFINITY)); assert!(f64_approx_eq(f64::INFINITY + 1f64, f64::INFINITY)); @@ -16,8 +18,8 @@ fn should_compare_f64() { } #[test] -fn should_parse_valid_metrics_file() { - let temp_dir = tempfile::TempDir::new().expect("failed to create a temporary directory"); +fn parse_valid_metrics_file() { + let temp_dir = tempdir().expect("failed to create a temporary directory"); let test_file = temp_dir.as_ref().join("test_file"); let metrics_file_content = "# HELP fstrim_last_run_duration_milliseconds Duration of last run of fstrim in milliseconds\n\ @@ -30,18 +32,22 @@ fn should_parse_valid_metrics_file() { # TYPE fstrim_runs_total counter\n\ fstrim_runs_total 1\n"; write(&test_file, metrics_file_content).expect("error writing to file"); + let parsed_metrics = parse_existing_metrics_from_file(&test_file.to_string_lossy()).unwrap(); let expected_metrics = FsTrimMetrics { last_duration_milliseconds: 6.0, last_run_success: true, total_runs: 1.0, + last_duration_milliseconds_datadir: 0.0, + last_run_success_datadir: true, + total_runs_datadir: 0.0, }; assert_eq!(parsed_metrics, Some(expected_metrics)); } #[test] -fn should_only_consider_first_parsed_value_when_parsing_metrics_file() { - let temp_dir = tempfile::TempDir::new().expect("failed to create a temporary directory"); +fn ignore_subsequent_values_for_same_metric() { + let temp_dir = tempdir().expect("failed to create a temporary directory"); let test_file = temp_dir.as_ref().join("test_file"); let metrics_file_content = "# HELP fstrim_last_run_duration_milliseconds Duration of last run of fstrim in milliseconds\n\ @@ -57,17 +63,21 @@ fn should_only_consider_first_parsed_value_when_parsing_metrics_file() { fstrim_runs_total 12\n\ fstrim_runs_total 1\n"; write(&test_file, 
metrics_file_content).expect("error writing to file"); + let parsed_metrics = parse_existing_metrics_from_file(&test_file.to_string_lossy()).unwrap(); let expected_metrics = FsTrimMetrics { last_duration_milliseconds: 6.0, last_run_success: true, total_runs: 12.0, + last_duration_milliseconds_datadir: 0.0, + last_run_success_datadir: true, + total_runs_datadir: 0.0, }; assert_eq!(parsed_metrics, Some(expected_metrics)); } #[test] -fn should_return_error_when_parsing_empty_metrics_file() { +fn should_error_on_empty_metrics_file() { let temp_dir = tempfile::TempDir::new().expect("failed to create a temporary directory"); let test_file = temp_dir.as_ref().join("test_file"); write(&test_file, "").expect("error writing to file"); @@ -76,7 +86,7 @@ fn should_return_error_when_parsing_empty_metrics_file() { } #[test] -fn should_return_error_for_metrics_file_with_too_many_tokens() { +fn should_error_when_metrics_file_has_too_many_tokens() { let temp_dir = tempfile::TempDir::new().expect("failed to create a temporary directory"); let test_file = temp_dir.as_ref().join("test_file"); write(&test_file, "pineapple on pizza is delicious").expect("error writing to file"); @@ -87,7 +97,7 @@ fn should_return_error_for_metrics_file_with_too_many_tokens() { } #[test] -fn should_return_error_for_metrics_file_with_unknown_metric_name() { +fn should_error_when_unknown_metric_name() { let temp_dir = tempfile::TempDir::new().expect("failed to create a temporary directory"); let test_file = temp_dir.as_ref().join("test_file"); write(&test_file, "pineapple pizza").expect("error writing to file"); @@ -98,7 +108,7 @@ fn should_return_error_for_metrics_file_with_unknown_metric_name() { } #[test] -fn should_return_error_for_metrics_file_with_timestamp() { +fn should_error_when_metrics_file_has_timestamp() { let temp_dir = tempfile::TempDir::new().expect("failed to create a temporary directory"); let test_file = temp_dir.as_ref().join("test_file"); write( @@ -113,7 +123,7 @@ fn should_return_error_for_metrics_file_with_timestamp() { } #[test] -fn should_return_error_for_metrics_file_with_non_numeric_value() { +fn should_error_when_metrics_file_has_non_numeric_value() { let temp_dir = tempfile::TempDir::new().expect("failed to create a temporary directory"); let test_file = temp_dir.as_ref().join("test_file"); write(&test_file, format!("{} pizza", METRICS_RUNS_TOTAL).as_str()) @@ -125,7 +135,7 @@ fn should_return_error_for_metrics_file_with_non_numeric_value() { } #[test] -fn should_return_none_when_parsing_if_metrics_file_does_not_exist() { +fn file_does_not_exist() { let temp_dir = tempfile::TempDir::new().expect("failed to create a temporary directory"); let test_file = temp_dir.as_ref().join("test_file"); let parsed_metrics = parse_existing_metrics_from_file(&test_file.to_string_lossy()).unwrap(); @@ -133,7 +143,7 @@ fn should_return_none_when_parsing_if_metrics_file_does_not_exist() { } #[test] -fn should_set_metrics() { +fn set_metrics() { let mut existing_metrics = FsTrimMetrics::default(); existing_metrics .update(true, Duration::from_millis(110)) @@ -142,12 +152,13 @@ fn should_set_metrics() { last_duration_milliseconds: 110.0, last_run_success: true, total_runs: 1.0, + ..FsTrimMetrics::default() }; assert_eq!(existing_metrics, expected_metrics); } #[test] -fn should_update_metrics() { +fn update_metrics() { let mut rng = reproducible_rng(); for _ in 0..100 { let total_runs: u64 = rng.gen_range(0..10000000); @@ -162,7 +173,7 @@ fn should_update_metrics() { for _ in 0..100 { let success = rng.gen_bool(0.5); let 
duration = Duration::from_millis(rng.gen_range(0..15000)); - update_metrics(&mut expected_metrics, success, duration); + update_metrics_locally(&mut expected_metrics, success, duration); updated_metrics .update(success, duration) .expect("should update metrics successfully"); @@ -177,14 +188,15 @@ fn should_update_metrics() { } } -fn update_metrics(metrics: &mut FsTrimMetrics, success: bool, duration: Duration) { +// Simple local "update" for the test reference +fn update_metrics_locally(metrics: &mut FsTrimMetrics, success: bool, duration: Duration) { metrics.total_runs += 1f64; metrics.last_run_success = success; metrics.last_duration_milliseconds = duration.as_millis() as f64; } #[test] -fn should_update_metric_with_infinite_values() { +fn update_metrics_with_infinite_values() { let mut existing_metrics = FsTrimMetrics { total_runs: f64::INFINITY, ..FsTrimMetrics::default() @@ -198,13 +210,14 @@ fn should_update_metric_with_infinite_values() { last_duration_milliseconds: duration.as_millis() as f64, last_run_success: success, total_runs: f64::INFINITY, + ..FsTrimMetrics::default() }; assert_eq!(existing_metrics, expected_metrics); } #[test] -fn should_update_metric_with_nan_values() { +fn update_metrics_with_nan_values() { let mut existing_metrics = FsTrimMetrics { total_runs: f64::NAN, ..FsTrimMetrics::default() @@ -218,6 +231,7 @@ fn should_update_metric_with_nan_values() { last_duration_milliseconds: duration.as_millis() as f64, last_run_success: success, total_runs: f64::NAN, + ..FsTrimMetrics::default() }; assert_eq!(existing_metrics, expected_metrics); @@ -230,7 +244,7 @@ fn verify_invariants(i: f64, existing_metrics: &FsTrimMetrics) { } #[test] -fn should_maintain_invariants() { +fn maintain_invariants() { let mut existing_metrics = FsTrimMetrics::default(); let rng = &mut reproducible_rng(); for i in 0..100 { @@ -242,3 +256,60 @@ fn should_maintain_invariants() { verify_invariants(i as f64, &existing_metrics); } } + +#[test] +fn update_datadir_metrics() { + let mut metrics = FsTrimMetrics::default(); + assert_eq!(metrics.total_runs_datadir, 0.0); + assert_eq!(metrics.last_duration_milliseconds_datadir, 0.0); + assert!(metrics.last_run_success_datadir); + + metrics + .update_datadir(false, Duration::from_millis(123)) + .expect("should update datadir metrics"); + + assert_eq!(metrics.total_runs_datadir, 1.0); + assert_eq!(metrics.last_duration_milliseconds_datadir, 123.0); + assert!(!metrics.last_run_success_datadir); + + // Check that normal fields remain untouched + assert_eq!(metrics.total_runs, 0.0); + assert_eq!(metrics.last_duration_milliseconds, 0.0); + assert!(metrics.last_run_success); +} + +#[test] +fn format_metrics_output() { + let metrics = FsTrimMetrics { + last_duration_milliseconds: 123.45, + last_run_success: true, + total_runs: 6.0, + last_duration_milliseconds_datadir: 678.9, + last_run_success_datadir: false, + total_runs_datadir: 4.0, + }; + + let metrics_str = metrics.to_p8s_metrics_string(); + let expected_str = "\ +# HELP fstrim_last_run_duration_milliseconds Duration of last run of fstrim in milliseconds +# TYPE fstrim_last_run_duration_milliseconds gauge +fstrim_last_run_duration_milliseconds 123.45 +# HELP fstrim_last_run_success Success status of last run of fstrim (success: 1, failure: 0) +# TYPE fstrim_last_run_success gauge +fstrim_last_run_success 1 +# HELP fstrim_runs_total Total number of runs of fstrim +# TYPE fstrim_runs_total counter +fstrim_runs_total 6 +# HELP fstrim_datadir_last_run_duration_milliseconds Duration of last run of fstrim on 
datadir in milliseconds +# TYPE fstrim_datadir_last_run_duration_milliseconds gauge +fstrim_datadir_last_run_duration_milliseconds 678.9 +# HELP fstrim_datadir_last_run_success Success status of last run of fstrim on datadir (success: 1, failure: 0) +# TYPE fstrim_datadir_last_run_success gauge +fstrim_datadir_last_run_success 0 +# HELP fstrim_datadir_runs_total Total number of runs of fstrim on datadir +# TYPE fstrim_datadir_runs_total counter +fstrim_datadir_runs_total 4 +"; + + assert_eq!(metrics_str, expected_str); +} diff --git a/rs/ic_os/fstrim_tool/src/tests.rs b/rs/ic_os/fstrim_tool/src/tests.rs index 7fbb921466f..6fd146a0b85 100644 --- a/rs/ic_os/fstrim_tool/src/tests.rs +++ b/rs/ic_os/fstrim_tool/src/tests.rs @@ -1,32 +1,34 @@ use super::*; use assert_matches::assert_matches; -use std::fs::write; +use std::fs::{read_to_string, write}; +use std::path::PathBuf; +use std::time::Duration; use tempfile::tempdir; -const EXISTING_METRICS_CONTENT: &str = - "# HELP fstrim_last_run_duration_milliseconds Duration of last run of fstrim in milliseconds\n\ - # TYPE fstrim_last_run_duration_milliseconds gauge\n\ - fstrim_last_run_duration_milliseconds 0\n\ - # HELP fstrim_last_run_success Success status of last run of fstrim (success: 1, failure: 0)\n\ - # TYPE fstrim_last_run_success gauge\n\ - fstrim_last_run_success 1\n\ - # HELP fstrim_runs_total Total number of runs of fstrim\n\ - # TYPE fstrim_runs_total counter\n\ - fstrim_runs_total 1\n"; - -const EXISTING_METRICS_CONTENT_WITH_SPECIAL_VALUES: &str = - "# HELP fstrim_last_run_duration_milliseconds Duration of last run of fstrim in milliseconds\n\ - # TYPE fstrim_last_run_duration_milliseconds gauge\n\ - fstrim_last_run_duration_milliseconds 0\n\ - # HELP fstrim_last_run_success Success status of last run of fstrim (success: 1, failure: 0)\n\ - # TYPE fstrim_last_run_success gauge\n\ - fstrim_last_run_success 1\n\ - # HELP fstrim_runs_total Total number of runs of fstrim\n\ - # TYPE fstrim_runs_total counter\n\ - fstrim_runs_total +Inf\n"; +const EXISTING_METRICS_CONTENT: &str = r#"# HELP fstrim_last_run_duration_milliseconds Duration of last run of fstrim in milliseconds +# TYPE fstrim_last_run_duration_milliseconds gauge +fstrim_last_run_duration_milliseconds 0 +# HELP fstrim_last_run_success Success status of last run of fstrim (success: 1, failure: 0) +# TYPE fstrim_last_run_success gauge +fstrim_last_run_success 1 +# HELP fstrim_runs_total Total number of runs of fstrim +# TYPE fstrim_runs_total counter +fstrim_runs_total 1 +"#; + +const EXISTING_METRICS_CONTENT_WITH_SPECIAL_VALUES: &str = r#"# HELP fstrim_last_run_duration_milliseconds Duration of last run of fstrim in milliseconds +# TYPE fstrim_last_run_duration_milliseconds gauge +fstrim_last_run_duration_milliseconds 0 +# HELP fstrim_last_run_success Success status of last run of fstrim (success: 1, failure: 0) +# TYPE fstrim_last_run_success gauge +fstrim_last_run_success 1 +# HELP fstrim_runs_total Total number of runs of fstrim +# TYPE fstrim_runs_total counter +fstrim_runs_total +Inf +"#; #[test] -fn should_parse_metrics_from_file() { +fn parse_metrics_without_datadir_fields() { let tmp_dir = tempdir().expect("temp dir creation should succeed"); let metrics_file = tmp_dir.path().join("fstrim.prom"); write(&metrics_file, EXISTING_METRICS_CONTENT).expect("error writing to file"); @@ -38,12 +40,49 @@ fn should_parse_metrics_from_file() { ) .expect("parsing metrics should succeed") .expect("parsed metrics should be some"); - let parsed_metrics_string = 
parsed_metrics.to_p8s_metrics_string(); - assert_eq!(parsed_metrics_string, EXISTING_METRICS_CONTENT); + + let expected_metrics = FsTrimMetrics { + last_duration_milliseconds: 0.0, + last_run_success: true, + total_runs: 1.0, + last_duration_milliseconds_datadir: 0.0, + last_run_success_datadir: true, + total_runs_datadir: 0.0, + }; + + assert_eq!(parsed_metrics, expected_metrics); } #[test] -fn should_return_error_if_metrics_in_file_contain_special_values() { +fn parse_metrics_with_datadir_fields() { + let tmp_dir = tempdir().expect("temp dir creation should succeed"); + let metrics_file = tmp_dir.path().join("fstrim.prom"); + + let initial_metrics = FsTrimMetrics { + last_duration_milliseconds: 42.0, + last_run_success: false, + total_runs: 7.0, + last_duration_milliseconds_datadir: 999.0, + last_run_success_datadir: true, + total_runs_datadir: 12.0, + }; + write_metrics_using_tmp_file( + &initial_metrics, + metrics_file + .to_str() + .expect("metrics file path should be valid"), + ) + .unwrap(); + + let parsed_metrics = parse_existing_metrics_from_file(metrics_file.to_str().unwrap()) + .expect("parsing metrics should succeed") + .expect("parsed metrics should be some"); + + assert_eq!(parsed_metrics, initial_metrics); +} + +#[test] +fn should_error_if_metrics_in_file_has_special_values() { let tmp_dir = tempdir().expect("temp dir creation should succeed"); let metrics_file = tmp_dir.path().join("fstrim.prom"); write(&metrics_file, EXISTING_METRICS_CONTENT_WITH_SPECIAL_VALUES) @@ -58,14 +97,21 @@ fn should_return_error_if_metrics_in_file_contain_special_values() { } #[test] -fn should_write_metrics_to_file() { +fn write_metrics_to_file() { let tmp_dir = tempdir().expect("temp dir creation should succeed"); let metrics_file = tmp_dir.path().join("fstrim.prom"); - let default_metrics = FsTrimMetrics::default(); - let default_metrics_string = default_metrics.to_p8s_metrics_string(); + + let metrics = FsTrimMetrics { + last_duration_milliseconds: 64.0, + last_run_success: false, + total_runs: 60.0, + last_duration_milliseconds_datadir: 3.0, + last_run_success_datadir: true, + total_runs_datadir: 16.0, + }; write_metrics_using_tmp_file( - &default_metrics, + &metrics, metrics_file .to_str() .expect("metrics file path should be valid"), @@ -79,19 +125,30 @@ fn should_write_metrics_to_file() { ) .expect("parsing metrics should succeed") .expect("parsed metrics should be some"); - let parsed_metrics_string = parsed_metrics.to_p8s_metrics_string(); - assert_eq!( - parsed_metrics, default_metrics, - "{}\n{}", - parsed_metrics_string, default_metrics_string - ); + + assert_eq!(parsed_metrics, metrics); } #[test] -fn should_update_metrics() { +fn test_update_metrics() { let tmp_dir = tempdir().expect("temp dir creation should succeed"); let metrics_file = tmp_dir.path().join("fstrim.prom"); - write(&metrics_file, EXISTING_METRICS_CONTENT).expect("error writing to file"); + + let initial_metrics = FsTrimMetrics { + last_duration_milliseconds: 0.0, + last_run_success: true, + total_runs: 1.0, + last_duration_milliseconds_datadir: 0.0, + last_run_success_datadir: true, + total_runs_datadir: 0.0, + }; + write_metrics_using_tmp_file( + &initial_metrics, + metrics_file + .to_str() + .expect("metrics file path should be valid"), + ) + .unwrap(); update_metrics( Duration::from_millis(151), @@ -99,8 +156,10 @@ fn should_update_metrics() { metrics_file .to_str() .expect("metrics file path should be valid"), + false, ) .expect("updating metrics should succeed"); + let parsed_metrics = 
parse_existing_metrics_from_file( metrics_file .to_str() @@ -108,22 +167,68 @@ fn should_update_metrics() { ) .expect("parsing metrics should succeed") .expect("parsed metrics should be some"); - let expected_metrics = - "# HELP fstrim_last_run_duration_milliseconds Duration of last run of fstrim in milliseconds\n\ - # TYPE fstrim_last_run_duration_milliseconds gauge\n\ - fstrim_last_run_duration_milliseconds 151\n\ - # HELP fstrim_last_run_success Success status of last run of fstrim (success: 1, failure: 0)\n\ - # TYPE fstrim_last_run_success gauge\n\ - fstrim_last_run_success 1\n\ - # HELP fstrim_runs_total Total number of runs of fstrim\n\ - # TYPE fstrim_runs_total counter\n\ - fstrim_runs_total 2\n"; - let parsed_metrics_string = parsed_metrics.to_p8s_metrics_string(); - assert_eq!(parsed_metrics_string, expected_metrics); + + let expected_metrics = FsTrimMetrics { + last_duration_milliseconds: 151.0, + last_run_success: true, + total_runs: 2.0, + last_duration_milliseconds_datadir: 0.0, + last_run_success_datadir: true, + total_runs_datadir: 0.0, + }; + assert_eq!(parsed_metrics, expected_metrics); } #[test] -fn should_start_from_empty_metrics_for_update_if_metrics_in_file_contain_special_values() { +fn update_datadir_metrics() { + let tmp_dir = tempdir().expect("temp dir creation should succeed"); + let metrics_file = tmp_dir.path().join("fstrim.prom"); + + let initial_metrics = FsTrimMetrics { + last_duration_milliseconds: 0.0, + last_run_success: true, + total_runs: 1.0, + last_duration_milliseconds_datadir: 0.0, + last_run_success_datadir: true, + total_runs_datadir: 0.0, + }; + write_metrics_using_tmp_file( + &initial_metrics, + metrics_file + .to_str() + .expect("metrics file path should be valid"), + ) + .unwrap(); + + update_metrics( + Duration::from_millis(501), + false, + metrics_file + .to_str() + .expect("metrics file path should be valid"), + true, + ) + .expect("updating datadir metrics should succeed"); + + let parsed_metrics = parse_existing_metrics_from_file( + metrics_file.to_str().expect("should convert path to str"), + ) + .expect("parsing metrics should succeed") + .expect("parsed metrics should be some"); + + let expected_metrics = FsTrimMetrics { + last_duration_milliseconds: 0.0, + last_run_success: true, + total_runs: 1.0, + last_duration_milliseconds_datadir: 501.0, + last_run_success_datadir: false, + total_runs_datadir: 1.0, + }; + assert_eq!(parsed_metrics, expected_metrics); +} + +#[test] +fn start_from_empty_metrics_when_file_has_special_values() { let tmp_dir = tempdir().expect("temp dir creation should succeed"); let metrics_file = tmp_dir.path().join("fstrim.prom"); write(&metrics_file, EXISTING_METRICS_CONTENT_WITH_SPECIAL_VALUES) @@ -135,8 +240,10 @@ fn should_start_from_empty_metrics_for_update_if_metrics_in_file_contain_special metrics_file .to_str() .expect("metrics file path should be valid"), + false, ) .expect("updating metrics should succeed"); + let parsed_metrics = parse_existing_metrics_from_file( metrics_file .to_str() @@ -144,37 +251,37 @@ fn should_start_from_empty_metrics_for_update_if_metrics_in_file_contain_special ) .expect("parsing metrics should succeed") .expect("parsed metrics should be some"); - let expected_metrics = - "# HELP fstrim_last_run_duration_milliseconds Duration of last run of fstrim in milliseconds\n\ - # TYPE fstrim_last_run_duration_milliseconds gauge\n\ - fstrim_last_run_duration_milliseconds 151\n\ - # HELP fstrim_last_run_success Success status of last run of fstrim (success: 1, failure: 0)\n\ - # TYPE 
fstrim_last_run_success gauge\n\ - fstrim_last_run_success 1\n\ - # HELP fstrim_runs_total Total number of runs of fstrim\n\ - # TYPE fstrim_runs_total counter\n\ - fstrim_runs_total 1\n"; - let parsed_metrics_string = parsed_metrics.to_p8s_metrics_string(); - assert_eq!(parsed_metrics_string, expected_metrics); + + let expected_metrics = FsTrimMetrics { + last_duration_milliseconds: 151.0, + last_run_success: true, + total_runs: 1.0, + last_duration_milliseconds_datadir: 0.0, + last_run_success_datadir: true, + total_runs_datadir: 0.0, + }; + assert_eq!(parsed_metrics, expected_metrics); } #[test] -fn should_return_ok_from_successfully_run_command() { +fn successfully_run_command() { run_command("true", "/").expect("running command should succeed"); } #[test] -fn should_return_error_from_unsuccessfully_run_command() { +fn unsuccessfully_run_command() { let res = run_command("false", "/"); assert_matches!(res, Err(err) if err.to_string().contains("Failed to run command")); } #[test] -fn should_fail_but_write_metrics_if_command_fails() { +fn command_fails_but_writes_metrics() { let tmp_dir = tempdir().expect("temp dir creation should succeed"); let tmp_dir2 = tempdir().expect("temp dir creation should succeed"); let metrics_file = tmp_dir.path().join("fstrim.prom"); + + // This should fail to run the command, but still write updated metrics assert_matches!( fstrim_tool( "/non/existent/command", @@ -198,11 +305,20 @@ fn should_fail_but_write_metrics_if_command_fails() { if err.to_string().contains("Failed to run command") ); - assert_metrics_file_content(&metrics_file, false, 1); + // Verify that the metrics were written with success=0, total_runs=1, etc. + let parsed_metrics = + parse_existing_metrics_from_file(metrics_file.to_str().expect("valid path")) + .expect("parsing metrics should succeed") + .expect("parsed metrics should be some"); + + assert!(!parsed_metrics.last_run_success); + assert_eq!(parsed_metrics.total_runs, 1.0); + assert!(!parsed_metrics.last_run_success_datadir); + assert_eq!(parsed_metrics.total_runs_datadir, 1.0); } #[test] -fn should_fail_if_command_cannot_be_run() { +fn fails_if_command_cannot_be_run() { let tmp_dir = tempdir().expect("temp dir creation should succeed"); let tmp_dir2 = tempdir().expect("temp dir creation should succeed"); @@ -232,10 +348,11 @@ fn should_fail_if_command_cannot_be_run() { } #[test] -fn should_not_run_command_but_initialize_metrics_if_flag_set() { +fn init_flag() { let tmp_dir = tempdir().expect("temp dir creation should succeed"); let tmp_dir2 = tempdir().expect("temp dir creation should succeed"); let metrics_file = tmp_dir.path().join("fstrim.prom"); + assert!(fstrim_tool( "/non/existent/command", metrics_file @@ -247,7 +364,7 @@ fn should_not_run_command_but_initialize_metrics_if_flag_set() { .to_str() .expect("tmp_dir path should be valid") .to_string(), - true, + true, //init should write out default metrics even though the command fails tmp_dir2 .path() .to_str() @@ -256,11 +373,16 @@ fn should_not_run_command_but_initialize_metrics_if_flag_set() { ) .is_ok()); - assert_metrics_file_content(&metrics_file, true, 0); + let parsed_metrics = + parse_existing_metrics_from_file(metrics_file.to_str().expect("valid path")) + .expect("parsing metrics should succeed") + .expect("parsed metrics should be some"); + + assert_eq!(parsed_metrics, FsTrimMetrics::default()); } #[test] -fn should_not_overwrite_existing_metrics_if_metrics_init_flag_set() { +fn init_flag_does_not_overwrite_existing_metrics() { let tmp_dir = tempdir().expect("temp 
dir creation should succeed"); let tmp_dir2 = tempdir().expect("temp dir creation should succeed"); @@ -305,14 +427,16 @@ fn should_not_overwrite_existing_metrics_if_metrics_init_flag_set() { ) .is_ok()); - assert_metrics_file_content(&metrics_file, true, 1); + let content = read_to_string(&metrics_file).expect("reading metrics should succeed"); + assert!(content.contains("fstrim_runs_total 1")); } #[test] -fn should_fail_if_metrics_file_cannot_be_written_to() { +fn should_fail_if_metrics_file_cannot_be_written() { let metrics_file = PathBuf::from("/non/existent/directory/fstrim.prom"); let tmp_dir = tempdir().expect("temp dir creation should succeed"); let tmp_dir2 = tempdir().expect("temp dir creation should succeed"); + assert_matches!( fstrim_tool( "true", @@ -344,10 +468,8 @@ fn should_fail_if_target_is_not_a_directory() { let metrics_file = tmp_dir.path().join("fstrim.prom"); let target = PathBuf::from("/non/existent/target/directory"); - let expected_error = format!( - "Target {} is not a directory", - target.to_str().expect("target path should be valid") - ); + let expected_error = format!("Target {} is not a directory", target.to_str().unwrap()); + assert_matches!( fstrim_tool( "true", @@ -370,48 +492,3 @@ fn should_fail_if_target_is_not_a_directory() { if err.to_string() == expected_error ); } - -fn assert_metrics_file_content(metrics_filename: &PathBuf, is_success: bool, total_runs: u32) { - let file = File::open(metrics_filename).expect("should succeed in opening metrics file"); - let reader = BufReader::new(file); - let lines = reader.lines(); - for (i, line) in lines.enumerate() { - match i { - 0 => assert_eq!( - line.unwrap(), - "# HELP fstrim_last_run_duration_milliseconds Duration of last run of fstrim in milliseconds" - ), - 1 => assert_eq!( - line.unwrap(), - "# TYPE fstrim_last_run_duration_milliseconds gauge" - ), - 2 => assert!(line.unwrap().starts_with("fstrim_last_run_duration_milliseconds")), - 3 => assert_eq!( - line.unwrap(), "# HELP fstrim_last_run_success Success status of last run of fstrim (success: 1, failure: 0)" - ), - 4 => assert_eq!( - line.unwrap(), "# TYPE fstrim_last_run_success gauge" - ), - 5 => { - let line_str = line.unwrap(); - let mut tokens = line_str.split(' '); - assert_eq!(tokens.next().unwrap(), "fstrim_last_run_success", "{}", line_str); - let success_str = if is_success { "1" } else { "0" }; - assert_eq!(tokens.next().unwrap(), success_str, "{}", line_str); - }, - 6 => assert_eq!( - line.unwrap(), "# HELP fstrim_runs_total Total number of runs of fstrim" - ), - 7 => assert_eq!( - line.unwrap(), "# TYPE fstrim_runs_total counter" - ), - 8 => { - let line_str = line.unwrap(); - let mut tokens = line_str.split(' '); - assert_eq!(tokens.next().unwrap(), "fstrim_runs_total", "{}", line_str); - assert_eq!(tokens.next().unwrap().parse::().unwrap(), total_runs, "{}", line_str); - }, - _ => panic!("unexpected line: {}", line.unwrap()), - } - } -} diff --git a/rs/ic_os/fstrim_tool/tests/integration_tests.rs b/rs/ic_os/fstrim_tool/tests/integration_tests.rs index 3701250642c..e89c785f3cd 100644 --- a/rs/ic_os/fstrim_tool/tests/integration_tests.rs +++ b/rs/ic_os/fstrim_tool/tests/integration_tests.rs @@ -1,68 +1,39 @@ use assert_cmd::Command; use predicates::prelude::*; -use std::fs::File; -use std::io::{BufRead, BufReader}; -use std::path::PathBuf; +use regex::Regex; +use std::fs::read_to_string; use tempfile::tempdir; fn new_fstrim_tool_command() -> Command { match Command::cargo_bin("fstrim_tool") { // When in Cargo environment. 
- Ok(v) => v, + Ok(cmd) => cmd, // When in Bazel environment Err(_) => Command::new("rs/ic_os/fstrim_tool/fstrim_tool_bin"), } } -fn assert_metrics_file_content(metrics_filename: &PathBuf, is_success: bool, total_runs: u32) { - let file = File::open(metrics_filename).expect("should succeed in opening metrics file"); - let reader = BufReader::new(file); - let lines = reader.lines(); - for (i, line) in lines.enumerate() { - match i { - 0 => assert_eq!( - line.unwrap(), - "# HELP fstrim_last_run_duration_milliseconds Duration of last run of fstrim in milliseconds" - ), - 1 => assert_eq!( - line.unwrap(), - "# TYPE fstrim_last_run_duration_milliseconds gauge" - ), - 2 => assert!(line.unwrap().starts_with("fstrim_last_run_duration_milliseconds")), - 3 => assert_eq!( - line.unwrap(), "# HELP fstrim_last_run_success Success status of last run of fstrim (success: 1, failure: 0)" - ), - 4 => assert_eq!( - line.unwrap(), "# TYPE fstrim_last_run_success gauge" - ), - 5 => { - let line_str = line.unwrap(); - let mut tokens = line_str.split(' '); - assert_eq!(tokens.next().unwrap(), "fstrim_last_run_success", "{}", line_str); - let success_str = if is_success { "1" } else { "0" }; - assert_eq!(tokens.next().unwrap(), success_str, "{}", line_str); - }, - 6 => assert_eq!( - line.unwrap(), "# HELP fstrim_runs_total Total number of runs of fstrim" - ), - 7 => assert_eq!( - line.unwrap(), "# TYPE fstrim_runs_total counter" - ), - 8 => { - let line_str = line.unwrap(); - let mut tokens = line_str.split(' '); - assert_eq!(tokens.next().unwrap(), "fstrim_runs_total", "{}", line_str); - assert_eq!(tokens.next().unwrap().parse::().unwrap(), total_runs, "{}", line_str); - }, - _ => panic!("unexpected line: {}", line.unwrap()), - } - } +/// Replaces lines that contain: +/// - `fstrim_last_run_duration_milliseconds X` +/// - `fstrim_datadir_last_run_duration_milliseconds X` +/// +/// with a placeholder: +/// - `fstrim_last_run_duration_milliseconds ` +/// - `fstrim_datadir_last_run_duration_milliseconds ` +/// +/// This ensures that the duration numeric values do not cause test flakiness. 
+fn normalize_duration_line(input: &str) -> String { + let re = + Regex::new(r"(?m)^(fstrim(?:_datadir)?_last_run_duration_milliseconds)\s+\d+(\.\d+)?$") + .unwrap(); + re.replace_all(input, "$1 ").into_owned() } #[test] -fn should_successfully_initialize_metrics_if_flag_is_set() { +fn initialize_metrics() { let tmp_dir = tempdir().expect("temp dir creation should succeed"); let metrics_file = tmp_dir.path().join("fstrim.prom"); + new_fstrim_tool_command() .args([ "--metrics", @@ -81,13 +52,34 @@ fn should_successfully_initialize_metrics_if_flag_is_set() { .stderr(predicate::str::is_empty()) .success(); - assert_metrics_file_content(&metrics_file, true, 0); + let actual = read_to_string(&metrics_file).expect("reading metrics file should succeed"); + let expected = r#"# HELP fstrim_last_run_duration_milliseconds Duration of last run of fstrim in milliseconds +# TYPE fstrim_last_run_duration_milliseconds gauge +fstrim_last_run_duration_milliseconds 0 +# HELP fstrim_last_run_success Success status of last run of fstrim (success: 1, failure: 0) +# TYPE fstrim_last_run_success gauge +fstrim_last_run_success 1 +# HELP fstrim_runs_total Total number of runs of fstrim +# TYPE fstrim_runs_total counter +fstrim_runs_total 0 +# HELP fstrim_datadir_last_run_duration_milliseconds Duration of last run of fstrim on datadir in milliseconds +# TYPE fstrim_datadir_last_run_duration_milliseconds gauge +fstrim_datadir_last_run_duration_milliseconds 0 +# HELP fstrim_datadir_last_run_success Success status of last run of fstrim on datadir (success: 1, failure: 0) +# TYPE fstrim_datadir_last_run_success gauge +fstrim_datadir_last_run_success 1 +# HELP fstrim_datadir_runs_total Total number of runs of fstrim on datadir +# TYPE fstrim_datadir_runs_total counter +fstrim_datadir_runs_total 0 +"#; + assert_eq!(actual, expected); } #[test] -fn should_fail_but_write_metrics_if_target_is_not_a_directory() { +fn should_fail_but_write_metrics_if_target_not_a_directory() { let tmp_dir = tempdir().expect("temp dir creation should succeed"); let metrics_file = tmp_dir.path().join("fstrim.prom"); + new_fstrim_tool_command() .args([ "--metrics", @@ -102,41 +94,36 @@ fn should_fail_but_write_metrics_if_target_is_not_a_directory() { .stderr(predicate::str::contains("not a directory")) .failure(); - assert_metrics_file_content(&metrics_file, false, 1); -} + let actual = read_to_string(&metrics_file).expect("reading metrics file should succeed"); + // The command fails, so success=0, runs=1. 
Datadir not updated => datadir success=1, runs=0 + let expected = r#"# HELP fstrim_last_run_duration_milliseconds Duration of last run of fstrim in milliseconds +# TYPE fstrim_last_run_duration_milliseconds gauge +fstrim_last_run_duration_milliseconds 0 +# HELP fstrim_last_run_success Success status of last run of fstrim (success: 1, failure: 0) +# TYPE fstrim_last_run_success gauge +fstrim_last_run_success 0 +# HELP fstrim_runs_total Total number of runs of fstrim +# TYPE fstrim_runs_total counter +fstrim_runs_total 1 +# HELP fstrim_datadir_last_run_duration_milliseconds Duration of last run of fstrim on datadir in milliseconds +# TYPE fstrim_datadir_last_run_duration_milliseconds gauge +fstrim_datadir_last_run_duration_milliseconds 0 +# HELP fstrim_datadir_last_run_success Success status of last run of fstrim on datadir (success: 1, failure: 0) +# TYPE fstrim_datadir_last_run_success gauge +fstrim_datadir_last_run_success 1 +# HELP fstrim_datadir_runs_total Total number of runs of fstrim on datadir +# TYPE fstrim_datadir_runs_total counter +fstrim_datadir_runs_total 0 +"#; -// This fails if not tested under root user as the successful execution of the 1st target calls fstrim -// #[test] -// fn should_fail_but_write_metrics_if_data_target_is_not_a_directory() { -// let tmp_dir = tempdir().expect("temp dir creation should succeed"); -// let metrics_file = tmp_dir.path().join("fstrim.prom"); -// new_fstrim_tool_command() -// .args([ -// "--metrics", -// metrics_file -// .to_str() -// .expect("metrics file path should be valid"), -// "--target", -// tmp_dir -// .path() -// .to_str() -// .expect("tmp_dir path should be valid"), -// "--datadir_target", -// "/not/a/directory", -// ]) -// .assert() -// .stdout(predicate::str::is_empty()) -// .stderr(predicate::str::contains("not a directory")) -// .failure(); -// -// // As metrics now only target the main target, success will be reported -// assert_metrics_file_content(&metrics_file, true, 1); -// } + assert_eq!(actual, expected); +} #[test] -fn should_fail_but_write_metrics_with_discard_not_supported_with_correct_parameters() { +fn should_fail_but_writes_metrics_when_discard_not_supported() { let tmp_dir = tempdir().expect("temp dir creation should succeed"); let metrics_file = tmp_dir.path().join("fstrim.prom"); + new_fstrim_tool_command() .args([ "--metrics", @@ -151,12 +138,37 @@ fn should_fail_but_write_metrics_with_discard_not_supported_with_correct_paramet ]) .assert() .stdout(predicate::str::is_empty()) - .stderr(predicate::str::contains( - "the discard operation is not supported", - )) + .stderr( + predicate::str::contains("the discard operation is not supported") + .or(predicate::str::contains("Operation not permitted")), + ) .failure(); - assert_metrics_file_content(&metrics_file, false, 1); + let actual_raw = read_to_string(&metrics_file).expect("reading metrics file should succeed"); + let actual = normalize_duration_line(&actual_raw); + // The tool fails => success=0, runs=1. 
Datadir not updated => success=1, runs=0 + let expected_raw = r#"# HELP fstrim_last_run_duration_milliseconds Duration of last run of fstrim in milliseconds +# TYPE fstrim_last_run_duration_milliseconds gauge +fstrim_last_run_duration_milliseconds 2 +# HELP fstrim_last_run_success Success status of last run of fstrim (success: 1, failure: 0) +# TYPE fstrim_last_run_success gauge +fstrim_last_run_success 0 +# HELP fstrim_runs_total Total number of runs of fstrim +# TYPE fstrim_runs_total counter +fstrim_runs_total 1 +# HELP fstrim_datadir_last_run_duration_milliseconds Duration of last run of fstrim on datadir in milliseconds +# TYPE fstrim_datadir_last_run_duration_milliseconds gauge +fstrim_datadir_last_run_duration_milliseconds 0 +# HELP fstrim_datadir_last_run_success Success status of last run of fstrim on datadir (success: 1, failure: 0) +# TYPE fstrim_datadir_last_run_success gauge +fstrim_datadir_last_run_success 1 +# HELP fstrim_datadir_runs_total Total number of runs of fstrim on datadir +# TYPE fstrim_datadir_runs_total counter +fstrim_datadir_runs_total 0 +"#; + let expected = normalize_duration_line(expected_raw); + + assert_eq!(actual, expected); } #[test] From d90e934eb440c730d44d9d9b1ece2cc3f9505d05 Mon Sep 17 00:00:00 2001 From: Paul Liu Date: Wed, 15 Jan 2025 17:38:57 +0800 Subject: [PATCH 26/33] fix: cargo build registry-canister for wasm32 target (#3408) The following command currently failed to compile the registry canister if running from the /ic/rs/registery/canister sub-directory: cargo build --profile canister-release --target wasm32-unknown-unknown --bin registry-canister The fix is to make sure the feature `getrandom/custom` is enabled. Note that the above command would succeed if running from the top-level directory, but would produce incorrect wasm binary. This is because cargo would bring in global dependencies that enable both `getrandom/custom` and `getrandom/js` features, and the latter will lead to wasm binaries having unwanted imports (See #3309 for more details). Since this problem does not affect bazel builds, this fix is only relevant to cargo. --- Cargo.lock | 1 + rs/registry/canister/Cargo.toml | 3 +++ 2 files changed, 4 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 098b9048d54..9b761dcc7bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -18343,6 +18343,7 @@ dependencies = [ "dfn_core", "dfn_http_metrics", "futures", + "getrandom", "ic-base-types", "ic-canister-client-sender", "ic-cdk 0.16.0", diff --git a/rs/registry/canister/Cargo.toml b/rs/registry/canister/Cargo.toml index 25ad4b78899..43ed07df70b 100644 --- a/rs/registry/canister/Cargo.toml +++ b/rs/registry/canister/Cargo.toml @@ -51,6 +51,9 @@ prost = { workspace = true } serde = { workspace = true } url = { workspace = true } +[target.'cfg(target_arch = "wasm32")'.dependencies] +getrandom = { version = "0.2", features = [ "custom" ] } + [target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies] assert_matches = { workspace = true } candid_parser = { workspace = true } From f8f274d89991300aad7a7ef850f379a728a50378 Mon Sep 17 00:00:00 2001 From: oggy-dfin <89794951+oggy-dfin@users.noreply.github.com> Date: Wed, 15 Jan 2025 10:53:13 +0100 Subject: [PATCH 27/33] feat(IC-1579): TLA annotations for disburse_to_neuron (#3411) Instrument the `disburse_to_neuron` function to check the test traces for compatibility with the TLA model. 
--- rs/nns/governance/src/governance.rs | 12 +- .../src/governance/tla/disburse_to_neuron.rs | 29 +++ rs/nns/governance/src/governance/tla/mod.rs | 2 + rs/nns/governance/tests/governance.rs | 2 + rs/nns/governance/tla/Claim_Neuron.tla | 2 +- rs/nns/governance/tla/Disburse_To_Neuron.tla | 193 ++++++++++++++++++ .../tla/Disburse_To_Neuron_Apalache.tla | 70 +++++++ 7 files changed, 307 insertions(+), 3 deletions(-) create mode 100644 rs/nns/governance/src/governance/tla/disburse_to_neuron.rs create mode 100644 rs/nns/governance/tla/Disburse_To_Neuron.tla create mode 100644 rs/nns/governance/tla/Disburse_To_Neuron_Apalache.tla diff --git a/rs/nns/governance/src/governance.rs b/rs/nns/governance/src/governance.rs index 48a951b8349..ccfeb7948f0 100644 --- a/rs/nns/governance/src/governance.rs +++ b/rs/nns/governance/src/governance.rs @@ -147,8 +147,8 @@ use std::collections::BTreeSet; #[cfg(feature = "tla")] pub use tla::{ tla_update_method, InstrumentationState, ToTla, CLAIM_NEURON_DESC, DISBURSE_NEURON_DESC, - MERGE_NEURONS_DESC, SPAWN_NEURONS_DESC, SPAWN_NEURON_DESC, SPLIT_NEURON_DESC, - TLA_INSTRUMENTATION_STATE, TLA_TRACES_LKEY, TLA_TRACES_MUTEX, + DISBURSE_TO_NEURON_DESC, MERGE_NEURONS_DESC, SPAWN_NEURONS_DESC, SPAWN_NEURON_DESC, + SPLIT_NEURON_DESC, TLA_INSTRUMENTATION_STATE, TLA_TRACES_LKEY, TLA_TRACES_MUTEX, }; // 70 KB (for executing NNS functions that are not canister upgrades) @@ -3541,6 +3541,7 @@ impl Governance { /// stake. /// - The amount to split minus the transfer fee is more than the minimum /// stake. + #[cfg_attr(feature = "tla", tla_update_method(DISBURSE_TO_NEURON_DESC.clone()))] pub async fn disburse_to_neuron( &mut self, id: &NeuronId, @@ -3706,6 +3707,13 @@ impl Governance { // Do the transfer from the parent neuron's subaccount to the child neuron's // subaccount. let memo = created_timestamp_seconds; + + tla_log_locals! { + parent_neuron_id: parent_nid.id, + disburse_amount: disburse_to_neuron.amount_e8s, + child_neuron_id: child_nid.id, + child_account_id: tla::account_to_tla(neuron_subaccount(to_subaccount)) + }; let result: Result = self .ledger .transfer_funds( diff --git a/rs/nns/governance/src/governance/tla/disburse_to_neuron.rs b/rs/nns/governance/src/governance/tla/disburse_to_neuron.rs new file mode 100644 index 00000000000..b114e0b9956 --- /dev/null +++ b/rs/nns/governance/src/governance/tla/disburse_to_neuron.rs @@ -0,0 +1,29 @@ +use super::{extract_common_constants, post_process_trace}; +use lazy_static::lazy_static; +use tla_instrumentation::{Label, TlaConstantAssignment, ToTla, Update, VarAssignment}; + +const PID: &str = "Disburse_To_Neuron"; +lazy_static! 
{ + pub static ref DISBURSE_TO_NEURON_DESC: Update = { + let default_locals = VarAssignment::new() + .add("parent_neuron_id", 0_u64.to_tla_value()) + .add("disburse_amount", 0_u64.to_tla_value()) + .add("child_account_id", "".to_tla_value()) + .add("child_neuron_id", 0_u64.to_tla_value()); + Update { + default_start_locals: default_locals.clone(), + default_end_locals: default_locals, + start_label: Label::new("DisburseToNeuron"), + end_label: Label::new("Done"), + process_id: PID.to_string(), + canister_name: "governance".to_string(), + post_process: |trace| { + let constants = TlaConstantAssignment { + constants: extract_common_constants(PID, trace).into_iter().collect(), + }; + post_process_trace(trace); + constants + }, + } + }; +} diff --git a/rs/nns/governance/src/governance/tla/mod.rs b/rs/nns/governance/src/governance/tla/mod.rs index bf8fa7de13d..45e62e439d2 100644 --- a/rs/nns/governance/src/governance/tla/mod.rs +++ b/rs/nns/governance/src/governance/tla/mod.rs @@ -26,6 +26,7 @@ pub use store::{TLA_INSTRUMENTATION_STATE, TLA_TRACES_LKEY, TLA_TRACES_MUTEX}; mod claim_neuron; mod disburse_neuron; +mod disburse_to_neuron; mod merge_neurons; mod spawn_neuron; mod spawn_neurons; @@ -33,6 +34,7 @@ mod split_neuron; pub use claim_neuron::CLAIM_NEURON_DESC; pub use disburse_neuron::DISBURSE_NEURON_DESC; +pub use disburse_to_neuron::DISBURSE_TO_NEURON_DESC; pub use merge_neurons::MERGE_NEURONS_DESC; pub use spawn_neuron::SPAWN_NEURON_DESC; pub use spawn_neurons::SPAWN_NEURONS_DESC; diff --git a/rs/nns/governance/tests/governance.rs b/rs/nns/governance/tests/governance.rs index 7a11a736248..f1cde600775 100644 --- a/rs/nns/governance/tests/governance.rs +++ b/rs/nns/governance/tests/governance.rs @@ -4264,6 +4264,7 @@ fn fixture_for_approve_kyc() -> GovernanceProto { /// If we approve KYC for Principals 1 and 2, neurons A, B and C should have /// `kyc_verified=true`, while neuron D still has `kyc_verified=false` #[test] +#[cfg_attr(feature = "tla", with_tla_trace_check)] fn test_approve_kyc() { let governance_proto = fixture_for_approve_kyc(); let driver = fake::FakeDriver::default() @@ -6716,6 +6717,7 @@ async fn test_neuron_with_non_self_authenticating_controller_is_now_allowed() { } #[test] +#[cfg_attr(feature = "tla", with_tla_trace_check)] fn test_disburse_to_neuron() { let from = *TEST_NEURON_1_OWNER_PRINCIPAL; // Compute the subaccount to which the transfer would have been made diff --git a/rs/nns/governance/tla/Claim_Neuron.tla b/rs/nns/governance/tla/Claim_Neuron.tla index cd38eebed88..d88dcecb55a 100644 --- a/rs/nns/governance/tla/Claim_Neuron.tla +++ b/rs/nns/governance/tla/Claim_Neuron.tla @@ -72,7 +72,7 @@ process ( Claim_Neuron \in Claim_Neuron_Process_Ids ) \* instead of await here, to check that assert neuron_id \notin locks; locks := locks \union {neuron_id}; - neuron_id_by_account := account :> neuron_id @@ neuron_id_by_account; +gp neuron_id_by_account := account :> neuron_id @@ neuron_id_by_account; neuron := neuron_id :> [ cached_stake |-> 0, account |-> account, fees |-> 0, maturity |-> 0 ] @@ neuron; \* send_request(self, OP_QUERY_BALANCE, balance_query(account)); governance_to_ledger := Append(governance_to_ledger, request(self, account_balance(account))); diff --git a/rs/nns/governance/tla/Disburse_To_Neuron.tla b/rs/nns/governance/tla/Disburse_To_Neuron.tla new file mode 100644 index 00000000000..622f87505d2 --- /dev/null +++ b/rs/nns/governance/tla/Disburse_To_Neuron.tla @@ -0,0 +1,193 @@ +---- MODULE Disburse_To_Neuron ---- + +EXTENDS TLC, Integers, FiniteSets, 
Sequences, Variants + +CONSTANTS + Governance_Account_Ids, + Neuron_Ids + +CONSTANTS + Disburse_To_Neuron_Process_Ids + +CONSTANTS + \* Minimum stake a neuron can have + MIN_STAKE, + \* The transfer fee charged by the ledger canister + TRANSACTION_FEE + +CONSTANT + FRESH_NEURON_ID(_) + +\* Initial value used for uninitialized accounts +DUMMY_ACCOUNT == "" + +\* @type: (a -> b, Set(a)) => a -> b; +Remove_Arguments(f, S) == [ x \in (DOMAIN f \ S) |-> f[x]] +Max(x, y) == IF x < y THEN y ELSE x + +request(caller, request_args) == [caller |-> caller, method_and_args |-> request_args] +transfer(from, to, amount, fee) == Variant("Transfer", [from |-> from, to |-> to, amount |-> amount, fee |-> fee]) + +o_deduct(disb_amount) == disb_amount + TRANSACTION_FEE + +(* --algorithm Governance_Ledger_Disburse_To_Neuron { + +variables + + neuron \in [{} -> {}]; + \* Used to decide whether we should refresh or claim a neuron + neuron_id_by_account \in [{} -> {}]; + \* The set of currently locked neurons + locks = {}; + \* The queue of messages sent from the governance canister to the ledger canister + governance_to_ledger = <<>>; + ledger_to_governance = {}; + spawning_neurons = FALSE; + +macro send_request(caller_id, request_args) { + governance_to_ledger := Append(governance_to_ledger, request(caller_id, request_args)) +}; + +process (Disburse_To_Neuron \in Disburse_To_Neuron_Process_Ids) + variables + parent_neuron_id = 0; + disburse_amount = 0; + child_account_id = DUMMY_ACCOUNT; + child_neuron_id = 0; + { + DisburseToNeuron: + either { + \* Simulate calls that just fail early and don't change the state. + \* Not so useful for model checking, but needed to follow the code traces. + goto Done; + } or { + \* Skipping a few checks again: + \* 1. authorization of the caller + \* 2. that the parent neuron has been dissolved + \* 3. kyc checks + \* 4. checks on the presence and shape of new controller + with(pnid \in DOMAIN(neuron) \ locks; + parent_neuron = neuron[pnid]; + amt \in (MIN_STAKE + TRANSACTION_FEE)..(parent_neuron.cached_stake - parent_neuron.fees - MIN_STAKE); + c_acc_id \in Governance_Account_Ids \ { neuron[n].account : n \in DOMAIN(neuron)}; + ) { + parent_neuron_id := pnid; + disburse_amount := amt; + await parent_neuron.maturity <= TRANSACTION_FEE; + child_account_id := c_acc_id; + child_neuron_id := FRESH_NEURON_ID(DOMAIN(neuron)); + neuron_id_by_account := child_account_id :> child_neuron_id @@ neuron_id_by_account; + neuron := child_neuron_id :> [ cached_stake |-> 0, account |-> child_account_id, fees |-> 0, maturity |-> 0 ] @@ neuron; + \* The Rust code throws an error here if the parent neuron is locked. Instead, we prevent the Disburse_To_Neuron process from running. + \* This is OK since the Rust code doesn't change the canister's state before obtaining the parant lock (if it + \* did, the model wouldn't capture this state and we could miss behaviors). + assert child_neuron_id \notin locks; + \* Note that in the implementation this implies that child_neuron_id != parent_neuron_id, + \* as the locks are taken sequentially there; here, we're sure that these neuron IDs differ, + \* so we omit the extra check. 
+ locks := locks \union {parent_neuron_id, child_neuron_id}; + send_request(self, transfer(parent_neuron.account, child_account_id, disburse_amount - TRANSACTION_FEE, TRANSACTION_FEE)); + }; + }; + DisburseToNeuron_WaitForTransfer: + with(answer \in { resp \in ledger_to_governance: resp.caller = self}) { + ledger_to_governance := ledger_to_governance \ {answer}; + if(answer.response = Variant("Fail", UNIT)) { + neuron := Remove_Arguments(neuron, {child_neuron_id}); + neuron_id_by_account := Remove_Arguments(neuron_id_by_account, {child_account_id}); + } else { + neuron := [ neuron EXCEPT ![parent_neuron_id].cached_stake = @ - disburse_amount, + ![child_neuron_id].cached_stake = disburse_amount - TRANSACTION_FEE ]; + }; + locks := locks \ {parent_neuron_id, child_neuron_id}; + parent_neuron_id := 0; + disburse_amount := 0; + child_account_id := DUMMY_ACCOUNT; + child_neuron_id := 0; + }; + + } +} +*) +\* BEGIN TRANSLATION (chksum(pcal) = "d03e80ed" /\ chksum(tla) = "b79d8d63") +VARIABLES pc, neuron, neuron_id_by_account, locks, governance_to_ledger, + ledger_to_governance, spawning_neurons, parent_neuron_id, + disburse_amount, child_account_id, child_neuron_id + +vars == << pc, neuron, neuron_id_by_account, locks, governance_to_ledger, + ledger_to_governance, spawning_neurons, parent_neuron_id, + disburse_amount, child_account_id, child_neuron_id >> + +ProcSet == (Disburse_To_Neuron_Process_Ids) + +Init == (* Global variables *) + /\ neuron \in [{} -> {}] + /\ neuron_id_by_account \in [{} -> {}] + /\ locks = {} + /\ governance_to_ledger = <<>> + /\ ledger_to_governance = {} + /\ spawning_neurons = FALSE + (* Process Disburse_To_Neuron *) + /\ parent_neuron_id = [self \in Disburse_To_Neuron_Process_Ids |-> 0] + /\ disburse_amount = [self \in Disburse_To_Neuron_Process_Ids |-> 0] + /\ child_account_id = [self \in Disburse_To_Neuron_Process_Ids |-> DUMMY_ACCOUNT] + /\ child_neuron_id = [self \in Disburse_To_Neuron_Process_Ids |-> 0] + /\ pc = [self \in ProcSet |-> "DisburseToNeuron"] + +DisburseToNeuron(self) == /\ pc[self] = "DisburseToNeuron" + /\ \/ /\ pc' = [pc EXCEPT ![self] = "Done"] + /\ UNCHANGED <> + \/ /\ \E pnid \in DOMAIN(neuron) \ locks: + LET parent_neuron == neuron[pnid] IN + \E amt \in (MIN_STAKE + TRANSACTION_FEE)..(parent_neuron.cached_stake - parent_neuron.fees - MIN_STAKE): + \E c_acc_id \in Governance_Account_Ids \ { neuron[n].account : n \in DOMAIN(neuron)}: + /\ parent_neuron_id' = [parent_neuron_id EXCEPT ![self] = pnid] + /\ disburse_amount' = [disburse_amount EXCEPT ![self] = amt] + /\ parent_neuron.maturity <= TRANSACTION_FEE + /\ child_account_id' = [child_account_id EXCEPT ![self] = c_acc_id] + /\ child_neuron_id' = [child_neuron_id EXCEPT ![self] = FRESH_NEURON_ID(DOMAIN(neuron))] + /\ neuron_id_by_account' = (child_account_id'[self] :> child_neuron_id'[self] @@ neuron_id_by_account) + /\ neuron' = (child_neuron_id'[self] :> [ cached_stake |-> 0, account |-> child_account_id'[self], fees |-> 0, maturity |-> 0 ] @@ neuron) + /\ Assert(child_neuron_id'[self] \notin locks, + "Failure of assertion at line 84, column 17.") + /\ locks' = (locks \union {parent_neuron_id'[self], child_neuron_id'[self]}) + /\ governance_to_ledger' = Append(governance_to_ledger, request(self, (transfer(parent_neuron.account, child_account_id'[self], disburse_amount'[self] - TRANSACTION_FEE, TRANSACTION_FEE)))) + /\ pc' = [pc EXCEPT ![self] = "DisburseToNeuron_WaitForTransfer"] + /\ UNCHANGED << ledger_to_governance, + spawning_neurons >> + +DisburseToNeuron_WaitForTransfer(self) == /\ 
pc[self] = "DisburseToNeuron_WaitForTransfer" + /\ \E answer \in { resp \in ledger_to_governance: resp.caller = self}: + /\ ledger_to_governance' = ledger_to_governance \ {answer} + /\ IF answer.response = Variant("Fail", UNIT) + THEN /\ neuron' = Remove_Arguments(neuron, {child_neuron_id[self]}) + /\ neuron_id_by_account' = Remove_Arguments(neuron_id_by_account, {child_account_id[self]}) + ELSE /\ neuron' = [ neuron EXCEPT ![parent_neuron_id[self]].cached_stake = @ - disburse_amount[self], + ![child_neuron_id[self]].cached_stake = disburse_amount[self] - TRANSACTION_FEE ] + /\ UNCHANGED neuron_id_by_account + /\ locks' = locks \ {parent_neuron_id[self], child_neuron_id[self]} + /\ parent_neuron_id' = [parent_neuron_id EXCEPT ![self] = 0] + /\ disburse_amount' = [disburse_amount EXCEPT ![self] = 0] + /\ child_account_id' = [child_account_id EXCEPT ![self] = DUMMY_ACCOUNT] + /\ child_neuron_id' = [child_neuron_id EXCEPT ![self] = 0] + /\ pc' = [pc EXCEPT ![self] = "Done"] + /\ UNCHANGED << governance_to_ledger, + spawning_neurons >> + +Disburse_To_Neuron(self) == DisburseToNeuron(self) + \/ DisburseToNeuron_WaitForTransfer(self) + +(* Allow infinite stuttering to prevent deadlock on termination. *) +Terminating == /\ \A self \in ProcSet: pc[self] = "Done" + /\ UNCHANGED vars + +Next == (\E self \in Disburse_To_Neuron_Process_Ids: Disburse_To_Neuron(self)) + \/ Terminating + +Spec == Init /\ [][Next]_vars + +Termination == <>(\A self \in ProcSet: pc[self] = "Done") + +\* END TRANSLATION + +==== diff --git a/rs/nns/governance/tla/Disburse_To_Neuron_Apalache.tla b/rs/nns/governance/tla/Disburse_To_Neuron_Apalache.tla new file mode 100644 index 00000000000..b28dda89c3e --- /dev/null +++ b/rs/nns/governance/tla/Disburse_To_Neuron_Apalache.tla @@ -0,0 +1,70 @@ +---- MODULE Disburse_To_Neuron_Apalache ---- + +EXTENDS TLC, Variants + +(* +@typeAlias: proc = Str; +@typeAlias: account = Str; +@typeAlias: neuronId = Int; +@typeAlias: methodCall = Transfer({ from: $account, to: $account, amount: Int, fee: Int}) | AccountBalance({ account: $account }); +@typeAlias: methodResponse = Fail(UNIT) | TransferOk(UNIT) | BalanceQueryOk(Int); +*) +_type_alias_dummy == TRUE + +\* CODE_LINK_INSERT_CONSTANTS + +(* +CONSTANTS + \* @type: Set($account); + Governance_Account_Ids, + \* @type: Set($neuronId); + Neuron_Ids + +CONSTANTS + \* @type: Set($proc); + Disburse_To_Neuron_Process_Ids + +CONSTANTS + \* Minimum stake a neuron can have + \* @type: Int; + MIN_STAKE, + \* The transfer fee charged by the ledger canister + \* @type: Int; + TRANSACTION_FEE +*) + +VARIABLES + \* @type: $neuronId -> {cached_stake: Int, account: $account, maturity: Int, fees: Int}; + neuron, + \* @type: $account -> $neuronId; + neuron_id_by_account, + \* @type: Set($neuronId); + locks, + \* @type: Seq({caller : $proc, method_and_args: $methodCall }); + governance_to_ledger, + \* @type: Set({caller: $proc, response: $methodResponse }); + ledger_to_governance, + \* @type: $proc -> Str; + pc, + \* @type: $proc -> $neuronId; + parent_neuron_id, + \* @type: $proc -> Int; + disburse_amount, + \* @type: $proc -> $account; + child_account_id, + \* @type: $proc -> $neuronId; + child_neuron_id, + \* Not used by this model, but it's a global variable used by spawn_neurons, so + \* it's the easiest to just add it to all the other models + \* @type: Bool; + spawning_neurons + +\* @type: Set($neuronId) => $neuronId; +FRESH_NEURON_ID(existing_neurons) == CHOOSE nid \in (Neuron_Ids \ existing_neurons): TRUE + +MOD == INSTANCE Disburse_To_Neuron + +Next == 
[MOD!Next]_MOD!vars + + +==== From 2828131f605b6008351fc57d1eaf2b40635a87f8 Mon Sep 17 00:00:00 2001 From: mraszyk <31483726+mraszyk@users.noreply.github.com> Date: Wed, 15 Jan 2025 14:20:13 +0100 Subject: [PATCH 28/33] fix(PocketIC): safely drop StateMachine (#3450) This PR safely drops every StateMachine in PocketIC to prevent its state directory from being deleted before every Arc to its state manager is dropped. --- rs/pocket_ic_server/src/pocket_ic.rs | 34 ++++++++++++++++++++++++++-- rs/state_machine_tests/src/lib.rs | 14 ++++++++---- 2 files changed, 42 insertions(+), 6 deletions(-) diff --git a/rs/pocket_ic_server/src/pocket_ic.rs b/rs/pocket_ic_server/src/pocket_ic.rs index 220099396b0..a74a6011c70 100644 --- a/rs/pocket_ic_server/src/pocket_ic.rs +++ b/rs/pocket_ic_server/src/pocket_ic.rs @@ -384,6 +384,9 @@ impl SubnetsImpl { pub(crate) fn get_all(&self) -> Vec> { self.subnets.read().unwrap().values().cloned().collect() } + fn clear(&self) { + self.subnets.write().unwrap().clear(); + } } impl Subnets for SubnetsImpl { @@ -421,8 +424,8 @@ pub struct PocketIc { impl Drop for PocketIc { fn drop(&mut self) { - let subnets = self.subnets.get_all(); if let Some(ref state_dir) = self.state_dir { + let subnets = self.subnets.get_all(); for subnet in &subnets { subnet.state_machine.checkpointed_tick(); } @@ -452,9 +455,36 @@ impl Drop for PocketIc { let topology_json = serde_json::to_string(&raw_topology).unwrap(); topology_file.write_all(topology_json.as_bytes()).unwrap(); } - for subnet in subnets { + for subnet in self.subnets.get_all() { subnet.state_machine.drop_payload_builder(); } + let state_machines: Vec<_> = self + .subnets + .get_all() + .into_iter() + .map(|subnet| subnet.state_machine.clone()) + .collect(); + self.subnets.clear(); + // for every StateMachine, wait until nobody else has an Arc to that StateMachine + // and then drop that StateMachine + let start = std::time::Instant::now(); + for state_machine in state_machines { + let mut state_machine = Some(state_machine); + while state_machine.is_some() { + match Arc::try_unwrap(state_machine.take().unwrap()) { + Ok(sm) => { + sm.drop(); + break; + } + Err(sm) => { + state_machine = Some(sm); + } + } + if start.elapsed() > std::time::Duration::from_secs(5 * 60) { + panic!("Timed out while dropping PocketIC."); + } + } + } } } diff --git a/rs/state_machine_tests/src/lib.rs b/rs/state_machine_tests/src/lib.rs index 5031290b05a..c096015a556 100644 --- a/rs/state_machine_tests/src/lib.rs +++ b/rs/state_machine_tests/src/lib.rs @@ -1826,20 +1826,26 @@ impl StateMachine { fn into_components(self) -> (Box, u64, Time, u64) { let state_manager = Arc::downgrade(&self.state_manager); let result = self.into_components_inner(); - let mut i = 0i32; // StateManager is owned by an Arc, that is cloned into multiple components and different // threads. If we return before all the asynchronous components release the Arc, we may // end up with to StateManagers writing to the same directory, resulting in a crash. + let start = std::time::Instant::now(); while state_manager.upgrade().is_some() { std::thread::sleep(std::time::Duration::from_millis(50)); - i += 1; - if i >= 100 { - panic!("Failed to wait for StateManager drop"); + if start.elapsed() > std::time::Duration::from_secs(5 * 60) { + panic!("Timed out while dropping StateMachine."); } } result } + /// Safely drops this `StateMachine`. 
We cannot achieve this functionality by implementing `Drop` + /// since we have to wait until there are no more `Arc`s for the state manager and + /// this is infeasible in a `Drop` implementation. + pub fn drop(self) { + let _ = self.into_components(); + } + /// Emulates a node restart, including checkpoint recovery. pub fn restart_node(self) -> Self { // We must drop self before setup_form_dir so that we don't have two StateManagers pointing From f491f848c4da05a852186f0c2279e5bcb7ce60f8 Mon Sep 17 00:00:00 2001 From: kpop-dfinity <125868903+kpop-dfinity@users.noreply.github.com> Date: Wed, 15 Jan 2025 16:26:04 +0100 Subject: [PATCH 29/33] refactor(consensus): simplify `IngressPayload` implementation (#3444) Currently the payload is one big bytes buffer which contains all ingress messages in a packed representation. This makes it a big hard to work with. In this PR we simplify the structure by replacing the buffer with a map from `IngressMessageId`s to a _serialized_ ingress messages. The following two important properties are preserved: 1. The individual ingress messages deserialization is delayed until it's actually needed 2. We preserve the original byte representation of the ingress messages I've ran multiple benchmarks and didn't notice a _big_ change: 1. `consensus-performance` system test showed the same throughput / block rates with both implementations 2. the [serialization/deserialization ](https://github.com/dfinity/ic/blob/master/rs/consensus/benches/validate_payload.rs#L374-L404) of ingress payload gets about 2x slower, but it's still in sub 1ms territory --------- Co-authored-by: Leon Tan --- .../src/consensus/malicious_consensus.rs | 4 +- rs/consensus/src/consensus/metrics.rs | 2 +- rs/consensus/src/consensus/purger.rs | 4 +- rs/ingress_manager/src/ingress_selector.rs | 36 ++- rs/interfaces/src/ingress_manager.rs | 10 +- .../src/fetch_stripped_artifact/download.rs | 4 +- .../src/fetch_stripped_artifact/stripper.rs | 2 +- rs/protobuf/def/types/v1/consensus.proto | 10 +- rs/protobuf/src/gen/types/types.v1.rs | 15 +- rs/state_machine_tests/src/lib.rs | 4 +- rs/types/types/src/batch/ingress.rs | 258 +++++++----------- rs/types/types/src/exhaustive.rs | 4 + rs/types/types/src/messages.rs | 1 + 13 files changed, 174 insertions(+), 180 deletions(-) diff --git a/rs/consensus/src/consensus/malicious_consensus.rs b/rs/consensus/src/consensus/malicious_consensus.rs index 4d7cdddee3c..5a9e5be9300 100644 --- a/rs/consensus/src/consensus/malicious_consensus.rs +++ b/rs/consensus/src/consensus/malicious_consensus.rs @@ -87,7 +87,9 @@ fn maliciously_propose_blocks( .get_block_maker_rank(height, &beacon, my_node_id) { Ok(Some(rank)) => Some(rank), - Ok(None) => Some(Rank(0)), + // TODO: introduce a malicious flag which will instruct a malicious node to propose a block + // when it's not elected a block maker; implement a system test which uses the flag. 
+ Ok(None) => None, Err(_) => None, }; diff --git a/rs/consensus/src/consensus/metrics.rs b/rs/consensus/src/consensus/metrics.rs index 7bf58e7c898..b261ecb349d 100644 --- a/rs/consensus/src/consensus/metrics.rs +++ b/rs/consensus/src/consensus/metrics.rs @@ -157,7 +157,7 @@ impl BatchStats { self.ingress_message_bytes_delivered += payload.ingress.count_bytes(); self.xnet_bytes_delivered += payload.xnet.size_bytes(); self.ingress_ids - .extend_from_slice(&payload.ingress.message_ids()); + .extend(payload.ingress.message_ids().cloned()); } } diff --git a/rs/consensus/src/consensus/purger.rs b/rs/consensus/src/consensus/purger.rs index 915e3c944f1..d658233d79b 100644 --- a/rs/consensus/src/consensus/purger.rs +++ b/rs/consensus/src/consensus/purger.rs @@ -905,10 +905,10 @@ mod tests { non_finalized_notarization_2 )), ChangeAction::RemoveFromValidated(ConsensusMessage::BlockProposal( - non_finalized_block_proposal_2_1 + non_finalized_block_proposal_2_0 )), ChangeAction::RemoveFromValidated(ConsensusMessage::BlockProposal( - non_finalized_block_proposal_2_0 + non_finalized_block_proposal_2_1 )), ] ); diff --git a/rs/ingress_manager/src/ingress_selector.rs b/rs/ingress_manager/src/ingress_selector.rs index 02afb96e5fd..ad0f07f69e5 100644 --- a/rs/ingress_manager/src/ingress_selector.rs +++ b/rs/ingress_manager/src/ingress_selector.rs @@ -330,10 +330,29 @@ impl IngressSelector for IngressManager { // Tracks the sum of cycles needed per canister. let mut cycles_needed: BTreeMap = BTreeMap::new(); - for i in 0..payload.message_count() { - let (ingress_id, ingress) = payload - .get(i) - .map_err(InvalidIngressPayloadReason::IngressPayloadError)?; + + // Validate each ingress message in the payload + for (ingress_id, maybe_ingress) in payload.iter() { + let ingress = match maybe_ingress { + Ok(ingress) => ingress, + Err(deserialization_error) => { + return Err(ValidationError::InvalidArtifact( + InvalidIngressPayloadReason::IngressMessageDeserializationFailure( + ingress_id.clone(), + deserialization_error.to_string(), + ), + )); + } + }; + + if IngressMessageId::from(&ingress) != *ingress_id { + return Err(ValidationError::InvalidArtifact( + InvalidIngressPayloadReason::MismatchedMessageId { + expected: ingress_id.clone(), + computed: IngressMessageId::from(&ingress), + }, + )); + } self.validate_ingress( ingress_id.clone(), @@ -373,7 +392,7 @@ impl IngressSelector for IngressManager { let ingress = ingress_payload_cache .entry((*height, payload_hash.clone())) .or_insert_with(|| { - Arc::new(batch.ingress.message_ids().into_iter().collect()) + Arc::new(batch.ingress.message_ids().cloned().collect()) }); Some(ingress.clone()) } @@ -1046,11 +1065,8 @@ mod tests { assert_eq!(first_ingress_payload.message_count(), 1); // we should not get it again because it is part of past payloads - let mut hash_set = HashSet::new(); - for i in 0..first_ingress_payload.message_count() { - let (id, _) = first_ingress_payload.get(i).unwrap(); - hash_set.insert(id); - } + let hash_set = HashSet::from_iter(first_ingress_payload.message_ids().cloned()); + let second_ingress_payload = ingress_manager.get_ingress_payload( &hash_set, &validation_context, diff --git a/rs/interfaces/src/ingress_manager.rs b/rs/interfaces/src/ingress_manager.rs index 6814f89b977..7df69d29416 100644 --- a/rs/interfaces/src/ingress_manager.rs +++ b/rs/interfaces/src/ingress_manager.rs @@ -6,7 +6,7 @@ use crate::{ use ic_interfaces_state_manager::StateManagerError; use ic_types::{ artifact::IngressMessageId, - batch::{IngressPayload, 
IngressPayloadError, ValidationContext}, + batch::{IngressPayload, ValidationContext}, consensus::Payload, ingress::IngressSets, messages::MessageId, @@ -52,8 +52,14 @@ impl IngressSetQuery for IngressSets { /// Reasons for why an ingress payload might be invalid. #[derive(Eq, PartialEq, Debug)] pub enum InvalidIngressPayloadReason { + /// An [`IngressMessageId`] inside the payload doesn't match the referenced [`SignedIngress`]. + MismatchedMessageId { + expected: IngressMessageId, + computed: IngressMessageId, + }, + /// Failed to deserialize an ingress message. + IngressMessageDeserializationFailure(IngressMessageId, String), IngressValidationError(MessageId, String), - IngressPayloadError(IngressPayloadError), IngressExpired(MessageId, String), IngressMessageTooBig(usize, usize), IngressPayloadTooManyMessages(usize, usize), diff --git a/rs/p2p/artifact_downloader/src/fetch_stripped_artifact/download.rs b/rs/p2p/artifact_downloader/src/fetch_stripped_artifact/download.rs index 43a5f4a770f..4e6d6c0587b 100644 --- a/rs/p2p/artifact_downloader/src/fetch_stripped_artifact/download.rs +++ b/rs/p2p/artifact_downloader/src/fetch_stripped_artifact/download.rs @@ -84,11 +84,11 @@ impl Pools { }; match data_payload.batch.ingress.get_by_id(ingress_message_id) { - Some(ingress_message) => { + Ok(Some(ingress_message)) => { self.metrics.ingress_messages_in_block.inc(); Ok(ingress_message) } - None => { + _ => { self.metrics.ingress_messages_not_found.inc(); Err(PoolsAccessError::IngressMessageNotFound) } diff --git a/rs/p2p/artifact_downloader/src/fetch_stripped_artifact/stripper.rs b/rs/p2p/artifact_downloader/src/fetch_stripped_artifact/stripper.rs index 7bad4d70937..30fab0ed7ff 100644 --- a/rs/p2p/artifact_downloader/src/fetch_stripped_artifact/stripper.rs +++ b/rs/p2p/artifact_downloader/src/fetch_stripped_artifact/stripper.rs @@ -55,7 +55,7 @@ impl Strippable for &IngressPayload { fn strip(self) -> Self::Output { Self::Output { - ingress_messages: self.message_ids(), + ingress_messages: self.message_ids().cloned().collect(), } } } diff --git a/rs/protobuf/def/types/v1/consensus.proto b/rs/protobuf/def/types/v1/consensus.proto index b788a2e1026..8737a307d34 100644 --- a/rs/protobuf/def/types/v1/consensus.proto +++ b/rs/protobuf/def/types/v1/consensus.proto @@ -204,9 +204,15 @@ message IngressIdOffset { uint64 offset = 3; } +message IngressMessage { + bytes message_id = 1; + uint64 expiry = 2; + bytes signed_request_bytes = 3; +} + message IngressPayload { - repeated IngressIdOffset id_and_pos = 1; - bytes buffer = 2; + reserved 1, 2; + repeated IngressMessage ingress_messages = 3; } // Stripped consensus artifacts messages below diff --git a/rs/protobuf/src/gen/types/types.v1.rs b/rs/protobuf/src/gen/types/types.v1.rs index 892c97432d9..1ac349addae 100644 --- a/rs/protobuf/src/gen/types/types.v1.rs +++ b/rs/protobuf/src/gen/types/types.v1.rs @@ -1528,11 +1528,18 @@ pub struct IngressIdOffset { pub offset: u64, } #[derive(Clone, PartialEq, ::prost::Message)] +pub struct IngressMessage { + #[prost(bytes = "vec", tag = "1")] + pub message_id: ::prost::alloc::vec::Vec, + #[prost(uint64, tag = "2")] + pub expiry: u64, + #[prost(bytes = "vec", tag = "3")] + pub signed_request_bytes: ::prost::alloc::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] pub struct IngressPayload { - #[prost(message, repeated, tag = "1")] - pub id_and_pos: ::prost::alloc::vec::Vec, - #[prost(bytes = "vec", tag = "2")] - pub buffer: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "3")] + pub 
ingress_messages: ::prost::alloc::vec::Vec, } /// Stripped consensus artifacts messages below #[derive(Clone, PartialEq, ::prost::Message)] diff --git a/rs/state_machine_tests/src/lib.rs b/rs/state_machine_tests/src/lib.rs index c096015a556..a9aa8347b27 100644 --- a/rs/state_machine_tests/src/lib.rs +++ b/rs/state_machine_tests/src/lib.rs @@ -1370,9 +1370,7 @@ impl StateMachine { // used by the function `Self::execute_payload` of the `StateMachine`. let xnet_payload = batch_payload.xnet.clone(); let ingress = &batch_payload.ingress; - let ingress_messages = (0..ingress.message_count()) - .map(|i| ingress.get(i).unwrap().1) - .collect(); + let ingress_messages = ingress.clone().try_into().unwrap(); let (http_responses, _) = CanisterHttpPayloadBuilderImpl::into_messages(&batch_payload.canister_http); let inducted: Vec<_> = http_responses diff --git a/rs/types/types/src/batch/ingress.rs b/rs/types/types/src/batch/ingress.rs index 467136fe982..e1379a92870 100644 --- a/rs/types/types/src/batch/ingress.rs +++ b/rs/types/types/src/batch/ingress.rs @@ -1,45 +1,42 @@ use crate::{ artifact::IngressMessageId, - messages::{MessageId, SignedIngress, SignedRequestBytes, EXPECTED_MESSAGE_ID_LENGTH}, + messages::{ + HttpRequestError, MessageId, SignedIngress, SignedRequestBytes, EXPECTED_MESSAGE_ID_LENGTH, + }, CountBytes, Time, }; #[cfg(test)] use ic_exhaustive_derive::ExhaustiveSet; use ic_protobuf::{proxy::ProxyDecodeError, types::v1 as pb}; use serde::{Deserialize, Serialize}; -use std::{ - convert::TryFrom, - io::{Cursor, Write}, -}; +use std::{collections::BTreeMap, convert::TryFrom, fmt::Display}; /// Payload that contains Ingress messages #[derive(Clone, Eq, PartialEq, Hash, Debug, Default, Deserialize, Serialize)] #[cfg_attr(test, derive(ExhaustiveSet))] pub struct IngressPayload { - /// Pairs of MessageId and its serialized byte position in the buffer. - id_and_pos: Vec<(IngressMessageId, u64)>, - /// All messages are serialized in a single byte buffer, so individual + /// Keep ingress messages in a serialized form, so individual /// deserialization is delayed. This allows faster deserialization of /// IngressPayload when individual message is not needed (e.g. in /// ingress payload deduplication). 
- #[serde(with = "serde_bytes")] - buffer: Vec, + serialized_ingress_messages: BTreeMap, } impl From<&IngressPayload> for pb::IngressPayload { fn from(ingress_payload: &IngressPayload) -> Self { - Self { - id_and_pos: ingress_payload - .id_and_pos - .iter() - .map(|(msg_id, offset)| pb::IngressIdOffset { - expiry: msg_id.expiry().as_nanos_since_unix_epoch(), - message_id: msg_id.message_id.as_bytes().to_vec(), - offset: *offset, - }) - .collect(), - buffer: ingress_payload.buffer.clone(), - } + let ingress_messages = ingress_payload + .serialized_ingress_messages + .iter() + .map( + |(ingress_message_id, serialized_ingress_message)| pb::IngressMessage { + expiry: ingress_message_id.expiry().as_nanos_since_unix_epoch(), + message_id: ingress_message_id.message_id.as_bytes().to_vec(), + signed_request_bytes: serialized_ingress_message.as_ref().to_vec(), + }, + ) + .collect(); + + pb::IngressPayload { ingress_messages } } } @@ -47,131 +44,99 @@ impl TryFrom for IngressPayload { type Error = ProxyDecodeError; fn try_from(payload: pb::IngressPayload) -> Result { + let mut serialized_ingress_messages = BTreeMap::new(); + + for ingress_message_proto in payload.ingress_messages { + let ingress_message_id = IngressMessageId::new( + Time::from_nanos_since_unix_epoch(ingress_message_proto.expiry), + MessageId::try_from(ingress_message_proto.message_id.as_slice())?, + ); + + serialized_ingress_messages.insert( + ingress_message_id, + SignedRequestBytes::from(ingress_message_proto.signed_request_bytes), + ); + } + Ok(Self { - id_and_pos: payload - .id_and_pos - .iter() - .map(|ingress_offset| { - Ok(( - IngressMessageId::new( - Time::from_nanos_since_unix_epoch(ingress_offset.expiry), - MessageId::try_from(ingress_offset.message_id.as_slice())?, - ), - ingress_offset.offset, - )) - }) - .collect::, ProxyDecodeError>>()?, - buffer: payload.buffer, + serialized_ingress_messages, }) } } -/// Index of an ingress message in the IngressPayload. -type IngressIndex = usize; +#[derive(Debug, PartialEq)] +pub struct IngressPayloadError(HttpRequestError); -/// Position of serialized ingress message in the payload buffer. -type BufferPosition = u64; - -#[derive(Eq, PartialEq, Debug)] -/// Possible errors when accessing messages in an [`IngressPayload`]. -pub enum IngressPayloadError { - IndexOutOfBound(IngressIndex), - IngressPositionOutOfBound(IngressIndex, BufferPosition), - DeserializationFailure(String), - MismatchedMessageIdAtIndex(IngressIndex), +impl Display for IngressPayloadError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.0.fmt(f) + } } impl IngressPayload { /// Return the number of ingress messages contained in this payload pub fn message_count(&self) -> usize { - self.id_and_pos.len() + self.serialized_ingress_messages.len() } - /// Return all MessageIds in the payload. - pub fn message_ids(&self) -> Vec { - self.id_and_pos - .iter() - .map(|(id, _)| id.clone()) - .collect::>() + /// Return all [`IngressMessageId`]s in the payload. + pub fn message_ids(&self) -> impl Iterator { + self.serialized_ingress_messages.keys() } /// Return true if the payload is empty. 
pub fn is_empty(&self) -> bool { - self.id_and_pos.is_empty() + self.serialized_ingress_messages.is_empty() } - // TODO(kpop): run some benchmarks and see if it makes sense to change the type of - // `[IngressPayload::id_and_pos]` - pub fn get_by_id(&self, ingress_message_id: &IngressMessageId) -> Option { - let (index, _) = self - .id_and_pos - .iter() - .enumerate() - .find(|(_, (id, _))| id == ingress_message_id)?; - - self.get(index) - .map(|(_, ingress_message)| ingress_message) - .ok() + /// Return the [`SignedIngress`] referenced by the [`IngressMessageId`]. + /// Return [`IngressPayloadError`] if we fail to deserialize the message. + pub fn get_by_id( + &self, + ingress_message_id: &IngressMessageId, + ) -> Result, IngressPayloadError> { + self.serialized_ingress_messages + .get(ingress_message_id) + .map(|bytes| SignedIngress::try_from(bytes.clone()).map_err(IngressPayloadError)) + .transpose() } - /// Return the ingress message at a given index, which is expected to be - /// less than `message_count`. - pub fn get( + /// Iterates over the ingress messages in their deserialized form. + pub fn iter( &self, - index: usize, - ) -> Result<(IngressMessageId, SignedIngress), IngressPayloadError> { - self.id_and_pos - .get(index) - .ok_or(IngressPayloadError::IndexOutOfBound(index)) - .and_then(|(id, pos)| { - // Return error if pos is out of bound. - if *pos > self.buffer.len() as u64 { - Err(IngressPayloadError::IngressPositionOutOfBound(index, *pos)) - } else { - let end = { - if index == self.id_and_pos.len() - 1 { - self.buffer.len() - } else { - self.id_and_pos[index + 1].1 as usize - } - }; - let ingress = SignedIngress::try_from(SignedRequestBytes::from(Vec::from( - &self.buffer[*pos as usize..end], - ))) - .map_err(|e| IngressPayloadError::DeserializationFailure(e.to_string()))?; - let ingress_id = IngressMessageId::from(&ingress); - if *id == ingress_id { - Ok((ingress_id, ingress)) - } else { - Err(IngressPayloadError::MismatchedMessageIdAtIndex(index)) - } - } - }) + ) -> impl Iterator< + Item = ( + &IngressMessageId, + Result, + ), + > { + self.serialized_ingress_messages.iter().map(|(id, bytes)| { + ( + id, + SignedIngress::try_from(bytes.clone()).map_err(IngressPayloadError), + ) + }) } } impl CountBytes for IngressPayload { fn count_bytes(&self) -> usize { - self.buffer.len() + self.id_and_pos.len() * EXPECTED_MESSAGE_ID_LENGTH + self.serialized_ingress_messages + .values() + .map(|message| EXPECTED_MESSAGE_ID_LENGTH + message.len()) + .sum() } } impl<'a> FromIterator<&'a SignedIngress> for IngressPayload { fn from_iter>(msgs: I) -> Self { - let mut buf = Cursor::new(Vec::new()); - let mut id_and_pos = Vec::new(); - for ingress in msgs { - let id = IngressMessageId::from(ingress); - let pos = buf.position(); - // This panic will only happen when we run out of memory. 
- buf.write_all(ingress.binary().as_ref()) - .unwrap_or_else(|err| panic!("SignedIngress serialization error: {:?}", err)); + let serialized_ingress_messages = msgs + .into_iter() + .map(|ingress| (IngressMessageId::from(ingress), ingress.binary().clone())) + .collect(); - id_and_pos.push((id, pos)); - } Self { - id_and_pos, - buffer: buf.into_inner(), + serialized_ingress_messages, } } } @@ -184,13 +149,14 @@ impl From> for IngressPayload { impl TryFrom for Vec { type Error = IngressPayloadError; + fn try_from(payload: IngressPayload) -> Result, Self::Error> { payload - .id_and_pos - .iter() - .enumerate() - .map(|(i, _)| payload.get(i).map(|m| m.1)) - .collect::>() + .serialized_ingress_messages + .into_values() + .map(SignedIngress::try_from) + .collect::, _>>() + .map_err(IngressPayloadError) } } @@ -204,7 +170,6 @@ mod tests { }, time::expiry_time_from_now, }; - use assert_matches::assert_matches; use std::convert::TryFrom; fn fake_http_call_content(method_name: &str) -> HttpCallContent { @@ -247,12 +212,19 @@ mod tests { )]), }, ]; - let signed_ingresses: Vec = update_messages + let mut signed_ingresses: Vec = update_messages .into_iter() .map(|msg| SignedIngress::try_from(msg).unwrap()) .collect(); + let ingress_payload = IngressPayload::from(signed_ingresses.clone()); let signed_ingresses1 = Vec::::try_from(ingress_payload).unwrap(); + // ingress messages are sorted by id in the ingress payload, hence the sort below + signed_ingresses.sort_by(|msg_1, msg_2| { + IngressMessageId::from(msg_1) + .partial_cmp(&IngressMessageId::from(msg_2)) + .unwrap() + }); assert_eq!(signed_ingresses, signed_ingresses1); } @@ -274,50 +246,32 @@ mod tests { sender_delegation: None, }; - SignedIngress::try_from(message).unwrap() + let ingress = SignedIngress::try_from(message).unwrap(); + let id = IngressMessageId::from(&ingress); + + (ingress, id) }; // Some test messages. - let m1 = fake_ingress_message("m1"); - let m1_id = m1.id(); - let m2 = fake_ingress_message("m2"); - let m3 = fake_ingress_message("m3"); + let (m1, id1) = fake_ingress_message("m1"); + let (m2, id2) = fake_ingress_message("m2"); + let (m3, id3) = fake_ingress_message("m3"); + let (_m4, id4) = fake_ingress_message("m4"); - let msgs = vec![m1, m2, m3]; + let msgs = vec![m1.clone(), m2.clone(), m3.clone()]; let payload = IngressPayload::from(msgs.clone()); // Serialization/deserialization works. - let mut bytes = bincode::serialize(&payload).unwrap(); + let bytes = bincode::serialize(&payload).unwrap(); assert_eq!( bincode::deserialize::(&bytes).unwrap(), payload ); // Individual lookup works. - assert_matches!(payload.get(0).unwrap(), (_, msg) if msg == msgs[0]); - assert_matches!(payload.get(1).unwrap(), (_, msg) if msg == msgs[1]); - assert_matches!(payload.get(2).unwrap(), (_, msg) if msg == msgs[2]); - // Test IndexOutOfBound. - assert_matches!(payload.get(3), Err(IngressPayloadError::IndexOutOfBound(3))); + assert_eq!(payload.get_by_id(&id1), Ok(Some(m1))); + assert_eq!(payload.get_by_id(&id2), Ok(Some(m2))); + assert_eq!(payload.get_by_id(&id3), Ok(Some(m3))); + assert_eq!(payload.get_by_id(&id4), Ok(None)); // Converting back to messages should match original assert_eq!(msgs, >::try_from(payload).unwrap()); - - // A sub-sequence search function - fn find(array: &[u8], subseq: &[u8]) -> Option { - (0..array.len() - subseq.len() + 1).find(|&i| array[i..i + subseq.len()] == subseq[..]) - } - - // Mutate some byte, deserialization works, but casting back to messages fail. 
- let pos = find(&bytes, m1_id.as_bytes()).unwrap(); - // `+= 1` may overflow in debug mode. - bytes[pos] ^= 1; - let payload = bincode::deserialize::(&bytes); - assert!(payload.is_ok()); - let payload = payload.unwrap(); - // get(0) should return error. - assert_matches!( - payload.get(0), - Err(IngressPayloadError::MismatchedMessageIdAtIndex(0)) - ); - // Conversion should also fail. - assert!(>::try_from(payload).is_err()); } } diff --git a/rs/types/types/src/exhaustive.rs b/rs/types/types/src/exhaustive.rs index dfeb87fb86f..7589ffdb70e 100644 --- a/rs/types/types/src/exhaustive.rs +++ b/rs/types/types/src/exhaustive.rs @@ -1,5 +1,6 @@ //! Implementations and serialization tests of the ExhaustiveSet trait +use crate::artifact::IngressMessageId; use crate::consensus::hashed::Hashed; use crate::consensus::idkg::common::{PreSignatureInCreation, PreSignatureRef}; use crate::consensus::idkg::ecdsa::QuadrupleInCreation; @@ -28,6 +29,7 @@ use crate::crypto::{ CombinedThresholdSig, CombinedThresholdSigOf, CryptoHash, CryptoHashOf, CryptoHashable, IndividualMultiSig, IndividualMultiSigOf, Signed, ThresholdSigShare, ThresholdSigShareOf, }; +use crate::messages::SignedRequestBytes; use crate::signature::{ BasicSignature, BasicSignatureBatch, MultiSignature, MultiSignatureShare, ThresholdSignature, ThresholdSignatureShare, @@ -1002,6 +1004,8 @@ impl HasId for MasterKeyTranscript { } } +impl HasId for SignedRequestBytes {} + impl HasId for ReshareOfUnmaskedParams {} impl HasId for CompletedSignature {} impl HasId for CompletedReshareRequest {} diff --git a/rs/types/types/src/messages.rs b/rs/types/types/src/messages.rs index 6a348076d36..3a5df6ff044 100644 --- a/rs/types/types/src/messages.rs +++ b/rs/types/types/src/messages.rs @@ -270,6 +270,7 @@ impl TryFrom for StopCanisterContext { /// format. Use `TryFrom` or `TryInto` to convert between `SignedRequestBytes` /// and other types, corresponding to serialization/deserialization. #[derive(Clone, Eq, PartialEq, Hash, Debug, Deserialize, Serialize)] +#[cfg_attr(test, derive(ExhaustiveSet))] pub struct SignedRequestBytes(#[serde(with = "serde_bytes")] Vec); impl AsRef<[u8]> for SignedRequestBytes { From 145aff3e51115db33f21cea1e1bcd10cc7e012f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sa=C5=A1a=20Tomi=C4=87?= Date: Wed, 15 Jan 2025 18:17:43 +0100 Subject: [PATCH 30/33] chore(nftables): update IPv6 prefix list in the HostOS firewall (#3414) - Remove obsolete prefixes: `2607:f6f0:3004::/48` (CH1-old) and `2001:4d78:40d::/48` (FR1-old). 
- Add new SH1 entry with prefix: `2001:4c08:2003:b09::/64`, which is a DFINITY DC in [Stockholm](https://dashboard.internetcomputer.org/center/sh1) --- ic-os/components/networking/nftables/hostos/nftables.template | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/ic-os/components/networking/nftables/hostos/nftables.template b/ic-os/components/networking/nftables/hostos/nftables.template index 04d7acb4798..065ac59bbc0 100644 --- a/ic-os/components/networking/nftables/hostos/nftables.template +++ b/ic-os/components/networking/nftables/hostos/nftables.template @@ -92,7 +92,6 @@ table ip6 filter { 2001:920:401a:1710::/64, # BR1 2001:920:401a:1706::/64, # BR2 2a04:9dc0:0:108::/64, # BU1 - 2607:f6f0:3004::/48, # CH1-old 2602:fb2b:120::/48, # CH1 InfraDC prefix 2604:7e00:50::/64, # CH2 2607:ff70:3:2::/64, # CH3 @@ -101,7 +100,6 @@ table ip6 filter { 2604:6800:258:1::/64, # DM1 InfraDC annex 2600:3000:1300:1300::/64, # DN1 2001:470:1:c76::/64, # FM1 - 2001:4d78:40d::/48, # FR1-old 2602:fb2b:110::/48, # FR1 InfraDC prefix 2001:4d78:400:10a::/64, # FR2 2604:1380:4091:3000::/56, # FR2 Equinix boundary @@ -122,11 +120,11 @@ table ip6 filter { 2610:190:6000:1::/64, # PH1 2600:3004:1200:1200::/56, # PL1 2600:c00:2:100::/64, # SE1 InfraDC annex - 2602:fb2b:100::/48, # SF1 InfraDC prefix 2401:3f00:1000:24::/64, # SG1 2604:1380:40e1:4700::/56, # SG1 Equinix boundary 2401:3f00:1000:22::/64, # SG2 2401:3f00:1000:23::/64, # SG3 + 2001:4c08:2003:b09::/64, # SH1 2600:c02:b002:15::/64, # SJ1 2610:190:df01:5::/64, # ST1 2604:1380:45e1:a600::/56, # SV15 Equinix boundary From 5cce4f5cb504786873b56e869346179c38e7b4f0 Mon Sep 17 00:00:00 2001 From: Rostislav Rumenov Date: Wed, 15 Jan 2025 19:04:07 +0100 Subject: [PATCH 31/33] chore: Split the user ingress artifacts and the artifacts coming from P2P (#3419) The PR improves the API of the abortable broadcast crate. ``` ubuntu@devenv-container:/ic/rs/p2p/artifact_manager$ bazel test :all --runs_per_test=100 INFO: Invocation ID: ae9b1dd3-3e96-4a1a-955b-fa628bf3de14 INFO: Analyzed 2 targets (0 packages loaded, 19 targets configured). INFO: Found 1 target and 1 test target... INFO: Elapsed time: 2.290s, Critical Path: 1.12s INFO: 103 processes: 202 linux-sandbox. 
INFO: Build completed successfully, 103 total actions //rs/p2p/artifact_manager:artifact_manager_test PASSED in 0.1s Stats over 100 runs: max = 0.1s, min = 0.0s, avg = 0.0s, dev = 0.0s ``` --------- Co-authored-by: IDX GitHub Automation --- Cargo.Bazel.Fuzzing.json.lock | 29 +++--- Cargo.Bazel.Fuzzing.toml.lock | 5 +- Cargo.Bazel.json.lock | 29 +++--- Cargo.Bazel.toml.lock | 5 +- Cargo.lock | 3 + Cargo.toml | 1 + bazel/external_crates.bzl | 3 + rs/p2p/artifact_manager/BUILD.bazel | 2 + rs/p2p/artifact_manager/Cargo.toml | 2 + rs/p2p/artifact_manager/src/lib.rs | 132 ++++++++++++++++++++----- rs/p2p/consensus_manager/src/lib.rs | 5 +- rs/p2p/test_utils/BUILD.bazel | 1 + rs/p2p/test_utils/Cargo.toml | 1 + rs/p2p/test_utils/src/lib.rs | 2 +- rs/p2p/test_utils/src/turmoil.rs | 10 +- rs/replica/setup_ic_network/src/lib.rs | 26 +++-- 16 files changed, 185 insertions(+), 71 deletions(-) diff --git a/Cargo.Bazel.Fuzzing.json.lock b/Cargo.Bazel.Fuzzing.json.lock index 08fd98a11dc..3ec5130c1bf 100644 --- a/Cargo.Bazel.Fuzzing.json.lock +++ b/Cargo.Bazel.Fuzzing.json.lock @@ -1,5 +1,5 @@ { - "checksum": "befed3db2258ef5e97774c44951feb5c8ca098af84913123d4a8945afeac827c", + "checksum": "ee07bd5cde20eb057c9baa703b47a96ece7ed32714378e3b62f6cb2a1bde2d94", "crates": { "abnf 0.12.0": { "name": "abnf", @@ -19466,6 +19466,10 @@ "id": "tokio-socks 0.5.2", "target": "tokio_socks" }, + { + "id": "tokio-stream 0.1.17", + "target": "tokio_stream" + }, { "id": "tokio-test 0.4.4", "target": "tokio_test" @@ -46328,7 +46332,7 @@ "target": "tokio" }, { - "id": "tokio-stream 0.1.16", + "id": "tokio-stream 0.1.17", "target": "tokio_stream" } ], @@ -46443,7 +46447,7 @@ "target": "tokio" }, { - "id": "tokio-stream 0.1.16", + "id": "tokio-stream 0.1.17", "target": "tokio_stream" }, { @@ -73314,7 +73318,7 @@ "target": "tokio" }, { - "id": "tokio-stream 0.1.16", + "id": "tokio-stream 0.1.17", "target": "tokio_stream" } ], @@ -73680,14 +73684,14 @@ ], "license_file": "LICENSE" }, - "tokio-stream 0.1.16": { + "tokio-stream 0.1.17": { "name": "tokio-stream", - "version": "0.1.16", + "version": "0.1.17", "package_url": "https://github.com/tokio-rs/tokio", "repository": { "Http": { - "url": "https://static.crates.io/crates/tokio-stream/0.1.16/download", - "sha256": "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" + "url": "https://static.crates.io/crates/tokio-stream/0.1.17/download", + "sha256": "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" } }, "targets": [ @@ -73735,7 +73739,7 @@ "selects": {} }, "edition": "2021", - "version": "0.1.16" + "version": "0.1.17" }, "license": "MIT", "license_ids": [ @@ -73791,7 +73795,7 @@ "target": "tokio" }, { - "id": "tokio-stream 0.1.16", + "id": "tokio-stream 0.1.17", "target": "tokio_stream" } ], @@ -74294,7 +74298,7 @@ "target": "tokio" }, { - "id": "tokio-stream 0.1.16", + "id": "tokio-stream 0.1.17", "target": "tokio_stream" }, { @@ -76524,7 +76528,7 @@ "target": "tokio" }, { - "id": "tokio-stream 0.1.16", + "id": "tokio-stream 0.1.17", "target": "tokio_stream" }, { @@ -87593,6 +87597,7 @@ "tokio-rustls 0.26.0", "tokio-serde 0.8.0", "tokio-socks 0.5.2", + "tokio-stream 0.1.17", "tokio-test 0.4.4", "tokio-util 0.7.13", "toml 0.5.11", diff --git a/Cargo.Bazel.Fuzzing.toml.lock b/Cargo.Bazel.Fuzzing.toml.lock index 05fafc63a0d..11569e4c150 100644 --- a/Cargo.Bazel.Fuzzing.toml.lock +++ b/Cargo.Bazel.Fuzzing.toml.lock @@ -3262,6 +3262,7 @@ dependencies = [ "tokio-rustls 0.26.0", "tokio-serde", "tokio-socks", + "tokio-stream", "tokio-test", "tokio-util", 
"toml", @@ -11421,9 +11422,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" dependencies = [ "futures-core", "pin-project-lite", diff --git a/Cargo.Bazel.json.lock b/Cargo.Bazel.json.lock index b5e8dae02b1..cd26fde8e6d 100644 --- a/Cargo.Bazel.json.lock +++ b/Cargo.Bazel.json.lock @@ -1,5 +1,5 @@ { - "checksum": "16c350a57ca08666035e4f0e31f17c9715860d84ed6755b026870a84de503a09", + "checksum": "af5e4debd1243293865e30a9b64a67f317d54d2087da3adf9fd816ffdd8b1262", "crates": { "abnf 0.12.0": { "name": "abnf", @@ -19294,6 +19294,10 @@ "id": "tokio-socks 0.5.2", "target": "tokio_socks" }, + { + "id": "tokio-stream 0.1.17", + "target": "tokio_stream" + }, { "id": "tokio-test 0.4.4", "target": "tokio_test" @@ -46135,7 +46139,7 @@ "target": "tokio" }, { - "id": "tokio-stream 0.1.16", + "id": "tokio-stream 0.1.17", "target": "tokio_stream" } ], @@ -46250,7 +46254,7 @@ "target": "tokio" }, { - "id": "tokio-stream 0.1.16", + "id": "tokio-stream 0.1.17", "target": "tokio_stream" }, { @@ -73160,7 +73164,7 @@ "target": "tokio" }, { - "id": "tokio-stream 0.1.16", + "id": "tokio-stream 0.1.17", "target": "tokio_stream" } ], @@ -73526,14 +73530,14 @@ ], "license_file": "LICENSE" }, - "tokio-stream 0.1.16": { + "tokio-stream 0.1.17": { "name": "tokio-stream", - "version": "0.1.16", + "version": "0.1.17", "package_url": "https://github.com/tokio-rs/tokio", "repository": { "Http": { - "url": "https://static.crates.io/crates/tokio-stream/0.1.16/download", - "sha256": "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" + "url": "https://static.crates.io/crates/tokio-stream/0.1.17/download", + "sha256": "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" } }, "targets": [ @@ -73581,7 +73585,7 @@ "selects": {} }, "edition": "2021", - "version": "0.1.16" + "version": "0.1.17" }, "license": "MIT", "license_ids": [ @@ -73637,7 +73641,7 @@ "target": "tokio" }, { - "id": "tokio-stream 0.1.16", + "id": "tokio-stream 0.1.17", "target": "tokio_stream" } ], @@ -74140,7 +74144,7 @@ "target": "tokio" }, { - "id": "tokio-stream 0.1.16", + "id": "tokio-stream 0.1.17", "target": "tokio_stream" }, { @@ -76370,7 +76374,7 @@ "target": "tokio" }, { - "id": "tokio-stream 0.1.16", + "id": "tokio-stream 0.1.17", "target": "tokio_stream" }, { @@ -87472,6 +87476,7 @@ "tokio-rustls 0.26.0", "tokio-serde 0.8.0", "tokio-socks 0.5.2", + "tokio-stream 0.1.17", "tokio-test 0.4.4", "tokio-util 0.7.13", "toml 0.5.11", diff --git a/Cargo.Bazel.toml.lock b/Cargo.Bazel.toml.lock index c017af7c37a..a41a0957dbd 100644 --- a/Cargo.Bazel.toml.lock +++ b/Cargo.Bazel.toml.lock @@ -3251,6 +3251,7 @@ dependencies = [ "tokio-rustls 0.26.0", "tokio-serde", "tokio-socks", + "tokio-stream", "tokio-test", "tokio-util", "toml", @@ -11417,9 +11418,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" dependencies = [ "futures-core", "pin-project-lite", diff --git a/Cargo.lock b/Cargo.lock index 9b761dcc7bc..f70c30ce41c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5445,6 +5445,7 @@ name = "ic-artifact-manager" 
version = "0.9.0" dependencies = [ "assert_matches", + "futures", "ic-artifact-pool", "ic-config", "ic-interfaces", @@ -5454,6 +5455,7 @@ dependencies = [ "ic-types", "prometheus", "tokio", + "tokio-stream", "tracing", ] @@ -10709,6 +10711,7 @@ dependencies = [ "slog", "tempfile", "tokio", + "tokio-stream", "turmoil", ] diff --git a/Cargo.toml b/Cargo.toml index de382d1e46c..4a3f645d2a1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -712,6 +712,7 @@ tokio-metrics = "0.4.0" tokio-rustls = { version = "0.26.0", default-features = false, features = [ "ring", ] } +tokio-stream = "0.1.17" tokio-test = "0.4.4" tokio-util = { version = "0.7.13", features = ["full"] } tonic = "0.12.3" diff --git a/bazel/external_crates.bzl b/bazel/external_crates.bzl index a62c47466ce..9677beb8222 100644 --- a/bazel/external_crates.bzl +++ b/bazel/external_crates.bzl @@ -1296,6 +1296,9 @@ def external_crates_repository(name, cargo_lockfile, lockfile, sanitizers_enable "ring", ], ), + "tokio-stream": crate.spec( + version = "^0.1.17", + ), "tokio-serde": crate.spec( version = "^0.8", features = [ diff --git a/rs/p2p/artifact_manager/BUILD.bazel b/rs/p2p/artifact_manager/BUILD.bazel index f04ea2a6858..caa14e5771c 100644 --- a/rs/p2p/artifact_manager/BUILD.bazel +++ b/rs/p2p/artifact_manager/BUILD.bazel @@ -7,8 +7,10 @@ DEPENDENCIES = [ "//rs/interfaces", "//rs/monitoring/metrics", "//rs/types/types", + "@crate_index//:futures", "@crate_index//:prometheus", "@crate_index//:tokio", + "@crate_index//:tokio-stream", "@crate_index//:tracing", ] diff --git a/rs/p2p/artifact_manager/Cargo.toml b/rs/p2p/artifact_manager/Cargo.toml index fdb8d5ec4fb..630e0ba56e6 100644 --- a/rs/p2p/artifact_manager/Cargo.toml +++ b/rs/p2p/artifact_manager/Cargo.toml @@ -10,8 +10,10 @@ documentation.workspace = true ic-interfaces = { path = "../../interfaces" } ic-metrics = { path = "../../monitoring/metrics" } ic-types = { path = "../../types/types" } +futures = { workspace = true } prometheus = { workspace = true } tokio = { workspace = true } +tokio-stream = { workspace = true } tracing = { workspace = true } [dev-dependencies] diff --git a/rs/p2p/artifact_manager/src/lib.rs b/rs/p2p/artifact_manager/src/lib.rs index 92949b68976..804856cdb13 100644 --- a/rs/p2p/artifact_manager/src/lib.rs +++ b/rs/p2p/artifact_manager/src/lib.rs @@ -1,3 +1,4 @@ +use futures::stream::Stream; use ic_interfaces::{ p2p::{ artifact_manager::JoinGuard, @@ -12,10 +13,12 @@ use ic_metrics::MetricsRegistry; use ic_types::{artifact::*, messages::SignedIngress}; use prometheus::{histogram_opts, labels, Histogram}; use std::{ + pin::Pin, sync::{ atomic::{AtomicBool, Ordering::SeqCst}, Arc, RwLock, }, + task::Poll, thread::{Builder as ThreadBuilder, JoinHandle}, time::Duration, }; @@ -23,6 +26,7 @@ use tokio::{ sync::mpsc::{Sender, UnboundedReceiver}, time::timeout, }; +use tokio_stream::StreamExt; use tracing::instrument; /// Metrics for a client artifact processor. 
@@ -130,12 +134,15 @@ impl Drop for ArtifactProcessorJoinGuard { } // TODO: make it private, it is used only for tests outside of this crate -pub fn run_artifact_processor( +pub fn run_artifact_processor< + Artifact: IdentifiableArtifact, + I: Stream> + Send + Unpin + 'static, +>( time_source: Arc, metrics_registry: MetricsRegistry, client: Box>, outbound_tx: Sender>, - inbound_rx: UnboundedReceiver>, + inbound_rx_stream: I, initial_artifacts: Vec, ) -> Box { let shutdown = Arc::new(AtomicBool::new(false)); @@ -154,7 +161,7 @@ pub fn run_artifact_processor( time_source, client, outbound_tx, - inbound_rx, + inbound_rx_stream, ArtifactProcessorMetrics::new(metrics_registry, Artifact::NAME.to_string()), shutdown_cl, ); @@ -163,12 +170,52 @@ pub fn run_artifact_processor( Box::new(ArtifactProcessorJoinGuard::new(handle, shutdown)) } +enum StreamState { + Value(T), + NoNewValueAvailable, + EndOfStream, +} + +async fn read_batch + Send + Unpin + 'static>( + mut stream: Pin<&mut S>, + recv_timeout: Duration, +) -> Option> { + let mut stream = std::pin::Pin::new(&mut stream); + match timeout(recv_timeout, stream.next()).await { + Ok(Some(first_value)) => { + let mut res = vec![first_value]; + // We ignore the end of stream and empty value states. + while let StreamState::Value(value) = + std::future::poll_fn(|cx| match stream.as_mut().poll_next(cx) { + Poll::Pending => Poll::Ready(StreamState::NoNewValueAvailable), + Poll::Ready(Some(v)) => Poll::Ready(StreamState::Value(v)), + // Stream has finished because the abortable broadcast/p2p has stopped. + // This is infallible. + Poll::Ready(None) => Poll::Ready(StreamState::EndOfStream), + }) + .await + { + res.push(value) + } + Some(res) + } + // Stream has finished because the abortable broadcast/p2p has stopped. + // This is infallible. 
+ Ok(None) => None, + // First value didn't arrive on time + Err(_) => Some(vec![]), + } +} + // The artifact processor thread loop -fn process_messages( +fn process_messages< + Artifact: IdentifiableArtifact + 'static, + I: Stream> + Send + Unpin + 'static, +>( time_source: Arc, client: Box>, send_advert: Sender>, - mut receiver: UnboundedReceiver>, + mut inbound_stream: I, mut metrics: ArtifactProcessorMetrics, shutdown: Arc, ) { @@ -188,20 +235,8 @@ fn process_messages( }; let batched_artifact_events = current_thread_rt.block_on(async { - match timeout(recv_timeout, receiver.recv()).await { - Ok(Some(artifact_event)) => { - let mut artifacts = vec![artifact_event]; - while let Ok(artifact) = receiver.try_recv() { - artifacts.push(artifact); - } - Some(artifacts) - } - Ok(None) => { - // p2p is stopped - None - } - Err(_) => Some(vec![]), - } + let inbound_stream = std::pin::Pin::new(&mut inbound_stream); + read_batch(inbound_stream, recv_timeout).await }); let batched_artifact_events = match batched_artifact_events { Some(v) => v, @@ -231,6 +266,7 @@ pub fn create_ingress_handlers< >( outbound_tx: Sender>, inbound_rx: UnboundedReceiver>, + user_ingress_rx: UnboundedReceiver>, time_source: Arc, ingress_pool: Arc>, ingress_handler: Arc< @@ -243,12 +279,15 @@ pub fn create_ingress_handlers< metrics_registry: MetricsRegistry, ) -> Box { let client = IngressProcessor::new(ingress_pool.clone(), ingress_handler); + let inbound_rx_stream = tokio_stream::wrappers::UnboundedReceiverStream::new(inbound_rx); + let user_ingress_rx_stream = + tokio_stream::wrappers::UnboundedReceiverStream::new(user_ingress_rx); run_artifact_processor( time_source.clone(), metrics_registry, Box::new(client), outbound_tx, - inbound_rx, + inbound_rx_stream.merge(user_ingress_rx_stream), vec![], ) } @@ -268,12 +307,13 @@ pub fn create_artifact_handler< ) -> Box { let inital_artifacts: Vec<_> = pool.read().unwrap().get_all_for_broadcast().collect(); let client = Processor::new(pool, change_set_producer); + let inbound_rx_stream = tokio_stream::wrappers::UnboundedReceiverStream::new(inbound_rx); run_artifact_processor( time_source.clone(), metrics_registry, Box::new(client), outbound_tx, - inbound_rx, + inbound_rx_stream, inital_artifacts, ) } @@ -400,14 +440,53 @@ impl + Send + Sync + 'static> ArtifactProcessor( + run_artifact_processor::< + DummyArtifact, + UnboundedReceiverStream>, + >( time_source, MetricsRegistry::default(), Box::new(DummyProcessor), send_tx, - inbound_rx, + inbound_rx.into(), (0..10).map(Into::into).collect(), ); diff --git a/rs/p2p/consensus_manager/src/lib.rs b/rs/p2p/consensus_manager/src/lib.rs index 3b957c05fde..5f0ded558bc 100644 --- a/rs/p2p/consensus_manager/src/lib.rs +++ b/rs/p2p/consensus_manager/src/lib.rs @@ -66,8 +66,6 @@ impl AbortableBroadcastChannelBuilder { ) -> ( AbortableBroadcastSender, AbortableBroadcastReceiver, - // TODO: remove this by introducing a new channel from the http handler into the processor - UnboundedSender>, ) { let (outbound_tx, outbound_rx) = tokio::sync::mpsc::channel(MAX_OUTBOUND_CHANNEL_SIZE); // Making this channel bounded can be problematic since we don't have true multiplexing @@ -88,7 +86,6 @@ impl AbortableBroadcastChannelBuilder { let rt_handle = self.rt_handle.clone(); let metrics_registry = self.metrics_registry.clone(); - let inbound_tx_c = inbound_tx.clone(); let builder = move |transport: Arc, topology_watcher| { start_consensus_manager( log, @@ -113,7 +110,7 @@ impl AbortableBroadcastChannelBuilder { ); self.clients.push(Box::new(builder)); - 
(outbound_tx, inbound_rx, inbound_tx_c) + (outbound_tx, inbound_rx) } pub fn router(&mut self) -> Router { diff --git a/rs/p2p/test_utils/BUILD.bazel b/rs/p2p/test_utils/BUILD.bazel index 4263af5951b..68abf6b3f56 100644 --- a/rs/p2p/test_utils/BUILD.bazel +++ b/rs/p2p/test_utils/BUILD.bazel @@ -45,6 +45,7 @@ DEPENDENCIES = [ "@crate_index//:slog", "@crate_index//:tempfile", "@crate_index//:tokio", + "@crate_index//:tokio-stream", "@crate_index//:turmoil", ] diff --git a/rs/p2p/test_utils/Cargo.toml b/rs/p2p/test_utils/Cargo.toml index 529c36d4b9b..8b3c295e8f0 100644 --- a/rs/p2p/test_utils/Cargo.toml +++ b/rs/p2p/test_utils/Cargo.toml @@ -47,4 +47,5 @@ serde = { workspace = true } slog = { workspace = true } tempfile = { workspace = true } tokio = { workspace = true } +tokio-stream = { workspace = true } turmoil = { workspace = true } diff --git a/rs/p2p/test_utils/src/lib.rs b/rs/p2p/test_utils/src/lib.rs index 4135223ca9c..756296ddb8e 100644 --- a/rs/p2p/test_utils/src/lib.rs +++ b/rs/p2p/test_utils/src/lib.rs @@ -467,7 +467,7 @@ pub fn start_consensus_manager( rt_handle.clone(), MetricsRegistry::default(), ); - let (outbound_tx, inbound_rx, _) = cm1.abortable_broadcast_channel(downloader, usize::MAX); + let (outbound_tx, inbound_rx) = cm1.abortable_broadcast_channel(downloader, usize::MAX); let artifact_processor_jh = start_test_processor( outbound_tx, diff --git a/rs/p2p/test_utils/src/turmoil.rs b/rs/p2p/test_utils/src/turmoil.rs index c484641fb89..14eb6a251f6 100644 --- a/rs/p2p/test_utils/src/turmoil.rs +++ b/rs/p2p/test_utils/src/turmoil.rs @@ -35,6 +35,7 @@ use tokio::{ select, sync::{mpsc, oneshot, watch, Notify}, }; +use tokio_stream::wrappers::UnboundedReceiverStream; use turmoil::Sim; pub struct CustomUdp { @@ -378,7 +379,7 @@ pub fn add_transport_to_sim( bouncer_factory, MetricsRegistry::default(), ); - let (outbound_tx, inbound_tx, _) = + let (outbound_tx, inbound_tx) = consensus_builder.abortable_broadcast_channel(downloader, usize::MAX); let artifact_processor_jh = start_test_processor( @@ -446,12 +447,15 @@ pub fn start_test_processor( ) -> Box { let time_source = Arc::new(SysTimeSource::new()); let client = ic_artifact_manager::Processor::new(pool, change_set_producer); - run_artifact_processor( + run_artifact_processor::< + U64Artifact, + UnboundedReceiverStream>, + >( time_source, MetricsRegistry::default(), Box::new(client), outbound_tx, - inbound_rx, + inbound_rx.into(), vec![], ) } diff --git a/rs/replica/setup_ic_network/src/lib.rs b/rs/replica/setup_ic_network/src/lib.rs index 44824366df2..ca2333d1edd 100644 --- a/rs/replica/setup_ic_network/src/lib.rs +++ b/rs/replica/setup_ic_network/src/lib.rs @@ -58,7 +58,10 @@ use std::{ str::FromStr, sync::{Arc, Mutex, RwLock}, }; -use tokio::sync::{mpsc::UnboundedSender, watch}; +use tokio::sync::{ + mpsc::{unbounded_channel, UnboundedSender}, + watch, +}; use tower_http::trace::TraceLayer; /// [IC-1718]: Whether the `hashes-in-blocks` feature is enabled. 
If the flag is set to `true`, we @@ -331,7 +334,7 @@ fn start_consensus( let consensus_pool = Arc::clone(&consensus_pool); let bouncer = Arc::new(ConsensusBouncer::new(metrics_registry, message_router)); - let (outbound_tx, inbound_rx, _) = if HASHES_IN_BLOCKS_FEATURE_ENABLED { + let (outbound_tx, inbound_rx) = if HASHES_IN_BLOCKS_FEATURE_ENABLED { let assembler = ic_artifact_downloader::FetchStrippedConsensusArtifact::new( log.clone(), rt_handle.clone(), @@ -366,7 +369,9 @@ fn start_consensus( join_handles.push(jh); }; - let ingress_sender = { + let user_ingress_tx = { + #[allow(clippy::disallowed_methods)] + let (user_ingress_tx, user_ingress_rx) = unbounded_channel(); let bouncer = Arc::new(IngressBouncer::new(time_source.clone())); let assembler = ic_artifact_downloader::FetchArtifact::new( log.clone(), @@ -376,19 +381,20 @@ fn start_consensus( metrics_registry.clone(), ); - let (outbound_tx, inbound_rx, inbound_tx) = + let (outbound_tx, inbound_rx) = new_p2p_consensus.abortable_broadcast_channel(assembler, SLOT_TABLE_LIMIT_INGRESS); // Create the ingress client. let jh = create_ingress_handlers( outbound_tx, inbound_rx, + user_ingress_rx, Arc::clone(&time_source) as Arc<_>, Arc::clone(&artifact_pools.ingress_pool), ingress_manager, metrics_registry.clone(), ); join_handles.push(jh); - inbound_tx + user_ingress_tx }; { @@ -411,7 +417,7 @@ fn start_consensus( metrics_registry.clone(), ); - let (outbound_tx, inbound_rx, _) = + let (outbound_tx, inbound_rx) = new_p2p_consensus.abortable_broadcast_channel(assembler, SLOT_TABLE_NO_LIMIT); // Create the certification client. let jh = create_artifact_handler( @@ -435,7 +441,7 @@ fn start_consensus( metrics_registry.clone(), ); - let (outbound_tx, inbound_rx, _) = + let (outbound_tx, inbound_rx) = new_p2p_consensus.abortable_broadcast_channel(assembler, SLOT_TABLE_NO_LIMIT); // Create the DKG client. let jh = create_artifact_handler( @@ -483,7 +489,7 @@ fn start_consensus( metrics_registry.clone(), ); - let (outbound_tx, inbound_rx, _) = + let (outbound_tx, inbound_rx) = new_p2p_consensus.abortable_broadcast_channel(assembler, SLOT_TABLE_NO_LIMIT); let jh = create_artifact_handler( @@ -519,7 +525,7 @@ fn start_consensus( metrics_registry.clone(), ); - let (outbound_tx, inbound_rx, _) = + let (outbound_tx, inbound_rx) = new_p2p_consensus.abortable_broadcast_channel(assembler, SLOT_TABLE_NO_LIMIT); let jh = create_artifact_handler( @@ -544,7 +550,7 @@ fn start_consensus( ( artifact_pools.ingress_pool, - ingress_sender, + user_ingress_tx, join_handles, new_p2p_consensus, ) From fb3d35d0dddff7359acb8fb0faa9545759705768 Mon Sep 17 00:00:00 2001 From: max-dfinity <100170574+max-dfinity@users.noreply.github.com> Date: Wed, 15 Jan 2025 11:06:33 -0800 Subject: [PATCH 32/33] chore(nns): Remove one-off fix for broken neuron after deployment (#3452) This removes code that fixed a neuron that was locked as a result of a ledger upgrade happening during spawn. --- rs/nns/governance/canister/canister.rs | 15 ------ rs/nns/governance/src/governance.rs | 61 ---------------------- rs/nns/governance/tests/governance.rs | 72 +------------------------- 3 files changed, 1 insertion(+), 147 deletions(-) diff --git a/rs/nns/governance/canister/canister.rs b/rs/nns/governance/canister/canister.rs index 27aebf8d450..2f54dda2027 100644 --- a/rs/nns/governance/canister/canister.rs +++ b/rs/nns/governance/canister/canister.rs @@ -167,9 +167,6 @@ fn schedule_timers() { // TODO(NNS1-3446): Delete. (This only needs to be run once, but can safely be run multiple times). 
schedule_backfill_voting_power_refreshed_timestamps(Duration::from_secs(0)); - - // Schedule the fix for the locked neuron - schedule_locked_spawning_neuron_fix(); } // Seeding interval seeks to find a balance between the need for rng secrecy, and @@ -324,18 +321,6 @@ fn schedule_vote_processing() { }); } -// TODO(NNS1-3526): Remove this method once it is released. -fn schedule_locked_spawning_neuron_fix() { - ic_cdk_timers::set_timer(Duration::from_secs(0), || { - spawn(async { - governance_mut() - .fix_locked_spawn_neuron() - .await - .expect("Failed to fix locked neuron"); - }); - }); -} - struct CanisterEnv { rng: Option, time_warp: GovTimeWarp, diff --git a/rs/nns/governance/src/governance.rs b/rs/nns/governance/src/governance.rs index ccfeb7948f0..a4add42fe8a 100644 --- a/rs/nns/governance/src/governance.rs +++ b/rs/nns/governance/src/governance.rs @@ -7089,67 +7089,6 @@ impl Governance { self.heap_data.spawning_neurons = Some(false); } - // TODO(NNS1-3526): Remove this method once it is released. - pub async fn fix_locked_spawn_neuron(&mut self) -> Result<(), GovernanceError> { - // ID of neuron that was locked when trying to spawn it due to ledger upgrade. - // Neuron's state was updated, but the ledger transaction did not finish. - const TARGETED_LOCK_TIMESTAMP: u64 = 1728911670; - - let id = 17912780790050115461; - let neuron_id = NeuronId { id }; - - let now_seconds = self.env.now(); - - match self.heap_data.in_flight_commands.get(&id) { - None => { - return Ok(()); - } - Some(existing_lock) => { - let NeuronInFlightCommand { - timestamp, - command: _, - } = existing_lock; - - // We check the exact timestamp so that new locks couldn't trigger this condition - // which would allow that neuron to repeatedly mint under the right conditions. - if *timestamp != TARGETED_LOCK_TIMESTAMP { - return Ok(()); - } - } - }; - - let (neuron_stake, subaccount) = self.with_neuron(&neuron_id, |neuron| { - let neuron_stake = neuron.cached_neuron_stake_e8s; - let subaccount = neuron.subaccount(); - (neuron_stake, subaccount) - })?; - - // Mint the ICP - match self - .ledger - .transfer_funds( - neuron_stake, - 0, // Minting transfer don't pay a fee. - None, - neuron_subaccount(subaccount), - now_seconds, - ) - .await - { - Ok(_) => { - self.heap_data.in_flight_commands.remove(&id); - Ok(()) - } - Err(error) => Err(GovernanceError::new_with_message( - ErrorType::Unavailable, - format!( - "Error fixing locked neuron: {:?}. 
Ledger update failed with err: {:?}.", - neuron_id, error - ), - )), - } - } - /// Return `true` if rewards should be distributed, `false` otherwise fn should_distribute_rewards(&self) -> bool { let latest_distribution_nominal_end_timestamp_seconds = diff --git a/rs/nns/governance/tests/governance.rs b/rs/nns/governance/tests/governance.rs index f1cde600775..534115d2941 100644 --- a/rs/nns/governance/tests/governance.rs +++ b/rs/nns/governance/tests/governance.rs @@ -136,10 +136,7 @@ use std::{ #[cfg(feature = "tla")] use ic_nns_governance::governance::tla::{check_traces as tla_check_traces, TLA_TRACES_LKEY}; -use ic_nns_governance::{ - pb::v1::governance::{neuron_in_flight_command, NeuronInFlightCommand}, - storage::reset_stable_memory, -}; +use ic_nns_governance::storage::reset_stable_memory; #[cfg(feature = "tla")] use tla_instrumentation_proc_macros::with_tla_trace_check; @@ -15042,70 +15039,3 @@ impl CMC for StubCMC { unimplemented!() } } - -// TODO(NNS1-3526): Remove after deployed and confirmed fix -#[test] -fn test_locked_neuron_is_unlocked_and_icp_is_minted() { - let epoch = DEFAULT_TEST_START_TIMESTAMP_SECONDS + (20 * ONE_YEAR_SECONDS); - let neuron_id = 17912780790050115461; - let mut nns = NNSBuilder::new() - .set_start_time(epoch) - .add_neuron( - NeuronBuilder::new(neuron_id, 1_200_000, PrincipalId::new_user_test_id(42)) - .set_dissolve_state(Some(DissolveState::WhenDissolvedTimestampSeconds(epoch))), - ) - .create(); - - nns.governance.heap_data.in_flight_commands.insert( - neuron_id, - NeuronInFlightCommand { - timestamp: 1728911670, - command: Some(neuron_in_flight_command::Command::Spawn(NeuronId { - id: neuron_id, - })), - }, - ); - - // B/c of how this test is setup, the neuron will have stake. - // We just want to make sure it can only incrase once. - assert_eq!( - nns.get_account_balance(nns.get_neuron_account_id(neuron_id)), - 1_200_000 - ); - - nns.governance - .fix_locked_spawn_neuron() - .now_or_never() - .unwrap() - .expect("Failed to fix locked spawn neuron"); - - assert_eq!(nns.governance.heap_data.in_flight_commands.len(), 0); - assert_eq!( - nns.get_account_balance(nns.get_neuron_account_id(neuron_id)), - 1_200_000 * 2 - ); - - // Nothing happens if you call it again with a differnt lock time. - nns.governance.heap_data.in_flight_commands.insert( - neuron_id, - NeuronInFlightCommand { - timestamp: 1728911671, - command: Some(neuron_in_flight_command::Command::Spawn(NeuronId { - id: neuron_id, - })), - }, - ); - nns.governance - .fix_locked_spawn_neuron() - .now_or_never() - .unwrap() - .expect("Failed to fix locked spawn neuron"); - - // Lock is not cleared b/c we didn't target that lock - assert_eq!(nns.governance.heap_data.in_flight_commands.len(), 1); - // Balance doesn't change this time. - assert_eq!( - nns.get_account_balance(nns.get_neuron_account_id(neuron_id)), - 1_200_000 * 2 - ); -} From 85af5fc7b9963f10bb6fd30d7929814501d783c7 Mon Sep 17 00:00:00 2001 From: "pr-automation-bot-public[bot]" <189003650+pr-automation-bot-public[bot]@users.noreply.github.com> Date: Wed, 15 Jan 2025 18:12:32 -0800 Subject: [PATCH 33/33] chore: Update Base Image Refs [2025-01-16-0145] (#3463) Updating base container image references. 
Run URL: https://github.com/dfinity/ic/actions/runs/12800380478 Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- ic-os/boundary-guestos/context/docker-base.prod | 2 +- ic-os/guestos/context/docker-base.dev | 2 +- ic-os/guestos/context/docker-base.prod | 2 +- ic-os/hostos/context/docker-base.dev | 2 +- ic-os/hostos/context/docker-base.prod | 2 +- ic-os/setupos/context/docker-base.dev | 2 +- ic-os/setupos/context/docker-base.prod | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/ic-os/boundary-guestos/context/docker-base.prod b/ic-os/boundary-guestos/context/docker-base.prod index 8abee27e8f2..157c9a4f6d7 100644 --- a/ic-os/boundary-guestos/context/docker-base.prod +++ b/ic-os/boundary-guestos/context/docker-base.prod @@ -1 +1 @@ -ghcr.io/dfinity/boundaryos-base@sha256:f771e5cbb6bc8c42983a331d52f1377b220006c8ced778bf17853359b7a8425d +ghcr.io/dfinity/boundaryos-base@sha256:aa0ccd79c78627faa6cce8083f116c2dbf8b9f098c5cd979a2f438babb382690 diff --git a/ic-os/guestos/context/docker-base.dev b/ic-os/guestos/context/docker-base.dev index fbe705c58e0..21253874ad2 100644 --- a/ic-os/guestos/context/docker-base.dev +++ b/ic-os/guestos/context/docker-base.dev @@ -1 +1 @@ -ghcr.io/dfinity/guestos-base-dev@sha256:8f9c6e9ee671ee35b7c78f8d2d6f02de515d88b2623ab3068ed6ccafbdf85367 +ghcr.io/dfinity/guestos-base-dev@sha256:71f39526238b5991007adc7d97ceb100d5c3ec44773dc18b2a38062bb1f7c61c diff --git a/ic-os/guestos/context/docker-base.prod b/ic-os/guestos/context/docker-base.prod index 796248cb4e3..81ca8a40e8d 100644 --- a/ic-os/guestos/context/docker-base.prod +++ b/ic-os/guestos/context/docker-base.prod @@ -1 +1 @@ -ghcr.io/dfinity/guestos-base@sha256:cacd67837eb7be7c2be6e3d7e3199ac8c32a738e0aa1205c7a3d938bee804882 +ghcr.io/dfinity/guestos-base@sha256:a2731e6fd8c8673a017445871e6c57e1c075f0d2a5da87b8e376e82970dae46f diff --git a/ic-os/hostos/context/docker-base.dev b/ic-os/hostos/context/docker-base.dev index d67538ae2a8..0697ec933f3 100644 --- a/ic-os/hostos/context/docker-base.dev +++ b/ic-os/hostos/context/docker-base.dev @@ -1 +1 @@ -ghcr.io/dfinity/hostos-base-dev@sha256:4c99f3bdeb4ab0dae3ef9b29b85392e0586cadc5b2c17d775a7d71e4eb5638ee +ghcr.io/dfinity/hostos-base-dev@sha256:60670ec6d57c893d5fa62d45a65c63dc633a930034c3036540afd5f12bc12fc8 diff --git a/ic-os/hostos/context/docker-base.prod b/ic-os/hostos/context/docker-base.prod index f0b91faaaaa..b4392626d0a 100644 --- a/ic-os/hostos/context/docker-base.prod +++ b/ic-os/hostos/context/docker-base.prod @@ -1 +1 @@ -ghcr.io/dfinity/hostos-base@sha256:a4d22768b111eff3ef5e875f83dc55eea9f6b853deb6753bfcd1071271b2303d +ghcr.io/dfinity/hostos-base@sha256:669a373b2aca8135da1e0a72286d4efee20d12e415b0d26f5129a04fc4d2943e diff --git a/ic-os/setupos/context/docker-base.dev b/ic-os/setupos/context/docker-base.dev index dcf1b5f7dfb..ab15c90ffc6 100644 --- a/ic-os/setupos/context/docker-base.dev +++ b/ic-os/setupos/context/docker-base.dev @@ -1 +1 @@ -ghcr.io/dfinity/setupos-base-dev@sha256:7d5ac4b029c62424b2563f35ec98cd7d2c3f4dc3e4b620872576c0f64973a338 +ghcr.io/dfinity/setupos-base-dev@sha256:e40adec389e5b4e5d3dab30119b38f4d0877198cf42b767603ae32a04af1a904 diff --git a/ic-os/setupos/context/docker-base.prod b/ic-os/setupos/context/docker-base.prod index 75515f4802d..302bf970dfc 100644 --- a/ic-os/setupos/context/docker-base.prod +++ b/ic-os/setupos/context/docker-base.prod @@ -1 +1 @@ -ghcr.io/dfinity/setupos-base@sha256:533a3ef6c894b3256e57253ac93597ad43b0a4595b621206bb82d79ba04448e1 
+ghcr.io/dfinity/setupos-base@sha256:2c3e414efc505fad7ffb03da0f6520f456438dcd59bf2a3949a3ddf8cc6f4f24
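
The two most substantive patches above lend themselves to small standalone illustrations.

Patch 28 relies on `Arc::try_unwrap` to wait until a reference-counted value has a single owner before destroying it. Below is a minimal, self-contained sketch of that wait-then-drop pattern; `drop_when_unique` is an illustrative name, and the generic resource stands in for PocketIC's actual `StateMachine`, so this is a sketch of the technique rather than the ic implementation.

```
use std::sync::Arc;
use std::thread;
use std::time::{Duration, Instant};

/// Spin until this thread holds the only strong reference, then hand the
/// value back by ownership. `Arc::try_unwrap` succeeds only when the strong
/// count is exactly one; on failure it returns the same `Arc` so we can retry.
fn drop_when_unique<T>(mut slot: Option<Arc<T>>, timeout: Duration) -> T {
    let start = Instant::now();
    loop {
        match Arc::try_unwrap(slot.take().expect("slot is refilled on every retry")) {
            Ok(value) => return value,
            Err(shared) => slot = Some(shared),
        }
        if start.elapsed() > timeout {
            panic!("timed out waiting for exclusive ownership");
        }
        thread::sleep(Duration::from_millis(50));
    }
}

fn main() {
    let resource = Arc::new(String::from("state"));
    let other_owner = Arc::clone(&resource);
    // Simulate another component releasing its reference after a short delay.
    let worker = thread::spawn(move || {
        thread::sleep(Duration::from_millis(200));
        drop(other_owner);
    });
    let owned = drop_when_unique(Some(resource), Duration::from_secs(5));
    println!("reclaimed: {owned}");
    worker.join().unwrap();
}
```

Like the patch, the sketch bounds the wait (PocketIC uses five minutes) so a leaked `Arc` turns into a loud panic instead of a silent hang.

Patch 29 keeps ingress messages serialized inside a map and deserializes them only on demand. A toy model of that layout, assuming `u64` ids and UTF-8 payloads in place of the real `IngressMessageId` and `SignedRequestBytes` types:

```
use std::collections::BTreeMap;

/// Toy payload: ids map to still-serialized message bytes, so the payload can
/// be compared and deduplicated without deserializing anything.
struct Payload {
    serialized: BTreeMap<u64, Vec<u8>>,
}

impl Payload {
    /// Deserialize a single message only when it is actually needed,
    /// mirroring the shape of `IngressPayload::get_by_id`.
    fn get(&self, id: &u64) -> Result<Option<String>, std::string::FromUtf8Error> {
        self.serialized
            .get(id)
            .map(|bytes| String::from_utf8(bytes.clone()))
            .transpose()
    }
}

fn main() {
    let mut serialized = BTreeMap::new();
    serialized.insert(1u64, b"hello".to_vec());
    let payload = Payload { serialized };
    assert_eq!(payload.get(&1).unwrap(), Some("hello".to_string()));
    assert_eq!(payload.get(&2).unwrap(), None);
}
```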