From ce875f512b537d1404b64a8860815b146fe2096e Mon Sep 17 00:00:00 2001
From: Marek
Date: Fri, 7 Jun 2024 01:18:35 +0200
Subject: [PATCH 1/4] Remove code that starts the scanner

---
 zebrad/src/commands/start.rs | 29 -----------------------------
 1 file changed, 29 deletions(-)

diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs
index aa2b3f781f9..50d083d52d8 100644
--- a/zebrad/src/commands/start.rs
+++ b/zebrad/src/commands/start.rs
@@ -45,11 +45,6 @@
 //! * Progress Task
 //! * logs progress towards the chain tip
 //!
-//! Shielded Scanning:
-//! * Shielded Scanner Task
-//! * if the user has configured Zebra with their shielded viewing keys, scans new and existing
-//! blocks for transactions that use those keys
-//!
 //! Block Mining:
 //! * Internal Miner Task
 //! * if the user has configured Zebra to mine blocks, spawns tasks to generate new blocks,
@@ -339,24 +334,6 @@ impl StartCmd {
 tokio::spawn(syncer.sync().in_current_span())
 };

- #[cfg(feature = "shielded-scan")]
- // Spawn never ending scan task only if we have keys to scan for.
- let scan_task_handle = {
- // TODO: log the number of keys and update the scan_task_starts() test
- info!("spawning shielded scanner with configured viewing keys");
- zebra_scan::spawn_init(
- config.shielded_scan.clone(),
- config.network.network.clone(),
- state,
- chain_tip_change,
- )
- };
-
- #[cfg(not(feature = "shielded-scan"))]
- // Spawn a dummy scan task which doesn't do anything and never finishes.
- let scan_task_handle: tokio::task::JoinHandle<Result<(), Report>> =
- tokio::spawn(std::future::pending().in_current_span());
-
 // And finally, spawn the internal Zcash miner, if it is enabled.
 //
 // TODO: add a config to enable the miner rather than a feature.
@@ -398,7 +375,6 @@ impl StartCmd {
 pin!(tx_gossip_task_handle);
 pin!(progress_task_handle);
 pin!(end_of_support_task_handle);
- pin!(scan_task_handle);
 pin!(miner_task_handle);

 // startup tasks
@@ -487,10 +463,6 @@ impl StartCmd {
 Ok(())
 }

- scan_result = &mut scan_task_handle => scan_result
- .expect("unexpected panic in the scan task")
- .map(|_| info!("scan task exited")),
-
 miner_result = &mut miner_task_handle => miner_result
 .expect("unexpected panic in the miner task")
 .map(|_| info!("miner task exited")),
@@ -519,7 +491,6 @@ impl StartCmd {
 tx_gossip_task_handle.abort();
 progress_task_handle.abort();
 end_of_support_task_handle.abort();
- scan_task_handle.abort();
 miner_task_handle.abort();

 // startup tasks

From 93425f9020e74f35d4f886acdcf494385d9ad875 Mon Sep 17 00:00:00 2001
From: Marek
Date: Fri, 7 Jun 2024 01:59:58 +0200
Subject: [PATCH 2/4] Update CHANGELOG

---
 CHANGELOG.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index ecf031ee39b..9e0bda829e0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,10 @@ and this project adheres to [Semantic Versioning](https://semver.org).
 - We realized that a longer than `zcashd` end of support could be problematic in some cases so we reverted back from 20 to 16 weeks ([#8530](https://github.com/ZcashFoundation/zebra/pull/8530))
+- The `zebrad` binary no longer supports starting the scanner of shielded
+  transactions. ([#8594](https://github.com/ZcashFoundation/zebra/pull/8594))
+  This means `zebrad` no longer contains users' viewing keys, which are private
+  key material.
## [Zebra 1.7.0](https://github.com/ZcashFoundation/zebra/releases/tag/v1.7.0) - 2024-05-07 From cfecd64a171d328e220dc74a5f4a7470f5260dcd Mon Sep 17 00:00:00 2001 From: Marek Date: Mon, 10 Jun 2024 16:41:50 +0200 Subject: [PATCH 3/4] Disable tests of the scanner --- docker/entrypoint.sh | 3 +- zebrad/tests/acceptance.rs | 381 +++++++++++++++++++------------------ 2 files changed, 194 insertions(+), 190 deletions(-) diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 8cd61a49bb3..0357c6a5744 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -349,7 +349,8 @@ case "$1" in elif [[ "${TEST_SCAN_START_WHERE_LEFT}" -eq "1" ]]; then # Test that the scanner can continue scanning where it was left when zebrad restarts. check_directory_files "${ZEBRA_CACHED_STATE_DIR}" - run_cargo_test "shielded-scan" "scan_start_where_left" + # TODO: Move this test once we have the new scanner binary. + # run_cargo_test "shielded-scan" "scan_start_where_left" elif [[ "${TEST_SCAN_TASK_COMMANDS}" -eq "1" ]]; then # Test that the scanner can continue scanning where it was left when zebrad restarts. diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 2c8c692d4b9..a502447386f 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -2903,195 +2903,198 @@ async fn fully_synced_rpc_z_getsubtreesbyindex_snapshot_test() -> Result<()> { Ok(()) } -/// Test that the scanner task gets started when the node starts. -#[test] -#[cfg(feature = "shielded-scan")] -fn scan_task_starts() -> Result<()> { - use indexmap::IndexMap; - use zebra_scan::tests::ZECPAGES_SAPLING_VIEWING_KEY; - - let _init_guard = zebra_test::init(); - - let test_type = TestType::LaunchWithEmptyState { - launches_lightwalletd: false, - }; - let mut config = default_test_config(&Mainnet)?; - let mut keys = IndexMap::new(); - keys.insert(ZECPAGES_SAPLING_VIEWING_KEY.to_string(), 1); - config.shielded_scan.sapling_keys_to_scan = keys; - - // Start zebra with the config. - let mut zebrad = testdir()? - .with_exact_config(&config)? - .spawn_child(args!["start"])? - .with_timeout(test_type.zebrad_timeout()); - - // Check scanner was started. - zebrad.expect_stdout_line_matches("loaded Zebra scanner cache")?; - - // Look for 2 scanner notices indicating we are below sapling activation. - zebrad.expect_stdout_line_matches("scanner is waiting for Sapling activation. Current tip: [0-9]{1,4}, Sapling activation: 419200")?; - zebrad.expect_stdout_line_matches("scanner is waiting for Sapling activation. Current tip: [0-9]{1,4}, Sapling activation: 419200")?; - - // Kill the node. - zebrad.kill(false)?; - - // Check that scan task started and the first scanning is done. - let output = zebrad.wait_with_output()?; - - // Make sure the command was killed - output.assert_was_killed()?; - output.assert_failure()?; - - Ok(()) -} - -/// Test that the scanner gRPC server starts when the node starts. -#[tokio::test] -#[cfg(all(feature = "shielded-scan", not(target_os = "windows")))] -async fn scan_rpc_server_starts() -> Result<()> { - use zebra_grpc::scanner::{scanner_client::ScannerClient, Empty}; - - let _init_guard = zebra_test::init(); - - let test_type = TestType::LaunchWithEmptyState { - launches_lightwalletd: false, - }; - - let port = random_known_port(); - let listen_addr = format!("127.0.0.1:{port}"); - let mut config = default_test_config(&Mainnet)?; - config.shielded_scan.listen_addr = Some(listen_addr.parse()?); - - // Start zebra with the config. - let mut zebrad = testdir()? 
- .with_exact_config(&config)? - .spawn_child(args!["start"])? - .with_timeout(test_type.zebrad_timeout()); - - // Wait until gRPC server is starting. - tokio::time::sleep(LAUNCH_DELAY).await; - zebrad.expect_stdout_line_matches("starting scan gRPC server")?; - tokio::time::sleep(Duration::from_secs(1)).await; - - let mut client = ScannerClient::connect(format!("http://{listen_addr}")).await?; - - let request = tonic::Request::new(Empty {}); - - client.get_info(request).await?; - - // Kill the node. - zebrad.kill(false)?; - - // Check that scan task started and the first scanning is done. - let output = zebrad.wait_with_output()?; - - // Make sure the command was killed - output.assert_was_killed()?; - output.assert_failure()?; - - Ok(()) -} - -/// Test that the scanner can continue scanning where it was left when zebrad restarts. -/// -/// Needs a cache state close to the tip. A possible way to run it locally is: -/// -/// export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/state" -/// cargo test scan_start_where_left --features="shielded-scan" -- --ignored --nocapture -/// -/// The test will run zebrad with a key to scan, scan the first few blocks after sapling and then stops. -/// Then it will restart zebrad and check that it resumes scanning where it was left. -/// -/// Note: This test will remove all the contents you may have in the ZEBRA_CACHED_STATE_DIR/private-scan directory -/// so it can start with an empty scanning state. -#[ignore] -#[test] -#[cfg(feature = "shielded-scan")] -fn scan_start_where_left() -> Result<()> { - use indexmap::IndexMap; - use zebra_scan::{storage::db::SCANNER_DATABASE_KIND, tests::ZECPAGES_SAPLING_VIEWING_KEY}; - - let _init_guard = zebra_test::init(); - - // use `UpdateZebraCachedStateNoRpc` as the test type to make sure a zebrad cache state is available. - let test_type = TestType::UpdateZebraCachedStateNoRpc; - if let Some(cache_dir) = test_type.zebrad_state_path("scan test") { - // Add a key to the config - let mut config = default_test_config(&Mainnet)?; - let mut keys = IndexMap::new(); - keys.insert(ZECPAGES_SAPLING_VIEWING_KEY.to_string(), 1); - config.shielded_scan.sapling_keys_to_scan = keys; - - // Add the cache dir to shielded scan, make it the same as the zebrad cache state. - config - .shielded_scan - .db_config_mut() - .cache_dir - .clone_from(&cache_dir); - config.shielded_scan.db_config_mut().ephemeral = false; - - // Add the cache dir to state. - config.state.cache_dir.clone_from(&cache_dir); - config.state.ephemeral = false; - - // Remove the scan directory before starting. - let scan_db_path = cache_dir.join(SCANNER_DATABASE_KIND); - fs::remove_dir_all(std::path::Path::new(&scan_db_path)).ok(); - - // Start zebra with the config. - let mut zebrad = testdir()? - .with_exact_config(&config)? - .spawn_child(args!["start"])? - .with_timeout(test_type.zebrad_timeout()); - - // Check scanner was started. - zebrad.expect_stdout_line_matches("loaded Zebra scanner cache")?; - - // The first time - zebrad.expect_stdout_line_matches( - r"Scanning the blockchain for key 0, started at block 419200, now at block 420000", - )?; - - // Make sure scanner scans a few blocks. - zebrad.expect_stdout_line_matches( - r"Scanning the blockchain for key 0, started at block 419200, now at block 430000", - )?; - zebrad.expect_stdout_line_matches( - r"Scanning the blockchain for key 0, started at block 419200, now at block 440000", - )?; - - // Kill the node. 
- zebrad.kill(false)?; - let output = zebrad.wait_with_output()?; - - // Make sure the command was killed - output.assert_was_killed()?; - output.assert_failure()?; - - // Start the node again. - let mut zebrad = testdir()? - .with_exact_config(&config)? - .spawn_child(args!["start"])? - .with_timeout(test_type.zebrad_timeout()); - - // Resuming message. - zebrad.expect_stdout_line_matches( - "Last scanned height for key number 0 is 439000, resuming at 439001", - )?; - zebrad.expect_stdout_line_matches("loaded Zebra scanner cache")?; - - // Start scanning where it was left. - zebrad.expect_stdout_line_matches( - r"Scanning the blockchain for key 0, started at block 439001, now at block 440000", - )?; - zebrad.expect_stdout_line_matches( - r"Scanning the blockchain for key 0, started at block 439001, now at block 450000", - )?; - } - - Ok(()) -} +// TODO: Move this test once we have the new scanner binary. +// /// Test that the scanner task gets started when the node starts. +// #[test] +// #[cfg(feature = "shielded-scan")] +// fn scan_task_starts() -> Result<()> { +// use indexmap::IndexMap; +// use zebra_scan::tests::ZECPAGES_SAPLING_VIEWING_KEY; + +// let _init_guard = zebra_test::init(); + +// let test_type = TestType::LaunchWithEmptyState { +// launches_lightwalletd: false, +// }; +// let mut config = default_test_config(&Mainnet)?; +// let mut keys = IndexMap::new(); +// keys.insert(ZECPAGES_SAPLING_VIEWING_KEY.to_string(), 1); +// config.shielded_scan.sapling_keys_to_scan = keys; + +// // Start zebra with the config. +// let mut zebrad = testdir()? +// .with_exact_config(&config)? +// .spawn_child(args!["start"])? +// .with_timeout(test_type.zebrad_timeout()); + +// // Check scanner was started. +// zebrad.expect_stdout_line_matches("loaded Zebra scanner cache")?; + +// // Look for 2 scanner notices indicating we are below sapling activation. +// zebrad.expect_stdout_line_matches("scanner is waiting for Sapling activation. Current tip: [0-9]{1,4}, Sapling activation: 419200")?; +// zebrad.expect_stdout_line_matches("scanner is waiting for Sapling activation. Current tip: [0-9]{1,4}, Sapling activation: 419200")?; + +// // Kill the node. +// zebrad.kill(false)?; + +// // Check that scan task started and the first scanning is done. +// let output = zebrad.wait_with_output()?; + +// // Make sure the command was killed +// output.assert_was_killed()?; +// output.assert_failure()?; + +// Ok(()) +// } + +// TODO: Move this test once we have the new scanner binary. +// /// Test that the scanner gRPC server starts when the node starts. +// #[tokio::test] +// #[cfg(all(feature = "shielded-scan", not(target_os = "windows")))] +// async fn scan_rpc_server_starts() -> Result<()> { +// use zebra_grpc::scanner::{scanner_client::ScannerClient, Empty}; + +// let _init_guard = zebra_test::init(); + +// let test_type = TestType::LaunchWithEmptyState { +// launches_lightwalletd: false, +// }; + +// let port = random_known_port(); +// let listen_addr = format!("127.0.0.1:{port}"); +// let mut config = default_test_config(&Mainnet)?; +// config.shielded_scan.listen_addr = Some(listen_addr.parse()?); + +// // Start zebra with the config. +// let mut zebrad = testdir()? +// .with_exact_config(&config)? +// .spawn_child(args!["start"])? +// .with_timeout(test_type.zebrad_timeout()); + +// // Wait until gRPC server is starting. 
+// tokio::time::sleep(LAUNCH_DELAY).await; +// zebrad.expect_stdout_line_matches("starting scan gRPC server")?; +// tokio::time::sleep(Duration::from_secs(1)).await; + +// let mut client = ScannerClient::connect(format!("http://{listen_addr}")).await?; + +// let request = tonic::Request::new(Empty {}); + +// client.get_info(request).await?; + +// // Kill the node. +// zebrad.kill(false)?; + +// // Check that scan task started and the first scanning is done. +// let output = zebrad.wait_with_output()?; + +// // Make sure the command was killed +// output.assert_was_killed()?; +// output.assert_failure()?; + +// Ok(()) +// } + +// TODO: Move this test once we have the new scanner binary. +// /// Test that the scanner can continue scanning where it was left when zebrad restarts. +// /// +// /// Needs a cache state close to the tip. A possible way to run it locally is: +// /// +// /// export ZEBRA_CACHED_STATE_DIR="/path/to/zebra/state" +// /// cargo test scan_start_where_left --features="shielded-scan" -- --ignored --nocapture +// /// +// /// The test will run zebrad with a key to scan, scan the first few blocks after sapling and then stops. +// /// Then it will restart zebrad and check that it resumes scanning where it was left. +// /// +// /// Note: This test will remove all the contents you may have in the ZEBRA_CACHED_STATE_DIR/private-scan directory +// /// so it can start with an empty scanning state. +// #[ignore] +// #[test] +// #[cfg(feature = "shielded-scan")] +// fn scan_start_where_left() -> Result<()> { +// use indexmap::IndexMap; +// use zebra_scan::{storage::db::SCANNER_DATABASE_KIND, tests::ZECPAGES_SAPLING_VIEWING_KEY}; + +// let _init_guard = zebra_test::init(); + +// // use `UpdateZebraCachedStateNoRpc` as the test type to make sure a zebrad cache state is available. +// let test_type = TestType::UpdateZebraCachedStateNoRpc; +// if let Some(cache_dir) = test_type.zebrad_state_path("scan test") { +// // Add a key to the config +// let mut config = default_test_config(&Mainnet)?; +// let mut keys = IndexMap::new(); +// keys.insert(ZECPAGES_SAPLING_VIEWING_KEY.to_string(), 1); +// config.shielded_scan.sapling_keys_to_scan = keys; + +// // Add the cache dir to shielded scan, make it the same as the zebrad cache state. +// config +// .shielded_scan +// .db_config_mut() +// .cache_dir +// .clone_from(&cache_dir); +// config.shielded_scan.db_config_mut().ephemeral = false; + +// // Add the cache dir to state. +// config.state.cache_dir.clone_from(&cache_dir); +// config.state.ephemeral = false; + +// // Remove the scan directory before starting. +// let scan_db_path = cache_dir.join(SCANNER_DATABASE_KIND); +// fs::remove_dir_all(std::path::Path::new(&scan_db_path)).ok(); + +// // Start zebra with the config. +// let mut zebrad = testdir()? +// .with_exact_config(&config)? +// .spawn_child(args!["start"])? +// .with_timeout(test_type.zebrad_timeout()); + +// // Check scanner was started. +// zebrad.expect_stdout_line_matches("loaded Zebra scanner cache")?; + +// // The first time +// zebrad.expect_stdout_line_matches( +// r"Scanning the blockchain for key 0, started at block 419200, now at block 420000", +// )?; + +// // Make sure scanner scans a few blocks. +// zebrad.expect_stdout_line_matches( +// r"Scanning the blockchain for key 0, started at block 419200, now at block 430000", +// )?; +// zebrad.expect_stdout_line_matches( +// r"Scanning the blockchain for key 0, started at block 419200, now at block 440000", +// )?; + +// // Kill the node. 
+// zebrad.kill(false)?; +// let output = zebrad.wait_with_output()?; + +// // Make sure the command was killed +// output.assert_was_killed()?; +// output.assert_failure()?; + +// // Start the node again. +// let mut zebrad = testdir()? +// .with_exact_config(&config)? +// .spawn_child(args!["start"])? +// .with_timeout(test_type.zebrad_timeout()); + +// // Resuming message. +// zebrad.expect_stdout_line_matches( +// "Last scanned height for key number 0 is 439000, resuming at 439001", +// )?; +// zebrad.expect_stdout_line_matches("loaded Zebra scanner cache")?; + +// // Start scanning where it was left. +// zebrad.expect_stdout_line_matches( +// r"Scanning the blockchain for key 0, started at block 439001, now at block 440000", +// )?; +// zebrad.expect_stdout_line_matches( +// r"Scanning the blockchain for key 0, started at block 439001, now at block 450000", +// )?; +// } + +// Ok(()) +// } // TODO: Add this test to CI (#8236) /// Tests successful: From ae2f04b3420d05c14bc0bee80325d5ee6ecc7b18 Mon Sep 17 00:00:00 2001 From: Marek Date: Mon, 10 Jun 2024 17:27:40 +0200 Subject: [PATCH 4/4] Disable scanning tests in CI --- .../workflows/ci-integration-tests-gcp.yml | 221 +++++++++--------- docker/entrypoint.sh | 3 +- 2 files changed, 112 insertions(+), 112 deletions(-) diff --git a/.github/workflows/ci-integration-tests-gcp.yml b/.github/workflows/ci-integration-tests-gcp.yml index 89544054709..b26510280d8 100644 --- a/.github/workflows/ci-integration-tests-gcp.yml +++ b/.github/workflows/ci-integration-tests-gcp.yml @@ -17,36 +17,36 @@ on: # Run this job every Friday at mid-day UTC # This is limited to the Zebra and lightwalletd Full Sync jobs # TODO: we should move this behavior to a separate workflow - - cron: '0 12 * * 5' + - cron: "0 12 * * 5" workflow_dispatch: inputs: network: - default: 'Mainnet' - description: 'Network to deploy: Mainnet or Testnet' + default: "Mainnet" + description: "Network to deploy: Mainnet or Testnet" required: true regenerate-disks: type: boolean default: false - description: 'Just run a Zebra checkpoint sync and update checkpoint disks' + description: "Just run a Zebra checkpoint sync and update checkpoint disks" required: true run-full-sync: type: boolean default: false - description: 'Just run a Zebra full sync on `network`, and update tip disks' + description: "Just run a Zebra full sync on `network`, and update tip disks" required: true run-lwd-sync: type: boolean default: false - description: 'Just run a lightwalletd full sync and update tip disks' + description: "Just run a lightwalletd full sync and update tip disks" required: true force_save_to_disk: required: false type: boolean default: false - description: 'Force tests to always create a cached state disk, if they already create disks' + description: "Force tests to always create a cached state disk, if they already create disks" no_cache: - description: 'Disable the Docker cache for this build' + description: "Disable the Docker cache for this build" required: false type: boolean default: false @@ -55,23 +55,23 @@ on: # Skip PRs where Rust code and dependencies aren't modified. 
paths: # code and tests - - '**/*.rs' + - "**/*.rs" # hard-coded checkpoints and proptest regressions - - '**/*.txt' + - "**/*.txt" # test data snapshots - - '**/*.snap' + - "**/*.snap" # dependencies - - '**/Cargo.toml' - - '**/Cargo.lock' + - "**/Cargo.toml" + - "**/Cargo.lock" # configuration files - - '.cargo/config.toml' - - '**/clippy.toml' + - ".cargo/config.toml" + - "**/clippy.toml" # workflow definitions - - 'docker/**' - - '.github/workflows/ci-integration-tests-gcp.yml' - - '.github/workflows/sub-deploy-integration-tests-gcp.yml' - - '.github/workflows/sub-build-docker-image.yml' - - '.github/workflows/sub-find-cached-disks.yml' + - "docker/**" + - ".github/workflows/ci-integration-tests-gcp.yml" + - ".github/workflows/sub-deploy-integration-tests-gcp.yml" + - ".github/workflows/sub-build-docker-image.yml" + - ".github/workflows/sub-find-cached-disks.yml" push: # Skip main branch updates where Rust code and dependencies aren't modified. @@ -79,24 +79,24 @@ on: - main paths: # code and tests - - '**/*.rs' + - "**/*.rs" # hard-coded checkpoints and proptest regressions - - '**/*.txt' + - "**/*.txt" # test data snapshots - - '**/*.snap' + - "**/*.snap" # dependencies - - '**/Cargo.toml' - - '**/Cargo.lock' + - "**/Cargo.toml" + - "**/Cargo.lock" # configuration files - - '.cargo/config.toml' - - '**/clippy.toml' + - ".cargo/config.toml" + - "**/clippy.toml" # workflow definitions - - 'docker/**' - - '.dockerignore' - - '.github/workflows/ci-integration-tests-gcp.yml' - - '.github/workflows/sub-deploy-integration-tests-gcp.yml' - - '.github/workflows/sub-find-cached-disks.yml' - - '.github/workflows/sub-build-docker-image.yml' + - "docker/**" + - ".dockerignore" + - ".github/workflows/ci-integration-tests-gcp.yml" + - ".github/workflows/sub-deploy-integration-tests-gcp.yml" + - ".github/workflows/sub-find-cached-disks.yml" + - ".github/workflows/sub-build-docker-image.yml" # IMPORTANT # @@ -130,7 +130,7 @@ jobs: if: ${{ !startsWith(github.event_name, 'pull') || !github.event.pull_request.head.repo.fork }} uses: ./.github/workflows/sub-find-cached-disks.yml with: - network: 'Testnet' + network: "Testnet" # Build the docker image used by the tests. # @@ -161,14 +161,14 @@ jobs: # Note: the output from get-available-disks should match with the caller workflow inputs regenerate-stateful-disks: name: Zebra checkpoint - needs: [ build, get-available-disks ] + needs: [build, get-available-disks] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !fromJSON(needs.get-available-disks.outputs.zebra_checkpoint_disk) || github.event.inputs.regenerate-disks == 'true' }} with: app_name: zebrad test_id: sync-to-checkpoint test_description: Test sync up to mandatory checkpoint - test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_DISK_REBUILD=1 -e ZEBRA_FORCE_USE_COLOR=1' + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_DISK_REBUILD=1 -e ZEBRA_FORCE_USE_COLOR=1" needs_zebra_state: false saves_to_disk: true force_save_to_disk: ${{ inputs.force_save_to_disk || false }} @@ -190,14 +190,14 @@ jobs: # Otherwise, if the state rebuild was skipped, runs immediately after the build job. 
test-stateful-sync: name: Zebra checkpoint update - needs: [ regenerate-stateful-disks, get-available-disks ] + needs: [regenerate-stateful-disks, get-available-disks] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_checkpoint_disk) || needs.regenerate-stateful-disks.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad test_id: sync-past-checkpoint test_description: Test full validation sync from a cached state - test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_CHECKPOINT_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1' + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_CHECKPOINT_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1" needs_zebra_state: true saves_to_disk: false disk_suffix: checkpoint @@ -218,7 +218,7 @@ jobs: # Note: the output from get-available-disks should match with the caller workflow inputs test-full-sync: name: Zebra tip - needs: [ build, get-available-disks ] + needs: [build, get-available-disks] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ github.event_name == 'schedule' || !fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet') }} with: @@ -227,7 +227,7 @@ jobs: test_description: Test a full sync up to the tip # The value of FULL_SYNC_MAINNET_TIMEOUT_MINUTES is currently ignored. # TODO: update the test to use {{ input.network }} instead? - test_variables: '-e NETWORK=Mainnet -e FULL_SYNC_MAINNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1' + test_variables: "-e NETWORK=Mainnet -e FULL_SYNC_MAINNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1" # This test runs for longer than 6 hours, so it needs multiple jobs is_long_test: true needs_zebra_state: false @@ -261,21 +261,21 @@ jobs: # Otherwise, if the state rebuild was skipped, runs immediately after the build job. test-update-sync: name: Zebra tip update - needs: [ test-full-sync, get-available-disks ] + needs: [test-full-sync, get-available-disks] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad test_id: update-to-tip test_description: Test syncing to tip with a Zebra tip state - test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_UPDATE_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_UPDATE_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" needs_zebra_state: true # update the disk on every PR, to increase CI speed saves_to_disk: true force_save_to_disk: ${{ inputs.force_save_to_disk || false }} disk_suffix: tip - root_state_path: '/var/cache' - zebra_state_dir: 'zebrad-cache' + root_state_path: "/var/cache" + zebra_state_dir: "zebrad-cache" height_grep_text: 'current_height.*=.*Height.*\(' secrets: inherit @@ -294,7 +294,7 @@ jobs: # Otherwise, if the state rebuild was skipped, runs immediately after the build job. 
checkpoints-mainnet: name: Generate checkpoints mainnet - needs: [ test-full-sync, get-available-disks ] + needs: [test-full-sync, get-available-disks] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: @@ -302,13 +302,13 @@ jobs: test_id: checkpoints-mainnet test_description: Generate Zebra checkpoints on mainnet # TODO: update the test to use {{ input.network }} instead? - test_variables: '-e NETWORK=Mainnet -e GENERATE_CHECKPOINTS_MAINNET=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' + test_variables: "-e NETWORK=Mainnet -e GENERATE_CHECKPOINTS_MAINNET=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" needs_zebra_state: true # test-update-sync updates the disk on every PR, so we don't need to do it here saves_to_disk: false disk_suffix: tip - root_state_path: '/var/cache' - zebra_state_dir: 'zebrad-cache' + root_state_path: "/var/cache" + zebra_state_dir: "zebrad-cache" height_grep_text: 'current_height.*=.*Height.*\(' secrets: inherit @@ -329,7 +329,7 @@ jobs: # Note: the output from get-available-disks-testnet should match with the caller workflow inputs test-full-sync-testnet: name: Zebra tip on testnet - needs: [ build, get-available-disks-testnet ] + needs: [build, get-available-disks-testnet] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ (github.event_name == 'schedule' && vars.SCHEDULE_TESTNET_FULL_SYNC == 'true') || !fromJSON(needs.get-available-disks-testnet.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && (inputs.network || vars.ZCASH_NETWORK) == 'Testnet') }} with: @@ -337,7 +337,7 @@ jobs: test_id: full-sync-testnet test_description: Test a full sync up to the tip on testnet # The value of FULL_SYNC_TESTNET_TIMEOUT_MINUTES is currently ignored. - test_variables: '-e NETWORK=Testnet -e FULL_SYNC_TESTNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1' + test_variables: "-e NETWORK=Testnet -e FULL_SYNC_TESTNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1" network: "Testnet" # A full testnet sync could take 2-10 hours in April 2023. # The time varies a lot due to the small number of nodes. @@ -375,14 +375,14 @@ jobs: # Otherwise, if the state rebuild was skipped, runs immediately after the build job. 
checkpoints-testnet: name: Generate checkpoints testnet - needs: [ test-full-sync-testnet, get-available-disks-testnet ] + needs: [test-full-sync-testnet, get-available-disks-testnet] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks-testnet.outputs.zebra_tip_disk) || needs.test-full-sync-testnet.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad test_id: checkpoints-testnet test_description: Generate Zebra checkpoints on testnet - test_variables: '-e NETWORK=Testnet -e GENERATE_CHECKPOINTS_TESTNET=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' + test_variables: "-e NETWORK=Testnet -e GENERATE_CHECKPOINTS_TESTNET=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" network: "Testnet" needs_zebra_state: true # update the disk on every PR, to increase CI speed @@ -390,8 +390,8 @@ jobs: saves_to_disk: true force_save_to_disk: ${{ inputs.force_save_to_disk || false }} disk_suffix: tip - root_state_path: '/var/cache' - zebra_state_dir: 'zebrad-cache' + root_state_path: "/var/cache" + zebra_state_dir: "zebrad-cache" height_grep_text: 'zebra_tip_height.*=.*Height.*\(' secrets: inherit @@ -408,7 +408,7 @@ jobs: # Otherwise, if the state rebuild was skipped, runs immediately after the build job. lightwalletd-full-sync: name: lightwalletd tip - needs: [ test-full-sync, get-available-disks ] + needs: [test-full-sync, get-available-disks] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml # Currently the lightwalletd tests only work on Mainnet if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && (github.event_name == 'schedule' || !fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || github.event.inputs.run-lwd-sync == 'true' ) }} @@ -416,7 +416,7 @@ jobs: app_name: lightwalletd test_id: lwd-full-sync test_description: Test lightwalletd full sync - test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_FULL_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_FULL_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache" # This test runs for longer than 6 hours, so it needs multiple jobs is_long_test: true needs_zebra_state: true @@ -425,10 +425,10 @@ jobs: force_save_to_disk: ${{ inputs.force_save_to_disk || false }} disk_prefix: lwd-cache disk_suffix: tip - root_state_path: '/var/cache' - zebra_state_dir: 'zebrad-cache' - lwd_state_dir: 'lwd-cache' - height_grep_text: 'Waiting for block: ' + root_state_path: "/var/cache" + zebra_state_dir: "zebrad-cache" + lwd_state_dir: "lwd-cache" + height_grep_text: "Waiting for block: " secrets: inherit # We want to prevent multiple lightwalletd full syncs running at the same time, # but we don't want to cancel running syncs on `main` if a new PR gets merged, @@ -448,24 +448,24 @@ jobs: # Otherwise, if the state rebuild was skipped, runs immediately after the build job. 
lightwalletd-update-sync: name: lightwalletd tip update - needs: [ lightwalletd-full-sync, get-available-disks ] + needs: [lightwalletd-full-sync, get-available-disks] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd test_id: lwd-update-sync test_description: Test lightwalletd update sync with both states - test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_UPDATE_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_UPDATE_SYNC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache" needs_zebra_state: true needs_lwd_state: true saves_to_disk: true force_save_to_disk: ${{ inputs.force_save_to_disk || false }} disk_prefix: lwd-cache disk_suffix: tip - root_state_path: '/var/cache' - zebra_state_dir: 'zebrad-cache' - lwd_state_dir: 'lwd-cache' - height_grep_text: 'Waiting for block: ' + root_state_path: "/var/cache" + zebra_state_dir: "zebrad-cache" + lwd_state_dir: "lwd-cache" + height_grep_text: "Waiting for block: " secrets: inherit # Test that Zebra can answer a synthetic RPC call, using a cached Zebra tip state @@ -480,19 +480,19 @@ jobs: # TODO: move this job below the rest of the mainnet jobs that just use Zebra cached state lightwalletd-rpc-test: name: Zebra tip JSON-RPC - needs: [ test-full-sync, get-available-disks ] + needs: [test-full-sync, get-available-disks] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd test_id: fully-synced-rpc test_description: Test lightwalletd RPC with a Zebra tip state - test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_RPC_CALL=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_RPC_CALL=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" needs_zebra_state: true saves_to_disk: false disk_suffix: tip - root_state_path: '/var/cache' - zebra_state_dir: 'zebrad-cache' + root_state_path: "/var/cache" + zebra_state_dir: "zebrad-cache" secrets: inherit # Test that Zebra can handle a lightwalletd send transaction RPC call, using a cached Zebra tip state @@ -505,21 +505,21 @@ jobs: # Otherwise, if the state rebuild was skipped, runs immediately after the build job. 
lightwalletd-transactions-test: name: lightwalletd tip send - needs: [ lightwalletd-full-sync, get-available-disks ] + needs: [lightwalletd-full-sync, get-available-disks] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd test_id: lwd-send-transactions test_description: Test sending transactions via lightwalletd - test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_TRANSACTIONS=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_TRANSACTIONS=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache" needs_zebra_state: true needs_lwd_state: true saves_to_disk: false disk_suffix: tip - root_state_path: '/var/cache' - zebra_state_dir: 'zebrad-cache' - lwd_state_dir: 'lwd-cache' + root_state_path: "/var/cache" + zebra_state_dir: "zebrad-cache" + lwd_state_dir: "lwd-cache" secrets: inherit # Test that Zebra can handle gRPC wallet calls, using a cached Zebra tip state @@ -532,21 +532,21 @@ jobs: # Otherwise, if the state rebuild was skipped, runs immediately after the build job. lightwalletd-grpc-test: name: lightwalletd GRPC tests - needs: [ lightwalletd-full-sync, get-available-disks ] + needs: [lightwalletd-full-sync, get-available-disks] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd test_id: lwd-grpc-wallet test_description: Test gRPC calls via lightwalletd - test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_GRPC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache' + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_GRPC=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache -e LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache" needs_zebra_state: true needs_lwd_state: true saves_to_disk: false disk_suffix: tip - root_state_path: '/var/cache' - zebra_state_dir: 'zebrad-cache' - lwd_state_dir: 'lwd-cache' + root_state_path: "/var/cache" + zebra_state_dir: "zebrad-cache" + lwd_state_dir: "lwd-cache" secrets: inherit ## getblocktemplate-rpcs using cached Zebra state on mainnet @@ -563,20 +563,20 @@ jobs: # Otherwise, if the state rebuild was skipped, runs immediately after the build job. 
get-block-template-test: name: get block template - needs: [ test-full-sync, get-available-disks ] + needs: [test-full-sync, get-available-disks] uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad test_id: get-block-template test_description: Test getblocktemplate RPC method via Zebra's rpc server - test_variables: '-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_GET_BLOCK_TEMPLATE=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache' + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_GET_BLOCK_TEMPLATE=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" needs_zebra_state: true needs_lwd_state: false saves_to_disk: false disk_suffix: tip - root_state_path: '/var/cache' - zebra_state_dir: 'zebrad-cache' + root_state_path: "/var/cache" + zebra_state_dir: "zebrad-cache" secrets: inherit # Test that Zebra can handle a submit block RPC call, using a cached Zebra tip state @@ -605,31 +605,32 @@ jobs: zebra_state_dir: "zebrad-cache" secrets: inherit - # Test that the scanner can continue scanning where it was left when zebrad restarts. - # - # Runs: - # - after every PR is merged to `main` - # - on every PR update - # - # If the state version has changed, waits for the new cached states to be created. - # Otherwise, if the state rebuild was skipped, runs immediately after the build job. - scan-start-where-left-test: - name: Scan starts where left - needs: [test-full-sync, get-available-disks] - uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml - if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} - with: - app_name: zebrad - test_id: scan-start-where-left - test_description: Test that the scanner can continue scanning where it was left when zebrad restarts. - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SCAN_START_WHERE_LEFT=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" - needs_zebra_state: true - needs_lwd_state: false - saves_to_disk: true - disk_suffix: tip - root_state_path: "/var/cache" - zebra_state_dir: "zebrad-cache" - secrets: inherit + # TODO: Move this test once we have the new scanner binary. + # # Test that the scanner can continue scanning where it was left when zebrad restarts. + # # + # # Runs: + # # - after every PR is merged to `main` + # # - on every PR update + # # + # # If the state version has changed, waits for the new cached states to be created. + # # Otherwise, if the state rebuild was skipped, runs immediately after the build job. 
+ # scan-start-where-left-test: + # name: Scan starts where left + # needs: [test-full-sync, get-available-disks] + # uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml + # if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} + # with: + # app_name: zebrad + # test_id: scan-start-where-left + # test_description: Test that the scanner can continue scanning where it was left when zebrad restarts. + # test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SCAN_START_WHERE_LEFT=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" + # needs_zebra_state: true + # needs_lwd_state: false + # saves_to_disk: true + # disk_suffix: tip + # root_state_path: "/var/cache" + # zebra_state_dir: "zebrad-cache" + # secrets: inherit # Test that the scan task registers keys, deletes keys, and subscribes to results for keys while running. # @@ -678,7 +679,7 @@ jobs: get-block-template-test, submit-block-test, scan-start-where-left-test, - scan-task-commands-test + scan-task-commands-test, ] # Only open tickets for failed scheduled jobs, manual workflow runs, or `main` branch merges. # (PR statuses are already reported in the PR jobs list, and checked by Mergify.) diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 0357c6a5744..8cd61a49bb3 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -349,8 +349,7 @@ case "$1" in elif [[ "${TEST_SCAN_START_WHERE_LEFT}" -eq "1" ]]; then # Test that the scanner can continue scanning where it was left when zebrad restarts. check_directory_files "${ZEBRA_CACHED_STATE_DIR}" - # TODO: Move this test once we have the new scanner binary. - # run_cargo_test "shielded-scan" "scan_start_where_left" + run_cargo_test "shielded-scan" "scan_start_where_left" elif [[ "${TEST_SCAN_TASK_COMMANDS}" -eq "1" ]]; then # Test that the scanner can continue scanning where it was left when zebrad restarts.