Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

add(scan): Run scans_for_new_key and scan_subscribe_results tests in CI #8275

Closed
wants to merge 7 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 14 additions & 0 deletions .github/workflows/ci-integration-tests-gcp.patch-external.yml
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,20 @@ jobs:
steps:
- run: 'echo "No build required"'

scans-for-new-key:
name: Scans for new key / Run scans-for-new-key test
needs: get-available-disks
runs-on: ubuntu-latest
steps:
- run: 'echo "No build required"'

scan-subscribe-results:
name: Scan subscribe results / Run scan-subscribe-results test
needs: get-available-disks
runs-on: ubuntu-latest
steps:
- run: 'echo "No build required"'

lightwalletd-full-sync:
name: lightwalletd tip / Run lwd-full-sync test
needs: get-available-disks
Expand Down
54 changes: 54 additions & 0 deletions .github/workflows/ci-integration-tests-gcp.yml
Original file line number Diff line number Diff line change
Expand Up @@ -631,6 +631,58 @@ jobs:
zebra_state_dir: "zebrad-cache"
secrets: inherit

# Test that the scanner can start scanning for a new key while running.
#
# Runs:
# - after every PR is merged to `main`
# - on every PR update
#
# If the state version has changed, waits for the new cached states to be created.
# Otherwise, if the state rebuild was skipped, runs immediately after the build job.
scans-for-new-key-test:
name: Scans for new key
needs: [test-full-sync, get-available-disks]
uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
with:
app_name: zebrad
test_id: scans-for-new-key
test_description: Test that the scanner can start scanning for a new key while running.
test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SCANS_FOR_NEW_KEY=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache"
needs_zebra_state: true
needs_lwd_state: false
saves_to_disk: false
disk_suffix: tip
root_state_path: "/var/cache"
zebra_state_dir: "zebrad-cache"
secrets: inherit

# Test that the scanner can send new scan results to a results channel for subscribed keys.
#
# Runs:
# - after every PR is merged to `main`
# - on every PR update
#
# If the state version has changed, waits for the new cached states to be created.
# Otherwise, if the state rebuild was skipped, runs immediately after the build job.
scan-subscribe-results-test:
name: Scan subscribe results
needs: [test-full-sync, get-available-disks]
uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml
if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }}
with:
app_name: zebrad
test_id: scan-subscribe-results
test_description: Test that the scanner can send new scan results to a results channel for subscribed keys.
test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_SCAN_SUBSCRIBE_RESULTS=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache"
needs_zebra_state: true
needs_lwd_state: false
saves_to_disk: false
disk_suffix: tip
root_state_path: "/var/cache"
zebra_state_dir: "zebrad-cache"
secrets: inherit

failure-issue:
name: Open or update issues for main branch failures
# When a new test is added to this workflow, add it to this list.
Expand All @@ -652,6 +704,8 @@ jobs:
get-block-template-test,
submit-block-test,
scan-start-where-left-test,
scans-for-new-key-test,
scan-subscribe-results-test,
]
# Only open tickets for failed scheduled jobs, manual workflow runs, or `main` branch merges.
# (PR statuses are already reported in the PR jobs list, and checked by Mergify.)
Expand Down
12 changes: 12 additions & 0 deletions docker/entrypoint.sh
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,8 @@ fi
: "${TEST_GET_BLOCK_TEMPLATE:=}"
: "${TEST_SUBMIT_BLOCK:=}"
: "${TEST_SCAN_START_WHERE_LEFT:=}"
: "${TEST_SCANS_FOR_NEW_KEY:=}"
: "${TEST_SCAN_SUBSCRIBE_RESULTS:=}"
: "${ENTRYPOINT_FEATURES:=}"

# Configuration file path
Expand Down Expand Up @@ -350,6 +352,16 @@ case "$1" in
check_directory_files "${ZEBRA_CACHED_STATE_DIR}"
run_cargo_test "shielded-scan" "scan_start_where_left"

elif [[ "${TEST_SCANS_FOR_NEW_KEY}" -eq "1" ]]; then
# Test that the scanner can scan for a newly registered key while it's running.
check_directory_files "${ZEBRA_CACHED_STATE_DIR}"
run_cargo_test "shielded-scan" "scans_for_new_key"

elif [[ "${TEST_SCAN_SUBSCRIBE_RESULTS}" -eq "1" ]]; then
# Test that the scanner can send new scan results to a results channel for subscribed keys.
check_directory_files "${ZEBRA_CACHED_STATE_DIR}"
run_cargo_test "shielded-scan" "scan_subscribe_results"

else
exec "$@"
fi
Expand Down
4 changes: 4 additions & 0 deletions docker/test.env
Original file line number Diff line number Diff line change
Expand Up @@ -58,3 +58,7 @@ TEST_LWD_TRANSACTIONS=
FULL_SYNC_MAINNET_TIMEOUT_MINUTES=
FULL_SYNC_TESTNET_TIMEOUT_MINUTES=
TEST_LWD_FULL_SYNC=
# These tests need a Zebra cached state
TEST_SCAN_START_WHERE_LEFT=
TEST_SCANS_FOR_NEW_KEY=
TEST_SCAN_SUBSCRIBE_RESULTS=
13 changes: 3 additions & 10 deletions zebrad/tests/common/shielded_scan/scans_for_new_key.rs
Original file line number Diff line number Diff line change
Expand Up @@ -53,8 +53,6 @@ pub(crate) async fn run() -> Result<()> {
.zebrad_state_path(test_name)
.expect("already checked that there is a cached state path");

let shielded_scan_config = zebra_scan::Config::default();

let (state_service, _read_state_service, latest_chain_tip, chain_tip_change) =
start_state_service_with_cache_dir(network, zebrad_state_path).await?;

Expand All @@ -78,18 +76,15 @@ pub(crate) async fn run() -> Result<()> {

tracing::info!("opened state service with valid chain tip height, deleting any past keys in db and starting scan task",);

// Before spawning `ScanTask`, delete past results for the zecpages key, if any.
let mut storage = Storage::new(&shielded_scan_config, network, false);
storage.delete_sapling_keys(vec![ZECPAGES_SAPLING_VIEWING_KEY.to_string()]);

let storage = Storage::new(&zebra_scan::Config::ephemeral(), network, false);
let state = ServiceBuilder::new().buffer(10).service(state_service);

let mut scan_task = ScanTask::spawn(storage, state, chain_tip_change);
let mut scan_task = ScanTask::spawn(storage.clone(), state, chain_tip_change);

tracing::info!("started scan task, sending register keys message with zecpages key to start scanning for a new key",);

scan_task.register_keys(
[(ZECPAGES_SAPLING_VIEWING_KEY.to_string(), None)]
[(ZECPAGES_SAPLING_VIEWING_KEY.to_string(), Some(780_000))]
.into_iter()
.collect(),
)?;
Expand All @@ -104,8 +99,6 @@ pub(crate) async fn run() -> Result<()> {

// Check that there are some results in the database for the key

let storage = Storage::new(&shielded_scan_config, network, true);

let results = storage.sapling_results(&ZECPAGES_SAPLING_VIEWING_KEY.to_string());

tracing::info!(?results, "got the results");
Expand Down
2 changes: 1 addition & 1 deletion zebrad/tests/common/shielded_scan/subscribe_results.rs
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ pub(crate) async fn run() -> Result<()> {
scan_task.register_keys(
keys.iter()
.cloned()
.map(|key| (key, Some(736000)))
.map(|key| (key, Some(780_000)))
.collect(),
)?;

Expand Down
Loading