From 2ab24278a90f27790e22e3615ddf3d7e68c9b8a8 Mon Sep 17 00:00:00 2001
From: Michael Sutton
Date: Mon, 7 Oct 2024 18:16:32 +0000
Subject: [PATCH] Squashed commit of the following:
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

commit 1378e7b6015fcbcc098ec03103f6ce02273adefc
Author: D-Stacks <78099568+D-Stacks@users.noreply.github.com>
Date: Mon Oct 7 14:41:32 2024 +0200

replace statrs and statest deps & upgrade some deps. (#425)

* replace statrs and statest deps.
* remove todo in Cargo.toml, and fmt & lints.
* do a run of `cargo audit fix` for some miscellaneous reports.
* use maintained alt ks crate.
* add Cargo.lock.
* update
* use new command
* newline
* refresh Cargo.lock with a few more version updates
* fix minor README glitches
---------
Co-authored-by: Michael Sutton

commit b37f0305401b16e80fea74e6d3b1a2cbb7ac5c44
Author: Michael Sutton
Date: Sun Oct 6 14:55:24 2024 +0300

Bump tonic version (#579)

commit 2b0f3ab57c09d58082560ba81447c52630506b51
Author: George Bogodukhov
Date: Thu Oct 3 16:36:45 2024 +1000

Fix README.md layout and add linting section (#488)

commit 66959d4a72abc150454da12179c0bef553a8bdd7
Author: Maxim <59533214+biryukovmaxim@users.noreply.github.com>
Date: Mon Sep 30 20:43:35 2024 +0400

Bump tonic and prost versions, adapt middlewares (#553)

* bump tonic and prost versions; update middlewares
* use unbounded channel
* change log level to trace
* use bounded channel
* reuse the counting-bytes body to measure body bytes
* remove unneeded clone

commit 3bc2844ee36eb1dd4af8a342ba56a2663b7a5c25
Author: aspect
Date: Sun Sep 29 18:59:54 2024 +0300

cleanup legacy bip39 cfg that interferes with docs.rs builds (#573)

commit 180114e6a7839eec287687a3d5debcf0da593057
Author: aspect
Date: Sun Sep 29 17:14:16 2024 +0300

fix wasm rpc method types for methods without mandatory arguments (#572)

commit 035a394d144bac7f142c9556f65da4612c24a8fe
Author: aspect
Date: Fri Sep 27 05:59:59 2024 +0300

Documentation updates (#570)

* docs
* Export ConsensusSessionOwned
* add CI pass to run `cargo doc`
* module rust docs
* lints
* fix typos
* replace glob import terminology with "re-exports"
* cleanup

commit 200b8ea63a6786d784713de092810a6854b81880
Author: aspect
Date: Fri Sep 27 02:09:57 2024 +0300

fix wRPC json notification format (#571)

commit 4bfa392922fff59c1e248b7b141d07b35f3294bc
Author: Maxim <59533214+biryukovmaxim@users.noreply.github.com>
Date: Tue Sep 24 21:45:43 2024 +0400

fix wrong combiner condition (#567)

commit d66cbe3300bb54adfbbf38327881b20b2909d3ba
Author: demisrael <81626907+demisrael@users.noreply.github.com>
Date: Mon Sep 23 08:02:58 2024 +0300

rothschild: donate funds to external address with custom priority fee (#482)

* rothschild: donate funds to external address
Signed-off-by: Dmitry Perchanov
* rothschild: Append priority fee to txs.
Signed-off-by: Dmitry Perchanov
* rothschild: add option to choose and randomize fee
Signed-off-by: Dmitry Perchanov
* rothschild: address clippy formatting issues
Signed-off-by: Dmitry Perchanov
---------
Signed-off-by: Dmitry Perchanov
Co-authored-by: coderofstuff <114628839+coderofstuff@users.noreply.github.com>
Co-authored-by: Dmitry Perchanov

commit 9fae376500c3b7bde4ac0d0f03f15d47a4d6f12c
Author: Michael Sutton
Date: Fri Sep 20 13:31:55 2024 +0300

Deploy linux binary without musl in its name + various miscellaneous things towards v0.15.2 (#564)

* remove musl from linux binary name
* remove simpa from win and osx builds in order to be consistent with linux build
* safe equivalent optimization: use in-place truncate (tested)

commit 4d03153f9cb3d7e6674ac7c28c1956c0f4b75a03
Author: Michael Sutton
Date: Fri Sep 20 01:58:28 2024 +0300

A few CLI rpc query fixes (#563)

commit b14537fac0c69505e64743dba8270c06fa1f11f5
Author: D-Stacks <78099568+D-Stacks@users.noreply.github.com>
Date: Thu Sep 19 22:16:54 2024 +0200

`virtual chain from block` batching. (#454)

* expose vspc_from_block batching possibilities to rpc.
* fmt
* limit by merged blocks, set source as default start.
* small clean-up
* fmt
* actually bound by num of merged blocks, in the include-transactions case.
* fmt
* update.
* update_2
* new_high = high
* remove high hash in consensus api, as it is not required.
* fmt
* make proto comment more accurate.
* fix tests and lints, add to ser/der correctly.
* change two frequent warns to debug
* remove option, default to pp, not source.
* fix integration test, some Option left.
* bump version: `0.15.1 => 0.15.2`
* remove "optional" startHash
* add to cli rpc.rs
* remove comment.
* edit comment in .proto referencing default startHash behavior.
* only batch added chain blocks, not removed; add check if source is a chain ancestor of high.
* remove dangling code in comment
* remove error from some prev. commit.
* Optionalize limits.
---------
Co-authored-by: Michael Sutton
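The batching in #454 above bounds each `virtual chain from block` response by the number of merged blocks, so a caller pages until it catches up with the virtual chain. A minimal client-side sketch of that loop; `Hash`, `VirtualChainBatch` and the fetch/process callbacks are hypothetical stand-ins, not the actual RPC types:

    // Sketch only: fetch_batch stands in for the batched
    // getVirtualChainFromBlock RPC call introduced by this PR.
    #[derive(Clone, Copy, PartialEq, Eq)]
    struct Hash([u8; 32]);

    struct VirtualChainBatch {
        added_chain_block_hashes: Vec<Hash>,
        removed_chain_block_hashes: Vec<Hash>,
    }

    // Each response is bounded server-side (by merged blocks / accepted txs),
    // so the client simply resumes from the last chain block it received.
    fn sync_virtual_chain(
        mut start_hash: Hash,
        fetch_batch: impl Fn(Hash) -> VirtualChainBatch,
        mut process_batch: impl FnMut(&VirtualChainBatch),
    ) {
        loop {
            let batch = fetch_batch(start_hash);
            process_batch(&batch);
            match batch.added_chain_block_hashes.last() {
                Some(tip) => start_hash = *tip, // continue from the new tip
                None => break,                  // empty batch: caught up
            }
        }
    }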
commit 613d082b0c2e51247819d932af7ddb5ebd5aa460
Author: Michael Sutton
Date: Sun Sep 15 02:06:13 2024 +0300

Fix new gRPC methods to use camel case (non-breaking change) (#560)

commit f8f9915271c2f2d724dc40875f1967e02f73d6e6
Author: Cryptok777 <119089486+Cryptok777@users.noreply.github.com>
Date: Thu Sep 12 03:47:21 2024 -0700

Fix typo in IFeeEstimate (#557)

commit f335376be8d720f709f04f3d12fe27a5dea5535f
Author: Michael Sutton
Date: Wed Sep 11 20:55:53 2024 +0300

Bump to version 0.15.1 (#555)

* bump version to 0.15.1
* update readme with new branch strategy

commit 2a99817ee331c0295c1ae65231d957901c0c870c
Author: aspect
Date: Wed Sep 11 00:38:39 2024 +0300

15 rc4 updates (#554)

* metrics: fix first snapshot sample + cleanup
* Wallet SDK: scan() - fix UtxoContext processing latency during scan. Add UtxoProcessor notification lock to the scan processor.
* cleanup

commit d1dc5dd34426d7ce5f92ed9408701c8dae6cfe57
Author: thesheepcat <68336151+thesheepcat@users.noreply.github.com>
Date: Tue Sep 10 00:59:36 2024 +0200

Simple wRPC client example (#550)

* simple client example created
* fine-tuning on simple wRPC client example (with comments)
* code fixed after Aspect's suggestions
* empty lines cleanup

commit 5b9c3cf95718b0adba9baac80c531029c7815249
Author: aspect
Date: Tue Sep 10 00:57:22 2024 +0300

Fix CLI showing incorrect URL on connect + add disconnect() before connect() (#549)

* wRPC: update descriptor with URL supplied in connect options
* cli: present public node connection warning only once
* cli: update messaging
* bump version to 0.14.7

commit 7271509d7b1d82c695921c2a46d2233febdc1bce
Author: Elichai Turkel
Date: Tue Sep 10 00:33:42 2024 +0300

Replace openssl with rustls + ring + webpki_roots (#547)

* Remove openssl and replace with rustls + ring + webpki_roots
* Removed init calls
* Removed unused imports
* Add rustls to network stack
---------
Co-authored-by: starkbamse <139136798+starkbamse@users.noreply.github.com>
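For a migration like #547 above, the client-side TLS wiring typically reduces to a few lines. A minimal sketch assuming rustls 0.23 (with the `ring` feature) and webpki-roots 0.26 as dependencies; this is illustrative, not the exact setup used in the tree:

    use std::sync::Arc;

    // Build a rustls client config trusting the Mozilla root set shipped by
    // webpki-roots, with ring as the crypto provider (no openssl involved).
    fn tls_client_config() -> Arc<rustls::ClientConfig> {
        // Install ring as the process-default provider; ignore the error if
        // another provider was already installed elsewhere.
        let _ = rustls::crypto::ring::default_provider().install_default();

        let root_store = rustls::RootCertStore {
            roots: webpki_roots::TLS_SERVER_ROOTS.iter().cloned().collect(),
        };
        let config = rustls::ClientConfig::builder()
            .with_root_certificates(root_store)
            .with_no_client_auth();
        Arc::new(config)
    }

Statically bundling webpki roots also removes the dependency on a system certificate store, which fits the portable static binaries this patch moves toward.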
commit 27cef9e06d4fe563017643cbec4843a24e383448
Author: aspect
Date: Mon Sep 9 15:15:40 2024 +0300

WIP: Updates for 15-rc3 (#546)

* update unit tests to unwrap mass to u64::max
* update WASM mass APIs to be more clear with the data they produce
* add fingerprint() fn to PublicKey
* cleanup
* fix recursive mutex in From<&Transaction> for RpcTransaction
* update rust-version to 1.81.0
* generator: fix incorrect change output value info triggering a sanity-check assert!

commit afbcf9e473acfe238b43785cab52f09387307588
Author: starkbamse <139136798+starkbamse@users.noreply.github.com>
Date: Fri Sep 6 05:40:53 2024 +0200

Change directory back to repo root & Fix Rust v1.81 lints (#545)

* Change directory back to repo root after building toolchain
* Clippy
* Update crypto/txscript/src/caches.rs (with review suggestions)
* rename `is_none_or` -> `is_none_or_ex` to avoid conflict with future std
* remove `use std::mem::size_of` wherever possible (added to std prelude recently)
---------
Co-authored-by: Maxim <59533214+biryukovmaxim@users.noreply.github.com>
Co-authored-by: Michael Sutton

commit 06a874f4ee174d056d0c5c3d11ba1c3be545b8f4
Author: starkbamse <139136798+starkbamse@users.noreply.github.com>
Date: Thu Sep 5 18:51:04 2024 +0200

Deprecate zigbuild and glibc in favor of static musl binaries for Linux builds. (#541)

* CTNG config file, musl build instructions: crosstool-ng configuration file for quick reproducible builds on musl, plus instructions for how to build RK on musl.
* Test
* Test crosstools install
* Cache ct-ng build
* Update ci.yaml (repeated over some two dozen iterations, including "Fix error in command" and "Comments, naming")
* Merge new musl build with old CI, release builds: merges the old CI script with the new musl build; update deploy.yaml to use the updated musl toolchain to build the musl target for Linux.
* Move to workspace
* Delete musl-build.md
* Lock to ctng version 1.26
* Checkout fix
* Revert master change
* Indentation
* Revert "Indentation" (reverts commit 6a7e6c094052f1b5848d1e3e9154f4f0164fad05)
* Revert "Revert master change" (reverts commit 1a047e46863ca9383eea169fd2665a333c9d5431)
* Update ci.yaml
* Force mimalloc
* Compress into script
* Fix typo
* Replace bloated config file
* Source script
* Revert vendor
* Update build.sh and defconfig (repeated iterations)
* Delete defconfig
* Create defconfig
* Deprecate config, use default preset
* Add preset hash logic in script
* Move preset hash update after openssl build
* Use openssl crate
* Update exports, cache config
* Remove spaces in export command
* Update names (should not trigger cache)
* Move source preset
* CD before preset
* Add comment (adds comment and should invalidate cache)

commit b04092e41e3c86c9bf7d7f7733eaabd563131eb7
Author: aspect
Date: Thu Sep 5 18:00:24 2024 +0300

add proxy limit field to sysinfo (#544)

commit f866dfad16eb54d67d36a127435d81fa00b39680
Author: Michael Sutton
Date: Thu Sep 5 14:04:16 2024 +0300

Various miscellaneous changes towards 0.15.1 RC2 (#543)

* infrequent logs should be debug
* cleanup some todos
* when a network starts, genesis has a body, so there's no need for a special exception
* remove unneeded method and add an error just in case it is added in the future
* count and log chain disqualified blocks
* count and log mempool evictions
* bump version to 0.14.5

commit 7cdabb4cdc76bdc293e3c879b37f582a6e4006bc
Author: aspect
Date: Mon Sep 2 23:54:04 2024 +0300

Refactoring of mass calculator utilities (#538)

* rename verbose data mass to compute_mass
* fix TS interface verbose transaction data property name
* relocate consensus core processes MassCalculator to consensus core
* WASM fix ECDSA pubkey generation
* error variant related to ECDSA from PublicKey creation
* refactor client mass APIs, propagate minimum_signatures via pending tx; rename mass calc functions to `unsigned` for clarity.
* introduce change index to PTX
* cleanup
* fix missing transfer of client Transaction mass to RpcTransaction
* Introduce IRawBlock and IRawHeader TS types used in GetBlockTemplateResponse and SubmitBlockRequest
* fix docs
* Refactor mass calculator and extract a global storage mass fn which operates on iters + use from wallet mass calculator
---------
Co-authored-by: Michael Sutton

commit 68c298f1048bd7e4a5c1cac2387cf7e431378b23
Author: Michael Sutton
Date: Mon Sep 2 08:15:46 2024 +0300

adjust mass per block by the specific mass of the outlier transaction and not by the avg mass (#537)

commit 864aaf674023e0f076e44b08d9f9682a4324afa7
Author: Michael Sutton
Date: Sun Sep 1 20:06:42 2024 +0300

Transaction pool -- fix rare cases where byte size tracking was inaccurate (#535)

* remove crate-level mut access to inner mempool tx and fix byte size tracking
* another fix to the same problem: only update_revalidated_transaction if the validation result is ok; otherwise we remove the tx anyway, so there is no point updating
* Apply suggestions from code review
Co-authored-by: Maxim <59533214+biryukovmaxim@users.noreply.github.com>
* debug log `other`
---------
Co-authored-by: Maxim <59533214+biryukovmaxim@users.noreply.github.com>

commit c839a9d3f7d3da333d9ea8688dd1246ca3294596
Author: Michael Sutton
Date: Wed Aug 28 03:43:54 2024 +0300

Bump to version 0.14.3 (#531)

* expiry is 24h, so scan can be every 60s
* apply ram scale-down to mempool size limit
* bump to v0.14.3 (0.14.2 was already used for the unified tn11 unofficial rc)

commit 63e2ab661a8b60f0291cea7df3e58a4d4d296dd4
Author: Ori Newman
Date: Tue Aug 27 20:55:32 2024 +0300

Mempool tweaks (#524)

* Some mempool tweaks
* Change mempool tx eviction policy
* Add test_evict
* Some fixes
* clippy
* move to estimated size
* remove estimated_size field
* remove clippy allow
* Fix clippy warnings
* Mempool evict policy -- a few fixes (#7)
* comments and logs
* validate in context before executing rbf + note
* use a dedicated mempool_estimated_bytes fn
* fix redeemers check
* make sure we found enough space (not sure about this; might be better to remove the assert in the calling function and turn it into an if)
* reorganize checks
* include tx itself in redeemers
* Add comment to test_evict
* Add case to test_evict
* Remove explicit check for too-big transactions
* Raise DEFAULT_MEMPOOL_SIZE_LIMIT to 1GB
---------
Co-authored-by: Michael Sutton
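The eviction policy change in #524 above -- track an estimated byte size and evict lowest-feerate transactions to make room -- in toy form; the names and details here are illustrative assumptions, not the mempool's actual code:

    use std::collections::BTreeMap;

    // Toy mempool that tracks an estimated byte size and, when over the
    // limit, evicts the lowest-feerate residents first. This is the general
    // shape of the policy, not the real implementation.
    struct ToyMempool {
        // key: (feerate, tx id) so entries have a total order by feerate
        by_feerate: BTreeMap<(u64, u64), usize>, // value: estimated tx bytes
        estimated_bytes: usize,
        size_limit: usize, // e.g. the raised 1 GB default
    }

    impl ToyMempool {
        fn insert(&mut self, tx_id: u64, feerate: u64, bytes: usize) -> bool {
            // Make room by evicting cheaper txs; refuse if that would
            // require evicting a tx paying at least as much as the newcomer.
            while self.estimated_bytes + bytes > self.size_limit {
                match self.by_feerate.iter().next().map(|(k, v)| (*k, *v)) {
                    Some((key, evicted_bytes)) if key.0 < feerate => {
                        self.by_feerate.remove(&key);
                        self.estimated_bytes -= evicted_bytes;
                    }
                    _ => return false, // cannot make room cheaply enough
                }
            }
            self.by_feerate.insert((feerate, tx_id), bytes);
            self.estimated_bytes += bytes;
            true
        }
    }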
commit 2306592af9f9900c64890a6db0e5166fa925b9ff
Author: KaffinPX <73744616+KaffinPX@users.noreply.github.com>
Date: Tue Aug 27 02:48:00 2024 +0300

get_current_block_color RPC utility (#528)

* getCurrentBlockColor algorithm and RPC functions
* Add a small comment over RPC test
* Move get_current_block_color to consensus and apply standard sorting
* Apply msutton's suggestions except the block check
* Remove unneeded return and format
* Variable name consistency
* Check for block existence on get_current_block_color
* Add extra DAG order checks to ensure about children
* includes: 1. stylistic changes using ? 2. `is_dag_ancestor_of(a, b)` is different than `!is_dag_ancestor_of(b, a)` -- they are not negations of each other, because there's also the anticone
* 1. bug fix: hash -> child 2. make store calls only where they are actually used (within the if)
* style: 1. use struct unfolding syntax, 2. use a name such as descendant which reflects the relation to `hash`
* important note
* Fix Omega compatibility issues
* Remove Borsh derivations
* Fix gRPC message codes
* Fix gRPC getCurrentBlockColorResponse
* improve tests
---------
Co-authored-by: Michael Sutton
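The review note in #528 above is worth spelling out: in a DAG two blocks can also be in each other's anticone, so the negation of one ancestry test says nothing about the other direction. A self-contained illustration (a naive walk, not the reachability-index method real consensus code uses):

    use std::collections::{HashMap, HashSet};

    // In a DAG, blocks a and b are in exactly one of three relations:
    // a reaches b, b reaches a, or neither reaches the other (anticone).
    // Hence !is_dag_ancestor_of(b, a) does NOT imply is_dag_ancestor_of(a, b).
    fn is_dag_ancestor_of(parents: &HashMap<u32, Vec<u32>>, a: u32, b: u32) -> bool {
        // Walk from b back toward genesis looking for a.
        let mut stack = vec![b];
        let mut seen = HashSet::new();
        while let Some(cur) = stack.pop() {
            if cur == a {
                return true;
            }
            for &p in parents.get(&cur).into_iter().flatten() {
                if seen.insert(p) {
                    stack.push(p);
                }
            }
        }
        false
    }

    fn main() {
        // Diamond DAG: 0 <- {1, 2} <- 3. Blocks 1 and 2 are in each other's anticone.
        let parents = HashMap::from([(1, vec![0]), (2, vec![0]), (3, vec![1, 2])]);
        assert!(is_dag_ancestor_of(&parents, 0, 3));
        assert!(!is_dag_ancestor_of(&parents, 1, 2));
        assert!(!is_dag_ancestor_of(&parents, 2, 1)); // neither direction holds
    }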
commit 1c1a6927d20042953f0389b1089720e515fe74e5
Author: aspect
Date: Mon Aug 26 22:52:31 2024 +0300

Cumulative PR - omega branch (wRPC v2, integrations, typescript-v2, resolver v2) (#506)

* support numeric interface (ip) arg without port in --rpclisten-borsh or --rpclisten-json
* isActive for UtxoProcessor and UtxoContext
* Script utility functions + WASM changelog update
* versioned serialization for wRPC
* spelling
* Refactorize State into PoW (#43)
* Add fromRaw with optional target_bits
* Upload builds as GitHub Artifact
* try moving calculateTarget into PoW class as a static function
* Use FromHex trait
* Make PoW constructor accept IHeader & refactorize some parts
* Lint
* TransactionDataT and BindingT (#44)
* borsh update to 1.5.1
* fix returning receive addr from bindings change addr fn.
* lints
* update WASM changelog
* fix WASM module reference in examples
* migrate lints.clippy to workspace and inherit this in the relevant crates
* Add provisional fields for storage metrics (db size)
* fix file creation timestamp issue on some ext4 file systems (updated via workflow-store::fs)
* bigint values in TransactionRecord (#48)
* change address decoding panics to errors
* update error messaging
* Update for bind/listen compatibility with WRS
* range support added for transactions pagination (#49)
* cargo fmt/clippy
* lints
* WRS 0.13.0
* wallet ergonomics helper functions
* WRS 0.13.1
* WRS 0.13.1 lock
* WRS 0.13.2
* kaspa-wallet (cli) updates (using latest resolver APIs + guide cleanup)
* range support for indexdb store load_range function (#52)
* add balance info to account descriptors, additional wallet connectivity utilities and connect api method changes, export wallet utils in wallet prelude
* Improve ConnectRequest builder interface
* Allow UtxoEntryReference to be re-integrated from a flat object structure.
* indexdb data maintenance: timestamp fix (#53)
* removing unused
* Fix incorrect balance when sending to self
* fix child_number typo
* allow setting custom transaction maturity periods
* fix missing renaming for record.value
* fix typedoc references
* pre-borsh-1-5-1 compat
* testing typedoc
* lock typedoc to 0.25.8
* disable typedoc treatWarningsAsErrors
* deps
* WIP SPK WASM
* Cargo.lock
* SPK raw object cast + fix return of ISerializableTransaction
* fix le to be when parsing SPK hex version
* remove string version parsing from SPK
* update WASM SDK changelog
* incorrect balance issue (#55)
* TransactionInput signature_script as Option, add associated TS types
* restructure PSKT + WASM scaffolding (WIP)
* wallet guard implementation to block concurrent async access to account operations
* tx serialization example
* change struct serialization version fields from u32 to u16
* WIP - decoupling RPC type aliases (#45)
* Provisional RpcConnection propagation via RpcApi methods (#46)
* lints
* change api version handling affecting get_server_info_call
* Wallet watch-only accounts (#59)
* Prints extended public keys on export mnemonic command (feature of go kaspawallet).
* Watch-only account implementation for bip32 and multisig kinds of accounts with new command: account import watchonly.
* Refactor code for fewer variables.
* Patch import type for command.
* CLI support for watch-only accounts in select account with args function.
* Function sig_op_count equals implemented.
* Helper function in wallet to format XPUB according to network. Converter NetworkId for Prefix. BIP32-Watch account renamed from WatchOnly, multisig feature removed. Multisig account import as watch-only command added.
* cli watch-only header in list and select.
* Resolve merge.
* update resolver to use v2 API
* change default resolver subdomains
* resolver v2 updates
* Refactorize some addresses and Script related parts (#61)
* A ScriptBuilder example in TypeScript
* addOps Opcodes are now BinaryT
* Move txscript bindings to relevant folders
* Sort lines of deps and imports
* Lint
* fix wasm subscribe_daa_score
* expose native RpcClient multiplexer in KaspaRpcClient
* WIP (local wRPC)
* break down the wRPC serialization trait into two (ser/de); improve future compatibility patterns
* get_connections_call() RPC method + provisional metrics dictionary
* change get_connections() to return the actual value instead of a Response struct.
* priorityEntries implementation for tx generator
* fix transaction WASM interface types affecting some function returns
* input and output types on transactions (WASM)
* rpc caps
* update client resolver resolution
* GetSystemInfo RPC call
* make priorityEntries optional in the TS interface definition
* merge cli-resolver
* remove resolver crate from workspace (move to https://github.com/aspectron/kaspa-resolver)
* WRS 0.14.0
* fix merge mishap
* refactor systeminfo + update resolver target generation
* Custom input signatureScript ability (#69)
* Prototype of custom sighash operations
* Experimental
* Add SighashType enum and option on SignInput
* Format and a small fix
* Clippy
* hex view for ScriptBuilder (#67)
* add git_hash to system_id
* wip
* refactor git head fetch to use build.rs
* comment
* split utils/sysinfo into utils/git, refactor utils/build.rs to run git to obtain hashes (in addition to file check)
* using WalletGuard, account guard (#73): 1) private context issue on legacy accounts 2) optional url option handling on rpc client connect method 3) using `WalletGuard` type instead of `AsyncMutexGuard`
* add short hash to sysinfo, return short hash in GetSystemInfo
* add contributor DNS seeders (gerri and H@H)
* Update phrase.rs (#74)
* Base implementation for PSKB and usage in core account with generator wrapper (#64), stream handling
* prune test file
* Converters for transaction and populated transaction.
* Optional signature import in PSKT conversion.
* PSKB wallet cli commands.
* More PSKB wallet cli commands for custom script locks.
* Serialization test case
* Review patches, cli script debug command added.
* Doc about fee per transaction for script unlocking UTXOs
* Parameter changed to priority_fee_sompi_per_transaction, revert function renaming.
* Error handling
* fix missing RPC refs
* Update resolver config (WIP)
* add version to GetSystemInfoResponse
* fix git version handling
* update client-side resolver properties to match current structs
* update resolvers
* fix kaspa-utils/build.rs to always produce git-related env vars.
* add git commit hash to WASM32 SDK artifacts during CI build
* fix WASM32 CI build (testing)
* fix the default url handling in wRPC client
* Key attributes (make XPrv and XPub inspectable) (#77)
* getters for XPrv and XPub attributes
* fmt
* post merge fixes
* Merge RBF (#80)
* Replace by fee on mempool (#499)
* Replace by fee on mempool with tests
* Add a custom RemovalReason -- ReplacedByFee
* Let `MinerManager` handle replace by fee (RBF) for RPC and P2P
* Refines success conditions and process of RBF policies
* Add an RPC `submit_transaction_replacement` method
* fix fmt
* Fix CLI Build WASM32 error
* Avoid breaking wRPC
* Extend some test coverage to all priority, orphan & RBF policy combinations
* Let RBF fail early or at least before checking transaction scripts in consensus
* Cleaning
* More cleaning
* Use contextual instead of compute mass in RBF eviction rule
* Avoid collision with another PR
* Avoid collision with another PR (2)
* Extended test coverage of RBF
* Extended test coverage of RBF (2)
* Rename `TransactionBatchValidationArgs` to `TransactionValidationBatchArgs`
* Add comments
* Assert instead of condition
* Add an `RbfPolicy` parameter to mining manager tx validate_and_insert_... fns
* Infer RBF policy from unorphaned transaction
* Apply the RBF policy to all orphan-related cases
* In Rbf allowed mode, check feerate threshold vs all double spends (i.e., compare to the max)
* Minor hashset optimization
* Rename: fee_per_mass -> feerate
* Use rbf policy arg for post processing step as well
* Renames and comments
* Relaxation: fail gracefully if rbf replaced tx is missing (also fixes an edge case where mempool duplication resulted in this scenario)
* Tx id is appended by the caller anyway
---------
Co-authored-by: Tiram <18632023+tiram88@users.noreply.github.com>
Co-authored-by: Michael Sutton
* post merge fixes
---------
Co-authored-by: KaffinPX <73744616+KaffinPX@users.noreply.github.com>
Co-authored-by: Tiram <18632023+tiram88@users.noreply.github.com>
Co-authored-by: Michael Sutton
* createInputSignature() utility function (#79)
* ``signTransactionInput`` and move sign_input to its proper location
* Fix typedoc warnings left from old PR
* createInputSignature
* Update docs for ConsensusParams (WASM mass calc)
* fmt
* bump wRPC
* wrs 0.15.0
* replace Uuid.as_ref() with as_bytes()
* assign RpcApiOps variants numerical values
* cleanup
* Remove WASM32 mass calculator + change createTransaction() signature (#81)
* Kip9 updates to WASM/wallet framework mass calc (#66)
* WIP
* update kip9 processing in WASM mass calculator
* XPrv.toPrivateKey support
* replace lazy_static with OnceLock
* remove NetworkParams Inner
* make signatureScript optional on ITransactionInput (WASM32)
* WIP mass calc (WASM32)
* remove WASM32 mass calc, replace with dedicated functions
* use OnceCell for NetworkParams (wallet-core)
* Update changelog
* fmt
---------
Co-authored-by: Surinder Singh Matoo
* change OnceCell to LazyLock in wallet-core utxo settings
* WASM: update signTransaction() signature
* fix TS types and method names
* lints
* split GetConnections counter into separate clients and peers variables
* fix missing version in GetConnections method
* Adding type conversion. (#76)
* fmt
* refactor kaspa-metrics to expose some internal methods (needed for external processing).
* Word count (#83)
* Update phrase.rs
* private context issue for importing legacy wallet
* account filter updated for calculating account_index
* gen1 decrypt_mnemonic updated for error unwrapping
* adding resolver tls option
* cleanup
* Improve input signature capability (#85)
* ``signTransactionInput`` and move sign_input to its proper location
* Fix typedoc warnings left from old PR
* createInputSignature
* Fix createInputSignature and improve PendingTransaction inputs DX
* Format
* A small Omega change applied to existing code
* Pass reference of transaction in createInputSignature
* fix WASM32 PSKT function names
* refactor PSKB as a type wrapper + update serialization (#86)
* Cleanup of unused JSON test file for PSKB and comments (#87)
* Remove PSKB json test file.
* Remove/change old PSKB comments and commented-out inclusions.
* PSKB+PSKT merge with omega branch (#82)
* TransactionInput signature_script as Option, add associated TS types
* restructure PSKT + WASM scaffolding (WIP)
* Base implementation for PSKB and usage in core account with generator wrapper (#64), stream handling
* prune test file
* Converters for transaction and populated transaction.
* Optional signature import in PSKT conversion.
* PSKB wallet cli commands.
* More PSKB wallet cli commands for custom script locks.
* Serialization test case
* Review patches, cli script debug command added.
* Doc about fee per transaction for script unlocking UTXOs
* Parameter changed to priority_fee_sompi_per_transaction, revert function renaming.
* Error handling
* Adding type conversion. (#76)
* fmt
* fix WASM32 PSKT function names
* refactor PSKB as a type wrapper + update serialization (#86)
* Cleanup of unused JSON test file for PSKB and comments (#87)
---------
Co-authored-by: 1bananagirl <168954040+1bananagirl@users.noreply.github.com>
* extra newline char removed (#89)
* lock issue on wallet creation (#91)
* wasm cast refs
* resolver updates
* CLI review: import cli watch-only changed to watch, PSKB parse view added (#92)
* CLI - Import commands for watch-only accounts changed to: account watch bip32 and account watch multisig.
* CLI - PSKB parse view added next to debug view, showing input/output addresses and amounts. PSKT finalized check moved from debug view to parse view. Selected account required in commands only if needed.
* WASM RBF (RPC)
* WASM FeeEstimate (RPC)
* hex encoding for kaspa_utils::SystemInfo
* Some symmetry and type fixes (#93)
* UtxoEntry typing fix and isometry with UtxoEntryReference
* Fix type of IUtxoProcessorEvent
* Fix interface typings of UtxoProcessor
* Remove UtxoProcessorEventData and improve UtxoProcessor event
* Remove unneeded overwrite
* Clippy and a small mistake fix
* Note: Clippy can cause fmt issues :nerd:
* update WASM GeneratorSettings::priorityEntries? to accept UtxoEntryReference[]
* WASM32 - update resolver casting
* cleanup
* WASM32: remove no-longer-used WAPI account module
* introduce new rpc header/block types for BBT and SB (#95)
* remove unneeded clone
* WASM - Update types for Mnemonic::random()
* WASM update deprecated methods in web-sys
* Add bip32 Mnemonic class to kaspa-wasm-sdk-keys build package
* misc dependency updates
* Introduce profile data to GetConnections RPC method
* WASM update TS declarations for wallet events
* fix WASM sdk submitTransaction API (#96)
* Add custom Debug to GetSystemInfoResponse
* Add HexString type to ITransactionOutput::scriptPublicKey
* fix camelCase on RpcTransactionOutpoint
---------
Co-authored-by: KaffinPX <73744616+KaffinPX@users.noreply.github.com>
Co-authored-by: surinder singh
Co-authored-by: 1bananagirl <168954040+1bananagirl@users.noreply.github.com>
Co-authored-by: Tiram <18632023+tiram88@users.noreply.github.com>
Co-authored-by: Michael Sutton
Co-authored-by: IgorKhomenko <70977170+IgorKhomenko@users.noreply.github.com>

commit b0f07eff448f4e34193f3b7ace1a14d4b16b48df
Author: Michael Sutton
Date: Mon Aug 26 18:05:54 2024 +0300

Query all DNS seeders if missing many connections (#530)

* refactor into `dns_seed_single` (no logical change yet)
* impl dns seed many
* rename

commit 63e4863eb2329f253d7a796dec499a0d8789b644
Author: Michael Sutton
Date: Mon Aug 26 13:58:00 2024 +0300

A few optimizations related to multi-level relations (#527)

* parents_builder: optimize the common case for high levels
* delete level relations for all levels below the affiliated proof level for this block
* keep all multi-level parents of the pruning-point-and-anticone roots set
* drop prune guard where possible
* minor
* practically impossible to reach this level (requires a pow hash which is all zeros), but for the sake of good order it should be this way
* comments
* fix `get_roots_of_set` for the ascending chain case + test
* avoid quadratic roots search + rely on header cache
* rollback `get_roots_of_set`

commit 8e93437566853df08a10bedeafc9f1872db2550c
Author: Michael Sutton
Date: Thu Aug 22 18:56:55 2024 +0300

TN11 bug fix: activate mass hashing when modifying a cached block template (#476)

* fix transaction hash merkle root calculation in modify block template to consider storage mass activation
* avoid similar future errors: expose only a single calc_hash_merkle_root function with an `include_mass_field` arg and update all test usages explicitly
* move subnet checks to inner location
* reorganize cache mem estimators

commit 261a750467bc9d9cd46db5629ac7d3ce45d1bb83
Author: Michael Sutton
Date: Thu Aug 22 12:29:24 2024 +0300

Semaphore tracing feature (for tracing prune readers vs writers time) (#526)

* semaphore trace + feature
* comments
* unrelated: avoid mass fee mult due to possible edge cases
* style: refactor code, move tracing atomics to TraceInner structure (#5)
* style: fmt
* final refactor
---------
Co-authored-by: Maxim <59533214+biryukovmaxim@users.noreply.github.com>

commit 866f62f16d7232c0bb5de3a43daf1b76ff8eda8d
Author: Maxim <59533214+biryukovmaxim@users.noreply.github.com>
Date: Wed Aug 21 21:59:47 2024 +0400

feat: implement next_block_template_feerate (#523)

* feat: implement next_block_template_feerate
* add tests, fix feerate stats calculation
* fix comment
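The lesson recorded in #476 above -- expose a single calc_hash_merkle_root with an explicit `include_mass_field` argument rather than two near-duplicate functions -- shown as an API-shape sketch. DefaultHasher stands in for the real transaction hashing, so only the signature shape is meaningful here:

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    struct Tx {
        payload: Vec<u8>,
        mass: u64,
    }

    // One entry point with an explicit activation flag, so a call site can
    // never silently pick the pre-activation variant.
    fn hash_tx(tx: &Tx, include_mass_field: bool) -> u64 {
        let mut h = DefaultHasher::new();
        tx.payload.hash(&mut h);
        if include_mass_field {
            tx.mass.hash(&mut h); // storage-mass activation alters tx hashing
        }
        h.finish()
    }

    fn calc_hash_merkle_root(txs: &[Tx], include_mass_field: bool) -> u64 {
        let mut layer: Vec<u64> = txs.iter().map(|tx| hash_tx(tx, include_mass_field)).collect();
        while layer.len() > 1 {
            layer = layer
                .chunks(2)
                .map(|pair| {
                    let mut h = DefaultHasher::new();
                    pair.hash(&mut h); // an odd tail hashes alone
                    h.finish()
                })
                .collect();
        }
        layer.first().copied().unwrap_or(0)
    }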
commit e6e0f5857b8e2d8745aaec60759374f9d5229f73
Author: starkbamse <139136798+starkbamse@users.noreply.github.com>
Date: Mon Aug 19 10:57:24 2024 +0200

Upgrade mimalloc to 0.1.43, incorporating the fix by the Microsoft team.

commit 958bc64c2cb9e70ccdc993d83c5e6dd928a7dd50
Author: Michael Sutton
Date: Fri Aug 16 16:32:41 2024 +0300

O(k log n) mempool transaction sampler + Fee estimation API (#513)

* initial fee estimation logic + a python notebook detailing a challenge
* initial usage of btreeset for ready transactions
* initial frontier + sampling logic
* mempool sampling benchmark (wip)
* Use arc tx rather than tx id in order to save the indirect map access as well as reduce frontier sizes + filter all the top bucket and not only selected ones
* Modify mempool bmk and simnet settings
* Temp: rpc message initial
* Move sample to rand utils
* Fix top bucket sampling to match analysis
* Add outliers to the bmk
* sample comments and doc
* use b plus tree with argument customization in order to implement a highly-efficient O(k log n) one-shot mempool sampling
* todo
* keep a computed weight field
* Test feerate weight queries + an implied fix (change <= to <)
* temp remove warns
* 1. use inner BPlusTree in order to allow access to iterator as double ended 2. reduce collisions by removing the top element from the range if it was hit
* rename
* test btree rev iter
* clamp the query to the bounds (logically)
* use a larger tree for tests, add checks for clamped search bounds
* Add benchmarks for frontier insertions and removals
* add item removal to the queries test
* Important numeric stability improvement: use the visitor api to implement a prefix weight counter to be used for search narrowing
* test highly irregular sampling
* Implement initial selectors + order the code a bit
* Enhance and use the new selectors
* rename
* minor refactor
* minor optimizations etc
* increase default devnet prealloc amount to 100 tkas
* cleanup
* initial build_feerate_estimator
* todos
* minor
* Remove obsolete constant
* Restructure search tree methods into an encapsulated struct
* Rename module
* documentation and comments
* optimization: cmp with cached weight rather than compute feerate
* minor
* Finalize build fee estimator and add tests
* updated notebook
* fee estimator todos
* expose get_realtime_feerate_estimations from the mining manager
* min feerate from config
* sample_inplace doc
* test_total_mass_tracking
* test prefix weights
* test sequence selector
* fix rpc feerate structs + comment
* utils: expiring cache
* rpc core fee estimate call
* fee estimate verbose
* grpc fee estimate calls
* Benchmark worst-case collision cases + an optimization addressing these cases
* Expose SearchTree
* cli support (with @coderofstuff)
* addressing a few minor review comments
* feerate estimator - handle various edge cases (with @tiram88)
* one more test (with @tiram88)
* build_feerate_estimator - fix edge case of not trying the estimator without all frontier txs (+loop logic is more streamlined now)
* monitor feerate estimations (debug print every 10 secs)
* follow rpc naming conventions
* proto leave blank index range
* insert in correct abc location (keeping rest of the array as is for easier omega merge)
* fix comment to reflect the most updated final algo
* document feerate
* update notebook
* add an additional point to normal feerate buckets (between normal and low)
* enum order
* with 1 sec there are rare cases where mempool size does not change and we exit early
* final stuff
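The sampler in #513 draws k transactions with probability proportional to a feerate-derived weight in O(k log n) by maintaining subtree weight sums and descending them. The real implementation augments a B-plus tree with a prefix-weight visitor; the same idea in compact Fenwick-tree form, as an illustrative sketch only:

    // One-shot weighted sampling in O(k log n): a Fenwick tree of weight
    // sums maps a uniform draw to an item by bitwise descent; zeroing the
    // drawn weight makes the sampling without replacement.
    struct WeightTree {
        tree: Vec<f64>, // 1-based Fenwick array
    }

    impl WeightTree {
        fn new(weights: &[f64]) -> Self {
            let mut t = WeightTree { tree: vec![0.0; weights.len() + 1] };
            for (i, &w) in weights.iter().enumerate() {
                t.add(i, w);
            }
            t
        }

        fn add(&mut self, i: usize, delta: f64) {
            let mut j = i + 1;
            while j < self.tree.len() {
                self.tree[j] += delta;
                j += j & j.wrapping_neg();
            }
        }

        fn total(&self) -> f64 {
            let mut i = self.tree.len() - 1;
            let mut s = 0.0;
            while i > 0 {
                s += self.tree[i];
                i -= i & i.wrapping_neg();
            }
            s
        }

        // Largest index whose prefix sum is <= u owns the draw (0-based).
        fn descend(&self, mut u: f64) -> usize {
            let (mut pos, mut step) = (0, (self.tree.len() - 1).next_power_of_two());
            while step > 0 {
                let next = pos + step;
                if next < self.tree.len() && self.tree[next] <= u {
                    u -= self.tree[next];
                    pos = next;
                }
                step >>= 1;
            }
            pos
        }

        // Draw up to k distinct items, each proportional to its weight.
        // rand01 is assumed to return uniform values in [0, 1).
        fn sample(&mut self, k: usize, weights: &mut [f64], rand01: impl Fn() -> f64) -> Vec<usize> {
            let mut picked = Vec::with_capacity(k);
            for _ in 0..k {
                let total = self.total();
                if total <= 0.0 {
                    break; // frontier exhausted
                }
                let idx = self.descend(rand01() * total);
                picked.push(idx);
                self.add(idx, -weights[idx]); // without replacement
                weights[idx] = 0.0;
            }
            picked
        }
    }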
commit 6bf1c752812303221e2a43d947eb5bd6565bf80f
Author: Ori Newman
Date: Thu Aug 15 18:03:48 2024 +0300

Lazy load origin children (#518)

* Lazy load origin children
* remove redundant collect
* Delete level-0 relations for blocks which only belong to higher proof levels
* Comments
* Edit comment

commit 5ebd9fe28587d7c08e719359c475696a7e8c40cd
Author: 1bananagirl <168954040+1bananagirl@users.noreply.github.com>
Date: Wed Aug 7 13:47:14 2024 +0200

Add hint message for P2P reject reason block not found. (#512)

commit 9feb5dcb6af73aa111594801e9eeb9e14ba0c76f
Author: KaffinPX <73744616+KaffinPX@users.noreply.github.com>
Date: Fri Jul 26 16:46:59 2024 +0300

Add input signature_script checking to submitTransaction RPC (#479)

* Add input signature script checking to RPC
* Return multiple indices with the EmptySignatureScript error
* Apply suggestions
* Refactorize some parts of the code to apply suggestions
* Revert in-RPC changes and move error to in-consensus
* Apply Maxim's suggestions (match -> if)
* Simple forgotten lints
---------
Co-authored-by: Michael Sutton
Co-authored-by: aspect

commit 0feba1f843ee18888d9b2c14248c0cf41cc95045
Author: Maxim <59533214+biryukovmaxim@users.noreply.github.com>
Date: Fri Jul 26 16:48:44 2024 +0400

style: fix clippy (#510)

* style: fix clippy
* relax the restriction to make ci green =)

commit 56c19c995c096ca0b13651003b71c0ac6bb16ff4
Author: KaffinPX <73744616+KaffinPX@users.noreply.github.com>
Date: Tue Jul 23 01:43:35 2024 +0300

Replace by fee on mempool (#499)

* Replace by fee on mempool with tests
* Add a custom RemovalReason -- ReplacedByFee
* Let `MinerManager` handle replace by fee (RBF) for RPC and P2P
* Refines success conditions and process of RBF policies
* Add an RPC `submit_transaction_replacement` method
* fix fmt
* Fix CLI Build WASM32 error
* Avoid breaking wRPC
* Extend some test coverage to all priority, orphan & RBF policy combinations
* Let RBF fail early or at least before checking transaction scripts in consensus
* Cleaning
* More cleaning
* Use contextual instead of compute mass in RBF eviction rule
* Avoid collision with another PR
* Avoid collision with another PR (2)
* Extended test coverage of RBF
* Extended test coverage of RBF (2)
* Rename `TransactionBatchValidationArgs` to `TransactionValidationBatchArgs`
* Add comments
* Assert instead of condition
* Add an `RbfPolicy` parameter to mining manager tx validate_and_insert_... fns
* Infer RBF policy from unorphaned transaction
* Apply the RBF policy to all orphan-related cases
* In Rbf allowed mode, check feerate threshold vs all double spends (i.e., compare to the max)
* Minor hashset optimization
* Rename: fee_per_mass -> feerate
* Use rbf policy arg for post processing step as well
* Renames and comments
* Relaxation: fail gracefully if rbf replaced tx is missing (also fixes an edge case where mempool duplication resulted in this scenario)
* Tx id is appended by the caller anyway
---------
Co-authored-by: Tiram <18632023+tiram88@users.noreply.github.com>
Co-authored-by: Michael Sutton
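The RBF rule refined in #499 above -- in Rbf-allowed mode, compare the replacement's feerate against the maximum over all double-spending transactions, using contextual mass -- as a small predicate sketch with stand-in types (not the mempool's actual code):

    // A replacement must pay a strictly higher feerate than EVERY
    // transaction it would displace, i.e. than the max feerate among all
    // current double-spends of its outpoints.
    struct MempoolTx {
        fee: u64,
        contextual_mass: u64, // contextual, not compute, mass per the PR
    }

    impl MempoolTx {
        fn feerate(&self) -> f64 {
            self.fee as f64 / self.contextual_mass.max(1) as f64
        }
    }

    fn rbf_allowed(replacement: &MempoolTx, double_spends: &[MempoolTx]) -> bool {
        // Fold to the max so a single underpaid conflict cannot be hidden
        // behind a better-paying one.
        let max_existing = double_spends
            .iter()
            .map(MempoolTx::feerate)
            .fold(f64::NEG_INFINITY, f64::max);
        double_spends.is_empty() || replacement.feerate() > max_existing
    }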
commit 6a56461ff1249e11e3124090253ca0bf128f7189
Author: Maxim <59533214+biryukovmaxim@users.noreply.github.com>
Date: Tue Jun 18 18:09:29 2024 +0300

Implement PSKT (Partially Signed Kaspa Transaction) (#481)

* initial support of pskt: supported roles: creator, constructor, updater, signer
* add builder
* handle combine errors
* finalize
* extractor
* chore: typo
* style: fmt
* expose txid to global
* chore: change version
* feat: serde for optional bytes
* feat: impl (de)serialization
* add example, fixes
* style: clippy
* rollback unrelated changes
* psbt -> pskt
* refactor: avoid copy-paste by using recursion
* docs: add description of roles
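The role design in #481 (creator, constructor, updater, signer, combiner, finalizer, extractor) is a typestate pattern: each role is a type parameter and a transition consumes the document, so a later role cannot reuse earlier-role mutations. A minimal sketch of the idea with invented marker types, not the crate's actual definitions:

    use std::marker::PhantomData;

    // Each role is a zero-sized marker; transitions consume the PSKT.
    struct Creator;
    struct Constructor;
    struct Signer;

    struct Pskt<Role> {
        inputs: Vec<String>, // stand-in for real input/output/global sections
        _role: PhantomData<Role>,
    }

    impl Pskt<Creator> {
        fn new() -> Self {
            Pskt { inputs: Vec::new(), _role: PhantomData }
        }
        fn constructor(self) -> Pskt<Constructor> {
            Pskt { inputs: self.inputs, _role: PhantomData }
        }
    }

    impl Pskt<Constructor> {
        fn input(mut self, outpoint: &str) -> Self {
            self.inputs.push(outpoint.to_string()); // only constructors add inputs
            self
        }
        fn signer(self) -> Pskt<Signer> {
            Pskt { inputs: self.inputs, _role: PhantomData }
        }
    }

    impl Pskt<Signer> {
        fn sign(&mut self) { /* attach partial signatures here */ }
    }

    fn main() {
        let mut pskt = Pskt::<Creator>::new()
            .constructor()
            .input("txid:0")
            .signer();
        pskt.sign();
        // pskt.input("txid:1"); // would not compile: Signer has no `input`
    }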
commit a797e1ea324e67c509f70fd2b5f083abaf2cbd7c
Author: George Bogodukhov
Date: Mon Jun 17 15:50:16 2024 +1000

Add support for IP only for --rpclisten-borsh/json (#402) (#439)

* Add support for IP only for --rpclisten-borsh/json
* Fix check complaints
---
 .github/workflows/ci.yaml | 54 +-
 .github/workflows/deploy.yaml | 38 +-
 Cargo.lock | 3053 ++++++++---------
 Cargo.toml | 221 +-
 README.md | 99 +-
 cli/Cargo.toml | 6 +-
 cli/src/cli.rs | 73 +-
 cli/src/error.rs | 12 +
 cli/src/extensions/transaction.rs | 29 +-
 cli/src/imports.rs | 2 +-
 cli/src/modules/account.rs | 39 +-
 cli/src/modules/connect.rs | 29 +-
 cli/src/modules/details.rs | 12 +
 cli/src/modules/export.rs | 43 +-
 cli/src/modules/guide.txt | 46 +-
 cli/src/modules/history.rs | 14 +-
 cli/src/modules/mod.rs | 3 +-
 cli/src/modules/pskb.rs | 266 ++
 cli/src/modules/reload.rs | 6 +-
 cli/src/modules/rpc.rs | 111 +-
 cli/src/modules/send.rs | 2 +-
 cli/src/modules/settings.rs | 7 +-
 cli/src/modules/wallet.rs | 9 +-
 cli/src/wizards/account.rs | 59 +
 cli/src/wizards/wallet.rs | 25 +-
 components/addressmanager/Cargo.toml | 3 +-
 components/addressmanager/src/lib.rs | 17 +-
 .../src/stores/address_store.rs | 1 +
 components/connectionmanager/src/lib.rs | 79 +-
 components/consensusmanager/src/lib.rs | 3 +-
 components/consensusmanager/src/session.rs | 29 +-
 consensus/Cargo.toml | 1 +
 consensus/client/Cargo.toml | 4 +-
 consensus/client/src/error.rs | 2 +
 consensus/client/src/hash.rs | 7 +
 consensus/client/src/header.rs | 47 +-
 consensus/client/src/input.rs | 57 +-
 consensus/client/src/lib.rs | 34 +-
 consensus/client/src/outpoint.rs | 57 +-
 consensus/client/src/output.rs | 62 +-
 consensus/client/src/result.rs | 2 +
 consensus/client/src/serializable/mod.rs | 30 +-
 consensus/client/src/serializable/numeric.rs | 37 +-
 consensus/client/src/serializable/string.rs | 31 +-
 consensus/client/src/sign.rs | 18 +-
 consensus/client/src/transaction.rs | 140 +-
 consensus/client/src/utils.rs | 87 +
 consensus/client/src/utxo.rs | 91 +-
 consensus/client/src/vtx.rs | 35 -
 consensus/core/Cargo.toml | 5 +-
 consensus/core/src/api/args.rs | 47 +
 consensus/core/src/api/counters.rs | 4 +
 consensus/core/src/api/mod.rs | 37 +-
 consensus/core/src/api/stats.rs | 24 +-
 consensus/core/src/block.rs | 28 +-
 consensus/core/src/config/bps.rs | 2 +-
 consensus/core/src/config/genesis.rs | 2 +-
 consensus/core/src/config/params.rs | 15 +-
 consensus/core/src/errors/block.rs | 4 +
 consensus/core/src/errors/tx.rs | 12 +
 consensus/core/src/hashing/mod.rs | 2 +
 consensus/core/src/hashing/sighash_type.rs | 3 +-
 consensus/core/src/hashing/wasm.rs | 27 +
 consensus/core/src/header.rs | 13 +
 consensus/core/src/lib.rs | 6 +
 consensus/core/src/mass/mod.rs | 282 +-
 consensus/core/src/merkle.rs | 8 +-
 consensus/core/src/network.rs | 20 +-
 consensus/core/src/sign.rs | 19 +-
 consensus/core/src/subnets.rs | 55 +-
 consensus/core/src/tx.rs | 93 +-
 consensus/core/src/tx/script_public_key.rs | 37 +-
 consensus/core/src/utxo/utxo_diff.rs | 2 +-
 consensus/pow/Cargo.toml | 5 +-
 consensus/pow/src/wasm.rs | 76 +-
 consensus/src/consensus/mod.rs | 144 +-
 consensus/src/consensus/services.rs | 4 +-
 consensus/src/consensus/storage.rs | 2 +-
 consensus/src/consensus/test_consensus.rs | 2 +-
 consensus/src/model/services/reachability.rs | 9 +
 consensus/src/model/stores/acceptance_data.rs | 1 -
 .../src/model/stores/block_transactions.rs | 1 -
 consensus/src/model/stores/ghostdag.rs | 1 -
 consensus/src/model/stores/headers.rs | 5 +-
 consensus/src/model/stores/mod.rs | 7 +-
 consensus/src/model/stores/utxo_set.rs | 5 +-
 .../body_validation_in_context.rs | 23 +-
 .../body_validation_in_isolation.rs | 10 +-
 .../src/pipeline/body_processor/processor.rs | 7 +-
 .../pipeline/header_processor/processor.rs | 3 -
 consensus/src/pipeline/monitor.rs | 9 +-
 .../pipeline/pruning_processor/processor.rs | 78 +-
 .../pipeline/virtual_processor/processor.rs | 105 +-
 .../virtual_processor/test_block_builder.rs | 2 +-
 .../virtual_processor/utxo_validation.rs | 17 +-
 consensus/src/processes/coinbase.rs | 2 +-
 consensus/src/processes/ghostdag/protocol.rs | 2 +-
 consensus/src/processes/mass.rs | 256 --
 consensus/src/processes/mod.rs | 1 -
 consensus/src/processes/parents_builder.rs | 139 +-
 consensus/src/processes/pruning.rs | 2 +-
 .../src/processes/reachability/interval.rs | 4 +-
 .../src/processes/reachability/tests/mod.rs | 6 +
 consensus/src/processes/sync/mod.rs | 4 +-
 .../processes/transaction_validator/mod.rs | 2 +-
 .../transaction_validator_populated.rs | 46 +-
 .../tx_validation_in_isolation.rs | 15 +-
 consensus/src/processes/traversal_manager.rs | 19 +-
 consensus/wasm/Cargo.toml | 4 +-
 crypto/addresses/Cargo.toml | 4 +-
 crypto/addresses/src/bech32.rs | 7 +-
 crypto/addresses/src/lib.rs | 50 +-
 crypto/hashes/src/hashers.rs | 2 +-
 crypto/hashes/src/lib.rs | 7 +-
 crypto/muhash/Cargo.toml | 2 +
 crypto/muhash/fuzz/fuzz_targets/u3072.rs | 1 -
 crypto/muhash/src/u3072.rs | 4 +-
 crypto/txscript/Cargo.toml | 11 +
 crypto/txscript/src/caches.rs | 7 +-
 crypto/txscript/src/data_stack.rs | 1 -
 crypto/txscript/src/error.rs | 89 +
 crypto/txscript/src/lib.rs | 4 +
 crypto/txscript/src/opcodes/mod.rs | 4 +-
 crypto/txscript/src/result.rs | 1 +
 crypto/txscript/src/script_builder.rs | 13 +-
 crypto/txscript/src/script_class.rs | 1 +
 crypto/txscript/src/standard.rs | 42 +-
 crypto/txscript/src/wasm/builder.rs | 179 +
 crypto/txscript/src/wasm/mod.rs | 15 +
 .../txscript/src/wasm/opcodes.rs | 241 +-
 database/src/registry.rs | 4 +-
 indexes/utxoindex/src/core/errors.rs | 4 +-
 indexes/utxoindex/src/index.rs | 5 +-
 indexes/utxoindex/src/stores/indexed_utxos.rs | 4 +-
 indexes/utxoindex/src/update_container.rs | 4 +-
 kaspad/Cargo.toml | 5 +-
 kaspad/src/args.rs | 4 +-
 kaspad/src/daemon.rs | 28 +-
 math/src/uint.rs | 2 +-
 metrics/core/src/data.rs | 172 +-
 metrics/core/src/error.rs | 3 +
 metrics/core/src/lib.rs | 107 +-
 mining/Cargo.toml | 3 +-
 mining/benches/bench.rs | 222 +-
 mining/errors/src/mempool.rs | 18 +-
 mining/src/block_template/builder.rs | 27 +-
 mining/src/block_template/model/tx.rs | 3 +-
 mining/src/block_template/policy.rs | 6 +-
 mining/src/block_template/selector.rs | 82 +-
 mining/src/feerate/fee_estimation.ipynb | 496 +++
 mining/src/feerate/mod.rs | 231 ++
 mining/src/lib.rs | 28 +-
 mining/src/manager.rs | 442 ++-
 mining/src/manager_tests.rs | 899 +++--
 mining/src/mempool/config.rs | 38 +-
 mining/src/mempool/mod.rs | 87 +-
 mining/src/mempool/model/frontier.rs | 543 +++
 .../src/mempool/model/frontier/feerate_key.rs | 108 +
 .../src/mempool/model/frontier/search_tree.rs | 336 ++
 .../src/mempool/model/frontier/selectors.rs | 162 +
 mining/src/mempool/model/mod.rs | 1 +
 mining/src/mempool/model/orphan_pool.rs | 4 -
 mining/src/mempool/model/pool.rs | 2 -
 mining/src/mempool/model/transactions_pool.rs | 211 +-
 mining/src/mempool/model/tx.rs | 57 +-
 mining/src/mempool/model/utxo_set.rs | 32 +-
 .../populate_entries_and_try_validate.rs | 20 +-
 mining/src/mempool/remove_transaction.rs | 6 +-
 mining/src/mempool/replace_by_fee.rs | 149 +
 .../validate_and_insert_transaction.rs | 106 +-
 mining/src/model/candidate_tx.rs | 11 +-
 mining/src/model/mod.rs | 3 +-
 mining/src/model/tx_insert.rs | 14 +
 mining/src/monitor.rs | 14 +-
 mining/src/testutils/coinbase_mock.rs | 1 -
 mining/src/testutils/consensus_mock.rs | 34 +-
 musl-toolchain/build.sh | 96 +
 musl-toolchain/preset.sh | 4 +
 notify/Cargo.toml | 1 +
 notify/src/address/tracker.rs | 4 +-
 notify/src/notifier.rs | 4 +-
 notify/src/scope.rs | 146 +
 notify/src/subscription/mod.rs | 1 +
 protocol/flows/src/flow_context.rs | 40 +-
 protocol/flows/src/flowcontext/orphans.rs | 9 +-
 .../flows/src/flowcontext/transactions.rs | 2 +-
 protocol/flows/src/v5/txrelay/flow.rs | 4 +-
 protocol/p2p/build.rs | 2 +-
 protocol/p2p/src/common.rs | 4 +
 protocol/p2p/src/convert/net_address.rs | 5 +-
 protocol/p2p/src/core/connection_handler.rs | 9 +-
 protocol/p2p/src/echo.rs | 2 +-
 rothschild/src/main.rs | 102 +-
 rpc/core/Cargo.toml | 6 +-
 rpc/core/src/api/connection.rs | 11 +
 rpc/core/src/api/ctl.rs | 4 +
 rpc/core/src/api/mod.rs | 5 +
 rpc/core/src/api/notifications.rs | 102 +-
 rpc/core/src/api/ops.rs | 168 +-
 rpc/core/src/api/rpc.rs | 309 +-
 rpc/core/src/convert/block.rs | 49 +-
 rpc/core/src/convert/mod.rs | 4 +
 rpc/core/src/convert/notification.rs | 2 +
 rpc/core/src/convert/scope.rs | 2 +
 rpc/core/src/convert/tx.rs | 24 +-
 rpc/core/src/convert/utxo.rs | 8 +-
 rpc/core/src/error.rs | 15 +-
 rpc/core/src/lib.rs | 14 +
 rpc/core/src/model/address.rs | 42 +-
 rpc/core/src/model/block.rs | 120 +-
 rpc/core/src/model/feerate_estimate.rs | 109 +
 rpc/core/src/model/header.rs | 332 +-
 rpc/core/src/model/mempool.rs | 40 +-
 rpc/core/src/model/message.rs | 2564 +++++++++++++-
 rpc/core/src/model/mod.rs | 6 +
 rpc/core/src/model/tests.rs | 1332 +++++++
 rpc/core/src/model/tx.rs | 305 +-
 rpc/core/src/notify/mod.rs | 4 +
 rpc/core/src/wasm/convert.rs | 11 +-
 rpc/core/src/wasm/message.rs | 357 +-
 rpc/core/src/wasm/mod.rs | 2 +
 rpc/grpc/client/Cargo.toml | 1 +
 rpc/grpc/client/src/lib.rs | 15 +-
 rpc/grpc/client/src/route.rs | 4 +-
 rpc/grpc/core/build.rs | 2 +-
 rpc/grpc/core/proto/messages.proto | 16 +-
 rpc/grpc/core/proto/rpc.proto | 149 +-
 rpc/grpc/core/src/convert/block.rs | 19 +
 rpc/grpc/core/src/convert/feerate_estimate.rs | 66 +
 rpc/grpc/core/src/convert/header.rs | 85 +-
 rpc/grpc/core/src/convert/kaspad.rs | 12 +
 rpc/grpc/core/src/convert/message.rs | 169 +-
 rpc/grpc/core/src/convert/metrics.rs | 24 +
 rpc/grpc/core/src/convert/mod.rs | 1 +
 rpc/grpc/core/src/convert/tx.rs | 4 +-
 rpc/grpc/core/src/ops.rs | 6 +
 rpc/grpc/server/Cargo.toml | 1 +
 rpc/grpc/server/src/connection_handler.rs | 4 +-
 .../server/src/request_handler/factory.rs | 6 +
 rpc/grpc/server/src/tests/rpc_core_mock.rs | 192 +-
 rpc/macros/src/grpc/server.rs | 5 +-
 rpc/macros/src/lib.rs | 6 +
 rpc/macros/src/wrpc/client.rs | 11 +-
 rpc/macros/src/wrpc/mod.rs | 1 +
 rpc/macros/src/wrpc/server.rs | 10 +-
 rpc/macros/src/wrpc/test.rs | 60 +
 rpc/macros/src/wrpc/wasm.rs | 4 +-
 rpc/service/Cargo.toml | 2 +-
 rpc/service/src/converter/consensus.rs | 7 +-
 rpc/service/src/converter/feerate_estimate.rs | 49 +
 rpc/service/src/converter/mod.rs | 1 +
 rpc/service/src/service.rs | 300 +-
 rpc/wrpc/client/Cargo.toml | 4 +-
 rpc/wrpc/client/Resolvers.toml | 46 +-
 rpc/wrpc/client/src/client.rs | 84 +-
 rpc/wrpc/client/src/error.rs | 2 +
 rpc/wrpc/client/src/lib.rs | 16 +
 rpc/wrpc/client/src/node.rs | 10 +-
 rpc/wrpc/client/src/parse.rs | 2 +
 rpc/wrpc/client/src/prelude.rs | 2 +
 rpc/wrpc/client/src/resolver.rs | 189 +-
 rpc/wrpc/client/src/result.rs | 2 +
 rpc/wrpc/examples/simple_client/Cargo.toml | 18 +
 rpc/wrpc/examples/simple_client/src/main.rs | 104 +
 rpc/wrpc/proxy/src/main.rs | 4 +-
 rpc/wrpc/resolver/Cargo.toml | 41 -
 rpc/wrpc/resolver/src/args.rs | 54 -
 rpc/wrpc/resolver/src/connection.rs | 262 --
 rpc/wrpc/resolver/src/error.rs | 53 -
 rpc/wrpc/resolver/src/imports.rs | 28 -
 rpc/wrpc/resolver/src/log.rs | 44 -
 rpc/wrpc/resolver/src/main.rs | 41 -
 rpc/wrpc/resolver/src/monitor.rs | 241 -
 rpc/wrpc/resolver/src/node.rs | 75 -
 rpc/wrpc/resolver/src/panic.rs | 10 -
 rpc/wrpc/resolver/src/params.rs | 146 -
 rpc/wrpc/resolver/src/result.rs | 1 -
 rpc/wrpc/resolver/src/server.rs | 149 -
 rpc/wrpc/resolver/src/transport.rs | 8 -
 rpc/wrpc/server/Cargo.toml | 8 +-
 rpc/wrpc/server/src/address.rs | 41 +-
 rpc/wrpc/server/src/connection.rs | 5 +-
 rpc/wrpc/server/src/router.rs | 29 +-
 rpc/wrpc/server/src/service.rs | 14 +-
 rpc/wrpc/wasm/Cargo.toml | 5 +-
 rpc/wrpc/wasm/src/client.rs | 45 +-
 rpc/wrpc/wasm/src/lib.rs | 4 +
 rpc/wrpc/wasm/src/notify.rs | 4 +
 rpc/wrpc/wasm/src/resolver.rs | 39 +-
 simpa/Cargo.toml | 4 +-
 simpa/src/main.rs | 17 +-
 simpa/src/simulator/miner.rs | 7 +-
 testing/integration/src/common/utils.rs | 14 +-
 .../src/daemon_integration_tests.rs | 15 +-
 testing/integration/src/mempool_benchmarks.rs | 4 +-
 testing/integration/src/rpc_tests.rs | 308 +-
 testing/integration/src/tasks/block/miner.rs | 10 +-
 .../integration/src/tasks/block/submitter.rs | 10 +-
 testing/integration/src/tasks/tx/sender.rs | 2 +-
 utils/Cargo.toml | 16 +-
 utils/alloc/Cargo.toml | 4 +-
 utils/build.rs | 82 +
 utils/src/expiring_cache.rs | 152 +
 utils/src/git.rs | 53 +
 utils/src/lib.rs | 13 +
 utils/src/mem_size.rs | 2 +-
 utils/src/networking.rs | 44 +-
 utils/src/option.rs | 5 +-
 utils/src/serde_bytes_optional.rs | 111 +
 utils/src/sync/mod.rs | 5 +
 utils/src/sync/semaphore.rs | 92 +-
 utils/src/sysinfo.rs | 127 +
 utils/src/vec.rs | 9 +
 utils/tower/Cargo.toml | 6 +-
 utils/tower/src/middleware.rs | 77 +-
 wallet/bip32/Cargo.toml | 3 +-
 wallet/bip32/src/address_type.rs | 5 +
 wallet/bip32/src/derivation_path.rs | 40 +
 wallet/bip32/src/lib.rs | 2 +
 wallet/bip32/src/mnemonic/mod.rs | 1 -
 wallet/bip32/src/mnemonic/phrase.rs | 8 +-
 wallet/bip32/src/mnemonic/seed.rs | 1 -
 wallet/bip32/src/prefix.rs | 13 +
 wallet/bip32/src/private_key.rs | 1 +
 wallet/bip32/src/public_key.rs | 2 +-
 wallet/bip32/src/xpublic_key.rs | 12 +-
 wallet/core/Cargo.toml | 5 +-
 wallet/core/src/account/descriptor.rs | 26 +-
 wallet/core/src/account/kind.rs | 33 +-
 wallet/core/src/account/mod.rs | 81 +-
 wallet/core/src/account/pskb.rs | 367 ++
 wallet/core/src/account/variants/bip32.rs | 15 +-
 .../core/src/account/variants/bip32watch.rs | 252 ++
 wallet/core/src/account/variants/keypair.rs | 13 +-
 wallet/core/src/account/variants/legacy.rs | 5 +-
 wallet/core/src/account/variants/mod.rs | 2 +
 wallet/core/src/account/variants/multisig.rs | 31 +-
 wallet/core/src/account/variants/resident.rs | 1 +
 wallet/core/src/account/variants/watchonly.rs | 300 ++
 wallet/core/src/api/message.rs | 62 +-
 wallet/core/src/api/mod.rs | 2 +
 wallet/core/src/api/traits.rs | 32 +-
 wallet/core/src/api/transport.rs | 6 +-
 wallet/core/src/compat/gen1.rs | 8 +-
 wallet/core/src/compat/mod.rs | 4 +
 wallet/core/src/cryptobox.rs | 13 +-
 wallet/core/src/derivation.rs | 18 +-
 wallet/core/src/deterministic.rs | 35 +-
 wallet/core/src/encryption.rs | 2 +-
 wallet/core/src/error.rs | 33 +-
 wallet/core/src/events.rs | 6 +
 wallet/core/src/factory.rs | 4 +
 wallet/core/src/imports.rs | 6 +-
 wallet/core/src/lib.rs | 68 +-
 wallet/core/src/message.rs | 1 +
 wallet/core/src/metrics.rs | 29 +-
 wallet/core/src/prelude.rs | 8 +-
 wallet/core/src/rpc.rs | 8 +-
 wallet/core/src/serializer.rs | 4 +-
 wallet/core/src/settings.rs | 5 +
 wallet/core/src/storage/account.rs | 28 +-
 wallet/core/src/storage/binding.rs | 39 +
 wallet/core/src/storage/keydata/data.rs | 9 +-
 wallet/core/src/storage/local/interface.rs | 2 +-
 wallet/core/src/storage/local/payload.rs | 12 +-
 .../src/storage/local/transaction/fsio.rs | 13 +-
 .../src/storage/local/transaction/indexdb.rs | 171 +-
 wallet/core/src/storage/local/wallet.rs | 18 +-
 wallet/core/src/storage/metadata.rs | 8 +-
 wallet/core/src/storage/mod.rs | 2 +-
 wallet/core/src/storage/transaction/data.rs | 100 +-
 wallet/core/src/storage/transaction/record.rs | 61 +-
 wallet/core/src/tests/rpc_core_mock.rs | 192 +-
 wallet/core/src/tests/storage.rs | 4 +-
 wallet/core/src/tx/generator/generator.rs | 170 +-
 wallet/core/src/tx/generator/pending.rs | 72 +-
 wallet/core/src/tx/generator/settings.rs | 7 +
 wallet/core/src/tx/generator/test.rs | 77 +-
 wallet/core/src/tx/mass.rs | 146 +-
 wallet/core/src/tx/payment.rs | 26 +-
 wallet/core/src/utxo/balance.rs | 1 +
 wallet/core/src/utxo/context.rs | 72 +-
 wallet/core/src/utxo/processor.rs | 82 +-
 wallet/core/src/utxo/reference.rs | 6 +-
 wallet/core/src/utxo/scan.rs | 9 +-
 wallet/core/src/utxo/settings.rs | 133 +-
 wallet/core/src/wallet/api.rs | 135 +-
 wallet/core/src/wallet/args.rs | 16 +-
 wallet/core/src/wallet/maps.rs | 1 +
 wallet/core/src/wallet/mod.rs | 186 +-
 wallet/core/src/wasm/api/message.rs | 19 +-
 wallet/core/src/wasm/cryptobox.rs | 18 +-
 wallet/core/src/wasm/message.rs | 8 +-
 wallet/core/src/wasm/notify.rs | 141 +-
 wallet/core/src/wasm/signer.rs | 41 +-
 wallet/core/src/wasm/tx/consensus.rs | 36 -
 wallet/core/src/wasm/tx/fees.rs | 2 +-
 .../core/src/wasm/tx/generator/generator.rs | 24 +-
 wallet/core/src/wasm/tx/generator/pending.rs | 77 +-
 wallet/core/src/wasm/tx/mass.rs | 219 +-
 wallet/core/src/wasm/tx/mod.rs | 2 -
 wallet/core/src/wasm/tx/utils.rs | 40 +-
 wallet/core/src/wasm/utxo/context.rs | 14 +-
 wallet/core/src/wasm/utxo/processor.rs | 53 +-
 wallet/core/src/wasm/wallet/account.rs | 155 -
 wallet/core/src/wasm/wallet/mod.rs | 1 -
 wallet/keys/Cargo.toml | 4 +-
 wallet/keys/src/derivation/gen0/hd.rs | 2 +-
 wallet/keys/src/derivation/gen0/mod.rs | 2 +-
 wallet/keys/src/derivation/gen1/mod.rs | 3 +-
 wallet/keys/src/derivation/mod.rs | 4 +
 wallet/keys/src/derivation_path.rs | 13 +-
 wallet/keys/src/error.rs | 3 +
 wallet/keys/src/imports.rs | 2 +-
 wallet/keys/src/keypair.rs | 13 +-
 wallet/keys/src/lib.rs | 7 +
 wallet/keys/src/prelude.rs | 4 +
 wallet/keys/src/privatekey.rs | 13 +-
 wallet/keys/src/privkeygen.rs | 4 +
 wallet/keys/src/pubkeygen.rs | 8 +-
 wallet/keys/src/publickey.rs | 48 +-
 wallet/keys/src/secret.rs | 4 +-
 wallet/keys/src/types.rs | 2 +-
 wallet/keys/src/xprv.rs | 75 +-
 wallet/keys/src/xpub.rs | 83 +-
 wallet/macros/src/wallet/client.rs | 2 +-
 wallet/macros/src/wallet/server.rs | 2 +-
 wallet/pskt/Cargo.toml | 46 +
 wallet/pskt/examples/multisig.rs | 121 +
 wallet/pskt/src/bundle.rs | 358 ++
 wallet/pskt/src/convert.rs | 115 +
 wallet/pskt/src/error.rs | 70 +
 wallet/pskt/src/global.rs | 170 +
 wallet/pskt/src/input.rs | 168 +
 wallet/pskt/src/lib.rs | 32 +
 wallet/pskt/src/output.rs | 85 +
 wallet/pskt/src/pskt.rs | 493 +++
 wallet/pskt/src/role.rs | 29 +
 wallet/pskt/src/utils.rs | 31 +
 wallet/pskt/src/wasm/bundle.rs | 1 +
 wallet/pskt/src/wasm/error.rs | 64 +
 wallet/pskt/src/wasm/input.rs | 1 +
 wallet/pskt/src/wasm/mod.rs | 6 +
 wallet/pskt/src/wasm/output.rs | 1 +
 wallet/pskt/src/wasm/pskt.rs | 320 ++
 wallet/pskt/src/wasm/result.rs | 1 +
 wallet/wasm/Cargo.toml | 2 +-
 wasm/CHANGELOG.md | 29 +
 wasm/Cargo.toml | 4 +
 wasm/build-node-dev | 3 +-
 wasm/core/Cargo.toml | 6 +-
 wasm/core/src/hex.rs | 152 +
 wasm/core/src/lib.rs | 3 +-
 wasm/core/src/types.rs | 2 +-
 .../nodejs/javascript/general/derivation.js | 17 +-
 .../{mining-state.js => mining-pow.js} | 8 +-
 .../javascript/transactions/serialize.js | 48 +
 .../transactions/simple-transaction.js | 2 +-
 .../transactions/single-transaction-demo.js | 8 +-
 .../nodejs/typescript/src/scriptBuilder.ts | 13 +
 wasm/src/lib.rs | 34 +-
 461 files changed, 22957 insertions(+), 6958 deletions(-)
 create mode 100644 cli/src/modules/pskb.rs
 create mode 100644 consensus/client/src/utils.rs
 delete mode 100644 consensus/client/src/vtx.rs
 create mode 100644 consensus/core/src/api/args.rs
 create mode 100644 consensus/core/src/hashing/wasm.rs
 delete mode 100644 consensus/src/processes/mass.rs
 create mode 100644 crypto/txscript/src/error.rs
 create mode 100644 crypto/txscript/src/result.rs
 create mode 100644 crypto/txscript/src/wasm/builder.rs
 create mode 100644 crypto/txscript/src/wasm/mod.rs
 rename consensus/client/src/script.rs => crypto/txscript/src/wasm/opcodes.rs (51%)
 create mode 100644 mining/src/feerate/fee_estimation.ipynb
 create mode 100644 mining/src/feerate/mod.rs
 create mode 100644 mining/src/mempool/model/frontier.rs
 create mode 100644 mining/src/mempool/model/frontier/feerate_key.rs
 create mode 100644 mining/src/mempool/model/frontier/search_tree.rs
 create mode 100644 mining/src/mempool/model/frontier/selectors.rs
 create mode 100644 mining/src/mempool/replace_by_fee.rs
 create mode 100644 mining/src/model/tx_insert.rs
 create mode 100755 musl-toolchain/build.sh
 create mode 100755 musl-toolchain/preset.sh
 create mode 100644 rpc/core/src/api/connection.rs
 create mode 100644 rpc/core/src/model/feerate_estimate.rs
 create mode 100644 rpc/core/src/model/tests.rs
 create mode 100644 rpc/grpc/core/src/convert/feerate_estimate.rs
 create mode 100644 rpc/macros/src/wrpc/test.rs
 create mode 100644 rpc/service/src/converter/feerate_estimate.rs
 create mode 100644 rpc/wrpc/examples/simple_client/Cargo.toml
 create mode 100644 rpc/wrpc/examples/simple_client/src/main.rs
 delete mode 100644 rpc/wrpc/resolver/Cargo.toml
 delete mode 100644 rpc/wrpc/resolver/src/args.rs
 delete mode 100644 rpc/wrpc/resolver/src/connection.rs
 delete mode 100644 rpc/wrpc/resolver/src/error.rs
 delete mode 100644 rpc/wrpc/resolver/src/imports.rs
 delete mode 100644 rpc/wrpc/resolver/src/log.rs
 delete mode 100644 rpc/wrpc/resolver/src/main.rs
 delete mode 100644 rpc/wrpc/resolver/src/monitor.rs
 delete mode 100644 rpc/wrpc/resolver/src/node.rs
 delete mode 100644 rpc/wrpc/resolver/src/panic.rs
 delete mode 100644 rpc/wrpc/resolver/src/params.rs
 delete mode 100644 rpc/wrpc/resolver/src/result.rs
 delete mode 100644 rpc/wrpc/resolver/src/server.rs
 delete mode 100644 rpc/wrpc/resolver/src/transport.rs
 create mode 100644 utils/build.rs
 create mode 100644 utils/src/expiring_cache.rs
 create mode 100644 utils/src/git.rs
 create mode 100644 utils/src/serde_bytes_optional.rs
 create mode 100644 utils/src/sysinfo.rs
 create mode 100644 wallet/core/src/account/pskb.rs
 create mode 100644 wallet/core/src/account/variants/bip32watch.rs
 create mode 100644 wallet/core/src/account/variants/watchonly.rs
 delete mode 100644 wallet/core/src/wasm/tx/consensus.rs
 delete mode 100644 wallet/core/src/wasm/wallet/account.rs
 create mode 100644 wallet/pskt/Cargo.toml
 create mode 100644 wallet/pskt/examples/multisig.rs
 create mode 100644 wallet/pskt/src/bundle.rs
 create mode 100644 wallet/pskt/src/convert.rs
 create mode 100644 wallet/pskt/src/error.rs
 create mode 100644 wallet/pskt/src/global.rs
 create mode 100644 wallet/pskt/src/input.rs
 create mode 100644 wallet/pskt/src/lib.rs
 create mode 100644 wallet/pskt/src/output.rs
 create mode 100644 wallet/pskt/src/pskt.rs
 create mode 100644 wallet/pskt/src/role.rs
 create mode 100644 wallet/pskt/src/utils.rs
 create mode 100644 wallet/pskt/src/wasm/bundle.rs
 create mode 100644 wallet/pskt/src/wasm/error.rs
 create mode 100644 wallet/pskt/src/wasm/input.rs
 create mode 100644 wallet/pskt/src/wasm/mod.rs
 create mode 100644 wallet/pskt/src/wasm/output.rs
 create mode 100644 wallet/pskt/src/wasm/pskt.rs
 create mode 100644 wallet/pskt/src/wasm/result.rs
 create mode 100644 wasm/core/src/hex.rs
 rename wasm/examples/nodejs/javascript/general/{mining-state.js => mining-pow.js} (90%)
 create mode 100644 wasm/examples/nodejs/javascript/transactions/serialize.js
 create mode 100644 wasm/examples/nodejs/typescript/src/scriptBuilder.ts

diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 43e21e237b..49fe4e4637 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -126,6 +126,9 @@ jobs:
       - name: Run cargo doc tests with features=no-asm on kaspa-hashes
         run: cargo test --doc --release -p kaspa-hashes --features=no-asm
 
+      - name: Run cargo doc
+        run: cargo doc --release --no-deps --workspace
+
 #  test-release:
 #    name: Test Suite Release
 #    runs-on: ${{ matrix.os }}
@@ -211,7 +214,7 @@ jobs:
   check-wasm32:
-    name: Check Wasm32
+    name: Check WASM32
     runs-on: ubuntu-latest
     steps:
       - name: Checkout sources
@@ -274,12 +277,16 @@ jobs:
         run: cargo clippy -p kaspa-wasm --target wasm32-unknown-unknown
 
   build-wasm32:
-    name: Build Wasm32
+    name: Build WASM32 SDK
     runs-on: ubuntu-latest
     steps:
       - name: Checkout sources
         uses: actions/checkout@v4
 
+      - name: Setup Environment
+        shell: bash
+        run: echo "SHORT_SHA=`git rev-parse --short HEAD`" >> $GITHUB_ENV
+
       - name: Install Protoc
         uses: arduino/setup-protoc@v3
         with:
@@ -337,10 +344,20 @@ jobs:
           key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
 
       - name: Build wasm release
-        run: cd wasm && bash build-release
-
+        run: |
+          pushd .
+          cd wasm
+          bash build-release
+          popd
+          mv wasm/release/kaspa-wasm32-sdk.zip wasm/release/kaspa-wasm32-sdk-${{ env.SHORT_SHA }}.zip
+
+      - name: Upload WASM build to GitHub
+        uses: actions/upload-artifact@v4
+        with:
+          name: kaspa-wasm32-sdk-${{ env.SHORT_SHA }}.zip
+          path: wasm/release/kaspa-wasm32-sdk-${{ env.SHORT_SHA }}.zip
 
   build-release:
-    name: Build Ubuntu Release
+    name: Build Linux Release
     runs-on: ubuntu-latest
     steps:
       - name: Checkout sources
@@ -354,7 +371,7 @@ jobs:
       - name: Install stable toolchain
         uses: dtolnay/rust-toolchain@stable
 
-      - name: Cache
+      - name: Cache Cargo Build Outputs
         uses: actions/cache@v4
         with:
           path: |
@@ -364,14 +381,23 @@ jobs:
             ~/.cargo/git/db/
             target/
           key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
+          restore-keys: |
+            ${{ runner.os }}-cargo-
+
+      - name: Cache Toolchain
+        uses: actions/cache@v4
+        with:
+          path: |
+            ~/x-tools
+          key: ${{ runner.os }}-musl-${{ hashFiles('**/musl-toolchain/preset.sh') }}
+          restore-keys: |
+            ${{ runner.os }}-musl-
 
-      - name: Install zig
-        if: runner.os == 'Linux'
-        uses: goto-bus-stop/setup-zig@v2 # needed for cargo-zigbuild
-
-      - name: Build on Linux
+
+      - name: Build RK with musl toolchain
         if: runner.os == 'Linux'
-        # We're using musl to make the binaries statically linked and portable
         run: |
-          cargo install cargo-zigbuild
-          cargo --verbose zigbuild --bin kaspad --bin simpa --bin rothschild --release --target x86_64-unknown-linux-gnu.2.27 # Use an older glibc version
+          # Run build script for musl toolchain
+          source musl-toolchain/build.sh
+          # Build for musl
+          cargo --verbose build --bin kaspad --bin rothschild --bin kaspa-wallet --release --target x86_64-unknown-linux-musl
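Both workflow diffs here replace the zigbuild/glibc-2.27 cross-build with a project-built musl toolchain (musl-toolchain/build.sh), so the published Linux binaries are fully statically linked. As a minimal sketch (not part of the patch), however the binary was produced, static linkage can be confirmed on the artifact itself; the target directory below is the one the workflow builds into:

    # Expect "statically linked" in the output for a musl release build
    file target/x86_64-unknown-linux-musl/release/kaspad
    # A static binary is rejected by the dynamic loader inspector
    ldd target/x86_64-unknown-linux-musl/release/kaspad   # expect: "not a dynamic executable"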
diff --git a/.github/workflows/deploy.yaml b/.github/workflows/deploy.yaml
index 567adb557f..537eeef898 100644
--- a/.github/workflows/deploy.yaml
+++ b/.github/workflows/deploy.yaml
@@ -29,7 +29,7 @@ jobs:
       - name: Install stable toolchain
         uses: dtolnay/rust-toolchain@stable
 
-      - name: Cache
+      - name: Cache Cargo Build Outputs
         uses: actions/cache@v3
         with:
           path: |
@@ -40,23 +40,33 @@ jobs:
             target/
           key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
 
-      - name: Install zig on linux
-        if: runner.os == 'Linux'
-        uses: goto-bus-stop/setup-zig@v2 # needed for cargo-zigbuild
+      - name: Cache Toolchain
+        uses: actions/cache@v4
+        with:
+          path: |
+            ~/x-tools
+          key: ${{ runner.os }}-musl-${{ hashFiles('**/musl-toolchain/preset.sh') }}
+          restore-keys: |
+            ${{ runner.os }}-musl-
 
       - name: Build on Linux
         if: runner.os == 'Linux'
         # We're using musl to make the binaries statically linked and portable
         run: |
-          cargo install cargo-zigbuild
-          cargo --verbose zigbuild --bin kaspad --bin simpa --bin rothschild --bin kaspa-wallet --release --target x86_64-unknown-linux-gnu.2.27 # Use an older glibc version
+          # Run build script for musl toolchain
+          source musl-toolchain/build.sh
+
+          # Go back to the workspace
+          cd $GITHUB_WORKSPACE
+
+          # Build for musl
+          cargo --verbose build --bin kaspad --bin rothschild --bin kaspa-wallet --release --target x86_64-unknown-linux-musl
           mkdir bin || true
-          cp target/x86_64-unknown-linux-gnu/release/kaspad bin/
-          cp target/x86_64-unknown-linux-gnu/release/simpa bin/
-          cp target/x86_64-unknown-linux-gnu/release/rothschild bin/
-          cp target/x86_64-unknown-linux-gnu/release/kaspa-wallet bin/
-          archive="bin/rusty-kaspa-${{ github.event.release.tag_name }}-linux-gnu-amd64.zip"
-          asset_name="rusty-kaspa-${{ github.event.release.tag_name }}-linux-gnu-amd64.zip"
+          cp target/x86_64-unknown-linux-musl/release/kaspad bin/
+          cp target/x86_64-unknown-linux-musl/release/rothschild bin/
+          cp target/x86_64-unknown-linux-musl/release/kaspa-wallet bin/
+          archive="bin/rusty-kaspa-${{ github.event.release.tag_name }}-linux-amd64.zip"
+          asset_name="rusty-kaspa-${{ github.event.release.tag_name }}-linux-amd64.zip"
           zip -r "${archive}" ./bin/*
           echo "archive=${archive}" >> $GITHUB_ENV
           echo "asset_name=${asset_name}" >> $GITHUB_ENV
@@ -66,12 +76,10 @@ jobs:
         shell: bash
         run: |
           cargo build --bin kaspad --release
-          cargo build --bin simpa --release
           cargo build --bin rothschild --release
           cargo build --bin kaspa-wallet --release
           mkdir bin || true
           cp target/release/kaspad.exe bin/
-          cp target/release/simpa.exe bin/
           cp target/release/rothschild.exe bin/
           cp target/release/kaspa-wallet.exe bin/
           archive="bin/rusty-kaspa-${{ github.event.release.tag_name }}-win64.zip"
@@ -84,12 +92,10 @@ jobs:
         if: runner.os == 'macOS'
         run: |
           cargo build --bin kaspad --release
-          cargo build --bin simpa --release
           cargo build --bin rothschild --release
           cargo build --bin kaspa-wallet --release
           mkdir bin || true
           cp target/release/kaspad bin/
-          cp target/release/simpa bin/
           cp target/release/rothschild bin/
           cp target/release/kaspa-wallet bin/
           archive="bin/rusty-kaspa-${{ github.event.release.tag_name }}-osx.zip"
diff --git a/Cargo.lock b/Cargo.lock
index 86f536585e..a0db546302 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4,30 +4,30 @@ version = 3
 
 [[package]]
 name = "accessory"
-version = "1.3.0"
+version = "1.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "850bb534b9dc04744fbbb71d30ad6d25a7e4cf6dc33e223c81ef3a92ebab4e0b"
+checksum = "87537f9ae7cfa78d5b8ebd1a1db25959f5e737126be4d8eb44a5452fc4b63cde"
 dependencies = [
  "macroific",
  "proc-macro2",
  "quote",
- "syn 2.0.60",
+ "syn 2.0.79",
 ]
 
 [[package]]
 name = "addr2line"
-version = "0.21.0"
+version = "0.24.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb"
+checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1"
 dependencies = [
  "gimli",
 ]
 
 [[package]]
-name = "adler"
-version = "1.0.2"
+name = "adler2"
+version = "2.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
+checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"
 
 [[package]]
 name = "aead"
@@ -36,7 +36,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0"
 dependencies = [
  "crypto-common",
- "generic-array 0.14.7",
+ "generic-array",
 ]
 
 [[package]]
@@ -50,17 +50,6 @@ dependencies = [
  "cpufeatures",
 ]
 
-[[package]]
-name = "ahash"
-version = "0.7.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9"
-dependencies = [
- "getrandom 0.2.14",
- "once_cell",
- "version_check",
-]
-
 [[package]]
 name = "ahash"
 version = "0.8.11"
@@ -68,7 +57,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011"
 dependencies = [
  "cfg-if 1.0.0",
- "getrandom 0.2.14",
+ "getrandom",
  "once_cell",
  "version_check",
  "zerocopy",
@@ -83,17 +72,6 @@ dependencies = [
  "memchr",
 ]
 
-[[package]]
-name = "alga"
-version = "0.9.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = 
"4f823d037a7ec6ea2197046bafd4ae150e6bc36f9ca347404f46a46823fa84f2" -dependencies = [ - "approx", - "num-complex 0.2.4", - "num-traits", -] - [[package]] name = "android-tzdata" version = "0.1.1" @@ -126,9 +104,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.14" +version = "0.6.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" +checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" dependencies = [ "anstyle", "anstyle-parse", @@ -141,33 +119,33 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" +checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" [[package]] name = "anstyle-parse" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" +checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.3" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a64c907d4e79225ac72e2a354c9ce84d50ebb4586dee56c82b3ee73004f537f5" +checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.3" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" +checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" dependencies = [ "anstyle", "windows-sys 0.52.0", @@ -175,18 +153,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.82" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519" - -[[package]] -name = "approx" -version = "0.3.2" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0e60b75072ecd4168020818c0107f2857bb6c4e64252d8d3983f6263b40a5c3" -dependencies = [ - "num-traits", -] +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" [[package]] name = "arc-swap" @@ -208,15 +177,15 @@ dependencies = [ [[package]] name = "arrayref" -version = "0.3.7" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" [[package]] name = "arrayvec" -version = "0.7.4" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "async-attributes" @@ -241,27 +210,26 @@ dependencies = [ [[package]] name = "async-channel" -version = "2.2.1" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "136d4d23bcc79e27423727b36823d86233aad06dfea531837b038394d11e9928" +checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" dependencies = [ "concurrent-queue", - "event-listener 
5.3.0", - "event-listener-strategy 0.5.2", + "event-listener-strategy", "futures-core", "pin-project-lite", ] [[package]] name = "async-executor" -version = "1.11.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b10202063978b3351199d68f8b22c4e47e4b1b822f8d43fd862d5ea8c006b29a" +checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" dependencies = [ "async-task", "concurrent-queue", - "fastrand 2.1.0", - "futures-lite 2.3.0", + "fastrand", + "futures-lite", "slab", ] @@ -271,90 +239,61 @@ version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ - "async-channel 2.2.1", + "async-channel 2.3.1", "async-executor", - "async-io 2.3.2", - "async-lock 3.3.0", + "async-io", + "async-lock", "blocking", - "futures-lite 2.3.0", + "futures-lite", "once_cell", ] [[package]] name = "async-io" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" -dependencies = [ - "async-lock 2.8.0", - "autocfg", - "cfg-if 1.0.0", - "concurrent-queue", - "futures-lite 1.13.0", - "log", - "parking", - "polling 2.8.0", - "rustix 0.37.27", - "slab", - "socket2 0.4.10", - "waker-fn", -] - -[[package]] -name = "async-io" -version = "2.3.2" +version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcccb0f599cfa2f8ace422d3555572f47424da5648a4382a9dd0310ff8210884" +checksum = "444b0228950ee6501b3568d3c93bf1176a1fdbc3b758dcd9475046d30f4dc7e8" dependencies = [ - "async-lock 3.3.0", + "async-lock", "cfg-if 1.0.0", "concurrent-queue", "futures-io", - "futures-lite 2.3.0", + "futures-lite", "parking", - "polling 3.7.0", - "rustix 0.38.34", + "polling", + "rustix", "slab", "tracing", - "windows-sys 0.52.0", -] - -[[package]] -name = "async-lock" -version = "2.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" -dependencies = [ - "event-listener 2.5.3", + "windows-sys 0.59.0", ] [[package]] name = "async-lock" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" +checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ - "event-listener 4.0.3", - "event-listener-strategy 0.4.0", + "event-listener 5.3.1", + "event-listener-strategy", "pin-project-lite", ] [[package]] name = "async-std" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" +checksum = "c634475f29802fde2b8f0b505b1bd00dfe4df7d4a000f0b36f7671197d5c3615" dependencies = [ "async-attributes", "async-channel 1.9.0", "async-global-executor", - "async-io 1.13.0", - "async-lock 2.8.0", + "async-io", + "async-lock", "crossbeam-utils", "futures-channel", "futures-core", "futures-io", - "futures-lite 1.13.0", + "futures-lite", "gloo-timers", "kv-log-macro", "log", @@ -368,9 +307,9 @@ dependencies = [ [[package]] name = "async-stream" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +checksum = 
"0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" dependencies = [ "async-stream-impl", "futures-core", @@ -379,13 +318,13 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.79", ] [[package]] @@ -396,22 +335,13 @@ checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" -version = "0.1.80" +version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" +checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", -] - -[[package]] -name = "atomic-polyfill" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cf2bce30dfe09ef0bfaef228b9d414faaf7e563035494d7fe092dba54b300f4" -dependencies = [ - "critical-section", + "syn 2.0.79", ] [[package]] @@ -444,53 +374,23 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" - -[[package]] -name = "axum" -version = "0.6.20" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" -dependencies = [ - "async-trait", - "axum-core 0.3.4", - "bitflags 1.3.2", - "bytes", - "futures-util", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.28", - "itoa", - "matchit", - "memchr", - "mime", - "percent-encoding", - "pin-project-lite", - "rustversion", - "serde", - "sync_wrapper 0.1.2", - "tower", - "tower-layer", - "tower-service", -] +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "axum" -version = "0.7.5" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" +checksum = "504e3947307ac8326a5437504c517c4b56716c9d98fac0028c2acc7ca47d70ae" dependencies = [ "async-trait", - "axum-core 0.4.3", + "axum-core", "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", - "hyper 1.3.1", - "hyper-util", "itoa", "matchit", "memchr", @@ -499,76 +399,47 @@ dependencies = [ "pin-project-lite", "rustversion", "serde", - "serde_json", - "serde_path_to_error", - "serde_urlencoded", "sync_wrapper 1.0.1", - "tokio", - "tower", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "axum-core" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" -dependencies = [ - "async-trait", - "bytes", - "futures-util", - "http 0.2.12", - "http-body 0.4.6", - "mime", - "rustversion", + "tower 0.5.1", "tower-layer", "tower-service", ] [[package]] name = "axum-core" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" +checksum = 
"09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" dependencies = [ "async-trait", "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", "mime", "pin-project-lite", "rustversion", - "sync_wrapper 0.1.2", + "sync_wrapper 1.0.1", "tower-layer", "tower-service", - "tracing", ] [[package]] name = "backtrace" -version = "0.3.71" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - "cc", "cfg-if 1.0.0", "libc", "miniz_oxide", "object", "rustc-demangle", + "windows-targets 0.52.6", ] -[[package]] -name = "base64" -version = "0.21.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" - [[package]] name = "base64" version = "0.22.1" @@ -608,30 +479,29 @@ dependencies = [ "proc-macro2", "quote", "regex", - "rustc-hash", + "rustc-hash 1.1.0", "shlex", "which", ] [[package]] name = "bindgen" -version = "0.65.1" +version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5" +checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.6.0", "cexpr", "clang-sys", + "itertools 0.12.1", "lazy_static", "lazycell", - "peeking_take_while", - "prettyplease", "proc-macro2", "quote", "regex", - "rustc-hash", + "rustc-hash 1.1.0", "shlex", - "syn 2.0.60", + "syn 2.0.79", ] [[package]] @@ -642,9 +512,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] name = "blake2" @@ -672,66 +542,44 @@ version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] name = "blocking" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "495f7104e962b7356f0aeb34247aca1fe7d2e783b346582db7f2904cb5717e88" +checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" dependencies = [ - "async-channel 2.2.1", - "async-lock 3.3.0", + "async-channel 2.3.1", "async-task", "futures-io", - "futures-lite 2.3.0", + "futures-lite", "piper", ] [[package]] name = "borsh" -version = "0.9.3" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15bf3650200d8bffa99015595e10f1fbd17de07abbc25bb067da79e769939bfa" +checksum = "a6362ed55def622cddc70a4746a68554d7b687713770de539e59a739b249f8ed" dependencies = [ "borsh-derive", - "hashbrown 0.11.2", + "cfg_aliases 0.2.1", ] [[package]] name = "borsh-derive" -version = "0.9.3" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6441c552f230375d18e3cc377677914d2ca2b0d36e52129fe15450a2dce46775" +checksum = "c3ef8005764f53cd4dca619f5bf64cafd4664dada50ece25e4d81de54c80cc0b" dependencies = [ - 
"borsh-derive-internal", - "borsh-schema-derive-internal", + "once_cell", "proc-macro-crate", - "proc-macro2", - "syn 1.0.109", -] - -[[package]] -name = "borsh-derive-internal" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065" -dependencies = [ "proc-macro2", "quote", - "syn 1.0.109", -] - -[[package]] -name = "borsh-schema-derive-internal" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", + "syn 2.0.79", + "syn_derive", ] [[package]] @@ -758,9 +606,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.6.0" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" [[package]] name = "bzip2-sys" @@ -775,9 +623,9 @@ dependencies = [ [[package]] name = "camino" -version = "1.1.6" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" +checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3" dependencies = [ "serde", ] @@ -813,13 +661,13 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.96" +version = "1.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "065a29261d53ba54260972629f9ca6bffa69bac13cd1fed61420f7fa68b9f8bd" +checksum = "2e80e3b6a3ab07840e1cae9b0666a63970dc28e8ed5ffbcdacbfc760c281bfc1" dependencies = [ "jobserver", "libc", - "once_cell", + "shlex", ] [[package]] @@ -858,6 +706,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "chacha20" version = "0.9.1" @@ -906,7 +760,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -949,9 +803,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" dependencies = [ "glob", "libc", @@ -975,9 +829,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.4" +version = "4.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" +checksum = "7be5744db7978a28d9df86a214130d106a89ce49644cbc4e3f0c22c3fba30615" dependencies = [ "clap_builder", "clap_derive", @@ -985,9 +839,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.2" +version = "4.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" +checksum = 
"a5fbc17d3ef8278f55b282b2a2e75ae6f6c7d4bb70ed3d0382375104bfafdb4b" dependencies = [ "anstream", "anstyle", @@ -997,27 +851,27 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.4" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.79", ] [[package]] name = "clap_lex" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" +checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" [[package]] name = "colorchoice" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" +checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" [[package]] name = "concurrent-queue" @@ -1053,9 +907,9 @@ dependencies = [ [[package]] name = "constant_time_eq" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2" +checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" [[package]] name = "convert_case" @@ -1090,24 +944,24 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" dependencies = [ "libc", ] [[package]] name = "crc32fast" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" +checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ "cfg-if 1.0.0", ] @@ -1121,7 +975,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.5.4", + "clap 4.5.19", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -1146,17 +1000,11 @@ dependencies = [ "itertools 0.10.5", ] -[[package]] -name = "critical-section" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7059fff8937831a9ae6f0fe4d658ffabf58f2ca96aa9dec1c889f936f705f216" - [[package]] name = "crossbeam-channel" -version = "0.5.12" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95" +checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" dependencies = [ "crossbeam-utils", ] @@ -1182,9 +1030,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" +checksum = 
"22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" [[package]] name = "crossterm" @@ -1192,10 +1040,10 @@ version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "crossterm_winapi", "libc", - "mio", + "mio 0.8.11", "parking_lot", "signal-hook", "signal-hook-mio", @@ -1223,8 +1071,8 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "generic-array 0.14.7", - "rand_core 0.6.4", + "generic-array", + "rand_core", "typenum", ] @@ -1252,7 +1100,7 @@ dependencies = [ "aead", "chacha20", "cipher", - "generic-array 0.14.7", + "generic-array", "poly1305", "salsa20", "subtle", @@ -1261,25 +1109,24 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.4.4" +version = "3.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "672465ae37dc1bc6380a6547a8883d5dd397b0f1faaad4f265726cc7042a5345" +checksum = "90eeab0aa92f3f9b4e87f258c72b139c207d251f9cbc1080a0086b86a8870dd3" dependencies = [ - "nix", - "windows-sys 0.52.0", + "nix 0.29.0", + "windows-sys 0.59.0", ] [[package]] name = "curve25519-dalek" -version = "4.1.2" +version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ "cfg-if 1.0.0", "cpufeatures", "curve25519-dalek-derive", "fiat-crypto", - "platforms", "rustc_version", "subtle", "zeroize", @@ -1293,14 +1140,14 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.79", ] [[package]] name = "darling" -version = "0.20.8" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54e36fcd13ed84ffdfda6f5be89b31287cbb80c439841fe69e04841435464391" +checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" dependencies = [ "darling_core", "darling_macro", @@ -1308,36 +1155,37 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.8" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c2cf1c23a687a1feeb728783b993c4e1ad83d99f351801977dd809b48d0a70f" +checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", - "strsim 0.10.0", - "syn 2.0.60", + "strsim 0.11.1", + "syn 2.0.79", ] [[package]] name = "darling_macro" -version = "0.20.8" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" +checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.60", + "syn 2.0.79", ] [[package]] name = "dashmap" -version = "5.5.3" +version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" dependencies = [ "cfg-if 1.0.0", + "crossbeam-utils", "hashbrown 0.14.5", "lock_api", "once_cell", @@ -1359,7 +1207,7 @@ dependencies = [ 
"macroific", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.79", ] [[package]] @@ -1383,17 +1231,48 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "derive_builder" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd33f37ee6a119146a1781d3356a7c26028f83d779b2e04ecd45fdc75c76877b" +dependencies = [ + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7431fa049613920234f22c47fdc33e6cf3ee83067091ea4277a3f8c4587aae38" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 2.0.79", +] + +[[package]] +name = "derive_builder_macro" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4abae7035bf79b9877b779505d8cf3749285b80c43941eda66604841889451dc" +dependencies = [ + "derive_builder_core", + "syn 2.0.79", +] + [[package]] name = "derive_more" -version = "0.99.17" +version = "0.99.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "convert_case 0.4.0", "proc-macro2", "quote", "rustc_version", - "syn 1.0.109", + "syn 2.0.79", ] [[package]] @@ -1404,9 +1283,9 @@ checksum = "3c877555693c14d2f84191cfd3ad8582790fc52b5e2274b40b59cf5f5cea25c7" [[package]] name = "deunicode" -version = "1.4.4" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322ef0094744e63628e6f0eb2295517f79276a5b342a4c2ff3042566ca181d4e" +checksum = "339544cc9e2c4dc3fc7149fd630c5f22263a4fdf18a98afd0075784968b5cf00" [[package]] name = "dhat" @@ -1418,7 +1297,7 @@ dependencies = [ "lazy_static", "mintex", "parking_lot", - "rustc-hash", + "rustc-hash 1.1.0", "serde", "serde_json", "thousands", @@ -1456,6 +1335,12 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "doc-comment" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" + [[package]] name = "downcast" version = "0.11.0" @@ -1468,17 +1353,29 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2" +[[package]] +name = "duct" +version = "0.13.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4ab5718d1224b63252cd0c6f74f6480f9ffeb117438a2e0f5cf6d9a4798929c" +dependencies = [ + "libc", + "once_cell", + "os_pipe", + "shared_child", +] + [[package]] name = "duration-string" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fcc1d9ae294a15ed05aeae8e11ee5f2b3fe971c077d45a42fb20825fba6ee13" +checksum = "2334658684d7c213e18602aa72ce37e94d1c9b535882ef6e30bc444b7514a1ee" [[package]] name = "either" -version = "1.11.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "encode_unicode" @@ -1497,13 +1394,13 @@ dependencies = [ [[package]] name = "enum-primitive-derive" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c375b9c5eadb68d0a6efee2999fef292f45854c3444c86f09d8ab086ba942b0e" +checksum = "ba7795da175654fe16979af73f81f26a8ea27638d8d9823d317016888a63dc4c" dependencies = [ "num-traits", "quote", - "syn 1.0.109", + "syn 2.0.79", ] [[package]] @@ -1527,9 +1424,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" dependencies = [ "libc", "windows-sys 0.52.0", @@ -1543,43 +1440,22 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "4.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" -dependencies = [ - "concurrent-queue", - "parking", - "pin-project-lite", -] - -[[package]] -name = "event-listener" -version = "5.3.0" +version = "5.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d9944b8ca13534cdfb2800775f8dd4902ff3fc75a50101466decadfdf322a24" +checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" dependencies = [ "concurrent-queue", "parking", "pin-project-lite", ] -[[package]] -name = "event-listener-strategy" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" -dependencies = [ - "event-listener 4.0.3", - "pin-project-lite", -] - [[package]] name = "event-listener-strategy" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" dependencies = [ - "event-listener 5.3.0", + "event-listener 5.3.1", "pin-project-lite", ] @@ -1594,22 +1470,16 @@ dependencies = [ [[package]] name = "fancy_constructor" -version = "1.2.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f71f317e4af73b2f8f608fac190c52eac4b1879d2145df1db2fe48881ca69435" +checksum = "07b19d0e43eae2bfbafe4931b5e79c73fb1a849ca15cd41a761a7b8587f9a1a2" dependencies = [ "macroific", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.79", ] -[[package]] -name = "faster-hex" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51e2ce894d53b295cf97b05685aa077950ff3e8541af83217fc720a6437169f8" - [[package]] name = "faster-hex" version = "0.9.0" @@ -1621,35 +1491,26 @@ dependencies = [ [[package]] name = "fastrand" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" -dependencies = [ - "instant", -] - -[[package]] -name = "fastrand" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" +checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" [[package]] name = "fiat-crypto" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38793c55593b33412e3ae40c2c9781ffaa6f438f6f8c10f24e71846fbd7ae01e" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = 
"filetime" -version = "0.2.23" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ee447700ac8aa0b2f2bd7bc4462ad686ba06baa6727ac149a2d6277f0d240fd" +checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall 0.4.1", - "windows-sys 0.52.0", + "libredox", + "windows-sys 0.59.0", ] [[package]] @@ -1660,18 +1521,18 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "fixedstr" -version = "0.5.5" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f4e4dfef7b590ab7d11e531d602fdfb6a3413b09924db1428902bbc4410a9a8" +checksum = "60aba7afd9b1b9e1950c2b7e8bcac3cc44a273c62a02717dedca2d0a1aee694d" dependencies = [ "serde", ] [[package]] name = "flate2" -version = "1.0.30" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" +checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" dependencies = [ "crc32fast", "miniz_oxide", @@ -1683,21 +1544,6 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - [[package]] name = "form_urlencoded" version = "1.2.1" @@ -1709,9 +1555,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -1724,9 +1570,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -1734,15 +1580,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -1751,24 +1597,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" - -[[package]] -name = "futures-lite" -version = "1.13.0" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" -dependencies = [ - "fastrand 1.9.0", - "futures-core", - "futures-io", - "memchr", - "parking", - "pin-project-lite", - "waker-fn", -] +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" @@ -1776,7 +1607,7 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ - "fastrand 2.1.0", + "fastrand", "futures-core", "futures-io", "parking", @@ -1785,32 +1616,32 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.79", ] [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -1824,15 +1655,6 @@ dependencies = [ "slab", ] -[[package]] -name = "generic-array" -version = "0.13.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f797e67af32588215eaaab8327027ee8e71b9dd0b2b26996aedf20c030fce309" -dependencies = [ - "typenum", -] - [[package]] name = "generic-array" version = "0.14.7" @@ -1846,33 +1668,22 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - -[[package]] -name = "getrandom" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if 1.0.0", "js-sys", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "wasm-bindgen", ] [[package]] name = "gimli" -version = "0.28.1" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" 
[[package]] name = "glob" @@ -1882,9 +1693,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "gloo-timers" -version = "0.2.6" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" +checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" dependencies = [ "futures-channel", "futures-core", @@ -1904,7 +1715,26 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.2.6", + "indexmap 2.6.0", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "h2" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http 1.1.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -1923,27 +1753,27 @@ dependencies = [ [[package]] name = "hash32" -version = "0.2.1" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0c35f58762feb77d74ebe43bdbc3210f09be9fe6742234d573bacc26ed92b67" +checksum = "47d60b12902ba28e2730cd37e95b8c9223af2808df9e902d4df49588d1470606" dependencies = [ "byteorder", ] [[package]] name = "hashbrown" -version = "0.11.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" -dependencies = [ - "ahash 0.7.8", -] +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" -version = "0.12.3" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +dependencies = [ + "ahash", +] [[package]] name = "hashbrown" @@ -1951,19 +1781,22 @@ version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ - "ahash 0.8.11", + "ahash", ] +[[package]] +name = "hashbrown" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" + [[package]] name = "heapless" -version = "0.7.17" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdc6457c0eb62c71aac4bc17216026d8410337c4126773b9c5daba343f17964f" +checksum = "0bfb9eb618601c89945a70e254898da93b13be0388091d42117462b265bb3fad" dependencies = [ - "atomic-polyfill", "hash32", - "rustc_version", - "spin", "stable_deref_trait", ] @@ -1988,6 +1821,12 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" +[[package]] +name = "hermit-abi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" + [[package]] name = "hex" version = "0.4.3" @@ -2066,9 +1905,9 @@ dependencies = [ [[package]] name = "http-body" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
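The Cargo.lock hunks in this stretch are routine dependency refreshes: each [[package]] entry's version and checksum line move together (e.g. gimli 0.28.1 -> 0.31.1 just above). As a minimal sketch (not part of the patch) of how bumps like these are produced and inspected, using gimli only because it is the adjacent example:

    # Refresh a single dependency; rewrites its [[package]] entry and checksum
    cargo update -p gimli
    # Inverted dependency tree: show which workspace crates pull it in
    cargo tree -i gimli
    # Optionally scan the refreshed lockfile (requires cargo-audit to be installed)
    cargo audit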
"1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", "http 1.1.0", @@ -2076,28 +1915,22 @@ dependencies = [ [[package]] name = "http-body-util" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d" +checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" dependencies = [ "bytes", - "futures-core", + "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "pin-project-lite", ] -[[package]] -name = "http-range-header" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" - [[package]] name = "httparse" -version = "1.8.0" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] name = "httpdate" @@ -2113,22 +1946,22 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.28" +version = "0.14.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" +checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", - "h2", + "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", "httparse", "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.7", + "socket2", "tokio", "tower-service", "tracing", @@ -2137,76 +1970,87 @@ dependencies = [ [[package]] name = "hyper" -version = "1.3.1" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" +checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" dependencies = [ "bytes", "futures-channel", "futures-util", + "h2 0.4.6", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "httparse", "httpdate", "itoa", "pin-project-lite", "smallvec", "tokio", + "want", ] [[package]] -name = "hyper-timeout" -version = "0.4.1" +name = "hyper-rustls" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ - "hyper 0.14.28", - "pin-project-lite", + "futures-util", + "http 1.1.0", + "hyper 1.4.1", + "hyper-util", + "rustls", + "rustls-pki-types", "tokio", - "tokio-io-timeout", + "tokio-rustls", + "tower-service", + "webpki-roots", ] [[package]] -name = "hyper-tls" -version = "0.5.0" +name = "hyper-timeout" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" dependencies = [ - "bytes", - "hyper 0.14.28", - "native-tls", + "hyper 1.4.1", + "hyper-util", + "pin-project-lite", "tokio", - "tokio-native-tls", + "tower-service", ] [[package]] name = "hyper-util" -version = "0.1.3" +version = "0.1.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa" +checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" dependencies = [ "bytes", + "futures-channel", "futures-util", "http 1.1.0", - "http-body 1.0.0", - "hyper 1.3.1", + "http-body 1.0.1", + "hyper 1.4.1", "pin-project-lite", - "socket2 0.5.7", + "socket2", "tokio", + "tower-service", + "tracing", ] [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows-core", + "windows-core 0.52.0", ] [[package]] @@ -2245,9 +2089,9 @@ dependencies = [ "bytes", "futures", "http 0.2.12", - "hyper 0.14.28", + "hyper 0.14.30", "log", - "rand 0.8.5", + "rand", "tokio", "url", "xmltree", @@ -2255,16 +2099,16 @@ dependencies = [ [[package]] name = "indexed_db_futures" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6cc2083760572ee02385ab8b7c02c20925d2dd1f97a1a25a8737a238608f1152" +checksum = "43315957678a70eb21fb0d2384fe86dde0d6c859a01e24ce127eb65a0143d28c" dependencies = [ "accessory", "cfg-if 1.0.0", "delegate-display", "fancy_constructor", "js-sys", - "uuid 1.6.1", + "uuid 1.10.0", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -2283,12 +2127,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.6" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.15.0", "serde", ] @@ -2298,14 +2142,14 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] name = "instant" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -2336,39 +2180,28 @@ dependencies = [ "uuid 0.8.2", ] -[[package]] -name = "io-lifetimes" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" -dependencies = [ - "hermit-abi 0.3.9", - "libc", - "windows-sys 0.48.0", -] - [[package]] name = "ipnet" -version = "2.9.0" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" [[package]] name = "is-terminal" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" +checksum = 
"261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" dependencies = [ - "hermit-abi 0.3.9", + "hermit-abi 0.4.0", "libc", "windows-sys 0.52.0", ] [[package]] name = "is_terminal_polyfill" -version = "1.70.0" +version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" [[package]] name = "itertools" @@ -2397,6 +2230,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.11" @@ -2405,25 +2247,25 @@ checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jobserver" -version = "0.1.31" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" dependencies = [ "libc", ] [[package]] name = "js-sys" -version = "0.3.69" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" dependencies = [ "wasm-bindgen", ] [[package]] name = "kaspa-addresses" -version = "0.14.1" +version = "0.15.2" dependencies = [ "borsh", "criterion", @@ -2440,11 +2282,11 @@ dependencies = [ [[package]] name = "kaspa-addressmanager" -version = "0.14.1" +version = "0.15.2" dependencies = [ "borsh", "igd-next", - "itertools 0.11.0", + "itertools 0.13.0", "kaspa-consensus-core", "kaspa-core", "kaspa-database", @@ -2452,37 +2294,37 @@ dependencies = [ "local-ip-address", "log", "parking_lot", - "rand 0.8.5", + "rand", "rocksdb", + "rv", "serde", - "statest", - "statrs", "thiserror", "tokio", ] [[package]] name = "kaspa-alloc" -version = "0.14.1" +version = "0.15.2" dependencies = [ "mimalloc", ] [[package]] name = "kaspa-bip32" -version = "0.14.1" +version = "0.15.2" dependencies = [ "borsh", "bs58", - "faster-hex 0.6.1", - "getrandom 0.2.14", + "faster-hex", + "getrandom", "hmac", "js-sys", + "kaspa-consensus-core", "kaspa-utils", "once_cell", "pbkdf2", - "rand 0.8.5", - "rand_core 0.6.4", + "rand", + "rand_core", "ripemd", "secp256k1", "serde", @@ -2496,7 +2338,7 @@ dependencies = [ [[package]] name = "kaspa-cli" -version = "0.14.1" +version = "0.15.2" dependencies = [ "async-trait", "borsh", @@ -2504,8 +2346,9 @@ dependencies = [ "convert_case 0.6.0", "dashmap", "downcast", - "faster-hex 0.6.1", + "faster-hex", "futures", + "hex", "js-sys", "kaspa-addresses", "kaspa-bip32", @@ -2517,6 +2360,7 @@ dependencies = [ "kaspa-utils", "kaspa-wallet-core", "kaspa-wallet-keys", + "kaspa-wallet-pskt", "kaspa-wrpc-client", "nw-sys", "pad", @@ -2541,35 +2385,35 @@ dependencies = [ [[package]] name = "kaspa-connectionmanager" -version = "0.14.1" +version = "0.15.2" dependencies = [ "duration-string", "futures-util", - "itertools 0.11.0", + "itertools 0.13.0", "kaspa-addressmanager", "kaspa-core", "kaspa-p2p-lib", "kaspa-utils", "log", "parking_lot", - "rand 0.8.5", + "rand", "tokio", ] [[package]] name = "kaspa-consensus" -version = "0.14.1" +version = "0.15.2" dependencies = [ "arc-swap", - 
"async-channel 2.2.1", + "async-channel 2.3.1", "bincode", "criterion", "crossbeam-channel", - "faster-hex 0.6.1", + "faster-hex", "flate2", "futures-util", - "indexmap 2.2.6", - "itertools 0.11.0", + "indexmap 2.6.0", + "itertools 0.13.0", "kaspa-addresses", "kaspa-consensus-core", "kaspa-consensus-notify", @@ -2588,8 +2432,8 @@ dependencies = [ "log", "once_cell", "parking_lot", - "rand 0.8.5", - "rand_distr 0.4.3", + "rand", + "rand_distr", "rayon", "rocksdb", "secp256k1", @@ -2602,13 +2446,13 @@ dependencies = [ [[package]] name = "kaspa-consensus-client" -version = "0.14.1" +version = "0.15.2" dependencies = [ - "ahash 0.8.11", + "ahash", "cfg-if 1.0.0", - "faster-hex 0.6.1", + "faster-hex", "hex", - "itertools 0.11.0", + "itertools 0.13.0", "js-sys", "kaspa-addresses", "kaspa-consensus-core", @@ -2617,7 +2461,7 @@ dependencies = [ "kaspa-txscript", "kaspa-utils", "kaspa-wasm-core", - "rand 0.8.5", + "rand", "secp256k1", "serde", "serde-wasm-bindgen", @@ -2630,7 +2474,7 @@ dependencies = [ [[package]] name = "kaspa-consensus-core" -version = "0.14.1" +version = "0.15.2" dependencies = [ "arc-swap", "async-trait", @@ -2638,10 +2482,10 @@ dependencies = [ "borsh", "cfg-if 1.0.0", "criterion", - "faster-hex 0.6.1", + "faster-hex", "futures-util", - "getrandom 0.2.14", - "itertools 0.11.0", + "getrandom", + "itertools 0.13.0", "js-sys", "kaspa-addresses", "kaspa-core", @@ -2651,7 +2495,7 @@ dependencies = [ "kaspa-muhash", "kaspa-txscript-errors", "kaspa-utils", - "rand 0.8.5", + "rand", "secp256k1", "serde", "serde-wasm-bindgen", @@ -2663,14 +2507,15 @@ dependencies = [ "web-sys", "workflow-core", "workflow-log", + "workflow-serializer", "workflow-wasm", ] [[package]] name = "kaspa-consensus-notify" -version = "0.14.1" +version = "0.15.2" dependencies = [ - "async-channel 2.2.1", + "async-channel 2.3.1", "cfg-if 1.0.0", "derive_more", "futures", @@ -2687,10 +2532,10 @@ dependencies = [ [[package]] name = "kaspa-consensus-wasm" -version = "0.14.1" +version = "0.15.2" dependencies = [ "cfg-if 1.0.0", - "faster-hex 0.6.1", + "faster-hex", "js-sys", "kaspa-addresses", "kaspa-consensus-client", @@ -2698,7 +2543,7 @@ dependencies = [ "kaspa-hashes", "kaspa-txscript", "kaspa-utils", - "rand 0.8.5", + "rand", "secp256k1", "serde", "serde-wasm-bindgen", @@ -2711,25 +2556,25 @@ dependencies = [ [[package]] name = "kaspa-consensusmanager" -version = "0.14.1" +version = "0.15.2" dependencies = [ "duration-string", "futures", "futures-util", - "itertools 0.11.0", + "itertools 0.13.0", "kaspa-consensus-core", "kaspa-consensus-notify", "kaspa-core", "kaspa-utils", "log", "parking_lot", - "rand 0.8.5", + "rand", "tokio", ] [[package]] name = "kaspa-core" -version = "0.14.1" +version = "0.15.2" dependencies = [ "cfg-if 1.0.0", "ctrlc", @@ -2747,7 +2592,7 @@ dependencies = [ [[package]] name = "kaspa-daemon" -version = "0.14.1" +version = "0.15.2" dependencies = [ "async-trait", "borsh", @@ -2769,19 +2614,19 @@ dependencies = [ [[package]] name = "kaspa-database" -version = "0.14.1" +version = "0.15.2" dependencies = [ "bincode", "enum-primitive-derive", - "faster-hex 0.6.1", - "indexmap 2.2.6", - "itertools 0.11.0", + "faster-hex", + "indexmap 2.6.0", + "itertools 0.13.0", "kaspa-hashes", "kaspa-utils", "num-traits", "num_cpus", "parking_lot", - "rand 0.8.5", + "rand", "rocksdb", "serde", "smallvec", @@ -2791,16 +2636,16 @@ dependencies = [ [[package]] name = "kaspa-grpc-client" -version = "0.14.1" +version = "0.15.2" dependencies = [ - "async-channel 2.2.1", + "async-channel 2.3.1", "async-stream", 
"async-trait", - "faster-hex 0.6.1", + "faster-hex", "futures", "futures-util", - "h2", - "itertools 0.11.0", + "h2 0.4.6", + "itertools 0.13.0", "kaspa-core", "kaspa-grpc-core", "kaspa-notify", @@ -2811,8 +2656,9 @@ dependencies = [ "parking_lot", "paste", "prost", - "rand 0.8.5", + "rand", "regex", + "rustls", "thiserror", "tokio", "tokio-stream", @@ -2822,14 +2668,14 @@ dependencies = [ [[package]] name = "kaspa-grpc-core" -version = "0.14.1" +version = "0.15.2" dependencies = [ - "async-channel 2.2.1", + "async-channel 2.3.1", "async-stream", "async-trait", - "faster-hex 0.6.1", + "faster-hex", "futures", - "h2", + "h2 0.4.6", "kaspa-consensus-core", "kaspa-core", "kaspa-notify", @@ -2838,7 +2684,7 @@ dependencies = [ "log", "paste", "prost", - "rand 0.8.5", + "rand", "regex", "thiserror", "tokio", @@ -2851,15 +2697,15 @@ dependencies = [ [[package]] name = "kaspa-grpc-server" -version = "0.14.1" +version = "0.15.2" dependencies = [ - "async-channel 2.2.1", + "async-channel 2.3.1", "async-stream", "async-trait", - "faster-hex 0.6.1", + "faster-hex", "futures", - "h2", - "itertools 0.11.0", + "h2 0.4.6", + "itertools 0.13.0", "kaspa-consensus-core", "kaspa-core", "kaspa-grpc-client", @@ -2875,29 +2721,30 @@ dependencies = [ "parking_lot", "paste", "prost", - "rand 0.8.5", + "rand", + "rustls", "thiserror", "tokio", "tokio-stream", "tonic", "triggered", - "uuid 1.6.1", + "uuid 1.10.0", ] [[package]] name = "kaspa-hashes" -version = "0.14.1" +version = "0.15.2" dependencies = [ "blake2b_simd", "borsh", "cc", "criterion", - "faster-hex 0.6.1", + "faster-hex", "js-sys", "kaspa-utils", "keccak", "once_cell", - "rand 0.8.5", + "rand", "serde", "sha2", "sha3", @@ -2907,9 +2754,9 @@ dependencies = [ [[package]] name = "kaspa-index-core" -version = "0.14.1" +version = "0.15.2" dependencies = [ - "async-channel 2.2.1", + "async-channel 2.3.1", "async-trait", "derive_more", "futures", @@ -2926,9 +2773,9 @@ dependencies = [ [[package]] name = "kaspa-index-processor" -version = "0.14.1" +version = "0.15.2" dependencies = [ - "async-channel 2.2.1", + "async-channel 2.3.1", "async-trait", "derive_more", "futures", @@ -2946,7 +2793,7 @@ dependencies = [ "log", "parking_lot", "paste", - "rand 0.8.5", + "rand", "thiserror", "tokio", "triggered", @@ -2954,16 +2801,16 @@ dependencies = [ [[package]] name = "kaspa-math" -version = "0.14.1" +version = "0.15.2" dependencies = [ "borsh", "criterion", - "faster-hex 0.6.1", + "faster-hex", "js-sys", "kaspa-utils", "malachite-base", "malachite-nz", - "rand_chacha 0.3.1", + "rand_chacha", "serde", "serde-wasm-bindgen", "thiserror", @@ -2975,14 +2822,14 @@ dependencies = [ [[package]] name = "kaspa-merkle" -version = "0.14.1" +version = "0.15.2" dependencies = [ "kaspa-hashes", ] [[package]] name = "kaspa-metrics-core" -version = "0.14.1" +version = "0.15.2" dependencies = [ "async-trait", "borsh", @@ -2998,11 +2845,11 @@ dependencies = [ [[package]] name = "kaspa-mining" -version = "0.14.1" +version = "0.15.2" dependencies = [ "criterion", "futures-util", - "itertools 0.11.0", + "itertools 0.13.0", "kaspa-addresses", "kaspa-consensus-core", "kaspa-consensusmanager", @@ -3014,17 +2861,18 @@ dependencies = [ "kaspa-utils", "log", "parking_lot", - "rand 0.8.5", + "rand", "secp256k1", "serde", "smallvec", + "sweep-bptree", "thiserror", "tokio", ] [[package]] name = "kaspa-mining-errors" -version = "0.14.1" +version = "0.15.2" dependencies = [ "kaspa-consensus-core", "thiserror", @@ -3032,30 +2880,30 @@ dependencies = [ [[package]] name = "kaspa-muhash" -version = 
"0.14.1" +version = "0.15.2" dependencies = [ "criterion", "kaspa-hashes", "kaspa-math", - "rand 0.8.5", - "rand_chacha 0.3.1", + "rand", + "rand_chacha", "rayon", "serde", ] [[package]] name = "kaspa-notify" -version = "0.14.1" +version = "0.15.2" dependencies = [ - "async-channel 2.2.1", + "async-channel 2.3.1", "async-trait", "borsh", "criterion", "derive_more", "futures", "futures-util", - "indexmap 2.2.6", - "itertools 0.11.0", + "indexmap 2.6.0", + "itertools 0.13.0", "kaspa-addresses", "kaspa-alloc", "kaspa-consensus-core", @@ -3068,7 +2916,7 @@ dependencies = [ "log", "parking_lot", "paste", - "rand 0.8.5", + "rand", "serde", "thiserror", "tokio", @@ -3076,17 +2924,18 @@ dependencies = [ "workflow-core", "workflow-log", "workflow-perf-monitor", + "workflow-serializer", ] [[package]] name = "kaspa-p2p-flows" -version = "0.14.1" +version = "0.15.2" dependencies = [ "async-trait", "chrono", "futures", - "indexmap 2.2.6", - "itertools 0.11.0", + "indexmap 2.6.0", + "itertools 0.13.0", "kaspa-addressmanager", "kaspa-connectionmanager", "kaspa-consensus-core", @@ -3102,23 +2951,23 @@ dependencies = [ "kaspa-utils-tower", "log", "parking_lot", - "rand 0.8.5", + "rand", "thiserror", "tokio", "tokio-stream", - "uuid 1.6.1", + "uuid 1.10.0", ] [[package]] name = "kaspa-p2p-lib" -version = "0.14.1" +version = "0.15.2" dependencies = [ "borsh", "ctrlc", "futures", - "h2", + "h2 0.4.6", "hex", - "itertools 0.11.0", + "itertools 0.13.0", "kaspa-consensus-core", "kaspa-core", "kaspa-hashes", @@ -3129,7 +2978,7 @@ dependencies = [ "log", "parking_lot", "prost", - "rand 0.8.5", + "rand", "seqlock", "serde", "thiserror", @@ -3137,12 +2986,12 @@ dependencies = [ "tokio-stream", "tonic", "tonic-build", - "uuid 1.6.1", + "uuid 1.10.0", ] [[package]] name = "kaspa-perf-monitor" -version = "0.14.1" +version = "0.15.2" dependencies = [ "kaspa-core", "log", @@ -3154,7 +3003,7 @@ dependencies = [ [[package]] name = "kaspa-pow" -version = "0.14.1" +version = "0.15.2" dependencies = [ "criterion", "js-sys", @@ -3168,47 +3017,17 @@ dependencies = [ "workflow-wasm", ] -[[package]] -name = "kaspa-resolver" -version = "0.14.1" -dependencies = [ - "ahash 0.8.11", - "axum 0.7.5", - "cfg-if 1.0.0", - "clap 4.5.4", - "console", - "convert_case 0.6.0", - "futures", - "kaspa-consensus-core", - "kaspa-rpc-core", - "kaspa-utils", - "kaspa-wrpc-client", - "mime", - "serde", - "serde_json", - "thiserror", - "tokio", - "toml 0.8.12", - "tower", - "tower-http 0.5.2", - "tracing-subscriber", - "workflow-core", - "workflow-http", - "workflow-log", - "xxhash-rust", -] - [[package]] name = "kaspa-rpc-core" -version = "0.14.1" +version = "0.15.2" dependencies = [ - "async-channel 2.2.1", + "async-channel 2.3.1", "async-trait", "borsh", "cfg-if 1.0.0", "derive_more", "downcast", - "faster-hex 0.6.1", + "faster-hex", "hex", "js-sys", "kaspa-addresses", @@ -3227,20 +3046,22 @@ dependencies = [ "kaspa-utils", "log", "paste", + "rand", "serde", "serde-wasm-bindgen", "serde_json", "smallvec", "thiserror", - "uuid 1.6.1", + "uuid 1.10.0", "wasm-bindgen", "workflow-core", + "workflow-serializer", "workflow-wasm", ] [[package]] name = "kaspa-rpc-macros" -version = "0.14.1" +version = "0.15.2" dependencies = [ "convert_case 0.6.0", "proc-macro-error", @@ -3252,7 +3073,7 @@ dependencies = [ [[package]] name = "kaspa-rpc-service" -version = "0.14.1" +version = "0.15.2" dependencies = [ "async-trait", "kaspa-addresses", @@ -3281,21 +3102,21 @@ dependencies = [ [[package]] name = "kaspa-testing-integration" -version = "0.14.1" +version = 
"0.15.2" dependencies = [ - "async-channel 2.2.1", + "async-channel 2.3.1", "async-trait", "bincode", "chrono", - "clap 4.5.4", + "clap 4.5.19", "criterion", "crossbeam-channel", "dhat", - "faster-hex 0.6.1", + "faster-hex", "flate2", "futures-util", - "indexmap 2.2.6", - "itertools 0.11.0", + "indexmap 2.6.0", + "itertools 0.13.0", "kaspa-addresses", "kaspa-alloc", "kaspa-bip32", @@ -3325,8 +3146,8 @@ dependencies = [ "kaspad", "log", "parking_lot", - "rand 0.8.5", - "rand_distr 0.4.3", + "rand", + "rand_distr", "rayon", "rocksdb", "secp256k1", @@ -3341,33 +3162,39 @@ dependencies = [ [[package]] name = "kaspa-txscript" -version = "0.14.1" +version = "0.15.2" dependencies = [ "blake2b_simd", "borsh", + "cfg-if 1.0.0", "criterion", "hex", - "indexmap 2.2.6", - "itertools 0.11.0", + "hexplay", + "indexmap 2.6.0", + "itertools 0.13.0", "kaspa-addresses", "kaspa-consensus-core", "kaspa-hashes", "kaspa-txscript-errors", + "kaspa-utils", + "kaspa-wasm-core", "log", "parking_lot", - "rand 0.8.5", + "rand", "secp256k1", "serde", + "serde-wasm-bindgen", "serde_json", "sha2", "smallvec", "thiserror", "wasm-bindgen", + "workflow-wasm", ] [[package]] name = "kaspa-txscript-errors" -version = "0.14.1" +version = "0.15.2" dependencies = [ "secp256k1", "thiserror", @@ -3375,50 +3202,59 @@ dependencies = [ [[package]] name = "kaspa-utils" -version = "0.14.1" +version = "0.15.2" dependencies = [ - "async-channel 2.2.1", + "arc-swap", + "async-channel 2.3.1", "async-trait", "bincode", "borsh", "cfg-if 1.0.0", "criterion", + "duct", "event-listener 2.5.3", - "faster-hex 0.6.1", + "faster-hex", "futures-util", "ipnet", - "itertools 0.11.0", + "itertools 0.13.0", "log", + "mac_address", + "num_cpus", + "once_cell", "parking_lot", - "rand 0.8.5", + "rand", "rlimit", "serde", "serde_json", + "sha2", "smallvec", + "sysinfo", "thiserror", "tokio", "triggered", - "uuid 1.6.1", + "uuid 1.10.0", "wasm-bindgen", ] [[package]] name = "kaspa-utils-tower" -version = "0.14.1" +version = "0.15.2" dependencies = [ + "bytes", "cfg-if 1.0.0", "futures", - "hyper 0.14.28", + "http-body 1.0.1", + "http-body-util", "log", "pin-project-lite", "tokio", - "tower", - "tower-http 0.4.4", + "tower 0.5.1", + "tower-http", ] [[package]] name = "kaspa-utxoindex" -version = "0.14.1" +version = "0.15.2" dependencies = [ "futures", "kaspa-consensus", @@ -3431,7 +3267,7 @@ dependencies = [ "kaspa-utils", "log", "parking_lot", - "rand 0.8.5", + "rand", "rocksdb", "serde", "thiserror", @@ -3439,7 +3275,7 @@ dependencies = [ [[package]] name = "kaspa-wallet" -version = "0.14.1" +version = "0.15.2" dependencies = [ "async-std", "async-trait", @@ -3451,7 +3287,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-cli-wasm" -version = "0.14.1" +version = "0.15.2" dependencies = [ "async-trait", "js-sys", @@ -3465,15 +3301,15 @@ dependencies = [ [[package]] name = "kaspa-wallet-core" -version = "0.14.1" +version = "0.15.2" dependencies = [ "aes", - "ahash 0.8.11", + "ahash", "argon2", - "async-channel 2.2.1", + "async-channel 2.3.1", "async-std", "async-trait", - "base64 0.21.7", + "base64", "borsh", "cfb-mode", "cfg-if 1.0.0", @@ -3484,7 +3320,7 @@ dependencies = [ "derivative", "downcast", "evpkdf", - "faster-hex 0.6.1", + "faster-hex", "fixedstr", "futures", "heapless", @@ -3492,7 +3328,7 @@ dependencies = [ "hmac", "home", "indexed_db_futures", - "itertools 0.11.0", + "itertools 0.13.0", "js-sys", "kaspa-addresses", "kaspa-bip32", @@ -3509,13 +3345,14 @@ dependencies = [ "kaspa-utils", "kaspa-wallet-keys", "kaspa-wallet-macros", + 
"kaspa-wallet-pskt", "kaspa-wasm-core", "kaspa-wrpc-client", "kaspa-wrpc-wasm", "md-5", "pad", "pbkdf2", - "rand 0.8.5", + "rand", "regex", "ripemd", "secp256k1", @@ -3545,12 +3382,12 @@ dependencies = [ [[package]] name = "kaspa-wallet-keys" -version = "0.14.1" +version = "0.15.2" dependencies = [ "async-trait", "borsh", "downcast", - "faster-hex 0.6.1", + "faster-hex", "hmac", "js-sys", "kaspa-addresses", @@ -3560,7 +3397,7 @@ dependencies = [ "kaspa-txscript-errors", "kaspa-utils", "kaspa-wasm-core", - "rand 0.8.5", + "rand", "ripemd", "secp256k1", "serde", @@ -3578,7 +3415,7 @@ dependencies = [ [[package]] name = "kaspa-wallet-macros" -version = "0.14.1" +version = "0.15.2" dependencies = [ "convert_case 0.5.0", "proc-macro-error", @@ -3589,19 +3426,48 @@ dependencies = [ "xxhash-rust", ] +[[package]] +name = "kaspa-wallet-pskt" +version = "0.15.2" +dependencies = [ + "bincode", + "derive_builder", + "futures", + "hex", + "js-sys", + "kaspa-addresses", + "kaspa-bip32", + "kaspa-consensus-client", + "kaspa-consensus-core", + "kaspa-txscript", + "kaspa-txscript-errors", + "kaspa-utils", + "secp256k1", + "serde", + "serde-value", + "serde-wasm-bindgen", + "serde_json", + "serde_repr", + "thiserror", + "wasm-bindgen", + "workflow-wasm", +] + [[package]] name = "kaspa-wasm" -version = "0.14.1" +version = "0.15.2" dependencies = [ "cfg-if 1.0.0", "js-sys", "kaspa-addresses", + "kaspa-bip32", "kaspa-consensus-core", "kaspa-consensus-wasm", "kaspa-core", "kaspa-math", "kaspa-pow", "kaspa-rpc-core", + "kaspa-txscript", "kaspa-utils", "kaspa-wallet-core", "kaspa-wallet-keys", @@ -3617,16 +3483,18 @@ dependencies = [ [[package]] name = "kaspa-wasm-core" -version = "0.14.1" +version = "0.15.2" dependencies = [ - "faster-hex 0.6.1", + "faster-hex", + "hexplay", "js-sys", "wasm-bindgen", + "workflow-wasm", ] [[package]] name = "kaspa-wrpc-client" -version = "0.14.1" +version = "0.15.2" dependencies = [ "async-std", "async-trait", @@ -3641,13 +3509,14 @@ dependencies = [ "kaspa-rpc-core", "kaspa-rpc-macros", "paste", - "rand 0.8.5", + "rand", "regex", + "rustls", "serde", "serde-wasm-bindgen", "serde_json", "thiserror", - "toml 0.8.12", + "toml", "wasm-bindgen", "wasm-bindgen-futures", "workflow-core", @@ -3655,12 +3524,13 @@ dependencies = [ "workflow-http", "workflow-log", "workflow-rpc", + "workflow-serializer", "workflow-wasm", ] [[package]] name = "kaspa-wrpc-example-subscriber" -version = "0.14.1" +version = "0.15.2" dependencies = [ "ctrlc", "futures", @@ -3675,10 +3545,10 @@ dependencies = [ [[package]] name = "kaspa-wrpc-proxy" -version = "0.14.1" +version = "0.15.2" dependencies = [ "async-trait", - "clap 4.5.4", + "clap 4.5.19", "kaspa-consensus-core", "kaspa-grpc-client", "kaspa-rpc-core", @@ -3694,7 +3564,7 @@ dependencies = [ [[package]] name = "kaspa-wrpc-server" -version = "0.14.1" +version = "0.15.2" dependencies = [ "async-trait", "borsh", @@ -3709,21 +3579,32 @@ dependencies = [ "kaspa-utils", "log", "num_cpus", - "openssl", "paste", + "rustls", "serde", "thiserror", "tokio", "workflow-core", "workflow-log", "workflow-rpc", + "workflow-serializer", +] + +[[package]] +name = "kaspa-wrpc-simple-client-example" +version = "0.15.2" +dependencies = [ + "futures", + "kaspa-rpc-core", + "kaspa-wrpc-client", + "tokio", ] [[package]] name = "kaspa-wrpc-wasm" -version = "0.14.1" +version = "0.15.2" dependencies = [ - "ahash 0.8.11", + "ahash", "async-std", "cfg-if 1.0.0", "futures", @@ -3737,6 +3618,7 @@ dependencies = [ "kaspa-rpc-macros", "kaspa-wasm-core", "kaspa-wrpc-client", + "ring", 
"serde", "serde-wasm-bindgen", "serde_json", @@ -3750,10 +3632,11 @@ dependencies = [ [[package]] name = "kaspad" -version = "0.14.1" +version = "0.15.2" dependencies = [ - "async-channel 2.2.1", - "clap 4.5.4", + "async-channel 2.3.1", + "cfg-if 1.0.0", + "clap 4.5.19", "dhat", "dirs", "futures-util", @@ -3782,14 +3665,14 @@ dependencies = [ "kaspa-wrpc-server", "log", "num_cpus", - "rand 0.8.5", + "rand", "rayon", "serde", "serde_with", "tempfile", "thiserror", "tokio", - "toml 0.8.12", + "toml", "workflow-log", ] @@ -3813,9 +3696,9 @@ dependencies = [ [[package]] name = "lazy_static" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "lazycell" @@ -3825,18 +3708,18 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.154" +version = "0.2.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346" +checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" [[package]] name = "libloading" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" +checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if 1.0.0", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -3847,9 +3730,9 @@ checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libmimalloc-sys" -version = "0.1.37" +version = "0.1.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81eb4061c0582dedea1cbc7aff2240300dd6982e0239d1c99e65c1dbf4a30ba7" +checksum = "23aa6811d3bd4deb8a84dde645f943476d13b248d818edcf8ce0b2f37f036b44" dependencies = [ "cc", "libc", @@ -3861,17 +3744,18 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "libc", + "redox_syscall", ] [[package]] name = "librocksdb-sys" -version = "0.11.0+8.1.1" +version = "0.16.0+8.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3386f101bcb4bd252d8e9d2fb41ec3b0862a15a62b478c355b2982efa469e3e" +checksum = "ce3d60bc059831dc1c83903fb45c103f75db65c5a7bf22272764d9cc683e348c" dependencies = [ - "bindgen 0.65.1", + "bindgen 0.69.4", "bzip2-sys", "cc", "glob", @@ -3883,9 +3767,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.16" +version = "1.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e143b5e666b2695d28f6bca6497720813f699c9602dd7f5cac91008b8ada7f9" +checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" dependencies = [ "cc", "pkg-config", @@ -3914,26 +3798,20 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.3.8" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" - -[[package]] -name = "linux-raw-sys" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "local-ip-address" -version = "0.5.7" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "612ed4ea9ce5acfb5d26339302528a5e1e59dfed95e9e11af3c083236ff1d15d" +checksum = "3669cf5561f8d27e8fc84cc15e58350e70f557d4d65f70e3154e54cd2f8e1782" dependencies = [ "libc", "neli", "thiserror", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -3948,9 +3826,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.21" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" dependencies = [ "serde", "value-bag", @@ -3980,7 +3858,7 @@ dependencies = [ "log-mdc", "once_cell", "parking_lot", - "rand 0.8.5", + "rand", "serde", "serde-value", "serde_json", @@ -3992,20 +3870,39 @@ dependencies = [ ] [[package]] -name = "lz4-sys" -version = "1.9.4" +name = "lru" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d27b317e207b10f69f5e75494119e391a96f48861ae870d1da6edac98ca900" +checksum = "71e7d46de488603ffdd5f30afbc64fbba2378214a2c3a2fb83abf3d33126df17" dependencies = [ - "cc", - "libc", + "hashbrown 0.13.2", ] [[package]] -name = "mach" -version = "0.3.2" +name = "lz4-sys" +version = "1.11.1+lz4-1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa" +checksum = "6bd8c0d6c6ed0cd30b3652886bb8711dc4bb01d637a68105a3d5158039b418e6" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "mac_address" +version = "1.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8836fae9d0d4be2c8b4efcdd79e828a2faa058a90d005abf42f91cac5493a08e" +dependencies = [ + "nix 0.28.0", + "winapi", +] + +[[package]] +name = "mach" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa" dependencies = [ "libc", ] @@ -4030,7 +3927,7 @@ dependencies = [ "cfg-if 1.0.0", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.79", ] [[package]] @@ -4041,7 +3938,7 @@ checksum = "13198c120864097a565ccb3ff947672d969932b7975ebd4085732c9f09435e55" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.79", ] [[package]] @@ -4054,14 +3951,14 @@ dependencies = [ "macroific_core", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.79", ] [[package]] name = "malachite-base" -version = "0.4.7" +version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d073a3d1e4e037975af5ef176a2632672e25e8ddbe8e1811745c2e0726b6ad94" +checksum = "46059721011b0458b7bd6d9179be5d0b60294281c23320c207adceaecc54d13b" dependencies = [ "hashbrown 0.14.5", "itertools 0.11.0", @@ -4071,9 +3968,9 @@ dependencies = [ [[package]] name = "malachite-nz" -version = "0.4.7" +version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2546fc6ae29728079e87a2a0f011509e6060713b65e62ee46ba5d413b495ebc7" +checksum = "1503b27e825cabd1c3d0ff1e95a39fb2ec9eab6fd3da6cfa41aec7091d273e78" dependencies = [ "itertools 0.11.0", "libm", @@ -4097,11 +3994,15 @@ checksum = 
"0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] name = "matrixmultiply" -version = "0.2.4" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "916806ba0031cd542105d916a97c8572e1fa6dd79c9c51e7eb43a09ec2dd84c1" +checksum = "9380b911e3e96d10c1f415da0876389aaf1b56759054eeb0de7df940c456ba1a" dependencies = [ + "autocfg", + "num_cpus", + "once_cell", "rawpointer", + "thread-tree", ] [[package]] @@ -4116,15 +4017,24 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.2" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "memoffset" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" +dependencies = [ + "autocfg", +] [[package]] name = "mimalloc" -version = "0.1.41" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f41a2280ded0da56c8cf898babb86e8f10651a34adcfff190ae9a1159c6908d" +checksum = "68914350ae34959d83f732418d51e2427a794055d0b9529f48259ac07af65633" dependencies = [ "libmimalloc-sys", ] @@ -4135,6 +4045,16 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" +[[package]] +name = "minicov" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c71e683cd655513b99affab7d317deb690528255a0d5f717f1024093c12b169" +dependencies = [ + "cc", + "walkdir", +] + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -4143,11 +4063,11 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.2" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" +checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" dependencies = [ - "adler", + "adler2", ] [[package]] @@ -4164,33 +4084,27 @@ checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "log", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "windows-sys 0.48.0", ] [[package]] -name = "multimap" -version = "0.10.0" +name = "mio" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" +dependencies = [ + "hermit-abi 0.3.9", + "libc", + "wasi", + "windows-sys 0.52.0", +] [[package]] -name = "nalgebra" -version = "0.19.0" +name = "multimap" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0abb021006c01b126a936a8dd1351e0720d83995f4fc942d0d426c654f990745" -dependencies = [ - "alga", - "approx", - "generic-array 0.13.3", - "matrixmultiply", - "num-complex 0.2.4", - "num-rational 0.2.4", - "num-traits", - "rand 0.7.3", - "rand_distr 0.2.2", - "typenum", -] +checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" [[package]] name = "nanoid" @@ -4198,38 +4112,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3ffa00dec017b5b1a8b7cf5e2c008bfda1aa7e0697ac1508b491fdf2622fb4d8" dependencies = [ - "rand 0.8.5", -] - -[[package]] -name = "native-tls" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" -dependencies = [ - "lazy_static", - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", -] - -[[package]] -name = "ndarray" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac06db03ec2f46ee0ecdca1a1c34a99c0d188a0d83439b84bf0cb4b386e4ab09" -dependencies = [ - "matrixmultiply", - "num-complex 0.2.4", - "num-integer", - "num-traits", - "rawpointer", + "rand", ] [[package]] @@ -4263,9 +4146,22 @@ version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "cfg-if 1.0.0", - "cfg_aliases", + "cfg_aliases 0.1.1", + "libc", + "memoffset", +] + +[[package]] +name = "nix" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" +dependencies = [ + "bitflags 2.6.0", + "cfg-if 1.0.0", + "cfg_aliases 0.2.1", "libc", ] @@ -4293,55 +4189,43 @@ dependencies = [ ] [[package]] -name = "nu-ansi-term" -version = "0.46.0" +name = "ntapi" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4" dependencies = [ - "overload", "winapi", ] [[package]] name = "num" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3135b08af27d103b0a51f2ae0f8632117b7b185ccf931445affa8df530576a41" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" dependencies = [ "num-bigint", - "num-complex 0.4.5", + "num-complex", "num-integer", "num-iter", - "num-rational 0.4.1", + "num-rational", "num-traits", ] [[package]] name = "num-bigint" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ - "autocfg", "num-integer", "num-traits", ] [[package]] name = "num-complex" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6b19411a9719e753aff12e5187b74d60d3dc449ec3f4dc21e3989c3f554bc95" -dependencies = [ - "autocfg", - "num-traits", -] - -[[package]] -name = "num-complex" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23c6602fda94a57c990fe0df199a035d83576b496aa29f4e634a8ac6004e68a6" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" dependencies = [ "num-traits", ] @@ -4374,22 +4258,10 @@ dependencies = [ [[package]] name = "num-rational" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-rational" 
-version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" dependencies = [ - "autocfg", "num-bigint", "num-integer", "num-traits", @@ -4445,24 +4317,24 @@ dependencies = [ [[package]] name = "object" -version = "0.32.2" +version = "0.36.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.19.0" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "oorandom" -version = "11.1.3" +version = "11.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" +checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "opaque-debug" @@ -4470,66 +4342,18 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" -[[package]] -name = "openssl" -version = "0.10.64" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" -dependencies = [ - "bitflags 2.5.0", - "cfg-if 1.0.0", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] - -[[package]] -name = "openssl-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.60", -] - -[[package]] -name = "openssl-probe" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" - -[[package]] -name = "openssl-src" -version = "300.2.3+3.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cff92b6f71555b61bb9315f7c64da3ca43d87531622120fea0195fc761b4843" -dependencies = [ - "cc", -] - -[[package]] -name = "openssl-sys" -version = "0.9.102" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c597637d56fbc83893a35eb0dd04b2b8e7a50c91e64e9493e398b5df4fb45fa2" -dependencies = [ - "cc", - "libc", - "openssl-src", - "pkg-config", - "vcpkg", -] - [[package]] name = "option-ext" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" +[[package]] +name = "order-stat" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efa535d5117d3661134dbf1719b6f0ffe06f2375843b13935db186cd094105eb" + [[package]] name = "ordered-float" version = "2.10.1" @@ -4540,10 +4364,14 @@ dependencies = [ ] [[package]] -name = "overload" -version = "0.1.1" +name = "os_pipe" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +checksum = "5ffd2b0a5634335b135d5728d84c5e0fd726954b87111f7506a61c502280d982" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] [[package]] name = "pad" @@ -4556,15 +4384,15 @@ dependencies = [ [[package]] name = "parking" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", "parking_lot_core", @@ -4578,16 +4406,16 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall 0.5.1", + "redox_syscall", "smallvec", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] name = "parse-variants" -version = "1.0.1" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80f048110646aae15ec0e4299c37a012739d28d92c82b7ad945c0578c188cbe3" +checksum = "99088a2b0695df5940d7b5a3b4c4460b765053cf5ecd6d46da43812d3fad7f13" dependencies = [ "parse-variants-derive", ] @@ -4600,7 +4428,7 @@ checksum = "70df726c43c645ef1dde24c7ae14692036ebe5457c92c5f0ec4cfceb99634ff6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.79", ] [[package]] @@ -4610,15 +4438,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" dependencies = [ "base64ct", - "rand_core 0.6.4", + "rand_core", "subtle", ] [[package]] name = "paste" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pbkdf2" @@ -4642,34 +4470,58 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +[[package]] +name = "peroxide" +version = "0.32.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "703b5fbdc1f9018a66e2db8758633cec31d39ad3127bfd38c9b6ad510637519c" +dependencies = [ + "matrixmultiply", + "order-stat", + "peroxide-ad", + "puruspe", + "rand", + "rand_distr", +] + +[[package]] +name = "peroxide-ad" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6fba8ff3f40b67996f7c745f699babaa3e57ef5c8178ec999daf7eedc51dc8c" +dependencies = [ + "quote", + "syn 1.0.109", +] + [[package]] name = "petgraph" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.2.6", + "indexmap 2.6.0", ] [[package]] name = "pin-project" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +checksum = "baf123a161dde1e524adf36f90bc5d8d3462824a9c43553ad07a8183161189ec" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.79", ] [[package]] @@ -4686,56 +4538,34 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "piper" -version = "0.2.1" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" +checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" dependencies = [ "atomic-waker", - "fastrand 2.1.0", + "fastrand", "futures-io", ] [[package]] name = "pkg-config" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" - -[[package]] -name = "platforms" -version = "3.4.0" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db23d408679286588f4d4644f965003d056e3dd5abcaaa938116871d7ce2fee7" +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" [[package]] name = "polling" -version = "2.8.0" +version = "3.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" +checksum = "cc2790cd301dec6cd3b7a025e4815cf825724a51c98dccfe6a3e55f05ffb6511" dependencies = [ - "autocfg", - "bitflags 1.3.2", "cfg-if 1.0.0", "concurrent-queue", - "libc", - "log", + "hermit-abi 0.4.0", "pin-project-lite", - "windows-sys 0.48.0", -] - -[[package]] -name = "polling" -version = "3.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645493cf344456ef24219d02a768cf1fb92ddf8c92161679ae3d91b91a637be3" -dependencies = [ - "cfg-if 1.0.0", - "concurrent-queue", - "hermit-abi 0.3.9", - "pin-project-lite", - "rustix 0.38.34", + "rustix", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -4751,9 +4581,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.6.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" +checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" [[package]] name = "powerfmt" @@ -4763,27 +4593,30 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.17" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] [[package]] name = "prettyplease" -version = "0.2.19" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ac2cf0f2e4f42b49f5ffd07dae8d746508ef7526c13940e5f524012ae6c6550" +checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" dependencies = [ "proc-macro2", - "syn 
2.0.60", + "syn 2.0.79", ] [[package]] name = "proc-macro-crate" -version = "0.1.5" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" +checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ - "toml 0.5.11", + "toml_edit", ] [[package]] @@ -4811,18 +4644,18 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.81" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ "unicode-ident", ] [[package]] name = "prost" -version = "0.12.4" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0f5d036824e4761737860779c906171497f6d55681139d8312388f8fe398922" +checksum = "7b0487d90e047de87f984913713b85c601c05609aad5b0df4b4573fbf69aa13f" dependencies = [ "bytes", "prost-derive", @@ -4830,13 +4663,13 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.12.4" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80b776a1b2dc779f5ee0641f8ade0125bc1298dd41a9a0c16d8bd57b42d222b1" +checksum = "0c1318b19085f08681016926435853bbf7858f9c082d0999b80550ff5d9abe15" dependencies = [ "bytes", "heck", - "itertools 0.12.1", + "itertools 0.13.0", "log", "multimap", "once_cell", @@ -4845,92 +4678,114 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.60", + "syn 2.0.79", "tempfile", ] [[package]] name = "prost-derive" -version = "0.12.4" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19de2de2a00075bf566bee3bd4db014b11587e84184d3f7a791bc17f1a8e9e48" +checksum = "e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5" dependencies = [ "anyhow", - "itertools 0.12.1", + "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.79", ] [[package]] name = "prost-types" -version = "0.12.4" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3235c33eb02c1f1e212abdbe34c78b264b038fb58ca612664343271e36e55ffe" +checksum = "4759aa0d3a6232fb8dbdb97b61de2c20047c68aca932c7ed76da9d788508d670" dependencies = [ "prost", ] [[package]] -name = "quote" -version = "1.0.36" +name = "puruspe" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3804877ffeba468c806c2ad9057bbbae92e4b2c410c2f108baaa0042f241fa4c" + +[[package]] +name = "quinn" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" dependencies = [ - "proc-macro2", + "bytes", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash 2.0.0", + "rustls", + "socket2", + "thiserror", + "tokio", + "tracing", ] [[package]] -name = "rand" -version = "0.7.3" +name = "quinn-proto" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" dependencies = [ - "getrandom 0.1.16", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc", + "bytes", + "rand", + "ring", + 
"rustc-hash 2.0.0", + "rustls", + "slab", + "thiserror", + "tinyvec", + "tracing", ] [[package]] -name = "rand" -version = "0.8.5" +name = "quinn-udp" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" dependencies = [ "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", + "once_cell", + "socket2", + "tracing", + "windows-sys 0.59.0", ] [[package]] -name = "rand_chacha" -version = "0.2.2" +name = "quote" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", + "proc-macro2", ] [[package]] -name = "rand_chacha" -version = "0.3.1" +name = "rand" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ - "ppv-lite86", - "rand_core 0.6.4", + "libc", + "rand_chacha", + "rand_core", ] [[package]] -name = "rand_core" -version = "0.5.1" +name = "rand_chacha" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ - "getrandom 0.1.16", + "ppv-lite86", + "rand_core", ] [[package]] @@ -4939,16 +4794,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.14", -] - -[[package]] -name = "rand_distr" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96977acbdd3a6576fb1d27391900035bf3863d4a16422973a409b488cf29ffb2" -dependencies = [ - "rand 0.7.3", + "getrandom", ] [[package]] @@ -4958,16 +4804,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" dependencies = [ "num-traits", - "rand 0.8.5", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", + "rand", ] [[package]] @@ -4998,38 +4835,29 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "redox_syscall" -version = "0.5.1" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", ] [[package]] name = "redox_users" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" +checksum = 
"ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ - "getrandom 0.2.14", + "getrandom", "libredox", "thiserror", ] [[package]] name = "regex" -version = "1.10.4" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" +checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" dependencies = [ "aho-corasick", "memchr", @@ -5039,9 +4867,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.6" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", @@ -5050,48 +4878,53 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" -version = "0.11.27" +version = "0.12.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" +checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" dependencies = [ - "base64 0.21.7", + "base64", "bytes", "encoding_rs", "futures-core", "futures-util", - "h2", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.28", - "hyper-tls", + "h2 0.4.6", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.4.1", + "hyper-rustls", + "hyper-util", "ipnet", "js-sys", "log", "mime", - "native-tls", "once_cell", "percent-encoding", "pin-project-lite", + "quinn", + "rustls", "rustls-pemfile", + "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 0.1.2", + "sync_wrapper 1.0.1", "system-configuration", "tokio", - "tokio-native-tls", + "tokio-rustls", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "winreg", + "webpki-roots", + "windows-registry", ] [[package]] @@ -5102,7 +4935,7 @@ checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if 1.0.0", - "getrandom 0.2.14", + "getrandom", "libc", "spin", "untrusted", @@ -5120,18 +4953,18 @@ dependencies = [ [[package]] name = "rlimit" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3560f70f30a0f16d11d01ed078a07740fe6b489667abc7c7b029155d9f21c3d8" +checksum = "7043b63bd0cd1aaa628e476b80e6d4023a3b50eb32789f2728908107bd0c793a" dependencies = [ "libc", ] [[package]] name = "rocksdb" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb6f170a4041d50a0ce04b0d2e14916d6ca863ea2e422689a5b694395d299ffe" +checksum = "6bd13e55d6d7b8cd0ea569161127567cd587676c99f4472f779a0279aa60a7a7" dependencies = [ "libc", "librocksdb-sys", @@ -5139,13 +4972,13 @@ dependencies = [ [[package]] name = "rothschild" -version = "0.14.1" +version = "0.15.2" dependencies = [ - "async-channel 2.2.1", - "clap 4.5.4", + "async-channel 2.3.1", + "clap 4.5.19", "criterion", - "faster-hex 0.6.1", - "itertools 0.11.0", + "faster-hex", + "itertools 0.13.0", "kaspa-addresses", "kaspa-consensus-core", "kaspa-core", @@ -5163,9 +4996,9 @@ 
dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hash" @@ -5174,83 +5007,101 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] -name = "rustc_version" -version = "0.4.0" +name = "rustc-hash" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" -dependencies = [ - "semver", -] +checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" [[package]] -name = "rustix" -version = "0.37.27" +name = "rustc_version" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ - "bitflags 1.3.2", - "errno", - "io-lifetimes", - "libc", - "linux-raw-sys 0.3.8", - "windows-sys 0.48.0", + "semver", ] [[package]] name = "rustix" -version = "0.38.34" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "errno", "libc", - "linux-raw-sys 0.4.13", + "linux-raw-sys", "windows-sys 0.52.0", ] [[package]] name = "rustls" -version = "0.21.12" +version = "0.23.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8" dependencies = [ "log", + "once_cell", "ring", + "rustls-pki-types", "rustls-webpki", - "sct", + "subtle", + "zeroize", ] [[package]] name = "rustls-pemfile" -version = "1.0.4" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ - "base64 0.21.7", + "rustls-pki-types", ] +[[package]] +name = "rustls-pki-types" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55" + [[package]] name = "rustls-webpki" -version = "0.101.7" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "ring", + "rustls-pki-types", "untrusted", ] [[package]] name = "rustversion" -version = "1.0.15" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" + +[[package]] +name = "rv" +version = "0.16.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c07e0a3b756794c7ea2f05d93760ffb946ff4f94b255d92444d94c19fd71f4ab" +dependencies = [ + "doc-comment", + "lru", + "num", + "num-traits", + "peroxide", + "rand", + "rand_distr", + "special", +] [[package]] name = "ryu" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "salsa20" @@ -5270,15 +5121,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "schannel" -version = "0.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" -dependencies = [ - "windows-sys 0.52.0", -] - [[package]] name = "scoped-tls" version = "1.0.1" @@ -5291,64 +5133,31 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "sct" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "secp256k1" -version = "0.28.2" +version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d24b59d129cdadea20aea4fb2352fa053712e5d713eee47d700cd4b2bc002f10" +checksum = "9465315bc9d4566e1724f0fffcbcc446268cb522e60f9a27bcded6b19c108113" dependencies = [ - "rand 0.8.5", + "rand", "secp256k1-sys", "serde", ] [[package]] name = "secp256k1-sys" -version = "0.9.2" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d1746aae42c19d583c3c1a8c646bfad910498e2051c551a7f2e3c0c9fbb7eb" +checksum = "d4387882333d3aa8cb20530a17c69a3752e97837832f34f6dccc760e715001d9" dependencies = [ "cc", ] -[[package]] -name = "security-framework" -version = "2.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" -dependencies = [ - "bitflags 2.5.0", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" -dependencies = [ - "core-foundation-sys", - "libc", -] - [[package]] name = "semver" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" dependencies = [ "serde", ] @@ -5370,9 +5179,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.200" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddc6f9cc94d67c0e21aaf7eda3a010fd3af78ebf6e096aa6e2e13c79749cce4f" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] @@ -5400,36 +5209,27 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.200" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "856f046b9400cee3c8c94ed572ecdb752444c24528c035cd35882aad6f492bcb" +checksum = 
"243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.79", ] [[package]] name = "serde_json" -version = "1.0.116" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", + "memchr", "ryu", "serde", ] -[[package]] -name = "serde_path_to_error" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" -dependencies = [ - "itoa", - "serde", -] - [[package]] name = "serde_repr" version = "0.1.19" @@ -5438,14 +5238,14 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.79", ] [[package]] name = "serde_spanned" -version = "0.6.5" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" dependencies = [ "serde", ] @@ -5464,15 +5264,15 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.8.1" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ad483d2ab0149d5a5ebcd9972a3852711e0153d863bf5a5d0391d28883c4a20" +checksum = "8e28bdad6db2b8340e449f7108f020b3b092e8583a9e3fb82713e1d4e71fe817" dependencies = [ - "base64 0.22.1", + "base64", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.2.6", + "indexmap 2.6.0", "serde", "serde_derive", "serde_json", @@ -5482,14 +5282,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.8.1" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65569b702f41443e8bc8bbb1c5779bd0450bbe723b56198980e80ec45780bce2" +checksum = "9d846214a9854ef724f3da161b426242d8de7c1fc7de2f89bb1efcb154dca79d" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.79", ] [[package]] @@ -5498,7 +5298,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.2.6", + "indexmap 2.6.0", "itoa", "ryu", "serde", @@ -5538,12 +5338,13 @@ dependencies = [ ] [[package]] -name = "sharded-slab" -version = "0.1.7" +name = "shared_child" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +checksum = "09fa9338aed9a1df411814a5b2252f7cd206c55ae9bf2fa763f8de84603aa60c" dependencies = [ - "lazy_static", + "libc", + "windows-sys 0.59.0", ] [[package]] @@ -5564,12 +5365,12 @@ dependencies = [ [[package]] name = "signal-hook-mio" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29ad2e15f37ec9a6cc544097b78a1ec90001e9f71b81338ca39f430adaca99af" +checksum = "34db1a06d485c9142248b7a054f034b349b212551f3dfd19c94d45a754a217cd" dependencies = [ "libc", - "mio", + "mio 0.8.11", "signal-hook", ] @@ -5584,15 +5385,16 @@ dependencies = [ [[package]] name = "simpa" -version = "0.14.1" +version = "0.15.2" dependencies = [ - "async-channel 2.2.1", - "clap 4.5.4", + "async-channel 2.3.1", + 
"cfg-if 1.0.0", + "clap 4.5.19", "dhat", "futures", "futures-util", - "indexmap 2.2.6", - "itertools 0.11.0", + "indexmap 2.6.0", + "itertools 0.13.0", "kaspa-alloc", "kaspa-consensus", "kaspa-consensus-core", @@ -5604,8 +5406,8 @@ dependencies = [ "kaspa-utils", "log", "num_cpus", - "rand 0.8.5", - "rand_distr 0.4.3", + "rand", + "rand_distr", "rayon", "secp256k1", "tokio", @@ -5645,16 +5447,6 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7c388c1b5e93756d0c740965c41e8822f866621d41acbdf6336a6a168f8840c" -[[package]] -name = "socket2" -version = "0.4.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "socket2" version = "0.5.7" @@ -5672,52 +5464,31 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dd62203d74a728ae353b4d716fc2a80e8da881dfdf8bbc0c012d877a58c4030" [[package]] -name = "spin" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" -dependencies = [ - "lock_api", -] - -[[package]] -name = "stable_deref_trait" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - -[[package]] -name = "statest" -version = "0.2.2" +name = "special" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04ed65138bd1680f47e4d980ac7d3cf5e827fa99c2fa6683e640094a494602b4" +checksum = "b89cf0d71ae639fdd8097350bfac415a41aabf1d5ddd356295fdc95f09760382" dependencies = [ - "ndarray", - "num-traits", - "statrs", + "libm", ] [[package]] -name = "statrs" -version = "0.13.0" +name = "spin" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e34b58a8f9b7462b6922e0b4e3c83d1b3c2075f7f996a56d6c66afa81590064" -dependencies = [ - "nalgebra", - "rand 0.7.3", -] +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" [[package]] -name = "strsim" -version = "0.8.0" +name = "stable_deref_trait" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] name = "strsim" -version = "0.10.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" [[package]] name = "strsim" @@ -5727,9 +5498,15 @@ checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "subtle" -version = "2.5.0" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "sweep-bptree" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" +checksum = "bea7b1b7c5eaabc40bab84ec98b2f12523d97e91c9bfc430fe5d2a1ea15c9960" [[package]] name = "syn" @@ -5744,15 +5521,27 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.60" +version = "2.0.79" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3" +checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] +[[package]] +name = "syn_derive" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.79", +] + [[package]] name = "sync_wrapper" version = "0.1.2" @@ -5764,23 +5553,40 @@ name = "sync_wrapper" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +dependencies = [ + "futures-core", +] + +[[package]] +name = "sysinfo" +version = "0.31.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "355dbe4f8799b304b05e1b0f05fc59b2a18d36645cf169607da45bde2f69a1be" +dependencies = [ + "core-foundation-sys", + "libc", + "memchr", + "ntapi", + "rayon", + "windows", +] [[package]] name = "system-configuration" -version = "0.5.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.6.0", "core-foundation", "system-configuration-sys", ] [[package]] name = "system-configuration-sys" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" dependencies = [ "core-foundation-sys", "libc", @@ -5788,14 +5594,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.10.1" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" +checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" dependencies = [ "cfg-if 1.0.0", - "fastrand 2.1.0", - "rustix 0.38.34", - "windows-sys 0.52.0", + "fastrand", + "once_cell", + "rustix", + "windows-sys 0.59.0", ] [[package]] @@ -5829,22 +5636,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.59" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.59" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66" +checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.79", ] [[package]] @@ -5855,22 +5662,21 @@ checksum = "3bf63baf9f5039dadc247375c29eb13706706cfde997d0330d05aa63a77d8820" [[package]] name = "thread-id" -version = "4.2.1" +version = "4.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0ec81c46e9eb50deaa257be2f148adf052d1fb7701cfd55ccfab2525280b70b" +checksum = 
"cfe8f25bbdd100db7e1d34acf7fd2dc59c4bf8f7483f505eaa7d4f12f76cc0ea" dependencies = [ "libc", "winapi", ] [[package]] -name = "thread_local" -version = "1.1.8" +name = "thread-tree" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +checksum = "ffbd370cb847953a25954d9f63e14824a36113f8c72eecf6eccef5dc4b45d630" dependencies = [ - "cfg-if 1.0.0", - "once_cell", + "crossbeam-channel", ] [[package]] @@ -5918,9 +5724,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.6.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" dependencies = [ "tinyvec_macros", ] @@ -5933,68 +5739,48 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.37.0" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" +checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" dependencies = [ "backtrace", "bytes", "libc", - "mio", - "num_cpus", + "mio 1.0.2", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.7", + "socket2", "tokio-macros", - "windows-sys 0.48.0", -] - -[[package]] -name = "tokio-io-timeout" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" -dependencies = [ - "pin-project-lite", - "tokio", + "windows-sys 0.52.0", ] [[package]] name = "tokio-macros" -version = "2.2.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", -] - -[[package]] -name = "tokio-native-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" -dependencies = [ - "native-tls", - "tokio", + "syn 2.0.79", ] [[package]] name = "tokio-rustls" -version = "0.24.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ "rustls", + "rustls-pki-types", "tokio", ] [[package]] name = "tokio-stream" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ "futures-core", "pin-project-lite", @@ -6003,23 +5789,25 @@ dependencies = [ [[package]] name = "tokio-tungstenite" -version = "0.21.0" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c83b561d025642014097b66e6c1bb422783339e0909e4429cde4749d1990bc38" +checksum = "c6989540ced10490aaf14e6bad2e3d33728a2813310a0c71d1574304c49631cd" dependencies = [ "futures-util", "log", - "native-tls", + "rustls", + "rustls-pki-types", "tokio", - "tokio-native-tls", + 
"tokio-rustls", "tungstenite", + "webpki-roots", ] [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ "bytes", "futures-core", @@ -6030,18 +5818,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" -dependencies = [ - "serde", -] - -[[package]] -name = "toml" -version = "0.8.12" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9dd1545e8208b4a5af1aa9bbd0b4cf7e9ea08fabc5d0a5c67fcaafa17433aa3" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" dependencies = [ "serde", "serde_spanned", @@ -6051,20 +5830,20 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.5" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.22.12" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3328d4f68a705b2a4498da1d580585d39a6510f98318a2cec3018a7ec61ddef" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.2.6", + "indexmap 2.6.0", "serde", "serde_spanned", "toml_datetime", @@ -6073,46 +5852,50 @@ dependencies = [ [[package]] name = "tonic" -version = "0.10.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d560933a0de61cf715926b9cac824d4c883c2c43142f787595e48280c40a1d0e" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" dependencies = [ "async-stream", "async-trait", - "axum 0.6.20", - "base64 0.21.7", + "axum", + "base64", "bytes", "flate2", - "h2", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.28", + "h2 0.4.6", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.4.1", "hyper-timeout", + "hyper-util", "percent-encoding", "pin-project", "prost", - "rustls", "rustls-pemfile", + "socket2", "tokio", "tokio-rustls", "tokio-stream", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", + "webpki-roots", ] [[package]] name = "tonic-build" -version = "0.10.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d021fc044c18582b9a2408cd0dd05b1596e3ecdb5c4df822bb0183545683889" +checksum = "9557ce109ea773b399c9b9e5dca39294110b74f1f342cb347a80d1fce8c26a11" dependencies = [ "prettyplease", "proc-macro2", "prost-build", + "prost-types", "quote", - "syn 2.0.60", + "syn 2.0.79", ] [[package]] @@ -6126,7 +5909,7 @@ dependencies = [ "indexmap 1.9.3", "pin-project", "pin-project-lite", - "rand 0.8.5", + "rand", "slab", "tokio", "tokio-util", @@ -6136,19 +5919,15 @@ dependencies = [ ] [[package]] -name = "tower-http" -version = "0.4.4" +name = "tower" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" 
dependencies = [ - "bitflags 2.5.0", - "bytes", "futures-core", "futures-util", - "http 0.2.12", - "http-body 0.4.6", - "http-range-header", "pin-project-lite", + "sync_wrapper 0.1.2", "tower-layer", "tower-service", ] @@ -6159,10 +5938,10 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.6.0", "bytes", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", "pin-project-lite", "tower-layer", @@ -6171,15 +5950,15 @@ dependencies = [ [[package]] name = "tower-layer" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name = "tower-service" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" @@ -6187,7 +5966,6 @@ version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ - "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -6201,7 +5979,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.79", ] [[package]] @@ -6211,32 +5989,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", - "valuable", -] - -[[package]] -name = "tracing-log" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" -dependencies = [ - "log", - "once_cell", - "tracing-core", -] - -[[package]] -name = "tracing-subscriber" -version = "0.3.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" -dependencies = [ - "nu-ansi-term", - "sharded-slab", - "smallvec", - "thread_local", - "tracing-core", - "tracing-log", ] [[package]] @@ -6253,9 +6005,9 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "tungstenite" -version = "0.21.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ef1a641ea34f399a848dea702823bbecfb4c486f911735368f1f137cb8257e1" +checksum = "6e2e2ce1e47ed2994fd43b04c8f618008d4cabdd5ee34027cf14f9d918edd9c8" dependencies = [ "byteorder", "bytes", @@ -6263,11 +6015,11 @@ dependencies = [ "http 1.1.0", "httparse", "log", - "native-tls", - "rand 0.8.5", + "rand", + "rustls", + "rustls-pki-types", "sha1", "thiserror", - "url", "utf-8", ] @@ -6288,15 +6040,15 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "unicode-bidi" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] 
name = "unicode-ident" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-linebreak" @@ -6306,24 +6058,24 @@ checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] [[package]] name = "unicode-segmentation" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" [[package]] name = "unicode-width" -version = "0.1.12" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68f5e5f3158ecfd4b8ff6fe086db7c8467a2dfdac97fe420f2b7c4aa97af66d6" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] name = "universal-hash" @@ -6358,9 +6110,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", "idna", @@ -6375,9 +6127,9 @@ checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" [[package]] name = "utf8parse" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" @@ -6385,27 +6137,21 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.14", + "getrandom", ] [[package]] name = "uuid" -version = "1.6.1" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e395fcf16a7a3d8127ec99782007af141946b4795001f876d54fb0d55978560" +checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" dependencies = [ - "getrandom 0.2.14", - "rand 0.8.5", + "getrandom", + "rand", "serde", "wasm-bindgen", ] -[[package]] -name = "valuable" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" - [[package]] name = "value-bag" version = "1.9.0" @@ -6426,9 +6172,9 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "vergen" -version = "8.3.1" +version = "8.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e27d6bdd219887a9eadd19e1c34f32e47fa332301184935c6d9bca26f3cca525" +checksum = "2990d9ea5967266ea0ccf413a4aa5c42a93dbcfda9cb49a97de6931726b12566" dependencies = [ "anyhow", 
"cargo_metadata", @@ -6441,15 +6187,9 @@ dependencies = [ [[package]] name = "version_check" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" - -[[package]] -name = "waker-fn" -version = "1.1.1" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "walkdir" @@ -6470,12 +6210,6 @@ dependencies = [ "try-lock", ] -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -6484,11 +6218,12 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" dependencies = [ "cfg-if 1.0.0", + "once_cell", "serde", "serde_json", "wasm-bindgen-macro", @@ -6496,24 +6231,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.79", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.42" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" +checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -6523,9 +6258,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6533,31 +6268,32 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.79", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" [[package]] name = "wasm-bindgen-test" -version = "0.3.42" +version = "0.3.43" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "d9bf62a58e0780af3e852044583deee40983e5886da43a271dd772379987667b" +checksum = "68497a05fb21143a08a7d24fc81763384a3072ee43c44e86aad1744d6adef9d9" dependencies = [ "console_error_panic_hook", "js-sys", + "minicov", "scoped-tls", "wasm-bindgen", "wasm-bindgen-futures", @@ -6566,25 +6302,34 @@ dependencies = [ [[package]] name = "wasm-bindgen-test-macro" -version = "0.3.42" +version = "0.3.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7f89739351a2e03cb94beb799d47fb2cac01759b40ec441f7de39b00cbf7ef0" +checksum = "4b8220be1fa9e4c889b30fd207d4906657e7e90b12e0e6b0c8b8d8709f5de021" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.79", ] [[package]] name = "web-sys" -version = "0.3.69" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" +checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" dependencies = [ "js-sys", "wasm-bindgen", ] +[[package]] +name = "webpki-roots" +version = "0.26.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "which" version = "4.4.2" @@ -6594,7 +6339,7 @@ dependencies = [ "either", "home", "once_cell", - "rustix 0.38.34", + "rustix", ] [[package]] @@ -6615,11 +6360,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -6628,13 +6373,96 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12342cb4d8e3b046f3d80effd474a7a02447231330ef77d71daa6fbc40681143" +dependencies = [ + "windows-core 0.57.0", + "windows-targets 0.52.6", +] + [[package]] name = "windows-core" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-core" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2ed2439a290666cd67ecce2b0ffaad89c2a56b976b736e6ece670297897832d" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-result 0.1.2", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-implement" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.79", +] + +[[package]] +name = "windows-interface" +version = "0.57.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.79", +] + +[[package]] +name = 
"windows-registry" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +dependencies = [ + "windows-result 0.2.0", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result 0.2.0", + "windows-targets 0.52.6", ] [[package]] @@ -6652,7 +6480,16 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", ] [[package]] @@ -6672,18 +6509,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.5", - "windows_aarch64_msvc 0.52.5", - "windows_i686_gnu 0.52.5", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", "windows_i686_gnullvm", - "windows_i686_msvc 0.52.5", - "windows_x86_64_gnu 0.52.5", - "windows_x86_64_gnullvm 0.52.5", - "windows_x86_64_msvc 0.52.5", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -6694,9 +6531,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" @@ -6706,9 +6543,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" @@ -6718,15 +6555,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.5" +version = "0.52.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" @@ -6736,9 +6573,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" @@ -6748,9 +6585,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" @@ -6760,9 +6597,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" @@ -6772,34 +6609,24 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.7" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14b9415ee827af173ebb3f15f9083df5a122eb93572ec28741fb153356ea2578" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ "memchr", ] -[[package]] -name = "winreg" -version = "0.50.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" -dependencies = [ - "cfg-if 1.0.0", - "windows-sys 0.48.0", -] - [[package]] name = "workflow-chrome" -version = "0.12.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "109b6289f65b3e1cdfa6f2d9e8eb454453d5763c5061350e2300473c48d91b99" +checksum = "1e0c0dfbc178cb7c3a47bd2aabf6902364d2db7e4c4f5b0dad57b75d78c6fe1f" dependencies = [ "cfg-if 1.0.0", "chrome-sys", @@ -6812,23 +6639,24 @@ dependencies = [ [[package]] name = "workflow-core" -version = "0.12.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcea01cb6122ac3f20dc14f8e4104e2c0cd9c718c17ddb3fc115f9b2ed99f9ae" +checksum = 
"a1d67bbe225ea90aa6979167f28935275506696ac867661e218893d3a42e1666" dependencies = [ - "async-channel 2.2.1", + "async-channel 2.3.1", "async-std", "borsh", "bs58", "cfg-if 1.0.0", "chrono", "dirs", - "faster-hex 0.9.0", + "faster-hex", "futures", - "getrandom 0.2.14", + "getrandom", "instant", "js-sys", - "rand 0.8.5", + "rand", + "rlimit", "serde", "serde-wasm-bindgen", "thiserror", @@ -6839,13 +6667,14 @@ dependencies = [ "wasm-bindgen-futures", "web-sys", "workflow-core-macros", + "workflow-log", ] [[package]] name = "workflow-core-macros" -version = "0.12.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe24820a62e2b544c75c000cff72781383495a0e05157ec3e29b2abafe1ca2cb" +checksum = "65659ed208b0066a9344142218abda353eb6c6cc1fc3ae4808b750c560de004b" dependencies = [ "convert_case 0.6.0", "parse-variants", @@ -6860,9 +6689,9 @@ dependencies = [ [[package]] name = "workflow-dom" -version = "0.12.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91264d4e789f23c6730c2f3adede04a24b6a9eb9797f9d4ab23de370ba04c27f" +checksum = "503bba85907753c960ddfd73b4e79bffadf521cc3c992ef2b2a29fd3af09a957" dependencies = [ "futures", "js-sys", @@ -6878,9 +6707,9 @@ dependencies = [ [[package]] name = "workflow-http" -version = "0.12.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5b191def1625c3aa5e7d62d1ebbbb3e639113a4a2f122418e4cf8d3379374f8" +checksum = "a3c654c7395e448001c658309377a44a8c3d7c28c7acb30e9babbaeacb575bb0" dependencies = [ "cfg-if 1.0.0", "reqwest", @@ -6894,9 +6723,9 @@ dependencies = [ [[package]] name = "workflow-log" -version = "0.12.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "077a8f720aa45c8cd867de1ccc73e068c4084d9fea46d11be7697a108e6a00ba" +checksum = "64bf52c539193f219b7a79eb0c7c5f6c222ccf9b95c5e0bd59e924feb762256f" dependencies = [ "cfg-if 1.0.0", "console", @@ -6910,9 +6739,9 @@ dependencies = [ [[package]] name = "workflow-macro-tools" -version = "0.12.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5a8af8b8951fa0cf94df4057b8cf583e067a525d3d997370db7797f33ba201f" +checksum = "085d3045d5ca780fb589d230030e34fec962b3638d6c69806a72a7d7d1affea4" dependencies = [ "convert_case 0.6.0", "parse-variants", @@ -6923,9 +6752,9 @@ dependencies = [ [[package]] name = "workflow-node" -version = "0.12.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7748eb6c76779993ed7f4457356d6b57f48f97f9e264c64c3405098330bcb8c7" +checksum = "9b85c9add43b5da3bed3d0d6d92eb3a2c5986c0ae65c7c3f5189876c19648154" dependencies = [ "borsh", "futures", @@ -6944,17 +6773,17 @@ dependencies = [ [[package]] name = "workflow-nw" -version = "0.12.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "010fff3468303b39fb0d5d267847a3d293ed083afbf83f4184fb1a749be56010" +checksum = "a2dd8c77686e6456be8e92237daaa88ad31546974e04514a09b1a38f812530ef" dependencies = [ - "ahash 0.8.11", + "ahash", "async-trait", "borsh", "futures", "js-sys", "nw-sys", - "rand 0.8.5", + "rand", "serde", "serde-wasm-bindgen", "thiserror", @@ -6968,9 +6797,9 @@ dependencies = [ [[package]] name = "workflow-panic-hook" -version = "0.12.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71c1ed51290daf255e5fd83dfe6bd754b108e371b971afbb5c5fd1ea8fe148af" 
+checksum = "74c76ca8b459e4f0c949f06ce2d45565a6769748e83ca7064d36671bbd67b4da" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen", @@ -6993,20 +6822,20 @@ dependencies = [ [[package]] name = "workflow-rpc" -version = "0.12.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14784fbad27d0403fc752d835c4c4683cfc6af970a484ea83f40ce7ad6dc7745" +checksum = "ec4235eb167f0bef3bcbdf0c578823a0105ab5303115e3b2afb4d526e2498b08" dependencies = [ - "ahash 0.8.11", + "ahash", "async-std", "async-trait", "borsh", "downcast-rs", "futures", "futures-util", - "getrandom 0.2.14", + "getrandom", "manual_future", - "rand 0.8.5", + "rand", "serde", "serde_json", "thiserror", @@ -7023,9 +6852,9 @@ dependencies = [ [[package]] name = "workflow-rpc-macros" -version = "0.12.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c372e99d1336a137b907274a3c50fc195e30141c87fc6da4dba54e7d4b09b8ec" +checksum = "f048ca6b1c551f468c3c0c829f958e83dd15b20138b5466bb617ffde500e8cf4" dependencies = [ "parse-variants", "proc-macro-error", @@ -7034,17 +6863,28 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "workflow-serializer" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64679db6856852a472caff4ce869e3ecebe291fbccc9406e9643eb5951a0904a" +dependencies = [ + "ahash", + "borsh", + "serde", +] + [[package]] name = "workflow-store" -version = "0.12.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "762861614298160b9205302bec4f2b7eb45853413d10a90ad8edca44bafc324b" +checksum = "d161c4b844eee479f81306f2526266f9608a663e0a679d9fc0572ee15c144e06" dependencies = [ "async-std", - "base64 0.21.7", + "base64", "cfg-if 1.0.0", "chrome-sys", - "faster-hex 0.9.0", + "faster-hex", "filetime", "home", "js-sys", @@ -7064,9 +6904,9 @@ dependencies = [ [[package]] name = "workflow-task" -version = "0.12.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4023e2598734e04aa4e968a4dd1cd2b5d0c344edc38b40970926d5742f5afa0" +checksum = "3d1a90743bb4d3f68606cb4e9a78551a53399ebc35ddba981cbb56bf2b31940a" dependencies = [ "futures", "thiserror", @@ -7076,9 +6916,9 @@ dependencies = [ [[package]] name = "workflow-task-macros" -version = "0.12.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "057801365ce04c520a2a694bc5bfdf1784f1a33fff97af4cd735f94eb12947b1" +checksum = "7ecf6be36b52dc1e16d11b55f717d9ec2fec5804aff7f392af591933ba4af45e" dependencies = [ "convert_case 0.6.0", "parse-variants", @@ -7092,9 +6932,9 @@ dependencies = [ [[package]] name = "workflow-terminal" -version = "0.12.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "895c236dd5cf493e01fc31733c4687b3e67032f610d594ce3b8e5cafd14eaf33" +checksum = "75b64a2ecf68442edf844c3138f0b78e1398cfe4279540f94cc51b4afb885e5b" dependencies = [ "async-std", "async-trait", @@ -7121,9 +6961,9 @@ dependencies = [ [[package]] name = "workflow-terminal-macros" -version = "0.12.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb1fe67beb12d31f2e69715898aa32abd2349ffc8fe0555617f0d77500cebc56" +checksum = "7bf96dca7d1847a74d7566c5620610d1accc854032308489105b80c08ebf525f" dependencies = [ "convert_case 0.6.0", "parse-variants", @@ -7137,12 +6977,12 @@ dependencies = [ [[package]] name = "workflow-wasm" -version = "0.12.1" 
+version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93ffbd1de665304ba6040a1ab4e0867fd9174446491d257bc6a1474ae25d4a6c" +checksum = "799e5fbf266e0fffb5c24d6103735eb2b94bb31f93b664b91eaaf63b4f959804" dependencies = [ "cfg-if 1.0.0", - "faster-hex 0.9.0", + "faster-hex", "futures", "js-sys", "serde", @@ -7158,9 +6998,9 @@ dependencies = [ [[package]] name = "workflow-wasm-macros" -version = "0.12.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "082644f52215ecc86b4b8a20a763e482adee52c338208ade268f47fe25eb07ca" +checksum = "40237c65ecff78dbfedb13985e33f802a31f6f7de72dff12a6674fcdcf601822" dependencies = [ "js-sys", "proc-macro-error", @@ -7172,12 +7012,12 @@ dependencies = [ [[package]] name = "workflow-websocket" -version = "0.12.1" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6967baf2bd85deb2a014a32d34c1664ded9333e10d11d43ffc179fa09cc55db8" +checksum = "515483a047477c91b5142e1090cce0afc21a0139d9c0c06ea42f0d3dbf3a6fcd" dependencies = [ - "ahash 0.8.11", - "async-channel 2.2.1", + "ahash", + "async-channel 2.3.1", "async-std", "async-trait", "cfg-if 1.0.0", @@ -7201,9 +7041,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.20" +version = "0.8.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791978798f0597cfc70478424c2b4fdc2b7a8024aaff78497ef00f24ef674193" +checksum = "af4e2e2f7cba5a093896c1e150fbfe177d1883e7448200efb81d40b9d339ef26" [[package]] name = "xmltree" @@ -7216,41 +7056,42 @@ dependencies = [ [[package]] name = "xxhash-rust" -version = "0.8.10" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927da81e25be1e1a2901d59b81b37dd2efd1fc9c9345a55007f09bf5a2d3ee03" +checksum = "6a5cbf750400958819fb6178eaa83bee5cd9c29a26a40cc241df8c70fdd46984" [[package]] name = "zerocopy" -version = "0.7.33" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "087eca3c1eaf8c47b94d02790dd086cd594b912d2043d4de4bfdd466b3befb7c" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ + "byteorder", "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.33" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f4b6c273f496d8fd4eaf18853e6b448760225dc030ff2c485a786859aea6393" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.79", ] [[package]] name = "zeroize" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" [[package]] name = "zstd-sys" -version = "2.0.10+zstd.1.5.6" +version = "2.0.13+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c253a4914af5bafc8fa8c86ee400827e83cf6ec01195ec1f1ed8441bf00d65aa" +checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index 283981df88..dd5eb31320 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,6 +10,7 @@ members = [ "wallet/wasm", "wallet/bip32", "wallet/keys", + "wallet/pskt", "consensus", "consensus/core", "consensus/client", @@ -35,12 +36,12 @@ 
members = [ "rpc/grpc/core", "rpc/grpc/client", "rpc/grpc/server", - "rpc/wrpc/resolver", "rpc/wrpc/server", "rpc/wrpc/client", "rpc/wrpc/proxy", "rpc/wrpc/wasm", "rpc/wrpc/examples/subscriber", + "rpc/wrpc/examples/simple_client", "mining", "mining/errors", "protocol/p2p", @@ -61,8 +62,8 @@ members = [ ] [workspace.package] -rust-version = "1.78.0" -version = "0.14.1" +rust-version = "1.81.0" +version = "0.15.2" authors = ["Kaspa developers"] license = "ISC" repository = "https://github.com/kaspanet/rusty-kaspa" @@ -79,62 +80,61 @@ include = [ ] [workspace.dependencies] -# kaspa-testing-integration = { version = "0.14.1", path = "testing/integration" } -kaspa-addresses = { version = "0.14.1", path = "crypto/addresses" } -kaspa-addressmanager = { version = "0.14.1", path = "components/addressmanager" } -kaspa-bip32 = { version = "0.14.1", path = "wallet/bip32" } -kaspa-resolver = { version = "0.14.1", path = "rpr/wrpc/resolver" } -kaspa-cli = { version = "0.14.1", path = "cli" } -kaspa-connectionmanager = { version = "0.14.1", path = "components/connectionmanager" } -kaspa-consensus = { version = "0.14.1", path = "consensus" } -kaspa-consensus-core = { version = "0.14.1", path = "consensus/core" } -kaspa-consensus-client = { version = "0.14.1", path = "consensus/client" } -kaspa-consensus-notify = { version = "0.14.1", path = "consensus/notify" } -kaspa-consensus-wasm = { version = "0.14.1", path = "consensus/wasm" } -kaspa-consensusmanager = { version = "0.14.1", path = "components/consensusmanager" } -kaspa-core = { version = "0.14.1", path = "core" } -kaspa-daemon = { version = "0.14.1", path = "daemon" } -kaspa-database = { version = "0.14.1", path = "database" } -kaspa-grpc-client = { version = "0.14.1", path = "rpc/grpc/client" } -kaspa-grpc-core = { version = "0.14.1", path = "rpc/grpc/core" } -kaspa-grpc-server = { version = "0.14.1", path = "rpc/grpc/server" } -kaspa-hashes = { version = "0.14.1", path = "crypto/hashes" } -kaspa-index-core = { version = "0.14.1", path = "indexes/core" } -kaspa-index-processor = { version = "0.14.1", path = "indexes/processor" } -kaspa-math = { version = "0.14.1", path = "math" } -kaspa-merkle = { version = "0.14.1", path = "crypto/merkle" } -kaspa-metrics-core = { version = "0.14.1", path = "metrics/core" } -kaspa-mining = { version = "0.14.1", path = "mining" } -kaspa-mining-errors = { version = "0.14.1", path = "mining/errors" } -kaspa-muhash = { version = "0.14.1", path = "crypto/muhash" } -kaspa-notify = { version = "0.14.1", path = "notify" } -kaspa-p2p-flows = { version = "0.14.1", path = "protocol/flows" } -kaspa-p2p-lib = { version = "0.14.1", path = "protocol/p2p" } -kaspa-perf-monitor = { version = "0.14.1", path = "metrics/perf_monitor" } -kaspa-pow = { version = "0.14.1", path = "consensus/pow" } -kaspa-rpc-core = { version = "0.14.1", path = "rpc/core" } -kaspa-rpc-macros = { version = "0.14.1", path = "rpc/macros" } -kaspa-rpc-service = { version = "0.14.1", path = "rpc/service" } -kaspa-txscript = { version = "0.14.1", path = "crypto/txscript" } -kaspa-txscript-errors = { version = "0.14.1", path = "crypto/txscript/errors" } -kaspa-utils = { version = "0.14.1", path = "utils" } -kaspa-utils-tower = { version = "0.14.1", path = "utils/tower" } -kaspa-utxoindex = { version = "0.14.1", path = "indexes/utxoindex" } -kaspa-wallet = { version = "0.14.1", path = "wallet/native" } -kaspa-wallet-cli-wasm = { version = "0.14.1", path = "wallet/wasm" } -kaspa-wallet-keys = { version = "0.14.1", path = "wallet/keys" } -kaspa-wallet-core = { 
version = "0.14.1", path = "wallet/core" } -kaspa-wallet-macros = { version = "0.14.1", path = "wallet/macros" } -kaspa-wasm = { version = "0.14.1", path = "wasm" } -kaspa-wasm-core = { version = "0.14.1", path = "wasm/core" } -kaspa-wrpc-client = { version = "0.14.1", path = "rpc/wrpc/client" } -kaspa-wrpc-core = { version = "0.14.1", path = "rpc/wrpc/core" } -kaspa-wrpc-proxy = { version = "0.14.1", path = "rpc/wrpc/proxy" } -kaspa-wrpc-server = { version = "0.14.1", path = "rpc/wrpc/server" } -kaspa-wrpc-wasm = { version = "0.14.1", path = "rpc/wrpc/wasm" } -kaspa-wrpc-example-subscriber = { version = "0.14.1", path = "rpc/wrpc/examples/subscriber" } -kaspad = { version = "0.14.1", path = "kaspad" } -kaspa-alloc = { version = "0.14.1", path = "utils/alloc" } +# kaspa-testing-integration = { version = "0.15.2", path = "testing/integration" } +kaspa-addresses = { version = "0.15.2", path = "crypto/addresses" } +kaspa-addressmanager = { version = "0.15.2", path = "components/addressmanager" } +kaspa-bip32 = { version = "0.15.2", path = "wallet/bip32" } +kaspa-cli = { version = "0.15.2", path = "cli" } +kaspa-connectionmanager = { version = "0.15.2", path = "components/connectionmanager" } +kaspa-consensus = { version = "0.15.2", path = "consensus" } +kaspa-consensus-core = { version = "0.15.2", path = "consensus/core" } +kaspa-consensus-client = { version = "0.15.2", path = "consensus/client" } +kaspa-consensus-notify = { version = "0.15.2", path = "consensus/notify" } +kaspa-consensus-wasm = { version = "0.15.2", path = "consensus/wasm" } +kaspa-consensusmanager = { version = "0.15.2", path = "components/consensusmanager" } +kaspa-core = { version = "0.15.2", path = "core" } +kaspa-daemon = { version = "0.15.2", path = "daemon" } +kaspa-database = { version = "0.15.2", path = "database" } +kaspa-grpc-client = { version = "0.15.2", path = "rpc/grpc/client" } +kaspa-grpc-core = { version = "0.15.2", path = "rpc/grpc/core" } +kaspa-grpc-server = { version = "0.15.2", path = "rpc/grpc/server" } +kaspa-hashes = { version = "0.15.2", path = "crypto/hashes" } +kaspa-index-core = { version = "0.15.2", path = "indexes/core" } +kaspa-index-processor = { version = "0.15.2", path = "indexes/processor" } +kaspa-math = { version = "0.15.2", path = "math" } +kaspa-merkle = { version = "0.15.2", path = "crypto/merkle" } +kaspa-metrics-core = { version = "0.15.2", path = "metrics/core" } +kaspa-mining = { version = "0.15.2", path = "mining" } +kaspa-mining-errors = { version = "0.15.2", path = "mining/errors" } +kaspa-muhash = { version = "0.15.2", path = "crypto/muhash" } +kaspa-notify = { version = "0.15.2", path = "notify" } +kaspa-p2p-flows = { version = "0.15.2", path = "protocol/flows" } +kaspa-p2p-lib = { version = "0.15.2", path = "protocol/p2p" } +kaspa-perf-monitor = { version = "0.15.2", path = "metrics/perf_monitor" } +kaspa-pow = { version = "0.15.2", path = "consensus/pow" } +kaspa-rpc-core = { version = "0.15.2", path = "rpc/core" } +kaspa-rpc-macros = { version = "0.15.2", path = "rpc/macros" } +kaspa-rpc-service = { version = "0.15.2", path = "rpc/service" } +kaspa-txscript = { version = "0.15.2", path = "crypto/txscript" } +kaspa-txscript-errors = { version = "0.15.2", path = "crypto/txscript/errors" } +kaspa-utils = { version = "0.15.2", path = "utils" } +kaspa-utils-tower = { version = "0.15.2", path = "utils/tower" } +kaspa-utxoindex = { version = "0.15.2", path = "indexes/utxoindex" } +kaspa-wallet = { version = "0.15.2", path = "wallet/native" } +kaspa-wallet-cli-wasm = { version = 
"0.15.2", path = "wallet/wasm" } +kaspa-wallet-keys = { version = "0.15.2", path = "wallet/keys" } +kaspa-wallet-pskt = { version = "0.15.2", path = "wallet/pskt" } +kaspa-wallet-core = { version = "0.15.2", path = "wallet/core" } +kaspa-wallet-macros = { version = "0.15.2", path = "wallet/macros" } +kaspa-wasm = { version = "0.15.2", path = "wasm" } +kaspa-wasm-core = { version = "0.15.2", path = "wasm/core" } +kaspa-wrpc-client = { version = "0.15.2", path = "rpc/wrpc/client" } +kaspa-wrpc-proxy = { version = "0.15.2", path = "rpc/wrpc/proxy" } +kaspa-wrpc-server = { version = "0.15.2", path = "rpc/wrpc/server" } +kaspa-wrpc-wasm = { version = "0.15.2", path = "rpc/wrpc/wasm" } +kaspa-wrpc-example-subscriber = { version = "0.15.2", path = "rpc/wrpc/examples/subscriber" } +kaspad = { version = "0.15.2", path = "kaspad" } +kaspa-alloc = { version = "0.15.2", path = "utils/alloc" } # external aes = "0.8.3" @@ -145,11 +145,12 @@ async-channel = "2.0.0" async-std = { version = "1.12.0", features = ['attributes'] } async-stream = "0.3.5" async-trait = "0.1.74" -base64 = "0.21.5" +base64 = "0.22.1" bincode = { version = "1.3.3", default-features = false } blake2b_simd = "1.0.2" -borsh = { version = "0.9.1", features = ["rc"] } # please keep this fixed +borsh = { version = "1.5.1", features = ["derive", "rc"] } bs58 = { version = "0.5.0", features = ["check"], default-features = false } +bytes = "1.7.1" cc = "1.0.83" cfb-mode = "0.8.2" cfg-if = "1.0.0" @@ -160,41 +161,47 @@ criterion = { version = "0.5.1", default-features = false } crossbeam-channel = "0.5.8" ctrlc = "3.4.1" crypto_box = { version = "0.9.1", features = ["chacha20"] } -dashmap = "5.5.3" +dashmap = "6.0.1" derivative = "2.2.0" +derive_builder = "0.20.0" derive_more = "0.99.17" +# derive_more = { version = "1.0.0", features = ["full"] } dhat = "0.3.2" dirs = "5.0.1" downcast = "0.11.0" downcast-rs = "1.2.0" -duration-string = "0.3.0" -enum-primitive-derive = "0.2.2" +duration-string = "0.4.0" +enum-primitive-derive = "0.3.0" event-listener = "2.5.3" # TODO "3.0.1" evpkdf = "0.2.0" -faster-hex = "0.6.1" # TODO "0.8.1" - fails unit tests +faster-hex = "0.9.0" fixedstr = { version = "0.5.4", features = ["serde"] } flate2 = "1.0.28" futures = { version = "0.3.29" } -futures-util = { version = "0.3.29", default-features = false, features = [ - "alloc", -] } +futures-util = { version = "0.3.29", default-features = false, features = ["alloc"] } getrandom = { version = "0.2.10", features = ["js"] } -h2 = "0.3.21" -heapless = "0.7.16" +h2 = "0.4.6" +# h2 = "0.3.21" +heapless = "0.8.0" +# heapless = "0.7.16" hex = { version = "0.4.3", features = ["serde"] } hex-literal = "0.4.1" +hexplay = "0.3.0" hmac = { version = "0.12.1", default-features = false } home = "0.5.5" +http-body = "1.0.1" +http-body-util = "0.1.2" igd-next = { version = "0.14.2", features = ["aio_tokio"] } indexmap = "2.1.0" intertrait = "0.2.2" ipnet = "2.9.0" -itertools = "0.11.0" -js-sys = "0.3.67" +itertools = "0.13.0" +js-sys = "0.3.70" keccak = "0.1.4" -local-ip-address = "0.5.6" +local-ip-address = "0.6.1" log = "0.4.20" log4rs = "1.2.0" +mac_address = "1.1.7" malachite-base = "0.4.4" malachite-nz = "0.4.4" md-5 = "0.10.6" @@ -207,7 +214,7 @@ parking_lot = "0.12.1" paste = "1.0.14" pbkdf2 = "0.12.2" portable-atomic = { version = "1.5.1", features = ["float"] } -prost = "0.12.1" +prost = "0.13.2" rand = "0.8.5" rand_chacha = "0.3.1" rand_core = { version = "0.6.4", features = ["std"] } @@ -216,8 +223,9 @@ rayon = "1.8.0" regex = "1.10.2" ripemd = { version = "0.1.3", 
default-features = false } rlimit = "0.10.1" -rocksdb = "0.21.0" -secp256k1 = { version = "0.28.2", features = [ +rocksdb = "0.22.0" +rv = "0.16.4" +secp256k1 = { version = "0.29.0", features = [ "global-context", "rand-std", "serde", @@ -228,6 +236,7 @@ serde = { version = "1.0.190", features = ["derive", "rc"] } serde_bytes = "0.11.12" serde_json = "1.0.107" serde_repr = "0.1.18" +serde-value = "0.7.0" serde-wasm-bindgen = "0.6.1" sha1 = "0.10.6" sha2 = "0.10.8" @@ -235,52 +244,52 @@ sha3 = "0.10.8" slugify-rs = "0.0.3" smallvec = { version = "1.11.1", features = ["serde"] } sorted-insert = "0.2.3" -statest = "0.2.2" -statrs = "0.13.0" # TODO "0.16.0" subtle = { version = "2.5.0", default-features = false } +sysinfo = "0.31.2" tempfile = "3.8.1" textwrap = "0.16.0" thiserror = "1.0.50" tokio = { version = "1.33.0", features = ["sync", "rt-multi-thread"] } tokio-stream = "0.1.14" toml = "0.8.8" -tonic = { version = "0.10.2", features = ["tls", "gzip", "transport"] } -tonic-build = { version = "0.10.2", features = ["prost"] } +tonic = { version = "0.12.3", features = ["tls-webpki-roots", "gzip", "transport"] } +tonic-build = { version = "0.12.3", features = ["prost"] } triggered = "0.1.2" uuid = { version = "1.5.0", features = ["v4", "fast-rng", "serde"] } -wasm-bindgen = { version = "0.2.92", features = ["serde-serialize"] } -wasm-bindgen-futures = "0.4.40" -wasm-bindgen-test = "0.3.37" -web-sys = "0.3.67" +wasm-bindgen = { version = "0.2.93", features = ["serde-serialize"] } +wasm-bindgen-futures = "0.4.43" +wasm-bindgen-test = "0.3.43" +web-sys = "0.3.70" xxhash-rust = { version = "0.8.7", features = ["xxh3"] } zeroize = { version = "1.6.0", default-features = false, features = ["alloc"] } pin-project-lite = "0.2.13" -tower-http = { version = "0.4.4", features = [ +tower-http = { version = "0.5.2", features = [ "map-response-body", "map-request-body", ] } -tower = "0.4.7" -hyper = "0.14.27" +tower = "0.5.1" chrono = "0.4.31" -indexed_db_futures = "0.4.1" +indexed_db_futures = "0.5.0" # workflow dependencies that are not a part of core libraries # workflow-perf-monitor = { path = "../../../workflow-perf-monitor-rs" } workflow-perf-monitor = "0.0.2" nw-sys = "0.1.6" +rustls = { version = "0.23", default-features = false, features = ["ring"] } # workflow dependencies -workflow-core = { version = "0.12.1" } -workflow-d3 = { version = "0.12.1" } -workflow-dom = { version = "0.12.1" } -workflow-http = { version = "0.12.1" } -workflow-log = { version = "0.12.1" } -workflow-node = { version = "0.12.1" } -workflow-nw = { version = "0.12.1" } -workflow-rpc = { version = "0.12.1" } -workflow-store = { version = "0.12.1" } -workflow-terminal = { version = "0.12.1" } -workflow-wasm = { version = "0.12.1" } +workflow-core = { version = "0.18.0" } +workflow-d3 = { version = "0.18.0" } +workflow-dom = { version = "0.18.0" } +workflow-http = { version = "0.18.0", default-features = false, features = ["rustls-tls-webpki-roots", "http2", "charset", "macos-system-configuration"] } +workflow-log = { version = "0.18.0" } +workflow-node = { version = "0.18.0" } +workflow-nw = { version = "0.18.0" } +workflow-rpc = { version = "0.18.0", default-features = false, features = ["rustls-tls-webpki-roots"] } +workflow-serializer = { version = "0.18.0" } +workflow-store = { version = "0.18.0" } +workflow-terminal = { version = "0.18.0" } +workflow-wasm = { version = "0.18.0" } # if below is enabled, this means that there is an ongoing work # on the workflow-rs crate. 
This requires that you clone workflow-rs @@ -288,24 +297,28 @@ workflow-wasm = { version = "0.12.1" } # workflow-core = { path = "../workflow-rs/core" } # workflow-d3 = { path = "../workflow-rs/d3" } # workflow-dom = { path = "../workflow-rs/dom" } -# workflow-http = { path = "../workflow-rs/http" } +# # Same features as default but with rustls-tls-webpki-roots instead of native-tls +# workflow-http = { path = "../workflow-rs/http", default-features = false, features = ["rustls-tls-webpki-roots", "http2", "charset", "macos-system-configuration"] } # workflow-log = { path = "../workflow-rs/log" } # workflow-node = { path = "../workflow-rs/node" } # workflow-nw = { path = "../workflow-rs/nw" } -# workflow-rpc = { path = "../workflow-rs/rpc" } +# workflow-rpc = { path = "../workflow-rs/rpc", default-features = false, features = ["rustls-tls-webpki-roots"] } +# workflow-serializer = { path = "../workflow-rs/serializer" } # workflow-store = { path = "../workflow-rs/store" } # workflow-terminal = { path = "../workflow-rs/terminal" } # workflow-wasm = { path = "../workflow-rs/wasm" } + # --- # workflow-core = { git = "https://github.com/workflow-rs/workflow-rs.git", branch = "master" } # workflow-d3 = { git = "https://github.com/workflow-rs/workflow-rs.git", branch = "master" } # workflow-dom = { git = "https://github.com/workflow-rs/workflow-rs.git", branch = "master" } -# workflow-http = { git = "https://github.com/workflow-rs/workflow-rs.git", branch = "master" } +# workflow-http = { git = "https://github.com/workflow-rs/workflow-rs.git", branch = "master", default-features = false, features = ["rustls-tls-webpki-roots", "http2", "charset", "macos-system-configuration"] } # workflow-log = { git = "https://github.com/workflow-rs/workflow-rs.git", branch = "master" } # workflow-node = { git = "https://github.com/workflow-rs/workflow-rs.git", branch = "master" } # workflow-nw = { git = "https://github.com/workflow-rs/workflow-rs.git", branch = "master" } -# workflow-rpc = { git = "https://github.com/workflow-rs/workflow-rs.git", branch = "master" } +# workflow-rpc = { git = "https://github.com/workflow-rs/workflow-rs.git", branch = "master", default-features = false, features = ["rustls-tls-webpki-roots"] } +# workflow-serializer = { git = "https://github.com/workflow-rs/workflow-rs.git", branch = "master" } # workflow-store = { git = "https://github.com/workflow-rs/workflow-rs.git", branch = "master" } # workflow-terminal = { git = "https://github.com/workflow-rs/workflow-rs.git", branch = "master" } # workflow-wasm = { git = "https://github.com/workflow-rs/workflow-rs.git", branch = "master" } @@ -322,3 +335,5 @@ inherits = "release" debug = true strip = false +[workspace.lints.clippy] +empty_docs = "allow" diff --git a/README.md b/README.md index 5644e06dea..d9066efa84 100644 --- a/README.md +++ b/README.md @@ -3,22 +3,24 @@ Welcome to the Rust-based implementation of the Kaspa full-node and its ancillary libraries. The contained node release serves as a drop-in replacement to the established Golang node and to date is the recommended node software for the Kaspa network, introducing developers to the possibilities of Rust in the Kaspa network's context. -We invite developers and blockchain enthusiasts to collaborate, test, and optimize our Rust implementation. Each line of code here is an opportunity to contribute to the open-source blockchain movement, shaping a platform designed for scalability and speed without compromising on decentralization. 
+We invite developers and blockchain enthusiasts to collaborate, test, and optimize our Rust implementation. Each line of code here is an opportunity to contribute to the open-source blockchain movement, shaping a platform designed for scalability and speed without compromising on security and decentralization. Your feedback, contributions, and issue reports will be integral to evolving this codebase and to its continued maturation as a reliable node in the Kaspa network. +The default branch of this repository is `master`, and new contributions are constantly merged into it. For a stable branch corresponding to the latest stable release, please pull and compile the `stable` branch. + ## Installation
Building on Linux - + 1. Install general prerequisites ```bash - sudo apt install curl git build-essential libssl-dev pkg-config + sudo apt install curl git build-essential libssl-dev pkg-config ``` 2. Install Protobuf (required for gRPC) - + ```bash sudo apt install protobuf-compiler libprotobuf-dev #Required for gRPC ``` @@ -34,8 +36,8 @@ Your feedback, contributions, and issue reports will be integral to evolving thi llvm python3-clang ``` 3. Install the [rust toolchain](https://rustup.rs/) - - If you already have rust installed, update it by running: `rustup update` + + If you already have rust installed, update it by running: `rustup update` 4. Install wasm-pack ```bash cargo install wasm-pack @@ -43,7 +45,7 @@ Your feedback, contributions, and issue reports will be integral to evolving thi 4. Install wasm32 target ```bash rustup target add wasm32-unknown-unknown - ``` + ``` 5. Clone the repo ```bash git clone https://github.com/kaspanet/rusty-kaspa @@ -53,7 +55,7 @@ Your feedback, contributions, and issue reports will be integral to evolving thi -
+
Building on Windows @@ -61,18 +63,18 @@ Your feedback, contributions, and issue reports will be integral to evolving thi 2. Install [Protocol Buffers](https://github.com/protocolbuffers/protobuf/releases/download/v21.10/protoc-21.10-win64.zip) and add the `bin` directory to your `Path` - + 3. Install [LLVM-15.0.6-win64.exe](https://github.com/llvm/llvm-project/releases/download/llvmorg-15.0.6/LLVM-15.0.6-win64.exe) Add the `bin` directory of the LLVM installation (`C:\Program Files\LLVM\bin`) to PATH - + set `LIBCLANG_PATH` environment variable to point to the `bin` directory as well **IMPORTANT:** Due to C++ dependency configuration issues, LLVM `AR` installation on Windows may not function correctly when switching between WASM and native C++ code compilation (native `RocksDB+secp256k1` vs WASM32 builds of `secp256k1`). Unfortunately, manually setting `AR` environment variable also confuses C++ build toolchain (it should not be set for native but should be set for WASM32 targets). Currently, the best way to address this, is as follows: after installing LLVM on Windows, go to the target `bin` installation directory and copy or rename `LLVM_AR.exe` to `AR.exe`. - + 4. Install the [rust toolchain](https://rustup.rs/) - - If you already have rust installed, update it by running: `rustup update` + + If you already have rust installed, update it by running: `rustup update` 5. Install wasm-pack ```bash cargo install wasm-pack @@ -80,16 +82,16 @@ Your feedback, contributions, and issue reports will be integral to evolving thi 6. Install wasm32 target ```bash rustup target add wasm32-unknown-unknown - ``` + ``` 7. Clone the repo ```bash git clone https://github.com/kaspanet/rusty-kaspa cd rusty-kaspa ``` -
+
-
+
Building on Mac OS @@ -97,8 +99,8 @@ Your feedback, contributions, and issue reports will be integral to evolving thi ```bash brew install protobuf ``` - 2. Install llvm. - + 2. Install llvm. + The default XCode installation of `llvm` does not support WASM build targets. To build WASM on MacOS you need to install `llvm` from homebrew (at the time of writing, the llvm version for MacOS is 16.0.1). ```bash @@ -131,8 +133,8 @@ To build WASM on MacOS you need to install `llvm` from homebrew (at the time of source ~/.zshrc ``` 3. Install the [rust toolchain](https://rustup.rs/) - - If you already have rust installed, update it by running: `rustup update` + + If you already have rust installed, update it by running: `rustup update` 4. Install wasm-pack ```bash cargo install wasm-pack @@ -140,14 +142,14 @@ To build WASM on MacOS you need to install `llvm` from homebrew (at the time of 4. Install wasm32 target ```bash rustup target add wasm32-unknown-unknown - ``` + ``` 5. Clone the repo ```bash git clone https://github.com/kaspanet/rusty-kaspa cd rusty-kaspa ``` -
+
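As a side note on what the WASM toolchain in the build sections above is for: `wasm-pack` drives `wasm32-unknown-unknown` builds of the workspace's WASM crates (such as `kaspa-wasm` and `kaspa-wasm-core`). A minimal, purely illustrative `wasm-bindgen` export - not code from this repository - looks like:

```rust
// Purely illustrative wasm-bindgen export; the real bindings live in the
// kaspa-wasm / kaspa-wasm-core crates of this workspace.
use wasm_bindgen::prelude::*;

#[wasm_bindgen]
pub fn greeting(name: &str) -> String {
    format!("Hello, {name}")
}
```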
@@ -182,7 +184,8 @@ To build WASM on MacOS you need to install `llvm` from homebrew (at the time of Kaspa CLI + Wallet -`kaspa-cli` crate provides cli-driven RPC interface to the node and a + +`kaspa-cli` crate provides a cli-driven RPC interface to the node and a terminal interface to the Rusty Kaspa Wallet runtime. These wallets are compatible with WASM SDK Wallet API and Kaspa NG projects. @@ -233,7 +236,7 @@ cargo run --release --bin kaspad -- --testnet ``` **Testnet 11** - + For participation in the 10BPS test network (TN11), see the following detailed [guide](docs/testnet11.md).
@@ -247,7 +250,7 @@ cargo run --release --bin kaspad -- --configfile /path/to/configfile.toml # or cargo run --release --bin kaspad -- -C /path/to/configfile.toml ``` - - The config file should be a list of \<arg\> = \<value\> separated by newlines. + - The config file should be a list of \<arg\> = \<value\> separated by newlines. - Whitespace around the `=` is fine, `arg=value` and `arg = value` are both parsed correctly. - Values with special characters like `.` or `=` will require quoting the value i.e \<arg\> = "\<value\>". - Arguments with multiple values should be surrounded with brackets like `addpeer = ["10.0.0.1", "1.2.3.4"]`. @@ -295,17 +298,17 @@ wRPC **Sidenote:** Rusty Kaspa integrates an optional wRPC - subsystem. wRPC is a high-performance, platform-neutral, Rust-centric, WebSocket-framed RPC + subsystem. wRPC is a high-performance, platform-neutral, Rust-centric, WebSocket-framed RPC implementation that can use [Borsh](https://borsh.io/) and JSON protocol encoding. - JSON protocol messaging - is similar to JSON-RPC 1.0, but differs from the specification due to server-side + JSON protocol messaging + is similar to JSON-RPC 1.0, but differs from the specification due to server-side notifications. [Borsh](https://borsh.io/) encoding is meant for inter-process communication. When using [Borsh](https://borsh.io/) - both client and server should be built from the same codebase. + both client and server should be built from the same codebase. - JSON protocol is based on + JSON protocol is based on Kaspa data structures and is data-structure-version agnostic. You can connect to the JSON endpoint using any WebSocket library. Built-in RPC clients for JavaScript and TypeScript capable of running in web browsers and Node.js are available as a part of @@ -314,27 +317,21 @@
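As a companion to the wRPC notes above, here is a rough Rust sketch of a Borsh-encoded client connection using `kaspa-wrpc-client`. It is illustrative only: the endpoint URL is a placeholder, and the exact `KaspaRpcClient::new` argument list shown here is an assumption that may differ between releases.

```rust
// Illustrative sketch only - not code from this repository. The constructor
// arguments (encoding, optional URL, optional resolver, network id,
// subscription context) are assumptions based on the 0.15-era API surface.
use kaspa_wrpc_client::prelude::*;
use kaspa_wrpc_client::result::Result;

async fn connect_borsh() -> Result<()> {
    let client = KaspaRpcClient::new(
        WrpcEncoding::Borsh,
        Some("wss://node.example.org"), // hypothetical endpoint
        Some(Resolver::default()),
        Some(NetworkId::new(NetworkType::Mainnet)),
        None, // subscription context
    )?;
    client.connect(Some(ConnectOptions::default())).await?;
    println!("{:?}", client.get_server_info().await?);
    Ok(())
}
```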
- -
- - ## Benchmarking & Testing -
+
Simulation framework (Simpa) -Logging in `kaspad` and `simpa` can be [filtered](https://docs.rs/env_logger/0.10.0/env_logger/#filtering-results) by either: - -The current codebase supports a full in-process network simulation, building an actual DAG over virtual time with virtual delay and benchmarking validation time (following the simulation generation). +The current codebase supports a full in-process network simulation, building an actual DAG over virtual time with virtual delay and benchmarking validation time (following the simulation generation). To see the available commands -```bash +```bash cargo run --release --bin simpa -- --help -``` +``` -The following command will run a simulation to produce 1000 blocks with communication delay of 2 seconds and 8 BPS (blocks per second) while attempting to fill each block with up to 200 transactions. +The following command will run a simulation to produce 1000 blocks with communication delay of 2 seconds and 8 BPS (blocks per second) while attempting to fill each block with up to 200 transactions. ```bash cargo run --release --bin simpa -- -t=200 -d=2 -b=8 -n=1000 @@ -345,7 +342,7 @@ cargo run --release --bin simpa -- -t=200 -d=2 -b=8 -n=1000 -
+
Heap Profiling @@ -360,7 +357,7 @@ It will produce `{bin-name}-heap.json` file in the root of the workdir, that can
-
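For context on the heap profiling described above: the workspace's `dhat` dependency is typically wired in roughly as follows. This is an illustrative sketch, not code from this repository; the `heap` feature name mirrors the build flags mentioned in this section.

```rust
// Illustrative sketch: dhat-based heap profiling gated behind a `heap`
// feature. With dhat 0.3, dropping the Profiler writes a JSON heap profile
// that can then be loaded into dhat's viewer.
#[cfg(feature = "heap")]
#[global_allocator]
static ALLOC: dhat::Alloc = dhat::Alloc;

fn main() {
    #[cfg(feature = "heap")]
    let _profiler = dhat::Profiler::new_heap();

    // ... node startup would go here ...
}
```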
+
Tests @@ -382,12 +379,21 @@ cd rusty-kaspa cargo nextest run --release ``` +
+ +
+ +Lints +```bash +cd rusty-kaspa +./check +```
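One note ahead of the Benchmarks section below: `cargo bench` runs criterion-based benchmarks (the workspace pins `criterion = "0.5.1"`). A minimal skeleton - the benchmark name and workload are placeholders, not taken from the repository - looks like:

```rust
// Minimal criterion skeleton; names and workload are placeholders.
use criterion::{criterion_group, criterion_main, Criterion};

fn sum_benchmark(c: &mut Criterion) {
    c.bench_function("sum_1k", |b| b.iter(|| (0u64..1_000).sum::<u64>()));
}

criterion_group!(benches, sum_benchmark);
criterion_main!(benches);
```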
-
+
Benchmarks @@ -398,7 +404,7 @@ cargo bench
-
+
Logging @@ -413,6 +419,3 @@ Logging in `kaspad` and `simpa` can be [filtered](https://docs.rs/env_logger/0.1 In this command we set the `loglevel` to `INFO`.
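The `--loglevel` directives above follow the env_logger filter syntax linked in this section. Purely for illustration - kaspad wires up its own logger, and the target name below is just an example crate from this workspace - the same directive string can be exercised with env_logger directly:

```rust
// Illustrative only: parsing an env_logger-style filter string such as the
// ones accepted by kaspad's --loglevel flag.
fn init_logging() {
    env_logger::Builder::new().parse_filters("info,kaspa_core=trace").init();
}
```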
- - - diff --git a/cli/Cargo.toml b/cli/Cargo.toml index f2c80a5fa3..60a43002a0 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -33,6 +33,7 @@ downcast.workspace = true faster-hex.workspace = true futures.workspace = true js-sys.workspace = true +hex.workspace = true kaspa-addresses.workspace = true kaspa-bip32.workspace = true kaspa-consensus-core.workspace = true @@ -43,6 +44,7 @@ kaspa-rpc-core.workspace = true kaspa-utils.workspace = true kaspa-wallet-core.workspace = true kaspa-wallet-keys.workspace = true +kaspa-wallet-pskt.workspace = true kaspa-wrpc-client.workspace = true nw-sys.workspace = true pad.workspace = true @@ -80,5 +82,5 @@ features = [ [target.'cfg(not(target_arch = "wasm32"))'.dependencies] tokio.workspace = true -[lints.clippy] -empty_docs = "allow" +[lints] +workspace = true diff --git a/cli/src/cli.rs b/cli/src/cli.rs index 4f562e7494..5ca1997ea3 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -6,9 +6,10 @@ use crate::modules::node::Node; use crate::notifier::{Notification, Notifier}; use crate::result::Result; use kaspa_daemon::{DaemonEvent, DaemonKind, Daemons}; +use kaspa_wallet_core::account::Account; use kaspa_wallet_core::rpc::DynRpcApi; use kaspa_wallet_core::storage::{IdT, PrvKeyDataInfo}; -use kaspa_wrpc_client::KaspaRpcClient; +use kaspa_wrpc_client::{KaspaRpcClient, Resolver}; use workflow_core::channel::*; use workflow_core::time::Instant; use workflow_log::*; @@ -102,7 +103,7 @@ impl KaspaCli { } pub async fn try_new_arc(options: Options) -> Result> { - let wallet = Arc::new(Wallet::try_new(Wallet::local_store()?, None, None)?); + let wallet = Arc::new(Wallet::try_new(Wallet::local_store()?, Some(Resolver::default()), None)?); let kaspa_cli = Arc::new(KaspaCli { term: Arc::new(Mutex::new(None)), @@ -311,7 +312,9 @@ impl KaspaCli { Events::SyncState { sync_state } => { if sync_state.is_synced() && this.wallet().is_open() { - if let Err(error) = this.wallet().reload(false).await { + let guard = this.wallet().guard(); + let guard = guard.lock().await; + if let Err(error) = this.wallet().reload(false, &guard).await { terrorln!(this, "Unable to reload wallet: {error}"); } } @@ -383,8 +386,11 @@ impl KaspaCli { record } => { if !this.is_mutted() || (this.is_mutted() && this.flags.get(Track::Pending)) { + let guard = this.wallet.guard(); + let guard = guard.lock().await; + let include_utxos = this.flags.get(Track::Utxo); - let tx = record.format_transaction_with_state(&this.wallet,Some("reorg"),include_utxos).await; + let tx = record.format_transaction_with_state(&this.wallet,Some("reorg"),include_utxos, &guard).await; tx.iter().for_each(|line|tprintln!(this,"{NOTIFY} {line}")); } }, @@ -393,8 +399,11 @@ impl KaspaCli { } => { // Pending and coinbase stasis fall under the same `Track` category if !this.is_mutted() || (this.is_mutted() && this.flags.get(Track::Pending)) { + let guard = this.wallet.guard(); + let guard = guard.lock().await; + let include_utxos = this.flags.get(Track::Utxo); - let tx = record.format_transaction_with_state(&this.wallet,Some("stasis"),include_utxos).await; + let tx = record.format_transaction_with_state(&this.wallet,Some("stasis"),include_utxos, &guard).await; tx.iter().for_each(|line|tprintln!(this,"{NOTIFY} {line}")); } }, @@ -411,8 +420,11 @@ impl KaspaCli { record } => { if !this.is_mutted() || (this.is_mutted() && this.flags.get(Track::Pending)) { + let guard = this.wallet.guard(); + let guard = guard.lock().await; + let include_utxos = this.flags.get(Track::Utxo); - let tx = 
record.format_transaction_with_state(&this.wallet,Some("pending"),include_utxos).await; + let tx = record.format_transaction_with_state(&this.wallet,Some("pending"),include_utxos, &guard).await; tx.iter().for_each(|line|tprintln!(this,"{NOTIFY} {line}")); } }, @@ -420,8 +432,11 @@ impl KaspaCli { record } => { if !this.is_mutted() || (this.is_mutted() && this.flags.get(Track::Tx)) { + let guard = this.wallet.guard(); + let guard = guard.lock().await; + let include_utxos = this.flags.get(Track::Utxo); - let tx = record.format_transaction_with_state(&this.wallet,Some("confirmed"),include_utxos).await; + let tx = record.format_transaction_with_state(&this.wallet,Some("confirmed"),include_utxos, &guard).await; tx.iter().for_each(|line|tprintln!(this,"{NOTIFY} {line}")); } }, @@ -532,6 +547,9 @@ impl KaspaCli { } async fn select_account_with_args(&self, autoselect: bool) -> Result> { + let guard = self.wallet.guard(); + let guard = guard.lock().await; + let mut selection = None; let mut list_by_key = Vec::<(Arc, Vec<(usize, Arc)>)>::new(); @@ -540,7 +558,7 @@ impl KaspaCli { let mut keys = self.wallet.keys().await?; while let Some(key) = keys.try_next().await? { let mut prv_key_accounts = Vec::new(); - let mut accounts = self.wallet.accounts(Some(key.id)).await?; + let mut accounts = self.wallet.accounts(Some(key.id), &guard).await?; while let Some(account) = accounts.next().await { let account = account?; prv_key_accounts.push((flat_list.len(), account.clone())); @@ -550,6 +568,16 @@ impl KaspaCli { list_by_key.push((key.clone(), prv_key_accounts)); } + let mut watch_accounts = Vec::<(usize, Arc)>::new(); + let mut unfiltered_accounts = self.wallet.accounts(None, &guard).await?; + + while let Some(account) = unfiltered_accounts.try_next().await? { + if account.feature().is_some() { + watch_accounts.push((flat_list.len(), account.clone())); + flat_list.push(account.clone()); + } + } + if flat_list.is_empty() { return Err(Error::NoAccounts); } else if autoselect && flat_list.len() == 1 { @@ -569,6 +597,16 @@ impl KaspaCli { }) }); + if !watch_accounts.is_empty() { + tprintln!(self, "• watch-only"); + } + + watch_accounts.iter().for_each(|(seq, account)| { + let seq = style(seq.to_string()).cyan(); + let ls_string = account.get_list_string().unwrap_or_else(|err| panic!("{err}")); + tprintln!(self, " {seq}: {ls_string}"); + }); + tprintln!(self); let range = if flat_list.len() > 1 { format!("[{}..{}] ", 0, flat_list.len() - 1) } else { "".to_string() }; @@ -643,18 +681,35 @@ impl KaspaCli { } pub async fn list(&self) -> Result<()> { + let guard = self.wallet.guard(); + let guard = guard.lock().await; + let mut keys = self.wallet.keys().await?; tprintln!(self); while let Some(key) = keys.try_next().await? { tprintln!(self, "• {}", style(&key).dim()); - let mut accounts = self.wallet.accounts(Some(key.id)).await?; + + let mut accounts = self.wallet.accounts(Some(key.id), &guard).await?; while let Some(account) = accounts.try_next().await? { let receive_address = account.receive_address()?; tprintln!(self, " • {}", account.get_list_string()?); tprintln!(self, " {}", style(receive_address.to_string()).blue()); } } + + let mut unfiltered_accounts = self.wallet.accounts(None, &guard).await?; + let mut feature_header_printed = false; + while let Some(account) = unfiltered_accounts.try_next().await? 
{ + if let Some(feature) = account.feature() { + if !feature_header_printed { + tprintln!(self, "{}", style("• watch-only").dim()); + feature_header_printed = true; + } + tprintln!(self, " • {}", account.get_list_string().unwrap()); + tprintln!(self, " • {}", style(feature).cyan()); + } + } tprintln!(self); Ok(()) diff --git a/cli/src/error.rs b/cli/src/error.rs index a1701be355..23bb261243 100644 --- a/cli/src/error.rs +++ b/cli/src/error.rs @@ -72,6 +72,9 @@ pub enum Error { #[error("wallet secret is required")] WalletSecretRequired, + #[error("watch-only wallet kpub is required")] + WalletBip32WatchXpubRequired, + #[error("wallet secrets do not match")] WalletSecretMatch, @@ -84,6 +87,9 @@ pub enum Error { #[error("key data not found")] KeyDataNotFound, + #[error("no key data to export for watch-only account")] + WatchOnlyAccountNoKeyData, + #[error("no accounts found, please create an account to continue")] NoAccounts, @@ -122,6 +128,12 @@ pub enum Error { #[error(transparent)] KaspaWalletKeys(#[from] kaspa_wallet_keys::error::Error), + + #[error(transparent)] + PskbLockScriptSigError(#[from] kaspa_wallet_pskt::error::Error), + + #[error("To hex serialization error")] + PskbSerializeToHexError, } impl Error { diff --git a/cli/src/extensions/transaction.rs b/cli/src/extensions/transaction.rs index 70d73615ac..415aa7a34b 100644 --- a/cli/src/extensions/transaction.rs +++ b/cli/src/extensions/transaction.rs @@ -2,6 +2,7 @@ use crate::imports::*; use kaspa_consensus_core::tx::{TransactionInput, TransactionOutpoint}; use kaspa_wallet_core::storage::Binding; use kaspa_wallet_core::storage::{TransactionData, TransactionKind, TransactionRecord}; +use kaspa_wallet_core::wallet::WalletGuard; use workflow_log::style; pub trait TransactionTypeExtension { @@ -48,8 +49,14 @@ impl TransactionTypeExtension for TransactionKind { #[async_trait] pub trait TransactionExtension { - async fn format_transaction(&self, wallet: &Arc, include_utxos: bool) -> Vec; - async fn format_transaction_with_state(&self, wallet: &Arc, state: Option<&str>, include_utxos: bool) -> Vec; + async fn format_transaction(&self, wallet: &Arc, include_utxos: bool, guard: &WalletGuard) -> Vec; + async fn format_transaction_with_state( + &self, + wallet: &Arc, + state: Option<&str>, + include_utxos: bool, + guard: &WalletGuard, + ) -> Vec; async fn format_transaction_with_args( &self, wallet: &Arc, @@ -58,17 +65,24 @@ pub trait TransactionExtension { include_utxos: bool, history: bool, account: Option>, + guard: &WalletGuard, ) -> Vec; } #[async_trait] impl TransactionExtension for TransactionRecord { - async fn format_transaction(&self, wallet: &Arc, include_utxos: bool) -> Vec { - self.format_transaction_with_args(wallet, None, None, include_utxos, false, None).await + async fn format_transaction(&self, wallet: &Arc, include_utxos: bool, guard: &WalletGuard) -> Vec { + self.format_transaction_with_args(wallet, None, None, include_utxos, false, None, guard).await } - async fn format_transaction_with_state(&self, wallet: &Arc, state: Option<&str>, include_utxos: bool) -> Vec { - self.format_transaction_with_args(wallet, state, None, include_utxos, false, None).await + async fn format_transaction_with_state( + &self, + wallet: &Arc, + state: Option<&str>, + include_utxos: bool, + guard: &WalletGuard, + ) -> Vec { + self.format_transaction_with_args(wallet, state, None, include_utxos, false, None, guard).await } async fn format_transaction_with_args( @@ -79,6 +93,7 @@ impl TransactionExtension for TransactionRecord { include_utxos: 
bool, history: bool, account: Option<Arc<dyn Account>>, + guard: &WalletGuard, ) -> Vec<String> { let TransactionRecord { id, binding, block_daa_score, transaction_data, .. } = self; @@ -88,7 +103,7 @@ impl TransactionExtension for TransactionRecord { let account = if let Some(account) = account { Some(account) } else { - wallet.get_account_by_id(account_id).await.ok().flatten() + wallet.get_account_by_id(account_id, guard).await.ok().flatten() }; if let Some(account) = account { diff --git a/cli/src/imports.rs index a158128046..24de4b0ddd 100644 --- a/cli/src/imports.rs +++ b/cli/src/imports.rs @@ -14,7 +14,7 @@ pub use kaspa_utils::hex::*; pub use kaspa_wallet_core::compat::*; pub use kaspa_wallet_core::prelude::*; pub use kaspa_wallet_core::settings::{DefaultSettings, SettingsStore, WalletSettings}; -pub use kaspa_wallet_core::utils::*; +pub use kaspa_wrpc_client::prelude::*; pub use pad::PadStr; pub use regex::Regex; pub use separator::Separatable; diff --git a/cli/src/modules/account.rs index be627047e0..5848d43fba 100644 --- a/cli/src/modules/account.rs +++ b/cli/src/modules/account.rs @@ -177,7 +177,44 @@ impl Account { } _ => { tprintln!(ctx, "unknown account import type: '{import_kind}'"); - tprintln!(ctx, "supported import types are: 'mnemonic' or 'legacy-data'\r\n"); + tprintln!(ctx, "supported import types are: 'mnemonic', 'legacy-data' or 'multisig-watch'\r\n"); + return Ok(()); + } + } + } + "watch" => { + if argv.is_empty() { + tprintln!(ctx, "usage: 'account watch <type> [account name]'"); + tprintln!(ctx, ""); + tprintln!(ctx, "examples:"); + tprintln!(ctx, ""); + ctx.term().help( + &[ + ("account watch bip32", "Import an extended public key for a watch-only bip32 account"), + ("account watch multisig", "Import extended public keys for a watch-only multisig account"), + ], + None, + )?; + + return Ok(()); + } + + let watch_kind = argv.remove(0); + + let account_name = argv.first().map(|name| name.trim()).filter(|name| !name.is_empty()).map(|name| name.to_string()); + + let account_name = account_name.as_deref(); + + match watch_kind.as_ref() { + "bip32" => { + wizards::account::bip32_watch(&ctx, account_name).await?; + } + "multisig" => { + wizards::account::multisig_watch(&ctx, account_name).await?; + } + _ => { + tprintln!(ctx, "unknown account watch type: '{watch_kind}'"); + tprintln!(ctx, "supported watch types are: 'bip32' or 'multisig'\r\n"); + return Ok(()); + } + } diff --git a/cli/src/modules/connect.rs index 26173256b4..a755915d4f 100644 --- a/cli/src/modules/connect.rs +++ b/cli/src/modules/connect.rs @@ -1,5 +1,4 @@ use crate::imports::*; -use kaspa_wrpc_client::Resolver; #[derive(Default, Handler)] #[help("Connect to a Kaspa network")] @@ -15,11 +14,11 @@ impl Connect { let (is_public, url) = match arg_or_server_address.as_deref() { Some("public") => { tprintln!(ctx, "Connecting to a public node"); - (true, Resolver::default().fetch(WrpcEncoding::Borsh, network_id).await.map_err(|e| e.to_string())?.url) + (true, Resolver::default().get_url(WrpcEncoding::Borsh, network_id).await.map_err(|e| e.to_string())?) } None => { tprintln!(ctx, "No server set, connecting to a public node"); - (true, Resolver::default().fetch(WrpcEncoding::Borsh, network_id).await.map_err(|e| e.to_string())?.url) + (true, Resolver::default().get_url(WrpcEncoding::Borsh, network_id).await.map_err(|e| e.to_string())?) } Some(url) => { (false, wrpc_client.parse_url_with_network_type(url.to_string(), network_id.into()).map_err(|e| e.to_string())?)
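// Editor's sketch (illustrative, not part of the patch): the connect.rs hunk
// above captures a small resolver API change - Resolver::fetch(..) returned a
// node descriptor whose `.url` field had to be read, whereas the new
// Resolver::get_url(..) returns the URL directly. The wrapper function below
// is hypothetical; the get_url invocation mirrors the diff above.
async fn public_node_url(network_id: NetworkId) -> Result<String, String> {
    Resolver::default().get_url(WrpcEncoding::Borsh, network_id).await.map_err(|e| e.to_string())
}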
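// Editor's sketch (illustrative): the cli.rs and transaction.rs hunks earlier
// in this patch thread a wallet guard through most wallet accessors. The
// calling pattern is always the same: obtain the guard, lock it, then pass a
// reference into guarded APIs such as accounts(), reload() and
// get_account_by_id(). The function below is hypothetical; the guard usage is
// copied from the diffs above (assumes std::sync::Arc, futures::TryStreamExt
// and kaspa_wallet_core::prelude::* are in scope).
async fn list_accounts(wallet: &Arc<Wallet>) -> Result<()> {
    let guard = wallet.guard();
    let guard = guard.lock().await;

    let mut accounts = wallet.accounts(None, &guard).await?;
    while let Some(account) = accounts.try_next().await? {
        println!("{}", account.get_list_string()?);
    }
    Ok(())
}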
@@ -27,13 +26,23 @@ impl Connect { }; if is_public { - tpara!( - ctx, - "Please note that default public nodes are community-operated and \ - accessing them may expose your IP address to different node providers. \ - Consider running your own node for better privacy. \ - ", - ); + static WARNING: AtomicBool = AtomicBool::new(false); + if !WARNING.load(Ordering::Relaxed) { + WARNING.store(true, Ordering::Relaxed); + + tprintln!(ctx); + + tpara!( + ctx, + "Please note that public node infrastructure is operated by contributors and \ + accessing it may expose your IP address to different node providers. \ + Consider running your own node for better privacy. \ + ", + ); + tprintln!(ctx); + tpara!(ctx, "Please do not connect to public nodes directly as they are load-balanced."); + tprintln!(ctx); + } } let options = ConnectOptions { diff --git a/cli/src/modules/details.rs b/cli/src/modules/details.rs index ed44a9c825..896ecd3e87 100644 --- a/cli/src/modules/details.rs +++ b/cli/src/modules/details.rs @@ -27,6 +27,18 @@ impl Details { tprintln!(ctx.term(), "{:>4}{}", "", style(address.to_string()).blue()); }); + if let Some(xpub_keys) = account.xpub_keys() { + if account.feature().is_some() { + if let Some(feature) = account.feature() { + tprintln!(ctx.term(), "Feature: {}", style(feature).cyan()); + } + tprintln!(ctx.term(), "Extended public keys:"); + xpub_keys.iter().for_each(|xpub| { + tprintln!(ctx.term(), "{:>4}{}", "", style(ctx.wallet().network_format_xpub(xpub)).dim()); + }); + } + } + Ok(()) } } diff --git a/cli/src/modules/export.rs b/cli/src/modules/export.rs index 8a6b26e577..006cd7d36a 100644 --- a/cli/src/modules/export.rs +++ b/cli/src/modules/export.rs @@ -1,5 +1,5 @@ use crate::imports::*; -use kaspa_wallet_core::account::{multisig::MultiSig, Account, MULTISIG_ACCOUNT_KIND}; +use kaspa_wallet_core::account::{multisig::MultiSig, Account, BIP32_ACCOUNT_KIND, MULTISIG_ACCOUNT_KIND}; #[derive(Default, Handler)] #[help("Export transactions, a wallet or a private key")] @@ -32,8 +32,8 @@ impl Export { async fn export_multisig_account(ctx: Arc, account: Arc) -> Result<()> { match &account.prv_key_data_ids() { - None => Err(Error::KeyDataNotFound), - Some(v) if v.is_empty() => Err(Error::KeyDataNotFound), + None => Err(Error::WatchOnlyAccountNoKeyData), + Some(v) if v.is_empty() => Err(Error::WatchOnlyAccountNoKeyData), Some(prv_key_data_ids) => { let wallet_secret = Secret::new(ctx.term().ask(true, "Enter wallet password: ").await?.trim().as_bytes().to_vec()); if wallet_secret.as_ref().is_empty() { @@ -45,26 +45,38 @@ async fn export_multisig_account(ctx: Arc, account: Arc) -> let prv_key_data_store = ctx.store().as_prv_key_data_store()?; let mut generated_xpub_keys = Vec::with_capacity(prv_key_data_ids.len()); + for (id, prv_key_data_id) in prv_key_data_ids.iter().enumerate() { let prv_key_data = prv_key_data_store.load_key_data(&wallet_secret, prv_key_data_id).await?.unwrap(); let mnemonic = prv_key_data.as_mnemonic(None).unwrap().unwrap(); + let xpub_key: kaspa_bip32::ExtendedPublicKey = + prv_key_data.create_xpub(None, MULTISIG_ACCOUNT_KIND.into(), 0).await?; // todo it can be done concurrently + + tprintln!(ctx, ""); + tprintln!(ctx, "extended public key {}:", id + 1); + tprintln!(ctx, ""); + tprintln!(ctx, "{}", ctx.wallet().network_format_xpub(&xpub_key)); + tprintln!(ctx, ""); + tprintln!(ctx, "mnemonic {}:", id + 1); tprintln!(ctx, ""); tprintln!(ctx, "{}", mnemonic.phrase()); tprintln!(ctx, ""); - let xpub_key = prv_key_data.create_xpub(None, MULTISIG_ACCOUNT_KIND.into(), 
0).await?; // todo it can be done concurrently generated_xpub_keys.push(xpub_key); } - - let additional = account.xpub_keys().iter().filter(|xpub| !generated_xpub_keys.contains(xpub)); - additional.enumerate().for_each(|(idx, xpub)| { - if idx == 0 { - tprintln!(ctx, "additional xpubs: "); - } - tprintln!(ctx, "{xpub}"); - }); + let test = account.xpub_keys(); + + if let Some(keys) = test { + let additional = keys.iter().filter(|item| !generated_xpub_keys.contains(item)); + additional.enumerate().for_each(|(idx, xpub)| { + if idx == 0 { + tprintln!(ctx, "additional xpubs: "); + } + tprintln!(ctx, "{}", ctx.wallet().network_format_xpub(xpub)); + }); + } Ok(()) } } @@ -94,6 +106,13 @@ async fn export_single_key_account(ctx: Arc, account: Arc let prv_key_data = keydata.payload.decrypt(payment_secret.as_ref())?; let mnemonic = prv_key_data.as_ref().as_mnemonic()?; + let xpub_key = keydata.create_xpub(None, BIP32_ACCOUNT_KIND.into(), 0).await?; // todo it can be done concurrently + + tprintln!(ctx, "extended public key:"); + tprintln!(ctx, ""); + tprintln!(ctx, "{}", ctx.wallet().network_format_xpub(&xpub_key)); + tprintln!(ctx, ""); + match mnemonic { None => { tprintln!(ctx, "mnemonic is not available for this private key"); diff --git a/cli/src/modules/guide.txt b/cli/src/modules/guide.txt index ecc0f8d968..b993622ba8 100644 --- a/cli/src/modules/guide.txt +++ b/cli/src/modules/guide.txt @@ -1,49 +1,23 @@ -Please note - this is an alpha version of the softeware, not all features are currently functional. - -If using a dekstop or a web version of this software, you can use Ctrl+'+' or Ctrl+'-' (Command on MacOS) to -change the terminal font size. - -If using a desktop version, you can use Ctrl+M (Command on MacOS) to bring up metrics. - -Type `help` to see the complete list of commands. `exit` to exit this application. -On Windows you can use `Alt+F4` and on MacOS `Command+Q` to exit. - ---- - Before you start, you must configure the default network setting. There are currently -3 networks available. `mainnet`, `testnet-10` and `testnet-11`. While this software -is in alpha stage, you should not use it on the mainnet. If you wish to experiment, -you should select `testnet-10` by entering `network testnet-10` +3 networks available. `mainnet`, `testnet-10` and `testnet-11`. If you wish to experiment, +you should select `testnet-11` by entering `network testnet-11` The `server` command configures the target server. You can connect to any Rusty Kaspa -node that has User RPC enabled with `--rpclisten-borsh=public`. If you are running the node -from within KOS, it is locked to listen to a local IP address. +node that has wRPC enabled with `--rpclisten-borsh=0.0.0.0`. If the server setting +is set to 'public' the node will connect to the public node infrastructure. Both network and server values are stored in the application settings and are used when running a local node or connecting to a remote node. --- -You can use `node start` to start the node. Type `node` to see an overview of commands. -`node mute` toggles node log output (you can also use `node logs`). `node select` allows -you to choose between locally installed flavors (if running in the development environment). -You can also specify an absolute path by typing `node select `. - -For developers: `node select` scans 'target' folder for the debug and release builds -so you can switch between builds at runtime using the `node select` command. - -Once you node is running, you can connect to it using the `connect` command. 
- -When starting the node and the `server` setting is configured to your local host, -the `connect` action will occure automatically. - -`wallet create []` Use theis command to create a local wallet. The argument +`wallet create []` Use this command to create a local wallet. The argument is optional (the default wallet name is "kaspa") and allows you to create multiple named wallets. Only one wallet can be opened at a time. Keep in mind that a wallet can have multiple accounts, as such you only need one wallet, unless, for example, you want to separate wallets for personal and business needs (but you can also create isolated accounts within a wallet). -Make sure to record your mnemonic, even if working with a testnet, not to loose your +Make sure to record your mnemonic, even if working with a testnet, not to lose your testnet KAS. `open ` - opens the wallet (the wallet is open automatically after creation). @@ -56,9 +30,6 @@ testnet KAS. `address` - shows your selected account address -Note - you can click on the address to copy it to the clipboard. (When on mainnet, Ctrl+Click on addresses, transactions and -block hashes will open a new browser window with an explorer.) - Before you transact: `mute` option (enabled by default) toggles mute on/off. Mute enables terminal output of internal framework events. Rust and JavaScript/TypeScript applications integrating with this platform are meant to update their state by monitoring event notifications. Mute allows you to see these events in @@ -78,11 +49,6 @@ the selected account to an account named 'pete' (starts with a 'p' letter) `history details` - Show previous account transactions with extended information. -Once your node is synced, you can start the CPU miner. - -`miner start` - Starts the miner. The miner will mine to your currently selected account. (So you need to have a wallet open and an -account selected to start the miner) - `monitor` - A test screen environment that periodically updates account balances. 
`rpc` - Allows you to execute RPC methods against the node (not all methods are currently available) diff --git a/cli/src/modules/history.rs index 299701c51f..8fdf31f4db 100644 --- a/cli/src/modules/history.rs +++ b/cli/src/modules/history.rs @@ -10,6 +10,9 @@ impl History { async fn main(self: Arc<Self>, ctx: &Arc<dyn Context>, mut argv: Vec<String>, _cmd: &str) -> Result<()> { let ctx = ctx.clone().downcast_arc::<KaspaCli>()?; + let guard = ctx.wallet().guard(); + let guard = guard.lock().await; + if argv.is_empty() { self.display_help(ctx, argv).await?; return Ok(()); @@ -34,7 +37,15 @@ impl History { match store.load_single(&binding, &network_id, &txid).await { Ok(tx) => { let lines = tx - .format_transaction_with_args(&ctx.wallet(), None, current_daa_score, true, true, Some(account.clone())) + .format_transaction_with_args( + &ctx.wallet(), + None, + current_daa_score, + true, + true, + Some(account.clone()), + &guard, + ) .await; lines.iter().for_each(|line| tprintln!(ctx, "{line}")); } @@ -116,6 +127,7 @@ impl History { include_utxo, true, Some(account.clone()), + &guard, ) .await; lines.iter().for_each(|line| tprintln!(ctx, "{line}")); diff --git a/cli/src/modules/mod.rs index a6371814f2..7990a9d6e1 100644 --- a/cli/src/modules/mod.rs +++ b/cli/src/modules/mod.rs @@ -26,6 +26,7 @@ pub mod network; pub mod node; pub mod open; pub mod ping; +pub mod pskb; pub mod reload; pub mod rpc; pub mod select; @@ -57,7 +58,7 @@ pub fn register_handlers(cli: &Arc<KaspaCli>) -> Result<()> { cli.handlers(), [ account, address, close, connect, details, disconnect, estimate, exit, export, guide, help, history, rpc, list, miner, - message, monitor, mute, network, node, open, ping, reload, select, send, server, settings, sweep, track, transfer, + message, monitor, mute, network, node, open, ping, pskb, reload, select, send, server, settings, sweep, track, transfer, wallet, // halt, // theme, start, stop diff --git a/cli/src/modules/pskb.rs new file mode 100644 index 0000000000..fd33087c22 --- /dev/null +++ b/cli/src/modules/pskb.rs @@ -0,0 +1,266 @@ +#![allow(unused_imports)] + +use crate::imports::*; +use kaspa_addresses::Prefix; +use kaspa_consensus_core::tx::{TransactionOutpoint, UtxoEntry}; +use kaspa_wallet_core::account::pskb::finalize_pskt_one_or_more_sig_and_redeem_script; +use kaspa_wallet_pskt::{ + prelude::{lock_script_sig_templating, script_sig_to_address, unlock_utxos_as_pskb, Bundle, Signer, PSKT}, + pskt::Inner, +}; + +#[derive(Default, Handler)] +#[help("Create, sign and broadcast transactions using PSKBs (Partially Signed Kaspa Bundles)")] +pub struct Pskb; + +impl Pskb { + async fn main(self: Arc<Self>, ctx: &Arc<dyn Context>, mut argv: Vec<String>, _cmd: &str) -> Result<()> { + let ctx = ctx.clone().downcast_arc::<KaspaCli>()?; + + if !ctx.wallet().is_open() { + return Err(Error::WalletIsNotOpen); + } + + if argv.is_empty() { + return self.display_help(ctx, argv).await; + } + + let action = argv.remove(0); + + match action.as_str() { + "create" => { + if argv.len() < 2 || argv.len() > 3 { + return self.display_help(ctx, argv).await; + } + let (wallet_secret, payment_secret) = ctx.ask_wallet_secret(None).await?; + let _ = ctx.notifier().show(Notification::Processing).await; + + let address = Address::try_from(argv.first().unwrap().as_str())?; + let amount_sompi = try_parse_required_nonzero_kaspa_as_sompi_u64(argv.get(1))?; + let outputs = PaymentOutputs::from((address, amount_sompi)); + let priority_fee_sompi = try_parse_optional_kaspa_as_sompi_i64(argv.get(2))?.unwrap_or(0); + let abortable = Abortable::default(); + + let
account: Arc = ctx.wallet().account()?; + let signer = account + .pskb_from_send_generator( + outputs.into(), + priority_fee_sompi.into(), + None, + wallet_secret.clone(), + payment_secret.clone(), + &abortable, + ) + .await?; + + match signer.serialize() { + Ok(encoded) => tprintln!(ctx, "{encoded}"), + Err(e) => return Err(e.into()), + } + } + "script" => { + if argv.len() < 2 || argv.len() > 4 { + return self.display_help(ctx, argv).await; + } + let subcommand = argv.remove(0); + let payload = argv.remove(0); + let account = ctx.wallet().account()?; + let receive_address = account.receive_address()?; + let (wallet_secret, payment_secret) = ctx.ask_wallet_secret(None).await?; + let _ = ctx.notifier().show(Notification::Processing).await; + + let script_sig = match lock_script_sig_templating(payload.clone(), Some(&receive_address.payload)) { + Ok(value) => value, + Err(e) => { + terrorln!(ctx, "{}", e.to_string()); + return Err(e.into()); + } + }; + + let script_p2sh = match script_sig_to_address(&script_sig, ctx.wallet().address_prefix()?) { + Ok(p2sh) => p2sh, + Err(e) => { + terrorln!(ctx, "Error generating script address: {}", e.to_string()); + return Err(e.into()); + } + }; + + match subcommand.as_str() { + "lock" => { + let amount_sompi = try_parse_required_nonzero_kaspa_as_sompi_u64(argv.first())?; + let outputs = PaymentOutputs::from((script_p2sh, amount_sompi)); + let priority_fee_sompi = try_parse_optional_kaspa_as_sompi_i64(argv.get(1))?.unwrap_or(0); + let abortable = Abortable::default(); + + let signer = account + .pskb_from_send_generator( + outputs.into(), + priority_fee_sompi.into(), + None, + wallet_secret.clone(), + payment_secret.clone(), + &abortable, + ) + .await?; + + match signer.serialize() { + Ok(encoded) => tprintln!(ctx, "{encoded}"), + Err(e) => return Err(e.into()), + } + } + "unlock" => { + if argv.len() != 1 { + return self.display_help(ctx, argv).await; + } + + // Get locked UTXO set. + let spend_utxos: Vec = + ctx.wallet().rpc_api().get_utxos_by_addresses(vec![script_p2sh.clone()]).await?; + let priority_fee_sompi = try_parse_optional_kaspa_as_sompi_i64(argv.first())?.unwrap_or(0) as u64; + + if spend_utxos.is_empty() { + twarnln!(ctx, "No locked UTXO set found."); + return Ok(()); + } + + let references: Vec<(UtxoEntry, TransactionOutpoint)> = + spend_utxos.iter().map(|entry| (entry.utxo_entry.clone().into(), entry.outpoint.into())).collect(); + + let total_locked_sompi: u64 = spend_utxos.iter().map(|entry| entry.utxo_entry.amount).sum(); + + tprintln!( + ctx, + "{} locked UTXO{} found with total amount of {} KAS", + spend_utxos.len(), + if spend_utxos.len() == 1 { "" } else { "s" }, + sompi_to_kaspa(total_locked_sompi) + ); + + // Sweep UTXO set. + match unlock_utxos_as_pskb(references, &receive_address, script_sig, priority_fee_sompi as u64) { + Ok(pskb) => { + let pskb_hex = pskb.serialize()?; + tprintln!(ctx, "{pskb_hex}"); + } + Err(e) => tprintln!(ctx, "Error generating unlock PSKB: {}", e.to_string()), + } + } + "sign" => { + let pskb = Self::parse_input_pskb(argv.first().unwrap().as_str())?; + + // Sign PSKB using the account's receiver address. 
+ match account.pskb_sign(&pskb, wallet_secret.clone(), payment_secret.clone(), Some(&receive_address)).await { + Ok(signed_pskb) => { + let pskb_pack = String::try_from(signed_pskb)?; + tprintln!(ctx, "{pskb_pack}"); + } + Err(e) => terrorln!(ctx, "{}", e.to_string()), + } + } + "address" => { + tprintln!(ctx, "\r\nP2SH address: {}", script_p2sh); + } + v => { + terrorln!(ctx, "unknown command: '{v}'\r\n"); + return self.display_help(ctx, argv).await; + } + } + } + "sign" => { + if argv.len() != 1 { + return self.display_help(ctx, argv).await; + } + let (wallet_secret, payment_secret) = ctx.ask_wallet_secret(None).await?; + let pskb = Self::parse_input_pskb(argv.first().unwrap().as_str())?; + let account = ctx.wallet().account()?; + match account.pskb_sign(&pskb, wallet_secret.clone(), payment_secret.clone(), None).await { + Ok(signed_pskb) => { + let pskb_pack = String::try_from(signed_pskb)?; + tprintln!(ctx, "{pskb_pack}"); + } + Err(e) => terrorln!(ctx, "{}", e.to_string()), + } + } + "send" => { + if argv.len() != 1 { + return self.display_help(ctx, argv).await; + } + let pskb = Self::parse_input_pskb(argv.first().unwrap().as_str())?; + let account = ctx.wallet().account()?; + match account.pskb_broadcast(&pskb).await { + Ok(sent) => tprintln!(ctx, "Sent transactions {:?}", sent), + Err(e) => terrorln!(ctx, "Send error {:?}", e), + } + } + "debug" => { + if argv.len() != 1 { + return self.display_help(ctx, argv).await; + } + let pskb = Self::parse_input_pskb(argv.first().unwrap().as_str())?; + tprintln!(ctx, "{:?}", pskb); + } + "parse" => { + if argv.len() != 1 { + return self.display_help(ctx, argv).await; + } + let pskb = Self::parse_input_pskb(argv.first().unwrap().as_str())?; + tprintln!(ctx, "{}", pskb.display_format(ctx.wallet().network_id()?, sompi_to_kaspa_string_with_suffix)); + + for (pskt_index, bundle_inner) in pskb.0.iter().enumerate() { + tprintln!(ctx, "PSKT #{:03} finalized check:", pskt_index + 1); + let pskt: PSKT = PSKT::::from(bundle_inner.to_owned()); + + let finalizer = pskt.finalizer(); + + if let Ok(pskt_finalizer) = finalize_pskt_one_or_more_sig_and_redeem_script(finalizer) { + // Verify if extraction is possible. + match pskt_finalizer.extractor() { + Ok(ex) => match ex.extract_tx() { + Ok(_) => tprintln!( + ctx, + " Transaction extracted successfully: PSKT is finalized with a valid script signature." + ), + Err(e) => terrorln!(ctx, " PSKT transaction extraction error: {}", e.to_string()), + }, + Err(_) => twarnln!(ctx, " PSKT not finalized"), + } + } else { + twarnln!(ctx, " PSKT not signed"); + } + } + } + v => { + tprintln!(ctx, "unknown command: '{v}'\r\n"); + return self.display_help(ctx, argv).await; + } + } + Ok(()) + } + + fn parse_input_pskb(input: &str) -> Result { + match Bundle::try_from(input) { + Ok(bundle) => Ok(bundle), + Err(e) => Err(Error::custom(format!("Error while parsing input PSKB {}", e))), + } + } + + async fn display_help(self: Arc, ctx: Arc, _argv: Vec) -> Result<()> { + ctx.term().help( + &[ + ("pskb create
", "Create a PSKB from single send transaction"), + ("pskb sign ", "Sign given PSKB"), + ("pskb send ", "Broadcast bundled transactions"), + ("pskb debug ", "Print PSKB debug view"), + ("pskb parse ", "Print PSKB formatted view"), + ("pskb script lock [priority fee]", "Generate a PSKB with one send transaction to given P2SH payload. Optional public key placeholder in payload: {{pubkey}}"), + ("pskb script unlock ", "Generate a PSKB to unlock UTXOS one by one from given P2SH payload. Fee amount will be applied to every spent UTXO, meaning every transaction. Optional public key placeholder in payload: {{pubkey}}"), + ("pskb script sign ", "Sign all PSKB's P2SH locked inputs"), + ("pskb script sign ", "Sign all PSKB's P2SH locked inputs"), + ("pskb script address ", "Prints P2SH address"), + ], + None, + )?; + + Ok(()) + } +} diff --git a/cli/src/modules/reload.rs b/cli/src/modules/reload.rs index bc1eb717eb..b4c1ed7a52 100644 --- a/cli/src/modules/reload.rs +++ b/cli/src/modules/reload.rs @@ -10,8 +10,12 @@ impl Reload { // workflow_dom::utils::window().location().reload().ok(); let ctx = ctx.clone().downcast_arc::()?; + + let guard = ctx.wallet().guard(); + let guard = guard.lock().await; + tprintln!(ctx, "{}", style("reloading wallet ...").magenta()); - ctx.wallet().reload(true).await?; + ctx.wallet().reload(true, &guard).await?; Ok(()) } diff --git a/cli/src/modules/rpc.rs b/cli/src/modules/rpc.rs index c849154800..cf6bc6bd20 100644 --- a/cli/src/modules/rpc.rs +++ b/cli/src/modules/rpc.rs @@ -1,6 +1,6 @@ use crate::imports::*; use convert_case::{Case, Casing}; -use kaspa_rpc_core::{api::ops::RpcApiOps, *}; +use kaspa_rpc_core::api::ops::RpcApiOps; #[derive(Default, Handler)] #[help("Execute RPC commands against the connected Kaspa node")] @@ -38,19 +38,27 @@ impl Rpc { tprintln!(ctx, "ok"); } RpcApiOps::GetMetrics => { - let result = rpc.get_metrics(true, true, true, true).await?; + let result = rpc.get_metrics(true, true, true, true, true, true).await?; + self.println(&ctx, result); + } + RpcApiOps::GetSystemInfo => { + let result = rpc.get_system_info().await?; + self.println(&ctx, result); + } + RpcApiOps::GetConnections => { + let result = rpc.get_connections(true).await?; self.println(&ctx, result); } RpcApiOps::GetServerInfo => { - let result = rpc.get_server_info_call(GetServerInfoRequest {}).await?; + let result = rpc.get_server_info_call(None, GetServerInfoRequest {}).await?; self.println(&ctx, result); } RpcApiOps::GetSyncStatus => { - let result = rpc.get_sync_status_call(GetSyncStatusRequest {}).await?; + let result = rpc.get_sync_status_call(None, GetSyncStatusRequest {}).await?; self.println(&ctx, result); } RpcApiOps::GetCurrentNetwork => { - let result = rpc.get_current_network_call(GetCurrentNetworkRequest {}).await?; + let result = rpc.get_current_network_call(None, GetCurrentNetworkRequest {}).await?; self.println(&ctx, result); } // RpcApiOps::SubmitBlock => { @@ -62,11 +70,11 @@ impl Rpc { // self.println(&ctx, result); // } RpcApiOps::GetPeerAddresses => { - let result = rpc.get_peer_addresses_call(GetPeerAddressesRequest {}).await?; + let result = rpc.get_peer_addresses_call(None, GetPeerAddressesRequest {}).await?; self.println(&ctx, result); } RpcApiOps::GetSink => { - let result = rpc.get_sink_call(GetSinkRequest {}).await?; + let result = rpc.get_sink_call(None, GetSinkRequest {}).await?; self.println(&ctx, result); } // RpcApiOps::GetMempoolEntry => { @@ -76,12 +84,15 @@ impl Rpc { RpcApiOps::GetMempoolEntries => { // TODO let result = rpc - 
.get_mempool_entries_call(GetMempoolEntriesRequest { include_orphan_pool: true, filter_transaction_pool: true }) + .get_mempool_entries_call( + None, + GetMempoolEntriesRequest { include_orphan_pool: true, filter_transaction_pool: true }, + ) .await?; self.println(&ctx, result); } RpcApiOps::GetConnectedPeerInfo => { - let result = rpc.get_connected_peer_info_call(GetConnectedPeerInfoRequest {}).await?; + let result = rpc.get_connected_peer_info_call(None, GetConnectedPeerInfoRequest {}).await?; self.println(&ctx, result); } RpcApiOps::AddPeer => { @@ -90,7 +101,7 @@ impl Rpc { } let peer_address = argv.remove(0).parse::()?; let is_permanent = argv.remove(0).parse::().unwrap_or(false); - let result = rpc.add_peer_call(AddPeerRequest { peer_address, is_permanent }).await?; + let result = rpc.add_peer_call(None, AddPeerRequest { peer_address, is_permanent }).await?; self.println(&ctx, result); } // RpcApiOps::SubmitTransaction => { @@ -103,27 +114,38 @@ impl Rpc { } let hash = argv.remove(0); let hash = RpcHash::from_hex(hash.as_str())?; - let result = rpc.get_block_call(GetBlockRequest { hash, include_transactions: true }).await?; + let include_transactions = argv.first().and_then(|x| x.parse::().ok()).unwrap_or(true); + let result = rpc.get_block_call(None, GetBlockRequest { hash, include_transactions }).await?; self.println(&ctx, result); } // RpcApiOps::GetSubnetwork => { // let result = rpc.get_subnetwork_call(GetSubnetworkRequest { }).await?; // self.println(&ctx, result); // } - // RpcApiOps::GetVirtualChainFromBlock => { - // let result = rpc.get_virtual_chain_from_block_call(GetVirtualChainFromBlockRequest { }).await?; - // self.println(&ctx, result); - // } + RpcApiOps::GetVirtualChainFromBlock => { + if argv.is_empty() { + return Err(Error::custom("Missing startHash argument")); + }; + let start_hash = RpcHash::from_hex(argv.remove(0).as_str())?; + let include_accepted_transaction_ids = argv.first().and_then(|x| x.parse::().ok()).unwrap_or_default(); + let result = rpc + .get_virtual_chain_from_block_call( + None, + GetVirtualChainFromBlockRequest { start_hash, include_accepted_transaction_ids }, + ) + .await?; + self.println(&ctx, result); + } // RpcApiOps::GetBlocks => { // let result = rpc.get_blocks_call(GetBlocksRequest { }).await?; // self.println(&ctx, result); // } RpcApiOps::GetBlockCount => { - let result = rpc.get_block_count_call(GetBlockCountRequest {}).await?; + let result = rpc.get_block_count_call(None, GetBlockCountRequest {}).await?; self.println(&ctx, result); } RpcApiOps::GetBlockDagInfo => { - let result = rpc.get_block_dag_info_call(GetBlockDagInfoRequest {}).await?; + let result = rpc.get_block_dag_info_call(None, GetBlockDagInfoRequest {}).await?; self.println(&ctx, result); } // RpcApiOps::ResolveFinalityConflict => { @@ -131,7 +153,7 @@ impl Rpc { // self.println(&ctx, result); // } RpcApiOps::Shutdown => { - let result = rpc.shutdown_call(ShutdownRequest {}).await?; + let result = rpc.shutdown_call(None, ShutdownRequest {}).await?; self.println(&ctx, result); } // RpcApiOps::GetHeaders => { @@ -143,7 +165,7 @@ impl Rpc { return Err(Error::custom("Please specify at least one address")); } let addresses = argv.iter().map(|s| Address::try_from(s.as_str())).collect::, _>>()?; - let result = rpc.get_utxos_by_addresses_call(GetUtxosByAddressesRequest { addresses }).await?; + let result = rpc.get_utxos_by_addresses_call(None, GetUtxosByAddressesRequest { addresses }).await?; self.println(&ctx, result); } RpcApiOps::GetBalanceByAddress => { @@ -152,7 +174,7 @@ 
impl Rpc { } let addresses = argv.iter().map(|s| Address::try_from(s.as_str())).collect::, _>>()?; for address in addresses { - let result = rpc.get_balance_by_address_call(GetBalanceByAddressRequest { address }).await?; + let result = rpc.get_balance_by_address_call(None, GetBalanceByAddressRequest { address }).await?; self.println(&ctx, sompi_to_kaspa(result.balance)); } } @@ -161,11 +183,11 @@ impl Rpc { return Err(Error::custom("Please specify at least one address")); } let addresses = argv.iter().map(|s| Address::try_from(s.as_str())).collect::, _>>()?; - let result = rpc.get_balances_by_addresses_call(GetBalancesByAddressesRequest { addresses }).await?; + let result = rpc.get_balances_by_addresses_call(None, GetBalancesByAddressesRequest { addresses }).await?; self.println(&ctx, result); } RpcApiOps::GetSinkBlueScore => { - let result = rpc.get_sink_blue_score_call(GetSinkBlueScoreRequest {}).await?; + let result = rpc.get_sink_blue_score_call(None, GetSinkBlueScoreRequest {}).await?; self.println(&ctx, result); } RpcApiOps::Ban => { @@ -173,7 +195,7 @@ impl Rpc { return Err(Error::custom("Please specify peer IP address")); } let ip: RpcIpAddress = argv.remove(0).parse()?; - let result = rpc.ban_call(BanRequest { ip }).await?; + let result = rpc.ban_call(None, BanRequest { ip }).await?; self.println(&ctx, result); } RpcApiOps::Unban => { @@ -181,11 +203,11 @@ impl Rpc { return Err(Error::custom("Please specify peer IP address")); } let ip: RpcIpAddress = argv.remove(0).parse()?; - let result = rpc.unban_call(UnbanRequest { ip }).await?; + let result = rpc.unban_call(None, UnbanRequest { ip }).await?; self.println(&ctx, result); } RpcApiOps::GetInfo => { - let result = rpc.get_info_call(GetInfoRequest {}).await?; + let result = rpc.get_info_call(None, GetInfoRequest {}).await?; self.println(&ctx, result); } // RpcApiOps::EstimateNetworkHashesPerSecond => { @@ -200,16 +222,15 @@ impl Rpc { let include_orphan_pool = true; let filter_transaction_pool = true; let result = rpc - .get_mempool_entries_by_addresses_call(GetMempoolEntriesByAddressesRequest { - addresses, - include_orphan_pool, - filter_transaction_pool, - }) + .get_mempool_entries_by_addresses_call( + None, + GetMempoolEntriesByAddressesRequest { addresses, include_orphan_pool, filter_transaction_pool }, + ) .await?; self.println(&ctx, result); } RpcApiOps::GetCoinSupply => { - let result = rpc.get_coin_supply_call(GetCoinSupplyRequest {}).await?; + let result = rpc.get_coin_supply_call(None, GetCoinSupplyRequest {}).await?; self.println(&ctx, result); } RpcApiOps::GetDaaScoreTimestampEstimate => { @@ -220,8 +241,9 @@ impl Rpc { match daa_score_result { Ok(daa_scores) => { - let result = - rpc.get_daa_score_timestamp_estimate_call(GetDaaScoreTimestampEstimateRequest { daa_scores }).await?; + let result = rpc + .get_daa_score_timestamp_estimate_call(None, GetDaaScoreTimestampEstimateRequest { daa_scores }) + .await?; self.println(&ctx, result); } Err(_err) => { @@ -229,6 +251,24 @@ impl Rpc { } } } + RpcApiOps::GetFeeEstimate => { + let result = rpc.get_fee_estimate_call(None, GetFeeEstimateRequest {}).await?; + self.println(&ctx, result); + } + RpcApiOps::GetFeeEstimateExperimental => { + let verbose = if argv.is_empty() { false } else { argv.remove(0).parse().unwrap_or(false) }; + let result = rpc.get_fee_estimate_experimental_call(None, GetFeeEstimateExperimentalRequest { verbose }).await?; + self.println(&ctx, result); + } + RpcApiOps::GetCurrentBlockColor => { + if argv.is_empty() { + return Err(Error::custom("Missing 
block hash argument")); + } + let hash = argv.remove(0); + let hash = RpcHash::from_hex(hash.as_str())?; + let result = rpc.get_current_block_color_call(None, GetCurrentBlockColorRequest { hash }).await?; + self.println(&ctx, result); + } _ => { tprintln!(ctx, "rpc method exists but is not supported by the cli: '{op_str}'\r\n"); return Ok(()); @@ -243,9 +283,8 @@ impl Rpc { async fn display_help(self: Arc, ctx: Arc, _argv: Vec) -> Result<()> { // RpcApiOps that do not contain docs are not displayed - let help = RpcApiOps::list() - .iter() - .filter_map(|op| op.doc().is_not_empty().then_some((op.as_str().to_case(Case::Kebab).to_string(), op.doc()))) + let help = RpcApiOps::into_iter() + .filter_map(|op| op.rustdoc().is_not_empty().then_some((op.as_str().to_case(Case::Kebab).to_string(), op.rustdoc()))) .collect::>(); ctx.term().help(&help, None)?; diff --git a/cli/src/modules/send.rs b/cli/src/modules/send.rs index d9f35d994d..773861dd4a 100644 --- a/cli/src/modules/send.rs +++ b/cli/src/modules/send.rs @@ -39,7 +39,7 @@ impl Send { .await?; tprintln!(ctx, "Send - {summary}"); - // tprintln!(ctx, "\nSending {} KAS to {address}, tx ids:", sompi_to_kaspa_string(amount_sompi)); + tprintln!(ctx, "\nSending {} KAS to {address}, tx ids:", sompi_to_kaspa_string(amount_sompi)); // tprintln!(ctx, "{}\n", ids.into_iter().map(|a| a.to_string()).collect::>().join("\n")); Ok(()) diff --git a/cli/src/modules/settings.rs b/cli/src/modules/settings.rs index e7214418a3..b144c4cc3b 100644 --- a/cli/src/modules/settings.rs +++ b/cli/src/modules/settings.rs @@ -9,12 +9,11 @@ impl Settings { let ctx = ctx.clone().downcast_arc::()?; tprintln!(ctx, "\nSettings:\n"); - let list = WalletSettings::list(); - let list = list - .iter() + // let list = WalletSettings::list(); + let list = WalletSettings::into_iter() .map(|setting| { let value: String = ctx.wallet().settings().get(setting.clone()).unwrap_or_else(|| "-".to_string()); - let descr = setting.descr(); + let descr = setting.describe(); (setting.as_str().to_lowercase(), value, descr) }) .collect::>(); diff --git a/cli/src/modules/wallet.rs b/cli/src/modules/wallet.rs index 6019b19086..70180e78d1 100644 --- a/cli/src/modules/wallet.rs +++ b/cli/src/modules/wallet.rs @@ -9,6 +9,9 @@ impl Wallet { async fn main(self: Arc, ctx: &Arc, mut argv: Vec, cmd: &str) -> Result<()> { let ctx = ctx.clone().downcast_arc::()?; + let guard = ctx.wallet().guard(); + let guard = guard.lock().await; + if argv.is_empty() { return self.display_help(ctx, argv).await; } @@ -48,7 +51,7 @@ impl Wallet { let wallet_name = wallet_name.as_deref(); let import_with_mnemonic = op.as_str() == "import"; - wizards::wallet::create(&ctx, wallet_name, import_with_mnemonic).await?; + wizards::wallet::create(&ctx, guard.into(), wallet_name, import_with_mnemonic).await?; } "open" => { let name = if let Some(name) = argv.first().cloned() { @@ -67,8 +70,8 @@ impl Wallet { let (wallet_secret, _) = ctx.ask_wallet_secret(None).await?; let _ = ctx.notifier().show(Notification::Processing).await; let args = WalletOpenArgs::default_with_legacy_accounts(); - ctx.wallet().open(&wallet_secret, name, args).await?; - ctx.wallet().activate_accounts(None).await?; + ctx.wallet().open(&wallet_secret, name, args, &guard).await?; + ctx.wallet().activate_accounts(None, &guard).await?; } "close" => { ctx.wallet().close().await?; diff --git a/cli/src/wizards/account.rs b/cli/src/wizards/account.rs index 7a3afb73b1..9d3d4d5916 100644 --- a/cli/src/wizards/account.rs +++ b/cli/src/wizards/account.rs @@ -85,3 +85,62 @@ async 
fn create_multisig(ctx: &Arc, account_name: Option, mnem wallet.select(Some(&account)).await?; Ok(()) } + +pub(crate) async fn bip32_watch(ctx: &Arc, name: Option<&str>) -> Result<()> { + let term = ctx.term(); + let wallet = ctx.wallet(); + + let name = if let Some(name) = name { + Some(name.to_string()) + } else { + Some(term.ask(false, "Please enter account name (optional, press to skip): ").await?.trim().to_string()) + }; + + let mut xpub_keys = Vec::with_capacity(1); + let xpub_key = term.ask(false, "Enter extended public key: ").await?; + xpub_keys.push(xpub_key.trim().to_owned()); + + let wallet_secret = Secret::new(term.ask(true, "Enter wallet password: ").await?.trim().as_bytes().to_vec()); + if wallet_secret.as_ref().is_empty() { + return Err(Error::WalletSecretRequired); + } + + let account_create_args_bip32_watch = AccountCreateArgsBip32Watch::new(name, xpub_keys); + let account = wallet.create_account_bip32_watch(&wallet_secret, account_create_args_bip32_watch).await?; + + tprintln!(ctx, "\naccount created: {}\n", account.get_list_string()?); + wallet.select(Some(&account)).await?; + Ok(()) +} + +pub(crate) async fn multisig_watch(ctx: &Arc, name: Option<&str>) -> Result<()> { + let term = ctx.term(); + + let account_name = if let Some(name) = name { + Some(name.to_string()) + } else { + Some(term.ask(false, "Please enter account name (optional, press to skip): ").await?.trim().to_string()) + }; + + let term = ctx.term(); + let wallet = ctx.wallet(); + let (wallet_secret, _) = ctx.ask_wallet_secret(None).await?; + let minimum_signatures: u16 = term.ask(false, "Enter the minimum number of signatures required: ").await?.parse()?; + + let prv_key_data_args = Vec::with_capacity(0); + + let answer = term.ask(false, "Enter the number of extended public keys: ").await?.trim().to_string(); //.parse()?; + let xpub_keys_len: usize = if answer.is_empty() { 0 } else { answer.parse()? 
}; + + let mut xpub_keys = Vec::with_capacity(xpub_keys_len); + for i in 1..=xpub_keys_len { + let xpub_key = term.ask(false, &format!("Enter extended public {i} key: ")).await?; + xpub_keys.push(xpub_key.trim().to_owned()); + } + let account = + wallet.create_account_multisig(&wallet_secret, prv_key_data_args, xpub_keys, account_name, minimum_signatures).await?; + + tprintln!(ctx, "\naccount created: {}\n", account.get_list_string()?); + wallet.select(Some(&account)).await?; + Ok(()) +} diff --git a/cli/src/wizards/wallet.rs b/cli/src/wizards/wallet.rs index 8563a8619b..0fae267a36 100644 --- a/cli/src/wizards/wallet.rs +++ b/cli/src/wizards/wallet.rs @@ -2,12 +2,25 @@ use crate::cli::KaspaCli; use crate::imports::*; use crate::result::Result; use kaspa_bip32::{Language, Mnemonic, WordCount}; -use kaspa_wallet_core::storage::{make_filename, Hint}; - -pub(crate) async fn create(ctx: &Arc, name: Option<&str>, import_with_mnemonic: bool) -> Result<()> { +use kaspa_wallet_core::{ + storage::{make_filename, Hint}, + wallet::WalletGuard, +}; + +pub(crate) async fn create( + ctx: &Arc, + wallet_guard: Option>, + name: Option<&str>, + import_with_mnemonic: bool, +) -> Result<()> { let term = ctx.term(); let wallet = ctx.wallet(); + let local_guard = ctx.wallet().guard(); + let guard = match wallet_guard { + Some(locked_guard) => locked_guard, + None => local_guard.lock().await, + }; // TODO @aspect let word_count = WordCount::Words12; @@ -86,7 +99,7 @@ pub(crate) async fn create(ctx: &Arc, name: Option<&str>, import_with_ "\ PLEASE NOTE: The optional bip39 mnemonic passphrase, if provided, will be required to \ issue transactions. This passphrase will also be required when recovering your wallet \ - in addition to your private key or mnemonic. If you loose this passphrase, you will not \ + in addition to your private key or mnemonic. If you lose this passphrase, you will not \ be able to use or recover your wallet! 
\ \ If you do not want to use bip39 recovery passphrase, press ENTER.\ @@ -173,8 +186,8 @@ pub(crate) async fn create(ctx: &Arc<KaspaCli>, name: Option<&str>, import_with_ term.writeln(style(receive_address).blue().to_string()); term.writeln(""); - wallet.open(&wallet_secret, name.map(String::from), WalletOpenArgs::default_with_legacy_accounts()).await?; - wallet.activate_accounts(None).await?; + wallet.open(&wallet_secret, name.map(String::from), WalletOpenArgs::default_with_legacy_accounts(), &guard).await?; + wallet.activate_accounts(None, &guard).await?; Ok(()) } diff --git a/components/addressmanager/Cargo.toml b/components/addressmanager/Cargo.toml index e4398dc4e5..ef735b19c0 100644 --- a/components/addressmanager/Cargo.toml +++ b/components/addressmanager/Cargo.toml @@ -27,5 +27,4 @@ thiserror.workspace = true tokio.workspace = true [dev-dependencies] -statrs.workspace = true -statest.workspace = true +rv.workspace = true diff --git a/components/addressmanager/src/lib.rs b/components/addressmanager/src/lib.rs index 85f9acb3e2..093323e155 100644 --- a/components/addressmanager/src/lib.rs +++ b/components/addressmanager/src/lib.rs @@ -520,8 +520,7 @@ mod address_store_with_cache { use kaspa_database::create_temp_db; use kaspa_database::prelude::ConnBuilder; use kaspa_utils::networking::IpAddress; - use statest::ks::KSTest; - use statrs::distribution::Uniform; + use rv::{dist::Uniform, misc::ks_test as one_way_ks_test, traits::Cdf}; use std::net::{IpAddr, Ipv6Addr}; #[test] @@ -591,10 +590,11 @@ mod address_store_with_cache { assert!(num_of_buckets >= 12); // Run multiple Kolmogorov–Smirnov tests to offset random noise of the random weighted iterator - let num_of_trials = 512; + let num_of_trials = 2048; // Number of trials to run the test, chosen to reduce random noise. let mut cul_p = 0.; // The target uniform distribution - let target_u_dist = Uniform::new(0.0, (num_of_buckets) as f64).unwrap(); + let target_uniform_dist = Uniform::new(1.0, num_of_buckets as f64).unwrap(); + let uniform_cdf = |x: f64| target_uniform_dist.cdf(&x); for _ in 0..num_of_trials { // The weight-sampled distribution, expected to be uniform let prioritized_address_distribution = am .take(num_of_buckets) .map(|addr| addr.prefix_bucket().as_u64() as f64) .collect_vec(); - - let ks_test = KSTest::new(prioritized_address_distribution.as_slice()); - cul_p += ks_test.ks1(&target_u_dist).0; + cul_p += one_way_ks_test(prioritized_address_distribution.as_slice(), uniform_cdf).1; } // Normalize and adjust p to test for uniformity, over average of all trials. - let adjusted_p = (0.5 - cul_p / num_of_trials as f64).abs(); + // We do this to reduce the effect of random noise failing the test. + let adjusted_p = ((cul_p / num_of_trials as f64) - 0.5).abs(); // Define the significance threshold. 
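    // Under the null hypothesis, each per-trial one-sided KS p-value is itself
    // approximately Uniform(0, 1), so the mean over `num_of_trials` independent
    // trials concentrates around 0.5. `adjusted_p` measures how far the observed
    // mean drifts from 0.5, and the threshold below bounds the allowed drift.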
let significance = 0.10; @@ -619,7 +618,7 @@ mod address_store_with_cache { adjusted_p, significance ); - assert!(adjusted_p <= significance) + assert!(adjusted_p <= significance); } } } diff --git a/components/addressmanager/src/stores/address_store.rs b/components/addressmanager/src/stores/address_store.rs index accfcfda45..fe4ddb244b 100644 --- a/components/addressmanager/src/stores/address_store.rs +++ b/components/addressmanager/src/stores/address_store.rs @@ -21,6 +21,7 @@ pub struct Entry { impl MemSizeEstimator for Entry {} pub trait AddressesStoreReader { + #[allow(dead_code)] fn get(&self, key: AddressKey) -> Result; } diff --git a/components/connectionmanager/src/lib.rs b/components/connectionmanager/src/lib.rs index 1509df6bfe..2146ec62d1 100644 --- a/components/connectionmanager/src/lib.rs +++ b/components/connectionmanager/src/lib.rs @@ -7,7 +7,7 @@ use std::{ }; use duration_string::DurationString; -use futures_util::future::join_all; +use futures_util::future::{join_all, try_join_all}; use itertools::Itertools; use kaspa_addressmanager::{AddressManager, NetAddress}; use kaspa_core::{debug, info, warn}; @@ -227,12 +227,14 @@ impl ConnectionManager { } if missing_connections > 0 && !self.dns_seeders.is_empty() { - let cmgr = self.clone(); - // DNS lookup is a blocking i/o operation, so we spawn it as a blocking task - let _ = tokio::task::spawn_blocking(move || { - cmgr.dns_seed(missing_connections); //TODO: Consider putting a number higher than `missing_connections`. - }) - .await; + if missing_connections > self.outbound_target / 2 { + // If we are missing more than half of our target, query all in parallel. + // This will always be the case on new node start-up and is the most resilient strategy in such a case. + self.dns_seed_many(self.dns_seeders.len()).await; + } else { + // Try to obtain at least twice the number of missing connections + self.dns_seed_with_address_target(2 * missing_connections).await; + } } } @@ -251,26 +253,17 @@ impl ConnectionManager { join_all(futures).await; } - fn dns_seed(self: &Arc, mut min_addresses_to_fetch: usize) { + /// Queries DNS seeders in random order, one after the other, until obtaining `min_addresses_to_fetch` addresses + async fn dns_seed_with_address_target(self: &Arc, min_addresses_to_fetch: usize) { + let cmgr = self.clone(); + tokio::task::spawn_blocking(move || cmgr.dns_seed_with_address_target_blocking(min_addresses_to_fetch)).await.unwrap(); + } + + fn dns_seed_with_address_target_blocking(self: &Arc, mut min_addresses_to_fetch: usize) { let shuffled_dns_seeders = self.dns_seeders.choose_multiple(&mut thread_rng(), self.dns_seeders.len()); for &seeder in shuffled_dns_seeders { - info!("Querying DNS seeder {}", seeder); - // Since the DNS lookup protocol doesn't come with a port, we must assume that the default port is used. 
- let addrs = match (seeder, self.default_port).to_socket_addrs() { - Ok(addrs) => addrs, - Err(e) => { - warn!("Error connecting to DNS seeder {}: {}", seeder, e); - continue; - } - }; - - let addrs_len = addrs.len(); - info!("Retrieved {} addresses from DNS seeder {}", addrs_len, seeder); - let mut amgr_lock = self.address_manager.lock(); - for addr in addrs { - amgr_lock.add_address(NetAddress::new(addr.ip().into(), addr.port())); - } - + // Query seeders sequentially until reaching the desired number of addresses + let addrs_len = self.dns_seed_single(seeder); if addrs_len >= min_addresses_to_fetch { break; } else { @@ -279,6 +272,42 @@ impl ConnectionManager { } } + /// Queries `num_seeders_to_query` random DNS seeders in parallel + async fn dns_seed_many(self: &Arc, num_seeders_to_query: usize) -> usize { + info!("Querying {} DNS seeders", num_seeders_to_query); + let shuffled_dns_seeders = self.dns_seeders.choose_multiple(&mut thread_rng(), num_seeders_to_query); + let jobs = shuffled_dns_seeders.map(|seeder| { + let cmgr = self.clone(); + tokio::task::spawn_blocking(move || cmgr.dns_seed_single(seeder)) + }); + try_join_all(jobs).await.unwrap().into_iter().sum() + } + + /// Query a single DNS seeder and add the obtained addresses to the address manager. + /// + /// DNS lookup is a blocking i/o operation so this function is assumed to be called + /// from a blocking execution context. + fn dns_seed_single(self: &Arc, seeder: &str) -> usize { + info!("Querying DNS seeder {}", seeder); + // Since the DNS lookup protocol doesn't come with a port, we must assume that the default port is used. + let addrs = match (seeder, self.default_port).to_socket_addrs() { + Ok(addrs) => addrs, + Err(e) => { + warn!("Error connecting to DNS seeder {}: {}", seeder, e); + return 0; + } + }; + + let addrs_len = addrs.len(); + info!("Retrieved {} addresses from DNS seeder {}", addrs_len, seeder); + let mut amgr_lock = self.address_manager.lock(); + for addr in addrs { + amgr_lock.add_address(NetAddress::new(addr.ip().into(), addr.port())); + } + + addrs_len + } + /// Bans the given IP and disconnects from all the peers with that IP. /// /// _GO-KASPAD: BanByIP_ diff --git a/components/consensusmanager/src/lib.rs b/components/consensusmanager/src/lib.rs index 54bdda40b9..6d31653aab 100644 --- a/components/consensusmanager/src/lib.rs +++ b/components/consensusmanager/src/lib.rs @@ -9,7 +9,8 @@ mod session; pub use batch::BlockProcessingBatch; pub use session::{ - spawn_blocking, ConsensusInstance, ConsensusProxy, ConsensusSessionBlocking, SessionLock, SessionReadGuard, SessionWriteGuard, + spawn_blocking, ConsensusInstance, ConsensusProxy, ConsensusSessionBlocking, ConsensusSessionOwned, SessionLock, SessionReadGuard, + SessionWriteGuard, }; /// Consensus controller trait. Includes methods required to start/stop/control consensus, but which should not diff --git a/components/consensusmanager/src/session.rs b/components/consensusmanager/src/session.rs index 3e30783e60..8e0c6e9335 100644 --- a/components/consensusmanager/src/session.rs +++ b/components/consensusmanager/src/session.rs @@ -91,7 +91,7 @@ impl ConsensusInstance { /// Returns an unguarded *blocking* consensus session. There's no guarantee that data will not be pruned between /// two sequential consensus calls. This session doesn't hold the consensus pruning lock, so it should - /// be preferred upon [`session_blocking`] when data consistency is not important. 
+ /// be preferred over [`session_blocking()`](Self::session_blocking) when data consistency is not important. pub fn unguarded_session_blocking(&self) -> ConsensusSessionBlocking<'static> { ConsensusSessionBlocking::new_without_session_guard(self.consensus.clone()) } @@ -100,7 +100,7 @@ impl ConsensusInstance { /// that consensus state is consistent between operations, that is, no pruning was performed between the calls. /// The returned object is an *owned* consensus session type which can be cloned and shared across threads. /// The sharing ability is useful for spawning blocking operations on a different thread using the same - /// session object, see [`ConsensusSessionOwned::spawn_blocking`]. The caller is responsible to make sure + /// session object, see [`ConsensusSessionOwned::spawn_blocking()`](ConsensusSessionOwned::spawn_blocking). The caller is responsible for making sure /// that the overall lifetime of this session is not too long (~2 seconds max) pub async fn session(&self) -> ConsensusSessionOwned { let g = self.session_lock.read_owned().await; @@ -109,7 +109,7 @@ impl ConsensusInstance { /// Returns an unguarded consensus session. There's no guarantee that data will not be pruned between /// two sequential consensus calls. This session doesn't hold the consensus pruning lock, so it should - /// be preferred upon [`session`] when data consistency is not important. + /// be preferred over [`session()`](Self::session) when data consistency is not important. pub fn unguarded_session(&self) -> ConsensusSessionOwned { ConsensusSessionOwned::new_without_session_guard(self.consensus.clone()) } @@ -139,7 +139,8 @@ impl Deref for ConsensusSessionBlocking<'_> { } /// An *owned* consensus session type which can be cloned and shared across threads. -/// See method `spawn_blocking` within for context on the usefulness of this type +/// See method `spawn_blocking` within for context on the usefulness of this type. +/// Please note: you must use the [`ConsensusProxy`] type alias instead of this struct. #[derive(Clone)] pub struct ConsensusSessionOwned { _session_guard: Option, @@ -247,6 +248,10 @@ impl ConsensusSessionOwned { self.clone().spawn_blocking(|c| c.get_sink_timestamp()).await } + pub async fn async_get_current_block_color(&self, hash: Hash) -> Option<bool> { + self.clone().spawn_blocking(move |c| c.get_current_block_color(hash)).await + } + /// source refers to the earliest block from which the current node has full header & block data pub async fn async_get_source(&self) -> Hash { self.clone().spawn_blocking(|c| c.get_source()).await @@ -263,8 +268,12 @@ impl ConsensusSessionOwned { self.clone().spawn_blocking(|c| c.is_nearly_synced()).await } - pub async fn async_get_virtual_chain_from_block(&self, hash: Hash) -> ConsensusResult<ChainPath> { - self.clone().spawn_blocking(move |c| c.get_virtual_chain_from_block(hash)).await + pub async fn async_get_virtual_chain_from_block( + &self, + low: Hash, + chain_path_added_limit: Option<usize>, + ) -> ConsensusResult<ChainPath> { + self.clone().spawn_blocking(move |c| c.get_virtual_chain_from_block(low, chain_path_added_limit)).await } pub async fn async_get_virtual_utxos( @@ -376,8 +385,12 @@ impl ConsensusSessionOwned { /// Returns acceptance data for a set of blocks belonging to the selected parent chain. 
/// /// See `self::get_virtual_chain` - pub async fn async_get_blocks_acceptance_data(&self, hashes: Vec) -> ConsensusResult>> { - self.clone().spawn_blocking(move |c| c.get_blocks_acceptance_data(&hashes)).await + pub async fn async_get_blocks_acceptance_data( + &self, + hashes: Vec, + merged_blocks_limit: Option, + ) -> ConsensusResult>> { + self.clone().spawn_blocking(move |c| c.get_blocks_acceptance_data(&hashes, merged_blocks_limit)).await } pub async fn async_is_chain_block(&self, hash: Hash) -> ConsensusResult { diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml index 7082ed7b14..b9a183ea8c 100644 --- a/consensus/Cargo.toml +++ b/consensus/Cargo.toml @@ -30,6 +30,7 @@ kaspa-muhash.workspace = true kaspa-notify.workspace = true kaspa-pow.workspace = true kaspa-txscript.workspace = true +kaspa-txscript-errors.workspace = true kaspa-utils.workspace = true log.workspace = true once_cell.workspace = true diff --git a/consensus/client/Cargo.toml b/consensus/client/Cargo.toml index 38cbed9a33..698348508f 100644 --- a/consensus/client/Cargo.toml +++ b/consensus/client/Cargo.toml @@ -38,5 +38,5 @@ itertools.workspace = true workflow-wasm.workspace = true workflow-log.workspace = true -[lints.clippy] -empty_docs = "allow" +[lints] +workspace = true diff --git a/consensus/client/src/error.rs b/consensus/client/src/error.rs index e0aab2156c..e632f517d5 100644 --- a/consensus/client/src/error.rs +++ b/consensus/client/src/error.rs @@ -1,3 +1,5 @@ +//! The [`Error`](enum@Error) enum used by this crate + use thiserror::Error; use wasm_bindgen::{JsError, JsValue}; use workflow_wasm::jserror::JsErrorData; diff --git a/consensus/client/src/hash.rs b/consensus/client/src/hash.rs index 4402cfb1b5..1577689a67 100644 --- a/consensus/client/src/hash.rs +++ b/consensus/client/src/hash.rs @@ -1,3 +1,10 @@ +//! +//! WASM bindings for transaction hashers: [`TransactionSigningHash`](native::TransactionSigningHash) +//! and [`TransactionSigningHashECDSA`](native::TransactionSigningHashECDSA). +//! + +#![allow(non_snake_case)] + use crate::imports::*; use crate::result::Result; use kaspa_hashes as native; diff --git a/consensus/client/src/header.rs b/consensus/client/src/header.rs index 6294d21326..6f04a73c43 100644 --- a/consensus/client/src/header.rs +++ b/consensus/client/src/header.rs @@ -1,3 +1,9 @@ +//! +//! Implementation of the Block [`Header`] struct. +//! + +#![allow(non_snake_case)] + use crate::error::Error; use js_sys::{Array, Object}; use kaspa_consensus_core::hashing; @@ -32,14 +38,42 @@ export interface IHeader { blueScore: bigint; pruningPoint: HexString; } + +/** + * Interface defining the structure of a raw block header. + * + * This interface is explicitly used by GetBlockTemplate and SubmitBlock RPCs + * and unlike `IHeader`, does not include a hash. + * + * @category Consensus + */ +export interface IRawHeader { + version: number; + parentsByLevel: Array>; + hashMerkleRoot: HexString; + acceptedIdMerkleRoot: HexString; + utxoCommitment: HexString; + timestamp: bigint; + bits: number; + nonce: bigint; + daaScore: bigint; + blueWork: bigint | HexString; + blueScore: bigint; + pruningPoint: HexString; +} "#; #[wasm_bindgen] extern "C" { - #[wasm_bindgen(typescript_type = "IHeader | Header")] - pub type IHeader; + /// WASM (TypeScript) type definition for the Header-like struct: `Header | IHeader | IRawHeader`. 
+ /// + /// @category Consensus + #[wasm_bindgen(typescript_type = "Header | IHeader | IRawHeader")] + pub type HeaderT; } +/// Kaspa Block Header +/// /// @category Consensus #[derive(Clone, Debug, Serialize, Deserialize, CastFromJs)] #[serde(rename_all = "camelCase")] @@ -64,7 +98,7 @@ impl Header { #[wasm_bindgen] impl Header { #[wasm_bindgen(constructor)] - pub fn constructor(js_value: IHeader) -> std::result::Result { + pub fn constructor(js_value: HeaderT) -> std::result::Result { Ok(js_value.try_into_owned()?) } @@ -232,8 +266,11 @@ impl Header { impl TryCastFromJs for Header { type Error = Error; - fn try_cast_from(value: impl AsRef) -> Result, Self::Error> { - Self::resolve(&value, || { + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + where + R: AsRef + 'a, + { + Self::resolve(value, || { if let Some(object) = Object::try_from(value.as_ref()) { let parents_by_level = object .get_vec("parentsByLevel")? diff --git a/consensus/client/src/input.rs b/consensus/client/src/input.rs index 8b48c2d942..a5018199d5 100644 --- a/consensus/client/src/input.rs +++ b/consensus/client/src/input.rs @@ -1,3 +1,9 @@ +//! +//! Implementation of the client-side [`TransactionInput`] struct used by the client-side [`Transaction`] struct. +//! + +#![allow(non_snake_case)] + use crate::imports::*; use crate::result::Result; use crate::TransactionOutpoint; @@ -13,7 +19,7 @@ const TS_TRANSACTION: &'static str = r#" */ export interface ITransactionInput { previousOutpoint: ITransactionOutpoint; - signatureScript: HexString; + signatureScript?: HexString; sequence: bigint; sigOpCount: number; utxo?: UtxoEntryReference; @@ -33,15 +39,26 @@ export interface ITransactionInputVerboseData { } #[wasm_bindgen] extern "C" { - #[wasm_bindgen(typescript_type = "ITransactionInput")] - pub type ITransactionInput; + /// WASM (TypeScript) type representing `ITransactionInput | TransactionInput` + /// @category Consensus + #[wasm_bindgen(typescript_type = "ITransactionInput | TransactionInput")] + pub type TransactionInputT; + /// WASM (TypeScript) type representing `ITransactionInput[] | TransactionInput[]` + /// @category Consensus + #[wasm_bindgen(typescript_type = "(ITransactionInput | TransactionInput)[]")] + pub type TransactionInputArrayAsArgT; + /// WASM (TypeScript) type representing `TransactionInput[]` + /// @category Consensus + #[wasm_bindgen(typescript_type = "TransactionInput[]")] + pub type TransactionInputArrayAsResultT; } +/// Inner type used by [`TransactionInput`] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct TransactionInputInner { pub previous_outpoint: TransactionOutpoint, - pub signature_script: Vec, + pub signature_script: Option>, pub sequence: u64, pub sig_op_count: u8, pub utxo: Option, @@ -50,7 +67,7 @@ pub struct TransactionInputInner { impl TransactionInputInner { pub fn new( previous_outpoint: TransactionOutpoint, - signature_script: Vec, + signature_script: Option>, sequence: u64, sig_op_count: u8, utxo: Option, @@ -70,7 +87,7 @@ pub struct TransactionInput { impl TransactionInput { pub fn new( previous_outpoint: TransactionOutpoint, - signature_script: Vec, + signature_script: Option>, sequence: u64, sig_op_count: u8, utxo: Option, @@ -91,6 +108,10 @@ impl TransactionInput { self.inner().sig_op_count } + pub fn signature_script_length(&self) -> usize { + self.inner().signature_script.as_ref().map(|signature_script| signature_script.len()).unwrap_or_default() + } + pub fn utxo(&self) -> Option { self.inner().utxo.clone() } @@ 
-99,7 +120,7 @@ impl TransactionInput { #[wasm_bindgen] impl TransactionInput { #[wasm_bindgen(constructor)] - pub fn constructor(value: &ITransactionInput) -> Result { + pub fn constructor(value: &TransactionInputT) -> Result { Self::try_owned_from(value) } @@ -120,8 +141,8 @@ impl TransactionInput { } #[wasm_bindgen(getter = signatureScript)] - pub fn get_signature_script_as_hex(&self) -> String { - self.inner().signature_script.to_hex() + pub fn get_signature_script_as_hex(&self) -> Option { + self.inner().signature_script.as_ref().map(|script| script.to_hex()) } #[wasm_bindgen(setter = signatureScript)] @@ -163,7 +184,7 @@ impl TransactionInput { impl TransactionInput { pub fn set_signature_script(&self, signature_script: Vec) { - self.inner().signature_script = signature_script; + self.inner().signature_script.replace(signature_script); } pub fn script_public_key(&self) -> Option { @@ -179,14 +200,17 @@ impl AsRef for TransactionInput { impl TryCastFromJs for TransactionInput { type Error = Error; - fn try_cast_from(value: impl AsRef) -> std::result::Result, Self::Error> { - Self::resolve_cast(&value, || { + fn try_cast_from<'a, R>(value: &'a R) -> std::result::Result, Self::Error> + where + R: AsRef + 'a, + { + Self::resolve_cast(value, || { if let Some(object) = Object::try_from(value.as_ref()) { let previous_outpoint: TransactionOutpoint = object.get_value("previousOutpoint")?.as_ref().try_into()?; - let signature_script = object.get_vec_u8("signatureScript")?; + let signature_script = object.get_vec_u8("signatureScript").ok(); let sequence = object.get_u64("sequence")?; let sig_op_count = object.get_u8("sigOpCount")?; - let utxo = object.try_get_cast::("utxo")?.map(Cast::into_owned); + let utxo = object.try_cast_into::("utxo")?; Ok(TransactionInput::new(previous_outpoint, signature_script, sequence, sig_op_count, utxo).into()) } else { Err("TransactionInput must be an object".into()) @@ -199,7 +223,7 @@ impl From for TransactionInput { fn from(tx_input: cctx::TransactionInput) -> Self { TransactionInput::new( tx_input.previous_outpoint.into(), - tx_input.signature_script, + Some(tx_input.signature_script), tx_input.sequence, tx_input.sig_op_count, None, @@ -212,7 +236,8 @@ impl From<&TransactionInput> for cctx::TransactionInput { let inner = tx_input.inner(); cctx::TransactionInput::new( inner.previous_outpoint.clone().into(), - inner.signature_script.clone(), + // TODO - discuss: should this unwrap_or_default or return an error? + inner.signature_script.clone().unwrap_or_default(), inner.sequence, inner.sig_op_count, ) diff --git a/consensus/client/src/lib.rs b/consensus/client/src/lib.rs index 4935b16f76..3afae2f78b 100644 --- a/consensus/client/src/lib.rs +++ b/consensus/client/src/lib.rs @@ -1,33 +1,43 @@ +//! +//! # Client-side consensus primitives. +//! +//! This crate offers client-side primitives mirroring the consensus layer of the Kaspa p2p node. +//! It declares structs such as [`Transaction`], [`TransactionInput`], [`TransactionOutput`], +//! [`TransactionOutpoint`], [`UtxoEntry`], and [`UtxoEntryReference`] +//! that are used by the Wallet subsystem as well as WASM bindings. +//! +//! Unlike raw consensus primitives (used for high-performance DAG processing) the primitives +//! offered in this crate are designed to be used in client-side applications. Their internal +//! data is typically wrapped into `Arc>`, allowing for easy sharing between +//! async / threaded environments and WASM bindings. +//! 
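As a usage illustration (a minimal sketch, not part of the patch): with `signature_script` now an `Option<Vec<u8>>`, an input can be constructed unsigned and have its script attached later through interior mutability. The sketch assumes the re-exports declared in this `lib.rs` and the `TransactionId` type from `kaspa_consensus_core`; the one-byte script is a placeholder:

    use kaspa_consensus_client::{TransactionInput, TransactionOutpoint};
    use kaspa_consensus_core::tx::TransactionId;

    fn unsigned_input(txid: TransactionId) -> TransactionInput {
        let outpoint = TransactionOutpoint::new(txid, 0);
        // A freshly built input carries no signature script (None) until signed.
        let input = TransactionInput::new(outpoint, None, 0, 1, None);
        // The script is filled in later; set_signature_script takes &self because
        // the inner state lives behind a lock.
        input.set_signature_script(vec![0x51]);
        input
    }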
+ pub mod error; mod imports; +mod input; mod outpoint; mod output; pub mod result; +mod serializable; +mod transaction; mod utxo; +pub use input::*; pub use outpoint::*; pub use output::*; +pub use serializable::*; +pub use transaction::*; pub use utxo::*; cfg_if::cfg_if! { if #[cfg(feature = "wasm32-sdk")] { mod header; - mod input; - mod transaction; - mod vtx; + mod utils; mod hash; mod sign; - mod script; - mod serializable; - pub use header::*; - pub use input::*; - pub use transaction::*; - pub use serializable::*; - pub use vtx::*; + pub use utils::*; pub use hash::*; - // pub use signing::*; - pub use script::*; pub use sign::sign_with_multiple_v3; } } diff --git a/consensus/client/src/outpoint.rs b/consensus/client/src/outpoint.rs index 77e17d542e..a9b39f5e4f 100644 --- a/consensus/client/src/outpoint.rs +++ b/consensus/client/src/outpoint.rs @@ -1,3 +1,11 @@ +//! +//! Implementation of the client-side [`TransactionOutpoint`] used by the [`TransactionInput`] struct. +//! + +#![allow(non_snake_case)] + +use cfg_if::cfg_if; + use crate::imports::*; use crate::result::Result; @@ -14,6 +22,7 @@ export interface ITransactionOutpoint { } "#; +/// Inner type used by [`TransactionOutpoint`] #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash, Ord, PartialOrd)] #[serde(rename_all = "camelCase")] pub struct TransactionOutpointInner { @@ -110,26 +119,31 @@ impl TransactionOutpoint { } } -#[cfg_attr(feature = "wasm32-sdk", wasm_bindgen)] -impl TransactionOutpoint { - #[cfg_attr(feature = "wasm32-sdk", wasm_bindgen(constructor))] - pub fn ctor(transaction_id: TransactionId, index: u32) -> TransactionOutpoint { - Self { inner: Arc::new(TransactionOutpointInner { transaction_id, index }) } - } +cfg_if! { + if #[cfg(feature = "wasm32-sdk")] { - #[cfg_attr(feature = "wasm32-sdk", wasm_bindgen(js_name = "getId"))] - pub fn id_string(&self) -> String { - format!("{}-{}", self.get_transaction_id_as_string(), self.get_index()) - } + #[wasm_bindgen] + impl TransactionOutpoint { + #[wasm_bindgen(constructor)] + pub fn ctor(transaction_id: TransactionId, index: u32) -> TransactionOutpoint { + Self { inner: Arc::new(TransactionOutpointInner { transaction_id, index }) } + } - #[cfg_attr(feature = "wasm32-sdk", wasm_bindgen(getter, js_name = transactionId))] - pub fn get_transaction_id_as_string(&self) -> String { - self.inner().transaction_id.to_string() - } + #[wasm_bindgen(js_name = "getId")] + pub fn id_string(&self) -> String { + format!("{}-{}", self.get_transaction_id_as_string(), self.get_index()) + } - #[cfg_attr(feature = "wasm32-sdk", wasm_bindgen(getter, js_name = index))] - pub fn get_index(&self) -> TransactionIndexType { - self.inner().index + #[wasm_bindgen(getter, js_name = transactionId)] + pub fn get_transaction_id_as_string(&self) -> String { + self.inner().transaction_id.to_string() + } + + #[wasm_bindgen(getter, js_name = index)] + pub fn get_index(&self) -> TransactionIndexType { + self.inner().index + } + } } } @@ -165,6 +179,15 @@ impl From for cctx::TransactionOutpoint { } } +impl From<&TransactionOutpoint> for cctx::TransactionOutpoint { + fn from(outpoint: &TransactionOutpoint) -> Self { + let inner = outpoint.inner(); + let transaction_id = inner.transaction_id; + let index = inner.index; + cctx::TransactionOutpoint::new(transaction_id, index) + } +} + impl TransactionOutpoint { pub fn simulated() -> Self { Self::new(TransactionId::from_slice(&rand::random::<[u8; kaspa_hashes::HASH_SIZE]>()), 0) diff --git a/consensus/client/src/output.rs 
b/consensus/client/src/output.rs index 99fe38ec3a..17b4a58c80 100644 --- a/consensus/client/src/output.rs +++ b/consensus/client/src/output.rs @@ -1,3 +1,9 @@ +//! +//! Implementation of the client-side [`TransactionOutput`] used by the [`Transaction`] struct. +//! + +#![allow(non_snake_case)] + use crate::imports::*; #[wasm_bindgen(typescript_custom_section)] @@ -9,7 +15,7 @@ const TS_TRANSACTION_OUTPUT: &'static str = r#" */ export interface ITransactionOutput { value: bigint; - scriptPublicKey: IScriptPublicKey; + scriptPublicKey: IScriptPublicKey | HexString; /** Optional verbose data provided by RPC */ verboseData?: ITransactionOutputVerboseData; @@ -26,6 +32,23 @@ export interface ITransactionOutputVerboseData { } "#; +#[wasm_bindgen] +extern "C" { + /// WASM (TypeScript) type representing `ITransactionOutput | TransactionOutput` + /// @category Consensus + #[wasm_bindgen(typescript_type = "ITransactionOutput | TransactionOutput")] + pub type TransactionOutputT; + /// WASM (TypeScript) type representing `ITransactionOutput[] | TransactionOutput[]` + /// @category Consensus + #[wasm_bindgen(typescript_type = "(ITransactionOutput | TransactionOutput)[]")] + pub type TransactionOutputArrayAsArgT; + /// WASM (TypeScript) type representing `TransactionOutput[]` + /// @category Consensus + #[wasm_bindgen(typescript_type = "TransactionOutput[]")] + pub type TransactionOutputArrayAsResultT; +} + +/// Inner type used by [`TransactionOutput`] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct TransactionOutputInner { @@ -55,7 +78,7 @@ impl TransactionOutput { self.inner.lock().unwrap() } - pub fn script_length(&self) -> usize { + pub fn script_public_key_length(&self) -> usize { self.inner().script_public_key.script().len() } } @@ -69,7 +92,7 @@ impl TransactionOutput { } #[wasm_bindgen(getter, js_name = value)] - pub fn get_value(&self) -> u64 { + pub fn value(&self) -> u64 { self.inner().value } @@ -114,25 +137,20 @@ impl From<&TransactionOutput> for cctx::TransactionOutput { } } -impl TryFrom<&JsValue> for TransactionOutput { - type Error = Error; - fn try_from(js_value: &JsValue) -> Result { - // workflow_log::log_trace!("js_value->TransactionOutput: {js_value:?}"); - if let Some(object) = Object::try_from(js_value) { - let has_address = Object::has_own(object, &JsValue::from("address")); - workflow_log::log_trace!("js_value->TransactionOutput: has_address:{has_address:?}"); - let value = object.get_u64("value")?; - let script_public_key = ScriptPublicKey::try_cast_from(object.get_value("scriptPublicKey")?)?; - Ok(TransactionOutput::new(value, script_public_key.into_owned())) - } else { - Err("TransactionInput must be an object".into()) - } - } -} - -impl TryFrom for TransactionOutput { +impl TryCastFromJs for TransactionOutput { type Error = Error; - fn try_from(js_value: JsValue) -> Result { - Self::try_from(&js_value) + fn try_cast_from<'a, R>(value: &'a R) -> std::result::Result, Self::Error> + where + R: AsRef + 'a, + { + Self::resolve_cast(value, || { + if let Some(object) = Object::try_from(value.as_ref()) { + let value = object.get_u64("value")?; + let script_public_key = ScriptPublicKey::try_owned_from(object.get_value("scriptPublicKey")?)?; + Ok(TransactionOutput::new(value, script_public_key).into()) + } else { + Err("TransactionInput must be an object".into()) + } + }) } } diff --git a/consensus/client/src/result.rs b/consensus/client/src/result.rs index 4c8cb83f54..d8bff8aa10 100644 --- a/consensus/client/src/result.rs +++ 
b/consensus/client/src/result.rs @@ -1 +1,3 @@ +//! [`Result`] type alias that is bound to the [`Error`](super::error::Error) type from this crate. + pub type Result = std::result::Result; diff --git a/consensus/client/src/serializable/mod.rs b/consensus/client/src/serializable/mod.rs index 5855e26dfb..ab78d956be 100644 --- a/consensus/client/src/serializable/mod.rs +++ b/consensus/client/src/serializable/mod.rs @@ -1,3 +1,24 @@ +//! +//! # Standardized JSON serialization and deserialization of Kaspa transactions. +//! +//! This module provides standardized JSON serialization and deserialization of +//! Kaspa transactions. There are two sub-modules: `numeric` and `string`. +//! +//! The `numeric` module provides serialization and deserialization of transactions +//! with all large integer values as `bigint` types in WASM or numerical values that +//! exceed the largest integer that can be represented by the JavaScript `number` type. +//! +//! The `string` module provides serialization and deserialization of transactions +//! with all large integer values as `string` types. This allows deserialization +//! via JSON in JavaScript environments and later conversion to `bigint` types. +//! +//! These data structures can be used for manual transport of transactions using JSON. +//! For more advanced use cases, please refer to `PSKT` in the [`kaspa_wallet_pskt`](https://docs.rs/kaspa_wallet_pskt) +//! crate. +//! + +#![allow(non_snake_case)] + pub mod numeric; pub mod string; @@ -30,7 +51,7 @@ export interface ISerializableTransactionInput { index: number; sequence: bigint; sigOpCount: number; - signatureScript: HexString; + signatureScript?: HexString; utxo: ISerializableUtxoEntry; } @@ -77,3 +98,10 @@ export interface ISerializableTransaction { } "#; + +#[wasm_bindgen] +extern "C" { + /// WASM (TypeScript) representation of the `ISerializableTransaction` interface. + #[wasm_bindgen(extends = js_sys::Array, typescript_type = "ISerializableTransaction")] + pub type SerializableTransactionT; +} diff --git a/consensus/client/src/serializable/numeric.rs b/consensus/client/src/serializable/numeric.rs index 0c413fdcfe..6c24db634a 100644 --- a/consensus/client/src/serializable/numeric.rs +++ b/consensus/client/src/serializable/numeric.rs @@ -1,4 +1,10 @@ -//! This module implements the primitives for external transaction signing. +//! +//! This module implements transaction-related primitives for JSON serialization +//! where all large integer values (`u64`) are serialized to JSON using `serde` and +//! can exceed the largest integer value representable by the JavaScript `number` type. +//! (i.e. transactions serialized using this module can not be deserialized in JavaScript +//! but may be deserialized in other JSON-capable environments that support large integers) +//! 
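To make the contrast between the two sub-modules concrete, here is a hypothetical fragment (not taken from the patch; values are illustrative) showing how the same `u64` fields serialize under each one:

    numeric: u64 values become JSON numbers and may exceed
    JavaScript's Number.MAX_SAFE_INTEGER (2^53 - 1)
        { "gas": 0, "mass": 18446744073709551615 }
    string: the same values become JSON strings, safe to parse in
    JavaScript and convert to bigint afterwards
        { "gas": "0", "mass": "18446744073709551615" }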
use crate::error::Error; use crate::imports::*; @@ -80,6 +86,7 @@ pub struct SerializableTransactionInput { pub sequence: u64, pub sig_op_count: u8, #[serde(with = "hex::serde")] + // TODO - convert to Option> and use hex serialization over Option pub signature_script: Vec, pub utxo: SerializableUtxoEntry, } @@ -91,6 +98,8 @@ impl SerializableTransactionInput { Self { transaction_id: input.previous_outpoint.transaction_id, index: input.previous_outpoint.index, + // TODO - convert signature_script to Option> + // signature_script: (!input.signature_script.is_empty()).then_some(input.signature_script.clone()), signature_script: input.signature_script.clone(), sequence: input.sequence, sig_op_count: input.sig_op_count, @@ -134,15 +143,16 @@ impl TryFrom for cctx::TransactionInput { impl TryFrom<&SerializableTransactionInput> for TransactionInput { type Error = Error; - fn try_from(signable_input: &SerializableTransactionInput) -> Result { - let utxo = UtxoEntryReference::try_from(signable_input)?; + fn try_from(serializable_input: &SerializableTransactionInput) -> Result { + let utxo = UtxoEntryReference::try_from(serializable_input)?; - let previous_outpoint = TransactionOutpoint::new(signable_input.transaction_id, signable_input.index); + let previous_outpoint = TransactionOutpoint::new(serializable_input.transaction_id, serializable_input.index); let inner = TransactionInputInner { previous_outpoint, - signature_script: signable_input.signature_script.clone(), - sequence: signable_input.sequence, - sig_op_count: signable_input.sig_op_count, + // TODO - convert to Option> and use hex serialization over Option + signature_script: (!serializable_input.signature_script.is_empty()).then_some(serializable_input.signature_script.clone()), + sequence: serializable_input.sequence, + sig_op_count: serializable_input.sig_op_count, utxo: Some(utxo), }; @@ -159,7 +169,8 @@ impl TryFrom<&TransactionInput> for SerializableTransactionInput { Ok(Self { transaction_id: inner.previous_outpoint.transaction_id(), index: inner.previous_outpoint.index(), - signature_script: inner.signature_script.clone(), + // TODO - convert to Option> and use hex serialization over Option + signature_script: inner.signature_script.clone().unwrap_or_default(), sequence: inner.sequence, sig_op_count: inner.sig_op_count, utxo, @@ -218,6 +229,8 @@ pub struct SerializableTransaction { pub outputs: Vec, pub lock_time: u64, pub gas: u64, + #[serde(default)] + pub mass: u64, pub subnetwork_id: SubnetworkId, #[serde(with = "hex::serde")] pub payload: Vec, @@ -260,6 +273,7 @@ impl SerializableTransaction { lock_time: transaction.lock_time, subnetwork_id: transaction.subnetwork_id.clone(), gas: transaction.gas, + mass: transaction.mass(), payload: transaction.payload.clone(), id: transaction.id(), }) @@ -279,6 +293,7 @@ impl SerializableTransaction { subnetwork_id: inner.subnetwork_id.clone(), gas: inner.gas, payload: inner.payload.clone(), + mass: inner.mass, id: inner.id, }) } @@ -306,6 +321,7 @@ impl SerializableTransaction { lock_time: transaction.lock_time, subnetwork_id: transaction.subnetwork_id.clone(), gas: transaction.gas, + mass: transaction.mass(), payload: transaction.payload.clone(), }) } @@ -331,7 +347,8 @@ impl TryFrom for cctx::SignableTransaction { serializable.subnetwork_id, serializable.gas, serializable.payload, - ); + ) + .with_mass(serializable.mass); Ok(Self::with_entries(tx, entries)) } @@ -344,6 +361,6 @@ impl TryFrom for Transaction { let inputs: Vec = tx.inputs.iter().map(TryInto::try_into).collect::>>()?; let 
outputs: Vec = tx.outputs.iter().map(TryInto::try_into).collect::>>()?; - Transaction::new(Some(id), tx.version, inputs, outputs, tx.lock_time, tx.subnetwork_id, tx.gas, tx.payload) + Transaction::new(Some(id), tx.version, inputs, outputs, tx.lock_time, tx.subnetwork_id, tx.gas, tx.payload, tx.mass) } } diff --git a/consensus/client/src/serializable/string.rs b/consensus/client/src/serializable/string.rs index be3981b0ea..35c7907b29 100644 --- a/consensus/client/src/serializable/string.rs +++ b/consensus/client/src/serializable/string.rs @@ -1,4 +1,7 @@ -//! This module implements the primitives for external transaction signing. +//! +//! This module implements transaction-related primitives for JSON serialization +//! where all large integer values (`u64`) are serialized to and from JSON as strings. +//! use crate::imports::*; use crate::result::Result; @@ -139,7 +142,8 @@ impl TryFrom<&SerializableTransactionInput> for TransactionInput { let previous_outpoint = TransactionOutpoint::new(serializable_input.transaction_id, serializable_input.index); let inner = TransactionInputInner { previous_outpoint, - signature_script: serializable_input.signature_script.clone(), + // TODO - convert to Option> and use hex serialization over Option + signature_script: (!serializable_input.signature_script.is_empty()).then_some(serializable_input.signature_script.clone()), sequence: serializable_input.sequence.parse()?, sig_op_count: serializable_input.sig_op_count, utxo: Some(utxo), @@ -158,7 +162,8 @@ impl TryFrom<&TransactionInput> for SerializableTransactionInput { Ok(Self { transaction_id: inner.previous_outpoint.transaction_id(), index: inner.previous_outpoint.index(), - signature_script: inner.signature_script.clone(), + // TODO - convert to Option> and use hex serialization over Option + signature_script: inner.signature_script.clone().unwrap_or_default(), sequence: inner.sequence.to_string(), sig_op_count: inner.sig_op_count, utxo, @@ -217,6 +222,8 @@ pub struct SerializableTransaction { pub subnetwork_id: SubnetworkId, pub lock_time: String, pub gas: String, + #[serde(default)] + pub mass: String, #[serde(with = "hex::serde")] pub payload: Vec, } @@ -258,6 +265,7 @@ impl SerializableTransaction { lock_time: transaction.lock_time.to_string(), subnetwork_id: transaction.subnetwork_id.clone(), gas: transaction.gas.to_string(), + mass: transaction.mass().to_string(), payload: transaction.payload.clone(), }) } @@ -275,6 +283,7 @@ impl SerializableTransaction { lock_time: inner.lock_time.to_string(), subnetwork_id: inner.subnetwork_id.clone(), gas: inner.gas.to_string(), + mass: inner.mass.to_string(), payload: inner.payload.clone(), id: inner.id, }) @@ -303,6 +312,7 @@ impl SerializableTransaction { lock_time: transaction.lock_time.to_string(), subnetwork_id: transaction.subnetwork_id.clone(), gas: transaction.gas.to_string(), + mass: transaction.mass().to_string(), payload: transaction.payload.clone(), }) } @@ -328,7 +338,8 @@ impl TryFrom for cctx::SignableTransaction { signable.subnetwork_id, signable.gas.parse()?, signable.payload, - ); + ) + .with_mass(signable.mass.parse().unwrap_or_default()); Ok(Self::with_entries(tx, entries)) } @@ -341,6 +352,16 @@ impl TryFrom for crate::Transaction { let inputs: Vec = tx.inputs.iter().map(TryInto::try_into).collect::>>()?; let outputs: Vec = tx.outputs.iter().map(TryInto::try_into).collect::>>()?; - Transaction::new(Some(id), tx.version, inputs, outputs, tx.lock_time.parse()?, tx.subnetwork_id, tx.gas.parse()?, tx.payload) + Transaction::new( + Some(id), 
+ tx.version, + inputs, + outputs, + tx.lock_time.parse()?, + tx.subnetwork_id, + tx.gas.parse()?, + tx.payload, + tx.mass.parse().unwrap_or_default(), + ) } } diff --git a/consensus/client/src/sign.rs b/consensus/client/src/sign.rs index a92abb90d9..18ff3c8491 100644 --- a/consensus/client/src/sign.rs +++ b/consensus/client/src/sign.rs @@ -1,3 +1,7 @@ +//! +//! Utilities for signing transactions. +//! + use crate::transaction::Transaction; use core::iter::once; use itertools::Itertools; @@ -13,14 +17,14 @@ use std::collections::BTreeMap; /// A wrapper enum that represents the transaction signed state. A transaction /// contained by this enum can be either fully signed or partially signed. -pub enum Signed { - Fully(Transaction), - Partially(Transaction), +pub enum Signed<'a> { + Fully(&'a Transaction), + Partially(&'a Transaction), } -impl Signed { +impl<'a> Signed<'a> { /// Returns the transaction regardless of whether it is fully or partially signed - pub fn unwrap(self) -> Transaction { + pub fn unwrap(self) -> &'a Transaction { match self { Signed::Fully(tx) => tx, Signed::Partially(tx) => tx, @@ -31,7 +35,7 @@ impl Signed { /// TODO (aspect) - merge this with `v1` fn above or refactor wallet core to use the script engine. /// Sign a transaction using schnorr #[allow(clippy::result_large_err)] -pub fn sign_with_multiple_v3(tx: Transaction, privkeys: &[[u8; 32]]) -> crate::result::Result { +pub fn sign_with_multiple_v3<'a>(tx: &'a Transaction, privkeys: &[[u8; 32]]) -> crate::result::Result> { let mut map = BTreeMap::new(); for privkey in privkeys { let schnorr_key = secp256k1::Keypair::from_seckey_slice(secp256k1::SECP256K1, privkey).unwrap(); @@ -44,7 +48,7 @@ pub fn sign_with_multiple_v3(tx: Transaction, privkeys: &[[u8; 32]]) -> crate::r let mut additional_signatures_required = false; { let input_len = tx.inner().inputs.len(); - let (cctx, utxos) = tx.tx_and_utxos(); + let (cctx, utxos) = tx.tx_and_utxos()?; let populated_transaction = PopulatedTransaction::new(&cctx, utxos); for i in 0..input_len { let script_pub_key = match tx.inner().inputs[i].script_public_key() { diff --git a/consensus/client/src/transaction.rs b/consensus/client/src/transaction.rs index 3293497149..17cc381265 100644 --- a/consensus/client/src/transaction.rs +++ b/consensus/client/src/transaction.rs @@ -1,11 +1,15 @@ +//! +//! Declares the client-side [`Transaction`] type, which represents a Kaspa transaction. +//! 
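Since `sign_with_multiple_v3` above now borrows the transaction and `Signed<'a>` wraps a reference, a call site keeps ownership throughout. A minimal sketch under that assumption (the key bytes are illustrative; the function is re-exported only with the `wasm32-sdk` feature per the `lib.rs` changes above):

    use kaspa_consensus_client::{sign_with_multiple_v3, Transaction};

    fn sign_in_place(tx: &Transaction, privkey: [u8; 32]) -> kaspa_consensus_client::result::Result<()> {
        // The transaction is only borrowed; signature scripts are written back
        // through interior mutability, so the caller retains ownership.
        let signed = sign_with_multiple_v3(tx, &[privkey])?;
        // Signed::unwrap() returns the borrowed transaction whether it ended up
        // fully or partially signed.
        let _signed_tx: &Transaction = signed.unwrap();
        Ok(())
    }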
+ #![allow(non_snake_case)] use crate::imports::*; -use crate::input::TransactionInput; +use crate::input::{TransactionInput, TransactionInputArrayAsArgT, TransactionInputArrayAsResultT}; use crate::outpoint::TransactionOutpoint; -use crate::output::TransactionOutput; +use crate::output::{TransactionOutput, TransactionOutputArrayAsArgT, TransactionOutputArrayAsResultT}; use crate::result::Result; -use crate::serializable::{numeric, string}; +use crate::serializable::{numeric, string, SerializableTransactionT}; use crate::utxo::{UtxoEntryId, UtxoEntryReference}; use ahash::AHashMap; use kaspa_consensus_core::network::NetworkType; @@ -30,6 +34,8 @@ export interface ITransaction { subnetworkId: HexString; gas: bigint; payload: HexString; + /** The mass of the transaction (the mass is undefined or zero unless explicitly set or obtained from the node) */ + mass?: bigint; /** Optional verbose data provided by RPC */ verboseData?: ITransactionVerboseData; @@ -43,7 +49,7 @@ export interface ITransaction { export interface ITransactionVerboseData { transactionId : HexString; hash : HexString; - mass : bigint; + computeMass : bigint; blockHash : HexString; blockTime : bigint; } @@ -51,10 +57,13 @@ export interface ITransactionVerboseData { #[wasm_bindgen] extern "C" { - #[wasm_bindgen(typescript_type = "ITransaction")] - pub type ITransaction; + /// WASM (TypeScript) type representing `ITransaction | Transaction` + /// @category Consensus + #[wasm_bindgen(typescript_type = "ITransaction | Transaction")] + pub type TransactionT; } +/// Inner type used by [`Transaction`] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct TransactionInner { @@ -65,6 +74,7 @@ pub struct TransactionInner { pub subnetwork_id: SubnetworkId, pub gas: u64, pub payload: Vec, + pub mass: u64, // A field that is used to cache the transaction ID. // Always use the corresponding self.id() instead of accessing this field directly @@ -92,6 +102,7 @@ impl Transaction { subnetwork_id: SubnetworkId, gas: u64, payload: Vec, + mass: u64, ) -> Result { let finalize = id.is_none(); let tx = Self { @@ -104,6 +115,7 @@ impl Transaction { subnetwork_id, gas, payload, + mass, })), }; if finalize { @@ -149,14 +161,14 @@ impl Transaction { } #[wasm_bindgen(constructor)] - pub fn constructor(js_value: &ITransaction) -> std::result::Result { + pub fn constructor(js_value: &TransactionT) -> std::result::Result { Ok(js_value.try_into_owned()?) } #[wasm_bindgen(getter = inputs)] - pub fn get_inputs_as_js_array(&self) -> Array { + pub fn get_inputs_as_js_array(&self) -> TransactionInputArrayAsResultT { let inputs = self.inner.lock().unwrap().inputs.clone().into_iter().map(JsValue::from); - Array::from_iter(inputs) + Array::from_iter(inputs).unchecked_into() } /// Returns a list of unique addresses used by transaction inputs. 
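The `mass` field above is threaded through `TransactionInner` and the `Transaction::new` constructor, defaulting to 0 when absent on the JS side. A sketch of the new constructor signature (not from the patch; `SUBNETWORK_ID_NATIVE` is assumed to come from `kaspa_consensus_core::subnets`):

    use kaspa_consensus_client::{Transaction, TransactionInput};
    use kaspa_consensus_core::subnets::SUBNETWORK_ID_NATIVE;

    fn build_tx(input: TransactionInput) -> kaspa_consensus_client::result::Result<Transaction> {
        // With `id` set to None the constructor finalizes the transaction and
        // computes its id; the trailing argument is the new explicit `mass`
        // field, which stays 0 unless set or obtained from a node.
        Transaction::new(None, 0, vec![input], vec![], 0, SUBNETWORK_ID_NATIVE, 0, vec![], 0)
    }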
@@ -179,7 +191,7 @@ impl Transaction { } #[wasm_bindgen(setter = inputs)] - pub fn set_inputs_from_js_array(&mut self, js_value: &JsValue) { + pub fn set_inputs_from_js_array(&mut self, js_value: &TransactionInputArrayAsArgT) { let inputs = Array::from(js_value) .iter() .map(|js_value| { @@ -190,16 +202,16 @@ impl Transaction { } #[wasm_bindgen(getter = outputs)] - pub fn get_outputs_as_js_array(&self) -> Array { + pub fn get_outputs_as_js_array(&self) -> TransactionOutputArrayAsResultT { let outputs = self.inner.lock().unwrap().outputs.clone().into_iter().map(JsValue::from); - Array::from_iter(outputs) + Array::from_iter(outputs).unchecked_into() } #[wasm_bindgen(setter = outputs)] - pub fn set_outputs_from_js_array(&mut self, js_value: &JsValue) { + pub fn set_outputs_from_js_array(&mut self, js_value: &TransactionOutputArrayAsArgT) { let outputs = Array::from(js_value) .iter() - .map(|js_value| TransactionOutput::try_from(&js_value).unwrap_or_else(|err| panic!("invalid transaction output: {err}"))) + .map(|js_value| TryCastFromJs::try_owned_from(&js_value).unwrap_or_else(|err| panic!("invalid transaction output: {err}"))) .collect::>(); self.inner().outputs = outputs; } @@ -214,12 +226,12 @@ impl Transaction { self.inner().version = v; } - #[wasm_bindgen(getter, js_name = lock_time)] + #[wasm_bindgen(getter, js_name = lockTime)] pub fn get_lock_time(&self) -> u64 { self.inner().lock_time } - #[wasm_bindgen(setter, js_name = lock_time)] + #[wasm_bindgen(setter, js_name = lockTime)] pub fn set_lock_time(&self, v: u64) { self.inner().lock_time = v; } @@ -254,21 +266,36 @@ impl Transaction { pub fn set_payload_from_js_value(&mut self, js_value: JsValue) { self.inner.lock().unwrap().payload = js_value.try_as_vec_u8().unwrap_or_else(|err| panic!("payload value error: {err}")); } + + #[wasm_bindgen(getter = mass)] + pub fn get_mass(&self) -> u64 { + self.inner().mass + } + + #[wasm_bindgen(setter = mass)] + pub fn set_mass(&self, v: u64) { + self.inner().mass = v; + } } impl TryCastFromJs for Transaction { type Error = Error; - fn try_cast_from(value: impl AsRef) -> std::result::Result, Self::Error> { - Self::resolve_cast(&value, || { + fn try_cast_from<'a, R>(value: &'a R) -> std::result::Result, Self::Error> + where + R: AsRef + 'a, + { + Self::resolve_cast(value, || { if let Some(object) = Object::try_from(value.as_ref()) { if let Some(tx) = object.try_get_value("tx")? { - Transaction::try_cast_from(&tx) + Transaction::try_captured_cast_from(tx) } else { - let id = object.try_get_cast::("id")?.map(|id| id.into_owned()); + let id = object.try_cast_into::("id")?; let version = object.get_u16("version")?; let lock_time = object.get_u64("lockTime")?; let gas = object.get_u64("gas")?; let payload = object.get_vec_u8("payload")?; + // mass field is optional + let mass = object.get_u64("mass").unwrap_or_default(); let subnetwork_id = object.get_vec_u8("subnetworkId")?; if subnetwork_id.len() != subnets::SUBNETWORK_ID_SIZE { return Err(Error::Custom("subnetworkId must be 20 bytes long".into())); @@ -285,9 +312,9 @@ impl TryCastFromJs for Transaction { let outputs: Vec = object .get_vec("outputs")? 
.iter() - .map(|jsv| jsv.try_into()) + .map(TryCastFromJs::try_owned_from) .collect::, Error>>()?; - Transaction::new(id, version, inputs, outputs, lock_time, subnetwork_id, gas, payload).map(Into::into) + Transaction::new(id, version, inputs, outputs, lock_time, subnetwork_id, gas, payload, mass).map(Into::into) } } else { Err("Transaction must be an object".into()) @@ -300,6 +327,7 @@ impl TryCastFromJs for Transaction { impl From for Transaction { fn from(tx: cctx::Transaction) -> Self { let id = tx.id(); + let mass = tx.mass(); let inputs: Vec = tx.inputs.into_iter().map(|input| input.into()).collect::>(); let outputs: Vec = tx.outputs.into_iter().map(|output| output.into()).collect::>(); Self::new_with_inner(TransactionInner { @@ -309,6 +337,7 @@ impl From for Transaction { lock_time: tx.lock_time, gas: tx.gas, payload: tx.payload, + mass, subnetwork_id: tx.subnetwork_id, id, }) @@ -331,6 +360,7 @@ impl From<&Transaction> for cctx::Transaction { inner.gas, inner.payload.clone(), ) + .with_mass(inner.mass) } } @@ -342,7 +372,13 @@ impl Transaction { .map(|input| { let previous_outpoint: TransactionOutpoint = input.previous_outpoint.into(); let utxo = utxos.get(previous_outpoint.id()).cloned(); - TransactionInput::new(previous_outpoint, input.signature_script.clone(), input.sequence, input.sig_op_count, utxo) + TransactionInput::new( + previous_outpoint, + Some(input.signature_script.clone()), + input.sequence, + input.sig_op_count, + utxo, + ) }) .collect::>(); let outputs: Vec = tx.outputs.iter().map(|output| output.into()).collect::>(); @@ -355,22 +391,23 @@ impl Transaction { lock_time: tx.lock_time, gas: tx.gas, payload: tx.payload.clone(), + mass: tx.mass(), subnetwork_id: tx.subnetwork_id.clone(), }) } - pub fn tx_and_utxos(&self) -> (cctx::Transaction, Vec) { - let mut utxos = vec![]; + pub fn tx_and_utxos(&self) -> Result<(cctx::Transaction, Vec)> { + let mut inputs = vec![]; let inner = self.inner(); - let inputs: Vec = inner + let utxos: Vec = inner .inputs .clone() .into_iter() .map(|input| { - utxos.push((&input.get_utxo().unwrap().entry()).into()); - input.as_ref().into() + inputs.push(input.as_ref().into()); + Ok(input.get_utxo().ok_or(Error::MissingUtxoEntry)?.entry().as_ref().into()) }) - .collect::>(); + .collect::>>()?; let outputs: Vec = inner.outputs.clone().into_iter().map(|output| output.as_ref().into()).collect::>(); let tx = cctx::Transaction::new( @@ -381,9 +418,40 @@ impl Transaction { inner.subnetwork_id.clone(), inner.gas, inner.payload.clone(), - ); + ) + .with_mass(inner.mass); + + Ok((tx, utxos)) + } + + pub fn utxo_entry_references(&self) -> Result> { + let inner = self.inner(); + let utxo_entry_references = inner + .inputs + .clone() + .into_iter() + .map(|input| input.get_utxo().ok_or(Error::MissingUtxoEntry)) + .collect::>>()?; + Ok(utxo_entry_references) + } - (tx, utxos) + pub fn outputs(&self) -> Vec { + let inner = self.inner(); + let outputs = inner.outputs.iter().map(|output| output.into()).collect::>(); + outputs + } + + pub fn inputs(&self) -> Vec { + let inner = self.inner(); + let inputs = inner.inputs.iter().map(Into::into).collect::>(); + inputs + } + + pub fn inputs_outputs(&self) -> (Vec, Vec) { + let inner = self.inner(); + let inputs = inner.inputs.iter().map(Into::into).collect::>(); + let outputs = inner.outputs.iter().map(Into::into).collect::>(); + (inputs, outputs) } pub fn set_signature_script(&self, input_index: usize, signature_script: Vec) -> Result<()> { @@ -393,6 +461,14 @@ impl Transaction { 
self.inner().inputs[input_index].set_signature_script(signature_script);
         Ok(())
     }
+
+    pub fn payload(&self) -> Vec<u8> {
+        self.inner().payload.clone()
+    }
+
+    pub fn payload_len(&self) -> usize {
+        self.inner().payload.len()
+    }
 }
 
 #[wasm_bindgen]
@@ -401,7 +477,7 @@ impl Transaction {
     /// The schema of the JavaScript object is defined by {@link ISerializableTransaction}.
     /// @see {@link ISerializableTransaction}
     #[wasm_bindgen(js_name = "serializeToObject")]
-    pub fn serialize_to_object(&self) -> Result<JsValue> {
+    pub fn serialize_to_object(&self) -> Result<SerializableTransactionT> {
         Ok(numeric::SerializableTransaction::from_client_transaction(self)?.serialize_to_object()?.into())
     }
 
diff --git a/consensus/client/src/utils.rs b/consensus/client/src/utils.rs
new file mode 100644
index 0000000000..7e08556fec
--- /dev/null
+++ b/consensus/client/src/utils.rs
@@ -0,0 +1,87 @@
+//!
+//! Client-side utility functions and their WASM bindings.
+//!
+
+#![allow(non_snake_case)]
+
+use crate::imports::*;
+use crate::result::Result;
+use kaspa_addresses::*;
+use kaspa_consensus_core::{
+    network::{NetworkType, NetworkTypeT},
+    tx::ScriptPublicKeyT,
+};
+use kaspa_txscript::{script_class::ScriptClass, standard};
+use kaspa_utils::hex::ToHex;
+use kaspa_wasm_core::types::{BinaryT, HexString};
+
+/// Creates a new script to pay a transaction output to the specified address.
+/// @category Wallet SDK
+#[wasm_bindgen(js_name = payToAddressScript)]
+pub fn pay_to_address_script(address: &AddressT) -> Result<ScriptPublicKey> {
+    let address = Address::try_cast_from(address)?;
+    Ok(standard::pay_to_address_script(address.as_ref()))
+}
+
+/// Takes a script and returns an equivalent pay-to-script-hash script.
+/// @param redeem_script - The redeem script ({@link HexString} or Uint8Array).
+/// @category Wallet SDK
+#[wasm_bindgen(js_name = payToScriptHashScript)]
+pub fn pay_to_script_hash_script(redeem_script: BinaryT) -> Result<ScriptPublicKey> {
+    let redeem_script = redeem_script.try_as_vec_u8()?;
+    Ok(standard::pay_to_script_hash_script(redeem_script.as_slice()))
+}
+
+/// Generates a signature script that fits a pay-to-script-hash script.
+/// @param redeem_script - The redeem script ({@link HexString} or Uint8Array).
+/// @param signature - The signature ({@link HexString} or Uint8Array).
+/// @category Wallet SDK
+#[wasm_bindgen(js_name = payToScriptHashSignatureScript)]
+pub fn pay_to_script_hash_signature_script(redeem_script: BinaryT, signature: BinaryT) -> Result<HexString> {
+    let redeem_script = redeem_script.try_as_vec_u8()?;
+    let signature = signature.try_as_vec_u8()?;
+    let script = standard::pay_to_script_hash_signature_script(redeem_script, signature)?;
+    Ok(script.to_hex().into())
+}
+
+/// Returns the address encoded in a script public key.
+/// @param script_public_key - The script public key ({@link ScriptPublicKey}).
+/// @param network - The network type.
+/// @category Wallet SDK
+#[wasm_bindgen(js_name = addressFromScriptPublicKey)]
+pub fn address_from_script_public_key(script_public_key: &ScriptPublicKeyT, network: &NetworkTypeT) -> Result<AddressOrUndefinedT> {
+    let script_public_key = ScriptPublicKey::try_cast_from(script_public_key)?;
+    let network_type = NetworkType::try_from(network)?;
+
+    match standard::extract_script_pub_key_address(script_public_key.as_ref(), network_type.into()) {
+        Ok(address) => Ok(AddressOrUndefinedT::from(JsValue::from(address))),
+        Err(_) => Ok(AddressOrUndefinedT::from(JsValue::UNDEFINED)),
+    }
+}
+
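A hedged usage sketch of the `standard` helpers wrapped above (illustrative, not from the patch; `redeem_script` and `signature` are assumed `Vec<u8>` bindings):

// Lock an output to the hash of a redeem script, then build the
// signature script that satisfies it when spending.
let spk: ScriptPublicKey = standard::pay_to_script_hash_script(redeem_script.as_slice());
let signature_script = standard::pay_to_script_hash_signature_script(redeem_script, signature)?;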
+/// Returns true if the script passed is a pay-to-pubkey.
+/// @param script - The script ({@link HexString} or Uint8Array).
+/// @category Wallet SDK
+#[wasm_bindgen(js_name = isScriptPayToPubkey)]
+pub fn is_script_pay_to_pubkey(script: BinaryT) -> Result<bool> {
+    let script = script.try_as_vec_u8()?;
+    Ok(ScriptClass::is_pay_to_pubkey(script.as_slice()))
+}
+
+/// Returns true if the script passed is an ECDSA pay-to-pubkey.
+/// @param script - The script ({@link HexString} or Uint8Array).
+/// @category Wallet SDK
+#[wasm_bindgen(js_name = isScriptPayToPubkeyECDSA)]
+pub fn is_script_pay_to_pubkey_ecdsa(script: BinaryT) -> Result<bool> {
+    let script = script.try_as_vec_u8()?;
+    Ok(ScriptClass::is_pay_to_pubkey_ecdsa(script.as_slice()))
+}
+
+/// Returns true if the script passed is a pay-to-script-hash (P2SH) format, false otherwise.
+/// @param script - The script ({@link HexString} or Uint8Array).
+/// @category Wallet SDK
+#[wasm_bindgen(js_name = isScriptPayToScriptHash)]
+pub fn is_script_pay_to_script_hash(script: BinaryT) -> Result<bool> {
+    let script = script.try_as_vec_u8()?;
+    Ok(ScriptClass::is_pay_to_script_hash(script.as_slice()))
+}
diff --git a/consensus/client/src/utxo.rs b/consensus/client/src/utxo.rs
index ffa7f7a49c..bbfc1199d1 100644
--- a/consensus/client/src/utxo.rs
+++ b/consensus/client/src/utxo.rs
@@ -1,3 +1,13 @@
+//!
+//! # UTXO client-side data structures.
+//!
+//! This module provides client-side data structures for UTXO management.
+//! In particular, the [`UtxoEntry`] and [`UtxoEntryReference`] structs
+//! are used to represent UTXO entries in the wallet subsystem and WASM bindings.
+//!
+
+#![allow(non_snake_case)]
+
 use crate::imports::*;
 use crate::outpoint::{TransactionOutpoint, TransactionOutpointInner};
 use crate::result::Result;
@@ -29,16 +39,22 @@ export interface IUtxoEntry {
 #[wasm_bindgen]
 extern "C" {
+    /// WASM type representing an array of [`UtxoEntryReference`] objects (i.e. `UtxoEntryReference[]`)
     #[wasm_bindgen(extends = Array, typescript_type = "UtxoEntryReference[]")]
     pub type UtxoEntryReferenceArrayT;
+    /// WASM type representing a UTXO entry interface (a UTXO-like object)
     #[wasm_bindgen(typescript_type = "IUtxoEntry")]
     pub type IUtxoEntry;
+    /// WASM type representing an array of UTXO entries (i.e. `IUtxoEntry[]`)
     #[wasm_bindgen(typescript_type = "IUtxoEntry[]")]
     pub type IUtxoEntryArray;
 }
 
+/// A UTXO entry Id is a unique identifier for a UTXO entry defined by the `txid+output_index`.
 pub type UtxoEntryId = TransactionOutpointInner;
 
+/// [`UtxoEntry`] struct represents a client-side UTXO entry.
+///
 /// @category Wallet SDK
 #[derive(Clone, Debug, Serialize, Deserialize, CastFromJs)]
 #[serde(rename_all = "camelCase")]
@@ -101,6 +117,12 @@ impl UtxoEntry {
     }
 }
 
+impl AsRef<UtxoEntry> for UtxoEntry {
+    fn as_ref(&self) -> &UtxoEntry {
+        self
+    }
+}
+
 impl From<&UtxoEntry> for cctx::UtxoEntry {
     fn from(utxo: &UtxoEntry) -> Self {
         cctx::UtxoEntry {
@@ -113,6 +135,8 @@ impl From<&UtxoEntry> for cctx::UtxoEntry {
     }
 }
 
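A hedged sketch (not from the patch) of the conversion path these impls enable; the literal field values are placeholders, and `TransactionOutpoint::simulated()` is assumed to be visible from this scope:

let client_entry = UtxoEntry {
    address: None,
    outpoint: TransactionOutpoint::simulated(),
    amount: 1_000,
    script_public_key: ScriptPublicKey::from_vec(0, vec![]),
    block_daa_score: 0,
    is_coinbase: false,
};
// From<&UtxoEntry> maps the client entry onto the consensus-core representation.
let consensus_entry: cctx::UtxoEntry = (&client_entry).into();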
+/// [`Arc`] reference to a [`UtxoEntry`] used by the wallet subsystems.
+///
 /// @category Wallet SDK
 #[derive(Clone, Debug, Serialize, Deserialize, CastFromJs)]
 #[wasm_bindgen(inspectable)]
@@ -136,14 +160,14 @@ impl UtxoEntryReference {
         self.as_ref().clone()
     }
 
-    #[wasm_bindgen(js_name = "getTransactionId")]
-    pub fn transaction_id_as_string(&self) -> String {
-        self.utxo.outpoint.get_transaction_id_as_string()
+    #[wasm_bindgen(getter)]
+    pub fn outpoint(&self) -> TransactionOutpoint {
+        self.utxo.outpoint.clone()
     }
 
-    #[wasm_bindgen(js_name = "getId")]
-    pub fn id_string(&self) -> String {
-        self.utxo.outpoint.id_string()
+    #[wasm_bindgen(getter)]
+    pub fn address(&self) -> Option<Address>
{ + self.utxo.address.clone() } #[wasm_bindgen(getter)] @@ -160,6 +184,11 @@ impl UtxoEntryReference { pub fn block_daa_score(&self) -> u64 { self.utxo.block_daa_score } + + #[wasm_bindgen(getter, js_name = "scriptPublicKey")] + pub fn script_public_key(&self) -> ScriptPublicKey { + self.utxo.script_public_key.clone() + } } impl UtxoEntryReference { @@ -240,6 +269,7 @@ impl PartialOrd for UtxoEntryReference { } } +/// An extension trait to convert a JS value into a vec of UTXO entry references. pub trait TryIntoUtxoEntryReferences { fn try_into_utxo_entry_references(&self) -> Result>; } @@ -252,7 +282,10 @@ impl TryIntoUtxoEntryReferences for JsValue { impl TryCastFromJs for UtxoEntry { type Error = Error; - fn try_cast_from(value: impl AsRef) -> Result, Self::Error> { + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + where + R: AsRef + 'a, + { Ok(Self::try_ref_from_js_value_as_cast(value)?) } } @@ -372,21 +405,45 @@ impl TryFrom for UtxoEntries { impl TryCastFromJs for UtxoEntryReference { type Error = Error; - fn try_cast_from(value: impl AsRef) -> Result, Self::Error> { - Self::resolve(&value, || { + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + where + R: AsRef + 'a, + { + Self::resolve(value, || { if let Ok(utxo_entry) = UtxoEntry::try_ref_from_js_value(&value) { Ok(Self::from(utxo_entry.clone())) } else if let Some(object) = Object::try_from(value.as_ref()) { - let address = object.get_cast::
("address")?.into_owned(); + let address = object.try_cast_into::
("address")?; let outpoint = TransactionOutpoint::try_from(object.get_value("outpoint")?.as_ref())?; let utxo_entry = Object::from(object.get_value("utxoEntry")?); - let amount = utxo_entry.get_u64("amount")?; - let script_public_key = ScriptPublicKey::try_owned_from(utxo_entry.get_value("scriptPublicKey")?)?; - let block_daa_score = utxo_entry.get_u64("blockDaaScore")?; - let is_coinbase = utxo_entry.get_bool("isCoinbase")?; - let utxo_entry = - UtxoEntry { address: Some(address), outpoint, amount, script_public_key, block_daa_score, is_coinbase }; + let utxo_entry = if !utxo_entry.is_undefined() { + let amount = utxo_entry.get_u64("amount").map_err(|_| { + Error::custom("Supplied object does not contain `utxoEntry.amount` property (or it is not a numerical value)") + })?; + let script_public_key = ScriptPublicKey::try_owned_from(utxo_entry.get_value("scriptPublicKey")?) + .map_err(|_|Error::custom("Supplied object does not contain `utxoEntry.scriptPublicKey` property (or it is not a hex string or a ScriptPublicKey class)"))?; + let block_daa_score = utxo_entry.get_u64("blockDaaScore").map_err(|_| { + Error::custom( + "Supplied object does not contain `utxoEntry.blockDaaScore` property (or it is not a numerical value)", + ) + })?; + let is_coinbase = utxo_entry.get_bool("isCoinbase")?; + + UtxoEntry { address, outpoint, amount, script_public_key, block_daa_score, is_coinbase } + } else { + let amount = object.get_u64("amount").map_err(|_| { + Error::custom("Supplied object does not contain `amount` property (or it is not a numerical value)") + })?; + let script_public_key = ScriptPublicKey::try_owned_from(object.get_value("scriptPublicKey")?) + .map_err(|_|Error::custom("Supplied object does not contain `scriptPublicKey` property (or it is not a hex string or a ScriptPublicKey class)"))?; + let block_daa_score = object.get_u64("blockDaaScore").map_err(|_| { + Error::custom("Supplied object does not contain `blockDaaScore` property (or it is not a numerical value)") + })?; + let is_coinbase = object.try_get_bool("isCoinbase")?.unwrap_or(false); + + UtxoEntry { address, outpoint, amount, script_public_key, block_daa_score, is_coinbase } + }; Ok(UtxoEntryReference::from(utxo_entry)) } else { @@ -407,7 +464,7 @@ impl UtxoEntryReference { let outpoint = TransactionOutpoint::simulated(); let script_public_key = kaspa_txscript::pay_to_address_script(address); let block_daa_score = 0; - let is_coinbase = true; + let is_coinbase = false; let utxo_entry = UtxoEntry { address: Some(address.clone()), outpoint, amount, script_public_key, block_daa_score, is_coinbase }; diff --git a/consensus/client/src/vtx.rs b/consensus/client/src/vtx.rs deleted file mode 100644 index e5fdd92363..0000000000 --- a/consensus/client/src/vtx.rs +++ /dev/null @@ -1,35 +0,0 @@ -use crate::imports::*; -// use crate::serializable::{numeric,string}; -use crate::result::Result; -use kaspa_addresses::Address; -use serde::de::DeserializeOwned; -// use serde::de::DeserializeOwned; - -#[derive(Debug, Serialize, Deserialize, Clone)] -#[serde(rename_all = "camelCase")] -pub struct VirtualTransactionT -where - T: Clone + serde::Serialize, -{ - //} + Deserialize { - pub version: u32, - pub generator: Option, - pub transactions: Vec, - pub addresses: Option>, -} - -impl VirtualTransactionT -where - T: Clone + Serialize, -{ - pub fn deserialize(json: &str) -> Result - where - T: DeserializeOwned, - { - Ok(serde_json::from_str(json)?) 
-    }
-
-    pub fn serialize(&self) -> String {
-        serde_json::to_string(self).unwrap()
-    }
-}
diff --git a/consensus/core/Cargo.toml b/consensus/core/Cargo.toml
index eb22eb7282..228b4ac11d 100644
--- a/consensus/core/Cargo.toml
+++ b/consensus/core/Cargo.toml
@@ -42,6 +42,7 @@ thiserror.workspace = true
 wasm-bindgen.workspace = true
 workflow-core.workspace = true
 workflow-log.workspace = true
+workflow-serializer.workspace = true
 workflow-wasm.workspace = true
 
 [dev-dependencies]
@@ -54,5 +55,5 @@ web-sys.workspace = true
 name = "serde_benchmark"
 harness = false
 
-[lints.clippy]
-empty_docs = "allow"
+[lints]
+workspace = true
diff --git a/consensus/core/src/api/args.rs b/consensus/core/src/api/args.rs
new file mode 100644
index 0000000000..ebc76d97d0
--- /dev/null
+++ b/consensus/core/src/api/args.rs
@@ -0,0 +1,47 @@
+use std::collections::HashMap;
+
+use crate::tx::TransactionId;
+
+/// A struct provided to consensus for transaction validation processing calls
+#[derive(Clone, Debug, Default)]
+pub struct TransactionValidationArgs {
+    /// Optional fee/mass threshold above which a bound transaction is not rejected
+    pub feerate_threshold: Option<f64>,
+}
+
+impl TransactionValidationArgs {
+    pub fn new(feerate_threshold: Option<f64>) -> Self {
+        Self { feerate_threshold }
+    }
+}
+
+/// A struct provided to consensus for batch transaction validation processing calls
+pub struct TransactionValidationBatchArgs {
+    tx_args: HashMap<TransactionId, TransactionValidationArgs>,
+}
+
+impl TransactionValidationBatchArgs {
+    const DEFAULT_ARGS: TransactionValidationArgs = TransactionValidationArgs { feerate_threshold: None };
+
+    pub fn new() -> Self {
+        Self { tx_args: HashMap::new() }
+    }
+
+    /// Set some fee/mass threshold for transaction `transaction_id`.
+    pub fn set_feerate_threshold(&mut self, transaction_id: TransactionId, feerate_threshold: f64) {
+        self.tx_args
+            .entry(transaction_id)
+            .and_modify(|x| x.feerate_threshold = Some(feerate_threshold))
+            .or_insert(TransactionValidationArgs::new(Some(feerate_threshold)));
+    }
+
+    pub fn get(&self, transaction_id: &TransactionId) -> &TransactionValidationArgs {
+        self.tx_args.get(transaction_id).unwrap_or(&Self::DEFAULT_ARGS)
+    }
+}
+
+impl Default for TransactionValidationBatchArgs {
+    fn default() -> Self {
+        Self::new()
+    }
+}
diff --git a/consensus/core/src/api/counters.rs b/consensus/core/src/api/counters.rs
index 5faee5bc3b..0297dab265 100644
--- a/consensus/core/src/api/counters.rs
+++ b/consensus/core/src/api/counters.rs
@@ -9,6 +9,7 @@ pub struct ProcessingCounters {
     pub body_counts: AtomicU64,
     pub txs_counts: AtomicU64,
     pub chain_block_counts: AtomicU64,
+    pub chain_disqualified_counts: AtomicU64,
     pub mass_counts: AtomicU64,
 }
@@ -22,6 +23,7 @@ impl ProcessingCounters {
             body_counts: self.body_counts.load(Ordering::Relaxed),
             txs_counts: self.txs_counts.load(Ordering::Relaxed),
             chain_block_counts: self.chain_block_counts.load(Ordering::Relaxed),
+            chain_disqualified_counts: self.chain_disqualified_counts.load(Ordering::Relaxed),
             mass_counts: self.mass_counts.load(Ordering::Relaxed),
         }
     }
@@ -36,6 +38,7 @@ pub struct ProcessingCountersSnapshot {
     pub body_counts: u64,
     pub txs_counts: u64,
     pub chain_block_counts: u64,
+    pub chain_disqualified_counts: u64,
     pub mass_counts: u64,
 }
@@ -51,6 +54,7 @@ impl core::ops::Sub for &ProcessingCountersSnapshot {
             body_counts: self.body_counts.saturating_sub(rhs.body_counts),
             txs_counts: self.txs_counts.saturating_sub(rhs.txs_counts),
             chain_block_counts: self.chain_block_counts.saturating_sub(rhs.chain_block_counts),
+            chain_disqualified_counts: self.chain_disqualified_counts.saturating_sub(rhs.chain_disqualified_counts),
             mass_counts: self.mass_counts.saturating_sub(rhs.mass_counts),
         }
     }
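A hedged usage sketch for the batch args introduced above (not from the patch; `tx_id` and `other_id` are assumed `TransactionId` bindings):

let mut args = TransactionValidationBatchArgs::new();
args.set_feerate_threshold(tx_id, 1.5);
// Ids without explicit args fall back to DEFAULT_ARGS (no threshold).
assert_eq!(args.get(&tx_id).feerate_threshold, Some(1.5));
assert_eq!(args.get(&other_id).feerate_threshold, None);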
diff --git a/consensus/core/src/api/mod.rs b/consensus/core/src/api/mod.rs
index 23e7abb530..365b8404c1 100644
--- a/consensus/core/src/api/mod.rs
+++ b/consensus/core/src/api/mod.rs
@@ -4,6 +4,7 @@ use std::sync::Arc;
 
 use crate::{
     acceptance_data::AcceptanceData,
+    api::args::{TransactionValidationArgs, TransactionValidationBatchArgs},
     block::{Block, BlockTemplate, TemplateBuildMode, TemplateTransactionSelector, VirtualStateApproxId},
     blockstatus::BlockStatus,
     coinbase::MinerData,
@@ -25,6 +26,7 @@ use kaspa_hashes::Hash;
 
 pub use self::stats::{BlockCount, ConsensusStats};
 
+pub mod args;
 pub mod counters;
 pub mod stats;
 
@@ -37,7 +39,7 @@ pub struct BlockValidationFutures {
 
     /// A future triggered when DAG state which included this block has been processed by the virtual processor
     /// (exceptions are header-only blocks and trusted blocks which have the future completed before virtual
-    /// processing along with the [`block_task`])
+    /// processing along with the `block_task`)
     pub virtual_state_task: BlockValidationFuture,
 }
 
@@ -62,14 +64,18 @@ pub trait ConsensusApi: Send + Sync {
     }
 
     /// Populates the mempool transaction with maximally found UTXO entry data and proceeds to full transaction
-    /// validation if all are found. If validation is successful, also [`transaction.calculated_fee`] is expected to be populated.
-    fn validate_mempool_transaction(&self, transaction: &mut MutableTransaction) -> TxResult<()> {
+    /// validation if all are found. If validation is successful, also `transaction.calculated_fee` is expected to be populated.
+    fn validate_mempool_transaction(&self, transaction: &mut MutableTransaction, args: &TransactionValidationArgs) -> TxResult<()> {
         unimplemented!()
     }
 
     /// Populates the mempool transactions with maximally found UTXO entry data and proceeds to full transactions
-    /// validation if all are found. If validation is successful, also [`transaction.calculated_fee`] is expected to be populated.
-    fn validate_mempool_transactions_in_parallel(&self, transactions: &mut [MutableTransaction]) -> Vec<TxResult<()>> {
+    /// validation if all are found. If validation is successful, also `transaction.calculated_fee` is expected to be populated.
+    fn validate_mempool_transactions_in_parallel(
+        &self,
+        transactions: &mut [MutableTransaction],
+        args: &TransactionValidationBatchArgs,
+    ) -> Vec<TxResult<()>> {
         unimplemented!()
     }
 
@@ -127,6 +133,10 @@ pub trait ConsensusApi: Send + Sync {
         unimplemented!()
     }
 
+    fn get_current_block_color(&self, hash: Hash) -> Option<bool> {
+        unimplemented!()
+    }
+
     fn get_virtual_state_approx_id(&self) -> VirtualStateApproxId {
         unimplemented!()
     }
@@ -147,7 +157,12 @@ pub trait ConsensusApi: Send + Sync {
         unimplemented!()
     }
 
-    fn get_virtual_chain_from_block(&self, hash: Hash) -> ConsensusResult<ChainPath> {
+    /// Gets the virtual chain paths from `low` to the `sink` hash, or until `chain_path_added_limit` is reached
+    ///
+    /// Note:
+    /// 1) `chain_path_added_limit` always populates the removed chain path fully, and then bounds the added chain path to at most `chain_path_added_limit` hashes.
+    /// 1.1) use `None` to impose no limit; this enables an optimized backward chain iteration, which performs better in cases where batching is not required.
+    fn get_virtual_chain_from_block(&self, low: Hash, chain_path_added_limit: Option<usize>) -> ConsensusResult<ChainPath> {
         unimplemented!()
     }
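A hedged consumer-side sketch of the batched traversal this API enables (not from the patch; it assumes a `consensus` value implementing `ConsensusApi` and that `ChainPath` exposes `added`/`removed` hash vectors):

let mut low = source; // `source` is an assumed starting hash
loop {
    let path = consensus.get_virtual_chain_from_block(low, Some(1800))?;
    handle(&path.removed, &path.added); // `handle` is a placeholder
    match path.added.last() {
        Some(&next) => low = next, // resume the next batch from the last added chain block
        None => break,             // caught up with the sink
    }
}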
@@ -184,6 +199,10 @@ pub trait ConsensusApi: Send + Sync {
         unimplemented!()
     }
 
+    fn calc_transaction_hash_merkle_root(&self, txs: &[Transaction], pov_daa_score: u64) -> Hash {
+        unimplemented!()
+    }
+
     fn validate_pruning_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> {
         unimplemented!()
     }
@@ -283,7 +302,11 @@ pub trait ConsensusApi: Send + Sync {
     /// Returns acceptance data for a set of blocks belonging to the selected parent chain.
     ///
     /// See `self::get_virtual_chain`
-    fn get_blocks_acceptance_data(&self, hashes: &[Hash]) -> ConsensusResult<Vec<Arc<AcceptanceData>>> {
+    fn get_blocks_acceptance_data(
+        &self,
+        hashes: &[Hash],
+        merged_blocks_limit: Option<usize>,
+    ) -> ConsensusResult<Vec<Arc<AcceptanceData>>> {
         unimplemented!()
     }
 
diff --git a/consensus/core/src/api/stats.rs b/consensus/core/src/api/stats.rs
index fd59f09ae5..c2fea489cd 100644
--- a/consensus/core/src/api/stats.rs
+++ b/consensus/core/src/api/stats.rs
@@ -1,7 +1,7 @@
-use borsh::{BorshDeserialize, BorshSerialize};
 use serde::{Deserialize, Serialize};
+use workflow_serializer::prelude::*;
 
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, Default)]
+#[derive(Clone, Debug, Serialize, Deserialize, Default)]
 #[serde(rename_all = "camelCase")]
 pub struct BlockCount {
     pub header_count: u64,
@@ -14,6 +14,26 @@ impl BlockCount {
     }
 }
 
+impl Serializer for BlockCount {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        store!(u64, &self.header_count, writer)?;
+        store!(u64, &self.block_count, writer)?;
+
+        Ok(())
+    }
+}
+
+impl Deserializer for BlockCount {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        let header_count = load!(u64, reader)?;
+        let block_count = load!(u64, reader)?;
+
+        Ok(Self { header_count, block_count })
+    }
+}
+
 #[derive(Clone, Default)]
 pub struct VirtualStateStats {
     /// Number of direct parents of virtual
diff --git a/consensus/core/src/block.rs b/consensus/core/src/block.rs
index dde6fd5e75..cbd76b42dc 100644
--- a/consensus/core/src/block.rs
+++ b/consensus/core/src/block.rs
@@ -5,6 +5,7 @@ use crate::{
     BlueWorkType,
 };
 use kaspa_hashes::Hash;
+use kaspa_utils::mem_size::MemSizeEstimator;
 use std::sync::Arc;
 
 /// A mutable block structure where header and transactions within can still be mutated.
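A hedged round-trip sketch for the versioned `BlockCount` serialization above (not from the patch; it assumes the `Serializer`/`Deserializer` methods accept `std::io` writer and reader types as shown):

let count = BlockCount { header_count: 10, block_count: 7 };
let mut buf: Vec<u8> = vec![];
count.serialize(&mut buf)?; // writes the version tag (1), then both counters
let restored = BlockCount::deserialize(&mut buf.as_slice())?;
assert_eq!((restored.header_count, restored.block_count), (10, 7));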
@@ -66,6 +67,20 @@ impl Block { pub fn from_precomputed_hash(hash: Hash, parents: Vec) -> Block { Block::from_header(Header::from_precomputed_hash(hash, parents)) } + + pub fn asses_for_cache(&self) -> Option<()> { + (self.estimate_mem_bytes() < 1_000_000).then_some(()) + } +} + +impl MemSizeEstimator for Block { + fn estimate_mem_bytes(&self) -> usize { + // Calculates mem bytes of the block (for cache tracking purposes) + size_of::() + + self.header.estimate_mem_bytes() + + size_of::>() + + self.transactions.iter().map(Transaction::estimate_mem_bytes).sum::() + } } /// An abstraction for a recallable transaction selector with persistent state @@ -105,6 +120,8 @@ pub struct BlockTemplate { pub selected_parent_timestamp: u64, pub selected_parent_daa_score: u64, pub selected_parent_hash: Hash, + /// Expected length is one less than txs length due to lack of coinbase transaction + pub calculated_fees: Vec, } impl BlockTemplate { @@ -115,8 +132,17 @@ impl BlockTemplate { selected_parent_timestamp: u64, selected_parent_daa_score: u64, selected_parent_hash: Hash, + calculated_fees: Vec, ) -> Self { - Self { block, miner_data, coinbase_has_red_reward, selected_parent_timestamp, selected_parent_daa_score, selected_parent_hash } + Self { + block, + miner_data, + coinbase_has_red_reward, + selected_parent_timestamp, + selected_parent_daa_score, + selected_parent_hash, + calculated_fees, + } } pub fn to_virtual_state_approx_id(&self) -> VirtualStateApproxId { diff --git a/consensus/core/src/config/bps.rs b/consensus/core/src/config/bps.rs index c0c52a6dfd..5e98aac5df 100644 --- a/consensus/core/src/config/bps.rs +++ b/consensus/core/src/config/bps.rs @@ -33,7 +33,7 @@ impl Bps { } /// Returns the GHOSTDAG K value which was pre-computed for this BPS - /// (see [`calculate_ghostdag_k`] and [`gen_ghostdag_table`] for the full calculation) + /// (see [`calculate_ghostdag_k`] and `gen_ghostdag_table` for the full calculation) #[rustfmt::skip] pub const fn ghostdag_k() -> KType { match BPS { diff --git a/consensus/core/src/config/genesis.rs b/consensus/core/src/config/genesis.rs index 204098ad2a..9f9ea21e54 100644 --- a/consensus/core/src/config/genesis.rs +++ b/consensus/core/src/config/genesis.rs @@ -231,7 +231,7 @@ mod tests { fn test_genesis_hashes() { [GENESIS, TESTNET_GENESIS, TESTNET11_GENESIS, SIMNET_GENESIS, DEVNET_GENESIS].into_iter().for_each(|genesis| { let block: Block = (&genesis).into(); - assert_hashes_eq(calc_hash_merkle_root(block.transactions.iter()), block.header.hash_merkle_root); + assert_hashes_eq(calc_hash_merkle_root(block.transactions.iter(), false), block.header.hash_merkle_root); assert_hashes_eq(block.hash(), genesis.hash); }); } diff --git a/consensus/core/src/config/params.rs b/consensus/core/src/config/params.rs index e2f2639a16..f3479b4c2b 100644 --- a/consensus/core/src/config/params.rs +++ b/consensus/core/src/config/params.rs @@ -309,6 +309,10 @@ pub const MAINNET_PARAMS: Params = Params { "kaspadns.kaspacalc.net", // This DNS seeder is run by supertypo "n-mainnet.kaspa.ws", + // This DNS seeder is run by -gerri- + "dnsseeder-kaspa-mainnet.x-con.at", + // This DNS seeder is run by H@H + "ns-mainnet.kaspa-dnsseeder.net", ], net: NetworkId::new(NetworkType::Mainnet), genesis: GENESIS, @@ -368,6 +372,10 @@ pub const TESTNET_PARAMS: Params = Params { dns_seeders: &[ // This DNS seeder is run by Tiram "seeder1-testnet.kaspad.net", + // This DNS seeder is run by -gerri- + "dnsseeder-kaspa-testnet.x-con.at", + // This DNS seeder is run by H@H + 
"ns-testnet10.kaspa-dnsseeder.net", ], net: NetworkId::with_suffix(NetworkType::Testnet, 10), genesis: TESTNET_GENESIS, @@ -429,6 +437,10 @@ pub const TESTNET11_PARAMS: Params = Params { "seeder1-testnet-11.kaspad.net", // This DNS seeder is run by supertypo "n-testnet-11.kaspa.ws", + // This DNS seeder is run by -gerri- + "dnsseeder-kaspa-testnet11.x-con.at", + // This DNS seeder is run by H@H + "ns-testnet11.kaspa-dnsseeder.net", ], net: NetworkId::with_suffix(NetworkType::Testnet, 11), genesis: TESTNET11_GENESIS, @@ -501,7 +513,8 @@ pub const SIMNET_PARAMS: Params = Params { target_time_per_block: Testnet11Bps::target_time_per_block(), past_median_time_sample_rate: Testnet11Bps::past_median_time_sample_rate(), difficulty_sample_rate: Testnet11Bps::difficulty_adjustment_sample_rate(), - max_block_parents: Testnet11Bps::max_block_parents(), + // For simnet, we deviate from TN11 configuration and allow at least 64 parents in order to support mempool benchmarks out of the box + max_block_parents: if Testnet11Bps::max_block_parents() > 64 { Testnet11Bps::max_block_parents() } else { 64 }, mergeset_size_limit: Testnet11Bps::mergeset_size_limit(), merge_depth: Testnet11Bps::merge_depth_bound(), finality_depth: Testnet11Bps::finality_depth(), diff --git a/consensus/core/src/errors/block.rs b/consensus/core/src/errors/block.rs index 9aab18905f..f5c235476a 100644 --- a/consensus/core/src/errors/block.rs +++ b/consensus/core/src/errors/block.rs @@ -147,6 +147,10 @@ pub enum RuleError { #[error("DAA window data has only {0} entries")] InsufficientDaaWindowSize(usize), + + /// Currently this error is never created because it is impossible to submit such a block + #[error("cannot add block body to a pruned block")] + PrunedBlock, } pub type BlockProcessResult = std::result::Result; diff --git a/consensus/core/src/errors/tx.rs b/consensus/core/src/errors/tx.rs index e1936d37aa..f21409857f 100644 --- a/consensus/core/src/errors/tx.rs +++ b/consensus/core/src/errors/tx.rs @@ -1,4 +1,5 @@ use crate::constants::MAX_SOMPI; +use crate::subnets::SubnetworkId; use crate::tx::TransactionOutpoint; use kaspa_txscript_errors::TxScriptError; use thiserror::Error; @@ -80,6 +81,9 @@ pub enum TxRuleError { #[error("failed to verify the signature script: {0}")] SignatureInvalid(TxScriptError), + #[error("failed to verify empty signature script. 
Inner error: {0}")] + SignatureEmpty(TxScriptError), + #[error("input {0} sig op count is {1}, but the calculated value is {2}")] WrongSigOpCount(usize, u64, u64), @@ -88,6 +92,14 @@ pub enum TxRuleError { #[error("calculated contextual mass (including storage mass) {0} is not equal to the committed mass field {1}")] WrongMass(u64, u64), + + #[error("transaction subnetwork id {0} is neither native nor coinbase")] + SubnetworksDisabled(SubnetworkId), + + /// [`TxRuleError::FeerateTooLow`] is not a consensus error but a mempool error triggered by the + /// fee/mass RBF validation rule + #[error("fee rate per contextual mass gram is not greater than the fee rate of the replaced transaction")] + FeerateTooLow, } pub type TxResult = std::result::Result; diff --git a/consensus/core/src/hashing/mod.rs b/consensus/core/src/hashing/mod.rs index aa0ac2d9b6..edcca1d034 100644 --- a/consensus/core/src/hashing/mod.rs +++ b/consensus/core/src/hashing/mod.rs @@ -5,6 +5,8 @@ pub mod header; pub mod sighash; pub mod sighash_type; pub mod tx; +#[cfg(feature = "wasm32-sdk")] +pub mod wasm; pub trait HasherExtensions { /// Writes the len as u64 little endian bytes diff --git a/consensus/core/src/hashing/sighash_type.rs b/consensus/core/src/hashing/sighash_type.rs index 76d772f0da..a80091bbac 100644 --- a/consensus/core/src/hashing/sighash_type.rs +++ b/consensus/core/src/hashing/sighash_type.rs @@ -1,3 +1,4 @@ +use serde::{Deserialize, Serialize}; use wasm_bindgen::prelude::*; pub const SIG_HASH_ALL: SigHashType = SigHashType(0b00000001); @@ -18,7 +19,7 @@ const ALLOWED_SIG_HASH_TYPES_VALUES: [u8; 6] = [ SIG_HASH_SINGLE.0 | SIG_HASH_ANY_ONE_CAN_PAY.0, ]; -#[derive(Copy, Clone)] +#[derive(Debug, Copy, Clone, Serialize, Deserialize)] #[wasm_bindgen] pub struct SigHashType(pub(crate) u8); diff --git a/consensus/core/src/hashing/wasm.rs b/consensus/core/src/hashing/wasm.rs new file mode 100644 index 0000000000..4c9c94b223 --- /dev/null +++ b/consensus/core/src/hashing/wasm.rs @@ -0,0 +1,27 @@ +use super::sighash_type::{self, SigHashType}; +use wasm_bindgen::prelude::*; + +/// Kaspa Sighash types allowed by consensus +/// @category Consensus +#[wasm_bindgen] +pub enum SighashType { + All, + None, + Single, + AllAnyOneCanPay, + NoneAnyOneCanPay, + SingleAnyOneCanPay, +} + +impl From for SigHashType { + fn from(sighash_type: SighashType) -> SigHashType { + match sighash_type { + SighashType::All => sighash_type::SIG_HASH_ALL, + SighashType::None => sighash_type::SIG_HASH_NONE, + SighashType::Single => sighash_type::SIG_HASH_SINGLE, + SighashType::AllAnyOneCanPay => sighash_type::SIG_HASH_ANY_ONE_CAN_PAY, + SighashType::NoneAnyOneCanPay => SigHashType(sighash_type::SIG_HASH_NONE.0 | sighash_type::SIG_HASH_ANY_ONE_CAN_PAY.0), + SighashType::SingleAnyOneCanPay => SigHashType(sighash_type::SIG_HASH_SINGLE.0 | sighash_type::SIG_HASH_ANY_ONE_CAN_PAY.0), + } + } +} diff --git a/consensus/core/src/header.rs b/consensus/core/src/header.rs index b6c2b9bc7e..e53de44255 100644 --- a/consensus/core/src/header.rs +++ b/consensus/core/src/header.rs @@ -1,6 +1,7 @@ use crate::{hashing, BlueWorkType}; use borsh::{BorshDeserialize, BorshSerialize}; use kaspa_hashes::Hash; +use kaspa_utils::mem_size::MemSizeEstimator; use serde::{Deserialize, Serialize}; /// @category Consensus @@ -92,6 +93,18 @@ impl Header { } } +impl AsRef
for Header { + fn as_ref(&self) -> &Header { + self + } +} + +impl MemSizeEstimator for Header { + fn estimate_mem_bytes(&self) -> usize { + size_of::() + self.parents_by_level.iter().map(|l| l.len()).sum::() * size_of::() + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/core/src/lib.rs b/consensus/core/src/lib.rs index 46ad3f2cea..188b2403b4 100644 --- a/consensus/core/src/lib.rs +++ b/consensus/core/src/lib.rs @@ -1,3 +1,9 @@ +//! +//! # Consensus Core +//! +//! This crate implements primitives used in the Kaspa node consensus processing. +//! + extern crate alloc; extern crate core; extern crate self as consensus_core; diff --git a/consensus/core/src/mass/mod.rs b/consensus/core/src/mass/mod.rs index 6e348299c2..3a83077983 100644 --- a/consensus/core/src/mass/mod.rs +++ b/consensus/core/src/mass/mod.rs @@ -1,9 +1,21 @@ use crate::{ + config::params::Params, subnets::SUBNETWORK_ID_SIZE, - tx::{Transaction, TransactionInput, TransactionOutput}, + tx::{Transaction, TransactionInput, TransactionOutput, VerifiableTransaction}, }; use kaspa_hashes::HASH_SIZE; +/// Temp enum for the transition phases of KIP9 +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum Kip9Version { + /// Initial KIP9 mass calculation, w/o the relaxed formula and summing storage mass and compute mass + Alpha, + + /// Currently proposed KIP9 mass calculation, with the relaxed formula (for the cases `|O| = 1 OR |O| <= |I| <= 2`), + /// and using a maximum operator over storage and compute mass + Beta, +} + // transaction_estimated_serialized_size is the estimated size of a transaction in some // serialization. This has to be deterministic, but not necessarily accurate, since // it's only used as the size component in the transaction and block mass limit @@ -55,3 +67,271 @@ pub fn transaction_output_estimated_serialized_size(output: &TransactionOutput) size += output.script_public_key.script().len() as u64; size } + +// Note: consensus mass calculator operates on signed transactions. +// To calculate mass for unsigned transactions, please use +// `kaspa_wallet_core::tx::mass::MassCalculator` +#[derive(Clone)] +pub struct MassCalculator { + mass_per_tx_byte: u64, + mass_per_script_pub_key_byte: u64, + mass_per_sig_op: u64, + storage_mass_parameter: u64, +} + +impl MassCalculator { + pub fn new(mass_per_tx_byte: u64, mass_per_script_pub_key_byte: u64, mass_per_sig_op: u64, storage_mass_parameter: u64) -> Self { + Self { mass_per_tx_byte, mass_per_script_pub_key_byte, mass_per_sig_op, storage_mass_parameter } + } + + pub fn new_with_consensus_params(consensus_params: &Params) -> Self { + Self { + mass_per_tx_byte: consensus_params.mass_per_tx_byte, + mass_per_script_pub_key_byte: consensus_params.mass_per_script_pub_key_byte, + mass_per_sig_op: consensus_params.mass_per_sig_op, + storage_mass_parameter: consensus_params.storage_mass_parameter, + } + } + + /// Calculates the compute mass of this transaction. 
This does not include the storage mass calculation below, which
+    /// requires full UTXO context
+    pub fn calc_tx_compute_mass(&self, tx: &Transaction) -> u64 {
+        if tx.is_coinbase() {
+            return 0;
+        }
+
+        let size = transaction_estimated_serialized_size(tx);
+        let mass_for_size = size * self.mass_per_tx_byte;
+        let total_script_public_key_size: u64 = tx
+            .outputs
+            .iter()
+            .map(|output| 2 /* script public key version (u16) */ + output.script_public_key.script().len() as u64)
+            .sum();
+        let total_script_public_key_mass = total_script_public_key_size * self.mass_per_script_pub_key_byte;
+
+        let total_sigops: u64 = tx.inputs.iter().map(|input| input.sig_op_count as u64).sum();
+        let total_sigops_mass = total_sigops * self.mass_per_sig_op;
+
+        mass_for_size + total_script_public_key_mass + total_sigops_mass
+    }
+
+    /// Calculates the storage mass for this populated transaction.
+    /// Assumptions which must be verified before this call:
+    ///     1. All output values are non-zero
+    ///     2. At least one input (unless coinbase)
+    ///
+    /// Otherwise this function should never fail.
+    pub fn calc_tx_storage_mass(&self, tx: &impl VerifiableTransaction, version: Kip9Version) -> Option<u64> {
+        calc_storage_mass(
+            tx.is_coinbase(),
+            tx.populated_inputs().map(|(_, entry)| entry.amount),
+            tx.outputs().iter().map(|out| out.value),
+            version,
+            self.storage_mass_parameter,
+        )
+    }
+
+    /// Calculates the overall mass of this transaction, combining both compute and storage masses.
+    /// The combination strategy depends on the version passed.
+    pub fn calc_tx_overall_mass(
+        &self,
+        tx: &impl VerifiableTransaction,
+        cached_compute_mass: Option<u64>,
+        version: Kip9Version,
+    ) -> Option<u64> {
+        match version {
+            Kip9Version::Alpha => self
+                .calc_tx_storage_mass(tx, version)
+                .and_then(|mass| mass.checked_add(cached_compute_mass.unwrap_or_else(|| self.calc_tx_compute_mass(tx.tx())))),
+            Kip9Version::Beta => self
+                .calc_tx_storage_mass(tx, version)
+                .map(|mass| mass.max(cached_compute_mass.unwrap_or_else(|| self.calc_tx_compute_mass(tx.tx())))),
+        }
+    }
+}
+
+/// Calculates the storage mass for the provided input and output values.
+/// Assumptions which must be verified before this call:
+///     1. All output values are non-zero
+///     2. At least one input (unless coinbase)
+///
+/// Otherwise this function should never fail.
+pub fn calc_storage_mass(
+    is_coinbase: bool,
+    input_values: impl ExactSizeIterator<Item = u64>,
+    output_values: impl ExactSizeIterator<Item = u64>,
+    version: Kip9Version,
+    storage_mass_parameter: u64,
+) -> Option<u64> {
+    if is_coinbase {
+        return Some(0);
+    }
+
+    let outs_len = output_values.len() as u64;
+    let ins_len = input_values.len() as u64;
+
+    /* The code below computes the following formula:
+
+            max( 0 , C·( |O|/H(O) - |I|/A(I) ) )
+
+       where C is the mass storage parameter, O is the set of output values, I is the set of
+       input values, H(S) := |S|/sum_{s in S} 1 / s is the harmonic mean over the set S and
+       A(S) := sum_{s in S} s / |S| is the arithmetic mean.
+
+       See KIP-0009 for more details
+    */
+
+    // Since we are doing integer division, we perform the multiplication with C over the inner
+    // fractions, otherwise we'll get a sum of zeros or ones.
+    //
+    // If the sum of fractions overflowed (nearly impossible, requires 10^7 outputs for C = 10^12),
+    // we return `None`, indicating the mass is incomputable.
+    //
+    // Note: in theory this can be tightened by subtracting input mass in the process (possibly avoiding the overflow),
+    // however the overflow case is so impractical with current mass limits that we avoid the hassle
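// A worked numeric check (illustrative, not part of the diff), using the values of the
// `test_mass_storage_beta` case added below, with C = 10^12, inputs {100, 200}, outputs {50, 250}:
//   harmonic_outs  = C/50  + C/250 = 24_000_000_000            // C·|O|/H(O)
//   harmonic_ins   = C/100 + C/200 = 15_000_000_000            // C·|I|/H(I) (Beta relaxed branch)
//     Beta storage mass  = 24e9 - 15e9 = 9_000_000_000
//   arithmetic_ins = 2 * (C / 150) = 13_333_333_332            // C·|I|/A(I) (general branch, A(I) = 150)
//     Alpha storage mass = 24e9 - 13_333_333_332 = 10_666_666_668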
+    let harmonic_outs =
+        output_values.map(|out| storage_mass_parameter / out).try_fold(0u64, |total, current| total.checked_add(current))?; // C·|O|/H(O)
+
+    /*
+        KIP-0009 relaxed formula for the cases |O| = 1 OR |O| <= |I| <= 2:
+                    max( 0 , C·( |O|/H(O) - |I|/H(I) ) )
+
+        Note: in the case |I| = 1 both formulas are equal, yet the following code (harmonic_ins) is a bit more efficient.
+        Hence, we transform the condition to |O| = 1 OR |I| = 1 OR |O| = |I| = 2 which is equivalent (and faster).
+    */
+    if version == Kip9Version::Beta && (outs_len == 1 || ins_len == 1 || (outs_len == 2 && ins_len == 2)) {
+        let harmonic_ins =
+            input_values.map(|value| storage_mass_parameter / value).fold(0u64, |total, current| total.saturating_add(current)); // C·|I|/H(I)
+        return Some(harmonic_outs.saturating_sub(harmonic_ins)); // max( 0 , C·( |O|/H(O) - |I|/H(I) ) )
+    }
+
+    // Total supply is bounded, so a sum of existing UTXO entries cannot overflow (nor can it be zero)
+    let sum_ins = input_values.sum::<u64>(); // |I|·A(I)
+    let mean_ins = sum_ins / ins_len;
+
+    // The inner fraction must be computed with C over the mean value, in order to maximize precision.
+    // We can saturate the overall expression at u64::MAX since we lower-bound the subtraction below by zero anyway
+    let arithmetic_ins = ins_len.saturating_mul(storage_mass_parameter / mean_ins); // C·|I|/A(I)
+
+    Some(harmonic_outs.saturating_sub(arithmetic_ins)) // max( 0 , C·( |O|/H(O) - |I|/A(I) ) )
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::{
+        constants::{SOMPI_PER_KASPA, STORAGE_MASS_PARAMETER},
+        subnets::SubnetworkId,
+        tx::*,
+    };
+    use std::str::FromStr;
+
+    #[test]
+    fn test_mass_storage() {
+        // Tx with fewer outs than ins
+        let mut tx = generate_tx_from_amounts(&[100, 200, 300], &[300, 300]);
+        let test_version = Kip9Version::Alpha;
+
+        // Assert the formula: max( 0 , C·( |O|/H(O) - |I|/A(I) ) )
+
+        let storage_mass =
+            MassCalculator::new(0, 0, 0, 10u64.pow(12)).calc_tx_storage_mass(&tx.as_verifiable(), test_version).unwrap();
+        assert_eq!(storage_mass, 0); // Compounds from 3 to 2, with symmetric outputs and no fee, should be zero
+
+        // Create asymmetry
+        tx.tx.outputs[0].value = 50;
+        tx.tx.outputs[1].value = 550;
+        let storage_mass_parameter = 10u64.pow(12);
+        let storage_mass =
+            MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable(), test_version).unwrap();
+        assert_eq!(storage_mass, storage_mass_parameter / 50 + storage_mass_parameter / 550 - 3 * (storage_mass_parameter / 200));
+
+        // Create a tx with more outs than ins
+        let base_value = 10_000 * SOMPI_PER_KASPA;
+        let mut tx = generate_tx_from_amounts(&[base_value, base_value, base_value * 2], &[base_value; 4]);
+        let storage_mass_parameter = STORAGE_MASS_PARAMETER;
+        let storage_mass =
+            MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable(), test_version).unwrap();
+        assert_eq!(storage_mass, 4); // Inputs are above C so they don't contribute negative mass, 4 outputs exactly equal C each charge 1
+
+        let mut tx2 = tx.clone();
+        tx2.tx.outputs[0].value = 10 * SOMPI_PER_KASPA;
+        let storage_mass =
+            MassCalculator::new(0, 0, 0,
storage_mass_parameter).calc_tx_storage_mass(&tx2.as_verifiable(), test_version).unwrap(); + assert_eq!(storage_mass, 1003); + + // Increase values over the lim + for out in tx.tx.outputs.iter_mut() { + out.value += 1 + } + tx.entries[0].as_mut().unwrap().amount += tx.tx.outputs.len() as u64; + let storage_mass = + MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable(), test_version).unwrap(); + assert_eq!(storage_mass, 0); + } + + #[test] + fn test_mass_storage_beta() { + // 2:2 transaction + let mut tx = generate_tx_from_amounts(&[100, 200], &[50, 250]); + let storage_mass_parameter = 10u64.pow(12); + let test_version = Kip9Version::Beta; + // Assert the formula: max( 0 , C·( |O|/H(O) - |I|/O(I) ) ) + + let storage_mass = + MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable(), test_version).unwrap(); + assert_eq!(storage_mass, 9000000000); + + // Set outputs to be equal to inputs + tx.tx.outputs[0].value = 100; + tx.tx.outputs[1].value = 200; + let storage_mass = + MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable(), test_version).unwrap(); + assert_eq!(storage_mass, 0); + + // Remove an output and make sure the other is small enough to make storage mass greater than zero + tx.tx.outputs.pop(); + tx.tx.outputs[0].value = 50; + let storage_mass = + MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable(), test_version).unwrap(); + assert_eq!(storage_mass, 5000000000); + } + + fn generate_tx_from_amounts(ins: &[u64], outs: &[u64]) -> MutableTransaction { + let script_pub_key = ScriptVec::from_slice(&[]); + let prev_tx_id = TransactionId::from_str("880eb9819a31821d9d2399e2f35e2433b72637e393d71ecc9b8d0250f49153c3").unwrap(); + let tx = Transaction::new( + 0, + (0..ins.len()) + .map(|i| TransactionInput { + previous_outpoint: TransactionOutpoint { transaction_id: prev_tx_id, index: i as u32 }, + signature_script: vec![], + sequence: 0, + sig_op_count: 0, + }) + .collect(), + outs.iter() + .copied() + .map(|out_amount| TransactionOutput { + value: out_amount, + script_public_key: ScriptPublicKey::new(0, script_pub_key.clone()), + }) + .collect(), + 1615462089000, + SubnetworkId::from_bytes([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), + 0, + vec![], + ); + let entries = ins + .iter() + .copied() + .map(|in_amount| UtxoEntry { + amount: in_amount, + script_public_key: ScriptPublicKey::new(0, script_pub_key.clone()), + block_daa_score: 0, + is_coinbase: false, + }) + .collect(); + MutableTransaction::with_entries(tx, entries) + } +} diff --git a/consensus/core/src/merkle.rs b/consensus/core/src/merkle.rs index cfd5c90451..59c6ca7c4c 100644 --- a/consensus/core/src/merkle.rs +++ b/consensus/core/src/merkle.rs @@ -2,14 +2,10 @@ use crate::{hashing, tx::Transaction}; use kaspa_hashes::Hash; use kaspa_merkle::calc_merkle_root; -pub fn calc_hash_merkle_root_with_options<'a>(txs: impl ExactSizeIterator, include_mass_field: bool) -> Hash { +pub fn calc_hash_merkle_root<'a>(txs: impl ExactSizeIterator, include_mass_field: bool) -> Hash { calc_merkle_root(txs.map(|tx| hashing::tx::hash(tx, include_mass_field))) } -pub fn calc_hash_merkle_root<'a>(txs: impl ExactSizeIterator) -> Hash { - calc_merkle_root(txs.map(|tx| hashing::tx::hash(tx, false))) -} - #[cfg(test)] mod tests { use crate::merkle::calc_hash_merkle_root; @@ -242,7 +238,7 @@ mod tests { ), ]; assert_eq!( - calc_hash_merkle_root(txs.iter()), + 
calc_hash_merkle_root(txs.iter(), false), Hash::from_slice(&[ 0x46, 0xec, 0xf4, 0x5b, 0xe3, 0xba, 0xca, 0x34, 0x9d, 0xfe, 0x8a, 0x78, 0xde, 0xaf, 0x05, 0x3b, 0x0a, 0xa6, 0xd5, 0x38, 0x97, 0x4d, 0xa5, 0x0f, 0xd6, 0xef, 0xb4, 0xd2, 0x66, 0xbc, 0x8d, 0x21, diff --git a/consensus/core/src/network.rs b/consensus/core/src/network.rs index ad59adfc3f..18e52eacbf 100644 --- a/consensus/core/src/network.rs +++ b/consensus/core/src/network.rs @@ -1,3 +1,16 @@ +//! +//! # Network Types +//! +//! This module implements [`NetworkType`] (such as `mainnet`, `testnet`, `devnet`, and `simnet`) +//! and [`NetworkId`] that combines a network type with an optional numerical suffix. +//! +//! The suffix is used to differentiate between multiple networks of the same type and is used +//! explicitly with `testnet` networks, allowing declaration of testnet versions such as +//! `testnet-10`, `testnet-11`, etc. +//! + +#![allow(non_snake_case)] + use borsh::{BorshDeserialize, BorshSerialize}; use kaspa_addresses::Prefix; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; @@ -400,8 +413,11 @@ impl TryFrom for NetworkId { impl TryCastFromJs for NetworkId { type Error = NetworkIdError; - fn try_cast_from(value: impl AsRef) -> Result, Self::Error> { - Self::resolve(&value, || { + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + where + R: AsRef + 'a, + { + Self::resolve(value, || { if let Some(network_id) = value.as_ref().as_string() { Ok(NetworkId::from_str(&network_id)?) } else { diff --git a/consensus/core/src/sign.rs b/consensus/core/src/sign.rs index 55513be9d7..3d6ed032f0 100644 --- a/consensus/core/src/sign.rs +++ b/consensus/core/src/sign.rs @@ -1,9 +1,9 @@ use crate::{ hashing::{ sighash::{calc_schnorr_signature_hash, SigHashReusedValuesUnsync}, - sighash_type::SIG_HASH_ALL, + sighash_type::{SigHashType, SIG_HASH_ALL}, }, - tx::SignableTransaction, + tx::{SignableTransaction, VerifiableTransaction}, }; use itertools::Itertools; use std::collections::BTreeMap; @@ -153,7 +153,20 @@ pub fn sign_with_multiple_v2(mut mutable_tx: SignableTransaction, privkeys: &[[u } } -pub fn verify(tx: &impl crate::tx::VerifiableTransaction) -> Result<(), Error> { +/// Sign a transaction input with a sighash_type using schnorr +pub fn sign_input(tx: &impl VerifiableTransaction, input_index: usize, private_key: &[u8; 32], hash_type: SigHashType) -> Vec { + let mut reused_values = SigHashReusedValues::new(); + + let hash = calc_schnorr_signature_hash(tx, input_index, hash_type, &mut reused_values); + let msg = secp256k1::Message::from_digest_slice(hash.as_bytes().as_slice()).unwrap(); + let schnorr_key = secp256k1::Keypair::from_seckey_slice(secp256k1::SECP256K1, private_key).unwrap(); + let sig: [u8; 64] = *schnorr_key.sign_schnorr(msg).as_ref(); + + // This represents OP_DATA_65 (since signature length is 64 bytes and SIGHASH_TYPE is one byte) + std::iter::once(65u8).chain(sig).chain([hash_type.to_u8()]).collect() +} + +pub fn verify(tx: &impl VerifiableTransaction) -> Result<(), Error> { let reused_values = SigHashReusedValuesUnsync::new(); for (i, (input, entry)) in tx.populated_inputs().enumerate() { if input.signature_script.is_empty() { diff --git a/consensus/core/src/subnets.rs b/consensus/core/src/subnets.rs index 2456f84444..756c4d40a8 100644 --- a/consensus/core/src/subnets.rs +++ b/consensus/core/src/subnets.rs @@ -4,7 +4,7 @@ use std::str::{self, FromStr}; use borsh::{BorshDeserialize, BorshSerialize}; use kaspa_utils::hex::{FromHex, ToHex}; use 
kaspa_utils::{serde_impl_deser_fixed_bytes_ref, serde_impl_ser_fixed_bytes_ref}; -use thiserror::Error; +use thiserror::Error; /// The size of the array used to store subnetwork IDs. pub const SUBNETWORK_ID_SIZE: usize = 20; @@ -59,35 +59,34 @@ impl SubnetworkId { *self == SUBNETWORK_ID_COINBASE || *self == SUBNETWORK_ID_REGISTRY } + /// Returns true if the subnetwork is the native subnetwork + #[inline] + pub fn is_native(&self) -> bool { + *self == SUBNETWORK_ID_NATIVE + } + /// Returns true if the subnetwork is the native or a built-in subnetwork #[inline] pub fn is_builtin_or_native(&self) -> bool { - *self == SUBNETWORK_ID_NATIVE || self.is_builtin() + self.is_native() || self.is_builtin() } } -#[derive(Error, Debug, Clone)] -pub enum SubnetworkConversionError { - #[error("Invalid bytes")] - InvalidBytes, - - #[error(transparent)] - SliceError(#[from] std::array::TryFromSliceError), - - #[error(transparent)] - HexError(#[from] faster_hex::Error), -} - +#[derive(Error, Debug, Clone)] +pub enum SubnetworkConversionError { + #[error(transparent)] + SliceError(#[from] std::array::TryFromSliceError), + + #[error(transparent)] + HexError(#[from] faster_hex::Error), +} + impl TryFrom<&[u8]> for SubnetworkId { - type Error = SubnetworkConversionError; + type Error = SubnetworkConversionError; fn try_from(value: &[u8]) -> Result { let bytes = <[u8; SUBNETWORK_ID_SIZE]>::try_from(value)?; - if bytes != Self::from_byte(0).0 && bytes != Self::from_byte(1).0 { - Err(Self::Error::InvalidBytes) - } else { - Ok(Self(bytes)) - } + Ok(Self(bytes)) } } @@ -109,30 +108,22 @@ impl ToHex for SubnetworkId { } impl FromStr for SubnetworkId { - type Err = SubnetworkConversionError; + type Err = SubnetworkConversionError; #[inline] fn from_str(hex_str: &str) -> Result { let mut bytes = [0u8; SUBNETWORK_ID_SIZE]; faster_hex::hex_decode(hex_str.as_bytes(), &mut bytes)?; - if bytes != Self::from_byte(0).0 && bytes != Self::from_byte(1).0 { - Err(Self::Err::InvalidBytes) - } else { - Ok(Self(bytes)) - } + Ok(Self(bytes)) } } impl FromHex for SubnetworkId { - type Error = SubnetworkConversionError; + type Error = SubnetworkConversionError; fn from_hex(hex_str: &str) -> Result { let mut bytes = [0u8; SUBNETWORK_ID_SIZE]; faster_hex::hex_decode(hex_str.as_bytes(), &mut bytes)?; - if bytes != Self::from_byte(0).0 && bytes != Self::from_byte(1).0 { - Err(Self::Error::InvalidBytes) - } else { - Ok(Self(bytes)) - } + Ok(Self(bytes)) } } diff --git a/consensus/core/src/tx.rs b/consensus/core/src/tx.rs index 1376337016..a4dd7dd45b 100644 --- a/consensus/core/src/tx.rs +++ b/consensus/core/src/tx.rs @@ -1,11 +1,22 @@ +//! +//! # Transaction +//! +//! This module implements consensus [`Transaction`] structure and related types. +//! 
+ +#![allow(non_snake_case)] + mod script_public_key; use borsh::{BorshDeserialize, BorshSerialize}; use kaspa_utils::hex::ToHex; use kaspa_utils::mem_size::MemSizeEstimator; use kaspa_utils::{serde_bytes, serde_bytes_fixed_ref}; -pub use script_public_key::{scriptvec, ScriptPublicKey, ScriptPublicKeyVersion, ScriptPublicKeys, ScriptVec, SCRIPT_VECTOR_SIZE}; +pub use script_public_key::{ + scriptvec, ScriptPublicKey, ScriptPublicKeyT, ScriptPublicKeyVersion, ScriptPublicKeys, ScriptVec, SCRIPT_VECTOR_SIZE, +}; use serde::{Deserialize, Serialize}; +use std::collections::HashSet; use std::sync::atomic::AtomicU64; use std::sync::atomic::Ordering::SeqCst; use std::{ @@ -22,6 +33,7 @@ use crate::{ /// COINBASE_TRANSACTION_INDEX is the index of the coinbase transaction in every block pub const COINBASE_TRANSACTION_INDEX: usize = 0; +/// A 32-byte Kaspa transaction identifier. pub type TransactionId = kaspa_hashes::Hash; /// Holds details about an individual transaction output in a utxo @@ -29,7 +41,7 @@ pub type TransactionId = kaspa_hashes::Hash; /// score of the block that accepts the tx, its public key script, and how /// much it pays. /// @category Consensus -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] #[serde(rename_all = "camelCase")] #[wasm_bindgen(inspectable, js_name = TransactionUtxoEntry)] pub struct UtxoEntry { @@ -53,7 +65,7 @@ impl MemSizeEstimator for UtxoEntry {} pub type TransactionIndexType = u32; /// Represents a Kaspa transaction outpoint -#[derive(Eq, Hash, PartialEq, Debug, Copy, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[derive(Eq, Default, Hash, PartialEq, Debug, Copy, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] #[serde(rename_all = "camelCase")] pub struct TransactionOutpoint { #[serde(with = "serde_bytes_fixed_ref")] @@ -137,8 +149,8 @@ impl Clone for TransactionMass { } impl BorshDeserialize for TransactionMass { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { - let mass: u64 = borsh::BorshDeserialize::deserialize(buf)?; + fn deserialize_reader(reader: &mut R) -> std::io::Result { + let mass: u64 = borsh::BorshDeserialize::deserialize_reader(reader)?; Ok(Self(AtomicU64::new(mass))) } } @@ -163,7 +175,6 @@ pub struct Transaction { pub payload: Vec, #[serde(default)] - #[borsh_skip] // TODO: skipped for now as it is only required for consensus storage and miner grpc mass: TransactionMass, // A field that is used to cache the transaction ID. @@ -228,6 +239,28 @@ impl Transaction { pub fn mass(&self) -> u64 { self.mass.0.load(SeqCst) } + + pub fn with_mass(self, mass: u64) -> Self { + self.set_mass(mass); + self + } +} + +impl MemSizeEstimator for Transaction { + fn estimate_mem_bytes(&self) -> usize { + // Calculates mem bytes of the transaction (for cache tracking purposes) + size_of::() + + self.payload.len() + + self + .inputs + .iter() + .map(|i| i.signature_script.len() + size_of::()) + .chain(self.outputs.iter().map(|o| { + // size_of::() already counts SCRIPT_VECTOR_SIZE bytes within, so we only add the delta + o.script_public_key.script().len().saturating_sub(SCRIPT_VECTOR_SIZE) + size_of::() + })) + .sum::() + } } /// Represents any kind of transaction which has populated UTXO entry data and can be verified/signed etc @@ -406,6 +439,50 @@ impl> MutableTransaction { *entry = None; } } + + /// Returns the calculated feerate. 
The feerate is calculated as the amount of fee + /// this transaction pays per gram of the full contextual (compute & storage) mass. The + /// function returns a value only when a calculated fee exists and the contextual mass is + /// greater than zero; otherwise `None` is returned. + pub fn calculated_feerate(&self) -> Option<f64> { + let contextual_mass = self.tx.as_ref().mass(); + if contextual_mass > 0 { + self.calculated_fee.map(|fee| fee as f64 / contextual_mass as f64) + } else { + None + } + } + + /// A function for estimating the amount of memory bytes used by this transaction (dedicated to mempool usage). + /// We need consistency between estimation calls so only this function should be used for this purpose since + /// `estimate_mem_bytes` is sensitive to pointer wrappers such as Arc + pub fn mempool_estimated_bytes(&self) -> usize { + self.estimate_mem_bytes() + } + + pub fn has_parent(&self, possible_parent: TransactionId) -> bool { + self.tx.as_ref().inputs.iter().any(|x| x.previous_outpoint.transaction_id == possible_parent) + } + + pub fn has_parent_in_set(&self, possible_parents: &HashSet<TransactionId>) -> bool { + self.tx.as_ref().inputs.iter().any(|x| possible_parents.contains(&x.previous_outpoint.transaction_id)) + } +} + +impl<T: AsRef<Transaction>> MemSizeEstimator for MutableTransaction<T> { + fn estimate_mem_bytes(&self) -> usize { + size_of::<Self>() + + self + .entries + .iter() + .map(|op| { + // size_of::<Option<UtxoEntry>>() already counts SCRIPT_VECTOR_SIZE bytes within, so we only add the delta + size_of::<Option<UtxoEntry>>() + + op.as_ref().map_or(0, |e| e.script_public_key.script().len().saturating_sub(SCRIPT_VECTOR_SIZE)) + }) + .sum::<usize>() + + self.tx.as_ref().estimate_mem_bytes() + } } impl<T: AsRef<Transaction>> AsRef<Transaction> for MutableTransaction<T> { @@ -604,12 +681,12 @@ mod tests { fn test_spk_borsh() { // Tests for ScriptPublicKey Borsh ser/deser since we manually implemented them let spk = ScriptPublicKey::from_vec(12, vec![32; 20]); - let bin = spk.try_to_vec().unwrap(); + let bin = borsh::to_vec(&spk).unwrap(); let spk2: ScriptPublicKey = BorshDeserialize::try_from_slice(&bin).unwrap(); assert_eq!(spk, spk2); let spk = ScriptPublicKey::from_vec(55455, vec![11; 200]); - let bin = spk.try_to_vec().unwrap(); + let bin = borsh::to_vec(&spk).unwrap(); let spk2: ScriptPublicKey = BorshDeserialize::try_from_slice(&bin).unwrap(); assert_eq!(spk, spk2); } diff --git a/consensus/core/src/tx/script_public_key.rs b/consensus/core/src/tx/script_public_key.rs index 7f3ade6943..dfed2ab5ce 100644 --- a/consensus/core/src/tx/script_public_key.rs +++ b/consensus/core/src/tx/script_public_key.rs @@ -1,6 +1,7 @@ use alloc::borrow::Cow; use borsh::{BorshDeserialize, BorshSerialize}; use core::fmt::Formatter; +use js_sys::Object; use kaspa_utils::{ hex::{FromHex, ToHex}, serde_bytes::FromHexVisitor, @@ -41,6 +42,7 @@ const TS_SCRIPT_PUBLIC_KEY: &'static str = r#" * @category Consensus */ export interface IScriptPublicKey { + version : number; script: HexString; } "#; @@ -328,6 +330,12 @@ impl ScriptPublicKey { } } +#[wasm_bindgen] +extern "C" { + #[wasm_bindgen(typescript_type = "ScriptPublicKey | HexString")] + pub type ScriptPublicKeyT; +} + #[wasm_bindgen] impl ScriptPublicKey { #[wasm_bindgen(constructor)] @@ -357,19 +365,36 @@ impl BorshSerialize for ScriptPublicKey { } impl BorshDeserialize for ScriptPublicKey { - fn deserialize(buf: &mut &[u8]) -> std::io::Result<Self> { + fn deserialize_reader<R: borsh::io::Read>(reader: &mut R) -> std::io::Result<Self> { // Deserialize into vec first since we have no custom smallvec support - Ok(Self::from_vec(borsh::BorshDeserialize::deserialize(buf)?,
borsh::BorshDeserialize::deserialize(buf)?)) + Ok(Self::from_vec(borsh::BorshDeserialize::deserialize_reader(reader)?, borsh::BorshDeserialize::deserialize_reader(reader)?)) } } type CastError = workflow_wasm::error::Error; impl TryCastFromJs for ScriptPublicKey { type Error = workflow_wasm::error::Error; - fn try_cast_from(value: impl AsRef<JsValue>) -> Result<Cast<Self>, Self::Error> { - Self::resolve(&value, || { + fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<'a, Self>, Self::Error> + where + R: AsRef<JsValue> + 'a, + { + Self::resolve(value, || { if let Some(hex_str) = value.as_ref().as_string() { Ok(Self::from_str(&hex_str).map_err(CastError::custom)?) + } else if let Some(object) = Object::try_from(value.as_ref()) { + let version = object.try_get_value("version")?.ok_or(CastError::custom( + "ScriptPublicKey must be a hex string or an object with 'version' and 'script' properties", + ))?; + + let version = if let Ok(version) = version.try_as_u16() { + version + } else { + return Err(CastError::custom(format!("Invalid version value '{version:?}'"))); + }; + + let script = object.get_vec_u8("script")?; + + Ok(ScriptPublicKey::from_vec(version, script)) } else { Err(CastError::custom(format!("Unable to convert ScriptPublicKey from: {:?}", value.as_ref()))) } @@ -403,12 +428,12 @@ mod tests { fn test_spk_borsh() { // Tests for ScriptPublicKey Borsh ser/deser since we manually implemented them let spk = ScriptPublicKey::from_vec(12, vec![32; 20]); - let bin = spk.try_to_vec().unwrap(); + let bin = borsh::to_vec(&spk).unwrap(); let spk2: ScriptPublicKey = BorshDeserialize::try_from_slice(&bin).unwrap(); assert_eq!(spk, spk2); let spk = ScriptPublicKey::from_vec(55455, vec![11; 200]); - let bin = spk.try_to_vec().unwrap(); + let bin = borsh::to_vec(&spk).unwrap(); let spk2: ScriptPublicKey = BorshDeserialize::try_from_slice(&bin).unwrap(); assert_eq!(spk, spk2); } diff --git a/consensus/core/src/utxo/utxo_diff.rs b/consensus/core/src/utxo/utxo_diff.rs index 3cd12f3f5b..fb4ffbcbaa 100644 --- a/consensus/core/src/utxo/utxo_diff.rs +++ b/consensus/core/src/utxo/utxo_diff.rs @@ -5,7 +5,7 @@ use super::{ use crate::tx::{TransactionOutpoint, UtxoEntry, VerifiableTransaction}; use kaspa_utils::mem_size::MemSizeEstimator; use serde::{Deserialize, Serialize}; -use std::{collections::hash_map::Entry::Vacant, mem::size_of}; +use std::collections::hash_map::Entry::Vacant; pub trait ImmutableUtxoDiff { fn added(&self) -> &UtxoCollection; diff --git a/consensus/pow/Cargo.toml b/consensus/pow/Cargo.toml index 13dc80f378..b2dd714e74 100644 --- a/consensus/pow/Cargo.toml +++ b/consensus/pow/Cargo.toml @@ -28,4 +28,7 @@ criterion.workspace = true [[bench]] name = "bench" -harness = false \ No newline at end of file +harness = false + +[lints] +workspace = true diff --git a/consensus/pow/src/wasm.rs b/consensus/pow/src/wasm.rs index f5179e44a2..92017d6c8a 100644 --- a/consensus/pow/src/wasm.rs +++ b/consensus/pow/src/wasm.rs @@ -1,74 +1,104 @@ use crate::matrix::Matrix; use js_sys::BigInt; use kaspa_consensus_client::Header; +use kaspa_consensus_client::HeaderT; use kaspa_consensus_core::hashing; use kaspa_hashes::Hash; use kaspa_hashes::PowHash; use kaspa_math::Uint256; +use kaspa_utils::hex::FromHex; use kaspa_utils::hex::ToHex; use num::Float; use wasm_bindgen::prelude::*; +use workflow_wasm::convert::TryCastFromJs; use workflow_wasm::error::Error; -use workflow_wasm::prelude::*; use workflow_wasm::result::Result; -/// @category PoW +#[wasm_bindgen] +extern "C" { + #[wasm_bindgen(extends = js_sys::Array, typescript_type = "[boolean, bigint]")] + pub 
type WorkT; +} + +/// Represents a Kaspa header PoW manager +/// @category Mining #[wasm_bindgen(inspectable)] -pub struct State { +pub struct PoW { inner: crate::State, pre_pow_hash: Hash, } #[wasm_bindgen] -impl State { +impl PoW { #[wasm_bindgen(constructor)] - pub fn new(header: &Header) -> Self { + pub fn new(header: &HeaderT, timestamp: Option<u64>) -> Result<PoW> { // this function replicates crate::State::new() but caches // the pre_pow_hash value internally, making it available // via the `pre_pow_hash` property getter. - - // obtain locked inner + let header = Header::try_cast_from(header).map_err(Error::custom)?; + let header = header.as_ref(); let header = header.inner(); + // Get required target from header bits. let target = Uint256::from_compact_target_bits(header.bits); // Zero out the time and nonce. let pre_pow_hash = hashing::header::hash_override_nonce_time(header, 0, 0); // PRE_POW_HASH || TIME || 32 zero byte padding || NONCE - let hasher = PowHash::new(pre_pow_hash, header.timestamp); + let hasher = PowHash::new(pre_pow_hash, timestamp.unwrap_or(header.timestamp)); let matrix = Matrix::generate(pre_pow_hash); - Self { inner: crate::State { matrix, target, hasher }, pre_pow_hash } + Ok(Self { inner: crate::State { matrix, target, hasher }, pre_pow_hash }) } + /// The target based on the provided bits. #[wasm_bindgen(getter)] pub fn target(&self) -> Result<BigInt> { - self.inner.target.try_into().map_err(|err| Error::Custom(format!("{err:?}"))) + self.inner.target.try_into().map_err(|err| Error::custom(format!("{err:?}"))) } - #[wasm_bindgen(js_name=checkPow)] - pub fn check_pow(&self, nonce_jsv: JsValue) -> Result<js_sys::Array> { - let nonce = nonce_jsv.try_as_u64()?; + /// Checks if the computed target meets or exceeds the difficulty specified in the template. + /// @returns A boolean indicating if it reached the target and a bigint representing the reached target. + #[wasm_bindgen(js_name=checkWork)] + pub fn check_work(&self, nonce: u64) -> Result<WorkT> { let (c, v) = self.inner.check_pow(nonce); let array = js_sys::Array::new(); array.push(&JsValue::from(c)); - array.push(&v.to_bigint().map_err(|err| Error::Custom(format!("{err:?}")))?.into()); + array.push(&v.to_bigint().map_err(|err| Error::custom(format!("{err:?}")))?.into()); - Ok(array) + Ok(array.unchecked_into()) } - #[wasm_bindgen(getter = prePowHash)] + /// Hash of the header without timestamp and nonce. + #[wasm_bindgen(getter = prePoWHash)] pub fn get_pre_pow_hash(&self) -> String { self.pre_pow_hash.to_hex() } +
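For context, `checkWork` wraps the underlying `crate::State::check_pow(nonce) -> (bool, Uint256)` used above, where the boolean signals that the computed PoW value does not exceed the target. A hedged Rust-side sketch of the scan loop a miner would run over this primitive (names and crate path illustrative, not part of the patch):

// Scan a nonce window; check_pow returns (passed, pow_value) and we keep
// the first nonce whose pow_value meets the target.
fn scan_nonces(state: &kaspa_pow::State, start: u64, window: u64) -> Option<u64> {
    (start..start.saturating_add(window)).find(|&nonce| state.check_pow(nonce).0)
}

+ /// Can be used for parsing Stratum templates. 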
+ #[wasm_bindgen(js_name=fromRaw)] + pub fn from_raw(pre_pow_hash: &str, timestamp: u64, target_bits: Option<u32>) -> Result<PoW> { + // Convert the pre_pow_hash from hex string to Hash + let pre_pow_hash = Hash::from_hex(pre_pow_hash).map_err(|err| Error::custom(format!("{err:?}")))?; + + // Generate the target from compact target bits if provided + let target = Uint256::from_compact_target_bits(target_bits.unwrap_or_default()); + + // Initialize the matrix and hasher using pre_pow_hash and timestamp + let matrix = Matrix::generate(pre_pow_hash); + let hasher = PowHash::new(pre_pow_hash, timestamp); + + Ok(PoW { inner: crate::State { matrix, target, hasher }, pre_pow_hash }) + } } // https://github.com/tmrlvi/kaspa-miner/blob/bf361d02a46c580f55f46b5dfa773477634a5753/src/client/stratum.rs#L36 const DIFFICULTY_1_TARGET: (u64, i16) = (0xffffu64, 208); // 0xffff 2^208 -/// `calculate_difficulty` is based on set_difficulty function: -/// @category PoW -#[wasm_bindgen(js_name = calculateDifficulty)] -pub fn calculate_difficulty(difficulty: f32) -> std::result::Result<BigInt, JsError> { +/// Calculates target from difficulty, based on set_difficulty function on +/// <https://github.com/tmrlvi/kaspa-miner/blob/bf361d02a46c580f55f46b5dfa773477634a5753/src/client/stratum.rs#L36> +/// +/// @category Mining +#[wasm_bindgen(js_name = calculateTarget)] +pub fn calculate_target(difficulty: f32) -> Result<BigInt> { let mut buf = [0u64, 0u64, 0u64, 0u64]; let (mantissa, exponent, _) = difficulty.recip().integer_decode(); let new_mantissa = mantissa * DIFFICULTY_1_TARGET.0; @@ -80,10 +110,8 @@ pub fn calculate_difficulty(difficulty: f32) -> std::result::Result<BigInt, JsError> { buf[start + 1] = new_mantissa >> (64 - remainder); // top } else if new_mantissa.leading_zeros() < remainder as u32 { - return Err(JsError::new("Target is too big")); + return Err(Error::custom("Target is too big")); } - // let target_pool = Uint256(buf); - // workflow_log::log_info!("Difficulty: {:?}, Target: 0x{}", difficulty, target_pool.to_hex()); - Ok(Uint256(buf).try_into()?) 
+ Uint256(buf).try_into().map_err(Error::custom) } diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index 80babbef0f..1731729a32 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -36,11 +36,18 @@ use crate::{ virtual_processor::{errors::PruningImportResult, VirtualStateProcessor}, ProcessingCounters, }, - processes::window::{WindowManager, WindowType}, + processes::{ + ghostdag::ordering::SortableBlock, + window::{WindowManager, WindowType}, + }, }; use kaspa_consensus_core::{ acceptance_data::AcceptanceData, - api::{stats::BlockCount, BlockValidationFutures, ConsensusApi, ConsensusStats}, + api::{ + args::{TransactionValidationArgs, TransactionValidationBatchArgs}, + stats::BlockCount, + BlockValidationFutures, ConsensusApi, ConsensusStats, + }, block::{Block, BlockTemplate, TemplateBuildMode, TemplateTransactionSelector, VirtualStateApproxId}, blockhash::BlockHashExtensions, blockstatus::BlockStatus, @@ -49,16 +56,18 @@ use kaspa_consensus_core::{ errors::{ coinbase::CoinbaseResult, consensus::{ConsensusError, ConsensusResult}, + difficulty::DifficultyError, + pruning::PruningImportError, tx::TxResult, }, - errors::{difficulty::DifficultyError, pruning::PruningImportError}, header::Header, + merkle::calc_hash_merkle_root, muhash::MuHashExtensions, network::NetworkType, pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList}, trusted::{ExternalGhostdagData, TrustedBlock}, tx::{MutableTransaction, Transaction, TransactionOutpoint, UtxoEntry}, - BlockHashSet, BlueWorkType, ChainPath, + BlockHashSet, BlueWorkType, ChainPath, HashMapCustomHasher, }; use kaspa_consensus_notify::root::ConsensusNotificationRoot; @@ -74,6 +83,8 @@ use kaspa_muhash::MuHash; use kaspa_txscript::caches::TxScriptCacheCounters; use std::{ + cmp::Reverse, + collections::BinaryHeap, future::Future, iter::once, ops::Deref, @@ -418,13 +429,17 @@ impl ConsensusApi for Consensus { BlockValidationFutures { block_task: Box::pin(block_task), virtual_state_task: Box::pin(virtual_state_task) } } - fn validate_mempool_transaction(&self, transaction: &mut MutableTransaction) -> TxResult<()> { - self.virtual_processor.validate_mempool_transaction(transaction)?; + fn validate_mempool_transaction(&self, transaction: &mut MutableTransaction, args: &TransactionValidationArgs) -> TxResult<()> { + self.virtual_processor.validate_mempool_transaction(transaction, args)?; Ok(()) } - fn validate_mempool_transactions_in_parallel(&self, transactions: &mut [MutableTransaction]) -> Vec> { - self.virtual_processor.validate_mempool_transactions_in_parallel(transactions) + fn validate_mempool_transactions_in_parallel( + &self, + transactions: &mut [MutableTransaction], + args: &TransactionValidationBatchArgs, + ) -> Vec> { + self.virtual_processor.validate_mempool_transactions_in_parallel(transactions, args) } fn populate_mempool_transaction(&self, transaction: &mut MutableTransaction) -> TxResult<()> { @@ -495,6 +510,64 @@ impl ConsensusApi for Consensus { self.headers_store.get_timestamp(self.get_sink()).unwrap() } + fn get_current_block_color(&self, hash: Hash) -> Option { + let _guard = self.pruning_lock.blocking_read(); + + // Verify the block exists and can be assumed to have relations and reachability data + self.validate_block_exists(hash).ok()?; + + // Verify that the block is in future(source), where Ghostdag data is complete + self.services.reachability_service.is_dag_ancestor_of(self.get_source(), hash).then_some(())?; + + let sink = self.get_sink(); + + // 
Optimization: verify that the block is in past(sink), otherwise the search will fail anyway + // (means the block was not merged yet by a virtual chain block) + self.services.reachability_service.is_dag_ancestor_of(hash, sink).then_some(())?; + + let mut heap: BinaryHeap<Reverse<SortableBlock>> = BinaryHeap::new(); + let mut visited = BlockHashSet::new(); + + let initial_children = self.get_block_children(hash).unwrap(); + + for child in initial_children { + if visited.insert(child) { + let blue_work = self.ghostdag_primary_store.get_blue_work(child).unwrap(); + heap.push(Reverse(SortableBlock::new(child, blue_work))); + } + } + + while let Some(Reverse(SortableBlock { hash: descendant, .. })) = heap.pop() { + if self.services.reachability_service.is_chain_ancestor_of(descendant, sink) { + let descendant_data = self.get_ghostdag_data(descendant).unwrap(); + + if descendant_data.mergeset_blues.contains(&hash) { + return Some(true); + } else if descendant_data.mergeset_reds.contains(&hash) { + return Some(false); + } + + // Note: because we are doing a topological BFS up (from `hash` towards virtual), the first chain block + // found must also be our merging block, so hash will be either in blues or in reds, rendering this line + // unreachable. + kaspa_core::warn!("DAG topology inconsistency: {descendant} is expected to be a merging block of {hash}"); + // TODO: we should consider the option of returning Result<Option<bool>> from this method + return None; + } + + let children = self.get_block_children(descendant).unwrap(); + + for child in children { + if visited.insert(child) { + let blue_work = self.ghostdag_primary_store.get_blue_work(child).unwrap(); + heap.push(Reverse(SortableBlock::new(child, blue_work))); + } + } + } + + None + } + fn get_virtual_state_approx_id(&self) -> VirtualStateApproxId { self.lkg_virtual_state.load().to_virtual_state_approx_id() } @@ -534,14 +607,26 @@ impl ConsensusApi for Consensus { self.config.is_nearly_synced(compact.timestamp, compact.daa_score) } - fn get_virtual_chain_from_block(&self, hash: Hash) -> ConsensusResult<ChainPath> { - // Calculate chain changes between the given hash and the - // sink. Note that we explicitly don't + fn get_virtual_chain_from_block(&self, low: Hash, chain_path_added_limit: Option<usize>) -> ConsensusResult<ChainPath> { + // Calculate chain changes between the given `low` and the current sink hash (up to `limit` amount of block hashes). + // Note: + // 1) we explicitly don't // do the calculation against the virtual itself so that we // won't later need to remove it from the result. + // 2) supplying `None` as `chain_path_added_limit` will result in the full chain path, with optimized performance.
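+ // For example (illustrative): `get_virtual_chain_from_block(low, Some(1800))` caps the response at
+ // roughly 1800 added chain blocks, while `get_virtual_chain_from_block(low, None)` returns the complete
+ // added/removed chain path from `low` up to the current sink.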
let _guard = self.pruning_lock.blocking_read(); - self.validate_block_exists(hash)?; - Ok(self.services.dag_traversal_manager.calculate_chain_path(hash, self.get_sink())) + + // Verify that the block exists + self.validate_block_exists(low)?; + + // Verify that the source is on chain(low) + self.services + .reachability_service + .is_chain_ancestor_of(self.get_source(), low) + .then_some(()) + .ok_or(ConsensusError::General("the queried hash does not have the source on its chain"))?; + + Ok(self.services.dag_traversal_manager.calculate_chain_path(low, self.get_sink(), chain_path_added_limit)) } /// Returns a Vec of header samples since genesis @@ -666,6 +751,11 @@ impl ConsensusApi for Consensus { self.services.coinbase_manager.modify_coinbase_payload(payload, miner_data) } + fn calc_transaction_hash_merkle_root(&self, txs: &[Transaction], pov_daa_score: u64) -> Hash { + let storage_mass_activated = pov_daa_score > self.config.storage_mass_activation_daa_score; + calc_hash_merkle_root(txs.iter(), storage_mass_activated) + } + fn validate_pruning_proof(&self, proof: &PruningPointProof) -> Result<(), PruningImportError> { self.services.pruning_proof_manager.validate_pruning_point_proof(proof) } @@ -836,11 +926,35 @@ impl ConsensusApi for Consensus { self.acceptance_data_store.get(hash).unwrap_option().ok_or(ConsensusError::MissingData(hash)) } - fn get_blocks_acceptance_data(&self, hashes: &[Hash]) -> ConsensusResult<Vec<Arc<AcceptanceData>>> { + fn get_blocks_acceptance_data( + &self, + hashes: &[Hash], + merged_blocks_limit: Option<usize>, + ) -> ConsensusResult<Vec<Arc<AcceptanceData>>> { + // Note: merged_blocks_limit cuts the batch off only once the accumulated number of merged blocks breaches the limit + // while walking the supplied hashes' acceptance data; it never truncates the acceptance data of an individual queried hash. + // I.e., it works at mergeset_size_limit granularity, which guarantees full acceptance data coverage per returned hash. + if merged_blocks_limit.is_none() { + return hashes + .iter() + .copied() + .map(|hash| self.acceptance_data_store.get(hash).unwrap_option().ok_or(ConsensusError::MissingData(hash))) + .collect::<ConsensusResult<Vec<_>>>(); + } + let merged_blocks_limit = merged_blocks_limit.unwrap(); // we handle `is_none`, so may unwrap.
+ let mut num_of_merged_blocks = 0usize; + hashes .iter() .copied() - .map(|hash| self.acceptance_data_store.get(hash).unwrap_option().ok_or(ConsensusError::MissingData(hash))) + .map_while(|hash| { + let entry = self.acceptance_data_store.get(hash).unwrap_option().ok_or(ConsensusError::MissingData(hash)); + num_of_merged_blocks += entry.as_ref().map_or(0, |entry| entry.len()); + if num_of_merged_blocks > merged_blocks_limit { + None + } else { + Some(entry) + } + }) .collect::>>() } diff --git a/consensus/src/consensus/services.rs b/consensus/src/consensus/services.rs index 4afb5938a0..38e283a141 100644 --- a/consensus/src/consensus/services.rs +++ b/consensus/src/consensus/services.rs @@ -11,13 +11,13 @@ use crate::{ }, }, processes::{ - block_depth::BlockDepthManager, coinbase::CoinbaseManager, ghostdag::protocol::GhostdagManager, mass::MassCalculator, + block_depth::BlockDepthManager, coinbase::CoinbaseManager, ghostdag::protocol::GhostdagManager, parents_builder::ParentsManager, pruning::PruningPointManager, pruning_proof::PruningProofManager, sync::SyncManager, transaction_validator::TransactionValidator, traversal_manager::DagTraversalManager, window::DualWindowManager, }, }; - use itertools::Itertools; +use kaspa_consensus_core::mass::MassCalculator; use kaspa_txscript::caches::TxScriptCacheCounters; use std::sync::{atomic::AtomicBool, Arc}; diff --git a/consensus/src/consensus/storage.rs b/consensus/src/consensus/storage.rs index d53324fc6f..89a0f5e265 100644 --- a/consensus/src/consensus/storage.rs +++ b/consensus/src/consensus/storage.rs @@ -31,7 +31,7 @@ use kaspa_consensus_core::{blockstatus::BlockStatus, BlockHashSet}; use kaspa_database::registry::DatabaseStorePrefixes; use kaspa_hashes::Hash; use parking_lot::RwLock; -use std::{mem::size_of, ops::DerefMut, sync::Arc}; +use std::{ops::DerefMut, sync::Arc}; pub struct ConsensusStorage { // DB diff --git a/consensus/src/consensus/test_consensus.rs b/consensus/src/consensus/test_consensus.rs index c626e00ff1..a705d9ecca 100644 --- a/consensus/src/consensus/test_consensus.rs +++ b/consensus/src/consensus/test_consensus.rs @@ -176,7 +176,7 @@ impl TestConsensus { let cb = Transaction::new(TX_VERSION, vec![], vec![], 0, SUBNETWORK_ID_COINBASE, 0, cb_payload); txs.insert(0, cb); - header.hash_merkle_root = calc_hash_merkle_root(txs.iter()); + header.hash_merkle_root = calc_hash_merkle_root(txs.iter(), false); MutableBlock::new(header, txs) } diff --git a/consensus/src/model/services/reachability.rs b/consensus/src/model/services/reachability.rs index d80efd760e..39f5ceba2d 100644 --- a/consensus/src/model/services/reachability.rs +++ b/consensus/src/model/services/reachability.rs @@ -17,6 +17,7 @@ pub trait ReachabilityService { fn is_any_dag_ancestor_result(&self, list: &mut impl Iterator, queried: Hash) -> Result; fn get_next_chain_ancestor(&self, descendant: Hash, ancestor: Hash) -> Hash; fn get_chain_parent(&self, this: Hash) -> Hash; + fn has_reachability_data(&self, this: Hash) -> bool; } impl ReachabilityService for T { @@ -56,6 +57,10 @@ impl ReachabilityService for T { fn get_chain_parent(&self, this: Hash) -> Hash { self.get_parent(this).unwrap() } + + fn has_reachability_data(&self, this: Hash) -> bool { + self.has(this).unwrap() + } } /// Multi-threaded reachability service imp @@ -108,6 +113,10 @@ impl ReachabilityService for MTReachability fn get_chain_parent(&self, this: Hash) -> Hash { self.store.read().get_parent(this).unwrap() } + + fn has_reachability_data(&self, this: Hash) -> bool { + 
self.store.read().has(this).unwrap() + } } impl MTReachabilityService { diff --git a/consensus/src/model/stores/acceptance_data.rs b/consensus/src/model/stores/acceptance_data.rs index a66fcdcfec..83f6c8f13b 100644 --- a/consensus/src/model/stores/acceptance_data.rs +++ b/consensus/src/model/stores/acceptance_data.rs @@ -12,7 +12,6 @@ use kaspa_utils::mem_size::MemSizeEstimator; use rocksdb::WriteBatch; use serde::Deserialize; use serde::Serialize; -use std::mem::size_of; use std::sync::Arc; pub trait AcceptanceDataStoreReader { diff --git a/consensus/src/model/stores/block_transactions.rs b/consensus/src/model/stores/block_transactions.rs index 050606d3ce..5042682888 100644 --- a/consensus/src/model/stores/block_transactions.rs +++ b/consensus/src/model/stores/block_transactions.rs @@ -9,7 +9,6 @@ use kaspa_hashes::Hash; use kaspa_utils::mem_size::MemSizeEstimator; use rocksdb::WriteBatch; use serde::{Deserialize, Serialize}; -use std::mem::size_of; use std::sync::Arc; pub trait BlockTransactionsStoreReader { diff --git a/consensus/src/model/stores/ghostdag.rs b/consensus/src/model/stores/ghostdag.rs index 89c4686c5f..bcf860b3a3 100644 --- a/consensus/src/model/stores/ghostdag.rs +++ b/consensus/src/model/stores/ghostdag.rs @@ -14,7 +14,6 @@ use kaspa_utils::mem_size::MemSizeEstimator; use rocksdb::WriteBatch; use serde::{Deserialize, Serialize}; use std::iter::once; -use std::mem::size_of; use std::{cell::RefCell, sync::Arc}; /// Re-export for convenience diff --git a/consensus/src/model/stores/headers.rs b/consensus/src/model/stores/headers.rs index b0c25b5960..85668f6992 100644 --- a/consensus/src/model/stores/headers.rs +++ b/consensus/src/model/stores/headers.rs @@ -1,4 +1,3 @@ -use std::mem::size_of; use std::sync::Arc; use kaspa_consensus_core::{header::Header, BlockHasher, BlockLevel}; @@ -29,9 +28,7 @@ pub struct HeaderWithBlockLevel { impl MemSizeEstimator for HeaderWithBlockLevel { fn estimate_mem_bytes(&self) -> usize { - size_of::
() - + self.header.parents_by_level.iter().map(|l| l.len()).sum::() * size_of::() - + size_of::() + self.header.as_ref().estimate_mem_bytes() + size_of::() } } diff --git a/consensus/src/model/stores/mod.rs b/consensus/src/model/stores/mod.rs index 8397558863..9fda332960 100644 --- a/consensus/src/model/stores/mod.rs +++ b/consensus/src/model/stores/mod.rs @@ -3,10 +3,6 @@ pub mod block_transactions; pub mod block_window_cache; pub mod children; pub mod daa; -pub mod selected_chain; -use std::{fmt::Display, mem::size_of}; - -pub use kaspa_database; pub mod depth; pub mod ghostdag; pub mod headers; @@ -16,6 +12,7 @@ pub mod pruning; pub mod pruning_utxoset; pub mod reachability; pub mod relations; +pub mod selected_chain; pub mod statuses; pub mod tips; pub mod utxo_diffs; @@ -23,7 +20,9 @@ pub mod utxo_multisets; pub mod utxo_set; pub mod virtual_state; +pub use kaspa_database; pub use kaspa_database::prelude::DB; +use std::fmt::Display; #[derive(PartialEq, Eq, Clone, Copy, Hash)] pub(crate) struct U64Key([u8; size_of::()]); diff --git a/consensus/src/model/stores/utxo_set.rs b/consensus/src/model/stores/utxo_set.rs index fbe64deaf3..03add09482 100644 --- a/consensus/src/model/stores/utxo_set.rs +++ b/consensus/src/model/stores/utxo_set.rs @@ -28,7 +28,7 @@ pub trait UtxoSetStore: UtxoSetStoreReader { fn write_many(&mut self, utxos: &[(TransactionOutpoint, UtxoEntry)]) -> Result<(), StoreError>; } -pub const UTXO_KEY_SIZE: usize = kaspa_hashes::HASH_SIZE + std::mem::size_of::(); +pub const UTXO_KEY_SIZE: usize = kaspa_hashes::HASH_SIZE + size_of::(); #[derive(Eq, Hash, PartialEq, Debug, Copy, Clone)] struct UtxoKey([u8; UTXO_KEY_SIZE]); @@ -81,8 +81,7 @@ impl From for TransactionOutpoint { fn from(k: UtxoKey) -> Self { let transaction_id = Hash::from_slice(&k.0[..kaspa_hashes::HASH_SIZE]); let index = TransactionIndexType::from_le_bytes( - <[u8; std::mem::size_of::()]>::try_from(&k.0[kaspa_hashes::HASH_SIZE..]) - .expect("expecting index size"), + <[u8; size_of::()]>::try_from(&k.0[kaspa_hashes::HASH_SIZE..]).expect("expecting index size"), ); Self::new(transaction_id, index) } diff --git a/consensus/src/pipeline/body_processor/body_validation_in_context.rs b/consensus/src/pipeline/body_processor/body_validation_in_context.rs index 2425556d0e..b03643df87 100644 --- a/consensus/src/pipeline/body_processor/body_validation_in_context.rs +++ b/consensus/src/pipeline/body_processor/body_validation_in_context.rs @@ -14,14 +14,7 @@ impl BlockBodyProcessor { pub fn validate_body_in_context(self: &Arc, block: &Block) -> BlockProcessResult<()> { self.check_parent_bodies_exist(block)?; self.check_coinbase_blue_score_and_subsidy(block)?; - self.check_block_transactions_in_context(block)?; - self.check_block_is_not_pruned(block) - } - - fn check_block_is_not_pruned(self: &Arc, _block: &Block) -> BlockProcessResult<()> { - // TODO: In kaspad code it checks that the block is not in the past of the current tips. - // We should decide what's the best indication that a block was pruned. - Ok(()) + self.check_block_transactions_in_context(block) } fn check_block_transactions_in_context(self: &Arc, block: &Block) -> BlockProcessResult<()> { @@ -36,12 +29,6 @@ impl BlockBodyProcessor { } fn check_parent_bodies_exist(self: &Arc, block: &Block) -> BlockProcessResult<()> { - // TODO: Skip this check for blocks in PP anticone that comes as part of the pruning proof. 
- - if block.header.direct_parents().len() == 1 && block.header.direct_parents()[0] == self.genesis.hash { - return Ok(()); - } - let statuses_read_guard = self.statuses_store.read(); let missing: Vec = block .header @@ -50,7 +37,7 @@ impl BlockBodyProcessor { .copied() .filter(|parent| { let status_option = statuses_read_guard.get(*parent).unwrap_option(); - status_option.is_none_or(|s| !s.has_block_body()) + status_option.is_none_or_ex(|s| !s.has_block_body()) }) .collect(); if !missing.is_empty() { @@ -94,13 +81,17 @@ mod tests { }; use kaspa_consensus_core::{ api::ConsensusApi, - merkle::calc_hash_merkle_root, + merkle::calc_hash_merkle_root as calc_hash_merkle_root_with_options, subnets::SUBNETWORK_ID_NATIVE, tx::{Transaction, TransactionInput, TransactionOutpoint}, }; use kaspa_core::assert_match; use kaspa_hashes::Hash; + fn calc_hash_merkle_root<'a>(txs: impl ExactSizeIterator) -> Hash { + calc_hash_merkle_root_with_options(txs, false) + } + #[tokio::test] async fn validate_body_in_context_test() { let config = ConfigBuilder::new(DEVNET_PARAMS) diff --git a/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs b/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs index e5a51d8154..c413552b99 100644 --- a/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs +++ b/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs @@ -2,7 +2,7 @@ use std::{collections::HashSet, sync::Arc}; use super::BlockBodyProcessor; use crate::errors::{BlockProcessResult, RuleError}; -use kaspa_consensus_core::{block::Block, merkle::calc_hash_merkle_root_with_options, tx::TransactionOutpoint}; +use kaspa_consensus_core::{block::Block, merkle::calc_hash_merkle_root, tx::TransactionOutpoint}; impl BlockBodyProcessor { pub fn validate_body_in_isolation(self: &Arc, block: &Block) -> BlockProcessResult { @@ -29,7 +29,7 @@ impl BlockBodyProcessor { } fn check_hash_merkle_root(block: &Block, storage_mass_activated: bool) -> BlockProcessResult<()> { - let calculated = calc_hash_merkle_root_with_options(block.transactions.iter(), storage_mass_activated); + let calculated = calc_hash_merkle_root(block.transactions.iter(), storage_mass_activated); if calculated != block.header.hash_merkle_root { return Err(RuleError::BadMerkleRoot(block.header.hash_merkle_root, calculated)); } @@ -137,13 +137,17 @@ mod tests { api::{BlockValidationFutures, ConsensusApi}, block::MutableBlock, header::Header, - merkle::calc_hash_merkle_root, + merkle::calc_hash_merkle_root as calc_hash_merkle_root_with_options, subnets::{SUBNETWORK_ID_COINBASE, SUBNETWORK_ID_NATIVE}, tx::{scriptvec, ScriptPublicKey, Transaction, TransactionId, TransactionInput, TransactionOutpoint, TransactionOutput}, }; use kaspa_core::assert_match; use kaspa_hashes::Hash; + fn calc_hash_merkle_root<'a>(txs: impl ExactSizeIterator) -> Hash { + calc_hash_merkle_root_with_options(txs, false) + } + #[test] fn validate_body_in_isolation_test() { let consensus = TestConsensus::new(&Config::new(MAINNET_PARAMS)); diff --git a/consensus/src/pipeline/body_processor/processor.rs b/consensus/src/pipeline/body_processor/processor.rs index 1ea674263d..4191a01cec 100644 --- a/consensus/src/pipeline/body_processor/processor.rs +++ b/consensus/src/pipeline/body_processor/processor.rs @@ -17,13 +17,14 @@ use crate::{ deps_manager::{BlockProcessingMessage, BlockTaskDependencyManager, TaskId, VirtualStateProcessingMessage}, ProcessingCounters, }, - processes::{coinbase::CoinbaseManager, mass::MassCalculator, 
transaction_validator::TransactionValidator}, + processes::{coinbase::CoinbaseManager, transaction_validator::TransactionValidator}, }; use crossbeam_channel::{Receiver, Sender}; use kaspa_consensus_core::{ block::Block, blockstatus::BlockStatus::{self, StatusHeaderOnly, StatusInvalid}, config::genesis::GenesisBlock, + mass::MassCalculator, tx::Transaction, }; use kaspa_consensus_notify::{ @@ -200,8 +201,7 @@ impl BlockBodyProcessor { // transactions that fits the merkle root. // PrunedBlock - PrunedBlock is an error that rejects a block body and // not the block as a whole, so we shouldn't mark it as invalid. - // TODO: implement the last part. - if !matches!(e, RuleError::BadMerkleRoot(_, _) | RuleError::MissingParents(_)) { + if !matches!(e, RuleError::BadMerkleRoot(_, _) | RuleError::MissingParents(_) | RuleError::PrunedBlock) { self.statuses_store.write().set(block.hash(), BlockStatus::StatusInvalid).unwrap(); } return Err(e); @@ -225,7 +225,6 @@ impl BlockBodyProcessor { fn validate_body(self: &Arc, block: &Block, is_trusted: bool) -> BlockProcessResult { let mass = self.validate_body_in_isolation(block)?; if !is_trusted { - // TODO: Check that it's safe to skip this check if the block is trusted. self.validate_body_in_context(block)?; } Ok(mass) diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index d1b74aeb5c..6c93b91d9c 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -308,8 +308,6 @@ impl HeaderProcessor { // Runs partial header validation for trusted blocks (currently validates only header-in-isolation and computes GHOSTDAG). fn validate_trusted_header(&self, header: &Arc
) -> BlockProcessResult { - // TODO: For now we skip most validations for trusted blocks, but in the future we should - // employ some validations to avoid spam etc. let block_level = self.validate_header_in_isolation(header)?; let mut ctx = self.build_processing_context(header, block_level); self.ghostdag(&mut ctx); @@ -407,7 +405,6 @@ impl HeaderProcessor { && reachability::is_chain_ancestor_of(&staging, pp, ctx.hash).unwrap() { // Hint reachability about the new tip. - // TODO: identify a disqualified hst and make sure to use sink instead reachability::hint_virtual_selected_parent(&mut staging, ctx.hash).unwrap(); hst_write.set_batch(&mut batch, SortableBlock::new(ctx.hash, header.blue_work)).unwrap(); } diff --git a/consensus/src/pipeline/monitor.rs b/consensus/src/pipeline/monitor.rs index 600059f0a9..ca370a2f88 100644 --- a/consensus/src/pipeline/monitor.rs +++ b/consensus/src/pipeline/monitor.rs @@ -5,7 +5,7 @@ use kaspa_core::{ service::{AsyncService, AsyncServiceFuture}, tick::{TickReason, TickService}, }, - trace, + trace, warn, }; use std::{ sync::Arc, @@ -62,6 +62,13 @@ impl ConsensusMonitor { if delta.body_counts != 0 { delta.mass_counts as f64 / delta.body_counts as f64 } else{ 0f64 }, ); + if delta.chain_disqualified_counts > 0 { + warn!( + "Consensus detected UTXO-invalid blocks which are disqualified from the virtual selected chain (possibly due to inheritance): {} disqualified vs. {} valid chain blocks", + delta.chain_disqualified_counts, delta.chain_block_counts + ); + } + last_snapshot = snapshot; last_log_time = now; } diff --git a/consensus/src/pipeline/pruning_processor/processor.rs b/consensus/src/pipeline/pruning_processor/processor.rs index 8cded745a2..35dc211d51 100644 --- a/consensus/src/pipeline/pruning_processor/processor.rs +++ b/consensus/src/pipeline/pruning_processor/processor.rs @@ -2,7 +2,7 @@ use crate::{ consensus::{ - services::{ConsensusServices, DbGhostdagManager, DbPruningPointManager}, + services::{ConsensusServices, DbGhostdagManager, DbParentsManager, DbPruningPointManager}, storage::ConsensusStorage, }, model::{ @@ -31,7 +31,7 @@ use kaspa_consensus_core::{ muhash::MuHashExtensions, pruning::{PruningPointProof, PruningPointTrustedData}, trusted::ExternalGhostdagData, - BlockHashSet, + BlockHashMap, BlockHashSet, BlockLevel, }; use kaspa_consensusmanager::SessionLock; use kaspa_core::{debug, info, warn}; @@ -42,7 +42,7 @@ use kaspa_utils::iter::IterExtensions; use parking_lot::RwLockUpgradableReadGuard; use rocksdb::WriteBatch; use std::{ - collections::VecDeque, + collections::{hash_map::Entry::Vacant, VecDeque}, ops::Deref, sync::{ atomic::{AtomicBool, Ordering}, @@ -72,6 +72,7 @@ pub struct PruningProcessor { ghostdag_managers: Arc>, pruning_point_manager: DbPruningPointManager, pruning_proof_manager: Arc, + parents_manager: DbParentsManager, // Pruning lock pruning_lock: SessionLock, @@ -109,6 +110,7 @@ impl PruningProcessor { ghostdag_managers: services.ghostdag_managers.clone(), pruning_point_manager: services.pruning_point_manager.clone(), pruning_proof_manager: services.pruning_proof_manager.clone(), + parents_manager: services.parents_manager.clone(), pruning_lock, config, is_consensus_exiting, @@ -262,35 +264,35 @@ impl PruningProcessor { // We keep full data for pruning point and its anticone, relations for DAA/GD // windows and pruning proof, and only headers for past pruning points let keep_blocks: BlockHashSet = data.anticone.iter().copied().collect(); - let keep_relations: BlockHashSet = std::iter::empty() + let mut 
keep_relations: BlockHashMap = std::iter::empty() .chain(data.anticone.iter().copied()) .chain(data.daa_window_blocks.iter().map(|th| th.header.hash)) .chain(data.ghostdag_blocks.iter().map(|gd| gd.hash)) - .chain(proof.iter().flatten().map(|h| h.hash)) + .chain(proof[0].iter().map(|h| h.hash)) + .map(|h| (h, 0)) // Mark block level 0 for all the above. Note that below we add the remaining levels .collect(); let keep_headers: BlockHashSet = self.past_pruning_points(); info!("Header and Block pruning: waiting for consensus write permissions..."); let mut prune_guard = self.pruning_lock.blocking_write(); - let mut lock_acquire_time = Instant::now(); - let mut reachability_read = self.reachability_store.upgradable_read(); info!("Starting Header and Block pruning..."); { let mut counter = 0; let mut batch = WriteBatch::default(); - for kept in keep_relations.iter().copied() { + // At this point keep_relations only holds level-0 relations which is the correct filtering criteria for primary GHOSTDAG + for kept in keep_relations.keys().copied() { let Some(ghostdag) = self.ghostdag_primary_store.get_data(kept).unwrap_option() else { continue; }; - if ghostdag.unordered_mergeset().any(|h| !keep_relations.contains(&h)) { + if ghostdag.unordered_mergeset().any(|h| !keep_relations.contains_key(&h)) { let mut mutable_ghostdag: ExternalGhostdagData = ghostdag.as_ref().into(); - mutable_ghostdag.mergeset_blues.retain(|h| keep_relations.contains(h)); - mutable_ghostdag.mergeset_reds.retain(|h| keep_relations.contains(h)); - mutable_ghostdag.blues_anticone_sizes.retain(|k, _| keep_relations.contains(k)); - if !keep_relations.contains(&mutable_ghostdag.selected_parent) { + mutable_ghostdag.mergeset_blues.retain(|h| keep_relations.contains_key(h)); + mutable_ghostdag.mergeset_reds.retain(|h| keep_relations.contains_key(h)); + mutable_ghostdag.blues_anticone_sizes.retain(|k, _| keep_relations.contains_key(k)); + if !keep_relations.contains_key(&mutable_ghostdag.selected_parent) { mutable_ghostdag.selected_parent = ORIGIN; } counter += 1; @@ -301,6 +303,45 @@ impl PruningProcessor { info!("Header and Block pruning: updated ghostdag data for {} blocks", counter); } + // No need to hold the prune guard while we continue populating keep_relations + drop(prune_guard); + + // Add additional levels only after filtering GHOSTDAG data via level 0 + for (level, level_proof) in proof.iter().enumerate().skip(1) { + let level = level as BlockLevel; + // We obtain the headers of the pruning point anticone (including the pruning point) + // in order to mark all parents of anticone roots at level as not-to-be-deleted. + // This optimizes multi-level parent validation (see ParentsManager) + // by avoiding the deletion of high-level parents which might still be needed for future + // header validation (avoiding the need for reference blocks; see therein). + // + // Notes: + // + // 1. Normally, such blocks would be part of the proof for this level, but here we address the rare case + // where there are a few such parallel blocks (since the proof only contains the past of the pruning point's + // selected-tip-at-level) + // 2. We refer to the pp anticone as roots even though technically it might contain blocks which are not a pure + // antichain (i.e., some of them are in the past of others). These blocks only add redundant info which would + // be included anyway. 
+ let roots_parents_at_level = data + .anticone + .iter() + .copied() + .map(|hash| self.headers_store.get_header_with_block_level(hash).expect("pruning point anticone is not pruned")) + .filter(|root| level > root.block_level) // If the root itself is at level, there's no need for its level-parents + .flat_map(|root| self.parents_manager.parents_at_level(&root.header, level).iter().copied().collect_vec()); + for hash in level_proof.iter().map(|header| header.hash).chain(roots_parents_at_level) { + if let Vacant(e) = keep_relations.entry(hash) { + // This hash was not added by any lower level -- mark it as affiliated with proof level `level` + e.insert(level); + } + } + } + + prune_guard = self.pruning_lock.blocking_write(); + let mut lock_acquire_time = Instant::now(); + let mut reachability_read = self.reachability_store.upgradable_read(); + { // Start with a batch for pruning body tips and selected chain stores let mut batch = WriteBatch::default(); @@ -388,7 +429,7 @@ impl PruningProcessor { self.acceptance_data_store.delete_batch(&mut batch, current).unwrap(); self.block_transactions_store.delete_batch(&mut batch, current).unwrap(); - if keep_relations.contains(¤t) { + if let Some(&affiliated_proof_level) = keep_relations.get(¤t) { if statuses_write.get(current).unwrap_option().is_some_and(|s| s.is_valid()) { // We set the status to header-only only if it was previously set to a valid // status. This is important since some proof headers might not have their status set @@ -396,6 +437,15 @@ impl PruningProcessor { // other parts of the code assume the existence of GD data etc.) statuses_write.set_batch(&mut batch, current, StatusHeaderOnly).unwrap(); } + + // Delete level-x relations for blocks which only belong to higher-than-x proof levels. + // This preserves the semantic that for each level, relations represent a contiguous DAG area in that level + for lower_level in 0..affiliated_proof_level as usize { + let mut staging_level_relations = StagingRelationsStore::new(&mut level_relations_write[lower_level]); + relations::delete_level_relations(MemoryWriter, &mut staging_level_relations, current).unwrap_option(); + staging_level_relations.commit(&mut batch).unwrap(); + self.ghostdag_stores[lower_level].delete_batch(&mut batch, current).unwrap_option(); + } } else { // Count only blocks which get fully pruned including DAG relations counter += 1; diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index 07a1857ba6..dd9733fdda 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -48,12 +48,13 @@ use crate::{ }; use kaspa_consensus_core::{ acceptance_data::AcceptanceData, + api::args::{TransactionValidationArgs, TransactionValidationBatchArgs}, block::{BlockTemplate, MutableBlock, TemplateBuildMode, TemplateTransactionSelector}, blockstatus::BlockStatus::{StatusDisqualifiedFromChain, StatusUTXOValid}, coinbase::MinerData, config::genesis::GenesisBlock, header::Header, - merkle::calc_hash_merkle_root_with_options, + merkle::calc_hash_merkle_root, pruning::PruningPointsList, tx::{MutableTransaction, Transaction}, utxo::{ @@ -76,8 +77,10 @@ use kaspa_hashes::Hash; use kaspa_muhash::MuHash; use kaspa_notify::{events::EventType, notifier::Notify}; +use super::errors::{PruningImportError, PruningImportResult}; use crossbeam_channel::{Receiver as CrossbeamReceiver, Sender as CrossbeamSender}; use itertools::Itertools; +use 
kaspa_consensus_core::tx::ValidatedTransaction; use kaspa_utils::binary_heap::BinaryHeapExtensions; use parking_lot::{RwLock, RwLockUpgradableReadGuard}; use rand::{seq::SliceRandom, Rng}; @@ -93,8 +96,6 @@ use std::{ sync::{atomic::Ordering, Arc}, }; -use super::errors::{PruningImportError, PruningImportResult}; - pub struct VirtualStateProcessor { // Channels receiver: CrossbeamReceiver, @@ -289,7 +290,7 @@ impl VirtualStateProcessor { assert_eq!(virtual_ghostdag_data.selected_parent, new_sink); let sink_multiset = self.utxo_multisets_store.get(new_sink).unwrap(); - let chain_path = self.dag_traversal_manager.calculate_chain_path(prev_sink, new_sink); + let chain_path = self.dag_traversal_manager.calculate_chain_path(prev_sink, new_sink, None); let new_virtual_state = self .calculate_and_commit_virtual_state( virtual_read, @@ -382,10 +383,12 @@ impl VirtualStateProcessor { // Walk back up to the new virtual selected parent candidate let mut chain_block_counter = 0; + let mut chain_disqualified_counter = 0; for (selected_parent, current) in self.reachability_service.forward_chain_iterator(split_point, to, true).tuple_windows() { if selected_parent != diff_point { // This indicates that the selected parent is disqualified, propagate up and continue self.statuses_store.write().set(current, StatusDisqualifiedFromChain).unwrap(); + chain_disqualified_counter += 1; continue; } @@ -415,6 +418,7 @@ impl VirtualStateProcessor { if let Err(rule_error) = res { info!("Block {} is disqualified from virtual chain: {}", current, rule_error); self.statuses_store.write().set(current, StatusDisqualifiedFromChain).unwrap(); + chain_disqualified_counter += 1; } else { debug!("VIRTUAL PROCESSOR, UTXO validated for {current}"); @@ -433,6 +437,9 @@ impl VirtualStateProcessor { } // Report counters self.counters.chain_block_counts.fetch_add(chain_block_counter, Ordering::Relaxed); + if chain_disqualified_counter > 0 { + self.counters.chain_disqualified_counts.fetch_add(chain_disqualified_counter, Ordering::Relaxed); + } diff_point } @@ -558,7 +565,7 @@ impl VirtualStateProcessor { finality_point: Hash, pruning_point: Hash, ) -> (Hash, VecDeque) { - // TODO: tests + // TODO (relaxed): additional tests let mut heap = tips .into_iter() @@ -620,7 +627,7 @@ impl VirtualStateProcessor { mut candidates: VecDeque, pruning_point: Hash, ) -> (Vec, GhostdagData) { - // TODO: tests + // TODO (relaxed): additional tests // Mergeset increasing might traverse DAG areas which are below the finality point and which theoretically // can borderline with pruned data, hence we acquire the prune lock to ensure data consistency. Note that @@ -669,7 +676,7 @@ impl VirtualStateProcessor { MergesetIncreaseResult::Rejected { new_candidate } => { // If we already have a candidate in the past of new candidate then skip. 
if self.reachability_service.is_any_dag_ancestor(&mut candidates.iter().copied(), new_candidate) { - continue; // TODO: not sure this test is needed if candidates invariant as antichain is kept + continue; // TODO (optimization): not sure this check is needed if candidates invariant as antichain is kept } // Remove all candidates which are in the future of the new candidate candidates.retain(|&h| !self.reachability_service.is_dag_ancestor_of(new_candidate, h)); @@ -757,14 +764,15 @@ impl VirtualStateProcessor { virtual_utxo_view: &impl UtxoView, virtual_daa_score: u64, virtual_past_median_time: u64, + args: &TransactionValidationArgs, ) -> TxResult<()> { self.transaction_validator.validate_tx_in_isolation(&mutable_tx.tx)?; self.transaction_validator.utxo_free_tx_validation(&mutable_tx.tx, virtual_daa_score, virtual_past_median_time)?; - self.validate_mempool_transaction_in_utxo_context(mutable_tx, virtual_utxo_view, virtual_daa_score)?; + self.validate_mempool_transaction_in_utxo_context(mutable_tx, virtual_utxo_view, virtual_daa_score, args)?; Ok(()) } - pub fn validate_mempool_transaction(&self, mutable_tx: &mut MutableTransaction) -> TxResult<()> { + pub fn validate_mempool_transaction(&self, mutable_tx: &mut MutableTransaction, args: &TransactionValidationArgs) -> TxResult<()> { let virtual_read = self.virtual_stores.read(); let virtual_state = virtual_read.state.get().unwrap(); let virtual_utxo_view = &virtual_read.utxo_set; @@ -773,14 +781,24 @@ impl VirtualStateProcessor { if mutable_tx.tx.inputs.len() > 1 { // use pool to apply par_iter to inputs self.thread_pool.install(|| { - self.validate_mempool_transaction_impl(mutable_tx, virtual_utxo_view, virtual_daa_score, virtual_past_median_time) + self.validate_mempool_transaction_impl( + mutable_tx, + virtual_utxo_view, + virtual_daa_score, + virtual_past_median_time, + args, + ) }) } else { - self.validate_mempool_transaction_impl(mutable_tx, virtual_utxo_view, virtual_daa_score, virtual_past_median_time) + self.validate_mempool_transaction_impl(mutable_tx, virtual_utxo_view, virtual_daa_score, virtual_past_median_time, args) } } - pub fn validate_mempool_transactions_in_parallel(&self, mutable_txs: &mut [MutableTransaction]) -> Vec> { + pub fn validate_mempool_transactions_in_parallel( + &self, + mutable_txs: &mut [MutableTransaction], + args: &TransactionValidationBatchArgs, + ) -> Vec> { let virtual_read = self.virtual_stores.read(); let virtual_state = virtual_read.state.get().unwrap(); let virtual_utxo_view = &virtual_read.utxo_set; @@ -791,7 +809,13 @@ impl VirtualStateProcessor { mutable_txs .par_iter_mut() .map(|mtx| { - self.validate_mempool_transaction_impl(mtx, &virtual_utxo_view, virtual_daa_score, virtual_past_median_time) + self.validate_mempool_transaction_impl( + mtx, + &virtual_utxo_view, + virtual_daa_score, + virtual_past_median_time, + args.get(&mtx.id()), + ) }) .collect::>>() }) @@ -828,12 +852,9 @@ impl VirtualStateProcessor { txs: &[Transaction], virtual_state: &VirtualState, utxo_view: &V, - ) -> Vec> { - self.thread_pool.install(|| { - txs.par_iter() - .map(|tx| self.validate_block_template_transaction(tx, virtual_state, &utxo_view)) - .collect::>>() - }) + ) -> Vec> { + self.thread_pool + .install(|| txs.par_iter().map(|tx| self.validate_block_template_transaction(tx, virtual_state, &utxo_view)).collect()) } fn validate_block_template_transaction( @@ -841,13 +862,14 @@ impl VirtualStateProcessor { tx: &Transaction, virtual_state: &VirtualState, utxo_view: &impl UtxoView, - ) -> TxResult<()> { + ) -> TxResult 
{ // No need to validate the transaction in isolation since we rely on the mining manager to submit transactions // which were previously validated through `validate_mempool_transaction_and_populate`, hence we only perform // in-context validations self.transaction_validator.utxo_free_tx_validation(tx, virtual_state.daa_score, virtual_state.past_median_time)?; - self.validate_transaction_in_utxo_context(tx, utxo_view, virtual_state.daa_score, TxValidationFlags::Full)?; - Ok(()) + let ValidatedTransaction { calculated_fee, .. } = + self.validate_transaction_in_utxo_context(tx, utxo_view, virtual_state.daa_score, TxValidationFlags::Full)?; + Ok(calculated_fee) } pub fn build_block_template( @@ -857,14 +879,14 @@ impl VirtualStateProcessor { build_mode: TemplateBuildMode, ) -> Result { // - // TODO: tests + // TODO (relaxed): additional tests // // We call for the initial tx batch before acquiring the virtual read lock, // optimizing for the common case where all txs are valid. Following selection calls // are called within the lock in order to preserve validness of already validated txs let mut txs = tx_selector.select_transactions(); - + let mut calculated_fees = Vec::with_capacity(txs.len()); let virtual_read = self.virtual_stores.read(); let virtual_state = virtual_read.state.get().unwrap(); let virtual_utxo_view = &virtual_read.utxo_set; @@ -872,9 +894,14 @@ impl VirtualStateProcessor { let mut invalid_transactions = HashMap::new(); let results = self.validate_block_template_transactions_in_parallel(&txs, &virtual_state, &virtual_utxo_view); for (tx, res) in txs.iter().zip(results) { - if let Err(e) = res { - invalid_transactions.insert(tx.id(), e); - tx_selector.reject_selection(tx.id()); + match res { + Err(e) => { + invalid_transactions.insert(tx.id(), e); + tx_selector.reject_selection(tx.id()); + } + Ok(fee) => { + calculated_fees.push(fee); + } } } @@ -889,12 +916,16 @@ impl VirtualStateProcessor { let next_batch_results = self.validate_block_template_transactions_in_parallel(&next_batch, &virtual_state, &virtual_utxo_view); for (tx, res) in next_batch.into_iter().zip(next_batch_results) { - if let Err(e) = res { - invalid_transactions.insert(tx.id(), e); - tx_selector.reject_selection(tx.id()); - has_rejections = true; - } else { - txs.push(tx); + match res { + Err(e) => { + invalid_transactions.insert(tx.id(), e); + tx_selector.reject_selection(tx.id()); + has_rejections = true; + } + Ok(fee) => { + txs.push(tx); + calculated_fees.push(fee); + } } } } @@ -911,7 +942,7 @@ impl VirtualStateProcessor { drop(virtual_read); // Build the template - self.build_block_template_from_virtual_state(virtual_state, miner_data, txs) + self.build_block_template_from_virtual_state(virtual_state, miner_data, txs, calculated_fees) } pub(crate) fn validate_block_template_transactions( @@ -939,6 +970,7 @@ impl VirtualStateProcessor { virtual_state: Arc, miner_data: MinerData, mut txs: Vec, + calculated_fees: Vec, ) -> Result { // [`calc_block_parents`] can use deep blocks below the pruning point for this calculation, so we // need to hold the pruning lock. 
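The `calculated_fees` plumbing above feeds the feerate definition introduced earlier in this patch (`calculated_feerate`): fee paid per gram of contextual mass. A standalone sketch of that arithmetic (illustrative values, not consensus code):

// Feerate as defined by `calculated_feerate`: fee (sompi) per gram of
// contextual (compute & storage) mass; None when mass is zero or fee unknown.
fn feerate(calculated_fee: Option<u64>, contextual_mass: u64) -> Option<f64> {
    (contextual_mass > 0).then_some(())?;
    calculated_fee.map(|fee| fee as f64 / contextual_mass as f64)
}

fn main() {
    assert_eq!(feerate(Some(3_000), 1_500), Some(2.0)); // 2 sompi per gram
    assert_eq!(feerate(Some(3_000), 0), None);
    assert_eq!(feerate(None, 1_500), None);
}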
@@ -962,7 +994,7 @@ impl VirtualStateProcessor { // Hash according to hardfork activation let storage_mass_activated = virtual_state.daa_score > self.storage_mass_activation_daa_score; - let hash_merkle_root = calc_hash_merkle_root_with_options(txs.iter(), storage_mass_activated); + let hash_merkle_root = calc_hash_merkle_root(txs.iter(), storage_mass_activated); let accepted_id_merkle_root = kaspa_merkle::calc_merkle_root(virtual_state.accepted_tx_ids.iter().copied()); let utxo_commitment = virtual_state.multiset.clone().finalize(); @@ -992,6 +1024,7 @@ impl VirtualStateProcessor { selected_parent_timestamp, selected_parent_daa_score, selected_parent_hash, + calculated_fees, )) } @@ -1034,7 +1067,7 @@ impl VirtualStateProcessor { ); } - // TODO: rename to reflect finalizing pruning point utxoset state and importing *to* virtual utxoset + /// Finalizes the pruning point utxoset state and imports the pruning point utxoset *to* virtual utxoset pub fn import_pruning_point_utxo_set( &self, new_pruning_point: Hash, diff --git a/consensus/src/pipeline/virtual_processor/test_block_builder.rs b/consensus/src/pipeline/virtual_processor/test_block_builder.rs index 872bf15b40..2654a6a5fe 100644 --- a/consensus/src/pipeline/virtual_processor/test_block_builder.rs +++ b/consensus/src/pipeline/virtual_processor/test_block_builder.rs @@ -61,6 +61,6 @@ impl TestBlockBuilder { let pov_virtual_utxo_view = (&virtual_read.utxo_set).compose(accumulated_diff); self.validate_block_template_transactions(&txs, &pov_virtual_state, &pov_virtual_utxo_view)?; drop(virtual_read); - self.build_block_template_from_virtual_state(pov_virtual_state, miner_data, txs) + self.build_block_template_from_virtual_state(pov_virtual_state, miner_data, txs, vec![]) } } diff --git a/consensus/src/pipeline/virtual_processor/utxo_validation.rs b/consensus/src/pipeline/virtual_processor/utxo_validation.rs index 1129762945..306f81446c 100644 --- a/consensus/src/pipeline/virtual_processor/utxo_validation.rs +++ b/consensus/src/pipeline/virtual_processor/utxo_validation.rs @@ -5,19 +5,18 @@ use crate::{ RuleError::{BadAcceptedIDMerkleRoot, BadCoinbaseTransaction, BadUTXOCommitment, InvalidTransactionsInUtxoContext}, }, model::stores::{block_transactions::BlockTransactionsStoreReader, daa::DaaStoreReader, ghostdag::GhostdagData}, - processes::{ - mass::Kip9Version, - transaction_validator::{ - errors::{TxResult, TxRuleError}, - transaction_validator_populated::TxValidationFlags, - }, + processes::transaction_validator::{ + errors::{TxResult, TxRuleError}, + transaction_validator_populated::TxValidationFlags, }, }; use kaspa_consensus_core::{ acceptance_data::{AcceptedTxEntry, MergesetBlockAcceptanceData}, + api::args::TransactionValidationArgs, coinbase::*, hashing, header::Header, + mass::Kip9Version, muhash::MuHashExtensions, tx::{MutableTransaction, PopulatedTransaction, Transaction, TransactionId, ValidatedTransaction, VerifiableTransaction}, utxo::{ @@ -248,7 +247,7 @@ impl VirtualStateProcessor { } } let populated_tx = PopulatedTransaction::new(transaction, entries); - let res = self.transaction_validator.validate_populated_transaction_and_get_fee(&populated_tx, pov_daa_score, flags); + let res = self.transaction_validator.validate_populated_transaction_and_get_fee(&populated_tx, pov_daa_score, flags, None); match res { Ok(calculated_fee) => Ok(ValidatedTransaction::new(populated_tx, calculated_fee)), Err(tx_rule_error) => { @@ -268,7 +267,6 @@ impl VirtualStateProcessor { for i in 0..mutable_tx.tx.inputs.len() { if 
mutable_tx.entries[i].is_some() { // We prefer a previously populated entry if such exists - // TODO: consider re-checking the utxo view to get the most up-to-date entry (since DAA score can change) continue; } if let Some(entry) = utxo_view.get(&mutable_tx.tx.inputs[i].previous_outpoint) { @@ -290,6 +288,7 @@ impl VirtualStateProcessor { mutable_tx: &mut MutableTransaction, utxo_view: &impl UtxoView, pov_daa_score: u64, + args: &TransactionValidationArgs, ) -> TxResult<()> { self.populate_mempool_transaction_in_utxo_context(mutable_tx, utxo_view)?; @@ -308,10 +307,12 @@ impl VirtualStateProcessor { mutable_tx.tx.set_mass(contextual_mass); // At this point we know all UTXO entries are populated, so we can safely pass the tx as verifiable + let mass_and_feerate_threshold = args.feerate_threshold.map(|threshold| (contextual_mass, threshold)); let calculated_fee = self.transaction_validator.validate_populated_transaction_and_get_fee( &mutable_tx.as_verifiable(), pov_daa_score, TxValidationFlags::SkipMassCheck, // we can skip the mass check since we just set it + mass_and_feerate_threshold, )?; mutable_tx.calculated_fee = Some(calculated_fee); Ok(()) diff --git a/consensus/src/processes/coinbase.rs b/consensus/src/processes/coinbase.rs index 4e3c36b797..f79bbed751 100644 --- a/consensus/src/processes/coinbase.rs +++ b/consensus/src/processes/coinbase.rs @@ -5,7 +5,7 @@ use kaspa_consensus_core::{ tx::{ScriptPublicKey, ScriptVec, Transaction, TransactionOutput}, BlockHashMap, BlockHashSet, }; -use std::{convert::TryInto, mem::size_of}; +use std::convert::TryInto; use crate::{constants, model::stores::ghostdag::GhostdagData}; diff --git a/consensus/src/processes/ghostdag/protocol.rs b/consensus/src/processes/ghostdag/protocol.rs index 87beeb565d..8dfe4e7937 100644 --- a/consensus/src/processes/ghostdag/protocol.rs +++ b/consensus/src/processes/ghostdag/protocol.rs @@ -91,7 +91,7 @@ impl pub fn ghostdag(&self, parents: &[Hash]) -> GhostdagData { assert!(!parents.is_empty(), "genesis must be added via a call to init"); diff --git a/consensus/src/processes/mass.rs b/consensus/src/processes/mass.rs deleted file mode 100644 index 8bb5f3339f..0000000000 --- a/consensus/src/processes/mass.rs +++ /dev/null @@ -1,256 +0,0 @@ -use kaspa_consensus_core::{ - mass::transaction_estimated_serialized_size, - tx::{Transaction, VerifiableTransaction}, -}; - -/// Temp enum for the transition phases of KIP9 -#[derive(Copy, Clone, PartialEq, Eq)] -pub enum Kip9Version { - /// Initial KIP9 mass calculation, w/o the relaxed formula and summing storage mass and compute mass - Alpha, - - /// Currently proposed KIP9 mass calculation, with the relaxed formula (for the cases `|O| = 1 OR |O| <= |I| <= 2`), - /// and using a maximum operator over storage and compute mass - Beta, -} - -// TODO (aspect) - review and potentially merge this with the new MassCalculator currently located in the wallet core -// (i.e. migrate mass calculator from wallet core here or to consensus core) -#[derive(Clone)] -pub struct MassCalculator { - mass_per_tx_byte: u64, - mass_per_script_pub_key_byte: u64, - mass_per_sig_op: u64, - storage_mass_parameter: u64, -} - -impl MassCalculator { - pub fn new(mass_per_tx_byte: u64, mass_per_script_pub_key_byte: u64, mass_per_sig_op: u64, storage_mass_parameter: u64) -> Self { - Self { mass_per_tx_byte, mass_per_script_pub_key_byte, mass_per_sig_op, storage_mass_parameter } - } - - /// Calculates the compute mass of this transaction. 
This does not include the storage mass calculation below which - /// requires full UTXO context - pub fn calc_tx_compute_mass(&self, tx: &Transaction) -> u64 { - if tx.is_coinbase() { - return 0; - } - - let size = transaction_estimated_serialized_size(tx); - let mass_for_size = size * self.mass_per_tx_byte; - let total_script_public_key_size: u64 = tx - .outputs - .iter() - .map(|output| 2 /* script public key version (u16) */ + output.script_public_key.script().len() as u64) - .sum(); - let total_script_public_key_mass = total_script_public_key_size * self.mass_per_script_pub_key_byte; - - let total_sigops: u64 = tx.inputs.iter().map(|input| input.sig_op_count as u64).sum(); - let total_sigops_mass = total_sigops * self.mass_per_sig_op; - - mass_for_size + total_script_public_key_mass + total_sigops_mass - } - - /// Calculates the storage mass for this populated transaction. - /// Assumptions which must be verified before this call: - /// 1. All output values are non-zero - /// 2. At least one input (unless coinbase) - /// - /// Otherwise this function should never fail. - pub fn calc_tx_storage_mass(&self, tx: &impl VerifiableTransaction, version: Kip9Version) -> Option<u64> { - if tx.is_coinbase() { - return Some(0); - } - /* The code below computes the following formula: - - max( 0 , C·( |O|/H(O) - |I|/A(I) ) ) - - where C is the mass storage parameter, O is the set of output values, I is the set of - input values, H(S) := |S|/sum_{s in S} 1 / s is the harmonic mean over the set S and - A(S) := (sum_{s in S} s) / |S| is the arithmetic mean. - - See KIP-0009 for more details - */ - - // Since we are doing integer division, we perform the multiplication with C over the inner - // fractions, otherwise we'll get a sum of zeros or ones. - // - // If sum of fractions overflowed (nearly impossible, requires 10^7 outputs for C = 10^12), - // we return `None` indicating mass is incomputable - // - // Note: in theory this can be tightened by subtracting input mass in the process (possibly avoiding the overflow), - // however the overflow case is so impractical with current mass limits that we avoid the hassle - let harmonic_outs = tx - .tx() - .outputs - .iter() - .map(|out| self.storage_mass_parameter / out.value) - .try_fold(0u64, |total, current| total.checked_add(current))?; // C·|O|/H(O) - - let outs_len = tx.tx().outputs.len() as u64; - let ins_len = tx.tx().inputs.len() as u64; - - /* - KIP-0009 relaxed formula for the cases |O| = 1 OR |O| <= |I| <= 2: - max( 0 , C·( |O|/H(O) - |I|/H(I) ) ) - - Note: in the case |I| = 1 both formulas are equal, yet the following code (harmonic_ins) is a bit more efficient. - Hence, we transform the condition to |O| = 1 OR |I| = 1 OR |O| = |I| = 2 which is equivalent (and faster). - */ - if version == Kip9Version::Beta && (outs_len == 1 || ins_len == 1 || (outs_len == 2 && ins_len == 2)) { - let harmonic_ins = tx - .populated_inputs() - .map(|(_, entry)| self.storage_mass_parameter / entry.amount) - .fold(0u64, |total, current| total.saturating_add(current)); // C·|I|/H(I) - return Some(harmonic_outs.saturating_sub(harmonic_ins)); // max( 0 , C·( |O|/H(O) - |I|/H(I) ) ); - } - - // Total supply is bounded, so a sum of existing UTXO entries cannot overflow (nor can it be zero) - let sum_ins = tx.populated_inputs().map(|(_, entry)| entry.amount).sum::<u64>(); // |I|·A(I) - let mean_ins = sum_ins / ins_len; - - // Inner fraction must be with C and over the mean value, in order to maximize precision.
- // We can saturate the overall expression at u64::MAX since we lower-bound the subtraction below by zero anyway - let arithmetic_ins = ins_len.saturating_mul(self.storage_mass_parameter / mean_ins); // C·|I|/A(I) - - Some(harmonic_outs.saturating_sub(arithmetic_ins)) // max( 0 , C·( |O|/H(O) - |I|/A(I) ) ) - } - - /// Calculates the overall mass of this transaction, combining both compute and storage masses. - /// The combination strategy depends on the version passed. - pub fn calc_tx_overall_mass( - &self, - tx: &impl VerifiableTransaction, - cached_compute_mass: Option<u64>, - version: Kip9Version, - ) -> Option<u64> { - match version { - Kip9Version::Alpha => self - .calc_tx_storage_mass(tx, version) - .and_then(|mass| mass.checked_add(cached_compute_mass.unwrap_or_else(|| self.calc_tx_compute_mass(tx.tx())))), - Kip9Version::Beta => self - .calc_tx_storage_mass(tx, version) - .map(|mass| mass.max(cached_compute_mass.unwrap_or_else(|| self.calc_tx_compute_mass(tx.tx())))), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use kaspa_consensus_core::{ - constants::{SOMPI_PER_KASPA, STORAGE_MASS_PARAMETER}, - subnets::SubnetworkId, - tx::*, - }; - use std::str::FromStr; - - #[test] - fn test_mass_storage() { - // Tx with less outs than ins - let mut tx = generate_tx_from_amounts(&[100, 200, 300], &[300, 300]); - let test_version = Kip9Version::Alpha; - - // Assert the formula: max( 0 , C·( |O|/H(O) - |I|/A(I) ) ) - - let storage_mass = - MassCalculator::new(0, 0, 0, 10u64.pow(12)).calc_tx_storage_mass(&tx.as_verifiable(), test_version).unwrap(); - assert_eq!(storage_mass, 0); // Compounds from 3 to 2, with symmetric outputs and no fee, should be zero - - // Create asymmetry - tx.tx.outputs[0].value = 50; - tx.tx.outputs[1].value = 550; - let storage_mass_parameter = 10u64.pow(12); - let storage_mass = - MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable(), test_version).unwrap(); - assert_eq!(storage_mass, storage_mass_parameter / 50 + storage_mass_parameter / 550 - 3 * (storage_mass_parameter / 200)); - - // Create a tx with more outs than ins - let base_value = 10_000 * SOMPI_PER_KASPA; - let mut tx = generate_tx_from_amounts(&[base_value, base_value, base_value * 2], &[base_value; 4]); - let storage_mass_parameter = STORAGE_MASS_PARAMETER; - let storage_mass = - MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable(), test_version).unwrap(); - assert_eq!(storage_mass, 4); // Inputs are above C so they don't contribute negative mass, 4 outputs exactly equal C each charge 1 - - let mut tx2 = tx.clone(); - tx2.tx.outputs[0].value = 10 * SOMPI_PER_KASPA; - let storage_mass = - MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx2.as_verifiable(), test_version).unwrap(); - assert_eq!(storage_mass, 1003); - - // Increase values over the limit - for out in tx.tx.outputs.iter_mut() { - out.value += 1 - } - tx.entries[0].as_mut().unwrap().amount += tx.tx.outputs.len() as u64; - let storage_mass = - MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable(), test_version).unwrap(); - assert_eq!(storage_mass, 0); - } - - #[test] - fn test_mass_storage_beta() { - // 2:2 transaction - let mut tx = generate_tx_from_amounts(&[100, 200], &[50, 250]); - let storage_mass_parameter = 10u64.pow(12); - let test_version = Kip9Version::Beta; - // Assert the formula: max( 0 , C·( |O|/H(O) - |I|/H(I) ) ) - - let storage_mass = - MassCalculator::new(0, 0, 0,
storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable(), test_version).unwrap(); - assert_eq!(storage_mass, 9000000000); - - // Set outputs to be equal to inputs - tx.tx.outputs[0].value = 100; - tx.tx.outputs[1].value = 200; - let storage_mass = - MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable(), test_version).unwrap(); - assert_eq!(storage_mass, 0); - - // Remove an output and make sure the other is small enough to make storage mass greater than zero - tx.tx.outputs.pop(); - tx.tx.outputs[0].value = 50; - let storage_mass = - MassCalculator::new(0, 0, 0, storage_mass_parameter).calc_tx_storage_mass(&tx.as_verifiable(), test_version).unwrap(); - assert_eq!(storage_mass, 5000000000); - } - - fn generate_tx_from_amounts(ins: &[u64], outs: &[u64]) -> MutableTransaction { - let script_pub_key = ScriptVec::from_slice(&[]); - let prev_tx_id = TransactionId::from_str("880eb9819a31821d9d2399e2f35e2433b72637e393d71ecc9b8d0250f49153c3").unwrap(); - let tx = Transaction::new( - 0, - (0..ins.len()) - .map(|i| TransactionInput { - previous_outpoint: TransactionOutpoint { transaction_id: prev_tx_id, index: i as u32 }, - signature_script: vec![], - sequence: 0, - sig_op_count: 0, - }) - .collect(), - outs.iter() - .copied() - .map(|out_amount| TransactionOutput { - value: out_amount, - script_public_key: ScriptPublicKey::new(0, script_pub_key.clone()), - }) - .collect(), - 1615462089000, - SubnetworkId::from_bytes([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), - 0, - vec![], - ); - let entries = ins - .iter() - .copied() - .map(|in_amount| UtxoEntry { - amount: in_amount, - script_public_key: ScriptPublicKey::new(0, script_pub_key.clone()), - block_daa_score: 0, - is_coinbase: false, - }) - .collect(); - MutableTransaction::with_entries(tx, entries) - } -} diff --git a/consensus/src/processes/mod.rs b/consensus/src/processes/mod.rs index fb1490deaa..301ad8a3c0 100644 --- a/consensus/src/processes/mod.rs +++ b/consensus/src/processes/mod.rs @@ -2,7 +2,6 @@ pub mod block_depth; pub mod coinbase; pub mod difficulty; pub mod ghostdag; -pub mod mass; pub mod parents_builder; pub mod past_median_time; pub mod pruning; diff --git a/consensus/src/processes/parents_builder.rs b/consensus/src/processes/parents_builder.rs index 49b3822d94..14df3fcecb 100644 --- a/consensus/src/processes/parents_builder.rs +++ b/consensus/src/processes/parents_builder.rs @@ -10,8 +10,6 @@ use crate::model::{ stores::{headers::HeaderStoreReader, reachability::ReachabilityStoreReader, relations::RelationsStoreReader}, }; -use super::reachability::ReachabilityResultExtensions; - #[derive(Clone)] pub struct ParentsManager { max_block_level: BlockLevel, @@ -52,13 +50,10 @@ impl .expect("at least one of the parents is expected to be in the future of the pruning point"); direct_parent_headers.swap(0, first_parent_in_future_of_pruning_point); - let origin_children = self.relations_service.get_children(ORIGIN).unwrap().read().iter().copied().collect_vec(); - let origin_children_headers = - origin_children.iter().copied().map(|parent| self.headers_store.get_header(parent).unwrap()).collect_vec(); - + let mut origin_children_headers = None; let mut parents = Vec::with_capacity(self.max_block_level as usize); - for block_level in 0..self.max_block_level { + for block_level in 0..=self.max_block_level { // Direct parents are guaranteed to be in one another's anticones so add them all to // all the block levels they occupy. 
let mut level_candidates_to_reference_blocks = direct_parent_headers @@ -96,71 +91,89 @@ impl .collect::<Vec<_>>() }; - for (i, parent) in grandparents.into_iter().enumerate() { - let is_in_origin_children_future = self - .reachability_service - .is_any_dag_ancestor_result(&mut origin_children.iter().copied(), parent) - .unwrap_option() - .is_some_and(|r| r); - - // Reference blocks are the blocks that are used in reachability queries to check if - // a candidate is in the future of another candidate. In most cases this is just the - // block itself, but in the case where a block doesn't have reachability data we need - // to use some blocks in its future as reference instead. - // If we make sure to add a parent in the future of the pruning point first, we can - // know that any pruned candidate that is in the past of some blocks in the pruning - // point anticone should be a parent (in the relevant level) of one of - // the virtual genesis children in the pruning point anticone. So we can check which - // virtual genesis children have this block as parent and use those block as - // reference blocks. - let reference_blocks = if is_in_origin_children_future { - smallvec![parent] - } else { - let mut reference_blocks = SmallVec::with_capacity(origin_children.len()); - for child_header in origin_children_headers.iter() { - if self.parents_at_level(child_header, block_level).contains(&parent) { - reference_blocks.push(child_header.hash); + let parents_at_level = if level_candidates_to_reference_blocks.is_empty() && first_parent_marker == grandparents.len() { + // Optimization: this is a common case for high levels where none of the direct parents is on the level + // and all direct parents have the same level parents. The condition captures this case because all grandparents + // will be below the first parent marker and there will be no additional grandparents. Because all grandparents come + // from a single, already validated parent, there's no need to run any additional antichain checks and we can return + // this set. + grandparents.into_iter().collect() + } else { + // + // Iterate through grandparents in order to find an antichain + for (i, parent) in grandparents.into_iter().enumerate() { + let has_reachability_data = self.reachability_service.has_reachability_data(parent); + + // Reference blocks are the blocks that are used in reachability queries to check if + // a candidate is in the future of another candidate. In most cases this is just the + // block itself, but in the case where a block doesn't have reachability data we need + // to use some blocks in its future as reference instead. + // If we make sure to add a parent in the future of the pruning point first, we can + // know that any pruned candidate that is in the past of some blocks in the pruning + // point anticone should be a parent (in the relevant level) of one of + // the origin children in the pruning point anticone. So we can check which + // origin children have this block as parent and use those blocks as + // reference blocks. + let reference_blocks = if has_reachability_data { + smallvec![parent] + } else { + // Here we explicitly declare the type because otherwise Rust would make it mutable.
+ let origin_children_headers: &Vec<_> = origin_children_headers.get_or_insert_with(|| { + self.relations_service + .get_children(ORIGIN) + .unwrap() + .read() + .iter() + .copied() + .map(|parent| self.headers_store.get_header(parent).unwrap()) + .collect_vec() + }); + let mut reference_blocks = SmallVec::with_capacity(origin_children_headers.len()); + for child_header in origin_children_headers.iter() { + if self.parents_at_level(child_header, block_level).contains(&parent) { + reference_blocks.push(child_header.hash); + } } + reference_blocks + }; + + // Make sure we process and insert all first parent's parents. See comments above. + // Note that as parents of an already validated block, they all form an antichain, + // hence no need for reachability queries yet. + if i < first_parent_marker { + level_candidates_to_reference_blocks.insert(parent, reference_blocks); + continue; } - reference_blocks - }; - - // Make sure we process and insert all first parent's parents. See comments above. - // Note that as parents of an already validated block, they all form an antichain, - // hence no need for reachability queries yet. - if i < first_parent_marker { - level_candidates_to_reference_blocks.insert(parent, reference_blocks); - continue; - } - if !is_in_origin_children_future { - continue; - } + if !has_reachability_data { + continue; + } - let len_before_retain = level_candidates_to_reference_blocks.len(); - level_candidates_to_reference_blocks - .retain(|_, refs| !self.reachability_service.is_any_dag_ancestor(&mut refs.iter().copied(), parent)); - let is_any_candidate_ancestor_of = level_candidates_to_reference_blocks.len() < len_before_retain; - - // We should add the block as a candidate if it's in the future of another candidate - // or in the anticone of all candidates. - if is_any_candidate_ancestor_of - || !level_candidates_to_reference_blocks.iter().any(|(_, candidate_references)| { - self.reachability_service.is_dag_ancestor_of_any(parent, &mut candidate_references.iter().copied()) - }) - { - level_candidates_to_reference_blocks.insert(parent, reference_blocks); + let len_before_retain = level_candidates_to_reference_blocks.len(); + level_candidates_to_reference_blocks + .retain(|_, refs| !self.reachability_service.is_any_dag_ancestor(&mut refs.iter().copied(), parent)); + let is_any_candidate_ancestor_of = level_candidates_to_reference_blocks.len() < len_before_retain; + + // We should add the block as a candidate if it's in the future of another candidate + // or in the anticone of all candidates. 
+ if is_any_candidate_ancestor_of + || !level_candidates_to_reference_blocks.iter().any(|(_, candidate_references)| { + self.reachability_service.is_dag_ancestor_of_any(parent, &mut candidate_references.iter().copied()) + }) + { + level_candidates_to_reference_blocks.insert(parent, reference_blocks); + } } - } - if block_level > 0 - && level_candidates_to_reference_blocks.len() == 1 - && level_candidates_to_reference_blocks.contains_key(&self.genesis_hash) - { + // After processing all grandparents, collect the successful level candidates + level_candidates_to_reference_blocks.keys().copied().collect_vec() + }; + + if block_level > 0 && parents_at_level.as_slice() == std::slice::from_ref(&self.genesis_hash) { break; } - parents.push(level_candidates_to_reference_blocks.keys().copied().collect_vec()); + parents.push(parents_at_level); } parents diff --git a/consensus/src/processes/pruning.rs b/consensus/src/processes/pruning.rs index 0fa1f76243..7c534af8ed 100644 --- a/consensus/src/processes/pruning.rs +++ b/consensus/src/processes/pruning.rs @@ -213,7 +213,7 @@ impl< let mut expected_pps_queue = VecDeque::new(); for current in self.reachability_service.backward_chain_iterator(hst, pruning_info.pruning_point, false) { let current_header = self.headers_store.get_header(current).unwrap(); - if expected_pps_queue.back().is_none_or(|&&h| h != current_header.pruning_point) { + if expected_pps_queue.back().is_none_or_ex(|&&h| h != current_header.pruning_point) { expected_pps_queue.push_back(current_header.pruning_point); } } diff --git a/consensus/src/processes/reachability/interval.rs b/consensus/src/processes/reachability/interval.rs index 9f8d7fbd09..b910f3ddf1 100644 --- a/consensus/src/processes/reachability/interval.rs +++ b/consensus/src/processes/reachability/interval.rs @@ -89,7 +89,7 @@ impl Interval { } /// Splits this interval to exactly |sizes| parts where - /// |part_i| = sizes[i]. This method expects sum(sizes) to be exactly + /// |part_i| = sizes\[i\]. This method expects sum(sizes) to be exactly /// equal to the interval's size. pub fn split_exact(&self, sizes: &[u64]) -> Vec { assert_eq!(sizes.iter().sum::(), self.size(), "sum of sizes must be equal to the interval's size"); @@ -107,7 +107,7 @@ impl Interval { /// Splits this interval to |sizes| parts /// by the allocation rule described below. This method expects sum(sizes) /// to be smaller or equal to the interval's size. Every part_i is - /// allocated at least sizes[i] capacity. The remaining budget is + /// allocated at least sizes\[i\] capacity. The remaining budget is /// split by an exponentially biased rule described below. 
/// /// This rule follows the GHOSTDAG protocol behavior where the child diff --git a/consensus/src/processes/reachability/tests/mod.rs index 67131f7f93..c315a250bd 100644 --- a/consensus/src/processes/reachability/tests/mod.rs +++ b/consensus/src/processes/reachability/tests/mod.rs @@ -105,6 +105,12 @@ impl DagBlock { } } +impl From<(u64, &[u64])> for DagBlock { + fn from(value: (u64, &[u64])) -> Self { + Self::new(value.0.into(), value.1.iter().map(|&i| i.into()).collect()) + } +} + /// A struct with fluent API to streamline DAG building pub struct DagBuilder<'a, T: ReachabilityStore + ?Sized, S: RelationsStore + ChildrenStore + ?Sized> { reachability: &'a mut T, diff --git a/consensus/src/processes/sync/mod.rs index 7b84801118..3978913bae 100644 --- a/consensus/src/processes/sync/mod.rs +++ b/consensus/src/processes/sync/mod.rs @@ -111,7 +111,7 @@ impl< (blocks, highest_reached) } - fn find_highest_common_chain_block(&self, low: Hash, high: Hash) -> Hash { + pub fn find_highest_common_chain_block(&self, low: Hash, high: Hash) -> Hash { self.reachability_service .default_backward_chain_iterator(low) .find(|candidate| self.reachability_service.is_chain_ancestor_of(*candidate, high)) @@ -191,7 +191,7 @@ impl< } } - if highest_with_body.is_none_or(|&h| h == high) { + if highest_with_body.is_none_or_ex(|&h| h == high) { return Ok(vec![]); }; diff --git a/consensus/src/processes/transaction_validator/mod.rs index 646a203555..008b0c4dd1 100644 --- a/consensus/src/processes/transaction_validator/mod.rs +++ b/consensus/src/processes/transaction_validator/mod.rs @@ -11,7 +11,7 @@ use kaspa_txscript::{ SigCacheKey, }; -use super::mass::MassCalculator; +use kaspa_consensus_core::mass::MassCalculator; #[derive(Clone)] pub struct TransactionValidator { diff --git a/consensus/src/processes/transaction_validator/transaction_validator_populated.rs index e53200fecc..5d9176e51a 100644 --- a/consensus/src/processes/transaction_validator/transaction_validator_populated.rs +++ b/consensus/src/processes/transaction_validator/transaction_validator_populated.rs @@ -1,9 +1,14 @@ use crate::constants::{MAX_SOMPI, SEQUENCE_LOCK_TIME_DISABLED, SEQUENCE_LOCK_TIME_MASK}; use kaspa_consensus_core::hashing::sighash::{SigHashReusedValues, SigHashReusedValuesSync}; -use kaspa_consensus_core::{hashing::sighash::SigHashReusedValuesUnsync, tx::VerifiableTransaction}; +use kaspa_consensus_core::{ + hashing::sighash::SigHashReusedValuesUnsync, + mass::Kip9Version, + tx::{TransactionInput, VerifiableTransaction}, +}; use kaspa_core::warn; use kaspa_txscript::caches::Cache; use kaspa_txscript::{get_sig_op_count, SigCacheKey, TxScriptEngine}; +use kaspa_txscript_errors::TxScriptError; use rayon::iter::IntoParallelIterator; use rayon::ThreadPool; use std::sync::Arc; @@ -32,10 +37,12 @@ impl TransactionValidator { tx: &(impl VerifiableTransaction + std::marker::Sync), pov_daa_score: u64, flags: TxValidationFlags, + mass_and_feerate_threshold: Option<(u64, f64)>, ) -> TxResult<u64> { self.check_transaction_coinbase_maturity(tx, pov_daa_score)?; let total_in = self.check_transaction_input_amounts(tx)?; let total_out = Self::check_transaction_output_values(tx, total_in)?; + let fee = total_in - total_out; if flags != TxValidationFlags::SkipMassCheck && pov_daa_score > self.storage_mass_activation_daa_score { //
Storage mass hardfork was activated self.check_mass_commitment(tx)?; @@ -45,6 +52,11 @@ impl TransactionValidator { } } Self::check_sequence_lock(tx, pov_daa_score)?; + + // The following call is not a consensus check (it could not be one in the first place since it uses floating-point numbers) + // but rather a mempool Replace by Fee validation rule. It was placed here purposely to avoid unneeded script checks. + Self::check_feerate_threshold(fee, mass_and_feerate_threshold)?; + match flags { TxValidationFlags::Full | TxValidationFlags::SkipMassCheck => { Self::check_sig_op_counts::<_, SigHashReusedValuesUnsync>(tx)?; } TxValidationFlags::SkipScriptChecks => {} } - Ok(total_in - total_out) + Ok(fee) + } + + fn check_feerate_threshold(fee: u64, mass_and_feerate_threshold: Option<(u64, f64)>) -> TxResult<()> { + // An actual check can only occur if some mass and threshold are provided; + // otherwise, the check does not verify anything and exits successfully. + if let Some((contextual_mass, feerate_threshold)) = mass_and_feerate_threshold { + assert!(contextual_mass > 0); + if fee as f64 / contextual_mass as f64 <= feerate_threshold { + return Err(TxRuleError::FeerateTooLow); + } + } + Ok(()) } fn check_transaction_coinbase_maturity(&self, tx: &impl VerifiableTransaction, pov_daa_score: u64) -> TxResult<()> { @@ -101,10 +125,8 @@ impl TransactionValidator { } fn check_mass_commitment(&self, tx: &impl VerifiableTransaction) -> TxResult<()> { - let calculated_contextual_mass = self - .mass_calculator - .calc_tx_overall_mass(tx, None, crate::processes::mass::Kip9Version::Alpha) - .ok_or(TxRuleError::MassIncomputable)?; + let calculated_contextual_mass = + self.mass_calculator.calc_tx_overall_mass(tx, None, Kip9Version::Alpha).ok_or(TxRuleError::MassIncomputable)?; let committed_contextual_mass = tx.tx().mass(); if committed_contextual_mass != calculated_contextual_mass { return Err(TxRuleError::WrongMass(calculated_contextual_mass, committed_contextual_mass)); @@ -166,8 +188,8 @@ pub fn check_scripts_single_threaded(sig_cache: &Cache, tx: & let reused_values = SigHashReusedValuesUnsync::new(); for (i, (input, entry)) in tx.populated_inputs().enumerate() { let mut engine = TxScriptEngine::from_transaction_input(tx, input, i, entry, &reused_values, sig_cache) - .map_err(TxRuleError::SignatureInvalid)?; - engine.execute().map_err(TxRuleError::SignatureInvalid)?; + .map_err(|err| map_script_err(err, input))?; + engine.execute().map_err(|err| map_script_err(err, input))?; } Ok(()) } @@ -209,6 +231,14 @@ pub fn check_scripts_par_iter_thread( }) } +fn map_script_err(script_err: TxScriptError, input: &TransactionInput) -> TxRuleError { + if input.signature_script.is_empty() { + TxRuleError::SignatureEmpty(script_err) + } else { + TxRuleError::SignatureInvalid(script_err) + } +} + #[cfg(test)] mod tests { use super::super::errors::TxRuleError; diff --git a/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs index 67901612da..914624f940 100644 --- a/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs +++ b/consensus/src/processes/transaction_validator/tx_validation_in_isolation.rs @@ -17,6 +17,7 @@ impl TransactionValidator { check_duplicate_transaction_inputs(tx)?; check_gas(tx)?; check_transaction_payload(tx)?; + check_transaction_subnetwork(tx)?; check_transaction_version(tx) } @@ -146,10 +147,18 @@ fn
check_transaction_output_value_ranges(tx: &Transaction) -> TxResult<()> { Ok(()) } +fn check_transaction_subnetwork(tx: &Transaction) -> TxResult<()> { + if tx.is_coinbase() || tx.subnetwork_id.is_native() { + Ok(()) + } else { + Err(TxRuleError::SubnetworksDisabled(tx.subnetwork_id.clone())) + } +} + #[cfg(test)] mod tests { use kaspa_consensus_core::{ - subnets::{SUBNETWORK_ID_COINBASE, SUBNETWORK_ID_NATIVE}, + subnets::{SubnetworkId, SUBNETWORK_ID_COINBASE, SUBNETWORK_ID_NATIVE}, tx::{scriptvec, ScriptPublicKey, Transaction, TransactionId, TransactionInput, TransactionOutpoint, TransactionOutput}, }; use kaspa_core::assert_match; @@ -261,6 +270,10 @@ mod tests { tv.validate_tx_in_isolation(&valid_tx).unwrap(); + let mut tx: Transaction = valid_tx.clone(); + tx.subnetwork_id = SubnetworkId::from_byte(3); + assert_match!(tv.validate_tx_in_isolation(&tx), Err(TxRuleError::SubnetworksDisabled(_))); + let mut tx = valid_tx.clone(); tx.inputs = vec![]; assert_match!(tv.validate_tx_in_isolation(&tx), Err(TxRuleError::NoTxInputs)); diff --git a/consensus/src/processes/traversal_manager.rs index 3ae0aef5d7..23dc5c69f0 100644 --- a/consensus/src/processes/traversal_manager.rs +++ b/consensus/src/processes/traversal_manager.rs @@ -31,7 +31,7 @@ impl ChainPath { + pub fn calculate_chain_path(&self, from: Hash, to: Hash, chain_path_added_limit: Option<usize>) -> ChainPath { let mut removed = Vec::new(); let mut common_ancestor = from; for current in self.reachability_service.default_backward_chain_iterator(from) { @@ -42,9 +42,20 @@ impl>(); err?; + if address.len() < 8 { + return Err(AddressError::BadPayload); + } + let (payload_u5, checksum_u5) = address_u5.split_at(address.len() - 8); let fivebit_prefix = prefix.as_str().as_bytes().iter().copied().map(|c| c & 0x1fu8); // Convert to number - let checksum_ = u64::from_be_bytes([vec![0u8; 3], conv5to8(checksum_u5)].concat().try_into().expect("Is exactly 8 bytes")); + let checksum_ = + u64::from_be_bytes([vec![0u8; 3], conv5to8(checksum_u5)].concat().try_into().map_err(|_| AddressError::BadChecksumSize)?); if checksum(payload_u5, fivebit_prefix) != checksum_ { return Err(AddressError::BadChecksum); diff --git a/crypto/addresses/src/lib.rs index fdba63ef7f..8e3ea385a8 100644 --- a/crypto/addresses/src/lib.rs +++ b/crypto/addresses/src/lib.rs @@ -1,3 +1,11 @@ +//! +//! Kaspa [`Address`] implementation. +//! +//! In its string form, the Kaspa [`Address`] is represented by a `bech32`-encoded +//! address string combined with a network type. The `bech32` string encoding is +//! composed of a public key, the public key version and the resulting checksum. +//! + use borsh::{BorshDeserialize, BorshSerialize}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use smallvec::SmallVec; @@ -11,6 +19,7 @@ use workflow_wasm::{ mod bech32; +/// Error type produced by [`Address`] operations.
#[derive(Error, PartialEq, Eq, Debug, Clone)] pub enum AddressError { #[error("The address has an invalid prefix {0}")] @@ -28,9 +37,15 @@ pub enum AddressError { #[error("The address contains an invalid character {0}")] DecodingError(char), + #[error("The address checksum is invalid (must be exactly 8 bytes)")] + BadChecksumSize, + #[error("The address checksum is invalid")] BadChecksum, + #[error("The address payload is invalid")] + BadPayload, + #[error("The address is invalid")] InvalidAddress, @@ -49,6 +64,7 @@ impl From for AddressError { /// Address prefix identifying the network type this address belongs to (such as `kaspa`, `kaspatest`, `kaspasim`, `kaspadev`). #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Debug, Hash, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[borsh(use_discriminant = true)] pub enum Prefix { #[serde(rename = "kaspa")] Mainnet, @@ -117,6 +133,7 @@ impl TryFrom<&str> for Prefix { /// @category Address #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Debug, Hash, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] #[repr(u8)] +#[borsh(use_discriminant = true)] #[wasm_bindgen(js_name = "AddressVersion")] pub enum Version { /// PubKey addresses always have the version byte set to 0 @@ -182,7 +199,8 @@ pub const PAYLOAD_VECTOR_SIZE: usize = 36; /// Used as the underlying type for address payload, optimized for the largest version length (33). pub type PayloadVec = SmallVec<[u8; PAYLOAD_VECTOR_SIZE]>; -/// Kaspa `Address` struct that serializes to and from an address format string: `kaspa:qz0s...t8cv`. +/// Kaspa [`Address`] struct that serializes to and from an address format string: `kaspa:qz0s...t8cv`. +/// /// @category Address #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Hash, CastFromJs)] #[wasm_bindgen(inspectable)] @@ -281,11 +299,10 @@ impl BorshSerialize for Address { } impl BorshDeserialize for Address { - fn deserialize(buf: &mut &[u8]) -> std::io::Result<Self> { - // Deserialize into vec first since we have no custom smallvec support - let prefix: Prefix = borsh::BorshDeserialize::deserialize(buf)?; - let version: Version = borsh::BorshDeserialize::deserialize(buf)?; - let payload: Vec<u8> = borsh::BorshDeserialize::deserialize(buf)?; + fn deserialize_reader<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> { + let prefix: Prefix = borsh::BorshDeserialize::deserialize_reader(reader)?; + let version: Version = borsh::BorshDeserialize::deserialize_reader(reader)?; + let payload: Vec<u8> = borsh::BorshDeserialize::deserialize_reader(reader)?; Ok(Self::new(prefix, version, &payload)) } } @@ -489,8 +506,11 @@ impl<'de> Deserialize<'de> for Address { impl TryCastFromJs for Address { type Error = AddressError; - fn try_cast_from(value: impl AsRef<JsValue>) -> Result<Cast<Self>, Self::Error> { - Self::resolve(&value, || { + fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<'a, Self>, Self::Error> + where + R: AsRef<JsValue> + 'a, + { + Self::resolve(value, || { if let Some(string) = value.as_ref().as_string() { Address::try_from(string) } else if let Some(object) = js_sys::Object::try_from(value.as_ref()) { @@ -506,12 +526,26 @@ impl TryCastFromJs for Address { #[wasm_bindgen] extern "C" { + /// WASM (TypeScript) type representing an Address-like object: `Address | string`. + /// + /// @category Address #[wasm_bindgen(extends = js_sys::Array, typescript_type = "Address | string")] pub type AddressT; + /// WASM (TypeScript) type representing an array of Address-like objects: `(Address | string)[]`.
+ /// + /// @category Address #[wasm_bindgen(extends = js_sys::Array, typescript_type = "(Address | string)[]")] pub type AddressOrStringArrayT; + /// WASM (TypeScript) type representing an array of [`Address`] objects: `Address[]`. + /// + /// @category Address #[wasm_bindgen(extends = js_sys::Array, typescript_type = "Address[]")] pub type AddressArrayT; + /// WASM (TypeScript) type representing an [`Address`] or an undefined value: `Address | undefined`. + /// + /// @category Address + #[wasm_bindgen(typescript_type = "Address | undefined")] + pub type AddressOrUndefinedT; } impl TryFrom<AddressOrStringArrayT> for Vec<Address>
{ diff --git a/crypto/hashes/src/hashers.rs index 7f6775aaae..b45026bd2b 100644 --- a/crypto/hashes/src/hashers.rs +++ b/crypto/hashes/src/hashers.rs @@ -51,7 +51,7 @@ macro_rules! sha256_hasher { // SHA256 doesn't natively support domain separation, so we hash it to make it constant size. let mut tmp_state = Sha256::new(); tmp_state.update($domain_sep); - let mut out = Self(Sha256::new()); + let mut out = $name(Sha256::new()); out.write(tmp_state.finalize()); out diff --git a/crypto/hashes/src/lib.rs index 6384a96c5d..d9ff47997c 100644 --- a/crypto/hashes/src/lib.rs +++ b/crypto/hashes/src/lib.rs @@ -187,8 +187,11 @@ impl Hash { type TryFromError = workflow_wasm::error::Error; impl TryCastFromJs for Hash { type Error = TryFromError; - fn try_cast_from(value: impl AsRef<JsValue>) -> Result<Cast<Self>, Self::Error> { - Self::resolve(&value, || { + fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<'a, Self>, Self::Error> + where + R: AsRef<JsValue> + 'a, + { + Self::resolve(value, || { let bytes = value.as_ref().try_as_vec_u8()?; Ok(Hash( <[u8; HASH_SIZE]>::try_from(bytes) diff --git a/crypto/muhash/Cargo.toml index b5badb664f..cef8ec5bf0 100644 --- a/crypto/muhash/Cargo.toml +++ b/crypto/muhash/Cargo.toml @@ -26,3 +26,5 @@ rand.workspace = true name = "bench" harness = false +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(fuzzing)'] } diff --git a/crypto/muhash/fuzz/fuzz_targets/u3072.rs index 584006628a..115c6f4a63 100644 --- a/crypto/muhash/fuzz/fuzz_targets/u3072.rs +++ b/crypto/muhash/fuzz/fuzz_targets/u3072.rs @@ -4,7 +4,6 @@ use kaspa_muhash::u3072::{self, U3072}; use num_bigint::BigInt; use num_integer::Integer; use num_traits::{One, Signed}; -use std::mem::size_of; fuzz_target!(|data: &[u8]| { if data.len() < muhash::SERIALIZED_MUHASH_SIZE { diff --git a/crypto/muhash/src/u3072.rs index 8d37f83810..82021eb88d 100644 --- a/crypto/muhash/src/u3072.rs +++ b/crypto/muhash/src/u3072.rs @@ -15,8 +15,8 @@ pub(crate) type DoubleLimb = u128; //#[cfg(target_pointer_width = "32")] //pub(crate) type DoubleLimb = u64; -const LIMB_SIZE_BYTES: usize = std::mem::size_of::<Limb>(); -const LIMB_SIZE: usize = std::mem::size_of::<Limb>() * 8; +const LIMB_SIZE_BYTES: usize = size_of::<Limb>(); +const LIMB_SIZE: usize = Limb::BITS as usize; pub const LIMBS: usize = crate::ELEMENT_BYTE_SIZE / LIMB_SIZE_BYTES; pub const PRIME_DIFF: Limb = 1103717; diff --git a/crypto/txscript/Cargo.toml index 6084df0b2a..e2f492ad38 100644 --- a/crypto/txscript/Cargo.toml +++ b/crypto/txscript/Cargo.toml @@ -9,24 +9,35 @@ include.workspace = true license.workspace = true repository.workspace = true +[features] +wasm32-core = [] +wasm32-sdk = [] + [dependencies] blake2b_simd.workspace = true borsh.workspace = true +cfg-if.workspace = true +hexplay.workspace = true indexmap.workspace = true itertools.workspace = true kaspa-addresses.workspace = true kaspa-consensus-core.workspace = true kaspa-hashes.workspace = true kaspa-txscript-errors.workspace = true +kaspa-utils.workspace = true +kaspa-wasm-core.workspace = true log.workspace = true parking_lot.workspace = true rand.workspace = true secp256k1.workspace = true +serde_json.workspace = true +serde-wasm-bindgen.workspace = true serde.workspace = true sha2.workspace = true smallvec.workspace = true thiserror.workspace = true wasm-bindgen.workspace = true +workflow-wasm.workspace = true [dev-dependencies] criterion.workspace = true
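The `wasm32-core` and `wasm32-sdk` features added to the manifest above gate the new WASM bindings (see the `cfg_if!` block in `crypto/txscript/src/wasm/mod.rs` further below), so purely native consumers pay no compile-time cost for them. A minimal, self-contained sketch of the same feature-gating pattern, with hypothetical module and function names:

// Hypothetical sketch of feature-gated bindings inside a library crate:
// native builds compile only `core_impl`; builds with a wasm feature enabled
// additionally compile (and re-export) the `wasm_bindings` module.
pub mod core_impl {
    /// Always-compiled core logic (hypothetical example function).
    pub fn script_len(script: &[u8]) -> usize {
        script.len()
    }
}

#[cfg(any(feature = "wasm32-sdk", feature = "wasm32-core"))]
pub mod wasm_bindings {
    /// WASM-facing wrapper, compiled only when a wasm feature is enabled.
    pub use super::core_impl::script_len;
}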
diff --git a/crypto/txscript/src/caches.rs index 4fbc37a1cc..8906f46707 100644 --- a/crypto/txscript/src/caches.rs +++ b/crypto/txscript/src/caches.rs @@ -32,9 +32,8 @@ impl Option { - self.map.read().get(key).cloned().map(|data| { + self.map.read().get(key).cloned().inspect(|_data| { self.counters.get_counts.fetch_add(1, Ordering::Relaxed); - data }) } @@ -87,8 +86,8 @@ impl core::ops::Sub for &TxScriptCacheCountersSnapshot { fn sub(self, rhs: Self) -> Self::Output { Self::Output { - insert_counts: self.insert_counts.checked_sub(rhs.insert_counts).unwrap_or_default(), - get_counts: self.get_counts.checked_sub(rhs.get_counts).unwrap_or_default(), + insert_counts: self.insert_counts.saturating_sub(rhs.insert_counts), + get_counts: self.get_counts.saturating_sub(rhs.get_counts), } } } diff --git a/crypto/txscript/src/data_stack.rs index 74988042ad..5d8ea18ed6 100644 --- a/crypto/txscript/src/data_stack.rs +++ b/crypto/txscript/src/data_stack.rs @@ -1,7 +1,6 @@ use crate::TxScriptError; use core::fmt::Debug; use core::iter; -use core::mem::size_of; const DEFAULT_SCRIPT_NUM_LEN: usize = 4; diff --git a/crypto/txscript/src/error.rs new file mode 100644 index 0000000000..7d45fb05e0 --- /dev/null +++ b/crypto/txscript/src/error.rs @@ -0,0 +1,89 @@ +use crate::script_builder; +use thiserror::Error; +use wasm_bindgen::{JsError, JsValue}; +use workflow_wasm::jserror::JsErrorData; + +#[derive(Debug, Error, Clone)] +pub enum Error { + #[error("{0}")] + Custom(String), + + #[error(transparent)] + JsValue(JsErrorData), + + #[error(transparent)] + Wasm(#[from] workflow_wasm::error::Error), + + #[error(transparent)] + ScriptBuilder(#[from] script_builder::ScriptBuilderError), + + #[error("{0}")] + ParseInt(#[from] std::num::ParseIntError), + + #[error(transparent)] + SerdeWasmBindgen(JsErrorData), + + #[error(transparent)] + NetworkType(#[from] kaspa_consensus_core::network::NetworkTypeError), + + #[error("Error converting property `{0}`: {1}")] + Convert(&'static str, String), + + #[error("Error processing JSON: {0}")] + SerdeJson(String), +} + +impl Error { + pub fn custom<T: Into<String>>(msg: T) -> Self { + Error::Custom(msg.into()) + } + + pub fn convert<S: std::fmt::Display>(prop: &'static str, msg: S) -> Self { + Self::Convert(prop, msg.to_string()) + } +} + +impl From<String> for Error { + fn from(err: String) -> Self { + Self::Custom(err) + } +} + +impl From<&str> for Error { + fn from(err: &str) -> Self { + Self::Custom(err.to_string()) + } +} + +impl From<Error> for JsValue { + fn from(value: Error) -> Self { + match value { + Error::JsValue(js_error_data) => js_error_data.into(), + _ => JsValue::from(value.to_string()), + } + } +} + +impl From<JsValue> for Error { + fn from(err: JsValue) -> Self { + Self::JsValue(err.into()) + } +} + +impl From<JsError> for Error { + fn from(err: JsError) -> Self { + Self::JsValue(err.into()) + } +} + +impl From<serde_json::Error> for Error { + fn from(err: serde_json::Error) -> Self { + Self::SerdeJson(err.to_string()) + } +} + +impl From<serde_wasm_bindgen::Error> for Error { + fn from(err: serde_wasm_bindgen::Error) -> Self { + Self::SerdeWasmBindgen(JsValue::from(err).into()) + } +} diff --git a/crypto/txscript/src/lib.rs index bcd234c550..2a00bd080f 100644 --- a/crypto/txscript/src/lib.rs +++ b/crypto/txscript/src/lib.rs @@ -3,10 +3,14 @@ extern crate core; pub mod caches; mod data_stack; +pub mod error; pub mod opcodes; +pub mod result; pub mod script_builder; pub mod script_class; pub mod standard; +#[cfg(feature = "wasm32-sdk")] +pub mod
wasm; use crate::caches::Cache; use crate::data_stack::{DataStack, Stack}; diff --git a/crypto/txscript/src/opcodes/mod.rs index 7b66da27f7..f2a92fa0b5 100644 --- a/crypto/txscript/src/opcodes/mod.rs +++ b/crypto/txscript/src/opcodes/mod.rs @@ -1,5 +1,3 @@ -use core::mem::size_of; - #[macro_use] mod macros; @@ -2759,7 +2757,7 @@ mod test { (1u64, vec![], false), // Case 1: 0 = locktime < txLockTime (0x800000, vec![0x7f, 0, 0], false), // Case 2: 0 < locktime < txLockTime (0x800000, vec![0x7f, 0, 0, 0, 0, 0, 0, 0, 0], true), // Case 3: locktime too big - (LOCK_TIME_THRESHOLD * 2, vec![0x7f, 0, 0, 0], true), // Case 4: lock times are inconsistant + (LOCK_TIME_THRESHOLD * 2, vec![0x7f, 0, 0, 0], true), // Case 4: lock times are inconsistent ] { let mut tx = base_tx.clone(); tx.0.lock_time = tx_lock_time; diff --git a/crypto/txscript/src/result.rs new file mode 100644 index 0000000000..4c8cb83f54 --- /dev/null +++ b/crypto/txscript/src/result.rs @@ -0,0 +1 @@ +pub type Result<T> = std::result::Result<T, crate::error::Error>; diff --git a/crypto/txscript/src/script_builder.rs index c7aa05fee7..731c47680e 100644 --- a/crypto/txscript/src/script_builder.rs +++ b/crypto/txscript/src/script_builder.rs @@ -5,6 +5,7 @@ use crate::{ opcodes::{codes::*, OP_1_NEGATE_VAL, OP_DATA_MAX_VAL, OP_DATA_MIN_VAL, OP_SMALL_INT_MAX_VAL}, MAX_SCRIPTS_SIZE, MAX_SCRIPT_ELEMENT_SIZE, }; +use hexplay::{HexView, HexViewBuilder}; use thiserror::Error; /// DEFAULT_SCRIPT_ALLOC is the default size used for the backing array @@ -69,7 +70,7 @@ impl ScriptBuilder { &self.script } - #[cfg(test)] + #[cfg(any(test, target_arch = "wasm32"))] pub fn extend(&mut self, data: &[u8]) { self.script.extend(data); } @@ -248,6 +249,16 @@ impl ScriptBuilder { let trimmed = &buffer[0..trimmed_size]; self.add_data(trimmed) } + + /// Returns a [`HexViewBuilder`] for the script + pub fn hex_view_builder(&self) -> HexViewBuilder<'_> { + HexViewBuilder::new(&self.script) + } + + /// Returns a ready-to-use [`HexView`] for the script + pub fn hex_view(&self, offset: usize, width: usize) -> HexView<'_> { + HexViewBuilder::new(&self.script).address_offset(offset).row_width(width).finish() + } } impl Default for ScriptBuilder { diff --git a/crypto/txscript/src/script_class.rs index 8e7a7796c4..ad61f30d89 100644 --- a/crypto/txscript/src/script_class.rs +++ b/crypto/txscript/src/script_class.rs @@ -17,6 +17,7 @@ pub enum Error { /// Standard classes of script payment in the blockDAG #[derive(PartialEq, Eq, Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[borsh(use_discriminant = true)] #[repr(u8)] pub enum ScriptClass { /// None of the recognized forms diff --git a/crypto/txscript/src/standard.rs index fb7eb455a3..3c0f12a181 100644 --- a/crypto/txscript/src/standard.rs +++ b/crypto/txscript/src/standard.rs @@ -100,9 +100,9 @@ pub mod test_helpers { (script_public_key, redeem_script) } - // Creates a transaction that spends the first output of provided transaction. - // Assumes that the output being spent has opTrueScript as it's scriptPublicKey. - // Creates the value of the spent output minus provided `fee` (in sompi). + /// Creates a transaction that spends the first output of the provided transaction. + /// Assumes that the output being spent has opTrueScript as its scriptPublicKey. + /// Sets the output value to the spent output's value minus the provided `fee` (in sompi).
pub fn create_transaction(tx_to_spend: &Transaction, fee: u64) -> Transaction { let (script_public_key, redeem_script) = op_true_script(); let signature_script = pay_to_script_hash_signature_script(redeem_script, vec![]).expect("the script is canonical"); @@ -111,6 +111,42 @@ pub mod test_helpers { let output = TransactionOutput::new(tx_to_spend.outputs[0].value - fee, script_public_key); Transaction::new(TX_VERSION, vec![input], vec![output], 0, SUBNETWORK_ID_NATIVE, 0, vec![]) } + + /// Creates a transaction that spends the outputs at the specified indexes (if they exist) of every provided transaction, optionally adding a change output. + /// Assumes that the outputs being spent have opTrueScript as their scriptPublicKey. + /// + /// If some change is provided, creates two outputs, the first with the value of the spent outputs minus `change` + /// and `fee` (in sompi) and the second with the `change` amount. + /// + /// If no change is provided, creates only one output with the value of the spent outputs minus the `fee` (in sompi) + pub fn create_transaction_with_change<'a>( + txs_to_spend: impl Iterator<Item = &'a Transaction>, + output_indexes: Vec<usize>, + change: Option<u64>, + fee: u64, + ) -> Transaction { + let (script_public_key, redeem_script) = op_true_script(); + let signature_script = pay_to_script_hash_signature_script(redeem_script, vec![]).expect("the script is canonical"); + let mut inputs_value: u64 = 0; + let mut inputs = vec![]; + for tx_to_spend in txs_to_spend { + for i in output_indexes.iter().copied() { + if i < tx_to_spend.outputs.len() { + let previous_outpoint = TransactionOutpoint::new(tx_to_spend.id(), i as u32); + inputs.push(TransactionInput::new(previous_outpoint, signature_script.clone(), MAX_TX_IN_SEQUENCE_NUM, 1)); + inputs_value += tx_to_spend.outputs[i].value; + } + } + } + let outputs = match change { + Some(change) => vec![ + TransactionOutput::new(inputs_value - fee - change, script_public_key.clone()), + TransactionOutput::new(change, script_public_key), + ], + None => vec![TransactionOutput::new(inputs_value - fee, script_public_key.clone())], + }; + Transaction::new(TX_VERSION, inputs, outputs, 0, SUBNETWORK_ID_NATIVE, 0, vec![]) + } } #[cfg(test)] diff --git a/crypto/txscript/src/wasm/builder.rs new file mode 100644 index 0000000000..57c6b8b4f4 --- /dev/null +++ b/crypto/txscript/src/wasm/builder.rs @@ -0,0 +1,179 @@ +use crate::result::Result; +use crate::{script_builder as native, standard}; +use kaspa_consensus_core::tx::ScriptPublicKey; +use kaspa_utils::hex::ToHex; +use kaspa_wasm_core::hex::{HexViewConfig, HexViewConfigT}; +use kaspa_wasm_core::types::{BinaryT, HexString}; +use std::cell::{Ref, RefCell, RefMut}; +use std::rc::Rc; +use wasm_bindgen::prelude::wasm_bindgen; +use workflow_wasm::prelude::*; + +/// ScriptBuilder provides a facility for building custom scripts. It allows +/// you to push opcodes, ints, and data while respecting canonical encoding. In +/// general it does not ensure the script will execute correctly, however any +/// data pushes which would exceed the maximum allowed script engine limits and +/// are therefore guaranteed not to execute will not be pushed and will result in +/// the Script function returning an error.
+/// @category Consensus +#[derive(Clone)] +#[wasm_bindgen(inspectable)] +pub struct ScriptBuilder { + script_builder: Rc<RefCell<native::ScriptBuilder>>, +} + +impl ScriptBuilder { + #[inline] + pub fn inner(&self) -> Ref<'_, native::ScriptBuilder> { + self.script_builder.borrow() + } + + #[inline] + pub fn inner_mut(&self) -> RefMut<'_, native::ScriptBuilder> { + self.script_builder.borrow_mut() + } } + +impl Default for ScriptBuilder { + fn default() -> Self { + Self { script_builder: Rc::new(RefCell::new(native::ScriptBuilder::new())) } + } +} + +#[wasm_bindgen] +impl ScriptBuilder { + #[wasm_bindgen(constructor)] + pub fn new() -> Self { + Self::default() + } + + /// Creates a new ScriptBuilder over an existing script. + /// Supplied script can be represented as a `Uint8Array` or a `HexString`. + #[wasm_bindgen(js_name = "fromScript")] + pub fn from_script(script: BinaryT) -> Result<ScriptBuilder> { + let builder = ScriptBuilder::default(); + let script = script.try_as_vec_u8()?; + builder.inner_mut().extend(&script); + + Ok(builder) + } + + /// Pushes the passed opcode to the end of the script. The script will not + /// be modified if pushing the opcode would cause the script to exceed the + /// maximum allowed script engine size. + #[wasm_bindgen(js_name = "addOp")] + pub fn add_op(&self, op: u8) -> Result<ScriptBuilder> { + let mut inner = self.inner_mut(); + inner.add_op(op)?; + + Ok(self.clone()) + } + + /// Adds the passed opcodes to the end of the script. + /// Supplied opcodes can be represented as a `Uint8Array` or a `HexString`. + #[wasm_bindgen(js_name = "addOps")] + pub fn add_ops(&self, opcodes: BinaryT) -> Result<ScriptBuilder> { + let opcodes = opcodes.try_as_vec_u8()?; + self.inner_mut().add_ops(&opcodes)?; + + Ok(self.clone()) + } + + /// AddData pushes the passed data to the end of the script. It automatically + /// chooses canonical opcodes depending on the length of the data. + /// + /// A zero length buffer will lead to a push of empty data onto the stack (Op0 = OpFalse) + /// and any push of data greater than [`MAX_SCRIPT_ELEMENT_SIZE`](kaspa_txscript::MAX_SCRIPT_ELEMENT_SIZE) will not modify + /// the script since that is not allowed by the script engine. + /// + /// Also, the script will not be modified if pushing the data would cause the script to + /// exceed the maximum allowed script engine size [`MAX_SCRIPTS_SIZE`](kaspa_txscript::MAX_SCRIPTS_SIZE). + #[wasm_bindgen(js_name = "addData")] + pub fn add_data(&self, data: BinaryT) -> Result<ScriptBuilder> { + let data = data.try_as_vec_u8()?; + + let mut inner = self.inner_mut(); + inner.add_data(&data)?; + + Ok(self.clone()) + } + + #[wasm_bindgen(js_name = "addI64")] + pub fn add_i64(&self, value: i64) -> Result<ScriptBuilder> { + let mut inner = self.inner_mut(); + inner.add_i64(value)?; + + Ok(self.clone()) + } + + #[wasm_bindgen(js_name = "addLockTime")] + pub fn add_lock_time(&self, lock_time: u64) -> Result<ScriptBuilder> { + let mut inner = self.inner_mut(); + inner.add_lock_time(lock_time)?; + + Ok(self.clone()) + } + + #[wasm_bindgen(js_name = "addSequence")] + pub fn add_sequence(&self, sequence: u64) -> Result<ScriptBuilder> { + let mut inner = self.inner_mut(); + inner.add_sequence(sequence)?; + + Ok(self.clone()) + } + + #[wasm_bindgen(js_name = "canonicalDataSize")] + pub fn canonical_data_size(data: BinaryT) -> Result<u32> { + let data = data.try_as_vec_u8()?; + let size = native::ScriptBuilder::canonical_data_size(&data) as u32; + + Ok(size) + } + + /// Get script bytes represented by a hex string.
+ #[wasm_bindgen(js_name = "toString")] + pub fn to_string_js(&self) -> HexString { + let inner = self.inner(); + + HexString::from(inner.script()) + } + + /// Drains (empties) the script builder, returning the + /// script bytes represented by a hex string. + pub fn drain(&self) -> HexString { + let mut inner = self.inner_mut(); + + HexString::from(inner.drain().as_slice()) + } + + /// Creates an equivalent pay-to-script-hash script. + /// Can be used to create a P2SH address. + /// @see {@link addressFromScriptPublicKey} + #[wasm_bindgen(js_name = "createPayToScriptHashScript")] + pub fn pay_to_script_hash_script(&self) -> ScriptPublicKey { + let inner = self.inner(); + let script = inner.script(); + + standard::pay_to_script_hash_script(script) + } + + /// Generates a signature script that fits a pay-to-script-hash script. + #[wasm_bindgen(js_name = "encodePayToScriptHashSignatureScript")] + pub fn pay_to_script_hash_signature_script(&self, signature: BinaryT) -> Result<HexString> { + let inner = self.inner(); + let script = inner.script(); + let signature = signature.try_as_vec_u8()?; + let generated_script = standard::pay_to_script_hash_signature_script(script.into(), signature)?; + + Ok(generated_script.to_hex().into()) + } + + #[wasm_bindgen(js_name = "hexView")] + pub fn hex_view(&self, args: Option<HexViewConfigT>) -> Result<String> { + let inner = self.inner(); + let script = inner.script(); + + let config = args.map(HexViewConfig::try_from).transpose()?.unwrap_or_default(); + Ok(config.build(script).to_string()) + } +} diff --git a/crypto/txscript/src/wasm/mod.rs new file mode 100644 index 0000000000..e88e580c7d --- /dev/null +++ b/crypto/txscript/src/wasm/mod.rs @@ -0,0 +1,15 @@ +//! +//! WASM32 bindings for the txscript framework components. +//! + +use cfg_if::cfg_if; + +cfg_if!
{ + if #[cfg(any(feature = "wasm32-sdk", feature = "wasm32-core"))] { + pub mod opcodes; + pub mod builder; + + pub use self::opcodes::*; + pub use self::builder::*; + } +} diff --git a/consensus/client/src/script.rs b/crypto/txscript/src/wasm/opcodes.rs similarity index 51% rename from consensus/client/src/script.rs rename to crypto/txscript/src/wasm/opcodes.rs index 7392b1d856..40492cc837 100644 --- a/consensus/client/src/script.rs +++ b/crypto/txscript/src/wasm/opcodes.rs @@ -1,20 +1,12 @@ -use std::cell::{Ref, RefCell, RefMut}; -use std::rc::Rc; +pub use wasm_bindgen::prelude::*; -use kaspa_wasm_core::types::{BinaryT, HexString}; - -use crate::imports::*; -use crate::result::Result; -use kaspa_txscript::script_builder as native; +/// Kaspa Transaction Script Opcodes +/// @see {@link ScriptBuilder} +/// @category Consensus +#[wasm_bindgen] +pub enum Opcodes { + OpFalse = 0x00, -#[wasm_bindgen(typescript_custom_section)] -const TS_SCRIPT_OPCODES: &'static str = r#" -/** - * Kaspa Transaction Script Opcodes - * @see {@link ScriptBuilder} - * @category Consensus - */ -export enum Opcode { OpData1 = 0x01, OpData2 = 0x02, OpData3 = 0x03, @@ -90,15 +82,17 @@ export enum Opcode { OpData73 = 0x49, OpData74 = 0x4a, OpData75 = 0x4b, + OpPushData1 = 0x4c, OpPushData2 = 0x4d, OpPushData4 = 0x4e, + Op1Negate = 0x4f, - /** - * Reserved - */ + OpReserved = 0x50, - Op1 = 0x51, + + OpTrue = 0x51, + Op2 = 0x52, Op3 = 0x53, Op4 = 0x54, @@ -114,27 +108,21 @@ export enum Opcode { Op14 = 0x5e, Op15 = 0x5f, Op16 = 0x60, + OpNop = 0x61, - /** - * Reserved - */ OpVer = 0x62, OpIf = 0x63, OpNotIf = 0x64, - /** - * Reserved - */ OpVerIf = 0x65, - /** - * Reserved - */ OpVerNotIf = 0x66, + OpElse = 0x67, OpEndIf = 0x68, OpVerify = 0x69, OpReturn = 0x6a, OpToAltStack = 0x6b, OpFromAltStack = 0x6c, + Op2Drop = 0x6d, Op2Dup = 0x6e, Op3Dup = 0x6f, @@ -148,88 +136,57 @@ export enum Opcode { OpNip = 0x77, OpOver = 0x78, OpPick = 0x79, + OpRoll = 0x7a, OpRot = 0x7b, OpSwap = 0x7c, OpTuck = 0x7d, - /** - * Disabled - */ + + /// Splice opcodes. OpCat = 0x7e, - /** - * Disabled - */ OpSubStr = 0x7f, - /** - * Disabled - */ OpLeft = 0x80, - /** - * Disabled - */ OpRight = 0x81, + OpSize = 0x82, - /** - * Disabled - */ + + /// Bitwise logic opcodes. OpInvert = 0x83, - /** - * Disabled - */ OpAnd = 0x84, - /** - * Disabled - */ OpOr = 0x85, - /** - * Disabled - */ OpXor = 0x86, + OpEqual = 0x87, OpEqualVerify = 0x88, + OpReserved1 = 0x89, OpReserved2 = 0x8a, + + /// Numeric related opcodes. Op1Add = 0x8b, Op1Sub = 0x8c, - /** - * Disabled - */ Op2Mul = 0x8d, - /** - * Disabled - */ Op2Div = 0x8e, OpNegate = 0x8f, OpAbs = 0x90, OpNot = 0x91, Op0NotEqual = 0x92, + OpAdd = 0x93, OpSub = 0x94, - /** - * Disabled - */ OpMul = 0x95, - /** - * Disabled - */ OpDiv = 0x96, - /** - * Disabled - */ OpMod = 0x97, - /** - * Disabled - */ OpLShift = 0x98, - /** - * Disabled - */ OpRShift = 0x99, + OpBoolAnd = 0x9a, OpBoolOr = 0x9b, + OpNumEqual = 0x9c, OpNumEqualVerify = 0x9d, OpNumNotEqual = 0x9e, + OpLessThan = 0x9f, OpGreaterThan = 0xa0, OpLessThanOrEqual = 0xa1, @@ -237,10 +194,16 @@ export enum Opcode { OpMin = 0xa3, OpMax = 0xa4, OpWithin = 0xa5, + + /// Undefined opcodes. OpUnknown166 = 0xa6, OpUnknown167 = 0xa7, - OpSha256 = 0xa8, + + /// Crypto opcodes. + OpSHA256 = 0xa8, + OpCheckMultiSigECDSA = 0xa9, + OpBlake2b = 0xaa, OpCheckSigECDSA = 0xab, OpCheckSig = 0xac, @@ -249,6 +212,8 @@ export enum Opcode { OpCheckMultiSigVerify = 0xaf, OpCheckLockTimeVerify = 0xb0, OpCheckSequenceVerify = 0xb1, + + /// Undefined opcodes. 
OpUnknown178 = 0xb2, OpUnknown179 = 0xb3, OpUnknown180 = 0xb4, @@ -321,6 +286,7 @@ export enum Opcode { OpUnknown247 = 0xf7, OpUnknown248 = 0xf8, OpUnknown249 = 0xf9, + OpSmallInteger = 0xfa, OpPubKeys = 0xfb, OpUnknown252 = 0xfc, @@ -328,130 +294,3 @@ export enum Opcode { OpPubKey = 0xfe, OpInvalidOpCode = 0xff, } - -"#; - -/// -/// ScriptBuilder provides a facility for building custom scripts. It allows -/// you to push opcodes, ints, and data while respecting canonical encoding. In -/// general it does not ensure the script will execute correctly, however any -/// data pushes which would exceed the maximum allowed script engine limits and -/// are therefore guaranteed not to execute will not be pushed and will result in -/// the Script function returning an error. -/// -/// @see {@link Opcode} -/// @category Consensus -#[derive(Clone)] -#[wasm_bindgen(inspectable)] -pub struct ScriptBuilder { - script_builder: Rc>, -} - -impl ScriptBuilder { - #[inline] - pub fn inner(&self) -> Ref<'_, native::ScriptBuilder> { - self.script_builder.borrow() - } - - #[inline] - pub fn inner_mut(&self) -> RefMut<'_, native::ScriptBuilder> { - self.script_builder.borrow_mut() - } -} - -impl Default for ScriptBuilder { - fn default() -> Self { - Self { script_builder: Rc::new(RefCell::new(kaspa_txscript::script_builder::ScriptBuilder::new())) } - } -} - -#[wasm_bindgen] -impl ScriptBuilder { - #[wasm_bindgen(constructor)] - pub fn new() -> Self { - Self::default() - } - - #[wasm_bindgen(getter)] - pub fn data(&self) -> HexString { - self.script() - } - - /// Get script bytes represented by a hex string. - pub fn script(&self) -> HexString { - let inner = self.inner(); - HexString::from(inner.script()) - } - - /// Drains (empties) the script builder, returning the - /// script bytes represented by a hex string. - pub fn drain(&self) -> HexString { - let mut inner = self.inner_mut(); - HexString::from(inner.drain().as_slice()) - } - - #[wasm_bindgen(js_name = canonicalDataSize)] - pub fn canonical_data_size(data: BinaryT) -> Result { - let data = data.try_as_vec_u8()?; - let size = native::ScriptBuilder::canonical_data_size(&data) as u32; - Ok(size) - } - - /// Pushes the passed opcode to the end of the script. The script will not - /// be modified if pushing the opcode would cause the script to exceed the - /// maximum allowed script engine size. - #[wasm_bindgen(js_name = addOp)] - pub fn add_op(&self, op: u8) -> Result { - let mut inner = self.inner_mut(); - inner.add_op(op)?; - Ok(self.clone()) - } - - /// Adds the passed opcodes to the end of the script. - /// Supplied opcodes can be represented as a `Uint8Array` or a `HexString`. - #[wasm_bindgen(js_name = "addOps")] - pub fn add_ops(&self, opcodes: JsValue) -> Result { - let opcodes = opcodes.try_as_vec_u8()?; - self.inner_mut().add_ops(&opcodes)?; - Ok(self.clone()) - } - - /// AddData pushes the passed data to the end of the script. It automatically - /// chooses canonical opcodes depending on the length of the data. - /// - /// A zero length buffer will lead to a push of empty data onto the stack (Op0 = OpFalse) - /// and any push of data greater than [`MAX_SCRIPT_ELEMENT_SIZE`](kaspa_txscript::MAX_SCRIPT_ELEMENT_SIZE) will not modify - /// the script since that is not allowed by the script engine. - /// - /// Also, the script will not be modified if pushing the data would cause the script to - /// exceed the maximum allowed script engine size [`MAX_SCRIPTS_SIZE`](kaspa_txscript::MAX_SCRIPTS_SIZE). 
- #[wasm_bindgen(js_name = addData)] - pub fn add_data(&self, data: BinaryT) -> Result { - let data = data.try_as_vec_u8()?; - - let mut inner = self.inner_mut(); - inner.add_data(&data)?; - Ok(self.clone()) - } - - #[wasm_bindgen(js_name = addI64)] - pub fn add_i64(&self, value: i64) -> Result { - let mut inner = self.inner_mut(); - inner.add_i64(value)?; - Ok(self.clone()) - } - - #[wasm_bindgen(js_name = addLockTime)] - pub fn add_lock_time(&self, lock_time: u64) -> Result { - let mut inner = self.inner_mut(); - inner.add_lock_time(lock_time)?; - Ok(self.clone()) - } - - #[wasm_bindgen(js_name = addSequence)] - pub fn add_sequence(&self, sequence: u64) -> Result { - let mut inner = self.inner_mut(); - inner.add_sequence(sequence)?; - Ok(self.clone()) - } -} diff --git a/database/src/registry.rs b/database/src/registry.rs index 9e1b129d6f..752efb97b3 100644 --- a/database/src/registry.rs +++ b/database/src/registry.rs @@ -95,8 +95,8 @@ mod tests { let prefix = DatabaseStorePrefixes::AcceptanceData; assert_eq!(&[prefix as u8], prefix.as_ref()); assert_eq!( - std::mem::size_of::(), - std::mem::size_of::(), + size_of::(), + size_of::(), "DatabaseStorePrefixes is expected to have the same memory layout as u8" ); } diff --git a/indexes/utxoindex/src/core/errors.rs b/indexes/utxoindex/src/core/errors.rs index 61aa877ab8..0e09989055 100644 --- a/indexes/utxoindex/src/core/errors.rs +++ b/indexes/utxoindex/src/core/errors.rs @@ -4,7 +4,7 @@ use thiserror::Error; use crate::IDENT; use kaspa_database::prelude::StoreError; -/// Errors originating from the [`UtxoIndex`]. +/// Errors originating from the [`UtxoIndex`](crate::UtxoIndex). #[derive(Error, Debug)] pub enum UtxoIndexError { #[error("[{IDENT}]: {0}")] @@ -14,5 +14,5 @@ pub enum UtxoIndexError { DBResetError(#[from] io::Error), } -/// Results originating from the [`UtxoIndex`]. +/// Results originating from the [`UtxoIndex`](crate::UtxoIndex). pub type UtxoIndexResult = Result; diff --git a/indexes/utxoindex/src/index.rs b/indexes/utxoindex/src/index.rs index b71935afa2..3b1bf2fe9d 100644 --- a/indexes/utxoindex/src/index.rs +++ b/indexes/utxoindex/src/index.rs @@ -21,7 +21,8 @@ use std::{ const RESYNC_CHUNK_SIZE: usize = 2048; //Increased from 1k (used in go-kaspad), for quicker resets, while still having a low memory footprint. -/// UtxoIndex indexes [`CompactUtxoEntryCollections`] by [`ScriptPublicKey`], commits them to its owns store, and emits changes. +/// UtxoIndex indexes `CompactUtxoEntryCollections` by [`ScriptPublicKey`](kaspa_consensus_core::tx::ScriptPublicKey), +/// commits them to its own store, and emits changes. /// Note: The UtxoIndex struct by itself is not thread safe, only correct usage of the supplied RwLock via `new` makes it so. /// Please follow guidelines found in the comments under `utxoindex::core::api::UtxoIndexApi` for proper thread safety. pub struct UtxoIndex { @@ -131,7 +132,7 @@ impl UtxoIndexApi for UtxoIndex { /// Deletes and reinstates the utxoindex database, syncing it from scratch via the consensus database. /// /// **Notes:** - /// 1) There is an implicit expectation that the consensus store must have [VirtualParent] tips. i.e. consensus database must be initiated. + /// 1) There is an implicit expectation that the consensus store must have VirtualParent tips, i.e., the consensus database must be initialized. /// 2) Resyncing while consensus notifies of utxo differences may result in a corrupted db.
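// A schematic of the chunked flow implemented by resync below. This is a hedged
// sketch: `delete_all`, `utxo_chunks`, `write_chunk`, and `set_tips` are
// illustrative names rather than the actual store API; only RESYNC_CHUNK_SIZE
// above is real.
//
// self.stores.delete_all()?;                               // drop the stale index
// for chunk in consensus.utxo_chunks(RESYNC_CHUNK_SIZE) {  // stream UTXOs in small batches
//     self.stores.write_chunk(chunk?)?;                    // low memory footprint per batch
// }
// self.stores.set_tips(consensus.virtual_parents())?;      // record the sync point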
fn resync(&mut self) -> UtxoIndexResult<()> { info!("Resyncing the utxoindex..."); diff --git a/indexes/utxoindex/src/stores/indexed_utxos.rs b/indexes/utxoindex/src/stores/indexed_utxos.rs index a96f5e46b7..c9bce2c717 100644 --- a/indexes/utxoindex/src/stores/indexed_utxos.rs +++ b/indexes/utxoindex/src/stores/indexed_utxos.rs @@ -11,7 +11,6 @@ use kaspa_index_core::indexed_utxos::BalanceByScriptPublicKey; use serde::{Deserialize, Serialize}; use std::collections::HashSet; use std::fmt::Display; -use std::mem::size_of; use std::sync::Arc; pub const VERSION_TYPE_SIZE: usize = size_of::(); // Const since we need to re-use this a few times. @@ -67,8 +66,7 @@ impl From for TransactionOutpoint { fn from(key: TransactionOutpointKey) -> Self { let transaction_id = Hash::from_slice(&key.0[..kaspa_hashes::HASH_SIZE]); let index = TransactionIndexType::from_le_bytes( - <[u8; std::mem::size_of::()]>::try_from(&key.0[kaspa_hashes::HASH_SIZE..]) - .expect("expected index size"), + <[u8; size_of::()]>::try_from(&key.0[kaspa_hashes::HASH_SIZE..]).expect("expected index size"), ); Self::new(transaction_id, index) } diff --git a/indexes/utxoindex/src/update_container.rs b/indexes/utxoindex/src/update_container.rs index 8555a02d41..96449dbffe 100644 --- a/indexes/utxoindex/src/update_container.rs +++ b/indexes/utxoindex/src/update_container.rs @@ -25,7 +25,7 @@ impl UtxoIndexChanges { } } - /// Add a [`UtxoDiff`] the the [`UtxoIndexChanges`] struct. + /// Add a [`UtxoDiff`] to the [`UtxoIndexChanges`] struct. pub fn update_utxo_diff(&mut self, utxo_diff: UtxoDiff) { let (to_add, mut to_remove) = (utxo_diff.add, utxo_diff.remove); @@ -53,7 +53,7 @@ impl UtxoIndexChanges { } } - /// Add a [`Vec<(TransactionOutpoint, UtxoEntry)>`] the the [`UtxoIndexChanges`] struct + /// Add a [`Vec<(TransactionOutpoint, UtxoEntry)>`] to the [`UtxoIndexChanges`] struct /// /// Note: This is meant to be used when resyncing. pub fn add_utxos_from_vector(&mut self, utxo_vector: Vec<(TransactionOutpoint, UtxoEntry)>) { diff --git a/kaspad/Cargo.toml b/kaspad/Cargo.toml index 9f3290a51c..15a408dad5 100644 --- a/kaspad/Cargo.toml +++ b/kaspad/Cargo.toml @@ -41,22 +41,25 @@ kaspa-utxoindex.workspace = true kaspa-wrpc-server.workspace = true async-channel.workspace = true +cfg-if.workspace = true clap.workspace = true dhat = { workspace = true, optional = true } -serde.workspace = true dirs.workspace = true futures-util.workspace = true log.workspace = true num_cpus.workspace = true rand.workspace = true rayon.workspace = true +serde.workspace = true tempfile.workspace = true thiserror.workspace = true tokio = { workspace = true, features = ["rt", "macros", "rt-multi-thread"] } workflow-log.workspace = true + toml = "0.8.10" serde_with = "3.7.0" [features] heap = ["dhat", "kaspa-alloc/heap"] devnet-prealloc = ["kaspa-consensus/devnet-prealloc"] +semaphore-trace = ["kaspa-utils/semaphore-trace"] diff --git a/kaspad/src/args.rs b/kaspad/src/args.rs index e116729159..56dd7c1de7 100644 --- a/kaspad/src/args.rs +++ b/kaspad/src/args.rs @@ -134,7 +134,7 @@ impl Default for Args { #[cfg(feature = "devnet-prealloc")] prealloc_address: None, #[cfg(feature = "devnet-prealloc")] - prealloc_amount: 1_000_000, + prealloc_amount: 10_000_000_000, disable_upnp: false, disable_dns_seeding: false, @@ -366,7 +366,7 @@ Setting to 0 prevents the preallocation and sets the maximum to {}, leading to 0 .long("ram-scale") .require_equals(true) .value_parser(clap::value_parser!(f64)) - .help("Apply a scale factor to memory allocation bounds.
Nodes with limited RAM (~4-8GB) should set this to ~0.3-0.5 respectively. Nodes with + .help("Apply a scale factor to memory allocation bounds. Nodes with limited RAM (~4-8GB) should set this to ~0.3-0.5 respectively. Nodes with a large RAM (~64GB) can set this value to ~3.0-4.0 and gain superior performance especially for syncing peers faster"), ) ; diff --git a/kaspad/src/daemon.rs b/kaspad/src/daemon.rs index 0950ad8fab..4175206eb5 100644 --- a/kaspad/src/daemon.rs +++ b/kaspad/src/daemon.rs @@ -6,14 +6,16 @@ use kaspa_consensus_core::{ errors::config::{ConfigError, ConfigResult}, }; use kaspa_consensus_notify::{root::ConsensusNotificationRoot, service::NotifyService}; -use kaspa_core::{core::Core, info, trace}; +use kaspa_core::{core::Core, debug, info}; use kaspa_core::{kaspad_env::version, task::tick::TickService}; use kaspa_database::prelude::CachePolicy; use kaspa_grpc_server::service::GrpcService; use kaspa_notify::{address::tracker::Tracker, subscription::context::SubscriptionContext}; use kaspa_rpc_service::service::RpcCoreService; use kaspa_txscript::caches::TxScriptCacheCounters; +use kaspa_utils::git; use kaspa_utils::networking::ContextualNetAddress; +use kaspa_utils::sysinfo::SystemInfo; use kaspa_utils_tower::counters::TowerConnectionCounters; use kaspa_addressmanager::AddressManager; @@ -161,7 +163,13 @@ impl Runtime { let log_dir = get_log_dir(args); // Initialize the logger - kaspa_core::log::init_logger(log_dir.as_deref(), &args.log_level); + cfg_if::cfg_if! { + if #[cfg(feature = "semaphore-trace")] { + kaspa_core::log::init_logger(log_dir.as_deref(), &format!("{},{}=debug", args.log_level, kaspa_utils::sync::semaphore_module_path())); + } else { + kaspa_core::log::init_logger(log_dir.as_deref(), &args.log_level); + } + }; // Configure the panic behavior // As we log the panic, we want to set it up after the logger @@ -227,7 +235,7 @@ pub fn create_core_with_runtime(runtime: &Runtime, args: &Args, fd_total_budget: let db_dir = app_dir.join(network.to_prefixed()).join(DEFAULT_DATA_DIR); // Print package name and version - info!("{} v{}", env!("CARGO_PKG_NAME"), version()); + info!("{} v{}", env!("CARGO_PKG_NAME"), git::with_short_hash(version())); assert!(!db_dir.to_str().unwrap().is_empty()); info!("Application directory: {}", app_dir.display()); @@ -392,16 +400,18 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm .with_tick_service(tick_service.clone()); let perf_monitor = if args.perf_metrics { let cb = move |counters: CountersSnapshot| { - trace!("[{}] {}", kaspa_perf_monitor::SERVICE_NAME, counters.to_process_metrics_display()); - trace!("[{}] {}", kaspa_perf_monitor::SERVICE_NAME, counters.to_io_metrics_display()); + debug!("[{}] {}", kaspa_perf_monitor::SERVICE_NAME, counters.to_process_metrics_display()); + debug!("[{}] {}", kaspa_perf_monitor::SERVICE_NAME, counters.to_io_metrics_display()); #[cfg(feature = "heap")] - trace!("[{}] heap stats: {:?}", kaspa_perf_monitor::SERVICE_NAME, dhat::HeapStats::get()); + debug!("[{}] heap stats: {:?}", kaspa_perf_monitor::SERVICE_NAME, dhat::HeapStats::get()); }; Arc::new(perf_monitor_builder.with_fetch_cb(cb).build()) } else { Arc::new(perf_monitor_builder.build()) }; + let system_info = SystemInfo::default(); + let notify_service = Arc::new(NotifyService::new(notification_root.clone(), notification_recv, subscription_context.clone())); let index_service: Option> = if args.utxoindex { // Use only a single thread for non-consensus databases @@ -419,15 +429,16 @@ do you confirm?
(answer y/n or pass --yes to the Kaspad command line to confirm let (address_manager, port_mapping_extender_svc) = AddressManager::new(config.clone(), meta_db, tick_service.clone()); - let mining_monitor = Arc::new(MiningMonitor::new(mining_counters.clone(), tx_script_cache_counters.clone(), tick_service.clone())); let mining_manager = MiningManagerProxy::new(Arc::new(MiningManager::new_with_extended_config( config.target_time_per_block, false, config.max_block_mass, config.ram_scale, config.block_template_cache_lifetime, - mining_counters, + mining_counters.clone(), ))); + let mining_monitor = + Arc::new(MiningMonitor::new(mining_manager.clone(), mining_counters, tx_script_cache_counters.clone(), tick_service.clone())); let flow_context = Arc::new(FlowContext::new( consensus_manager.clone(), @@ -465,6 +476,7 @@ do you confirm? (answer y/n or pass --yes to the Kaspad command line to confirm perf_monitor.clone(), p2p_tower_counters.clone(), grpc_tower_counters.clone(), + system_info, )); let grpc_service_broadcasters: usize = 3; // TODO: add a command line argument or derive from other arg/config/host-related fields let grpc_service = if !args.disable_grpc { diff --git a/math/src/uint.rs b/math/src/uint.rs index deb01496e2..4ecc1fe122 100644 --- a/math/src/uint.rs +++ b/math/src/uint.rs @@ -15,7 +15,7 @@ macro_rules! construct_uint { pub const MIN: Self = Self::ZERO; pub const MAX: Self = $name([u64::MAX; $n_words]); pub const BITS: u32 = $n_words * u64::BITS; - pub const BYTES: usize = $n_words * core::mem::size_of::(); + pub const BYTES: usize = $n_words * size_of::(); pub const LIMBS: usize = $n_words; #[inline] diff --git a/metrics/core/src/data.rs b/metrics/core/src/data.rs index c920f0a049..ce9dc72161 100644 --- a/metrics/core/src/data.rs +++ b/metrics/core/src/data.rs @@ -1,4 +1,7 @@ +use crate::error::Error; +use crate::result::Result; use borsh::{BorshDeserialize, BorshSerialize}; +use kaspa_rpc_core::GetMetricsResponse; use separator::{separated_float, separated_int, separated_uint_with_output, Separatable}; use serde::{Deserialize, Serialize}; use workflow_core::enums::Describe; @@ -37,10 +40,6 @@ impl MetricGroup { } impl MetricGroup { - pub fn iter() -> impl Iterator { - [MetricGroup::System, MetricGroup::Storage, MetricGroup::Connections, MetricGroup::Network].into_iter() - } - pub fn metrics(&self) -> impl Iterator { match self { MetricGroup::System => [ @@ -56,6 +55,7 @@ impl MetricGroup { Metric::NodeDiskIoReadPerSec, Metric::NodeDiskIoWriteBytes, Metric::NodeDiskIoWritePerSec, + Metric::NodeStorageSizeBytes, ] .as_slice() .iter(), @@ -127,7 +127,8 @@ impl From for MetricGroup { | Metric::NodeDiskIoReadBytes | Metric::NodeDiskIoWriteBytes | Metric::NodeDiskIoReadPerSec - | Metric::NodeDiskIoWritePerSec => MetricGroup::Storage, + | Metric::NodeDiskIoWritePerSec + | Metric::NodeStorageSizeBytes => MetricGroup::Storage, // -- Metric::NodeBorshLiveConnections | Metric::NodeBorshConnectionAttempts @@ -194,6 +195,7 @@ pub enum Metric { NodeDiskIoWriteBytes, NodeDiskIoReadPerSec, NodeDiskIoWritePerSec, + NodeStorageSizeBytes, // --- NodeActivePeers, NodeBorshLiveConnections, @@ -250,65 +252,6 @@ pub enum Metric { } impl Metric { - // TODO - this will be refactored at a later date - // as this requires changes and testing in /kos - pub fn group(&self) -> &'static str { - match self { - Metric::NodeCpuUsage - | Metric::NodeResidentSetSizeBytes - | Metric::NodeVirtualMemorySizeBytes - | Metric::NodeFileHandlesCount - | Metric::NodeDiskIoReadBytes - | Metric::NodeDiskIoWriteBytes - | 
Metric::NodeDiskIoReadPerSec - | Metric::NodeDiskIoWritePerSec - | Metric::NodeBorshLiveConnections - | Metric::NodeBorshConnectionAttempts - | Metric::NodeBorshHandshakeFailures - | Metric::NodeJsonLiveConnections - | Metric::NodeJsonConnectionAttempts - | Metric::NodeJsonHandshakeFailures - | Metric::NodeBorshBytesTx - | Metric::NodeBorshBytesRx - | Metric::NodeJsonBytesTx - | Metric::NodeJsonBytesRx - | Metric::NodeP2pBytesTx - | Metric::NodeP2pBytesRx - | Metric::NodeGrpcUserBytesTx - | Metric::NodeGrpcUserBytesRx - | Metric::NodeTotalBytesTx - | Metric::NodeTotalBytesRx - | Metric::NodeBorshBytesTxPerSecond - | Metric::NodeBorshBytesRxPerSecond - | Metric::NodeJsonBytesTxPerSecond - | Metric::NodeJsonBytesRxPerSecond - | Metric::NodeP2pBytesTxPerSecond - | Metric::NodeP2pBytesRxPerSecond - | Metric::NodeGrpcUserBytesTxPerSecond - | Metric::NodeGrpcUserBytesRxPerSecond - | Metric::NodeTotalBytesTxPerSecond - | Metric::NodeTotalBytesRxPerSecond - | Metric::NodeActivePeers => "system", - // -- - Metric::NodeBlocksSubmittedCount - | Metric::NodeHeadersProcessedCount - | Metric::NodeDependenciesProcessedCount - | Metric::NodeBodiesProcessedCount - | Metric::NodeTransactionsProcessedCount - | Metric::NodeChainBlocksProcessedCount - | Metric::NodeMassProcessedCount - | Metric::NodeDatabaseBlocksCount - | Metric::NodeDatabaseHeadersCount - | Metric::NetworkMempoolSize - | Metric::NetworkTransactionsPerSecond - | Metric::NetworkTipHashesCount - | Metric::NetworkDifficulty - | Metric::NetworkPastMedianTime - | Metric::NetworkVirtualParentHashesCount - | Metric::NetworkVirtualDaaScore => "kaspa", - } - } - pub fn is_key_performance_metric(&self) -> bool { matches!( self, @@ -362,6 +305,7 @@ impl Metric { Metric::NodeDiskIoWriteBytes => as_mb(f, si, short), Metric::NodeDiskIoReadPerSec => format!("{}/s", as_data_size(f, si)), Metric::NodeDiskIoWritePerSec => format!("{}/s", as_data_size(f, si)), + Metric::NodeStorageSizeBytes => as_gb(f, si, short), // -- Metric::NodeBorshLiveConnections => f.trunc().separated_string(), Metric::NodeBorshConnectionAttempts => f.trunc().separated_string(), @@ -425,6 +369,7 @@ impl Metric { Metric::NodeDiskIoWriteBytes => ("Storage Write", "Stor Write"), Metric::NodeDiskIoReadPerSec => ("Storage Read/s", "Stor Read"), Metric::NodeDiskIoWritePerSec => ("Storage Write/s", "Stor Write"), + Metric::NodeStorageSizeBytes => ("Storage Size", "Stor Size"), // -- Metric::NodeActivePeers => ("Active p2p Peers", "Peers"), Metric::NodeBorshLiveConnections => ("Borsh Active Connections", "Borsh Conn"), @@ -493,6 +438,7 @@ pub struct MetricsData { pub node_disk_io_write_bytes: u64, pub node_disk_io_read_per_sec: f32, pub node_disk_io_write_per_sec: f32, + pub node_storage_size_bytes: u64, // --- pub node_borsh_live_connections: u32, pub node_borsh_connection_attempts: u64, @@ -512,17 +458,6 @@ pub struct MetricsData { pub node_grpc_user_bytes_rx: u64, pub node_total_bytes_tx: u64, pub node_total_bytes_rx: u64, - - pub node_borsh_bytes_tx_per_second: u64, - pub node_borsh_bytes_rx_per_second: u64, - pub node_json_bytes_tx_per_second: u64, - pub node_json_bytes_rx_per_second: u64, - pub node_p2p_bytes_tx_per_second: u64, - pub node_p2p_bytes_rx_per_second: u64, - pub node_grpc_user_bytes_tx_per_second: u64, - pub node_grpc_user_bytes_rx_per_second: u64, - pub node_total_bytes_tx_per_second: u64, - pub node_total_bytes_rx_per_second: u64, // --- pub node_blocks_submitted_count: u64, pub node_headers_processed_count: u64, @@ -549,6 +484,87 @@ impl MetricsData { } } +impl TryFrom for 
MetricsData { + type Error = Error; + fn try_from(response: GetMetricsResponse) -> Result { + let GetMetricsResponse { + server_time, + consensus_metrics, + connection_metrics, + bandwidth_metrics, + process_metrics, + storage_metrics, + custom_metrics: _, + } = response; //rpc.get_metrics(true, true, true, true, true, false).await?; + + let consensus_metrics = consensus_metrics.ok_or(Error::MissingData("Consensus Metrics"))?; + let connection_metrics = connection_metrics.ok_or(Error::MissingData("Connection Metrics"))?; + let bandwidth_metrics = bandwidth_metrics.ok_or(Error::MissingData("Bandwidth Metrics"))?; + let process_metrics = process_metrics.ok_or(Error::MissingData("Process Metrics"))?; + let storage_metrics = storage_metrics.ok_or(Error::MissingData("Storage Metrics"))?; + + Ok(MetricsData { + unixtime_millis: server_time as f64, + + node_blocks_submitted_count: consensus_metrics.node_blocks_submitted_count, + node_headers_processed_count: consensus_metrics.node_headers_processed_count, + node_dependencies_processed_count: consensus_metrics.node_dependencies_processed_count, + node_bodies_processed_count: consensus_metrics.node_bodies_processed_count, + node_transactions_processed_count: consensus_metrics.node_transactions_processed_count, + node_chain_blocks_processed_count: consensus_metrics.node_chain_blocks_processed_count, + node_mass_processed_count: consensus_metrics.node_mass_processed_count, + // -- + node_database_blocks_count: consensus_metrics.node_database_blocks_count, + node_database_headers_count: consensus_metrics.node_database_headers_count, + network_mempool_size: consensus_metrics.network_mempool_size, + network_tip_hashes_count: consensus_metrics.network_tip_hashes_count, + network_difficulty: consensus_metrics.network_difficulty, + network_past_median_time: consensus_metrics.network_past_median_time, + network_virtual_parent_hashes_count: consensus_metrics.network_virtual_parent_hashes_count, + network_virtual_daa_score: consensus_metrics.network_virtual_daa_score, + + node_borsh_live_connections: connection_metrics.borsh_live_connections, + node_borsh_connection_attempts: connection_metrics.borsh_connection_attempts, + node_borsh_handshake_failures: connection_metrics.borsh_handshake_failures, + node_json_live_connections: connection_metrics.json_live_connections, + node_json_connection_attempts: connection_metrics.json_connection_attempts, + node_json_handshake_failures: connection_metrics.json_handshake_failures, + node_active_peers: connection_metrics.active_peers, + + node_borsh_bytes_tx: bandwidth_metrics.borsh_bytes_tx, + node_borsh_bytes_rx: bandwidth_metrics.borsh_bytes_rx, + node_json_bytes_tx: bandwidth_metrics.json_bytes_tx, + node_json_bytes_rx: bandwidth_metrics.json_bytes_rx, + node_p2p_bytes_tx: bandwidth_metrics.p2p_bytes_tx, + node_p2p_bytes_rx: bandwidth_metrics.p2p_bytes_rx, + node_grpc_user_bytes_tx: bandwidth_metrics.grpc_bytes_tx, + node_grpc_user_bytes_rx: bandwidth_metrics.grpc_bytes_rx, + + node_total_bytes_tx: bandwidth_metrics.borsh_bytes_tx + + bandwidth_metrics.json_bytes_tx + + bandwidth_metrics.p2p_bytes_tx + + bandwidth_metrics.grpc_bytes_tx, + + node_total_bytes_rx: bandwidth_metrics.borsh_bytes_rx + + bandwidth_metrics.json_bytes_rx + + bandwidth_metrics.p2p_bytes_rx + + bandwidth_metrics.grpc_bytes_rx, + + node_resident_set_size_bytes: process_metrics.resident_set_size, + node_virtual_memory_size_bytes: process_metrics.virtual_memory_size, + node_cpu_cores: process_metrics.core_num, + node_cpu_usage: 
process_metrics.cpu_usage, + node_file_handles: process_metrics.fd_num, + node_disk_io_read_bytes: process_metrics.disk_io_read_bytes, + node_disk_io_write_bytes: process_metrics.disk_io_write_bytes, + node_disk_io_read_per_sec: process_metrics.disk_io_read_per_sec, + node_disk_io_write_per_sec: process_metrics.disk_io_write_per_sec, + + node_storage_size_bytes: storage_metrics.storage_size_bytes, + }) + } +} + #[derive(Default, Debug, Clone, BorshDeserialize, BorshSerialize, Serialize, Deserialize)] pub struct MetricsSnapshot { pub data: MetricsData, @@ -615,6 +631,8 @@ pub struct MetricsSnapshot { pub network_past_median_time: f64, pub network_virtual_parent_hashes_count: f64, pub network_virtual_daa_score: f64, + // --- + pub node_storage_size_bytes: f64, } impl MetricsSnapshot { @@ -629,6 +647,7 @@ impl MetricsSnapshot { Metric::NodeDiskIoWriteBytes => self.node_disk_io_write_bytes, Metric::NodeDiskIoReadPerSec => self.node_disk_io_read_per_sec, Metric::NodeDiskIoWritePerSec => self.node_disk_io_write_per_sec, + Metric::NodeStorageSizeBytes => self.node_storage_size_bytes, // --- Metric::NodeActivePeers => self.node_active_peers, Metric::NodeBorshLiveConnections => self.node_borsh_active_connections, @@ -725,6 +744,7 @@ impl From<(&MetricsData, &MetricsData)> for MetricsSnapshot { node_disk_io_write_bytes: b.node_disk_io_write_bytes as f64, node_disk_io_read_per_sec: b.node_disk_io_read_per_sec as f64, node_disk_io_write_per_sec: b.node_disk_io_write_per_sec as f64, + node_storage_size_bytes: b.node_storage_size_bytes as f64, // --- node_borsh_active_connections: b.node_borsh_live_connections as f64, node_borsh_connection_attempts: b.node_borsh_connection_attempts as f64, @@ -843,7 +863,7 @@ pub fn as_data_size(bytes: f64, si: bool) -> String { } /// Format supplied value as a float with 2 decimal places. -fn format_as_float(f: f64, short: bool) -> String { +pub fn format_as_float(f: f64, short: bool) -> String { if short { if f < 1000.0 { format_with_precision(f) diff --git a/metrics/core/src/error.rs b/metrics/core/src/error.rs index e31142a76b..4c8a441f8d 100644 --- a/metrics/core/src/error.rs +++ b/metrics/core/src/error.rs @@ -6,6 +6,9 @@ pub enum Error { #[error("{0}")] Custom(String), + #[error("Missing metrics data `{0}`")] + MissingData(&'static str), + #[error(transparent)] RpcError(#[from] RpcError), } diff --git a/metrics/core/src/lib.rs b/metrics/core/src/lib.rs index 4a3ca2a0fc..53519b0f0c 100644 --- a/metrics/core/src/lib.rs +++ b/metrics/core/src/lib.rs @@ -6,7 +6,7 @@ pub use data::{Metric, MetricGroup, MetricsData, MetricsSnapshot}; use crate::result::Result; use futures::{pin_mut, select, FutureExt, StreamExt}; -use kaspa_rpc_core::{api::rpc::RpcApi, GetMetricsResponse}; +use kaspa_rpc_core::api::rpc::RpcApi; use std::{ future::Future, pin::Pin, @@ -74,6 +74,8 @@ impl Metrics { let interval = interval(Duration::from_secs(1)); pin_mut!(interval); + let mut first = true; + loop { select! 
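// Note on the reworked sampling loop below: the first successful sample has no
// predecessor to diff against, so publishing a MetricsSnapshot computed against
// the zero-initialized MetricsData would report bogus per-second rates. The new
// `first` flag stores the initial sample and lets the sink start receiving
// snapshots only from the second tick onward.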
{ _ = task_ctl_receiver.recv().fuse() => { @@ -81,23 +83,29 @@ impl Metrics { }, _ = interval.next().fuse() => { - let last_metrics_data = current_metrics_data; - current_metrics_data = MetricsData::new(unixtime_as_millis_f64()); - if let Some(rpc) = this.rpc() { - if let Err(err) = this.sample_metrics(rpc.clone(), &mut current_metrics_data).await { - log_trace!("Metrics::sample_metrics() error: {}", err); + match this.sample_metrics(rpc.clone()).await { + Ok(incoming_data) => { + let last_metrics_data = current_metrics_data; + current_metrics_data = incoming_data; + this.data.lock().unwrap().replace(current_metrics_data.clone()); + + if first { + first = false; + } else if let Some(sink) = this.sink() { + let snapshot = MetricsSnapshot::from((&last_metrics_data, ¤t_metrics_data)); + if let Some(future) = sink(snapshot) { + future.await.ok(); + } + } + + } + Err(err) => { + log_trace!("Metrics::sample_metrics() error: {}", err); + } } } - this.data.lock().unwrap().replace(current_metrics_data.clone()); - - if let Some(sink) = this.sink() { - let snapshot = MetricsSnapshot::from((&last_metrics_data, ¤t_metrics_data)); - if let Some(future) = sink(snapshot) { - future.await.ok(); - } - } } } } @@ -112,74 +120,7 @@ impl Metrics { Ok(()) } - // --- samplers - - async fn sample_metrics(self: &Arc, rpc: Arc, data: &mut MetricsData) -> Result<()> { - let GetMetricsResponse { server_time: _, consensus_metrics, connection_metrics, bandwidth_metrics, process_metrics } = - rpc.get_metrics(true, true, true, true).await?; - - if let Some(consensus_metrics) = consensus_metrics { - data.node_blocks_submitted_count = consensus_metrics.node_blocks_submitted_count; - data.node_headers_processed_count = consensus_metrics.node_headers_processed_count; - data.node_dependencies_processed_count = consensus_metrics.node_dependencies_processed_count; - data.node_bodies_processed_count = consensus_metrics.node_bodies_processed_count; - data.node_transactions_processed_count = consensus_metrics.node_transactions_processed_count; - data.node_chain_blocks_processed_count = consensus_metrics.node_chain_blocks_processed_count; - data.node_mass_processed_count = consensus_metrics.node_mass_processed_count; - // -- - data.node_database_blocks_count = consensus_metrics.node_database_blocks_count; - data.node_database_headers_count = consensus_metrics.node_database_headers_count; - data.network_mempool_size = consensus_metrics.network_mempool_size; - data.network_tip_hashes_count = consensus_metrics.network_tip_hashes_count; - data.network_difficulty = consensus_metrics.network_difficulty; - data.network_past_median_time = consensus_metrics.network_past_median_time; - data.network_virtual_parent_hashes_count = consensus_metrics.network_virtual_parent_hashes_count; - data.network_virtual_daa_score = consensus_metrics.network_virtual_daa_score; - } - - if let Some(connection_metrics) = connection_metrics { - data.node_borsh_live_connections = connection_metrics.borsh_live_connections; - data.node_borsh_connection_attempts = connection_metrics.borsh_connection_attempts; - data.node_borsh_handshake_failures = connection_metrics.borsh_handshake_failures; - data.node_json_live_connections = connection_metrics.json_live_connections; - data.node_json_connection_attempts = connection_metrics.json_connection_attempts; - data.node_json_handshake_failures = connection_metrics.json_handshake_failures; - data.node_active_peers = connection_metrics.active_peers; - } - - if let Some(bandwidth_metrics) = bandwidth_metrics { - 
data.node_borsh_bytes_tx = bandwidth_metrics.borsh_bytes_tx; - data.node_borsh_bytes_rx = bandwidth_metrics.borsh_bytes_rx; - data.node_json_bytes_tx = bandwidth_metrics.json_bytes_tx; - data.node_json_bytes_rx = bandwidth_metrics.json_bytes_rx; - data.node_p2p_bytes_tx = bandwidth_metrics.p2p_bytes_tx; - data.node_p2p_bytes_rx = bandwidth_metrics.p2p_bytes_rx; - data.node_grpc_user_bytes_tx = bandwidth_metrics.grpc_bytes_tx; - data.node_grpc_user_bytes_rx = bandwidth_metrics.grpc_bytes_rx; - - data.node_total_bytes_tx = bandwidth_metrics.borsh_bytes_tx - + bandwidth_metrics.json_bytes_tx - + bandwidth_metrics.p2p_bytes_tx - + bandwidth_metrics.grpc_bytes_tx; - - data.node_total_bytes_rx = bandwidth_metrics.borsh_bytes_rx - + bandwidth_metrics.json_bytes_rx - + bandwidth_metrics.p2p_bytes_rx - + bandwidth_metrics.grpc_bytes_rx; - } - - if let Some(process_metrics) = process_metrics { - data.node_resident_set_size_bytes = process_metrics.resident_set_size; - data.node_virtual_memory_size_bytes = process_metrics.virtual_memory_size; - data.node_cpu_cores = process_metrics.core_num; - data.node_cpu_usage = process_metrics.cpu_usage; - data.node_file_handles = process_metrics.fd_num; - data.node_disk_io_read_bytes = process_metrics.disk_io_read_bytes; - data.node_disk_io_write_bytes = process_metrics.disk_io_write_bytes; - data.node_disk_io_read_per_sec = process_metrics.disk_io_read_per_sec; - data.node_disk_io_write_per_sec = process_metrics.disk_io_write_per_sec; - } - - Ok(()) + async fn sample_metrics(self: &Arc, rpc: Arc) -> Result { + MetricsData::try_from(rpc.get_metrics(true, true, true, true, true, false).await?) } } diff --git a/mining/Cargo.toml b/mining/Cargo.toml index facd45d6a4..0c7eb25251 100644 --- a/mining/Cargo.toml +++ b/mining/Cargo.toml @@ -27,8 +27,9 @@ parking_lot.workspace = true rand.workspace = true serde.workspace = true smallvec.workspace = true +sweep-bptree = "0.4.1" thiserror.workspace = true -tokio = { workspace = true, features = [ "rt-multi-thread", "macros", "signal" ] } +tokio = { workspace = true, features = ["rt-multi-thread", "macros", "signal"] } [dev-dependencies] kaspa-txscript.workspace = true diff --git a/mining/benches/bench.rs b/mining/benches/bench.rs index 59ff685dd5..16cfcc234f 100644 --- a/mining/benches/bench.rs +++ b/mining/benches/bench.rs @@ -1,6 +1,16 @@ use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use kaspa_mining::model::topological_index::TopologicalIndex; -use std::collections::{hash_set::Iter, HashMap, HashSet}; +use itertools::Itertools; +use kaspa_consensus_core::{ + subnets::SUBNETWORK_ID_NATIVE, + tx::{Transaction, TransactionInput, TransactionOutpoint}, +}; +use kaspa_hashes::{HasherBase, TransactionID}; +use kaspa_mining::{model::topological_index::TopologicalIndex, FeerateTransactionKey, Frontier, Policy}; +use rand::{thread_rng, Rng}; +use std::{ + collections::{hash_set::Iter, HashMap, HashSet}, + sync::Arc, +}; #[derive(Default)] pub struct Dag @@ -68,5 +78,211 @@ pub fn bench_compare_topological_index_fns(c: &mut Criterion) { group.finish(); } -criterion_group!(benches, bench_compare_topological_index_fns); +fn generate_unique_tx(i: u64) -> Arc { + let mut hasher = TransactionID::new(); + let prev = hasher.update(i.to_le_bytes()).clone().finalize(); + let input = TransactionInput::new(TransactionOutpoint::new(prev, 0), vec![], 0, 0); + Arc::new(Transaction::new(0, vec![input], vec![], 0, SUBNETWORK_ID_NATIVE, 0, vec![])) +} + +fn build_feerate_key(fee: u64, mass: u64, id: u64) -> 
FeerateTransactionKey { + FeerateTransactionKey::new(fee, mass, generate_unique_tx(id)) +} + +pub fn bench_mempool_sampling(c: &mut Criterion) { + let mut rng = thread_rng(); + let mut group = c.benchmark_group("mempool sampling"); + let cap = 1_000_000; + let mut map = HashMap::with_capacity(cap); + for i in 0..cap as u64 { + let fee: u64 = if i % (cap as u64 / 100000) == 0 { 1000000 } else { rng.gen_range(1..10000) }; + let mass: u64 = 1650; + let key = build_feerate_key(fee, mass, i); + map.insert(key.tx.id(), key); + } + + let len = cap; + let mut frontier = Frontier::default(); + for item in map.values().take(len).cloned() { + frontier.insert(item).then_some(()).unwrap(); + } + group.bench_function("mempool one-shot sample", |b| { + b.iter(|| { + black_box({ + let selected = frontier.sample_inplace(&mut rng, &Policy::new(500_000), &mut 0); + selected.iter().map(|k| k.mass).sum::() + }) + }) + }); + + // Benchmark frontier insertions and removals (see comparisons below) + let remove = map.values().take(map.len() / 10).cloned().collect_vec(); + group.bench_function("frontier remove/add", |b| { + b.iter(|| { + black_box({ + for r in remove.iter() { + frontier.remove(r).then_some(()).unwrap(); + } + for r in remove.iter().cloned() { + frontier.insert(r).then_some(()).unwrap(); + } + 0 + }) + }) + }); + + // Benchmark hashmap insertions and removals for comparison + let remove = map.iter().take(map.len() / 10).map(|(&k, v)| (k, v.clone())).collect_vec(); + group.bench_function("map remove/add", |b| { + b.iter(|| { + black_box({ + for r in remove.iter() { + map.remove(&r.0).unwrap(); + } + for r in remove.iter().cloned() { + map.insert(r.0, r.1.clone()); + } + 0 + }) + }) + }); + + // Benchmark std btree set insertions and removals for comparison + // Results show that frontier (sweep bptree) and std btree set are roughly the same. 
+ // The slightly higher cost for sweep bptree should be attributed to subtree weight + // maintenance (see FeerateWeight) + #[allow(clippy::mutable_key_type)] + let mut std_btree = std::collections::BTreeSet::from_iter(map.values().cloned()); + let remove = map.iter().take(map.len() / 10).map(|(&k, v)| (k, v.clone())).collect_vec(); + group.bench_function("std btree remove/add", |b| { + b.iter(|| { + black_box({ + for (_, key) in remove.iter() { + std_btree.remove(key).then_some(()).unwrap(); + } + for (_, key) in remove.iter() { + std_btree.insert(key.clone()); + } + 0 + }) + }) + }); + group.finish(); +} + +pub fn bench_mempool_selectors(c: &mut Criterion) { + let mut rng = thread_rng(); + let mut group = c.benchmark_group("mempool selectors"); + let cap = 1_000_000; + let mut map = HashMap::with_capacity(cap); + for i in 0..cap as u64 { + let fee: u64 = rng.gen_range(1..1000000); + let mass: u64 = 1650; + let key = build_feerate_key(fee, mass, i); + map.insert(key.tx.id(), key); + } + + for len in [100, 300, 350, 500, 1000, 2000, 5000, 10_000, 100_000, 500_000, 1_000_000].into_iter().rev() { + let mut frontier = Frontier::default(); + for item in map.values().take(len).cloned() { + frontier.insert(item).then_some(()).unwrap(); + } + + group.bench_function(format!("rebalancing selector ({})", len), |b| { + b.iter(|| { + black_box({ + let mut selector = frontier.build_rebalancing_selector(); + selector.select_transactions().iter().map(|k| k.gas).sum::() + }) + }) + }); + + let mut collisions = 0; + let mut n = 0; + + group.bench_function(format!("sample inplace selector ({})", len), |b| { + b.iter(|| { + black_box({ + let mut selector = frontier.build_selector_sample_inplace(&mut collisions); + n += 1; + selector.select_transactions().iter().map(|k| k.gas).sum::() + }) + }) + }); + + if n > 0 { + println!("---------------------- \n Avg collisions: {}", collisions / n); + } + + if frontier.total_mass() <= 500_000 { + group.bench_function(format!("take all selector ({})", len), |b| { + b.iter(|| { + black_box({ + let mut selector = frontier.build_selector_take_all(); + selector.select_transactions().iter().map(|k| k.gas).sum::() + }) + }) + }); + } + + group.bench_function(format!("dynamic selector ({})", len), |b| { + b.iter(|| { + black_box({ + let mut selector = frontier.build_selector(&Policy::new(500_000)); + selector.select_transactions().iter().map(|k| k.gas).sum::() + }) + }) + }); + } + + group.finish(); +} + +pub fn bench_inplace_sampling_worst_case(c: &mut Criterion) { + let mut group = c.benchmark_group("mempool inplace sampling"); + let max_fee = u64::MAX; + let fee_steps = (0..10).map(|i| max_fee / 100u64.pow(i)).collect_vec(); + for subgroup_size in [300, 200, 100, 80, 50, 30] { + let cap = 1_000_000; + let mut map = HashMap::with_capacity(cap); + for i in 0..cap as u64 { + let fee: u64 = if i < 300 { fee_steps[i as usize / subgroup_size] } else { 1 }; + let mass: u64 = 1650; + let key = build_feerate_key(fee, mass, i); + map.insert(key.tx.id(), key); + } + + let mut frontier = Frontier::default(); + for item in map.values().cloned() { + frontier.insert(item).then_some(()).unwrap(); + } + + let mut collisions = 0; + let mut n = 0; + + group.bench_function(format!("inplace sampling worst case (subgroup size: {})", subgroup_size), |b| { + b.iter(|| { + black_box({ + let mut selector = frontier.build_selector_sample_inplace(&mut collisions); + n += 1; + selector.select_transactions().iter().map(|k| k.gas).sum::() + }) + }) + }); + + if n > 0 { + println!("---------------------- 
\n Avg collisions: {}", collisions / n); + } + } + + group.finish(); +} + +criterion_group!( + benches, + bench_mempool_sampling, + bench_mempool_selectors, + bench_inplace_sampling_worst_case, + bench_compare_topological_index_fns +); criterion_main!(benches); diff --git a/mining/errors/src/mempool.rs b/mining/errors/src/mempool.rs index e33737f9df..319aaa4845 100644 --- a/mining/errors/src/mempool.rs +++ b/mining/errors/src/mempool.rs @@ -4,7 +4,7 @@ use kaspa_consensus_core::{ }; use thiserror::Error; -#[derive(Error, Debug, Clone)] +#[derive(Error, Debug, Clone, PartialEq, Eq)] pub enum RuleError { /// A consensus transaction rule error /// @@ -24,12 +24,18 @@ pub enum RuleError { #[error("transaction {0} is already in the mempool")] RejectDuplicate(TransactionId), - #[error("output {0} already spent by transaction {1} in the memory pool")] + #[error("output {0} already spent by transaction {1} in the mempool")] RejectDoubleSpendInMempool(TransactionOutpoint, TransactionId), - /// New behavior: a transaction is rejected if the mempool is full - #[error("number of high-priority transactions in mempool ({0}) has reached the maximum allowed ({1})")] - RejectMempoolIsFull(usize, u64), + #[error("replace by fee found no double spending transaction in the mempool")] + RejectRbfNoDoubleSpend, + + #[error("replace by fee found more than one double spending transaction in the mempool")] + RejectRbfTooManyDoubleSpendingTransactions, + + /// a transaction is rejected if the mempool is full + #[error("transaction could not be added to the mempool because it's full with transactions with higher priority")] + RejectMempoolIsFull, /// An error emitted by mining\src\mempool\check_transaction_standard.rs #[error("transaction {0} is not standard: {1}")] @@ -95,7 +101,7 @@ impl From for RuleError { pub type RuleResult = std::result::Result; -#[derive(Error, Debug, Clone)] +#[derive(Error, Debug, Clone, PartialEq, Eq)] pub enum NonStandardError { #[error("transaction version {1} is not in the valid range of {2}-{3}")] RejectVersion(TransactionId, u16, u16, u16), diff --git a/mining/src/block_template/builder.rs b/mining/src/block_template/builder.rs index de3428a745..6f0dbe6743 100644 --- a/mining/src/block_template/builder.rs +++ b/mining/src/block_template/builder.rs @@ -1,25 +1,17 @@ -use super::{errors::BuilderResult, policy::Policy}; -use crate::{block_template::selector::TransactionsSelector, model::candidate_tx::CandidateTransaction}; +use super::errors::BuilderResult; use kaspa_consensus_core::{ api::ConsensusApi, - block::{BlockTemplate, TemplateBuildMode}, + block::{BlockTemplate, TemplateBuildMode, TemplateTransactionSelector}, coinbase::MinerData, - merkle::calc_hash_merkle_root, tx::COINBASE_TRANSACTION_INDEX, }; -use kaspa_core::{ - debug, - time::{unix_now, Stopwatch}, -}; +use kaspa_core::time::{unix_now, Stopwatch}; -pub(crate) struct BlockTemplateBuilder { - policy: Policy, -} +pub(crate) struct BlockTemplateBuilder {} impl BlockTemplateBuilder { - pub(crate) fn new(max_block_mass: u64) -> Self { - let policy = Policy::new(max_block_mass); - Self { policy } + pub(crate) fn new() -> Self { + Self {} } /// BuildBlockTemplate creates a block template for a miner to consume @@ -89,12 +81,10 @@ impl BlockTemplateBuilder { &self, consensus: &dyn ConsensusApi, miner_data: &MinerData, - transactions: Vec, + selector: Box, build_mode: TemplateBuildMode, ) -> BuilderResult { let _sw = Stopwatch::<20>::with_threshold("build_block_template op"); - debug!("Considering {} transactions for a new block 
template", transactions.len()); - let selector = Box::new(TransactionsSelector::new(self.policy.clone(), transactions)); Ok(consensus.build_block_template(miner_data.clone(), selector, build_mode)?) } @@ -115,7 +105,8 @@ impl BlockTemplateBuilder { coinbase_tx.outputs.last_mut().unwrap().script_public_key = new_miner_data.script_public_key.clone(); } // Update the hash merkle root according to the modified transactions - block_template.block.header.hash_merkle_root = calc_hash_merkle_root(block_template.block.transactions.iter()); + block_template.block.header.hash_merkle_root = + consensus.calc_transaction_hash_merkle_root(&block_template.block.transactions, block_template.block.header.daa_score); let new_timestamp = unix_now(); if new_timestamp > block_template.block.header.timestamp { // Only if new time stamp is later than current, update the header. Otherwise, diff --git a/mining/src/block_template/model/tx.rs b/mining/src/block_template/model/tx.rs index b0c7e3f56e..65493e63b2 100644 --- a/mining/src/block_template/model/tx.rs +++ b/mining/src/block_template/model/tx.rs @@ -73,7 +73,8 @@ impl CandidateList { /// * tx1: start 0, end 100 /// * tx2: start 100, end 105 /// * tx3: start 105, end 2000 - /// And r=102, then find will return tx2. + /// + /// And r=102, then [`CandidateList::find`] will return tx2. pub(crate) fn find(&self, r: f64) -> usize { let mut min = 0; let mut max = self.candidates.len() - 1; diff --git a/mining/src/block_template/policy.rs b/mining/src/block_template/policy.rs index ff51972559..12ee98e28a 100644 --- a/mining/src/block_template/policy.rs +++ b/mining/src/block_template/policy.rs @@ -1,14 +1,14 @@ /// Policy houses the policy (configuration parameters) which is used to control /// the generation of block templates. See the documentation for -/// NewBlockTemplate for more details on each of these parameters are used. +/// NewBlockTemplate for more details on how each of these parameters are used. #[derive(Clone)] -pub(crate) struct Policy { +pub struct Policy { /// max_block_mass is the maximum block mass to be used when generating a block template. pub(crate) max_block_mass: u64, } impl Policy { - pub(crate) fn new(max_block_mass: u64) -> Self { + pub fn new(max_block_mass: u64) -> Self { Self { max_block_mass } } } diff --git a/mining/src/block_template/selector.rs b/mining/src/block_template/selector.rs index b65126caf6..6acacb22d3 100644 --- a/mining/src/block_template/selector.rs +++ b/mining/src/block_template/selector.rs @@ -18,7 +18,7 @@ use kaspa_consensus_core::{ /// candidate transactions should be. A smaller alpha makes the distribution /// more uniform. ALPHA is used when determining a candidate transaction's /// initial p value. -const ALPHA: i32 = 3; +pub(crate) const ALPHA: i32 = 3; /// REBALANCE_THRESHOLD is the percentage of candidate transactions under which /// we don't rebalance. Rebalancing is a heavy operation so we prefer to avoid @@ -28,7 +28,7 @@ const ALPHA: i32 = 3; /// if REBALANCE_THRESHOLD is 0.95, there's a 1-in-20 chance of collision. 
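/// (Worked example of the numbers above: at REBALANCE_THRESHOLD = 0.95, up to 5%
/// of the candidates, and thus roughly 5% of the drawable weight, may already be
/// marked for deletion before a rebalance is triggered, so a uniform draw over
/// the stale weights collides with an already-handled candidate with probability
/// about 0.05, i.e. the 1-in-20 chance quoted above.)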
const REBALANCE_THRESHOLD: f64 = 0.95; -pub(crate) struct TransactionsSelector { +pub struct RebalancingWeightedTransactionSelector { policy: Policy, /// Transaction store transactions: Vec, @@ -52,8 +52,8 @@ pub(crate) struct TransactionsSelector { gas_usage_map: HashMap, } -impl TransactionsSelector { - pub(crate) fn new(policy: Policy, mut transactions: Vec) -> Self { +impl RebalancingWeightedTransactionSelector { + pub fn new(policy: Policy, mut transactions: Vec) -> Self { let _sw = Stopwatch::<100>::with_threshold("TransactionsSelector::new op"); // Sort the transactions by subnetwork_id. transactions.sort_by(|a, b| a.tx.subnetwork_id.cmp(&b.tx.subnetwork_id)); @@ -103,7 +103,7 @@ impl TransactionsSelector { /// select_transactions loops over the candidate transactions /// and appends the ones that will be included in the next block into /// selected_txs. - pub(crate) fn select_transactions(&mut self) -> Vec { + pub fn select_transactions(&mut self) -> Vec { let _sw = Stopwatch::<15>::with_threshold("select_transaction op"); let mut rng = rand::thread_rng(); @@ -182,11 +182,7 @@ impl TransactionsSelector { self.total_mass += selected_tx.calculated_mass; self.total_fees += selected_tx.calculated_fee; - trace!( - "Adding tx {0} (fee per megagram: {1})", - selected_tx.tx.id(), - selected_tx.calculated_fee * 1_000_000 / selected_tx.calculated_mass - ); + trace!("Adding tx {0} (fee per gram: {1})", selected_tx.tx.id(), selected_tx.calculated_fee / selected_tx.calculated_mass); // Mark for deletion selected_candidate.is_marked_for_deletion = true; @@ -229,7 +225,7 @@ impl TransactionsSelector { } } -impl TemplateTransactionSelector for TransactionsSelector { +impl TemplateTransactionSelector for RebalancingWeightedTransactionSelector { fn select_transactions(&mut self) -> Vec { self.select_transactions() } @@ -273,7 +269,13 @@ mod tests { use kaspa_txscript::{pay_to_script_hash_signature_script, test_helpers::op_true_script}; use std::{collections::HashSet, sync::Arc}; - use crate::{mempool::config::DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE, model::candidate_tx::CandidateTransaction}; + use crate::{ + mempool::{ + config::DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE, + model::frontier::selectors::{SequenceSelector, SequenceSelectorInput, SequenceSelectorTransaction}, + }, + model::candidate_tx::CandidateTransaction, + }; #[test] fn test_reject_transaction() { @@ -281,29 +283,43 @@ mod tests { // Create a vector of transactions differing by output value so they have unique ids let transactions = (0..TX_INITIAL_COUNT).map(|i| create_transaction(SOMPI_PER_KASPA * (i + 1) as u64)).collect_vec(); + let masses: HashMap<_, _> = transactions.iter().map(|tx| (tx.tx.id(), tx.calculated_mass)).collect(); + let sequence: SequenceSelectorInput = + transactions.iter().map(|tx| SequenceSelectorTransaction::new(tx.tx.clone(), tx.calculated_mass)).collect(); + let policy = Policy::new(100_000); - let mut selector = TransactionsSelector::new(policy, transactions); - let (mut kept, mut rejected) = (HashSet::new(), HashSet::new()); - let mut reject_count = 32; - for i in 0..10 { - let selected_txs = selector.select_transactions(); - if i > 0 { - assert_eq!( - selected_txs.len(), - reject_count, - "subsequent select calls are expected to only refill the previous rejections" - ); - reject_count /= 2; - } - for tx in selected_txs.iter() { - kept.insert(tx.id()).then_some(()).expect("selected txs should never repeat themselves"); - assert!(!rejected.contains(&tx.id()), "selected txs should never repeat themselves"); + let 
selectors: [Box; 2] = [ + Box::new(RebalancingWeightedTransactionSelector::new(policy.clone(), transactions)), + Box::new(SequenceSelector::new(sequence, policy.clone())), + ]; + + for mut selector in selectors { + let (mut kept, mut rejected) = (HashSet::new(), HashSet::new()); + let mut reject_count = 32; + let mut total_mass = 0; + for i in 0..10 { + let selected_txs = selector.select_transactions(); + if i > 0 { + assert_eq!( + selected_txs.len(), + reject_count, + "subsequent select calls are expected to only refill the previous rejections" + ); + reject_count /= 2; + } + for tx in selected_txs.iter() { + total_mass += masses[&tx.id()]; + kept.insert(tx.id()).then_some(()).expect("selected txs should never repeat themselves"); + assert!(!rejected.contains(&tx.id()), "selected txs should never repeat themselves"); + } + assert!(total_mass <= policy.max_block_mass); + selected_txs.iter().take(reject_count).for_each(|x| { + total_mass -= masses[&x.id()]; + selector.reject_selection(x.id()); + kept.remove(&x.id()).then_some(()).expect("was just inserted"); + rejected.insert(x.id()).then_some(()).expect("was just verified"); + }); } - selected_txs.iter().take(reject_count).for_each(|x| { - selector.reject_selection(x.id()); - kept.remove(&x.id()).then_some(()).expect("was just inserted"); - rejected.insert(x.id()).then_some(()).expect("was just verified"); - }); } } diff --git a/mining/src/feerate/fee_estimation.ipynb b/mining/src/feerate/fee_estimation.ipynb new file mode 100644 index 0000000000..a8b8fbfc89 --- /dev/null +++ b/mining/src/feerate/fee_estimation.ipynb @@ -0,0 +1,496 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Feerates\n", + "\n", + "The feerate value represents the fee/mass ratio of a transaction in `sompi/gram` units.\n", + "Given a feerate value recommendation, one should calculate the required fee by taking the transaction mass and multiplying it by feerate: `fee = feerate * mass(tx)`. \n", + "\n", + "This notebook makes an effort to implement and illustrate the feerate estimator method we used. The corresponding Rust implementation is more comprehensive and addresses some additional edge cases, but the code in this notebook highly reflects it." + ] + }, + { + "cell_type": "code", + "execution_count": 97, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": 98, + "metadata": {}, + "outputs": [], + "source": [ + "feerates = [1.0, 1.1, 1.2]*10 + [1.5]*3000 + [2]*3000 + [2.1]*3000 + [3, 4, 5]*10\n", + "# feerates = [1.0, 1.1, 1.2] + [1.1]*100 + [1.2]*100 + [1.3]*100 # + [3, 4, 5, 100]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We compute the probability weight of each transaction by raising `feerate` to the power of `alpha` (currently set to `3`). Essentially, alpha represents the amount of bias we want towards higher feerate transactions. 
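For example, with `alpha = 3`, a transaction paying twice the feerate of another carries `2**3 = 8` times its probability weight, making it eight times as likely to be drawn in a single trial.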
" + ] + }, + { + "cell_type": "code", + "execution_count": 99, + "metadata": {}, + "outputs": [], + "source": [ + "ALPHA = 3.0" + ] + }, + { + "cell_type": "code", + "execution_count": 100, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Total mempool weight: 64108.589999995806\n" + ] + } + ], + "source": [ + "total_weight = sum(np.array(feerates)**ALPHA)\n", + "print('Total mempool weight: ', total_weight)" + ] + }, + { + "cell_type": "code", + "execution_count": 101, + "metadata": {}, + "outputs": [], + "source": [ + "avg_mass = 2000\n", + "bps = 1\n", + "block_mass_limit = 500_000\n", + "network_mass_rate = bps * block_mass_limit" + ] + }, + { + "cell_type": "code", + "execution_count": 102, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Inclusion interval: 0.004\n" + ] + } + ], + "source": [ + "print('Inclusion interval: ', avg_mass/network_mass_rate)" + ] + }, + { + "cell_type": "code", + "execution_count": 109, + "metadata": {}, + "outputs": [], + "source": [ + "class FeerateBucket:\n", + " def __init__(self, feerate, estimated_seconds):\n", + " self.feerate = feerate\n", + " self.estimated_seconds = estimated_seconds\n", + " \n", + "\n", + "class FeerateEstimations:\n", + " def __init__(self, low_bucket, mid_bucket, normal_bucket, priority_bucket):\n", + " self.low_bucket = low_bucket \n", + " self.mid_bucket = mid_bucket \n", + " self.normal_bucket = normal_bucket\n", + " self.priority_bucket = priority_bucket\n", + " \n", + " def __repr__(self):\n", + " return 'Feerates:\\t{}, {}, {}, {} \\nTimes:\\t\\t{}, {}, {}, {}'.format(\n", + " self.low_bucket.feerate, \n", + " self.mid_bucket.feerate, \n", + " self.normal_bucket.feerate,\n", + " self.priority_bucket.feerate, \n", + " self.low_bucket.estimated_seconds, \n", + " self.mid_bucket.estimated_seconds, \n", + " self.normal_bucket.estimated_seconds, \n", + " self.priority_bucket.estimated_seconds)\n", + " def feerates(self):\n", + " return np.array([\n", + " self.low_bucket.feerate, \n", + " self.mid_bucket.feerate, \n", + " self.normal_bucket.feerate,\n", + " self.priority_bucket.feerate\n", + " ])\n", + " \n", + " def times(self):\n", + " return np.array([\n", + " self.low_bucket.estimated_seconds, \n", + " self.mid_bucket.estimated_seconds, \n", + " self.normal_bucket.estimated_seconds,\n", + " self.priority_bucket.estimated_seconds\n", + " ])\n", + " \n", + "class FeerateEstimator:\n", + " \"\"\"\n", + " `total_weight`: The total probability weight of all current mempool ready \n", + " transactions, i.e., Σ_{tx in mempool}(tx.fee/tx.mass)^ALPHA\n", + " \n", + " 'inclusion_interval': The amortized time between transactions given the current \n", + " transaction masses present in the mempool, i.e., the inverse \n", + " of the transaction inclusion rate. For instance, if the average \n", + " transaction mass is 2500 grams, the block mass limit is 500,000\n", + " and the network has 10 BPS, then this number would be 1/2000 seconds.\n", + " \"\"\"\n", + " def __init__(self, total_weight, inclusion_interval):\n", + " self.total_weight = total_weight\n", + " self.inclusion_interval = inclusion_interval\n", + "\n", + " \"\"\"\n", + " Feerate to time function: f(feerate) = inclusion_interval * (1/p(feerate))\n", + " where p(feerate) = feerate^ALPHA/(total_weight + feerate^ALPHA) represents \n", + " the probability function for drawing `feerate` from the mempool\n", + " in a single trial. 
The inverse 1/p is the expected number of trials until\n", + " success (with repetition), thus multiplied by inclusion_interval it provides an\n", + " approximation to the overall expected waiting time\n", + " \"\"\"\n", + " def feerate_to_time(self, feerate):\n", + " c1, c2 = self.inclusion_interval, self.total_weight\n", + " return c1 * c2 / feerate**ALPHA + c1\n", + "\n", + " \"\"\"\n", + " The inverse function of `feerate_to_time`\n", + " \"\"\"\n", + " def time_to_feerate(self, time):\n", + " c1, c2 = self.inclusion_interval, self.total_weight\n", + " return ((c1 * c2 / time) / (1 - c1 / time))**(1 / ALPHA)\n", + " \n", + " \"\"\"\n", + " The antiderivative function of \n", + " feerate_to_time excluding the constant shift `+ c1`\n", + " \"\"\"\n", + " def feerate_to_time_antiderivative(self, feerate):\n", + " c1, c2 = self.inclusion_interval, self.total_weight\n", + " return c1 * c2 / (-2.0 * feerate**(ALPHA - 1))\n", + " \n", + " \"\"\"\n", + " Returns the feerate value for which the integral area is `frac` of the total area.\n", + " See figures below for illustration\n", + " \"\"\"\n", + " def quantile(self, lower, upper, frac):\n", + " c1, c2 = self.inclusion_interval, self.total_weight\n", + " z1 = self.feerate_to_time_antiderivative(lower)\n", + " z2 = self.feerate_to_time_antiderivative(upper)\n", + " z = frac * z2 + (1.0 - frac) * z1\n", + " return ((c1 * c2) / (-2 * z))**(1.0 / (ALPHA - 1.0))\n", + " \n", + " def calc_estimations(self):\n", + " # Choose `high` such that it provides sub-second waiting time\n", + " high = self.time_to_feerate(1.0)\n", + " \n", + " # Choose `low` feerate such that it provides sub-hour waiting time AND it covers (at least) the 0.25 quantile\n", + " low = max(self.time_to_feerate(3600.0), self.quantile(1.0, high, 0.25))\n", + " \n", + " # Choose `normal` feerate such that it provides sub-minute waiting time AND it covers (at least) the 0.66\n", + " # quantile between low and high\n", + " normal = max(self.time_to_feerate(60.0), self.quantile(low, high, 0.66))\n", + " \n", + " # Choose an additional point between normal and low\n", + " mid = max(self.time_to_feerate(1800.0), self.quantile(1.0, high, 0.5))\n", + " \n", + " return FeerateEstimations(\n", + " FeerateBucket(low, self.feerate_to_time(low)),\n", + " FeerateBucket(mid, self.feerate_to_time(mid)),\n", + " FeerateBucket(normal, self.feerate_to_time(normal)),\n", + " FeerateBucket(high, self.feerate_to_time(high)))" + ] + }, + { + "cell_type": "code", + "execution_count": 104, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "0.0" + ] + }, + "execution_count": 104, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "estimator = FeerateEstimator(total_weight=0, inclusion_interval=1/100)\n", + "# estimator.quantile(2, 3, 0.5)\n", + "estimator.time_to_feerate(1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Feerate estimation\n", + "\n", + "The figure below illustrates the estimator selection. We first estimate the `feerate_to_time` function and then select 3 meaningfull points by analyzing the curve and its integral (see `calc_estimations`). 
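Concretely, the `high` bucket targets sub-second waiting time, `normal` sub-minute, and `low` sub-hour, with each value additionally clamped from below by a quantile of the curve's integral so the recommendations follow the shape of the current mempool.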
" + ] + }, + { + "cell_type": "code", + "execution_count": 105, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXoAAAD8CAYAAAB5Pm/hAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAHVlJREFUeJzt3XuQnHW95/H3t+8990lmMplkQhIwoIAJlxj14PFwRNR4OcA5LoVb60HLLU4VeNTaU7Wl7JaHtWTX2rPKrq5yFgTFEnVTikcUvLABBY5ASBASyIUk5DKT20xuc0vm0tPf/aOfCZNkkpnMdOeZfvrzqup6nufXTz/9bS6f3zO//j1Pm7sjIiLRFQu7ABERKS0FvYhIxCnoRUQiTkEvIhJxCnoRkYhT0IuIRJyCXkQk4hT0IiIRp6AXEYm4RNgFADQ1NfmiRYvCLkNEpKysW7fuoLs3T7TfjAj6RYsWsXbt2rDLEBEpK2a2azL7aehGRCTiFPQiIhGnoBcRiTgFvYhIxCnoRUQiTkEvIhJxCnoRkYgr66Dfsr+Xf/rtZo4eGwq7FBGRGausg37XoX6+/dR22g8fD7sUEZEZa8KgN7MFZvaUmW0ys9fM7PNB+11mtsfMXg4eHx7zmi+Z2TYz22JmHyxV8S11GQAO9AyU6i1ERMreZG6BkAP+wd1fMrNaYJ2ZPRE8d4+7/4+xO5vZpcAtwGXAPOD/mdnF7j5SzMJhTND3KuhFRM5kwjN6d9/n7i8F673AJmD+WV5yA/ATdx909x3ANmBFMYo9VVNNCgMO9AyW4vAiIpFwTmP0ZrYIuBJ4IWj6rJmtN7MHzawxaJsPtI95WQdn7ximLBGP0VCVpFNDNyIiZzTpoDezGuBnwBfcvQe4F7gIuALYB3x9dNdxXu7jHO82M1trZmu7urrOufBR9dmkxuhFRM5iUkFvZkkKIf+wuz8C4O4H3H3E3fPA/bw5PNMBLBjz8jZg76nHdPf73H25uy9vbp7wdspnVJ9Nsl9BLyJyRpOZdWPAA8Amd//GmPbWMbvdBLwarD8K3GJmaTNbDCwB1hSv5JM1VCU1Ri8ichaTmXVzDfBJYIOZvRy03Ql8wsyuoDAssxP4OwB3f83MVgEbKczYuaMUM25GNVSlONw/xFAuTypR1pcFiIiUxIRB7+7PMv64++Nnec3dwN3TqGvSGqqSAHT1DTK/IXs+3lJEpKyU/SlwYxD0+kJWRGR8ZR/0DdkUgKZYioicQfkH/Ykzen0hKyIynrIP+ppMgphp6EZE5EzKPuhjZtSkEzqjFxE5g7IPeoDqdIJO3dhMRGRckQj6qlScfUcV9CIi44lE0FenE7oNgojIGUQi6GvSCfoGcxwbyoVdiojIjBOJoK/NFC7w3avhGxGR00Qj6NOFufR7j+q3Y0VEThWNoD9xRq+gFxE5VSSCvjqdwFDQi4iMJxJBH48ZtZkEe7s1Ri8icqpIBD0UboWgM3oRkdNFJ+hTCfYcUdCLiJwqOkGfSbCvewD3036HXESkokUm6GszSYZG8hzqHwq7FBGRGSVCQa8pliIi44lO0Kd1dayIyHiiE/QZXR0rIjKeyAR9JhkjGTcFvYjIKSIT9GZGXSbJPl00JSJyksgEPUB1Ok7HkWNhlyEiMqNEKuhrM0k6dNGUiMhJIhX0ddkkh/qH9AMkIiJjRCro64OZNzqrFxF5U7SCPlsI+t2HNE4vIjIqUkFfly1cNNWuL2RFRE6IVNBnk3FSiRi7DyvoRURGTRj0ZrbAzJ4ys01m9pqZfT5on2VmT5jZ1mDZGLSbmX3TzLaZ2Xozu6rUH2JMrdRnk7Qr6EVETpjMGX0O+Ad3fxvwLuAOM7sU+CKw2t2XAKuDbYCVwJLgcRtwb9GrPovadEJn9CIiY0wY9O6+z91fCtZ7gU3AfOAG4KFgt4eAG4P1G4AfeMHzQIOZtRa98jOoyybZffiY7ksvIhI4pzF6M1sEXAm8ALS4+z4odAbAnGC3+UD7mJd1BG2nHus2M1trZmu7urrOvfIzqM8mGRjWfelFREZNOujNrAb4GfAFd+85267jtJ12eu3u97n7cndf3tzcPNkyJjQ680bDNyIiBZMKejNLUgj5h939kaD5wOiQTLDsDNo7gAVjXt4G7C1OuRMbvWhKX8iKiBRMZtaNAQ8Am9z9G2OeehS4NVi/FfjFmPa/DWbfvAvoHh3iOR/qsgp6EZGxEpPY5xrgk8AGM3s5aLsT+Bqwysw+A+wG/k3w3OPAh4FtwDHg00WteALJeIwazbwRETlhwqB392cZf9wd4Lpx9nfgjmnWNS11mQS7dBsEEREgYlfGjqqvSvLGwf6wyxARmREiGfSNVSm6egfpH9TtikVEIhn0DVWFL2R36KxeRCSaQd9YlQIU9CIiENGgb8jqjF5EZFQkgz4Rj1GfTSroRUSIaNBD4Z43b3T1hV2GiEjoIhv0DdnCFEvdxVJEKl10g74qSe9AjsO6i6WIVLjIBr1m3oiIFEQ26Efn0usKWRGpdJEN+rpMkriZzuhFpOJFNuhjMaOhKsm2Ts28EZHKFtmgB2isTrFlf2/YZYiIhCrSQT+7OkX74WMMDI+EXYqISGgiHfQfHHmaZ1KfI333bLjncli/KuySRETOu8n8wlRZuqTz17y/6+ukYoOFhu52+OXnCutLbw6vMBGR8yyyZ/Tv2f0dUj54cuPwcVj9lXAKEhEJSWSDvnbwwPhPdHec30JEREIW2aDvTbeM/0R92/ktREQkZJEN+mcvuJ3hWObkxmQWrvtyOAWJiIQksl/GbpmzEoB37vjfNA53kaudR+oDd+mLWBGpOJENeiiE/R+r3scPX9jNPTcu46alGrYRkcoT2aGbUQ1VKeJmbNmvWyGISGWKfNDHY8bsmhSb9/eEXYqISCgiH/QAs2tSvLqnO+wyRERCURFB31yT5mDfEJ29A2GXIiJy3lVG0NemAdi0T3eyFJHKUxlBX1MI+tf2avhGRCrPhEFvZg+aWaeZvTqm7S4z22NmLwePD4957ktmts3MtpjZB0tV+LlIJ+M0ZJNs3KsvZEWk8kzmjP77wIfGab/H3a8IHo8DmNmlwC3AZcFrvmNm8WIVOx2za1K8pqAXkQo0YdC7+9PA4Uke7wbgJ+4+6O47gG3AimnUVzTNNWl2HuynfzAXdikiIufVdMboP2tm64OhncagbT7QPmafjqDtNGZ2m5mtNbO1XV1d0yhjcppr0ziwWT8tKCIVZqpBfy9wEXAFsA/4etBu4+zr4x3A3e9z9+Xuvry5uXmKZUze6Mybjfs0fCMilWVKQe/uB9x9xN3zwP28OTzTASwYs2sbsHd6JRZHTTpBVSrOho6jYZciInJeTSnozax1zOZNwOiMnEeBW8wsbWaLgSXAmumVWBxmxpzaNC
+3K+hFpLJMePdKM/sxcC3QZGYdwD8C15rZFRSGZXYCfwfg7q+Z2SpgI5AD7nD3kdKUfu5a6jKs2XGYvsEcNelI37hTROSECdPO3T8xTvMDZ9n/buDu6RRVKnPrMjjw6p5u3nXh7LDLERE5LyriythRLXWFX5x6RcM3IlJBKiros6k4jVVJjdOLSEWpqKCHwjTLPynoRaSCVFzQz63LsL97gM4e3bJYRCpDxQX96Di9hm9EpFJUXNDPqU0TMwW9iFSOigv6RDzGnNoML+6c7H3aRETKW8UFPcC8hgwvtx9lYHjGXMslIlIyFRr0WYZHnPUd+sUpEYm+ig16QMM3IlIRKjLos8k4TTUp1uxQ0ItI9FVk0APMrc+wbtcRRvLj3i5fRCQyKjbo5zdk6RvMsUk/RCIiEVexQa9xehGpFBUb9HWZJPXZJM9tPxR2KSIiJVWxQQ/Q1pjlj9sPkRvJh12KiEjJVHTQXzCrir7BHBv2aD69iERXRQd9W2NhnP7ZrQdDrkREpHQqOuirUgnm1KV5dpuCXkSiq6KDHmBBQxXrdh3h2FAu7FJEREpCQT8rSy7vvKCrZEUkoio+6Oc3ZEnETOP0IhJZFR/0iXiM+Q1ZntzcGXYpIiIlUfFBD7CoqZodB/vZcbA/7FJERIpOQQ9c2FQNwOpNB0KuRESk+BT0QF02SXNNmtWbNHwjItGjoA8snF3Fmp2H6T4+HHYpIiJFpaAPLG6qZiTvPP16V9iliIgUlYI+MLc+Q1UqrnF6EYmcCYPezB40s04ze3VM2ywze8LMtgbLxqDdzOybZrbNzNab2VWlLL6YYmYsml3NE5sOMJgbCbscEZGimcwZ/feBD53S9kVgtbsvAVYH2wArgSXB4zbg3uKUeX4saamhf3CEZ17XxVMiEh0TBr27Pw2cen+AG4CHgvWHgBvHtP/AC54HGsystVjFltqCxiqyyTiPbdgXdikiIkUz1TH6FnffBxAs5wTt84H2Mft1BG2nMbPbzGytma3t6poZX4DGY8bipmqe2HiAgWEN34hINBT7y1gbp83H29Hd73P35e6+vLm5uchlTN3FLTX0DeZ4Rve+EZGImGrQHxgdkgmWo1cadQALxuzXBuydennnX1tjFR9P/ZGrH/lzuKsB7rkc1q8KuywRkSmbatA/CtwarN8K/GJM+98Gs2/eBXSPDvGUi0sP/oavxu5nVu4A4NDdDr/8nMJeRMrWZKZX/hh4DrjEzDrM7DPA14DrzWwrcH2wDfA48AawDbgfuL0kVZfQe3Z/hwyDJzcOH4fVXwmnIBGRaUpMtIO7f+IMT103zr4O3DHdosJUO3iGC6a6O85vISIiRaIrY0/Rm24Z/4n6tvNbiIhIkSjoT/HsBbczHMuc3JjMwnVfDqcgEZFpmnDoptJsmbMSKIzV1wweoNOaaPnYf8WW3hxyZSIiU6OgH8eWOSvZMmclm/b18LuNB3i46p1cE3ZRIiJTpKGbs1gyp4aqVJwfPLcz7FJERKZMQX8WiXiMt7XW8cTGA+w9ejzsckREpkRBP4Gl8+txhx+9sDvsUkREpkRBP4G6bJLFTdX8aM1u3adeRMqSgn4SlrbVc7h/iF+9UlZ3cxARART0k3LBrCqaalL88x+2k8+PezNOEZEZS0E/CWbG1Rc0srWzj6e2dE78AhGRGURBP0lLWmqpzya59/fbwy5FROScKOgnKR4zrljQwNpdR3hx56m/rCgiMnMp6M/BZfPqqErF+dbqrWGXIiIyaQr6c5CMx7jqgkae3nqQNTt0Vi8i5UFBf46WttVTk07wT7/dTOH2+yIiM5uC/hwl4zGWL2rkxZ1HeFo/IC4iZUBBPwWXz6unPpvkv/9ms+bVi8iMp6CfgnjMeOfiWby2t4dH/rQn7HJERM5KQT9Fb51bS2t9hq/9ehN9g7mwyxEROSMF/RSZGe9d0szBviG+9aSmW4rIzKWgn4a59Rkuba3lgWd2sONgf9jliIiMS0E/TX92URPxmPGln63XdEsRmZEU9NNUnU7wnrc08fyOw/zkxfawyxEROY2Cvggum1fHgsYsdz+2if3dA2GXIyJyEgV9EZgZ73vrHAZzI9z58w0awhGRGUVBXyQNVSnefeFsntzcyQ/1+7IiMoMo6IvoigUNLJpdxVd/tZEt+3vDLkdEBFDQF5WZ8f63tZCIG3//45cYGNaPiYtI+KYV9Ga208w2mNnLZrY2aJtlZk+Y2dZg2VicUstDdTrB9W9r4fUDffznf3lV4/UiErpinNH/pbtf4e7Lg+0vAqvdfQmwOtiuKAtnV7Ni8Sx+uq6DHzy3K+xyRKTClWLo5gbgoWD9IeDGErzHjPeuxbO4sKmar/xyI89tPxR2OSJSwaYb9A78zszWmdltQVuLu+8DCJZzpvkeZcnM+MBlLTRUJbn94XW6RYKIhGa6QX+Nu18FrATuMLP3TvaFZnabma01s7VdXV3TLGNmSififGRpK4O5PJ984AU6e3UxlYicf9MKenffGyw7gZ8DK4ADZtYKECw7z/Da+9x9ubsvb25unk4ZM1pjVYqPLZtHZ88gtz64ht6B4bBLEpEKM+WgN7NqM6sdXQc+ALwKPArcGux2K/CL6RZZ7ubWZfjw2+eyZX8v//6htRwb0v3rReT8mc4ZfQvwrJm9AqwBHnP33wBfA643s63A9cF2xVs4u5oPXDqXNTsP86kHX6RfP1YiIudJYqovdPc3gGXjtB8CrptOUVF1ydxaAH67cT+f+t4avv/pFVSnp/yvQERkUnRl7Hl2ydxaPnTZXNbtOsK/vf95DvUNhl2SiEScgj4EF7fU8uG3t/La3h7++jt/ZNchTb0UkdJR0IfkouYa/vqq+XT1DXLjt/+VHU99D+65HO5qKCzXrwq7RBGJCA0Qh6i1PsvHr2pj+OX/S8vv/xlsqPBEdzv88nOF9aU3h1egiESCzuhD1lid4oupVVSNhvyo4eOw+ivhFCUikaKgnwHqhg6M/0R3x/ktREQiSUE/A/SmW8Zt78vM1W2ORWTaFPQzwLMX3M5wLHNS2wBp7uy5iU9970XaDx8LqTIRiQIF/QywZc5KnrjoTnrSc3GMnvRcVi/5T/S85Sae236I6+/5A//nD9sZHsmHXaqIlCHNupkhtsxZyZY5K09qWwZc2FzNH17v4r/9ejOP/GkPd33sMt590exwihSRsqQz+hmuNpPko0vn8dGlrew7epxP3P88n3noRbZ16sfHRWRydEZfJi5qrmHhrCpebj/Ks1sP8sHNz3DzOxZwx19eRFtjVdjlicgMpqAvI4l4jOWLZnHpvDrW7DjMqhfbWbW2nb+5aj63X/sWFjVVh12iiMxACvoyVJVKcO0lc7h6YSPrdh3hkZf28NN1HXx06Tw+fc0irrygMewSRWQGUdCXsdpMkmsvmcM7Fs3ipd1H+O1r+3n0lb0snV/Pp65ZxEeWtpJOxMMuU0RCZjPhgpzly5f72rVrp/TaJzcf4JX27iJXVJ6Gcnk27eth/Z5uDvcP0ViV5KYr2/j41W1cOq8u7PJEpMjMbJ27L59oP53RR
0gqEWPZggaWttWz+/AxXt3bw0PP7eTBf93BW+fW8vGr2/irZfOYU5eZ8FgiEh0K+ggyMxbOrmbh7GqOD4/w+v5eNu/v5auPbeLuxzZx1cJGVl4+lw9eNpcFszRjRyTqFPQRl03GWbaggWULGjjcP8S2zj62d/Xx1cc28dXHNnH5vDre97YW/uLiZpa11ZOI69IKkahR0FeQWdUpViyexYrFszh6bIjtXf1s7+rjW09u5Zurt1KbSfDnS5r4i4ub+bOLmmhrzGJmYZctItOkoK9QDVUprl6Y4uqFjQwMj7D78DF2HTrGM68f5PEN+wGYW5dhxeJZvGPxLFYsmsWSOTXEYgp+kXKjoBcyyTgXt9RycUst7s6h/iH2HDnOnqPHeWpzJ4++sheA+mySZW31LG1r4O1t9Sxtq2duXUZn/SIznIJeTmJmNNWkaapJs2xBA+5Oz0COPUePs/focTbv7+XZbQfJB7NyZ1WnWNZWz+Xz61nSUsslLbUsbqomldBYv8hMoaCXszIz6rNJ6rNJLm0tzMXPjeTp6huks2eQA70DbNjTzR9e7zoR/vGYsbipmkuCvxLeMqeGRU1VLJxdTU1a/8mJnG/6v07OWSIeo7U+S2t99kRbbiTPkWPDHOof5HD/EIf6hvjj9oM8vmEfYy/Jm12dYnFTYernotlVLGyqZuGsKlobMjRVp/UdgEgJKOilKBLxGM21aZpr0ye1D4/kOXpsmKPHhjh6fJju48Ps7xng9QO99AzkTto3GTda67PMa8gwryHLvPos8xqytDZkaK3P0FyTprEqpc5A5Bwp6KWkkmfoAKDQCXQH4d83kKN3MEfvwDDth4+xcW8PfYO5E8NBo+JmzK5JnThmc01h2TRm2VidpLEqRUNVUvf6EUFBLyFKxmMnvvgdTz7v9A/l6B3I0TeY49jQCMeGcvQPjtA/mONQ3xAvDR0Zt0MYlU3GaagqBH9jdZKGqhSNwXZ9trBdm0kUHukktZkENcG2OgmJCgW9zFixmFGbSVKbSZ51P3dnYDhf6ASGRhgYHn3kT6wfG8px5NgQg7leBoZHOD40wkS380vFY1Sn49RmktRlT+4IatIJsqk41akEVak42VS8sEwWtt9sG/N8Mq4rjyUUJQt6M/sQ8L+AOPBdd/9aqd5LKpuZkQ3CdLK/puvuDOYKHcFQLs9gLs/QSJ6hXP7N7VyewZHC830DOQ73DzGc8zf3G8kzcqY/Jc4gGTeyyUKt6UScdCJGJhknm4yTTsZIJ2In2gvbwXoiRjo5Zj1x+v7JRIxEzEjGY6TGW0/ESMUL6/GY6fqHMKxfBau/At0dUN8G130Zlt5c8rctSdCbWRz4NnA90AG8aGaPuvvGUryfyLkyMzLJOJnk9IZnRvJOLp9neMQZHsmTC5aFh5MLlsP509ty+UJH0TMwzJFjQ+TdGcl7cExnZKSwHD1+MRmFobNEvNAZJONGIhYjmbBCZxAvdAqjncToMh6LEY9BIhYjHjMSMSMWLE/ffrNTiY95/uT12ATHKNQVixW+n4nFjJgZMYOYjXZYhSm9Y9tPPGKMux43w4LtuBWOMXq8mFGaTnD9Kvjl52D4eGG7u72wDSUP+1Kd0a8Atrn7GwBm9hPgBkBBL5FSCK04pb48wN3JOyc6h9xohzDiJzqbvBc6nrw7+bwz4k4+T7D0U5Zvtp/oYIL3yOed4VyeweGRE/vmvbCfO4UHwb7BMdwhz+n75oNjlqOxHcBJHcaJtjc7hbi9uR4zTnRIxpttP+y9kxY/fvKbDB8vnOGXadDPB9rHbHcA7yzFG82uTrNYv5UqMmONdlLuflrHMdqxjAQdw9jOZ+x+I2M6mRMdDn7K9pj1Me95cnuwzuT2OdPxTnpfd/LB8cZ2hKM1jb6m2Q+O/w+ou6Pk/w5KFfTj/d1zUr9uZrcBtwFccMEFU36j0VvwiojMaPe0FYZrTlXfVvK3LtUUgA5gwZjtNmDv2B3c/T53X+7uy5ubm0tUhojIDHHdlyGZPbktmS20l1ipgv5FYImZLTazFHAL8GiJ3ktEZOZbejN87JtQvwCwwvJj3yzfWTfunjOzzwK/pTC98kF3f60U7yUiUjaW3nxegv1UJZsr4O6PA4+X6vgiIjI5ukxPRCTiFPQiIhGnoBcRiTgFvYhIxCnoRUQiTkEvIhJxCnoRkYgz9/BvLWdmXcCuKb68CTjD3YIiI+qfUZ+vvOnzhWehu094D5kZEfTTYWZr3X152HWUUtQ/oz5fedPnm/k0dCMiEnEKehGRiItC0N8XdgHnQdQ/oz5fedPnm+HKfoxeRETOLgpn9CIichZlG/Rm9qCZdZrZq2HXUgpmtsDMnjKzTWb2mpl9PuyaisnMMma2xsxeCT7ffwm7plIws7iZ/cnMfhV2LaVgZjvNbIOZvWxma8Oup9jMrMHMfmpmm4P/F98ddk1TUbZDN2b2XqAP+IG7Xx52PcVmZq1Aq7u/ZGa1wDrgRnffGHJpRWFmBlS7e5+ZJYFngc+7+/Mhl1ZUZvYfgOVAnbt/NOx6is3MdgLL3c/0y9flzcweAp5x9+8Gv5ZX5e5Hw67rXJXtGb27Pw0cDruOUnH3fe7+UrDeC2wC5odbVfF4QV+wmQwe5XnWcQZm1gZ8BPhu2LXIuTOzOuC9wAMA7j5UjiEPZRz0lcTMFgFXAi+EW0lxBcMaLwOdwBPuHqnPB/xP4D8C+bALKSEHfmdm68zstrCLKbILgS7ge8Hw23fNrDrsoqZCQT/DmVkN8DPgC+7eE3Y9xeTuI+5+BdAGrDCzyAzBmdlHgU53Xxd2LSV2jbtfBawE7giGVKMiAVwF3OvuVwL9wBfDLWlqFPQzWDB2/TPgYXd/JOx6SiX4c/j3wIdCLqWYrgH+KhjD/gnwPjP7YbglFZ+77w2WncDPgRXhVlRUHUDHmL80f0oh+MuOgn6GCr6sfADY5O7fCLueYjOzZjNrCNazwPuBzeFWVTzu/iV3b3P3RcAtwJPu/u9CLquozKw6mChAMKTxASAys+DcfT/QbmaXBE3XAWU5GSIRdgFTZWY/Bq4FmsysA/hHd38g3KqK6hrgk8CGYBwb4E53fzzEmoqpFXjIzOIUTjhWuXskpyBGWAvw88I5CQngR+7+m3BLKrq/Bx4OZty8AXw65HqmpGynV4qIyORo6EZEJOIU9CIiEaegFxGJOAW9iEjEKehFRCJOQS8iEnEKehGRiFPQi4hE3P8H1DStq24uP4EAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "Feerates:\t1.1499744606513134, 1.3970589103224236, 1.9124681884207781, 6.361686926992798 \n", + "Times:\t\t168.62498827393395, 94.04820895845543, 36.664092522353194, 1.0000000000000004" + ] + }, + "execution_count": 105, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "estimator = FeerateEstimator(total_weight=total_weight, \n", + " inclusion_interval=avg_mass/network_mass_rate)\n", + "\n", + "pred = estimator.calc_estimations()\n", + "x = np.linspace(1, pred.priority_bucket.feerate, 100000)\n", + "y = estimator.feerate_to_time(x)\n", + "plt.figure()\n", + "plt.plot(x, y)\n", + "plt.fill_between(x, estimator.inclusion_interval, y2=y, alpha=0.5)\n", + "plt.scatter(pred.feerates(), pred.times(), zorder=100)\n", + "plt.show()\n", + "pred" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Interpolating the original function using two of the points\n", + "\n", + "The code below reverse engineers the original curve using only 2 of the estimated points" + ] + }, + { + "cell_type": "code", + "execution_count": 106, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXoAAAD8CAYAAAB5Pm/hAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAHW9JREFUeJzt3Xl0XGeZ5/HvU6WSVLKszVpiSbblJMZZyGLjOAHTaUgAk8CQhKUn0ECGgTZNBw6cYTJDaOYA5wwzORMCPX2gM52QNMlAJxMghEAHTAiBEAix5Wze4tiJN8mbbEebtZbqmT/qypZt2ZalKt/Srd/nHJ1776t7q57K8ruv3nrvvebuiIhIdMXCLkBERHJLQS8iEnEKehGRiFPQi4hEnIJeRCTiFPQiIhGnoBcRiTgFvYhIxCnoRUQirijsAgBqa2u9paUl7DJERKaVNWvW7Hf3ulPtlxdB39LSQmtra9hliIhMK2a2fSL7aehGRCTiFPQiIhGnoBcRiTgFvYhIxCnoRUQiTkEvIhJxCnoRkYib1kG/aU8Pt698mc6+obBLERHJW9M66LcfOMR3n3yVnQf7wy5FRCRvTeugb6goBWBv90DIlYiI5K9TBr2ZzTGzJ81so5mtN7PPB+1fM7N2M3sh+Ll2zDG3mtkWM9tkZstzVfzhoO9R0IuInMhE7nWTAr7o7s+Z2UxgjZk9Hvzu2+7+zbE7m9kFwI3AhUAj8Bsze4O7j2SzcIDa8mLMYG/3YLZfWkQkMk7Zo3f33e7+XLDeA2wEmk5yyHXAg+4+6O5bgS3A0mwUe6yieIza8hL2aehGROSETmuM3sxagEXAs0HTZ83sJTO718yqg7YmYOeYw9oY58RgZivMrNXMWjs6Ok678FENFSUaoxcROYkJB72ZlQM/Ab7g7t3AncA5wKXAbuCO0V3HOdyPa3C/y92XuPuSurpT3k75hBpmlmroRkTkJCYU9GaWIBPyP3T3hwHcfa+7j7h7GribI8MzbcCcMYc3A7uyV/LR6itK2acvY0VETmgis24MuAfY6O7fGtM+e8xuNwDrgvVHgRvNrMTM5gMLgFXZK/loDRUl7O8dYiiVztVbiIhMaxOZdbMM+Biw1sxeCNq+DHzYzC4lMyyzDfg0gLuvN7OHgA1kZuzcnIsZN6NGp1h29A7SVJXM1duIiExbpwx6d3+a8cfdHzvJMd8AvjGFuibsrDEXTSnoRUSON62vjAWorygB0BRLEZETmPZBf+Q2CJp5IyIynmkf9DVlxRTFTHPpRUROYNoHfSxm1M8sUY9eROQEpn3Qg+bSi4icTCSCXrdBEBE5sYgEfSl7uhT0IiLjiUzQdw+k6BtKhV2KiEjeiUTQj14otatTvXoRkWNFIugbDwe9nh0rInKsiAR95qIpBb2IyPEiEfQNFaWYwS59ISsicpxIBH0iHqNhZql69CIi44hE0ENm+EZBLyJyvMgE/eyqJLs1dCMicpzIBH1TVZL2zn7cj3s8rYhIQYtM0DdWljKUSnPg0FDYpYiI5JXoBH0wl363LpoSETlK5IK+XV/IiogcJXJBr5k3IiJHi0zQV5clKE3E2N2loBcRGSsyQW9mNFYmdWMzEZFjRCboITN806ahGxGRo0Qq6JuqkrS/rqAXERkrUkE/d1YZ+3sH6R8aCbsUEZG8Eamgn1NTBsDO1/tCrkREJH9EK+irM1MsdxxQ0IuIjIpU0M9Vj15E5DiRCvqaGcWUFcfZcVBBLyIy6pRBb2ZzzOxJM9toZuvN7PNBe42ZPW5mm4NlddBuZvaPZrbFzF4ys8W5/hBjamVuTRk7D2rmjYjIqIn06FPAF939fOAK4GYzuwD4EvCEuy8Angi2Aa4BFgQ/K4A7s171STRXl7FTPXoRkcNOGfTuvtvdnwvWe4CNQBNwHXBfsNt9wPXB+nXA/Z7xZ6DKzGZnvfITmFtTxo6DfbovvYhI4LTG6M2sBVgEPAs0uPtuyJwMgPpgtyZg55jD2oK2Y19rhZm1mllrR0fH6Vd+AnNrkvQPj+i+9CIigQkHvZmVAz8BvuDu3SfbdZy247rX7n6Xuy9x9yV1dXUTLeOURufS6wtZEZGMCQW9mSXIhPwP3f3hoHnv6JBMsNwXtLcBc8Yc3gzsyk65p3Z4iqWCXkQEmNisGwPuATa6+7fG/OpR4KZg/SbgZ2PaPx7MvrkC6Bod4jkTmqsV9CIiYxVNYJ9lwMeAtWb2QtD2ZeA24CEz+ySwA/hQ8LvHgGuBLUAf8ImsVnwKyeI4dTNLNMVSRCRwyqB396
cZf9wd4Opx9nfg5inWNSVza8rYduBQmCWIiOSNSF0ZO2p+7QwFvYhIILJBv7d7kEODqbBLEREJXSSD/uzaGQBs3a9evYhIJIN+fp2CXkRkVCSDvmWWgl5EZFQkg740EaepKqmgFxEhokEPmS9kX1PQi4hEO+i3dvTqLpYiUvAiHfTdAykO6i6WIlLgohv0mnkjIgJEOOhH59JrnF5ECl1kg76pKkkiburRi0jBi2zQF8VjzJs1gy37esMuRUQkVJENeoAF9eUKehEpeNEO+oaZbD9wiIHhkbBLEREJTaSDvqtviLTD+f/tVyy77bc88nx72CWJiJxxkQ36R55v58HVO4HMk8nbO/u59eG1CnsRKTiRDfrbV25iMJU+qq1/eITbV24KqSIRkXBENuh3dY7/zNgTtYuIRFVkg76xKnla7SIiURXZoL9l+UKSifhRbclEnFuWLwypIhGRcBSFXUCuXL+oCYCv/3w9r/cNUzezhL+/9vzD7SIihSKyPXrIhP1Dn34zAF++9jyFvIgUpEgHPUBL7QwSceOVvbpCVkQKU+SDPhGPcU5dORt3d4ddiohIKCIf9AAXNFawYZeCXkQKU0EE/YWNlezrGaSjZzDsUkREzriCCPoLZlcAsEHDNyJSgAor6DV8IyIF6JRBb2b3mtk+M1s3pu1rZtZuZi8EP9eO+d2tZrbFzDaZ2fJcFX46KssSNFcn1aMXkYI0kR7994F3j9P+bXe/NPh5DMDMLgBuBC4MjvknM4uPc+wZd8HsCtbv6gq7DBGRM+6UQe/uTwEHJ/h61wEPuvugu28FtgBLp1Bf1lzQWMHW/YfoG0qFXYqIyBk1lTH6z5rZS8HQTnXQ1gTsHLNPW9B2HDNbYWatZtba0dExhTIm5sLGStzh5T09OX8vEZF8MtmgvxM4B7gU2A3cEbTbOPv6eC/g7ne5+xJ3X1JXVzfJMibugsbMF7Lr9YWsiBSYSQW9u+919xF3TwN3c2R4pg2YM2bXZmDX1ErMjsbKUqrLEqxr0zi9iBSWSQW9mc0es3kDMDoj51HgRjMrMbP5wAJg1dRKzA4z45I5VbzY1hl2KSIiZ9Qpb1NsZg8AbwNqzawN+CrwNjO7lMywzDbg0wDuvt7MHgI2ACngZncfyU3pp++S5iqeemUzhwZTzCiJ7B2aRUSOcsq0c/cPj9N8z0n2/wbwjakUlSuXzqki7bC2vYsrzp4VdjkiImdEQVwZO+ri5koAXtyp4RsRKRwFFfSzykuYW1OmcXoRKSgFFfRA5gvZnZp5IyKFo/CCvrmS9s5+9vUMhF2KiMgZUXBBv2huFYB69SJSMAou6C9srKQoZjy/4/WwSxEROSMKLuhLE3EubKygdZuCXkQKQ8EFPcBlLTW80NbJYCpvruUSEcmZwgz6+TUMpdK8pPveiEgBKMygb6kBYNXWid5mX0Rk+irIoK+ZUcy59eWs3qagF5HoK8igh0yvfs221xlJj3u7fBGRyCjYoF86v5qewRQv79GDSEQk2go26EfH6VdrnF5EIq5gg765uoymqiTPvHYg7FJERHKqYIMe4K3n1vKnVw9onF5EIq2gg37Zglp6BlKsbdd8ehGJrsIO+nMyT5l6enNHyJWIiOROQQf9rPISLphdwdNb9oddiohIzhR00AP8xYJantveSd9QKuxSRERyouCDftm5tQyNpHU7BBGJrIIP+staaiiOx3h6s4ZvRCSaCj7ok8VxLj+7hic37Qu7FBGRnCj4oAe46rx6Xu04xLb9h8IuRUQk6xT0wNXnNQDwxMvq1YtI9CjogbmzylhQX84TG/eGXYqISNYp6ANXn9/Aqq0H6R4YDrsUEZGsUtAH3nF+Pam089QrukpWRKJFQR9YNLea6rIET2zUOL2IRMspg97M7jWzfWa2bkxbjZk9bmabg2V10G5m9o9mtsXMXjKzxbksPpviMeOq8xr4zca9DKZGwi5HRCRrJtKj/z7w7mPavgQ84e4LgCeCbYBrgAXBzwrgzuyUeWa89+LZ9Ayk+KPufSMiEXLKoHf3p4Bj7w9wHXBfsH4fcP2Y9vs9489AlZnNzlaxubbs3FoqSov4xUu7wy5FRCRrJjtG3+DuuwGCZX3Q3gTsHLNfW9B2HDNbYWatZtba0ZEfX4AWF8VYfuFZPL5ewzciEh3Z/jLWxmkb9/FN7n6Xuy9x9yV1dXVZLmPyrr14Nj2DKf7wioZvRCQaiiZ53F4zm+3uu4OhmdGpKm3AnDH7NQO7plLgmbbsnFqSiRife+B5BoZHaKxKcsvyhVy/aNw/TERE8t5ke/SPAjcF6zcBPxvT/vFg9s0VQNfoEM908dja3QyNOP3DIzjQ3tnPrQ+v5ZHn28MuTURkUiYyvfIB4BlgoZm1mdkngduAd5rZZuCdwTbAY8BrwBbgbuDvclJ1Dt2+ctNxDwvvHx7h9pWbQqpIRGRqTjl04+4fPsGvrh5nXwdunmpRYdrV2X9a7SIi+U5Xxh6jsSp5Wu0iIvlOQX+MW5YvJJmIH9WWTMS5ZfnCkCoSEZmayc66iazR2TW3r9xEe2c/8ZjxP254o2bdiMi0pR79OK5f1MQfv3QVd3zoEkbSTkNladgliYhMmoL+JN5z8WyqyhL832e2h12KiMikKehPojQR598vmcOvN+xld5dm3YjI9KSgP4WPXjGPtDsPPLsj7FJERCZFQX8Kc2rKuGphPf+6aidDqXTY5YiInDYF/QR87M3z2N87yL+tnVa37RERART0E3LlgjoW1Jfzz79/jczFvyIi04eCfgJiMeNv//IcXt7Tw+825ce980VEJkpBP0Hvu7SRxspS7vz9q2GXIiJyWhT0E5SIx/jUX5zNqq0HWbP99bDLERGZMAX9abhx6RyqyhJ857ebwy5FRGTCFPSnoay4iBVXns2TmzpYs/3Y56WLiOQnBf1p+g9vaaG2vIT/9atNmoEjItOCgv40lRUX8dm3n8OzWw/y9BY9QFxE8p+CfhI+fPlcmqqSfHPlJtJp9epFJL8p6CehpCjOF96xgBfbuvjZi3pouIjkNwX9JH1gcTMXN1dy2y9f5tBgKuxyREROSEE/SbGY8dV/dyF7uwf5p99tCbscEZETUtBPwZvmVXPDoibufmor2w8cCrscEZFxKein6EvXnEdxUYy//+k6TbcUkbykoJ+ihopS/us15/H0lv38aE1b2OWIiBxHQZ8Ff710Lktbavjvv9jAvu6BsMsRETmKgj4LYjHjtg9cxEAqzVce0RCOiOQXBX2WnF1Xzn9+1xv49Ya9PLh6Z9jliIgcpqDPok+99Wzeem4tX//5erbs6wm7HBERQEGfVbGY8a2/uoSy4iI+98ALDAyPhF2SiMjUgt7MtpnZWjN7wcxag7YaM3vczDYHy+rslDo91FeU8s0PXczG3d18/efrwy5HRCQrPfq3u/ul7r4k2P4S8IS7LwCeCLYLylXnNfB3bzuHB1bt5IfPbg+7HBEpcLkYurkOuC9Yvw+4Pgfvkfe++K6FvH1hHV97dD2rt+khJSISnqkGvQO/NrM1ZrYiaGtw990AwbJ+i
u8xLcVjxj/cuIjm6jI+84M17DjQF3ZJIlKgphr0y9x9MXANcLOZXTnRA81shZm1mllrR0fHFMvIT5XJBHd/fAmptPPxe59lf+9g2CWJSAGaUtC7+65guQ/4KbAU2GtmswGC5b4THHuXuy9x9yV1dXVTKSOvnVtfzj03Xcae7gH+4/dX65bGInLGTTrozWyGmc0cXQfeBawDHgVuCna7CfjZVIuc7t40r5rvfmQx63d18zf3t9I/pGmXInLmTKVH3wA8bWYvAquAf3P3XwG3Ae80s83AO4Ptgnf1+Q3c/sGLeea1A3zyvtUKexE5Y4ome6C7vwZcMk77AeDqqRQVVe9f3AzAF3/0Ip+8bzX33HQZyeJ4yFWJSNTpytgz7P2Lm/nWX13Cn187wEfveZbXDw2FXZKIRJyCPgQ3LGrmOx9ZzNr2Lj7wf/7EzoOaeikiuTPpoRuZmmsvmk1teQmfum8177/zT3z8ink8uHonuzr7aaxKcsvyhVy/qCnsMkUkAtSjD9HS+TX85DNvYTiV5o7HX6G9sx8H2jv7ufXhtTzyfHvYJYpIBCjoQ7agYSal43wh2z88wu0rN4VQkYhEjYI+D+ztGv/xg7s6+89wJSISRQr6PNBYlRy3vTKZ0GMJRWTKFPR54JblC0kmjh6+iRl09g/zN/e3srtLPXsRmTwFfR64flET//P9F9FUlcSApqokd3zwEr7ynvN5est+3nHH7/mXP25lJK3evYicPsuHoYElS5Z4a2tr2GXkpZ0H+/jKI+v4/SsdXNRUydfedwFvmlcTdlkikgfMbM2Yhz6dkHr0eW5OTRnf/8RlfOcji9jbPcAH7nyGz/xgDdv2Hwq7NBGZJnTB1DRgZrz34kauOq+eu5/ayj8/9Sq/2biXv758Hn/7l+dwVmVp2CWKSB7T0M00tK97gG//5hUeam0jbsaHljTzmbedQ3N1WdilicgZNNGhGwX9NLbjQB93/v5VfrxmJ+5w3aVNfGJZC29sqgy7NBE5AxT0BWRXZz93PfUa/2/1TvqHR1gyr5qb3tLCu994Fom4voYRiSoFfQHq6h/mR607uf+Z7ew42Ef9zBJuWNzEBxc3s6BhZtjliUiWKegL2Eja+d2mfTywagdPbupgJO1cMqeKDy5u4j0XN1IzozjsEkUkCxT0AkBHzyA/e6GdH69p4+U9PcQMLp8/i2suOovlF55FQ4Vm7IhMVwp6OYq7s2F3N79cu4dfrtvNqx2ZefhvmlfNVefVc+WCOi5srCAWs5ArFZGJUtDLSW3e28Ov1u1h5YY9rGvvBmDWjGL+YkEtV76hjmXn1qq3L5LnFPQyYR09g/xhcwdPvdLBU5v3czB4ju3cmjKWzq9haUsNl82voWVWGWbq8YvkCwW9TEo67azf1c2zWw+wautBWre/fjj4a8tLuHROFRc3V3JRcyUXNVVSW14ScsUihUtBL1nh7rza0cuqra/Tuu0gL7Z18tr+Q4z+Z9NYWcpFzZVc2FjJGxpmsvCsmcytKSOusX6RnJto0OteN3JSZsa59TM5t34mH7l8LgA9A8Os39XNuvYuXmrr4qW2Tlau33v4mJKiGOfUlbPwrJksaChnQf1M5teW0VxdRmni+McmikhuKejltM0sTXDF2bO44uxZh9sODabYsq+XTXt72Ly3h017e3nm1QP8dMwDzs2gsTLJvFllzJs1g5ZgObemjKaqJBXJIn0HIJIDCnrJihklRVwyp4pL5lQd1d7VP8yrHb3sONDHtgOH2B4sV67fc3js//BrFMeZXZWksSpJY2UpjVVJZleW0lSV5KzKUupmllBeopOByOlS0EtOVSYTLJ5bzeK51cf9rqt/mB0H+thxsI/dXf20d/azu3OAXV39bNjVzf7eweOOKU3EqC0voW5mCXXB8vB2sF5dlqC6rJiKZELfFYigoJcQVSYTmdk7zePfbXNgeIQ9XZng39M1wP7eQTp6BtnfO0RHzyDbD/QdNSvoWGaZ96guKw6WmfWqsmKqyxJUzSimKpmgvLSIitIiZpYmKC8pYmZpETOKi3TxmESGgl7yVmkiTkvtDFpqZ5x0v+GRNAcPZcK/o3eQzr4hXj80nFn2DfN63xBd/cN09A7yyt5eOvuGODQ0ctLXNIPy4iLKSzPBP/YkMHoiKCuOkzy8jFMW/CQTRUfWi+OUBfuUFMU07CShyFnQm9m7gf8NxIHvufttuXovKWyJeIyGitLTupJ3MDVCV98wXf3DdA+k6B1M0TMwTO9Aip6BzHrPYGa9dyBFz2DmhLHzYB/dAykODaboHz75yeJYMYNk4sjJoTQRo6QocwIoGbteFKM0Mdp+pK2kKB7sd2Tf0f2Ki2Ik4jGK4kZxfPz1RLCu4azwPPJ8O7ev3MSuzn4aq5Lcsnwh1y9qyvn75iTozSwOfBd4J9AGrDazR919Qy7eT+R0lRTFqa+IUz+F2zyk085AaoS+oRH6hzLLvqHUkfXhEfqHUkH7kX36hzNtQ6k0A8MjDKbSDAyn6eofZnA4zWAqzWAq0z44nGYgNUI2L3eJGYdDPzHmBDC6XhSPURw3io75fVHMKIob8VhmPWZGUcyIx4NlzIjb2O3YkfbYkX2KYkYsNv4+R+8XIxaDoljm5BQzgqVhY9ZjRrDMHGfB9lHrwe8txpH1Y14j139tPfJ8O7c+vPZwB6G9s59bH14LkPOwz1WPfimwxd1fAzCzB4HrAAW9REYsZsGwTG5HQN2dVNoPnxQyJ4Dj11PpNEMpZ3gksz6ccoZG0qRG0gyPOMNB2/BI+qj1Y48bSnnm+JHMPr2pFEOpNCNpP/yTOrxMM5KGkXT6cNvYfaYTO3zCOHLiiFnm3/PoiWP0hBAfc3IYbY+ZBfuCceQkE4tltl/e083wyNH/TPqHR7h95aZpG/RNwM4x223A5Tl6L5FIM7PDPevp9PgYdyftkEqnSacJTgo+7gnh2BPF4RPJiJP2zEnIHdLBa46kHXdnJNh2z+yfHt1n7Pp422Ne47j18Y457vUy6yPuh+vKvP+R7cxy9J+DHxfyo3Z19uf830Wugn68v4GO+pRmtgJYATB37twclSEiYcn0fCEeG70aurCvil52229pHyfUG6uSOX/vXD1QtA2YM2a7Gdg1dgd3v8vdl7j7krq6uhyVISKSH25ZvpDkMbcASSbi3LJ8Yc7fO1c9+tXAAjObD7QDNwIfydF7iYjkvdFx+MjMunH3lJl9FlhJ5u+1e919fS7eS0Rkurh+UdMZCfZj5Wy6gLs/BjyWq9cXEZGJydUYvYiI5AkFvYhIxCnoRUQiTkEvIhJxCnoRkYhT0IuIRJyCXkQk4syzef/TyRZh1gFsn+ThtcD+LJaTj6L+GfX5pjd9vvDMc/dT3kMmL4J+Ksys1d2XhF1HLkX9M+rzTW/6fPlPQzciIhGnoBcRibgoBP1dYRdwBkT9M+rzTW/6fHlu2o/Ri4jIyUWhRy8iIicxbYPezO41s31mti7sWnLBzOaY2ZNmttHM1pvZ58OuKZvMrNTMVpnZi8Hn+3rYNeWCmcXN7Hkz+0XYteSCmW0z
s7Vm9oKZtYZdT7aZWZWZ/djMXg7+X3xz2DVNxrQdujGzK4Fe4H53f2PY9WSbmc0GZrv7c2Y2E1gDXO/uG0IuLSvMzIAZ7t5rZgngaeDz7v7nkEvLKjP7T8ASoMLd3xt2PdlmZtuAJe6er/PMp8TM7gP+4O7fM7NioMzdO8Ou63RN2x69uz8FHAy7jlxx993u/lyw3gNsBM78o2lyxDN6g81E8DM9ex0nYGbNwHuA74Vdi5w+M6sArgTuAXD3oekY8jCNg76QmFkLsAh4NtxKsisY1ngB2Ac87u6R+nzAPwD/BUiHXUgOOfBrM1tjZivCLibLzgY6gH8Jht++Z2Yzwi5qMhT0ec7MyoGfAF9w9+6w68kmdx9x90uBZmCpmUVmCM7M3gvsc/c1YdeSY8vcfTFwDXBzMKQaFUXAYuBOd18EHAK+FG5Jk6Ogz2PB2PVPgB+6+8Nh15MrwZ/DvwPeHXIp2bQMeF8whv0gcJWZ/SDckrLP3XcFy33AT4Gl4VaUVW1A25i/NH9MJvinHQV9ngq+rLwH2Oju3wq7nmwzszozqwrWk8A7gJfDrSp73P1Wd2929xbgRuC37v7RkMvKKjObEUwUIBjSeBcQmVlw7r4H2GlmC4Omq4FpORmiKOwCJsvMHgDeBtSaWRvwVXe/J9yqsmoZ8DFgbTCODfBld38sxJqyaTZwn5nFyXQ4HnL3SE5BjLAG4KeZPglFwL+6+6/CLSnrPgf8MJhx8xrwiZDrmZRpO71SREQmRkM3IiIRp6AXEYk4Bb2ISMQp6EVEIk5BLyIScQp6EZGIU9CLiEScgl5EJOL+P7IeihVMYkBBAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "(array([168.62498827, 94.03830275, 36.64656385, 0.97773399]),\n", + " array([168.62498827, 94.04820896, 36.66409252, 1. ]))" + ] + }, + "execution_count": 106, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "x1, x2 = pred.low_bucket.feerate**ALPHA, pred.normal_bucket.feerate**ALPHA\n", + "y1, y2 = pred.low_bucket.estimated_seconds, pred.normal_bucket.estimated_seconds\n", + "b2 = (y1 - y2*x2/x1) / (1 - x1/x2)\n", + "b1 = (y1 - b2) * x1\n", + "def p(ff):\n", + " return b1/ff**ALPHA + b2\n", + "\n", + "plt.figure()\n", + "plt.plot(x, p(x))\n", + "plt.scatter(pred.feerates(), pred.times(), zorder=100)\n", + "plt.show()\n", + "p(pred.feerates()), pred.times()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Challenge: outliers\n", + "\n", + "The segment below illustrates a challenge in the current approach. It is sufficient to add a single outlier \n", + "to the total weight (with `feerate=100`), and the `feerate_to_time` function is notably influenced. In truth, this tx should not affect our prediction because it only captures the first slot of each block, however because we sample with repetition it has a significant impact on the function. The following figure shows the `feerate_to_time` function with such an outlier " + ] + }, + { + "cell_type": "code", + "execution_count": 107, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYAAAAD8CAYAAAB+UHOxAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAG1xJREFUeJzt3X1wHPWd5/H3t+dJD7bkJ/lRfiIxEEwIJF7wHrm7HA6PSWGqNslxt0tce9RRdWGX7N3ebUJSBXck2SN3uZBNEXJFYgdnNxXiImQhVLjgNbAbskAwT+bBGBubYPlRfpJlW5Ilzff+6JY8lmakkZGmx9OfV5Vqun/9m57vyJY++vWve9rcHRERSZ4g7gJERCQeCgARkYRSAIiIJJQCQEQkoRQAIiIJpQAQEUkoBYCISEIpAEREEkoBICKSUOm4CxjJjBkzfNGiRXGXISJyVnnxxRcPuHvLaP2qOgAWLVrExo0b4y5DROSsYma/L6efDgGJiCSUAkBEJKEUACIiCaUAEBFJKAWAiEhCKQBERBJKASAiklA1GQB7Orr49hNb2N5+LO5SRESqVk0GwIHOk3z3yW2803487lJERKpWTQZAXSZ8W929/TFXIiJSvWo0AFIAdCkARERKqskAqM+GAaARgIhIaTUZAAMjAAWAiEhptRkA6fBtdZ3Mx1yJiEj1qskASKcCMinTHICIyAhqMgAAcumUDgGJiIyg7AAws5SZvWxmj0Xri83seTPbamY/M7Ns1J6L1rdF2xcV7OP2qH2LmV093m+mUC4dKABEREYwlhHAF4HNBevfBO5x9yXAYeDmqP1m4LC7fxC4J+qHmV0A3AgsBa4B7jOz1PsrvzQFgIjIyMoKADNrBT4F/DBaN+AK4KGoy1rghmh5ZbROtH1F1H8l8KC797j7DmAbcOl4vIlisulAcwAiIiModwTwHeCvgIHTaqYDR9y9L1pvA+ZFy/OAnQDR9o6o/2B7keeMuzAAdBaQiEgpowaAmX0a2O/uLxY2F+nqo2wb6TmFr3eLmW00s43t7e2jlVdSNqVDQCIiIylnBHA5cL2ZvQs8SHjo5zvAFDNLR31agd3RchswHyDa3gwcKmwv8pxB7n6/uy9z92UtLS1jfkMDMumArpMKABGRUkYNAHe/3d1b3X0R4STuk+7+x8BTwGeibquAR6LlR6N1ou1PurtH7TdGZwktBpYAvxu3dzJETnMAIiIjSo/epaQvAQ+a2deBl4HVUftq4G/NbBvhX/43Arj7G2a2DngT6ANudfcJ+w2tQ0AiIiMbUwC4+9PA09HydoqcxePu3cBnSzz/G8A3xlrkmcjpEJCIyIhq9krgrK4EFhEZUQ0HQEB3n04DFREppWYDIJcO6M87vf0KARGRYmo6AEB3BRMRKaVmAyCb1n2BRURGUrMBMDAC6NZNYUREiqrZAMimdWN4EZGR1HAA6BCQiMhIajYANAksIjIyBYCISELVbABkU+Fb61EAiIgUVbMBoBGAiMjIajYABiaBT+gD4UREiqrZAKjLRKeBKgBERIqq+QA43qMAEBEppmYDIBUY6cA40ds3emcRkQSq2QCAcB7ghEYAIiJF1XYApAKOn9QIQESkmJoOgExKIwARkVJqOgDSKdMIQESkhJoPAF0HICJSXE0HQCYION6jEYCISDG1HQBpBYCISCm1HQAp47gOAYmIFFXTAZBNBZzQJLCISFE1HQCZVEB3b5583uMuRUSk6tR8AIA+ElpEpJgaDwAD0LUAIiJF1HQADNwVTFcDi4gMV9MBkIluCqMRgIjIcLUdACndFUxEpJQaD4BoDkAXg4mIDFPjAaARgIhIKTUdAAOTwBoBiIgMV9MBoBGAiEhpNR4Aug5ARKSUmg6AVGAEpkNAIiLFjBoAZlZnZr8zs1fN7A0z+x9R+2Ize97MtprZz8wsG7XnovVt0fZFBfu6PWrfYmZXT9SbKng9cukUx7oVACIiQ5UzAugBrnD3jwAXA9eY2XLgm8A97r
4EOAzcHPW/GTjs7h8E7on6YWYXADcCS4FrgPvMLDWeb6aYXDqgUwEgIjLMqAHgoWPRaib6cuAK4KGofS1wQ7S8Mlon2r7CzCxqf9Dde9x9B7ANuHRc3sUIMumATh0CEhEZpqw5ADNLmdkrwH5gPfAOcMTdB36ztgHzouV5wE6AaHsHML2wvchzJkw2FdDZ3TvRLyMictYpKwDcvd/dLwZaCf9q/1CxbtGjldhWqv00ZnaLmW00s43t7e3llDeibDrgaJdGACIiQ43pLCB3PwI8DSwHpphZOtrUCuyOltuA+QDR9mbgUGF7kecUvsb97r7M3Ze1tLSMpbyisqmAzh6NAEREhirnLKAWM5sSLdcDnwQ2A08Bn4m6rQIeiZYfjdaJtj/p7h613xidJbQYWAL8brzeSCnZdKCzgEREikiP3oU5wNrojJ0AWOfuj5nZm8CDZvZ14GVgddR/NfC3ZraN8C//GwHc/Q0zWwe8CfQBt7r7hF+iO3AWkLsTzkWLiAiUEQDuvgm4pEj7doqcxePu3cBnS+zrG8A3xl7mmcumA/ryTk9fnrrMhJ91KiJy1qjpK4EhDABA1wKIiAxR8wGQGwwATQSLiBSq+QD4w+NP8kz2NhZ/rxXuuRA2rYu7JBGRqlDOJPBZ67z9j/PJfd8iG/SEDR074Ze3hcsXfS6+wkREqkBNjwA+/t59ZL3n9MbeLthwVzwFiYhUkZoOgMk9+4pv6GirbCEiIlWopgOgMzer+Ibm1soWIiJShWo6AJ5Z8AV6g7rTGzP1sOKOeAoSEakiNT0JvGXmtQB85O3vMtcOYs2t4S9/TQCLiNR2AEAYAl/aej7XXTiHb37morjLERGpGjV9CGiAPhFURGS4RARALh3Q0aUAEBEplIwAyKQ4fFwBICJSKBEBUJcOOHLiZNxliIhUlUQEQC6T0iEgEZEhEhEAdZmA4yf76e3Px12KiEjVSEYApMMbwWgUICJySjICILoT2JETCgARkQEJCYDwbXZ0aSJYRGRAIgIgpxGAiMgwiQiAuui2kAoAEZFTkhEAAyMATQKLiAxKRADk0gEGdOhiMBGRQYkIADOjLpPSCEBEpEAiAgCgPpPSHICISIHEBEAuE2gEICJSIDEBkE0FHDmuOQARkQGJCYBcJuCQJoFFRAYlJgAasmkOawQgIjIoMQFQn01x/GQ/3b39cZciIlIVEhMADdHFYAc1ChARAZIUANkoAI71xFyJiEh1SEwA1Gc1AhARKZScABg4BHRMASAiAgkKgIZsGtAhIBGRAYkJgEzKSAemQ0AiIpHEBICZ0ZhLc0AjABERoIwAMLP5ZvaUmW02szfM7ItR+zQzW29mW6PHqVG7mdl3zWybmW0ys48W7GtV1H+rma2auLdVXH0mpTkAEZFIOSOAPuAv3f1DwHLgVjO7APgysMHdlwAbonWAa4El0dctwPchDAzgTuAy4FLgzoHQqJS6TKARgIhIZNQAcPc97v5StNwJbAbmASuBtVG3tcAN0fJK4Mceeg6YYmZzgKuB9e5+yN0PA+uBa8b13YyiPptSAIiIRMY0B2Bmi4BLgOeBWe6+B8KQAGZG3eYBOwue1ha1lWof+hq3mNlGM9vY3t4+lvJG1ZBJc+j4Sdx9XPcrInI2KjsAzGwS8HPgL9z96Ehdi7T5CO2nN7jf7+7L3H1ZS0tLueWVpT6borff6ezpG9f9ioicjcoKADPLEP7y/4m7Pxw174sO7RA97o/a24D5BU9vBXaP0F4xjbnwYrD9R7sr+bIiIlWpnLOADFgNbHb3bxdsehQYOJNnFfBIQfvno7OBlgMd0SGiXwNXmdnUaPL3qqitYiblwovB9h3VPICISLqMPpcDNwGvmdkrUdtXgLuBdWZ2M/Ae8Nlo26+A64BtwAngTwHc/ZCZfQ14Iep3l7sfGpd3UabGKAD2dmgEICIyagC4+zMUP34PsKJIfwduLbGvNcCasRQ4ngZHAJ0KABGRxFwJDJBJBdRlAvZpBCAikqwAgHAUoDkAEZEEBkBDNs1enQUkIpK8AGjMpRQAIiIkMQCyado7e8jndTWwiCRb4gJgUi5Nf951XwARSbzkBUDdwMVgOgwkIsmWuABozOpiMBERSGAATI5GALuOdMVciYhIvBIXAA3ZFOnAaDt8Iu5SRERilbgAMDOa6zO0HdYIQESSLXEBAOFE8E6NAEQk4RIZAE11GdoOaQQgIsmW0ABIc6Srl2O6M5iIJFgiA2BF7z/yTPY2Gv/nDLjnQti0Lu6SREQqrpwbwtSU8/Y/zifb/w/ZIPpE0I6d8MvbwuWLPhdfYSIiFZa4EcDH37uPrA/5OOjeLthwVzwFiYjEJHEBMLlnX/ENHW2VLUREJGaJC4DO3KziG5pbK1uIiEjMEhcAzyz4Ar1B3emNmXpYcUc8BYmIxCRxk8BbZl4LwLJ37mVGfzs2ZR624k5NAItI4iQuACAMgV/0Xc76zft46qZPsHhGY9wliYhUXOIOAQ2Y0pABYMeBYzFXIiISj8QGwNTGLADb24/HXImISDwSGwD1mRT1mRTbDygARCSZEhsAEB4G2qERgIgkVOIDYOv+zrjLEBGJRaIDYMakHAeOneTgsZ7RO4uI1JjEBwDAlr0aBYhI8iQ8AMIzgTYrAEQkgRIdAA3ZNJNyad7aczTuUkREKi7RAQAwrTHLZgWAiCRQ4gNgxqQsW/cfo68/H3cpIiIVpQCYlKOnL8+7B3U9gIgkS+IDoGVyeCbQpraOmCsREamsxAfAtMYs2XTAKzuPxF2KiEhFJT4AAjNmTc7x8nsKABFJllEDwMzWmNl+M3u9oG2ama03s63R49So3czsu2a2zcw2mdlHC56zKuq/1cxWTczbOTMzm+rYvOco3b39cZciIlIx5YwAHgCuGdL2ZWCDuy8BNkTrANcCS6KvW4DvQxgYwJ3AZcClwJ0DoVENZjfV0Zd33tit00FFJDlGDQB3/yfg0JDmlcDaaHktcENB+4899BwwxczmAFcD6939kLsfBtYzPFRiM7s5vEfwq5oHEJEEOdM5gFnuvgcgepwZtc8Ddhb0a4vaSrUPY2a3mNlGM9vY3t5+huWNzaRcmqa6NC/+/nBFXk9EpBqM9ySwFWnzEdqHN7rf7+7L3H1ZS0vLuBY3krlT6vnndw7gXrQsEZGac6YBsC86tEP0uD9qbwPmF/RrBXaP0F41WqfWc/hEL2/v0z2CRSQZzjQAHgUGzuRZBTxS0P756Gyg5UBHdIjo18BVZjY1mvy9KmqrGvOnNgDw7DsHYq5ERKQyyjkN9KfAs8B5ZtZmZjcDdwNXmtlW4MpoHeBXwHZgG/AD4AsA7n4I+BrwQvR1V9RWNZrqMzTXZ3h2+8G4SxERqYj0aB3c/d+V2LSiSF8Hbi2xnzXAmjFVV2HzptTz7DsH6c87qaDYtIWISO1I/JXAhRZOb+Bodx8vvaezgUSk9ikACiyc3kBg8A+b98VdiojIhFMAFMilU7RObeAf3lQAiEjtUwAMsXhGI++0H+fdA7o/gIjUNgXAEItnNALw6zf2xlyJiMjEUgAM0
VyfYXZTHX//yq64SxERmVAKgCLOmz2ZzXs62bqvM+5SREQmjAKgiCUzJxEYGgWISE1TABTRmEszf1oDD7+0i/68PhxORGqTAqCEpXOa2NPRzVNv7R+9s4jIWUgBUMI5LZOYXJdm7bPvxl2KiMiEUACUkAqMC+c285utB9jero+IFpHaowAYwdK5TaQC4we/2R53KSIi404BMILGXJqlc5pYt7GNtsMn4i5HRGRcKQBGsWzRVADue/qdmCsRERlfCoBRTK7LcMGcJta9sJMd+nwgEakhCoAyXLZ4GqnA+Npjb8ZdiojIuFEAlKExl+YPFk3jybf28/QWXRcgIrVBAVCmi+dPYVpjlq/+4nU6u3vjLkdE5H1TAJQpFRgrzp/J7o4uvv7Y5rjLERF53xQAYzB3Sj0fWzCVn23cyeOv7Ym7HBGR90UBMEbLz5nOnOY6/su6V9myVx8XLSJnLwXAGKUC47oL55AKjP/4440cONYTd0kiImdEAXAGJtWlue7Ds9nT0cVNq5+no0uTwiJy9lEAnKE5zfV86sNzeHvfMT6/5nmOnDgZd0kiImOiAHgfFk5v5NoLZ/P6rqP80ff/mV1HuuIuSUSkbAqA9+kDLZO44eK57Drcxcp7n+G57QfjLklEpCwKgHHQOrWBz3yslbzDv//Bc9z75Fb6+vNxlyUiMiIFwDiZPinHv102nyUzJ/GtJ97mhu/9ltd3dcRdlohISQqAcZRNB1y9dDbXXTibHQePc/29z/CVX7zG3o7uuEsTERkmHXcBtcbMWDJrMvOnNfDc9oP87IWd/PzFNv5k+UL+9PJFtE5tiLtEERFAATBh6jIpPnHeTC5ZMJXntx/kR7/dwY9+u4Orl87mpuULWX7OdILA4i5TRBJMATDBmuszXLV0Nss/MJ1NbR08vaWdx1/fy6ymHNd/ZC6fvmguH57XrDAQkYpTAFRIU12Gj39wBpctnsaOA8fZsreTNb99lx/8ZgfTGrN84rwWPnHeTJYvnsbMprq4yxWRBFAAVFgmFXDurMmcO2syXb39/P7Acd49eILHX9vLwy/tAmDelHqWLZrKxxZOZencJs6dNZnJdZnRd75pHWy4CzraoLkVVtwBF31ugt+RiJytFAAxqs+kOH9OE+fPaSLvzv6jPezu6GJPRzcbNu/nkVd2D/adO6WOC+Y0sWTWZBZOa2DB9AYWTGtgTnM9qcDCX/6/vA16o6uRO3aG66AQEJGiKh4AZnYN8DdACvihu99d6RqqUWDG7OY6ZjeHh3/cnc7uPg4c6+HA8ZMc7OzhlZ1HePKt/eT91PPSgTFvaj0PdX+Vlv4hH0XR20XvE/+dYx+4gab6TBgUIiKRigaAmaWA7wFXAm3AC2b2qLvrbutDmBlN9Rma6jOc03KqPZ93Onv66OjqHfw62tXL9P72ovtJde7mkq+tB2ByXZop9RmmNmaZ0pBlSn2G5voMjbk0jdkUDbk0k3IpGrJpGqPHSbk0DdkUjbk0uXRALp0ilw40aS0ynmI6fFvpEcClwDZ33w5gZg8CKwEFQJmCwGiOfnEXOrZxFk09e4f1P5xp4V8vbKG7tz/86svT0dVLe2cP3b399PTlOdmXp69wWFGGdGDk0gHZgVDIBNSlU2TTAXWZU0GRSQWkU0Y6MNKpgEzKSAVGOggG28JHI5MKom02ZFv4PDMjZUZg4fchKLKcsqhfEK4XLod9jCA41S8wou0W7SfsZwbGwGO4n/AxbMcouc2ibBxYH6hjsI8pPKVAjIdvKx0A84CdBettwGUVrqEmPbPgC1z5zl+TyZ+66rg3qOP5xX/GxTOnjPr8/rzT25+ntz8MhN7+gvX+PL19Tl8+T3/e6cv7aY/hcp6+fJ7u7n4OnzjVnncn7+Ehrf684w79HrXno+W8M7b4qQ3DAoXSITIQNhQJplP9TgXN4GsMyZrTVodsLPW80/c4dNuphbL6FWkofN5I9RYG59D9lVvvSO2l9j+8pvLqLedFDVh9+CvMzA8/fMuGu2ouAIp9f0772TezW4BbABYsWHDGL7Rs0TQ+tnDqGT//7LMEXpuDFwwj0yvu4NoPf5Zr4y6tDPkoUMIgcfr6nb7+aDkKJ3fIFwTJwPJgwERhkx8MnjBk8gXbfOhzouXB/gXPdwdn4BHcgdPWfbB9YJ3B9VP98vni7YXrlNjf4OuOsM0JGwtrCCs9XcGmItuKR/DQ5sKoPm1/w/qV3reXWBn6Z0Cpesutafhrjf17M1Ltw75lJb6HpV5zQMvBA8U3dLSNuL/xUOkAaAPmF6y3ArsLO7j7/cD9AMuWLXtffxgmbqh90efO2jN+UikjlYIcqbhLEamse1rDwz5DNbdO+EtX+sPgXgCWmNliM8sCNwKPVrgGEZHqseIOyNSf3papD9snWEVHAO7eZ2Z/Bvya8DTQNe7+RiVrEBGpKgOj9gScBYS7/wr4VaVfV0SkasV0+Fb3AxARSSgFgIhIQikAREQSSgEgIpJQCgARkYRSAIiIJJQCQEQkoRQAIiIJZaU+BKoamFk78Pu464jMAEp8alNVUH1nrpprg+qur5prg+qubyJrW+juLaN1quoAqCZmttHdl8VdRymq78xVc21Q3fVVc21Q3fVVQ206BCQiklAKABGRhFIAlO/+uAsYheo7c9VcG1R3fdVcG1R3fbHXpjkAEZGE0ghARCShFACjMLP5ZvaUmW02szfM7Itx1zSUmaXM7GUzeyzuWoYysylm9pCZvRV9D/8w7poKmdl/jv5dXzezn5pZXYy1rDGz/Wb2ekHbNDNbb2Zbo8fYbnRdor7/Hf3bbjKzX5jZlGqqr2DbfzUzN7MZ1VSbmf25mW2J/g/+r0rXpQAYXR/wl+7+IWA5cKuZXRBzTUN9EdgcdxEl/A3w/9z9fOAjVFGdZjYPuA1Y5u4XEt6l7sYYS3oAuGZI25eBDe6+BNgQrcflAYbXtx640N0vAt4Gbq90UQUeYHh9mNl84ErgvUoXVOABhtRmZv8GWAlc5O5LgW9VuigFwCjcfY+7vxQtdxL+ApsXb1WnmFkr8Cngh3HXMpSZNQH/ClgN4O4n3f1IvFUNkwbqzSwNNAC74yrE3f8JODSkeSWwNlpeC9xQ0aIKFKvP3Z9w975o9Tlg4u9kXkKJ7x/APcBfAbFNeJao7T8Bd7t7T9Rnf6XrUgCMgZktAi4Bno+3ktN8h/A/dz7uQoo4B2gHfhQdovqhmTXGXdQAd99F+FfXe8AeoMPdn4i3qmFmufseCP8YAWbGXM9I/gPweNxFFDKz64Fd7v5q3LUUcS7wL83seTP7RzP7g0oXoAAok5lNAn4O/IW7H427HgAz+zSw391fjLuWEtLAR4Hvu/slwHHiPYRxmuh4+kpgMTAXaDSzP4m3qrOTmX2V8HDpT+KuZYCZNQBfBe6Iu5YS0sBUwkPL/w1YZ2ZWyQIUAGUwswzhL/+fuPvDcddT4HLgejN7F3gQuMLM/i7ekk7TBrS5+8CI6SHCQKgWnwR2uHu7u/cCDwP/IuaahtpnZnMAoseKHyYYjZmtAj4N/LFX13nlHyAM91ej
n5FW4CUzmx1rVae0AQ976HeEo/iKTlIrAEYRJfJqYLO7fzvuegq5++3u3uruiwgnL59096r5C9bd9wI7zey8qGkF8GaMJQ31HrDczBqif+cVVNEkdeRRYFW0vAp4JMZahjGza4AvAde7+4m46ynk7q+5+0x3XxT9jLQBH43+X1aDvweuADCzc4EsFf7gOgXA6C4HbiL86/qV6Ou6uIs6i/w58BMz2wRcDPx1zPUMikYmDwEvAa8R/jzEdnWmmf0UeBY4z8zazOxm4G7gSjPbSngmy91VVt+9wGRgffSz8X+rrL6qUKK2NcA50amhDwKrKj2C0pXAIiIJpRGAiEhCKQBERBJKASAiklAKABGRhFIAiIgklAJARCShFAAiIgmlABARSaj/D6Gz7+yGqKSPAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "Feerates:\t1.1539704395225572, 1.4115360845240776, 4.139754128892224, 16.2278954349457 \n", + "Times:\t\t2769.889957638353, 1513.4606486459202, 60.00000000000002, 1.0000000000000007" + ] + }, + "execution_count": 107, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "estimator = FeerateEstimator(total_weight=total_weight + 100**ALPHA, \n", + " inclusion_interval=avg_mass/network_mass_rate)\n", + "\n", + "pred = estimator.calc_estimations()\n", + "x = np.linspace(1, pred.priority_bucket.feerate, 100000)\n", + "y = estimator.feerate_to_time(x)\n", + "plt.figure()\n", + "plt.plot(x, y)\n", + "plt.fill_between(x, estimator.inclusion_interval, y2=y, alpha=0.5)\n", + "plt.scatter(pred.feerates(), pred.times(), zorder=100)\n", + "plt.show()\n", + "pred" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Outliers: solution\n", + "\n", + "Compute the estimator conditioned on the event the top most transaction captures the first slot. This decreases `total_weight` on the one hand (thus increasing `p`), while increasing `inclusion_interval` on the other, by capturing a block slot. If this estimator gives lower prediction times we switch to it, and then repeat the process with the next highest transaction. The process converges when the estimator is no longer improving or if all block slots are captured. " + ] + }, + { + "cell_type": "code", + "execution_count": 108, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYAAAAD8CAYAAAB+UHOxAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAHmVJREFUeJzt3X2QXXWd5/H39z72U/op6TzQnZggEVREwR7Ah3IdIw5RxzCzYuG4Gl1qU7MwPoyzpehUyYxTM+PUuAO6A9REQeMui7DImjgLg5mAhSgBwlMMhJAQIOkkJB066Tx2p/ve7/5xTie307e7k7597+m+5/OqunXP+Z3fOed7fehPfufR3B0REYmfRNQFiIhINBQAIiIxpQAQEYkpBYCISEwpAEREYkoBICISUwoAEZGYGjcAzOwOM9tnZpuKLPtvZuZmNiucNzP7vpltM7ONZnZJQd/lZrY1/Cyf3J8hIiJn60xGAD8Grjy90czmA1cAOwqalwKLw88K4LawbytwI3AZcClwo5m1lFK4iIiUJjVeB3d/xMwWFll0E/A1YHVB2zLgJx7cXrzezJrNbB7wQWCtu/cAmNlaglC5a6x9z5o1yxcuLLZrEREZzVNPPbXf3dvG6zduABRjZp8Adrn7c2ZWuKgd2Fkw3xW2jdY+poULF7Jhw4aJlCgiEltm9tqZ9DvrADCzOuAvgY8UW1ykzcdoL7b9FQSHj1iwYMHZliciImdoIlcBvRlYBDxnZq8CHcDTZjaX4F/28wv6dgC7x2gfwd1Xununu3e2tY07ghERkQk66wBw99+5+2x3X+juCwn+uF/i7q8Da4DPhVcDXQ70uvse4EHgI2bWEp78/UjYJiIiETmTy0DvAh4DzjezLjO7dozu9wPbgW3AD4DrAMKTv38DPBl+vj10QlhERKJhU/l9AJ2dna6TwCIiZ8fMnnL3zvH66U5gEZGYUgCIiMRUVQbA4b4Bblr7Es/uPBh1KSIiU1ZVBkAu73xv3Vaefu1A1KWIiExZVRkA9dng/rYj/YMRVyIiMnVVZQCkkwlq0gkFgIjIGKoyACAYBRzuUwCIiIymegMgk+Jw30DUZYiITFlVGwC1maQOAYmIjKFqA6Auk+SIDgGJiIyqqgPgkA4BiYiMqooDQCeBRUTGUsUBoHMAIiJjqeoAONo/yFR+2qmISJSqNgDqsynyDsdO5KIuRURkSqraAKjLJAE9DkJEZDRVGwD1meB5QDoRLCJSXNUGgEYAIiJjq9oAGHoiqB4HISJSXNUGwMkRgA4BiYgUNW4AmNkdZrbPzDYVtP2jmb1oZhvN7P+aWXPBsm+Y2TYz22Jmf1DQfmXYts3Mbpj8nzJc3dA5AB0CEhEp6kxGAD8GrjytbS1wobtfBLwEfAPAzN4GXAO8PVznVjNLmlkSuAVYCrwN+HTYt2zqssEIQCeBRUSKGzcA3P0RoOe0tl+6+9Bf1vVARzi9DPipu/e7+yvANuDS8LPN3be7+wngp2Hfshm6CkiHgEREipuMcwD/GXggnG4HdhYs6wrbRmsvm2TCSCeNI/06CSwiUkxJAWBmfwkMAncONRXp5mO0F9vmCjPbYGYburu7SymPbErPAxIRGc2EA8DMlgMfBz7jpx640wXML+jWAeweo30Ed1/p7p3u3tnW1jbR8gDIpBIc0iEgEZGiJhQAZnYl8HXgE+5+rGDRGuAaM8ua2SJgMfAE8CSw2MwWmVmG4ETxmtJKH186aToHICIyitR4HczsLuCDwCwz6wJuJLjqJwusNTOA9e7+p+7+vJndA7xAcGjoenfPhdv5M+BBIAnc4e7Pl+H3DJNJJjh0XOcARESKGTcA3P3TRZpvH6P/3wJ/W6T9fuD+s6quRNl0kl4FgIhIUVV7JzBATSqhABARGUVVB0A2HbwXWC+FEREZqboDIJVgIOf0D
eSjLkVEZMqp6gCoSQWPg9BhIBGRkao6ALLp4OcpAERERqruAEgpAERERlPVAVCT1iEgEZHRVHUAaAQgIjK6qg4AjQBEREZX1QGQ0QhARGRUVR0ACTNq0noekIhIMVUdABC8E0ABICIyUgwCQM8DEhEpRgEgIhJTVR8AmVSCg8cUACIip6v6AKjROwFERIqq+gDIphIc6lMAiIicrvoDIJ2kfzBP30Au6lJERKaUqg+AmvBmMF0KKiIyXNUHQG34OIgDOhEsIjLMuAFgZneY2T4z21TQ1mpma81sa/jdErabmX3fzLaZ2UYzu6RgneVh/61mtrw8P2ekoecB9Rw9UaldiohMC2cyAvgxcOVpbTcA69x9MbAunAdYCiwOPyuA2yAIDOBG4DLgUuDGodAot9rM0AhAASAiUmjcAHD3R4Ce05qXAavC6VXAVQXtP/HAeqDZzOYBfwCsdfcedz8ArGVkqJRFrUYAIiJFTfQcwBx33wMQfs8O29uBnQX9usK20drLbugQ0AEFgIjIMJN9EtiKtPkY7SM3YLbCzDaY2Ybu7u6SC0omjGwqQY8OAYmIDDPRANgbHtoh/N4XtncB8wv6dQC7x2gfwd1Xununu3e2tbVNsLzh6jJJjQBERE4z0QBYAwxdybMcWF3Q/rnwaqDLgd7wENGDwEfMrCU8+fuRsK0iatJJenQZqIjIMKnxOpjZXcAHgVlm1kVwNc93gHvM7FpgB3B12P1+4KPANuAY8AUAd+8xs78Bngz7fdvdTz+xXDbZVIKeo/2V2p2IyLQwbgC4+6dHWbSkSF8Hrh9lO3cAd5xVdZOkNp3UVUAiIqep+juBIbgX4MBRHQISESkUiwCoSSc5PpDTA+FERArEIgB0M5iIyEjxCICMAkBE5HSxCICTdwPrZjARkZNiEQA6BCQiMlKsAkB3A4uInBKLAMimExgaAYiIFIpFACTMqMsm6T6iABARGRKLAACoy6TYf0SPgxARGRKLADh/3wP8YvBP+ZftH4abLoSN90RdkohI5MZ9FtB0d/6+B7ji5b8j7X1BQ+9O+MWXgumLPhVdYSIiEav6EcD7d9xKOt83vHHgOKz7djQFiYhMEVUfADP69xZf0NtV2UJERKaYqg+Aw9k5xRc0dVS2EBGRKabqA+DRBdcxkKgZ3piuhSXfiqYgEZEpoupPAm+ZvRSA97x6C00n9tFXN4+6pX+tE8AiEntVPwKAIARue9dqzu2/k7vff7/++IuIEJMAAKhJJ0gYdB/WzWAiIhCjADAzGrK6G1hEZEhJAWBmf25mz5vZJjO7y8xqzGyRmT1uZlvN7G4zy4R9s+H8tnD5wsn4AWejNpPUCEBEJDThADCzduBLQKe7XwgkgWuAfwBucvfFwAHg2nCVa4ED7n4ecFPYr6Jq00n2KQBERIDSDwGlgFozSwF1wB7gQ8C94fJVwFXh9LJwnnD5EjOzEvd/VuoyKQWAiEhowgHg7ruA7wI7CP7w9wJPAQfdfTDs1gW0h9PtwM5w3cGw/8yJ7n8iGrIpeo6cYDCXr+RuRUSmpFIOAbUQ/Kt+EXAOUA8sLdLVh1YZY1nhdleY2QYz29Dd3T3R8oqqzybJubNf7wUQESnpENCHgVfcvdvdB4D7gPcCzeEhIYAOYHc43QXMBwiXNwE9p2/U3Ve6e6e7d7a1tZVQ3kgNNUFZe3qPT+p2RUSmo1ICYAdwuZnVhcfylwAvAA8Dnwz7LAdWh9NrwnnC5Q+5+4gRQDnNyKYBeL23b5yeIiLVr5RzAI8TnMx9GvhduK2VwNeBr5rZNoJj/LeHq9wOzAzbvwrcUELdE9KQHRoBKABEREp6FpC73wjceFrzduDSIn37gKtL2V+patIJUglj7yEFgIhIbO4EhuBu4Bk1KY0ARESIWQAA1GdTOgksIkJsA0AjABGR2AVAQzbF3kN95PMVvQBJRGTKiWUADOScnmO6GUxE4i2WAQC6F0BEJH4BUKN7AUREIIYBMCMcAew+qCuBRCTeYhcAdZkkqYTRdeBY1KWIiEQqdgFgZjTVptnZoxGAiMRb7AIAgvMAO3o0AhCReItlADTWpNmpQ0AiEnOxDICm2jSH+wbpPT4QdSkiIpGJZQA0hpeC6kSwiMRZPAOgNngxjE4Ei0icxToANAIQkTiLZQDUpBJkUwm6DmgEICLxFcsAMLPgSiBdCioiMRbLAACYoXsBRCTmYhsATXVpdvQc03sBRCS2SgoAM2s2s3vN7EUz22xm7zGzVjNba2Zbw++WsK+Z2ffNbJuZbTSzSybnJ0xMS22G/sE8e/SCeBGJqVJHAN8D/s3dLwDeCWwGbgDWuftiYF04D7AUWBx+VgC3lbjvkjTXBVcCvdJ9NMoyREQiM+EAMLNG4APA7QDufsLdDwLLgFVht1XAVeH0MuAnHlgPNJvZvAlXXqKW+gwAr+w/ElUJIiKRKmUEcC7QDfzIzJ4xsx+aWT0wx933AITfs8P+7cDOgvW7wrZI1GeSZJIJXtYIQERiqpQASAGXALe5+8XAUU4d7inGirSNOANrZivMbIOZbeju7i6hvLGZGS31aV7ZrwAQkXgqJQC6gC53fzycv5cgEPYOHdoJv/cV9J9fsH4HsPv0jbr7SnfvdPfOtra2EsobX1NNmu3dOgQkIvE04QBw99eBnWZ2fti0BHgBWAMsD9uWA6vD6TXA58KrgS4HeocOFUWluT7DroPH6R/MRVmGiEgkUiWu/0XgTjPLANuBLxCEyj1mdi2wA7g67Hs/8FFgG3As7Buplro0eYedPcc4b/aMqMsREamokgLA3Z8FOossWlKkrwPXl7K/ydZcF1wJ9HL3UQWAiMRObO8EBmgNA2DbPp0HEJH4iXUAZFIJmmrTvPj64ahLERGpuFgHAEBrfYYX9xyKugwRkYqLfQDMrM+wff9RTgzmoy5FRKSiFAANGXJ5Z7seCSEiMRP7AJjVkAVgi84DiEjMxD4AWuoyJEwBICLxE/sASCaM1vqMAkBEYif2AQDBlUAv6EogEYkZBQCwzH7D/zm+Av+rZrjpQth4T9QliYiUXanPApr2zt/3AB/uvZlMoj9o6N0Jv/hSMH3Rp6IrTESkzGI/Anj/jlvJeP/wxoHjsO7b0RQkIlIhsQ+AGf17iy/o7apsISIiFRb7ADicnVN8QVNHZQsREamw2AfAowuuYyBRM7wxXQtLvhVNQSIiFRL7k8BbZi8F4PJXbqF5YB/99fOovfKvdQJYRKpe7EcAEITAynev5s39d/Ivl6zWH38RiQUFQCibSjKrIcNTrx2IuhQRkYpQABSY11TLU68dYDCnR0OLSPVTABQ4p7mWYydyekOYiMRCyQFgZkkze8bM/jWcX2Rmj5vZVjO728wyYXs2nN8WLl9Y6r4n2znNwdVAT77aE3ElIiLlNxkjgC8Dmwvm/wG4yd0XAweAa8P2a4ED7n4ecFPYb0qZUZOmqTbNhld1HkBEql9JAWBmHcDHgB+G8wZ8CLg37LIKuCqcXhbOEy5fEvafUuY21vDEKz24e9SliIiUVakjgJuBrwFDZ01nAgfdfTCc
7wLaw+l2YCdAuLw37D+lnNNcQ/eRfl5941jUpYiIlNWEA8DMPg7sc/enCpuLdPUzWFa43RVmtsHMNnR3d0+0vAmb31oHwKNbK79vEZFKKmUE8D7gE2b2KvBTgkM/NwPNZjZ0h3EHsDuc7gLmA4TLm4ARZ1vdfaW7d7p7Z1tbWwnlTUxzbXAe4JGt+yu+bxGRSppwALj7N9y9w90XAtcAD7n7Z4CHgU+G3ZYDq8PpNeE84fKHfAoeaDcz5rfU8tjLbzCg+wFEpIqV4z6ArwNfNbNtBMf4bw/bbwdmhu1fBW4ow74nxYLWOo70D/LczoNRlyIiUjaT8jA4d/8V8KtwejtwaZE+fcDVk7G/cpvfWocZPLJ1P50LW6MuR0SkLHQncBE16SRzG2v41ZZ9UZciIlI2CoBRLJxZz8auXl7v7Yu6FBGRslAAjOLNbfUArN08yisjRUSmOQXAKFrrM7TWpXlw0+tRlyIiUhYKgFGYGYvaGnhs+xv0Hh+IuhwRkUmnABjDm9vqyeWdh17UYSARqT4KgDHMbayhsSbF6md2j99ZRGSaUQCMwcx4y5wZ/HrrfroP90ddjojIpFIAjOOCuTPIufOL5zQKEJHqogAYx8yGLHMas9z3dFfUpYiITCoFwBl4y5wZbNp9iC16V7CIVBEFwBl469xGUgnjf65/NepSREQmjQLgDNRmkiye3cB9T+/icJ/uCRCR6qAAOEMXdTRz7ESO+57eFXUpIiKTQgFwhuY21TC3sYZVv32VfH7KvcdGROSsKQDOwjvnN7F9/1E9IE5EqoIC4Cy8ZfYMmuvS/PND25iCb7MUETkrCoCzkEgY717Qwu929fJrvTReRKY5BcBZumDeDGbUpLj531/SKEBEpjUFwFlKJRL83sJWnt5xkAef17kAEZm+JhwAZjbfzB42s81m9ryZfTlsbzWztWa2NfxuCdvNzL5vZtvMbKOZXTJZP6LS3j6vkZn1Gf7+gc0M5PJRlyMiMiGljAAGgb9w97cClwPXm9nbgBuAde6+GFgXzgMsBRaHnxXAbSXsO1KJhPHe82by2hvHuHP9a1GXIyIyIRMOAHff4+5Ph9OHgc1AO7AMWBV2WwVcFU4vA37igfVAs5nNm3DlEVs0s54FrXV895cvsfeQXhwvItPPpJwDMLOFwMXA48Acd98DQUgAs8Nu7cDOgtW6wrZpycz4/fPb6BvI8Vdrno+6HBGRs1ZyAJhZA/Az4CvufmisrkXaRlxGY2YrzGyDmW3o7u4utbyyaq7LcOmiVh7Y9DoPPq+Xx4vI9FJSAJhZmuCP/53ufl/YvHfo0E74vS9s7wLmF6zeAYx4y4q7r3T3TnfvbGtrK6W8irhkQQttM7Lc8LON7NOhIBGZRkq5CsiA24HN7v5PBYvWAMvD6eXA6oL2z4VXA10O9A4dKprOkgnjyrfP5Uj/IF+5+1k9J0hEpo1SRgDvAz4LfMjMng0/HwW+A1xhZluBK8J5gPuB7cA24AfAdSXse0pprc/wgcVt/PblN7jl4W1RlyMickZSE13R3R+l+HF9gCVF+jtw/UT3N9W9/ZxGdh08zn9f+xKL5zRw5YXT9gInEYkJ3Qk8ScyMJRfMZl5TDV+5+1k27eqNuiQRkTEpACZRKpngY++YRyaV4HN3PMHL3UeiLklEZFQKgElWn01x1Tvb6R/I8Sc/WM+ON45FXZKISFEKgDJoqc9w1cXtHDo+yKdWPsa2fYejLklEZAQFQJnMasjyRxe3c7hvgP9422M89dqBqEsSERlGAVBGbTOyXP3u+SQM/uQH6/n5M3qhvIhMHQqAMmuqTfPJd3cwqyHLV+5+lhtXb+LEoB4hLSLRUwBUQF0mxR9d3M7FC5pZ9dhr/PGtv2HL6zovICLRUgBUSDJhfGBxGx97xzy27z/Kx//Hr7nl4W16oYyIREYBUGHnzW7gM5ctYOHMev7xwS1cefMj/GrLvvFXFBGZZAqACNRlUnz0HfP4w4vm0XP0BJ//0ZN8/o4n2Nh1MOrSRCRGJvwsICnduW0NLJhZx3M7e1n/yht84p9/w++f38YXlyzmkgUtUZcnIlVOARCxVCLBu9/UwoXtjTzX1cvjr/Tw8K2/5Z0dTXz2PQv5+EXzqEkng84b74F134beLmjqgCXfgos+Fe0PEJFpy4KHdE5NnZ2dvmHDhgmt+9Lew/y/jdPvdQMnBvO8sOcQm3b18sbREzTVpln2rnP4/IwnWPTYN7GB46c6p2vhD7+vEBCRYczsKXfvHK+fzgFMMZlUgnfNb+Yzly3gjy9uZ3Zjlv/9+A4yv/rb4X/8AQaOByMCEZEJ0CGgKcrMmN9ax/zWOvoHc7Q//kbRft7bxe6Dx2lvrq1whSIy3SkApoFsKsnh7Bwa+0e+eH5Xfibv/85DzGuq4bJFrfzeolYuWdDCebMbSCc1wBOR0SkApolHF1zHFS//Hen8qRfPDyRqeHT+f+U/WBu7Dh7n3zfv4+fP7gYgk0xw/twZXNjexNvPaeSt8xo5r62Bprp0VD9BRKYYBcA0sWX2UgDev+NWZvTv5XB2Do8uuI49s5fyLuBd85txdw4eH2DvoT66D/fTfaSfnz+zi7ue2HFyOy11ac6b3cCb24LPm2bW0d5SS0dzHY21KcxGe8uniFQbBcA0smX20pNBUIyZ0VKXoaUuwwVzgzZ353DfIPuP9HPg2AAHjp1gz8E+Xth9iKMncsPWr8skaW+upaOllnOaa5nXVMOshixtM4LPrIbgk0np0JJINah4AJjZlcD3gCTwQ3f/TqVriBMzo7E2TWPtyEM/fQM5eo8PcKhvgMN9gxw+Psjh/gFe2HOI9dt7OD6QK7JFaKxJ0TYjy8yGLE21aZpr0zSFn+a6YF/NdZmTbTNqUtRnUtSkExphiEwhFQ0AM0sCtwBXAF3Ak2a2xt1fqGQdEqhJJ6lJJ5nTWFN0+WAuz7ETufAzyLETOY6G38f6c+w6cJxX9h+lfyDH8YEcA7mx7ylJGNRmktRnUtRnU9RnkzRkg3Coy6ZoyCapy6SoTSfJphLUpJNk0wlqUsF3NjV8/uR3Qf900kgnEiQSChqZRiK6ybPSI4BLgW3uvh3AzH4KLAMUAFNQKpmgsTZRdPRQTC7v9A3k6B/MD/s+kcszMJhnIOfBdC7PicE8R/oGOXB0gMF8MD+Q8/A7T6m3JybNSCWDTzqRIJ1MBNPJRBASyUTR6VRBWyqRIJmA5NC3GYmEkUoE30kzkonwEy5LDi0Plw31P7U83J4F04mwzoSd+piBWbDMgETCCPIs+B7qc7IvRiJxqr9Z0G/oOxGOuoa2Y5xaPrSdoXYr2E6xWoZGcMF08J+1RnUl2ngP/OJLwX09AL07g3koewhUOgDagZ0F813AZRWuQcokmbDwX/albcfdyTsM5vPk8s5gzoPvvDOYz4+czzu5XDCfcyefd/Lu5PMMm88VtJ0YzHN8IHdyX0GfsP/JdSDvjo/yPTQ9de+lr6yhGBgKpaEGK2yjMDiGlg8F2akNDbWdWt+Gbf9k15PBdGpbBbs+uV87vZaCjZxe2+lBR5H
lheueKttGto23HLjj4DeZnR/lJs8qC4Bi/1QY9v8fM1sBrABYsGDBhHd07qx6/ssHzp3w+iJnKl8QPCdDJp8n56eW5fLBJ18wXRhIubzjhSHDqSA8FTxBe9BWGEQAwwPr5LrDtuMj1g0C7PT2YBmc2v7QsmBPhdOE00GDe2Hb8L6EtQxfHsz4yeUjt0/YVrj9wr4Mq+X0bZ3amY9Y3yko/eR+C9dnxG899WMK/3AVe6JOsb7D/rMIv9t69o9cGYLDQWVW6QDoAuYXzHcAuws7uPtKYCUEzwKa6I5SyQQNuhFKRKa6mzqCwz6na+oo+64r/RfySWCxmS0yswxwDbCmwjWIiEwdS74VPNixULo2aC+zio4A3H3QzP4MeJDgMtA73P35StYgIjKlDB3nj8FVQLj7/cD9ld6viMiUddGnInmsuw6Si4jElAJARCSmFAAiIjGlABARiSkFgIhITCkARERiSgEgIhJTCgARkZgyL/YUoynCzLqB16KuYwJmAaM84anqxOm3gn5vNaum3/omd28br9OUDoDpysw2uHtn1HVUQpx+K+j3VrM4/dYhOgQkIhJTCgARkZhSAJTHyqgLqKA4/VbQ761mcfqtgM4BiIjElkYAIiIxpQCYJGY238weNrPNZva8mX056prKzcySZvaMmf1r1LWUm5k1m9m9ZvZi+N/xe6KuqZzM7M/D/x1vMrO7zKwm6pomk5ndYWb7zGxTQVurma01s63hd0uUNVaCAmDyDAJ/4e5vBS4Hrjezt0VcU7l9GdgcdREV8j3g39z9AuCdVPHvNrN24EtAp7tfSPD2vmuirWrS/Ri48rS2G4B17r4YWBfOVzUFwCRx9z3u/nQ4fZjgD0R7tFWVj5l1AB8Dfhh1LeVmZo3AB4DbAdz9hLsfjLaqsksBtWaWAuqA3RHXM6nc/RGg57TmZcCqcHoVcFVFi4qAAqAMzGwhcDHweLSVlNXNwNeAfNSFVMC5QDfwo/CQ1w/NrD7qosrF3XcB3wV2AHuAXnf/ZbRVVcQcd98DwT/ogNkR11N2CoBJZmYNwM+Ar7j7oajrKQcz+ziwz92firqWCkkBlwC3ufvFwFGq+PBAeOx7GbAIOAeoN7P/FG1VUg4KgElkZmmCP/53uvt9UddTRu8DPmFmrwI/BT5kZv8r2pLKqgvocvehEd29BIFQrT4MvOLu3e4+ANwHvDfimiphr5nNAwi/90VcT9kpACaJmRnBMeLN7v5PUddTTu7+DXfvcPeFBCcHH3L3qv0Xoru/Duw0s/PDpiXACxGWVG47gMvNrC783/USqvikd4E1wPJwejmwOsJaKiIVdQFV5H3AZ4HfmdmzYds33f3+CGuSyfNF4E4zywDbgS9EXE/ZuPvjZnYv8DTB1W3PUGV3yZrZXcAHgVlm1gXcCHwHuMfMriUIwaujq7AydCewiEhM6RCQiEhMKQBERGJKASAiElMKABGRmFIAiIjElAJARCSmFAAiIjGlABARian/D+hfAG5Faoo0AAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "Feerates:\t1.1531420689155165, 1.4085104512204296, 2.816548045571761, 11.10120050773006 \n", + "Times:\t\t874.010579873836, 479.615551452334, 60.00000000000001, 1.0000000000000004" + ] + }, + "execution_count": 108, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "def build_estimator():\n", + " _feerates = [1.0]*10 + [1.1]*10 + [1.2]*10 + [1.5]*3000 + [2]*3000\\\n", + "+ [2.1]*3000 + [3]*10 + [4]*10 + [5]*10 + [6] + [7] + [10] + [100] + [200]*200\n", + " _total_weight = sum(np.array(_feerates)**ALPHA)\n", + " _network_mass_rate = bps * block_mass_limit\n", + " estimator = FeerateEstimator(total_weight=_total_weight, \n", + " inclusion_interval=avg_mass/_network_mass_rate)\n", + " \n", + " nr = _network_mass_rate\n", + " for i in range(len(_feerates)-1, -1, -1):\n", + " tw = sum(np.array(_feerates[:i])**ALPHA)\n", + " nr -= avg_mass\n", + " if nr <= 0:\n", + " print(\"net mass rate {}\", nr)\n", + " break\n", + " e = FeerateEstimator(total_weight=tw, \n", + " inclusion_interval=avg_mass/nr)\n", + " if e.feerate_to_time(1.0) < estimator.feerate_to_time(1.0):\n", + " # print(\"removing {}\".format(_feerates[i]))\n", + " estimator = e\n", + " else:\n", + " break\n", + " \n", + " return estimator\n", + "\n", + "estimator = build_estimator()\n", + "pred = estimator.calc_estimations()\n", + "x = np.linspace(1, pred.priority_bucket.feerate, 100000)\n", + "y = estimator.feerate_to_time(x)\n", + "plt.figure()\n", + "plt.plot(x, y)\n", + "plt.fill_between(x, estimator.inclusion_interval, y2=y, alpha=0.5)\n", + "plt.scatter(pred.feerates(), pred.times(), zorder=100)\n", + "plt.show()\n", + "pred" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python [conda env:gr]", + "language": "python", + "name": "conda-env-gr-py" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/mining/src/feerate/mod.rs b/mining/src/feerate/mod.rs new file mode 100644 index 0000000000..5ef3579a56 --- /dev/null +++ b/mining/src/feerate/mod.rs @@ -0,0 +1,231 @@ +//! See the accompanying fee_estimation.ipynb Jupyter Notebook which details the reasoning +//! behind this fee estimator. + +use crate::block_template::selector::ALPHA; +use itertools::Itertools; +use std::fmt::Display; + +/// A type representing fee/mass of a transaction in `sompi/gram` units. +/// Given a feerate value recommendation, calculate the required fee by +/// taking the transaction mass and multiplying it by feerate: `fee = feerate * mass(tx)` +pub type Feerate = f64; + +#[derive(Clone, Copy, Debug)] +pub struct FeerateBucket { + pub feerate: f64, + pub estimated_seconds: f64, +} + +impl Display for FeerateBucket { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "({:.4}, {:.4}s)", self.feerate, self.estimated_seconds) + } +} + +#[derive(Clone, Debug)] +pub struct FeerateEstimations { + /// *Top-priority* feerate bucket. Provides an estimation of the feerate required for sub-second DAG inclusion. + /// + /// Note: for all buckets, feerate values represent fee/mass of a transaction in `sompi/gram` units. 
+    /// Given a feerate value recommendation, calculate the required fee by
+    /// taking the transaction mass and multiplying it by feerate: `fee = feerate * mass(tx)`
+    pub priority_bucket: FeerateBucket,
+
+    /// A vector of *normal* priority feerate values. The first value of this vector is guaranteed to exist and
+    /// provide an estimation for sub-*minute* DAG inclusion. All other values will have shorter estimation
+    /// times than all `low_buckets` values. Therefore, by chaining `[priority] | normal | low` and interpolating
+    /// between them, one can compose a complete feerate function on the client side. The API makes an effort
+    /// to sample enough "interesting" points on the feerate-to-time curve, so that the interpolation is meaningful.
+    pub normal_buckets: Vec<FeerateBucket>,
+
+    /// A vector of *low* priority feerate values. The first value of this vector is guaranteed to
+    /// exist and provide an estimation for sub-*hour* DAG inclusion.
+    pub low_buckets: Vec<FeerateBucket>,
+}
+
+impl FeerateEstimations {
+    pub fn ordered_buckets(&self) -> Vec<FeerateBucket> {
+        std::iter::once(self.priority_bucket)
+            .chain(self.normal_buckets.iter().copied())
+            .chain(self.low_buckets.iter().copied())
+            .collect()
+    }
+}
+
+impl Display for FeerateEstimations {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "(fee/mass, secs) priority: {}, ", self.priority_bucket)?;
+        write!(f, "normal: {}, ", self.normal_buckets.iter().format(", "))?;
+        write!(f, "low: {}", self.low_buckets.iter().format(", "))
+    }
+}
+
+pub struct FeerateEstimatorArgs {
+    pub network_blocks_per_second: u64,
+    pub maximum_mass_per_block: u64,
+}
+
+impl FeerateEstimatorArgs {
+    pub fn new(network_blocks_per_second: u64, maximum_mass_per_block: u64) -> Self {
+        Self { network_blocks_per_second, maximum_mass_per_block }
+    }
+
+    pub fn network_mass_per_second(&self) -> u64 {
+        self.network_blocks_per_second * self.maximum_mass_per_block
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct FeerateEstimator {
+    /// The total probability weight of current mempool ready transactions, i.e., `Σ_{tx in mempool}(tx.fee/tx.mass)^alpha`.
+    /// Note that some estimators might consider a reduced weight which excludes outliers. See [`Frontier::build_feerate_estimator`].
+    total_weight: f64,
+
+    /// The amortized time **in seconds** between transactions, given the current transaction masses present in the mempool. Or in
+    /// other words, the inverse of the transaction inclusion rate. For instance, if the average transaction mass is 2500 grams,
+    /// the block mass limit is 500,000 and the network has 10 BPS, then this number would be 1/2000 seconds.
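+    /// (Worked out: 10 blocks/s × 500,000 grams/block = 5,000,000 grams/s of throughput; at
+    /// 2,500 grams per transaction that is 2,000 transactions/s, i.e., an interval of 1/2000 s.)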
+ inclusion_interval: f64, +} + +impl FeerateEstimator { + pub fn new(total_weight: f64, inclusion_interval: f64) -> Self { + assert!(total_weight >= 0.0); + assert!((0f64..1f64).contains(&inclusion_interval)); + Self { total_weight, inclusion_interval } + } + + pub(crate) fn feerate_to_time(&self, feerate: f64) -> f64 { + let (c1, c2) = (self.inclusion_interval, self.total_weight); + c1 * c2 / feerate.powi(ALPHA) + c1 + } + + fn time_to_feerate(&self, time: f64) -> f64 { + let (c1, c2) = (self.inclusion_interval, self.total_weight); + assert!(c1 < time, "{c1}, {time}"); + ((c1 * c2 / time) / (1f64 - c1 / time)).powf(1f64 / ALPHA as f64) + } + + /// The antiderivative function of [`feerate_to_time`] excluding the constant shift `+ c1` + #[inline] + fn feerate_to_time_antiderivative(&self, feerate: f64) -> f64 { + let (c1, c2) = (self.inclusion_interval, self.total_weight); + c1 * c2 / (-2f64 * feerate.powi(ALPHA - 1)) + } + + /// Returns the feerate value for which the integral area is `frac` of the total area between `lower` and `upper`. + fn quantile(&self, lower: f64, upper: f64, frac: f64) -> f64 { + assert!((0f64..=1f64).contains(&frac)); + assert!(0.0 < lower && lower <= upper, "{lower}, {upper}"); + let (c1, c2) = (self.inclusion_interval, self.total_weight); + if c1 == 0.0 || c2 == 0.0 { + // if c1 · c2 == 0.0, the integral area is empty, so we simply return `lower` + return lower; + } + let z1 = self.feerate_to_time_antiderivative(lower); + let z2 = self.feerate_to_time_antiderivative(upper); + // Get the total area corresponding to `frac` of the integral area between `lower` and `upper` + // which can be expressed as z1 + frac * (z2 - z1) + let z = frac * z2 + (1f64 - frac) * z1; + // Calc the x value (feerate) corresponding to said area + ((c1 * c2) / (-2f64 * z)).powf(1f64 / (ALPHA - 1) as f64) + } + + pub fn calc_estimations(&self, minimum_standard_feerate: f64) -> FeerateEstimations { + let min = minimum_standard_feerate; + // Choose `high` such that it provides sub-second waiting time + let high = self.time_to_feerate(1f64).max(min); + // Choose `low` feerate such that it provides sub-hour waiting time AND it covers (at least) the 0.25 quantile + let low = self.time_to_feerate(3600f64).max(self.quantile(min, high, 0.25)); + // Choose `normal` feerate such that it provides sub-minute waiting time AND it covers (at least) the 0.66 quantile between low and high. + let normal = self.time_to_feerate(60f64).max(self.quantile(low, high, 0.66)); + // Choose an additional point between normal and low + let mid = self.time_to_feerate(1800f64).max(self.quantile(min, high, 0.5)); + /* Intuition for the above: + 1. The quantile calculations make sure that we return interesting points on the `feerate_to_time` curve. + 2. 
They also ensure that the estimated times don't remain needlessly high when small increments to feerate would suffice
+              to cover large fractions of the integral area (reflecting the position within the waiting-time distribution)
+        */
+        FeerateEstimations {
+            priority_bucket: FeerateBucket { feerate: high, estimated_seconds: self.feerate_to_time(high) },
+            normal_buckets: vec![
+                FeerateBucket { feerate: normal, estimated_seconds: self.feerate_to_time(normal) },
+                FeerateBucket { feerate: mid, estimated_seconds: self.feerate_to_time(mid) },
+            ],
+            low_buckets: vec![FeerateBucket { feerate: low, estimated_seconds: self.feerate_to_time(low) }],
+        }
+    }
+}
+
+#[derive(Clone, Debug)]
+pub struct FeeEstimateVerbose {
+    pub estimations: FeerateEstimations,
+
+    pub mempool_ready_transactions_count: u64,
+    pub mempool_ready_transactions_total_mass: u64,
+    pub network_mass_per_second: u64,
+
+    pub next_block_template_feerate_min: f64,
+    pub next_block_template_feerate_median: f64,
+    pub next_block_template_feerate_max: f64,
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use itertools::Itertools;
+
+    #[test]
+    fn test_feerate_estimations() {
+        let estimator = FeerateEstimator { total_weight: 1002283.659, inclusion_interval: 0.004f64 };
+        let estimations = estimator.calc_estimations(1.0);
+        let buckets = estimations.ordered_buckets();
+        for (i, j) in buckets.into_iter().tuple_windows() {
+            assert!(i.feerate >= j.feerate);
+        }
+        dbg!(estimations);
+    }
+
+    #[test]
+    fn test_min_feerate_estimations() {
+        let estimator = FeerateEstimator { total_weight: 0.00659, inclusion_interval: 0.004f64 };
+        let minimum_feerate = 0.755;
+        let estimations = estimator.calc_estimations(minimum_feerate);
+        println!("{estimations}");
+        let buckets = estimations.ordered_buckets();
+        assert!(buckets.last().unwrap().feerate >= minimum_feerate);
+        for (i, j) in buckets.into_iter().tuple_windows() {
+            assert!(i.feerate >= j.feerate);
+            assert!(i.estimated_seconds <= j.estimated_seconds);
+        }
+    }
+
+    #[test]
+    fn test_zero_values() {
+        let estimator = FeerateEstimator { total_weight: 0.0, inclusion_interval: 0.0 };
+        let minimum_feerate = 0.755;
+        let estimations = estimator.calc_estimations(minimum_feerate);
+        let buckets = estimations.ordered_buckets();
+        for bucket in buckets {
+            assert_eq!(minimum_feerate, bucket.feerate);
+            assert_eq!(0.0, bucket.estimated_seconds);
+        }
+
+        let estimator = FeerateEstimator { total_weight: 0.0, inclusion_interval: 0.1 };
+        let minimum_feerate = 0.755;
+        let estimations = estimator.calc_estimations(minimum_feerate);
+        let buckets = estimations.ordered_buckets();
+        for bucket in buckets {
+            assert_eq!(minimum_feerate, bucket.feerate);
+            assert_eq!(estimator.inclusion_interval, bucket.estimated_seconds);
+        }
+
+        let estimator = FeerateEstimator { total_weight: 0.1, inclusion_interval: 0.0 };
+        let minimum_feerate = 0.755;
+        let estimations = estimator.calc_estimations(minimum_feerate);
+        let buckets = estimations.ordered_buckets();
+        for bucket in buckets {
+            assert_eq!(minimum_feerate, bucket.feerate);
+            assert_eq!(0.0, bucket.estimated_seconds);
+        }
+    }
+}
diff --git a/mining/src/lib.rs b/mining/src/lib.rs
index 2986577efe..141d9d2836 100644
--- a/mining/src/lib.rs
+++ b/mining/src/lib.rs
@@ -8,12 +8,17 @@ use mempool::tx::Priority;
 mod block_template;
 pub(crate) mod cache;
 pub mod errors;
+pub mod feerate;
 pub mod manager;
 mod manager_tests;
 pub mod mempool;
 pub mod model;
 pub mod monitor;
+
+// Exposed for benchmarks
+pub use block_template::{policy::Policy, 
selector::RebalancingWeightedTransactionSelector}; +pub use mempool::model::frontier::{feerate_key::FeerateTransactionKey, search_tree::SearchTree, Frontier}; + #[cfg(test)] pub mod testutils; @@ -25,6 +30,7 @@ pub struct MiningCounters { pub low_priority_tx_counts: AtomicU64, pub block_tx_counts: AtomicU64, pub tx_accepted_counts: AtomicU64, + pub tx_evicted_counts: AtomicU64, pub input_counts: AtomicU64, pub output_counts: AtomicU64, @@ -43,6 +49,7 @@ impl Default for MiningCounters { low_priority_tx_counts: Default::default(), block_tx_counts: Default::default(), tx_accepted_counts: Default::default(), + tx_evicted_counts: Default::default(), input_counts: Default::default(), output_counts: Default::default(), ready_txs_sample: Default::default(), @@ -61,6 +68,7 @@ impl MiningCounters { low_priority_tx_counts: self.low_priority_tx_counts.load(Ordering::Relaxed), block_tx_counts: self.block_tx_counts.load(Ordering::Relaxed), tx_accepted_counts: self.tx_accepted_counts.load(Ordering::Relaxed), + tx_evicted_counts: self.tx_evicted_counts.load(Ordering::Relaxed), input_counts: self.input_counts.load(Ordering::Relaxed), output_counts: self.output_counts.load(Ordering::Relaxed), ready_txs_sample: self.ready_txs_sample.load(Ordering::Relaxed), @@ -96,6 +104,7 @@ pub struct MempoolCountersSnapshot { pub low_priority_tx_counts: u64, pub block_tx_counts: u64, pub tx_accepted_counts: u64, + pub tx_evicted_counts: u64, pub input_counts: u64, pub output_counts: u64, pub ready_txs_sample: u64, @@ -146,13 +155,14 @@ impl core::ops::Sub for &MempoolCountersSnapshot { fn sub(self, rhs: Self) -> Self::Output { Self::Output { - elapsed_time: self.elapsed_time.checked_sub(rhs.elapsed_time).unwrap_or_default(), - high_priority_tx_counts: self.high_priority_tx_counts.checked_sub(rhs.high_priority_tx_counts).unwrap_or_default(), - low_priority_tx_counts: self.low_priority_tx_counts.checked_sub(rhs.low_priority_tx_counts).unwrap_or_default(), - block_tx_counts: self.block_tx_counts.checked_sub(rhs.block_tx_counts).unwrap_or_default(), - tx_accepted_counts: self.tx_accepted_counts.checked_sub(rhs.tx_accepted_counts).unwrap_or_default(), - input_counts: self.input_counts.checked_sub(rhs.input_counts).unwrap_or_default(), - output_counts: self.output_counts.checked_sub(rhs.output_counts).unwrap_or_default(), + elapsed_time: self.elapsed_time.saturating_sub(rhs.elapsed_time), + high_priority_tx_counts: self.high_priority_tx_counts.saturating_sub(rhs.high_priority_tx_counts), + low_priority_tx_counts: self.low_priority_tx_counts.saturating_sub(rhs.low_priority_tx_counts), + block_tx_counts: self.block_tx_counts.saturating_sub(rhs.block_tx_counts), + tx_accepted_counts: self.tx_accepted_counts.saturating_sub(rhs.tx_accepted_counts), + tx_evicted_counts: self.tx_evicted_counts.saturating_sub(rhs.tx_evicted_counts), + input_counts: self.input_counts.saturating_sub(rhs.input_counts), + output_counts: self.output_counts.saturating_sub(rhs.output_counts), ready_txs_sample: (self.ready_txs_sample + rhs.ready_txs_sample) / 2, txs_sample: (self.txs_sample + rhs.txs_sample) / 2, orphans_sample: (self.orphans_sample + rhs.orphans_sample) / 2, @@ -172,8 +182,8 @@ impl core::ops::Sub for &P2pTxCountSample { fn sub(self, rhs: Self) -> Self::Output { Self::Output { - elapsed_time: self.elapsed_time.checked_sub(rhs.elapsed_time).unwrap_or_default(), - low_priority_tx_counts: self.low_priority_tx_counts.checked_sub(rhs.low_priority_tx_counts).unwrap_or_default(), + elapsed_time: self.elapsed_time.saturating_sub(rhs.elapsed_time), + 
low_priority_tx_counts: self.low_priority_tx_counts.saturating_sub(rhs.low_priority_tx_counts), } } } diff --git a/mining/src/manager.rs b/mining/src/manager.rs index 5743901220..32893312a1 100644 --- a/mining/src/manager.rs +++ b/mining/src/manager.rs @@ -2,27 +2,31 @@ use crate::{ block_template::{builder::BlockTemplateBuilder, errors::BuilderError}, cache::BlockTemplateCache, errors::MiningManagerResult, + feerate::{FeeEstimateVerbose, FeerateEstimations, FeerateEstimatorArgs}, mempool::{ config::Config, - model::tx::{MempoolTransaction, TxRemovalReason}, + model::tx::{MempoolTransaction, TransactionPostValidation, TransactionPreValidation, TxRemovalReason}, populate_entries_and_try_validate::{ populate_mempool_transactions_in_parallel, validate_mempool_transaction, validate_mempool_transactions_in_parallel, }, - tx::{Orphan, Priority}, + tx::{Orphan, Priority, RbfPolicy}, Mempool, }, model::{ - candidate_tx::CandidateTransaction, owner_txs::{GroupedOwnerTransactions, ScriptPublicKeySet}, topological_sort::IntoIterTopologically, + tx_insert::TransactionInsertion, tx_query::TransactionQuery, }, MempoolCountersSnapshot, MiningCounters, P2pTxCountSample, }; use itertools::Itertools; use kaspa_consensus_core::{ - api::ConsensusApi, - block::{BlockTemplate, TemplateBuildMode}, + api::{ + args::{TransactionValidationArgs, TransactionValidationBatchArgs}, + ConsensusApi, + }, + block::{BlockTemplate, TemplateBuildMode, TemplateTransactionSelector}, coinbase::MinerData, errors::{block::RuleError as BlockRuleError, tx::TxRuleError}, tx::{MutableTransaction, Transaction, TransactionId, TransactionOutput}, @@ -103,14 +107,14 @@ impl MiningManager { loop { attempts += 1; - let transactions = self.block_candidate_transactions(); - let block_template_builder = BlockTemplateBuilder::new(self.config.maximum_mass_per_block); + let selector = self.build_selector(); + let block_template_builder = BlockTemplateBuilder::new(); let build_mode = if attempts < self.config.maximum_build_block_template_attempts { TemplateBuildMode::Standard } else { TemplateBuildMode::Infallible }; - match block_template_builder.build_block_template(consensus, miner_data, transactions, build_mode) { + match block_template_builder.build_block_template(consensus, miner_data, selector, build_mode) { Ok(block_template) => { let block_template = cache_lock.set_immutable_cached_template(block_template); match attempts { @@ -193,8 +197,62 @@ impl MiningManager { } } - pub(crate) fn block_candidate_transactions(&self) -> Vec<CandidateTransaction> { - self.mempool.read().block_candidate_transactions() + /// Dynamically builds a transaction selector based on the specific state of the ready transactions frontier + pub(crate) fn build_selector(&self) -> Box<dyn TemplateTransactionSelector> { + self.mempool.read().build_selector() + } + + /// Returns realtime feerate estimations based on internal mempool state + pub(crate) fn get_realtime_feerate_estimations(&self) -> FeerateEstimations { + let args = FeerateEstimatorArgs::new(self.config.network_blocks_per_second, self.config.maximum_mass_per_block); + let estimator = self.mempool.read().build_feerate_estimator(args); + estimator.calc_estimations(self.config.minimum_feerate()) + } + + /// Returns realtime feerate estimations based on internal mempool state with additional verbose data + pub(crate) fn get_realtime_feerate_estimations_verbose( + &self, + consensus: &dyn ConsensusApi, + prefix: kaspa_addresses::Prefix, + ) -> MiningManagerResult<FeeEstimateVerbose> { + let args = FeerateEstimatorArgs::new(self.config.network_blocks_per_second,
self.config.maximum_mass_per_block); + let network_mass_per_second = args.network_mass_per_second(); + let mempool_read = self.mempool.read(); + let estimator = mempool_read.build_feerate_estimator(args); + let ready_transactions_count = mempool_read.ready_transaction_count(); + let ready_transaction_total_mass = mempool_read.ready_transaction_total_mass(); + drop(mempool_read); + let mut resp = FeeEstimateVerbose { + estimations: estimator.calc_estimations(self.config.minimum_feerate()), + network_mass_per_second, + mempool_ready_transactions_count: ready_transactions_count as u64, + mempool_ready_transactions_total_mass: ready_transaction_total_mass, + + next_block_template_feerate_min: -1.0, + next_block_template_feerate_median: -1.0, + next_block_template_feerate_max: -1.0, + }; + // calculate next_block_template_feerate_xxx + { + let script_public_key = kaspa_txscript::pay_to_address_script(&kaspa_addresses::Address::new( + prefix, + kaspa_addresses::Version::PubKey, + &[0u8; 32], + )); + let miner_data: MinerData = MinerData::new(script_public_key, vec![]); + + let BlockTemplate { block: kaspa_consensus_core::block::MutableBlock { transactions, .. }, calculated_fees, .. } = + self.get_block_template(consensus, &miner_data)?; + + let Some(Stats { max, median, min }) = feerate_stats(transactions, calculated_fees) else { + return Ok(resp); + }; + + resp.next_block_template_feerate_max = max; + resp.next_block_template_feerate_min = min; + resp.next_block_template_feerate_median = median; + } + Ok(resp) } /// Clears the block template cache, forcing the next call to get_block_template to build a new block template. @@ -205,54 +263,65 @@ impl MiningManager { #[cfg(test)] pub(crate) fn block_template_builder(&self) -> BlockTemplateBuilder { - BlockTemplateBuilder::new(self.config.maximum_mass_per_block) + BlockTemplateBuilder::new() } /// validate_and_insert_transaction validates the given transaction, and /// adds it to the set of known transactions that have not yet been /// added to any block. /// - /// The returned transactions are clones of objects owned by the mempool. + /// The validation is constrained by a replace-by-fee policy applied + /// to double spends in the mempool. For more information, see [`RbfPolicy`]. + /// + /// On success, returns transactions that were unorphaned following the insertion + /// of the provided transaction. + /// + /// The returned transactions are references to objects owned by the mempool. pub fn validate_and_insert_transaction( &self, consensus: &dyn ConsensusApi, transaction: Transaction, priority: Priority, orphan: Orphan, - ) -> MiningManagerResult<Vec<Arc<Transaction>>> { - self.validate_and_insert_mutable_transaction(consensus, MutableTransaction::from_tx(transaction), priority, orphan) + rbf_policy: RbfPolicy, + ) -> MiningManagerResult<TransactionInsertion> { + self.validate_and_insert_mutable_transaction(consensus, MutableTransaction::from_tx(transaction), priority, orphan, rbf_policy) }
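+ + // A minimal caller sketch (illustrative only; `consensus` and `tx` are assumed to exist in + // the caller's context, e.g. an RPC flow submitting a user transaction): + // + // let insertion = mining_manager.validate_and_insert_transaction( + // consensus, tx, Priority::High, Orphan::Allowed, RbfPolicy::Allowed, + // )?; + // if let Some(replaced) = insertion.removed.as_ref() { + // // a replace-by-fee occurred and `replaced` left the mempool + // } + // for accepted in insertion.accepted.iter() { + // // the submitted transaction plus any transactions it unorphaned + // }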
- /// Exposed only for tests. Ordinary users should call `validate_and_insert_transaction` instead - pub fn validate_and_insert_mutable_transaction( + /// Exposed for tests only + /// + /// See `validate_and_insert_transaction` + pub(crate) fn validate_and_insert_mutable_transaction( &self, consensus: &dyn ConsensusApi, transaction: MutableTransaction, priority: Priority, orphan: Orphan, - ) -> MiningManagerResult<Vec<Arc<Transaction>>> { + rbf_policy: RbfPolicy, + ) -> MiningManagerResult<TransactionInsertion> { // read lock on mempool - let mut transaction = self.mempool.read().pre_validate_and_populate_transaction(consensus, transaction)?; + let TransactionPreValidation { mut transaction, feerate_threshold } = + self.mempool.read().pre_validate_and_populate_transaction(consensus, transaction, rbf_policy)?; + let args = TransactionValidationArgs::new(feerate_threshold); // no lock on mempool - let validation_result = validate_mempool_transaction(consensus, &mut transaction); + let validation_result = validate_mempool_transaction(consensus, &mut transaction, &args); // write lock on mempool let mut mempool = self.mempool.write(); - if let Some(accepted_transaction) = - mempool.post_validate_and_insert_transaction(consensus, validation_result, transaction, priority, orphan)? - { - let unorphaned_transactions = mempool.get_unorphaned_transactions_after_accepted_transaction(&accepted_transaction); - drop(mempool); - - // The capacity used here may be exceeded since accepted unorphaned transaction may themselves unorphan other transactions. - let mut accepted_transactions = Vec::with_capacity(unorphaned_transactions.len() + 1); - // We include the original accepted transaction as well - accepted_transactions.push(accepted_transaction); - accepted_transactions.extend(self.validate_and_insert_unorphaned_transactions(consensus, unorphaned_transactions)); - self.counters.increase_tx_counts(1, priority); - - Ok(accepted_transactions) - } else { - Ok(vec![]) + match mempool.post_validate_and_insert_transaction(consensus, validation_result, transaction, priority, orphan, rbf_policy)? { + TransactionPostValidation { removed, accepted: Some(accepted_transaction) } => { + let unorphaned_transactions = mempool.get_unorphaned_transactions_after_accepted_transaction(&accepted_transaction); + drop(mempool); + + // The capacity used here may be exceeded since accepted unorphaned transactions may themselves unorphan other transactions. + let mut accepted_transactions = Vec::with_capacity(unorphaned_transactions.len() + 1); + // We include the original accepted transaction as well + accepted_transactions.push(accepted_transaction); + accepted_transactions.extend(self.validate_and_insert_unorphaned_transactions(consensus, unorphaned_transactions)); + self.counters.increase_tx_counts(1, priority); + + Ok(TransactionInsertion::new(removed, accepted_transactions)) + } + TransactionPostValidation { removed, accepted: None } => Ok(TransactionInsertion::new(removed, vec![])), } } @@ -263,6 +332,9 @@ impl MiningManager { ) -> Vec<Arc<Transaction>> { // The capacity used here may be exceeded (see next comment). let mut accepted_transactions = Vec::with_capacity(incoming_transactions.len()); + // The validation args map is immutably empty since unorphaned transactions do not require pre-processing, so there + // are no feerate thresholds to use. Instead, we rely on this being checked during post-processing. + let args = TransactionValidationBatchArgs::new(); // We loop as long as incoming unorphaned transactions do unorphan other transactions when they // get validated and inserted into the mempool.
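+ // In effect this is a worklist iteration: each pass validates the currently unorphaned + // transactions in parallel chunks, and every transaction accepted into the mempool may + // release further orphans, which become the input of the next pass. The iteration ends + // because candidates are drawn from the finite orphan pool and are inserted with + // `Orphan::Forbidden`, so they cannot re-enter it within this call.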
while !incoming_transactions.is_empty() { @@ -277,8 +349,11 @@ impl MiningManager { let mut validation_results = Vec::with_capacity(transactions.len()); while let Some(upper_bound) = self.next_transaction_chunk_upper_bound(&transactions, lower_bound) { assert!(lower_bound < upper_bound, "the chunk is never empty"); - validation_results - .extend(validate_mempool_transactions_in_parallel(consensus, &mut transactions[lower_bound..upper_bound])); + validation_results.extend(validate_mempool_transactions_in_parallel( + consensus, + &mut transactions[lower_bound..upper_bound], + &args, + )); lower_bound = upper_bound; } assert_eq!(transactions.len(), validation_results.len(), "every transaction should have a matching validation result"); @@ -291,19 +366,21 @@ impl MiningManager { .zip(validation_results) .flat_map(|((transaction, priority), validation_result)| { let orphan_id = transaction.id(); + let rbf_policy = Mempool::get_orphan_transaction_rbf_policy(priority); match mempool.post_validate_and_insert_transaction( consensus, validation_result, transaction, priority, Orphan::Forbidden, + rbf_policy, ) { - Ok(Some(accepted_transaction)) => { + Ok(TransactionPostValidation { removed: _, accepted: Some(accepted_transaction) }) => { accepted_transactions.push(accepted_transaction.clone()); self.counters.increase_tx_counts(1, priority); mempool.get_unorphaned_transactions_after_accepted_transaction(&accepted_transaction) } - Ok(None) => vec![], + Ok(TransactionPostValidation { removed: _, accepted: None }) => vec![], Err(err) => { debug!("Failed to unorphan transaction {0} due to rule error: {1}", orphan_id, err); vec![] @@ -319,14 +396,18 @@ impl MiningManager { /// Validates a batch of transactions, handling iteratively only the independent ones, and /// adds those to the set of known transactions that have not yet been added to any block. /// + /// The validation is constrained by a replace-by-fee policy applied + /// to double spends in the mempool. For more information, see [`RbfPolicy`]. + /// /// Returns transactions that were unorphaned following the insertion of the provided - transactions. The returned transactions are clones of objects owned by the mempool. + transactions. The returned transactions are references to objects owned by the mempool.
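+ /// + /// A minimal call sketch (illustrative; `consensus` and `txs` are assumed to exist in the caller's context): + /// + /// ```ignore + /// let results = mining_manager.validate_and_insert_transaction_batch( + /// consensus, txs, Priority::Low, Orphan::Allowed, RbfPolicy::Forbidden, + /// ); + /// // each Ok entry is one transaction accepted into the mempool + /// for accepted in results.into_iter().flatten() { + /// println!("accepted {}", accepted.id()); + /// } + /// ```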
pub fn validate_and_insert_transaction_batch( &self, consensus: &dyn ConsensusApi, transactions: Vec<Transaction>, priority: Priority, orphan: Orphan, + rbf_policy: RbfPolicy, ) -> Vec<MiningManagerResult<Arc<Transaction>>> { const TRANSACTION_CHUNK_SIZE: usize = 250; @@ -340,12 +421,18 @@ impl MiningManager { // read lock on mempool // Here, we simply log and drop all erroneous transactions since the caller doesn't care about those anyway let mut transactions = Vec::with_capacity(sorted_transactions.len()); + let mut args = TransactionValidationBatchArgs::new(); for chunk in &sorted_transactions.chunks(TRANSACTION_CHUNK_SIZE) { let mempool = self.mempool.read(); let txs = chunk.filter_map(|tx| { let transaction_id = tx.id(); - match mempool.pre_validate_and_populate_transaction(consensus, tx) { - Ok(tx) => Some(tx), + match mempool.pre_validate_and_populate_transaction(consensus, tx, rbf_policy) { + Ok(TransactionPreValidation { transaction, feerate_threshold }) => { + if let Some(threshold) = feerate_threshold { + args.set_feerate_threshold(transaction.id(), threshold); + } + Some(transaction) + } Err(RuleError::RejectAlreadyAccepted(transaction_id)) => { debug!("Ignoring already accepted transaction {}", transaction_id); None @@ -374,8 +461,11 @@ impl MiningManager { let mut validation_results = Vec::with_capacity(transactions.len()); while let Some(upper_bound) = self.next_transaction_chunk_upper_bound(&transactions, lower_bound) { assert!(lower_bound < upper_bound, "the chunk is never empty"); - validation_results - .extend(validate_mempool_transactions_in_parallel(consensus, &mut transactions[lower_bound..upper_bound])); + validation_results.extend(validate_mempool_transactions_in_parallel( + consensus, + &mut transactions[lower_bound..upper_bound], + &args, + )); lower_bound = upper_bound; } assert_eq!(transactions.len(), validation_results.len(), "every transaction should have a matching validation result"); @@ -386,13 +476,20 @@ impl MiningManager { let mut mempool = self.mempool.write(); let txs = chunk.flat_map(|(transaction, validation_result)| { let transaction_id = transaction.id(); - match mempool.post_validate_and_insert_transaction(consensus, validation_result, transaction, priority, orphan) { - Ok(Some(accepted_transaction)) => { + match mempool.post_validate_and_insert_transaction( + consensus, + validation_result, + transaction, + priority, + orphan, + rbf_policy, + ) { + Ok(TransactionPostValidation { removed: _, accepted: Some(accepted_transaction) }) => { + insert_results.push(Ok(accepted_transaction.clone())); + self.counters.increase_tx_counts(1, priority); + mempool.get_unorphaned_transactions_after_accepted_transaction(&accepted_transaction) + } - Ok(None) => { + Ok(TransactionPostValidation { removed: _, accepted: None }) | Err(RuleError::RejectDuplicate(_)) => { // Either orphaned or already existing in the mempool vec![] } @@ -620,10 +717,10 @@ impl MiningManager { let _swo = Stopwatch::<60>::with_threshold("revalidate update_revalidated_transaction op"); for (transaction, validation_result) in chunk { let transaction_id = transaction.id(); - // Only consider transactions still being in the mempool since during the validation some might have been removed. - if mempool.update_revalidated_transaction(transaction) { - match validation_result { - Ok(()) => { + match validation_result { + Ok(()) => { + // Only consider transactions still in the mempool, since some might have been removed during validation.
+ if mempool.update_revalidated_transaction(transaction) { // A following transaction should not remove this one from the pool since we process in a topological order. // Still, considering the (very unlikely) scenario of two high priority txs sandwiching a low one, where // in this case topological order is not guaranteed since we only considered chained dependencies of @@ -632,66 +729,55 @@ impl MiningManager { // provided upon request. valid_ids.push(transaction_id); valid += 1; + } else { + other += 1; } - Err(RuleError::RejectMissingOutpoint) => { - let transaction = mempool.get_transaction(&transaction_id, TransactionQuery::TransactionsOnly).unwrap(); - let missing_txs = transaction - .entries - .iter() - .zip(transaction.tx.inputs.iter()) - .flat_map( - |(entry, input)| { - if entry.is_none() { - Some(input.previous_outpoint.transaction_id) - } else { - None - } - }, - ) - .collect::>(); - - // A transaction may have missing outpoints for legitimate reasons related to concurrency, like a race condition between - // an accepted block having not started yet or unfinished call to handle_new_block_transactions but already processed by - // the consensus and this ongoing call to revalidate. - // - // So we only remove the transaction and keep its redeemers in the mempool because we cannot be sure they are invalid, in - // fact in the race condition case they are valid regarding outpoints. - let extra_info = match missing_txs.len() { - 0 => " but no missing tx!".to_string(), // this is never supposed to happen - 1 => format!(" missing tx {}", missing_txs[0]), - n => format!(" with {} missing txs {}..{}", n, missing_txs[0], missing_txs.last().unwrap()), - }; - - // This call cleanly removes the invalid transaction. - let result = mempool.remove_transaction( + } + Err(RuleError::RejectMissingOutpoint) => { + let missing_txs = transaction + .entries + .iter() + .zip(transaction.tx.inputs.iter()) + .filter_map(|(entry, input)| entry.is_none().then_some(input.previous_outpoint.transaction_id)) + .collect::>(); + + // A transaction may have missing outpoints for legitimate reasons related to concurrency, like a race condition between + // an accepted block having not started yet or unfinished call to handle_new_block_transactions but already processed by + // the consensus and this ongoing call to revalidate. + // + // So we only remove the transaction and keep its redeemers in the mempool because we cannot be sure they are invalid, in + // fact in the race condition case they are valid regarding outpoints. + let extra_info = match missing_txs.len() { + 0 => " but no missing tx!".to_string(), // this is never supposed to happen + 1 => format!(" missing tx {}", missing_txs[0]), + n => format!(" with {} missing txs {}..{}", n, missing_txs[0], missing_txs.last().unwrap()), + }; + + // This call cleanly removes the invalid transaction. + _ = mempool + .remove_transaction( &transaction_id, false, TxRemovalReason::RevalidationWithMissingOutpoints, extra_info.as_str(), - ); - if let Err(err) = result { - warn!("Failed to remove transaction {} from mempool: {}", transaction_id, err); - } - missing_outpoint += 1; - } - Err(err) => { - // Rust rewrite note: - // The behavior changes here compared to the golang version. - // The failed revalidation is simply logged and the process continues. - warn!( - "Removing high priority transaction {0} and its redeemers, it failed revalidation with {1}", - transaction_id, err - ); - // This call cleanly removes the invalid transaction and its redeemers. 
- let result = mempool.remove_transaction(&transaction_id, true, TxRemovalReason::Muted, ""); - if let Err(err) = result { - warn!("Failed to remove transaction {} from mempool: {}", transaction_id, err); - } - invalid += 1; - } + ) + .inspect_err(|err| warn!("Failed to remove transaction {} from mempool: {}", transaction_id, err)); + missing_outpoint += 1; + } + Err(err) => { + // Rust rewrite note: + // The behavior changes here compared to the golang version. + // The failed revalidation is simply logged and the process continues. + warn!( + "Removing high priority transaction {0} and its redeemers, it failed revalidation with {1}", + transaction_id, err + ); + // This call cleanly removes the invalid transaction and its redeemers. + _ = mempool + .remove_transaction(&transaction_id, true, TxRemovalReason::Muted, "") + .inspect_err(|err| warn!("Failed to remove transaction {} from mempool: {}", transaction_id, err)); + invalid += 1; } - } else { - other += 1; } } if !valid_ids.is_empty() { @@ -713,6 +799,12 @@ impl MiningManager { missing_outpoint, invalid, ); + if other > 0 { + debug!( + "During revalidation of high priority transactions {} txs were removed from the mempool by concurrent flows", + other + ) + } } } } @@ -739,6 +831,11 @@ impl MiningManager { pub fn unknown_transactions(&self, transactions: Vec<TransactionId>) -> Vec<TransactionId> { self.mempool.read().unknown_transactions(transactions) } + + #[cfg(test)] + pub(crate) fn get_estimated_size(&self) -> usize { + self.mempool.read().get_estimated_size() + } } /// Async proxy for the mining manager @@ -756,35 +853,60 @@ impl MiningManagerProxy { consensus.clone().spawn_blocking(move |c| self.inner.get_block_template(c, &miner_data)).await } + /// Returns realtime feerate estimations based on internal mempool state + pub async fn get_realtime_feerate_estimations(self) -> FeerateEstimations { + spawn_blocking(move || self.inner.get_realtime_feerate_estimations()).await.unwrap() + } + + /// Returns realtime feerate estimations based on internal mempool state with additional verbose data + pub async fn get_realtime_feerate_estimations_verbose( + self, + consensus: &ConsensusProxy, + prefix: kaspa_addresses::Prefix, + ) -> MiningManagerResult<FeeEstimateVerbose> { + consensus.clone().spawn_blocking(move |c| self.inner.get_realtime_feerate_estimations_verbose(c, prefix)).await + } + /// Validates a transaction and adds it to the set of known transactions that have not yet been /// added to any block. /// - /// The returned transactions are clones of objects owned by the mempool. + /// The validation is constrained by a replace-by-fee policy applied + /// to double spends in the mempool. For more information, see [`RbfPolicy`]. + /// + /// The returned transactions are references to objects owned by the mempool. pub async fn validate_and_insert_transaction( self, consensus: &ConsensusProxy, transaction: Transaction, priority: Priority, orphan: Orphan, - ) -> MiningManagerResult<Vec<Arc<Transaction>>> { - consensus.clone().spawn_blocking(move |c| self.inner.validate_and_insert_transaction(c, transaction, priority, orphan)).await + rbf_policy: RbfPolicy, + ) -> MiningManagerResult<TransactionInsertion> { + consensus + .clone() + .spawn_blocking(move |c| self.inner.validate_and_insert_transaction(c, transaction, priority, orphan, rbf_policy)) + .await } /// Validates a batch of transactions, handling iteratively only the independent ones, and /// adds those to the set of known transactions that have not yet been added to any block.
/// + /// The validation is constrained by a replace-by-fee policy applied + /// to double spends in the mempool. For more information, see [`RbfPolicy`]. + /// /// Returns transactions that were unorphaned following the insertion of the provided - transactions. The returned transactions are clones of objects owned by the mempool. + transactions. The returned transactions are references to objects owned by the mempool. pub async fn validate_and_insert_transaction_batch( self, consensus: &ConsensusProxy, transactions: Vec<Transaction>, priority: Priority, orphan: Orphan, + rbf_policy: RbfPolicy, ) -> Vec<MiningManagerResult<Arc<Transaction>>> { consensus .clone() - .spawn_blocking(move |c| self.inner.validate_and_insert_transaction_batch(c, transactions, priority, orphan)) + .spawn_blocking(move |c| self.inner.validate_and_insert_transaction_batch(c, transactions, priority, orphan, rbf_policy)) .await } @@ -889,3 +1011,103 @@ impl MiningManagerProxy { count } } + +/// Represents statistical information about fee rates of transactions. +struct Stats { + /// The maximum fee rate observed. + max: f64, + /// The median fee rate observed. + median: f64, + /// The minimum fee rate observed. + min: f64, +} +/// Calculates the maximum, median, and minimum fee rates (fee per unit mass) +/// for a set of transactions, excluding the first transaction which is assumed +/// to be the coinbase transaction. +/// +/// # Arguments +/// +/// * `transactions` - A vector of `Transaction` objects. The first transaction +/// is assumed to be the coinbase transaction and is excluded from fee rate +/// calculations. +/// * `calculated_fees` - A vector of fees associated with the transactions. +/// This vector should have one less element than the `transactions` vector +/// since the first transaction (coinbase) does not have a fee. +/// +/// # Returns +/// +/// Returns an `Option<Stats>` containing the maximum, median, and minimum fee +/// rates if the input vectors are valid. Returns `None` if the vectors are +/// empty or if the lengths are inconsistent.
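+/// +/// Worked example (hypothetical numbers): for a template holding the coinbase plus two +/// transactions of mass 2 each, with `calculated_fees = [100, 200]`, the feerates are +/// `[50.0, 100.0]`, yielding `min = 50.0`, `median = 100.0` and `max = 100.0`.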
+fn feerate_stats(transactions: Vec<Transaction>, calculated_fees: Vec<u64>) -> Option<Stats> { + if calculated_fees.is_empty() { + return None; + } + if transactions.len() != calculated_fees.len() + 1 { + error!( + "[feerate_stats] block template transactions length ({}) is expected to be one more than `calculated_fees` length ({})", + transactions.len(), + calculated_fees.len() + ); + return None; + } + debug_assert!(transactions[0].is_coinbase()); + let mut feerates = calculated_fees + .into_iter() + .zip(transactions + .iter() + // skip coinbase tx + .skip(1) + .map(Transaction::mass)) + .map(|(fee, mass)| fee as f64 / mass as f64) + .collect_vec(); + feerates.sort_unstable_by(f64::total_cmp); + + let max = feerates[feerates.len() - 1]; + let min = feerates[0]; + let median = feerates[feerates.len() / 2]; + + Some(Stats { max, median, min }) +} + +#[cfg(test)] +mod tests { + use super::*; + use kaspa_consensus_core::subnets; + use std::iter::repeat; + + fn transactions(length: usize) -> Vec<Transaction> { + let tx = || { + let tx = Transaction::new(0, vec![], vec![], 0, Default::default(), 0, vec![]); + tx.set_mass(2); + tx + }; + let mut txs = repeat(tx()).take(length).collect_vec(); + txs[0].subnetwork_id = subnets::SUBNETWORK_ID_COINBASE; + txs + } + + #[test] + fn feerate_stats_test() { + let calculated_fees = vec![100u64, 200, 300, 400]; + let txs = transactions(calculated_fees.len() + 1); + let Stats { max, median, min } = feerate_stats(txs, calculated_fees).unwrap(); + assert_eq!(max, 200.0); + assert_eq!(median, 150.0); + assert_eq!(min, 50.0); + } + + #[test] + fn feerate_stats_empty_test() { + let calculated_fees = vec![]; + let txs = transactions(calculated_fees.len() + 1); + assert!(feerate_stats(txs, calculated_fees).is_none()); + } + + #[test] + fn feerate_stats_inconsistent_test() { + let calculated_fees = vec![100u64, 200, 300, 400]; + let txs = transactions(calculated_fees.len()); + assert!(feerate_stats(txs, calculated_fees).is_none()); + } +} diff --git a/mining/src/manager_tests.rs b/mining/src/manager_tests.rs index 5301170945..6ddc86e45b 100644 --- a/mining/src/manager_tests.rs +++ b/mining/src/manager_tests.rs @@ -7,19 +7,21 @@ mod tests { mempool::{ config::{Config, DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE}, errors::RuleError, - tx::{Orphan, Priority}, + model::frontier::selectors::TakeAllSelector, + tx::{Orphan, Priority, RbfPolicy}, }, - model::{candidate_tx::CandidateTransaction, tx_query::TransactionQuery}, + model::{tx_insert::TransactionInsertion, tx_query::TransactionQuery}, testutils::consensus_mock::ConsensusMock, MiningCounters, }; + use itertools::Itertools; use kaspa_addresses::{Address, Prefix, Version}; use kaspa_consensus_core::{ api::ConsensusApi, block::TemplateBuildMode, coinbase::MinerData, constants::{MAX_TX_IN_SEQUENCE_NUM, SOMPI_PER_KASPA, TX_VERSION}, - errors::tx::{TxResult, TxRuleError}, + errors::tx::TxRuleError, mass::transaction_estimated_serialized_size, subnets::SUBNETWORK_ID_NATIVE, tx::{ @@ -28,11 +30,13 @@ mod tests { }, }; use kaspa_hashes::Hash; + use kaspa_mining_errors::mempool::RuleResult; use kaspa_txscript::{ pay_to_address_script, pay_to_script_hash_signature_script, - test_helpers::{create_transaction, op_true_script}, + test_helpers::{create_transaction, create_transaction_with_change, op_true_script}, }; - use std::sync::Arc; + use kaspa_utils::mem_size::MemSizeEstimator, + use std::{iter::once, sync::Arc}; use tokio::sync::mpsc::{error::TryRecvError, unbounded_channel}; const TARGET_TIME_PER_BLOCK: u64 = 1_000; @@ -42,72 +46,106 @@ mod tests { #[test] fn
test_validate_and_insert_transaction() { const TX_COUNT: u32 = 10; - let consensus = Arc::new(ConsensusMock::new()); - let counters = Arc::new(MiningCounters::default()); - let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); - let transactions_to_insert = (0..TX_COUNT).map(|i| create_transaction_with_utxo_entry(i, 0)).collect::>(); - for transaction in transactions_to_insert.iter() { - let result = mining_manager.validate_and_insert_mutable_transaction( - consensus.as_ref(), - transaction.clone(), - Priority::Low, - Orphan::Allowed, - ); - assert!(result.is_ok(), "inserting a valid transaction failed"); - } - // The UtxoEntry was filled manually for those transactions, so the transactions won't be considered orphans. - // Therefore, all the transactions expected to be contained in the mempool. - let (transactions_from_pool, _) = mining_manager.get_all_transactions(TransactionQuery::TransactionsOnly); - assert_eq!( - transactions_to_insert.len(), - transactions_from_pool.len(), - "wrong number of transactions in mempool: expected: {}, got: {}", - transactions_to_insert.len(), - transactions_from_pool.len() - ); - transactions_to_insert.iter().for_each(|tx_to_insert| { - let found_exact_match = transactions_from_pool.contains(tx_to_insert); - let tx_from_pool = transactions_from_pool.iter().find(|tx_from_pool| tx_from_pool.id() == tx_to_insert.id()); - let found_transaction_id = tx_from_pool.is_some(); - if found_transaction_id && !found_exact_match { - let tx = tx_from_pool.unwrap(); - assert_eq!( - tx_to_insert.calculated_fee.unwrap(), - tx.calculated_fee.unwrap(), - "wrong fee in transaction {}: expected: {}, got: {}", - tx.id(), - tx_to_insert.calculated_fee.unwrap(), - tx.calculated_fee.unwrap() - ); - assert_eq!( - tx_to_insert.calculated_compute_mass.unwrap(), - tx.calculated_compute_mass.unwrap(), - "wrong mass in transaction {}: expected: {}, got: {}", - tx.id(), - tx_to_insert.calculated_compute_mass.unwrap(), - tx.calculated_compute_mass.unwrap() - ); + for (priority, orphan, rbf_policy) in all_priority_orphan_rbf_policy_combinations() { + let consensus = Arc::new(ConsensusMock::new()); + let counters = Arc::new(MiningCounters::default()); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); + let transactions_to_insert = (0..TX_COUNT).map(|i| create_transaction_with_utxo_entry(i, 0)).collect::>(); + for transaction in transactions_to_insert.iter() { + let result = into_mempool_result(mining_manager.validate_and_insert_mutable_transaction( + consensus.as_ref(), + transaction.clone(), + priority, + orphan, + rbf_policy, + )); + match rbf_policy { + RbfPolicy::Forbidden | RbfPolicy::Allowed => { + assert!(result.is_ok(), "({priority:?}, {orphan:?}, {rbf_policy:?}) inserting a valid transaction failed"); + } + RbfPolicy::Mandatory => { + assert!(result.is_err(), "({priority:?}, {orphan:?}, {rbf_policy:?}) replacing a valid transaction without replacement in mempool should fail"); + let err = result.unwrap_err(); + assert_eq!( + RuleError::RejectRbfNoDoubleSpend, + err, + "({priority:?}, {orphan:?}, {rbf_policy:?}) wrong error: expected {} got: {}", + RuleError::RejectRbfNoDoubleSpend, + err, + ); + } + } } - assert!(found_exact_match, "missing transaction {} in the mempool, no exact match", tx_to_insert.id()); - }); - // The parent's transaction was inserted into the consensus, so we want to verify that - // the child transaction is not considered an orphan and inserted into the 
mempool. - let transaction_not_an_orphan = create_child_and_parent_txs_and_add_parent_to_consensus(&consensus); - let result = mining_manager.validate_and_insert_transaction( - consensus.as_ref(), - transaction_not_an_orphan.clone(), - Priority::Low, - Orphan::Allowed, - ); - assert!(result.is_ok(), "inserting the child transaction {} into the mempool failed", transaction_not_an_orphan.id()); - let (transactions_from_pool, _) = mining_manager.get_all_transactions(TransactionQuery::TransactionsOnly); - assert!( - contained_by(transaction_not_an_orphan.id(), &transactions_from_pool), - "missing transaction {} in the mempool", - transaction_not_an_orphan.id() - ); + // The UtxoEntry was filled manually for those transactions, so the transactions won't be considered orphans. + // Therefore, all the transactions expected to be contained in the mempool if replace by fee policy allowed it. + let (transactions_from_pool, _) = mining_manager.get_all_transactions(TransactionQuery::TransactionsOnly); + let transactions_inserted = match rbf_policy { + RbfPolicy::Forbidden | RbfPolicy::Allowed => transactions_to_insert.clone(), + RbfPolicy::Mandatory => { + vec![] + } + }; + assert_eq!( + transactions_inserted.len(), + transactions_from_pool.len(), + "({priority:?}, {orphan:?}, {rbf_policy:?}) wrong number of transactions in mempool: expected: {}, got: {}", + transactions_inserted.len(), + transactions_from_pool.len() + ); + transactions_inserted.iter().for_each(|tx_to_insert| { + let found_exact_match = transactions_from_pool.contains(tx_to_insert); + let tx_from_pool = transactions_from_pool.iter().find(|tx_from_pool| tx_from_pool.id() == tx_to_insert.id()); + let found_transaction_id = tx_from_pool.is_some(); + if found_transaction_id && !found_exact_match { + let tx = tx_from_pool.unwrap(); + assert_eq!( + tx_to_insert.calculated_fee.unwrap(), + tx.calculated_fee.unwrap(), + "({priority:?}, {orphan:?}, {rbf_policy:?}) wrong fee in transaction {}: expected: {}, got: {}", + tx.id(), + tx_to_insert.calculated_fee.unwrap(), + tx.calculated_fee.unwrap() + ); + assert_eq!( + tx_to_insert.calculated_compute_mass.unwrap(), + tx.calculated_compute_mass.unwrap(), + "({priority:?}, {orphan:?}, {rbf_policy:?}) wrong mass in transaction {}: expected: {}, got: {}", + tx.id(), + tx_to_insert.calculated_compute_mass.unwrap(), + tx.calculated_compute_mass.unwrap() + ); + } + assert!( + found_exact_match, + "({priority:?}, {orphan:?}, {rbf_policy:?}) missing transaction {} in the mempool, no exact match", + tx_to_insert.id() + ); + }); + + // The parent's transaction was inserted into the consensus, so we want to verify that + // the child transaction is not considered an orphan and inserted into the mempool. 
+ let transaction_not_an_orphan = create_child_and_parent_txs_and_add_parent_to_consensus(&consensus); + let result = mining_manager.validate_and_insert_transaction( + consensus.as_ref(), + transaction_not_an_orphan.clone(), + priority, + orphan, + RbfPolicy::Forbidden, + ); + assert!( + result.is_ok(), + "({priority:?}, {orphan:?}, {rbf_policy:?}) inserting the child transaction {} into the mempool failed", + transaction_not_an_orphan.id() + ); + let (transactions_from_pool, _) = mining_manager.get_all_transactions(TransactionQuery::TransactionsOnly); + assert!( + contained_by(transaction_not_an_orphan.id(), &transactions_from_pool), + "({priority:?}, {orphan:?}, {rbf_policy:?}) missing transaction {} in the mempool", + transaction_not_an_orphan.id() + ); + } } /// test_simulated_error_in_consensus verifies that a predefined result is actually @@ -115,127 +153,397 @@ mod tests { /// insert a transaction. #[test] fn test_simulated_error_in_consensus() { - let consensus = Arc::new(ConsensusMock::new()); - let counters = Arc::new(MiningCounters::default()); - let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); - - // Build an invalid transaction with some gas and inform the consensus mock about the result it should return - // when the mempool will submit this transaction for validation. - let mut transaction = create_transaction_with_utxo_entry(0, 1); - Arc::make_mut(&mut transaction.tx).gas = 1000; - let status = Err(TxRuleError::TxHasGas); - consensus.set_status(transaction.id(), status.clone()); - - // Try validate and insert the transaction into the mempool - let result = into_status(mining_manager.validate_and_insert_transaction( - consensus.as_ref(), - transaction.tx.as_ref().clone(), - Priority::Low, - Orphan::Allowed, - )); + for (priority, orphan, rbf_policy) in all_priority_orphan_rbf_policy_combinations() { + let consensus = Arc::new(ConsensusMock::new()); + let counters = Arc::new(MiningCounters::default()); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); + + // Build an invalid transaction with some gas and inform the consensus mock about the result it should return + // when the mempool will submit this transaction for validation. 
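+ // (A non-zero gas value is what makes the transaction invalid here: transactions on the + // native subnetwork must carry zero gas, so `TxHasGas` is a natural error for the + // consensus mock to simulate.)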
+ let mut transaction = create_transaction_with_utxo_entry(0, 1); + Arc::make_mut(&mut transaction.tx).gas = 1000; + let tx_err = TxRuleError::TxHasGas; + let expected = match rbf_policy { + RbfPolicy::Forbidden | RbfPolicy::Allowed => Err(RuleError::from(tx_err.clone())), + RbfPolicy::Mandatory => Err(RuleError::RejectRbfNoDoubleSpend), + }; + consensus.set_status(transaction.id(), Err(tx_err)); + + // Try validate and insert the transaction into the mempool + let result = into_mempool_result(mining_manager.validate_and_insert_mutable_transaction( + consensus.as_ref(), + transaction.clone(), + priority, + orphan, + rbf_policy, + )); - assert_eq!( - status, result, - "Unexpected result when trying to insert an invalid transaction: expected: {status:?}, got: {result:?}", - ); - let pool_tx = mining_manager.get_transaction(&transaction.id(), TransactionQuery::All); - assert!(pool_tx.is_none(), "Mempool contains a transaction that should have been rejected"); + assert_eq!( + expected, result, + "({priority:?}, {orphan:?}, {rbf_policy:?}) unexpected result when trying to insert an invalid transaction: expected: {expected:?}, got: {result:?}", + ); + let pool_tx = mining_manager.get_transaction(&transaction.id(), TransactionQuery::All); + assert!( + pool_tx.is_none(), + "({priority:?}, {orphan:?}, {rbf_policy:?}) mempool contains a transaction that should have been rejected" + ); + } } /// test_insert_double_transactions_to_mempool verifies that an attempt to insert a transaction /// more than once into the mempool will result in raising an appropriate error. #[test] fn test_insert_double_transactions_to_mempool() { - let consensus = Arc::new(ConsensusMock::new()); - let counters = Arc::new(MiningCounters::default()); - let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); + for (priority, orphan, rbf_policy) in all_priority_orphan_rbf_policy_combinations() { + let consensus = Arc::new(ConsensusMock::new()); + let counters = Arc::new(MiningCounters::default()); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); - let transaction = create_transaction_with_utxo_entry(0, 0); + let transaction = create_transaction_with_utxo_entry(0, 0); - // submit the transaction to the mempool - let result = mining_manager.validate_and_insert_mutable_transaction( - consensus.as_ref(), - transaction.clone(), - Priority::Low, - Orphan::Allowed, - ); - assert!(result.is_ok(), "mempool should have accepted a valid transaction but did not"); - - // submit the same transaction again to the mempool - let result = mining_manager.validate_and_insert_transaction( - consensus.as_ref(), - transaction.tx.as_ref().clone(), - Priority::Low, - Orphan::Allowed, - ); - assert!(result.is_err(), "mempool should refuse a double submit of the same transaction but accepts it"); - if let Err(MiningManagerError::MempoolError(RuleError::RejectDuplicate(transaction_id))) = result { - assert_eq!( - transaction.id(), - transaction_id, - "the error returned by the mempool should include id {} but provides {}", - transaction.id(), - transaction_id + // submit the transaction to the mempool + let result = mining_manager.validate_and_insert_mutable_transaction( + consensus.as_ref(), + transaction.clone(), + priority, + orphan, + rbf_policy.for_insert(), ); - } else { - panic!( - "the nested error returned by the mempool should be variant RuleError::RejectDuplicate but is {:?}", - result.err().unwrap() + assert!( + result.is_ok(), + 
"({priority:?}, {orphan:?}, {rbf_policy:?}) mempool should have accepted a valid transaction but did not" ); + + // submit the same transaction again to the mempool + let result = into_mempool_result(mining_manager.validate_and_insert_transaction( + consensus.as_ref(), + transaction.tx.as_ref().clone(), + priority, + orphan, + rbf_policy, + )); + match result { + Err(RuleError::RejectDuplicate(transaction_id)) => { + assert_eq!( + transaction.id(), + transaction_id, + "({priority:?}, {orphan:?}, {rbf_policy:?}) the error returned by the mempool should include transaction id {} but provides {}", + transaction.id(), + transaction_id + ); + } + Err(err) => { + panic!( + "({priority:?}, {orphan:?}, {rbf_policy:?}) the error returned by the mempool should be {:?} but is {err:?}", + RuleError::RejectDuplicate(transaction.id()) + ); + } + Ok(()) => { + panic!("({priority:?}, {orphan:?}, {rbf_policy:?}) mempool should refuse a double submit of the same transaction but accepts it"); + } + } } } - // test_double_spend_in_mempool verifies that an attempt to insert a transaction double-spending - // another transaction already in the mempool will result in raising an appropriate error. + /// test_double_spend_in_mempool verifies that an attempt to insert a transaction double-spending + /// another transaction already in the mempool will result in raising an appropriate error. #[test] fn test_double_spend_in_mempool() { - let consensus = Arc::new(ConsensusMock::new()); - let counters = Arc::new(MiningCounters::default()); - let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); + for (priority, orphan, rbf_policy) in all_priority_orphan_rbf_policy_combinations() { + let consensus = Arc::new(ConsensusMock::new()); + let counters = Arc::new(MiningCounters::default()); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); - let transaction = create_child_and_parent_txs_and_add_parent_to_consensus(&consensus); - assert!( - consensus.can_finance_transaction(&MutableTransaction::from_tx(transaction.clone())), - "the consensus mock should have spendable UTXOs for the newly created transaction {}", - transaction.id() - ); + let transaction = create_child_and_parent_txs_and_add_parent_to_consensus(&consensus); + assert!( + consensus.can_finance_transaction(&MutableTransaction::from_tx(transaction.clone())), + "({priority:?}, {orphan:?}, {rbf_policy:?}) the consensus mock should have spendable UTXOs for the newly created transaction {}", + transaction.id() + ); - let result = - mining_manager.validate_and_insert_transaction(consensus.as_ref(), transaction.clone(), Priority::Low, Orphan::Allowed); - assert!(result.is_ok(), "the mempool should accept a valid transaction when it is able to populate its UTXO entries"); + let result = mining_manager.validate_and_insert_transaction( + consensus.as_ref(), + transaction.clone(), + priority, + orphan, + RbfPolicy::Forbidden, + ); + assert!(result.is_ok(), "({priority:?}, {orphan:?}, {rbf_policy:?}) the mempool should accept a valid transaction when it is able to populate its UTXO entries"); - let mut double_spending_transaction = transaction.clone(); - double_spending_transaction.outputs[0].value -= 1; // do some minor change so that txID is different - double_spending_transaction.finalize(); - assert_ne!( - transaction.id(), - double_spending_transaction.id(), - "two transactions differing by only one output value should have different ids" - ); - let result = 
mining_manager.validate_and_insert_transaction( - consensus.as_ref(), - double_spending_transaction.clone(), - Priority::Low, - Orphan::Allowed, - ); - assert!(result.is_err(), "mempool should refuse a double spend transaction but accepts it"); - if let Err(MiningManagerError::MempoolError(RuleError::RejectDoubleSpendInMempool(_, transaction_id))) = result { - assert_eq!( - transaction.id(), - transaction_id, - "the error returned by the mempool should include id {} but provides {}", + let mut double_spending_transaction = transaction.clone(); + double_spending_transaction.outputs[0].value += 1; // do some minor change so that txID is different while not increasing fee + double_spending_transaction.finalize(); + assert_ne!( transaction.id(), - transaction_id - ); - } else { - panic!( - "the nested error returned by the mempool should be variant RuleError::RejectDoubleSpendInMempool but is {:?}", - result.err().unwrap() + double_spending_transaction.id(), + "({priority:?}, {orphan:?}, {rbf_policy:?}) two transactions differing by only one output value should have different ids" ); + let result = into_mempool_result(mining_manager.validate_and_insert_transaction( + consensus.as_ref(), + double_spending_transaction.clone(), + priority, + orphan, + rbf_policy, + )); + match result { + Err(RuleError::RejectDoubleSpendInMempool(_, transaction_id)) => { + assert_eq!( + transaction.id(), + transaction_id, + "({priority:?}, {orphan:?}, {rbf_policy:?}) the error returned by the mempool should include id {} but provides {}", + transaction.id(), + transaction_id + ); + } + Err(err) => { + panic!("({priority:?}, {orphan:?}, {rbf_policy:?}) the error returned by the mempool should be RuleError::RejectDoubleSpendInMempool but is {err:?}"); + } + Ok(()) => { + panic!("({priority:?}, {orphan:?}, {rbf_policy:?}) mempool should refuse a double spend transaction ineligible to RBF but accepts it"); + } + } } } - // test_handle_new_block_transactions verifies that all the transactions in the block were successfully removed from the mempool. + /// test_replace_by_fee_in_mempool verifies that an attempt to insert a double-spending transaction + /// will cause or not the transaction(s) double spending in the mempool to be replaced/removed, + /// depending on varying factors. 
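+ /// + /// Broadly, per the expectation tables below: a replacement can only succeed under + /// `RbfPolicy::Allowed` or `RbfPolicy::Mandatory`, and only when it pays a strictly higher + /// feerate (fee/mass) than the mempool transaction(s) it double spends; `Mandatory` + /// additionally fails when more than one mempool transaction is double spent.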
+ #[test] + fn test_replace_by_fee_in_mempool() { + const BASE_FEE: u64 = DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE; + + struct TxOp { + /// Funding transaction indexes + tx: Vec, + /// Funding transaction output indexes + output: Vec, + /// Add a change output to the transaction + change: bool, + /// Transaction fee + fee: u64, + /// Children binary tree depth + depth: usize, + } + + impl TxOp { + fn change(&self) -> Option { + self.change.then_some(900 * SOMPI_PER_KASPA) + } + } + + struct Test { + name: &'static str, + /// Initial transactions in the mempool + starts: Vec, + /// Replacement transaction submitted to the mempool + replacement: TxOp, + /// Expected RBF result for the 3 policies [Forbidden, Allowed, Mandatory] + expected: [bool; 3], + } + + impl Test { + fn run_rbf(&self, rbf_policy: RbfPolicy, expected: bool) { + let consensus = Arc::new(ConsensusMock::new()); + let counters = Arc::new(MiningCounters::default()); + let mining_manager = MiningManager::new(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS, None, counters); + let funding_transactions = create_and_add_funding_transactions(&consensus, 10); + + // RPC submit the initial transactions + let (transactions, children): (Vec<_>, Vec<_>) = + self.starts + .iter() + .map(|tx_op| { + let transaction = create_funded_transaction( + select_transactions(&funding_transactions, &tx_op.tx), + tx_op.output.clone(), + tx_op.change(), + tx_op.fee, + ); + assert!( + consensus.can_finance_transaction(&MutableTransaction::from_tx(transaction.clone())), + "[{}, {:?}] the consensus should have spendable UTXOs for the newly created transaction {}", + self.name, rbf_policy, transaction.id() + ); + let result = mining_manager.validate_and_insert_transaction( + consensus.as_ref(), + transaction.clone(), + Priority::High, + Orphan::Allowed, + RbfPolicy::Forbidden, + ); + assert!( + result.is_ok(), + "[{}, {:?}] the mempool should accept a valid transaction when it is able to populate its UTXO entries", + self.name, rbf_policy, + ); + let children = create_children_tree(&transaction, tx_op.depth); + let children_count = (2_usize.pow(tx_op.depth as u32) - 1) * transaction.outputs.len(); + assert_eq!( + children.len(), children_count, + "[{}, {:?}] a parent transaction with {} output(s) should generate a binary children tree of depth {} with {} children but got {}", + self.name, rbf_policy, transaction.outputs.len(), tx_op.depth, children_count, children.len(), + ); + validate_and_insert_transactions( + &mining_manager, + consensus.as_ref(), + children.iter(), + Priority::High, + Orphan::Allowed, + RbfPolicy::Forbidden, + ); + (transaction, children) + }) + .unzip(); + + // RPC submit transaction replacement + let transaction_replacement = create_funded_transaction( + select_transactions(&funding_transactions, &self.replacement.tx), + self.replacement.output.clone(), + self.replacement.change(), + self.replacement.fee, + ); + assert!( + consensus.can_finance_transaction(&MutableTransaction::from_tx(transaction_replacement.clone())), + "[{}, {:?}] the consensus should have spendable UTXOs for the newly created transaction {}", + self.name, + rbf_policy, + transaction_replacement.id() + ); + let tx_count = mining_manager.transaction_count(TransactionQuery::TransactionsOnly); + let expected_tx_count = match expected { + true => tx_count + 1 - transactions.len() - children.iter().map(|x| x.len()).sum::(), + false => tx_count, + }; + let priority = match rbf_policy { + RbfPolicy::Forbidden | RbfPolicy::Mandatory => Priority::High, + RbfPolicy::Allowed => 
Priority::Low, + }; + let result = mining_manager.validate_and_insert_transaction( + consensus.as_ref(), + transaction_replacement.clone(), + priority, + Orphan::Forbidden, + rbf_policy, + ); + if expected { + assert!(result.is_ok(), "[{}, {:?}] mempool should accept a RBF transaction", self.name, rbf_policy,); + let tx_insertion = result.unwrap(); + assert_eq!( + tx_insertion.removed.as_ref().unwrap().id(), + transactions[0].id(), + "[{}, {:?}] RBF should return the removed transaction", + self.name, + rbf_policy, + ); + transactions.iter().for_each(|x| { + assert!( + !mining_manager.has_transaction(&x.id(), TransactionQuery::All), + "[{}, {:?}] RBF replaced transaction should no longer be in the mempool", + self.name, + rbf_policy, + ); + }); + assert_transaction_count( + &mining_manager, + expected_tx_count, + &format!( + "[{}, {:?}] RBF should remove all chained transactions of the removed mempool transaction(s)", + self.name, rbf_policy + ), + ); + } else { + assert!(result.is_err(), "[{}, {:?}] mempool should reject the RBF transaction", self.name, rbf_policy); + transactions.iter().for_each(|x| { + assert!( + mining_manager.has_transaction(&x.id(), TransactionQuery::All), + "[{}, {:?}] RBF transaction target is no longer in the mempool", + self.name, + rbf_policy + ); + }); + assert_transaction_count( + &mining_manager, + expected_tx_count, + &format!("[{}, {:?}] a failing RBF should leave the mempool unchanged", self.name, rbf_policy), + ); + } + } + + fn run(&self) { + [RbfPolicy::Forbidden, RbfPolicy::Allowed, RbfPolicy::Mandatory].iter().copied().enumerate().for_each( + |(i, rbf_policy)| { + self.run_rbf(rbf_policy, self.expected[i]); + }, + ) + } + } + + let tests = vec![ + Test { + name: "1 input, 1 output <=> 1 input, 1 output, constant fee", + starts: vec![TxOp { tx: vec![0], output: vec![0], change: false, fee: BASE_FEE, depth: 0 }], + replacement: TxOp { tx: vec![0], output: vec![0], change: false, fee: BASE_FEE, depth: 0 }, + expected: [false, false, false], + }, + Test { + name: "1 input, 1 output <=> 1 input, 1 output, increased fee", + starts: vec![TxOp { tx: vec![0], output: vec![0], change: false, fee: BASE_FEE, depth: 0 }], + replacement: TxOp { tx: vec![0], output: vec![0], change: false, fee: BASE_FEE * 2, depth: 0 }, + expected: [false, true, true], + }, + Test { + name: "2 inputs, 2 outputs <=> 2 inputs, 2 outputs, increased fee", + starts: vec![TxOp { tx: vec![0, 1], output: vec![0], change: true, fee: BASE_FEE, depth: 2 }], + replacement: TxOp { tx: vec![0, 1], output: vec![0], change: true, fee: BASE_FEE * 2, depth: 0 }, + expected: [false, true, true], + }, + Test { + name: "4 inputs, 2 outputs <=> 2 inputs, 2 outputs, constant fee", + starts: vec![TxOp { tx: vec![0, 1], output: vec![0, 1], change: true, fee: BASE_FEE, depth: 2 }], + replacement: TxOp { tx: vec![0, 1], output: vec![0], change: true, fee: BASE_FEE, depth: 0 }, + expected: [false, true, true], + }, + Test { + name: "2 inputs, 2 outputs <=> 2 inputs, 1 output, constant fee", + starts: vec![TxOp { tx: vec![0, 1], output: vec![0], change: true, fee: BASE_FEE, depth: 2 }], + replacement: TxOp { tx: vec![0, 1], output: vec![0], change: false, fee: BASE_FEE, depth: 0 }, + expected: [false, true, true], + }, + Test { + name: "2 inputs, 2 outputs <=> 4 inputs, 2 output, constant fee (MUST FAIL on fee/mass)", + starts: vec![TxOp { tx: vec![0, 1], output: vec![0], change: true, fee: BASE_FEE, depth: 2 }], + replacement: TxOp { tx: vec![0, 1], output: vec![0, 1], change: true, fee: BASE_FEE, depth: 0 }, 
+ expected: [false, false, false], + }, + Test { + name: "2 inputs, 1 output <=> 4 inputs, 2 outputs, increased fee (MUST FAIL on fee/mass)", + starts: vec![TxOp { tx: vec![0, 1], output: vec![0], change: false, fee: BASE_FEE, depth: 2 }], + replacement: TxOp { tx: vec![0, 1], output: vec![0, 1], change: true, fee: BASE_FEE + 10, depth: 0 }, + expected: [false, false, false], + }, + Test { + name: "2 inputs, 2 outputs <=> 2 inputs, 1 output, constant fee, partial double spend overlap", + starts: vec![TxOp { tx: vec![0, 1], output: vec![0], change: true, fee: BASE_FEE, depth: 2 }], + replacement: TxOp { tx: vec![0, 2], output: vec![0], change: false, fee: BASE_FEE, depth: 0 }, + expected: [false, true, true], + }, + Test { + name: "(2 inputs, 2 outputs) * 2 <=> 4 inputs, 2 outputs, increased fee, 2 double-spending mempool transactions (MUST FAIL on Mandatory)", + starts: vec![ + TxOp { tx: vec![0, 1], output: vec![0], change: true, fee: BASE_FEE, depth: 2 }, + TxOp { tx: vec![0, 1], output: vec![1], change: true, fee: BASE_FEE, depth: 2 }, + ], + replacement: TxOp { tx: vec![0, 1], output: vec![0, 1], change: true, fee: BASE_FEE * 2, depth: 0 }, + expected: [false, true, false], + }, + ]; + + for test in tests { + test.run(); + } + } + + /// test_handle_new_block_transactions verifies that all the transactions in the block are successfully removed from the mempool. #[test] fn test_handle_new_block_transactions() { let consensus = Arc::new(ConsensusMock::new()); @@ -250,6 +558,7 @@ mod tests { transaction.tx.as_ref().clone(), Priority::Low, Orphan::Allowed, + RbfPolicy::Forbidden, ); assert!(result.is_ok(), "the insertion of a new valid transaction in the mempool failed"); } @@ -295,8 +604,8 @@ mod tests { } #[test] - // test_double_spend_with_block verifies that any transactions which are now double spends as a result of the block's new transactions - // will be removed from the mempool. + /// test_double_spend_with_block verifies that any transactions which are now double spends as a result of the block's new transactions + /// will be removed from the mempool. fn test_double_spend_with_block() { let consensus = Arc::new(ConsensusMock::new()); let counters = Arc::new(MiningCounters::default()); @@ -308,6 +617,7 @@ mod tests { transaction_in_the_mempool.tx.as_ref().clone(), Priority::Low, Orphan::Allowed, + RbfPolicy::Forbidden, ); assert!(result.is_ok()); @@ -326,7 +636,7 @@ mod tests { ); } - // test_orphan_transactions verifies that a transaction could be a part of a new block template only if it's not an orphan. + /// test_orphan_transactions verifies that a transaction could be a part of a new block template only if it's not an orphan.
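+ // Illustrative call-site sketch (assumes only the shapes introduced by this patch):
+ // every submission now passes an explicit RBF policy, and the insertion result
+ // exposes both the accepted and the removed transactions:
+ //
+ // let result = mining_manager.validate_and_insert_transaction(
+ //     consensus.as_ref(),
+ //     tx.clone(),
+ //     Priority::Low,
+ //     Orphan::Allowed,
+ //     RbfPolicy::Forbidden, // plain submission: double spends are rejected
+ // );
+ // let unorphaned_txs = result.unwrap().accepted;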
#[test] fn test_orphan_transactions() { let consensus = Arc::new(ConsensusMock::new()); @@ -340,8 +650,13 @@ mod tests { assert_eq!(parent_txs.len(), TX_PAIRS_COUNT); assert_eq!(child_txs.len(), TX_PAIRS_COUNT); for orphan in child_txs.iter() { - let result = - mining_manager.validate_and_insert_transaction(consensus.as_ref(), orphan.clone(), Priority::Low, Orphan::Allowed); + let result = mining_manager.validate_and_insert_transaction( + consensus.as_ref(), + orphan.clone(), + Priority::Low, + Orphan::Allowed, + RbfPolicy::Forbidden, + ); assert!(result.is_ok(), "the mempool should accept the valid orphan transaction {}", orphan.id()); } let (populated_txs, orphans) = mining_manager.get_all_transactions(TransactionQuery::All); @@ -485,10 +800,15 @@ mod tests { ); // Add the remaining parent transaction into the mempool - let result = - mining_manager.validate_and_insert_transaction(consensus.as_ref(), parent_txs[0].clone(), Priority::Low, Orphan::Allowed); + let result = mining_manager.validate_and_insert_transaction( + consensus.as_ref(), + parent_txs[0].clone(), + Priority::Low, + Orphan::Allowed, + RbfPolicy::Forbidden, + ); assert!(result.is_ok(), "the insertion of the remaining parent transaction in the mempool failed"); - let unorphaned_txs = result.unwrap(); + let unorphaned_txs = result.unwrap().accepted; let (populated_txs, orphans) = mining_manager.get_all_transactions(TransactionQuery::All); assert_eq!( unorphaned_txs.len(), SKIPPED_TXS + 1, @@ -592,8 +912,13 @@ mod tests { // Try submit children while rejecting orphans for (tx, test) in child_txs.iter().zip(tests.iter()) { - let result = - mining_manager.validate_and_insert_transaction(consensus.as_ref(), tx.clone(), test.priority, Orphan::Forbidden); + let result = mining_manager.validate_and_insert_transaction( + consensus.as_ref(), + tx.clone(), + test.priority, + Orphan::Forbidden, + RbfPolicy::Forbidden, + ); assert!(result.is_err(), "mempool should reject an orphan transaction with {:?} when asked to do so", test.priority); if let Err(MiningManagerError::MempoolError(RuleError::RejectDisallowedOrphan(transaction_id))) = result { assert_eq!( @@ -613,8 +938,13 @@ mod tests { // Try submit children while accepting orphans for (tx, test) in child_txs.iter().zip(tests.iter()) { - let result = - mining_manager.validate_and_insert_transaction(consensus.as_ref(), tx.clone(), test.priority, Orphan::Allowed); + let result = mining_manager.validate_and_insert_transaction( + consensus.as_ref(), + tx.clone(), + test.priority, + Orphan::Allowed, + RbfPolicy::Forbidden, + ); assert_eq!( test.should_enter_orphan_pool, result.is_ok(), @@ -623,7 +953,7 @@ mod tests { test.insert_result() ); if let Ok(unorphaned_txs) = result { - assert!(unorphaned_txs.is_empty(), "mempool should unorphan no transaction since it only contains orphans"); + assert!(unorphaned_txs.accepted.is_empty(), "mempool should unorphan no transaction since it only contains orphans"); } else if let Err(MiningManagerError::MempoolError(RuleError::RejectOrphanPoolIsFull(pool_len, config_len))) = result { assert_eq!( (config.maximum_orphan_transaction_count as usize, config.maximum_orphan_transaction_count), @@ -642,10 +972,15 @@ mod tests { // Submit all the parents for (i, (tx, test)) in parent_txs.iter().zip(tests.iter()).enumerate() { - let result = - mining_manager.validate_and_insert_transaction(consensus.as_ref(), tx.clone(), test.priority, Orphan::Allowed); + let result = mining_manager.validate_and_insert_transaction( + consensus.as_ref(), + tx.clone(), + 
test.priority, + Orphan::Allowed, + RbfPolicy::Forbidden, + ); assert!(result.is_ok(), "mempool should accept a valid transaction with {:?} when asked to do so", test.priority,); - let unorphaned_txs = result.as_ref().unwrap(); + let unorphaned_txs = &result.as_ref().unwrap().accepted; assert_eq!( test.should_unorphan, unorphaned_txs.len() > 1, @@ -682,8 +1017,13 @@ mod tests { // Add to mempool a transaction that spends child_tx_2 (as high priority) let spending_tx = create_transaction(&child_tx_2, 1_000); - let result = - mining_manager.validate_and_insert_transaction(consensus.as_ref(), spending_tx.clone(), Priority::High, Orphan::Allowed); + let result = mining_manager.validate_and_insert_transaction( + consensus.as_ref(), + spending_tx.clone(), + Priority::High, + Orphan::Allowed, + RbfPolicy::Forbidden, + ); assert!(result.is_ok(), "the insertion in the mempool of the spending transaction failed"); // Revalidate, to make sure spending_tx is still valid @@ -725,7 +1065,7 @@ mod tests { assert!(orphan_txs.is_empty(), "orphan pool should be empty"); } - // test_modify_block_template verifies that modifying a block template changes coinbase data correctly. + /// test_modify_block_template verifies that modifying a block template changes coinbase data correctly. #[test] fn test_modify_block_template() { let consensus = Arc::new(ConsensusMock::new()); @@ -737,17 +1077,27 @@ mod tests { let (parent_txs, child_txs) = create_arrays_of_parent_and_children_transactions(&consensus, TX_PAIRS_COUNT); for (parent_tx, child_tx) in parent_txs.iter().zip(child_txs.iter()) { - let result = - mining_manager.validate_and_insert_transaction(consensus.as_ref(), parent_tx.clone(), Priority::Low, Orphan::Allowed); + let result = mining_manager.validate_and_insert_transaction( + consensus.as_ref(), + parent_tx.clone(), + Priority::Low, + Orphan::Allowed, + RbfPolicy::Forbidden, + ); assert!(result.is_ok(), "the mempool should accept the valid parent transaction {}", parent_tx.id()); - let result = - mining_manager.validate_and_insert_transaction(consensus.as_ref(), child_tx.clone(), Priority::Low, Orphan::Allowed); + let result = mining_manager.validate_and_insert_transaction( + consensus.as_ref(), + child_tx.clone(), + Priority::Low, + Orphan::Allowed, + RbfPolicy::Forbidden, + ); assert!(result.is_ok(), "the mempool should accept the valid child transaction {}", child_tx.id()); } // Collect all parent transactions for the next block template. // They are ready since they have no parents in the mempool. - let transactions = mining_manager.block_candidate_transactions(); + let transactions = mining_manager.build_selector().select_transactions(); assert_eq!( TX_PAIRS_COUNT, transactions.len(), @@ -755,7 +1105,7 @@ ); parent_txs.iter().for_each(|x| { assert!( - transactions.iter().any(|tx| tx.tx.id() == x.id()), + transactions.iter().any(|tx| tx.id() == x.id()), "the parent transaction {} should be candidate for the next block template", x.id() ); }); @@ -767,12 +1117,78 @@ // TODO: extend the test according to the golang scenario } + // This is a sanity test for the mempool eviction policy. We check that if the mempool has reached its maximum size + // (in bytes), a high-paying transaction will evict as many transactions as needed so it can enter the + // mempool. + // TODO: Add a test where we try to add a heavy transaction with a fee rate that's higher than some of the mempool + // transactions, but not high enough, so the transaction will be rejected nonetheless.
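+ // Worked numbers for the test below (illustrative): with TX_COUNT = 10 equally sized
+ // transactions exactly filling `mempool_size_limit`, a new transaction occupying
+ // TX_COUNT / 2 = 5 transactions' worth of bytes must evict 5 incumbents, which is
+ // why `TX_COUNT - 5` transactions are expected once the high-fee variant is accepted.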
#[test] + fn test_evict() { + const TX_COUNT: usize = 10; + let txs = (0..TX_COUNT).map(|i| create_transaction_with_utxo_entry(i as u32, 0)).collect_vec(); + + let consensus = Arc::new(ConsensusMock::new()); + let counters = Arc::new(MiningCounters::default()); + let mut config = Config::build_default(TARGET_TIME_PER_BLOCK, false, MAX_BLOCK_MASS); + let tx_size = txs[0].mempool_estimated_bytes(); + let size_limit = TX_COUNT * tx_size; + config.mempool_size_limit = size_limit; + let mining_manager = MiningManager::with_config(config, None, counters); + + for tx in txs { + validate_and_insert_mutable_transaction(&mining_manager, consensus.as_ref(), tx).unwrap(); + } + assert_eq!(mining_manager.get_all_transactions(TransactionQuery::TransactionsOnly).0.len(), TX_COUNT); + + let heavy_tx_low_fee = { + let mut heavy_tx = create_transaction_with_utxo_entry(TX_COUNT as u32, 0); + let mut inner_tx = (*(heavy_tx.tx)).clone(); + inner_tx.payload = vec![0u8; TX_COUNT / 2 * tx_size - inner_tx.estimate_mem_bytes()]; + heavy_tx.tx = inner_tx.into(); + heavy_tx.calculated_fee = Some(2081); + heavy_tx + }; + assert!(validate_and_insert_mutable_transaction(&mining_manager, consensus.as_ref(), heavy_tx_low_fee.clone()).is_err()); + assert_eq!(mining_manager.get_all_transactions(TransactionQuery::TransactionsOnly).0.len(), TX_COUNT); + + let heavy_tx_high_fee = { + let mut heavy_tx = create_transaction_with_utxo_entry(TX_COUNT as u32 + 1, 0); + let mut inner_tx = (*(heavy_tx.tx)).clone(); + inner_tx.payload = vec![0u8; TX_COUNT / 2 * tx_size - inner_tx.estimate_mem_bytes()]; + heavy_tx.tx = inner_tx.into(); + heavy_tx.calculated_fee = Some(500_000); + heavy_tx + }; + validate_and_insert_mutable_transaction(&mining_manager, consensus.as_ref(), heavy_tx_high_fee.clone()).unwrap(); + assert_eq!(mining_manager.get_all_transactions(TransactionQuery::TransactionsOnly).0.len(), TX_COUNT - 5); + assert!(mining_manager.get_estimated_size() <= size_limit); + + let too_big_tx = { + let mut heavy_tx = create_transaction_with_utxo_entry(TX_COUNT as u32 + 2, 0); + let mut inner_tx = (*(heavy_tx.tx)).clone(); + inner_tx.payload = vec![0u8; size_limit]; + heavy_tx.tx = inner_tx.into(); + heavy_tx.calculated_fee = Some(500_000); + heavy_tx + }; + assert!(validate_and_insert_mutable_transaction(&mining_manager, consensus.as_ref(), too_big_tx.clone()).is_err()); + } + + fn validate_and_insert_mutable_transaction( + mining_manager: &MiningManager, + consensus: &dyn ConsensusApi, + tx: MutableTransaction, + ) -> Result<TransactionInsertion, MiningManagerError> { + mining_manager.validate_and_insert_mutable_transaction(consensus, tx, Priority::Low, Orphan::Allowed, RbfPolicy::Forbidden) + } + fn sweep_compare_modified_template_to_built( consensus: &dyn ConsensusApi, address_prefix: Prefix, mining_manager: &MiningManager, - transactions: Vec<CandidateTransaction>, + transactions: Vec<Transaction>, ) { + let transactions = transactions.into_iter().map(Arc::new).collect::<Vec<_>>(); for _ in 0..4 { // Run a few times to get more randomness compare_modified_template_to_built( @@ -839,7 +1255,7 @@ consensus: &dyn ConsensusApi, address_prefix: Prefix, mining_manager: &MiningManager, - transactions: Vec<CandidateTransaction>, + transactions: Vec<Arc<Transaction>>, first_op: OpType, second_op: OpType, ) { @@ -848,7 +1264,12 @@ // Build a fresh template for coinbase2 as a reference let builder = mining_manager.block_template_builder(); - let result = builder.build_block_template(consensus, &miner_data_2, transactions, TemplateBuildMode::Standard); + let result = builder.build_block_template( + consensus, + &miner_data_2, +
Box::new(TakeAllSelector::new(transactions)), + TemplateBuildMode::Standard, + ); assert!(result.is_ok(), "build block template failed for miner data 2"); let expected_template = result.unwrap(); @@ -933,6 +1354,68 @@ mod tests { mutable_tx } + fn create_and_add_funding_transactions(consensus: &Arc<ConsensusMock>, count: usize) -> Vec<Transaction> { + // Make the funding amounts always different so that funding txs have different ids + (0..count) + .map(|i| { + let funding_tx = create_transaction_without_input(vec![1_000 * SOMPI_PER_KASPA, 2_500 * SOMPI_PER_KASPA + i as u64]); + consensus.add_transaction(funding_tx.clone(), 1); + funding_tx + }) + .collect_vec() + } + + fn select_transactions<'a>(transactions: &'a [Transaction], indexes: &'a [usize]) -> impl Iterator<Item = &'a Transaction> { + indexes.iter().map(|i| &transactions[*i]) + } + + fn create_funded_transaction<'a>( + txs_to_spend: impl Iterator<Item = &'a Transaction>, + output_indexes: Vec<usize>, + change: Option<u64>, + fee: u64, + ) -> Transaction { + create_transaction_with_change(txs_to_spend, output_indexes, change, fee) + } + + fn create_children_tree(parent: &Transaction, depth: usize) -> Vec<Transaction> { + let mut tree = vec![]; + let root = [parent.clone()]; + let mut parents = &root[..]; + let mut first_child = 0; + for _ in 0..depth { + let mut children = vec![]; + for parent in parents { + children.extend(parent.outputs.iter().enumerate().map(|(i, output)| { + create_transaction_with_change( + once(parent), + vec![i], + Some(output.value / 2), + DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE, + ) + })); + } + tree.extend(children); + parents = &tree[first_child..]; + first_child = tree.len() + } + tree + } + + fn validate_and_insert_transactions<'a>( + mining_manager: &MiningManager, + consensus: &dyn ConsensusApi, + transactions: impl Iterator<Item = &'a MutableTransaction>, + priority: Priority, + orphan: Orphan, + rbf_policy: RbfPolicy, + ) { + transactions.for_each(|transaction| { + let result = mining_manager.validate_and_insert_transaction(consensus, transaction.clone(), priority, orphan, rbf_policy); + assert!(result.is_ok(), "the mempool should accept a valid transaction when it is able to populate its UTXO entries"); + }); + } + fn create_arrays_of_parent_and_children_transactions( consensus: &Arc<ConsensusMock>, count: usize, @@ -974,11 +1457,13 @@ mod tests { transactions.iter().any(|x| x.as_ref().id() == transaction_id) } - fn into_status(result: MiningManagerResult<Vec<Arc<Transaction>>>) -> TxResult<()> { + fn into_mempool_result(result: MiningManagerResult<TransactionInsertion>) -> RuleResult<()> { match result { Ok(_) => Ok(()), - Err(MiningManagerError::MempoolError(RuleError::RejectTxRule(err))) => Err(err), - _ => Ok(()), + Err(MiningManagerError::MempoolError(err)) => Err(err), + _ => { + panic!("result is an unsupported error"); + } } } @@ -1000,4 +1485,26 @@ mod tests { let script = pay_to_address_script(&address); MinerData::new(script, vec![]) } + + #[allow(dead_code)] + fn all_priority_orphan_combinations() -> impl Iterator<Item = (Priority, Orphan)> { + [Priority::Low, Priority::High] + .iter() + .flat_map(|priority| [Orphan::Allowed, Orphan::Forbidden].iter().map(|orphan| (*priority, *orphan))) + } + + fn all_priority_orphan_rbf_policy_combinations() -> impl Iterator<Item = (Priority, Orphan, RbfPolicy)> { + [Priority::Low, Priority::High].iter().flat_map(|priority| { + [Orphan::Allowed, Orphan::Forbidden].iter().flat_map(|orphan| { + [RbfPolicy::Forbidden, RbfPolicy::Allowed, RbfPolicy::Mandatory] + .iter() + .map(|rbf_policy| (*priority, *orphan, *rbf_policy)) + }) + }) + } + + fn assert_transaction_count(mining_manager: &MiningManager, expected_count: usize, message: &str) { + let count =
mining_manager.transaction_count(TransactionQuery::TransactionsOnly); + assert_eq!(expected_count, count, "{message} mempool transaction count: expected {}, got {}", expected_count, count); + } } diff --git a/mining/src/mempool/config.rs b/mining/src/mempool/config.rs index aecbc07118..04407b411e 100644 --- a/mining/src/mempool/config.rs +++ b/mining/src/mempool/config.rs @@ -1,20 +1,18 @@ use kaspa_consensus_core::constants::TX_VERSION; -pub(crate) const DEFAULT_MAXIMUM_TRANSACTION_COUNT: u64 = 1_000_000; -pub(crate) const DEFAULT_MAXIMUM_READY_TRANSACTION_COUNT: u64 = 50_000; +pub(crate) const DEFAULT_MAXIMUM_TRANSACTION_COUNT: usize = 1_000_000; +pub(crate) const DEFAULT_MEMPOOL_SIZE_LIMIT: usize = 1_000_000_000; pub(crate) const DEFAULT_MAXIMUM_BUILD_BLOCK_TEMPLATE_ATTEMPTS: u64 = 5; -pub(crate) const DEFAULT_TRANSACTION_EXPIRE_INTERVAL_SECONDS: u64 = 60; -pub(crate) const DEFAULT_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS: u64 = 10; +pub(crate) const DEFAULT_TRANSACTION_EXPIRE_INTERVAL_SECONDS: u64 = 24 * 60 * 60; +pub(crate) const DEFAULT_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS: u64 = 60; pub(crate) const DEFAULT_ACCEPTED_TRANSACTION_EXPIRE_INTERVAL_SECONDS: u64 = 120; pub(crate) const DEFAULT_ACCEPTED_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS: u64 = 10; pub(crate) const DEFAULT_ORPHAN_EXPIRE_INTERVAL_SECONDS: u64 = 60; pub(crate) const DEFAULT_ORPHAN_EXPIRE_SCAN_INTERVAL_SECONDS: u64 = 10; pub(crate) const DEFAULT_MAXIMUM_ORPHAN_TRANSACTION_MASS: u64 = 100_000; - -// TODO: when rusty-kaspa nodes run most of the network, consider increasing this value -pub(crate) const DEFAULT_MAXIMUM_ORPHAN_TRANSACTION_COUNT: u64 = 50; +pub(crate) const DEFAULT_MAXIMUM_ORPHAN_TRANSACTION_COUNT: u64 = 500; /// DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE specifies the minimum transaction fee for a transaction to be accepted to /// the mempool and relayed. It is specified in sompi per 1kg (or 1000 grams) of transaction mass. 
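// Worked example (illustrative, assuming a minimum relay fee of 1000 sompi per kg):
// minimum_feerate() below yields 1000 / 1000 = 1.0 sompi/gram, so a transaction of mass
// 2000 grams must pay at least 2000 sompi in fees to be accepted into the mempool and relayed.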
@@ -29,8 +27,8 @@ pub(crate) const DEFAULT_MAXIMUM_STANDARD_TRANSACTION_VERSION: u16 = TX_VERSION; #[derive(Clone, Debug)] pub struct Config { - pub maximum_transaction_count: u64, - pub maximum_ready_transaction_count: u64, + pub maximum_transaction_count: usize, + pub mempool_size_limit: usize, pub maximum_build_block_template_attempts: u64, pub transaction_expire_interval_daa_score: u64, pub transaction_expire_scan_interval_daa_score: u64, @@ -47,13 +45,14 @@ pub struct Config { pub minimum_relay_transaction_fee: u64, pub minimum_standard_transaction_version: u16, pub maximum_standard_transaction_version: u16, + pub network_blocks_per_second: u64, } impl Config { #[allow(clippy::too_many_arguments)] pub fn new( - maximum_transaction_count: u64, - maximum_ready_transaction_count: u64, + maximum_transaction_count: usize, + mempool_size_limit: usize, maximum_build_block_template_attempts: u64, transaction_expire_interval_daa_score: u64, transaction_expire_scan_interval_daa_score: u64, @@ -70,10 +69,11 @@ impl Config { minimum_relay_transaction_fee: u64, minimum_standard_transaction_version: u16, maximum_standard_transaction_version: u16, + network_blocks_per_second: u64, ) -> Self { Self { maximum_transaction_count, - maximum_ready_transaction_count, + mempool_size_limit, maximum_build_block_template_attempts, transaction_expire_interval_daa_score, transaction_expire_scan_interval_daa_score, @@ -90,6 +90,7 @@ impl Config { minimum_relay_transaction_fee, minimum_standard_transaction_version, maximum_standard_transaction_version, + network_blocks_per_second, } } @@ -98,7 +99,7 @@ impl Config { pub const fn build_default(target_milliseconds_per_block: u64, relay_non_std_transactions: bool, max_block_mass: u64) -> Self { Self { maximum_transaction_count: DEFAULT_MAXIMUM_TRANSACTION_COUNT, - maximum_ready_transaction_count: DEFAULT_MAXIMUM_READY_TRANSACTION_COUNT, + mempool_size_limit: DEFAULT_MEMPOOL_SIZE_LIMIT, maximum_build_block_template_attempts: DEFAULT_MAXIMUM_BUILD_BLOCK_TEMPLATE_ATTEMPTS, transaction_expire_interval_daa_score: DEFAULT_TRANSACTION_EXPIRE_INTERVAL_SECONDS * 1000 / target_milliseconds_per_block, transaction_expire_scan_interval_daa_score: DEFAULT_TRANSACTION_EXPIRE_SCAN_INTERVAL_SECONDS * 1000 @@ -118,11 +119,20 @@ impl Config { minimum_relay_transaction_fee: DEFAULT_MINIMUM_RELAY_TRANSACTION_FEE, minimum_standard_transaction_version: DEFAULT_MINIMUM_STANDARD_TRANSACTION_VERSION, maximum_standard_transaction_version: DEFAULT_MAXIMUM_STANDARD_TRANSACTION_VERSION, + network_blocks_per_second: 1000 / target_milliseconds_per_block, } } pub fn apply_ram_scale(mut self, ram_scale: f64) -> Self { - self.maximum_transaction_count = (self.maximum_transaction_count as f64 * ram_scale.min(1.0)) as u64; // Allow only scaling down + // Allow only scaling down + self.maximum_transaction_count = (self.maximum_transaction_count as f64 * ram_scale.min(1.0)) as usize; + self.mempool_size_limit = (self.mempool_size_limit as f64 * ram_scale.min(1.0)) as usize; self } + + /// Returns the minimum standard fee/mass ratio currently required by the mempool + pub(crate) fn minimum_feerate(&self) -> f64 { + // The parameter minimum_relay_transaction_fee is in sompi/kg units so divide by 1000 to get sompi/gram + self.minimum_relay_transaction_fee as f64 / 1000.0 + } } diff --git a/mining/src/mempool/mod.rs b/mining/src/mempool/mod.rs index a3e26e7c99..ec5f5675ba 100644 --- a/mining/src/mempool/mod.rs +++ b/mining/src/mempool/mod.rs @@ -1,6 +1,6 @@ use crate::{ + feerate::{FeerateEstimator, 
FeerateEstimatorArgs}, model::{ - candidate_tx::CandidateTransaction, owner_txs::{GroupedOwnerTransactions, ScriptPublicKeySet}, tx_query::TransactionQuery, }, @@ -12,7 +12,10 @@ use self::{ model::{accepted_transactions::AcceptedTransactions, orphan_pool::OrphanPool, pool::Pool, transactions_pool::TransactionsPool}, tx::Priority, }; -use kaspa_consensus_core::tx::{MutableTransaction, TransactionId}; +use kaspa_consensus_core::{ + block::TemplateTransactionSelector, + tx::{MutableTransaction, TransactionId}, +}; use kaspa_core::time::Stopwatch; use std::sync::Arc; @@ -23,6 +26,7 @@ pub(crate) mod handle_new_block_transactions; pub(crate) mod model; pub(crate) mod populate_entries_and_try_validate; pub(crate) mod remove_transaction; +pub(crate) mod replace_by_fee; pub(crate) mod validate_and_insert_transaction; /// Mempool contains transactions intended to be inserted into a block and mined. @@ -111,9 +115,23 @@ impl Mempool { count } - pub(crate) fn block_candidate_transactions(&self) -> Vec<CandidateTransaction> { - let _sw = Stopwatch::<10>::with_threshold("block_candidate_transactions op"); - self.transaction_pool.all_ready_transactions() + pub(crate) fn ready_transaction_count(&self) -> usize { + self.transaction_pool.ready_transaction_count() + } + + pub(crate) fn ready_transaction_total_mass(&self) -> u64 { + self.transaction_pool.ready_transaction_total_mass() + } + + /// Dynamically builds a transaction selector based on the specific state of the ready transactions frontier + pub(crate) fn build_selector(&self) -> Box<dyn TemplateTransactionSelector> { + let _sw = Stopwatch::<10>::with_threshold("build_selector op"); + self.transaction_pool.build_selector() + } + + /// Builds a feerate estimator based on internal state of the ready transactions frontier + pub(crate) fn build_feerate_estimator(&self, args: FeerateEstimatorArgs) -> FeerateEstimator { + self.transaction_pool.build_feerate_estimator(args) } pub(crate) fn all_transaction_ids_with_priority(&self, priority: Priority) -> Vec<TransactionId> { @@ -122,12 +140,7 @@ } pub(crate) fn update_revalidated_transaction(&mut self, transaction: MutableTransaction) -> bool { - if let Some(tx) = self.transaction_pool.get_mut(&transaction.id()) { - tx.mtx = transaction; - true - } else { - false - } + self.transaction_pool.update_revalidated_transaction(transaction) } pub(crate) fn has_accepted_transaction(&self, transaction_id: &TransactionId) -> bool { @@ -144,6 +157,11 @@ .filter(|transaction_id| !(self.transaction_pool.has(transaction_id) || self.orphan_pool.has(transaction_id))); self.accepted_transactions.unaccepted(&mut not_in_pools_txs) } + + #[cfg(test)] + pub(crate) fn get_estimated_size(&self) -> usize { + self.transaction_pool.get_estimated_size() + } } pub mod tx { @@ -158,4 +176,51 @@ Forbidden, Allowed, } + + /// Replace by Fee (RBF) policy + #[derive(Debug, Clone, Copy, PartialEq, Eq)] + pub enum RbfPolicy { + /// ### RBF is forbidden + /// + /// Inserts the incoming transaction. + /// + /// Conditions of success: + /// + /// - no double spend + /// + /// If conditions are not met, leaves the mempool unchanged and fails with a double spend error. + Forbidden, + + /// ### RBF may occur + /// + /// Identifies double spends in the mempool and their owning transactions by checking, in order, every input of the incoming + /// transaction. + /// + /// Removes all mempool transactions owning double spends and inserts the incoming transaction.
+ /// + /// Conditions of success: + /// + /// - on absence of double spends, always succeeds + /// - on double spends, the incoming transaction has a higher fee/mass ratio than the mempool transaction owning + /// the first double spend + /// + /// If conditions are not met, leaves the mempool unchanged and fails with a double spend or a tx fee/mass too low error. + Allowed, + + /// ### RBF must occur + /// + /// Identifies double spends in the mempool and their owning transactions by checking, in order, every input of the incoming + /// transaction. + /// + /// Removes the mempool transaction owning the double spends and inserts the incoming transaction. + /// + /// Conditions of success: + /// + /// - at least one double spend + /// - all double spends belong to the same mempool transaction + /// - the incoming transaction has a higher fee/mass ratio than the mempool double-spending transaction. + /// + /// If conditions are not met, leaves the mempool unchanged and fails with a double spend or a tx fee/mass too low error. + Mandatory, + } } diff --git a/mining/src/mempool/model/frontier.rs b/mining/src/mempool/model/frontier.rs new file mode 100644 index 0000000000..8d21953271 --- /dev/null +++ b/mining/src/mempool/model/frontier.rs @@ -0,0 +1,543 @@ +use crate::{ + feerate::{FeerateEstimator, FeerateEstimatorArgs}, + model::candidate_tx::CandidateTransaction, + Policy, RebalancingWeightedTransactionSelector, +}; + +use feerate_key::FeerateTransactionKey; +use kaspa_consensus_core::{block::TemplateTransactionSelector, tx::Transaction}; +use kaspa_core::trace; +use rand::{distributions::Uniform, prelude::Distribution, Rng}; +use search_tree::SearchTree; +use selectors::{SequenceSelector, SequenceSelectorInput, TakeAllSelector}; +use std::{collections::HashSet, iter::FusedIterator, sync::Arc}; + +pub(crate) mod feerate_key; +pub(crate) mod search_tree; +pub(crate) mod selectors; + +/// If the frontier contains less than 4x the block mass limit, we consider +/// inplace sampling to be less efficient (due to collisions) and thus use +/// the rebalancing selector +const COLLISION_FACTOR: u64 = 4; + +/// Multiplication factor for in-place sampling. We sample 20% more than the +/// hard limit in order to allow the SequenceSelector to compensate for consensus rejections. +const MASS_LIMIT_FACTOR: f64 = 1.2; + +/// A rough estimation for the average transaction mass. Its usage is a non-important edge case, +/// hence we just hardcode it here (as opposed to performing an accurate estimation) +const TYPICAL_TX_MASS: f64 = 2000.0; + +/// Management of the transaction pool frontier, that is, the set of transactions in +/// the transaction pool which have no mempool ancestors and are essentially ready +/// to enter the next block template.
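+// Usage sketch (illustrative, using only the APIs defined below): the owning
+// transactions pool keeps the frontier in sync on every insertion/removal:
+//
+//   let mut frontier = Frontier::default();
+//   let key = FeerateTransactionKey::new(fee, mass, tx); // fee/mass/tx taken from the mempool entry
+//   frontier.insert(key.clone()); // O(log n)
+//   let selector = frontier.build_selector(&Policy::new(max_block_mass));
+//   // ...once the transaction leaves the mempool:
+//   frontier.remove(&key); // keeps total mass and total weight consistent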
+#[derive(Default)] +pub struct Frontier { + /// Frontier transactions sorted by feerate order and searchable for weight sampling + search_tree: SearchTree, + + /// Total mass: Σ_{tx in frontier} tx.mass + total_mass: u64, +} + +impl Frontier { + pub fn total_weight(&self) -> f64 { + self.search_tree.total_weight() + } + + pub fn total_mass(&self) -> u64 { + self.total_mass + } + + pub fn len(&self) -> usize { + self.search_tree.len() + } + + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + pub fn insert(&mut self, key: FeerateTransactionKey) -> bool { + let mass = key.mass; + if self.search_tree.insert(key) { + self.total_mass += mass; + true + } else { + false + } + } + + pub fn remove(&mut self, key: &FeerateTransactionKey) -> bool { + let mass = key.mass; + if self.search_tree.remove(key) { + self.total_mass -= mass; + true + } else { + false + } + } + + /// Samples the frontier in-place based on the provided policy and returns a SequenceSelector. + /// + /// This sampling algorithm should be used when frontier total mass is high enough compared to + /// policy mass limit so that the probability of sampling collisions remains low. + /// + /// Convergence analysis: + /// 1. Based on the above we can safely assume that `k << n`, where `n` is the total number of frontier items + /// and `k` is the number of actual samples (since `desired_mass << total_mass` and mass per item is bounded) + /// 2. Indeed, if the weight distribution is not too spread out (i.e., `max(weights) = O(min(weights))`), `k << n` means + /// that the probability of collisions is low enough and the sampling process will converge in `O(k log(n))` w.h.p. + /// 3. It remains to deal with the case where the weight distribution is highly biased. The process implemented below + /// keeps track of the top-weight element. If the distribution is highly biased, this element will be sampled with + /// sufficient probability (in constant time). Following each sampling collision we search for a consecutive range of + /// top elements which were already sampled and narrow the sampling space to exclude them all. We do this by computing + /// the prefix weight up to the top most item which wasn't sampled yet (inclusive) and then continue the sampling process + /// over the narrowed space. This process is repeated until acquiring the desired mass. + /// 4. Numerical stability. Naively, one would simply subtract `total_weight -= top.weight` in order to narrow the sampling + /// space. However, if `top.weight` is much larger than the remaining weight, the above f64 subtraction will yield a number + /// close or equal to zero. We fix this by implementing a `log(n)` prefix weight operation. + /// 5. Q. Why not just use u64 weights? + /// A. The current weight calculation is `feerate^alpha` with `alpha=3`. Using u64 would mean that the feerate space + /// is limited to a range of size `(2^64)^(1/3) = ~2^21 = ~2M`. Already with current usages, the feerate can vary + /// from `~1/50` (2000 sompi for a transaction with 100K storage mass), to `5M` (100 KAS fee for a transaction with + /// 2000 mass = 100·100_000_000/2000), resulting in a range of size 250M (`5M/(1/50)`). + /// By using floating-point arithmetic we gain the adjustment of the probability space to the accuracy level required + /// for the current samples. And if the space is highly biased, the repeated elimination of top items and the prefix weight computation + /// will readjust it.
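+// Worked numbers (illustrative): with `Policy::new(500_000)` the sampling soft limit is
+// desired_mass = (500_000.0 * MASS_LIMIT_FACTOR) as u64 = 600_000; at the typical ~2000-gram
+// transaction mass this means k ≈ 300 samples, i.e. O(k log n) work w.h.p. (see point 2 above).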
+ pub fn sample_inplace<R>(&self, rng: &mut R, policy: &Policy, _collisions: &mut u64) -> SequenceSelectorInput + where + R: Rng + ?Sized, + { + debug_assert!(!self.search_tree.is_empty(), "expected to be called only if not empty"); + + // Sample 20% more than the hard limit in order to allow the SequenceSelector to + // compensate for consensus rejections. + // Note: this is a soft limit which is why the loop below might pass it if the + // next sampled transaction happens to cross the bound + let desired_mass = (policy.max_block_mass as f64 * MASS_LIMIT_FACTOR) as u64; + + let mut distr = Uniform::new(0f64, self.total_weight()); + let mut down_iter = self.search_tree.descending_iter(); + let mut top = down_iter.next().unwrap(); + let mut cache = HashSet::new(); + let mut sequence = SequenceSelectorInput::default(); + let mut total_selected_mass: u64 = 0; + let mut collisions = 0; + + // The sampling process is converging so the cache will eventually hold all entries, which guarantees loop exit + 'outer: while cache.len() < self.search_tree.len() && total_selected_mass <= desired_mass { + let query = distr.sample(rng); + let item = { + let mut item = self.search_tree.search(query); + while !cache.insert(item.tx.id()) { + collisions += 1; + // Try to narrow the sampling space in order to reduce further sampling collisions + if cache.contains(&top.tx.id()) { + loop { + match down_iter.next() { + Some(next) => top = next, + None => break 'outer, + } + // Loop until finding a top item which was not sampled yet + if !cache.contains(&top.tx.id()) { + break; + } + } + let remaining_weight = self.search_tree.prefix_weight(top); + distr = Uniform::new(0f64, remaining_weight); + } + let query = distr.sample(rng); + item = self.search_tree.search(query); + } + item + }; + sequence.push(item.tx.clone(), item.mass); + total_selected_mass += item.mass; // Max standard mass + Mempool capacity bound imply this will not overflow + } + trace!("[mempool frontier sample inplace] collisions: {collisions}, cache: {}", cache.len()); + *_collisions += collisions; + sequence + } + + /// Dynamically builds a transaction selector based on the specific state of the ready transactions frontier. + /// + /// The logic is divided into three cases: + /// 1. The frontier is small and can fit entirely into a block: perform no sampling and return + /// a TakeAllSelector + /// 2. The frontier has at least ~4x the capacity of a block: expected collision rate is low, perform + /// in-place k*log(n) sampling and return a SequenceSelector + /// 3. The frontier has 1-4x the capacity of a block. In this case we expect a high collision rate while + /// the number of overall transactions is still low, so we take all of the transactions and use the + /// rebalancing weighted selector (performing the actual sampling out of the mempool lock) + /// + /// The above thresholds were selected based on benchmarks. Overall, this dynamic selection provides + /// full transaction selection in less than 150 µs even if the frontier has 1M entries (!!). See mining/benches + /// for more details.
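+// Decision thresholds (illustrative, for Policy::new(500_000) and COLLISION_FACTOR = 4):
+//   total_mass <= 500_000                 => TakeAllSelector (the whole frontier fits in a block)
+//   total_mass >  2_000_000               => SequenceSelector over an in-place weighted sample
+//   500_000 < total_mass <= 2_000_000     => RebalancingWeightedTransactionSelector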
+ pub fn build_selector(&self, policy: &Policy) -> Box<dyn TemplateTransactionSelector> { + if self.total_mass <= policy.max_block_mass { + Box::new(TakeAllSelector::new(self.search_tree.ascending_iter().map(|k| k.tx.clone()).collect())) + } else if self.total_mass > policy.max_block_mass * COLLISION_FACTOR { + let mut rng = rand::thread_rng(); + Box::new(SequenceSelector::new(self.sample_inplace(&mut rng, policy, &mut 0), policy.clone())) + } else { + Box::new(RebalancingWeightedTransactionSelector::new( + policy.clone(), + self.search_tree.ascending_iter().cloned().map(CandidateTransaction::from_key).collect(), + )) + } + } + + /// Exposed for benchmarking purposes + pub fn build_selector_sample_inplace(&self, _collisions: &mut u64) -> Box<dyn TemplateTransactionSelector> { + let mut rng = rand::thread_rng(); + let policy = Policy::new(500_000); + Box::new(SequenceSelector::new(self.sample_inplace(&mut rng, &policy, _collisions), policy)) + } + + /// Exposed for benchmarking purposes + pub fn build_selector_take_all(&self) -> Box<dyn TemplateTransactionSelector> { + Box::new(TakeAllSelector::new(self.search_tree.ascending_iter().map(|k| k.tx.clone()).collect())) + } + + /// Exposed for benchmarking purposes + pub fn build_rebalancing_selector(&self) -> Box<dyn TemplateTransactionSelector> { + Box::new(RebalancingWeightedTransactionSelector::new( + Policy::new(500_000), + self.search_tree.ascending_iter().cloned().map(CandidateTransaction::from_key).collect(), + )) + } + + /// Builds a feerate estimator based on internal state of the ready transactions frontier + pub fn build_feerate_estimator(&self, args: FeerateEstimatorArgs) -> FeerateEstimator { + let average_transaction_mass = match self.len() { + 0 => TYPICAL_TX_MASS, + n => self.total_mass() as f64 / n as f64, + }; + let bps = args.network_blocks_per_second as f64; + let mut mass_per_block = args.maximum_mass_per_block as f64; + let mut inclusion_interval = average_transaction_mass / (mass_per_block * bps); + let mut estimator = FeerateEstimator::new(self.total_weight(), inclusion_interval); + + // Search for better estimators by possibly removing extremely high outliers + let mut down_iter = self.search_tree.descending_iter().peekable(); + while let Some(current) = down_iter.next() { + // Update values for the coming iteration. In order to remove the outlier from the + // total weight, we must compensate by capturing a block slot. Note we capture the + // slot with correspondence to the outlier actual mass. This is important in cases + // where the high-feerate txs have mass which deviates from the average. + mass_per_block -= current.mass as f64; + if mass_per_block <= average_transaction_mass { + // Out of block slots, break + break; + } + + // Re-calc the inclusion interval based on the new block "capacity". + // Note that inclusion_interval < 1.0 as required by the estimator, since mass_per_block > average_transaction_mass (by condition above) and bps >= 1 + inclusion_interval = average_transaction_mass / (mass_per_block * bps); + + // Compute the weight up to, and excluding, current key (which translates to zero weight if peek() is none) + let prefix_weight = down_iter.peek().map(|key| self.search_tree.prefix_weight(key)).unwrap_or_default(); + let pending_estimator = FeerateEstimator::new(prefix_weight, inclusion_interval); + + // Test the pending estimator vs. the current one + if pending_estimator.feerate_to_time(1.0) < estimator.feerate_to_time(1.0) { + estimator = pending_estimator; + } else { + // The pending estimator is no better, break.
Indicates that the reduction in + // network mass per second is more significant than the removed weight + break; + } + } + estimator + } + + /// Returns an iterator to the transactions in the frontier in increasing feerate order + pub fn ascending_iter(&self) -> impl DoubleEndedIterator<Item = &Arc<Transaction>> + ExactSizeIterator + FusedIterator { + self.search_tree.ascending_iter().map(|key| &key.tx) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use feerate_key::tests::build_feerate_key; + use itertools::Itertools; + use rand::thread_rng; + use std::collections::HashMap; + + #[test] + pub fn test_highly_irregular_sampling() { + let mut rng = thread_rng(); + let cap = 1000; + let mut map = HashMap::with_capacity(cap); + for i in 0..cap as u64 { + let mut fee: u64 = if i % (cap as u64 / 100) == 0 { 1000000 } else { rng.gen_range(1..10000) }; + if i == 0 { + // Add an extremely large fee in order to create extremely high variance + fee = 100_000_000 * 1_000_000; // 1M KAS + } + let mass: u64 = 1650; + let key = build_feerate_key(fee, mass, i); + map.insert(key.tx.id(), key); + } + + let mut frontier = Frontier::default(); + for item in map.values().cloned() { + frontier.insert(item).then_some(()).unwrap(); + } + + let _sample = frontier.sample_inplace(&mut rng, &Policy::new(500_000), &mut 0); + } + + #[test] + pub fn test_mempool_sampling_small() { + let mut rng = thread_rng(); + let cap = 2000; + let mut map = HashMap::with_capacity(cap); + for i in 0..cap as u64 { + let fee: u64 = rng.gen_range(1..1000000); + let mass: u64 = 1650; + let key = build_feerate_key(fee, mass, i); + map.insert(key.tx.id(), key); + } + + let mut frontier = Frontier::default(); + for item in map.values().cloned() { + frontier.insert(item).then_some(()).unwrap(); + } + + let mut selector = frontier.build_selector(&Policy::new(500_000)); + selector.select_transactions().iter().map(|k| k.gas).sum::<u64>(); + + let mut selector = frontier.build_rebalancing_selector(); + selector.select_transactions().iter().map(|k| k.gas).sum::<u64>(); + + let mut selector = frontier.build_selector_sample_inplace(&mut 0); + selector.select_transactions().iter().map(|k| k.gas).sum::<u64>(); + + let mut selector = frontier.build_selector_take_all(); + selector.select_transactions().iter().map(|k| k.gas).sum::<u64>(); + + let mut selector = frontier.build_selector(&Policy::new(500_000)); + selector.select_transactions().iter().map(|k| k.gas).sum::<u64>(); + } + + #[test] + pub fn test_total_mass_tracking() { + let mut rng = thread_rng(); + let cap = 10000; + let mut map = HashMap::with_capacity(cap); + for i in 0..cap as u64 { + let fee: u64 = if i % (cap as u64 / 100) == 0 { 1000000 } else { rng.gen_range(1..10000) }; + let mass: u64 = rng.gen_range(1..100000); // Use distinct mass values to challenge the test + let key = build_feerate_key(fee, mass, i); + map.insert(key.tx.id(), key); + } + + let len = cap / 2; + let mut frontier = Frontier::default(); + for item in map.values().take(len).cloned() { + frontier.insert(item).then_some(()).unwrap(); + } + + let prev_total_mass = frontier.total_mass(); + // Assert the total mass + assert_eq!(frontier.total_mass(), frontier.search_tree.ascending_iter().map(|k| k.mass).sum::<u64>()); + + // Add a bunch of duplicates and make sure the total mass remains the same + let mut dup_items = frontier.search_tree.ascending_iter().take(len / 2).cloned().collect_vec(); + for dup in dup_items.iter().cloned() { + (!frontier.insert(dup)).then_some(()).unwrap(); + } + assert_eq!(prev_total_mass, frontier.total_mass()); + 
assert_eq!(frontier.total_mass(), frontier.search_tree.ascending_iter().map(|k| k.mass).sum::<u64>()); + + // Remove a few elements from the map in order to randomize the iterator + dup_items.iter().take(10).for_each(|k| { + map.remove(&k.tx.id()); + }); + + // Add and remove random elements some of which will be duplicate insertions and some missing removals + for item in map.values().step_by(2) { + frontier.remove(item); + if let Some(item2) = dup_items.pop() { + frontier.insert(item2); + } + } + assert_eq!(frontier.total_mass(), frontier.search_tree.ascending_iter().map(|k| k.mass).sum::<u64>()); + } + + #[test] + fn test_feerate_estimator() { + let mut rng = thread_rng(); + let cap = 2000; + let mut map = HashMap::with_capacity(cap); + for i in 0..cap as u64 { + let mut fee: u64 = rng.gen_range(1..1000000); + let mass: u64 = 1650; + // 304 (~500,000/1650) extreme outliers is an edge case at which the estimator build logic should be tested + if i <= 303 { + // Add an extremely large fee in order to create extremely high variance + fee = i * 10_000_000 * 1_000_000; + } + let key = build_feerate_key(fee, mass, i); + map.insert(key.tx.id(), key); + } + + for len in [0, 1, 10, 100, 200, 300, 500, 750, cap / 2, (cap * 2) / 3, (cap * 4) / 5, (cap * 5) / 6, cap] { + let mut frontier = Frontier::default(); + for item in map.values().take(len).cloned() { + frontier.insert(item).then_some(()).unwrap(); + } + + let args = FeerateEstimatorArgs { network_blocks_per_second: 1, maximum_mass_per_block: 500_000 }; + // We are testing that the build function actually returns and is not looping indefinitely + let estimator = frontier.build_feerate_estimator(args); + let estimations = estimator.calc_estimations(1.0); + + let buckets = estimations.ordered_buckets(); + // Test for the absence of NaN, infinite or zero values in buckets + for b in buckets.iter() { + assert!( + b.feerate.is_normal() && b.feerate >= 1.0, + "bucket feerate must be a finite number greater than or equal to the minimum standard feerate" + ); + assert!( + b.estimated_seconds.is_normal() && b.estimated_seconds > 0.0, + "bucket estimated seconds must be a finite number greater than zero" + ); + } + dbg!(len, estimator); + dbg!(estimations); + } + } + + #[test] + fn test_constant_feerate_estimator() { + const MIN_FEERATE: f64 = 1.0; + let cap = 20_000; + let mut map = HashMap::with_capacity(cap); + for i in 0..cap as u64 { + let mass: u64 = 1650; + let fee = (mass as f64 * MIN_FEERATE) as u64; + let key = build_feerate_key(fee, mass, i); + map.insert(key.tx.id(), key); + } + + for len in [0, 1, 10, 100, 200, 300, 500, 750, cap / 2, (cap * 2) / 3, (cap * 4) / 5, (cap * 5) / 6, cap] { + println!(); + println!("Testing a frontier with {} txs...", len.min(cap)); + let mut frontier = Frontier::default(); + for item in map.values().take(len).cloned() { + frontier.insert(item).then_some(()).unwrap(); + } + + let args = FeerateEstimatorArgs { network_blocks_per_second: 1, maximum_mass_per_block: 500_000 }; + // We are testing that the build function actually returns and is not looping indefinitely + let estimator = frontier.build_feerate_estimator(args); + let estimations = estimator.calc_estimations(MIN_FEERATE); + let buckets = estimations.ordered_buckets(); + // Test for the absence of NaN, infinite or zero values in buckets + for b in buckets.iter() { + assert!( + b.feerate.is_normal() && b.feerate >= MIN_FEERATE, + "bucket feerate must be a finite number greater than or equal to the minimum standard feerate" + ); + assert!( + b.estimated_seconds.is_normal() 
&& b.estimated_seconds > 0.0, + "bucket estimated seconds must be a finite number greater than zero" + ); + } + dbg!(len, estimator); + dbg!(estimations); + } + } + + #[test] + fn test_feerate_estimator_with_low_mass_outliers() { + const MIN_FEERATE: f64 = 1.0; + const STD_FEERATE: f64 = 10.0; + const HIGH_FEERATE: f64 = 1000.0; + + let cap = 20_000; + let mut frontier = Frontier::default(); + for i in 0..cap as u64 { + let (mass, fee) = if i < 200 { + let mass = 1650; + (mass, (HIGH_FEERATE * mass as f64) as u64) + } else { + let mass = 90_000; + (mass, (STD_FEERATE * mass as f64) as u64) + }; + let key = build_feerate_key(fee, mass, i); + frontier.insert(key).then_some(()).unwrap(); + } + + let args = FeerateEstimatorArgs { network_blocks_per_second: 1, maximum_mass_per_block: 500_000 }; + // We are testing that the build function actually returns and is not looping indefinitely + let estimator = frontier.build_feerate_estimator(args); + let estimations = estimator.calc_estimations(MIN_FEERATE); + + // Test that estimations are not biased by the average high mass + let normal_feerate = estimations.normal_buckets.first().unwrap().feerate; + assert!( + normal_feerate < HIGH_FEERATE / 10.0, + "Normal bucket feerate is expected to be << high feerate due to small mass of high feerate txs ({}, {})", + normal_feerate, + HIGH_FEERATE + ); + + let buckets = estimations.ordered_buckets(); + // Test for the absence of NaN, infinite or zero values in buckets + for b in buckets.iter() { + assert!( + b.feerate.is_normal() && b.feerate >= MIN_FEERATE, + "bucket feerate must be a finite number greater than or equal to the minimum standard feerate" + ); + assert!( + b.estimated_seconds.is_normal() && b.estimated_seconds > 0.0, + "bucket estimated seconds must be a finite number greater than zero" + ); + } + dbg!(estimator); + dbg!(estimations); + } + + #[test] + fn test_feerate_estimator_with_less_than_block_capacity() { + let mut map = HashMap::new(); + for i in 0..304 { + let mass: u64 = 1650; + let fee = 10_000_000 * 1_000_000; + let key = build_feerate_key(fee, mass, i); + map.insert(key.tx.id(), key); + } + + // All lengths make for less than block capacity (given the mass used) + for len in [0, 1, 10, 100, 200, 250, 300] { + let mut frontier = Frontier::default(); + for item in map.values().take(len).cloned() { + frontier.insert(item).then_some(()).unwrap(); + } + + let args = FeerateEstimatorArgs { network_blocks_per_second: 1, maximum_mass_per_block: 500_000 }; + // We are testing that the build function actually returns and is not looping indefinitely + let estimator = frontier.build_feerate_estimator(args); + let estimations = estimator.calc_estimations(1.0); + + let buckets = estimations.ordered_buckets(); + // Test for the absence of NaN, infinite or zero values in buckets + for b in buckets.iter() { + // Expect min feerate because blocks are not full + assert!(b.feerate == 1.0, "bucket feerate is expected to be equal to the minimum standard feerate"); + assert!( + b.estimated_seconds.is_normal() && b.estimated_seconds > 0.0 && b.estimated_seconds <= 1.0, + "bucket estimated seconds must be a finite number greater than zero and at most 1.0" + ); + } + dbg!(len, estimator); + dbg!(estimations); + } + } +} diff --git a/mining/src/mempool/model/frontier/feerate_key.rs b/mining/src/mempool/model/frontier/feerate_key.rs new file mode 100644 index 0000000000..843ef0ff13 --- /dev/null +++ b/mining/src/mempool/model/frontier/feerate_key.rs @@ -0,0 +1,108 @@ +use crate::{block_template::selector::ALPHA, 
mempool::model::tx::MempoolTransaction}; +use kaspa_consensus_core::tx::Transaction; +use std::sync::Arc; + +#[derive(Clone, Debug)] +pub struct FeerateTransactionKey { + pub fee: u64, + pub mass: u64, + weight: f64, + pub tx: Arc<Transaction>, +} + +impl Eq for FeerateTransactionKey {} + +impl PartialEq for FeerateTransactionKey { + fn eq(&self, other: &Self) -> bool { + self.tx.id() == other.tx.id() + } +} + +impl FeerateTransactionKey { + pub fn new(fee: u64, mass: u64, tx: Arc<Transaction>) -> Self { + // NOTE: any change to the way this weight is calculated (such as scaling by some factor) + // requires a reversed update to total_weight in `Frontier::build_feerate_estimator`. This + // is because the math methods in FeeEstimator assume this specific weight function. + Self { fee, mass, weight: (fee as f64 / mass as f64).powi(ALPHA), tx } + } + + pub fn feerate(&self) -> f64 { + self.fee as f64 / self.mass as f64 + } + + pub fn weight(&self) -> f64 { + self.weight + } +} + +impl std::hash::Hash for FeerateTransactionKey { + fn hash<H: std::hash::Hasher>(&self, state: &mut H) { + // Transaction id is a sufficient identifier for this key + self.tx.id().hash(state); + } +} + +impl PartialOrd for FeerateTransactionKey { + fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { + Some(self.cmp(other)) + } +} + +impl Ord for FeerateTransactionKey { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + // Our first priority is the feerate. + // The weight function is monotonic in feerate so we prefer using it + // since it is cached + match self.weight().total_cmp(&other.weight()) { + core::cmp::Ordering::Equal => {} + ord => return ord, + } + + // If feerates (and thus weights) are equal, prefer the higher fee in absolute value + match self.fee.cmp(&other.fee) { + core::cmp::Ordering::Equal => {} + ord => return ord, + } + + // + // At this point we don't compare the mass fields since if both feerate + // and fee are equal, mass must be equal as well + // + + // Finally, we compare transaction ids in order to allow multiple transactions with + // the same fee and mass to exist within the same sorted container + self.tx.id().cmp(&other.tx.id()) + } +} + +impl From<&MempoolTransaction> for FeerateTransactionKey { + fn from(tx: &MempoolTransaction) -> Self { + let mass = tx.mtx.tx.mass(); + let fee = tx.mtx.calculated_fee.expect("fee is expected to be populated"); + assert_ne!(mass, 0, "mass field is expected to be set when inserting to the mempool"); + Self::new(fee, mass, tx.mtx.tx.clone()) + } +} + +#[cfg(test)] +pub(crate) mod tests { + use super::*; + use kaspa_consensus_core::{ + subnets::SUBNETWORK_ID_NATIVE, + tx::{Transaction, TransactionInput, TransactionOutpoint}, + }; + use kaspa_hashes::{HasherBase, TransactionID}; + use std::sync::Arc; + + fn generate_unique_tx(i: u64) -> Arc<Transaction> { + let mut hasher = TransactionID::new(); + let prev = hasher.update(i.to_le_bytes()).clone().finalize(); + let input = TransactionInput::new(TransactionOutpoint::new(prev, 0), vec![], 0, 0); + Arc::new(Transaction::new(0, vec![input], vec![], 0, SUBNETWORK_ID_NATIVE, 0, vec![])) + } + + /// Test helper for generating a feerate key with a unique tx (per u64 id) + pub(crate) fn build_feerate_key(fee: u64, mass: u64, id: u64) -> FeerateTransactionKey { + FeerateTransactionKey::new(fee, mass, generate_unique_tx(id)) + } +} diff --git a/mining/src/mempool/model/frontier/search_tree.rs b/mining/src/mempool/model/frontier/search_tree.rs new file mode 100644 index 0000000000..edf34c2710 --- /dev/null +++ b/mining/src/mempool/model/frontier/search_tree.rs @@ -0,0 
+1,336 @@ +use super::feerate_key::FeerateTransactionKey; +use std::iter::FusedIterator; +use sweep_bptree::tree::visit::{DescendVisit, DescendVisitResult}; +use sweep_bptree::tree::{Argument, SearchArgument}; +use sweep_bptree::{BPlusTree, NodeStoreVec}; + +type FeerateKey = FeerateTransactionKey; + +/// A struct for implementing "weight space" search using the SearchArgument customization. +/// The weight space is the range `[0, total_weight)` and each key has a "logical" interval allocation +/// within this space according to its tree position and weight. +/// +/// We implement the search efficiently by maintaining subtree weights which are updated with each +/// element insertion/removal. Given a search query `p ∈ [0, total_weight)` we then find the corresponding +/// element in log time by walking down from the root and adjusting the query according to subtree weights. +/// For instance if the query point is `123.56` and the top 3 subtrees have weights `120, 10.5, 100` then we +/// recursively query the middle subtree with the point `123.56 - 120 = 3.56`. +/// +/// See SearchArgument implementation below for more details. +#[derive(Clone, Copy, Debug, Default)] +struct FeerateWeight(f64); + +impl FeerateWeight { + /// Returns the weight value + pub fn weight(&self) -> f64 { + self.0 + } +} + +impl Argument<FeerateKey> for FeerateWeight { + fn from_leaf(keys: &[FeerateKey]) -> Self { + Self(keys.iter().map(|k| k.weight()).sum()) + } + + fn from_inner(_keys: &[FeerateKey], arguments: &[Self]) -> Self { + Self(arguments.iter().map(|a| a.0).sum()) + } +} + +impl SearchArgument<FeerateKey> for FeerateWeight { + type Query = f64; + + fn locate_in_leaf(query: Self::Query, keys: &[FeerateKey]) -> Option<usize> { + let mut sum = 0.0; + for (i, k) in keys.iter().enumerate() { + let w = k.weight(); + sum += w; + if query < sum { + return Some(i); + } + } + // In order to avoid sensitivity to floating number arithmetics, + // we logically "clamp" the search, returning the last leaf if the query + // value is out of bounds + match keys.len() { + 0 => None, + n => Some(n - 1), + } + } + + fn locate_in_inner(mut query: Self::Query, _keys: &[FeerateKey], arguments: &[Self]) -> Option<(usize, Self::Query)> { + // Search algorithm: Locate the next subtree to visit by iterating through `arguments` + // and subtracting the query until the correct range is found + for (i, a) in arguments.iter().enumerate() { + if query >= a.0 { + query -= a.0; + } else { + return Some((i, query)); + } + } + // In order to avoid sensitivity to floating number arithmetics, + // we logically "clamp" the search, returning the last subtree if the query + // value is out of bounds. Eventually this will lead to the return of the + // last leaf (see locate_in_leaf as well) + match arguments.len() { + 0 => None, + n => Some((n - 1, arguments[n - 1].0)), + } + } +} + +/// Visitor struct which accumulates the prefix weight up to a provided key (inclusive) in log time.
+/// +/// The basic idea is to use the subtree weights stored in the tree for walking down from the root +/// to the leaf (corresponding to the searched key), and accumulating the weights of all subtrees to the left of the walk-down path +struct PrefixWeightVisitor<'a> { + /// The key to search up to + key: &'a FeerateKey, + /// This field accumulates the prefix weight during the visit process + accumulated_weight: f64, +} + +impl<'a> PrefixWeightVisitor<'a> { + pub fn new(key: &'a FeerateKey) -> Self { + Self { key, accumulated_weight: Default::default() } + } + + /// Returns the index of the first `key ∈ keys` such that `key > self.key`. If no such key + /// exists, the returned index will be the length of `keys`. + fn search_in_keys(&self, keys: &[FeerateKey]) -> usize { + match keys.binary_search(self.key) { + Err(idx) => { + // self.key is not in keys, idx is the index of the following key + idx + } + Ok(idx) => { + // Exact match, return the following index + idx + 1 + } + } + } +} + +impl<'a> DescendVisit<FeerateKey, (), FeerateWeight> for PrefixWeightVisitor<'a> { + type Result = f64; + + fn visit_inner(&mut self, keys: &[FeerateKey], arguments: &[FeerateWeight]) -> DescendVisitResult<Self::Result> { + let idx = self.search_in_keys(keys); + // Invariants: + // a. arguments.len() == keys.len() + 1 (n inner node keys are the separators between n+1 subtrees) + // b. idx <= keys.len() (hence idx < arguments.len()) + + // Based on the invariants, we first accumulate all the subtree weights up to idx + for argument in arguments.iter().take(idx) { + self.accumulated_weight += argument.weight(); + } + + // ..and then go down to the idx'th subtree + DescendVisitResult::GoDown(idx) + } + + fn visit_leaf(&mut self, keys: &[FeerateKey], _values: &[()]) -> Option<Self::Result> { + // idx is the index of the key following self.key + let idx = self.search_in_keys(keys); + // Accumulate all key weights up to idx (which is inclusive if self.key ∈ tree) + for key in keys.iter().take(idx) { + self.accumulated_weight += key.weight(); + } + // ..and return the final result + Some(self.accumulated_weight) + } +} + +type InnerTree = BPlusTree<NodeStoreVec<FeerateKey, (), FeerateWeight>>; + +/// A transaction search tree sorted by feerate order and searchable for probabilistic weighted sampling. +/// +/// All `log(n)` expressions below are in base 64 (based on constants chosen within the sweep_bptree crate). +/// +/// The tree has the following properties: +/// 1. Linear time ordered access (ascending / descending) +/// 2. Insertions/removals in log(n) time +/// 3. Search for a weight point `p ∈ [0, total_weight)` in log(n) time +/// 4. Compute the prefix weight of a key, i.e., the sum of weights up to that key (inclusive) +/// according to key order, in log(n) time +/// 5. Access the total weight in O(1) time. The total weight has numerical stability since it +/// is recomputed from subtree weights for each item insertion/removal +/// +/// Computing the prefix weight is a crucial operation if the tree is used for random sampling and +/// the tree is highly imbalanced in terms of weight variance. +/// See [`Frontier::sample_inplace()`](crate::mempool::model::frontier::Frontier::sample_inplace) +/// for more details.
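+// Usage sketch (illustrative; `build_feerate_key` is the test helper from feerate_key.rs above):
+//
+//   let mut tree = SearchTree::new();
+//   tree.insert(build_feerate_key(5000, 2000, 1)); // weight = (5000/2000)^3 ≈ 15.6
+//   tree.insert(build_feerate_key(2000, 2000, 2)); // weight = 1.0
+//   let p = rand::thread_rng().gen_range(0.0..tree.total_weight());
+//   let sampled = tree.search(p); // the high-feerate key is ~15.6x more likely to be hit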
+
+/// A transaction search tree sorted by feerate order and searchable for probabilistic weighted sampling.
+///
+/// All `log(n)` expressions below are in base 64 (based on constants chosen within the sweep_bptree crate).
+///
+/// The tree has the following properties:
+/// 1. Linear time ordered access (ascending / descending)
+/// 2. Insertions/removals in log(n) time
+/// 3. Search for a weight point `p ∈ [0, total_weight)` in log(n) time
+/// 4. Compute the prefix weight of a key, i.e., the sum of weights up to that key (inclusive)
+///    according to key order, in log(n) time
+/// 5. Access the total weight in O(1) time. The total weight has numerical stability since it
+///    is recomputed from subtree weights for each item insertion/removal
+///
+/// Computing the prefix weight is a crucial operation if the tree is used for random sampling and
+/// the tree is highly imbalanced in terms of weight variance.
+/// See [`Frontier::sample_inplace()`](crate::mempool::model::frontier::Frontier::sample_inplace)
+/// for more details.
+pub struct SearchTree {
+    tree: InnerTree,
+}
+
+impl Default for SearchTree {
+    fn default() -> Self {
+        Self { tree: InnerTree::new(Default::default()) }
+    }
+}
+
+impl SearchTree {
+    pub fn new() -> Self {
+        Self { tree: InnerTree::new(Default::default()) }
+    }
+
+    pub fn len(&self) -> usize {
+        self.tree.len()
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    /// Inserts a key into the tree in log(n) time. Returns `false` if the key was already in the tree.
+    pub fn insert(&mut self, key: FeerateKey) -> bool {
+        self.tree.insert(key, ()).is_none()
+    }
+
+    /// Removes a key from the tree in log(n) time. Returns `false` if the key was not in the tree.
+    pub fn remove(&mut self, key: &FeerateKey) -> bool {
+        self.tree.remove(key).is_some()
+    }
+
+    /// Search for a weight point `query ∈ [0, total_weight)` in log(n) time
+    pub fn search(&self, query: f64) -> &FeerateKey {
+        self.tree.get_by_argument(query).expect("clamped").0
+    }
+
+    /// Access the total weight in O(1) time
+    pub fn total_weight(&self) -> f64 {
+        self.tree.root_argument().weight()
+    }
+
+    /// Computes the prefix weight of a key, i.e., the sum of weights up to that key (inclusive)
+    /// according to key order, in log(n) time
+    pub fn prefix_weight(&self, key: &FeerateKey) -> f64 {
+        self.tree.descend_visit(PrefixWeightVisitor::new(key)).unwrap()
+    }
+
+    /// Iterate the tree in descending key order (going down from the
+    /// highest key). Linear in the number of keys *actually* iterated.
+    pub fn descending_iter(&self) -> impl DoubleEndedIterator<Item = &FeerateKey> + ExactSizeIterator + FusedIterator {
+        self.tree.iter().rev().map(|(key, ())| key)
+    }
+
+    /// Iterate the tree in ascending key order (going up from the
+    /// lowest key). Linear in the number of keys *actually* iterated.
+    pub fn ascending_iter(&self) -> impl DoubleEndedIterator<Item = &FeerateKey> + ExactSizeIterator + FusedIterator {
+        self.tree.iter().map(|(key, ())| key)
+    }
+
+    /// The lowest key in the tree (by key order)
+    pub fn first(&self) -> Option<&FeerateKey> {
+        self.tree.first().map(|(k, ())| k)
+    }
+
+    /// The highest key in the tree (by key order)
+    pub fn last(&self) -> Option<&FeerateKey> {
+        self.tree.last().map(|(k, ())| k)
+    }
+}
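// Illustrative sketch (not part of the upstream patch): how a caller can use the tree for
// weighted random sampling. Keys owning larger weight intervals are proportionally more
// likely to be hit. The `rand` crate usage is an assumption made for the example only.
fn sample<'a>(tree: &'a SearchTree, rng: &mut impl rand::Rng) -> Option<&'a FeerateKey> {
    if tree.is_empty() {
        return None;
    }
    // Draw a uniform point in weight space and locate its owner in log(n) time
    let query = rng.gen::<f64>() * tree.total_weight();
    Some(tree.search(query))
}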
+
+#[cfg(test)]
+mod tests {
+    use super::super::feerate_key::tests::build_feerate_key;
+    use super::*;
+    use itertools::Itertools;
+    use std::collections::HashSet;
+    use std::ops::Sub;
+
+    #[test]
+    fn test_feerate_weight_queries() {
+        let mut tree = SearchTree::new();
+        let mass = 2000;
+        // The btree stores N=64 keys at each node/leaf, so we make sure the tree has more than
+        // 64^2 keys in order to trigger at least a few intermediate tree nodes
+        let fees = vec![[123, 113, 10_000, 1000, 2050, 2048]; 64 * (64 + 1)].into_iter().flatten().collect_vec();
+
+        #[allow(clippy::mutable_key_type)]
+        let mut s = HashSet::with_capacity(fees.len());
+        for (i, fee) in fees.iter().copied().enumerate() {
+            let key = build_feerate_key(fee, mass, i as u64);
+            s.insert(key.clone());
+            tree.insert(key);
+        }
+
+        // Randomly remove 1/6 of the items
+        let remove = s.iter().take(fees.len() / 6).cloned().collect_vec();
+        for r in remove {
+            s.remove(&r);
+            tree.remove(&r);
+        }
+
+        // Collect to vec and sort for reference
+        let mut v = s.into_iter().collect_vec();
+        v.sort();
+
+        // Test reverse iteration
+        for (expected, item) in v.iter().rev().zip(tree.descending_iter()) {
+            assert_eq!(&expected, &item);
+            assert!(expected.cmp(item).is_eq()); // Assert Ord equality as well
+        }
+
+        // Sweep through the tree and verify that weight search queries are handled correctly
+        let eps: f64 = 0.001;
+        let mut sum = 0.0;
+        for expected in v.iter() {
+            let weight = expected.weight();
+            let eps = eps.min(weight / 3.0);
+            let samples = [sum + eps, sum + weight / 2.0, sum + weight - eps];
+            for sample in samples {
+                let key = tree.search(sample);
+                assert_eq!(expected, key);
+                assert!(expected.cmp(key).is_eq()); // Assert Ord equality as well
+            }
+            sum += weight;
+        }
+
+        println!("{}, {}", sum, tree.total_weight());
+
+        // Test clamped search bounds
+        assert_eq!(tree.first(), Some(tree.search(f64::NEG_INFINITY)));
+        assert_eq!(tree.first(), Some(tree.search(-1.0)));
+        assert_eq!(tree.first(), Some(tree.search(-eps)));
+        assert_eq!(tree.first(), Some(tree.search(0.0)));
+        assert_eq!(tree.last(), Some(tree.search(sum)));
+        assert_eq!(tree.last(), Some(tree.search(sum + eps)));
+        assert_eq!(tree.last(), Some(tree.search(sum + 1.0)));
+        assert_eq!(tree.last(), Some(tree.search(1.0 / 0.0)));
+        assert_eq!(tree.last(), Some(tree.search(f64::INFINITY)));
+        let _ = tree.search(f64::NAN);
+
+        // Assert prefix weights
+        let mut prefix = Vec::with_capacity(v.len());
+        prefix.push(v[0].weight());
+        for i in 1..v.len() {
+            prefix.push(prefix[i - 1] + v[i].weight());
+        }
+        let eps = v.iter().map(|k| k.weight()).min_by(f64::total_cmp).unwrap() * 1e-4;
+        for (expected_prefix, key) in prefix.into_iter().zip(v) {
+            let prefix = tree.prefix_weight(&key);
+            assert!(expected_prefix.sub(prefix).abs() < eps);
+        }
+    }
+
+    #[test]
+    fn test_tree_rev_iter() {
+        let mut tree = SearchTree::new();
+        let mass = 2000;
+        let fees = vec![[123, 113, 10_000, 1000, 2050, 2048]; 64 * (64 + 1)].into_iter().flatten().collect_vec();
+        let mut v = Vec::with_capacity(fees.len());
+        for (i, fee) in fees.iter().copied().enumerate() {
+            let key = build_feerate_key(fee, mass, i as u64);
+            v.push(key.clone());
+            tree.insert(key);
+        }
+        v.sort();
+
+        for (expected, item) in v.into_iter().rev().zip(tree.descending_iter()) {
+            assert_eq!(&expected, item);
+            assert!(expected.cmp(item).is_eq()); // Assert Ord equality as well
+        }
+    }
+}
diff --git a/mining/src/mempool/model/frontier/selectors.rs b/mining/src/mempool/model/frontier/selectors.rs
new file mode 100644
index 0000000000..a30ecc1459
--- /dev/null
+++ b/mining/src/mempool/model/frontier/selectors.rs
@@ -0,0 +1,162 @@
+use crate::Policy;
+use kaspa_consensus_core::{
+    block::TemplateTransactionSelector,
+    tx::{Transaction, TransactionId},
+};
+use std::{
+    collections::{BTreeMap, HashMap},
+    sync::Arc,
+};
+
+pub struct SequenceSelectorTransaction {
+    pub tx: Arc<Transaction>,
+    pub mass: u64,
+}
+
+impl SequenceSelectorTransaction {
+    pub fn new(tx: Arc<Transaction>, mass: u64) -> Self {
+        Self { tx, mass }
+    }
+}
+
+type SequencePriorityIndex = u32;
+
+/// The input sequence for the [`SequenceSelector`] transaction selector
+#[derive(Default)]
+pub struct SequenceSelectorInput {
+    /// We use the btree map ordered by insertion order in order to follow
+    /// the initial sequence order while allowing for efficient removal of previous selections
+    inner: BTreeMap<SequencePriorityIndex, SequenceSelectorTransaction>,
+}
+
+impl FromIterator<SequenceSelectorTransaction> for SequenceSelectorInput {
+    fn from_iter<T: IntoIterator<Item = SequenceSelectorTransaction>>(iter: T) -> Self {
+        Self { inner: BTreeMap::from_iter(iter.into_iter().enumerate().map(|(i, v)| (i as SequencePriorityIndex, v))) }
+    }
+}
+
+impl SequenceSelectorInput {
+    pub fn push(&mut self, tx: Arc<Transaction>, mass: u64) {
+        let idx = self.inner.len() as SequencePriorityIndex;
+        self.inner.insert(idx, SequenceSelectorTransaction::new(tx, mass));
+    }
+
+    pub fn iter(&self) -> impl Iterator<Item = &SequenceSelectorTransaction> {
+        self.inner.values()
+    }
+}
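// Illustrative sketch (not part of the upstream patch): the two equivalent ways of building
// an input sequence, either incrementally via push() or in one shot via the FromIterator
// impl above. `build_input` is a hypothetical helper; the (tx, mass) pairs are assumed to
// have been produced by weighted sampling upstream.
fn build_input(txs: Vec<(Arc<Transaction>, u64)>) -> SequenceSelectorInput {
    txs.into_iter().map(|(tx, mass)| SequenceSelectorTransaction::new(tx, mass)).collect()
}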
+
+/// Helper struct for storing data related to previous selections
+struct SequenceSelectorSelection {
+    tx_id: TransactionId,
+    mass: u64,
+    priority_index: SequencePriorityIndex,
+}
+
+/// A selector which selects transactions in the order they are provided. The selector assumes
+/// that the transactions were already selected via weighted sampling and simply tries them one
+/// after the other until the block mass limit is reached.
+pub struct SequenceSelector {
+    input_sequence: SequenceSelectorInput,
+    selected_vec: Vec<SequenceSelectorSelection>,
+    /// Maps from selected tx ids to tx mass so that the total used mass can be subtracted on tx reject
+    selected_map: Option<HashMap<TransactionId, u64>>,
+    total_selected_mass: u64,
+    overall_candidates: usize,
+    overall_rejections: usize,
+    policy: Policy,
+}
+
+impl SequenceSelector {
+    pub fn new(input_sequence: SequenceSelectorInput, policy: Policy) -> Self {
+        Self {
+            overall_candidates: input_sequence.inner.len(),
+            selected_vec: Vec::with_capacity(input_sequence.inner.len()),
+            input_sequence,
+            selected_map: Default::default(),
+            total_selected_mass: Default::default(),
+            overall_rejections: Default::default(),
+            policy,
+        }
+    }
+
+    #[inline]
+    fn reset_selection(&mut self) {
+        self.selected_vec.clear();
+        self.selected_map = None;
+    }
+}
+
+impl TemplateTransactionSelector for SequenceSelector {
+    fn select_transactions(&mut self) -> Vec<Transaction> {
+        // Remove selections from the previous round if any
+        for selection in self.selected_vec.drain(..) {
+            self.input_sequence.inner.remove(&selection.priority_index);
+        }
+        // Reset selection data structures
+        self.reset_selection();
+        let mut transactions = Vec::with_capacity(self.input_sequence.inner.len());
+
+        // Iterate the input sequence in order
+        for (&priority_index, tx) in self.input_sequence.inner.iter() {
+            if self.total_selected_mass.saturating_add(tx.mass) > self.policy.max_block_mass {
+                // We assume the sequence is relatively small, hence we keep on searching
+                // for transactions with lower mass which might fit into the remaining gap
+                continue;
+            }
+            self.total_selected_mass += tx.mass;
+            self.selected_vec.push(SequenceSelectorSelection { tx_id: tx.tx.id(), mass: tx.mass, priority_index });
+            transactions.push(tx.tx.as_ref().clone())
+        }
+        transactions
+    }
+
+    fn reject_selection(&mut self, tx_id: TransactionId) {
+        // Lazy-create the map only when there are actual rejections
+        let selected_map = self.selected_map.get_or_insert_with(|| self.selected_vec.iter().map(|tx| (tx.tx_id, tx.mass)).collect());
+        let mass = selected_map.remove(&tx_id).expect("only previously selected txs can be rejected (and only once)");
+        // Selections must be counted in total selected mass, so this subtraction cannot underflow
+        self.total_selected_mass -= mass;
+        self.overall_rejections += 1;
+    }
+
+    fn is_successful(&self) -> bool {
+        const SUFFICIENT_MASS_THRESHOLD: f64 = 0.8;
+        const LOW_REJECTION_FRACTION: f64 = 0.2;
+
+        // We consider the operation successful if either mass occupation is above 80% or the rejection rate is below 20%
+        self.overall_rejections == 0
+            || (self.total_selected_mass as f64) > self.policy.max_block_mass as f64 * SUFFICIENT_MASS_THRESHOLD
+            || (self.overall_rejections as f64) < self.overall_candidates as f64 * LOW_REJECTION_FRACTION
+    }
+}
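// Illustrative sketch (not part of the upstream patch): the retry protocol a template builder
// is assumed to follow with any TemplateTransactionSelector -- select a batch, report contextual
// failures via reject_selection(), and retry while is_successful() holds. `drive_selector` and
// `validate_for_template` are hypothetical helpers standing in for consensus-side logic.
fn drive_selector(selector: &mut dyn TemplateTransactionSelector) -> Vec<Transaction> {
    let mut template = Vec::new();
    loop {
        let batch = selector.select_transactions();
        if batch.is_empty() {
            break; // nothing more to try
        }
        for tx in batch {
            if validate_for_template(&tx) {
                template.push(tx);
            } else {
                selector.reject_selection(tx.id()); // frees the tx's mass for the next round
            }
        }
        if !selector.is_successful() {
            break; // too many rejections or too little mass occupied; give up on retries
        }
    }
    template
}
fn validate_for_template(_tx: &Transaction) -> bool {
    true // placeholder for contextual validation
}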
+
+/// A selector that selects all the transactions it holds and is always considered successful.
+/// If all mempool transactions have combined mass which is <= block mass limit, this selector
+/// should be called and provided with all the transactions.
+pub struct TakeAllSelector {
+    txs: Vec<Arc<Transaction>>,
+}
+
+impl TakeAllSelector {
+    pub fn new(txs: Vec<Arc<Transaction>>) -> Self {
+        Self { txs }
+    }
+}
+
+impl TemplateTransactionSelector for TakeAllSelector {
+    fn select_transactions(&mut self) -> Vec<Transaction> {
+        // Drain on the first call so that subsequent calls return nothing
+        self.txs.drain(..).map(|tx| tx.as_ref().clone()).collect()
+    }
+
+    fn reject_selection(&mut self, _tx_id: TransactionId) {
+        // No need to track rejections (for reduced mass), since there's nothing else to select
+    }
+
+    fn is_successful(&self) -> bool {
+        // Considered successful because we provided all mempool transactions to this
+        // selector, so there's no point in retries
+        true
+    }
+}
diff --git a/mining/src/mempool/model/mod.rs b/mining/src/mempool/model/mod.rs
index 88997e46f1..bfe622293d 100644
--- a/mining/src/mempool/model/mod.rs
+++ b/mining/src/mempool/model/mod.rs
@@ -1,4 +1,5 @@
 pub(crate) mod accepted_transactions;
+pub(crate) mod frontier;
 pub(crate) mod map;
 pub(crate) mod orphan_pool;
 pub(crate) mod pool;
diff --git a/mining/src/mempool/model/orphan_pool.rs b/mining/src/mempool/model/orphan_pool.rs
index 6bb4361788..f813e1a56b 100644
--- a/mining/src/mempool/model/orphan_pool.rs
+++ b/mining/src/mempool/model/orphan_pool.rs
@@ -302,8 +302,4 @@ impl Pool for OrphanPool {
     fn chained(&self) -> &TransactionsEdges {
         &self.chained_orphans
     }
-
-    fn get_mut(&mut self, transaction_id: &TransactionId) -> Option<&mut MempoolTransaction> {
-        self.all_orphans.get_mut(transaction_id)
-    }
 }
diff --git a/mining/src/mempool/model/pool.rs b/mining/src/mempool/model/pool.rs
index 5ad6970eb4..0a7bb6ec5d 100644
--- a/mining/src/mempool/model/pool.rs
+++ b/mining/src/mempool/model/pool.rs
@@ -27,8 +27,6 @@ pub(crate) trait Pool {
         self.all().get(transaction_id)
     }
 
-    fn get_mut(&mut self, transaction_id: &TransactionId) -> Option<&mut MempoolTransaction>;
-
     /// Returns the number of transactions in the pool
     fn len(&self) -> usize {
         self.all().len()
diff --git a/mining/src/mempool/model/transactions_pool.rs b/mining/src/mempool/model/transactions_pool.rs
index cf70150df7..5741831d3f 100644
--- a/mining/src/mempool/model/transactions_pool.rs
+++ b/mining/src/mempool/model/transactions_pool.rs
@@ -1,27 +1,32 @@
 use crate::{
+    feerate::{FeerateEstimator, FeerateEstimatorArgs},
     mempool::{
         config::Config,
         errors::{RuleError, RuleResult},
         model::{
             map::MempoolTransactionCollection,
             pool::{Pool, TransactionsEdges},
-            tx::MempoolTransaction,
+            tx::{DoubleSpend, MempoolTransaction},
             utxo_set::MempoolUtxoSet,
         },
         tx::Priority,
     },
-    model::{candidate_tx::CandidateTransaction, topological_index::TopologicalIndex},
+    model::{topological_index::TopologicalIndex, TransactionIdSet},
+    Policy,
 };
 use kaspa_consensus_core::{
-    tx::TransactionId,
-    tx::{MutableTransaction, TransactionOutpoint},
+    block::TemplateTransactionSelector,
+    tx::{MutableTransaction, TransactionId, TransactionOutpoint},
 };
-use kaspa_core::{time::unix_now, trace, warn};
+use kaspa_core::{debug, time::unix_now, trace};
 use std::{
-    collections::{hash_map::Keys, hash_set::Iter, HashSet},
+    collections::{hash_map::Keys, hash_set::Iter},
+    iter::once,
     sync::Arc,
 };
 
+use super::frontier::Frontier;
+
 /// Pool of transactions to be included in a block template
 ///
 /// ### Rust rewrite notes
@@ -47,19 +52,29 @@ pub(crate) struct TransactionsPool {
     /// Mempool config
     config: Arc<Config>,
 
-    /// Store of transactions
+    /// Store of transactions.
+ /// Any mutable access to this map should be carefully reviewed for consistency with all other collections + /// and fields of this struct. In particular, `estimated_size` must reflect the exact sum of estimated size + /// for all current transactions in this collection. all_transactions: MempoolTransactionCollection, + /// Transactions dependencies formed by inputs present in pool - ancestor relations. parent_transactions: TransactionsEdges, + /// Transactions dependencies formed by outputs present in pool - successor relations. chained_transactions: TransactionsEdges, + /// Transactions with no parents in the mempool -- ready to be inserted into a block template - ready_transactions: HashSet, + ready_transactions: Frontier, last_expire_scan_daa_score: u64, + /// last expire scan time in milliseconds last_expire_scan_time: u64, + /// Sum of estimated size for all transactions currently held in `all_transactions` + estimated_size: usize, + /// Store of UTXOs utxo_set: MempoolUtxoSet, } @@ -75,6 +90,7 @@ impl TransactionsPool { last_expire_scan_daa_score: 0, last_expire_scan_time: unix_now(), utxo_set: MempoolUtxoSet::new(), + estimated_size: 0, } } @@ -84,15 +100,16 @@ impl TransactionsPool { transaction: MutableTransaction, virtual_daa_score: u64, priority: Priority, + transaction_size: usize, ) -> RuleResult<&MempoolTransaction> { let transaction = MempoolTransaction::new(transaction, priority, virtual_daa_score); let id = transaction.id(); - self.add_mempool_transaction(transaction)?; + self.add_mempool_transaction(transaction, transaction_size)?; Ok(self.get(&id).unwrap()) } /// Add a mempool transaction to the pool - pub(crate) fn add_mempool_transaction(&mut self, transaction: MempoolTransaction) -> RuleResult<()> { + pub(crate) fn add_mempool_transaction(&mut self, transaction: MempoolTransaction, transaction_size: usize) -> RuleResult<()> { let id = transaction.id(); assert!(!self.all_transactions.contains_key(&id), "transaction {id} to be added already exists in the transactions pool"); @@ -105,7 +122,7 @@ impl TransactionsPool { let parents = self.get_parent_transaction_ids_in_pool(&transaction.mtx); self.parent_transactions.insert(id, parents.clone()); if parents.is_empty() { - self.ready_transactions.insert(id); + self.ready_transactions.insert((&transaction).into()); } for parent_id in parents { let entry = self.chained_transactions.entry(parent_id).or_default(); @@ -113,6 +130,7 @@ impl TransactionsPool { } self.utxo_set.add_transaction(&transaction.mtx); + self.estimated_size += transaction_size; self.all_transactions.insert(id, transaction); trace!("Added transaction {}", id); Ok(()) @@ -133,18 +151,20 @@ impl TransactionsPool { if let Some(parents) = self.parent_transactions.get_mut(chain) { parents.remove(transaction_id); if parents.is_empty() { - self.ready_transactions.insert(*chain); + let tx = self.all_transactions.get(chain).unwrap(); + self.ready_transactions.insert(tx.into()); } } } } self.parent_transactions.remove(transaction_id); self.chained_transactions.remove(transaction_id); - self.ready_transactions.remove(transaction_id); // Remove the transaction itself let removed_tx = self.all_transactions.remove(transaction_id).ok_or(RuleError::RejectMissingTransaction(*transaction_id))?; + self.ready_transactions.remove(&(&removed_tx).into()); + // TODO: consider using `self.parent_transactions.get(transaction_id)` // The tradeoff to consider is whether it might be possible that a parent tx exists in the pool // however its relation as parent is not registered. 
This can supposedly happen in rare cases where @@ -153,88 +173,112 @@ impl TransactionsPool { // Remove the transaction from the mempool UTXO set self.utxo_set.remove_transaction(&removed_tx.mtx, &parent_ids); + self.estimated_size -= removed_tx.mtx.mempool_estimated_bytes(); + + if self.all_transactions.is_empty() { + assert_eq!(0, self.estimated_size, "Sanity test -- if tx pool is empty, estimated byte size should be zero"); + } Ok(removed_tx) } + pub(crate) fn update_revalidated_transaction(&mut self, transaction: MutableTransaction) -> bool { + if let Some(tx) = self.all_transactions.get_mut(&transaction.id()) { + // Make sure to update the overall estimated size since the updated transaction might have a different size + self.estimated_size -= tx.mtx.mempool_estimated_bytes(); + tx.mtx = transaction; + self.estimated_size += tx.mtx.mempool_estimated_bytes(); + true + } else { + false + } + } + pub(crate) fn ready_transaction_count(&self) -> usize { self.ready_transactions.len() } - /// all_ready_transactions returns all fully populated mempool transactions having no parents in the mempool. - /// These transactions are ready for being inserted in a block template. - pub(crate) fn all_ready_transactions(&self) -> Vec { - // The returned transactions are leaving the mempool so they are cloned - self.ready_transactions - .iter() - .take(self.config.maximum_ready_transaction_count as usize) - .map(|id| CandidateTransaction::from_mutable(&self.all_transactions.get(id).unwrap().mtx)) - .collect() + pub(crate) fn ready_transaction_total_mass(&self) -> u64 { + self.ready_transactions.total_mass() } - /// Is the mempool transaction identified by `transaction_id` unchained, thus having no successor? - pub(crate) fn transaction_is_unchained(&self, transaction_id: &TransactionId) -> bool { - if self.all_transactions.contains_key(transaction_id) { - if let Some(chains) = self.chained_transactions.get(transaction_id) { - return chains.is_empty(); - } - return true; - } - false + /// Dynamically builds a transaction selector based on the specific state of the ready transactions frontier + pub(crate) fn build_selector(&self) -> Box { + self.ready_transactions.build_selector(&Policy::new(self.config.maximum_mass_per_block)) } + + /// Builds a feerate estimator based on internal state of the ready transactions frontier + pub(crate) fn build_feerate_estimator(&self, args: FeerateEstimatorArgs) -> FeerateEstimator { + self.ready_transactions.build_feerate_estimator(args) + } + /// Returns the exceeding low-priority transactions having the lowest fee rates in order - /// to have room for at least `free_slots` new transactions. The returned transactions + /// to make room for `transaction`. The returned transactions /// are guaranteed to be unchained (no successor in mempool) and to not be parent of /// `transaction`. /// - /// An error is returned if the mempool is filled with high priority transactions. + /// An error is returned if the mempool is filled with high priority transactions, or + /// there are not enough lower feerate transactions that can be removed to accommodate `transaction` pub(crate) fn limit_transaction_count( &self, - free_slots: usize, transaction: &MutableTransaction, + transaction_size: usize, ) -> RuleResult> { - assert!(free_slots > 0); - // Returns a vector of transactions to be removed that the caller has to remove actually. - // The caller is golang validateAndInsertTransaction equivalent. - // This behavior differs from golang impl. 
- let trim_size = self.len() + free_slots - usize::min(self.len() + free_slots, self.config.maximum_transaction_count as usize); - let mut transactions_to_remove = Vec::with_capacity(trim_size); - if trim_size > 0 { - // TODO: consider introducing an index on all_transactions low-priority items instead. - // - // Sorting this vector here may be sub-optimal compared with maintaining a sorted - // index of all_transactions low-priority items if the proportion of low-priority txs - // in all_transactions is important. - let low_priority_txs = self - .all_transactions - .values() - .filter(|x| x.priority == Priority::Low && self.transaction_is_unchained(&x.id()) && !x.is_parent_of(transaction)); - - if trim_size == 1 { - // This is the most likely case. Here we just search the minimum, thus avoiding the need to sort altogether. - if let Some(tx) = low_priority_txs.min_by(|a, b| a.fee_rate().partial_cmp(&b.fee_rate()).unwrap()) { - transactions_to_remove.push(tx); - } - } else { - let mut low_priority_txs = low_priority_txs.collect::>(); - if low_priority_txs.len() > trim_size { - low_priority_txs.sort_by(|a, b| a.fee_rate().partial_cmp(&b.fee_rate()).unwrap()); - transactions_to_remove.extend_from_slice(&low_priority_txs[0..usize::min(trim_size, low_priority_txs.len())]); - } else { - transactions_to_remove = low_priority_txs; - } - } + // No eviction needed -- return + if self.len() < self.config.maximum_transaction_count + && self.estimated_size + transaction_size <= self.config.mempool_size_limit + { + return Ok(Default::default()); } - // An error is returned if the mempool is filled with high priority and other unremovable transactions. - let tx_count = self.len() + free_slots - transactions_to_remove.len(); - if tx_count as u64 > self.config.maximum_transaction_count { - let err = RuleError::RejectMempoolIsFull(tx_count - free_slots, self.config.maximum_transaction_count); - warn!("{}", err.to_string()); - return Err(err); + // Returns a vector of transactions to be removed (the caller has to actually remove) + let feerate_threshold = transaction.calculated_feerate().unwrap(); + let mut txs_to_remove = Vec::with_capacity(1); // Normally we expect a single removal + let mut selection_overall_size = 0; + for tx in self + .ready_transactions + .ascending_iter() + .map(|tx| self.all_transactions.get(&tx.id()).unwrap()) + .filter(|mtx| mtx.priority == Priority::Low) + { + // TODO (optimization): inline the `has_parent_in_set` check within the redeemer traversal and exit early if possible + let redeemers = self.get_redeemer_ids_in_pool(&tx.id()).into_iter().chain(once(tx.id())).collect::(); + if transaction.has_parent_in_set(&redeemers) { + continue; + } + + // We are iterating ready txs by ascending feerate so the pending tx has lower feerate than all remaining txs + if tx.fee_rate() > feerate_threshold { + let err = RuleError::RejectMempoolIsFull; + debug!("Transaction {} with feerate {} has been rejected: {}", transaction.id(), feerate_threshold, err); + return Err(err); + } + + txs_to_remove.push(tx.id()); + selection_overall_size += tx.mtx.mempool_estimated_bytes(); + + if self.len() + 1 - txs_to_remove.len() <= self.config.maximum_transaction_count + && self.estimated_size + transaction_size - selection_overall_size <= self.config.mempool_size_limit + { + return Ok(txs_to_remove); + } } - Ok(transactions_to_remove.iter().map(|x| x.id()).collect()) + // We could not find sufficient space for the pending transaction + debug!( + "Mempool is filled with high-priority/ancestor txs (count: {}, 
bytes: {}). Transaction {} with feerate {} and size {} has been rejected: {}", + self.len(), + self.estimated_size, + transaction.id(), + feerate_threshold, + transaction_size, + RuleError::RejectMempoolIsFull + ); + Err(RuleError::RejectMempoolIsFull) + } + + pub(crate) fn get_estimated_size(&self) -> usize { + self.estimated_size } pub(crate) fn all_transaction_ids_with_priority(&self, priority: Priority) -> Vec { @@ -245,10 +289,29 @@ impl TransactionsPool { self.utxo_set.get_outpoint_owner_id(outpoint) } + /// Make sure no other transaction in the mempool is already spending an output which one of this transaction inputs spends pub(crate) fn check_double_spends(&self, transaction: &MutableTransaction) -> RuleResult<()> { self.utxo_set.check_double_spends(transaction) } + /// Returns the first double spend of every transaction in the mempool double spending on `transaction` + pub(crate) fn get_double_spend_transaction_ids(&self, transaction: &MutableTransaction) -> Vec { + self.utxo_set.get_double_spend_transaction_ids(transaction) + } + + pub(crate) fn get_double_spend_owner<'a>(&'a self, double_spend: &DoubleSpend) -> RuleResult<&'a MempoolTransaction> { + match self.get(&double_spend.owner_id) { + Some(transaction) => Ok(transaction), + None => { + // This case should never arise in the first place. + // Anyway, in case it does, if a double spent transaction id is found but the matching + // transaction cannot be located in the mempool a replacement is no longer possible + // so a double spend error is returned. + Err(double_spend.into()) + } + } + } + pub(crate) fn collect_expired_low_priority_transactions(&mut self, virtual_daa_score: u64) -> Vec { let now = unix_now(); if virtual_daa_score < self.last_expire_scan_daa_score + self.config.transaction_expire_scan_interval_daa_score @@ -300,8 +363,4 @@ impl Pool for TransactionsPool { fn chained(&self) -> &TransactionsEdges { &self.chained_transactions } - - fn get_mut(&mut self, transaction_id: &TransactionId) -> Option<&mut MempoolTransaction> { - self.all_transactions.get_mut(transaction_id) - } } diff --git a/mining/src/mempool/model/tx.rs b/mining/src/mempool/model/tx.rs index 1e549c9979..27bb87d09d 100644 --- a/mining/src/mempool/model/tx.rs +++ b/mining/src/mempool/model/tx.rs @@ -1,8 +1,9 @@ -use crate::mempool::tx::Priority; -use kaspa_consensus_core::{tx::MutableTransaction, tx::TransactionId}; +use crate::mempool::tx::{Priority, RbfPolicy}; +use kaspa_consensus_core::tx::{MutableTransaction, Transaction, TransactionId, TransactionOutpoint}; +use kaspa_mining_errors::mempool::RuleError; use std::{ - cmp::Ordering, fmt::{Display, Formatter}, + sync::Arc, }; pub(crate) struct MempoolTransaction { @@ -26,33 +27,53 @@ impl MempoolTransaction { assert!(contextual_mass > 0, "expected to be called for validated txs only"); self.mtx.calculated_fee.unwrap() as f64 / contextual_mass as f64 } +} - pub(crate) fn is_parent_of(&self, transaction: &MutableTransaction) -> bool { - let parent_id = self.id(); - transaction.tx.inputs.iter().any(|x| x.previous_outpoint.transaction_id == parent_id) +impl RbfPolicy { + #[cfg(test)] + /// Returns an alternate policy accepting a transaction insertion in case the policy requires a replacement + pub(crate) fn for_insert(&self) -> RbfPolicy { + match self { + RbfPolicy::Forbidden | RbfPolicy::Allowed => *self, + RbfPolicy::Mandatory => RbfPolicy::Allowed, + } } } -impl Ord for MempoolTransaction { - fn cmp(&self, other: &Self) -> Ordering { - 
self.fee_rate().total_cmp(&other.fee_rate()).then(self.id().cmp(&other.id())) - } +pub(crate) struct DoubleSpend { + pub outpoint: TransactionOutpoint, + pub owner_id: TransactionId, } -impl Eq for MempoolTransaction {} +impl DoubleSpend { + pub fn new(outpoint: TransactionOutpoint, owner_id: TransactionId) -> Self { + Self { outpoint, owner_id } + } +} -impl PartialOrd for MempoolTransaction { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) +impl From for RuleError { + fn from(value: DoubleSpend) -> Self { + RuleError::RejectDoubleSpendInMempool(value.outpoint, value.owner_id) } } -impl PartialEq for MempoolTransaction { - fn eq(&self, other: &Self) -> bool { - self.fee_rate() == other.fee_rate() +impl From<&DoubleSpend> for RuleError { + fn from(value: &DoubleSpend) -> Self { + RuleError::RejectDoubleSpendInMempool(value.outpoint, value.owner_id) } } +pub(crate) struct TransactionPreValidation { + pub transaction: MutableTransaction, + pub feerate_threshold: Option, +} + +#[derive(Default)] +pub(crate) struct TransactionPostValidation { + pub removed: Option>, + pub accepted: Option>, +} + #[derive(PartialEq, Eq)] pub(crate) enum TxRemovalReason { Muted, @@ -63,6 +84,7 @@ pub(crate) enum TxRemovalReason { DoubleSpend, InvalidInBlockTemplate, RevalidationWithMissingOutpoints, + ReplacedByFee, } impl TxRemovalReason { @@ -76,6 +98,7 @@ impl TxRemovalReason { TxRemovalReason::DoubleSpend => "double spend", TxRemovalReason::InvalidInBlockTemplate => "invalid in block template", TxRemovalReason::RevalidationWithMissingOutpoints => "revalidation with missing outpoints", + TxRemovalReason::ReplacedByFee => "replaced by fee", } } diff --git a/mining/src/mempool/model/utxo_set.rs b/mining/src/mempool/model/utxo_set.rs index 38c2bcb4ee..808d674885 100644 --- a/mining/src/mempool/model/utxo_set.rs +++ b/mining/src/mempool/model/utxo_set.rs @@ -1,7 +1,9 @@ +use std::collections::HashSet; + use crate::{ mempool::{ - errors::{RuleError, RuleResult}, - model::map::OutpointIndex, + errors::RuleResult, + model::{map::OutpointIndex, tx::DoubleSpend}, }, model::TransactionIdSet, }; @@ -70,14 +72,36 @@ impl MempoolUtxoSet { /// Make sure no other transaction in the mempool is already spending an output which one of this transaction inputs spends pub(crate) fn check_double_spends(&self, transaction: &MutableTransaction) -> RuleResult<()> { + match self.get_first_double_spend(transaction) { + Some(double_spend) => Err(double_spend.into()), + None => Ok(()), + } + } + + pub(crate) fn get_first_double_spend(&self, transaction: &MutableTransaction) -> Option { let transaction_id = transaction.id(); for input in transaction.tx.inputs.iter() { if let Some(existing_transaction_id) = self.get_outpoint_owner_id(&input.previous_outpoint) { if *existing_transaction_id != transaction_id { - return Err(RuleError::RejectDoubleSpendInMempool(input.previous_outpoint, *existing_transaction_id)); + return Some(DoubleSpend::new(input.previous_outpoint, *existing_transaction_id)); + } + } + } + None + } + + /// Returns the first double spend of every transaction in the mempool double spending on `transaction` + pub(crate) fn get_double_spend_transaction_ids(&self, transaction: &MutableTransaction) -> Vec { + let transaction_id = transaction.id(); + let mut double_spends = vec![]; + let mut visited = HashSet::new(); + for input in transaction.tx.inputs.iter() { + if let Some(existing_transaction_id) = self.get_outpoint_owner_id(&input.previous_outpoint) { + if *existing_transaction_id != transaction_id 
&& visited.insert(*existing_transaction_id) { + double_spends.push(DoubleSpend::new(input.previous_outpoint, *existing_transaction_id)); } } } - Ok(()) + double_spends } } diff --git a/mining/src/mempool/populate_entries_and_try_validate.rs b/mining/src/mempool/populate_entries_and_try_validate.rs index 0c0dcf9a1e..a5c1252805 100644 --- a/mining/src/mempool/populate_entries_and_try_validate.rs +++ b/mining/src/mempool/populate_entries_and_try_validate.rs @@ -1,5 +1,12 @@ use crate::mempool::{errors::RuleResult, model::pool::Pool, Mempool}; -use kaspa_consensus_core::{api::ConsensusApi, constants::UNACCEPTED_DAA_SCORE, tx::MutableTransaction, tx::UtxoEntry}; +use kaspa_consensus_core::{ + api::{ + args::{TransactionValidationArgs, TransactionValidationBatchArgs}, + ConsensusApi, + }, + constants::UNACCEPTED_DAA_SCORE, + tx::{MutableTransaction, UtxoEntry}, +}; use kaspa_mining_errors::mempool::RuleError; impl Mempool { @@ -14,15 +21,20 @@ impl Mempool { } } -pub(crate) fn validate_mempool_transaction(consensus: &dyn ConsensusApi, transaction: &mut MutableTransaction) -> RuleResult<()> { - Ok(consensus.validate_mempool_transaction(transaction)?) +pub(crate) fn validate_mempool_transaction( + consensus: &dyn ConsensusApi, + transaction: &mut MutableTransaction, + args: &TransactionValidationArgs, +) -> RuleResult<()> { + Ok(consensus.validate_mempool_transaction(transaction, args)?) } pub(crate) fn validate_mempool_transactions_in_parallel( consensus: &dyn ConsensusApi, transactions: &mut [MutableTransaction], + args: &TransactionValidationBatchArgs, ) -> Vec> { - consensus.validate_mempool_transactions_in_parallel(transactions).into_iter().map(|x| x.map_err(RuleError::from)).collect() + consensus.validate_mempool_transactions_in_parallel(transactions, args).into_iter().map(|x| x.map_err(RuleError::from)).collect() } pub(crate) fn populate_mempool_transactions_in_parallel( diff --git a/mining/src/mempool/remove_transaction.rs b/mining/src/mempool/remove_transaction.rs index 960ebc264b..3aac8a20bd 100644 --- a/mining/src/mempool/remove_transaction.rs +++ b/mining/src/mempool/remove_transaction.rs @@ -4,7 +4,7 @@ use crate::mempool::{ Mempool, }; use kaspa_consensus_core::tx::TransactionId; -use kaspa_core::{debug, warn}; +use kaspa_core::debug; use kaspa_utils::iter::IterExtensions; impl Mempool { @@ -43,8 +43,8 @@ impl Mempool { TxRemovalReason::Muted => {} TxRemovalReason::DoubleSpend => match removed_transactions.len() { 0 => {} - 1 => warn!("Removed transaction ({}) {}{}", reason, removed_transactions[0], extra_info), - n => warn!( + 1 => debug!("Removed transaction ({}) {}{}", reason, removed_transactions[0], extra_info), + n => debug!( "Removed {} transactions ({}): {}{}", n, reason, diff --git a/mining/src/mempool/replace_by_fee.rs b/mining/src/mempool/replace_by_fee.rs new file mode 100644 index 0000000000..6acd1a6188 --- /dev/null +++ b/mining/src/mempool/replace_by_fee.rs @@ -0,0 +1,149 @@ +use crate::mempool::{ + errors::{RuleError, RuleResult}, + model::tx::{DoubleSpend, MempoolTransaction, TxRemovalReason}, + tx::RbfPolicy, + Mempool, +}; +use kaspa_consensus_core::tx::{MutableTransaction, Transaction}; +use std::sync::Arc; + +impl Mempool { + /// Returns the replace by fee (RBF) constraint fee/mass threshold for an incoming transaction and a policy. + /// + /// Fails if the transaction does not meet some condition of the RBF policy, excluding the fee/mass condition. + /// + /// See [`RbfPolicy`] variants for details of each policy process and success conditions. 
+    pub(super) fn get_replace_by_fee_constraint(
+        &self,
+        transaction: &MutableTransaction,
+        rbf_policy: RbfPolicy,
+    ) -> RuleResult<Option<f64>> {
+        match rbf_policy {
+            RbfPolicy::Forbidden => {
+                // When RBF is forbidden, fails early on any double spend
+                self.transaction_pool.check_double_spends(transaction)?;
+                Ok(None)
+            }
+
+            RbfPolicy::Allowed => {
+                // When RBF is allowed, never fails since both insertion and replacement are possible
+                let double_spends = self.transaction_pool.get_double_spend_transaction_ids(transaction);
+                if double_spends.is_empty() {
+                    Ok(None)
+                } else {
+                    let mut feerate_threshold = 0f64;
+                    for double_spend in double_spends {
+                        // We take the max over all double spends as the required threshold
+                        feerate_threshold = feerate_threshold.max(self.get_double_spend_feerate(&double_spend)?);
+                    }
+                    Ok(Some(feerate_threshold))
+                }
+            }
+
+            RbfPolicy::Mandatory => {
+                // When RBF is mandatory, fails early if we do not have exactly one double spending transaction
+                let double_spends = self.transaction_pool.get_double_spend_transaction_ids(transaction);
+                match double_spends.len() {
+                    0 => Err(RuleError::RejectRbfNoDoubleSpend),
+                    1 => {
+                        let feerate_threshold = self.get_double_spend_feerate(&double_spends[0])?;
+                        Ok(Some(feerate_threshold))
+                    }
+                    _ => Err(RuleError::RejectRbfTooManyDoubleSpendingTransactions),
+                }
+            }
+        }
+    }
+
+    /// Executes replace by fee (RBF) for an incoming transaction and a policy.
+    ///
+    /// See [`RbfPolicy`] variants for details of each policy process and success conditions.
+    ///
+    /// On success, `transaction` is guaranteed to embed no double spend with the mempool.
+    ///
+    /// On success with the [`RbfPolicy::Mandatory`] policy, some removed transaction is always returned.
+    pub(super) fn execute_replace_by_fee(
+        &mut self,
+        transaction: &MutableTransaction,
+        rbf_policy: RbfPolicy,
+    ) -> RuleResult<Option<Arc<Transaction>>> {
+        match rbf_policy {
+            RbfPolicy::Forbidden => {
+                self.transaction_pool.check_double_spends(transaction)?;
+                Ok(None)
+            }
+
+            RbfPolicy::Allowed => {
+                let double_spends = self.transaction_pool.get_double_spend_transaction_ids(transaction);
+                match double_spends.is_empty() {
+                    true => Ok(None),
+                    false => {
+                        let removed = self.validate_double_spending_transaction(transaction, &double_spends[0])?.mtx.tx.clone();
+                        for double_spend in double_spends.iter().skip(1) {
+                            // Validate the feerate threshold is passed for all double spends
+                            self.validate_double_spending_transaction(transaction, double_spend)?;
+                        }
+                        // We apply consequences such as removal only after we fully validate against all double spends
+                        for double_spend in double_spends {
+                            self.remove_transaction(
+                                &double_spend.owner_id,
+                                true,
+                                TxRemovalReason::ReplacedByFee,
+                                format!("by {}", transaction.id()).as_str(),
+                            )?;
+                        }
+                        Ok(Some(removed))
+                    }
+                }
+            }
+
+            RbfPolicy::Mandatory => {
+                let double_spends = self.transaction_pool.get_double_spend_transaction_ids(transaction);
+                match double_spends.len() {
+                    0 => Err(RuleError::RejectRbfNoDoubleSpend),
+                    1 => {
+                        let removed = self.validate_double_spending_transaction(transaction, &double_spends[0])?.mtx.tx.clone();
+                        self.remove_transaction(
+                            &double_spends[0].owner_id,
+                            true,
+                            TxRemovalReason::ReplacedByFee,
+                            format!("by {}", transaction.id()).as_str(),
+                        )?;
+                        Ok(Some(removed))
+                    }
+                    _ => Err(RuleError::RejectRbfTooManyDoubleSpendingTransactions),
+                }
+            }
+        }
+    }
+
+    fn get_double_spend_feerate(&self, double_spend: &DoubleSpend) -> RuleResult<f64> {
+        let owner = self.transaction_pool.get_double_spend_owner(double_spend)?;
+        match
owner.mtx.calculated_feerate() { + Some(double_spend_feerate) => Ok(double_spend_feerate), + // Getting here is unexpected since a mempool owned tx should be populated with fee + // and mass at this stage but nonetheless we fail gracefully + None => Err(double_spend.into()), + } + } + + fn validate_double_spending_transaction<'a>( + &'a self, + transaction: &MutableTransaction, + double_spend: &DoubleSpend, + ) -> RuleResult<&'a MempoolTransaction> { + let owner = self.transaction_pool.get_double_spend_owner(double_spend)?; + if let (Some(transaction_feerate), Some(double_spend_feerate)) = + (transaction.calculated_feerate(), owner.mtx.calculated_feerate()) + { + if transaction_feerate > double_spend_feerate { + return Ok(owner); + } else { + return Err(double_spend.into()); + } + } + // Getting here is unexpected since both txs should be populated with + // fee and mass at this stage but nonetheless we fail gracefully + Err(double_spend.into()) + } +} diff --git a/mining/src/mempool/validate_and_insert_transaction.rs b/mining/src/mempool/validate_and_insert_transaction.rs index 591fa5c4aa..69e08019b6 100644 --- a/mining/src/mempool/validate_and_insert_transaction.rs +++ b/mining/src/mempool/validate_and_insert_transaction.rs @@ -1,10 +1,12 @@ +use std::sync::atomic::Ordering; + use crate::mempool::{ errors::{RuleError, RuleResult}, model::{ pool::Pool, - tx::{MempoolTransaction, TxRemovalReason}, + tx::{MempoolTransaction, TransactionPostValidation, TransactionPreValidation, TxRemovalReason}, }, - tx::{Orphan, Priority}, + tx::{Orphan, Priority, RbfPolicy}, Mempool, }; use kaspa_consensus_core::{ @@ -13,21 +15,21 @@ use kaspa_consensus_core::{ tx::{MutableTransaction, Transaction, TransactionId, TransactionOutpoint, UtxoEntry}, }; use kaspa_core::{debug, info}; -use std::sync::Arc; impl Mempool { pub(crate) fn pre_validate_and_populate_transaction( &self, consensus: &dyn ConsensusApi, mut transaction: MutableTransaction, - ) -> RuleResult { + rbf_policy: RbfPolicy, + ) -> RuleResult { self.validate_transaction_unacceptance(&transaction)?; - // Populate mass in the beginning, it will be used in multiple places throughout the validation and insertion. + // Populate mass and estimated_size in the beginning, it will be used in multiple places throughout the validation and insertion. transaction.calculated_compute_mass = Some(consensus.calculate_transaction_compute_mass(&transaction.tx)); self.validate_transaction_in_isolation(&transaction)?; - self.transaction_pool.check_double_spends(&transaction)?; + let feerate_threshold = self.get_replace_by_fee_constraint(&transaction, rbf_policy)?; self.populate_mempool_entries(&mut transaction); - Ok(transaction) + Ok(TransactionPreValidation { transaction, feerate_threshold }) } pub(crate) fn post_validate_and_insert_transaction( @@ -37,7 +39,8 @@ impl Mempool { transaction: MutableTransaction, priority: Priority, orphan: Orphan, - ) -> RuleResult>> { + rbf_policy: RbfPolicy, + ) -> RuleResult { let transaction_id = transaction.id(); // First check if the transaction was not already added to the mempool. @@ -46,39 +49,84 @@ impl Mempool { // concurrently. 
if self.transaction_pool.has(&transaction_id) { debug!("Transaction {0} is not post validated since already in the mempool", transaction_id); - return Ok(None); + return Err(RuleError::RejectDuplicate(transaction_id)); } self.validate_transaction_unacceptance(&transaction)?; - // Re-check double spends since validate_and_insert_transaction is no longer atomic - self.transaction_pool.check_double_spends(&transaction)?; - match validation_result { Ok(_) => {} Err(RuleError::RejectMissingOutpoint) => { if orphan == Orphan::Forbidden { return Err(RuleError::RejectDisallowedOrphan(transaction_id)); } + let _ = self.get_replace_by_fee_constraint(&transaction, rbf_policy)?; self.orphan_pool.try_add_orphan(consensus.get_virtual_daa_score(), transaction, priority)?; - return Ok(None); + return Ok(TransactionPostValidation::default()); } Err(err) => { return Err(err); } } + // Perform mempool in-context validations prior to possible RBF replacements self.validate_transaction_in_context(&transaction)?; + // Check double spends and try to remove them if the RBF policy requires it + let removed_transaction = self.execute_replace_by_fee(&transaction, rbf_policy)?; + + // + // Note: there exists a case below where `limit_transaction_count` returns an error signaling that + // this tx should be rejected due to mempool size limits (rather than evicting others). However, + // if this tx happened to be an RBF tx, it might have already caused an eviction in the line + // above. We choose to ignore this rare case for now, as it essentially means that even the increased + // feerate of the replacement tx is very low relative to the mempool overall. + // + // Before adding the transaction, check if there is room in the pool - self.transaction_pool.limit_transaction_count(1, &transaction)?.iter().try_for_each(|x| { - self.remove_transaction(x, true, TxRemovalReason::MakingRoom, format!(" for {}", transaction_id).as_str()) - })?; + let transaction_size = transaction.mempool_estimated_bytes(); + let txs_to_remove = self.transaction_pool.limit_transaction_count(&transaction, transaction_size)?; + if !txs_to_remove.is_empty() { + let transaction_pool_len_before = self.transaction_pool.len(); + for x in txs_to_remove.iter() { + self.remove_transaction(x, true, TxRemovalReason::MakingRoom, format!(" for {}", transaction_id).as_str())?; + // self.transaction_pool.limit_transaction_count(&transaction) returns the + // smallest prefix of `ready_transactions` (sorted by ascending fee-rate) + // that makes enough room for `transaction`, but since each call to `self.remove_transaction` + // also removes all transactions dependant on `x` we might already have sufficient space, so + // we constantly check the break condition. 
+ // + // Note that self.transaction_pool.len() < self.config.maximum_transaction_count means we have + // at least one available slot in terms of the count limit + if self.transaction_pool.len() < self.config.maximum_transaction_count + && self.transaction_pool.get_estimated_size() + transaction_size <= self.config.mempool_size_limit + { + break; + } + } + self.counters + .tx_evicted_counts + .fetch_add(transaction_pool_len_before.saturating_sub(self.transaction_pool.len()) as u64, Ordering::Relaxed); + } + + assert!( + self.transaction_pool.len() < self.config.maximum_transaction_count + && self.transaction_pool.get_estimated_size() + transaction_size <= self.config.mempool_size_limit, + "Transactions in mempool: {}, max: {}, mempool bytes size: {}, max: {}", + self.transaction_pool.len() + 1, + self.config.maximum_transaction_count, + self.transaction_pool.get_estimated_size() + transaction_size, + self.config.mempool_size_limit, + ); // Add the transaction to the mempool as a MempoolTransaction and return a clone of the embedded Arc - let accepted_transaction = - self.transaction_pool.add_transaction(transaction, consensus.get_virtual_daa_score(), priority)?.mtx.tx.clone(); - Ok(Some(accepted_transaction)) + let accepted_transaction = self + .transaction_pool + .add_transaction(transaction, consensus.get_virtual_daa_score(), priority, transaction_size)? + .mtx + .tx + .clone(); + Ok(TransactionPostValidation { removed: removed_transaction, accepted: Some(accepted_transaction) }) } /// Validates that the transaction wasn't already accepted into the DAG @@ -96,6 +144,7 @@ impl Mempool { if self.transaction_pool.has(&transaction_id) { return Err(RuleError::RejectDuplicate(transaction_id)); } + if !self.config.accept_non_standard { self.check_transaction_standard_in_isolation(transaction)?; } @@ -184,9 +233,26 @@ impl Mempool { // The one we just removed from the orphan pool. assert_eq!(transactions.len(), 1, "the list returned by remove_orphan is expected to contain exactly one transaction"); let transaction = transactions.pop().unwrap(); + let rbf_policy = Self::get_orphan_transaction_rbf_policy(transaction.priority); self.validate_transaction_unacceptance(&transaction.mtx)?; - self.transaction_pool.check_double_spends(&transaction.mtx)?; + let _ = self.get_replace_by_fee_constraint(&transaction.mtx, rbf_policy)?; Ok(transaction) } + + /// Returns the RBF policy to apply to an orphan/unorphaned transaction by inferring it from the transaction priority. + pub(crate) fn get_orphan_transaction_rbf_policy(priority: Priority) -> RbfPolicy { + // The RBF policy applied to an orphaned transaction is not recorded in the orphan pool + // but we can infer it from the priority: + // + // - high means a submitted tx via RPC which forbids RBF + // - low means a tx arrived via P2P which allows RBF + // + // Note that the RPC submit transaction replacement case, implying a mandatory RBF, forbids orphans + // so is excluded here. 
+ match priority { + Priority::High => RbfPolicy::Forbidden, + Priority::Low => RbfPolicy::Allowed, + } + } } diff --git a/mining/src/model/candidate_tx.rs b/mining/src/model/candidate_tx.rs index f1fdf7c71c..b8cc34cc4d 100644 --- a/mining/src/model/candidate_tx.rs +++ b/mining/src/model/candidate_tx.rs @@ -1,10 +1,11 @@ -use kaspa_consensus_core::tx::{MutableTransaction, Transaction}; +use crate::FeerateTransactionKey; +use kaspa_consensus_core::tx::Transaction; use std::sync::Arc; /// Transaction with additional metadata needed in order to be a candidate /// in the transaction selection algorithm #[derive(Clone, Debug, PartialEq, Eq)] -pub(crate) struct CandidateTransaction { +pub struct CandidateTransaction { /// The actual transaction pub tx: Arc, /// Populated fee @@ -14,9 +15,7 @@ pub(crate) struct CandidateTransaction { } impl CandidateTransaction { - pub(crate) fn from_mutable(tx: &MutableTransaction) -> Self { - let mass = tx.tx.mass(); - assert_ne!(mass, 0, "mass field is expected to be set when inserting to the mempool"); - Self { tx: tx.tx.clone(), calculated_fee: tx.calculated_fee.expect("fee is expected to be populated"), calculated_mass: mass } + pub fn from_key(key: FeerateTransactionKey) -> Self { + Self { tx: key.tx, calculated_fee: key.fee, calculated_mass: key.mass } } } diff --git a/mining/src/model/mod.rs b/mining/src/model/mod.rs index 3f17a50c81..dcec6f17f9 100644 --- a/mining/src/model/mod.rs +++ b/mining/src/model/mod.rs @@ -1,10 +1,11 @@ use kaspa_consensus_core::tx::TransactionId; use std::collections::HashSet; -pub(crate) mod candidate_tx; +pub mod candidate_tx; pub mod owner_txs; pub mod topological_index; pub mod topological_sort; +pub mod tx_insert; pub mod tx_query; /// A set of unique transaction ids diff --git a/mining/src/model/tx_insert.rs b/mining/src/model/tx_insert.rs new file mode 100644 index 0000000000..4c006fb991 --- /dev/null +++ b/mining/src/model/tx_insert.rs @@ -0,0 +1,14 @@ +use kaspa_consensus_core::tx::Transaction; +use std::sync::Arc; + +#[derive(Debug)] +pub struct TransactionInsertion { + pub removed: Option>, + pub accepted: Vec>, +} + +impl TransactionInsertion { + pub fn new(removed: Option>, accepted: Vec>) -> Self { + Self { removed, accepted } + } +} diff --git a/mining/src/monitor.rs b/mining/src/monitor.rs index 517bd82763..74449424c1 100644 --- a/mining/src/monitor.rs +++ b/mining/src/monitor.rs @@ -1,4 +1,5 @@ use super::MiningCounters; +use crate::manager::MiningManagerProxy; use kaspa_core::{ debug, info, task::{ @@ -13,6 +14,8 @@ use std::{sync::Arc, time::Duration}; const MONITOR: &str = "mempool-monitor"; pub struct MiningMonitor { + mining_manager: MiningManagerProxy, + // Counters counters: Arc, @@ -24,11 +27,12 @@ pub struct MiningMonitor { impl MiningMonitor { pub fn new( + mining_manager: MiningManagerProxy, counters: Arc, tx_script_cache_counters: Arc, tick_service: Arc, ) -> MiningMonitor { - MiningMonitor { counters, tx_script_cache_counters, tick_service } + MiningMonitor { mining_manager, counters, tx_script_cache_counters, tick_service } } pub async fn worker(self: &Arc) { @@ -62,6 +66,14 @@ impl MiningMonitor { delta.low_priority_tx_counts, delta.tx_accepted_counts, ); + let feerate_estimations = self.mining_manager.clone().get_realtime_feerate_estimations().await; + debug!("Realtime feerate estimations: {}", feerate_estimations); + } + if delta.tx_evicted_counts > 0 { + info!( + "Mempool stats: {} transactions were evicted from the mempool in favor of incoming higher feerate transactions", + 
delta.tx_evicted_counts + ); } if tx_script_cache_snapshot != last_tx_script_cache_snapshot { debug!( diff --git a/mining/src/testutils/coinbase_mock.rs b/mining/src/testutils/coinbase_mock.rs index 12e0905a8a..8d19c2fcd0 100644 --- a/mining/src/testutils/coinbase_mock.rs +++ b/mining/src/testutils/coinbase_mock.rs @@ -4,7 +4,6 @@ use kaspa_consensus_core::{ subnets::SUBNETWORK_ID_COINBASE, tx::{Transaction, TransactionOutput}, }; -use std::mem::size_of; const LENGTH_OF_BLUE_SCORE: usize = size_of::(); const LENGTH_OF_SUBSIDY: usize = size_of::(); diff --git a/mining/src/testutils/consensus_mock.rs b/mining/src/testutils/consensus_mock.rs index 94d774c428..28d3f58974 100644 --- a/mining/src/testutils/consensus_mock.rs +++ b/mining/src/testutils/consensus_mock.rs @@ -1,6 +1,9 @@ use super::coinbase_mock::CoinbaseManagerMock; use kaspa_consensus_core::{ - api::ConsensusApi, + api::{ + args::{TransactionValidationArgs, TransactionValidationBatchArgs}, + ConsensusApi, + }, block::{BlockTemplate, MutableBlock, TemplateBuildMode, TemplateTransactionSelector, VirtualStateApproxId}, coinbase::MinerData, constants::BLOCK_VERSION, @@ -16,7 +19,7 @@ use kaspa_consensus_core::{ utxo::utxo_collection::UtxoCollection, }; use kaspa_core::time::unix_now; -use kaspa_hashes::ZERO_HASH; +use kaspa_hashes::{Hash, ZERO_HASH}; use parking_lot::RwLock; use std::{collections::HashMap, sync::Arc}; @@ -83,7 +86,7 @@ impl ConsensusApi for ConsensusMock { let coinbase = coinbase_manager.expected_coinbase_transaction(miner_data.clone()); txs.insert(0, coinbase.tx); let now = unix_now(); - let hash_merkle_root = calc_hash_merkle_root(txs.iter()); + let hash_merkle_root = self.calc_transaction_hash_merkle_root(&txs, 0); let header = Header::new_finalized( BLOCK_VERSION, vec![], @@ -100,10 +103,10 @@ impl ConsensusApi for ConsensusMock { ); let mutable_block = MutableBlock::new(header, txs); - Ok(BlockTemplate::new(mutable_block, miner_data, coinbase.has_red_reward, now, 0, ZERO_HASH)) + Ok(BlockTemplate::new(mutable_block, miner_data, coinbase.has_red_reward, now, 0, ZERO_HASH, vec![])) } - fn validate_mempool_transaction(&self, mutable_tx: &mut MutableTransaction) -> TxResult<()> { + fn validate_mempool_transaction(&self, mutable_tx: &mut MutableTransaction, _: &TransactionValidationArgs) -> TxResult<()> { // If a predefined status was registered to simulate an error, return it right away if let Some(status) = self.statuses.read().get(&mutable_tx.id()) { if status.is_err() { @@ -130,20 +133,27 @@ impl ConsensusApi for ConsensusMock { // At this point we know all UTXO entries are populated, so we can safely calculate the fee let total_in: u64 = mutable_tx.entries.iter().map(|x| x.as_ref().unwrap().amount).sum(); let total_out: u64 = mutable_tx.tx.outputs.iter().map(|x| x.value).sum(); - let calculated_fee = total_in - total_out; mutable_tx .tx .set_mass(self.calculate_transaction_storage_mass(mutable_tx).unwrap() + mutable_tx.calculated_compute_mass.unwrap()); - mutable_tx.calculated_fee = Some(calculated_fee); + + if mutable_tx.calculated_fee.is_none() { + let calculated_fee = total_in - total_out; + mutable_tx.calculated_fee = Some(calculated_fee); + } Ok(()) } - fn validate_mempool_transactions_in_parallel(&self, transactions: &mut [MutableTransaction]) -> Vec> { - transactions.iter_mut().map(|x| self.validate_mempool_transaction(x)).collect() + fn validate_mempool_transactions_in_parallel( + &self, + transactions: &mut [MutableTransaction], + _: &TransactionValidationBatchArgs, + ) -> Vec> { + 
transactions.iter_mut().map(|x| self.validate_mempool_transaction(x, &Default::default())).collect() } fn populate_mempool_transactions_in_parallel(&self, transactions: &mut [MutableTransaction]) -> Vec> { - transactions.iter_mut().map(|x| self.validate_mempool_transaction(x)).collect() + transactions.iter_mut().map(|x| self.validate_mempool_transaction(x, &Default::default())).collect() } fn calculate_transaction_compute_mass(&self, transaction: &Transaction) -> u64 { @@ -170,4 +180,8 @@ impl ConsensusApi for ConsensusMock { let coinbase_manager = CoinbaseManagerMock::new(); Ok(coinbase_manager.modify_coinbase_payload(payload, miner_data)) } + + fn calc_transaction_hash_merkle_root(&self, txs: &[Transaction], _pov_daa_score: u64) -> Hash { + calc_hash_merkle_root(txs.iter(), false) + } } diff --git a/musl-toolchain/build.sh b/musl-toolchain/build.sh new file mode 100755 index 0000000000..b32314082b --- /dev/null +++ b/musl-toolchain/build.sh @@ -0,0 +1,96 @@ +#!/bin/bash + +PRESET_HASH_FILE="$HOME/x-tools/preset_hash" + +# Calculate the hash of the preset file +CURRENT_PRESET_HASH=$(sha256sum $GITHUB_WORKSPACE/musl-toolchain/preset.sh | awk '{print $1}') + +echo "Current preset hash: $CURRENT_PRESET_HASH" + +# Traverse to working directory +cd $GITHUB_WORKSPACE/musl-toolchain + +# Set the preset +source preset.sh + +# If the toolchain is not installed or the preset has changed or the preset hash file does not exist +if [ ! -d "$HOME/x-tools" ] || [ ! -f "$PRESET_HASH_FILE" ] || [ "$(cat $PRESET_HASH_FILE)" != "$CURRENT_PRESET_HASH" ]; then + # Install dependencies + sudo apt-get update + sudo apt-get install -y autoconf automake libtool libtool-bin unzip help2man python3.10-dev gperf bison flex texinfo gawk libncurses5-dev + + # Clone crosstool-ng + git clone https://github.com/crosstool-ng/crosstool-ng + + # Configure and build crosstool-ng + cd crosstool-ng + # Use version 1.26 + git checkout crosstool-ng-1.26.0 + ./bootstrap + ./configure --prefix=$HOME/ctng + make + make install + # Add crosstool-ng to PATH + export PATH=$HOME/ctng/bin:$PATH + + + + # Load toolchainc configuration + ct-ng $CTNG_PRESET + + # Build the toolchain + ct-ng build > build.log 2>&1 + + # Set status to the exit code of the build + status=$? + + # We store the log in a file because it bloats the screen too much + # on GitHub Actions. We print it only if the build fails. 
+ echo "Build result:" + if [ $status -eq 0 ]; then + echo "Build succeeded" + ls -la $HOME/x-tools + # Store the current hash of preset.sh after successful build + echo "$CURRENT_PRESET_HASH" > "$PRESET_HASH_FILE" + else + echo "Build failed, here's the log:" + cat .config + cat build.log + fi +fi + +# Update toolchain variables: C compiler, C++ compiler, linker, and archiver +export CC=$HOME/x-tools/$CTNG_PRESET/bin/$CTNG_PRESET-gcc +export CXX=$HOME/x-tools/$CTNG_PRESET/bin/$CTNG_PRESET-g++ +export LD=$HOME/x-tools/$CTNG_PRESET/bin/$CTNG_PRESET-ld +export AR=$HOME/x-tools/$CTNG_PRESET/bin/$CTNG_PRESET-ar + +# Exports for cc crate +# https://docs.rs/cc/latest/cc/#external-configuration-via-environment-variables +export RANLIB_x86_64_unknown_linux_musl=$HOME/x-tools/$CTNG_PRESET/bin/$CTNG_PRESET-ranlib +export CC_x86_64_unknown_linux_musl=$CC +export CXX_x86_64_unknown_linux_musl=$CXX +export AR_x86_64_unknown_linux_musl=$AR +export LD_x86_64_unknown_linux_musl=$LD + +# Set environment variables for static linking +export OPENSSL_STATIC=true +export RUSTFLAGS="-C link-arg=-static" + +# We specify the compiler that will invoke linker +export CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_LINKER=$CC + +# Add target +rustup target add x86_64-unknown-linux-musl + +# Install missing dependencies +cargo fetch --target x86_64-unknown-linux-musl + +# Patch missing include in librocksdb-sys-0.16.0+8.10.0. Credit: @supertypo +FILE_PATH=$(find $HOME/.cargo/registry/src/ -path "*/librocksdb-sys-0.16.0+8.10.0/*/offpeak_time_info.h") + +if [ -n "$FILE_PATH" ]; then + sed -i '1i #include <cstdint>' "$FILE_PATH" +else + echo "No such file for sed modification." +fi \ No newline at end of file diff --git a/musl-toolchain/preset.sh b/musl-toolchain/preset.sh new file mode 100755 index 0000000000..63e98a7685 --- /dev/null +++ b/musl-toolchain/preset.sh @@ -0,0 +1,4 @@ +#!/bin/bash +# Sets the preset that will be used by crosstool-ng +# Available presets can be fetched with: ct-ng list-samples +export CTNG_PRESET="x86_64-multilib-linux-musl" \ No newline at end of file diff --git a/notify/Cargo.toml b/notify/Cargo.toml index 09d3f58651..3ef23ff86e 100644 --- a/notify/Cargo.toml +++ b/notify/Cargo.toml @@ -34,6 +34,7 @@ thiserror.workspace = true triggered.workspace = true workflow-core.workspace = true workflow-log.workspace = true +workflow-serializer.workspace = true [dev-dependencies] criterion.workspace = true diff --git a/notify/src/address/tracker.rs b/notify/src/address/tracker.rs index f103b163cb..a2b1c64ddb 100644 --- a/notify/src/address/tracker.rs +++ b/notify/src/address/tracker.rs @@ -384,11 +384,11 @@ impl Inner { } } -/// Tracker of a set of [`Address`](kaspa_addresses::Address), indexing and counting registrations +/// Tracker of a set of [`Address`], indexing and counting registrations /// /// #### Implementation design /// -/// Each [`Address`](kaspa_addresses::Address) is stored internally as a [`ScriptPubKey`](kaspa_consensus_core::tx::ScriptPublicKey). +/// Each [`Address`] is stored internally as a [`ScriptPubKey`](kaspa_consensus_core::tx::ScriptPublicKey). +/// This prevents inter-network duplication and optimizes UTXOs filtering efficiency. /// /// But consequently the address network prefix gets lost and must be globally provided when querying for addresses by indexes. 
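
The tracker doc above is the key constraint for everything that queries addresses by index: scripts are network-neutral, so the network prefix must travel alongside any index-to-address lookup. A minimal sketch of that property, assuming kaspa_addresses and kaspa_txscript::pay_to_address_script (both already used by this patch in rothschild); the payload bytes are illustrative only:

    use kaspa_addresses::{Address, Prefix, Version};
    use kaspa_txscript::pay_to_address_script;

    fn main() {
        // The same 32-byte public key payload on two different networks...
        let payload = [7u8; 32];
        let mainnet = Address::new(Prefix::Mainnet, Version::PubKey, &payload);
        let testnet = Address::new(Prefix::Testnet, Version::PubKey, &payload);
        // ...maps to one and the same ScriptPublicKey. This is why the tracker
        // stores scripts (no inter-network duplication) and why a prefix must
        // be supplied when converting a tracked index back into an Address.
        assert_eq!(pay_to_address_script(&mainnet), pay_to_address_script(&testnet));
    }
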
diff --git a/notify/src/notifier.rs b/notify/src/notifier.rs index 220fd261be..6927ea1a13 100644 --- a/notify/src/notifier.rs +++ b/notify/src/notifier.rs @@ -75,8 +75,8 @@ pub type DynNotify = Arc>; /// /// - a vector of [`DynCollector`] /// - a vector of [`Subscriber`] -/// - a pool of [`Broadcaster`] -/// - a map of [`Listener`] +/// - a pool of `Broadcaster` +/// - a map of `Listener` /// /// Collectors and subscribers form the scaffold. They are provided to the ctor, are immutable and share its /// lifespan. Both do materialize a connection to the notifier _parents_, collectors for incoming notifications diff --git a/notify/src/scope.rs b/notify/src/scope.rs index 0d9e33544e..1fe7924711 100644 --- a/notify/src/scope.rs +++ b/notify/src/scope.rs @@ -3,6 +3,7 @@ use borsh::{BorshDeserialize, BorshSerialize}; use derive_more::Display; use kaspa_addresses::Address; use serde::{Deserialize, Serialize}; +use workflow_serializer::prelude::*; macro_rules! scope_enum { ($(#[$meta:meta])* $vis:vis enum $name:ident { @@ -53,9 +54,38 @@ impl Scope { } } +impl Serializer for Scope { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(Scope, self, writer)?; + Ok(()) + } +} + +impl Deserializer for Scope { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + load!(Scope, reader) + } +} + #[derive(Clone, Display, Debug, Default, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] pub struct BlockAddedScope {} +impl Serializer for BlockAddedScope { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + Ok(()) + } +} + +impl Deserializer for BlockAddedScope { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + Ok(Self {}) + } +} + #[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] pub struct VirtualChainChangedScope { pub include_accepted_transaction_ids: bool, @@ -73,12 +103,56 @@ impl std::fmt::Display for VirtualChainChangedScope { } } +impl Serializer for VirtualChainChangedScope { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(bool, &self.include_accepted_transaction_ids, writer)?; + Ok(()) + } +} + +impl Deserializer for VirtualChainChangedScope { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let include_accepted_transaction_ids = load!(bool, reader)?; + Ok(Self { include_accepted_transaction_ids }) + } +} + #[derive(Clone, Display, Debug, PartialEq, Eq, Default, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] pub struct FinalityConflictScope {} +impl Serializer for FinalityConflictScope { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + Ok(()) + } +} + +impl Deserializer for FinalityConflictScope { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + Ok(Self {}) + } +} + #[derive(Clone, Display, Debug, PartialEq, Eq, Default, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] pub struct FinalityConflictResolvedScope {} +impl Serializer for FinalityConflictResolvedScope { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + Ok(()) + } +} + +impl Deserializer for FinalityConflictResolvedScope { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + Ok(Self {}) + } +} + 
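
All of the scope serializers in this file follow one pattern: write a u16 version ahead of the payload so that later revisions can append fields without breaking older peers. A hedged sketch of how that plays out for a hypothetical scope that gains a field in version 2, assuming the writer/reader-generic trait shapes and the store!/load! macros from workflow_serializer as used throughout this file:

    use workflow_serializer::prelude::*;

    struct ExampleScope {
        flag: bool,  // shipped in v1
        limit: u64,  // hypothetical field added in v2
    }

    impl Serializer for ExampleScope {
        fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
            store!(u16, &2, writer)?; // bump the version when the layout grows
            store!(bool, &self.flag, writer)?;
            store!(u64, &self.limit, writer)?;
            Ok(())
        }
    }

    impl Deserializer for ExampleScope {
        fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
            let version = load!(u16, reader)?;
            let flag = load!(bool, reader)?;
            // A v1 writer never emitted `limit`; fall back to a default so
            // old payloads keep deserializing.
            let limit = if version >= 2 { load!(u64, reader)? } else { 0 };
            Ok(Self { flag, limit })
        }
    }
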
#[derive(Clone, Debug, Default, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] pub struct UtxosChangedScope { pub addresses: Vec<Address>
, @@ -109,14 +183,86 @@ impl UtxosChangedScope { } } +impl Serializer for UtxosChangedScope { + fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(Vec<Address>
, &self.addresses, writer)?; + Ok(()) + } +} + +impl Deserializer for UtxosChangedScope { + fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> { + let _version = load!(u16, reader)?; + let addresses = load!(Vec<Address>
, reader)?; + Ok(Self { addresses }) + } +} + #[derive(Clone, Display, Debug, Default, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] pub struct SinkBlueScoreChangedScope {} +impl Serializer for SinkBlueScoreChangedScope { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + Ok(()) + } +} + +impl Deserializer for SinkBlueScoreChangedScope { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + Ok(Self {}) + } +} + #[derive(Clone, Display, Debug, Default, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] pub struct VirtualDaaScoreChangedScope {} +impl Serializer for VirtualDaaScoreChangedScope { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + Ok(()) + } +} + +impl Deserializer for VirtualDaaScoreChangedScope { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + Ok(Self {}) + } +} + #[derive(Clone, Display, Debug, Default, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] pub struct PruningPointUtxoSetOverrideScope {} +impl Serializer for PruningPointUtxoSetOverrideScope { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + Ok(()) + } +} + +impl Deserializer for PruningPointUtxoSetOverrideScope { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + Ok(Self {}) + } +} + #[derive(Clone, Display, Debug, Default, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] pub struct NewBlockTemplateScope {} + +impl Serializer for NewBlockTemplateScope { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + Ok(()) + } +} + +impl Deserializer for NewBlockTemplateScope { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + Ok(Self {}) + } +} diff --git a/notify/src/subscription/mod.rs b/notify/src/subscription/mod.rs index 6cded477d8..ff468be74b 100644 --- a/notify/src/subscription/mod.rs +++ b/notify/src/subscription/mod.rs @@ -16,6 +16,7 @@ pub mod context; pub mod single; #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[borsh(use_discriminant = true)] pub enum Command { Start = 0, Stop = 1, diff --git a/protocol/flows/src/flow_context.rs b/protocol/flows/src/flow_context.rs index 3d365c54f9..14d4168aca 100644 --- a/protocol/flows/src/flow_context.rs +++ b/protocol/flows/src/flow_context.rs @@ -25,8 +25,8 @@ use kaspa_core::{ }; use kaspa_core::{time::unix_now, warn}; use kaspa_hashes::Hash; -use kaspa_mining::manager::MiningManagerProxy; use kaspa_mining::mempool::tx::{Orphan, Priority}; +use kaspa_mining::{manager::MiningManagerProxy, mempool::tx::RbfPolicy}; use kaspa_notify::notifier::Notify; use kaspa_p2p_lib::{ common::ProtocolError, @@ -618,16 +618,48 @@ impl FlowContext { transaction: Transaction, orphan: Orphan, ) -> Result<(), ProtocolError> { - let accepted_transactions = - self.mining_manager().clone().validate_and_insert_transaction(consensus, transaction, Priority::High, orphan).await?; + let transaction_insertion = self + .mining_manager() + .clone() + .validate_and_insert_transaction(consensus, transaction, Priority::High, orphan, RbfPolicy::Forbidden) + .await?; self.broadcast_transactions( - accepted_transactions.iter().map(|x| x.id()), + transaction_insertion.accepted.iter().map(|x| x.id()), false, // RPC 
transactions are considered high priority, so we don't want to throttle them ) .await; Ok(()) } + /// Replaces the rpc-submitted transaction into the mempool and propagates it to peers. + /// + /// Returns the removed mempool transaction on successful replace by fee. + /// + /// Transactions submitted through rpc are considered high priority. This definition does not affect the tx selection algorithm + /// but only changes how we manage the lifetime of the tx. A high-priority tx does not expire and is repeatedly rebroadcasted to + /// peers + pub async fn submit_rpc_transaction_replacement( + &self, + consensus: &ConsensusProxy, + transaction: Transaction, + ) -> Result, ProtocolError> { + let transaction_insertion = self + .mining_manager() + .clone() + .validate_and_insert_transaction(consensus, transaction, Priority::High, Orphan::Forbidden, RbfPolicy::Mandatory) + .await?; + self.broadcast_transactions( + transaction_insertion.accepted.iter().map(|x| x.id()), + false, // RPC transactions are considered high priority, so we don't want to throttle them + ) + .await; + // The combination of args above of Orphan::Forbidden and RbfPolicy::Mandatory should always result + // in a removed transaction returned, however we prefer failing gracefully in case of future internal mempool changes + transaction_insertion.removed.ok_or(ProtocolError::Other( + "Replacement transaction was actually accepted but the *replaced* transaction was not returned from the mempool", + )) + } + /// Returns true if the time has come for running the task cleaning mempool transactions. async fn should_run_mempool_scanning_task(&self) -> bool { self.transactions_spread.write().await.should_run_mempool_scanning_task() diff --git a/protocol/flows/src/flowcontext/orphans.rs b/protocol/flows/src/flowcontext/orphans.rs index 75c223caa6..f18649e558 100644 --- a/protocol/flows/src/flowcontext/orphans.rs +++ b/protocol/flows/src/flowcontext/orphans.rs @@ -78,7 +78,7 @@ impl OrphanBlocksPool { if self.orphans.contains_key(&orphan_hash) { return None; } - + orphan_block.asses_for_cache()?; let (roots, orphan_ancestors) = match self.get_orphan_roots(consensus, orphan_block.header.direct_parents().iter().copied().collect()).await { FindRootsOutput::Roots(roots, orphan_ancestors) => (roots, orphan_ancestors), @@ -166,7 +166,7 @@ impl OrphanBlocksPool { } } else { let status = consensus.async_get_block_status(current).await; - if status.is_none_or(|s| s.is_header_only()) { + if status.is_none_or_ex(|s| s.is_header_only()) { // Block is not in the orphan pool nor does its body exist consensus-wise, so it is a root roots.push(current); } @@ -193,7 +193,8 @@ impl OrphanBlocksPool { if let Occupied(entry) = self.orphans.entry(orphan_hash) { let mut processable = true; for p in entry.get().block.header.direct_parents().iter().copied() { - if !processing.contains_key(&p) && consensus.async_get_block_status(p).await.is_none_or(|s| s.is_header_only()) { + if !processing.contains_key(&p) && consensus.async_get_block_status(p).await.is_none_or_ex(|s| s.is_header_only()) + { processable = false; break; } @@ -249,7 +250,7 @@ impl OrphanBlocksPool { let mut processable = true; for parent in block.block.header.direct_parents().iter().copied() { if self.orphans.contains_key(&parent) - || consensus.async_get_block_status(parent).await.is_none_or(|status| status.is_header_only()) + || consensus.async_get_block_status(parent).await.is_none_or_ex(|status| status.is_header_only()) { processable = false; break; diff --git 
a/protocol/flows/src/flowcontext/transactions.rs b/protocol/flows/src/flowcontext/transactions.rs index d3112f0af0..110b378b70 100644 --- a/protocol/flows/src/flowcontext/transactions.rs +++ b/protocol/flows/src/flowcontext/transactions.rs @@ -73,7 +73,7 @@ impl TransactionsSpread { /// within transaction Inv messages. /// /// The broadcast itself may happen only during a subsequent call to this function since it is done at most - /// every [`BROADCAST_INTERVAL`] milliseconds or when the queue length is larger than the Inv message + /// every `BROADCAST_INTERVAL` milliseconds or when the queue length is larger than the Inv message /// capacity. /// /// _GO-KASPAD: EnqueueTransactionIDsForPropagation_ diff --git a/protocol/flows/src/v5/txrelay/flow.rs b/protocol/flows/src/v5/txrelay/flow.rs index 6a177e57ff..af7e2b6c7d 100644 --- a/protocol/flows/src/v5/txrelay/flow.rs +++ b/protocol/flows/src/v5/txrelay/flow.rs @@ -10,7 +10,7 @@ use kaspa_mining::{ errors::MiningManagerError, mempool::{ errors::RuleError, - tx::{Orphan, Priority}, + tx::{Orphan, Priority, RbfPolicy}, }, model::tx_query::TransactionQuery, P2pTxCountSample, @@ -219,7 +219,7 @@ impl RelayTransactionsFlow { .ctx .mining_manager() .clone() - .validate_and_insert_transaction_batch(&consensus, transactions, Priority::Low, Orphan::Allowed) + .validate_and_insert_transaction_batch(&consensus, transactions, Priority::Low, Orphan::Allowed, RbfPolicy::Allowed) .await; for res in insert_results.iter() { diff --git a/protocol/p2p/build.rs b/protocol/p2p/build.rs index b41fe87f53..b4aea69394 100644 --- a/protocol/p2p/build.rs +++ b/protocol/p2p/build.rs @@ -5,7 +5,7 @@ fn main() { tonic_build::configure() .build_server(true) .build_client(true) - .compile(&proto_files[0..1], dirs) + .compile_protos(&proto_files[0..1], dirs) .unwrap_or_else(|e| panic!("protobuf compilation failed, error: {e}")); // recompile protobufs only if any of the proto files changes. for file in proto_files { diff --git a/protocol/p2p/src/common.rs b/protocol/p2p/src/common.rs index d32552cd8d..a0b314fb6d 100644 --- a/protocol/p2p/src/common.rs +++ b/protocol/p2p/src/common.rs @@ -100,6 +100,10 @@ impl ProtocolError { pub fn from_reject_message(reason: String) -> Self { if reason == LOOPBACK_CONNECTION_MESSAGE || reason == DUPLICATE_CONNECTION_MESSAGE { ProtocolError::IgnorableReject(reason) + } else if reason.contains("cannot find full block") { + let hint = "Hint: If this error persists, it might be due to the other peer having pruned block data after syncing headers and UTXOs. In such a case, you may need to reset the database."; + let detailed_reason = format!("{}. 
{}", reason, hint); + ProtocolError::Rejected(detailed_reason) } else { ProtocolError::Rejected(reason) } diff --git a/protocol/p2p/src/convert/net_address.rs b/protocol/p2p/src/convert/net_address.rs index 5a2ffec0e5..c525300ef8 100644 --- a/protocol/p2p/src/convert/net_address.rs +++ b/protocol/p2p/src/convert/net_address.rs @@ -1,7 +1,4 @@ -use std::{ - mem::size_of, - net::{IpAddr, Ipv4Addr, Ipv6Addr}, -}; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use super::error::ConversionError; use crate::pb as protowire; diff --git a/protocol/p2p/src/core/connection_handler.rs b/protocol/p2p/src/core/connection_handler.rs index a8ec431e42..54d387043c 100644 --- a/protocol/p2p/src/core/connection_handler.rs +++ b/protocol/p2p/src/core/connection_handler.rs @@ -9,7 +9,7 @@ use kaspa_core::{debug, info}; use kaspa_utils::networking::NetAddress; use kaspa_utils_tower::{ counters::TowerConnectionCounters, - middleware::{measure_request_body_size_layer, CountBytesBody, MapResponseBodyLayer, ServiceBuilder}, + middleware::{BodyExt, CountBytesBody, MapRequestBodyLayer, MapResponseBodyLayer, ServiceBuilder}, }; use std::net::ToSocketAddrs; use std::pin::Pin; @@ -20,7 +20,6 @@ use tokio::sync::mpsc::{channel as mpsc_channel, Sender as MpscSender}; use tokio::sync::oneshot::{channel as oneshot_channel, Sender as OneshotSender}; use tokio_stream::wrappers::ReceiverStream; use tokio_stream::StreamExt; -use tonic::codegen::Body; use tonic::transport::{Error as TonicError, Server as TonicServer}; use tonic::{Request, Response, Status as TonicStatus, Streaming}; @@ -80,7 +79,7 @@ impl ConnectionHandler { // TODO: check whether we should set tcp_keepalive let serve_result = TonicServer::builder() - .layer(measure_request_body_size_layer(bytes_rx, |b| b)) + .layer(MapRequestBodyLayer::new(move |body| CountBytesBody::new(body, bytes_rx.clone()).boxed_unsync())) .layer(MapResponseBodyLayer::new(move |body| CountBytesBody::new(body, bytes_tx.clone()))) .add_service(proto_server) .serve_with_shutdown(serve_address.into(), termination_receiver.map(drop)) @@ -110,9 +109,7 @@ impl ConnectionHandler { let channel = ServiceBuilder::new() .layer(MapResponseBodyLayer::new(move |body| CountBytesBody::new(body, self.counters.bytes_rx.clone()))) - .layer(measure_request_body_size_layer(self.counters.bytes_tx.clone(), |body| { - body.map_err(|e| tonic::Status::from_error(Box::new(e))).boxed_unsync() - })) + .layer(MapRequestBodyLayer::new(move |body| CountBytesBody::new(body, self.counters.bytes_tx.clone()).boxed_unsync())) .service(channel); let mut client = ProtoP2pClient::new(channel) diff --git a/protocol/p2p/src/echo.rs b/protocol/p2p/src/echo.rs index ed03db2044..07a26aac6b 100644 --- a/protocol/p2p/src/echo.rs +++ b/protocol/p2p/src/echo.rs @@ -96,7 +96,7 @@ fn build_dummy_version_message() -> VersionMessage { services: 0, timestamp: unix_now() as i64, address: None, - id: Vec::from(Uuid::new_v4().as_ref()), + id: Vec::from(Uuid::new_v4().as_bytes()), user_agent: String::new(), disable_relay_tx: false, subnetwork_id: None, diff --git a/rothschild/src/main.rs b/rothschild/src/main.rs index bbabca71b7..35d08493bb 100644 --- a/rothschild/src/main.rs +++ b/rothschild/src/main.rs @@ -13,15 +13,18 @@ use kaspa_consensus_core::{ use kaspa_core::{info, kaspad_env::version, time::unix_now, warn}; use kaspa_grpc_client::{ClientPool, GrpcClient}; use kaspa_notify::subscription::context::SubscriptionContext; -use kaspa_rpc_core::{api::rpc::RpcApi, notify::mode::NotificationMode}; +use kaspa_rpc_core::{api::rpc::RpcApi, 
notify::mode::NotificationMode, RpcUtxoEntry}; use kaspa_txscript::pay_to_address_script; use parking_lot::Mutex; use rayon::prelude::*; -use secp256k1::{rand::thread_rng, Keypair}; +use secp256k1::{ + rand::{thread_rng, Rng}, + Keypair, +}; use tokio::time::{interval, MissedTickBehavior}; const DEFAULT_SEND_AMOUNT: u64 = 10 * SOMPI_PER_KASPA; -const FEE_PER_MASS: u64 = 10; +const FEE_RATE: u64 = 10; const MILLIS_PER_TICK: u64 = 10; const ADDRESS_PREFIX: Prefix = Prefix::Testnet; const ADDRESS_VERSION: Version = Version::PubKey; @@ -40,6 +43,9 @@ pub struct Args { pub rpc_server: String, pub threads: u8, pub unleashed: bool, + pub addr: Option, + pub priority_fee: u64, + pub randomize_fee: bool, } impl Args { @@ -51,6 +57,9 @@ impl Args { rpc_server: m.get_one::("rpcserver").cloned().unwrap_or("localhost:16210".to_owned()), threads: m.get_one::("threads").cloned().unwrap(), unleashed: m.get_one::("unleashed").cloned().unwrap_or(false), + addr: m.get_one::("addr").cloned(), + priority_fee: m.get_one::("priority-fee").cloned().unwrap_or(0), + randomize_fee: m.get_one::("randomize-fee").cloned().unwrap_or(false), } } } @@ -85,6 +94,25 @@ pub fn cli() -> Command { .help("The number of threads to use for TX generation. Set to 0 to use 1 thread per core. Default is 2."), ) .arg(Arg::new("unleashed").long("unleashed").action(ArgAction::SetTrue).hide(true).help("Allow higher TPS")) + .arg(Arg::new("addr").long("to-addr").short('a').value_name("addr").help("address to send to")) + .arg( + Arg::new("priority-fee") + .long("priority-fee") + .short('f') + .value_name("priority-fee") + .default_value("0") + .value_parser(clap::value_parser!(u64)) + .help("Transaction priority fee"), + ) + .arg( + Arg::new("randomize-fee") + .long("randomize-fee") + .short('r') + .value_name("randomize-fee") + .action(ArgAction::SetTrue) + .default_value("false") + .help("Randomize transaction priority fee"), + ) } async fn new_rpc_client(subscription_context: &SubscriptionContext, address: &str) -> GrpcClient { @@ -111,6 +139,11 @@ struct ClientPoolArg { utxos_len: usize, } +struct TxsFeeConfig { + priority_fee: u64, + randomize_fee: bool, +} + #[tokio::main] async fn main() { kaspa_core::log::init_logger(None, ""); @@ -150,9 +183,31 @@ async fn main() { let kaspa_addr = Address::new(ADDRESS_PREFIX, ADDRESS_VERSION, &schnorr_key.x_only_public_key().0.serialize()); + let kaspa_to_addr = args.addr.as_ref().map_or_else(|| kaspa_addr.clone(), |addr_str| Address::try_from(addr_str.clone()).unwrap()); + + let fee_config = TxsFeeConfig { priority_fee: args.priority_fee, randomize_fee: args.randomize_fee }; + rayon::ThreadPoolBuilder::new().num_threads(args.threads as usize).build_global().unwrap(); - info!("Using Rothschild with private key {} and address {}", schnorr_key.display_secret(), String::from(&kaspa_addr)); + let mut log_message = format!( + "Using Rothschild with:\n\ + \tprivate key: {}\n\ + \tfrom address: {}", + schnorr_key.display_secret(), + String::from(&kaspa_addr) + ); + if args.addr.is_some() { + log_message.push_str(&format!("\n\tto address: {}", String::from(&kaspa_to_addr))); + } + if args.priority_fee != 0 { + log_message.push_str(&format!( + "\n\tpriority fee: {} SOMPS {}", + fee_config.priority_fee, + if fee_config.randomize_fee { "[randomize]" } else { "" } + )); + } + info!("{}", log_message); + let info = rpc_client.get_block_dag_info().await.unwrap(); let coinbase_maturity = match info.network.suffix { Some(11) => TESTNET11_PARAMS.coinbase_maturity, @@ -249,13 +304,14 @@ async fn main() { let 
has_funds = maybe_send_tx( txs_to_send, &tx_sender, - kaspa_addr.clone(), + kaspa_to_addr.clone(), &mut utxos, &mut pending, schnorr_key, stats.clone(), maximize_inputs, &mut next_available_utxo_index, + &fee_config, ) .await; if !has_funds { @@ -323,7 +379,7 @@ async fn populate_pending_outpoints_from_mempool( for entry in entries { for entry in entry.sending { for input in entry.transaction.inputs { - pending_outpoints.insert(input.previous_outpoint, now); + pending_outpoints.insert(input.previous_outpoint.into(), now); } } } @@ -337,20 +393,20 @@ async fn fetch_spendable_utxos( ) -> Vec<(TransactionOutpoint, UtxoEntry)> { let resp = rpc_client.get_utxos_by_addresses(vec![kaspa_addr]).await.unwrap(); let dag_info = rpc_client.get_block_dag_info().await.unwrap(); - let mut utxos = Vec::with_capacity(resp.len()); - for resp_entry in resp - .into_iter() - .filter(|resp_entry| is_utxo_spendable(&resp_entry.utxo_entry, dag_info.virtual_daa_score, coinbase_maturity)) + + let mut utxos = resp.into_iter() + .filter(|entry| { + is_utxo_spendable(&entry.utxo_entry, dag_info.virtual_daa_score, coinbase_maturity) + }) + .map(|entry| (TransactionOutpoint::from(entry.outpoint), UtxoEntry::from(entry.utxo_entry))) // Eliminates UTXOs we already tried to spend so we don't try to spend them again in this period - .filter(|utxo| !pending.contains_key(&utxo.outpoint)) - { - utxos.push((resp_entry.outpoint, resp_entry.utxo_entry)); - } + .filter(|(outpoint,_)| !pending.contains_key(outpoint)) + .collect::>(); utxos.sort_by(|a, b| b.1.amount.cmp(&a.1.amount)); utxos } -fn is_utxo_spendable(entry: &UtxoEntry, virtual_daa_score: u64, coinbase_maturity: u64) -> bool { +fn is_utxo_spendable(entry: &RpcUtxoEntry, virtual_daa_score: u64, coinbase_maturity: u64) -> bool { let needed_confs = if !entry.is_coinbase { 10 } else { @@ -369,6 +425,7 @@ async fn maybe_send_tx( stats: Arc>, maximize_inputs: bool, next_available_utxo_index: &mut usize, + fee_config: &TxsFeeConfig, ) -> bool { let num_outs = if maximize_inputs { 1 } else { 2 }; @@ -377,7 +434,7 @@ async fn maybe_send_tx( let selected_utxos_groups = (0..txs_to_send) .map(|_| { let (selected_utxos, selected_amount) = - select_utxos(utxos, DEFAULT_SEND_AMOUNT, num_outs, maximize_inputs, next_available_utxo_index); + select_utxos(utxos, DEFAULT_SEND_AMOUNT, num_outs, maximize_inputs, next_available_utxo_index, fee_config); if selected_amount == 0 { return None; } @@ -438,7 +495,7 @@ fn clean_old_pending_outpoints(pending: &mut HashMap) } fn required_fee(num_utxos: usize, num_outs: u64) -> u64 { - FEE_PER_MASS * estimated_mass(num_utxos, num_outs) + FEE_RATE * estimated_mass(num_utxos, num_outs) } fn estimated_mass(num_utxos: usize, num_outs: u64) -> u64 { @@ -473,10 +530,12 @@ fn select_utxos( num_outs: u64, maximize_utxos: bool, next_available_utxo_index: &mut usize, + fee_config: &TxsFeeConfig, ) -> (Vec<(TransactionOutpoint, UtxoEntry)>, u64) { const MAX_UTXOS: usize = 84; let mut selected_amount: u64 = 0; let mut selected = Vec::new(); + let mut rng = thread_rng(); while next_available_utxo_index < &mut utxos.len() { let (outpoint, entry) = utxos[*next_available_utxo_index].clone(); @@ -484,11 +543,16 @@ fn select_utxos( selected.push((outpoint, entry)); let fee = required_fee(selected.len(), num_outs); + let priority_fee = if fee_config.randomize_fee && fee_config.priority_fee > 0 { + rng.gen_range(0..fee_config.priority_fee) + } else { + fee_config.priority_fee + }; *next_available_utxo_index += 1; - if selected_amount >= min_amount + fee && 
(!maximize_utxos || selected.len() == MAX_UTXOS) { - return (selected, selected_amount - fee); + if selected_amount >= min_amount + fee + priority_fee && (!maximize_utxos || selected.len() == MAX_UTXOS) { + return (selected, selected_amount - fee - priority_fee); } if selected.len() > MAX_UTXOS { diff --git a/rpc/core/Cargo.toml b/rpc/core/Cargo.toml index cfa4895f25..f2e9f72f9e 100644 --- a/rpc/core/Cargo.toml +++ b/rpc/core/Cargo.toml @@ -42,6 +42,7 @@ hex.workspace = true js-sys.workspace = true log.workspace = true paste.workspace = true +rand.workspace = true serde-wasm-bindgen.workspace = true serde.workspace = true smallvec.workspace = true @@ -49,10 +50,11 @@ thiserror.workspace = true uuid.workspace = true wasm-bindgen.workspace = true workflow-core.workspace = true +workflow-serializer.workspace = true workflow-wasm.workspace = true [dev-dependencies] serde_json.workspace = true -[lints.clippy] -empty_docs = "allow" +[lints] +workspace = true diff --git a/rpc/core/src/api/connection.rs b/rpc/core/src/api/connection.rs new file mode 100644 index 0000000000..fba2aa71a0 --- /dev/null +++ b/rpc/core/src/api/connection.rs @@ -0,0 +1,11 @@ +//! +//! Generic connection trait representing a connection to a client (where available). +//! + +use std::sync::Arc; + +pub trait RpcConnection: Send + Sync { + fn id(&self) -> u64; +} + +pub type DynRpcConnection = Arc; diff --git a/rpc/core/src/api/ctl.rs b/rpc/core/src/api/ctl.rs index 49241e7d92..d7705127b9 100644 --- a/rpc/core/src/api/ctl.rs +++ b/rpc/core/src/api/ctl.rs @@ -1,3 +1,7 @@ +//! +//! Client-side RPC helper for handling connection and disconnection events. +//! + use crate::error::RpcResult; use std::sync::{Arc, Mutex}; use workflow_core::channel::Multiplexer; diff --git a/rpc/core/src/api/mod.rs b/rpc/core/src/api/mod.rs index 6bc968b46f..a75056a841 100644 --- a/rpc/core/src/api/mod.rs +++ b/rpc/core/src/api/mod.rs @@ -1,3 +1,8 @@ +//! +//! API module for the RPC server. Implements core RPC primitives. +//! + +pub mod connection; pub mod ctl; pub mod notifications; pub mod ops; diff --git a/rpc/core/src/api/notifications.rs b/rpc/core/src/api/notifications.rs index 6449f25c02..503af0de85 100644 --- a/rpc/core/src/api/notifications.rs +++ b/rpc/core/src/api/notifications.rs @@ -1,5 +1,8 @@ +//! +//! RPC notifications that can be sent to clients. +//! + use crate::model::message::*; -use borsh::{BorshDeserialize, BorshSerialize}; use derive_more::Display; use kaspa_notify::{ events::EventType, @@ -13,10 +16,11 @@ use kaspa_notify::{ use serde::{Deserialize, Serialize}; use std::sync::Arc; use wasm_bindgen::JsValue; +use workflow_serializer::prelude::*; use workflow_wasm::serde::to_value; full_featured! 
{ -#[derive(Clone, Debug, Display, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[derive(Clone, Debug, Display, Serialize, Deserialize)] pub enum Notification { #[display(fmt = "BlockAdded notification: block hash {}", "_0.block.header.hash")] BlockAdded(BlockAddedNotification), @@ -113,14 +117,92 @@ impl NotificationTrait for Notification { } } -#[cfg(test)] -mod test { - use super::*; +impl Serializer for Notification { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + match self { + Notification::BlockAdded(notification) => { + store!(u16, &0, writer)?; + serialize!(BlockAddedNotification, notification, writer)?; + } + Notification::VirtualChainChanged(notification) => { + store!(u16, &1, writer)?; + serialize!(VirtualChainChangedNotification, notification, writer)?; + } + Notification::FinalityConflict(notification) => { + store!(u16, &2, writer)?; + serialize!(FinalityConflictNotification, notification, writer)?; + } + Notification::FinalityConflictResolved(notification) => { + store!(u16, &3, writer)?; + serialize!(FinalityConflictResolvedNotification, notification, writer)?; + } + Notification::UtxosChanged(notification) => { + store!(u16, &4, writer)?; + serialize!(UtxosChangedNotification, notification, writer)?; + } + Notification::SinkBlueScoreChanged(notification) => { + store!(u16, &5, writer)?; + serialize!(SinkBlueScoreChangedNotification, notification, writer)?; + } + Notification::VirtualDaaScoreChanged(notification) => { + store!(u16, &6, writer)?; + serialize!(VirtualDaaScoreChangedNotification, notification, writer)?; + } + Notification::PruningPointUtxoSetOverride(notification) => { + store!(u16, &7, writer)?; + serialize!(PruningPointUtxoSetOverrideNotification, notification, writer)?; + } + Notification::NewBlockTemplate(notification) => { + store!(u16, &8, writer)?; + serialize!(NewBlockTemplateNotification, notification, writer)?; + } + } + Ok(()) + } +} - #[test] - fn test_notification_from_bytes() { - let bytes = &vec![6, 169, 167, 75, 2, 0, 0, 0, 0][..]; - let notification = Notification::try_from_slice(bytes); - println!("notification: {notification:?}"); +impl Deserializer for Notification { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + match load!(u16, reader)? 
{ + 0 => { + let notification = deserialize!(BlockAddedNotification, reader)?; + Ok(Notification::BlockAdded(notification)) + } + 1 => { + let notification = deserialize!(VirtualChainChangedNotification, reader)?; + Ok(Notification::VirtualChainChanged(notification)) + } + 2 => { + let notification = deserialize!(FinalityConflictNotification, reader)?; + Ok(Notification::FinalityConflict(notification)) + } + 3 => { + let notification = deserialize!(FinalityConflictResolvedNotification, reader)?; + Ok(Notification::FinalityConflictResolved(notification)) + } + 4 => { + let notification = deserialize!(UtxosChangedNotification, reader)?; + Ok(Notification::UtxosChanged(notification)) + } + 5 => { + let notification = deserialize!(SinkBlueScoreChangedNotification, reader)?; + Ok(Notification::SinkBlueScoreChanged(notification)) + } + 6 => { + let notification = deserialize!(VirtualDaaScoreChangedNotification, reader)?; + Ok(Notification::VirtualDaaScoreChanged(notification)) + } + 7 => { + let notification = deserialize!(PruningPointUtxoSetOverrideNotification, reader)?; + Ok(Notification::PruningPointUtxoSetOverride(notification)) + } + 8 => { + let notification = deserialize!(NewBlockTemplateNotification, reader)?; + Ok(Notification::NewBlockTemplate(notification)) + } + _ => Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "Invalid variant")), + } } } diff --git a/rpc/core/src/api/ops.rs b/rpc/core/src/api/ops.rs index a02fbf7462..26ca356eb0 100644 --- a/rpc/core/src/api/ops.rs +++ b/rpc/core/src/api/ops.rs @@ -1,119 +1,141 @@ +//! +//! RPC Operations used to identify RPC methods during transport and in various RPC-related macros. +//! + use borsh::{BorshDeserialize, BorshSerialize}; use kaspa_notify::events::EventType; use serde::{Deserialize, Serialize}; use workflow_core::enums::Describe; -/// Rpc Api version (4 x short values); First short is reserved. -/// The version format is as follows: `[reserved, major, minor, patch]`. -/// The difference in the major version value indicates breaking binary API changes -/// (i.e. changes in non-versioned model data structures) -/// If such change occurs, BorshRPC-client should refuse to connect to the -/// server and should request a client-side upgrade. JsonRPC-client may opt-in to -/// continue interop, but data structures should handle mutations by pre-filtering -/// or using Serde attributes. This applies only to RPC infrastructure that uses internal -/// data structures and does not affect gRPC. gRPC should issue and handle its -/// own versioning. -pub const RPC_API_VERSION: [u16; 4] = [0, 1, 0, 0]; +/// API version. Change in this value should result +/// in the client refusing to connect. +pub const RPC_API_VERSION: u16 = 1; +/// API revision. Change in this value denotes +/// backwards-compatible changes. 
+pub const RPC_API_REVISION: u16 = 0; #[derive(Describe, Clone, Copy, Debug, PartialEq, Eq, Hash, BorshSerialize, BorshDeserialize, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] +#[borsh(use_discriminant = true)] pub enum RpcApiOps { + NoOp = 0, + + // connection control (provisional) + Connect = 1, + Disconnect = 2, + + // subscription management + Subscribe = 3, + Unsubscribe = 4, + + // ~~~ + + // Subscription commands for starting/stopping notifications + NotifyBlockAdded = 10, + NotifyNewBlockTemplate = 11, + NotifyUtxosChanged = 12, + NotifyPruningPointUtxoSetOverride = 13, + NotifyFinalityConflict = 14, + NotifyFinalityConflictResolved = 15, // for uniformity purpose only since subscribing to NotifyFinalityConflict means receiving both FinalityConflict and FinalityConflictResolved + NotifyVirtualDaaScoreChanged = 16, + NotifyVirtualChainChanged = 17, + NotifySinkBlueScoreChanged = 18, + + // Notification ops required by wRPC + + // TODO: Remove these ops and use EventType as NotificationOps when workflow_rpc::server::interface::Interface + // will be generic over a MethodOps and NotificationOps instead of a single Ops param. + BlockAddedNotification = 60, + VirtualChainChangedNotification = 61, + FinalityConflictNotification = 62, + FinalityConflictResolvedNotification = 63, + UtxosChangedNotification = 64, + SinkBlueScoreChangedNotification = 65, + VirtualDaaScoreChangedNotification = 66, + PruningPointUtxoSetOverrideNotification = 67, + NewBlockTemplateNotification = 68, + + // RPC methods /// Ping the node to check if connection is alive - Ping = 0, + Ping = 110, /// Get metrics for consensus information and node performance - GetMetrics, + GetMetrics = 111, + /// Get system information (RAM available, number of cores, available file descriptors) + GetSystemInfo = 112, + /// Get current number of active TCP connections + GetConnections = 113, /// Get state information on the node - GetServerInfo, + GetServerInfo = 114, /// Get the current sync status of the node - GetSyncStatus, + GetSyncStatus = 115, /// Returns the network this Kaspad is connected to (Mainnet, Testnet) - GetCurrentNetwork, + GetCurrentNetwork = 116, /// Extracts a block out of the request message and attempts to add it to the DAG Returns an empty response or an error message - SubmitBlock, + SubmitBlock = 117, /// Returns a "template" by which a miner can mine a new block - GetBlockTemplate, + GetBlockTemplate = 118, /// Returns a list of all the addresses (IP, port) this Kaspad knows and a list of all addresses that are currently banned by this Kaspad - GetPeerAddresses, + GetPeerAddresses = 119, /// Returns the hash of the current selected tip block of the DAG - GetSink, + GetSink = 120, /// Get information about an entry in the node's mempool - GetMempoolEntry, + GetMempoolEntry = 121, /// Get a snapshot of the node's mempool - GetMempoolEntries, + GetMempoolEntries = 122, /// Returns a list of the peers currently connected to this Kaspad, along with some statistics on them - GetConnectedPeerInfo, + GetConnectedPeerInfo = 123, /// Instructs Kaspad to connect to a given IP address. - AddPeer, + AddPeer = 124, /// Extracts a transaction out of the request message and attempts to add it to the mempool Returns an empty response or an error message - SubmitTransaction, + SubmitTransaction = 125, /// Requests info on a block corresponding to a given block hash Returns block info if the block is known. 
- GetBlock, + GetBlock = 126, // - GetSubnetwork, + GetSubnetwork = 127, // - GetVirtualChainFromBlock, + GetVirtualChainFromBlock = 128, // - GetBlocks, + GetBlocks = 129, /// Returns the amount of blocks in the DAG - GetBlockCount, + GetBlockCount = 130, /// Returns info on the current state of the DAG - GetBlockDagInfo, + GetBlockDagInfo = 131, // - ResolveFinalityConflict, + ResolveFinalityConflict = 132, /// Instructs this node to shut down Returns an empty response or an error message - Shutdown, + Shutdown = 133, // - GetHeaders, + GetHeaders = 134, /// Get a list of available UTXOs for a given address - GetUtxosByAddresses, + GetUtxosByAddresses = 135, /// Get a balance for a given address - GetBalanceByAddress, + GetBalanceByAddress = 136, /// Get a balance for a number of addresses - GetBalancesByAddresses, + GetBalancesByAddresses = 137, // ? - GetSinkBlueScore, + GetSinkBlueScore = 138, /// Ban a specific peer by it's IP address - Ban, + Ban = 139, /// Unban a specific peer by it's IP address - Unban, + Unban = 140, /// Get generic node information - GetInfo, + GetInfo = 141, // - EstimateNetworkHashesPerSecond, + EstimateNetworkHashesPerSecond = 142, /// Get a list of mempool entries that belong to a specific address - GetMempoolEntriesByAddresses, + GetMempoolEntriesByAddresses = 143, /// Get current issuance supply - GetCoinSupply, + GetCoinSupply = 144, /// Get DAA Score timestamp estimate - GetDaaScoreTimestampEstimate, - - // Subscription commands for starting/stopping notifications - NotifyBlockAdded, - NotifyNewBlockTemplate, - NotifyUtxosChanged, - NotifyPruningPointUtxoSetOverride, - NotifyFinalityConflict, - NotifyFinalityConflictResolved, // for uniformity purpose only since subscribing to NotifyFinalityConflict means receiving both FinalityConflict and FinalityConflictResolved - NotifyVirtualDaaScoreChanged, - NotifyVirtualChainChanged, - NotifySinkBlueScoreChanged, - - // ~ - Subscribe, - Unsubscribe, - - // Notification ops required by wRPC - // TODO: Remove these ops and use EventType as NotificationOps when workflow_rpc::server::interface::Interface - // will be generic over a MethodOps and NotificationOps instead of a single Ops param. - BlockAddedNotification, - VirtualChainChangedNotification, - FinalityConflictNotification, - FinalityConflictResolvedNotification, - UtxosChangedNotification, - SinkBlueScoreChangedNotification, - VirtualDaaScoreChangedNotification, - PruningPointUtxoSetOverrideNotification, - NewBlockTemplateNotification, + GetDaaScoreTimestampEstimate = 145, + /// Extracts a transaction out of the request message and attempts to replace a matching transaction in the mempool with it, applying a mandatory Replace by Fee policy + SubmitTransactionReplacement = 146, + /// Fee estimation + GetFeeEstimate = 147, + /// Fee estimation (experimental) + GetFeeEstimateExperimental = 148, + /// Block color determination by iterating DAG. + GetCurrentBlockColor = 149, } impl RpcApiOps { diff --git a/rpc/core/src/api/rpc.rs b/rpc/core/src/api/rpc.rs index 36f8ef3085..cadc9e00cd 100644 --- a/rpc/core/src/api/rpc.rs +++ b/rpc/core/src/api/rpc.rs @@ -1,9 +1,12 @@ -//! The client API +//! +//! The main [`RpcApi`] trait that defines all RPC methods available in the Rusty Kaspa p2p node. //! //! Rpc = External RPC Service -//! All data provided by the RCP server can be trusted by the client -//! No data submitted by the client to the server can be trusted +//! All data provided by the RPC server can be trusted by the client +//! 
No data submitted by the client to the node can be trusted +//! +use crate::api::connection::DynRpcConnection; use crate::{model::*, notify::connection::ChannelConnection, RpcResult}; use async_trait::async_trait; use downcast::{downcast_sync, AnySync}; @@ -21,10 +24,32 @@ pub const MAX_SAFE_WINDOW_SIZE: u32 = 10_000; pub trait RpcApi: Sync + Send + AnySync { /// async fn ping(&self) -> RpcResult<()> { - self.ping_call(PingRequest {}).await?; + self.ping_call(None, PingRequest {}).await?; Ok(()) } - async fn ping_call(&self, request: PingRequest) -> RpcResult; + async fn ping_call(&self, connection: Option<&DynRpcConnection>, request: PingRequest) -> RpcResult; + + // --- + + async fn get_system_info(&self) -> RpcResult { + Ok(self.get_system_info_call(None, GetSystemInfoRequest {}).await?) + } + async fn get_system_info_call( + &self, + connection: Option<&DynRpcConnection>, + request: GetSystemInfoRequest, + ) -> RpcResult; + + // --- + + async fn get_connections(&self, include_profile_data: bool) -> RpcResult { + self.get_connections_call(None, GetConnectionsRequest { include_profile_data }).await + } + async fn get_connections_call( + &self, + connection: Option<&DynRpcConnection>, + request: GetConnectionsRequest, + ) -> RpcResult; // --- @@ -34,59 +59,100 @@ pub trait RpcApi: Sync + Send + AnySync { connection_metrics: bool, bandwidth_metrics: bool, consensus_metrics: bool, + storage_metrics: bool, + custom_metrics: bool, ) -> RpcResult { - self.get_metrics_call(GetMetricsRequest { process_metrics, connection_metrics, bandwidth_metrics, consensus_metrics }).await - } - async fn get_metrics_call(&self, request: GetMetricsRequest) -> RpcResult; + self.get_metrics_call( + None, + GetMetricsRequest { + process_metrics, + connection_metrics, + bandwidth_metrics, + consensus_metrics, + storage_metrics, + custom_metrics, + }, + ) + .await + } + async fn get_metrics_call( + &self, + connection: Option<&DynRpcConnection>, + request: GetMetricsRequest, + ) -> RpcResult; // get_info alternative that carries only version, network_id (full), is_synced, virtual_daa_score // these are the only variables needed to negotiate a wRPC connection (besides the wRPC handshake) async fn get_server_info(&self) -> RpcResult { - self.get_server_info_call(GetServerInfoRequest {}).await + self.get_server_info_call(None, GetServerInfoRequest {}).await } - async fn get_server_info_call(&self, request: GetServerInfoRequest) -> RpcResult; + async fn get_server_info_call( + &self, + connection: Option<&DynRpcConnection>, + request: GetServerInfoRequest, + ) -> RpcResult; // Get current sync status of the node (should be converted to a notification subscription) async fn get_sync_status(&self) -> RpcResult { - Ok(self.get_sync_status_call(GetSyncStatusRequest {}).await?.is_synced) + Ok(self.get_sync_status_call(None, GetSyncStatusRequest {}).await?.is_synced) } - async fn get_sync_status_call(&self, request: GetSyncStatusRequest) -> RpcResult; + async fn get_sync_status_call( + &self, + connection: Option<&DynRpcConnection>, + request: GetSyncStatusRequest, + ) -> RpcResult; // --- /// Requests the network the node is currently running against. 
async fn get_current_network(&self) -> RpcResult { - Ok(self.get_current_network_call(GetCurrentNetworkRequest {}).await?.network) + Ok(self.get_current_network_call(None, GetCurrentNetworkRequest {}).await?.network) } - async fn get_current_network_call(&self, request: GetCurrentNetworkRequest) -> RpcResult; + async fn get_current_network_call( + &self, + connection: Option<&DynRpcConnection>, + request: GetCurrentNetworkRequest, + ) -> RpcResult; /// Submit a block into the DAG. /// /// Blocks are generally expected to have been generated using the get_block_template call. - async fn submit_block(&self, block: RpcBlock, allow_non_daa_blocks: bool) -> RpcResult { - self.submit_block_call(SubmitBlockRequest::new(block, allow_non_daa_blocks)).await + async fn submit_block(&self, block: RpcRawBlock, allow_non_daa_blocks: bool) -> RpcResult { + self.submit_block_call(None, SubmitBlockRequest::new(block, allow_non_daa_blocks)).await } - async fn submit_block_call(&self, request: SubmitBlockRequest) -> RpcResult; + async fn submit_block_call( + &self, + connection: Option<&DynRpcConnection>, + request: SubmitBlockRequest, + ) -> RpcResult; /// Request a current block template. /// /// Callers are expected to solve the block template and submit it using the submit_block call. async fn get_block_template(&self, pay_address: RpcAddress, extra_data: RpcExtraData) -> RpcResult { - self.get_block_template_call(GetBlockTemplateRequest::new(pay_address, extra_data)).await + self.get_block_template_call(None, GetBlockTemplateRequest::new(pay_address, extra_data)).await } - async fn get_block_template_call(&self, request: GetBlockTemplateRequest) -> RpcResult; + async fn get_block_template_call( + &self, + connection: Option<&DynRpcConnection>, + request: GetBlockTemplateRequest, + ) -> RpcResult; /// Requests the list of known kaspad addresses in the current network (mainnet, testnet, etc.) async fn get_peer_addresses(&self) -> RpcResult { - self.get_peer_addresses_call(GetPeerAddressesRequest {}).await + self.get_peer_addresses_call(None, GetPeerAddressesRequest {}).await } - async fn get_peer_addresses_call(&self, request: GetPeerAddressesRequest) -> RpcResult; + async fn get_peer_addresses_call( + &self, + connection: Option<&DynRpcConnection>, + request: GetPeerAddressesRequest, + ) -> RpcResult; /// requests the hash of the current virtual's selected parent. async fn get_sink(&self) -> RpcResult { - self.get_sink_call(GetSinkRequest {}).await + self.get_sink_call(None, GetSinkRequest {}).await } - async fn get_sink_call(&self, request: GetSinkRequest) -> RpcResult; + async fn get_sink_call(&self, connection: Option<&DynRpcConnection>, request: GetSinkRequest) -> RpcResult; /// Requests information about a specific transaction in the mempool. async fn get_mempool_entry( @@ -96,53 +162,85 @@ pub trait RpcApi: Sync + Send + AnySync { filter_transaction_pool: bool, ) -> RpcResult { Ok(self - .get_mempool_entry_call(GetMempoolEntryRequest::new(transaction_id, include_orphan_pool, filter_transaction_pool)) + .get_mempool_entry_call(None, GetMempoolEntryRequest::new(transaction_id, include_orphan_pool, filter_transaction_pool)) .await? .mempool_entry) } - async fn get_mempool_entry_call(&self, request: GetMempoolEntryRequest) -> RpcResult; + async fn get_mempool_entry_call( + &self, + connection: Option<&DynRpcConnection>, + request: GetMempoolEntryRequest, + ) -> RpcResult; /// Requests information about all the transactions currently in the mempool. 
async fn get_mempool_entries(&self, include_orphan_pool: bool, filter_transaction_pool: bool) -> RpcResult> { Ok(self - .get_mempool_entries_call(GetMempoolEntriesRequest::new(include_orphan_pool, filter_transaction_pool)) + .get_mempool_entries_call(None, GetMempoolEntriesRequest::new(include_orphan_pool, filter_transaction_pool)) .await? .mempool_entries) } - async fn get_mempool_entries_call(&self, request: GetMempoolEntriesRequest) -> RpcResult; + async fn get_mempool_entries_call( + &self, + connection: Option<&DynRpcConnection>, + request: GetMempoolEntriesRequest, + ) -> RpcResult; /// requests information about all the p2p peers currently connected to this node. async fn get_connected_peer_info(&self) -> RpcResult { - self.get_connected_peer_info_call(GetConnectedPeerInfoRequest {}).await + self.get_connected_peer_info_call(None, GetConnectedPeerInfoRequest {}).await } - async fn get_connected_peer_info_call(&self, request: GetConnectedPeerInfoRequest) -> RpcResult; + async fn get_connected_peer_info_call( + &self, + connection: Option<&DynRpcConnection>, + request: GetConnectedPeerInfoRequest, + ) -> RpcResult; /// Adds a peer to the node's outgoing connection list. /// /// This will, in most cases, result in the node connecting to said peer. async fn add_peer(&self, peer_address: RpcContextualPeerAddress, is_permanent: bool) -> RpcResult<()> { - self.add_peer_call(AddPeerRequest::new(peer_address, is_permanent)).await?; + self.add_peer_call(None, AddPeerRequest::new(peer_address, is_permanent)).await?; Ok(()) } - async fn add_peer_call(&self, request: AddPeerRequest) -> RpcResult; + async fn add_peer_call(&self, connection: Option<&DynRpcConnection>, request: AddPeerRequest) -> RpcResult; /// Submits a transaction to the mempool. async fn submit_transaction(&self, transaction: RpcTransaction, allow_orphan: bool) -> RpcResult { - Ok(self.submit_transaction_call(SubmitTransactionRequest { transaction, allow_orphan }).await?.transaction_id) + Ok(self.submit_transaction_call(None, SubmitTransactionRequest { transaction, allow_orphan }).await?.transaction_id) + } + async fn submit_transaction_call( + &self, + connection: Option<&DynRpcConnection>, + request: SubmitTransactionRequest, + ) -> RpcResult; + + /// Submits a transaction replacement to the mempool, applying a mandatory Replace by Fee policy. + /// + /// Returns the ID of the inserted transaction and the transaction the submission replaced in the mempool. + async fn submit_transaction_replacement(&self, transaction: RpcTransaction) -> RpcResult { + self.submit_transaction_replacement_call(None, SubmitTransactionReplacementRequest { transaction }).await } - async fn submit_transaction_call(&self, request: SubmitTransactionRequest) -> RpcResult; + async fn submit_transaction_replacement_call( + &self, + connection: Option<&DynRpcConnection>, + request: SubmitTransactionReplacementRequest, + ) -> RpcResult; /// Requests information about a specific block. async fn get_block(&self, hash: RpcHash, include_transactions: bool) -> RpcResult { - Ok(self.get_block_call(GetBlockRequest::new(hash, include_transactions)).await?.block) + Ok(self.get_block_call(None, GetBlockRequest::new(hash, include_transactions)).await?.block) } - async fn get_block_call(&self, request: GetBlockRequest) -> RpcResult; + async fn get_block_call(&self, connection: Option<&DynRpcConnection>, request: GetBlockRequest) -> RpcResult; /// Requests information about a specific subnetwork. 
async fn get_subnetwork(&self, subnetwork_id: RpcSubnetworkId) -> RpcResult { - self.get_subnetwork_call(GetSubnetworkRequest::new(subnetwork_id)).await + self.get_subnetwork_call(None, GetSubnetworkRequest::new(subnetwork_id)).await } - async fn get_subnetwork_call(&self, request: GetSubnetworkRequest) -> RpcResult; + async fn get_subnetwork_call( + &self, + connection: Option<&DynRpcConnection>, + request: GetSubnetworkRequest, + ) -> RpcResult; /// Requests the virtual selected parent chain from some `start_hash` to this node's current virtual. async fn get_virtual_chain_from_block( @@ -150,11 +248,15 @@ pub trait RpcApi: Sync + Send + AnySync { start_hash: RpcHash, include_accepted_transaction_ids: bool, ) -> RpcResult { - self.get_virtual_chain_from_block_call(GetVirtualChainFromBlockRequest::new(start_hash, include_accepted_transaction_ids)) - .await + self.get_virtual_chain_from_block_call( + None, + GetVirtualChainFromBlockRequest::new(start_hash, include_accepted_transaction_ids), + ) + .await } async fn get_virtual_chain_from_block_call( &self, + connection: Option<&DynRpcConnection>, request: GetVirtualChainFromBlockRequest, ) -> RpcResult; @@ -165,61 +267,79 @@ pub trait RpcApi: Sync + Send + AnySync { include_blocks: bool, include_transactions: bool, ) -> RpcResult { - self.get_blocks_call(GetBlocksRequest::new(low_hash, include_blocks, include_transactions)).await + self.get_blocks_call(None, GetBlocksRequest::new(low_hash, include_blocks, include_transactions)).await } - async fn get_blocks_call(&self, request: GetBlocksRequest) -> RpcResult; + async fn get_blocks_call(&self, connection: Option<&DynRpcConnection>, request: GetBlocksRequest) -> RpcResult; /// Requests the current number of blocks in this node. /// /// Note that this number may decrease as pruning occurs. async fn get_block_count(&self) -> RpcResult { - self.get_block_count_call(GetBlockCountRequest {}).await + self.get_block_count_call(None, GetBlockCountRequest {}).await } - async fn get_block_count_call(&self, request: GetBlockCountRequest) -> RpcResult; + async fn get_block_count_call( + &self, + connection: Option<&DynRpcConnection>, + request: GetBlockCountRequest, + ) -> RpcResult; /// Requests general information about the current state of this node's DAG. async fn get_block_dag_info(&self) -> RpcResult { - self.get_block_dag_info_call(GetBlockDagInfoRequest {}).await + self.get_block_dag_info_call(None, GetBlockDagInfoRequest {}).await } - async fn get_block_dag_info_call(&self, request: GetBlockDagInfoRequest) -> RpcResult; + async fn get_block_dag_info_call( + &self, + connection: Option<&DynRpcConnection>, + request: GetBlockDagInfoRequest, + ) -> RpcResult; /// async fn resolve_finality_conflict(&self, finality_block_hash: RpcHash) -> RpcResult<()> { - self.resolve_finality_conflict_call(ResolveFinalityConflictRequest::new(finality_block_hash)).await?; + self.resolve_finality_conflict_call(None, ResolveFinalityConflictRequest::new(finality_block_hash)).await?; Ok(()) } async fn resolve_finality_conflict_call( &self, + connection: Option<&DynRpcConnection>, request: ResolveFinalityConflictRequest, ) -> RpcResult; /// Shuts down this node. 
async fn shutdown(&self) -> RpcResult<()> { - self.shutdown_call(ShutdownRequest {}).await?; + self.shutdown_call(None, ShutdownRequest {}).await?; Ok(()) } - async fn shutdown_call(&self, request: ShutdownRequest) -> RpcResult; + async fn shutdown_call(&self, connection: Option<&DynRpcConnection>, request: ShutdownRequest) -> RpcResult; /// Requests headers between the given `start_hash` and the current virtual, up to the given limit. async fn get_headers(&self, start_hash: RpcHash, limit: u64, is_ascending: bool) -> RpcResult> { - Ok(self.get_headers_call(GetHeadersRequest::new(start_hash, limit, is_ascending)).await?.headers) + Ok(self.get_headers_call(None, GetHeadersRequest::new(start_hash, limit, is_ascending)).await?.headers) } - async fn get_headers_call(&self, request: GetHeadersRequest) -> RpcResult; + async fn get_headers_call( + &self, + connection: Option<&DynRpcConnection>, + request: GetHeadersRequest, + ) -> RpcResult; /// Returns the total balance in unspent transactions towards a given address. /// /// This call is only available when this node was started with `--utxoindex`. async fn get_balance_by_address(&self, address: RpcAddress) -> RpcResult { - Ok(self.get_balance_by_address_call(GetBalanceByAddressRequest::new(address)).await?.balance) + Ok(self.get_balance_by_address_call(None, GetBalanceByAddressRequest::new(address)).await?.balance) } - async fn get_balance_by_address_call(&self, request: GetBalanceByAddressRequest) -> RpcResult; + async fn get_balance_by_address_call( + &self, + connection: Option<&DynRpcConnection>, + request: GetBalanceByAddressRequest, + ) -> RpcResult; /// async fn get_balances_by_addresses(&self, addresses: Vec) -> RpcResult> { - Ok(self.get_balances_by_addresses_call(GetBalancesByAddressesRequest::new(addresses)).await?.entries) + Ok(self.get_balances_by_addresses_call(None, GetBalancesByAddressesRequest::new(addresses)).await?.entries) } async fn get_balances_by_addresses_call( &self, + connection: Option<&DynRpcConnection>, request: GetBalancesByAddressesRequest, ) -> RpcResult; @@ -227,45 +347,54 @@ pub trait RpcApi: Sync + Send + AnySync { /// /// This call is only available when this node was started with `--utxoindex`. async fn get_utxos_by_addresses(&self, addresses: Vec) -> RpcResult> { - Ok(self.get_utxos_by_addresses_call(GetUtxosByAddressesRequest::new(addresses)).await?.entries) + Ok(self.get_utxos_by_addresses_call(None, GetUtxosByAddressesRequest::new(addresses)).await?.entries) } - async fn get_utxos_by_addresses_call(&self, request: GetUtxosByAddressesRequest) -> RpcResult; + async fn get_utxos_by_addresses_call( + &self, + connection: Option<&DynRpcConnection>, + request: GetUtxosByAddressesRequest, + ) -> RpcResult; /// Requests the blue score of the current selected parent of the virtual block. async fn get_sink_blue_score(&self) -> RpcResult { - Ok(self.get_sink_blue_score_call(GetSinkBlueScoreRequest {}).await?.blue_score) + Ok(self.get_sink_blue_score_call(None, GetSinkBlueScoreRequest {}).await?.blue_score) } - async fn get_sink_blue_score_call(&self, request: GetSinkBlueScoreRequest) -> RpcResult; + async fn get_sink_blue_score_call( + &self, + connection: Option<&DynRpcConnection>, + request: GetSinkBlueScoreRequest, + ) -> RpcResult; /// Bans the given ip. 
async fn ban(&self, ip: RpcIpAddress) -> RpcResult<()> { - self.ban_call(BanRequest::new(ip)).await?; + self.ban_call(None, BanRequest::new(ip)).await?; Ok(()) } - async fn ban_call(&self, request: BanRequest) -> RpcResult; + async fn ban_call(&self, connection: Option<&DynRpcConnection>, request: BanRequest) -> RpcResult; /// Unbans the given ip. async fn unban(&self, ip: RpcIpAddress) -> RpcResult<()> { - self.unban_call(UnbanRequest::new(ip)).await?; + self.unban_call(None, UnbanRequest::new(ip)).await?; Ok(()) } - async fn unban_call(&self, request: UnbanRequest) -> RpcResult; + async fn unban_call(&self, connection: Option<&DynRpcConnection>, request: UnbanRequest) -> RpcResult; /// Returns info about the node. - async fn get_info_call(&self, request: GetInfoRequest) -> RpcResult; async fn get_info(&self) -> RpcResult { - self.get_info_call(GetInfoRequest {}).await + self.get_info_call(None, GetInfoRequest {}).await } + async fn get_info_call(&self, connection: Option<&DynRpcConnection>, request: GetInfoRequest) -> RpcResult; /// async fn estimate_network_hashes_per_second(&self, window_size: u32, start_hash: Option) -> RpcResult { Ok(self - .estimate_network_hashes_per_second_call(EstimateNetworkHashesPerSecondRequest::new(window_size, start_hash)) + .estimate_network_hashes_per_second_call(None, EstimateNetworkHashesPerSecondRequest::new(window_size, start_hash)) .await? .network_hashes_per_second) } async fn estimate_network_hashes_per_second_call( &self, + connection: Option<&DynRpcConnection>, request: EstimateNetworkHashesPerSecondRequest, ) -> RpcResult; @@ -277,33 +406,69 @@ pub trait RpcApi: Sync + Send + AnySync { filter_transaction_pool: bool, ) -> RpcResult> { Ok(self - .get_mempool_entries_by_addresses_call(GetMempoolEntriesByAddressesRequest::new( - addresses, - include_orphan_pool, - filter_transaction_pool, - )) + .get_mempool_entries_by_addresses_call( + None, + GetMempoolEntriesByAddressesRequest::new(addresses, include_orphan_pool, filter_transaction_pool), + ) .await? 
.entries) } async fn get_mempool_entries_by_addresses_call( &self, + connection: Option<&DynRpcConnection>, request: GetMempoolEntriesByAddressesRequest, ) -> RpcResult; /// async fn get_coin_supply(&self) -> RpcResult { - self.get_coin_supply_call(GetCoinSupplyRequest {}).await + self.get_coin_supply_call(None, GetCoinSupplyRequest {}).await } - async fn get_coin_supply_call(&self, request: GetCoinSupplyRequest) -> RpcResult; + async fn get_coin_supply_call( + &self, + connection: Option<&DynRpcConnection>, + request: GetCoinSupplyRequest, + ) -> RpcResult; async fn get_daa_score_timestamp_estimate(&self, daa_scores: Vec) -> RpcResult> { - Ok(self.get_daa_score_timestamp_estimate_call(GetDaaScoreTimestampEstimateRequest { daa_scores }).await?.timestamps) + Ok(self.get_daa_score_timestamp_estimate_call(None, GetDaaScoreTimestampEstimateRequest { daa_scores }).await?.timestamps) } async fn get_daa_score_timestamp_estimate_call( &self, + connection: Option<&DynRpcConnection>, request: GetDaaScoreTimestampEstimateRequest, ) -> RpcResult; + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // Fee estimation API + + async fn get_fee_estimate(&self) -> RpcResult { + Ok(self.get_fee_estimate_call(None, GetFeeEstimateRequest {}).await?.estimate) + } + async fn get_fee_estimate_call( + &self, + connection: Option<&DynRpcConnection>, + request: GetFeeEstimateRequest, + ) -> RpcResult; + + async fn get_fee_estimate_experimental(&self, verbose: bool) -> RpcResult { + self.get_fee_estimate_experimental_call(None, GetFeeEstimateExperimentalRequest { verbose }).await + } + async fn get_fee_estimate_experimental_call( + &self, + connection: Option<&DynRpcConnection>, + request: GetFeeEstimateExperimentalRequest, + ) -> RpcResult; + + /// + async fn get_current_block_color(&self, hash: RpcHash) -> RpcResult { + Ok(self.get_current_block_color_call(None, GetCurrentBlockColorRequest { hash }).await?) + } + async fn get_current_block_color_call( + &self, + connection: Option<&DynRpcConnection>, + request: GetCurrentBlockColorRequest, + ) -> RpcResult; + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Notification API diff --git a/rpc/core/src/convert/block.rs b/rpc/core/src/convert/block.rs index 3f66b617f2..8cb0ab01e3 100644 --- a/rpc/core/src/convert/block.rs +++ b/rpc/core/src/convert/block.rs @@ -1,6 +1,8 @@ +//! 
Conversion of Block related types + use std::sync::Arc; -use crate::{RpcBlock, RpcError, RpcResult, RpcTransaction}; +use crate::{RpcBlock, RpcError, RpcRawBlock, RpcResult, RpcTransaction}; use kaspa_consensus_core::block::{Block, MutableBlock}; // ---------------------------------------------------------------------------- @@ -10,7 +12,7 @@ use kaspa_consensus_core::block::{Block, MutableBlock}; impl From<&Block> for RpcBlock { fn from(item: &Block) -> Self { Self { - header: (*item.header).clone(), + header: item.header.as_ref().into(), transactions: item.transactions.iter().map(RpcTransaction::from).collect(), // TODO: Implement a populating process inspired from kaspad\app\rpc\rpccontext\verbosedata.go verbose_data: None, @@ -18,28 +20,61 @@ impl From<&Block> for RpcBlock { } } +impl From<&Block> for RpcRawBlock { + fn from(item: &Block) -> Self { + Self { header: item.header.as_ref().into(), transactions: item.transactions.iter().map(RpcTransaction::from).collect() } + } +} + impl From<&MutableBlock> for RpcBlock { fn from(item: &MutableBlock) -> Self { Self { - header: item.header.clone(), + header: item.header.as_ref().into(), transactions: item.transactions.iter().map(RpcTransaction::from).collect(), verbose_data: None, } } } +impl From<&MutableBlock> for RpcRawBlock { + fn from(item: &MutableBlock) -> Self { + Self { header: item.header.as_ref().into(), transactions: item.transactions.iter().map(RpcTransaction::from).collect() } + } +} + +impl From for RpcRawBlock { + fn from(item: MutableBlock) -> Self { + Self { header: item.header.into(), transactions: item.transactions.iter().map(RpcTransaction::from).collect() } + } +} + // ---------------------------------------------------------------------------- // rpc_core to consensus_core // ---------------------------------------------------------------------------- -impl TryFrom<&RpcBlock> for Block { +impl TryFrom for Block { + type Error = RpcError; + fn try_from(item: RpcBlock) -> RpcResult { + Ok(Self { + header: Arc::new(item.header.into()), + transactions: Arc::new( + item.transactions + .into_iter() + .map(kaspa_consensus_core::tx::Transaction::try_from) + .collect::>>()?, + ), + }) + } +} + +impl TryFrom for Block { type Error = RpcError; - fn try_from(item: &RpcBlock) -> RpcResult { + fn try_from(item: RpcRawBlock) -> RpcResult { Ok(Self { - header: Arc::new(item.header.clone()), + header: Arc::new(item.header.into()), transactions: Arc::new( item.transactions - .iter() + .into_iter() .map(kaspa_consensus_core::tx::Transaction::try_from) .collect::>>()?, ), diff --git a/rpc/core/src/convert/mod.rs b/rpc/core/src/convert/mod.rs index dee1988d59..bc5c0e64b9 100644 --- a/rpc/core/src/convert/mod.rs +++ b/rpc/core/src/convert/mod.rs @@ -1,3 +1,7 @@ +//! +//! Data conversion utilities and structs for the RPC layer. +//! + pub mod block; pub mod notification; pub mod scope; diff --git a/rpc/core/src/convert/notification.rs b/rpc/core/src/convert/notification.rs index 362dd5ed9c..6251cc1cdf 100644 --- a/rpc/core/src/convert/notification.rs +++ b/rpc/core/src/convert/notification.rs @@ -1,3 +1,5 @@ +//! 
Conversion of Notification related types + use crate::{ convert::utxo::utxo_set_into_rpc, BlockAddedNotification, FinalityConflictNotification, FinalityConflictResolvedNotification, NewBlockTemplateNotification, Notification, PruningPointUtxoSetOverrideNotification, RpcAcceptedTransactionIds, diff --git a/rpc/core/src/convert/scope.rs b/rpc/core/src/convert/scope.rs index e38f09a1f6..6d94de326f 100644 --- a/rpc/core/src/convert/scope.rs +++ b/rpc/core/src/convert/scope.rs @@ -1,3 +1,5 @@ +//! Conversion of Notification Scope related types + use crate::{ NotifyBlockAddedRequest, NotifyFinalityConflictRequest, NotifyNewBlockTemplateRequest, NotifyPruningPointUtxoSetOverrideRequest, NotifySinkBlueScoreChangedRequest, NotifyUtxosChangedRequest, NotifyVirtualChainChangedRequest, diff --git a/rpc/core/src/convert/tx.rs b/rpc/core/src/convert/tx.rs index 44a9389a4b..9b69ca1688 100644 --- a/rpc/core/src/convert/tx.rs +++ b/rpc/core/src/convert/tx.rs @@ -1,3 +1,5 @@ +//! Conversion of Transaction related types + use crate::{RpcError, RpcResult, RpcTransaction, RpcTransactionInput, RpcTransactionOutput}; use kaspa_consensus_core::tx::{Transaction, TransactionInput, TransactionOutput}; @@ -36,7 +38,7 @@ impl From<&TransactionOutput> for RpcTransactionOutput { impl From<&TransactionInput> for RpcTransactionInput { fn from(item: &TransactionInput) -> Self { Self { - previous_outpoint: item.previous_outpoint, + previous_outpoint: item.previous_outpoint.into(), signature_script: item.signature_script.clone(), sequence: item.sequence, sig_op_count: item.sig_op_count, @@ -50,17 +52,17 @@ impl From<&TransactionInput> for RpcTransactionInput { // rpc_core to consensus_core // ---------------------------------------------------------------------------- -impl TryFrom<&RpcTransaction> for Transaction { +impl TryFrom for Transaction { type Error = RpcError; - fn try_from(item: &RpcTransaction) -> RpcResult { + fn try_from(item: RpcTransaction) -> RpcResult { let transaction = Transaction::new( item.version, item.inputs - .iter() + .into_iter() .map(kaspa_consensus_core::tx::TransactionInput::try_from) .collect::>>()?, item.outputs - .iter() + .into_iter() .map(kaspa_consensus_core::tx::TransactionOutput::try_from) .collect::>>()?, item.lock_time, @@ -73,16 +75,16 @@ impl TryFrom<&RpcTransaction> for Transaction { } } -impl TryFrom<&RpcTransactionOutput> for TransactionOutput { +impl TryFrom for TransactionOutput { type Error = RpcError; - fn try_from(item: &RpcTransactionOutput) -> RpcResult { - Ok(Self::new(item.value, item.script_public_key.clone())) + fn try_from(item: RpcTransactionOutput) -> RpcResult { + Ok(Self::new(item.value, item.script_public_key)) } } -impl TryFrom<&RpcTransactionInput> for TransactionInput { +impl TryFrom for TransactionInput { type Error = RpcError; - fn try_from(item: &RpcTransactionInput) -> RpcResult { - Ok(Self::new(item.previous_outpoint, item.signature_script.clone(), item.sequence, item.sig_op_count)) + fn try_from(item: RpcTransactionInput) -> RpcResult { + Ok(Self::new(item.previous_outpoint.into(), item.signature_script, item.sequence, item.sig_op_count)) } } diff --git a/rpc/core/src/convert/utxo.rs b/rpc/core/src/convert/utxo.rs index 305fb0931f..5fc09f6902 100644 --- a/rpc/core/src/convert/utxo.rs +++ b/rpc/core/src/convert/utxo.rs @@ -1,6 +1,8 @@ +//! Conversion functions for UTXO related types. 
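A consequence of switching the transaction conversions in tx.rs above from `TryFrom<&T>` to by-value `TryFrom<T>` is that buffers such as `signature_script` and `script_public_key` are moved rather than cloned. A small sketch of the resulting call-site shape:

```rust
use kaspa_consensus_core::tx::Transaction;
use kaspa_rpc_core::{RpcResult, RpcTransaction};

// The by-value impl consumes the RPC transaction, transferring ownership of
// its byte buffers into the consensus type with no intermediate clones.
fn into_consensus_tx(rpc_tx: RpcTransaction) -> RpcResult<Transaction> {
    Transaction::try_from(rpc_tx) // note: no `&` as before this patch
}
```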
+ +use crate::RpcUtxoEntry; use crate::RpcUtxosByAddressesEntry; use kaspa_addresses::Prefix; -use kaspa_consensus_core::tx::UtxoEntry; use kaspa_index_core::indexed_utxos::UtxoSetByScriptPublicKey; use kaspa_txscript::extract_script_pub_key_address; @@ -16,8 +18,8 @@ pub fn utxo_set_into_rpc(item: &UtxoSetByScriptPublicKey, prefix: Option .iter() .map(|(outpoint, entry)| RpcUtxosByAddressesEntry { address: address.clone(), - outpoint: *outpoint, - utxo_entry: UtxoEntry::new(entry.amount, script_public_key.clone(), entry.block_daa_score, entry.is_coinbase), + outpoint: (*outpoint).into(), + utxo_entry: RpcUtxoEntry::new(entry.amount, script_public_key.clone(), entry.block_daa_score, entry.is_coinbase), }) .collect::>() }) diff --git a/rpc/core/src/error.rs b/rpc/core/src/error.rs index 59f6b910e7..0e2bfee225 100644 --- a/rpc/core/src/error.rs +++ b/rpc/core/src/error.rs @@ -1,4 +1,8 @@ -use kaspa_consensus_core::{subnets::SubnetworkConversionError, tx::TransactionId}; +//! +//! [`RpcError`] enum used by RPC primitives. +//! + +use kaspa_consensus_core::{subnets::SubnetworkConversionError, tx::TransactionId}; use kaspa_utils::networking::IpAddress; use std::{net::AddrParseError, num::TryFromIntError}; use thiserror::Error; @@ -77,6 +81,9 @@ pub enum RpcError { #[error("IP {0} is not registered as banned.")] IpIsNotBanned(IpAddress), + #[error("Block {0} doesn't have any merger block.")] + MergerNotFound(RpcHash), + #[error("Block was not submitted: {0}")] SubmitBlockError(SubmitBlockRejectReason), @@ -116,9 +123,9 @@ pub enum RpcError { #[error("transaction query must either not filter transactions or include orphans")] InconsistentMempoolTxQuery, - #[error(transparent)] - SubnetParsingError(#[from] SubnetworkConversionError), - + #[error(transparent)] + SubnetParsingError(#[from] SubnetworkConversionError), + #[error(transparent)] WasmError(#[from] workflow_wasm::error::Error), diff --git a/rpc/core/src/lib.rs b/rpc/core/src/lib.rs index 66e4ece3a8..a2ece77d4d 100644 --- a/rpc/core/src/lib.rs +++ b/rpc/core/src/lib.rs @@ -1,3 +1,16 @@ +//! # RPC Core +//! +//! This crate provides foundational primitives used in Rusty Kaspa node RPC subsystem. +//! These include the main [`RpcApi`](api::rpc::RpcApi) trait, [`RpcApiOps`](crate::api::ops::RpcApiOps) +//! enum used in RPC method dispatching, and various data structures used in RPC method arguments. +//! +//! This crate acts as a foundation for [`kaspa_grpc_client`](https://docs.rs/kaspa_grpc_client) and +//! [`kaspa_wrpc_client`](https://docs.rs/kaspa_wrpc_client) crates, which provide gRPC and WebSocket +//! RPC client implementations. This crate is also used by WASM bindings to provide [WASM RpcClient +//! implementation](https://docs.rs/kaspa-wrpc-client/latest/kaspa_wrpc_client/wasm/struct.RpcClient.html) +//! (based on wRPC). +//! + // This attribute is required by BorshSerialize/Deserialize #![recursion_limit = "256"] @@ -9,6 +22,7 @@ pub mod notify; pub mod wasm; pub mod prelude { + //! Re-exports of the most commonly used types and traits in this crate. 
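The new `MergerNotFound` variant introduced in error.rs above can be matched like any other `RpcError`; a brief sketch (handler name illustrative):

```rust
use kaspa_rpc_core::RpcError;

// Surface the new "no merger block" condition distinctly from other failures.
fn report(err: &RpcError) {
    match err {
        RpcError::MergerNotFound(hash) => eprintln!("block {hash} has no merger block"),
        other => eprintln!("rpc error: {other}"),
    }
}
```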
pub use super::api::notifications::*; pub use super::model::script_class::*; pub use super::model::*; diff --git a/rpc/core/src/model/address.rs b/rpc/core/src/model/address.rs index 720cb4f86d..e2c069a50b 100644 --- a/rpc/core/src/model/address.rs +++ b/rpc/core/src/model/address.rs @@ -1,11 +1,11 @@ use crate::{RpcTransactionOutpoint, RpcUtxoEntry}; -use borsh::{BorshDeserialize, BorshSerialize}; use serde::{Deserialize, Serialize}; +use workflow_serializer::prelude::*; pub type RpcAddress = kaspa_addresses::Address; /// Represents a UTXO entry of an address returned by the `GetUtxosByAddresses` RPC. -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct RpcUtxosByAddressesEntry { pub address: Option, @@ -13,8 +13,27 @@ pub struct RpcUtxosByAddressesEntry { pub utxo_entry: RpcUtxoEntry, } +impl Serializer for RpcUtxosByAddressesEntry { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u8, &1, writer)?; // version + store!(Option, &self.address, writer)?; + serialize!(RpcTransactionOutpoint, &self.outpoint, writer)?; + serialize!(RpcUtxoEntry, &self.utxo_entry, writer) + } +} + +impl Deserializer for RpcUtxosByAddressesEntry { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version: u8 = load!(u8, reader)?; + let address = load!(Option, reader)?; + let outpoint = deserialize!(RpcTransactionOutpoint, reader)?; + let utxo_entry = deserialize!(RpcUtxoEntry, reader)?; + Ok(Self { address, outpoint, utxo_entry }) + } +} + /// Represents a balance of an address returned by the `GetBalancesByAddresses` RPC. -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct RpcBalancesByAddressesEntry { pub address: RpcAddress, @@ -22,3 +41,20 @@ pub struct RpcBalancesByAddressesEntry { /// Balance of `address` if available pub balance: Option, } + +impl Serializer for RpcBalancesByAddressesEntry { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u8, &1, writer)?; // version + store!(RpcAddress, &self.address, writer)?; + store!(Option, &self.balance, writer) + } +} + +impl Deserializer for RpcBalancesByAddressesEntry { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version: u8 = load!(u8, reader)?; + let address = load!(RpcAddress, reader)?; + let balance = load!(Option, reader)?; + Ok(Self { address, balance }) + } +} diff --git a/rpc/core/src/model/block.rs b/rpc/core/src/model/block.rs index c4c501afb9..3f4870dc69 100644 --- a/rpc/core/src/model/block.rs +++ b/rpc/core/src/model/block.rs @@ -1,8 +1,18 @@ +use super::RpcRawHeader; use crate::prelude::{RpcHash, RpcHeader, RpcTransaction}; -use borsh::{BorshDeserialize, BorshSerialize}; use serde::{Deserialize, Serialize}; +use workflow_serializer::prelude::*; -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +/// Raw Rpc block type - without a cached header hash and without verbose data. 
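The hand-rolled `Serializer`/`Deserializer` impls throughout this patch all lead with a version tag so fields can be appended later without breaking older peers. A minimal sketch of the same convention for a hypothetical struct (type and fields invented for illustration; `store!`/`load!` come from `workflow_serializer`, with the generic bounds written out as that crate requires):

```rust
use workflow_serializer::prelude::*;

// Hypothetical payload following the versioned-serialization convention used
// by RpcUtxosByAddressesEntry and friends above.
struct ExamplePayload {
    score: u64,
    label: Option<String>,
}

impl Serializer for ExamplePayload {
    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
        store!(u8, &1, writer)?; // version tag first; bump on layout changes
        store!(u64, &self.score, writer)?;
        store!(Option<String>, &self.label, writer)
    }
}

impl Deserializer for ExamplePayload {
    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
        let _version: u8 = load!(u8, reader)?; // reserved for future migrations
        let score = load!(u64, reader)?;
        let label = load!(Option<String>, reader)?;
        Ok(Self { score, label })
    }
}
```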
+/// Used for mining APIs (get_block_template & submit_block) +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcRawBlock { + pub header: RpcRawHeader, + pub transactions: Vec, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct RpcBlock { pub header: RpcHeader, @@ -10,7 +20,49 @@ pub struct RpcBlock { pub verbose_data: Option, } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for RpcBlock { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + serialize!(RpcHeader, &self.header, writer)?; + serialize!(Vec, &self.transactions, writer)?; + serialize!(Option, &self.verbose_data, writer)?; + + Ok(()) + } +} + +impl Deserializer for RpcBlock { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let header = deserialize!(RpcHeader, reader)?; + let transactions = deserialize!(Vec, reader)?; + let verbose_data = deserialize!(Option, reader)?; + + Ok(Self { header, transactions, verbose_data }) + } +} + +impl Serializer for RpcRawBlock { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + serialize!(RpcRawHeader, &self.header, writer)?; + serialize!(Vec, &self.transactions, writer)?; + + Ok(()) + } +} + +impl Deserializer for RpcRawBlock { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let header = deserialize!(RpcRawHeader, reader)?; + let transactions = deserialize!(Vec, reader)?; + + Ok(Self { header, transactions }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct RpcBlockVerboseData { pub hash: RpcHash, @@ -25,6 +77,53 @@ pub struct RpcBlockVerboseData { pub is_chain_block: bool, } +impl Serializer for RpcBlockVerboseData { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u8, &1, writer)?; + store!(RpcHash, &self.hash, writer)?; + store!(f64, &self.difficulty, writer)?; + store!(RpcHash, &self.selected_parent_hash, writer)?; + store!(Vec, &self.transaction_ids, writer)?; + store!(bool, &self.is_header_only, writer)?; + store!(u64, &self.blue_score, writer)?; + store!(Vec, &self.children_hashes, writer)?; + store!(Vec, &self.merge_set_blues_hashes, writer)?; + store!(Vec, &self.merge_set_reds_hashes, writer)?; + store!(bool, &self.is_chain_block, writer)?; + + Ok(()) + } +} + +impl Deserializer for RpcBlockVerboseData { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u8, reader)?; + let hash = load!(RpcHash, reader)?; + let difficulty = load!(f64, reader)?; + let selected_parent_hash = load!(RpcHash, reader)?; + let transaction_ids = load!(Vec, reader)?; + let is_header_only = load!(bool, reader)?; + let blue_score = load!(u64, reader)?; + let children_hashes = load!(Vec, reader)?; + let merge_set_blues_hashes = load!(Vec, reader)?; + let merge_set_reds_hashes = load!(Vec, reader)?; + let is_chain_block = load!(bool, reader)?; + + Ok(Self { + hash, + difficulty, + selected_parent_hash, + transaction_ids, + is_header_only, + blue_score, + children_hashes, + merge_set_blues_hashes, + merge_set_reds_hashes, + is_chain_block, + }) + } +} + cfg_if::cfg_if! { if #[cfg(feature = "wasm32-sdk")] { use wasm_bindgen::prelude::*; @@ -59,6 +158,21 @@ cfg_if::cfg_if! { mergeSetRedsHashes: HexString[]; isChainBlock: boolean; } + + /** + * Interface defining the structure of a raw block. 
+ * + * Raw block is a structure used by GetBlockTemplate and SubmitBlock RPCs + * and differs from `IBlock` in that it does not include verbose data and carries + * `IRawHeader` that does not include a cached block hash. + * + * @category Consensus + */ + export interface IRawBlock { + header: IRawHeader; + transactions: ITransaction[]; + } + "#; } } diff --git a/rpc/core/src/model/feerate_estimate.rs b/rpc/core/src/model/feerate_estimate.rs new file mode 100644 index 0000000000..cf97a4b36e --- /dev/null +++ b/rpc/core/src/model/feerate_estimate.rs @@ -0,0 +1,109 @@ +use borsh::{BorshDeserialize, BorshSerialize}; +use serde::{Deserialize, Serialize}; +use workflow_serializer::prelude::*; + +#[derive(Clone, Copy, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcFeerateBucket { + /// The fee/mass ratio estimated to be required for inclusion time <= estimated_seconds + pub feerate: f64, + + /// The estimated inclusion time for a transaction with fee/mass = feerate + pub estimated_seconds: f64, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcFeeEstimate { + /// *Top-priority* feerate bucket. Provides an estimation of the feerate required for sub-second DAG inclusion. + /// + /// Note: for all buckets, feerate values represent fee/mass of a transaction in `sompi/gram` units. + /// Given a feerate value recommendation, calculate the required fee by + /// taking the transaction mass and multiplying it by feerate: `fee = feerate * mass(tx)` + pub priority_bucket: RpcFeerateBucket, + + /// A vector of *normal* priority feerate values. The first value of this vector is guaranteed to exist and + /// provide an estimation for sub-*minute* DAG inclusion. All other values will have shorter estimation + /// times than all `low_bucket` values. Therefor by chaining `[priority] | normal | low` and interpolating + /// between them, one can compose a complete feerate function on the client side. The API makes an effort + /// to sample enough "interesting" points on the feerate-to-time curve, so that the interpolation is meaningful. + pub normal_buckets: Vec, + + /// A vector of *low* priority feerate values. The first value of this vector is guaranteed to + /// exist and provide an estimation for sub-*hour* DAG inclusion. 
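Given the bucket semantics above, choosing a fee on the client reduces to one multiplication. A worked sketch (helper name illustrative; rounding up keeps the fee at or above the recommended ratio):

```rust
use kaspa_rpc_core::prelude::*;

// fee = feerate * mass(tx), with feerate in sompi/gram as documented above.
fn fee_for_mass(estimate: &RpcFeeEstimate, tx_mass: u64) -> u64 {
    (estimate.priority_bucket.feerate * tx_mass as f64).ceil() as u64
}
// e.g. a 1.5 sompi/gram priority feerate and a 2000-gram tx => 3000 sompi.
```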
+ pub low_buckets: Vec, +} + +impl RpcFeeEstimate { + pub fn ordered_buckets(&self) -> Vec { + std::iter::once(self.priority_bucket) + .chain(self.normal_buckets.iter().copied()) + .chain(self.low_buckets.iter().copied()) + .collect() + } +} + +impl Serializer for RpcFeeEstimate { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(RpcFeerateBucket, &self.priority_bucket, writer)?; + store!(Vec, &self.normal_buckets, writer)?; + store!(Vec, &self.low_buckets, writer)?; + Ok(()) + } +} + +impl Deserializer for RpcFeeEstimate { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let priority_bucket = load!(RpcFeerateBucket, reader)?; + let normal_buckets = load!(Vec, reader)?; + let low_buckets = load!(Vec, reader)?; + Ok(Self { priority_bucket, normal_buckets, low_buckets }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcFeeEstimateVerboseExperimentalData { + pub mempool_ready_transactions_count: u64, + pub mempool_ready_transactions_total_mass: u64, + pub network_mass_per_second: u64, + + pub next_block_template_feerate_min: f64, + pub next_block_template_feerate_median: f64, + pub next_block_template_feerate_max: f64, +} + +impl Serializer for RpcFeeEstimateVerboseExperimentalData { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(u64, &self.mempool_ready_transactions_count, writer)?; + store!(u64, &self.mempool_ready_transactions_total_mass, writer)?; + store!(u64, &self.network_mass_per_second, writer)?; + store!(f64, &self.next_block_template_feerate_min, writer)?; + store!(f64, &self.next_block_template_feerate_median, writer)?; + store!(f64, &self.next_block_template_feerate_max, writer)?; + Ok(()) + } +} + +impl Deserializer for RpcFeeEstimateVerboseExperimentalData { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let mempool_ready_transactions_count = load!(u64, reader)?; + let mempool_ready_transactions_total_mass = load!(u64, reader)?; + let network_mass_per_second = load!(u64, reader)?; + let next_block_template_feerate_min = load!(f64, reader)?; + let next_block_template_feerate_median = load!(f64, reader)?; + let next_block_template_feerate_max = load!(f64, reader)?; + Ok(Self { + mempool_ready_transactions_count, + mempool_ready_transactions_total_mass, + network_mass_per_second, + next_block_template_feerate_min, + next_block_template_feerate_median, + next_block_template_feerate_max, + }) + } +} diff --git a/rpc/core/src/model/header.rs b/rpc/core/src/model/header.rs index e116f87eae..dddf767b7f 100644 --- a/rpc/core/src/model/header.rs +++ b/rpc/core/src/model/header.rs @@ -1 +1,331 @@ -pub type RpcHeader = kaspa_consensus_core::header::Header; +use borsh::{BorshDeserialize, BorshSerialize}; +use kaspa_consensus_core::{header::Header, BlueWorkType}; +use kaspa_hashes::Hash; +use serde::{Deserialize, Serialize}; +use workflow_serializer::prelude::*; + +/// Raw Rpc header type - without a cached header hash. 
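Because `RpcRawHeader` omits the cached hash, converting back to a consensus `Header` goes through `Header::new_finalized` (see the impls below), which recomputes the hash on the consensus side. A hedged round-trip sketch of that mental model:

```rust
use kaspa_consensus_core::header::Header;
use kaspa_rpc_core::prelude::*;

// Strip the cached hash, then let the consensus constructor re-derive it --
// the same shape submit_block follows when a miner sends an RpcRawBlock.
fn roundtrip(header: &Header) -> Header {
    let raw: RpcRawHeader = header.into(); // no `hash` field on the raw type
    Header::from(&raw) // new_finalized recomputes and caches the hash
}
```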
+/// Used for mining APIs (get_block_template & submit_block) +#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[serde(rename_all = "camelCase")] + +pub struct RpcRawHeader { + pub version: u16, + pub parents_by_level: Vec<Vec<Hash>>, + pub hash_merkle_root: Hash, + pub accepted_id_merkle_root: Hash, + pub utxo_commitment: Hash, + /// Timestamp is in milliseconds + pub timestamp: u64, + pub bits: u32, + pub nonce: u64, + pub daa_score: u64, + pub blue_work: BlueWorkType, + pub blue_score: u64, + pub pruning_point: Hash, +} + +#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcHeader { + /// Cached hash + pub hash: Hash, + pub version: u16, + pub parents_by_level: Vec<Vec<Hash>>, + pub hash_merkle_root: Hash, + pub accepted_id_merkle_root: Hash, + pub utxo_commitment: Hash, + /// Timestamp is in milliseconds + pub timestamp: u64, + pub bits: u32, + pub nonce: u64, + pub daa_score: u64, + pub blue_work: BlueWorkType, + pub blue_score: u64, + pub pruning_point: Hash, +} + +impl RpcHeader { + pub fn direct_parents(&self) -> &[Hash] { + if self.parents_by_level.is_empty() { + &[] + } else { + &self.parents_by_level[0] + } + } +} + +impl AsRef<RpcHeader> for RpcHeader { + fn as_ref(&self) -> &RpcHeader { + self + } +} + +impl From<Header>
for RpcHeader { + fn from(header: Header) -> Self { + Self { + hash: header.hash, + version: header.version, + parents_by_level: header.parents_by_level, + hash_merkle_root: header.hash_merkle_root, + accepted_id_merkle_root: header.accepted_id_merkle_root, + utxo_commitment: header.utxo_commitment, + timestamp: header.timestamp, + bits: header.bits, + nonce: header.nonce, + daa_score: header.daa_score, + blue_work: header.blue_work, + blue_score: header.blue_score, + pruning_point: header.pruning_point, + } + } +} + +impl From<&Header> for RpcHeader { + fn from(header: &Header) -> Self { + Self { + hash: header.hash, + version: header.version, + parents_by_level: header.parents_by_level.clone(), + hash_merkle_root: header.hash_merkle_root, + accepted_id_merkle_root: header.accepted_id_merkle_root, + utxo_commitment: header.utxo_commitment, + timestamp: header.timestamp, + bits: header.bits, + nonce: header.nonce, + daa_score: header.daa_score, + blue_work: header.blue_work, + blue_score: header.blue_score, + pruning_point: header.pruning_point, + } + } +} + +impl From<RpcHeader> for Header { + fn from(header: RpcHeader) -> Self { + Self { + hash: header.hash, + version: header.version, + parents_by_level: header.parents_by_level, + hash_merkle_root: header.hash_merkle_root, + accepted_id_merkle_root: header.accepted_id_merkle_root, + utxo_commitment: header.utxo_commitment, + timestamp: header.timestamp, + bits: header.bits, + nonce: header.nonce, + daa_score: header.daa_score, + blue_work: header.blue_work, + blue_score: header.blue_score, + pruning_point: header.pruning_point, + } + } +} + +impl From<&RpcHeader> for Header { + fn from(header: &RpcHeader) -> Self { + Self { + hash: header.hash, + version: header.version, + parents_by_level: header.parents_by_level.clone(), + hash_merkle_root: header.hash_merkle_root, + accepted_id_merkle_root: header.accepted_id_merkle_root, + utxo_commitment: header.utxo_commitment, + timestamp: header.timestamp, + bits: header.bits, + nonce: header.nonce, + daa_score: header.daa_score, + blue_work: header.blue_work, + blue_score: header.blue_score, + pruning_point: header.pruning_point, + } + } +} + +impl Serializer for RpcHeader { + fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + + store!(Hash, &self.hash, writer)?; + store!(u16, &self.version, writer)?; + store!(Vec<Vec<Hash>>, &self.parents_by_level, writer)?; + store!(Hash, &self.hash_merkle_root, writer)?; + store!(Hash, &self.accepted_id_merkle_root, writer)?; + store!(Hash, &self.utxo_commitment, writer)?; + store!(u64, &self.timestamp, writer)?; + store!(u32, &self.bits, writer)?; + store!(u64, &self.nonce, writer)?; + store!(u64, &self.daa_score, writer)?; + store!(BlueWorkType, &self.blue_work, writer)?; + store!(u64, &self.blue_score, writer)?; + store!(Hash, &self.pruning_point, writer)?; + + Ok(()) + } +} + +impl Deserializer for RpcHeader { + fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> { + let _version = load!(u16, reader)?; + + let hash = load!(Hash, reader)?; + let version = load!(u16, reader)?; + let parents_by_level = load!(Vec<Vec<Hash>>, reader)?; + let hash_merkle_root = load!(Hash, reader)?; + let accepted_id_merkle_root = load!(Hash, reader)?; + let utxo_commitment = load!(Hash, reader)?; + let timestamp = load!(u64, reader)?; + let bits = load!(u32, reader)?; + let nonce = load!(u64, reader)?; + let daa_score = load!(u64, reader)?; + let blue_work = load!(BlueWorkType, reader)?; + let blue_score = load!(u64, reader)?; + let pruning_point = load!(Hash, reader)?;
+ + Ok(Self { + hash, + version, + parents_by_level, + hash_merkle_root, + accepted_id_merkle_root, + utxo_commitment, + timestamp, + bits, + nonce, + daa_score, + blue_work, + blue_score, + pruning_point, + }) + } +} + +impl From<RpcRawHeader> for Header { + fn from(header: RpcRawHeader) -> Self { + Self::new_finalized( + header.version, + header.parents_by_level, + header.hash_merkle_root, + header.accepted_id_merkle_root, + header.utxo_commitment, + header.timestamp, + header.bits, + header.nonce, + header.daa_score, + header.blue_work, + header.blue_score, + header.pruning_point, + ) + } +} + +impl From<&RpcRawHeader> for Header { + fn from(header: &RpcRawHeader) -> Self { + Self::new_finalized( + header.version, + header.parents_by_level.clone(), + header.hash_merkle_root, + header.accepted_id_merkle_root, + header.utxo_commitment, + header.timestamp, + header.bits, + header.nonce, + header.daa_score, + header.blue_work, + header.blue_score, + header.pruning_point, + ) + } +} + +impl From<&Header> for RpcRawHeader { + fn from(header: &Header) -> Self { + Self { + version: header.version, + parents_by_level: header.parents_by_level.clone(), + hash_merkle_root: header.hash_merkle_root, + accepted_id_merkle_root: header.accepted_id_merkle_root, + utxo_commitment: header.utxo_commitment, + timestamp: header.timestamp, + bits: header.bits, + nonce: header.nonce, + daa_score: header.daa_score, + blue_work: header.blue_work, + blue_score: header.blue_score, + pruning_point: header.pruning_point, + } + } +} + +impl From<Header>
for RpcRawHeader { + fn from(header: Header) -> Self { + Self { + version: header.version, + parents_by_level: header.parents_by_level, + hash_merkle_root: header.hash_merkle_root, + accepted_id_merkle_root: header.accepted_id_merkle_root, + utxo_commitment: header.utxo_commitment, + timestamp: header.timestamp, + bits: header.bits, + nonce: header.nonce, + daa_score: header.daa_score, + blue_work: header.blue_work, + blue_score: header.blue_score, + pruning_point: header.pruning_point, + } + } +} + +impl Serializer for RpcRawHeader { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + + store!(u16, &self.version, writer)?; + store!(Vec>, &self.parents_by_level, writer)?; + store!(Hash, &self.hash_merkle_root, writer)?; + store!(Hash, &self.accepted_id_merkle_root, writer)?; + store!(Hash, &self.utxo_commitment, writer)?; + store!(u64, &self.timestamp, writer)?; + store!(u32, &self.bits, writer)?; + store!(u64, &self.nonce, writer)?; + store!(u64, &self.daa_score, writer)?; + store!(BlueWorkType, &self.blue_work, writer)?; + store!(u64, &self.blue_score, writer)?; + store!(Hash, &self.pruning_point, writer)?; + + Ok(()) + } +} + +impl Deserializer for RpcRawHeader { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + + let version = load!(u16, reader)?; + let parents_by_level = load!(Vec>, reader)?; + let hash_merkle_root = load!(Hash, reader)?; + let accepted_id_merkle_root = load!(Hash, reader)?; + let utxo_commitment = load!(Hash, reader)?; + let timestamp = load!(u64, reader)?; + let bits = load!(u32, reader)?; + let nonce = load!(u64, reader)?; + let daa_score = load!(u64, reader)?; + let blue_work = load!(BlueWorkType, reader)?; + let blue_score = load!(u64, reader)?; + let pruning_point = load!(Hash, reader)?; + + Ok(Self { + version, + parents_by_level, + hash_merkle_root, + accepted_id_merkle_root, + utxo_commitment, + timestamp, + bits, + nonce, + daa_score, + blue_work, + blue_score, + pruning_point, + }) + } +} diff --git a/rpc/core/src/model/mempool.rs b/rpc/core/src/model/mempool.rs index bd08b745a0..1a04bed756 100644 --- a/rpc/core/src/model/mempool.rs +++ b/rpc/core/src/model/mempool.rs @@ -1,9 +1,9 @@ use super::RpcAddress; use super::RpcTransaction; -use borsh::{BorshDeserialize, BorshSerialize}; use serde::{Deserialize, Serialize}; +use workflow_serializer::prelude::*; -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct RpcMempoolEntry { pub fee: u64, pub transaction: RpcTransaction, @@ -16,7 +16,24 @@ impl RpcMempoolEntry { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for RpcMempoolEntry { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u64, &self.fee, writer)?; + serialize!(RpcTransaction, &self.transaction, writer)?; + store!(bool, &self.is_orphan, writer) + } +} + +impl Deserializer for RpcMempoolEntry { + fn deserialize(reader: &mut R) -> std::io::Result { + let fee = load!(u64, reader)?; + let transaction = deserialize!(RpcTransaction, reader)?; + let is_orphan = load!(bool, reader)?; + Ok(Self { fee, transaction, is_orphan }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct RpcMempoolEntryByAddress { pub address: RpcAddress, pub sending: Vec, @@ -29,6 +46,23 @@ impl RpcMempoolEntryByAddress { } } +impl Serializer for RpcMempoolEntryByAddress { + fn serialize(&self, writer: &mut W) -> 
std::io::Result<()> { + store!(RpcAddress, &self.address, writer)?; + serialize!(Vec, &self.sending, writer)?; + serialize!(Vec, &self.receiving, writer) + } +} + +impl Deserializer for RpcMempoolEntryByAddress { + fn deserialize(reader: &mut R) -> std::io::Result { + let address = load!(RpcAddress, reader)?; + let sending = deserialize!(Vec, reader)?; + let receiving = deserialize!(Vec, reader)?; + Ok(Self { address, sending, receiving }) + } +} + cfg_if::cfg_if! { if #[cfg(feature = "wasm32-sdk")] { use wasm_bindgen::prelude::*; diff --git a/rpc/core/src/model/message.rs b/rpc/core/src/model/message.rs index 7366bf3ccb..ba8d6abf76 100644 --- a/rpc/core/src/model/message.rs +++ b/rpc/core/src/model/message.rs @@ -3,11 +3,14 @@ use borsh::{BorshDeserialize, BorshSerialize}; use kaspa_consensus_core::api::stats::BlockCount; use kaspa_core::debug; use kaspa_notify::subscription::{context::SubscriptionContext, single::UtxosChangedSubscription, Command}; +use kaspa_utils::hex::ToHex; use serde::{Deserialize, Serialize}; +use std::collections::HashMap; use std::{ fmt::{Display, Formatter}, sync::Arc, }; +use workflow_serializer::prelude::*; pub type RpcExtraData = Vec; @@ -15,21 +18,42 @@ pub type RpcExtraData = Vec; /// Blocks are generally expected to have been generated using the getBlockTemplate call. /// /// See: [`GetBlockTemplateRequest`] -#[derive(Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct SubmitBlockRequest { - pub block: RpcBlock, + pub block: RpcRawBlock, #[serde(alias = "allowNonDAABlocks")] pub allow_non_daa_blocks: bool, } impl SubmitBlockRequest { - pub fn new(block: RpcBlock, allow_non_daa_blocks: bool) -> Self { + pub fn new(block: RpcRawBlock, allow_non_daa_blocks: bool) -> Self { Self { block, allow_non_daa_blocks } } } +impl Serializer for SubmitBlockRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + serialize!(RpcRawBlock, &self.block, writer)?; + store!(bool, &self.allow_non_daa_blocks, writer)?; + + Ok(()) + } +} + +impl Deserializer for SubmitBlockRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let block = deserialize!(RpcRawBlock, reader)?; + let allow_non_daa_blocks = load!(bool, reader)?; + + Ok(Self { block, allow_non_daa_blocks }) + } +} + #[derive(Clone, Copy, Eq, PartialEq, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] #[serde(rename_all = "camelCase")] +#[borsh(use_discriminant = true)] pub enum SubmitBlockRejectReason { BlockInvalid = 1, IsInIBD = 2, @@ -54,6 +78,7 @@ impl Display for SubmitBlockRejectReason { #[derive(Eq, PartialEq, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] #[serde(rename_all = "lowercase")] #[serde(tag = "type", content = "reason")] +#[borsh(use_discriminant = true)] pub enum SubmitBlockReport { Success, Reject(SubmitBlockRejectReason), @@ -64,17 +89,34 @@ impl SubmitBlockReport { } } -#[derive(Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct SubmitBlockResponse { pub report: SubmitBlockReport, } +impl Serializer for SubmitBlockResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(SubmitBlockReport, &self.report, writer)?; + Ok(()) + } +} + +impl Deserializer for SubmitBlockResponse { + fn deserialize(reader: &mut R) -> 
std::io::Result { + let _version = load!(u16, reader)?; + let report = load!(SubmitBlockReport, reader)?; + + Ok(Self { report }) + } +} + /// GetBlockTemplateRequest requests a current block template. /// Callers are expected to solve the block template and submit it using the submitBlock call /// /// See: [`SubmitBlockRequest`] -#[derive(Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetBlockTemplateRequest { /// Which kaspa address should the coinbase block reward transaction pay into @@ -88,10 +130,30 @@ impl GetBlockTemplateRequest { } } -#[derive(Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetBlockTemplateRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(RpcAddress, &self.pay_address, writer)?; + store!(RpcExtraData, &self.extra_data, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetBlockTemplateRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let pay_address = load!(RpcAddress, reader)?; + let extra_data = load!(RpcExtraData, reader)?; + + Ok(Self { pay_address, extra_data }) + } +} + +#[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetBlockTemplateResponse { - pub block: RpcBlock, + pub block: RpcRawBlock, /// Whether kaspad thinks that it's synced. /// Callers are discouraged (but not forbidden) from solving blocks when kaspad is not synced. @@ -100,8 +162,28 @@ pub struct GetBlockTemplateResponse { pub is_synced: bool, } +impl Serializer for GetBlockTemplateResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + serialize!(RpcRawBlock, &self.block, writer)?; + store!(bool, &self.is_synced, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetBlockTemplateResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let block = deserialize!(RpcRawBlock, reader)?; + let is_synced = load!(bool, reader)?; + + Ok(Self { block, is_synced }) + } +} + /// GetBlockRequest requests information about a specific block -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetBlockRequest { /// The hash of the requested block @@ -116,18 +198,70 @@ impl GetBlockRequest { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetBlockRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(RpcHash, &self.hash, writer)?; + store!(bool, &self.include_transactions, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetBlockRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let hash = load!(RpcHash, reader)?; + let include_transactions = load!(bool, reader)?; + + Ok(Self { hash, include_transactions }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetBlockResponse { pub block: RpcBlock, } +impl Serializer for GetBlockResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + serialize!(RpcBlock, &self.block, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetBlockResponse { + fn deserialize(reader: &mut R) -> 
std::io::Result { + let _version = load!(u16, reader)?; + let block = deserialize!(RpcBlock, reader)?; + + Ok(Self { block }) + } +} + /// GetInfoRequest returns info about the node. -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetInfoRequest {} -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetInfoRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + Ok(()) + } +} + +impl Deserializer for GetInfoRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + Ok(Self {}) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetInfoResponse { pub p2p_id: String, @@ -139,11 +273,55 @@ pub struct GetInfoResponse { pub has_message_id: bool, } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetInfoResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(String, &self.p2p_id, writer)?; + store!(u64, &self.mempool_size, writer)?; + store!(String, &self.server_version, writer)?; + store!(bool, &self.is_utxo_indexed, writer)?; + store!(bool, &self.is_synced, writer)?; + store!(bool, &self.has_notify_command, writer)?; + store!(bool, &self.has_message_id, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetInfoResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let p2p_id = load!(String, reader)?; + let mempool_size = load!(u64, reader)?; + let server_version = load!(String, reader)?; + let is_utxo_indexed = load!(bool, reader)?; + let is_synced = load!(bool, reader)?; + let has_notify_command = load!(bool, reader)?; + let has_message_id = load!(bool, reader)?; + + Ok(Self { p2p_id, mempool_size, server_version, is_utxo_indexed, is_synced, has_notify_command, has_message_id }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetCurrentNetworkRequest {} -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetCurrentNetworkRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + Ok(()) + } +} + +impl Deserializer for GetCurrentNetworkRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + Ok(Self {}) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetCurrentNetworkResponse { pub network: RpcNetworkType, @@ -155,11 +333,41 @@ impl GetCurrentNetworkResponse { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetCurrentNetworkResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(RpcNetworkType, &self.network, writer)?; + Ok(()) + } +} + +impl Deserializer for GetCurrentNetworkResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let network = load!(RpcNetworkType, reader)?; + Ok(Self { network }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetPeerAddressesRequest {} -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, 
BorshDeserialize)] +impl Serializer for GetPeerAddressesRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + Ok(()) + } +} + +impl Deserializer for GetPeerAddressesRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + Ok(Self {}) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetPeerAddressesResponse { pub known_addresses: Vec, @@ -172,11 +380,43 @@ impl GetPeerAddressesResponse { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetPeerAddressesResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(Vec, &self.known_addresses, writer)?; + store!(Vec, &self.banned_addresses, writer)?; + Ok(()) + } +} + +impl Deserializer for GetPeerAddressesResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let known_addresses = load!(Vec, reader)?; + let banned_addresses = load!(Vec, reader)?; + Ok(Self { known_addresses, banned_addresses }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetSinkRequest {} -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetSinkRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + Ok(()) + } +} + +impl Deserializer for GetSinkRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + Ok(Self {}) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetSinkResponse { pub sink: RpcHash, @@ -188,7 +428,23 @@ impl GetSinkResponse { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetSinkResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(RpcHash, &self.sink, writer)?; + Ok(()) + } +} + +impl Deserializer for GetSinkResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let sink = load!(RpcHash, reader)?; + Ok(Self { sink }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetMempoolEntryRequest { pub transaction_id: RpcTransactionId, @@ -203,7 +459,29 @@ impl GetMempoolEntryRequest { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetMempoolEntryRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(RpcTransactionId, &self.transaction_id, writer)?; + store!(bool, &self.include_orphan_pool, writer)?; + store!(bool, &self.filter_transaction_pool, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetMempoolEntryRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let transaction_id = load!(RpcTransactionId, reader)?; + let include_orphan_pool = load!(bool, reader)?; + let filter_transaction_pool = load!(bool, reader)?; + + Ok(Self { transaction_id, include_orphan_pool, filter_transaction_pool }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetMempoolEntryResponse { pub mempool_entry: RpcMempoolEntry, @@ -215,7 +493,23 @@ impl GetMempoolEntryResponse { } } 
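With `GetBlockTemplateResponse` and `SubmitBlockRequest` now carrying `RpcRawBlock`, miners never see or compute a cached hash; the node re-derives it on submission. A hedged end-to-end sketch (solver elided and replaced by a placeholder nonce; wrapper signatures assumed to mirror the request fields, as elsewhere in this trait):

```rust
use kaspa_rpc_core::{api::rpc::RpcApi, prelude::*};

// Template -> solve -> submit, using the raw (hash-less) block type.
async fn mine_once(rpc: &dyn RpcApi, pay_address: RpcAddress) -> RpcResult<SubmitBlockReport> {
    let template = rpc.get_block_template(pay_address, vec![]).await?;
    // Callers are discouraged from solving blocks while the node is unsynced.
    assert!(template.is_synced, "node is not synced");
    let mut block: RpcRawBlock = template.block;
    block.header.nonce = 42; // stand-in for a real PoW solve of the header
    Ok(rpc.submit_block(block, false).await?.report)
}
```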
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetMempoolEntryResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + serialize!(RpcMempoolEntry, &self.mempool_entry, writer)?; + Ok(()) + } +} + +impl Deserializer for GetMempoolEntryResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let mempool_entry = deserialize!(RpcMempoolEntry, reader)?; + Ok(Self { mempool_entry }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetMempoolEntriesRequest { pub include_orphan_pool: bool, @@ -229,7 +523,27 @@ impl GetMempoolEntriesRequest { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetMempoolEntriesRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(bool, &self.include_orphan_pool, writer)?; + store!(bool, &self.filter_transaction_pool, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetMempoolEntriesRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let include_orphan_pool = load!(bool, reader)?; + let filter_transaction_pool = load!(bool, reader)?; + + Ok(Self { include_orphan_pool, filter_transaction_pool }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetMempoolEntriesResponse { pub mempool_entries: Vec, @@ -241,11 +555,41 @@ impl GetMempoolEntriesResponse { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetMempoolEntriesResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + serialize!(Vec, &self.mempool_entries, writer)?; + Ok(()) + } +} + +impl Deserializer for GetMempoolEntriesResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let mempool_entries = deserialize!(Vec, reader)?; + Ok(Self { mempool_entries }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetConnectedPeerInfoRequest {} -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetConnectedPeerInfoRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + Ok(()) + } +} + +impl Deserializer for GetConnectedPeerInfoRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + Ok(Self {}) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetConnectedPeerInfoResponse { pub peer_info: Vec, @@ -257,7 +601,23 @@ impl GetConnectedPeerInfoResponse { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetConnectedPeerInfoResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(Vec, &self.peer_info, writer)?; + Ok(()) + } +} + +impl Deserializer for GetConnectedPeerInfoResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let peer_info = load!(Vec, reader)?; + Ok(Self { peer_info }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct AddPeerRequest { pub peer_address: 
RpcContextualPeerAddress, @@ -270,11 +630,45 @@ impl AddPeerRequest { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for AddPeerRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(RpcContextualPeerAddress, &self.peer_address, writer)?; + store!(bool, &self.is_permanent, writer)?; + + Ok(()) + } +} + +impl Deserializer for AddPeerRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let peer_address = load!(RpcContextualPeerAddress, reader)?; + let is_permanent = load!(bool, reader)?; + + Ok(Self { peer_address, is_permanent }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct AddPeerResponse {} -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for AddPeerResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + Ok(()) + } +} + +impl Deserializer for AddPeerResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + Ok(Self {}) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct SubmitTransactionRequest { pub transaction: RpcTransaction, @@ -287,7 +681,27 @@ impl SubmitTransactionRequest { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for SubmitTransactionRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + serialize!(RpcTransaction, &self.transaction, writer)?; + store!(bool, &self.allow_orphan, writer)?; + + Ok(()) + } +} + +impl Deserializer for SubmitTransactionRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let transaction = deserialize!(RpcTransaction, reader)?; + let allow_orphan = load!(bool, reader)?; + + Ok(Self { transaction, allow_orphan }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct SubmitTransactionResponse { pub transaction_id: RpcTransactionId, @@ -299,6 +713,87 @@ impl SubmitTransactionResponse { } } +impl Serializer for SubmitTransactionResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(RpcTransactionId, &self.transaction_id, writer)?; + + Ok(()) + } +} + +impl Deserializer for SubmitTransactionResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let transaction_id = load!(RpcTransactionId, reader)?; + + Ok(Self { transaction_id }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct SubmitTransactionReplacementRequest { + pub transaction: RpcTransaction, +} + +impl SubmitTransactionReplacementRequest { + pub fn new(transaction: RpcTransaction) -> Self { + Self { transaction } + } +} + +impl Serializer for SubmitTransactionReplacementRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + serialize!(RpcTransaction, &self.transaction, writer)?; + + Ok(()) + } +} + +impl Deserializer for SubmitTransactionReplacementRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let transaction = deserialize!(RpcTransaction, reader)?; + + Ok(Self { transaction }) + } +} + +#[derive(Clone, Debug, 
Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct SubmitTransactionReplacementResponse { + pub transaction_id: RpcTransactionId, + pub replaced_transaction: RpcTransaction, +} + +impl SubmitTransactionReplacementResponse { + pub fn new(transaction_id: RpcTransactionId, replaced_transaction: RpcTransaction) -> Self { + Self { transaction_id, replaced_transaction } + } +} + +impl Serializer for SubmitTransactionReplacementResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(RpcTransactionId, &self.transaction_id, writer)?; + serialize!(RpcTransaction, &self.replaced_transaction, writer)?; + + Ok(()) + } +} + +impl Deserializer for SubmitTransactionReplacementResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let transaction_id = load!(RpcTransactionId, reader)?; + let replaced_transaction = deserialize!(RpcTransaction, reader)?; + + Ok(Self { transaction_id, replaced_transaction }) + } +} + #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] #[serde(rename_all = "camelCase")] pub struct GetSubnetworkRequest { @@ -311,7 +806,25 @@ impl GetSubnetworkRequest { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetSubnetworkRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(RpcSubnetworkId, &self.subnetwork_id, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetSubnetworkRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let subnetwork_id = load!(RpcSubnetworkId, reader)?; + + Ok(Self { subnetwork_id }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetSubnetworkResponse { pub gas_limit: u64, @@ -323,7 +836,25 @@ impl GetSubnetworkResponse { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetSubnetworkResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(u64, &self.gas_limit, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetSubnetworkResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let gas_limit = load!(u64, reader)?; + + Ok(Self { gas_limit }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetVirtualChainFromBlockRequest { pub start_hash: RpcHash, @@ -336,7 +867,27 @@ impl GetVirtualChainFromBlockRequest { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetVirtualChainFromBlockRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(RpcHash, &self.start_hash, writer)?; + store!(bool, &self.include_accepted_transaction_ids, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetVirtualChainFromBlockRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let start_hash = load!(RpcHash, reader)?; + let include_accepted_transaction_ids = load!(bool, reader)?; + + Ok(Self { start_hash, include_accepted_transaction_ids }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetVirtualChainFromBlockResponse { pub removed_chain_block_hashes: Vec, @@ -354,7 +905,29 @@ impl 
GetVirtualChainFromBlockResponse { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetVirtualChainFromBlockResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(Vec, &self.removed_chain_block_hashes, writer)?; + store!(Vec, &self.added_chain_block_hashes, writer)?; + store!(Vec, &self.accepted_transaction_ids, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetVirtualChainFromBlockResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let removed_chain_block_hashes = load!(Vec, reader)?; + let added_chain_block_hashes = load!(Vec, reader)?; + let accepted_transaction_ids = load!(Vec, reader)?; + + Ok(Self { removed_chain_block_hashes, added_chain_block_hashes, accepted_transaction_ids }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetBlocksRequest { pub low_hash: Option, @@ -368,7 +941,29 @@ impl GetBlocksRequest { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetBlocksRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(Option, &self.low_hash, writer)?; + store!(bool, &self.include_blocks, writer)?; + store!(bool, &self.include_transactions, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetBlocksRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let low_hash = load!(Option, reader)?; + let include_blocks = load!(bool, reader)?; + let include_transactions = load!(bool, reader)?; + + Ok(Self { low_hash, include_blocks, include_transactions }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetBlocksResponse { pub block_hashes: Vec, @@ -381,17 +976,65 @@ impl GetBlocksResponse { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetBlocksResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(Vec, &self.block_hashes, writer)?; + serialize!(Vec, &self.blocks, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetBlocksResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let block_hashes = load!(Vec, reader)?; + let blocks = deserialize!(Vec, reader)?; + + Ok(Self { block_hashes, blocks }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetBlockCountRequest {} +impl Serializer for GetBlockCountRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + Ok(()) + } +} + +impl Deserializer for GetBlockCountRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + Ok(Self {}) + } +} + pub type GetBlockCountResponse = BlockCount; -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetBlockDagInfoRequest {} -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetBlockDagInfoRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + Ok(()) + } +} + +impl Deserializer for GetBlockDagInfoRequest { + fn deserialize(reader: 
&mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + Ok(Self {}) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetBlockDagInfoResponse { pub network: RpcNetworkId, @@ -434,7 +1077,54 @@ impl GetBlockDagInfoResponse { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetBlockDagInfoResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(RpcNetworkId, &self.network, writer)?; + store!(u64, &self.block_count, writer)?; + store!(u64, &self.header_count, writer)?; + store!(Vec, &self.tip_hashes, writer)?; + store!(f64, &self.difficulty, writer)?; + store!(u64, &self.past_median_time, writer)?; + store!(Vec, &self.virtual_parent_hashes, writer)?; + store!(RpcHash, &self.pruning_point_hash, writer)?; + store!(u64, &self.virtual_daa_score, writer)?; + store!(RpcHash, &self.sink, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetBlockDagInfoResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let network = load!(RpcNetworkId, reader)?; + let block_count = load!(u64, reader)?; + let header_count = load!(u64, reader)?; + let tip_hashes = load!(Vec, reader)?; + let difficulty = load!(f64, reader)?; + let past_median_time = load!(u64, reader)?; + let virtual_parent_hashes = load!(Vec, reader)?; + let pruning_point_hash = load!(RpcHash, reader)?; + let virtual_daa_score = load!(u64, reader)?; + let sink = load!(RpcHash, reader)?; + + Ok(Self { + network, + block_count, + header_count, + tip_hashes, + difficulty, + past_median_time, + virtual_parent_hashes, + pruning_point_hash, + virtual_daa_score, + sink, + }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct ResolveFinalityConflictRequest { pub finality_block_hash: RpcHash, @@ -446,19 +1136,79 @@ impl ResolveFinalityConflictRequest { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for ResolveFinalityConflictRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(RpcHash, &self.finality_block_hash, writer)?; + + Ok(()) + } +} + +impl Deserializer for ResolveFinalityConflictRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let finality_block_hash = load!(RpcHash, reader)?; + + Ok(Self { finality_block_hash }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct ResolveFinalityConflictResponse {} -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for ResolveFinalityConflictResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + Ok(()) + } +} + +impl Deserializer for ResolveFinalityConflictResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + Ok(Self {}) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct ShutdownRequest {} -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for ShutdownRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + Ok(()) + } +} + +impl Deserializer for ShutdownRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let 
_version = load!(u16, reader)?; + Ok(Self {}) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct ShutdownResponse {} -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for ShutdownResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + Ok(()) + } +} + +impl Deserializer for ShutdownResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + Ok(Self {}) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetHeadersRequest { pub start_hash: RpcHash, @@ -472,7 +1222,29 @@ impl GetHeadersRequest { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetHeadersRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(RpcHash, &self.start_hash, writer)?; + store!(u64, &self.limit, writer)?; + store!(bool, &self.is_ascending, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetHeadersRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let start_hash = load!(RpcHash, reader)?; + let limit = load!(u64, reader)?; + let is_ascending = load!(bool, reader)?; + + Ok(Self { start_hash, limit, is_ascending }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetHeadersResponse { pub headers: Vec, @@ -484,7 +1256,25 @@ impl GetHeadersResponse { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetHeadersResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(Vec, &self.headers, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetHeadersResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let headers = load!(Vec, reader)?; + + Ok(Self { headers }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetBalanceByAddressRequest { pub address: RpcAddress, @@ -496,7 +1286,25 @@ impl GetBalanceByAddressRequest { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetBalanceByAddressRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(RpcAddress, &self.address, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetBalanceByAddressRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let address = load!(RpcAddress, reader)?; + + Ok(Self { address }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetBalanceByAddressResponse { pub balance: u64, @@ -508,7 +1316,25 @@ impl GetBalanceByAddressResponse { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetBalanceByAddressResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(u64, &self.balance, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetBalanceByAddressResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let balance = load!(u64, reader)?; + + Ok(Self { balance }) + } +} + +#[derive(Clone, Debug, 
Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetBalancesByAddressesRequest { pub addresses: Vec, @@ -520,7 +1346,25 @@ impl GetBalancesByAddressesRequest { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetBalancesByAddressesRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(Vec, &self.addresses, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetBalancesByAddressesRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let addresses = load!(Vec, reader)?; + + Ok(Self { addresses }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetBalancesByAddressesResponse { pub entries: Vec, @@ -532,11 +1376,43 @@ impl GetBalancesByAddressesResponse { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetBalancesByAddressesResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + serialize!(Vec, &self.entries, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetBalancesByAddressesResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let entries = deserialize!(Vec, reader)?; + + Ok(Self { entries }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetSinkBlueScoreRequest {} -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetSinkBlueScoreRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + Ok(()) + } +} + +impl Deserializer for GetSinkBlueScoreRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + Ok(Self {}) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetSinkBlueScoreResponse { pub blue_score: u64, @@ -548,7 +1424,25 @@ impl GetSinkBlueScoreResponse { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetSinkBlueScoreResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(u64, &self.blue_score, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetSinkBlueScoreResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let blue_score = load!(u64, reader)?; + + Ok(Self { blue_score }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetUtxosByAddressesRequest { pub addresses: Vec, @@ -560,7 +1454,25 @@ impl GetUtxosByAddressesRequest { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetUtxosByAddressesRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(Vec, &self.addresses, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetUtxosByAddressesRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let addresses = load!(Vec, reader)?; + + Ok(Self { addresses }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetUtxosByAddressesResponse { pub entries: Vec, @@ -572,7 +1484,25 @@ impl 
GetUtxosByAddressesResponse { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetUtxosByAddressesResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + serialize!(Vec, &self.entries, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetUtxosByAddressesResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let entries = deserialize!(Vec, reader)?; + + Ok(Self { entries }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct BanRequest { pub ip: RpcIpAddress, @@ -584,11 +1514,43 @@ impl BanRequest { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for BanRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(RpcIpAddress, &self.ip, writer)?; + + Ok(()) + } +} + +impl Deserializer for BanRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let ip = load!(RpcIpAddress, reader)?; + + Ok(Self { ip }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct BanResponse {} -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for BanResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + Ok(()) + } +} + +impl Deserializer for BanResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + Ok(Self {}) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct UnbanRequest { pub ip: RpcIpAddress, @@ -600,11 +1562,43 @@ impl UnbanRequest { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for UnbanRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(RpcIpAddress, &self.ip, writer)?; + + Ok(()) + } +} + +impl Deserializer for UnbanRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let ip = load!(RpcIpAddress, reader)?; + + Ok(Self { ip }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct UnbanResponse {} -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for UnbanResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + Ok(()) + } +} + +impl Deserializer for UnbanResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + Ok(Self {}) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct EstimateNetworkHashesPerSecondRequest { pub window_size: u32, @@ -617,7 +1611,27 @@ impl EstimateNetworkHashesPerSecondRequest { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for EstimateNetworkHashesPerSecondRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(u32, &self.window_size, writer)?; + store!(Option, &self.start_hash, writer)?; + + Ok(()) + } +} + +impl Deserializer for EstimateNetworkHashesPerSecondRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + 
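// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the committed diff (the deserializer above
// resumes below). Every message in this patch writes a u16 payload version
// ahead of its fields via the store!/load! macros, which appear to wrap
// Borsh's little-endian encoding. A minimal std-only emulation of that wire
// pattern, including the "read newer fields only when the version allows it"
// gate that GetSystemInfoResponse uses further below; all names here are
// stand-ins rather than items from this codebase:
use std::io::{Read, Result, Write};

fn write_record<W: Write>(w: &mut W, base: u64, extra: u32) -> Result<()> {
    w.write_all(&2u16.to_le_bytes())?; // payload version prefix (version 2)
    w.write_all(&base.to_le_bytes())?; // field present since version 1
    w.write_all(&extra.to_le_bytes())?; // field added in version 2
    Ok(())
}

fn read_record<R: Read>(r: &mut R) -> Result<(u64, Option<u32>)> {
    let mut v = [0u8; 2];
    r.read_exact(&mut v)?;
    let version = u16::from_le_bytes(v);

    let mut b = [0u8; 8];
    r.read_exact(&mut b)?;
    let base = u64::from_le_bytes(b);

    // A version-1 stream simply ends here; only v2+ streams carry the extra field.
    let extra = if version > 1 {
        let mut e = [0u8; 4];
        r.read_exact(&mut e)?;
        Some(u32::from_le_bytes(e))
    } else {
        None
    };
    Ok((base, extra))
}
// ---------------------------------------------------------------------------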
let window_size = load!(u32, reader)?; + let start_hash = load!(Option, reader)?; + + Ok(Self { window_size, start_hash }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct EstimateNetworkHashesPerSecondResponse { pub network_hashes_per_second: u64, @@ -629,7 +1643,25 @@ impl EstimateNetworkHashesPerSecondResponse { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for EstimateNetworkHashesPerSecondResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(u64, &self.network_hashes_per_second, writer)?; + + Ok(()) + } +} + +impl Deserializer for EstimateNetworkHashesPerSecondResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let network_hashes_per_second = load!(u64, reader)?; + + Ok(Self { network_hashes_per_second }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetMempoolEntriesByAddressesRequest { pub addresses: Vec, @@ -644,7 +1676,29 @@ impl GetMempoolEntriesByAddressesRequest { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetMempoolEntriesByAddressesRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(Vec, &self.addresses, writer)?; + store!(bool, &self.include_orphan_pool, writer)?; + store!(bool, &self.filter_transaction_pool, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetMempoolEntriesByAddressesRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let addresses = load!(Vec, reader)?; + let include_orphan_pool = load!(bool, reader)?; + let filter_transaction_pool = load!(bool, reader)?; + + Ok(Self { addresses, include_orphan_pool, filter_transaction_pool }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetMempoolEntriesByAddressesResponse { pub entries: Vec, @@ -656,11 +1710,43 @@ impl GetMempoolEntriesByAddressesResponse { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetMempoolEntriesByAddressesResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + serialize!(Vec, &self.entries, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetMempoolEntriesByAddressesResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let entries = deserialize!(Vec, reader)?; + + Ok(Self { entries }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetCoinSupplyRequest {} -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetCoinSupplyRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + Ok(()) + } +} + +impl Deserializer for GetCoinSupplyRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + Ok(Self {}) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetCoinSupplyResponse { pub max_sompi: u64, @@ -673,26 +1759,234 @@ impl GetCoinSupplyResponse { } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetCoinSupplyResponse { + fn 
serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(u64, &self.max_sompi, writer)?; + store!(u64, &self.circulating_sompi, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetCoinSupplyResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let max_sompi = load!(u64, reader)?; + let circulating_sompi = load!(u64, reader)?; + + Ok(Self { max_sompi, circulating_sompi }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct PingRequest {} -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for PingRequest { + fn serialize(&self, _writer: &mut W) -> std::io::Result<()> { + Ok(()) + } +} + +impl Deserializer for PingRequest { + fn deserialize(_reader: &mut R) -> std::io::Result { + Ok(Self {}) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct PingResponse {} -// TODO - custom wRPC commands (need review and implementation in gRPC) +impl Serializer for PingResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u8, &1, writer)?; + Ok(()) + } +} + +impl Deserializer for PingResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u8, reader)?; + Ok(Self {}) + } +} #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] #[serde(rename_all = "camelCase")] +pub struct ConnectionsProfileData { + pub cpu_usage: f32, + pub memory_usage: u64, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct GetConnectionsRequest { + pub include_profile_data: bool, +} + +impl Serializer for GetConnectionsRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u8, &1, writer)?; + store!(bool, &self.include_profile_data, writer)?; + Ok(()) + } +} + +impl Deserializer for GetConnectionsRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u8, reader)?; + let include_profile_data = load!(bool, reader)?; + Ok(Self { include_profile_data }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct GetConnectionsResponse { + pub clients: u32, + pub peers: u16, + pub profile_data: Option, +} + +impl Serializer for GetConnectionsResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(u32, &self.clients, writer)?; + store!(u16, &self.peers, writer)?; + store!(Option, &self.profile_data, writer)?; + Ok(()) + } +} + +impl Deserializer for GetConnectionsResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let clients = load!(u32, reader)?; + let peers = load!(u16, reader)?; + let extra = load!(Option, reader)?; + Ok(Self { clients, peers, profile_data: extra }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct GetSystemInfoRequest {} + +impl Serializer for GetSystemInfoRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetSystemInfoRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + + Ok(Self {}) + } +} + +#[derive(Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct GetSystemInfoResponse { + pub version: String, + pub 
system_id: Option>, + pub git_hash: Option>, + pub cpu_physical_cores: u16, + pub total_memory: u64, + pub fd_limit: u32, + pub proxy_socket_limit_per_cpu_core: Option, +} + +impl std::fmt::Debug for GetSystemInfoResponse { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("GetSystemInfoResponse") + .field("version", &self.version) + .field("system_id", &self.system_id.as_ref().map(|id| id.to_hex())) + .field("git_hash", &self.git_hash.as_ref().map(|hash| hash.to_hex())) + .field("cpu_physical_cores", &self.cpu_physical_cores) + .field("total_memory", &self.total_memory) + .field("fd_limit", &self.fd_limit) + .field("proxy_socket_limit_per_cpu_core", &self.proxy_socket_limit_per_cpu_core) + .finish() + } +} + +impl Serializer for GetSystemInfoResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &2, writer)?; + store!(String, &self.version, writer)?; + store!(Option>, &self.system_id, writer)?; + store!(Option>, &self.git_hash, writer)?; + store!(u16, &self.cpu_physical_cores, writer)?; + store!(u64, &self.total_memory, writer)?; + store!(u32, &self.fd_limit, writer)?; + store!(Option, &self.proxy_socket_limit_per_cpu_core, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetSystemInfoResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let payload_version = load!(u16, reader)?; + let version = load!(String, reader)?; + let system_id = load!(Option>, reader)?; + let git_hash = load!(Option>, reader)?; + let cpu_physical_cores = load!(u16, reader)?; + let total_memory = load!(u64, reader)?; + let fd_limit = load!(u32, reader)?; + + let proxy_socket_limit_per_cpu_core = if payload_version > 1 { load!(Option, reader)? } else { None }; + + Ok(Self { version, system_id, git_hash, cpu_physical_cores, total_memory, fd_limit, proxy_socket_limit_per_cpu_core }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] pub struct GetMetricsRequest { pub process_metrics: bool, pub connection_metrics: bool, pub bandwidth_metrics: bool, pub consensus_metrics: bool, + pub storage_metrics: bool, + pub custom_metrics: bool, } -#[derive(Default, Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for GetMetricsRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(bool, &self.process_metrics, writer)?; + store!(bool, &self.connection_metrics, writer)?; + store!(bool, &self.bandwidth_metrics, writer)?; + store!(bool, &self.consensus_metrics, writer)?; + store!(bool, &self.storage_metrics, writer)?; + store!(bool, &self.custom_metrics, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetMetricsRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let process_metrics = load!(bool, reader)?; + let connection_metrics = load!(bool, reader)?; + let bandwidth_metrics = load!(bool, reader)?; + let consensus_metrics = load!(bool, reader)?; + let storage_metrics = load!(bool, reader)?; + let custom_metrics = load!(bool, reader)?; + + Ok(Self { process_metrics, connection_metrics, bandwidth_metrics, consensus_metrics, storage_metrics, custom_metrics }) + } +} + +#[derive(Default, Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct ProcessMetrics { pub resident_set_size: u64, @@ -706,7 +2000,51 @@ pub struct ProcessMetrics { pub disk_io_write_per_sec: f32, } -#[derive(Default, Clone, Debug, Serialize, Deserialize, 
BorshSerialize, BorshDeserialize)] +impl Serializer for ProcessMetrics { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(u64, &self.resident_set_size, writer)?; + store!(u64, &self.virtual_memory_size, writer)?; + store!(u32, &self.core_num, writer)?; + store!(f32, &self.cpu_usage, writer)?; + store!(u32, &self.fd_num, writer)?; + store!(u64, &self.disk_io_read_bytes, writer)?; + store!(u64, &self.disk_io_write_bytes, writer)?; + store!(f32, &self.disk_io_read_per_sec, writer)?; + store!(f32, &self.disk_io_write_per_sec, writer)?; + + Ok(()) + } +} + +impl Deserializer for ProcessMetrics { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let resident_set_size = load!(u64, reader)?; + let virtual_memory_size = load!(u64, reader)?; + let core_num = load!(u32, reader)?; + let cpu_usage = load!(f32, reader)?; + let fd_num = load!(u32, reader)?; + let disk_io_read_bytes = load!(u64, reader)?; + let disk_io_write_bytes = load!(u64, reader)?; + let disk_io_read_per_sec = load!(f32, reader)?; + let disk_io_write_per_sec = load!(f32, reader)?; + + Ok(Self { + resident_set_size, + virtual_memory_size, + core_num, + cpu_usage, + fd_num, + disk_io_read_bytes, + disk_io_write_bytes, + disk_io_read_per_sec, + disk_io_write_per_sec, + }) + } +} + +#[derive(Default, Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct ConnectionMetrics { pub borsh_live_connections: u32, @@ -719,7 +2057,45 @@ pub struct ConnectionMetrics { pub active_peers: u32, } -#[derive(Default, Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for ConnectionMetrics { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(u32, &self.borsh_live_connections, writer)?; + store!(u64, &self.borsh_connection_attempts, writer)?; + store!(u64, &self.borsh_handshake_failures, writer)?; + store!(u32, &self.json_live_connections, writer)?; + store!(u64, &self.json_connection_attempts, writer)?; + store!(u64, &self.json_handshake_failures, writer)?; + store!(u32, &self.active_peers, writer)?; + + Ok(()) + } +} + +impl Deserializer for ConnectionMetrics { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let borsh_live_connections = load!(u32, reader)?; + let borsh_connection_attempts = load!(u64, reader)?; + let borsh_handshake_failures = load!(u64, reader)?; + let json_live_connections = load!(u32, reader)?; + let json_connection_attempts = load!(u64, reader)?; + let json_handshake_failures = load!(u64, reader)?; + let active_peers = load!(u32, reader)?; + + Ok(Self { + borsh_live_connections, + borsh_connection_attempts, + borsh_handshake_failures, + json_live_connections, + json_connection_attempts, + json_handshake_failures, + active_peers, + }) + } +} + +#[derive(Default, Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct BandwidthMetrics { pub borsh_bytes_tx: u64, @@ -732,7 +2108,48 @@ pub struct BandwidthMetrics { pub grpc_bytes_rx: u64, } -#[derive(Default, Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for BandwidthMetrics { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(u64, &self.borsh_bytes_tx, writer)?; + store!(u64, &self.borsh_bytes_rx, writer)?; + store!(u64, &self.json_bytes_tx, writer)?; + store!(u64, &self.json_bytes_rx, writer)?; + store!(u64, 
&self.p2p_bytes_tx, writer)?; + store!(u64, &self.p2p_bytes_rx, writer)?; + store!(u64, &self.grpc_bytes_tx, writer)?; + store!(u64, &self.grpc_bytes_rx, writer)?; + + Ok(()) + } +} + +impl Deserializer for BandwidthMetrics { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let borsh_bytes_tx = load!(u64, reader)?; + let borsh_bytes_rx = load!(u64, reader)?; + let json_bytes_tx = load!(u64, reader)?; + let json_bytes_rx = load!(u64, reader)?; + let p2p_bytes_tx = load!(u64, reader)?; + let p2p_bytes_rx = load!(u64, reader)?; + let grpc_bytes_tx = load!(u64, reader)?; + let grpc_bytes_rx = load!(u64, reader)?; + + Ok(Self { + borsh_bytes_tx, + borsh_bytes_rx, + json_bytes_tx, + json_bytes_rx, + p2p_bytes_tx, + p2p_bytes_rx, + grpc_bytes_tx, + grpc_bytes_rx, + }) + } +} + +#[derive(Default, Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct ConsensusMetrics { pub node_blocks_submitted_count: u64, @@ -754,7 +2171,115 @@ pub struct ConsensusMetrics { pub network_virtual_daa_score: u64, } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Serializer for ConsensusMetrics { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(u64, &self.node_blocks_submitted_count, writer)?; + store!(u64, &self.node_headers_processed_count, writer)?; + store!(u64, &self.node_dependencies_processed_count, writer)?; + store!(u64, &self.node_bodies_processed_count, writer)?; + store!(u64, &self.node_transactions_processed_count, writer)?; + store!(u64, &self.node_chain_blocks_processed_count, writer)?; + store!(u64, &self.node_mass_processed_count, writer)?; + store!(u64, &self.node_database_blocks_count, writer)?; + store!(u64, &self.node_database_headers_count, writer)?; + store!(u64, &self.network_mempool_size, writer)?; + store!(u32, &self.network_tip_hashes_count, writer)?; + store!(f64, &self.network_difficulty, writer)?; + store!(u64, &self.network_past_median_time, writer)?; + store!(u32, &self.network_virtual_parent_hashes_count, writer)?; + store!(u64, &self.network_virtual_daa_score, writer)?; + + Ok(()) + } +} + +impl Deserializer for ConsensusMetrics { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let node_blocks_submitted_count = load!(u64, reader)?; + let node_headers_processed_count = load!(u64, reader)?; + let node_dependencies_processed_count = load!(u64, reader)?; + let node_bodies_processed_count = load!(u64, reader)?; + let node_transactions_processed_count = load!(u64, reader)?; + let node_chain_blocks_processed_count = load!(u64, reader)?; + let node_mass_processed_count = load!(u64, reader)?; + let node_database_blocks_count = load!(u64, reader)?; + let node_database_headers_count = load!(u64, reader)?; + let network_mempool_size = load!(u64, reader)?; + let network_tip_hashes_count = load!(u32, reader)?; + let network_difficulty = load!(f64, reader)?; + let network_past_median_time = load!(u64, reader)?; + let network_virtual_parent_hashes_count = load!(u32, reader)?; + let network_virtual_daa_score = load!(u64, reader)?; + + Ok(Self { + node_blocks_submitted_count, + node_headers_processed_count, + node_dependencies_processed_count, + node_bodies_processed_count, + node_transactions_processed_count, + node_chain_blocks_processed_count, + node_mass_processed_count, + node_database_blocks_count, + node_database_headers_count, + network_mempool_size, + network_tip_hashes_count, + 
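// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the committed diff (the field list above
// resumes below). These consensus counters are cumulative, so a GetMetrics
// consumer typically derives rates from two successive snapshots. A
// hypothetical helper; each tuple pairs a cumulative counter with the
// server_time at which it was sampled, assumed here to be in milliseconds:
fn blocks_per_second(prev: (u64, u64), curr: (u64, u64)) -> f64 {
    let delta_blocks = curr.0.saturating_sub(prev.0);
    let delta_ms = curr.1.saturating_sub(prev.1).max(1); // avoid division by zero
    delta_blocks as f64 * 1000.0 / delta_ms as f64
}
// ---------------------------------------------------------------------------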
network_difficulty, + network_past_median_time, + network_virtual_parent_hashes_count, + network_virtual_daa_score, + }) + } +} + +#[derive(Default, Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct StorageMetrics { + pub storage_size_bytes: u64, +} + +impl Serializer for StorageMetrics { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(u64, &self.storage_size_bytes, writer)?; + + Ok(()) + } +} + +impl Deserializer for StorageMetrics { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let storage_size_bytes = load!(u64, reader)?; + + Ok(Self { storage_size_bytes }) + } +} + +// TODO: Custom metrics dictionary +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum CustomMetricValue { + Placeholder, +} + +impl Serializer for CustomMetricValue { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + + Ok(()) + } +} + +impl Deserializer for CustomMetricValue { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + + Ok(CustomMetricValue::Placeholder) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct GetMetricsResponse { pub server_time: u64, @@ -762,66 +2287,382 @@ pub struct GetMetricsResponse { pub connection_metrics: Option, pub bandwidth_metrics: Option, pub consensus_metrics: Option, + pub storage_metrics: Option, + // TODO: this is currently a placeholder + pub custom_metrics: Option>, +} + +impl GetMetricsResponse { + pub fn new( + server_time: u64, + process_metrics: Option, + connection_metrics: Option, + bandwidth_metrics: Option, + consensus_metrics: Option, + storage_metrics: Option, + custom_metrics: Option>, + ) -> Self { + Self { + process_metrics, + connection_metrics, + bandwidth_metrics, + consensus_metrics, + storage_metrics, + server_time, + custom_metrics, + } + } +} + +impl Serializer for GetMetricsResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(u64, &self.server_time, writer)?; + serialize!(Option, &self.process_metrics, writer)?; + serialize!(Option, &self.connection_metrics, writer)?; + serialize!(Option, &self.bandwidth_metrics, writer)?; + serialize!(Option, &self.consensus_metrics, writer)?; + serialize!(Option, &self.storage_metrics, writer)?; + serialize!(Option>, &self.custom_metrics, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetMetricsResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let server_time = load!(u64, reader)?; + let process_metrics = deserialize!(Option, reader)?; + let connection_metrics = deserialize!(Option, reader)?; + let bandwidth_metrics = deserialize!(Option, reader)?; + let consensus_metrics = deserialize!(Option, reader)?; + let storage_metrics = deserialize!(Option, reader)?; + let custom_metrics = deserialize!(Option>, reader)?; + + Ok(Self { + server_time, + process_metrics, + connection_metrics, + bandwidth_metrics, + consensus_metrics, + storage_metrics, + custom_metrics, + }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[serde(rename_all = "camelCase")] +#[borsh(use_discriminant = true)] +pub enum RpcCaps { + Full = 0, + Blocks, + UtxoIndex, + Mempool, + Metrics, + Visualizer, + Mining, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct 
GetServerInfoRequest {} + +impl Serializer for GetServerInfoRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + Ok(()) + } +} + +impl Deserializer for GetServerInfoRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + Ok(Self {}) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct GetServerInfoResponse { + pub rpc_api_version: u16, + pub rpc_api_revision: u16, + pub server_version: String, + pub network_id: RpcNetworkId, + pub has_utxo_index: bool, + pub is_synced: bool, + pub virtual_daa_score: u64, +} + +impl Serializer for GetServerInfoResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + + store!(u16, &self.rpc_api_version, writer)?; + store!(u16, &self.rpc_api_revision, writer)?; + + store!(String, &self.server_version, writer)?; + store!(RpcNetworkId, &self.network_id, writer)?; + store!(bool, &self.has_utxo_index, writer)?; + store!(bool, &self.is_synced, writer)?; + store!(u64, &self.virtual_daa_score, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetServerInfoResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + + let rpc_api_version = load!(u16, reader)?; + let rpc_api_revision = load!(u16, reader)?; + + let server_version = load!(String, reader)?; + let network_id = load!(RpcNetworkId, reader)?; + let has_utxo_index = load!(bool, reader)?; + let is_synced = load!(bool, reader)?; + let virtual_daa_score = load!(u64, reader)?; + + Ok(Self { rpc_api_version, rpc_api_revision, server_version, network_id, has_utxo_index, is_synced, virtual_daa_score }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct GetSyncStatusRequest {} + +impl Serializer for GetSyncStatusRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + Ok(()) + } +} + +impl Deserializer for GetSyncStatusRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + Ok(Self {}) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct GetSyncStatusResponse { + pub is_synced: bool, +} + +impl Serializer for GetSyncStatusResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(bool, &self.is_synced, writer)?; + Ok(()) + } +} + +impl Deserializer for GetSyncStatusResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let is_synced = load!(bool, reader)?; + Ok(Self { is_synced }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct GetDaaScoreTimestampEstimateRequest { + pub daa_scores: Vec, +} + +impl GetDaaScoreTimestampEstimateRequest { + pub fn new(daa_scores: Vec) -> Self { + Self { daa_scores } + } +} + +impl Serializer for GetDaaScoreTimestampEstimateRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(Vec, &self.daa_scores, writer)?; + Ok(()) + } +} + +impl Deserializer for GetDaaScoreTimestampEstimateRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let daa_scores = load!(Vec, reader)?; + Ok(Self { daa_scores }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] 
+pub struct GetDaaScoreTimestampEstimateResponse { + pub timestamps: Vec, +} + +impl GetDaaScoreTimestampEstimateResponse { + pub fn new(timestamps: Vec) -> Self { + Self { timestamps } + } +} + +impl Serializer for GetDaaScoreTimestampEstimateResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(Vec, &self.timestamps, writer)?; + Ok(()) + } +} + +impl Deserializer for GetDaaScoreTimestampEstimateResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let timestamps = load!(Vec, reader)?; + Ok(Self { timestamps }) + } +} + +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Fee rate estimations + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct GetFeeEstimateRequest {} + +impl Serializer for GetFeeEstimateRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + Ok(()) + } +} + +impl Deserializer for GetFeeEstimateRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + Ok(Self {}) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct GetFeeEstimateResponse { + pub estimate: RpcFeeEstimate, +} + +impl Serializer for GetFeeEstimateResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + serialize!(RpcFeeEstimate, &self.estimate, writer)?; + Ok(()) + } +} + +impl Deserializer for GetFeeEstimateResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let estimate = deserialize!(RpcFeeEstimate, reader)?; + Ok(Self { estimate }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct GetFeeEstimateExperimentalRequest { + pub verbose: bool, } -impl GetMetricsResponse { - pub fn new( - server_time: u64, - process_metrics: Option, - connection_metrics: Option, - bandwidth_metrics: Option, - consensus_metrics: Option, - ) -> Self { - Self { process_metrics, connection_metrics, bandwidth_metrics, consensus_metrics, server_time } +impl Serializer for GetFeeEstimateExperimentalRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(bool, &self.verbose, writer)?; + Ok(()) } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] -#[serde(rename_all = "camelCase")] -pub struct GetServerInfoRequest {} +impl Deserializer for GetFeeEstimateExperimentalRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let verbose = load!(bool, reader)?; + Ok(Self { verbose }) + } +} -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] -pub struct GetServerInfoResponse { - pub rpc_api_version: [u16; 4], - pub server_version: String, - pub network_id: RpcNetworkId, - pub has_utxo_index: bool, - pub is_synced: bool, - pub virtual_daa_score: u64, +pub struct GetFeeEstimateExperimentalResponse { + /// The usual feerate estimate response + pub estimate: RpcFeeEstimate, + + /// Experimental verbose data + pub verbose: Option, } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] -#[serde(rename_all = "camelCase")] -pub struct GetSyncStatusRequest {} +impl Serializer for GetFeeEstimateExperimentalResponse { + fn 
serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + serialize!(RpcFeeEstimate, &self.estimate, writer)?; + serialize!(Option, &self.verbose, writer)?; + Ok(()) + } +} -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] -#[serde(rename_all = "camelCase")] -pub struct GetSyncStatusResponse { - pub is_synced: bool, +impl Deserializer for GetFeeEstimateExperimentalResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let estimate = deserialize!(RpcFeeEstimate, reader)?; + let verbose = deserialize!(Option, reader)?; + Ok(Self { estimate, verbose }) + } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] -pub struct GetDaaScoreTimestampEstimateRequest { - pub daa_scores: Vec, +pub struct GetCurrentBlockColorRequest { + pub hash: RpcHash, } -impl GetDaaScoreTimestampEstimateRequest { - pub fn new(daa_scores: Vec) -> Self { - Self { daa_scores } +impl Serializer for GetCurrentBlockColorRequest { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(RpcHash, &self.hash, writer)?; + + Ok(()) } } -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +impl Deserializer for GetCurrentBlockColorRequest { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let hash = load!(RpcHash, reader)?; + + Ok(Self { hash }) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] -pub struct GetDaaScoreTimestampEstimateResponse { - pub timestamps: Vec, +pub struct GetCurrentBlockColorResponse { + pub blue: bool, } -impl GetDaaScoreTimestampEstimateResponse { - pub fn new(timestamps: Vec) -> Self { - Self { timestamps } +impl Serializer for GetCurrentBlockColorResponse { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(bool, &self.blue, writer)?; + + Ok(()) + } +} + +impl Deserializer for GetCurrentBlockColorResponse { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u16, reader)?; + let blue = load!(bool, reader)?; + + Ok(Self { blue }) } } @@ -835,7 +2676,7 @@ impl GetDaaScoreTimestampEstimateResponse { /// NotifyBlockAddedRequest registers this connection for blockAdded notifications. 
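// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the committed diff (the doc comment above
// resumes below). Every Notify* request carries a Command that turns the
// subscription on or off; `Command` and `NotifyRequest` here are local
// stand-ins for the rpc-core types:
#[derive(Clone, Copy, Debug, PartialEq)]
enum Command {
    Start,
    Stop,
}

struct NotifyRequest {
    command: Command,
}

fn main() {
    // Register, then later unregister, a notification stream:
    let subscribe = NotifyRequest { command: Command::Start };
    let unsubscribe = NotifyRequest { command: Command::Stop };
    assert_ne!(subscribe.command, unsubscribe.command);
}
// ---------------------------------------------------------------------------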
///
/// See: BlockAddedNotification
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct NotifyBlockAddedRequest {
     pub command: Command,
@@ -846,20 +2687,66 @@ impl NotifyBlockAddedRequest {
     }
 }
 
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+impl Serializer for NotifyBlockAddedRequest {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        store!(Command, &self.command, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for NotifyBlockAddedRequest {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        let command = load!(Command, reader)?;
+        Ok(Self { command })
+    }
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct NotifyBlockAddedResponse {}
 
+impl Serializer for NotifyBlockAddedResponse {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for NotifyBlockAddedResponse {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        Ok(Self {})
+    }
+}
+
 /// BlockAddedNotification is sent whenever a block has been added (NOT accepted)
 /// into the DAG.
 ///
 /// See: NotifyBlockAddedRequest
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct BlockAddedNotification {
     pub block: Arc<RpcBlock>,
 }
 
+impl Serializer for BlockAddedNotification {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        serialize!(RpcBlock, &self.block, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for BlockAddedNotification {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        let block = deserialize!(RpcBlock, reader)?;
+        Ok(Self { block: block.into() })
+    }
+}
+
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 // VirtualChainChangedNotification
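// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the committed diff. Notification payloads
// such as BlockAddedNotification and VirtualChainChangedNotification keep
// their data behind Arc so a notification can be cloned per subscriber
// cheaply; serialization reads through the Arc and deserialization rebuilds
// it with `.into()`. The same shape in miniature, with u64 standing in for
// RpcHash:
use std::sync::Arc;

fn arc_round_trip() {
    let hashes: Arc<Vec<u64>> = Arc::new(vec![1, 2, 3]);
    // Serializing borrows the inner Vec (that is what goes on the wire)...
    let wire: Vec<u64> = hashes.as_ref().clone();
    // ...and deserializing wraps the decoded Vec back into a fresh Arc.
    let rebuilt: Arc<Vec<u64>> = wire.into();
    assert_eq!(hashes, rebuilt);
}
// ---------------------------------------------------------------------------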
 
 // NotifyVirtualChainChangedRequest registers this connection for
 // virtualChainChanged notifications.
 //
 // See: VirtualChainChangedNotification
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct NotifyVirtualChainChangedRequest {
     pub include_accepted_transaction_ids: bool,
@@ -880,15 +2767,47 @@ impl NotifyVirtualChainChangedRequest {
     }
 }
 
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+impl Serializer for NotifyVirtualChainChangedRequest {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        store!(bool, &self.include_accepted_transaction_ids, writer)?;
+        store!(Command, &self.command, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for NotifyVirtualChainChangedRequest {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        let include_accepted_transaction_ids = load!(bool, reader)?;
+        let command = load!(Command, reader)?;
+        Ok(Self { include_accepted_transaction_ids, command })
+    }
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct NotifyVirtualChainChangedResponse {}
 
+impl Serializer for NotifyVirtualChainChangedResponse {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for NotifyVirtualChainChangedResponse {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        Ok(Self {})
+    }
+}
+
 // VirtualChainChangedNotification is sent whenever the DAG's selected parent
 // chain has changed.
 //
 // See: NotifyVirtualChainChangedRequest
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct VirtualChainChangedNotification {
     pub removed_chain_block_hashes: Arc<Vec<RpcHash>>,
@@ -896,10 +2815,34 @@ pub struct VirtualChainChangedNotification {
     pub accepted_transaction_ids: Arc<Vec<RpcAcceptedTransactionIds>>,
 }
 
+impl Serializer for VirtualChainChangedNotification {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        store!(Vec<RpcHash>, &self.removed_chain_block_hashes, writer)?;
+        store!(Vec<RpcHash>, &self.added_chain_block_hashes, writer)?;
+        store!(Vec<RpcAcceptedTransactionIds>, &self.accepted_transaction_ids, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for VirtualChainChangedNotification {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        let removed_chain_block_hashes = load!(Vec<RpcHash>, reader)?;
+        let added_chain_block_hashes = load!(Vec<RpcHash>, reader)?;
+        let accepted_transaction_ids = load!(Vec<RpcAcceptedTransactionIds>, reader)?;
+        Ok(Self {
+            removed_chain_block_hashes: removed_chain_block_hashes.into(),
+            added_chain_block_hashes: added_chain_block_hashes.into(),
+            accepted_transaction_ids: accepted_transaction_ids.into(),
+        })
+    }
+}
+
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 // FinalityConflictNotification
 
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct NotifyFinalityConflictRequest {
     pub command: Command,
@@ -911,20 +2854,66 @@ impl NotifyFinalityConflictRequest {
     }
 }
 
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+impl Serializer for NotifyFinalityConflictRequest {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        store!(Command, &self.command, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for NotifyFinalityConflictRequest {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        let command = load!(Command, reader)?;
+        Ok(Self { command })
+    }
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct NotifyFinalityConflictResponse {}
 
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+impl Serializer for NotifyFinalityConflictResponse {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for NotifyFinalityConflictResponse {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        Ok(Self {})
+    }
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct FinalityConflictNotification {
     pub violating_block_hash: RpcHash,
 }
 
+impl Serializer for FinalityConflictNotification {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        store!(RpcHash, &self.violating_block_hash, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for FinalityConflictNotification {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        let violating_block_hash = load!(RpcHash, reader)?;
+        Ok(Self { violating_block_hash })
+    }
+}
+
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 // FinalityConflictResolvedNotification
 
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct NotifyFinalityConflictResolvedRequest {
     pub command: Command,
@@ -936,16 +2925,62 @@ impl NotifyFinalityConflictResolvedRequest {
     }
 }
 
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+impl Serializer for NotifyFinalityConflictResolvedRequest {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        store!(Command, &self.command, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for NotifyFinalityConflictResolvedRequest {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        let command = load!(Command, reader)?;
+        Ok(Self { command })
+    }
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct NotifyFinalityConflictResolvedResponse {}
 
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+impl Serializer for NotifyFinalityConflictResolvedResponse {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for NotifyFinalityConflictResolvedResponse {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        Ok(Self {})
+    }
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct FinalityConflictResolvedNotification {
     pub finality_block_hash: RpcHash,
 }
 
+impl Serializer for FinalityConflictResolvedNotification {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        store!(RpcHash, &self.finality_block_hash, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for FinalityConflictResolvedNotification {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        let finality_block_hash = load!(RpcHash, reader)?;
+        Ok(Self { finality_block_hash })
+    }
+}
+
 // ~~~~~~~~~~~~~~~~~~~~~~~~
 // UtxosChangedNotification
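A note on the two macro families appearing in these impls: `store!`/`load!` go through a value's built-in codec, while `serialize!`/`deserialize!` route through the type's own `Serializer`/`Deserializer` impl, so nested structs carry their own version prefix. A small sketch of the distinction (hypothetical helper, same assumptions as above):

    fn write_example<W: std::io::Write>(hash: &RpcHash, block: &RpcBlock, writer: &mut W) -> std::io::Result<()> {
        store!(RpcHash, hash, writer)?;      // plain codec: writes no version prefix of its own
        serialize!(RpcBlock, block, writer)  // versioned: RpcBlock writes its own struct version first
    }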
@@ -958,7 +2993,7 @@ pub struct FinalityConflictResolvedNotification {
 // This call is only available when this kaspad was started with `--utxoindex`
 //
 // See: UtxosChangedNotification
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct NotifyUtxosChangedRequest {
     pub addresses: Vec<RpcAddress>,
@@ -971,14 +3006,46 @@ impl NotifyUtxosChangedRequest {
     }
 }
 
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+impl Serializer for NotifyUtxosChangedRequest {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        store!(Vec<RpcAddress>, &self.addresses, writer)?;
+        store!(Command, &self.command, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for NotifyUtxosChangedRequest {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        let addresses = load!(Vec<RpcAddress>, reader)?;
+        let command = load!(Command, reader)?;
+        Ok(Self { addresses, command })
+    }
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct NotifyUtxosChangedResponse {}
 
+impl Serializer for NotifyUtxosChangedResponse {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for NotifyUtxosChangedResponse {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        Ok(Self {})
+    }
+}
+
 // UtxosChangedNotificationMessage is sent whenever the UTXO index had been updated.
 //
 // See: NotifyUtxosChangedRequest
-#[derive(Clone, Debug, Default, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Debug, Default, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct UtxosChangedNotification {
     pub added: Arc<Vec<RpcUtxosByAddressesEntry>>,
@@ -1015,6 +3082,24 @@ impl UtxosChangedNotification {
     }
 }
 
+impl Serializer for UtxosChangedNotification {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        serialize!(Vec<RpcUtxosByAddressesEntry>, &self.added, writer)?;
+        serialize!(Vec<RpcUtxosByAddressesEntry>, &self.removed, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for UtxosChangedNotification {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        let added = deserialize!(Vec<RpcUtxosByAddressesEntry>, reader)?;
+        let removed = deserialize!(Vec<RpcUtxosByAddressesEntry>, reader)?;
+        Ok(Self { added: added.into(), removed: removed.into() })
+    }
+}
+
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 // SinkBlueScoreChangedNotification
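Putting the pieces together, a round trip through an in-memory buffer looks roughly like this (a sketch of intended usage, not code from the patch; the struct is built literally since its fields are public):

    fn roundtrip() -> std::io::Result<()> {
        let request = NotifyUtxosChangedRequest { addresses: vec![], command: Command::Start };
        let mut buf: Vec<u8> = Vec::new();
        serialize!(NotifyUtxosChangedRequest, &request, &mut buf)?;
        // decode from the same bytes; the u16 version prefix is consumed and ignored
        let restored = deserialize!(NotifyUtxosChangedRequest, &mut buf.as_slice())?;
        assert_eq!(restored.addresses.len(), 0);
        Ok(())
    }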
@@ -1022,7 +3107,7 @@ impl UtxosChangedNotification {
 // sinkBlueScoreChanged notifications.
 //
 // See: SinkBlueScoreChangedNotification
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct NotifySinkBlueScoreChangedRequest {
     pub command: Command,
@@ -1034,20 +3119,66 @@ impl NotifySinkBlueScoreChangedRequest {
     }
 }
 
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+impl Serializer for NotifySinkBlueScoreChangedRequest {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        store!(Command, &self.command, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for NotifySinkBlueScoreChangedRequest {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        let command = load!(Command, reader)?;
+        Ok(Self { command })
+    }
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct NotifySinkBlueScoreChangedResponse {}
 
+impl Serializer for NotifySinkBlueScoreChangedResponse {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for NotifySinkBlueScoreChangedResponse {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        Ok(Self {})
+    }
+}
+
 // SinkBlueScoreChangedNotification is sent whenever the blue score
 // of the virtual's selected parent changes.
 //
 /// See: NotifySinkBlueScoreChangedRequest
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct SinkBlueScoreChangedNotification {
     pub sink_blue_score: u64,
 }
 
+impl Serializer for SinkBlueScoreChangedNotification {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        store!(u64, &self.sink_blue_score, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for SinkBlueScoreChangedNotification {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        let sink_blue_score = load!(u64, reader)?;
+        Ok(Self { sink_blue_score })
+    }
+}
+
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 // VirtualDaaScoreChangedNotification
@@ -1055,7 +3186,7 @@ pub struct SinkBlueScoreChangedNotification {
 // virtualDaaScoreChanged notifications.
 //
 // See: VirtualDaaScoreChangedNotification
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct NotifyVirtualDaaScoreChangedRequest {
     pub command: Command,
@@ -1067,24 +3198,70 @@ impl NotifyVirtualDaaScoreChangedRequest {
     }
 }
 
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+impl Serializer for NotifyVirtualDaaScoreChangedRequest {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        store!(Command, &self.command, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for NotifyVirtualDaaScoreChangedRequest {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        let command = load!(Command, reader)?;
+        Ok(Self { command })
+    }
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct NotifyVirtualDaaScoreChangedResponse {}
 
+impl Serializer for NotifyVirtualDaaScoreChangedResponse {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for NotifyVirtualDaaScoreChangedResponse {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        Ok(Self {})
+    }
+}
+
 // VirtualDaaScoreChangedNotification is sent whenever the DAA score
 // of the virtual changes.
 //
 // See NotifyVirtualDaaScoreChangedRequest
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct VirtualDaaScoreChangedNotification {
     pub virtual_daa_score: u64,
 }
 
+impl Serializer for VirtualDaaScoreChangedNotification {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        store!(u64, &self.virtual_daa_score, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for VirtualDaaScoreChangedNotification {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        let virtual_daa_score = load!(u64, reader)?;
+        Ok(Self { virtual_daa_score })
+    }
+}
+
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 // PruningPointUtxoSetOverrideNotification
 
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct NotifyPruningPointUtxoSetOverrideRequest {
     pub command: Command,
@@ -1096,21 +3273,65 @@ impl NotifyPruningPointUtxoSetOverrideRequest {
     }
 }
 
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+impl Serializer for NotifyPruningPointUtxoSetOverrideRequest {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        store!(Command, &self.command, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for NotifyPruningPointUtxoSetOverrideRequest {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        let command = load!(Command, reader)?;
+        Ok(Self { command })
+    }
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct NotifyPruningPointUtxoSetOverrideResponse {}
 
+impl Serializer for NotifyPruningPointUtxoSetOverrideResponse {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for NotifyPruningPointUtxoSetOverrideResponse {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        Ok(Self {})
+    }
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct PruningPointUtxoSetOverrideNotification {}
 
+impl Serializer for PruningPointUtxoSetOverrideNotification {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for PruningPointUtxoSetOverrideNotification {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        Ok(Self {})
+    }
+}
+
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 // NewBlockTemplateNotification
 
 /// NotifyNewBlockTemplateRequest registers this connection for blockAdded notifications.
 ///
 /// See: NewBlockTemplateNotification
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct NotifyNewBlockTemplateRequest {
     pub command: Command,
@@ -1121,22 +3342,66 @@ impl NotifyNewBlockTemplateRequest {
     }
 }
 
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+impl Serializer for NotifyNewBlockTemplateRequest {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        store!(Command, &self.command, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for NotifyNewBlockTemplateRequest {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        let command = load!(Command, reader)?;
+        Ok(Self { command })
+    }
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct NotifyNewBlockTemplateResponse {}
 
+impl Serializer for NotifyNewBlockTemplateResponse {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for NotifyNewBlockTemplateResponse {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        Ok(Self {})
+    }
+}
+
 /// NewBlockTemplateNotification is sent whenever a blocks has been added (NOT accepted)
 /// into the DAG.
 ///
 /// See: NotifyNewBlockTemplateRequest
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct NewBlockTemplateNotification {}
 
+impl Serializer for NewBlockTemplateNotification {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for NewBlockTemplateNotification {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        Ok(Self {})
+    }
+}
+
 ///
 /// wRPC response for RpcApiOps::Subscribe request
 ///
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct SubscribeResponse {
     id: u64,
@@ -1148,9 +3413,38 @@ impl SubscribeResponse {
     }
 }
 
+impl Serializer for SubscribeResponse {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)?;
+        store!(u64, &self.id, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for SubscribeResponse {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        let id = load!(u64, reader)?;
+        Ok(Self { id })
+    }
+}
+
 ///
 /// wRPC response for RpcApiOps::Unsubscribe request
 ///
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct UnsubscribeResponse {}
+
+impl Serializer for UnsubscribeResponse {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u16, &1, writer)
+    }
+}
+
+impl Deserializer for UnsubscribeResponse {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u16, reader)?;
+        Ok(Self {})
+    }
+}
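The tests module added below exercises every one of these impls by round-tripping a mock value between guard words. Conceptually the guard-word trick looks like this (a standalone sketch of the idea, not the harness itself):

    fn guarded<T: Serializer + Deserializer>(value: &T) -> std::io::Result<()> {
        const GUARD: u32 = 0xa55a5aa5;
        let mut buf = Vec::new();
        store!(u32, &GUARD, &mut buf)?;
        serialize!(T, value, &mut buf)?;
        store!(u32, &GUARD, &mut buf)?;

        let reader = &mut buf.as_slice();
        assert_eq!(load!(u32, reader)?, GUARD);
        let _value = deserialize!(T, reader)?;
        // if serialize and deserialize consumed different byte counts,
        // the trailing guard shifts and this assertion catches it
        assert_eq!(load!(u32, reader)?, GUARD);
        Ok(())
    }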
diff --git a/rpc/core/src/model/mod.rs b/rpc/core/src/model/mod.rs
index fd07a109ee..a7c2556249 100644
--- a/rpc/core/src/model/mod.rs
+++ b/rpc/core/src/model/mod.rs
@@ -1,6 +1,10 @@
+//! This module contains RPC-specific data structures
+//! used in RPC methods.
+
 pub mod address;
 pub mod block;
 pub mod blue_work;
+pub mod feerate_estimate;
 pub mod hash;
 pub mod header;
 pub mod hex_cnv;
@@ -10,11 +14,13 @@ pub mod network;
 pub mod peer;
 pub mod script_class;
 pub mod subnets;
+mod tests;
 pub mod tx;
 
 pub use address::*;
 pub use block::*;
 pub use blue_work::*;
+pub use feerate_estimate::*;
 pub use hash::*;
 pub use header::*;
 pub use hex_cnv::*;
diff --git a/rpc/core/src/model/tests.rs b/rpc/core/src/model/tests.rs
new file mode 100644
index 0000000000..d931f5ac23
--- /dev/null
+++ b/rpc/core/src/model/tests.rs
@@ -0,0 +1,1332 @@
+#[cfg(test)]
+mod mockery {
+
+    use crate::{model::*, RpcScriptClass};
+    use kaspa_addresses::{Prefix, Version};
+    use kaspa_consensus_core::api::BlockCount;
+    use kaspa_consensus_core::network::NetworkType;
+    use kaspa_consensus_core::subnets::SubnetworkId;
+    use kaspa_consensus_core::tx::ScriptPublicKey;
+    use kaspa_hashes::Hash;
+    use kaspa_math::Uint192;
+    use kaspa_notify::subscription::Command;
+    use kaspa_rpc_macros::test_wrpc_serializer as test;
+    use kaspa_utils::networking::{ContextualNetAddress, IpAddress, NetAddress};
+    use rand::Rng;
+    use std::net::{IpAddr, Ipv4Addr};
+    use std::sync::Arc;
+    use uuid::Uuid;
+    use workflow_serializer::prelude::*;
+
+    // this trait is used to generate random
+    // values for testing on various data types
+    trait Mock {
+        fn mock() -> Self;
+    }
+
+    impl<T> Mock for Option<T>
+    where
+        T: Mock,
+    {
+        fn mock() -> Self {
+            Some(T::mock())
+        }
+    }
+
+    impl<T> Mock for Vec<T>
+    where
+        T: Mock,
+    {
+        fn mock() -> Self {
+            vec![T::mock()]
+        }
+    }
+
+    impl<T> Mock for Arc<T>
+    where
+        T: Mock,
+    {
+        fn mock() -> Self {
+            Arc::new(T::mock())
+        }
+    }
+
+    fn mock<T>() -> T
+    where
+        T: Mock,
+    {
+        Mock::mock()
+    }
+
+    // this function tests serialization and deserialization of a type
+    // by serializing it (A), deserializing it, serializing it again (B)
+    // and comparing A and B buffers.
+    fn test<T>(kind: &str)
+    where
+        T: Serializer + Deserializer + Mock,
+    {
+        let data = T::mock();
+
+        const PREFIX: u32 = 0x12345678;
+        const SUFFIX: u32 = 0x90abcdef;
+
+        let mut buffer1 = Vec::new();
+        let writer = &mut buffer1;
+        store!(u32, &PREFIX, writer).unwrap();
+        serialize!(T, &data, writer).unwrap();
+        store!(u32, &SUFFIX, writer).unwrap();
+
+        let reader = &mut buffer1.as_slice();
+        let prefix: u32 = load!(u32, reader).unwrap();
+        // this will never occur, but it's a good practice to check in case
+        // the serialization/deserialization logic changes in the future
+        assert_eq!(prefix, PREFIX, "misalignment when consuming serialized buffer in `{kind}`");
+        let tmp = deserialize!(T, reader).unwrap();
+        let suffix: u32 = load!(u32, reader).unwrap();
+        assert_eq!(suffix, SUFFIX, "misalignment when consuming serialized buffer in `{kind}`");
+
+        let mut buffer2 = Vec::new();
+        let writer = &mut buffer2;
+        store!(u32, &PREFIX, writer).unwrap();
+        serialize!(T, &tmp, writer).unwrap();
+        store!(u32, &SUFFIX, writer).unwrap();
+
+        assert!(buffer1 == buffer2, "serialization/deserialization failure while testing `{kind}`");
+    }
+
+    #[macro_export]
+    macro_rules!
impl_mock { + ($($type:ty),*) => { + $(impl Mock for $type { + fn mock() -> Self { + rand::thread_rng().gen() + } + })* + }; + } + + impl_mock!(bool, u8, u16, u32, f32, u64, i64, f64); + + impl Mock for Uint192 { + fn mock() -> Self { + Uint192([mock(), mock(), mock()]) + } + } + + impl Mock for SubnetworkId { + fn mock() -> Self { + let mut bytes: [u8; 20] = [0; 20]; + rand::thread_rng().fill(&mut bytes); + SubnetworkId::from_bytes(bytes) + } + } + + impl Mock for Hash { + fn mock() -> Self { + let mut bytes: [u8; 32] = [0; 32]; + rand::thread_rng().fill(&mut bytes); + Hash::from_bytes(bytes) + } + } + + impl Mock for RpcAddress { + fn mock() -> Self { + RpcAddress::new(Prefix::Mainnet, Version::PubKey, Hash::mock().as_bytes().as_slice()) + } + } + + impl Mock for RpcHeader { + fn mock() -> Self { + RpcHeader { + version: mock(), + timestamp: mock(), + bits: mock(), + nonce: mock(), + hash_merkle_root: mock(), + accepted_id_merkle_root: mock(), + utxo_commitment: mock(), + hash: mock(), + parents_by_level: vec![mock()], + daa_score: mock(), + blue_score: mock(), + blue_work: mock(), + pruning_point: mock(), + } + } + } + + impl Mock for RpcRawHeader { + fn mock() -> Self { + RpcRawHeader { + version: mock(), + timestamp: mock(), + bits: mock(), + nonce: mock(), + hash_merkle_root: mock(), + accepted_id_merkle_root: mock(), + utxo_commitment: mock(), + parents_by_level: vec![mock()], + daa_score: mock(), + blue_score: mock(), + blue_work: mock(), + pruning_point: mock(), + } + } + } + + impl Mock for RpcBlockVerboseData { + fn mock() -> Self { + RpcBlockVerboseData { + hash: mock(), + difficulty: mock(), + selected_parent_hash: mock(), + transaction_ids: mock(), + is_header_only: mock(), + blue_score: mock(), + children_hashes: mock(), + merge_set_blues_hashes: mock(), + merge_set_reds_hashes: mock(), + is_chain_block: mock(), + } + } + } + + impl Mock for RpcBlock { + fn mock() -> Self { + RpcBlock { header: mock(), transactions: mock(), verbose_data: mock() } + } + } + + impl Mock for RpcRawBlock { + fn mock() -> Self { + RpcRawBlock { header: mock(), transactions: mock() } + } + } + + impl Mock for RpcTransactionInputVerboseData { + fn mock() -> Self { + RpcTransactionInputVerboseData {} + } + } + + impl Mock for RpcTransactionInput { + fn mock() -> Self { + RpcTransactionInput { + previous_outpoint: mock(), + signature_script: Hash::mock().as_bytes().to_vec(), + sequence: mock(), + sig_op_count: mock(), + verbose_data: mock(), + } + } + } + + impl Mock for RpcTransactionOutputVerboseData { + fn mock() -> Self { + RpcTransactionOutputVerboseData { script_public_key_type: RpcScriptClass::PubKey, script_public_key_address: mock() } + } + } + + impl Mock for RpcTransactionOutput { + fn mock() -> Self { + RpcTransactionOutput { value: mock(), script_public_key: mock(), verbose_data: mock() } + } + } + + impl Mock for RpcTransactionVerboseData { + fn mock() -> Self { + RpcTransactionVerboseData { + transaction_id: mock(), + hash: mock(), + compute_mass: mock(), + block_hash: mock(), + block_time: mock(), + } + } + } + + impl Mock for RpcTransaction { + fn mock() -> Self { + RpcTransaction { + version: mock(), + inputs: mock(), + outputs: mock(), + lock_time: mock(), + subnetwork_id: mock(), + gas: mock(), + payload: Hash::mock().as_bytes().to_vec(), + mass: mock(), + verbose_data: mock(), + } + } + } + + impl Mock for RpcNodeId { + fn mock() -> Self { + RpcNodeId::new(Uuid::new_v4()) + } + } + + impl Mock for IpAddr { + fn mock() -> Self { + IpAddr::V4(Ipv4Addr::new(mock(), mock(), mock(), 
mock())) + } + } + + impl Mock for IpAddress { + fn mock() -> Self { + IpAddress::new(mock()) + } + } + + impl Mock for NetAddress { + fn mock() -> Self { + NetAddress::new(IpAddress::new(mock()), mock()) + } + } + + impl Mock for ContextualNetAddress { + fn mock() -> Self { + ContextualNetAddress::new(mock(), mock()) + } + } + + impl Mock for RpcPeerInfo { + fn mock() -> Self { + RpcPeerInfo { + id: mock(), + address: mock(), + last_ping_duration: mock(), + is_outbound: mock(), + time_offset: mock(), + user_agent: "0.4.2".to_string(), + advertised_protocol_version: mock(), + time_connected: mock(), + is_ibd_peer: mock(), + } + } + } + + impl Mock for RpcMempoolEntry { + fn mock() -> Self { + RpcMempoolEntry { fee: mock(), transaction: mock(), is_orphan: mock() } + } + } + + impl Mock for RpcMempoolEntryByAddress { + fn mock() -> Self { + RpcMempoolEntryByAddress { address: mock(), sending: mock(), receiving: mock() } + } + } + + impl Mock for ScriptPublicKey { + fn mock() -> Self { + let mut bytes: [u8; 36] = [0; 36]; + rand::thread_rng().fill(&mut bytes[..]); + ScriptPublicKey::from_vec(0, bytes.to_vec()) + } + } + + impl Mock for RpcUtxoEntry { + fn mock() -> Self { + RpcUtxoEntry { amount: mock(), script_public_key: mock(), block_daa_score: mock(), is_coinbase: true } + } + } + + impl Mock for RpcTransactionOutpoint { + fn mock() -> Self { + RpcTransactionOutpoint { transaction_id: mock(), index: mock() } + } + } + + impl Mock for RpcUtxosByAddressesEntry { + fn mock() -> Self { + RpcUtxosByAddressesEntry { address: mock(), outpoint: mock(), utxo_entry: mock() } + } + } + + impl Mock for ProcessMetrics { + fn mock() -> Self { + ProcessMetrics { + resident_set_size: mock(), + virtual_memory_size: mock(), + core_num: mock(), + cpu_usage: mock(), + fd_num: mock(), + disk_io_read_bytes: mock(), + disk_io_write_bytes: mock(), + disk_io_read_per_sec: mock(), + disk_io_write_per_sec: mock(), + } + } + } + + impl Mock for ConnectionMetrics { + fn mock() -> Self { + ConnectionMetrics { + borsh_live_connections: mock(), + borsh_connection_attempts: mock(), + borsh_handshake_failures: mock(), + json_live_connections: mock(), + json_connection_attempts: mock(), + json_handshake_failures: mock(), + active_peers: mock(), + } + } + } + + impl Mock for BandwidthMetrics { + fn mock() -> Self { + BandwidthMetrics { + borsh_bytes_tx: mock(), + borsh_bytes_rx: mock(), + json_bytes_tx: mock(), + json_bytes_rx: mock(), + p2p_bytes_tx: mock(), + p2p_bytes_rx: mock(), + grpc_bytes_tx: mock(), + grpc_bytes_rx: mock(), + } + } + } + + impl Mock for ConsensusMetrics { + fn mock() -> Self { + ConsensusMetrics { + node_blocks_submitted_count: mock(), + node_headers_processed_count: mock(), + node_dependencies_processed_count: mock(), + node_bodies_processed_count: mock(), + node_transactions_processed_count: mock(), + node_chain_blocks_processed_count: mock(), + node_mass_processed_count: mock(), + node_database_blocks_count: mock(), + node_database_headers_count: mock(), + network_mempool_size: mock(), + network_tip_hashes_count: mock(), + network_difficulty: mock(), + network_past_median_time: mock(), + network_virtual_parent_hashes_count: mock(), + network_virtual_daa_score: mock(), + } + } + } + + impl Mock for StorageMetrics { + fn mock() -> Self { + StorageMetrics { storage_size_bytes: mock() } + } + } + + // -------------------------------------------- + // implementations for all the rpc request + // and response data structures. 
+ + impl Mock for SubmitBlockRequest { + fn mock() -> Self { + SubmitBlockRequest { block: mock(), allow_non_daa_blocks: true } + } + } + + test!(SubmitBlockRequest); + + impl Mock for SubmitBlockResponse { + fn mock() -> Self { + SubmitBlockResponse { report: SubmitBlockReport::Success } + } + } + + test!(SubmitBlockResponse); + + impl Mock for GetBlockTemplateRequest { + fn mock() -> Self { + GetBlockTemplateRequest { pay_address: mock(), extra_data: vec![4, 2] } + } + } + + test!(GetBlockTemplateRequest); + + impl Mock for GetBlockTemplateResponse { + fn mock() -> Self { + GetBlockTemplateResponse { block: mock(), is_synced: true } + } + } + + test!(GetBlockTemplateResponse); + + impl Mock for GetBlockRequest { + fn mock() -> Self { + GetBlockRequest { hash: mock(), include_transactions: true } + } + } + + test!(GetBlockRequest); + + impl Mock for GetBlockResponse { + fn mock() -> Self { + GetBlockResponse { block: mock() } + } + } + + test!(GetBlockResponse); + + impl Mock for GetInfoRequest { + fn mock() -> Self { + GetInfoRequest {} + } + } + + test!(GetInfoRequest); + + impl Mock for GetInfoResponse { + fn mock() -> Self { + GetInfoResponse { + p2p_id: Hash::mock().to_string(), + mempool_size: mock(), + server_version: "0.4.2".to_string(), + is_utxo_indexed: true, + is_synced: false, + has_notify_command: true, + has_message_id: false, + } + } + } + + test!(GetInfoResponse); + + impl Mock for GetCurrentNetworkRequest { + fn mock() -> Self { + GetCurrentNetworkRequest {} + } + } + + test!(GetCurrentNetworkRequest); + + impl Mock for GetCurrentNetworkResponse { + fn mock() -> Self { + GetCurrentNetworkResponse { network: NetworkType::Mainnet } + } + } + + test!(GetCurrentNetworkResponse); + + impl Mock for GetPeerAddressesRequest { + fn mock() -> Self { + GetPeerAddressesRequest {} + } + } + + test!(GetPeerAddressesRequest); + + impl Mock for GetPeerAddressesResponse { + fn mock() -> Self { + GetPeerAddressesResponse { known_addresses: mock(), banned_addresses: mock() } + } + } + + test!(GetPeerAddressesResponse); + + impl Mock for GetSinkRequest { + fn mock() -> Self { + GetSinkRequest {} + } + } + + test!(GetSinkRequest); + + impl Mock for GetSinkResponse { + fn mock() -> Self { + GetSinkResponse { sink: mock() } + } + } + + test!(GetSinkResponse); + + impl Mock for GetMempoolEntryRequest { + fn mock() -> Self { + GetMempoolEntryRequest { transaction_id: mock(), include_orphan_pool: true, filter_transaction_pool: false } + } + } + + test!(GetMempoolEntryRequest); + + impl Mock for GetMempoolEntryResponse { + fn mock() -> Self { + GetMempoolEntryResponse { mempool_entry: RpcMempoolEntry { fee: mock(), transaction: mock(), is_orphan: false } } + } + } + + test!(GetMempoolEntryResponse); + + impl Mock for GetMempoolEntriesRequest { + fn mock() -> Self { + GetMempoolEntriesRequest { include_orphan_pool: true, filter_transaction_pool: false } + } + } + + test!(GetMempoolEntriesRequest); + + impl Mock for GetMempoolEntriesResponse { + fn mock() -> Self { + GetMempoolEntriesResponse { mempool_entries: mock() } + } + } + + test!(GetMempoolEntriesResponse); + + impl Mock for GetConnectedPeerInfoRequest { + fn mock() -> Self { + GetConnectedPeerInfoRequest {} + } + } + + test!(GetConnectedPeerInfoRequest); + + impl Mock for GetConnectedPeerInfoResponse { + fn mock() -> Self { + GetConnectedPeerInfoResponse { peer_info: mock() } + } + } + + test!(GetConnectedPeerInfoResponse); + + impl Mock for AddPeerRequest { + fn mock() -> Self { + AddPeerRequest { peer_address: mock(), is_permanent: mock() 
} + } + } + + test!(AddPeerRequest); + + impl Mock for AddPeerResponse { + fn mock() -> Self { + AddPeerResponse {} + } + } + + test!(AddPeerResponse); + + impl Mock for SubmitTransactionRequest { + fn mock() -> Self { + SubmitTransactionRequest { transaction: mock(), allow_orphan: mock() } + } + } + + test!(SubmitTransactionRequest); + + impl Mock for SubmitTransactionResponse { + fn mock() -> Self { + SubmitTransactionResponse { transaction_id: mock() } + } + } + + test!(SubmitTransactionResponse); + + impl Mock for GetSubnetworkRequest { + fn mock() -> Self { + GetSubnetworkRequest { subnetwork_id: mock() } + } + } + + test!(GetSubnetworkRequest); + + impl Mock for GetSubnetworkResponse { + fn mock() -> Self { + GetSubnetworkResponse { gas_limit: mock() } + } + } + + test!(GetSubnetworkResponse); + + impl Mock for GetVirtualChainFromBlockRequest { + fn mock() -> Self { + GetVirtualChainFromBlockRequest { start_hash: mock(), include_accepted_transaction_ids: mock() } + } + } + + test!(GetVirtualChainFromBlockRequest); + + impl Mock for RpcAcceptedTransactionIds { + fn mock() -> Self { + RpcAcceptedTransactionIds { accepting_block_hash: mock(), accepted_transaction_ids: mock() } + } + } + + impl Mock for GetVirtualChainFromBlockResponse { + fn mock() -> Self { + GetVirtualChainFromBlockResponse { + removed_chain_block_hashes: mock(), + added_chain_block_hashes: mock(), + accepted_transaction_ids: mock(), + } + } + } + + test!(GetVirtualChainFromBlockResponse); + + impl Mock for GetBlocksRequest { + fn mock() -> Self { + GetBlocksRequest { low_hash: mock(), include_blocks: mock(), include_transactions: mock() } + } + } + + test!(GetBlocksRequest); + + impl Mock for GetBlocksResponse { + fn mock() -> Self { + GetBlocksResponse { block_hashes: mock(), blocks: mock() } + } + } + + test!(GetBlocksResponse); + + impl Mock for GetBlockCountRequest { + fn mock() -> Self { + GetBlockCountRequest {} + } + } + + test!(GetBlockCountRequest); + + impl Mock for BlockCount { + fn mock() -> Self { + BlockCount { header_count: mock(), block_count: mock() } + } + } + + test!(BlockCount); + + impl Mock for GetBlockDagInfoRequest { + fn mock() -> Self { + GetBlockDagInfoRequest {} + } + } + + test!(GetBlockDagInfoRequest); + + impl Mock for GetBlockDagInfoResponse { + fn mock() -> Self { + GetBlockDagInfoResponse { + network: NetworkType::Mainnet.try_into().unwrap(), + block_count: mock(), + header_count: mock(), + tip_hashes: mock(), + difficulty: mock(), + past_median_time: mock(), + virtual_parent_hashes: mock(), + pruning_point_hash: mock(), + virtual_daa_score: mock(), + sink: mock(), + } + } + } + + test!(GetBlockDagInfoResponse); + + impl Mock for ResolveFinalityConflictRequest { + fn mock() -> Self { + ResolveFinalityConflictRequest { finality_block_hash: mock() } + } + } + + test!(ResolveFinalityConflictRequest); + + impl Mock for ResolveFinalityConflictResponse { + fn mock() -> Self { + ResolveFinalityConflictResponse {} + } + } + + test!(ResolveFinalityConflictResponse); + + impl Mock for ShutdownRequest { + fn mock() -> Self { + ShutdownRequest {} + } + } + + test!(ShutdownRequest); + + impl Mock for ShutdownResponse { + fn mock() -> Self { + ShutdownResponse {} + } + } + + test!(ShutdownResponse); + + impl Mock for GetHeadersRequest { + fn mock() -> Self { + GetHeadersRequest { start_hash: mock(), limit: mock(), is_ascending: mock() } + } + } + + test!(GetHeadersRequest); + + impl Mock for GetHeadersResponse { + fn mock() -> Self { + GetHeadersResponse { headers: mock() } + } + } + + 
test!(GetHeadersResponse); + + impl Mock for GetBalanceByAddressRequest { + fn mock() -> Self { + GetBalanceByAddressRequest { address: mock() } + } + } + + test!(GetBalanceByAddressRequest); + + impl Mock for GetBalanceByAddressResponse { + fn mock() -> Self { + GetBalanceByAddressResponse { balance: mock() } + } + } + + test!(GetBalanceByAddressResponse); + + impl Mock for GetBalancesByAddressesRequest { + fn mock() -> Self { + GetBalancesByAddressesRequest { addresses: mock() } + } + } + + test!(GetBalancesByAddressesRequest); + + impl Mock for RpcBalancesByAddressesEntry { + fn mock() -> Self { + RpcBalancesByAddressesEntry { address: mock(), balance: mock() } + } + } + + impl Mock for GetBalancesByAddressesResponse { + fn mock() -> Self { + GetBalancesByAddressesResponse { entries: mock() } + } + } + + test!(GetBalancesByAddressesResponse); + + impl Mock for GetSinkBlueScoreRequest { + fn mock() -> Self { + GetSinkBlueScoreRequest {} + } + } + + test!(GetSinkBlueScoreRequest); + + impl Mock for GetSinkBlueScoreResponse { + fn mock() -> Self { + GetSinkBlueScoreResponse { blue_score: mock() } + } + } + + test!(GetSinkBlueScoreResponse); + + impl Mock for GetUtxosByAddressesRequest { + fn mock() -> Self { + GetUtxosByAddressesRequest { addresses: mock() } + } + } + + test!(GetUtxosByAddressesRequest); + + impl Mock for GetUtxosByAddressesResponse { + fn mock() -> Self { + GetUtxosByAddressesResponse { entries: mock() } + } + } + + test!(GetUtxosByAddressesResponse); + + impl Mock for BanRequest { + fn mock() -> Self { + BanRequest { ip: mock() } + } + } + + test!(BanRequest); + + impl Mock for BanResponse { + fn mock() -> Self { + BanResponse {} + } + } + + test!(BanResponse); + + impl Mock for UnbanRequest { + fn mock() -> Self { + UnbanRequest { ip: mock() } + } + } + + test!(UnbanRequest); + + impl Mock for UnbanResponse { + fn mock() -> Self { + UnbanResponse {} + } + } + + test!(UnbanResponse); + + impl Mock for EstimateNetworkHashesPerSecondRequest { + fn mock() -> Self { + EstimateNetworkHashesPerSecondRequest { window_size: mock(), start_hash: mock() } + } + } + + test!(EstimateNetworkHashesPerSecondRequest); + + impl Mock for EstimateNetworkHashesPerSecondResponse { + fn mock() -> Self { + EstimateNetworkHashesPerSecondResponse { network_hashes_per_second: mock() } + } + } + + test!(EstimateNetworkHashesPerSecondResponse); + + impl Mock for GetMempoolEntriesByAddressesRequest { + fn mock() -> Self { + GetMempoolEntriesByAddressesRequest { addresses: mock(), include_orphan_pool: true, filter_transaction_pool: false } + } + } + + test!(GetMempoolEntriesByAddressesRequest); + + impl Mock for GetMempoolEntriesByAddressesResponse { + fn mock() -> Self { + GetMempoolEntriesByAddressesResponse { entries: mock() } + } + } + + test!(GetMempoolEntriesByAddressesResponse); + + impl Mock for GetCoinSupplyRequest { + fn mock() -> Self { + GetCoinSupplyRequest {} + } + } + + test!(GetCoinSupplyRequest); + + impl Mock for GetCoinSupplyResponse { + fn mock() -> Self { + GetCoinSupplyResponse { max_sompi: mock(), circulating_sompi: mock() } + } + } + + test!(GetCoinSupplyResponse); + + impl Mock for PingRequest { + fn mock() -> Self { + PingRequest {} + } + } + + test!(PingRequest); + + impl Mock for PingResponse { + fn mock() -> Self { + PingResponse {} + } + } + + test!(PingResponse); + + impl Mock for GetConnectionsRequest { + fn mock() -> Self { + GetConnectionsRequest { include_profile_data: false } + } + } + + test!(GetConnectionsRequest); + + impl Mock for GetConnectionsResponse { + fn 
mock() -> Self { + GetConnectionsResponse { clients: mock(), peers: mock(), profile_data: None } + } + } + + test!(GetConnectionsResponse); + + impl Mock for GetSystemInfoRequest { + fn mock() -> Self { + GetSystemInfoRequest {} + } + } + + test!(GetSystemInfoRequest); + + impl Mock for GetSystemInfoResponse { + fn mock() -> Self { + GetSystemInfoResponse { + version: "1.2.3".to_string(), + system_id: mock(), + git_hash: mock(), + cpu_physical_cores: mock(), + total_memory: mock(), + fd_limit: mock(), + proxy_socket_limit_per_cpu_core: mock(), + } + } + } + + test!(GetSystemInfoResponse); + + impl Mock for GetMetricsRequest { + fn mock() -> Self { + GetMetricsRequest { + process_metrics: true, + connection_metrics: true, + bandwidth_metrics: true, + consensus_metrics: true, + storage_metrics: true, + custom_metrics: false, + } + } + } + + test!(GetMetricsRequest); + + impl Mock for GetMetricsResponse { + fn mock() -> Self { + GetMetricsResponse { + server_time: mock(), + process_metrics: mock(), + connection_metrics: mock(), + bandwidth_metrics: mock(), + consensus_metrics: mock(), + storage_metrics: mock(), + custom_metrics: None, + } + } + } + + test!(GetMetricsResponse); + + impl Mock for GetServerInfoRequest { + fn mock() -> Self { + GetServerInfoRequest {} + } + } + + test!(GetServerInfoRequest); + + impl Mock for GetServerInfoResponse { + fn mock() -> Self { + GetServerInfoResponse { + rpc_api_version: mock(), + rpc_api_revision: mock(), + server_version: "0.4.2".to_string(), + network_id: NetworkType::Mainnet.try_into().unwrap(), + has_utxo_index: true, + is_synced: false, + virtual_daa_score: mock(), + } + } + } + + test!(GetServerInfoResponse); + + impl Mock for GetSyncStatusRequest { + fn mock() -> Self { + GetSyncStatusRequest {} + } + } + + test!(GetSyncStatusRequest); + + impl Mock for GetSyncStatusResponse { + fn mock() -> Self { + GetSyncStatusResponse { is_synced: true } + } + } + + test!(GetSyncStatusResponse); + + impl Mock for GetDaaScoreTimestampEstimateRequest { + fn mock() -> Self { + GetDaaScoreTimestampEstimateRequest { daa_scores: mock() } + } + } + + test!(GetDaaScoreTimestampEstimateRequest); + + impl Mock for GetDaaScoreTimestampEstimateResponse { + fn mock() -> Self { + GetDaaScoreTimestampEstimateResponse { timestamps: mock() } + } + } + + test!(GetDaaScoreTimestampEstimateResponse); + + impl Mock for NotifyBlockAddedRequest { + fn mock() -> Self { + NotifyBlockAddedRequest { command: Command::Start } + } + } + + test!(NotifyBlockAddedRequest); + + impl Mock for NotifyBlockAddedResponse { + fn mock() -> Self { + NotifyBlockAddedResponse {} + } + } + + test!(NotifyBlockAddedResponse); + + impl Mock for BlockAddedNotification { + fn mock() -> Self { + BlockAddedNotification { block: mock() } + } + } + + test!(BlockAddedNotification); + + impl Mock for NotifyVirtualChainChangedRequest { + fn mock() -> Self { + NotifyVirtualChainChangedRequest { command: Command::Start, include_accepted_transaction_ids: true } + } + } + + test!(NotifyVirtualChainChangedRequest); + + impl Mock for NotifyVirtualChainChangedResponse { + fn mock() -> Self { + NotifyVirtualChainChangedResponse {} + } + } + + test!(NotifyVirtualChainChangedResponse); + + impl Mock for VirtualChainChangedNotification { + fn mock() -> Self { + VirtualChainChangedNotification { + removed_chain_block_hashes: mock(), + added_chain_block_hashes: mock(), + accepted_transaction_ids: mock(), + } + } + } + + test!(VirtualChainChangedNotification); + + impl Mock for NotifyFinalityConflictRequest { + fn mock() -> 
Self { + NotifyFinalityConflictRequest { command: Command::Start } + } + } + + test!(NotifyFinalityConflictRequest); + + impl Mock for NotifyFinalityConflictResponse { + fn mock() -> Self { + NotifyFinalityConflictResponse {} + } + } + + test!(NotifyFinalityConflictResponse); + + impl Mock for FinalityConflictNotification { + fn mock() -> Self { + FinalityConflictNotification { violating_block_hash: mock() } + } + } + + test!(FinalityConflictNotification); + + impl Mock for NotifyFinalityConflictResolvedRequest { + fn mock() -> Self { + NotifyFinalityConflictResolvedRequest { command: Command::Start } + } + } + + test!(NotifyFinalityConflictResolvedRequest); + + impl Mock for NotifyFinalityConflictResolvedResponse { + fn mock() -> Self { + NotifyFinalityConflictResolvedResponse {} + } + } + + test!(NotifyFinalityConflictResolvedResponse); + + impl Mock for FinalityConflictResolvedNotification { + fn mock() -> Self { + FinalityConflictResolvedNotification { finality_block_hash: mock() } + } + } + + test!(FinalityConflictResolvedNotification); + + impl Mock for NotifyUtxosChangedRequest { + fn mock() -> Self { + NotifyUtxosChangedRequest { addresses: mock(), command: Command::Start } + } + } + + test!(NotifyUtxosChangedRequest); + + impl Mock for NotifyUtxosChangedResponse { + fn mock() -> Self { + NotifyUtxosChangedResponse {} + } + } + + test!(NotifyUtxosChangedResponse); + + impl Mock for UtxosChangedNotification { + fn mock() -> Self { + UtxosChangedNotification { added: mock(), removed: mock() } + } + } + + test!(UtxosChangedNotification); + + impl Mock for NotifySinkBlueScoreChangedRequest { + fn mock() -> Self { + NotifySinkBlueScoreChangedRequest { command: Command::Start } + } + } + + test!(NotifySinkBlueScoreChangedRequest); + + impl Mock for NotifySinkBlueScoreChangedResponse { + fn mock() -> Self { + NotifySinkBlueScoreChangedResponse {} + } + } + + test!(NotifySinkBlueScoreChangedResponse); + + impl Mock for SinkBlueScoreChangedNotification { + fn mock() -> Self { + SinkBlueScoreChangedNotification { sink_blue_score: mock() } + } + } + + test!(SinkBlueScoreChangedNotification); + + impl Mock for NotifyVirtualDaaScoreChangedRequest { + fn mock() -> Self { + NotifyVirtualDaaScoreChangedRequest { command: Command::Start } + } + } + + test!(NotifyVirtualDaaScoreChangedRequest); + + impl Mock for NotifyVirtualDaaScoreChangedResponse { + fn mock() -> Self { + NotifyVirtualDaaScoreChangedResponse {} + } + } + + test!(NotifyVirtualDaaScoreChangedResponse); + + impl Mock for VirtualDaaScoreChangedNotification { + fn mock() -> Self { + VirtualDaaScoreChangedNotification { virtual_daa_score: mock() } + } + } + + test!(VirtualDaaScoreChangedNotification); + + impl Mock for NotifyPruningPointUtxoSetOverrideRequest { + fn mock() -> Self { + NotifyPruningPointUtxoSetOverrideRequest { command: Command::Start } + } + } + + test!(NotifyPruningPointUtxoSetOverrideRequest); + + impl Mock for NotifyPruningPointUtxoSetOverrideResponse { + fn mock() -> Self { + NotifyPruningPointUtxoSetOverrideResponse {} + } + } + + test!(NotifyPruningPointUtxoSetOverrideResponse); + + impl Mock for PruningPointUtxoSetOverrideNotification { + fn mock() -> Self { + PruningPointUtxoSetOverrideNotification {} + } + } + + test!(PruningPointUtxoSetOverrideNotification); + + impl Mock for NotifyNewBlockTemplateRequest { + fn mock() -> Self { + NotifyNewBlockTemplateRequest { command: Command::Start } + } + } + + test!(NotifyNewBlockTemplateRequest); + + impl Mock for NotifyNewBlockTemplateResponse { + fn mock() -> Self { + 
NotifyNewBlockTemplateResponse {}
+        }
+    }
+
+    test!(NotifyNewBlockTemplateResponse);
+
+    impl Mock for NewBlockTemplateNotification {
+        fn mock() -> Self {
+            NewBlockTemplateNotification {}
+        }
+    }
+
+    test!(NewBlockTemplateNotification);
+
+    impl Mock for SubscribeResponse {
+        fn mock() -> Self {
+            SubscribeResponse::new(mock())
+        }
+    }
+
+    test!(SubscribeResponse);
+
+    impl Mock for UnsubscribeResponse {
+        fn mock() -> Self {
+            UnsubscribeResponse {}
+        }
+    }
+
+    test!(UnsubscribeResponse);
+
+    struct Misalign;
+
+    impl Mock for Misalign {
+        fn mock() -> Self {
+            Misalign
+        }
+    }
+
+    impl Serializer for Misalign {
+        fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+            store!(u32, &1, writer)?;
+            store!(u32, &2, writer)?;
+            store!(u32, &3, writer)?;
+            Ok(())
+        }
+    }
+
+    impl Deserializer for Misalign {
+        fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+            let version: u32 = load!(u32, reader)?;
+            assert_eq!(version, 1);
+            Ok(Self)
+        }
+    }
+
+    #[test]
+    #[should_panic]
+    fn test_misalignment() {
+        test::<Misalign>("Misalign");
+    }
+}
diff --git a/rpc/core/src/model/tx.rs b/rpc/core/src/model/tx.rs
index bb13f797de..0c17e26f53 100644
--- a/rpc/core/src/model/tx.rs
+++ b/rpc/core/src/model/tx.rs
@@ -1,9 +1,12 @@
 use borsh::{BorshDeserialize, BorshSerialize};
 use kaspa_addresses::Address;
 use kaspa_consensus_core::tx::{
-    ScriptPublicKey, ScriptVec, TransactionId, TransactionInput, TransactionOutpoint, TransactionOutput, UtxoEntry,
+    ScriptPublicKey, ScriptVec, TransactionId, TransactionIndexType, TransactionInput, TransactionOutpoint, TransactionOutput,
+    UtxoEntry,
 };
+use kaspa_utils::{hex::ToHex, serde_bytes_fixed_ref};
 use serde::{Deserialize, Serialize};
+use workflow_serializer::prelude::*;
 
 use crate::prelude::{RpcHash, RpcScriptClass, RpcSubnetworkId};
 
@@ -12,13 +15,123 @@ pub type RpcTransactionId = TransactionId;
 pub type RpcScriptVec = ScriptVec;
 pub type RpcScriptPublicKey = ScriptPublicKey;
-pub type RpcUtxoEntry = UtxoEntry;
+
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct RpcUtxoEntry {
+    pub amount: u64,
+    pub script_public_key: ScriptPublicKey,
+    pub block_daa_score: u64,
+    pub is_coinbase: bool,
+}
+
+impl RpcUtxoEntry {
+    pub fn new(amount: u64, script_public_key: ScriptPublicKey, block_daa_score: u64, is_coinbase: bool) -> Self {
+        Self { amount, script_public_key, block_daa_score, is_coinbase }
+    }
+}
+
+impl From<UtxoEntry> for RpcUtxoEntry {
+    fn from(entry: UtxoEntry) -> Self {
+        Self {
+            amount: entry.amount,
+            script_public_key: entry.script_public_key,
+            block_daa_score: entry.block_daa_score,
+            is_coinbase: entry.is_coinbase,
+        }
+    }
+}
+
+impl From<RpcUtxoEntry> for UtxoEntry {
+    fn from(entry: RpcUtxoEntry) -> Self {
+        Self {
+            amount: entry.amount,
+            script_public_key: entry.script_public_key,
+            block_daa_score: entry.block_daa_score,
+            is_coinbase: entry.is_coinbase,
+        }
+    }
+}
+
+impl Serializer for RpcUtxoEntry {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u8, &1, writer)?;
+        store!(u64, &self.amount, writer)?;
+        store!(ScriptPublicKey, &self.script_public_key, writer)?;
+        store!(u64, &self.block_daa_score, writer)?;
+        store!(bool, &self.is_coinbase, writer)?;
+
+        Ok(())
+    }
+}
+
+impl Deserializer for RpcUtxoEntry {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u8, reader)?;
+        let amount = load!(u64, reader)?;
+        let script_public_key = load!(ScriptPublicKey, reader)?;
+        let block_daa_score = load!(u64, reader)?;
+        let is_coinbase = load!(bool, reader)?;
+
+        Ok(Self { amount, script_public_key, block_daa_score, is_coinbase })
+    }
+}
 
 /// Represents a Kaspa transaction outpoint
-pub type RpcTransactionOutpoint = TransactionOutpoint;
+#[derive(Eq, Hash, PartialEq, Debug, Copy, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct RpcTransactionOutpoint {
+    #[serde(with = "serde_bytes_fixed_ref")]
+    pub transaction_id: TransactionId,
+    pub index: TransactionIndexType,
+}
+
+impl From<TransactionOutpoint> for RpcTransactionOutpoint {
+    fn from(outpoint: TransactionOutpoint) -> Self {
+        Self { transaction_id: outpoint.transaction_id, index: outpoint.index }
+    }
+}
+
+impl From<RpcTransactionOutpoint> for TransactionOutpoint {
+    fn from(outpoint: RpcTransactionOutpoint) -> Self {
+        Self { transaction_id: outpoint.transaction_id, index: outpoint.index }
+    }
+}
+
+impl From<kaspa_consensus_client::TransactionOutpoint> for RpcTransactionOutpoint {
+    fn from(outpoint: kaspa_consensus_client::TransactionOutpoint) -> Self {
+        TransactionOutpoint::from(outpoint).into()
+    }
+}
+
+impl From<RpcTransactionOutpoint> for kaspa_consensus_client::TransactionOutpoint {
+    fn from(outpoint: RpcTransactionOutpoint) -> Self {
+        TransactionOutpoint::from(outpoint).into()
+    }
+}
+
+impl Serializer for RpcTransactionOutpoint {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u8, &1, writer)?;
+        store!(TransactionId, &self.transaction_id, writer)?;
+        store!(TransactionIndexType, &self.index, writer)?;
+
+        Ok(())
+    }
+}
+
+impl Deserializer for RpcTransactionOutpoint {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u8, reader)?;
+        let transaction_id = load!(TransactionId, reader)?;
+        let index = load!(TransactionIndexType, reader)?;
+
+        Ok(Self { transaction_id, index })
+    }
+}
 
 /// Represents a Kaspa transaction input
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcTransactionInput {
     pub previous_outpoint: RpcTransactionOutpoint,
@@ -29,10 +142,22 @@ pub struct RpcTransactionInput {
     pub verbose_data: Option<RpcTransactionInputVerboseData>,
 }
 
+impl std::fmt::Debug for RpcTransactionInput {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("RpcTransactionInput")
+            .field("previous_outpoint", &self.previous_outpoint)
+            .field("signature_script", &self.signature_script.to_hex())
+            .field("sequence", &self.sequence)
+            .field("sig_op_count", &self.sig_op_count)
+            .field("verbose_data", &self.verbose_data)
+            .finish()
+    }
+}
+
 impl From<TransactionInput> for RpcTransactionInput {
     fn from(input: TransactionInput) -> Self {
         Self {
-            previous_outpoint: input.previous_outpoint,
+            previous_outpoint: input.previous_outpoint.into(),
             signature_script: input.signature_script,
             sequence: input.sequence,
             sig_op_count: input.sig_op_count,
@@ -47,13 +172,53 @@ impl RpcTransactionInput {
     }
 }
 
+impl Serializer for RpcTransactionInput {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u8, &1, writer)?;
+        serialize!(RpcTransactionOutpoint, &self.previous_outpoint, writer)?;
+        store!(Vec<u8>, &self.signature_script, writer)?;
+        store!(u64, &self.sequence, writer)?;
+        store!(u8, &self.sig_op_count, writer)?;
+        serialize!(Option<RpcTransactionInputVerboseData>, &self.verbose_data, writer)?;
+
+        Ok(())
+    }
+}
+
+impl Deserializer for RpcTransactionInput {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u8, reader)?;
+        let previous_outpoint = deserialize!(RpcTransactionOutpoint, reader)?;
+        let signature_script = load!(Vec<u8>, reader)?;
+        let sequence = load!(u64, reader)?;
+        let sig_op_count = load!(u8, reader)?;
+        let verbose_data = deserialize!(Option<RpcTransactionInputVerboseData>, reader)?;
+
+        Ok(Self { previous_outpoint, signature_script, sequence, sig_op_count, verbose_data })
+    }
+}
+
 /// Represent Kaspa transaction input verbose data
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcTransactionInputVerboseData {}
 
+impl Serializer for RpcTransactionInputVerboseData {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u8, &1, writer)?;
+        Ok(())
+    }
+}
+
+impl Deserializer for RpcTransactionInputVerboseData {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u8, reader)?;
+        Ok(Self {})
+    }
+}
+
 /// Represents a Kaspad transaction output
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcTransactionOutput {
     pub value: u64,
@@ -73,16 +238,58 @@ impl From<TransactionOutput> for RpcTransactionOutput {
     }
 }
 
+impl Serializer for RpcTransactionOutput {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u8, &1, writer)?;
+        store!(u64, &self.value, writer)?;
+        store!(RpcScriptPublicKey, &self.script_public_key, writer)?;
+        serialize!(Option<RpcTransactionOutputVerboseData>, &self.verbose_data, writer)?;
+
+        Ok(())
+    }
+}
+
+impl Deserializer for RpcTransactionOutput {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u8, reader)?;
+        let value = load!(u64, reader)?;
+        let script_public_key = load!(RpcScriptPublicKey, reader)?;
+        let verbose_data = deserialize!(Option<RpcTransactionOutputVerboseData>, reader)?;
+
+        Ok(Self { value, script_public_key, verbose_data })
+    }
+}
+
 /// Represent Kaspa transaction output verbose data
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Debug, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcTransactionOutputVerboseData {
     pub script_public_key_type: RpcScriptClass,
     pub script_public_key_address: Address,
 }
 
+impl Serializer for RpcTransactionOutputVerboseData {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        store!(u8, &1, writer)?;
+        store!(RpcScriptClass, &self.script_public_key_type, writer)?;
+        store!(Address, &self.script_public_key_address, writer)?;
+
+        Ok(())
+    }
+}
+
+impl Deserializer for RpcTransactionOutputVerboseData {
+    fn deserialize<R: std::io::Read>(reader: &mut R) -> std::io::Result<Self> {
+        let _version = load!(u8, reader)?;
+        let script_public_key_type = load!(RpcScriptClass, reader)?;
+        let script_public_key_address = load!(Address, reader)?;
+
+        Ok(Self { script_public_key_type, script_public_key_address })
+    }
+}
+
 /// Represents a Kaspa transaction
-#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcTransaction {
     pub version: u16,
@@ -97,17 +304,93 @@ pub struct RpcTransaction {
     pub verbose_data: Option<RpcTransactionVerboseData>,
 }
 
+impl std::fmt::Debug for RpcTransaction {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("RpcTransaction")
+            .field("version", &self.version)
+            .field("lock_time", &self.lock_time)
+            .field("subnetwork_id", &self.subnetwork_id)
+            .field("gas", &self.gas)
+            .field("payload", &self.payload.to_hex())
+            .field("mass", &self.mass)
+            .field("inputs", &self.inputs) // Inputs and outputs are placed purposely at the end for better debug visibility
.field("outputs", &self.outputs) + .field("verbose_data", &self.verbose_data) + .finish() + } +} + +impl Serializer for RpcTransaction { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u16, &1, writer)?; + store!(u16, &self.version, writer)?; + serialize!(Vec, &self.inputs, writer)?; + serialize!(Vec, &self.outputs, writer)?; + store!(u64, &self.lock_time, writer)?; + store!(RpcSubnetworkId, &self.subnetwork_id, writer)?; + store!(u64, &self.gas, writer)?; + store!(Vec, &self.payload, writer)?; + store!(u64, &self.mass, writer)?; + serialize!(Option, &self.verbose_data, writer)?; + + Ok(()) + } +} + +impl Deserializer for RpcTransaction { + fn deserialize(reader: &mut R) -> std::io::Result { + let _struct_version = load!(u16, reader)?; + let version = load!(u16, reader)?; + let inputs = deserialize!(Vec, reader)?; + let outputs = deserialize!(Vec, reader)?; + let lock_time = load!(u64, reader)?; + let subnetwork_id = load!(RpcSubnetworkId, reader)?; + let gas = load!(u64, reader)?; + let payload = load!(Vec, reader)?; + let mass = load!(u64, reader)?; + let verbose_data = deserialize!(Option, reader)?; + + Ok(Self { version, inputs, outputs, lock_time, subnetwork_id, gas, payload, mass, verbose_data }) + } +} + /// Represent Kaspa transaction verbose data -#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct RpcTransactionVerboseData { pub transaction_id: RpcTransactionId, pub hash: RpcHash, - pub mass: u64, + pub compute_mass: u64, pub block_hash: RpcHash, pub block_time: u64, } +impl Serializer for RpcTransactionVerboseData { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + store!(u8, &1, writer)?; + store!(RpcTransactionId, &self.transaction_id, writer)?; + store!(RpcHash, &self.hash, writer)?; + store!(u64, &self.compute_mass, writer)?; + store!(RpcHash, &self.block_hash, writer)?; + store!(u64, &self.block_time, writer)?; + + Ok(()) + } +} + +impl Deserializer for RpcTransactionVerboseData { + fn deserialize(reader: &mut R) -> std::io::Result { + let _version = load!(u8, reader)?; + let transaction_id = load!(RpcTransactionId, reader)?; + let hash = load!(RpcHash, reader)?; + let compute_mass = load!(u64, reader)?; + let block_hash = load!(RpcHash, reader)?; + let block_time = load!(u64, reader)?; + + Ok(Self { transaction_id, hash, compute_mass, block_hash, block_time }) + } +} + /// Represents accepted transaction ids #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] #[serde(rename_all = "camelCase")] diff --git a/rpc/core/src/notify/mod.rs b/rpc/core/src/notify/mod.rs index 088483e8fa..e6dc1be062 100644 --- a/rpc/core/src/notify/mod.rs +++ b/rpc/core/src/notify/mod.rs @@ -1,3 +1,7 @@ +//! +//! Notification structures used by the RPC subsystem. +//! + pub mod channel; pub mod collector; pub mod connection; diff --git a/rpc/core/src/wasm/convert.rs b/rpc/core/src/wasm/convert.rs index 0c33cf0ec3..319f74bf0c 100644 --- a/rpc/core/src/wasm/convert.rs +++ b/rpc/core/src/wasm/convert.rs @@ -1,12 +1,15 @@ +//! +//! WASM specific conversion functions +//! 
+ use crate::model::*; use kaspa_consensus_client::*; -use kaspa_consensus_core::tx as cctx; use std::sync::Arc; impl From for UtxoEntry { fn from(entry: RpcUtxosByAddressesEntry) -> UtxoEntry { let RpcUtxosByAddressesEntry { address, outpoint, utxo_entry } = entry; - let cctx::UtxoEntry { amount, script_public_key, block_daa_score, is_coinbase } = utxo_entry; + let RpcUtxoEntry { amount, script_public_key, block_daa_score, is_coinbase } = utxo_entry; UtxoEntry { address, outpoint: outpoint.into(), amount, script_public_key, block_daa_score, is_coinbase } } } @@ -31,7 +34,7 @@ cfg_if::cfg_if! { let inner = tx_input.inner(); RpcTransactionInput { previous_outpoint: inner.previous_outpoint.clone().into(), - signature_script: inner.signature_script.clone(), + signature_script: inner.signature_script.clone().unwrap_or_default(), sequence: inner.sequence, sig_op_count: inner.sig_op_count, verbose_data: None, @@ -68,7 +71,7 @@ cfg_if::cfg_if! { subnetwork_id: inner.subnetwork_id.clone(), gas: inner.gas, payload: inner.payload.clone(), - mass: 0, // TODO: apply mass to all external APIs including wasm + mass: inner.mass, verbose_data: None, } } diff --git a/rpc/core/src/wasm/message.rs b/rpc/core/src/wasm/message.rs index 56af183dbf..85c0857023 100644 --- a/rpc/core/src/wasm/message.rs +++ b/rpc/core/src/wasm/message.rs @@ -1,5 +1,8 @@ -#![allow(non_snake_case)] +//! +//! WASM interfaces and conversion to and from RPC messages. +//! +#![allow(non_snake_case)] use crate::error::RpcError as Error; use crate::error::RpcResult as Result; use crate::model::*; @@ -7,6 +10,7 @@ use kaspa_addresses::Address; use kaspa_addresses::AddressOrStringArrayT; use kaspa_consensus_client::Transaction; use kaspa_consensus_client::UtxoEntryReference; +use kaspa_consensus_core::tx as cctx; use kaspa_rpc_macros::declare_typescript_wasm_interface as declare; pub use serde_wasm_bindgen::from_value; use wasm_bindgen::prelude::*; @@ -317,6 +321,38 @@ try_from! ( args: GetMetricsResponse, IGetMetricsResponse, { // --- +declare! { + IGetConnectionsRequest, + r#" + /** + * @category Node RPC + */ + export interface IGetConnectionsRequest { } + "#, +} + +try_from! ( args: IGetConnectionsRequest, GetConnectionsRequest, { + Ok(from_value(args.into())?) +}); + +declare! { + IGetConnectionsResponse, + r#" + /** + * @category Node RPC + */ + export interface IGetConnectionsResponse { + [key: string]: any + } + "#, +} + +try_from! ( args: GetConnectionsResponse, IGetConnectionsResponse, { + Ok(to_value(&args)?.into()) +}); + +// --- + declare! { IGetSinkRequest, r#" @@ -788,7 +824,7 @@ declare! { } try_from! ( args: IGetBlockTemplateRequest, GetBlockTemplateRequest, { - let pay_address = args.get_cast::
("payAddress")?.into_owned(); + let pay_address = args.cast_into::
("payAddress")?; let extra_data = if let Some(extra_data) = args.try_get_value("extraData")? { if let Some(text) = extra_data.as_string() { text.into_bytes() @@ -813,7 +849,7 @@ declare! { * @category Node RPC */ export interface IGetBlockTemplateResponse { - block : IBlock; + block : IRawBlock; } "#, } @@ -824,6 +860,44 @@ try_from! ( args: GetBlockTemplateResponse, IGetBlockTemplateResponse, { // --- +declare! { + IGetCurrentBlockColorRequest, + r#" + /** + * + * + * @category Node RPC + */ + export interface IGetCurrentBlockColorRequest { + hash: HexString; + } + "#, +} + +try_from! ( args: IGetCurrentBlockColorRequest, GetCurrentBlockColorRequest, { + Ok(from_value(args.into())?) +}); + +declare! { + IGetCurrentBlockColorResponse, + r#" + /** + * + * + * @category Node RPC + */ + export interface IGetCurrentBlockColorResponse { + blue: boolean; + } + "#, +} + +try_from! ( args: GetCurrentBlockColorResponse, IGetCurrentBlockColorResponse, { + Ok(to_value(&args)?.into()) +}); + +// --- + declare! { IGetDaaScoreTimestampEstimateRequest, r#" @@ -1129,7 +1203,7 @@ declare! { * @category Node RPC */ export interface IGetUtxosByAddressesResponse { - entries : IUtxoEntry[]; + entries : UtxoEntryReference[]; } "#, } @@ -1231,7 +1305,7 @@ declare! { * @category Node RPC */ export interface ISubmitBlockRequest { - block : IBlock; + block : IRawBlock; allowNonDAABlocks: boolean; } "#, @@ -1292,6 +1366,66 @@ try_from! ( args: SubmitBlockResponse, ISubmitBlockResponse, { // --- +declare! { + ISubmitTransactionReplacementRequest, + // "ISubmitTransactionRequest | Transaction", + r#" + /** + * Submit transaction replacement to the node. + * + * @category Node RPC + */ + export interface ISubmitTransactionReplacementRequest { + transaction : Transaction, + } + "#, +} + +try_from! ( args: ISubmitTransactionReplacementRequest, SubmitTransactionReplacementRequest, { + let transaction = if let Some(transaction) = args.try_get_value("transaction")? { + transaction + } else { + args.into() + }; + + let request = if let Ok(transaction) = Transaction::try_owned_from(&transaction) { + SubmitTransactionReplacementRequest { + transaction : transaction.into(), + } + } else { + from_value(transaction)? + }; + Ok(request) +}); + +declare! { + ISubmitTransactionReplacementResponse, + r#" + /** + * + * + * @category Node RPC + */ + export interface ISubmitTransactionReplacementResponse { + transactionId : HexString; + replacedTransaction: Transaction; + } + "#, +} + +try_from! ( args: SubmitTransactionReplacementResponse, ISubmitTransactionReplacementResponse, { + let transaction_id = args.transaction_id; + let replaced_transaction = cctx::Transaction::try_from(args.replaced_transaction)?; + let replaced_transaction = Transaction::from(replaced_transaction); + + let response = ISubmitTransactionReplacementResponse::default(); + response.set("transactionId", &transaction_id.into())?; + response.set("replacedTransaction", &replaced_transaction.into())?; + Ok(response) +}); + +// --- + declare! { ISubmitTransactionRequest, // "ISubmitTransactionRequest | Transaction", @@ -1322,7 +1456,11 @@ try_from! ( args: ISubmitTransactionRequest, SubmitTransactionRequest, { allow_orphan, } } else { - from_value(transaction)? + let tx = Transaction::try_cast_from(&transaction)?; + SubmitTransactionRequest { + transaction : tx.as_ref().into(), + allow_orphan, + } }; Ok(request) }); @@ -1383,3 +1521,210 @@ declare! { try_from! ( args: UnbanResponse, IUnbanResponse, { Ok(to_value(&args)?.into()) }); + +// --- + +declare! 
{
+    IFeerateBucket,
+    r#"
+    /**
+     *
+     *
+     * @category Node RPC
+     */
+    export interface IFeerateBucket {
+        /**
+         * The fee/mass ratio estimated to be required for inclusion time <= estimated_seconds
+         */
+        feerate : number;
+        /**
+         * The estimated inclusion time for a transaction with fee/mass = feerate
+         */
+        estimatedSeconds : number;
+    }
+    "#,
+}
+
+declare! {
+    IFeeEstimate,
+    r#"
+    /**
+     *
+     *
+     * @category Node RPC
+     */
+    export interface IFeeEstimate {
+        /**
+         * *Top-priority* feerate bucket. Provides an estimation of the feerate required for sub-second DAG inclusion.
+         *
+         * Note: for all buckets, feerate values represent fee/mass of a transaction in `sompi/gram` units.
+         * Given a feerate value recommendation, calculate the required fee by
+         * taking the transaction mass and multiplying it by feerate: `fee = feerate * mass(tx)`
+         */
+
+        priorityBucket : IFeerateBucket;
+        /**
+         * A vector of *normal* priority feerate values. The first value of this vector is guaranteed to exist and
+         * provide an estimation for sub-*minute* DAG inclusion. All other values will have shorter estimation
+         * times than all `low_bucket` values. Therefore, by chaining `[priority] | normal | low` and interpolating
+         * between them, one can compose a complete feerate function on the client side. The API makes an effort
+         * to sample enough "interesting" points on the feerate-to-time curve, so that the interpolation is meaningful.
+         */
+
+        normalBuckets : IFeerateBucket[];
+        /**
+         * An array of *low* priority feerate values. The first value of this array is guaranteed to
+         * exist and provide an estimation for sub-*hour* DAG inclusion.
+         */
+        lowBuckets : IFeerateBucket[];
+    }
+    "#,
+}
+
+try_from!( estimate: RpcFeeEstimate, IFeeEstimate, {
+
+    let priority_bucket = IFeerateBucket::default();
+    priority_bucket.set("feerate", &estimate.priority_bucket.feerate.into())?;
+    priority_bucket.set("estimatedSeconds", &estimate.priority_bucket.estimated_seconds.into())?;
+
+    let normal_buckets = estimate.normal_buckets.into_iter().map(|normal_bucket| {
+        let bucket = IFeerateBucket::default();
+        bucket.set("feerate", &normal_bucket.feerate.into())?;
+        bucket.set("estimatedSeconds", &normal_bucket.estimated_seconds.into())?;
+        Ok(bucket)
+    }).collect::<Result<Vec<IFeerateBucket>>>()?;
+
+    let low_buckets = estimate.low_buckets.into_iter().map(|low_bucket| {
+        let bucket = IFeerateBucket::default();
+        bucket.set("feerate", &low_bucket.feerate.into())?;
+        bucket.set("estimatedSeconds", &low_bucket.estimated_seconds.into())?;
+        Ok(bucket)
+    }).collect::<Result<Vec<IFeerateBucket>>>()?;
+
+    let estimate = IFeeEstimate::default();
+    estimate.set("priorityBucket", &priority_bucket)?;
+    estimate.set("normalBuckets", &js_sys::Array::from_iter(normal_buckets))?;
+    estimate.set("lowBuckets", &js_sys::Array::from_iter(low_buckets))?;
+
+    Ok(estimate)
+});
+
+// ---
+
+declare! {
+    IGetFeeEstimateRequest,
+    r#"
+    /**
+     * Get fee estimate from the node.
+     *
+     * @category Node RPC
+     */
+    export interface IGetFeeEstimateRequest { }
+    "#,
+}
+
+try_from! ( args: IGetFeeEstimateRequest, GetFeeEstimateRequest, {
+    Ok(from_value(args.into())?)
+});
+
+declare! {
+    IGetFeeEstimateResponse,
+    r#"
+    /**
+     *
+     *
+     * @category Node RPC
+     */
+    export interface IGetFeeEstimateResponse {
+        estimate : IFeeEstimate;
+    }
+    "#,
+}
+
+try_from!( args: GetFeeEstimateResponse, IGetFeeEstimateResponse, {
+    let estimate = IFeeEstimate::try_from(args.estimate)?;
+    let response = IGetFeeEstimateResponse::default();
+    response.set("estimate", &estimate)?;
+    Ok(response)
+});
+
+// ---
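The `fee = feerate * mass(tx)` rule documented above is worth making concrete. A sketch with hypothetical numbers; in practice the feerate would be read from a bucket such as `priorityBucket.feerate` and the mass from the transaction itself:

// Sketch only: applying the documented rule `fee = feerate * mass(tx)`.
// Feerate is fee/mass in sompi/gram; the numbers below are hypothetical.
fn required_fee(feerate: f64, mass: u64) -> u64 {
    (feerate * mass as f64).ceil() as u64 // round up so the target bucket is still met
}

fn main() {
    let priority_feerate = 1.25; // e.g. taken from priorityBucket.feerate
    let tx_mass = 2_036; // transaction mass in grams
    println!("required fee: {} sompi", required_fee(priority_feerate, tx_mass));
}

+
+declare! 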
{ + IFeeEstimateVerboseExperimentalData, + r#" + /** + * + * + * @category Node RPC + */ + export interface IFeeEstimateVerboseExperimentalData { + mempoolReadyTransactionsCount : bigint; + mempoolReadyTransactionsTotalMass : bigint; + networkMassPerSecond : bigint; + nextBlockTemplateFeerateMin : number; + nextBlockTemplateFeerateMedian : number; + nextBlockTemplateFeerateMax : number; + } + "#, +} + +try_from!( data: RpcFeeEstimateVerboseExperimentalData, IFeeEstimateVerboseExperimentalData, { + + let target = IFeeEstimateVerboseExperimentalData::default(); + target.set("mempoolReadyTransactionsCount", &js_sys::BigInt::from(data.mempool_ready_transactions_count).into())?; + target.set("mempoolReadyTransactionsTotalMass", &js_sys::BigInt::from(data.mempool_ready_transactions_total_mass).into())?; + target.set("networkMassPerSecond", &js_sys::BigInt::from(data.network_mass_per_second).into())?; + target.set("nextBlockTemplateFeerateMin", &data.next_block_template_feerate_min.into())?; + target.set("nextBlockTemplateFeerateMedian", &data.next_block_template_feerate_median.into())?; + target.set("nextBlockTemplateFeerateMax", &data.next_block_template_feerate_max.into())?; + + Ok(target) +}); + +declare! { + IGetFeeEstimateExperimentalRequest, + // "ISubmitTransactionRequest | Transaction", + r#" + /** + * Get fee estimate from the node. + * + * @category Node RPC + */ + export interface IGetFeeEstimateExperimentalRequest { } + "#, +} + +try_from! ( args: IGetFeeEstimateExperimentalRequest, GetFeeEstimateExperimentalRequest, { + Ok(from_value(args.into())?) +}); + +declare! { + IGetFeeEstimateExperimentalResponse, + r#" + /** + * + * + * @category Node RPC + */ + export interface IGetFeeEstimateExperimentalResponse { + estimate : IFeeEstimate; + verbose? : IFeeEstimateVerboseExperimentalData + } + "#, +} + +try_from!( args: GetFeeEstimateExperimentalResponse, IGetFeeEstimateExperimentalResponse, { + let estimate = IFeeEstimate::try_from(args.estimate)?; + let response = IGetFeeEstimateExperimentalResponse::default(); + response.set("estimate", &estimate)?; + + if let Some(verbose) = args.verbose { + let verbose = IFeeEstimateVerboseExperimentalData::try_from(verbose)?; + response.set("verbose", &verbose)?; + } + + Ok(response) +}); + +// --- diff --git a/rpc/core/src/wasm/mod.rs b/rpc/core/src/wasm/mod.rs index 6552baa42b..e3bcdc024b 100644 --- a/rpc/core/src/wasm/mod.rs +++ b/rpc/core/src/wasm/mod.rs @@ -1,3 +1,5 @@ +//! WASM related conversions + pub mod convert; cfg_if::cfg_if! 
{ diff --git a/rpc/grpc/client/Cargo.toml b/rpc/grpc/client/Cargo.toml index f4be5818cc..eeda4b2f8d 100644 --- a/rpc/grpc/client/Cargo.toml +++ b/rpc/grpc/client/Cargo.toml @@ -41,6 +41,7 @@ tokio-stream.workspace = true tonic = { workspace = true, features = ["gzip"] } triggered.workspace = true futures-util.workspace = true +rustls.workspace = true [features] heap = [] diff --git a/rpc/grpc/client/src/lib.rs b/rpc/grpc/client/src/lib.rs index c7eebd8d1e..b7e53bb5e1 100644 --- a/rpc/grpc/client/src/lib.rs +++ b/rpc/grpc/client/src/lib.rs @@ -38,7 +38,7 @@ use kaspa_rpc_core::{ use kaspa_utils::{channel::Channel, triggers::DuplexTrigger}; use kaspa_utils_tower::{ counters::TowerConnectionCounters, - middleware::{measure_request_body_size_layer, CountBytesBody, MapResponseBodyLayer, ServiceBuilder}, + middleware::{BodyExt, CountBytesBody, MapRequestBodyLayer, MapResponseBodyLayer, ServiceBuilder}, }; use regex::Regex; use std::{ @@ -50,7 +50,6 @@ use std::{ }; use tokio::sync::Mutex; use tonic::codec::CompressionEncoding; -use tonic::codegen::Body; use tonic::Streaming; mod connection_event; @@ -102,7 +101,7 @@ impl GrpcClient { /// `url`: the server to connect to /// /// `subscription_context`: it is advised to provide a clone of the same instance if multiple clients dealing with - /// [`UtxosChangedNotifications`] are connected concurrently in order to optimize the memory footprint. + /// `UtxosChangedNotifications` are connected concurrently in order to optimize the memory footprint. /// /// `reconnect`: features an automatic reconnection to the server, reactivating all subscriptions on success. /// @@ -241,6 +240,8 @@ impl RpcApi for GrpcClient { route!(get_sync_status_call, GetSyncStatus); route!(get_server_info_call, GetServerInfo); route!(get_metrics_call, GetMetrics); + route!(get_connections_call, GetConnections); + route!(get_system_info_call, GetSystemInfo); route!(submit_block_call, SubmitBlock); route!(get_block_template_call, GetBlockTemplate); route!(get_block_call, GetBlock); @@ -253,6 +254,7 @@ impl RpcApi for GrpcClient { route!(get_connected_peer_info_call, GetConnectedPeerInfo); route!(add_peer_call, AddPeer); route!(submit_transaction_call, SubmitTransaction); + route!(submit_transaction_replacement_call, SubmitTransactionReplacement); route!(get_subnetwork_call, GetSubnetwork); route!(get_virtual_chain_from_block_call, GetVirtualChainFromBlock); route!(get_blocks_call, GetBlocks); @@ -271,6 +273,9 @@ impl RpcApi for GrpcClient { route!(get_mempool_entries_by_addresses_call, GetMempoolEntriesByAddresses); route!(get_coin_supply_call, GetCoinSupply); route!(get_daa_score_timestamp_estimate_call, GetDaaScoreTimestampEstimate); + route!(get_fee_estimate_call, GetFeeEstimate); + route!(get_fee_estimate_experimental_call, GetFeeEstimateExperimental); + route!(get_current_block_color_call, GetCurrentBlockColor); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Notification API @@ -538,9 +543,7 @@ impl Inner { let bytes_tx = &counters.bytes_tx; let channel = ServiceBuilder::new() .layer(MapResponseBodyLayer::new(move |body| CountBytesBody::new(body, bytes_rx.clone()))) - .layer(measure_request_body_size_layer(bytes_tx.clone(), |body| { - body.map_err(|e| tonic::Status::from_error(Box::new(e))).boxed_unsync() - })) + .layer(MapRequestBodyLayer::new(move |body| CountBytesBody::new(body, bytes_tx.clone()).boxed_unsync())) .service(channel); // Build the gRPC client with an interceptor setting the request timeout diff --git a/rpc/grpc/client/src/route.rs 
b/rpc/grpc/client/src/route.rs index 5bb1bf3950..bb5b5ce56d 100644 --- a/rpc/grpc/client/src/route.rs +++ b/rpc/grpc/client/src/route.rs @@ -9,12 +9,14 @@ macro_rules! route { clippy::type_repetition_in_bounds, clippy::used_underscore_binding )] - fn $fn<'life0, 'async_trait>( + fn $fn<'life0, 'life1, 'async_trait>( &'life0 self, + _connection : ::core::option::Option<&'life1 Arc>, request: [<$name Request>], ) -> ::core::pin::Pin]>> + ::core::marker::Send + 'async_trait>> where 'life0: 'async_trait, + 'life1: 'async_trait, Self: 'async_trait, { Box::pin(async move { diff --git a/rpc/grpc/core/build.rs b/rpc/grpc/core/build.rs index fdf54486dd..b3a0614eae 100644 --- a/rpc/grpc/core/build.rs +++ b/rpc/grpc/core/build.rs @@ -10,7 +10,7 @@ fn main() { // uncomment this line and reflect the change in src/lib.rs //.out_dir("./src") - .compile(&protowire_files[0..1], dirs) + .compile_protos(&protowire_files[0..1], dirs) .unwrap_or_else(|e| panic!("protobuf compile error: {e}")); // recompile protobufs only if any of the proto files changes. diff --git a/rpc/grpc/core/proto/messages.proto b/rpc/grpc/core/proto/messages.proto index ec72426350..2d6310d9e3 100644 --- a/rpc/grpc/core/proto/messages.proto +++ b/rpc/grpc/core/proto/messages.proto @@ -58,7 +58,13 @@ message KaspadRequest { GetMetricsRequestMessage getMetricsRequest = 1090; GetServerInfoRequestMessage getServerInfoRequest = 1092; GetSyncStatusRequestMessage getSyncStatusRequest = 1094; - GetDaaScoreTimestampEstimateRequestMessage GetDaaScoreTimestampEstimateRequest = 1096; + GetDaaScoreTimestampEstimateRequestMessage getDaaScoreTimestampEstimateRequest = 1096; + SubmitTransactionReplacementRequestMessage submitTransactionReplacementRequest = 1100; + GetConnectionsRequestMessage getConnectionsRequest = 1102; + GetSystemInfoRequestMessage getSystemInfoRequest = 1104; + GetFeeEstimateRequestMessage getFeeEstimateRequest = 1106; + GetFeeEstimateExperimentalRequestMessage getFeeEstimateExperimentalRequest = 1108; + GetCurrentBlockColorRequestMessage getCurrentBlockColorRequest = 1110; } } @@ -117,7 +123,13 @@ message KaspadResponse { GetMetricsResponseMessage getMetricsResponse= 1091; GetServerInfoResponseMessage getServerInfoResponse = 1093; GetSyncStatusResponseMessage getSyncStatusResponse = 1095; - GetDaaScoreTimestampEstimateResponseMessage GetDaaScoreTimestampEstimateResponse = 1097; + GetDaaScoreTimestampEstimateResponseMessage getDaaScoreTimestampEstimateResponse = 1097; + SubmitTransactionReplacementResponseMessage submitTransactionReplacementResponse = 1101; + GetConnectionsResponseMessage getConnectionsResponse= 1103; + GetSystemInfoResponseMessage getSystemInfoResponse= 1105; + GetFeeEstimateResponseMessage getFeeEstimateResponse = 1107; + GetFeeEstimateExperimentalResponseMessage getFeeEstimateExperimentalResponse = 1109; + GetCurrentBlockColorResponseMessage getCurrentBlockColorResponse = 1111; } } diff --git a/rpc/grpc/core/proto/rpc.proto b/rpc/grpc/core/proto/rpc.proto index e558c65485..e218681b65 100644 --- a/rpc/grpc/core/proto/rpc.proto +++ b/rpc/grpc/core/proto/rpc.proto @@ -5,7 +5,6 @@ // Having received a RequestMessage, (wrapped in a KaspadMessage) the RPC server will respond with a // ResponseMessage (likewise wrapped in a KaspadMessage) respective to the original RequestMessage. // -// **IMPORTANT:** This API is a work in progress and is subject to break between versions. 
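Tracing the `route!` macro change above: every generated `RpcApi` method now threads an optional connection handle ahead of the request, with a second lifetime (`'life1`) to carry the borrow. De-sugared and simplified, the generated signature has roughly the following shape; all type names here are stand-ins for illustration, not the real kaspa types.

// Sketch only: the approximate shape `route!(get_metrics_call, GetMetrics)`
// expands to after this change. All types are simplified stand-ins.
use std::future::Future;
use std::pin::Pin;
use std::sync::Arc;

struct GetMetricsRequest;
struct GetMetricsResponse;
trait RpcConnection {}
type RpcResult<T> = Result<T, String>;

trait RpcApiShape {
    // gRPC client routes pass `None`; server-side dispatch may pass
    // `Some(connection)` so handlers can identify the caller.
    fn get_metrics_call<'a>(
        &'a self,
        _connection: Option<&'a Arc<dyn RpcConnection>>,
        request: GetMetricsRequest,
    ) -> Pin<Box<dyn Future<Output = RpcResult<GetMetricsResponse>> + Send + 'a>>;
}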
// syntax = "proto3"; package protowire; @@ -101,7 +100,7 @@ message RpcUtxoEntry { message RpcTransactionVerboseData{ string transactionId = 1; string hash = 2; - uint64 mass = 4; + uint64 computeMass = 4; string blockHash = 12; uint64 blockTime = 14; } @@ -307,6 +306,21 @@ message SubmitTransactionResponseMessage{ RPCError error = 1000; } +// SubmitTransactionReplacementRequestMessage submits a transaction to the mempool, applying a mandatory Replace by Fee policy +message SubmitTransactionReplacementRequestMessage{ + RpcTransaction transaction = 1; +} + +message SubmitTransactionReplacementResponseMessage{ + // The transaction ID of the submitted transaction + string transactionId = 1; + + // The previous transaction replaced in the mempool by the newly submitted one + RpcTransaction replacedTransaction = 2; + + RPCError error = 1000; +} + // NotifyVirtualChainChangedRequestMessage registers this connection for virtualChainChanged notifications. // // See: VirtualChainChangedNotificationMessage @@ -360,8 +374,13 @@ message GetSubnetworkResponseMessage{ RPCError error = 1000; } -// GetVirtualChainFromBlockRequestMessage requests the virtual selected -// parent chain from some startHash to this kaspad's current virtual +/// GetVirtualChainFromBlockRequestMessage requests the virtual selected +/// parent chain from some startHash to this kaspad's current virtual +/// Note: +/// this call batches the response to: +/// a. the network's `mergeset size limit * 10` amount of added chain blocks, if `includeAcceptedTransactionIds = false` +/// b. or `mergeset size limit * 10` amount of merged blocks, if `includeAcceptedTransactionIds = true` +/// c. it does not batch the removed chain blocks, only the added ones. message GetVirtualChainFromBlockRequestMessage{ string startHash = 1; bool includeAcceptedTransactionIds = 2; @@ -806,11 +825,47 @@ message ConsensusMetrics{ uint64 virtualDaaScore = 18; } +message StorageMetrics{ + uint64 storageSizeBytes = 1; +} + +message GetConnectionsRequestMessage{ + bool includeProfileData = 1; +} + +message ConnectionsProfileData { + double cpuUsage = 1; + uint64 memoryUsage = 2; +} + +message GetConnectionsResponseMessage{ + uint32 clients = 1; + uint32 peers = 2; + ConnectionsProfileData profileData = 3; + RPCError error = 1000; +} + +message GetSystemInfoRequestMessage{ +} + +message GetSystemInfoResponseMessage{ + string version = 1; + string systemId = 2; + string gitHash = 3; + uint32 coreNum = 4; + uint64 totalMemory = 5; + uint32 fdLimit = 6; + uint32 proxySocketLimitPerCpuCore = 7; + RPCError error = 1000; +} + message GetMetricsRequestMessage{ bool processMetrics = 1; bool connectionMetrics = 2; bool bandwidthMetrics = 3; bool consensusMetrics = 4; + bool storageMetrics = 5; + bool customMetrics = 6; } message GetMetricsResponseMessage{ @@ -819,6 +874,7 @@ message GetMetricsResponseMessage{ ConnectionMetrics connectionMetrics = 12; BandwidthMetrics bandwidthMetrics = 13; ConsensusMetrics consensusMetrics = 14; + StorageMetrics storageMetrics = 15; RPCError error = 1000; } @@ -826,12 +882,13 @@ message GetServerInfoRequestMessage{ } message GetServerInfoResponseMessage{ - repeated uint32 rpcApiVersion = 1; // Expecting exactly 4 elements - string serverVersion = 2; - string networkId = 3; - bool hasUtxoIndex = 4; - bool isSynced = 5; - uint64 virtualDaaScore = 6; + uint32 rpcApiVersion = 1; + uint32 rpcApiRevision = 2; + string serverVersion = 3; + string networkId = 4; + bool hasUtxoIndex = 5; + bool isSynced = 6; + uint64 virtualDaaScore = 7; RPCError 
error = 1000;
}

@@ -844,10 +901,76 @@ message GetSyncStatusResponseMessage{
}

message GetDaaScoreTimestampEstimateRequestMessage {
-  repeated uint64 daa_scores = 1;
+  repeated uint64 daaScores = 1;
}

message GetDaaScoreTimestampEstimateResponseMessage{
-  repeated uint64 timestamps = 1;
-  RPCError error = 1000;
+  repeated uint64 timestamps = 1;
+  RPCError error = 1000;
+}
+
+message RpcFeerateBucket {
+    // Fee/mass of a transaction in `sompi/gram` units
+    double feerate = 1;
+    double estimatedSeconds = 2;
+}
+
+// Data required for making fee estimates.
+//
+// Feerate values represent fee/mass of a transaction in `sompi/gram` units.
+// Given a feerate value recommendation, calculate the required fee by
+// taking the transaction mass and multiplying it by feerate: `fee = feerate * mass(tx)`
+message RpcFeeEstimate {
+    // Top-priority feerate bucket. Provides an estimation of the feerate required for sub-second DAG inclusion.
+    RpcFeerateBucket priorityBucket = 1;
+
+    // A vector of *normal* priority feerate values. The first value of this vector is guaranteed to exist and
+    // provide an estimation for sub-*minute* DAG inclusion. All other values will have shorter estimation
+    // times than all `lowBucket` values. Therefore, by chaining `[priority] | normal | low` and interpolating
+    // between them, one can compose a complete feerate function on the client side. The API makes an effort
+    // to sample enough "interesting" points on the feerate-to-time curve, so that the interpolation is meaningful.
+    repeated RpcFeerateBucket normalBuckets = 2;
+
+    // A vector of *low* priority feerate values. The first value of this vector is guaranteed to
+    // exist and provide an estimation for sub-*hour* DAG inclusion.
+    repeated RpcFeerateBucket lowBuckets = 3;
+}
+
+message RpcFeeEstimateVerboseExperimentalData {
+    uint64 mempoolReadyTransactionsCount = 1;
+    uint64 mempoolReadyTransactionsTotalMass = 2;
+    uint64 networkMassPerSecond = 3;
+
+    double nextBlockTemplateFeerateMin = 11;
+    double nextBlockTemplateFeerateMedian = 12;
+    double nextBlockTemplateFeerateMax = 13;
+}
+
+message GetFeeEstimateRequestMessage {
+}
+
+message GetFeeEstimateResponseMessage {
+    RpcFeeEstimate estimate = 1;
+    RPCError error = 1000;
+}
+
+message GetFeeEstimateExperimentalRequestMessage {
+    bool verbose = 1;
+}
+
+message GetFeeEstimateExperimentalResponseMessage {
+    RpcFeeEstimate estimate = 1;
+    RpcFeeEstimateVerboseExperimentalData verbose = 2;
+
+    RPCError error = 1000;
+}
+
+message GetCurrentBlockColorRequestMessage {
+    string hash = 1;
+}
+
+message GetCurrentBlockColorResponseMessage {
+    bool blue = 1;
+
+    RPCError error = 1000;
+}
diff --git a/rpc/grpc/core/src/convert/block.rs b/rpc/grpc/core/src/convert/block.rs
index 8429f3256d..6ab9e37fa0 100644
--- a/rpc/grpc/core/src/convert/block.rs
+++ b/rpc/grpc/core/src/convert/block.rs
@@ -15,6 +15,14 @@ from!(item: &kaspa_rpc_core::RpcBlock, protowire::RpcBlock, {
     }
 });
 
+from!(item: &kaspa_rpc_core::RpcRawBlock, protowire::RpcBlock, {
+    Self {
+        header: Some(protowire::RpcBlockHeader::from(&item.header)),
+        transactions: item.transactions.iter().map(protowire::RpcTransaction::from).collect(),
+        verbose_data: None,
+    }
+});
+
 from!(item: &kaspa_rpc_core::RpcBlockVerboseData, protowire::RpcBlockVerboseData, {
     Self {
         hash: item.hash.to_string(),
@@ -46,6 +54,17 @@ try_from!(item: &protowire::RpcBlock, kaspa_rpc_core::RpcBlock, {
     }
 });
 
+try_from!(item: &protowire::RpcBlock, kaspa_rpc_core::RpcRawBlock, {
+    Self {
+        header: item
+            .header
+            .as_ref()
+            .ok_or_else(|| 
RpcError::MissingRpcFieldError("RpcBlock".to_string(), "header".to_string()))? + .try_into()?, + transactions: item.transactions.iter().map(kaspa_rpc_core::RpcTransaction::try_from).collect::, _>>()?, + } +}); + try_from!(item: &protowire::RpcBlockVerboseData, kaspa_rpc_core::RpcBlockVerboseData, { Self { hash: RpcHash::from_str(&item.hash)?, diff --git a/rpc/grpc/core/src/convert/feerate_estimate.rs b/rpc/grpc/core/src/convert/feerate_estimate.rs new file mode 100644 index 0000000000..d1bff8f452 --- /dev/null +++ b/rpc/grpc/core/src/convert/feerate_estimate.rs @@ -0,0 +1,66 @@ +use crate::protowire; +use crate::{from, try_from}; +use kaspa_rpc_core::RpcError; + +// ---------------------------------------------------------------------------- +// rpc_core to protowire +// ---------------------------------------------------------------------------- + +from!(item: &kaspa_rpc_core::RpcFeerateBucket, protowire::RpcFeerateBucket, { + Self { + feerate: item.feerate, + estimated_seconds: item.estimated_seconds, + } +}); + +from!(item: &kaspa_rpc_core::RpcFeeEstimate, protowire::RpcFeeEstimate, { + Self { + priority_bucket: Some((&item.priority_bucket).into()), + normal_buckets: item.normal_buckets.iter().map(|b| b.into()).collect(), + low_buckets: item.low_buckets.iter().map(|b| b.into()).collect(), + } +}); + +from!(item: &kaspa_rpc_core::RpcFeeEstimateVerboseExperimentalData, protowire::RpcFeeEstimateVerboseExperimentalData, { + Self { + network_mass_per_second: item.network_mass_per_second, + mempool_ready_transactions_count: item.mempool_ready_transactions_count, + mempool_ready_transactions_total_mass: item.mempool_ready_transactions_total_mass, + next_block_template_feerate_min: item.next_block_template_feerate_min, + next_block_template_feerate_median: item.next_block_template_feerate_median, + next_block_template_feerate_max: item.next_block_template_feerate_max, + } +}); + +// ---------------------------------------------------------------------------- +// protowire to rpc_core +// ---------------------------------------------------------------------------- + +try_from!(item: &protowire::RpcFeerateBucket, kaspa_rpc_core::RpcFeerateBucket, { + Self { + feerate: item.feerate, + estimated_seconds: item.estimated_seconds, + } +}); + +try_from!(item: &protowire::RpcFeeEstimate, kaspa_rpc_core::RpcFeeEstimate, { + Self { + priority_bucket: item.priority_bucket + .as_ref() + .ok_or_else(|| RpcError::MissingRpcFieldError("RpcFeeEstimate".to_string(), "priority_bucket".to_string()))? 
+ .try_into()?, + normal_buckets: item.normal_buckets.iter().map(|b| b.try_into()).collect::, _>>()?, + low_buckets: item.low_buckets.iter().map(|b| b.try_into()).collect::, _>>()?, + } +}); + +try_from!(item: &protowire::RpcFeeEstimateVerboseExperimentalData, kaspa_rpc_core::RpcFeeEstimateVerboseExperimentalData, { + Self { + network_mass_per_second: item.network_mass_per_second, + mempool_ready_transactions_count: item.mempool_ready_transactions_count, + mempool_ready_transactions_total_mass: item.mempool_ready_transactions_total_mass, + next_block_template_feerate_min: item.next_block_template_feerate_min, + next_block_template_feerate_median: item.next_block_template_feerate_median, + next_block_template_feerate_max: item.next_block_template_feerate_max, + } +}); diff --git a/rpc/grpc/core/src/convert/header.rs b/rpc/grpc/core/src/convert/header.rs index f4d78b7c11..5d763034a7 100644 --- a/rpc/grpc/core/src/convert/header.rs +++ b/rpc/grpc/core/src/convert/header.rs @@ -1,5 +1,6 @@ use crate::protowire; use crate::{from, try_from}; +use kaspa_consensus_core::header::Header; use kaspa_rpc_core::{FromRpcHex, RpcError, RpcHash, RpcResult, ToRpcHex}; use std::str::FromStr; @@ -24,6 +25,23 @@ from!(item: &kaspa_rpc_core::RpcHeader, protowire::RpcBlockHeader, { } }); +from!(item: &kaspa_rpc_core::RpcRawHeader, protowire::RpcBlockHeader, { + Self { + version: item.version.into(), + parents: item.parents_by_level.iter().map(protowire::RpcBlockLevelParents::from).collect(), + hash_merkle_root: item.hash_merkle_root.to_string(), + accepted_id_merkle_root: item.accepted_id_merkle_root.to_string(), + utxo_commitment: item.utxo_commitment.to_string(), + timestamp: item.timestamp.try_into().expect("timestamp is always convertible to i64"), + bits: item.bits, + nonce: item.nonce, + daa_score: item.daa_score, + blue_work: item.blue_work.to_rpc_hex(), + blue_score: item.blue_score, + pruning_point: item.pruning_point.to_string(), + } +}); + from!(item: &Vec, protowire::RpcBlockLevelParents, { Self { parent_hashes: item.iter().map(|x| x.to_string()).collect() } }); // ---------------------------------------------------------------------------- @@ -32,7 +50,7 @@ from!(item: &Vec, protowire::RpcBlockLevelParents, { Self { parent_hash try_from!(item: &protowire::RpcBlockHeader, kaspa_rpc_core::RpcHeader, { // We re-hash the block to remain as most trustless as possible - Self::new_finalized( + let header = Header::new_finalized( item.version.try_into()?, item.parents.iter().map(Vec::::try_from).collect::>>>()?, RpcHash::from_str(&item.hash_merkle_root)?, @@ -45,7 +63,26 @@ try_from!(item: &protowire::RpcBlockHeader, kaspa_rpc_core::RpcHeader, { kaspa_rpc_core::RpcBlueWorkType::from_rpc_hex(&item.blue_work)?, item.blue_score, RpcHash::from_str(&item.pruning_point)?, - ) + ); + + header.into() +}); + +try_from!(item: &protowire::RpcBlockHeader, kaspa_rpc_core::RpcRawHeader, { + Self { + version: item.version.try_into()?, + parents_by_level: item.parents.iter().map(Vec::::try_from).collect::>>>()?, + hash_merkle_root: RpcHash::from_str(&item.hash_merkle_root)?, + accepted_id_merkle_root: RpcHash::from_str(&item.accepted_id_merkle_root)?, + utxo_commitment: RpcHash::from_str(&item.utxo_commitment)?, + timestamp: item.timestamp.try_into()?, + bits: item.bits, + nonce: item.nonce, + daa_score: item.daa_score, + blue_work: kaspa_rpc_core::RpcBlueWorkType::from_rpc_hex(&item.blue_work)?, + blue_score: item.blue_score, + pruning_point: RpcHash::from_str(&item.pruning_point)?, + } }); try_from!(item: 
&protowire::RpcBlockLevelParents, Vec, { @@ -55,7 +92,8 @@ try_from!(item: &protowire::RpcBlockLevelParents, Vec, { #[cfg(test)] mod tests { use crate::protowire; - use kaspa_rpc_core::{RpcHash, RpcHeader}; + use kaspa_consensus_core::{block::Block, header::Header}; + use kaspa_rpc_core::{RpcBlock, RpcHash, RpcHeader}; fn new_unique() -> RpcHash { use std::sync::atomic::{AtomicU64, Ordering}; @@ -106,7 +144,7 @@ mod tests { #[test] fn test_rpc_header() { - let r = RpcHeader::new_finalized( + let r = Header::new_finalized( 0, vec![vec![new_unique(), new_unique(), new_unique()], vec![new_unique()], vec![new_unique(), new_unique()]], new_unique(), @@ -120,6 +158,7 @@ mod tests { 1928374, new_unique(), ); + let r = RpcHeader::from(r); let p: protowire::RpcBlockHeader = (&r).into(); let r2: RpcHeader = (&p).try_into().unwrap(); let p2: protowire::RpcBlockHeader = (&r2).into(); @@ -134,4 +173,42 @@ mod tests { assert_eq!(r.hash, r2.hash); assert_eq!(p, p2); } + + #[test] + fn test_rpc_block() { + let h = Header::new_finalized( + 0, + vec![vec![new_unique(), new_unique(), new_unique()], vec![new_unique()], vec![new_unique(), new_unique()]], + new_unique(), + new_unique(), + new_unique(), + 123, + 12345, + 98765, + 120055, + 459912.into(), + 1928374, + new_unique(), + ); + let b = Block::from_header(h); + let r: RpcBlock = (&b).into(); + let p: protowire::RpcBlock = (&r).into(); + let r2: RpcBlock = (&p).try_into().unwrap(); + let b2: Block = r2.clone().try_into().unwrap(); + let r3: RpcBlock = (&b2).into(); + let p2: protowire::RpcBlock = (&r3).into(); + + assert_eq!(r.header.parents_by_level, r2.header.parents_by_level); + assert_eq!(p.header.as_ref().unwrap().parents, p2.header.as_ref().unwrap().parents); + test_parents_by_level_rxr(&r.header.parents_by_level, &r2.header.parents_by_level); + test_parents_by_level_rxr(&r.header.parents_by_level, &r3.header.parents_by_level); + test_parents_by_level_rxr(&b.header.parents_by_level, &r2.header.parents_by_level); + test_parents_by_level_rxr(&b.header.parents_by_level, &b2.header.parents_by_level); + test_parents_by_level_rxp(&r.header.parents_by_level, &p.header.as_ref().unwrap().parents); + test_parents_by_level_rxp(&r.header.parents_by_level, &p2.header.as_ref().unwrap().parents); + test_parents_by_level_rxp(&r2.header.parents_by_level, &p2.header.as_ref().unwrap().parents); + + assert_eq!(b.hash(), b2.hash()); + assert_eq!(p, p2); + } } diff --git a/rpc/grpc/core/src/convert/kaspad.rs b/rpc/grpc/core/src/convert/kaspad.rs index 0fef61523a..c3411545cc 100644 --- a/rpc/grpc/core/src/convert/kaspad.rs +++ b/rpc/grpc/core/src/convert/kaspad.rs @@ -36,6 +36,7 @@ pub mod kaspad_request_convert { impl_into_kaspad_request!(GetConnectedPeerInfo); impl_into_kaspad_request!(AddPeer); impl_into_kaspad_request!(SubmitTransaction); + impl_into_kaspad_request!(SubmitTransactionReplacement); impl_into_kaspad_request!(GetSubnetwork); impl_into_kaspad_request!(GetVirtualChainFromBlock); impl_into_kaspad_request!(GetBlocks); @@ -54,9 +55,14 @@ pub mod kaspad_request_convert { impl_into_kaspad_request!(GetCoinSupply); impl_into_kaspad_request!(Ping); impl_into_kaspad_request!(GetMetrics); + impl_into_kaspad_request!(GetConnections); + impl_into_kaspad_request!(GetSystemInfo); impl_into_kaspad_request!(GetServerInfo); impl_into_kaspad_request!(GetSyncStatus); impl_into_kaspad_request!(GetDaaScoreTimestampEstimate); + impl_into_kaspad_request!(GetFeeEstimate); + impl_into_kaspad_request!(GetFeeEstimateExperimental); + impl_into_kaspad_request!(GetCurrentBlockColor); 
impl_into_kaspad_request!(NotifyBlockAdded); impl_into_kaspad_request!(NotifyNewBlockTemplate); @@ -167,6 +173,7 @@ pub mod kaspad_response_convert { impl_into_kaspad_response!(GetConnectedPeerInfo); impl_into_kaspad_response!(AddPeer); impl_into_kaspad_response!(SubmitTransaction); + impl_into_kaspad_response!(SubmitTransactionReplacement); impl_into_kaspad_response!(GetSubnetwork); impl_into_kaspad_response!(GetVirtualChainFromBlock); impl_into_kaspad_response!(GetBlocks); @@ -185,9 +192,14 @@ pub mod kaspad_response_convert { impl_into_kaspad_response!(GetCoinSupply); impl_into_kaspad_response!(Ping); impl_into_kaspad_response!(GetMetrics); + impl_into_kaspad_response!(GetConnections); + impl_into_kaspad_response!(GetSystemInfo); impl_into_kaspad_response!(GetServerInfo); impl_into_kaspad_response!(GetSyncStatus); impl_into_kaspad_response!(GetDaaScoreTimestampEstimate); + impl_into_kaspad_response!(GetFeeEstimate); + impl_into_kaspad_response!(GetFeeEstimateExperimental); + impl_into_kaspad_response!(GetCurrentBlockColor); impl_into_kaspad_notify_response!(NotifyBlockAdded); impl_into_kaspad_notify_response!(NotifyNewBlockTemplate); diff --git a/rpc/grpc/core/src/convert/message.rs b/rpc/grpc/core/src/convert/message.rs index 9babf29c84..67ac60650c 100644 --- a/rpc/grpc/core/src/convert/message.rs +++ b/rpc/grpc/core/src/convert/message.rs @@ -3,7 +3,7 @@ //! Response payloads in protowire do always contain an error field and generally a set of //! fields providing the requested data. //! -//! Responses in rpc core are expressed as RpcResult, where Xxx is the called +//! Responses in rpc core are expressed as `RpcResult`, where `Xxx` is the called //! RPC method. //! //! The general conversion convention from protowire to rpc core is to consider the error @@ -26,6 +26,7 @@ use kaspa_rpc_core::{ RpcContextualPeerAddress, RpcError, RpcExtraData, RpcHash, RpcIpAddress, RpcNetworkType, RpcPeerAddress, RpcResult, SubmitBlockRejectReason, SubmitBlockReport, }; +use kaspa_utils::hex::*; use std::str::FromStr; macro_rules! 
from { @@ -248,6 +249,13 @@ from!(item: RpcResult<&kaspa_rpc_core::SubmitTransactionResponse>, protowire::Su Self { transaction_id: item.transaction_id.to_string(), error: None } }); +from!(item: &kaspa_rpc_core::SubmitTransactionReplacementRequest, protowire::SubmitTransactionReplacementRequestMessage, { + Self { transaction: Some((&item.transaction).into()) } +}); +from!(item: RpcResult<&kaspa_rpc_core::SubmitTransactionReplacementResponse>, protowire::SubmitTransactionReplacementResponseMessage, { + Self { transaction_id: item.transaction_id.to_string(), replaced_transaction: Some((&item.replaced_transaction).into()), error: None } +}); + from!(item: &kaspa_rpc_core::GetSubnetworkRequest, protowire::GetSubnetworkRequestMessage, { Self { subnetwork_id: item.subnetwork_id.to_string() } }); @@ -394,6 +402,34 @@ from!(item: RpcResult<&kaspa_rpc_core::GetDaaScoreTimestampEstimateResponse>, pr Self { timestamps: item.timestamps.clone(), error: None } }); +// Fee estimate API + +from!(&kaspa_rpc_core::GetFeeEstimateRequest, protowire::GetFeeEstimateRequestMessage); +from!(item: RpcResult<&kaspa_rpc_core::GetFeeEstimateResponse>, protowire::GetFeeEstimateResponseMessage, { + Self { estimate: Some((&item.estimate).into()), error: None } +}); +from!(item: &kaspa_rpc_core::GetFeeEstimateExperimentalRequest, protowire::GetFeeEstimateExperimentalRequestMessage, { + Self { + verbose: item.verbose + } +}); +from!(item: RpcResult<&kaspa_rpc_core::GetFeeEstimateExperimentalResponse>, protowire::GetFeeEstimateExperimentalResponseMessage, { + Self { + estimate: Some((&item.estimate).into()), + verbose: item.verbose.as_ref().map(|x| x.into()), + error: None + } +}); + +from!(item: &kaspa_rpc_core::GetCurrentBlockColorRequest, protowire::GetCurrentBlockColorRequestMessage, { + Self { + hash: item.hash.to_string() + } +}); +from!(item: RpcResult<&kaspa_rpc_core::GetCurrentBlockColorResponse>, protowire::GetCurrentBlockColorResponseMessage, { + Self { blue: item.blue, error: None } +}); + from!(&kaspa_rpc_core::PingRequest, protowire::PingRequestMessage); from!(RpcResult<&kaspa_rpc_core::PingResponse>, protowire::PingResponseMessage); @@ -403,6 +439,8 @@ from!(item: &kaspa_rpc_core::GetMetricsRequest, protowire::GetMetricsRequestMess connection_metrics: item.connection_metrics, bandwidth_metrics: item.bandwidth_metrics, consensus_metrics: item.consensus_metrics, + storage_metrics: item.storage_metrics, + custom_metrics: item.custom_metrics, } }); from!(item: RpcResult<&kaspa_rpc_core::GetMetricsResponse>, protowire::GetMetricsResponseMessage, { @@ -412,13 +450,46 @@ from!(item: RpcResult<&kaspa_rpc_core::GetMetricsResponse>, protowire::GetMetric connection_metrics: item.connection_metrics.as_ref().map(|x| x.into()), bandwidth_metrics: item.bandwidth_metrics.as_ref().map(|x| x.into()), consensus_metrics: item.consensus_metrics.as_ref().map(|x| x.into()), + storage_metrics: item.storage_metrics.as_ref().map(|x| x.into()), + // TODO + // custom_metrics : None, error: None, } }); + +from!(item: &kaspa_rpc_core::GetConnectionsRequest, protowire::GetConnectionsRequestMessage, { + Self { + include_profile_data : item.include_profile_data, + } +}); +from!(item: RpcResult<&kaspa_rpc_core::GetConnectionsResponse>, protowire::GetConnectionsResponseMessage, { + Self { + clients: item.clients, + peers: item.peers as u32, + profile_data: item.profile_data.as_ref().map(|x| x.into()), + error: None, + } +}); + +from!(&kaspa_rpc_core::GetSystemInfoRequest, protowire::GetSystemInfoRequestMessage); +from!(item: 
RpcResult<&kaspa_rpc_core::GetSystemInfoResponse>, protowire::GetSystemInfoResponseMessage, { + Self { + version : item.version.clone(), + system_id : item.system_id.as_ref().map(|system_id|system_id.to_hex()).unwrap_or_default(), + git_hash : item.git_hash.as_ref().map(|git_hash|git_hash.to_hex()).unwrap_or_default(), + total_memory : item.total_memory, + core_num : item.cpu_physical_cores as u32, + fd_limit : item.fd_limit, + proxy_socket_limit_per_cpu_core : item.proxy_socket_limit_per_cpu_core.unwrap_or_default(), + error: None, + } +}); + from!(&kaspa_rpc_core::GetServerInfoRequest, protowire::GetServerInfoRequestMessage); from!(item: RpcResult<&kaspa_rpc_core::GetServerInfoResponse>, protowire::GetServerInfoResponseMessage, { Self { - rpc_api_version: item.rpc_api_version.iter().map(|x| *x as u32).collect(), + rpc_api_version: item.rpc_api_version as u32, + rpc_api_revision: item.rpc_api_revision as u32, server_version: item.server_version.clone(), network_id: item.network_id.to_string(), has_utxo_index: item.has_utxo_index, @@ -647,6 +718,26 @@ try_from!(item: &protowire::SubmitTransactionResponseMessage, RpcResult, { + Self { + transaction_id: RpcHash::from_str(&item.transaction_id)?, + replaced_transaction: item + .replaced_transaction + .as_ref() + .ok_or_else(|| RpcError::MissingRpcFieldError("SubmitTransactionReplacementRequestMessage".to_string(), "replaced_transaction".to_string()))? + .try_into()?, + } +}); + try_from!(item: &protowire::GetSubnetworkRequestMessage, kaspa_rpc_core::GetSubnetworkRequest, { Self { subnetwork_id: kaspa_rpc_core::RpcSubnetworkId::from_str(&item.subnetwork_id)? } }); @@ -791,11 +882,53 @@ try_from!(item: &protowire::GetDaaScoreTimestampEstimateResponseMessage, RpcResu Self { timestamps: item.timestamps.clone() } }); +try_from!(&protowire::GetFeeEstimateRequestMessage, kaspa_rpc_core::GetFeeEstimateRequest); +try_from!(item: &protowire::GetFeeEstimateResponseMessage, RpcResult, { + Self { + estimate: item.estimate + .as_ref() + .ok_or_else(|| RpcError::MissingRpcFieldError("GetFeeEstimateResponseMessage".to_string(), "estimate".to_string()))? + .try_into()? + } +}); +try_from!(item: &protowire::GetFeeEstimateExperimentalRequestMessage, kaspa_rpc_core::GetFeeEstimateExperimentalRequest, { + Self { + verbose: item.verbose + } +}); +try_from!(item: &protowire::GetFeeEstimateExperimentalResponseMessage, RpcResult, { + Self { + estimate: item.estimate + .as_ref() + .ok_or_else(|| RpcError::MissingRpcFieldError("GetFeeEstimateExperimentalResponseMessage".to_string(), "estimate".to_string()))? + .try_into()?, + verbose: item.verbose.as_ref().map(|x| x.try_into()).transpose()? + } +}); + +try_from!(item: &protowire::GetCurrentBlockColorRequestMessage, kaspa_rpc_core::GetCurrentBlockColorRequest, { + Self { + hash: RpcHash::from_str(&item.hash)? 
+ } +}); +try_from!(item: &protowire::GetCurrentBlockColorResponseMessage, RpcResult, { + Self { + blue: item.blue + } +}); + try_from!(&protowire::PingRequestMessage, kaspa_rpc_core::PingRequest); try_from!(&protowire::PingResponseMessage, RpcResult); try_from!(item: &protowire::GetMetricsRequestMessage, kaspa_rpc_core::GetMetricsRequest, { - Self { process_metrics: item.process_metrics, connection_metrics: item.connection_metrics, bandwidth_metrics:item.bandwidth_metrics, consensus_metrics: item.consensus_metrics } + Self { + process_metrics: item.process_metrics, + connection_metrics: item.connection_metrics, + bandwidth_metrics:item.bandwidth_metrics, + consensus_metrics: item.consensus_metrics, + storage_metrics: item.storage_metrics, + custom_metrics : item.custom_metrics, + } }); try_from!(item: &protowire::GetMetricsResponseMessage, RpcResult, { Self { @@ -804,13 +937,41 @@ try_from!(item: &protowire::GetMetricsResponseMessage, RpcResult, { + Self { + clients: item.clients, + peers: item.peers as u16, + profile_data: item.profile_data.as_ref().map(|x| x.try_into()).transpose()?, + } +}); + +try_from!(&protowire::GetSystemInfoRequestMessage, kaspa_rpc_core::GetSystemInfoRequest); +try_from!(item: &protowire::GetSystemInfoResponseMessage, RpcResult, { + Self { + version: item.version.clone(), + system_id: (!item.system_id.is_empty()).then(|| FromHex::from_hex(&item.system_id)).transpose()?, + git_hash: (!item.git_hash.is_empty()).then(|| FromHex::from_hex(&item.git_hash)).transpose()?, + total_memory: item.total_memory, + cpu_physical_cores: item.core_num as u16, + fd_limit: item.fd_limit, + proxy_socket_limit_per_cpu_core : (item.proxy_socket_limit_per_cpu_core > 0).then_some(item.proxy_socket_limit_per_cpu_core), } }); try_from!(&protowire::GetServerInfoRequestMessage, kaspa_rpc_core::GetServerInfoRequest); try_from!(item: &protowire::GetServerInfoResponseMessage, RpcResult, { Self { - rpc_api_version: item.rpc_api_version.iter().map(|x| *x as u16).collect::>().as_slice().try_into().map_err(|_| RpcError::RpcApiVersionFormatError)?, + rpc_api_version: item.rpc_api_version as u16, + rpc_api_revision: item.rpc_api_revision as u16, server_version: item.server_version.clone(), network_id: NetworkId::from_str(&item.network_id)?, has_utxo_index: item.has_utxo_index, diff --git a/rpc/grpc/core/src/convert/metrics.rs b/rpc/grpc/core/src/convert/metrics.rs index 8e0e48c045..5037b370f4 100644 --- a/rpc/grpc/core/src/convert/metrics.rs +++ b/rpc/grpc/core/src/convert/metrics.rs @@ -6,6 +6,14 @@ use kaspa_rpc_core::RpcError; // rpc_core to protowire // ---------------------------------------------------------------------------- +from!(item: &kaspa_rpc_core::ConnectionsProfileData, protowire::ConnectionsProfileData, { + Self { + cpu_usage: item.cpu_usage as f64, + memory_usage: item.memory_usage, + + } +}); + from!(item: &kaspa_rpc_core::ProcessMetrics, protowire::ProcessMetrics, { Self { resident_set_size: item.resident_set_size, @@ -66,10 +74,20 @@ from!(item: &kaspa_rpc_core::ConsensusMetrics, protowire::ConsensusMetrics, { } }); +from!(item: &kaspa_rpc_core::StorageMetrics, protowire::StorageMetrics, { + Self { + storage_size_bytes: item.storage_size_bytes, + } +}); + // ---------------------------------------------------------------------------- // protowire to rpc_core // ---------------------------------------------------------------------------- +try_from!(item: &protowire::ConnectionsProfileData, kaspa_rpc_core::ConnectionsProfileData, { + Self { cpu_usage : item.cpu_usage as f32, 
memory_usage : item.memory_usage } +}); + try_from!(item: &protowire::ProcessMetrics, kaspa_rpc_core::ProcessMetrics, { Self { resident_set_size: item.resident_set_size, @@ -129,3 +147,9 @@ try_from!(item: &protowire::ConsensusMetrics, kaspa_rpc_core::ConsensusMetrics, network_virtual_daa_score: item.virtual_daa_score, } }); + +try_from!(item: &protowire::StorageMetrics, kaspa_rpc_core::StorageMetrics, { + Self { + storage_size_bytes: item.storage_size_bytes, + } +}); diff --git a/rpc/grpc/core/src/convert/mod.rs b/rpc/grpc/core/src/convert/mod.rs index 2f3252d22e..d4948f57dc 100644 --- a/rpc/grpc/core/src/convert/mod.rs +++ b/rpc/grpc/core/src/convert/mod.rs @@ -1,6 +1,7 @@ pub mod address; pub mod block; pub mod error; +pub mod feerate_estimate; pub mod header; pub mod kaspad; pub mod mempool; diff --git a/rpc/grpc/core/src/convert/tx.rs b/rpc/grpc/core/src/convert/tx.rs index 10d79b8be8..7a75a0255e 100644 --- a/rpc/grpc/core/src/convert/tx.rs +++ b/rpc/grpc/core/src/convert/tx.rs @@ -60,7 +60,7 @@ from!(item: &kaspa_rpc_core::RpcTransactionVerboseData, protowire::RpcTransactio Self { transaction_id: item.transaction_id.to_string(), hash: item.hash.to_string(), - mass: item.mass, + compute_mass: item.compute_mass, block_hash: item.block_hash.to_string(), block_time: item.block_time, } @@ -167,7 +167,7 @@ try_from!(item: &protowire::RpcTransactionVerboseData, kaspa_rpc_core::RpcTransa Self { transaction_id: RpcHash::from_str(&item.transaction_id)?, hash: RpcHash::from_str(&item.hash)?, - mass: item.mass, + compute_mass: item.compute_mass, block_hash: RpcHash::from_str(&item.block_hash)?, block_time: item.block_time, } diff --git a/rpc/grpc/core/src/ops.rs b/rpc/grpc/core/src/ops.rs index 7cc23f1609..f3bc12c829 100644 --- a/rpc/grpc/core/src/ops.rs +++ b/rpc/grpc/core/src/ops.rs @@ -61,6 +61,7 @@ pub enum KaspadPayloadOps { GetConnectedPeerInfo, AddPeer, SubmitTransaction, + SubmitTransactionReplacement, GetSubnetwork, GetVirtualChainFromBlock, GetBlockCount, @@ -78,9 +79,14 @@ pub enum KaspadPayloadOps { GetCoinSupply, Ping, GetMetrics, + GetConnections, + GetSystemInfo, GetServerInfo, GetSyncStatus, GetDaaScoreTimestampEstimate, + GetFeeEstimate, + GetFeeEstimateExperimental, + GetCurrentBlockColor, // Subscription commands for starting/stopping notifications NotifyBlockAdded, diff --git a/rpc/grpc/server/Cargo.toml b/rpc/grpc/server/Cargo.toml index 9f6ae74b47..452dc7d9ee 100644 --- a/rpc/grpc/server/Cargo.toml +++ b/rpc/grpc/server/Cargo.toml @@ -39,6 +39,7 @@ tokio-stream.workspace = true tonic = { workspace = true, features = ["gzip"] } triggered.workspace = true uuid.workspace = true +rustls.workspace = true [dev-dependencies] kaspa-grpc-client.workspace = true diff --git a/rpc/grpc/server/src/connection_handler.rs b/rpc/grpc/server/src/connection_handler.rs index d581ea441f..fd13cf9bb0 100644 --- a/rpc/grpc/server/src/connection_handler.rs +++ b/rpc/grpc/server/src/connection_handler.rs @@ -29,7 +29,7 @@ use kaspa_rpc_core::{ use kaspa_utils::networking::NetAddress; use kaspa_utils_tower::{ counters::TowerConnectionCounters, - middleware::{measure_request_body_size_layer, CountBytesBody, MapResponseBodyLayer}, + middleware::{BodyExt, CountBytesBody, MapRequestBodyLayer, MapResponseBodyLayer}, }; use std::fmt::Debug; use std::{ @@ -144,7 +144,7 @@ impl ConnectionHandler { let serve_result = TonicServer::builder() // .http2_keepalive_interval(Some(GRPC_KEEP_ALIVE_PING_INTERVAL)) // .http2_keepalive_timeout(Some(GRPC_KEEP_ALIVE_PING_TIMEOUT)) - 
.layer(measure_request_body_size_layer(bytes_rx, |b| b)) + .layer(MapRequestBodyLayer::new(move |body| CountBytesBody::new(body, bytes_rx.clone()).boxed_unsync())) .layer(MapResponseBodyLayer::new(move |body| CountBytesBody::new(body, bytes_tx.clone()))) .add_service(protowire_server) .serve_with_shutdown( diff --git a/rpc/grpc/server/src/request_handler/factory.rs b/rpc/grpc/server/src/request_handler/factory.rs index 802cb6cd6b..b6a5b4476f 100644 --- a/rpc/grpc/server/src/request_handler/factory.rs +++ b/rpc/grpc/server/src/request_handler/factory.rs @@ -55,6 +55,7 @@ impl Factory { GetConnectedPeerInfo, AddPeer, SubmitTransaction, + SubmitTransactionReplacement, GetSubnetwork, GetVirtualChainFromBlock, GetBlockCount, @@ -72,9 +73,14 @@ impl Factory { GetCoinSupply, Ping, GetMetrics, + GetConnections, + GetSystemInfo, GetServerInfo, GetSyncStatus, GetDaaScoreTimestampEstimate, + GetFeeEstimate, + GetFeeEstimateExperimental, + GetCurrentBlockColor, NotifyBlockAdded, NotifyNewBlockTemplate, NotifyFinalityConflict, diff --git a/rpc/grpc/server/src/tests/rpc_core_mock.rs b/rpc/grpc/server/src/tests/rpc_core_mock.rs index ddf78ccbd7..dd6de46d2a 100644 --- a/rpc/grpc/server/src/tests/rpc_core_mock.rs +++ b/rpc/grpc/server/src/tests/rpc_core_mock.rs @@ -6,7 +6,7 @@ use kaspa_notify::notifier::{Notifier, Notify}; use kaspa_notify::scope::Scope; use kaspa_notify::subscription::context::SubscriptionContext; use kaspa_notify::subscription::{MutationPolicies, UtxosChangedMutationPolicy}; -use kaspa_rpc_core::{api::rpc::RpcApi, *}; +use kaspa_rpc_core::{api::connection::DynRpcConnection, api::rpc::RpcApi, *}; use kaspa_rpc_core::{notify::connection::ChannelConnection, RpcResult}; use std::sync::Arc; @@ -66,7 +66,7 @@ impl RpcCoreMock { #[async_trait] impl RpcApi for RpcCoreMock { // This fn needs to succeed while the client connects - async fn get_info_call(&self, _request: GetInfoRequest) -> RpcResult { + async fn get_info_call(&self, _connection: Option<&DynRpcConnection>, _request: GetInfoRequest) -> RpcResult { Ok(GetInfoResponse { p2p_id: "p2p-mock".to_string(), mempool_size: 1234, @@ -78,133 +78,245 @@ impl RpcApi for RpcCoreMock { }) } - async fn ping_call(&self, _request: PingRequest) -> RpcResult { + async fn ping_call(&self, _connection: Option<&DynRpcConnection>, _request: PingRequest) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_metrics_call(&self, _request: GetMetricsRequest) -> RpcResult { + async fn get_metrics_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetMetricsRequest, + ) -> RpcResult { + Err(RpcError::NotImplemented) + } + + async fn get_connections_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetConnectionsRequest, + ) -> RpcResult { + Err(RpcError::NotImplemented) + } + + async fn get_system_info_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetSystemInfoRequest, + ) -> RpcResult { + Err(RpcError::NotImplemented) + } + + async fn get_server_info_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetServerInfoRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_server_info_call(&self, _request: GetServerInfoRequest) -> RpcResult { + async fn get_sync_status_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetSyncStatusRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_sync_status_call(&self, _request: GetSyncStatusRequest) -> RpcResult { + async fn get_current_network_call( + &self, + _connection: 
Option<&DynRpcConnection>, + _request: GetCurrentNetworkRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_current_network_call(&self, _request: GetCurrentNetworkRequest) -> RpcResult { + async fn submit_block_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: SubmitBlockRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn submit_block_call(&self, _request: SubmitBlockRequest) -> RpcResult { + async fn get_block_template_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetBlockTemplateRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_block_template_call(&self, _request: GetBlockTemplateRequest) -> RpcResult { + async fn get_peer_addresses_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetPeerAddressesRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_peer_addresses_call(&self, _request: GetPeerAddressesRequest) -> RpcResult { + async fn get_sink_call(&self, _connection: Option<&DynRpcConnection>, _request: GetSinkRequest) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_sink_call(&self, _request: GetSinkRequest) -> RpcResult { + async fn get_mempool_entry_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetMempoolEntryRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_mempool_entry_call(&self, _request: GetMempoolEntryRequest) -> RpcResult { + async fn get_mempool_entries_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetMempoolEntriesRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_mempool_entries_call(&self, _request: GetMempoolEntriesRequest) -> RpcResult { + async fn get_connected_peer_info_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetConnectedPeerInfoRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_connected_peer_info_call(&self, _request: GetConnectedPeerInfoRequest) -> RpcResult { + async fn submit_transaction_replacement_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: SubmitTransactionReplacementRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn add_peer_call(&self, _request: AddPeerRequest) -> RpcResult { + async fn add_peer_call(&self, _connection: Option<&DynRpcConnection>, _request: AddPeerRequest) -> RpcResult { Err(RpcError::NotImplemented) } - async fn submit_transaction_call(&self, _request: SubmitTransactionRequest) -> RpcResult { + async fn submit_transaction_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: SubmitTransactionRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_block_call(&self, _request: GetBlockRequest) -> RpcResult { + async fn get_block_call(&self, _connection: Option<&DynRpcConnection>, _request: GetBlockRequest) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_subnetwork_call(&self, _request: GetSubnetworkRequest) -> RpcResult { + async fn get_subnetwork_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetSubnetworkRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } async fn get_virtual_chain_from_block_call( &self, + _connection: Option<&DynRpcConnection>, _request: GetVirtualChainFromBlockRequest, ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_blocks_call(&self, _request: GetBlocksRequest) -> RpcResult { + async fn get_blocks_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: 
GetBlocksRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_block_count_call(&self, _request: GetBlockCountRequest) -> RpcResult { + async fn get_current_block_color_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetCurrentBlockColorRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_block_dag_info_call(&self, _request: GetBlockDagInfoRequest) -> RpcResult { + async fn get_block_count_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetBlockCountRequest, + ) -> RpcResult { + Err(RpcError::NotImplemented) + } + + async fn get_block_dag_info_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetBlockDagInfoRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } async fn resolve_finality_conflict_call( &self, + _connection: Option<&DynRpcConnection>, _request: ResolveFinalityConflictRequest, ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn shutdown_call(&self, _request: ShutdownRequest) -> RpcResult { + async fn shutdown_call(&self, _connection: Option<&DynRpcConnection>, _request: ShutdownRequest) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_headers_call(&self, _request: GetHeadersRequest) -> RpcResult { + async fn get_headers_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetHeadersRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_balance_by_address_call(&self, _request: GetBalanceByAddressRequest) -> RpcResult { + async fn get_balance_by_address_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetBalanceByAddressRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } async fn get_balances_by_addresses_call( &self, + _connection: Option<&DynRpcConnection>, _request: GetBalancesByAddressesRequest, ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_utxos_by_addresses_call(&self, _request: GetUtxosByAddressesRequest) -> RpcResult { + async fn get_utxos_by_addresses_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetUtxosByAddressesRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_sink_blue_score_call(&self, _request: GetSinkBlueScoreRequest) -> RpcResult { + async fn get_sink_blue_score_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetSinkBlueScoreRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn ban_call(&self, _request: BanRequest) -> RpcResult { + async fn ban_call(&self, _connection: Option<&DynRpcConnection>, _request: BanRequest) -> RpcResult { Err(RpcError::NotImplemented) } - async fn unban_call(&self, _request: UnbanRequest) -> RpcResult { + async fn unban_call(&self, _connection: Option<&DynRpcConnection>, _request: UnbanRequest) -> RpcResult { Err(RpcError::NotImplemented) } async fn estimate_network_hashes_per_second_call( &self, + _connection: Option<&DynRpcConnection>, _request: EstimateNetworkHashesPerSecondRequest, ) -> RpcResult { Err(RpcError::NotImplemented) @@ -212,22 +324,44 @@ impl RpcApi for RpcCoreMock { async fn get_mempool_entries_by_addresses_call( &self, + _connection: Option<&DynRpcConnection>, _request: GetMempoolEntriesByAddressesRequest, ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_coin_supply_call(&self, _request: GetCoinSupplyRequest) -> RpcResult { + async fn get_coin_supply_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetCoinSupplyRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } async fn 
get_daa_score_timestamp_estimate_call( &self, + _connection: Option<&DynRpcConnection>, _request: GetDaaScoreTimestampEstimateRequest, ) -> RpcResult { Err(RpcError::NotImplemented) } + async fn get_fee_estimate_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetFeeEstimateRequest, + ) -> RpcResult { + Err(RpcError::NotImplemented) + } + + async fn get_fee_estimate_experimental_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetFeeEstimateExperimentalRequest, + ) -> RpcResult { + Err(RpcError::NotImplemented) + } + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Notification API diff --git a/rpc/macros/src/grpc/server.rs b/rpc/macros/src/grpc/server.rs index 91dea1dd96..9378117195 100644 --- a/rpc/macros/src/grpc/server.rs +++ b/rpc/macros/src/grpc/server.rs @@ -72,7 +72,8 @@ impl ToTokens for RpcTable { Box::pin(async move { let mut response: #kaspad_response_type = match request.payload { Some(Payload::#request_type(ref request)) => match request.try_into() { - Ok(request) => server_ctx.core_service.#fn_call(request).await.into(), + // TODO: RPC-CONNECTION + Ok(request) => server_ctx.core_service.#fn_call(None,request).await.into(), Err(err) => #response_message_type::from(err).into(), }, _ => { @@ -128,7 +129,7 @@ impl ToTokens for RpcTable { { let mut interface = Interface::new(#server_ctx); - for op in #payload_ops::list() { + for op in #payload_ops::iter() { match op { #(#targets)* } diff --git a/rpc/macros/src/lib.rs b/rpc/macros/src/lib.rs index 1c205c26ef..9ca49bf54a 100644 --- a/rpc/macros/src/lib.rs +++ b/rpc/macros/src/lib.rs @@ -39,3 +39,9 @@ pub fn build_wrpc_wasm_bindgen_subscriptions(input: TokenStream) -> TokenStream pub fn build_grpc_server_interface(input: TokenStream) -> TokenStream { grpc::server::build_grpc_server_interface(input) } + +#[proc_macro] +#[proc_macro_error] +pub fn test_wrpc_serializer(input: TokenStream) -> TokenStream { + wrpc::test::build_test(input) +} diff --git a/rpc/macros/src/wrpc/client.rs b/rpc/macros/src/wrpc/client.rs index f33fe57f31..12f41687a7 100644 --- a/rpc/macros/src/wrpc/client.rs +++ b/rpc/macros/src/wrpc/client.rs @@ -52,26 +52,29 @@ impl ToTokens for RpcTable { // the async implementation of the RPC caller is inlined targets.push(quote! { - fn #fn_call<'life0, 'async_trait>( + fn #fn_call<'life0, 'life1, 'async_trait>( &'life0 self, + _connection : ::core::option::Option<&'life1 Arc>, request: #request_type, ) -> ::core::pin::Pin> + ::core::marker::Send + 'async_trait>> where 'life0: 'async_trait, + 'life1: 'async_trait, Self: 'async_trait, { + use workflow_serializer::prelude::*; Box::pin(async move { if let ::core::option::Option::Some(__ret) = ::core::option::Option::None::> { return __ret; } let __self = self; //let request = request; - let __ret: RpcResult<#response_type> = { - let resp: ClientResult<#response_type> = __self.inner.rpc_client.call(#rpc_api_ops::#handler, request).await; + let __ret: RpcResult> = { + let resp: ClientResult> = __self.inner.rpc_client.call(#rpc_api_ops::#handler, Serializable(request)).await; Ok(resp.map_err(|e| kaspa_rpc_core::error::RpcError::RpcSubsystem(e.to_string()))?) 
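// The generated client stub above round-trips every message through the
// `workflow_serializer` `Serializable` newtype. Expanded by hand for one
// call (`GetInfo` chosen for illustration), the macro body reduces to:
//
//     use workflow_serializer::prelude::*;
//
//     let resp: ClientResult<Serializable<GetInfoResponse>> =
//         __self.inner.rpc_client.call(RpcApiOps::GetInfo, Serializable(request)).await;
//     let __ret: RpcResult<Serializable<GetInfoResponse>> =
//         Ok(resp.map_err(|e| kaspa_rpc_core::error::RpcError::RpcSubsystem(e.to_string()))?);
//     __ret.map(Serializable::into_inner)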
}; #[allow(unreachable_code)] - __ret + __ret.map(Serializable::into_inner) }) } diff --git a/rpc/macros/src/wrpc/mod.rs b/rpc/macros/src/wrpc/mod.rs index 1a15b06646..8c8238cdb8 100644 --- a/rpc/macros/src/wrpc/mod.rs +++ b/rpc/macros/src/wrpc/mod.rs @@ -1,3 +1,4 @@ pub mod client; pub mod server; +pub mod test; pub mod wasm; diff --git a/rpc/macros/src/wrpc/server.rs b/rpc/macros/src/wrpc/server.rs index 09a3e0c692..092b1edb32 100644 --- a/rpc/macros/src/wrpc/server.rs +++ b/rpc/macros/src/wrpc/server.rs @@ -50,13 +50,14 @@ impl ToTokens for RpcTable { targets.push(quote! { #rpc_api_ops::#handler => { - interface.method(#rpc_api_ops::#handler, method!(|server_ctx: #server_ctx_type, connection_ctx: #connection_ctx_type, request: #request_type| async move { + interface.method(#rpc_api_ops::#handler, method!(|server_ctx: #server_ctx_type, connection_ctx: #connection_ctx_type, request: Serializable<#request_type>| async move { let verbose = server_ctx.verbose(); if verbose { workflow_log::log_info!("request: {:?}",request); } - let response: #response_type = server_ctx.rpc_service(&connection_ctx).#fn_call(request).await + // TODO: RPC-CONNECT + let response: #response_type = server_ctx.rpc_service(&connection_ctx).#fn_call(None, request.into_inner()).await .map_err(|e|ServerError::Text(e.to_string()))?; if verbose { workflow_log::log_info!("response: {:?}",response); } - Ok(response) + Ok(Serializable(response)) })); } }); @@ -71,7 +72,8 @@ impl ToTokens for RpcTable { #rpc_api_ops >::new(#server_ctx); - for op in #rpc_api_ops::list() { + for op in #rpc_api_ops::iter() { + use workflow_serializer::prelude::*; match op { #(#targets)* _ => { } diff --git a/rpc/macros/src/wrpc/test.rs b/rpc/macros/src/wrpc/test.rs new file mode 100644 index 0000000000..92591b22b0 --- /dev/null +++ b/rpc/macros/src/wrpc/test.rs @@ -0,0 +1,60 @@ +use convert_case::{Case, Casing}; +use proc_macro2::TokenStream; +use proc_macro2::{Ident, Span}; +use quote::{quote, ToTokens}; +use std::convert::Into; +use syn::{ + parse::{Parse, ParseStream}, + parse_macro_input, + punctuated::Punctuated, + Error, Expr, Result, Token, +}; + +#[derive(Debug)] +struct TestTable { + rpc_op: Expr, +} + +impl Parse for TestTable { + fn parse(input: ParseStream) -> Result { + let parsed = Punctuated::::parse_terminated(input).unwrap(); + if parsed.len() != 1 { + return Err(Error::new_spanned(parsed, "usage: test!(GetInfo)".to_string())); + } + + let mut iter = parsed.iter(); + let rpc_op = iter.next().unwrap().clone(); + + Ok(TestTable { rpc_op }) + } +} + +impl ToTokens for TestTable { + fn to_tokens(&self, tokens: &mut TokenStream) { + let rpc_op = &self.rpc_op; + + let (name, _docs) = match rpc_op { + syn::Expr::Path(expr_path) => (expr_path.path.to_token_stream().to_string(), expr_path.attrs.clone()), + _ => (rpc_op.to_token_stream().to_string(), vec![]), + }; + let typename = Ident::new(&name.to_string(), Span::call_site()); + let fn_test = Ident::new(&format!("test_wrpc_serializer_{}", name.to_case(Case::Snake)), Span::call_site()); + + quote! 
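// For reference, an invocation such as `test_wrpc_serializer!(GetInfo)`
// expands (via the `quote!` block below) into a snake_case-named test,
// assuming a generic `test::<T>(name)` helper is in scope at the call site:
//
//     #[test]
//     fn test_wrpc_serializer_get_info() {
//         test::<GetInfo>("GetInfo");
//     }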
{ + + #[test] + fn #fn_test() { + test::<#typename>(#name); + } + + } + .to_tokens(tokens); + } +} + +pub fn build_test(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + let rpc_table = parse_macro_input!(input as TestTable); + let ts = rpc_table.to_token_stream(); + // println!("MACRO: {}", ts.to_string()); + ts.into() +} diff --git a/rpc/macros/src/wrpc/wasm.rs b/rpc/macros/src/wrpc/wasm.rs index 0118220199..30af3e74a3 100644 --- a/rpc/macros/src/wrpc/wasm.rs +++ b/rpc/macros/src/wrpc/wasm.rs @@ -59,7 +59,7 @@ impl ToTokens for RpcHandlers { pub async fn #fn_no_suffix(&self, request : Option<#ts_request_type>) -> Result<#ts_response_type> { let request: #request_type = request.unwrap_or_default().try_into()?; // log_info!("request: {:#?}",request); - let result: RpcResult<#response_type> = self.inner.client.#fn_call(request).await; + let result: RpcResult<#response_type> = self.inner.client.#fn_call(None, request).await; // log_info!("result: {:#?}",result); let response: #response_type = result.map_err(|err|wasm_bindgen::JsError::new(&err.to_string()))?; //log_info!("response: {:#?}",response); @@ -83,7 +83,7 @@ impl ToTokens for RpcHandlers { #[wasm_bindgen(js_name = #fn_camel)] pub async fn #fn_no_suffix(&self, request: #ts_request_type) -> Result<#ts_response_type> { let request: #request_type = request.try_into()?; - let result: RpcResult<#response_type> = self.inner.client.#fn_call(request).await; + let result: RpcResult<#response_type> = self.inner.client.#fn_call(None, request).await; let response: #response_type = result.map_err(|err|wasm_bindgen::JsError::new(&err.to_string()))?; Ok(response.try_into()?) } diff --git a/rpc/service/Cargo.toml b/rpc/service/Cargo.toml index d606d51533..54e9764088 100644 --- a/rpc/service/Cargo.toml +++ b/rpc/service/Cargo.toml @@ -33,4 +33,4 @@ async-trait.workspace = true log.workspace = true tokio.workspace = true triggered.workspace = true -workflow-rpc.workspace = true +workflow-rpc.workspace = true \ No newline at end of file diff --git a/rpc/service/src/converter/consensus.rs b/rpc/service/src/converter/consensus.rs index 9f3f5b661d..c744300e52 100644 --- a/rpc/service/src/converter/consensus.rs +++ b/rpc/service/src/converter/consensus.rs @@ -81,7 +81,7 @@ impl ConsensusConverter { vec![] }; - Ok(RpcBlock { header: (*block.header).clone(), transactions, verbose_data }) + Ok(RpcBlock { header: block.header.as_ref().into(), transactions, verbose_data }) } pub fn get_mempool_entry(&self, consensus: &ConsensusProxy, transaction: &MutableTransaction) -> RpcMempoolEntry { @@ -125,7 +125,7 @@ impl ConsensusConverter { let verbose_data = Some(RpcTransactionVerboseData { transaction_id: transaction.id(), hash: hash(transaction, false), - mass: consensus.calculate_transaction_compute_mass(transaction), + compute_mass: consensus.calculate_transaction_compute_mass(transaction), // TODO: make block_hash an option block_hash: header.map_or_else(RpcHash::default, |x| x.hash), block_time: header.map_or(0, |x| x.timestamp), @@ -162,8 +162,9 @@ impl ConsensusConverter { &self, consensus: &ConsensusProxy, chain_path: &ChainPath, + merged_blocks_limit: Option, ) -> RpcResult> { - let acceptance_data = consensus.async_get_blocks_acceptance_data(chain_path.added.clone()).await.unwrap(); + let acceptance_data = consensus.async_get_blocks_acceptance_data(chain_path.added.clone(), merged_blocks_limit).await.unwrap(); Ok(chain_path .added .iter() diff --git a/rpc/service/src/converter/feerate_estimate.rs b/rpc/service/src/converter/feerate_estimate.rs 
new file mode 100644 index 0000000000..8df695c0cd --- /dev/null +++ b/rpc/service/src/converter/feerate_estimate.rs @@ -0,0 +1,49 @@ +use kaspa_mining::feerate::{FeeEstimateVerbose, FeerateBucket, FeerateEstimations}; +use kaspa_rpc_core::{ + message::GetFeeEstimateExperimentalResponse as RpcFeeEstimateVerboseResponse, RpcFeeEstimate, + RpcFeeEstimateVerboseExperimentalData as RpcFeeEstimateVerbose, RpcFeerateBucket, +}; + +pub trait FeerateBucketConverter { + fn into_rpc(self) -> RpcFeerateBucket; +} + +impl FeerateBucketConverter for FeerateBucket { + fn into_rpc(self) -> RpcFeerateBucket { + RpcFeerateBucket { feerate: self.feerate, estimated_seconds: self.estimated_seconds } + } +} + +pub trait FeeEstimateConverter { + fn into_rpc(self) -> RpcFeeEstimate; +} + +impl FeeEstimateConverter for FeerateEstimations { + fn into_rpc(self) -> RpcFeeEstimate { + RpcFeeEstimate { + priority_bucket: self.priority_bucket.into_rpc(), + normal_buckets: self.normal_buckets.into_iter().map(FeerateBucketConverter::into_rpc).collect(), + low_buckets: self.low_buckets.into_iter().map(FeerateBucketConverter::into_rpc).collect(), + } + } +} + +pub trait FeeEstimateVerboseConverter { + fn into_rpc(self) -> RpcFeeEstimateVerboseResponse; +} + +impl FeeEstimateVerboseConverter for FeeEstimateVerbose { + fn into_rpc(self) -> RpcFeeEstimateVerboseResponse { + RpcFeeEstimateVerboseResponse { + estimate: self.estimations.into_rpc(), + verbose: Some(RpcFeeEstimateVerbose { + network_mass_per_second: self.network_mass_per_second, + mempool_ready_transactions_count: self.mempool_ready_transactions_count, + mempool_ready_transactions_total_mass: self.mempool_ready_transactions_total_mass, + next_block_template_feerate_min: self.next_block_template_feerate_min, + next_block_template_feerate_median: self.next_block_template_feerate_median, + next_block_template_feerate_max: self.next_block_template_feerate_max, + }), + } + } +} diff --git a/rpc/service/src/converter/mod.rs b/rpc/service/src/converter/mod.rs index 2e14603857..fd167d3493 100644 --- a/rpc/service/src/converter/mod.rs +++ b/rpc/service/src/converter/mod.rs @@ -1,3 +1,4 @@ pub mod consensus; +pub mod feerate_estimate; pub mod index; pub mod protocol; diff --git a/rpc/service/src/service.rs b/rpc/service/src/service.rs index 00cc0d0828..d75ff770b0 100644 --- a/rpc/service/src/service.rs +++ b/rpc/service/src/service.rs @@ -1,6 +1,7 @@ //! 
Core server implementation for ClientAPI use super::collector::{CollectorFromConsensus, CollectorFromIndex}; +use crate::converter::feerate_estimate::{FeeEstimateConverter, FeeEstimateVerboseConverter}; use crate::converter::{consensus::ConsensusConverter, index::IndexConverter, protocol::ProtocolConverter}; use crate::service::NetworkType::{Mainnet, Testnet}; use async_trait::async_trait; @@ -34,6 +35,7 @@ use kaspa_index_core::{ connection::IndexChannelConnection, indexed_utxos::UtxoSetByScriptPublicKey, notification::Notification as IndexNotification, notifier::IndexNotifier, }; +use kaspa_mining::feerate::FeeEstimateVerbose; use kaspa_mining::model::tx_query::TransactionQuery; use kaspa_mining::{manager::MiningManagerProxy, mempool::tx::Orphan}; use kaspa_notify::listener::ListenerLifespan; @@ -53,7 +55,8 @@ use kaspa_p2p_lib::common::ProtocolError; use kaspa_perf_monitor::{counters::CountersSnapshot, Monitor as PerfMonitor}; use kaspa_rpc_core::{ api::{ - ops::RPC_API_VERSION, + connection::DynRpcConnection, + ops::{RPC_API_REVISION, RPC_API_VERSION}, rpc::{RpcApi, MAX_SAFE_WINDOW_SIZE}, }, model::*, @@ -61,9 +64,12 @@ use kaspa_rpc_core::{ Notification, RpcError, RpcResult, }; use kaspa_txscript::{extract_script_pub_key_address, pay_to_address_script}; +use kaspa_utils::expiring_cache::ExpiringCache; +use kaspa_utils::sysinfo::SystemInfo; use kaspa_utils::{channel::Channel, triggers::SingleTrigger}; use kaspa_utils_tower::counters::TowerConnectionCounters; use kaspa_utxoindex::api::UtxoIndexProxy; +use std::time::Duration; use std::{ collections::HashMap, iter::once, @@ -109,6 +115,9 @@ pub struct RpcCoreService { perf_monitor: Arc>>, p2p_tower_counters: Arc, grpc_tower_counters: Arc, + system_info: SystemInfo, + fee_estimate_cache: ExpiringCache, + fee_estimate_verbose_cache: ExpiringCache>, } const RPC_CORE: &str = "rpc-core"; @@ -133,6 +142,7 @@ impl RpcCoreService { perf_monitor: Arc>>, p2p_tower_counters: Arc, grpc_tower_counters: Arc, + system_info: SystemInfo, ) -> Self { // This notifier UTXOs subscription granularity to index-processor or consensus notifier let policies = match index_notifier { @@ -208,6 +218,9 @@ impl RpcCoreService { perf_monitor, p2p_tower_counters, grpc_tower_counters, + system_info, + fee_estimate_cache: ExpiringCache::new(Duration::from_millis(500), Duration::from_millis(1000)), + fee_estimate_verbose_cache: ExpiringCache::new(Duration::from_millis(500), Duration::from_millis(1000)), } } @@ -275,7 +288,11 @@ impl RpcCoreService { #[async_trait] impl RpcApi for RpcCoreService { - async fn submit_block_call(&self, request: SubmitBlockRequest) -> RpcResult { + async fn submit_block_call( + &self, + _connection: Option<&DynRpcConnection>, + request: SubmitBlockRequest, + ) -> RpcResult { let session = self.consensus_manager.consensus().unguarded_session(); // TODO: consider adding an error field to SubmitBlockReport to document both the report and error fields @@ -286,7 +303,7 @@ impl RpcApi for RpcCoreService { return Ok(SubmitBlockResponse { report: SubmitBlockReport::Reject(SubmitBlockRejectReason::IsInIBD) }); } - let try_block: RpcResult = (&request.block).try_into(); + let try_block: RpcResult = request.block.try_into(); if let Err(err) = &try_block { trace!("incoming SubmitBlockRequest with block conversion error: {}", err); // error = format!("Could not parse block: {0}", err) @@ -333,7 +350,11 @@ NOTE: This error usually indicates an RPC conversion error between the node and } } - async fn get_block_template_call(&self, request: 
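// The two `ExpiringCache` fields above throttle fee-estimate computation
// under RPC load. A minimal usage sketch follows; the exact semantics of the
// two durations (read here as a refresh threshold and a hard expiration) are
// an assumption based on the 500 ms / 1000 ms values used in this file:
//
//     use kaspa_utils::expiring_cache::ExpiringCache;
//     use std::time::Duration;
//
//     let cache = ExpiringCache::new(Duration::from_millis(500), Duration::from_millis(1000));
//     // `fetch_estimate()` is a hypothetical async producer; concurrent
//     // callers within the window share one cached result instead of each
//     // querying the mining manager.
//     let estimate = cache.get(async move { fetch_estimate().await }).await;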
GetBlockTemplateRequest) -> RpcResult { + async fn get_block_template_call( + &self, + _connection: Option<&DynRpcConnection>, + request: GetBlockTemplateRequest, + ) -> RpcResult { trace!("incoming GetBlockTemplate request"); if *self.config.net == NetworkType::Mainnet && !self.config.enable_mainnet_mining { @@ -360,12 +381,25 @@ NOTE: This error usually indicates an RPC conversion error between the node and let is_nearly_synced = self.config.is_nearly_synced(block_template.selected_parent_timestamp, block_template.selected_parent_daa_score); Ok(GetBlockTemplateResponse { - block: (&block_template.block).into(), + block: block_template.block.into(), is_synced: self.has_sufficient_peer_connectivity() && is_nearly_synced, }) } - async fn get_block_call(&self, request: GetBlockRequest) -> RpcResult { + async fn get_current_block_color_call( + &self, + _connection: Option<&DynRpcConnection>, + request: GetCurrentBlockColorRequest, + ) -> RpcResult { + let session = self.consensus_manager.consensus().unguarded_session(); + + match session.async_get_current_block_color(request.hash).await { + Some(blue) => Ok(GetCurrentBlockColorResponse { blue }), + None => Err(RpcError::MergerNotFound(request.hash)), + } + } + + async fn get_block_call(&self, _connection: Option<&DynRpcConnection>, request: GetBlockRequest) -> RpcResult { // TODO: test let session = self.consensus_manager.consensus().session().await; let block = session.async_get_block_even_if_header_only(request.hash).await?; @@ -377,7 +411,11 @@ NOTE: This error usually indicates an RPC conversion error between the node and }) } - async fn get_blocks_call(&self, request: GetBlocksRequest) -> RpcResult { + async fn get_blocks_call( + &self, + _connection: Option<&DynRpcConnection>, + request: GetBlocksRequest, + ) -> RpcResult { // Validate that user didn't set include_transactions without setting include_blocks if !request.include_blocks && request.include_transactions { return Err(RpcError::InvalidGetBlocksRequest); @@ -426,7 +464,7 @@ NOTE: This error usually indicates an RPC conversion error between the node and Ok(GetBlocksResponse { block_hashes, blocks }) } - async fn get_info_call(&self, _request: GetInfoRequest) -> RpcResult { + async fn get_info_call(&self, _connection: Option<&DynRpcConnection>, _request: GetInfoRequest) -> RpcResult { let is_nearly_synced = self.consensus_manager.consensus().unguarded_session().async_is_nearly_synced().await; Ok(GetInfoResponse { p2p_id: self.flow_context.node_id.to_string(), @@ -439,7 +477,11 @@ NOTE: This error usually indicates an RPC conversion error between the node and }) } - async fn get_mempool_entry_call(&self, request: GetMempoolEntryRequest) -> RpcResult { + async fn get_mempool_entry_call( + &self, + _connection: Option<&DynRpcConnection>, + request: GetMempoolEntryRequest, + ) -> RpcResult { let query = self.extract_tx_query(request.filter_transaction_pool, request.include_orphan_pool)?; let Some(transaction) = self.mining_manager.clone().get_transaction(request.transaction_id, query).await else { return Err(RpcError::TransactionNotFound(request.transaction_id)); @@ -448,7 +490,11 @@ NOTE: This error usually indicates an RPC conversion error between the node and Ok(GetMempoolEntryResponse::new(self.consensus_converter.get_mempool_entry(&session, &transaction))) } - async fn get_mempool_entries_call(&self, request: GetMempoolEntriesRequest) -> RpcResult { + async fn get_mempool_entries_call( + &self, + _connection: Option<&DynRpcConnection>, + request: GetMempoolEntriesRequest, + ) -> 
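// Client-side usage sketch for the new `GetCurrentBlockColor` call handled
// above: the response's `blue` flag reports the block's current GHOSTDAG
// color, and a hash that no chain block merges surfaces as `MergerNotFound`
// (`rpc` and `hash` are assumed bindings):
//
//     let response = rpc.get_current_block_color_call(None, GetCurrentBlockColorRequest { hash }).await?;
//     println!("{hash} is {}", if response.blue { "blue" } else { "red" });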
RpcResult { let query = self.extract_tx_query(request.filter_transaction_pool, request.include_orphan_pool)?; let session = self.consensus_manager.consensus().unguarded_session(); let (transactions, orphans) = self.mining_manager.clone().get_all_transactions(query).await; @@ -462,6 +508,7 @@ NOTE: This error usually indicates an RPC conversion error between the node and async fn get_mempool_entries_by_addresses_call( &self, + _connection: Option<&DynRpcConnection>, request: GetMempoolEntriesByAddressesRequest, ) -> RpcResult { let query = self.extract_tx_query(request.filter_transaction_pool, request.include_orphan_pool)?; @@ -485,13 +532,17 @@ NOTE: This error usually indicates an RPC conversion error between the node and Ok(GetMempoolEntriesByAddressesResponse::new(mempool_entries)) } - async fn submit_transaction_call(&self, request: SubmitTransactionRequest) -> RpcResult { + async fn submit_transaction_call( + &self, + _connection: Option<&DynRpcConnection>, + request: SubmitTransactionRequest, + ) -> RpcResult { let allow_orphan = self.config.unsafe_rpc && request.allow_orphan; if !self.config.unsafe_rpc && request.allow_orphan { - warn!("SubmitTransaction RPC command called with AllowOrphan enabled while node in safe RPC mode -- switching to ForbidOrphan."); + debug!("SubmitTransaction RPC command called with AllowOrphan enabled while node in safe RPC mode -- switching to ForbidOrphan."); } - let transaction: Transaction = (&request.transaction).try_into()?; + let transaction: Transaction = request.transaction.try_into()?; let transaction_id = transaction.id(); let session = self.consensus_manager.consensus().unguarded_session(); let orphan = match allow_orphan { @@ -506,42 +557,93 @@ NOTE: This error usually indicates an RPC conversion error between the node and Ok(SubmitTransactionResponse::new(transaction_id)) } - async fn get_current_network_call(&self, _: GetCurrentNetworkRequest) -> RpcResult { + async fn submit_transaction_replacement_call( + &self, + _connection: Option<&DynRpcConnection>, + request: SubmitTransactionReplacementRequest, + ) -> RpcResult { + let transaction: Transaction = request.transaction.try_into()?; + let transaction_id = transaction.id(); + let session = self.consensus_manager.consensus().unguarded_session(); + let replaced_transaction = + self.flow_context.submit_rpc_transaction_replacement(&session, transaction).await.map_err(|err| { + let err = RpcError::RejectedTransaction(transaction_id, err.to_string()); + debug!("{err}"); + err + })?; + Ok(SubmitTransactionReplacementResponse::new(transaction_id, (&*replaced_transaction).into())) + } + + async fn get_current_network_call( + &self, + _connection: Option<&DynRpcConnection>, + _: GetCurrentNetworkRequest, + ) -> RpcResult { Ok(GetCurrentNetworkResponse::new(*self.config.net)) } - async fn get_subnetwork_call(&self, _: GetSubnetworkRequest) -> RpcResult { + async fn get_subnetwork_call( + &self, + _connection: Option<&DynRpcConnection>, + _: GetSubnetworkRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_sink_call(&self, _: GetSinkRequest) -> RpcResult { + async fn get_sink_call(&self, _connection: Option<&DynRpcConnection>, _: GetSinkRequest) -> RpcResult { Ok(GetSinkResponse::new(self.consensus_manager.consensus().unguarded_session().async_get_sink().await)) } - async fn get_sink_blue_score_call(&self, _: GetSinkBlueScoreRequest) -> RpcResult { + async fn get_sink_blue_score_call( + &self, + _connection: Option<&DynRpcConnection>, + _: GetSinkBlueScoreRequest, + ) -> 
RpcResult { let session = self.consensus_manager.consensus().unguarded_session(); Ok(GetSinkBlueScoreResponse::new(session.async_get_ghostdag_data(session.async_get_sink().await).await?.blue_score)) } async fn get_virtual_chain_from_block_call( &self, + _connection: Option<&DynRpcConnection>, request: GetVirtualChainFromBlockRequest, ) -> RpcResult { let session = self.consensus_manager.consensus().session().await; - let virtual_chain = session.async_get_virtual_chain_from_block(request.start_hash).await?; + + // batch_size is set to 10 times the mergeset_size_limit. + // this means batch_size is 2480 on 10 bps, and 1800 on mainnet. + // this bounds by number of merged blocks, if include_accepted_transactions = true + // else it returns the batch_size amount on pure chain blocks. + // Note: batch_size does not bound removed chain blocks, only added chain blocks. + let batch_size = (self.config.mergeset_size_limit * 10) as usize; + let mut virtual_chain_batch = session.async_get_virtual_chain_from_block(request.start_hash, Some(batch_size)).await?; let accepted_transaction_ids = if request.include_accepted_transaction_ids { - self.consensus_converter.get_virtual_chain_accepted_transaction_ids(&session, &virtual_chain).await? + let accepted_transaction_ids = self + .consensus_converter + .get_virtual_chain_accepted_transaction_ids(&session, &virtual_chain_batch, Some(batch_size)) + .await?; + // bound added to the length of the accepted transaction ids, which is bounded by merged blocks + virtual_chain_batch.added.truncate(accepted_transaction_ids.len()); + accepted_transaction_ids } else { vec![] }; - Ok(GetVirtualChainFromBlockResponse::new(virtual_chain.removed, virtual_chain.added, accepted_transaction_ids)) + Ok(GetVirtualChainFromBlockResponse::new(virtual_chain_batch.removed, virtual_chain_batch.added, accepted_transaction_ids)) } - async fn get_block_count_call(&self, _: GetBlockCountRequest) -> RpcResult { + async fn get_block_count_call( + &self, + _connection: Option<&DynRpcConnection>, + _: GetBlockCountRequest, + ) -> RpcResult { Ok(self.consensus_manager.consensus().unguarded_session().async_estimate_block_count().await) } - async fn get_utxos_by_addresses_call(&self, request: GetUtxosByAddressesRequest) -> RpcResult { + async fn get_utxos_by_addresses_call( + &self, + _connection: Option<&DynRpcConnection>, + request: GetUtxosByAddressesRequest, + ) -> RpcResult { if !self.config.utxoindex { return Err(RpcError::NoUtxoIndex); } @@ -551,7 +653,11 @@ NOTE: This error usually indicates an RPC conversion error between the node and Ok(GetUtxosByAddressesResponse::new(self.index_converter.get_utxos_by_addresses_entries(&entry_map))) } - async fn get_balance_by_address_call(&self, request: GetBalanceByAddressRequest) -> RpcResult { + async fn get_balance_by_address_call( + &self, + _connection: Option<&DynRpcConnection>, + request: GetBalanceByAddressRequest, + ) -> RpcResult { if !self.config.utxoindex { return Err(RpcError::NoUtxoIndex); } @@ -562,6 +668,7 @@ NOTE: This error usually indicates an RPC conversion error between the node and async fn get_balances_by_addresses_call( &self, + _connection: Option<&DynRpcConnection>, request: GetBalancesByAddressesRequest, ) -> RpcResult { if !self.config.utxoindex { @@ -580,7 +687,11 @@ NOTE: This error usually indicates an RPC conversion error between the node and Ok(GetBalancesByAddressesResponse::new(entries)) } - async fn get_coin_supply_call(&self, _: GetCoinSupplyRequest) -> RpcResult { + async fn get_coin_supply_call( + &self, + 
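// Worked numbers for the batching comment above: batch_size is ten times the
// mergeset size limit, so mainnet (limit 180) allows 180 * 10 = 1800 added
// chain blocks per call, and a 10-bps network (limit 248) allows
// 248 * 10 = 2480, matching the figures quoted. When accepted transaction
// ids are requested, the added array is truncated to the same bound:
//
//     virtual_chain_batch.added.truncate(accepted_transaction_ids.len());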
_connection: Option<&DynRpcConnection>, + _: GetCoinSupplyRequest, + ) -> RpcResult { if !self.config.utxoindex { return Err(RpcError::NoUtxoIndex); } @@ -591,6 +702,7 @@ NOTE: This error usually indicates an RPC conversion error between the node and async fn get_daa_score_timestamp_estimate_call( &self, + _connection: Option<&DynRpcConnection>, request: GetDaaScoreTimestampEstimateRequest, ) -> RpcResult { let session = self.consensus_manager.consensus().session().await; @@ -647,15 +759,58 @@ NOTE: This error usually indicates an RPC conversion error between the node and Ok(GetDaaScoreTimestampEstimateResponse::new(timestamps)) } - async fn ping_call(&self, _: PingRequest) -> RpcResult { + async fn get_fee_estimate_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetFeeEstimateRequest, + ) -> RpcResult { + let mining_manager = self.mining_manager.clone(); + let estimate = + self.fee_estimate_cache.get(async move { mining_manager.get_realtime_feerate_estimations().await.into_rpc() }).await; + Ok(GetFeeEstimateResponse { estimate }) + } + + async fn get_fee_estimate_experimental_call( + &self, + connection: Option<&DynRpcConnection>, + request: GetFeeEstimateExperimentalRequest, + ) -> RpcResult { + if request.verbose { + let mining_manager = self.mining_manager.clone(); + let consensus_manager = self.consensus_manager.clone(); + let prefix = self.config.prefix(); + + let response = self + .fee_estimate_verbose_cache + .get(async move { + let session = consensus_manager.consensus().unguarded_session(); + mining_manager.get_realtime_feerate_estimations_verbose(&session, prefix).await.map(FeeEstimateVerbose::into_rpc) + }) + .await?; + Ok(response) + } else { + let estimate = self.get_fee_estimate_call(connection, GetFeeEstimateRequest {}).await?.estimate; + Ok(GetFeeEstimateExperimentalResponse { estimate, verbose: None }) + } + } + + async fn ping_call(&self, _connection: Option<&DynRpcConnection>, _: PingRequest) -> RpcResult { Ok(PingResponse {}) } - async fn get_headers_call(&self, _request: GetHeadersRequest) -> RpcResult { + async fn get_headers_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetHeadersRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_block_dag_info_call(&self, _: GetBlockDagInfoRequest) -> RpcResult { + async fn get_block_dag_info_call( + &self, + _connection: Option<&DynRpcConnection>, + _: GetBlockDagInfoRequest, + ) -> RpcResult { let session = self.consensus_manager.consensus().unguarded_session(); let (consensus_stats, tips, pruning_point, sink) = join!(session.async_get_stats(), session.async_get_tips(), session.async_pruning_point(), session.async_get_sink()); @@ -675,6 +830,7 @@ NOTE: This error usually indicates an RPC conversion error between the node and async fn estimate_network_hashes_per_second_call( &self, + _connection: Option<&DynRpcConnection>, request: EstimateNetworkHashesPerSecondRequest, ) -> RpcResult { if !self.config.unsafe_rpc && request.window_size > MAX_SAFE_WINDOW_SIZE { @@ -704,7 +860,7 @@ NOTE: This error usually indicates an RPC conversion error between the node and )) } - async fn add_peer_call(&self, request: AddPeerRequest) -> RpcResult { + async fn add_peer_call(&self, _connection: Option<&DynRpcConnection>, request: AddPeerRequest) -> RpcResult { if !self.config.unsafe_rpc { warn!("AddPeer RPC command called while node in safe RPC mode -- ignoring."); return Err(RpcError::UnavailableInSafeMode); @@ -718,12 +874,16 @@ NOTE: This error usually indicates an RPC 
conversion error between the node and Ok(AddPeerResponse {}) } - async fn get_peer_addresses_call(&self, _: GetPeerAddressesRequest) -> RpcResult { + async fn get_peer_addresses_call( + &self, + _connection: Option<&DynRpcConnection>, + _: GetPeerAddressesRequest, + ) -> RpcResult { let address_manager = self.flow_context.address_manager.lock(); Ok(GetPeerAddressesResponse::new(address_manager.get_all_addresses(), address_manager.get_all_banned_addresses())) } - async fn ban_call(&self, request: BanRequest) -> RpcResult { + async fn ban_call(&self, _connection: Option<&DynRpcConnection>, request: BanRequest) -> RpcResult { if !self.config.unsafe_rpc { warn!("Ban RPC command called while node in safe RPC mode -- ignoring."); return Err(RpcError::UnavailableInSafeMode); @@ -740,7 +900,7 @@ NOTE: This error usually indicates an RPC conversion error between the node and Ok(BanResponse {}) } - async fn unban_call(&self, request: UnbanRequest) -> RpcResult { + async fn unban_call(&self, _connection: Option<&DynRpcConnection>, request: UnbanRequest) -> RpcResult { if !self.config.unsafe_rpc { warn!("Unban RPC command called while node in safe RPC mode -- ignoring."); return Err(RpcError::UnavailableInSafeMode); @@ -754,13 +914,17 @@ NOTE: This error usually indicates an RPC conversion error between the node and Ok(UnbanResponse {}) } - async fn get_connected_peer_info_call(&self, _: GetConnectedPeerInfoRequest) -> RpcResult { + async fn get_connected_peer_info_call( + &self, + _connection: Option<&DynRpcConnection>, + _: GetConnectedPeerInfoRequest, + ) -> RpcResult { let peers = self.flow_context.hub().active_peers(); let peer_info = self.protocol_converter.get_peers_info(&peers); Ok(GetConnectedPeerInfoResponse::new(peer_info)) } - async fn shutdown_call(&self, _: ShutdownRequest) -> RpcResult { + async fn shutdown_call(&self, _connection: Option<&DynRpcConnection>, _: ShutdownRequest) -> RpcResult { if !self.config.unsafe_rpc { warn!("Shutdown RPC command called while node in safe RPC mode -- ignoring."); return Err(RpcError::UnavailableInSafeMode); @@ -783,6 +947,7 @@ NOTE: This error usually indicates an RPC conversion error between the node and async fn resolve_finality_conflict_call( &self, + _connection: Option<&DynRpcConnection>, _request: ResolveFinalityConflictRequest, ) -> RpcResult { if !self.config.unsafe_rpc { @@ -792,7 +957,25 @@ NOTE: This error usually indicates an RPC conversion error between the node and Err(RpcError::NotImplemented) } - async fn get_metrics_call(&self, req: GetMetricsRequest) -> RpcResult { + async fn get_connections_call( + &self, + _connection: Option<&DynRpcConnection>, + req: GetConnectionsRequest, + ) -> RpcResult { + let clients = (self.wrpc_borsh_counters.active_connections.load(Ordering::Relaxed) + + self.wrpc_json_counters.active_connections.load(Ordering::Relaxed)) as u32; + let peers = self.flow_context.hub().active_peers_len() as u16; + + let profile_data = req.include_profile_data.then(|| { + let CountersSnapshot { resident_set_size: memory_usage, cpu_usage, .. 
} = self.perf_monitor.snapshot(); + + ConnectionsProfileData { cpu_usage: cpu_usage as f32, memory_usage } + }); + + Ok(GetConnectionsResponse { clients, peers, profile_data }) + } + + async fn get_metrics_call(&self, _connection: Option<&DynRpcConnection>, req: GetMetricsRequest) -> RpcResult { let CountersSnapshot { resident_set_size, virtual_memory_size, @@ -817,7 +1000,7 @@ NOTE: This error usually indicates an RPC conversion error between the node and disk_io_write_per_sec: disk_io_write_per_sec as f32, }); - let connection_metrics = req.connection_metrics.then_some(ConnectionMetrics { + let connection_metrics = req.connection_metrics.then(|| ConnectionMetrics { borsh_live_connections: self.wrpc_borsh_counters.active_connections.load(Ordering::Relaxed) as u32, borsh_connection_attempts: self.wrpc_borsh_counters.total_connections.load(Ordering::Relaxed) as u64, borsh_handshake_failures: self.wrpc_borsh_counters.handshake_failures.load(Ordering::Relaxed) as u64, @@ -828,7 +1011,7 @@ NOTE: This error usually indicates an RPC conversion error between the node and active_peers: self.flow_context.hub().active_peers_len() as u32, }); - let bandwidth_metrics = req.bandwidth_metrics.then_some(BandwidthMetrics { + let bandwidth_metrics = req.bandwidth_metrics.then(|| BandwidthMetrics { borsh_bytes_tx: self.wrpc_borsh_counters.tx_bytes.load(Ordering::Relaxed) as u64, borsh_bytes_rx: self.wrpc_borsh_counters.rx_bytes.load(Ordering::Relaxed) as u64, json_bytes_tx: self.wrpc_json_counters.tx_bytes.load(Ordering::Relaxed) as u64, @@ -866,20 +1049,55 @@ NOTE: This error usually indicates an RPC conversion error between the node and None }; + let storage_metrics = req.storage_metrics.then_some(StorageMetrics { storage_size_bytes: 0 }); + + let custom_metrics: Option> = None; + let server_time = unix_now(); - let response = GetMetricsResponse { server_time, process_metrics, connection_metrics, bandwidth_metrics, consensus_metrics }; + let response = GetMetricsResponse { + server_time, + process_metrics, + connection_metrics, + bandwidth_metrics, + consensus_metrics, + storage_metrics, + custom_metrics, + }; + + Ok(response) + } + + async fn get_system_info_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetSystemInfoRequest, + ) -> RpcResult { + let response = GetSystemInfoResponse { + version: self.system_info.version.clone(), + system_id: self.system_info.system_id.clone(), + git_hash: self.system_info.git_short_hash.clone(), + cpu_physical_cores: self.system_info.cpu_physical_cores, + total_memory: self.system_info.total_memory, + fd_limit: self.system_info.fd_limit, + proxy_socket_limit_per_cpu_core: self.system_info.proxy_socket_limit_per_cpu_core, + }; Ok(response) } - async fn get_server_info_call(&self, _request: GetServerInfoRequest) -> RpcResult { + async fn get_server_info_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetServerInfoRequest, + ) -> RpcResult { let session = self.consensus_manager.consensus().unguarded_session(); let is_synced: bool = self.has_sufficient_peer_connectivity() && session.async_is_nearly_synced().await; let virtual_daa_score = session.get_virtual_daa_score(); Ok(GetServerInfoResponse { rpc_api_version: RPC_API_VERSION, + rpc_api_revision: RPC_API_REVISION, server_version: version().to_string(), network_id: self.config.net, has_utxo_index: self.config.utxoindex, @@ -888,7 +1106,11 @@ NOTE: This error usually indicates an RPC conversion error between the node and }) } - async fn get_sync_status_call(&self, _request: 
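// The `then_some(...)` to `then(|| ...)` changes above are a laziness fix:
// `bool::then_some` evaluates its argument eagerly even when the flag is
// false, whereas `bool::then` only invokes the closure on demand, so the
// atomic counter loads behind ConnectionMetrics and BandwidthMetrics are now
// skipped when the caller did not request them. In general:
//
//     let eager = flag.then_some(expensive());   // `expensive()` always runs
//     let lazy  = flag.then(|| expensive());     // runs only when `flag` is true
//
// (`expensive()` stands in for the metric snapshot construction.)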
GetSyncStatusRequest) -> RpcResult { + async fn get_sync_status_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetSyncStatusRequest, + ) -> RpcResult { let session = self.consensus_manager.consensus().unguarded_session(); let is_synced: bool = self.has_sufficient_peer_connectivity() && session.async_is_nearly_synced().await; Ok(GetSyncStatusResponse { is_synced }) diff --git a/rpc/wrpc/client/Cargo.toml b/rpc/wrpc/client/Cargo.toml index 199232ff00..1cc0a21917 100644 --- a/rpc/wrpc/client/Cargo.toml +++ b/rpc/wrpc/client/Cargo.toml @@ -44,4 +44,6 @@ workflow-dom.workspace = true workflow-http.workspace = true workflow-log.workspace = true workflow-rpc.workspace = true -workflow-wasm.workspace = true \ No newline at end of file +workflow-serializer.workspace = true +workflow-wasm.workspace = true +rustls.workspace = true \ No newline at end of file diff --git a/rpc/wrpc/client/Resolvers.toml b/rpc/wrpc/client/Resolvers.toml index 295a352d2f..cb8b436527 100644 --- a/rpc/wrpc/client/Resolvers.toml +++ b/rpc/wrpc/client/Resolvers.toml @@ -1,11 +1,41 @@ [[resolver]] -url = "https://beacon.kaspa-ng.org" -enable = true +enable = false +address = "http://127.0.0.1:8888" -[[resolver]] -url = "https://beacon.kaspa-ng.io" -enable = true +[[group]] +template = "https://*.kaspa.stream" +nodes = ["eric","maxim","sean","troy"] -[[resolver]] -url = "http://127.0.0.1:8888" -enable = false +[[group]] +template = "https://*.kaspa.red" +nodes = ["john", "mike", "paul", "alex"] + +[[group]] +template = "https://*.kaspa.green" +nodes = ["jake", "mark", "adam", "liam"] + +[[group]] +template = "https://*.kaspa.blue" +nodes = ["noah", "ryan", "jack", "luke"] + +# [[group]] +# enable = true +# template = "https://*.kaspa-ng.org" +# nodes = ["cole", "ivan", "oscar", "zane"] + +# [[group]] +# enable = true +# template = "https://*.kaspa-ng.io" +# nodes = ["gary", "hugo", "finn", "evan"] + +# [[group]] +# enable = true +# template = "https://*.kaspa-ng.net" +# nodes = ["neil", "dave", "kyle", "toby"] + +# --- + +# [[group]] +# enable = false +# template = "https://*." +# nodes = ["rudy", "todd", "clay", "walt"] diff --git a/rpc/wrpc/client/src/client.rs b/rpc/wrpc/client/src/client.rs index 4e9fffc0c5..3ac04fa984 100644 --- a/rpc/wrpc/client/src/client.rs +++ b/rpc/wrpc/client/src/client.rs @@ -1,3 +1,5 @@ +//! Kaspa wRPC client implementation. + use crate::imports::*; use crate::parse::parse_host; use crate::{error::Error, node::NodeDescriptor}; @@ -18,7 +20,7 @@ use workflow_rpc::client::Ctl as WrpcCtl; pub use workflow_rpc::client::{ ConnectOptions, ConnectResult, ConnectStrategy, Resolver as RpcResolver, ResolverResult, WebSocketConfig, WebSocketError, }; - +use workflow_serializer::prelude::*; type RpcClientNotifier = Arc>; struct Inner { @@ -34,7 +36,14 @@ struct Inner { connect_guard: AsyncMutex<()>, disconnect_guard: AsyncMutex<()>, // --- + // The permanent url passed in the constructor + // (dominant, overrides Resolver if supplied). + ctor_url: Mutex>, + // The url passed in the connect() method + // (overrides default URL and the Resolver). default_url: Mutex>, + // The current url wRPC is connected to + // (possibly acquired via the Resolver). 
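// Sketch of the URL precedence these fields implement (see `resolve_url()`
// below): a URL given to `connect()` is stored as `default_url` and wins,
// then the constructor URL (`ctor_url`), and only when both are absent is
// the `Resolver` consulted for a public node endpoint:
//
//     let url = self.default_url().or(self.ctor_url());
//     // ...neither set => resolver.get_node(encoding, network_id).await?.url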
current_url: Mutex>, resolver: Mutex>, network_id: Mutex>, @@ -73,16 +82,16 @@ impl Inner { let notification_sender_ = notification_relay_channel.sender.clone(); interface.notification( notification_op, - workflow_rpc::client::Notification::new(move |notification: kaspa_rpc_core::Notification| { + workflow_rpc::client::Notification::new(move |notification: Serializable| { let notification_sender = notification_sender_.clone(); Box::pin(async move { // log_info!("notification receivers: {}", notification_sender.receiver_count()); // log_trace!("notification {:?}", notification); if notification_sender.receiver_count() > 1 { // log_info!("notification: posting to channel: {notification:?}"); - notification_sender.send(notification).await?; + notification_sender.send(notification.into_inner()).await?; } else { - log_warn!("WARNING: Kaspa RPC notification is not consumed by user: {:?}", notification); + log_warn!("WARNING: Kaspa RPC notification is not consumed by user: {:?}", notification.into_inner()); } Ok(()) }) @@ -104,7 +113,8 @@ impl Inner { connect_guard: async_std::sync::Mutex::new(()), disconnect_guard: async_std::sync::Mutex::new(()), // --- - default_url: Mutex::new(url.map(|s| s.to_string())), + ctor_url: Mutex::new(url.map(|s| s.to_string())), + default_url: Mutex::new(None), current_url: Mutex::new(None), resolver: Mutex::new(resolver), network_id: Mutex::new(network_id), @@ -121,17 +131,22 @@ impl Inner { /// Start sending notifications of some type to the client. async fn start_notify_to_client(&self, scope: Scope) -> RpcResult<()> { - let _response: SubscribeResponse = self.rpc_client.call(RpcApiOps::Subscribe, scope).await.map_err(|err| err.to_string())?; + let _response: Serializable = + self.rpc_client.call(RpcApiOps::Subscribe, Serializable(scope)).await.map_err(|err| err.to_string())?; Ok(()) } /// Stop sending notifications of some type to the client. async fn stop_notify_to_client(&self, scope: Scope) -> RpcResult<()> { - let _response: UnsubscribeResponse = - self.rpc_client.call(RpcApiOps::Unsubscribe, scope).await.map_err(|err| err.to_string())?; + let _response: Serializable = + self.rpc_client.call(RpcApiOps::Unsubscribe, Serializable(scope)).await.map_err(|err| err.to_string())?; Ok(()) } + fn ctor_url(&self) -> Option { + self.ctor_url.lock().unwrap().clone() + } + fn default_url(&self) -> Option { self.default_url.lock().unwrap().clone() } @@ -213,7 +228,7 @@ impl SubscriptionManager for Inner { #[async_trait] impl RpcResolver for Inner { async fn resolve_url(&self) -> ResolverResult { - let url = if let Some(url) = self.default_url() { + let url = if let Some(url) = self.default_url().or(self.ctor_url()) { url } else if let Some(resolver) = self.resolver().as_ref() { let network_id = self.network_id().expect("Resolver requires network id in RPC client configuration"); @@ -222,7 +237,7 @@ impl RpcResolver for Inner { self.node_descriptor.lock().unwrap().replace(Arc::new(node)); url } else { - panic!("RpcClient resolver configuration error (expecting Some(Resolver))") + panic!("RpcClient resolver configuration error (expecting `url` or `resolver` as `Some(Resolver))`") }; self.rpc_ctl.set_descriptor(Some(url.clone())); @@ -233,14 +248,17 @@ impl RpcResolver for Inner { const WRPC_CLIENT: &str = "wrpc-client"; -/// [`KaspaRpcClient`] allows connection to the Kaspa wRPC Server via -/// binary Borsh or JSON protocols. +/// # [`KaspaRpcClient`] connects to Kaspa wRPC endpoint via binary Borsh or JSON protocols. 
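// A minimal end-to-end sketch of connecting with this client. The `url` and
// `strategy` fields of `ConnectOptions` are taken from this diff; the
// remaining constructor arguments (resolver, network id, subscription
// context) are assumed from the crate's API and may differ:
//
//     let client = KaspaRpcClient::new(
//         WrpcEncoding::Borsh,
//         None,                                        // no fixed URL: use the resolver
//         Some(Resolver::default()),
//         Some(NetworkId::new(NetworkType::Mainnet)),
//         None,
//     )?;
//     client.connect(Some(ConnectOptions { strategy: ConnectStrategy::Fallback, ..Default::default() })).await?;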
/// /// RpcClient has two ways to interface with the underlying RPC subsystem: /// [`Interface`] that has a [`notification()`](Interface::notification) /// method to register closures that will be invoked on server-side -/// notifications and the [`RpcClient::call`] method that allows async -/// method invocation server-side. +/// notifications and the [`RpcClient::call`] method that allows server-side +/// async method invocation. +/// +/// The node address can be supplied via a URL or a [`Resolver`] that +/// can be used to resolve a public node address dynamically. [`Resolver`] can also +/// be configured to operate against custom node clusters. /// #[derive(Clone)] pub struct KaspaRpcClient { @@ -254,7 +272,11 @@ impl Debug for KaspaRpcClient { } impl KaspaRpcClient { - /// Create a new `KaspaRpcClient` with the given Encoding and URL + /// Create a new `KaspaRpcClient` with the given Encoding, and an optional url or a Resolver. + /// Please note that if you pass the url to the constructor, it will force the KaspaRpcClient + /// to always use this url. If you want to have the ability to switch between urls, + /// you must pass [`Option::None`] as the `url` argument and then supply your own url to the `connect()` + /// function each time you connect. pub fn new( encoding: Encoding, url: Option<&str>, @@ -371,6 +393,10 @@ impl KaspaRpcClient { &self.inner.rpc_ctl } + pub fn ctl_multiplexer(&self) -> Multiplexer { + self.inner.wrpc_ctl_multiplexer.clone() + } + /// Start background RPC services. pub async fn start(&self) -> Result<()> { if !self.inner.background_services_running.load(Ordering::SeqCst) { @@ -401,14 +427,16 @@ impl KaspaRpcClient { /// This method starts background RPC services if they are not running and /// attempts to connect to the RPC endpoint. pub async fn connect(&self, options: Option) -> ConnectResult { + // this has no effect if not currently connected + self.disconnect().await?; + let _guard = self.inner.connect_guard.lock().await; - let mut options = options.unwrap_or_default(); + let options = options.unwrap_or_default(); let strategy = options.strategy; - if let Some(url) = options.url.take() { - self.set_url(Some(&url))?; - } + self.inner.set_default_url(options.url.as_deref()); + self.inner.rpc_ctl.set_descriptor(options.url.clone()); // 1Gb message and frame size limits (on native and NodeJs platforms) let ws_config = WebSocketConfig { @@ -584,6 +612,7 @@ impl RpcApi for KaspaRpcClient { build_wrpc_client_interface!( RpcApiOps, [ + Ping, AddPeer, Ban, EstimateNetworkHashesPerSecond, @@ -594,29 +623,34 @@ impl RpcApi for KaspaRpcClient { GetBlockDagInfo, GetBlocks, GetBlockTemplate, + GetCurrentBlockColor, GetCoinSupply, GetConnectedPeerInfo, - GetDaaScoreTimestampEstimate, - GetServerInfo, + GetConnections, GetCurrentNetwork, + GetDaaScoreTimestampEstimate, + GetFeeEstimate, + GetFeeEstimateExperimental, GetHeaders, GetInfo, GetMempoolEntries, GetMempoolEntriesByAddresses, GetMempoolEntry, - GetPeerAddresses, GetMetrics, + GetPeerAddresses, + GetServerInfo, GetSink, - GetSyncStatus, + GetSinkBlueScore, GetSubnetwork, + GetSyncStatus, + GetSystemInfo, GetUtxosByAddresses, - GetSinkBlueScore, GetVirtualChainFromBlock, - Ping, ResolveFinalityConflict, Shutdown, SubmitBlock, SubmitTransaction, + SubmitTransactionReplacement, Unban, ] ); diff --git a/rpc/wrpc/client/src/error.rs b/rpc/wrpc/client/src/error.rs index 781455ddd8..657027ed0b 100644 --- a/rpc/wrpc/client/src/error.rs +++ b/rpc/wrpc/client/src/error.rs @@ -1,3 +1,5 @@ +//! 
[`Error`](enum@Error) variants for the wRPC client library. + use thiserror::Error; use wasm_bindgen::JsError; use wasm_bindgen::JsValue; diff --git a/rpc/wrpc/client/src/lib.rs b/rpc/wrpc/client/src/lib.rs index b3f26c425f..ac004eccb4 100644 --- a/rpc/wrpc/client/src/lib.rs +++ b/rpc/wrpc/client/src/lib.rs @@ -1,3 +1,19 @@ +//! +//! # wRPC Client for Rusty Kaspa p2p Node +//! +//! This crate provides a WebSocket RPC client for Rusty Kaspa p2p node. It is based on the +//! [wRPC](https://docs.rs/workflow-rpc) crate that offers WebSocket RPC implementation +//! for Rust based on Borsh and Serde JSON serialization. wRPC is a lightweight RPC framework +//! meant to function as an IPC (Inter-Process Communication) mechanism for Rust applications. +//! +//! Rust examples on using wRPC client can be found in the +//! [examples](https://github.com/kaspanet/rusty-kaspa/tree/master/rpc/wrpc/examples) folder. +//! +//! WASM bindings for wRPC client can be found in the [`kaspa-wrpc-wasm`](https://docs.rs/kaspa-wrpc-wasm) crate. +//! +//! The main struct managing Kaspa RPC client connections is the [`KaspaRpcClient`]. +//! + pub mod client; pub mod error; mod imports; diff --git a/rpc/wrpc/client/src/node.rs b/rpc/wrpc/client/src/node.rs index 4afb0f1b7a..f775bfd188 100644 --- a/rpc/wrpc/client/src/node.rs +++ b/rpc/wrpc/client/src/node.rs @@ -1,3 +1,5 @@ +//! Node connection endpoint as provided by the [`Resolver`]. + use crate::imports::*; /// @@ -11,16 +13,10 @@ use crate::imports::*; pub struct NodeDescriptor { /// The unique identifier of the node. #[wasm_bindgen(getter_with_clone)] - pub id: String, + pub uid: String, /// The URL of the node WebSocket (wRPC URL). #[wasm_bindgen(getter_with_clone)] pub url: String, - /// Optional name of the node provider. - #[wasm_bindgen(getter_with_clone)] - pub provider_name: Option, - /// Optional site URL of the node provider. - #[wasm_bindgen(getter_with_clone)] - pub provider_url: Option, } impl Eq for NodeDescriptor {} diff --git a/rpc/wrpc/client/src/parse.rs b/rpc/wrpc/client/src/parse.rs index 35db2c7686..5a497c507a 100644 --- a/rpc/wrpc/client/src/parse.rs +++ b/rpc/wrpc/client/src/parse.rs @@ -1,3 +1,5 @@ +//! wRPC URL parsing and validation utilities. + use std::fmt::Display; use std::net::{Ipv4Addr, Ipv6Addr}; use std::num::ParseIntError; diff --git a/rpc/wrpc/client/src/prelude.rs b/rpc/wrpc/client/src/prelude.rs index 6a410b7235..a4598e5374 100644 --- a/rpc/wrpc/client/src/prelude.rs +++ b/rpc/wrpc/client/src/prelude.rs @@ -1,3 +1,5 @@ +//! Re-exports of the most commonly used types and traits. + pub use crate::client::{ConnectOptions, ConnectStrategy}; pub use crate::{KaspaRpcClient, Resolver, WrpcEncoding}; pub use kaspa_consensus_core::network::{NetworkId, NetworkType}; diff --git a/rpc/wrpc/client/src/resolver.rs b/rpc/wrpc/client/src/resolver.rs index 4fd159b40b..170fe12ddf 100644 --- a/rpc/wrpc/client/src/resolver.rs +++ b/rpc/wrpc/client/src/resolver.rs @@ -1,45 +1,93 @@ +//! +//! Module implementing [`Resolver`] client for obtaining public Kaspa wRPC endpoints. +//! 
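// How the resolver configuration expands: each entry in a group's `nodes`
// list replaces the `*` in its `template`, and standalone `[[resolver]]`
// records contribute their `address` verbatim. For example:
//
//     [[group]]
//     template = "https://*.example.org"
//     nodes = ["alpha", "beta"]
//
// yields "https://alpha.example.org" and "https://beta.example.org"
// (see `test_resolver_config_1` below, where two 8-node groups plus one
// resolver record produce 17 URLs).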
+
+use std::sync::OnceLock;
+
 use crate::error::Error;
 use crate::imports::*;
 use crate::node::NodeDescriptor;
 pub use futures::future::join_all;
 use rand::seq::SliceRandom;
 use rand::thread_rng;
+use workflow_core::runtime;
 use workflow_http::get_json;
 
-const DEFAULT_VERSION: usize = 1;
+const CURRENT_VERSION: usize = 2;
+const RESOLVER_CONFIG: &str = include_str!("../Resolvers.toml");
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct ResolverRecord {
-    pub url: String,
+    pub address: String,
+    pub enable: Option<bool>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ResolverGroup {
+    pub template: String,
+    pub nodes: Vec<String>,
     pub enable: Option<bool>,
 }
 
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct ResolverConfig {
-    resolver: Vec<ResolverRecord>,
+    #[serde(rename = "group")]
+    groups: Vec<ResolverGroup>,
+    #[serde(rename = "resolver")]
+    resolvers: Vec<ResolverRecord>,
 }
 
 fn try_parse_resolvers(toml: &str) -> Result<Vec<Arc<String>>> {
-    Ok(toml::from_str::<ResolverConfig>(toml)?
-        .resolver
+    let config = toml::from_str::<ResolverConfig>(toml)?;
+
+    let mut resolvers = config
+        .resolvers
         .into_iter()
-        .filter_map(|resolver| resolver.enable.unwrap_or(true).then_some(Arc::new(resolver.url)))
-        .collect::<Vec<_>>())
+        .filter_map(|resolver| resolver.enable.unwrap_or(true).then_some(resolver.address))
+        .collect::<Vec<_>>();
+
+    let groups = config.groups.into_iter().filter(|group| group.enable.unwrap_or(true)).collect::<Vec<_>>();
+
+    for group in groups {
+        let ResolverGroup { template, nodes, .. } = group;
+        for node in nodes {
+            resolvers.push(template.replace('*', &node));
+        }
+    }
+
+    Ok(resolvers.into_iter().map(Arc::new).collect::<Vec<_>>())
 }
 
 #[derive(Debug)]
 struct Inner {
     pub urls: Vec<Arc<String>>,
+    pub tls: bool,
+    public: bool,
 }
 
 impl Inner {
-    pub fn new(urls: Vec<Arc<String>>) -> Self {
-        Self { urls }
+    pub fn new(urls: Option<Vec<Arc<String>>>, tls: bool) -> Self {
+        if urls.as_ref().is_some_and(|urls| urls.is_empty()) {
+            panic!("Resolver: Empty URL list supplied to the constructor.");
+        }
+
+        let mut public = false;
+        let urls = urls.unwrap_or_else(|| {
+            public = true;
+            try_parse_resolvers(RESOLVER_CONFIG).expect("TOML: Unable to parse RPC Resolver list")
+        });
+
+        Self { urls, tls, public }
     }
 }
 
 ///
-/// Resolver is a client for obtaining public Kaspa wRPC endpoints.
+/// # Resolver - a client for obtaining public Kaspa wRPC endpoints.
+///
+/// This client operates against the [Kaspa Resolver](https://github.com/aspectron/kaspa-resolver) service,
+/// which provides load-balancing and failover capabilities for Kaspa wRPC endpoints. The default
+/// configuration allows access to public Kaspa nodes, while a custom configuration can be supplied
+/// if you are running your own Kaspa node cluster.
 ///
 #[derive(Debug, Clone)]
 pub struct Resolver {
@@ -48,33 +96,75 @@ pub struct Resolver {
 
 impl Default for Resolver {
     fn default() -> Self {
-        let toml = include_str!("../Resolvers.toml");
-        let urls = try_parse_resolvers(toml).expect("TOML: Unable to parse RPC Resolver list");
-        Self { inner: Arc::new(Inner::new(urls)) }
+        Self { inner: Arc::new(Inner::new(None, false)) }
    }
 }
 
 impl Resolver {
-    pub fn new(urls: Vec<Arc<String>>) -> Self {
-        if urls.is_empty() {
-            panic!("Resolver: Empty URL list supplied to the constructor.");
+    /// Create a new [`Resolver`] client with the specified list of resolver URLs and an optional `tls` flag.
+    /// The `tls` flag can be used to enforce a secure connection to the node.
+    pub fn new(urls: Option<Vec<Arc<String>>>, tls: bool) -> Self {
+        Self { inner: Arc::new(Inner::new(urls, tls)) }
+    }
+
+    /// Obtain a list of URLs in the resolver client. (This function
+    /// returns `None` if the resolver is configured to use public
+    /// node endpoints.)
+    pub fn urls(&self) -> Option<Vec<Arc<String>>> {
+        if self.inner.public {
+            None
+        } else {
+            Some(self.inner.urls.clone())
         }
+    }
 
-        Self { inner: Arc::new(Inner::new(urls)) }
+    /// Obtain the `tls` flag in the resolver client.
+    pub fn tls(&self) -> bool {
+        self.inner.tls
     }
 
-    pub fn urls(&self) -> Vec<Arc<String>> {
-        self.inner.urls.clone()
+    fn tls_as_str(&self) -> &'static str {
+        if self.inner.tls {
+            "tls"
+        } else {
+            "any"
+        }
+    }
+
+    fn make_url(&self, url: &str, encoding: Encoding, network_id: NetworkId) -> String {
+        static TLS: OnceLock<&'static str> = OnceLock::new();
+
+        let tls = *TLS.get_or_init(|| {
+            if runtime::is_web() {
+                let tls = js_sys::Reflect::get(&js_sys::global(), &"location".into())
+                    .and_then(|location| js_sys::Reflect::get(&location, &"protocol".into()))
+                    .ok()
+                    .and_then(|protocol| protocol.as_string())
+                    .map(|protocol| protocol.starts_with("https"))
+                    .unwrap_or(false);
+                if tls {
+                    "tls"
+                } else {
+                    self.tls_as_str()
+                }
+            } else {
+                self.tls_as_str()
+            }
+        });
+
+        format!("{url}/v{CURRENT_VERSION}/kaspa/{network_id}/{tls}/wrpc/{encoding}")
     }
 
+    // query a single resolver service
     async fn fetch_node_info(&self, url: &str, encoding: Encoding, network_id: NetworkId) -> Result<NodeDescriptor> {
-        let url = format!("{}/v{}/wrpc/{}/{}", url, DEFAULT_VERSION, encoding, network_id);
+        let url = self.make_url(url, encoding, network_id);
         let node = get_json::<NodeDescriptor>(&url).await.map_err(|error| Error::custom(format!("Unable to connect to {url}: {error}")))?;
         Ok(node)
     }
 
-    pub async fn fetch(&self, encoding: Encoding, network_id: NetworkId) -> Result<NodeDescriptor> {
+    // query multiple resolver services in random order
+    async fn fetch(&self, encoding: Encoding, network_id: NetworkId) -> Result<NodeDescriptor> {
         let mut urls = self.inner.urls.clone();
         urls.shuffle(&mut thread_rng());
 
@@ -88,33 +178,48 @@ impl Resolver {
         Err(Error::Custom(format!("Failed to connect: {:?}", errors)))
     }
 
-    pub async fn fetch_all(&self, encoding: Encoding, network_id: NetworkId) -> Result<Vec<NodeDescriptor>> {
-        let futures = self.inner.urls.iter().map(|url| self.fetch_node_info(url, encoding, network_id)).collect::<Vec<_>>();
-        let mut errors = Vec::default();
-        let result = join_all(futures)
-            .await
-            .into_iter()
-            .filter_map(|result| match result {
-                Ok(node) => Some(node),
-                Err(error) => {
-                    errors.push(format!("{:?}", error));
-                    None
-                }
-            })
-            .collect::<Vec<_>>();
-        if result.is_empty() {
-            Err(Error::Custom(format!("Failed to connect: {:?}", errors)))
-        } else {
-            Ok(result)
-        }
-    }
-
+    /// Obtain a Kaspa p2p [`NodeDescriptor`] from the resolver based on the supplied [`Encoding`] and [`NetworkId`].
     pub async fn get_node(&self, encoding: Encoding, network_id: NetworkId) -> Result<NodeDescriptor> {
         self.fetch(encoding, network_id).await
     }
 
+    /// Returns a Kaspa wRPC URL from the resolver based on the supplied [`Encoding`] and [`NetworkId`].
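+    ///
+    /// A usage sketch (illustrative only; `resolver` is assumed to be a configured
+    /// [`Resolver`] instance and the selected endpoint is assumed to be reachable):
+    ///
+    /// ```no_run
+    /// # use kaspa_wrpc_client::prelude::*;
+    /// # async fn example(resolver: &Resolver) -> Result<(), kaspa_wrpc_client::error::Error> {
+    /// let url = resolver.get_url(WrpcEncoding::Borsh, NetworkId::new(NetworkType::Mainnet)).await?;
+    /// println!("connecting to {url}");
+    /// # Ok(())
+    /// # }
+    /// ```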
     pub async fn get_url(&self, encoding: Encoding, network_id: NetworkId) -> Result<String> {
         let nodes = self.fetch(encoding, network_id).await?;
         Ok(nodes.url.clone())
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_resolver_config_1() {
+        let toml = r#"
+            [[group]]
+            enable = true
+            template = "https://*.example.org"
+            nodes = ["alpha", "beta", "gamma", "delta", "epsilon", "zeta", "eta", "theta"]
+
+            [[group]]
+            enable = true
+            template = "https://*.example.com"
+            nodes = ["iota", "kappa", "lambda", "mu", "nu", "xi", "omicron", "pi"]
+
+            [[resolver]]
+            enable = true
+            address = "http://127.0.0.1:8888"
+        "#;
+
+        let urls = try_parse_resolvers(toml).expect("TOML: Unable to parse RPC Resolver list");
+        // println!("{:#?}", urls);
+        assert_eq!(urls.len(), 17);
+    }
+
+    #[test]
+    fn test_resolver_config_2() {
+        let _urls = try_parse_resolvers(RESOLVER_CONFIG).expect("TOML: Unable to parse RPC Resolver list");
+        // println!("{:#?}", urls);
+    }
+}
diff --git a/rpc/wrpc/client/src/result.rs b/rpc/wrpc/client/src/result.rs
index 32f663388a..8427fd12f8 100644
--- a/rpc/wrpc/client/src/result.rs
+++ b/rpc/wrpc/client/src/result.rs
@@ -1 +1,3 @@
+//! The [`Result`] type alias bound to the [`Error`](super::error::Error) enum used in this crate.
+
 pub type Result<T> = std::result::Result<T, super::error::Error>;
diff --git a/rpc/wrpc/examples/simple_client/Cargo.toml b/rpc/wrpc/examples/simple_client/Cargo.toml
new file mode 100644
index 0000000000..c55774b685
--- /dev/null
+++ b/rpc/wrpc/examples/simple_client/Cargo.toml
@@ -0,0 +1,18 @@
+[package]
+name = "kaspa-wrpc-simple-client-example"
+description = "Kaspa wRPC simple client example"
+publish = false
+rust-version.workspace = true
+version.workspace = true
+edition.workspace = true
+authors.workspace = true
+include.workspace = true
+license.workspace = true
+repository.workspace = true
+
+[dependencies]
+futures.workspace = true
+kaspa-rpc-core.workspace = true
+kaspa-wrpc-client.workspace = true
+tokio.workspace = true
+
diff --git a/rpc/wrpc/examples/simple_client/src/main.rs b/rpc/wrpc/examples/simple_client/src/main.rs
new file mode 100644
index 0000000000..0d63a24c27
--- /dev/null
+++ b/rpc/wrpc/examples/simple_client/src/main.rs
@@ -0,0 +1,104 @@
+// Example of a simple client that connects to a Kaspa node over a wRPC connection and collects some basic node and network data
+
+use kaspa_rpc_core::{api::rpc::RpcApi, GetBlockDagInfoResponse, GetServerInfoResponse};
+use kaspa_wrpc_client::{
+    client::{ConnectOptions, ConnectStrategy},
+    prelude::NetworkId,
+    prelude::NetworkType,
+    result::Result,
+    KaspaRpcClient, Resolver, WrpcEncoding,
+};
+use std::process::ExitCode;
+use std::time::Duration;
+
+#[tokio::main]
+async fn main() -> ExitCode {
+    match check_node_status().await {
+        Ok(_) => {
+            println!("Well done! You successfully completed your first client connection to a Kaspa node!");
+            ExitCode::SUCCESS
+        }
+        Err(error) => {
+            println!("An error occurred: {error}");
+            ExitCode::FAILURE
+        }
+    }
+}
+
+async fn check_node_status() -> Result<()> {
+    // Select the encoding to use, depending on the node settings
+    let encoding = WrpcEncoding::Borsh;
+
+    // If you want to connect to your own node, define its address and wRPC port using `let url = Some("ws://0.0.0.0:17110")`
+    // Verify that your Kaspa node is running with the --rpclisten-borsh=0.0.0.0:17110 parameter
+    // In this example we don't use a specific node; instead we connect through the resolver, which uses a pool of public nodes
+    let url = None;
+    let resolver = Some(Resolver::default());
+
+    // Define the network your Kaspa node is connected to
+    // You can select NetworkType::Mainnet, NetworkType::Testnet, NetworkType::Devnet, NetworkType::Simnet
+    let network_type = NetworkType::Mainnet;
+    let selected_network = Some(NetworkId::new(network_type));
+
+    // Advanced options
+    let subscription_context = None;
+
+    // Create a new wRPC client with the parameters defined above
+    let client = KaspaRpcClient::new(encoding, url, resolver, selected_network, subscription_context)?;
+
+    // Advanced connection options
+    let timeout = 5_000;
+    let options = ConnectOptions {
+        block_async_connect: true,
+        connect_timeout: Some(Duration::from_millis(timeout)),
+        strategy: ConnectStrategy::Fallback,
+        ..Default::default()
+    };
+
+    // Connect to the selected Kaspa node
+    client.connect(Some(options)).await?;
+
+    // Retrieve and show Kaspa node information
+    let GetServerInfoResponse { is_synced, server_version, network_id, has_utxo_index, .. } = client.get_server_info().await?;
+
+    println!("Node version: {server_version}");
+    println!("Network: {network_id}");
+    println!("Node is synced: {is_synced}");
+    println!("Node is indexing UTXOs: {has_utxo_index}");
+
+    // Retrieve and show Kaspa network information
+    let GetBlockDagInfoResponse {
+        block_count,
+        header_count,
+        tip_hashes,
+        difficulty,
+        past_median_time,
+        virtual_parent_hashes,
+        pruning_point_hash,
+        virtual_daa_score,
+        sink,
+        ..
+ } = client.get_block_dag_info().await?; + + println!("Block count: {block_count}"); + println!("Header count: {header_count}"); + println!("Tip hashes:"); + for tip_hash in tip_hashes { + println!("{tip_hash}"); + } + println!("Difficulty: {difficulty}"); + println!("Past median time: {past_median_time}"); + println!("Virtual parent hashes:"); + for virtual_parent_hash in virtual_parent_hashes { + println!("{virtual_parent_hash}"); + } + println!("Pruning point hash: {pruning_point_hash}"); + println!("Virtual DAA score: {virtual_daa_score}"); + println!("Sink: {sink}"); + + // Disconnect client from Kaspa node + client.disconnect().await?; + + // Return function result + Ok(()) +} diff --git a/rpc/wrpc/proxy/src/main.rs b/rpc/wrpc/proxy/src/main.rs index 43b3938c97..1cb9ad5c60 100644 --- a/rpc/wrpc/proxy/src/main.rs +++ b/rpc/wrpc/proxy/src/main.rs @@ -90,13 +90,15 @@ async fn main() -> Result<()> { rpc_handler.clone(), router.interface.clone(), Some(counters), + false, ); log_info!("Kaspa wRPC server is listening on {}", options.listen_address); log_info!("Using `{encoding}` protocol encoding"); let config = WebSocketConfig { max_message_size: Some(1024 * 1024 * 1024), ..Default::default() }; - server.listen(&options.listen_address, Some(config)).await?; + let listener = server.bind(&options.listen_address).await?; + server.listen(listener, Some(config)).await?; Ok(()) } diff --git a/rpc/wrpc/resolver/Cargo.toml b/rpc/wrpc/resolver/Cargo.toml deleted file mode 100644 index cb28d82bf8..0000000000 --- a/rpc/wrpc/resolver/Cargo.toml +++ /dev/null @@ -1,41 +0,0 @@ -[package] -name = "kaspa-resolver" -description = "Kaspa wRPC endpoint resolver and monitor" -version.workspace = true -edition.workspace = true -authors.workspace = true -include.workspace = true -license.workspace = true -repository.workspace = true - -[dependencies] - - -ahash.workspace = true -cfg-if.workspace = true -clap.workspace = true -convert_case.workspace = true -futures.workspace = true -kaspa-consensus-core.workspace = true -kaspa-rpc-core.workspace = true -kaspa-utils.workspace = true -kaspa-wrpc-client.workspace = true -serde_json.workspace = true -serde.workspace = true -thiserror.workspace = true -tokio.workspace = true -toml.workspace = true -workflow-core.workspace = true -workflow-http.workspace = true -workflow-log.workspace = true -xxhash-rust = { workspace = true } - -# these are temporarily localized to prevent -# conflicts with other workspace dependencies -# as tower is used in gRPC-related crates. 
-axum = "0.7.4" -console = "0.15.8" -mime = "0.3.16" -tower = { version = "0.4.13", features = ["buffer","limit"] } -tower-http = { version = "0.5.1", features = ["cors"] } -tracing-subscriber = "0.3.18" diff --git a/rpc/wrpc/resolver/src/args.rs b/rpc/wrpc/resolver/src/args.rs deleted file mode 100644 index 7a526b99b1..0000000000 --- a/rpc/wrpc/resolver/src/args.rs +++ /dev/null @@ -1,54 +0,0 @@ -pub use clap::Parser; -use std::str::FromStr; - -#[derive(Default, Parser, Debug)] -#[command(version, about, long_about = None)] -pub struct Args { - /// HTTP server port - #[arg(long, default_value = "127.0.0.1:8888")] - pub listen: String, - - /// Optional rate limit in the form `:`, where `requests` is the number of requests allowed per specified number of `seconds` - #[arg(long = "rate-limit", value_name = "REQUESTS:SECONDS")] - pub rate_limit: Option, - - /// Verbose mode - #[arg(short, long, default_value = "false")] - pub verbose: bool, - - /// Show node data on each election - #[arg(short, long, default_value = "false")] - pub election: bool, - - /// Enable resolver status access via `/status` - #[arg(long, default_value = "false")] - pub status: bool, -} - -#[derive(Clone, Debug)] -pub struct RateLimit { - pub requests: u64, - pub period: u64, -} - -impl FromStr for RateLimit { - type Err = String; - - fn from_str(s: &str) -> Result { - let parts = s.split_once(':'); - let (requests, period) = match parts { - None | Some(("", _)) | Some((_, "")) => { - return Err("invalid rate limit, must be `:`".to_string()); - } - Some(x) => x, - }; - let requests = requests - .parse() - .map_err(|_| format!("Unable to parse number of requests, the value must be an integer, supplied: {:?}", requests))?; - let period = period.parse().map_err(|_| { - format!("Unable to parse period, the value must be an integer specifying number of seconds, supplied: {:?}", period) - })?; - - Ok(RateLimit { requests, period }) - } -} diff --git a/rpc/wrpc/resolver/src/connection.rs b/rpc/wrpc/resolver/src/connection.rs deleted file mode 100644 index 75577719f3..0000000000 --- a/rpc/wrpc/resolver/src/connection.rs +++ /dev/null @@ -1,262 +0,0 @@ -use crate::imports::*; - -const BIAS_SCALE: u64 = 1_000_000; - -#[derive(Debug, Clone)] -pub struct Descriptor { - pub connection: Arc, - pub json: String, -} - -impl From<&Arc> for Descriptor { - fn from(connection: &Arc) -> Self { - Self { connection: connection.clone(), json: serde_json::to_string(&Output::from(connection)).unwrap() } - } -} - -impl fmt::Display for Connection { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}: [{:>3}] {}", self.node.id_string, self.clients(), self.node.address) - } -} - -#[derive(Debug)] -pub struct Connection { - pub node: Arc, - bias: u64, - descriptor: RwLock>, - sender: Sender, - client: KaspaRpcClient, - shutdown_ctl: DuplexChannel<()>, - is_connected: Arc, - is_synced: Arc, - is_online: Arc, - clients: Arc, - args: Arc, -} - -impl Connection { - pub fn try_new(node: Arc, sender: Sender, args: &Arc) -> Result { - let client = KaspaRpcClient::new(node.encoding, Some(&node.address), None, None, None)?; - let descriptor = RwLock::default(); - let shutdown_ctl = DuplexChannel::oneshot(); - let is_connected = Arc::new(AtomicBool::new(false)); - let is_synced = Arc::new(AtomicBool::new(true)); - let is_online = Arc::new(AtomicBool::new(false)); - let clients = Arc::new(AtomicU64::new(0)); - let bias = (node.bias.unwrap_or(1.0) * BIAS_SCALE as f64) as u64; - let args = args.clone(); - Ok(Self { node, descriptor, 
sender, client, shutdown_ctl, is_connected, is_synced, is_online, clients, bias, args }) - } - - pub fn verbose(&self) -> bool { - self.args.verbose - } - - pub fn score(&self) -> u64 { - self.clients.load(Ordering::Relaxed) * self.bias / BIAS_SCALE - } - - pub fn connected(&self) -> bool { - self.is_connected.load(Ordering::Relaxed) - } - - pub fn online(&self) -> bool { - self.is_online.load(Ordering::Relaxed) - } - - pub fn is_synced(&self) -> bool { - self.is_synced.load(Ordering::Relaxed) - } - - pub fn clients(&self) -> u64 { - self.clients.load(Ordering::Relaxed) - } - - pub fn status(&self) -> &'static str { - if self.connected() { - if self.is_synced() { - "online" - } else { - "syncing" - } - } else { - "offline" - } - } - - pub fn descriptor(&self) -> Option { - self.descriptor.read().unwrap().clone() - } - - async fn connect(&self) -> Result<()> { - let options = ConnectOptions { block_async_connect: false, strategy: ConnectStrategy::Retry, ..Default::default() }; - - self.client.connect(Some(options)).await?; - Ok(()) - } - - async fn task(self: Arc) -> Result<()> { - self.connect().await?; - let rpc_ctl_channel = self.client.rpc_ctl().multiplexer().channel(); - let shutdown_ctl_receiver = self.shutdown_ctl.request.receiver.clone(); - let shutdown_ctl_sender = self.shutdown_ctl.response.sender.clone(); - - let interval = workflow_core::task::interval(Duration::from_secs(5)); - pin_mut!(interval); - - loop { - select! { - _ = interval.next().fuse() => { - if self.is_connected.load(Ordering::Relaxed) { - let previous = self.is_online.load(Ordering::Relaxed); - let online = self.update_metrics().await.is_ok(); - self.is_online.store(online, Ordering::Relaxed); - if online != previous { - if self.verbose() { - log_error!("Offline","{}", self.node.address); - } - self.update(online).await?; - } - } - } - - msg = rpc_ctl_channel.receiver.recv().fuse() => { - match msg { - Ok(msg) => { - - // handle wRPC channel connection and disconnection events - match msg { - RpcState::Connected => { - log_success!("Connected","{}",self.node.address); - self.is_connected.store(true, Ordering::Relaxed); - if self.update_metrics().await.is_ok() { - self.is_online.store(true, Ordering::Relaxed); - self.update(true).await?; - } else { - self.is_online.store(false, Ordering::Relaxed); - } - }, - RpcState::Disconnected => { - self.is_connected.store(false, Ordering::Relaxed); - self.is_online.store(false, Ordering::Relaxed); - self.update(false).await?; - log_error!("Disconnected","{}",self.node.address); - } - } - } - Err(err) => { - println!("Monitor: error while receiving rpc_ctl_channel message: {err}"); - break; - } - } - } - - _ = shutdown_ctl_receiver.recv().fuse() => { - break; - }, - - } - } - - shutdown_ctl_sender.send(()).await.unwrap(); - - Ok(()) - } - - pub fn start(self: &Arc) -> Result<()> { - let this = self.clone(); - spawn(async move { - if let Err(error) = this.task().await { - println!("NodeConnection task error: {:?}", error); - } - }); - - Ok(()) - } - - pub async fn stop(self: &Arc) -> Result<()> { - self.shutdown_ctl.signal(()).await.expect("NodeConnection shutdown signal error"); - Ok(()) - } - - async fn update_metrics(self: &Arc) -> Result { - match self.client.get_sync_status().await { - Ok(is_synced) => { - let previous_sync = self.is_synced.load(Ordering::Relaxed); - self.is_synced.store(is_synced, Ordering::Relaxed); - - if is_synced { - match self.client.get_metrics(false, true, false, false).await { - Ok(metrics) => { - if let Some(connection_metrics) = 
metrics.connection_metrics { - // update - let previous = self.clients.load(Ordering::Relaxed); - let clients = - connection_metrics.borsh_live_connections as u64 + connection_metrics.json_live_connections as u64; - self.clients.store(clients, Ordering::Relaxed); - if clients != previous { - if self.verbose() { - log_success!("Clients", "{self}"); - } - Ok(true) - } else { - Ok(false) - } - } else { - log_error!("Metrics", "{self} - failure"); - Err(Error::ConnectionMetrics) - } - } - Err(err) => { - log_error!("Metrics", "{self}"); - log_error!("RPC", "{err}"); - Err(Error::Metrics) - } - } - } else { - if is_synced != previous_sync { - log_error!("Syncing", "{self}"); - } - Err(Error::Sync) - } - } - Err(err) => { - log_error!("RPC", "{self}"); - log_error!("RPC", "{err}"); - Err(Error::Status) - } - } - } - - pub async fn update(self: &Arc, online: bool) -> Result<()> { - *self.descriptor.write().unwrap() = online.then_some(self.into()); - self.sender.try_send(self.node.params())?; - Ok(()) - } -} - -#[derive(Serialize)] -#[serde(rename_all = "kebab-case")] -pub struct Output<'a> { - pub id: &'a str, - pub url: &'a str, - - #[serde(skip_serializing_if = "Option::is_none")] - pub provider_name: Option<&'a str>, - #[serde(skip_serializing_if = "Option::is_none")] - pub provider_url: Option<&'a str>, -} - -impl<'a> From<&'a Arc> for Output<'a> { - fn from(connection: &'a Arc) -> Self { - let id = connection.node.id_string.as_str(); - let url = connection.node.address.as_str(); - let provider_name = connection.node.provider.as_ref().map(|provider| provider.name.as_str()); - let provider_url = connection.node.provider.as_ref().map(|provider| provider.url.as_str()); - - // let provider_name = connection.node.provider.as_deref(); - // let provider_url = connection.node.link.as_deref(); - Self { id, url, provider_name, provider_url } - } -} diff --git a/rpc/wrpc/resolver/src/error.rs b/rpc/wrpc/resolver/src/error.rs deleted file mode 100644 index 4f390c3ce5..0000000000 --- a/rpc/wrpc/resolver/src/error.rs +++ /dev/null @@ -1,53 +0,0 @@ -use kaspa_wrpc_client::error::Error as RpcError; -use thiserror::Error; -use toml::de::Error as TomlError; - -#[derive(Error, Debug)] -pub enum Error { - #[error("{0}")] - Custom(String), - - #[error("RPC error: {0}")] - Rpc(#[from] RpcError), - - #[error("TOML error: {0}")] - Toml(#[from] TomlError), - - #[error("IO Error: {0}")] - Io(#[from] std::io::Error), - - #[error(transparent)] - Serde(#[from] serde_json::Error), - - #[error("Connection Metrics")] - ConnectionMetrics, - #[error("Metrics")] - Metrics, - #[error("Sync")] - Sync, - #[error("Status")] - Status, - - #[error("Channel send error")] - ChannelSend, - #[error("Channel try send error")] - TryChannelSend, -} - -impl Error { - pub fn custom(msg: T) -> Self { - Error::Custom(msg.to_string()) - } -} - -impl From> for Error { - fn from(_: workflow_core::channel::SendError) -> Self { - Error::ChannelSend - } -} - -impl From> for Error { - fn from(_: workflow_core::channel::TrySendError) -> Self { - Error::TryChannelSend - } -} diff --git a/rpc/wrpc/resolver/src/imports.rs b/rpc/wrpc/resolver/src/imports.rs deleted file mode 100644 index 29c86a4813..0000000000 --- a/rpc/wrpc/resolver/src/imports.rs +++ /dev/null @@ -1,28 +0,0 @@ -pub use crate::args::Args; -pub use crate::error::Error; -pub use crate::log::*; -pub use crate::node::Node; -pub use crate::params::{PathParams, QueryParams}; -pub use crate::result::Result; -pub use crate::transport::Transport; -pub use ahash::AHashMap; -pub use cfg_if::cfg_if; 
-pub use futures::{pin_mut, select, FutureExt, StreamExt}; -pub use kaspa_consensus_core::network::NetworkId; -pub use kaspa_rpc_core::api::ctl::RpcState; -pub use kaspa_rpc_core::api::rpc::RpcApi; -pub use kaspa_utils::hashmap::GroupExtension; -pub use kaspa_wrpc_client::{ - client::{ConnectOptions, ConnectStrategy}, - KaspaRpcClient, WrpcEncoding, -}; -pub use serde::{de::DeserializeOwned, Deserialize, Serialize}; -pub use std::collections::HashMap; -pub use std::fmt; -pub use std::path::Path; -pub use std::sync::atomic::AtomicBool; -pub use std::sync::atomic::{AtomicU64, Ordering}; -pub use std::sync::{Arc, Mutex, OnceLock, RwLock}; -pub use std::time::Duration; -pub use workflow_core::channel::*; -pub use workflow_core::task::spawn; diff --git a/rpc/wrpc/resolver/src/log.rs b/rpc/wrpc/resolver/src/log.rs deleted file mode 100644 index 5f66416a0a..0000000000 --- a/rpc/wrpc/resolver/src/log.rs +++ /dev/null @@ -1,44 +0,0 @@ -pub mod impls { - use console::style; - use std::fmt; - - pub fn log_success(source: &str, args: &fmt::Arguments<'_>) { - println!("{:>12} {}", style(source).green().bold(), args); - } - - pub fn log_warn(source: &str, args: &fmt::Arguments<'_>) { - println!("{:>12} {}", style(source).yellow().bold(), args); - } - - pub fn log_error(source: &str, args: &fmt::Arguments<'_>) { - println!("{:>12} {}", style(source).red().bold(), args); - } -} - -#[macro_export] -macro_rules! log_success { - ($target:expr, $($t:tt)*) => ( - $crate::log::impls::log_success($target, &format_args!($($t)*)) - ) -} - -pub use log_success; - -#[macro_export] -macro_rules! log_warn { - - ($target:expr, $($t:tt)*) => ( - $crate::log::impls::log_warn($target, &format_args!($($t)*)) - ) -} - -pub use log_warn; - -#[macro_export] -macro_rules! log_error { - ($target:expr, $($t:tt)*) => ( - $crate::log::impls::log_error($target, &format_args!($($t)*)) - ) -} - -pub use log_error; diff --git a/rpc/wrpc/resolver/src/main.rs b/rpc/wrpc/resolver/src/main.rs deleted file mode 100644 index f071b12874..0000000000 --- a/rpc/wrpc/resolver/src/main.rs +++ /dev/null @@ -1,41 +0,0 @@ -mod args; -mod connection; -mod error; -pub mod imports; -mod log; -mod monitor; -mod node; -mod panic; -mod params; -mod result; -mod server; -mod transport; - -use args::*; -use result::Result; -use std::sync::Arc; - -#[tokio::main] -async fn main() { - if let Err(error) = run().await { - eprintln!("Error: {}", error); - std::process::exit(1); - } -} - -async fn run() -> Result<()> { - let args = Arc::new(Args::parse()); - - workflow_log::set_log_level(workflow_log::LevelFilter::Info); - panic::init_ungraceful_panic_handler(); - - println!(); - println!("Kaspa wRPC Resolver v{} starting...", env!("CARGO_PKG_VERSION")); - - monitor::init(&args); - let (listener, app) = server::server(&args).await?; - monitor::start().await?; - axum::serve(listener, app).await?; - monitor::stop().await?; - Ok(()) -} diff --git a/rpc/wrpc/resolver/src/monitor.rs b/rpc/wrpc/resolver/src/monitor.rs deleted file mode 100644 index 748a5148f3..0000000000 --- a/rpc/wrpc/resolver/src/monitor.rs +++ /dev/null @@ -1,241 +0,0 @@ -use crate::connection::{Connection, Descriptor}; -use crate::imports::*; - -static MONITOR: OnceLock> = OnceLock::new(); - -pub fn init(args: &Arc) { - MONITOR.set(Arc::new(Monitor::new(args))).unwrap(); -} - -pub fn monitor() -> &'static Arc { - MONITOR.get().unwrap() -} - -pub async fn start() -> Result<()> { - monitor().start().await -} - -pub async fn stop() -> Result<()> { - monitor().stop().await -} - -/// Monitor receives 
updates from [Connection] monitoring tasks -/// and updates the descriptors for each [Params] based on the -/// connection store (number of connections * bias). -pub struct Monitor { - args: Arc, - connections: RwLock>>>, - descriptors: RwLock>, - channel: Channel, - shutdown_ctl: DuplexChannel<()>, -} - -impl Default for Monitor { - fn default() -> Self { - Self { - args: Arc::new(Args::default()), - connections: Default::default(), - descriptors: Default::default(), - channel: Channel::unbounded(), - shutdown_ctl: DuplexChannel::oneshot(), - } - } -} - -impl fmt::Debug for Monitor { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Monitor") - .field("verbose", &self.verbose()) - .field("connections", &self.connections) - .field("descriptors", &self.descriptors) - .finish() - } -} - -impl Monitor { - pub fn new(args: &Arc) -> Self { - Self { args: args.clone(), ..Default::default() } - } - - pub fn verbose(&self) -> bool { - self.args.verbose - } - - pub fn connections(&self) -> AHashMap>> { - self.connections.read().unwrap().clone() - } - - /// Process an update to `Server.toml` removing or adding node connections accordingly. - pub async fn update_nodes(&self, nodes: Vec>) -> Result<()> { - let mut connections = self.connections(); - - for params in PathParams::iter() { - let nodes = nodes.iter().filter(|node| node.params() == params).collect::>(); - - let list = connections.entry(params).or_default(); - - let create: Vec<_> = nodes.iter().filter(|node| !list.iter().any(|connection| connection.node == ***node)).collect(); - - let remove: Vec<_> = - list.iter().filter(|connection| !nodes.iter().any(|node| connection.node == **node)).cloned().collect(); - - for node in create { - let created = Arc::new(Connection::try_new((*node).clone(), self.channel.sender.clone(), &self.args)?); - created.start()?; - list.push(created); - } - - for removed in remove { - removed.stop().await?; - list.retain(|c| c.node != removed.node); - } - } - - *self.connections.write().unwrap() = connections; - - // flush all params to the update channel to refresh selected descriptors - PathParams::iter().for_each(|param| self.channel.sender.try_send(param).unwrap()); - - Ok(()) - } - - pub async fn start(self: &Arc) -> Result<()> { - let toml = std::fs::read_to_string(Path::new("Servers.toml"))?; - let nodes = crate::node::try_parse_nodes(toml.as_str())?; - - let this = self.clone(); - spawn(async move { - if let Err(error) = this.task().await { - println!("NodeConnection task error: {:?}", error); - } - }); - - self.update_nodes(nodes).await?; - - Ok(()) - } - - pub async fn stop(&self) -> Result<()> { - self.shutdown_ctl.signal(()).await.expect("Monitor shutdown signal error"); - Ok(()) - } - - async fn task(self: Arc) -> Result<()> { - let receiver = self.channel.receiver.clone(); - let shutdown_ctl_receiver = self.shutdown_ctl.request.receiver.clone(); - let shutdown_ctl_sender = self.shutdown_ctl.response.sender.clone(); - - loop { - select! 
{ - - msg = receiver.recv().fuse() => { - match msg { - Ok(params) => { - - // run node elections - - let mut connections = self.connections() - .get(¶ms) - .expect("Monitor: expecting existing connection params") - .clone() - .into_iter() - .filter(|connection|connection.online()) - .collect::>(); - if connections.is_empty() { - self.descriptors.write().unwrap().remove(¶ms); - } else { - connections.sort_by_key(|connection| connection.score()); - - if self.args.election { - log_success!("",""); - connections.iter().for_each(|connection| { - log_warn!("Node","{}", connection); - }); - } - - if let Some(descriptor) = connections.first().unwrap().descriptor() { - let mut descriptors = self.descriptors.write().unwrap(); - - // extra debug output & monitoring - if self.args.verbose || self.args.election { - if let Some(current) = descriptors.get(¶ms) { - if current.connection.node.id != descriptor.connection.node.id { - log_success!("Election","{}", descriptor.connection); - descriptors.insert(params,descriptor); - } else { - log_success!("Keep","{}", descriptor.connection); - } - } else { - log_success!("Default","{}", descriptor.connection); - descriptors.insert(params,descriptor); - } - } else { - descriptors.insert(params,descriptor); - } - } - - if self.args.election && self.args.verbose { - log_success!("",""); - } - } - } - Err(err) => { - println!("Monitor: error while receiving update message: {err}"); - } - } - - } - _ = shutdown_ctl_receiver.recv().fuse() => { - break; - }, - - } - } - - shutdown_ctl_sender.send(()).await.unwrap(); - - Ok(()) - } - - /// Get the status of all nodes as a JSON string (available via `/status` endpoint if enabled). - pub fn get_all_json(&self) -> String { - let connections = self.connections(); - let nodes = connections.values().flatten().map(Status::from).collect::>(); - serde_json::to_string(&nodes).unwrap() - } - - /// Get JSON string representing node information (id, url, provider, link) - pub fn get_json(&self, params: &PathParams) -> Option { - self.descriptors.read().unwrap().get(params).cloned().map(|descriptor| descriptor.json) - } -} - -#[derive(Serialize)] -pub struct Status<'a> { - pub id: &'a str, - pub url: &'a str, - #[serde(skip_serializing_if = "Option::is_none")] - pub provider_name: Option<&'a str>, - #[serde(skip_serializing_if = "Option::is_none")] - pub provider_url: Option<&'a str>, - pub transport: Transport, - pub encoding: WrpcEncoding, - pub network: NetworkId, - pub online: bool, - pub status: &'static str, -} - -impl<'a> From<&'a Arc> for Status<'a> { - fn from(connection: &'a Arc) -> Self { - let url = connection.node.address.as_str(); - let provider_name = connection.node.provider.as_ref().map(|provider| provider.name.as_str()); - let provider_url = connection.node.provider.as_ref().map(|provider| provider.url.as_str()); - let id = connection.node.id_string.as_str(); - let transport = connection.node.transport; - let encoding = connection.node.encoding; - let network = connection.node.network; - let status = connection.status(); - let online = connection.online(); - Self { id, url, provider_name, provider_url, transport, encoding, network, status, online } - } -} diff --git a/rpc/wrpc/resolver/src/node.rs b/rpc/wrpc/resolver/src/node.rs deleted file mode 100644 index d0968966cf..0000000000 --- a/rpc/wrpc/resolver/src/node.rs +++ /dev/null @@ -1,75 +0,0 @@ -use crate::imports::*; -use xxhash_rust::xxh3::xxh3_64; - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct Provider { - pub name: String, - pub url: String, 
-} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct Node { - #[serde(skip)] - pub id: u64, - #[serde(skip)] - pub id_string: String, - - pub name: Option, - pub location: Option, - pub address: String, - pub transport: Transport, - pub encoding: WrpcEncoding, - pub network: NetworkId, - pub enable: Option, - pub bias: Option, - pub version: Option, - pub provider: Option, -} - -impl Eq for Node {} - -impl PartialEq for Node { - fn eq(&self, other: &Self) -> bool { - self.address == other.address - } -} - -impl std::fmt::Display for Node { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let title = self.name.clone().unwrap_or(self.address.to_string()); - write!(f, "{}", title) - } -} - -impl Node { - pub fn params(&self) -> PathParams { - PathParams::new(self.encoding, self.network) - } -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct NodeConfig { - #[serde(rename = "node")] - nodes: Vec, -} - -pub fn try_parse_nodes(toml: &str) -> Result>> { - let nodes: Vec> = toml::from_str::(toml)? - .nodes - .into_iter() - .filter_map(|mut node| { - let id = xxh3_64(node.address.as_bytes()); - let id_string = format!("{id:x}"); - node.id = id; - node.id_string = id_string.chars().take(8).collect(); - node.enable.unwrap_or(true).then_some(node).map(Arc::new) - }) - .collect::>(); - Ok(nodes) -} - -impl AsRef for Node { - fn as_ref(&self) -> &Node { - self - } -} diff --git a/rpc/wrpc/resolver/src/panic.rs b/rpc/wrpc/resolver/src/panic.rs deleted file mode 100644 index 7e6d78a1ff..0000000000 --- a/rpc/wrpc/resolver/src/panic.rs +++ /dev/null @@ -1,10 +0,0 @@ -use std::panic; - -pub fn init_ungraceful_panic_handler() { - let default_hook = panic::take_hook(); - panic::set_hook(Box::new(move |panic_info| { - default_hook(panic_info); - println!("Exiting..."); - std::process::exit(1); - })); -} diff --git a/rpc/wrpc/resolver/src/params.rs b/rpc/wrpc/resolver/src/params.rs deleted file mode 100644 index 7e31b69e70..0000000000 --- a/rpc/wrpc/resolver/src/params.rs +++ /dev/null @@ -1,146 +0,0 @@ -use serde::{de, Deserializer, Serializer}; - -use crate::imports::*; -use std::{fmt, str::FromStr}; -// use convert_case::{Case, Casing}; - -#[derive(Debug, Deserialize, Serialize, Clone, Copy, PartialEq, Eq, Hash)] -pub struct PathParams { - pub encoding: WrpcEncoding, - pub network: NetworkId, -} - -impl PathParams { - pub fn new(encoding: WrpcEncoding, network: NetworkId) -> Self { - Self { encoding, network } - } - - pub fn iter() -> impl Iterator { - NetworkId::iter().flat_map(move |network_id| WrpcEncoding::iter().map(move |encoding| PathParams::new(*encoding, network_id))) - } -} - -impl fmt::Display for PathParams { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}:{}", self.encoding.to_string().to_lowercase(), self.network) - } -} - -// --- - -#[derive(Debug, Deserialize)] -pub struct QueryParams { - // Accessible via a query string like "?access=utxo-index+tx-index+block-dag+metrics+visualizer+mining" - pub access: Option, -} - -#[derive(Debug, Deserialize, Serialize, Clone, Copy, PartialEq, Eq, Hash)] -#[serde(rename_all = "kebab-case")] -pub enum AccessType { - Transact, // UTXO and TX index, submit transaction, single mempool entry - Mempool, // Full mempool data access - BlockDag, // Access to Blocks - Network, // Network data access (peers, ban, etc.) - Metrics, // Access to Metrics - Visualizer, // Access to Visualization data feeds - Mining, // Access to submit block, GBT, etc. 
-} - -impl AccessType { - pub fn iter() -> impl Iterator { - [ - AccessType::Transact, - AccessType::Mempool, - AccessType::BlockDag, - AccessType::Network, - AccessType::Metrics, - AccessType::Visualizer, - AccessType::Mining, - ] - .into_iter() - } -} - -impl fmt::Display for AccessType { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let s = match self { - AccessType::Transact => "transact", - AccessType::Mempool => "mempool", - AccessType::BlockDag => "block-dag", - AccessType::Network => "network", - AccessType::Metrics => "metrics", - AccessType::Visualizer => "visualizer", - AccessType::Mining => "mining", - }; - write!(f, "{s}") - } -} - -impl FromStr for AccessType { - type Err = String; - fn from_str(s: &str) -> std::result::Result { - match s { - "transact" => Ok(AccessType::Transact), - "mempool" => Ok(AccessType::Mempool), - "block-dag" => Ok(AccessType::BlockDag), - "network" => Ok(AccessType::Network), - "metrics" => Ok(AccessType::Metrics), - "visualizer" => Ok(AccessType::Visualizer), - "mining" => Ok(AccessType::Mining), - _ => Err(format!("Invalid access type: {}", s)), - } - } -} - -#[derive(Debug, Clone)] -pub struct AccessList { - pub access: Vec, -} - -impl std::fmt::Display for AccessList { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.access.iter().map(|access| access.to_string()).collect::>().join(" ")) - } -} - -impl FromStr for AccessList { - type Err = String; - - fn from_str(s: &str) -> std::result::Result { - let access = s.split(' ').map(|s| s.parse::()).collect::, _>>()?; - Ok(AccessList { access }) - } -} - -impl Serialize for AccessList { - fn serialize(&self, serializer: S) -> std::result::Result - where - S: Serializer, - { - serializer.serialize_str(&self.to_string()) - } -} - -struct AccessListVisitor; -impl<'de> de::Visitor<'de> for AccessListVisitor { - type Value = AccessList; - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - formatter.write_str("a string containing list of permissions separated by a '+'") - } - - fn visit_str(self, value: &str) -> std::result::Result - where - E: de::Error, - { - AccessList::from_str(value).map_err(|err| de::Error::custom(err.to_string())) - } -} - -impl<'de> Deserialize<'de> for AccessList { - fn deserialize(deserializer: D) -> std::result::Result - where - D: Deserializer<'de>, - { - deserializer.deserialize_str(AccessListVisitor) - } -} diff --git a/rpc/wrpc/resolver/src/result.rs b/rpc/wrpc/resolver/src/result.rs deleted file mode 100644 index 605dc25cfc..0000000000 --- a/rpc/wrpc/resolver/src/result.rs +++ /dev/null @@ -1 +0,0 @@ -pub type Result = std::result::Result; diff --git a/rpc/wrpc/resolver/src/server.rs b/rpc/wrpc/resolver/src/server.rs deleted file mode 100644 index 3717a6ebf4..0000000000 --- a/rpc/wrpc/resolver/src/server.rs +++ /dev/null @@ -1,149 +0,0 @@ -use crate::imports::*; -use crate::monitor::monitor; -use axum::{ - async_trait, - extract::{path::ErrorKind, rejection::PathRejection, FromRequestParts, Query}, - http::{header, request::Parts, HeaderValue, StatusCode}, - response::IntoResponse, - routing::get, - // Json, - Router, -}; -use tokio::net::TcpListener; - -use axum::{error_handling::HandleErrorLayer, BoxError}; -use std::time::Duration; -use tower::{buffer::BufferLayer, limit::RateLimitLayer, ServiceBuilder}; -use tower_http::cors::{Any, CorsLayer}; - -pub async fn server(args: &Args) -> Result<(TcpListener, Router)> { - // initialize tracing - tracing_subscriber::fmt::init(); - - let 
app = Router::new().route("/v1/wrpc/:encoding/:network", get(get_elected_node)); - - let app = if args.status { - log_warn!("Routes", "Enabling `/status` route"); - app.route("/status", get(get_status_all_nodes)) - } else { - log_success!("Routes", "Disabling `/status` route"); - app - }; - - let app = if let Some(rate_limit) = args.rate_limit.as_ref() { - log_success!("Limits", "Setting rate limit to: {} requests per {} seconds", rate_limit.requests, rate_limit.period); - app.layer( - ServiceBuilder::new() - .layer(HandleErrorLayer::new(|err: BoxError| async move { - (StatusCode::INTERNAL_SERVER_ERROR, format!("Unhandled error: {}", err)) - })) - .layer(BufferLayer::new(1024)) - .layer(RateLimitLayer::new(rate_limit.requests, Duration::from_secs(rate_limit.period))), - ) - } else { - log_warn!("Limits", "Rate limit is disabled"); - app - }; - - let app = app.layer(CorsLayer::new().allow_origin(Any)); - - log_success!("Server", "Listening on http://{}", args.listen.as_str()); - let listener = tokio::net::TcpListener::bind(args.listen.as_str()).await.unwrap(); - Ok((listener, app)) -} - -// respond with a JSON object containing the status of all nodes -async fn get_status_all_nodes() -> impl IntoResponse { - let json = monitor().get_all_json(); - (StatusCode::OK, [(header::CONTENT_TYPE, HeaderValue::from_static(mime::APPLICATION_JSON.as_ref()))], json).into_response() -} - -// respond with a JSON object containing the elected node -async fn get_elected_node(Query(_query): Query, Path(params): Path) -> impl IntoResponse { - // println!("params: {:?}", params); - // println!("query: {:?}", query); - - if let Some(json) = monitor().get_json(¶ms) { - ([(header::CONTENT_TYPE, HeaderValue::from_static(mime::APPLICATION_JSON.as_ref()))], json).into_response() - } else { - ( - StatusCode::NOT_FOUND, - [(header::CONTENT_TYPE, HeaderValue::from_static(mime::TEXT_PLAIN_UTF_8.as_ref()))], - "NOT FOUND".to_string(), - ) - .into_response() - } -} - -// We define our own `Path` extractor that customizes the error from `axum::extract::Path` -struct Path(T); - -#[async_trait] -impl FromRequestParts for Path -where - // these trait bounds are copied from `impl FromRequest for axum::extract::path::Path` - T: DeserializeOwned + Send, - S: Send + Sync, -{ - type Rejection = (StatusCode, axum::Json); - - async fn from_request_parts(parts: &mut Parts, state: &S) -> std::result::Result { - match axum::extract::Path::::from_request_parts(parts, state).await { - Ok(value) => Ok(Self(value.0)), - Err(rejection) => { - let (status, body) = match rejection { - PathRejection::FailedToDeserializePathParams(inner) => { - let mut status = StatusCode::BAD_REQUEST; - - let kind = inner.into_kind(); - let body = match &kind { - ErrorKind::WrongNumberOfParameters { .. } => PathError { message: kind.to_string(), location: None }, - - ErrorKind::ParseErrorAtKey { key, .. } => { - PathError { message: kind.to_string(), location: Some(key.clone()) } - } - - ErrorKind::ParseErrorAtIndex { index, .. } => { - PathError { message: kind.to_string(), location: Some(index.to_string()) } - } - - ErrorKind::ParseError { .. } => PathError { message: kind.to_string(), location: None }, - - ErrorKind::InvalidUtf8InPathParam { key } => { - PathError { message: kind.to_string(), location: Some(key.clone()) } - } - - ErrorKind::UnsupportedType { .. 
} => { - // this error is caused by the programmer using an unsupported type - // (such as nested maps) so respond with `500` instead - status = StatusCode::INTERNAL_SERVER_ERROR; - PathError { message: kind.to_string(), location: None } - } - - ErrorKind::Message(msg) => PathError { message: msg.clone(), location: None }, - - _ => PathError { message: format!("Unhandled deserialization error: {kind}"), location: None }, - }; - - (status, body) - } - PathRejection::MissingPathParams(error) => { - (StatusCode::INTERNAL_SERVER_ERROR, PathError { message: error.to_string(), location: None }) - } - _ => ( - StatusCode::INTERNAL_SERVER_ERROR, - PathError { message: format!("Unhandled path rejection: {rejection}"), location: None }, - ), - }; - - Err((status, axum::Json(body))) - } - } - } -} - -#[derive(Serialize)] -struct PathError { - message: String, - location: Option, -} diff --git a/rpc/wrpc/resolver/src/transport.rs b/rpc/wrpc/resolver/src/transport.rs deleted file mode 100644 index ccfd6dee73..0000000000 --- a/rpc/wrpc/resolver/src/transport.rs +++ /dev/null @@ -1,8 +0,0 @@ -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] -#[serde(rename_all = "lowercase")] -pub enum Transport { - Grpc, - Wrpc, -} diff --git a/rpc/wrpc/server/Cargo.toml b/rpc/wrpc/server/Cargo.toml index 885a01b751..54137e95be 100644 --- a/rpc/wrpc/server/Cargo.toml +++ b/rpc/wrpc/server/Cargo.toml @@ -33,9 +33,5 @@ tokio.workspace = true workflow-core.workspace = true workflow-log.workspace = true workflow-rpc.workspace = true - -[target.x86_64-unknown-linux-gnu.dependencies] -# Adding explicitely the openssl dependency here is needed for a successful build with zigbuild -# as used in the release deployment in GitHub CI -# see: https://github.com/rust-cross/cargo-zigbuild/issues/127 -openssl = { version = "0.10", features = ["vendored"] } +workflow-serializer.workspace = true +rustls.workspace = true \ No newline at end of file diff --git a/rpc/wrpc/server/src/address.rs b/rpc/wrpc/server/src/address.rs index 7dac4d75d9..1100860e26 100644 --- a/rpc/wrpc/server/src/address.rs +++ b/rpc/wrpc/server/src/address.rs @@ -29,11 +29,20 @@ impl WrpcNetAddress { }; format!("0.0.0.0:{port}").parse().unwrap() } - WrpcNetAddress::Custom(address) => *address, + WrpcNetAddress::Custom(address) => { + if address.port_not_specified() { + let port = match encoding { + WrpcEncoding::Borsh => network_type.default_borsh_rpc_port(), + WrpcEncoding::SerdeJson => network_type.default_json_rpc_port(), + }; + address.with_port(port) + } else { + *address + } + } } } } - impl FromStr for WrpcNetAddress { type Err = AddrParseError; fn from_str(s: &str) -> Result { @@ -63,3 +72,31 @@ impl TryFrom for WrpcNetAddress { WrpcNetAddress::from_str(&s) } } + +#[cfg(test)] +mod tests { + use super::*; + use kaspa_utils::networking::IpAddress; + + #[test] + fn test_wrpc_net_address_from_str() { + // Addresses + let port: u16 = 8080; + let addr = format!("1.2.3.4:{port}").parse::().unwrap(); + let addr_without_port = "1.2.3.4".parse::().unwrap(); + let ip_addr = "1.2.3.4".parse::().unwrap(); + // Test + for schema in WrpcEncoding::iter() { + for network in NetworkType::iter() { + let expected_port = match schema { + WrpcEncoding::Borsh => Some(network.default_borsh_rpc_port()), + WrpcEncoding::SerdeJson => Some(network.default_json_rpc_port()), + }; + // Custom address with port + assert_eq!(addr.to_address(&network, schema), ContextualNetAddress::new(ip_addr, Some(port))); 
+ // Custom address without port + assert_eq!(addr_without_port.to_address(&network, schema), ContextualNetAddress::new(ip_addr, expected_port)) + } + } + } +} diff --git a/rpc/wrpc/server/src/connection.rs b/rpc/wrpc/server/src/connection.rs index 86345e5d58..e118d161d0 100644 --- a/rpc/wrpc/server/src/connection.rs +++ b/rpc/wrpc/server/src/connection.rs @@ -16,6 +16,7 @@ use workflow_rpc::{ server::{prelude::*, result::Result as WrpcResult}, types::{MsgT, OpsT}, }; +use workflow_serializer::prelude::*; // // FIXME: Use workflow_rpc::encoding::Encoding directly in the ConnectionT implementation by deriving Hash, Eq and PartialEq in situ @@ -133,7 +134,7 @@ impl Connection { { match encoding { Encoding::Borsh => workflow_rpc::server::protocol::borsh::create_serialized_notification_message(op, msg), - Encoding::SerdeJson => workflow_rpc::server::protocol::borsh::create_serialized_notification_message(op, msg), + Encoding::SerdeJson => workflow_rpc::server::protocol::serde_json::create_serialized_notification_message(op, msg), } } } @@ -157,7 +158,7 @@ impl ConnectionT for Connection { fn into_message(notification: &Self::Notification, encoding: &Self::Encoding) -> Self::Message { let op: RpcApiOps = notification.event_type().into(); - Self::create_serialized_notification_message(encoding.clone().into(), op, notification.clone()).unwrap() + Self::create_serialized_notification_message(encoding.clone().into(), op, Serializable(notification.clone())).unwrap() } async fn send(&self, message: Self::Message) -> core::result::Result<(), Self::Error> { diff --git a/rpc/wrpc/server/src/router.rs b/rpc/wrpc/server/src/router.rs index af46266811..4d0e206259 100644 --- a/rpc/wrpc/server/src/router.rs +++ b/rpc/wrpc/server/src/router.rs @@ -4,6 +4,7 @@ use kaspa_rpc_core::{api::ops::RpcApiOps, prelude::*}; use kaspa_rpc_macros::build_wrpc_server_interface; use std::sync::Arc; use workflow_rpc::server::prelude::*; +use workflow_serializer::prelude::*; /// A wrapper that creates an [`Interface`] instance and initializes /// RPC methods and notifications against this interface. 
The interface
@@ -32,6 +33,7 @@ impl Router {
             Connection,
             RpcApiOps,
             [
+                Ping,
                 AddPeer,
                 Ban,
                 EstimateNetworkHashesPerSecond,
@@ -42,52 +44,57 @@ impl Router {
                 GetBlockDagInfo,
                 GetBlocks,
                 GetBlockTemplate,
+                GetCurrentBlockColor,
                 GetCoinSupply,
                 GetConnectedPeerInfo,
-                GetDaaScoreTimestampEstimate,
-                GetServerInfo,
                 GetCurrentNetwork,
+                GetDaaScoreTimestampEstimate,
+                GetFeeEstimate,
+                GetFeeEstimateExperimental,
                 GetHeaders,
                 GetInfo,
                 GetInfo,
                 GetMempoolEntries,
                 GetMempoolEntriesByAddresses,
                 GetMempoolEntry,
-                GetPeerAddresses,
                 GetMetrics,
+                GetConnections,
+                GetPeerAddresses,
+                GetServerInfo,
                 GetSink,
+                GetSinkBlueScore,
                 GetSubnetwork,
                 GetSyncStatus,
+                GetSystemInfo,
                 GetUtxosByAddresses,
-                GetSinkBlueScore,
                 GetVirtualChainFromBlock,
-                Ping,
                 ResolveFinalityConflict,
                 Shutdown,
                 SubmitBlock,
                 SubmitTransaction,
+                SubmitTransactionReplacement,
                 Unban,
             ]
        );
 
         interface.method(
             RpcApiOps::Subscribe,
-            workflow_rpc::server::Method::new(move |manager: Server, connection: Connection, scope: Scope| {
+            workflow_rpc::server::Method::new(move |manager: Server, connection: Connection, scope: Serializable<Scope>| {
                 Box::pin(async move {
-                    manager.start_notify(&connection, scope).await.map_err(|err| err.to_string())?;
-                    Ok(SubscribeResponse::new(connection.id()))
+                    manager.start_notify(&connection, scope.into_inner()).await.map_err(|err| err.to_string())?;
+                    Ok(Serializable(SubscribeResponse::new(connection.id())))
                 })
             }),
         );
 
         interface.method(
             RpcApiOps::Unsubscribe,
-            workflow_rpc::server::Method::new(move |manager: Server, connection: Connection, scope: Scope| {
+            workflow_rpc::server::Method::new(move |manager: Server, connection: Connection, scope: Serializable<Scope>| {
                 Box::pin(async move {
-                    manager.stop_notify(&connection, scope).await.unwrap_or_else(|err| {
+                    manager.stop_notify(&connection, scope.into_inner()).await.unwrap_or_else(|err| {
                         workflow_log::log_trace!("wRPC server -> error calling stop_notify(): {err}");
                     });
-                    Ok(UnsubscribeResponse {})
+                    Ok(Serializable(UnsubscribeResponse {}))
                 })
             }),
         );
diff --git a/rpc/wrpc/server/src/service.rs b/rpc/wrpc/server/src/service.rs
index 898ac5e295..72d09f6e67 100644
--- a/rpc/wrpc/server/src/service.rs
+++ b/rpc/wrpc/server/src/service.rs
@@ -123,6 +123,7 @@ impl WrpcService {
             rpc_handler.clone(),
             router.interface.clone(),
             Some(counters),
+            false,
         );
 
         WrpcService { options, server, rpc_handler, shutdown: SingleTrigger::default() }
@@ -146,10 +147,15 @@ impl WrpcService {
         info!("WRPC Server starting on: {}", listen_address);
         tokio::spawn(async move {
             let config = WebSocketConfig { max_message_size: Some(MAX_WRPC_MESSAGE_SIZE), ..Default::default() };
-            let serve_result = self.server.listen(&listen_address, Some(config)).await;
-            match serve_result {
-                Ok(_) => info!("WRPC Server stopped on: {}", listen_address),
-                Err(err) => panic!("WRPC Server {listen_address} stopped with error: {err:?}"),
+            match self.server.bind(&listen_address).await {
+                Ok(listener) => {
+                    let serve_result = self.server.listen(listener, Some(config)).await;
+                    match serve_result {
+                        Ok(_) => info!("WRPC Server stopped on: {}", listen_address),
+                        Err(err) => panic!("WRPC Server {listen_address} stopped with error: {err:?}"),
+                    }
+                }
+                Err(err) => panic!("WRPC Server bind error on {listen_address}: {err:?}"),
             }
         });
 
diff --git a/rpc/wrpc/wasm/Cargo.toml b/rpc/wrpc/wasm/Cargo.toml
index 83c78d26f8..54019fdcaa 100644
--- a/rpc/wrpc/wasm/Cargo.toml
+++ b/rpc/wrpc/wasm/Cargo.toml
@@ -40,6 +40,7 @@ js-sys.workspace = true
 wasm-bindgen-futures.workspace = true
 workflow-core.workspace = true
 futures.workspace =
true +ring = { version = "0.17", features = ["wasm32_unknown_unknown_js"] } -[lints.clippy] -empty_docs = "allow" +[lints] +workspace = true diff --git a/rpc/wrpc/wasm/src/client.rs b/rpc/wrpc/wasm/src/client.rs index 35cabd8d84..ccd9cb284b 100644 --- a/rpc/wrpc/wasm/src/client.rs +++ b/rpc/wrpc/wasm/src/client.rs @@ -1,3 +1,10 @@ +//! +//! # WASM bindings for the [Kaspa p2p Node RPC client](KaspaRpcClient). +//! +//! This module provides a WASM interface for the Kaspa p2p Node RPC client +//! - [`RpcClient`]. +//! + #![allow(non_snake_case)] use crate::imports::*; @@ -130,7 +137,7 @@ impl TryFrom for NotificationEvent { } } -pub struct Inner { +pub(crate) struct Inner { client: Arc, resolver: Option, notification_task: AtomicBool, @@ -364,19 +371,7 @@ impl RpcClient { /// Optional: Resolver node id. #[wasm_bindgen(getter, js_name = "nodeId")] pub fn resolver_node_id(&self) -> Option { - self.inner.client.node_descriptor().map(|node| node.id.clone()) - } - - /// Optional: public node provider name. - #[wasm_bindgen(getter, js_name = "providerName")] - pub fn resolver_node_provider_name(&self) -> Option { - self.inner.client.node_descriptor().and_then(|node| node.provider_name.clone()) - } - - /// Optional: public node provider URL. - #[wasm_bindgen(getter, js_name = "providerUrl")] - pub fn resolver_node_provider_url(&self) -> Option { - self.inner.client.node_descriptor().and_then(|node| node.provider_url.clone()) + self.inner.client.node_descriptor().map(|node| node.uid.clone()) } /// Connect to the Kaspa RPC server. This function starts a background @@ -796,7 +791,7 @@ impl RpcClient { #[wasm_bindgen(js_name = subscribeVirtualDaaScoreChanged)] pub async fn subscribe_daa_score(&self) -> Result<()> { if let Some(listener_id) = self.listener_id() { - self.inner.client.stop_notify(listener_id, Scope::VirtualDaaScoreChanged(VirtualDaaScoreChangedScope {})).await?; + self.inner.client.start_notify(listener_id, Scope::VirtualDaaScoreChanged(VirtualDaaScoreChangedScope {})).await?; } else { log_error!("RPC unsubscribe on a closed connection"); } @@ -957,6 +952,8 @@ build_wrpc_wasm_bindgen_interface!( /// performance and status of the Kaspa node. /// Returned information: Memory usage, CPU usage, network activity. GetMetrics, + /// Retrieves current number of network connections + GetConnections, /// Retrieves the current sink block, which is the block with /// the highest cumulative difficulty in the Kaspa BlockDAG. /// Returned information: Sink block hash, sink block height. @@ -979,6 +976,11 @@ build_wrpc_wasm_bindgen_interface!( /// Obtains basic information about the synchronization status of the Kaspa node. /// Returned information: Syncing status. GetSyncStatus, + /// Feerate estimates + GetFeeEstimate, + /// Retrieves the current network configuration. + /// Returned information: Current network configuration. + GetCurrentNetwork, ], [ // functions with `request` argument @@ -1006,13 +1008,15 @@ build_wrpc_wasm_bindgen_interface!( /// Generates a new block template for mining. /// Returned information: Block template information. GetBlockTemplate, + /// Checks if block is blue or not. + /// Returned information: Block blueness. + GetCurrentBlockColor, /// Retrieves the estimated DAA (Difficulty Adjustment Algorithm) /// score timestamp estimate. /// Returned information: DAA score timestamp estimate. GetDaaScoreTimestampEstimate, - /// Retrieves the current network configuration. - /// Returned information: Current network configuration. 
-    GetCurrentNetwork,
+    /// Feerate estimates (experimental)
+    GetFeeEstimateExperimental,
     /// Retrieves block headers from the Kaspa BlockDAG.
     /// Returned information: List of block headers.
     GetHeaders,
@@ -1042,8 +1046,11 @@
     /// Returned information: None.
     SubmitBlock,
     /// Submits a transaction to the Kaspa network.
-    /// Returned information: None.
+    /// Returned information: Submitted Transaction Id.
     SubmitTransaction,
+    /// Submits an RBF transaction to the Kaspa network.
+    /// Returned information: Submitted Transaction Id, Transaction that was replaced.
+    SubmitTransactionReplacement,
     /// Unbans a previously banned peer, allowing it to connect
     /// to the Kaspa node again.
     /// Returned information: None.
diff --git a/rpc/wrpc/wasm/src/lib.rs b/rpc/wrpc/wasm/src/lib.rs
index e80b3baac0..61d0de19b1 100644
--- a/rpc/wrpc/wasm/src/lib.rs
+++ b/rpc/wrpc/wasm/src/lib.rs
@@ -1,3 +1,7 @@
+//!
+//! WASM bindings for the [Rusty Kaspa p2p Node wRPC Client](kaspa-wrpc-client)
+//!
+
 #![allow(unused_imports)]
 
 use cfg_if::cfg_if;
diff --git a/rpc/wrpc/wasm/src/notify.rs b/rpc/wrpc/wasm/src/notify.rs
index 23781e3143..c586f24dc7 100644
--- a/rpc/wrpc/wasm/src/notify.rs
+++ b/rpc/wrpc/wasm/src/notify.rs
@@ -1,3 +1,7 @@
+//! Notification types and interfaces for wRPC events.
+
+#![allow(non_snake_case)]
+
 use crate::imports::*;
 use kaspa_rpc_macros::declare_typescript_wasm_interface as declare;
diff --git a/rpc/wrpc/wasm/src/resolver.rs b/rpc/wrpc/wasm/src/resolver.rs
index ee4b5d883e..7abfdb6884 100644
--- a/rpc/wrpc/wasm/src/resolver.rs
+++ b/rpc/wrpc/wasm/src/resolver.rs
@@ -1,3 +1,7 @@
+//! [`Resolver`](NativeResolver) bindings for obtaining public Kaspa wRPC URL endpoints.
+
+#![allow(non_snake_case)]
+
 use crate::client::{RpcClient, RpcConfig};
 use crate::imports::*;
 use js_sys::Array;
@@ -21,6 +25,20 @@ declare! {
      * Optional URLs for one or multiple resolvers.
      */
     urls?: string[];
+    /**
+     * Use strict TLS for RPC connections.
+     * If not set or `false` (default), the resolver will
+     * provide the best available connection regardless of
+     * whether this connection supports TLS or not.
+     * If set to `true`, the resolver will only provide
+     * TLS-enabled connections.
+     *
+     * This setting is ignored in the browser environment
+     * when the browser navigator location is `https`,
+     * in which case the resolver will always use TLS-enabled
+     * connections.
+     */
+    tls?: boolean;
 }
 "#,
 }
@@ -130,8 +148,8 @@ impl Resolver {
 impl Resolver {
     /// List of public Kaspa Resolver URLs.
     #[wasm_bindgen(getter)]
-    pub fn urls(&self) -> ResolverArrayT {
-        Array::from_iter(self.resolver.urls().iter().map(|v| JsValue::from(v.as_str()))).unchecked_into()
+    pub fn urls(&self) -> Option<ResolverArrayT> {
+        self.resolver.urls().map(|urls| Array::from_iter(urls.iter().map(|v| JsValue::from(v.as_str()))).unchecked_into())
     }
 
     /// Fetches a public Kaspa wRPC endpoint for the given encoding and network identifier.
@@ -163,20 +181,27 @@ impl Resolver {
 impl TryFrom<IResolverConfig> for NativeResolver {
     type Error = Error;
     fn try_from(config: IResolverConfig) -> Result<Self> {
-        let resolver = config
+        let tls = config.get_bool("tls").unwrap_or(false);
+        let urls = config
             .get_vec("urls")
             .map(|urls| urls.into_iter().map(|v| v.as_string()).collect::<Option<Vec<String>>>())
             .or_else(|_| config.dyn_into::<Array>().map(|urls| urls.into_iter().map(|v| v.as_string()).collect::<Option<Vec<String>>>()))
-            .map_err(|_| Error::custom("Invalid or missing resolver URL"))?
-            .map(|urls| NativeResolver::new(urls.into_iter().map(Arc::new).collect()));
+            .map_err(|_| Error::custom("Invalid or missing resolver URL"))?;
 
-        Ok(resolver.unwrap_or_default())
+        if let Some(urls) = urls {
+            Ok(NativeResolver::new(Some(urls.into_iter().map(Arc::new).collect()), tls))
+        } else {
+            Ok(NativeResolver::new(None, tls))
+        }
     }
 }
 
 impl TryCastFromJs for Resolver {
     type Error = Error;
-    fn try_cast_from(value: impl AsRef<JsValue>) -> Result<Cast<Self>> {
+    fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<'a, Self>>
+    where
+        R: AsRef<JsValue> + 'a,
+    {
         Ok(Self::try_ref_from_js_value_as_cast(value)?)
     }
 }
diff --git a/simpa/Cargo.toml b/simpa/Cargo.toml
index b52aa6fd93..815edf6a64 100644
--- a/simpa/Cargo.toml
+++ b/simpa/Cargo.toml
@@ -11,7 +11,7 @@ license.workspace = true
 repository.workspace = true
 
 [dependencies]
-kaspa-alloc.workspace = true # This changes the global allocator for all of the next dependencies so should be kept first
+kaspa-alloc.workspace = true # This changes the global allocator for all of the next dependencies so should be kept first
 kaspa-consensus-core.workspace = true
 kaspa-consensus-notify.workspace = true
 kaspa-consensus.workspace = true
@@ -22,6 +22,7 @@ kaspa-perf-monitor.workspace = true
 kaspa-utils.workspace = true
 
 async-channel.workspace = true
+cfg-if.workspace = true
 clap.workspace = true
 dhat = { workspace = true, optional = true }
 futures-util.workspace = true
@@ -38,3 +39,4 @@ tokio = { workspace = true, features = ["rt", "macros", "rt-multi-thread"] }
 
 [features]
 heap = ["dhat", "kaspa-alloc/heap"]
+semaphore-trace = ["kaspa-utils/semaphore-trace"]
diff --git a/simpa/src/main.rs b/simpa/src/main.rs
index 1baecc3e78..c66656be3c 100644
--- a/simpa/src/main.rs
+++ b/simpa/src/main.rs
@@ -20,7 +20,12 @@ use kaspa_consensus_core::{
     BlockHashSet, BlockLevel, HashMapCustomHasher,
 };
 use kaspa_consensus_notify::root::ConsensusNotificationRoot;
-use kaspa_core::{info, task::service::AsyncService, task::tick::TickService, time::unix_now, trace, warn};
+use kaspa_core::{
+    info,
+    task::{service::AsyncService, tick::TickService},
+    time::unix_now,
+    trace, warn,
+};
 use kaspa_database::prelude::ConnBuilder;
 use kaspa_database::{create_temp_db, load_existing_db};
 use kaspa_hashes::Hash;
@@ -78,7 +83,7 @@ struct Args {
     ram_scale: f64,
 
     /// Logging level for all subsystems {off, error, warn, info, debug, trace}
-    /// -- You may also specify <subsystem>=<level>,<subsystem>=<level>,... to set the log level for individual subsystems
+    /// -- You may also specify `<subsystem>=<level>,<subsystem>=<level>,...` to set the log level for individual subsystems
     #[arg(long = "loglevel", default_value = format!("info,{}=trace", env!("CARGO_PKG_NAME")))]
     log_level: String,
 
@@ -133,7 +138,13 @@ fn main() {
     let args = Args::parse();
 
     // Initialize the logger
-    kaspa_core::log::init_logger(None, &args.log_level);
+    cfg_if::cfg_if!
{ + if #[cfg(feature = "semaphore-trace")] { + kaspa_core::log::init_logger(None, &format!("{},{}=debug", args.log_level, kaspa_utils::sync::semaphore_module_path())); + } else { + kaspa_core::log::init_logger(None, &args.log_level); + } + }; // Configure the panic behavior // As we log the panic, we want to set it up after the logger diff --git a/simpa/src/simulator/miner.rs b/simpa/src/simulator/miner.rs index 1bcf86d272..a9a4a3423d 100644 --- a/simpa/src/simulator/miner.rs +++ b/simpa/src/simulator/miner.rs @@ -3,10 +3,10 @@ use itertools::Itertools; use kaspa_consensus::consensus::Consensus; use kaspa_consensus::model::stores::virtual_state::VirtualStateStoreReader; use kaspa_consensus::params::Params; -use kaspa_consensus::processes::mass::MassCalculator; use kaspa_consensus_core::api::ConsensusApi; use kaspa_consensus_core::block::{Block, TemplateBuildMode, TemplateTransactionSelector}; use kaspa_consensus_core::coinbase::MinerData; +use kaspa_consensus_core::mass::{Kip9Version, MassCalculator}; use kaspa_consensus_core::sign::sign; use kaspa_consensus_core::subnets::SUBNETWORK_ID_NATIVE; use kaspa_consensus_core::tx::{ @@ -151,10 +151,7 @@ impl Miner { .into_par_iter() .map(|mutable_tx| { let signed_tx = sign(mutable_tx, schnorr_key); - let mass = self - .mass_calculator - .calc_tx_overall_mass(&signed_tx.as_verifiable(), None, kaspa_consensus::processes::mass::Kip9Version::Alpha) - .unwrap(); + let mass = self.mass_calculator.calc_tx_overall_mass(&signed_tx.as_verifiable(), None, Kip9Version::Alpha).unwrap(); signed_tx.tx.set_mass(mass); let mut signed_tx = signed_tx.tx; signed_tx.finalize(); diff --git a/testing/integration/src/common/utils.rs b/testing/integration/src/common/utils.rs index 824bda3886..10fb9cb671 100644 --- a/testing/integration/src/common/utils.rs +++ b/testing/integration/src/common/utils.rs @@ -3,6 +3,7 @@ use itertools::Itertools; use kaspa_addresses::Address; use kaspa_consensus_core::{ constants::TX_VERSION, + header::Header, sign::sign, subnets::SUBNETWORK_ID_NATIVE, tx::{ @@ -16,7 +17,7 @@ use kaspa_consensus_core::{ }; use kaspa_core::info; use kaspa_grpc_client::GrpcClient; -use kaspa_rpc_core::{api::rpc::RpcApi, BlockAddedNotification, Notification, VirtualDaaScoreChangedNotification}; +use kaspa_rpc_core::{api::rpc::RpcApi, BlockAddedNotification, Notification, RpcUtxoEntry, VirtualDaaScoreChangedNotification}; use kaspa_txscript::pay_to_address_script; use rayon::prelude::{IntoParallelIterator, ParallelIterator}; use secp256k1::Keypair; @@ -36,8 +37,8 @@ const fn estimated_mass(num_inputs: usize, num_outputs: u64) -> u64 { } pub const fn required_fee(num_inputs: usize, num_outputs: u64) -> u64 { - const FEE_PER_MASS: u64 = 10; - FEE_PER_MASS * estimated_mass(num_inputs, num_outputs) + const FEE_RATE: u64 = 10; + FEE_RATE * estimated_mass(num_inputs, num_outputs) } /// Builds a TX DAG based on the initial UTXO set and on constant params @@ -170,13 +171,13 @@ pub async fn fetch_spendable_utxos( { assert!(resp_entry.address.is_some()); assert_eq!(*resp_entry.address.as_ref().unwrap(), address); - utxos.push((resp_entry.outpoint, resp_entry.utxo_entry)); + utxos.push((TransactionOutpoint::from(resp_entry.outpoint), UtxoEntry::from(resp_entry.utxo_entry))); } utxos.sort_by(|a, b| b.1.amount.cmp(&a.1.amount)); utxos } -pub fn is_utxo_spendable(entry: &UtxoEntry, virtual_daa_score: u64, coinbase_maturity: u64) -> bool { +pub fn is_utxo_spendable(entry: &RpcUtxoEntry, virtual_daa_score: u64, coinbase_maturity: u64) -> bool { let needed_confirmations = if 
!entry.is_coinbase { 10 } else { coinbase_maturity }; entry.block_daa_score + needed_confirmations <= virtual_daa_score } @@ -187,7 +188,8 @@ pub async fn mine_block(pay_address: Address, submitting_client: &GrpcClient, li // Mine a block let template = submitting_client.get_block_template(pay_address.clone(), vec![]).await.unwrap(); - let block_hash = template.block.header.hash; + let header: Header = (&template.block.header).into(); + let block_hash = header.hash; submitting_client.submit_block(template.block, false).await.unwrap(); // Wait for each listening client to get notified the submitted block was added to the DAG diff --git a/testing/integration/src/daemon_integration_tests.rs b/testing/integration/src/daemon_integration_tests.rs index 29f74e75ee..460cf049c3 100644 --- a/testing/integration/src/daemon_integration_tests.rs +++ b/testing/integration/src/daemon_integration_tests.rs @@ -7,6 +7,7 @@ use crate::common::{ use kaspa_addresses::Address; use kaspa_alloc::init_allocator_with_default_settings; use kaspa_consensus::params::SIMNET_PARAMS; +use kaspa_consensus_core::header::Header; use kaspa_consensusmanager::ConsensusManager; use kaspa_core::{task::runtime::AsyncRuntime, trace}; use kaspa_grpc_client::GrpcClient; @@ -77,7 +78,8 @@ async fn daemon_mining_test() { .get_block_template(Address::new(kaspad1.network.into(), kaspa_addresses::Version::PubKey, &[0; 32]), vec![]) .await .unwrap(); - last_block_hash = Some(template.block.header.hash); + let header: Header = (&template.block.header).into(); + last_block_hash = Some(header.hash); rpc_client1.submit_block(template.block, false).await.unwrap(); while let Ok(notification) = match tokio::time::timeout(Duration::from_secs(1), event_receiver.recv()).await { @@ -104,7 +106,13 @@ async fn daemon_mining_test() { assert_eq!(dag_info.sink, last_block_hash.unwrap()); // Check that acceptance data contains the expected coinbase tx ids - let vc = rpc_client2.get_virtual_chain_from_block(kaspa_consensus::params::SIMNET_GENESIS.hash, true).await.unwrap(); + let vc = rpc_client2 + .get_virtual_chain_from_block( + kaspa_consensus::params::SIMNET_GENESIS.hash, // + true, + ) + .await + .unwrap(); assert_eq!(vc.removed_chain_block_hashes.len(), 0); assert_eq!(vc.added_chain_block_hashes.len(), 10); assert_eq!(vc.accepted_transaction_ids.len(), 10); @@ -180,7 +188,8 @@ async fn daemon_utxos_propagation_test() { let mut last_block_hash = None; for i in 0..initial_blocks { let template = rpc_client1.get_block_template(miner_address.clone(), vec![]).await.unwrap(); - last_block_hash = Some(template.block.header.hash); + let header: Header = (&template.block.header).into(); + last_block_hash = Some(header.hash); rpc_client1.submit_block(template.block, false).await.unwrap(); while let Ok(notification) = match tokio::time::timeout(Duration::from_secs(1), event_receiver1.recv()).await { diff --git a/testing/integration/src/mempool_benchmarks.rs b/testing/integration/src/mempool_benchmarks.rs index 3df716594d..00d9b78032 100644 --- a/testing/integration/src/mempool_benchmarks.rs +++ b/testing/integration/src/mempool_benchmarks.rs @@ -295,8 +295,8 @@ async fn bench_bbt_latency_2() { const BLOCK_COUNT: usize = usize::MAX; const MEMPOOL_TARGET: u64 = 600_000; - const TX_COUNT: usize = 1_400_000; - const TX_LEVEL_WIDTH: usize = 20_000; + const TX_COUNT: usize = 1_000_000; + const TX_LEVEL_WIDTH: usize = 300_000; const TPS_PRESSURE: u64 = u64::MAX; const SUBMIT_BLOCK_CLIENTS: usize = 20; diff --git a/testing/integration/src/rpc_tests.rs 
b/testing/integration/src/rpc_tests.rs index 3224cefee9..3c4df601b3 100644 --- a/testing/integration/src/rpc_tests.rs +++ b/testing/integration/src/rpc_tests.rs @@ -4,8 +4,8 @@ use crate::common::{client_notify::ChannelNotify, daemon::Daemon}; use futures_util::future::try_join_all; use kaspa_addresses::{Address, Prefix, Version}; use kaspa_consensus::params::SIMNET_GENESIS; -use kaspa_consensus_core::{constants::MAX_SOMPI, subnets::SubnetworkId, tx::Transaction}; -use kaspa_core::info; +use kaspa_consensus_core::{constants::MAX_SOMPI, header::Header, subnets::SubnetworkId, tx::Transaction}; +use kaspa_core::{assert_match, info}; use kaspa_grpc_core::ops::KaspadPayloadOps; use kaspa_hashes::Hash; use kaspa_notify::{ @@ -64,7 +64,7 @@ async fn sanity_test() { // The intent of this for/match design (emphasizing the absence of an arm with fallback pattern in the match) // is to force any implementor of a new RpcApi method to add a matching arm here and to strongly incentivize // the adding of an actual sanity test of said new method. - for op in KaspadPayloadOps::list() { + for op in KaspadPayloadOps::iter() { let network_id = daemon.network; let task: JoinHandle<()> = match op { KaspadPayloadOps::SubmitBlock => { @@ -79,21 +79,24 @@ async fn sanity_test() { .unwrap(); // Before submitting a first block, the sink is the genesis, - let response = rpc_client.get_sink_call(GetSinkRequest {}).await.unwrap(); + let response = rpc_client.get_sink_call(None, GetSinkRequest {}).await.unwrap(); assert_eq!(response.sink, SIMNET_GENESIS.hash); - let response = rpc_client.get_sink_blue_score_call(GetSinkBlueScoreRequest {}).await.unwrap(); + let response = rpc_client.get_sink_blue_score_call(None, GetSinkBlueScoreRequest {}).await.unwrap(); assert_eq!(response.blue_score, 0); // the block count is 0 - let response = rpc_client.get_block_count_call(GetBlockCountRequest {}).await.unwrap(); + let response = rpc_client.get_block_count_call(None, GetBlockCountRequest {}).await.unwrap(); assert_eq!(response.block_count, 0); // and the virtual chain is the genesis only let response = rpc_client - .get_virtual_chain_from_block_call(GetVirtualChainFromBlockRequest { - start_hash: SIMNET_GENESIS.hash, - include_accepted_transaction_ids: false, - }) + .get_virtual_chain_from_block_call( + None, + GetVirtualChainFromBlockRequest { + start_hash: SIMNET_GENESIS.hash, + include_accepted_transaction_ids: false, + }, + ) .await .unwrap(); assert!(response.added_chain_block_hashes.is_empty()); @@ -101,14 +104,21 @@ async fn sanity_test() { // Get a block template let GetBlockTemplateResponse { block, is_synced } = rpc_client - .get_block_template_call(GetBlockTemplateRequest { - pay_address: Address::new(Prefix::Simnet, Version::PubKey, &[0u8; 32]), - extra_data: Vec::new(), - }) + .get_block_template_call( + None, + GetBlockTemplateRequest { + pay_address: Address::new(Prefix::Simnet, Version::PubKey, &[0u8; 32]), + extra_data: Vec::new(), + }, + ) .await .unwrap(); assert!(!is_synced); + // Compute the expected block hash for the received block + let header: Header = (&block.header).into(); + let block_hash = header.hash; + // Submit the template (no mining, in simnet PoW is skipped) let response = rpc_client.submit_block(block.clone(), false).await.unwrap(); assert_eq!(response.report, SubmitBlockReport::Success); @@ -131,23 +141,40 @@ async fn sanity_test() { } // After submitting a first block, the sink is the submitted block, - let response = rpc_client.get_sink_call(GetSinkRequest {}).await.unwrap(); - 
assert_eq!(response.sink, block.header.hash); + let response = rpc_client.get_sink_call(None, GetSinkRequest {}).await.unwrap(); + assert_eq!(response.sink, block_hash); // the block count is 1 - let response = rpc_client.get_block_count_call(GetBlockCountRequest {}).await.unwrap(); + let response = rpc_client.get_block_count_call(None, GetBlockCountRequest {}).await.unwrap(); assert_eq!(response.block_count, 1); // and the virtual chain from genesis contains the added block let response = rpc_client - .get_virtual_chain_from_block_call(GetVirtualChainFromBlockRequest { - start_hash: SIMNET_GENESIS.hash, - include_accepted_transaction_ids: false, - }) + .get_virtual_chain_from_block_call( + None, + GetVirtualChainFromBlockRequest { + start_hash: SIMNET_GENESIS.hash, + include_accepted_transaction_ids: false, + }, + ) .await .unwrap(); - assert!(response.added_chain_block_hashes.contains(&block.header.hash)); + assert!(response.added_chain_block_hashes.contains(&block_hash)); assert!(response.removed_chain_block_hashes.is_empty()); + + let result = + rpc_client.get_current_block_color_call(None, GetCurrentBlockColorRequest { hash: SIMNET_GENESIS.hash }).await; + + // Genesis was merged by the new sink, so we're expecting a positive blueness response + assert_match!(result, Ok(GetCurrentBlockColorResponse { blue: true })); + + // The new sink has no merging block yet, so we expect a MergerNotFound error + let result = rpc_client.get_current_block_color_call(None, GetCurrentBlockColorRequest { hash: block_hash }).await; + assert!(result.is_err()); + + // Non-existing blocks should return an error + let result = rpc_client.get_current_block_color_call(None, GetCurrentBlockColorRequest { hash: 999.into() }).await; + assert!(result.is_err()); }) } @@ -155,10 +182,14 @@ async fn sanity_test() { tst!(op, "see SubmitBlock") } + KaspadPayloadOps::GetCurrentBlockColor => { + tst!(op, "see SubmitBlock") + } + KaspadPayloadOps::GetCurrentNetwork => { let rpc_client = client.clone(); tst!(op, { - let response = rpc_client.get_current_network_call(GetCurrentNetworkRequest {}).await.unwrap(); + let response = rpc_client.get_current_network_call(None, GetCurrentNetworkRequest {}).await.unwrap(); assert_eq!(response.network, network_id.network_type); }) } @@ -166,11 +197,12 @@ async fn sanity_test() { KaspadPayloadOps::GetBlock => { let rpc_client = client.clone(); tst!(op, { - let result = rpc_client.get_block_call(GetBlockRequest { hash: 0.into(), include_transactions: false }).await; + let result = + rpc_client.get_block_call(None, GetBlockRequest { hash: 0.into(), include_transactions: false }).await; assert!(result.is_err()); let response = rpc_client - .get_block_call(GetBlockRequest { hash: SIMNET_GENESIS.hash, include_transactions: false }) + .get_block_call(None, GetBlockRequest { hash: SIMNET_GENESIS.hash, include_transactions: false }) .await .unwrap(); assert_eq!(response.block.header.hash, SIMNET_GENESIS.hash); @@ -181,7 +213,7 @@ async fn sanity_test() { let rpc_client = client.clone(); tst!(op, { let response = rpc_client - .get_blocks_call(GetBlocksRequest { include_blocks: true, include_transactions: false, low_hash: None }) + .get_blocks_call(None, GetBlocksRequest { include_blocks: true, include_transactions: false, low_hash: None }) .await .unwrap(); assert_eq!(response.blocks.len(), 1, "genesis block should be returned"); @@ -193,7 +225,7 @@ async fn sanity_test() { KaspadPayloadOps::GetInfo => { let rpc_client = client.clone(); tst!(op, { - let response = 
rpc_client.get_info_call(GetInfoRequest {}).await.unwrap(); + let response = rpc_client.get_info_call(None, GetInfoRequest {}).await.unwrap(); assert_eq!(response.server_version, kaspa_core::kaspad_env::version().to_string()); assert_eq!(response.mempool_size, 0); assert!(response.is_utxo_indexed); @@ -220,11 +252,14 @@ async fn sanity_test() { let rpc_client = client.clone(); tst!(op, { let response_result = rpc_client - .get_mempool_entry_call(GetMempoolEntryRequest { - transaction_id: 0.into(), - include_orphan_pool: true, - filter_transaction_pool: false, - }) + .get_mempool_entry_call( + None, + GetMempoolEntryRequest { + transaction_id: 0.into(), + include_orphan_pool: true, + filter_transaction_pool: false, + }, + ) .await; // Test Get Mempool Entry: // TODO: Fix by adding actual mempool entries this can get because otherwise it errors out @@ -236,10 +271,10 @@ async fn sanity_test() { let rpc_client = client.clone(); tst!(op, { let response = rpc_client - .get_mempool_entries_call(GetMempoolEntriesRequest { - include_orphan_pool: true, - filter_transaction_pool: false, - }) + .get_mempool_entries_call( + None, + GetMempoolEntriesRequest { include_orphan_pool: true, filter_transaction_pool: false }, + ) .await .unwrap(); assert!(response.mempool_entries.is_empty()); @@ -249,7 +284,7 @@ async fn sanity_test() { KaspadPayloadOps::GetConnectedPeerInfo => { let rpc_client = client.clone(); tst!(op, { - let response = rpc_client.get_connected_peer_info_call(GetConnectedPeerInfoRequest {}).await.unwrap(); + let response = rpc_client.get_connected_peer_info_call(None, GetConnectedPeerInfoRequest {}).await.unwrap(); assert!(response.peer_info.is_empty()); }) } @@ -258,12 +293,12 @@ async fn sanity_test() { let rpc_client = client.clone(); tst!(op, { let peer_address = ContextualNetAddress::from_str("1.2.3.4").unwrap(); - let _ = rpc_client.add_peer_call(AddPeerRequest { peer_address, is_permanent: true }).await.unwrap(); + let _ = rpc_client.add_peer_call(None, AddPeerRequest { peer_address, is_permanent: true }).await.unwrap(); // Add peer only adds the IP to a connection request. It will only be added to known_addresses if it // actually can be connected to. So in this test we can't expect it to be added unless we set up an // actual peer. 
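Aside: the mechanical change running through this entire test is the new leading connection argument on each `*_call` method, passed as `None` here. As a minimal sketch of the convention (the helper name is hypothetical; `GrpcClient` and the request types are the same ones this test file uses), the add-peer flow above boils down to:

```rust
use std::str::FromStr;

use kaspa_grpc_client::GrpcClient;
use kaspa_rpc_core::{api::rpc::RpcApi, AddPeerRequest, GetPeerAddressesRequest, RpcResult};
use kaspa_utils::networking::ContextualNetAddress;

// Ask the node to connect to a peer, then inspect the address manager.
// As the comment above notes, the address is only recorded once a
// connection actually succeeds, so a fresh node keeps an empty list.
async fn probe_peer_bookkeeping(client: &GrpcClient) -> RpcResult<()> {
    let peer_address = ContextualNetAddress::from_str("1.2.3.4").unwrap();
    client.add_peer_call(None, AddPeerRequest { peer_address, is_permanent: true }).await?;
    let response = client.get_peer_addresses_call(None, GetPeerAddressesRequest {}).await?;
    assert!(response.known_addresses.is_empty());
    Ok(())
}
```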
- let response = rpc_client.get_peer_addresses_call(GetPeerAddressesRequest {}).await.unwrap(); + let response = rpc_client.get_peer_addresses_call(None, GetPeerAddressesRequest {}).await.unwrap(); assert!(response.known_addresses.is_empty()); }) } @@ -274,14 +309,14 @@ async fn sanity_test() { let peer_address = ContextualNetAddress::from_str("5.6.7.8").unwrap(); let ip = peer_address.normalize(1).ip; - let _ = rpc_client.add_peer_call(AddPeerRequest { peer_address, is_permanent: false }).await.unwrap(); - let _ = rpc_client.ban_call(BanRequest { ip }).await.unwrap(); + let _ = rpc_client.add_peer_call(None, AddPeerRequest { peer_address, is_permanent: false }).await.unwrap(); + let _ = rpc_client.ban_call(None, BanRequest { ip }).await.unwrap(); - let response = rpc_client.get_peer_addresses_call(GetPeerAddressesRequest {}).await.unwrap(); + let response = rpc_client.get_peer_addresses_call(None, GetPeerAddressesRequest {}).await.unwrap(); assert!(response.banned_addresses.contains(&ip)); - let _ = rpc_client.unban_call(UnbanRequest { ip }).await.unwrap(); - let response = rpc_client.get_peer_addresses_call(GetPeerAddressesRequest {}).await.unwrap(); + let _ = rpc_client.unban_call(None, UnbanRequest { ip }).await.unwrap(); + let response = rpc_client.get_peer_addresses_call(None, GetPeerAddressesRequest {}).await.unwrap(); assert!(!response.banned_addresses.contains(&ip)); }) } @@ -301,11 +336,22 @@ async fn sanity_test() { }) } + KaspadPayloadOps::SubmitTransactionReplacement => { + let rpc_client = client.clone(); + tst!(op, { + // Build an erroneous transaction... + let transaction = Transaction::new(0, vec![], vec![], 0, SubnetworkId::default(), 0, vec![]); + let result = rpc_client.submit_transaction_replacement((&transaction).into()).await; + // ...that gets rejected by the consensus + assert!(result.is_err()); + }) + } + KaspadPayloadOps::GetSubnetwork => { let rpc_client = client.clone(); tst!(op, { let result = - rpc_client.get_subnetwork_call(GetSubnetworkRequest { subnetwork_id: SubnetworkId::from_byte(0) }).await; + rpc_client.get_subnetwork_call(None, GetSubnetworkRequest { subnetwork_id: SubnetworkId::from_byte(0) }).await; // Err because it's currently unimplemented assert!(result.is_err()); @@ -323,7 +369,7 @@ async fn sanity_test() { KaspadPayloadOps::GetBlockDagInfo => { let rpc_client = client.clone(); tst!(op, { - let response = rpc_client.get_block_dag_info_call(GetBlockDagInfoRequest {}).await.unwrap(); + let response = rpc_client.get_block_dag_info_call(None, GetBlockDagInfoRequest {}).await.unwrap(); assert_eq!(response.network, network_id); }) } @@ -332,9 +378,10 @@ async fn sanity_test() { let rpc_client = client.clone(); tst!(op, { let response_result = rpc_client - .resolve_finality_conflict_call(ResolveFinalityConflictRequest { - finality_block_hash: Hash::from_bytes([0; 32]), - }) + .resolve_finality_conflict_call( + None, + ResolveFinalityConflictRequest { finality_block_hash: Hash::from_bytes([0; 32]) }, + ) .await; // Err because it's currently unimplemented @@ -346,7 +393,7 @@ async fn sanity_test() { let rpc_client = client.clone(); tst!(op, { let response_result = rpc_client - .get_headers_call(GetHeadersRequest { start_hash: SIMNET_GENESIS.hash, limit: 1, is_ascending: true }) + .get_headers_call(None, GetHeadersRequest { start_hash: SIMNET_GENESIS.hash, limit: 1, is_ascending: true }) .await; // Err because it's currently unimplemented @@ -358,7 +405,8 @@ async fn sanity_test() { let rpc_client = client.clone(); tst!(op, { let addresses = 
vec![Address::new(Prefix::Simnet, Version::PubKey, &[0u8; 32])]; - let response = rpc_client.get_utxos_by_addresses_call(GetUtxosByAddressesRequest { addresses }).await.unwrap(); + let response = + rpc_client.get_utxos_by_addresses_call(None, GetUtxosByAddressesRequest { addresses }).await.unwrap(); assert!(response.entries.is_empty()); }) } @@ -367,9 +415,10 @@ async fn sanity_test() { let rpc_client = client.clone(); tst!(op, { let response = rpc_client - .get_balance_by_address_call(GetBalanceByAddressRequest { - address: Address::new(Prefix::Simnet, Version::PubKey, &[0u8; 32]), - }) + .get_balance_by_address_call( + None, + GetBalanceByAddressRequest { address: Address::new(Prefix::Simnet, Version::PubKey, &[0u8; 32]) }, + ) .await .unwrap(); assert_eq!(response.balance, 0); @@ -381,7 +430,7 @@ async fn sanity_test() { tst!(op, { let addresses = vec![Address::new(Prefix::Simnet, Version::PubKey, &[1u8; 32])]; let response = rpc_client - .get_balances_by_addresses_call(GetBalancesByAddressesRequest::new(addresses.clone())) + .get_balances_by_addresses_call(None, GetBalancesByAddressesRequest::new(addresses.clone())) .await .unwrap(); assert_eq!(response.entries.len(), 1); @@ -389,7 +438,7 @@ async fn sanity_test() { assert_eq!(response.entries[0].balance, Some(0)); let response = - rpc_client.get_balances_by_addresses_call(GetBalancesByAddressesRequest::new(vec![])).await.unwrap(); + rpc_client.get_balances_by_addresses_call(None, GetBalancesByAddressesRequest::new(vec![])).await.unwrap(); assert!(response.entries.is_empty()); }) } @@ -397,7 +446,7 @@ async fn sanity_test() { KaspadPayloadOps::GetSinkBlueScore => { let rpc_client = client.clone(); tst!(op, { - let response = rpc_client.get_sink_blue_score_call(GetSinkBlueScoreRequest {}).await.unwrap(); + let response = rpc_client.get_sink_blue_score_call(None, GetSinkBlueScoreRequest {}).await.unwrap(); // A concurrent test may have added a single block so the blue score can be either 0 or 1 assert!(response.blue_score < 2); }) @@ -407,10 +456,10 @@ async fn sanity_test() { let rpc_client = client.clone(); tst!(op, { let response_result = rpc_client - .estimate_network_hashes_per_second_call(EstimateNetworkHashesPerSecondRequest { - window_size: 1000, - start_hash: None, - }) + .estimate_network_hashes_per_second_call( + None, + EstimateNetworkHashesPerSecondRequest { window_size: 1000, start_hash: None }, + ) .await; // The current DAA window is almost empty so an error is expected assert!(response_result.is_err()); @@ -422,11 +471,10 @@ async fn sanity_test() { tst!(op, { let addresses = vec![Address::new(Prefix::Simnet, Version::PubKey, &[0u8; 32])]; let response = rpc_client - .get_mempool_entries_by_addresses_call(GetMempoolEntriesByAddressesRequest::new( - addresses.clone(), - true, - false, - )) + .get_mempool_entries_by_addresses_call( + None, + GetMempoolEntriesByAddressesRequest::new(addresses.clone(), true, false), + ) .await .unwrap(); assert_eq!(response.entries.len(), 1); @@ -439,7 +487,7 @@ async fn sanity_test() { KaspadPayloadOps::GetCoinSupply => { let rpc_client = client.clone(); tst!(op, { - let response = rpc_client.get_coin_supply_call(GetCoinSupplyRequest {}).await.unwrap(); + let response = rpc_client.get_coin_supply_call(None, GetCoinSupplyRequest {}).await.unwrap(); assert_eq!(response.circulating_sompi, 0); assert_eq!(response.max_sompi, MAX_SOMPI); }) @@ -448,7 +496,14 @@ async fn sanity_test() { KaspadPayloadOps::Ping => { let rpc_client = client.clone(); tst!(op, { - let _ = 
rpc_client.ping_call(PingRequest {}).await.unwrap(); + let _ = rpc_client.ping_call(None, PingRequest {}).await.unwrap(); + }) + } + + KaspadPayloadOps::GetConnections => { + let rpc_client = client.clone(); + tst!(op, { + let _ = rpc_client.get_connections_call(None, GetConnectionsRequest { include_profile_data: true }).await.unwrap(); }) } @@ -456,48 +511,68 @@ async fn sanity_test() { let rpc_client = client.clone(); tst!(op, { let get_metrics_call_response = rpc_client - .get_metrics_call(GetMetricsRequest { - consensus_metrics: true, - connection_metrics: true, - bandwidth_metrics: true, - process_metrics: true, - }) + .get_metrics_call( + None, + GetMetricsRequest { + consensus_metrics: true, + connection_metrics: true, + bandwidth_metrics: true, + process_metrics: true, + storage_metrics: true, + custom_metrics: true, + }, + ) .await .unwrap(); assert!(get_metrics_call_response.process_metrics.is_some()); assert!(get_metrics_call_response.consensus_metrics.is_some()); let get_metrics_call_response = rpc_client - .get_metrics_call(GetMetricsRequest { - consensus_metrics: false, - connection_metrics: true, - bandwidth_metrics: true, - process_metrics: true, - }) + .get_metrics_call( + None, + GetMetricsRequest { + consensus_metrics: false, + connection_metrics: true, + bandwidth_metrics: true, + process_metrics: true, + storage_metrics: true, + custom_metrics: true, + }, + ) .await .unwrap(); assert!(get_metrics_call_response.process_metrics.is_some()); assert!(get_metrics_call_response.consensus_metrics.is_none()); let get_metrics_call_response = rpc_client - .get_metrics_call(GetMetricsRequest { - consensus_metrics: true, - connection_metrics: true, - bandwidth_metrics: false, - process_metrics: false, - }) + .get_metrics_call( + None, + GetMetricsRequest { + consensus_metrics: true, + connection_metrics: true, + bandwidth_metrics: false, + process_metrics: false, + storage_metrics: false, + custom_metrics: true, + }, + ) .await .unwrap(); assert!(get_metrics_call_response.process_metrics.is_none()); assert!(get_metrics_call_response.consensus_metrics.is_some()); let get_metrics_call_response = rpc_client - .get_metrics_call(GetMetricsRequest { - consensus_metrics: false, - connection_metrics: true, - bandwidth_metrics: false, - process_metrics: false, - }) + .get_metrics_call( + None, + GetMetricsRequest { + consensus_metrics: false, + connection_metrics: true, + bandwidth_metrics: false, + process_metrics: false, + storage_metrics: false, + custom_metrics: true, + }, + ) .await .unwrap(); assert!(get_metrics_call_response.process_metrics.is_none()); @@ -505,10 +580,17 @@ async fn sanity_test() { }) } + KaspadPayloadOps::GetSystemInfo => { + let rpc_client = client.clone(); + tst!(op, { + let _response = rpc_client.get_system_info_call(None, GetSystemInfoRequest {}).await.unwrap(); + }) + } + KaspadPayloadOps::GetServerInfo => { let rpc_client = client.clone(); tst!(op, { - let response = rpc_client.get_server_info_call(GetServerInfoRequest {}).await.unwrap(); + let response = rpc_client.get_server_info_call(None, GetServerInfoRequest {}).await.unwrap(); assert!(response.has_utxo_index); // we set utxoindex above assert_eq!(response.network_id, network_id); }) @@ -517,7 +599,7 @@ async fn sanity_test() { KaspadPayloadOps::GetSyncStatus => { let rpc_client = client.clone(); tst!(op, { - let _ = rpc_client.get_sync_status_call(GetSyncStatusRequest {}).await.unwrap(); + let _ = rpc_client.get_sync_status_call(None, GetSyncStatusRequest {}).await.unwrap(); }) } @@ -525,9 +607,10 @@ 
async fn sanity_test() {
                let rpc_client = client.clone();
                tst!(op, {
                    let results = rpc_client
-                        .get_daa_score_timestamp_estimate_call(GetDaaScoreTimestampEstimateRequest {
-                            daa_scores: vec![0, 500, 2000, u64::MAX],
-                        })
+                        .get_daa_score_timestamp_estimate_call(
+                            None,
+                            GetDaaScoreTimestampEstimateRequest { daa_scores: vec![0, 500, 2000, u64::MAX] },
+                        )
                         .await
                         .unwrap();
@@ -536,7 +619,7 @@ async fn sanity_test() {
                    }

                    let results = rpc_client
-                        .get_daa_score_timestamp_estimate_call(GetDaaScoreTimestampEstimateRequest { daa_scores: vec![] })
+                        .get_daa_score_timestamp_estimate_call(None, GetDaaScoreTimestampEstimateRequest { daa_scores: vec![] })
                         .await
                         .unwrap();
@@ -546,6 +629,33 @@ async fn sanity_test() {
                })
            }

+            KaspadPayloadOps::GetFeeEstimate => {
+                let rpc_client = client.clone();
+                tst!(op, {
+                    let response = rpc_client.get_fee_estimate().await.unwrap();
+                    info!("{:?}", response.priority_bucket);
+                    assert!(!response.normal_buckets.is_empty());
+                    assert!(!response.low_buckets.is_empty());
+                    for bucket in response.ordered_buckets() {
+                        info!("{:?}", bucket);
+                    }
+                })
+            }
+
+            KaspadPayloadOps::GetFeeEstimateExperimental => {
+                let rpc_client = client.clone();
+                tst!(op, {
+                    let response = rpc_client.get_fee_estimate_experimental(true).await.unwrap();
+                    assert!(!response.estimate.normal_buckets.is_empty());
+                    assert!(!response.estimate.low_buckets.is_empty());
+                    for bucket in response.estimate.ordered_buckets() {
+                        info!("{:?}", bucket);
+                    }
+                    assert!(response.verbose.is_some());
+                    info!("{:?}", response.verbose);
+                })
+            }
+
            KaspadPayloadOps::NotifyBlockAdded => {
                let rpc_client = client.clone();
                let id = listener_id;
@@ -632,7 +742,7 @@ async fn sanity_test() {

    // Shutdown should only be tested after everything
    let rpc_client = client.clone();
-    let _ = rpc_client.shutdown_call(ShutdownRequest {}).await.unwrap();
+    let _ = rpc_client.shutdown_call(None, ShutdownRequest {}).await.unwrap();

    //
    // Fold-up
diff --git a/testing/integration/src/tasks/block/miner.rs b/testing/integration/src/tasks/block/miner.rs
index ae759801e5..2cf117028a 100644
--- a/testing/integration/src/tasks/block/miner.rs
+++ b/testing/integration/src/tasks/block/miner.rs
@@ -4,7 +4,7 @@ use async_trait::async_trait;
 use kaspa_addresses::Address;
 use kaspa_core::warn;
 use kaspa_grpc_client::GrpcClient;
-use kaspa_rpc_core::{api::rpc::RpcApi, GetBlockTemplateResponse, RpcBlock};
+use kaspa_rpc_core::{api::rpc::RpcApi, GetBlockTemplateResponse, RpcRawBlock};
 use kaspa_utils::triggers::SingleTrigger;
 use parking_lot::Mutex;
 use rand::thread_rng;
@@ -25,7 +25,7 @@ pub struct BlockMinerTask {
     client: Arc<GrpcClient>,
     bps: u64,
     block_count: usize,
-    sender: Sender<RpcBlock>,
+    sender: Sender<RpcRawBlock>,
     template: Arc<Mutex<GetBlockTemplateResponse>>,
     pay_address: Address,
     tx_counter: Arc<AtomicUsize>,
@@ -38,7 +38,7 @@ impl BlockMinerTask {
         client: Arc<GrpcClient>,
         bps: u64,
         block_count: usize,
-        sender: Sender<RpcBlock>,
+        sender: Sender<RpcRawBlock>,
         template: Arc<Mutex<GetBlockTemplateResponse>>,
         pay_address: Address,
         stopper: Stopper,
@@ -60,7 +60,7 @@ impl BlockMinerTask {
         client: Arc<GrpcClient>,
         bps: u64,
         block_count: usize,
-        sender: Sender<RpcBlock>,
+        sender: Sender<RpcRawBlock>,
         template: Arc<Mutex<GetBlockTemplateResponse>>,
         pay_address: Address,
         stopper: Stopper,
@@ -68,7 +68,7 @@ impl BlockMinerTask {
         Arc::new(Self::new(client, bps, block_count, sender, template, pay_address, stopper))
     }

-    pub fn sender(&self) -> Sender<RpcBlock> {
+    pub fn sender(&self) -> Sender<RpcRawBlock> {
         self.sender.clone()
     }
diff --git a/testing/integration/src/tasks/block/submitter.rs b/testing/integration/src/tasks/block/submitter.rs
index b57d032696..49bf9d83e5 100644
--- a/testing/integration/src/tasks/block/submitter.rs
+++ b/testing/integration/src/tasks/block/submitter.rs
@@ -6,18 +6,18 @@ use async_channel::Sender;
 use async_trait::async_trait;
 use kaspa_core::warn;
 use kaspa_grpc_client::ClientPool;
-use kaspa_rpc_core::{api::rpc::RpcApi, RpcBlock};
+use kaspa_rpc_core::{api::rpc::RpcApi, RpcRawBlock};
 use kaspa_utils::triggers::SingleTrigger;
 use std::{sync::Arc, time::Duration};
 use tokio::{task::JoinHandle, time::sleep};

 pub struct BlockSubmitterTask {
-    pool: ClientPool<RpcBlock>,
+    pool: ClientPool<RpcRawBlock>,
     stopper: Stopper,
 }

 impl BlockSubmitterTask {
-    pub fn new(pool: ClientPool<RpcBlock>, stopper: Stopper) -> Self {
+    pub fn new(pool: ClientPool<RpcRawBlock>, stopper: Stopper) -> Self {
         Self { pool, stopper }
     }

@@ -26,7 +26,7 @@ impl BlockSubmitterTask {
         Arc::new(Self::new(pool, stopper))
     }

-    pub fn sender(&self) -> Sender<RpcBlock> {
+    pub fn sender(&self) -> Sender<RpcRawBlock> {
         self.pool.sender()
     }
 }
@@ -35,7 +35,7 @@ impl Task for BlockSubmitterTask {
     fn start(&self, stop_signal: SingleTrigger) -> Vec<JoinHandle<()>> {
         warn!("Block submitter task starting...");
-        let mut tasks = self.pool.start(|c, block: RpcBlock| async move {
+        let mut tasks = self.pool.start(|c, block: RpcRawBlock| async move {
             loop {
                 match c.submit_block(block.clone(), false).await {
                     Ok(response) => {
diff --git a/testing/integration/src/tasks/tx/sender.rs b/testing/integration/src/tasks/tx/sender.rs
index 26a334a76d..d29e74373d 100644
--- a/testing/integration/src/tasks/tx/sender.rs
+++ b/testing/integration/src/tasks/tx/sender.rs
@@ -114,7 +114,7 @@ impl Task for TransactionSenderTask {
                 break;
             }
             prev_mempool_size = mempool_size;
-            sleep(Duration::from_secs(1)).await;
+            sleep(Duration::from_secs(2)).await;
         }
         if stopper == Stopper::Signal {
             warn!("Tx sender task signaling to stop");
diff --git a/utils/Cargo.toml b/utils/Cargo.toml
index a3002afabd..6e579ef0c0 100644
--- a/utils/Cargo.toml
+++ b/utils/Cargo.toml
@@ -9,8 +9,11 @@ include.workspace = true
 license.workspace = true
 repository.workspace = true

+[build-dependencies]
+duct = "0.13.7"
+
 [dependencies]
-parking_lot.workspace = true
+arc-swap.workspace = true
 async-channel.workspace = true
 borsh.workspace = true
 cfg-if.workspace = true
@@ -18,15 +21,21 @@ event-listener.workspace = true
 faster-hex.workspace = true
 ipnet.workspace = true
 itertools.workspace = true
+log.workspace = true
+num_cpus.workspace = true
+once_cell.workspace = true
+parking_lot.workspace = true
 serde.workspace = true
+sha2.workspace = true
 smallvec.workspace = true
+sysinfo.workspace = true
 thiserror.workspace = true
 triggered.workspace = true
 uuid.workspace = true
-log.workspace = true
 wasm-bindgen.workspace = true

 [target.'cfg(not(target_arch = "wasm32"))'.dependencies]
+mac_address.workspace = true
 rlimit.workspace = true

 [dev-dependencies]
@@ -41,3 +50,6 @@ rand.workspace = true
 [[bench]]
 name = "bench"
 harness = false
+
+[features]
+semaphore-trace = []
diff --git a/utils/alloc/Cargo.toml b/utils/alloc/Cargo.toml
index 4a3068f257..be07fd988a 100644
--- a/utils/alloc/Cargo.toml
+++ b/utils/alloc/Cargo.toml
@@ -10,13 +10,13 @@ include.workspace = true
 repository.workspace = true

 [target.'cfg(not(target_os = "macos"))'.dependencies]
-mimalloc = { version = "0.1.39", default-features = false, features = [
+mimalloc = { version = "0.1.43", default-features = false, features = [
     'override',
 ] }

 [target.'cfg(target_os = "macos")'.dependencies]
 # override is unstable in MacOS and is thus excluded
-mimalloc = { version = "0.1.39", default-features = false }
+mimalloc = { version = "0.1.43", default-features = false }

 [features]
 heap = []
diff --git a/utils/build.rs b/utils/build.rs
new file mode 100644
index 0000000000..705e922962
--- /dev/null
+++ b/utils/build.rs
@@ -0,0 +1,82 @@
+use duct::cmd;
+use std::env;
+use std::path::*;
+
+struct GitHead {
+    head_path: String,
+    head_ref_path: String,
+    full_hash: String,
+    short_hash: String,
+}
+
+fn main() {
+    let success = if env::var("RUSTY_KASPA_NO_COMMIT_HASH").is_err() {
+        if let Some(GitHead { head_path, head_ref_path, full_hash, short_hash }) = try_git_head() {
+            println!("cargo::rerun-if-changed={head_path}");
+            println!("cargo::rerun-if-changed={head_ref_path}");
+            println!("cargo:rustc-env=RUSTY_KASPA_GIT_FULL_COMMIT_HASH={full_hash}");
+            println!("cargo:rustc-env=RUSTY_KASPA_GIT_SHORT_COMMIT_HASH={short_hash}");
+            true
+        } else {
+            false
+        }
+    } else {
+        false
+    };
+
+    if !success {
+        println!("cargo:rustc-env=RUSTY_KASPA_GIT_FULL_COMMIT_HASH=");
+        println!("cargo:rustc-env=RUSTY_KASPA_GIT_SHORT_COMMIT_HASH=");
+    }
+}
+
+fn try_git_head() -> Option<GitHead> {
+    let cargo_manifest_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap());
+    let path = cargo_manifest_dir.as_path().parent()?;
+
+    let full_hash = cmd!("git", "rev-parse", "HEAD").dir(path).read().ok().map(|full_hash| full_hash.trim().to_string());
+
+    let short_hash = cmd!("git", "rev-parse", "--short", "HEAD").dir(path).read().ok().map(|short_hash| short_hash.trim().to_string());
+
+    let git_folder = path.join(".git");
+    if git_folder.is_dir() {
+        let head_path = git_folder.join("HEAD");
+        if head_path.is_file() {
+            let head = std::fs::read_to_string(&head_path).ok()?;
+            if head.starts_with("ref: ") {
+                let head_ref_path = head.trim_start_matches("ref: ");
+                let head_ref_path = git_folder.join(head_ref_path.trim());
+                if head_ref_path.is_file() {
+                    if let (Some(full_hash), Some(short_hash)) = (full_hash, short_hash) {
+                        return Some(GitHead {
+                            head_path: head_path.to_str().unwrap().to_string(),
+                            head_ref_path: head_ref_path.to_str().unwrap().to_string(),
+                            full_hash,
+                            short_hash,
+                        });
+                    } else if let Ok(full_hash) = std::fs::read_to_string(&head_ref_path) {
+                        let full_hash = full_hash.trim().to_string();
+                        let short_hash = if full_hash.len() >= 7 {
+                            // this is not actually correct, as the short hash has a variable
+                            // length based on commit short-hash collisions (which is
+                            // why we attempt to use `git rev-parse` above). But since this
+                            // is for reference purposes only, we can live with it.
+                            full_hash[0..7].to_string()
+                        } else {
+                            full_hash.to_string()
+                        };
+
+                        return Some(GitHead {
+                            head_path: head_path.to_str().unwrap().to_string(),
+                            head_ref_path: head_ref_path.to_str().unwrap().to_string(),
+                            full_hash,
+                            short_hash,
+                        });
+                    }
+                }
+            }
+        }
+    }
+
+    None
+}
diff --git a/utils/src/expiring_cache.rs b/utils/src/expiring_cache.rs
new file mode 100644
index 0000000000..175bea548a
--- /dev/null
+++ b/utils/src/expiring_cache.rs
@@ -0,0 +1,152 @@
+use arc_swap::ArcSwapOption;
+use std::{
+    future::Future,
+    sync::{
+        atomic::{AtomicBool, Ordering},
+        Arc,
+    },
+    time::{Duration, Instant},
+};
+
+struct Entry<T> {
+    item: T,
+    timestamp: Instant,
+}
+
+/// An expiring cache for a single object
+pub struct ExpiringCache<T> {
+    store: ArcSwapOption<Entry<T>>,
+    refetch: Duration,
+    expire: Duration,
+    fetching: AtomicBool,
+}
+
+impl<T: Clone> ExpiringCache<T> {
+    /// Constructs a new expiring cache where `refetch` is the amount of time required to trigger a data
+    /// refetch and `expire` is the time duration after which the stored item is guaranteed not to be returned.
+    ///
+    /// Panics if `refetch > expire`.
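Before the implementation continues: the refetch election in `get` below hinges on a single `AtomicBool` compare-exchange, so exactly one caller wins the right to refetch while the rest keep serving the cached value until `expire` elapses. The pattern in isolation (a std-only sketch; the names are illustrative):

```rust
use std::sync::atomic::{AtomicBool, Ordering};

// Returns true for exactly one caller at a time: the winner becomes the
// refetcher, everyone else falls back to the cached (not-too-old) value.
fn try_capture(fetching: &AtomicBool) -> bool {
    fetching.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst).is_ok()
}

fn main() {
    let fetching = AtomicBool::new(false);
    assert!(try_capture(&fetching)); // first caller captures the refetch task
    assert!(!try_capture(&fetching)); // concurrent callers lose the race
    fetching.store(false, Ordering::SeqCst); // the winner releases the flag when done
}
```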
+    pub fn new(refetch: Duration, expire: Duration) -> Self {
+        assert!(refetch <= expire);
+        Self { store: Default::default(), refetch, expire, fetching: Default::default() }
+    }
+
+    /// Returns the cached item or possibly fetches a new one using the `refetch_future` task. The
+    /// decision whether to refetch depends on the configured expiration and refetch times for this cache.
+    pub async fn get<F>(&self, refetch_future: F) -> T
+    where
+        F: Future<Output = T> + Send + 'static,
+        F::Output: Send + 'static,
+    {
+        let mut fetching = false;
+
+        {
+            let guard = self.store.load();
+            if let Some(entry) = guard.as_ref() {
+                if let Some(elapsed) = Instant::now().checked_duration_since(entry.timestamp) {
+                    if elapsed < self.refetch {
+                        return entry.item.clone();
+                    }
+                    // Refetch is triggered, attempt to capture the task
+                    fetching = self.fetching.compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst).is_ok();
+                    // If the fetch task was not captured and the expire time is not over yet, return the previous
+                    // value. Another thread is refetching the data, but we can still return the not-too-old value
+                    if !fetching && elapsed < self.expire {
+                        return entry.item.clone();
+                    }
+                }
+                // else -- in rare cases where now < timestamp, fall through to re-update the cache
+            }
+        }
+
+        // We reach here if either we are the refetching thread or the current data has fully expired
+        let new_item = refetch_future.await;
+        let timestamp = Instant::now();
+        // Update the store even if we were not in charge of refetching - let the last thread make the final update
+        self.store.store(Some(Arc::new(Entry { item: new_item.clone(), timestamp })));
+
+        if fetching {
+            let result = self.fetching.compare_exchange(true, false, Ordering::SeqCst, Ordering::SeqCst);
+            assert!(result.is_ok(), "refetching was captured")
+        }
+
+        new_item
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::ExpiringCache;
+    use std::time::Duration;
+    use tokio::join;
+
+    #[tokio::test]
+    #[ignore]
+    // Tested during development but can be sensitive to runtime machine times, so there's no point
+    // in keeping it part of CI. The test should be reactivated if the ExpiringCache struct changes.
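With `new` and `get` in place, a consumer might drive the cache like this (a usage sketch, assuming the `kaspa_utils::expiring_cache` module added above and a tokio runtime; the fetched constant stands in for something expensive such as an RPC round-trip):

```rust
use std::time::Duration;

use kaspa_utils::expiring_cache::ExpiringCache;

#[tokio::main]
async fn main() {
    // Items younger than 500ms are returned as-is; between 500ms and 1s a single
    // caller refetches while the rest see the stale value; past 1s all callers wait.
    let cache: ExpiringCache<u64> = ExpiringCache::new(Duration::from_millis(500), Duration::from_secs(1));

    let first = cache.get(async move { 42u64 }).await; // awaits the fetch future
    let second = cache.get(async move { panic!("served from cache instead") }).await;
    assert_eq!(first, second);
}
```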
+    async fn test_expiring_cache() {
+        let fetch = Duration::from_millis(500);
+        let expire = Duration::from_millis(1000);
+        let mid_point = Duration::from_millis(700);
+        let expire_point = Duration::from_millis(1200);
+        let cache: ExpiringCache<u64> = ExpiringCache::new(fetch, expire);
+
+        // Test two consecutive calls
+        let item1 = cache
+            .get(async move {
+                println!("first call");
+                1
+            })
+            .await;
+        assert_eq!(1, item1);
+        let item2 = cache
+            .get(async move {
+                // cache was just updated with item1, refetch should not be triggered
+                panic!("should not be called");
+            })
+            .await;
+        assert_eq!(1, item2);
+
+        // Test two calls after refetch point
+        // Sleep until after the refetch point but before expire
+        tokio::time::sleep(mid_point).await;
+        let call3 = cache.get(async move {
+            println!("third call before sleep");
+            // keep this refetch busy so that call4 still gets the first item
+            tokio::time::sleep(Duration::from_millis(100)).await;
+            println!("third call after sleep");
+            3
+        });
+        let call4 = cache.get(async move {
+            // refetch is captured by call3 and we should be before expire
+            panic!("should not be called");
+        });
+        let (item3, item4) = join!(call3, call4);
+        println!("item 3: {}, item 4: {}", item3, item4);
+        assert_eq!(3, item3);
+        assert_eq!(1, item4);
+
+        // Test 2 calls after expire
+        tokio::time::sleep(expire_point).await;
+        let call5 = cache.get(async move {
+            println!("5th call before sleep");
+            tokio::time::sleep(Duration::from_millis(100)).await;
+            println!("5th call after sleep");
+            5
+        });
+        let call6 = cache.get(async move { 6 });
+        let (item5, item6) = join!(call5, call6);
+        println!("item 5: {}, item 6: {}", item5, item6);
+        assert_eq!(5, item5);
+        assert_eq!(6, item6);
+
+        let item7 = cache
+            .get(async move {
+                // cache was just updated with item5, refetch should not be triggered
+                panic!("should not be called");
+            })
+            .await;
+        // call 5 finished after call 6
+        assert_eq!(5, item7);
+    }
+}
diff --git a/utils/src/git.rs b/utils/src/git.rs
new file mode 100644
index 0000000000..ca62da8a7c
--- /dev/null
+++ b/utils/src/git.rs
@@ -0,0 +1,53 @@
+use crate::hex::FromHex;
+use std::fmt::Display;
+
+const VERSION: &str = env!("CARGO_PKG_VERSION");
+
+// generated by `build.rs`
+const FULL_HASH: &str = env!("RUSTY_KASPA_GIT_FULL_COMMIT_HASH");
+const SHORT_HASH: &str = env!("RUSTY_KASPA_GIT_SHORT_COMMIT_HASH");
+
+/// Check if the codebase is built under a Git repository
+/// and return the hash of the current commit as `Vec<u8>`.
+pub fn hash() -> Option<Vec<u8>> {
+    FromHex::from_hex(FULL_HASH).ok()
+}
+
+pub fn short_hash() -> Option<Vec<u8>> {
+    FromHex::from_hex(SHORT_HASH).ok()
+}
+
+pub fn hash_str() -> Option<&'static str> {
+    #[allow(clippy::const_is_empty)]
+    (!FULL_HASH.is_empty()).then_some(FULL_HASH)
+}
+
+pub fn short_hash_str() -> Option<&'static str> {
+    #[allow(clippy::const_is_empty)]
+    (!SHORT_HASH.is_empty()).then_some(SHORT_HASH)
+}
+
+pub fn version() -> String {
+    if let Some(short_hash) = short_hash_str() {
+        format!("v{VERSION}-{short_hash}")
+    } else {
+        format!("v{VERSION}")
+    }
+}
+
+pub fn with_short_hash<V>(version: V) -> impl Display
+where
+    V: Display,
+{
+    if let Some(short_hash) = short_hash_str() {
+        format!("{version}-{short_hash}")
+    } else {
+        version.to_string()
+    }
+}
+
+#[test]
+fn test_git_hash() {
+    println!("FULL_HASH: {:?}", hash_str());
+    println!("SHORT_HASH: {:?}", short_hash_str());
+}
diff --git a/utils/src/lib.rs b/utils/src/lib.rs
index 956e3a2b9d..3d1bb54384 100644
--- a/utils/src/lib.rs
+++ b/utils/src/lib.rs
@@ -1,7 +1,14 @@
+//!
+//!
# Kaspa Utilities +//! +//! General purpose utilities and various type extensions used across the Rusty Kaspa codebase. +//! + pub mod any; pub mod arc; pub mod binary_heap; pub mod channel; +pub mod expiring_cache; pub mod hashmap; pub mod hex; pub mod iter; @@ -67,6 +74,7 @@ pub mod as_slice; /// assert_eq!(test_struct, from_json); /// ``` pub mod serde_bytes; +pub mod serde_bytes_optional; /// # Examples /// @@ -190,4 +198,9 @@ pub mod sync; pub mod triggers; pub mod vec; +pub mod git; + +#[cfg(not(target_arch = "wasm32"))] pub mod fd_budget; +#[cfg(not(target_arch = "wasm32"))] +pub mod sysinfo; diff --git a/utils/src/mem_size.rs b/utils/src/mem_size.rs index c7963a40c8..449f649bc0 100644 --- a/utils/src/mem_size.rs +++ b/utils/src/mem_size.rs @@ -2,7 +2,7 @@ //! estimate sizes of run-time objects in memory, including deep heap allocations. See //! struct-level docs for more details. -use std::{collections::HashSet, mem::size_of, sync::Arc}; +use std::{collections::HashSet, sync::Arc}; use parking_lot::RwLock; diff --git a/utils/src/networking.rs b/utils/src/networking.rs index bb38b4d046..b7a3397780 100644 --- a/utils/src/networking.rs +++ b/utils/src/networking.rs @@ -34,7 +34,7 @@ const TS_IP_ADDRESS: &'static str = r#" /// A bucket based on an ip's prefix bytes. /// for ipv4 it consists of 6 leading zero bytes, and the first two octets, /// for ipv6 it consists of the first 8 octets, -/// encoded into a big endian u64. +/// encoded into a big endian u64. #[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)] pub struct PrefixBucket(u64); @@ -179,7 +179,7 @@ impl Deref for IpAddress { // impl BorshSerialize for IpAddress { - fn serialize(&self, writer: &mut W) -> ::core::result::Result<(), borsh::maybestd::io::Error> { + fn serialize(&self, writer: &mut W) -> ::core::result::Result<(), std::io::Error> { let variant_idx: u8 = match self.0 { IpAddr::V4(..) => 0u8, IpAddr::V6(..) 
=> 1u8, @@ -198,20 +198,20 @@ impl BorshSerialize for IpAddress { } impl BorshDeserialize for IpAddress { - fn deserialize(buf: &mut &[u8]) -> ::core::result::Result { - let variant_idx: u8 = BorshDeserialize::deserialize(buf)?; + fn deserialize_reader(reader: &mut R) -> ::core::result::Result { + let variant_idx: u8 = BorshDeserialize::deserialize_reader(reader)?; let ip = match variant_idx { 0u8 => { - let octets: [u8; 4] = BorshDeserialize::deserialize(buf)?; + let octets: [u8; 4] = BorshDeserialize::deserialize_reader(reader)?; IpAddr::V4(Ipv4Addr::from(octets)) } 1u8 => { - let octets: [u8; 16] = BorshDeserialize::deserialize(buf)?; + let octets: [u8; 16] = BorshDeserialize::deserialize_reader(reader)?; IpAddr::V6(Ipv6Addr::from(octets)) } _ => { - let msg = borsh::maybestd::format!("Unexpected variant index: {:?}", variant_idx); - return Err(borsh::maybestd::io::Error::new(borsh::maybestd::io::ErrorKind::InvalidInput, msg)); + let msg = format!("Unexpected variant index: {:?}", variant_idx); + return Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, msg)); } }; Ok(Self(ip)) @@ -271,10 +271,14 @@ pub struct ContextualNetAddress { } impl ContextualNetAddress { - fn new(ip: IpAddress, port: Option) -> Self { + pub fn new(ip: IpAddress, port: Option) -> Self { Self { ip, port } } + pub fn has_port(&self) -> bool { + self.port.is_some() + } + pub fn normalize(&self, default_port: u16) -> NetAddress { NetAddress::new(self.ip, self.port.unwrap_or(default_port)) } @@ -286,6 +290,14 @@ impl ContextualNetAddress { pub fn loopback() -> Self { Self { ip: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)).into(), port: None } } + + pub fn port_not_specified(&self) -> bool { + self.port.is_none() + } + + pub fn with_port(&self, port: u16) -> Self { + Self { ip: self.ip, port: Some(port) } + } } impl From for ContextualNetAddress { @@ -381,15 +393,15 @@ impl Deref for PeerId { // impl BorshSerialize for PeerId { - fn serialize(&self, writer: &mut W) -> ::core::result::Result<(), borsh::maybestd::io::Error> { + fn serialize(&self, writer: &mut W) -> ::core::result::Result<(), std::io::Error> { borsh::BorshSerialize::serialize(&self.0.as_bytes(), writer)?; Ok(()) } } impl BorshDeserialize for PeerId { - fn deserialize(buf: &mut &[u8]) -> ::core::result::Result { - let bytes: uuid::Bytes = BorshDeserialize::deserialize(buf)?; + fn deserialize_reader(reader: &mut R) -> ::core::result::Result { + let bytes: uuid::Bytes = BorshDeserialize::deserialize_reader(reader)?; Ok(Self::new(Uuid::from_bytes(bytes))) } } @@ -403,12 +415,12 @@ mod tests { fn test_ip_address_borsh() { // Tests for IpAddress Borsh ser/deser since we manually implemented them let ip: IpAddress = Ipv4Addr::from([44u8; 4]).into(); - let bin = ip.try_to_vec().unwrap(); + let bin = borsh::to_vec(&ip).unwrap(); let ip2: IpAddress = BorshDeserialize::try_from_slice(&bin).unwrap(); assert_eq!(ip, ip2); let ip: IpAddress = Ipv6Addr::from([66u8; 16]).into(); - let bin = ip.try_to_vec().unwrap(); + let bin = borsh::to_vec(&ip).unwrap(); let ip2: IpAddress = BorshDeserialize::try_from_slice(&bin).unwrap(); assert_eq!(ip, ip2); } @@ -417,12 +429,12 @@ mod tests { fn test_peer_id_borsh() { // Tests for PeerId Borsh ser/deser since we manually implemented them let id: PeerId = Uuid::new_v4().into(); - let bin = id.try_to_vec().unwrap(); + let bin = borsh::to_vec(&id).unwrap(); let id2: PeerId = BorshDeserialize::try_from_slice(&bin).unwrap(); assert_eq!(id, id2); let id: PeerId = Uuid::from_bytes([123u8; 16]).into(); - let bin = 
id.try_to_vec().unwrap(); + let bin = borsh::to_vec(&id).unwrap(); let id2: PeerId = BorshDeserialize::try_from_slice(&bin).unwrap(); assert_eq!(id, id2); } diff --git a/utils/src/option.rs b/utils/src/option.rs index 9ccf96c90a..3e619f46fa 100644 --- a/utils/src/option.rs +++ b/utils/src/option.rs @@ -1,9 +1,10 @@ pub trait OptionExtensions { - fn is_none_or(&self, f: impl FnOnce(&T) -> bool) -> bool; + /// Substitute for unstable [`Option::is_none_or`] + fn is_none_or_ex(&self, f: impl FnOnce(&T) -> bool) -> bool; } impl OptionExtensions for Option { - fn is_none_or(&self, f: impl FnOnce(&T) -> bool) -> bool { + fn is_none_or_ex(&self, f: impl FnOnce(&T) -> bool) -> bool { match self { Some(v) => f(v), None => true, diff --git a/utils/src/serde_bytes_optional.rs b/utils/src/serde_bytes_optional.rs new file mode 100644 index 0000000000..3087376672 --- /dev/null +++ b/utils/src/serde_bytes_optional.rs @@ -0,0 +1,111 @@ +pub use de::Deserialize; +pub use ser::Serialize; + +pub fn serialize(bytes: &T, serializer: S) -> Result +where + T: ?Sized + Serialize, + S: serde::Serializer, +{ + Serialize::serialize(bytes, serializer) +} + +pub fn deserialize<'de, T, D>(deserializer: D) -> Result +where + T: Deserialize<'de>, + D: serde::Deserializer<'de>, +{ + Deserialize::deserialize(deserializer) +} + +mod de { + use std::fmt::Display; + + pub trait Deserialize<'de>: Sized { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>; + } + + impl<'de, T: crate::serde_bytes::Deserialize<'de>> Deserialize<'de> for Option + where + >::Error: Display, + { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + struct OptionalVisitor { + out: std::marker::PhantomData, + } + + impl<'de, T> serde::de::Visitor<'de> for OptionalVisitor + where + T: crate::serde_bytes::Deserialize<'de>, + { + type Value = Option; + + fn expecting(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + f.write_str("optional string, str or slice, vec of bytes") + } + + fn visit_unit(self) -> Result { + Ok(None) + } + + fn visit_none(self) -> Result { + Ok(None) + } + + fn visit_some(self, deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + T::deserialize(deserializer).map(Some) + } + } + + let visitor = OptionalVisitor { out: std::marker::PhantomData }; + deserializer.deserialize_option(visitor) + } + } +} + +mod ser { + use serde::Serializer; + + pub trait Serialize { + #[allow(missing_docs)] + fn serialize(&self, serializer: S) -> Result + where + S: Serializer; + } + + impl Serialize for Option + where + T: crate::serde_bytes::Serialize + std::convert::AsRef<[u8]>, + { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + struct AsBytes(T); + + impl serde::Serialize for AsBytes + where + T: crate::serde_bytes::Serialize, + { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + crate::serde_bytes::Serialize::serialize(&self.0, serializer) + } + } + + match self { + Some(b) => serializer.serialize_some(&AsBytes(b)), + None => serializer.serialize_none(), + } + } + } +} diff --git a/utils/src/sync/mod.rs b/utils/src/sync/mod.rs index 40fb147cb3..14afe79772 100644 --- a/utils/src/sync/mod.rs +++ b/utils/src/sync/mod.rs @@ -1,2 +1,7 @@ pub mod rwlock; pub(crate) mod semaphore; + +#[cfg(feature = "semaphore-trace")] +pub fn semaphore_module_path() -> &'static str { + semaphore::get_module_path() +} diff --git a/utils/src/sync/semaphore.rs b/utils/src/sync/semaphore.rs index 4b94e8f2f5..c0ffec8e20 100644 
--- a/utils/src/sync/semaphore.rs +++ b/utils/src/sync/semaphore.rs @@ -4,6 +4,64 @@ use std::{ time::Duration, }; +#[cfg(feature = "semaphore-trace")] +mod trace { + use super::*; + use log::debug; + use once_cell::sync::Lazy; + use std::sync::atomic::AtomicU64; + use std::time::SystemTime; + + static SYS_START: Lazy = Lazy::new(SystemTime::now); + + #[inline] + pub(super) fn sys_now() -> u64 { + SystemTime::now().duration_since(*SYS_START).unwrap_or_default().as_micros() as u64 + } + + #[derive(Debug, Default)] + pub struct TraceInner { + readers_start: AtomicU64, + readers_time: AtomicU64, + log_time: AtomicU64, + log_value: AtomicU64, + } + + impl TraceInner { + pub(super) fn mark_readers_start(&self) { + self.readers_start.store(sys_now(), Ordering::Relaxed); + } + + pub(super) fn mark_readers_end(&self) { + let start = self.readers_start.load(Ordering::Relaxed); + let now = sys_now(); + if start < now { + let readers_time = self.readers_time.fetch_add(now - start, Ordering::Relaxed) + now - start; + let log_time = self.log_time.load(Ordering::Relaxed); + if log_time + (Duration::from_secs(10).as_micros() as u64) < now { + let log_value = self.log_value.load(Ordering::Relaxed); + debug!( + "Semaphore: log interval: {:?}, readers time: {:?}, fraction: {:.4}", + Duration::from_micros(now - log_time), + Duration::from_micros(readers_time - log_value), + (readers_time - log_value) as f64 / (now - log_time) as f64 + ); + self.log_value.store(readers_time, Ordering::Relaxed); + self.log_time.store(now, Ordering::Relaxed); + } + } + } + } +} + +#[cfg(feature = "semaphore-trace")] +use trace::*; + +#[cfg(feature = "semaphore-trace")] +pub(crate) fn get_module_path() -> &'static str { + module_path!() +} + /// A low-level non-fair semaphore. The semaphore is non-fair in the sense that clients acquiring /// a lower number of permits might get their allocation before earlier clients which requested more /// permits -- if the semaphore can provide the lower allocation but not the larger. This non-fairness @@ -15,13 +73,28 @@ use std::{ pub(crate) struct Semaphore { counter: AtomicUsize, signal: Event, + #[cfg(feature = "semaphore-trace")] + trace_inner: TraceInner, } impl Semaphore { pub const MAX_PERMITS: usize = usize::MAX; - pub const fn new(available_permits: usize) -> Semaphore { - Semaphore { counter: AtomicUsize::new(available_permits), signal: Event::new() } + pub fn new(available_permits: usize) -> Semaphore { + cfg_if::cfg_if! { + if #[cfg(feature = "semaphore-trace")] { + Semaphore { + counter: AtomicUsize::new(available_permits), + signal: Event::new(), + trace_inner: Default::default(), + } + } else { + Semaphore { + counter: AtomicUsize::new(available_permits), + signal: Event::new(), + } + } + } } /// Tries to acquire `permits` slots from the semaphore. 
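For context before the acquire/release hooks below: the tracing counts readers by the convention of the rwlock built on top of this semaphore, where a reader holds exactly one permit and a writer holds all `MAX_PERMITS` at once. The same convention can be illustrated with tokio's semaphore (a sketch, not this crate's API; `MAX_READERS` is an arbitrary stand-in for `MAX_PERMITS`):

```rust
use std::sync::Arc;
use tokio::sync::Semaphore;

const MAX_READERS: usize = 64; // stand-in for Semaphore::MAX_PERMITS

#[tokio::main]
async fn main() {
    let sem = Arc::new(Semaphore::new(MAX_READERS));

    // Reader: one permit each, so up to MAX_READERS readers run in parallel.
    let read_permit = sem.clone().acquire_owned().await.unwrap();
    drop(read_permit); // releasing the last reader permit is the "readers end" event

    // Writer: all permits at once, mutually exclusive with readers and other writers.
    let write_permits = sem.acquire_many(MAX_READERS as u32).await.unwrap();
    drop(write_permits);
}
```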
Upon success, returns the acquired slot @@ -33,7 +106,14 @@ impl Semaphore { } match self.counter.compare_exchange_weak(count, count - permits, Ordering::AcqRel, Ordering::Acquire) { - Ok(_) => return Some(count), + Ok(_) => { + #[cfg(feature = "semaphore-trace")] + if permits == 1 && count == Self::MAX_PERMITS { + // permits == 1 indicates a reader, count == Self::MAX_PERMITS indicates it is the first reader + self.trace_inner.mark_readers_start(); + } + return Some(count); + } Err(c) => count = c, } } @@ -75,6 +155,12 @@ impl Semaphore { /// Returns the released slot pub fn release(&self, permits: usize) -> usize { let slot = self.counter.fetch_add(permits, Ordering::AcqRel) + permits; + + #[cfg(feature = "semaphore-trace")] + if permits == 1 && slot == Self::MAX_PERMITS { + // permits == 1 indicates a reader, slot == Self::MAX_PERMITS indicates it is the last reader + self.trace_inner.mark_readers_end(); + } self.signal.notify(permits); slot } diff --git a/utils/src/sysinfo.rs b/utils/src/sysinfo.rs new file mode 100644 index 0000000000..ba6f25110b --- /dev/null +++ b/utils/src/sysinfo.rs @@ -0,0 +1,127 @@ +use crate::fd_budget; +use crate::git; +use crate::hex::ToHex; +use sha2::{Digest, Sha256}; +use std::fs::{read_to_string, File}; +use std::io::Read; +use std::path::PathBuf; +// use std::fs::read_to_string; +use std::sync::OnceLock; + +static SYSTEM_INFO: OnceLock = OnceLock::new(); + +#[derive(Clone)] +pub struct SystemInfo { + /// unique system (machine) identifier + pub system_id: Option>, + /// full git commit hash + pub git_hash: Option>, + /// short git commit hash + pub git_short_hash: Option>, + /// crate (workspace) version + pub version: String, + /// number of physical CPU cores + pub cpu_physical_cores: u16, + /// total system memory in bytes + pub total_memory: u64, + /// file descriptor limit of the current process + pub fd_limit: u32, + /// maximum number of sockets per CPU core + pub proxy_socket_limit_per_cpu_core: Option, +} + +// provide hex encoding for system_id, git_hash, and git_short_hash +impl std::fmt::Debug for SystemInfo { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("SystemInfo") + .field("system_id", &self.system_id.as_ref().map(|id| id.to_hex())) + .field("git_hash", &self.git_hash.as_ref().map(|hash| hash.to_hex())) + .field("git_short_hash", &self.git_short_hash.as_ref().map(|hash| hash.to_hex())) + .field("version", &self.version) + .field("cpu_physical_cores", &self.cpu_physical_cores) + .field("total_memory", &self.total_memory) + .field("fd_limit", &self.fd_limit) + .field("proxy_socket_limit_per_cpu_core", &self.proxy_socket_limit_per_cpu_core) + .finish() + } +} + +impl Default for SystemInfo { + fn default() -> Self { + let system_info = SYSTEM_INFO.get_or_init(|| { + let mut system = sysinfo::System::new(); + system.refresh_memory(); + let cpu_physical_cores = num_cpus::get() as u16; + let total_memory = system.total_memory(); + let fd_limit = fd_budget::limit() as u32; + let system_id = Self::try_system_id(); + let git_hash = git::hash(); + let git_short_hash = git::short_hash(); + let version = git::version(); + let proxy_socket_limit_per_cpu_core = Self::try_proxy_socket_limit_per_cpu_core(); + + SystemInfo { + system_id, + git_hash, + git_short_hash, + version, + cpu_physical_cores, + total_memory, + fd_limit, + proxy_socket_limit_per_cpu_core, + } + }); + (*system_info).clone() + } +} + +impl SystemInfo { + /// Obtain a unique system (machine) identifier. 
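The `semaphore-trace` hooks above fire only at the boundaries of a read burst: a reader holds exactly one permit, so the counter equals `MAX_PERMITS` exactly when no reader is inside. A standalone toy model of that invariant (the real semaphore uses a compare-exchange loop rather than `fetch_sub`):

    use std::sync::atomic::{AtomicUsize, Ordering};

    const MAX_PERMITS: usize = usize::MAX;

    fn main() {
        let counter = AtomicUsize::new(MAX_PERMITS);

        // First reader in: observes the full permit count, starts the clock.
        let before = counter.fetch_sub(1, Ordering::AcqRel);
        assert_eq!(before, MAX_PERMITS); // -> mark_readers_start()

        // Last reader out: restores the full permit count, stops the clock.
        let after = counter.fetch_add(1, Ordering::AcqRel) + 1;
        assert_eq!(after, MAX_PERMITS); // -> mark_readers_end()
    }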
+ fn try_system_id() -> Option> { + let some_id = if let Ok(mut file) = File::open("/etc/machine-id") { + // fetch the system id from /etc/machine-id + let mut machine_id = String::new(); + file.read_to_string(&mut machine_id).ok(); + machine_id.trim().to_string() + } else if let Ok(Some(mac)) = mac_address::get_mac_address() { + // fallback on the mac address + mac.to_string().trim().to_string() + } else { + // 🤷 + return None; + }; + let mut sha256 = Sha256::default(); + sha256.update(some_id.as_bytes()); + Some(sha256.finalize().to_vec()) + } + + fn try_proxy_socket_limit_per_cpu_core() -> Option { + let nginx_config_path = PathBuf::from("/etc/nginx/nginx.conf"); + if nginx_config_path.exists() { + read_to_string(nginx_config_path) + .ok() + .and_then(|content| content.lines().find(|line| line.trim().starts_with("worker_connections")).map(String::from)) + .and_then(|line| line.split_whitespace().nth(1).map(|v| v.replace(";", ""))) + .and_then(|value| value.parse::().ok()) + } else { + None + } + } +} + +impl AsRef for SystemInfo { + fn as_ref(&self) -> &SystemInfo { + self + } +} + +// #[cfg(test)] +// mod tests { +// use super::*; + +// #[test] +// fn test_system_info() { +// let system_info = SystemInfo::default(); +// println!("{:#?}", system_info); +// } +// } diff --git a/utils/src/vec.rs b/utils/src/vec.rs index 01bd59b9e6..fa1d67a279 100644 --- a/utils/src/vec.rs +++ b/utils/src/vec.rs @@ -4,6 +4,10 @@ pub trait VecExtensions { /// Inserts the provided `value` at `index` while swapping the item at index to the end of the container fn swap_insert(&mut self, index: usize, value: T); + + /// Merges two containers one into the other and returns the result. The method is identical + /// to [`Vec::append`] but can be used more ergonomically in a fluent calling fashion + fn merge(self, other: Self) -> Self; } impl VecExtensions for Vec { @@ -19,4 +23,9 @@ impl VecExtensions for Vec { let loc = self.len() - 1; self.swap(index, loc); } + + fn merge(mut self, mut other: Self) -> Self { + self.append(&mut other); + self + } } diff --git a/utils/tower/Cargo.toml b/utils/tower/Cargo.toml index 2a2f5f7962..010f8843ba 100644 --- a/utils/tower/Cargo.toml +++ b/utils/tower/Cargo.toml @@ -14,9 +14,11 @@ cfg-if.workspace = true log.workspace = true [target.'cfg(not(target_arch = "wasm32"))'.dependencies] +bytes.workspace = true futures.workspace = true -hyper.workspace = true +http-body.workspace = true +http-body-util.workspace = true pin-project-lite.workspace = true tokio.workspace = true tower-http.workspace = true -tower.workspace = true \ No newline at end of file +tower.workspace = true diff --git a/utils/tower/src/middleware.rs b/utils/tower/src/middleware.rs index 727d8ca47d..8d0fa77c38 100644 --- a/utils/tower/src/middleware.rs +++ b/utils/tower/src/middleware.rs @@ -1,9 +1,6 @@ -use futures::ready; -use hyper::{ - body::{Bytes, HttpBody, SizeHint}, - HeaderMap, -}; -use log::*; +use bytes::Bytes; +use http_body::{Body, Frame, SizeHint}; +use log::trace; use pin_project_lite::pin_project; use std::{ pin::Pin, @@ -11,11 +8,12 @@ use std::{ atomic::{AtomicUsize, Ordering}, Arc, }, - task::{Context, Poll}, + task::{ready, Context, Poll}, }; + +pub use http_body_util::BodyExt; pub use tower::ServiceBuilder; -pub use tower_http::map_request_body::MapRequestBodyLayer; -pub use tower_http::map_response_body::MapResponseBodyLayer; +pub use tower_http::{map_request_body::MapRequestBodyLayer, map_response_body::MapResponseBodyLayer}; pin_project! 
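For the `merge` extension added to `VecExtensions` above, a short usage sketch, assuming the trait is in scope from `kaspa_utils::vec`. Unlike `Vec::append`, which mutates in place and returns `()`, `merge` consumes both vectors and returns the result, so it composes inside expression chains:

    use kaspa_utils::vec::VecExtensions;

    fn main() {
        // Fluent concatenation; `merge` takes both vectors by value.
        let combined = vec![1, 2].merge(vec![3, 4]);
        assert_eq!(combined, vec![1, 2, 3, 4]);
    }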
{ pub struct CountBytesBody { @@ -31,32 +29,29 @@ impl CountBytesBody { } } -impl HttpBody for CountBytesBody +impl Body for CountBytesBody where - B: HttpBody + Default, + B: Body + Default, { type Data = B::Data; type Error = B::Error; - fn poll_data(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>> { + fn poll_frame(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll, Self::Error>>> { let this = self.project(); - let counter: Arc = this.counter.clone(); - match ready!(this.inner.poll_data(cx)) { - Some(Ok(chunk)) => { - debug!("[SIZE MW] response body chunk size = {}", chunk.len()); - let _previous = counter.fetch_add(chunk.len(), Ordering::Relaxed); - debug!("[SIZE MW] total count: {}", _previous); + match ready!(this.inner.poll_frame(cx)) { + Some(Ok(frame)) => { + if let Some(chunk) = frame.data_ref() { + trace!("[SIZE MW] body chunk size = {}", chunk.len()); + let _previous = this.counter.fetch_add(chunk.len(), Ordering::Relaxed); + trace!("[SIZE MW] total count: {}", _previous); + } - Poll::Ready(Some(Ok(chunk))) + Poll::Ready(Some(Ok(frame))) } x => Poll::Ready(x), } } - fn poll_trailers(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll, Self::Error>> { - self.project().inner.poll_trailers(cx) - } - fn is_end_stream(&self) -> bool { self.inner.is_end_stream() } @@ -68,43 +63,9 @@ where impl Default for CountBytesBody where - B: HttpBody + Default, + B: Body + Default, { fn default() -> Self { Self { inner: Default::default(), counter: Default::default() } } } - -pub fn measure_request_body_size_layer( - bytes_sent_counter: Arc, - f: F, -) -> MapRequestBodyLayer B2 + Clone> -where - B1: HttpBody + Unpin + Send + 'static, - ::Error: Send, - F: Fn(hyper::body::Body) -> B2 + Clone, -{ - MapRequestBodyLayer::new(move |mut body: B1| { - let (mut tx, new_body) = hyper::Body::channel(); - let bytes_sent_counter = bytes_sent_counter.clone(); - tokio::spawn(async move { - while let Some(Ok(chunk)) = body.data().await { - debug!("[SIZE MW] request body chunk size = {}", chunk.len()); - let _previous = bytes_sent_counter.fetch_add(chunk.len(), Ordering::Relaxed); - debug!("[SIZE MW] total count: {}", _previous); - if let Err(_err) = tx.send_data(chunk).await { - // error can occurs only if the channel is already closed - debug!("[SIZE MW] error sending data: {}", _err) - } - } - - if let Ok(Some(trailers)) = body.trailers().await { - if let Err(_err) = tx.send_trailers(trailers).await { - // error can occurs only if the channel is already closed - debug!("[SIZE MW] error sending trailers: {}", _err) - } - } - }); - f(new_body) - }) -} diff --git a/wallet/bip32/Cargo.toml b/wallet/bip32/Cargo.toml index b4eb982024..efc31baf48 100644 --- a/wallet/bip32/Cargo.toml +++ b/wallet/bip32/Cargo.toml @@ -31,6 +31,7 @@ thiserror.workspace = true wasm-bindgen.workspace = true workflow-wasm.workspace = true zeroize.workspace = true +kaspa-consensus-core.workspace = true [dev-dependencies] -faster-hex.workspace = true \ No newline at end of file +faster-hex.workspace = true diff --git a/wallet/bip32/src/address_type.rs b/wallet/bip32/src/address_type.rs index 63ea00361d..3aecfdb754 100644 --- a/wallet/bip32/src/address_type.rs +++ b/wallet/bip32/src/address_type.rs @@ -1,5 +1,10 @@ +//! +//! Address type (`Receive` or `Change`) used in HD wallet address derivation. +//! + use std::fmt; +/// Address type used in HD wallet address derivation. 
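Under http-body 1.0 there is no separate `poll_trailers`: data chunks and trailers both arrive as `Frame`s through `poll_frame`, which is why the counting logic above filters with `frame.data_ref()`. A sketch of draining a body frame by frame with `http_body_util::BodyExt`, assuming an async (tokio) context:

    use bytes::Bytes;
    use http_body_util::{BodyExt, Full};

    async fn count_bytes() -> usize {
        let mut body = Full::new(Bytes::from_static(b"hello"));
        let mut total = 0;
        // Trailer frames return None from data_ref() and are skipped.
        while let Some(Ok(frame)) = body.frame().await {
            if let Some(chunk) = frame.data_ref() {
                total += chunk.len();
            }
        }
        total
    }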
pub enum AddressType { Receive = 0, Change, diff --git a/wallet/bip32/src/derivation_path.rs b/wallet/bip32/src/derivation_path.rs index 414bf2bf91..6ef47703c0 100644 --- a/wallet/bip32/src/derivation_path.rs +++ b/wallet/bip32/src/derivation_path.rs @@ -6,6 +6,7 @@ use core::{ fmt::{self, Display}, str::FromStr, }; +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; /// Prefix for all derivation paths. const PREFIX: &str = "m"; @@ -16,6 +17,45 @@ pub struct DerivationPath { path: Vec, } +impl<'de> Deserialize<'de> for DerivationPath { + fn deserialize(deserializer: D) -> std::result::Result + where + D: Deserializer<'de>, + { + struct DerivationPathVisitor; + impl<'de> de::Visitor<'de> for DerivationPathVisitor { + type Value = DerivationPath; + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + formatter.write_str("a string containing list of permissions separated by a '+'") + } + + fn visit_str(self, value: &str) -> std::result::Result + where + E: de::Error, + { + DerivationPath::from_str(value).map_err(|err| de::Error::custom(err.to_string())) + } + fn visit_borrowed_str(self, v: &'de str) -> std::result::Result + where + E: de::Error, + { + DerivationPath::from_str(v).map_err(|err| de::Error::custom(err.to_string())) + } + } + + deserializer.deserialize_str(DerivationPathVisitor) + } +} + +impl Serialize for DerivationPath { + fn serialize(&self, serializer: S) -> std::result::Result + where + S: Serializer, + { + serializer.serialize_str(&self.to_string()) + } +} + impl DerivationPath { /// Iterate over the [`ChildNumber`] values in this derivation path. pub fn iter(&self) -> impl Iterator + '_ { diff --git a/wallet/bip32/src/lib.rs b/wallet/bip32/src/lib.rs index a406067f6f..1926728c43 100644 --- a/wallet/bip32/src/lib.rs +++ b/wallet/bip32/src/lib.rs @@ -32,6 +32,8 @@ pub use xkey::ExtendedKey; pub use xprivate_key::ExtendedPrivateKey; pub use xpublic_key::ExtendedPublicKey; +/// Extension for [`secp256k1::SecretKey`] that provides access +/// to [`secp256k1::PublicKey`] and the public key string representation. pub trait SecretKeyExt { fn get_public_key(&self) -> secp256k1::PublicKey; fn as_str(&self, attrs: ExtendedKeyAttrs, prefix: Prefix) -> Zeroizing; diff --git a/wallet/bip32/src/mnemonic/mod.rs b/wallet/bip32/src/mnemonic/mod.rs index 611d88b050..89262b7e25 100644 --- a/wallet/bip32/src/mnemonic/mod.rs +++ b/wallet/bip32/src/mnemonic/mod.rs @@ -7,7 +7,6 @@ mod bits; mod language; mod phrase; -//#[cfg(feature = "bip39")] pub(crate) mod seed; pub use self::{language::Language, phrase::Mnemonic, phrase::WordCount}; diff --git a/wallet/bip32/src/mnemonic/phrase.rs b/wallet/bip32/src/mnemonic/phrase.rs index 76450590a6..eaa7e7096e 100644 --- a/wallet/bip32/src/mnemonic/phrase.rs +++ b/wallet/bip32/src/mnemonic/phrase.rs @@ -23,7 +23,7 @@ pub type Entropy32 = [u8; KEY_SIZE]; pub type Entropy16 = [u8; 16]; /// Word count for a BIP39 mnemonic phrase. Identifies mnemonic as 12 or 24 word variants. 
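The `DerivationPath` serde implementations above round-trip through the string form: `Display` on the way out, `FromStr` on the way in. A sketch of the round trip, assuming `serde_json` is available as a dev-dependency:

    use kaspa_bip32::DerivationPath;
    use std::str::FromStr;

    fn main() {
        let path = DerivationPath::from_str("m/44'/111111'/0'/0/0").unwrap();
        let json = serde_json::to_string(&path).unwrap();
        assert_eq!(json, r#""m/44'/111111'/0'/0/0""#);
        // Deserialization parses the same string back via FromStr.
        let back: DerivationPath = serde_json::from_str(&json).unwrap();
        assert_eq!(back.to_string(), path.to_string());
    }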
-#[derive(Default, Clone, Copy, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[derive(Default, PartialEq, Eq, Clone, Copy, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] #[serde(rename_all = "kebab-case")] pub enum WordCount { #[default] @@ -85,8 +85,8 @@ impl Mnemonic { } #[wasm_bindgen(js_name = random)] - pub fn create_random_js(word_count: JsValue) -> Result { - let word_count = word_count.as_f64().unwrap_or(24.0) as usize; + pub fn create_random_js(word_count: Option) -> Result { + let word_count = word_count.unwrap_or(24) as usize; Mnemonic::random(word_count.try_into()?, Default::default()) } @@ -229,8 +229,6 @@ impl Mnemonic { } /// Convert this mnemonic phrase into the BIP39 seed value. - //#[cfg(feature = "bip39")] - //#[cfg_attr(docsrs, doc(cfg(feature = "bip39")))] pub fn to_seed(&self, password: &str) -> Seed { let salt = Zeroizing::new(format!("mnemonic{password}")); let mut seed = [0u8; Seed::SIZE]; diff --git a/wallet/bip32/src/mnemonic/seed.rs b/wallet/bip32/src/mnemonic/seed.rs index a4c4025c64..7fd57f19a8 100644 --- a/wallet/bip32/src/mnemonic/seed.rs +++ b/wallet/bip32/src/mnemonic/seed.rs @@ -4,7 +4,6 @@ use zeroize::Zeroize; /// BIP39 seeds. // TODO(tarcieri): support for 32-byte seeds -#[cfg_attr(docsrs, doc(cfg(feature = "bip39")))] pub struct Seed(pub(crate) [u8; Seed::SIZE]); impl Seed { diff --git a/wallet/bip32/src/prefix.rs b/wallet/bip32/src/prefix.rs index 8a4bcc6273..daff8267e7 100644 --- a/wallet/bip32/src/prefix.rs +++ b/wallet/bip32/src/prefix.rs @@ -6,6 +6,7 @@ use core::{ fmt::{self, Debug, Display}, str, }; +use kaspa_consensus_core::network::{NetworkId, NetworkType}; /// BIP32 extended key prefixes a.k.a. "versions" (e.g. `xpub`, `xprv`) /// @@ -234,6 +235,18 @@ impl TryFrom<&str> for Prefix { } } +impl From for Prefix { + fn from(value: NetworkId) -> Self { + let network_type = value.network_type(); + match network_type { + NetworkType::Mainnet => Prefix::KPUB, + NetworkType::Devnet => Prefix::KTUB, + NetworkType::Simnet => Prefix::KTUB, + NetworkType::Testnet => Prefix::KTUB, + } + } +} + #[cfg(test)] mod tests { use super::Prefix; diff --git a/wallet/bip32/src/private_key.rs b/wallet/bip32/src/private_key.rs index 0d4769ee4f..d5dbc3d144 100644 --- a/wallet/bip32/src/private_key.rs +++ b/wallet/bip32/src/private_key.rs @@ -4,6 +4,7 @@ use crate::Result; pub use secp256k1::SecretKey; use secp256k1::{scalar::Scalar, Secp256k1, SignOnly}; +/// Trait for private key types which can be derived using BIP32. pub trait PrivateKey: Sized { /// Public key type which corresponds to this private key. type PublicKey: PublicKey; diff --git a/wallet/bip32/src/public_key.rs b/wallet/bip32/src/public_key.rs index 28a121811b..56fb17de51 100644 --- a/wallet/bip32/src/public_key.rs +++ b/wallet/bip32/src/public_key.rs @@ -3,7 +3,7 @@ use ripemd::{Digest, Ripemd160}; use secp256k1::{scalar::Scalar, Secp256k1, VerifyOnly}; use sha2::Sha256; -/// Trait for key types which can be derived using BIP32. +/// Trait for public key types which can be derived using BIP32. pub trait PublicKey: Sized { /// Initialize this key from bytes. 
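The new `From<NetworkId>` conversion above collapses every non-mainnet network onto the testnet prefix. A usage sketch; the `with_suffix` constructor and `Prefix` equality are assumed from the surrounding crates:

    use kaspa_bip32::Prefix;
    use kaspa_consensus_core::network::{NetworkId, NetworkType};

    fn main() {
        // Mainnet maps to `kpub`; testnet, simnet and devnet share `ktub`.
        let mainnet: Prefix = NetworkId::new(NetworkType::Mainnet).into();
        assert_eq!(mainnet, Prefix::KPUB);
        let testnet: Prefix = NetworkId::with_suffix(NetworkType::Testnet, 10).into();
        assert_eq!(testnet, Prefix::KTUB);
    }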
fn from_bytes(bytes: PublicKeyBytes) -> Result; diff --git a/wallet/bip32/src/xpublic_key.rs b/wallet/bip32/src/xpublic_key.rs index a52d8f1a1c..ac4eb720c9 100644 --- a/wallet/bip32/src/xpublic_key.rs +++ b/wallet/bip32/src/xpublic_key.rs @@ -174,8 +174,8 @@ impl BorshDeserialize for ExtendedPublicKey where K: PublicKey, { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { - let Header { version, magic } = Header::deserialize(buf)?; + fn deserialize_reader(reader: &mut R) -> std::io::Result { + let Header { version, magic } = Header::deserialize_reader(reader)?; if magic != Self::STORAGE_MAGIC { return Err(std::io::Error::new(std::io::ErrorKind::Other, "Invalid extended public key magic value")); } @@ -183,13 +183,11 @@ where return Err(std::io::Error::new(std::io::ErrorKind::Other, "Invalid extended public key version")); } - let public_key_bytes: [u8; KEY_SIZE + 1] = buf[..KEY_SIZE + 1] - .try_into() - .map_err(|_| std::io::Error::new(std::io::ErrorKind::Other, "Invalid extended public key"))?; + let mut public_key_bytes: [u8; KEY_SIZE + 1] = [0; KEY_SIZE + 1]; + reader.read_exact(&mut public_key_bytes)?; let public_key = K::from_bytes(public_key_bytes) .map_err(|_| std::io::Error::new(std::io::ErrorKind::Other, "Invalid extended public key"))?; - *buf = &buf[KEY_SIZE + 1..]; - let attrs = ExtendedKeyAttrs::deserialize(buf)?; + let attrs = ExtendedKeyAttrs::deserialize_reader(reader)?; Ok(Self { public_key, attrs }) } } diff --git a/wallet/core/Cargo.toml b/wallet/core/Cargo.toml index fb31afb310..3e057b6a77 100644 --- a/wallet/core/Cargo.toml +++ b/wallet/core/Cargo.toml @@ -71,6 +71,7 @@ kaspa-txscript.workspace = true kaspa-utils.workspace = true kaspa-wallet-keys.workspace = true kaspa-wallet-macros.workspace = true +kaspa-wallet-pskt.workspace = true kaspa-wasm-core.workspace = true kaspa-wrpc-client.workspace = true kaspa-wrpc-wasm.workspace = true @@ -125,5 +126,5 @@ serde_repr.workspace = true [target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies] tokio.workspace = true -[lints.clippy] -empty_docs = "allow" +[lints] +workspace = true diff --git a/wallet/core/src/account/descriptor.rs b/wallet/core/src/account/descriptor.rs index d433b7bf8f..c3bf97cc1b 100644 --- a/wallet/core/src/account/descriptor.rs +++ b/wallet/core/src/account/descriptor.rs @@ -11,12 +11,18 @@ use kaspa_wallet_macros::declare_typescript_wasm_interface as declare; use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; +/// +/// Structure that represents a wallet account. This structure contains +/// properties that are common to all wallet accounts as well as +/// account-specific properties stored in a BTreeMap by each account. +/// /// @category Wallet API #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] pub struct AccountDescriptor { pub kind: AccountKind, pub account_id: AccountId, pub account_name: Option, + pub balance: Option, pub prv_key_data_ids: AssocPrvKeyDataIds, pub receive_address: Option
<Address>, pub change_address: Option<Address>
, @@ -29,11 +35,21 @@ impl AccountDescriptor { kind: AccountKind, account_id: AccountId, account_name: Option<String>, + balance: Option<Balance>, prv_key_data_ids: AssocPrvKeyDataIds, receive_address: Option
<Address>, change_address: Option<Address>
, ) -> Self { - Self { kind, account_id, account_name, prv_key_data_ids, receive_address, change_address, properties: BTreeMap::default() } + Self { + kind, + account_id, + account_name, + balance, + prv_key_data_ids, + receive_address, + change_address, + properties: BTreeMap::default(), + } } pub fn with_property(mut self, property: AccountDescriptorProperty, value: AccountDescriptorValue) -> Self { @@ -111,11 +127,12 @@ impl std::fmt::Display for AccountDescriptorValue { AccountDescriptorValue::Bool(value) => write!(f, "{}", value), AccountDescriptorValue::AddressDerivationMeta(value) => write!(f, "{}", value), AccountDescriptorValue::XPubKeys(value) => { - let mut s = String::new(); + let mut s = vec![]; for xpub in value.iter() { - s.push_str(&format!("{}\n", xpub)); + //s.push(xpub.to_string(None)); + s.push(format!("{}", xpub)); } - write!(f, "{}", s) + write!(f, "{}", s.join("\n")) } AccountDescriptorValue::Json(value) => write!(f, "{}", value), } @@ -225,6 +242,7 @@ declare! { receiveAddress? : Address, changeAddress? : Address, prvKeyDataIds : HexString[], + // balance? : Balance, [key: string]: any } "#, diff --git a/wallet/core/src/account/kind.rs b/wallet/core/src/account/kind.rs index faf968f296..511c1f4ed2 100644 --- a/wallet/core/src/account/kind.rs +++ b/wallet/core/src/account/kind.rs @@ -8,6 +8,11 @@ use std::hash::Hash; use std::str::FromStr; use workflow_wasm::convert::CastFromJs; +/// +/// Account kind is a string signature that represents an account type. +/// Account kind is used to identify the account type during +/// serialization, deserialization and various API calls. +/// /// @category Wallet SDK #[derive(Debug, Default, Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Hash, CastFromJs)] #[wasm_bindgen] @@ -66,6 +71,7 @@ impl FromStr for AccountKind { "bip32" => Ok(BIP32_ACCOUNT_KIND.into()), "multisig" => Ok(MULTISIG_ACCOUNT_KIND.into()), "keypair" => Ok(KEYPAIR_ACCOUNT_KIND.into()), + "bip32watch" => Ok(BIP32_WATCH_ACCOUNT_KIND.into()), _ => Err(Error::InvalidAccountKind), } } @@ -95,22 +101,17 @@ impl BorshSerialize for AccountKind { } impl BorshDeserialize for AccountKind { - fn deserialize(buf: &mut &[u8]) -> IoResult { - if buf.is_empty() { - Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "Invalid AccountKind length")) - } else { - let len = buf[0]; - if buf.len() < (len as usize + 1) { - Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "Invalid AccountKind length")) - } else { - let s = str64::make( - std::str::from_utf8(&buf[1..(len as usize + 1)]) - .map_err(|_| std::io::Error::new(std::io::ErrorKind::InvalidData, "Invalid UTF-8 sequence"))?, - ); - *buf = &buf[(len as usize + 1)..]; - Ok(Self(s)) - } - } + fn deserialize_reader(reader: &mut R) -> IoResult { + let len = ::deserialize_reader(reader)? 
as usize; + let mut buf = [0; 64]; + reader + .read_exact(&mut buf[0..len]) + .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, format!("Invalid AccountKind length ({err:?})")))?; + let s = str64::make( + std::str::from_utf8(&buf[..len]) + .map_err(|_| std::io::Error::new(std::io::ErrorKind::InvalidData, "Invalid UTF-8 sequence"))?, + ); + Ok(Self(s)) } } diff --git a/wallet/core/src/account/mod.rs b/wallet/core/src/account/mod.rs index b921bf4914..31c7fea9d5 100644 --- a/wallet/core/src/account/mod.rs +++ b/wallet/core/src/account/mod.rs @@ -5,8 +5,15 @@ pub mod descriptor; pub mod kind; +pub mod pskb; pub mod variants; +use kaspa_hashes::Hash; +use kaspa_wallet_pskt::bundle::Bundle; pub use kind::*; +use pskb::{ + bundle_from_pskt_generator, bundle_to_finalizer_stream, pskb_signer_for_address, pskt_to_pending_transaction, PSKBSigner, + PSKTGenerator, +}; pub use variants::*; use crate::derivation::build_derivate_paths; @@ -116,6 +123,14 @@ pub trait Account: AnySync + Send + Sync + 'static { self.context().settings.name.clone() } + fn feature(&self) -> Option { + None + } + + fn xpub_keys(&self) -> Option<&ExtendedPublicKeys> { + None + } + fn name_or_id(&self) -> String { if let Some(name) = self.name() { if name.is_empty() { @@ -348,6 +363,66 @@ pub trait Account: AnySync + Send + Sync + 'static { Ok((generator.summary(), ids)) } + async fn pskb_from_send_generator( + self: Arc, + destination: PaymentDestination, + priority_fee_sompi: Fees, + payload: Option>, + wallet_secret: Secret, + payment_secret: Option, + abortable: &Abortable, + ) -> Result { + let settings = GeneratorSettings::try_new_with_account(self.clone().as_dyn_arc(), destination, priority_fee_sompi, payload)?; + let keydata = self.prv_key_data(wallet_secret).await?; + let signer = Arc::new(PSKBSigner::new(self.clone().as_dyn_arc(), keydata, payment_secret)); + let generator = Generator::try_new(settings, None, Some(abortable))?; + let pskt_generator = PSKTGenerator::new(generator, signer, self.wallet().address_prefix()?); + bundle_from_pskt_generator(pskt_generator).await + } + + async fn pskb_sign( + self: Arc, + bundle: &Bundle, + wallet_secret: Secret, + payment_secret: Option, + sign_for_address: Option<&Address>, + ) -> Result { + let keydata = self.prv_key_data(wallet_secret).await?; + let signer = Arc::new(PSKBSigner::new(self.clone().as_dyn_arc(), keydata.clone(), payment_secret.clone())); + + let network_id = self.wallet().clone().network_id()?; + let derivation = self.as_derivation_capable()?; + + let (derivation_path, _) = + build_derivate_paths(&derivation.account_kind(), derivation.account_index(), derivation.cosigner_index())?; + + let key_fingerprint = keydata.get_xprv(payment_secret.clone().as_ref())?.public_key().fingerprint(); + + match pskb_signer_for_address(bundle, signer, network_id, sign_for_address, derivation_path, key_fingerprint).await { + Ok(signer) => Ok(signer), + Err(e) => Err(Error::from(e.to_string())), + } + } + + async fn pskb_broadcast(self: Arc, bundle: &Bundle) -> Result, Error> { + let mut ids = Vec::new(); + let mut stream = bundle_to_finalizer_stream(bundle); + + while let Some(result) = stream.next().await { + match result { + Ok(pskt) => { + let change = self.wallet().account()?.change_address()?; + let transaction = pskt_to_pending_transaction(pskt, self.wallet().network_id()?, change)?; + ids.push(transaction.try_submit(&self.wallet().rpc_api()).await?); + } + Err(e) => { + eprintln!("Error processing a PSKT from bundle: {:?}", e); + } + } + } + Ok(ids) + } 
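Taken together, the three new methods above form a generate / sign / broadcast pipeline for PSKBs. A hypothetical end-to-end sketch; every binding and the elided imports are assumed to come from the surrounding wallet context:

    async fn send_via_pskb(
        account: Arc<dyn Account>,
        destination: PaymentDestination,
        fees: Fees,
        wallet_secret: Secret,
        abortable: Abortable,
    ) -> Result<()> {
        // 1) build an unsigned bundle from the account's UTXOs
        let bundle = account
            .clone()
            .pskb_from_send_generator(destination, fees, None, wallet_secret.clone(), None, &abortable)
            .await?;
        // 2) sign every PSKT in the bundle with the account's keys
        let signed = account.clone().pskb_sign(&bundle, wallet_secret, None, None).await?;
        // 3) finalize and submit, collecting transaction ids
        let _txids = account.pskb_broadcast(&signed).await?;
        Ok(())
    }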
+ /// Execute a transfer to another wallet account. async fn transfer( self: Arc, @@ -358,13 +433,14 @@ pub trait Account: AnySync + Send + Sync + 'static { payment_secret: Option, abortable: &Abortable, notifier: Option, + guard: &WalletGuard, ) -> Result<(GeneratorSummary, Vec)> { let keydata = self.prv_key_data(wallet_secret).await?; let signer = Arc::new(Signer::new(self.clone().as_dyn_arc(), keydata, payment_secret)); let destination_account = self .wallet() - .get_account_by_id(&destination_account_id) + .get_account_by_id(&destination_account_id, guard) .await? .ok_or_else(|| Error::AccountNotFound(destination_account_id))?; @@ -524,6 +600,7 @@ pub trait DerivationCapableAccount: Account { let settings = GeneratorSettings::try_new_with_iterator( self.wallet().network_id()?, Box::new(utxos.into_iter()), + None, change_address.clone(), 1, 1, @@ -537,7 +614,7 @@ pub trait DerivationCapableAccount: Account { let mut stream = generator.stream(); while let Some(transaction) = stream.try_next().await? { - transaction.try_sign_with_keys(&keys)?; + transaction.try_sign_with_keys(&keys, None)?; let id = transaction.try_submit(&rpc).await?; if let Some(notifier) = notifier { notifier(index, aggregate_utxo_count, balance, Some(id)); diff --git a/wallet/core/src/account/pskb.rs b/wallet/core/src/account/pskb.rs new file mode 100644 index 0000000000..e71d7e4796 --- /dev/null +++ b/wallet/core/src/account/pskb.rs @@ -0,0 +1,367 @@ +//! +//! Tools for interfacing wallet accounts with PSKBs. +//! (Partial Signed Kaspa Transaction Bundles). +//! + +pub use crate::error::Error; +use crate::imports::*; +use crate::tx::PaymentOutputs; +use futures::stream; +use kaspa_bip32::{DerivationPath, KeyFingerprint, PrivateKey}; +use kaspa_consensus_client::UtxoEntry as ClientUTXO; +use kaspa_consensus_core::hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValues}; +use kaspa_consensus_core::tx::VerifiableTransaction; +use kaspa_consensus_core::tx::{TransactionInput, UtxoEntry}; +use kaspa_txscript::extract_script_pub_key_address; +use kaspa_txscript::opcodes::codes::OpData65; +use kaspa_txscript::script_builder::ScriptBuilder; +use kaspa_wallet_core::tx::{Generator, GeneratorSettings, PaymentDestination, PendingTransaction}; +pub use kaspa_wallet_pskt::bundle::Bundle; +use kaspa_wallet_pskt::prelude::KeySource; +use kaspa_wallet_pskt::prelude::{Finalizer, Inner, SignInputOk, Signature, Signer}; +pub use kaspa_wallet_pskt::pskt::{Creator, PSKT}; +use secp256k1::schnorr; +use secp256k1::{Message, PublicKey}; +use std::iter; + +struct PSKBSignerInner { + keydata: PrvKeyData, + account: Arc, + payment_secret: Option, + keys: Mutex>, +} + +pub struct PSKBSigner { + inner: Arc, +} + +impl PSKBSigner { + pub fn new(account: Arc, keydata: PrvKeyData, payment_secret: Option) -> Self { + Self { inner: Arc::new(PSKBSignerInner { keydata, account, payment_secret, keys: Mutex::new(AHashMap::new()) }) } + } + + pub fn ingest(&self, addresses: &[Address]) -> Result<()> { + let mut keys = self.inner.keys.lock()?; + + // Skip addresses that are already present in the key map. 
+ let addresses = addresses.iter().filter(|a| !keys.contains_key(a)).collect::>(); + if !addresses.is_empty() { + let account = self.inner.account.clone().as_derivation_capable().expect("expecting derivation capable account"); + let (receive, change) = account.derivation().addresses_indexes(&addresses)?; + let private_keys = account.create_private_keys(&self.inner.keydata, &self.inner.payment_secret, &receive, &change)?; + for (address, private_key) in private_keys { + keys.insert(address.clone(), private_key.to_bytes()); + } + } + Ok(()) + } + + fn public_key(&self, for_address: &Address) -> Result { + let keys = self.inner.keys.lock()?; + match keys.get(for_address) { + Some(private_key) => { + let kp = secp256k1::Keypair::from_seckey_slice(secp256k1::SECP256K1, private_key)?; + Ok(kp.public_key()) + } + None => Err(Error::from("PSKBSigner address coverage error")), + } + } + + fn sign_schnorr(&self, for_address: &Address, message: Message) -> Result { + let keys = self.inner.keys.lock()?; + match keys.get(for_address) { + Some(private_key) => { + let schnorr_key = secp256k1::Keypair::from_seckey_slice(secp256k1::SECP256K1, private_key)?; + Ok(schnorr_key.sign_schnorr(message)) + } + None => Err(Error::from("PSKBSigner address coverage error")), + } + } +} + +pub struct PSKTGenerator { + generator: Generator, + signer: Arc, + prefix: Prefix, +} + +impl PSKTGenerator { + pub fn new(generator: Generator, signer: Arc, prefix: Prefix) -> Self { + Self { generator, signer, prefix } + } + + pub fn stream(&self) -> impl Stream, Error>> { + PSKTStream::new(self.generator.clone(), self.signer.clone(), self.prefix) + } +} + +struct PSKTStream { + generator_stream: Pin> + Send>>, + signer: Arc, + prefix: Prefix, +} + +impl PSKTStream { + fn new(generator: Generator, signer: Arc, prefix: Prefix) -> Self { + let generator_stream = generator.stream().map_err(Error::from); + Self { generator_stream: Box::pin(generator_stream), signer, prefix } + } +} + +impl Stream for PSKTStream { + type Item = Result, Error>; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.as_ref(); + + let _prefix = this.prefix; + let _signer = this.signer.clone(); + + match self.get_mut().generator_stream.as_mut().poll_next(cx) { + Poll::Ready(Some(Ok(pending_tx))) => { + let pskt = convert_pending_tx_to_pskt(pending_tx); + Poll::Ready(Some(pskt)) + } + Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))), + Poll::Ready(None) => Poll::Ready(None), + Poll::Pending => Poll::Pending, + } + } +} + +fn convert_pending_tx_to_pskt(pending_tx: PendingTransaction) -> Result, Error> { + let signable_tx = pending_tx.signable_transaction(); + let verifiable_tx = signable_tx.as_verifiable(); + let populated_inputs: Vec<(&TransactionInput, &UtxoEntry)> = verifiable_tx.populated_inputs().collect(); + let pskt_inner = Inner::try_from((pending_tx.transaction(), populated_inputs.to_owned()))?; + Ok(PSKT::::from(pskt_inner)) +} + +pub async fn bundle_from_pskt_generator(generator: PSKTGenerator) -> Result { + let mut bundle: Bundle = Bundle::new(); + let mut stream = generator.stream(); + + while let Some(pskt_result) = stream.next().await { + match pskt_result { + Ok(pskt) => bundle.add_pskt(pskt), + Err(e) => return Err(e), + } + } + + Ok(bundle) +} + +pub async fn pskb_signer_for_address( + bundle: &Bundle, + signer: Arc, + network_id: NetworkId, + sign_for_address: Option<&Address>, + derivation_path: DerivationPath, + key_fingerprint: KeyFingerprint, +) -> Result { + let mut signed_bundle = 
Bundle::new(); + let mut reused_values = SigHashReusedValues::new(); + + // If set, sign-for address is used for signing. + // Else, all addresses from inputs are. + let addresses: Vec
= match sign_for_address { + Some(signer) => vec![signer.clone()], + None => bundle + .iter() + .flat_map(|inner| { + inner.inputs + .iter() + .filter_map(|input| input.utxo_entry.as_ref()) // Filter out None and get a reference to UtxoEntry if it exists + .filter_map(|utxo_entry| { + extract_script_pub_key_address(&utxo_entry.script_public_key.clone(), network_id.into()).ok() + }) + .collect::>() + }) + .collect(), + }; + + // Prepare the signer. + signer.ingest(addresses.as_ref())?; + + for pskt_inner in bundle.iter().cloned() { + let pskt: PSKT = PSKT::from(pskt_inner); + + let mut sign = |signer_pskt: PSKT| { + signer_pskt + .pass_signature_sync(|tx, sighash| -> Result, String> { + tx.tx + .inputs + .iter() + .enumerate() + .map(|(idx, _input)| { + let hash = calc_schnorr_signature_hash(&tx.as_verifiable(), idx, sighash[idx], &mut reused_values); + let msg = secp256k1::Message::from_digest_slice(hash.as_bytes().as_slice()).unwrap(); + + // When address represents a locked UTXO, no private key is available. + // Instead, use the account receive address' private key. + let address: &Address = match sign_for_address { + Some(address) => address, + None => addresses.get(idx).expect("Input indexed address"), + }; + + let public_key = signer.public_key(address).expect("Public key for input indexed address"); + + Ok(SignInputOk { + signature: Signature::Schnorr(signer.sign_schnorr(address, msg).unwrap()), + pub_key: public_key, + key_source: Some(KeySource { key_fingerprint, derivation_path: derivation_path.clone() }), + }) + }) + .collect() + }) + .unwrap() + }; + signed_bundle.add_pskt(sign(pskt.clone())); + } + Ok(signed_bundle) +} + +pub fn finalize_pskt_one_or_more_sig_and_redeem_script(pskt: PSKT) -> Result, Error> { + let result = pskt.finalize_sync(|inner: &Inner| -> Result>, String> { + Ok(inner + .inputs + .iter() + .map(|input| -> Vec { + let signatures: Vec<_> = input + .partial_sigs + .clone() + .into_iter() + .flat_map(|(_, signature)| iter::once(OpData65).chain(signature.into_bytes()).chain([input.sighash_type.to_u8()])) + .collect(); + + signatures + .into_iter() + .chain( + input + .redeem_script + .as_ref() + .map(|redeem_script| ScriptBuilder::new().add_data(redeem_script.as_slice()).unwrap().drain().to_vec()) + .unwrap_or_default(), + ) + .collect() + }) + .collect()) + }); + + match result { + Ok(finalized_pskt) => Ok(finalized_pskt), + Err(e) => Err(Error::from(e.to_string())), + } +} + +pub fn finalize_pskt_no_sig_and_redeem_script(pskt: PSKT) -> Result, Error> { + let result = pskt.finalize_sync(|inner: &Inner| -> Result>, String> { + Ok(inner + .inputs + .iter() + .map(|input| -> Vec { + input + .redeem_script + .as_ref() + .map(|redeem_script| ScriptBuilder::new().add_data(redeem_script.as_slice()).unwrap().drain().to_vec()) + .unwrap_or_default() + }) + .collect()) + }); + + match result { + Ok(finalized_pskt) => Ok(finalized_pskt), + Err(e) => Err(Error::from(e.to_string())), + } +} + +pub fn bundle_to_finalizer_stream(bundle: &Bundle) -> impl Stream, Error>> + Send { + stream::iter(bundle.iter().cloned().collect::>()).map(move |pskt_inner| { + let pskt: PSKT = PSKT::from(pskt_inner); + let pskt_finalizer = pskt.constructor().updater().signer().finalizer(); + finalize_pskt_one_or_more_sig_and_redeem_script(pskt_finalizer) + }) +} + +pub fn pskt_to_pending_transaction( + finalized_pskt: PSKT, + network_id: NetworkId, + change_address: Address, +) -> Result { + let mass = 10; + let (signed_tx, _) = match finalized_pskt.clone().extractor() { + Ok(extractor) => match 
extractor.extract_tx() { + Ok(once_mass) => once_mass(mass), + Err(e) => return Err(Error::PendingTransactionFromPSKTError(e.to_string())), + }, + Err(e) => return Err(Error::PendingTransactionFromPSKTError(e.to_string())), + }; + + let inner_pskt = finalized_pskt.deref().clone(); + + let utxo_entries_ref: Vec = inner_pskt + .inputs + .iter() + .filter_map(|input| { + if let Some(ue) = input.clone().utxo_entry { + return Some(UtxoEntryReference { + utxo: Arc::new(ClientUTXO { + address: Some(extract_script_pub_key_address(&ue.script_public_key, network_id.into()).unwrap()), + amount: ue.amount, + outpoint: input.previous_outpoint.into(), + script_public_key: ue.script_public_key, + block_daa_score: ue.block_daa_score, + is_coinbase: ue.is_coinbase, + }), + }); + } + None + }) + .collect(); + + let output: Vec = signed_tx.outputs.clone(); + let recipient = extract_script_pub_key_address(&output[0].script_public_key, network_id.into())?; + let fee_u: u64 = 0; + + let utxo_iterator: Box + Send + Sync + 'static> = + Box::new(utxo_entries_ref.clone().into_iter()); + + let final_transaction_destination = PaymentDestination::PaymentOutputs(PaymentOutputs::from((recipient.clone(), output[0].value))); + + let settings = GeneratorSettings { + network_id, + multiplexer: None, + sig_op_count: 1, + minimum_signatures: 1, + change_address, + utxo_iterator, + priority_utxo_entries: None, + source_utxo_context: None, + destination_utxo_context: None, + final_transaction_priority_fee: fee_u.into(), + final_transaction_destination, + final_transaction_payload: None, + }; + + // Create the Generator + let generator = Generator::try_new(settings, None, None)?; + + // Create PendingTransaction (WIP) + let pending_tx = PendingTransaction::try_new( + &generator, + signed_tx.clone(), + utxo_entries_ref.clone(), + vec![], + None, + None, + 0, + 0, + 0, + 1, + 0, + 0, + kaspa_wallet_core::tx::DataKind::Final, + )?; + + Ok(pending_tx) +} diff --git a/wallet/core/src/account/variants/bip32.rs b/wallet/core/src/account/variants/bip32.rs index 0b2909ad09..1c120df4b4 100644 --- a/wallet/core/src/account/variants/bip32.rs +++ b/wallet/core/src/account/variants/bip32.rs @@ -70,13 +70,13 @@ impl BorshSerialize for Payload { } impl BorshDeserialize for Payload { - fn deserialize(buf: &mut &[u8]) -> IoResult { + fn deserialize_reader(reader: &mut R) -> IoResult { let StorageHeader { version: _, .. 
} = - StorageHeader::deserialize(buf)?.try_magic(Self::STORAGE_MAGIC)?.try_version(Self::STORAGE_VERSION)?; + StorageHeader::deserialize_reader(reader)?.try_magic(Self::STORAGE_MAGIC)?.try_version(Self::STORAGE_VERSION)?; - let xpub_keys = BorshDeserialize::deserialize(buf)?; - let account_index = BorshDeserialize::deserialize(buf)?; - let ecdsa = BorshDeserialize::deserialize(buf)?; + let xpub_keys = BorshDeserialize::deserialize_reader(reader)?; + let account_index = BorshDeserialize::deserialize_reader(reader)?; + let ecdsa = BorshDeserialize::deserialize_reader(reader)?; Ok(Self { xpub_keys, account_index, ecdsa }) } @@ -169,6 +169,10 @@ impl Account for Bip32 { BIP32_ACCOUNT_KIND.into() } + // fn xpub_keys(&self) -> Option<&ExtendedPublicKeys> { + // None + // } + fn prv_key_data_id(&self) -> Result<&PrvKeyDataId> { Ok(&self.prv_key_data_id) } @@ -217,6 +221,7 @@ impl Account for Bip32 { BIP32_ACCOUNT_KIND.into(), *self.id(), self.name(), + self.balance(), self.prv_key_data_id.into(), self.receive_address().ok(), self.change_address().ok(), diff --git a/wallet/core/src/account/variants/bip32watch.rs b/wallet/core/src/account/variants/bip32watch.rs new file mode 100644 index 0000000000..cfadb745df --- /dev/null +++ b/wallet/core/src/account/variants/bip32watch.rs @@ -0,0 +1,252 @@ +//! +//! bip32-watch account implementation +//! + +use crate::account::Inner; +use crate::derivation::{AddressDerivationManager, AddressDerivationManagerTrait}; +use crate::imports::*; + +pub const BIP32_WATCH_ACCOUNT_KIND: &str = "kaspa-bip32-watch-standard"; + +pub struct Ctor {} + +#[async_trait] +impl Factory for Ctor { + fn name(&self) -> String { + "bip32watch".to_string() + } + + fn description(&self) -> String { + "Kaspa Core bip32-watch Account".to_string() + } + + async fn try_load( + &self, + wallet: &Arc, + storage: &AccountStorage, + meta: Option>, + ) -> Result> { + Ok(Arc::new(bip32watch::Bip32Watch::try_load(wallet, storage, meta).await?)) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +pub struct Payload { + pub xpub_keys: ExtendedPublicKeys, + pub ecdsa: bool, +} + +impl Payload { + pub fn new(xpub_keys: Arc>, ecdsa: bool) -> Self { + Self { xpub_keys, ecdsa } + } + + pub fn try_load(storage: &AccountStorage) -> Result { + Ok(Self::try_from_slice(storage.serialized.as_slice())?) + } +} + +impl Storable for Payload { + // a unique number used for binary + // serialization data alignment check + const STORAGE_MAGIC: u32 = 0x92014137; + // binary serialization version + const STORAGE_VERSION: u32 = 0; +} + +impl AccountStorable for Payload {} + +impl BorshSerialize for Payload { + fn serialize(&self, writer: &mut W) -> std::io::Result<()> { + StorageHeader::new(Self::STORAGE_MAGIC, Self::STORAGE_VERSION).serialize(writer)?; + BorshSerialize::serialize(&self.xpub_keys, writer)?; + BorshSerialize::serialize(&self.ecdsa, writer)?; + + Ok(()) + } +} + +impl BorshDeserialize for Payload { + fn deserialize_reader(reader: &mut R) -> std::io::Result { + let StorageHeader { version: _, .. 
} = + StorageHeader::deserialize_reader(reader)?.try_magic(Self::STORAGE_MAGIC)?.try_version(Self::STORAGE_VERSION)?; + + let xpub_keys = BorshDeserialize::deserialize_reader(reader)?; + let ecdsa = BorshDeserialize::deserialize_reader(reader)?; + + Ok(Self { xpub_keys, ecdsa }) + } +} + +pub struct Bip32Watch { + inner: Arc, + xpub_keys: ExtendedPublicKeys, + ecdsa: bool, + derivation: Arc, +} + +impl Bip32Watch { + pub async fn try_new(wallet: &Arc, name: Option, xpub_keys: ExtendedPublicKeys, ecdsa: bool) -> Result { + let settings = AccountSettings { name, ..Default::default() }; + + let public_key = xpub_keys.first().ok_or_else(|| Error::Bip32WatchXpubRequired)?.public_key(); + + let (id, storage_key) = make_account_hashes(from_bip32_watch(public_key)); + + let inner = Arc::new(Inner::new(wallet, id, storage_key, settings)); + + let derivation = + AddressDerivationManager::new(wallet, BIP32_WATCH_ACCOUNT_KIND.into(), &xpub_keys, ecdsa, 0, None, 1, Default::default()) + .await?; + + Ok(Self { inner, xpub_keys, ecdsa, derivation }) + } + + pub async fn try_load(wallet: &Arc, storage: &AccountStorage, meta: Option>) -> Result { + let storable = Payload::try_load(storage)?; + let inner = Arc::new(Inner::from_storage(wallet, storage)); + let Payload { xpub_keys, ecdsa, .. } = storable; + let address_derivation_indexes = meta.and_then(|meta| meta.address_derivation_indexes()).unwrap_or_default(); + + let derivation = AddressDerivationManager::new( + wallet, + BIP32_WATCH_ACCOUNT_KIND.into(), + &xpub_keys, + ecdsa, + 0, + None, + 1, + address_derivation_indexes, + ) + .await?; + + Ok(Self { inner, xpub_keys, ecdsa, derivation }) + } + + pub fn get_address_range_for_scan(&self, range: std::ops::Range) -> Result> { + let receive_addresses = self.derivation.receive_address_manager().get_range_with_args(range.clone(), false)?; + let change_addresses = self.derivation.change_address_manager().get_range_with_args(range, false)?; + Ok(receive_addresses.into_iter().chain(change_addresses).collect::>()) + } + + // pub fn xpub_keys(&self) -> &ExtendedPublicKeys { + // &self.xpub_keys + // } +} + +#[async_trait] +impl Account for Bip32Watch { + fn inner(&self) -> &Arc { + &self.inner + } + + fn account_kind(&self) -> AccountKind { + BIP32_WATCH_ACCOUNT_KIND.into() + } + + fn feature(&self) -> Option { + let info = "bip32-watch"; + Some(info.into()) + } + + fn xpub_keys(&self) -> Option<&ExtendedPublicKeys> { + Some(&self.xpub_keys) + } + + fn prv_key_data_id(&self) -> Result<&PrvKeyDataId> { + Err(Error::Bip32WatchAccount) + } + + fn as_dyn_arc(self: Arc) -> Arc { + self + } + + fn sig_op_count(&self) -> u8 { + u8::try_from(self.xpub_keys.len()).unwrap() + } + + fn minimum_signatures(&self) -> u16 { + 1 + } + + fn receive_address(&self) -> Result
<Address> { self.derivation.receive_address_manager().current_address() } fn change_address(&self) -> Result<Address>
{ + self.derivation.change_address_manager().current_address() + } + + fn to_storage(&self) -> Result { + let settings = self.context().settings.clone(); + let storable = Payload::new(self.xpub_keys.clone(), self.ecdsa); + + let storage = AccountStorage::try_new( + BIP32_WATCH_ACCOUNT_KIND.into(), + self.id(), + self.storage_key(), + AssocPrvKeyDataIds::None, + settings, + storable, + )?; + + Ok(storage) + } + + fn metadata(&self) -> Result> { + let metadata = AccountMetadata::new(self.inner.id, self.derivation.address_derivation_meta()); + Ok(Some(metadata)) + } + + fn descriptor(&self) -> Result { + let descriptor = AccountDescriptor::new( + BIP32_WATCH_ACCOUNT_KIND.into(), + *self.id(), + self.name(), + self.balance(), + AssocPrvKeyDataIds::None, + self.receive_address().ok(), + self.change_address().ok(), + ) + .with_property(AccountDescriptorProperty::XpubKeys, self.xpub_keys.clone().into()) + .with_property(AccountDescriptorProperty::Ecdsa, self.ecdsa.into()) + .with_property(AccountDescriptorProperty::DerivationMeta, self.derivation.address_derivation_meta().into()); + + Ok(descriptor) + } + + fn as_derivation_capable(self: Arc) -> Result> { + Ok(self.clone()) + } +} + +impl DerivationCapableAccount for Bip32Watch { + fn derivation(&self) -> Arc { + self.derivation.clone() + } + + fn account_index(&self) -> u64 { + 0 + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::tests::*; + + #[test] + fn test_storage_bip32watch() -> Result<()> { + let storable_in = Payload::new(vec![make_xpub()].into(), false); + let guard = StorageGuard::new(&storable_in); + let storable_out = guard.validate()?; + + assert_eq!(storable_in.ecdsa, storable_out.ecdsa); + assert_eq!(storable_in.xpub_keys.len(), storable_out.xpub_keys.len()); + for idx in 0..storable_in.xpub_keys.len() { + assert_eq!(storable_in.xpub_keys[idx], storable_out.xpub_keys[idx]); + } + + Ok(()) + } +} diff --git a/wallet/core/src/account/variants/keypair.rs b/wallet/core/src/account/variants/keypair.rs index b6c92907f3..6381ca046b 100644 --- a/wallet/core/src/account/variants/keypair.rs +++ b/wallet/core/src/account/variants/keypair.rs @@ -69,19 +69,17 @@ impl BorshSerialize for Payload { } impl BorshDeserialize for Payload { - fn deserialize(buf: &mut &[u8]) -> IoResult { + fn deserialize_reader(reader: &mut R) -> IoResult { use secp256k1::constants::PUBLIC_KEY_SIZE; let StorageHeader { version: _, .. 
} = - StorageHeader::deserialize(buf)?.try_magic(Self::STORAGE_MAGIC)?.try_version(Self::STORAGE_VERSION)?; + StorageHeader::deserialize_reader(reader)?.try_magic(Self::STORAGE_MAGIC)?.try_version(Self::STORAGE_VERSION)?; - let public_key_bytes: [u8; PUBLIC_KEY_SIZE] = buf[..PUBLIC_KEY_SIZE] - .try_into() - .map_err(|_| IoError::new(IoErrorKind::Other, "Unable to deserialize keypair account (public_key buffer try_into)"))?; + let mut public_key_bytes: [u8; PUBLIC_KEY_SIZE] = [0; PUBLIC_KEY_SIZE]; + reader.read_exact(&mut public_key_bytes)?; let public_key = secp256k1::PublicKey::from_slice(&public_key_bytes) .map_err(|_| IoError::new(IoErrorKind::Other, "Unable to deserialize keypair account (invalid public key)"))?; - *buf = &buf[PUBLIC_KEY_SIZE..]; - let ecdsa = BorshDeserialize::deserialize(buf)?; + let ecdsa = BorshDeserialize::deserialize_reader(reader)?; Ok(Self { public_key, ecdsa }) } @@ -181,6 +179,7 @@ impl Account for Keypair { KEYPAIR_ACCOUNT_KIND.into(), *self.id(), self.name(), + self.balance(), self.prv_key_data_id.into(), self.receive_address().ok(), self.change_address().ok(), diff --git a/wallet/core/src/account/variants/legacy.rs b/wallet/core/src/account/variants/legacy.rs index 9967fb8614..cf05acf681 100644 --- a/wallet/core/src/account/variants/legacy.rs +++ b/wallet/core/src/account/variants/legacy.rs @@ -59,9 +59,9 @@ impl BorshSerialize for Payload { } impl BorshDeserialize for Payload { - fn deserialize(buf: &mut &[u8]) -> IoResult { + fn deserialize_reader(reader: &mut R) -> IoResult { let StorageHeader { version: _, .. } = - StorageHeader::deserialize(buf)?.try_magic(Self::STORAGE_MAGIC)?.try_version(Self::STORAGE_VERSION)?; + StorageHeader::deserialize_reader(reader)?.try_magic(Self::STORAGE_MAGIC)?.try_version(Self::STORAGE_VERSION)?; Ok(Self {}) } @@ -192,6 +192,7 @@ impl Account for Legacy { LEGACY_ACCOUNT_KIND.into(), *self.id(), self.name(), + self.balance(), self.prv_key_data_id.into(), self.receive_address().ok(), self.change_address().ok(), diff --git a/wallet/core/src/account/variants/mod.rs b/wallet/core/src/account/variants/mod.rs index e321e95079..fa105b4e03 100644 --- a/wallet/core/src/account/variants/mod.rs +++ b/wallet/core/src/account/variants/mod.rs @@ -3,12 +3,14 @@ //! pub mod bip32; +pub mod bip32watch; pub mod keypair; pub mod legacy; pub mod multisig; pub mod resident; pub use bip32::BIP32_ACCOUNT_KIND; +pub use bip32watch::BIP32_WATCH_ACCOUNT_KIND; pub use keypair::KEYPAIR_ACCOUNT_KIND; pub use legacy::LEGACY_ACCOUNT_KIND; pub use multisig::MULTISIG_ACCOUNT_KIND; diff --git a/wallet/core/src/account/variants/multisig.rs b/wallet/core/src/account/variants/multisig.rs index 9a8044aa20..1128f5eef3 100644 --- a/wallet/core/src/account/variants/multisig.rs +++ b/wallet/core/src/account/variants/multisig.rs @@ -70,14 +70,14 @@ impl BorshSerialize for Payload { } impl BorshDeserialize for Payload { - fn deserialize(buf: &mut &[u8]) -> IoResult { + fn deserialize_reader(reader: &mut R) -> IoResult { let StorageHeader { version: _, .. 
} = - StorageHeader::deserialize(buf)?.try_magic(Self::STORAGE_MAGIC)?.try_version(Self::STORAGE_VERSION)?; + StorageHeader::deserialize_reader(reader)?.try_magic(Self::STORAGE_MAGIC)?.try_version(Self::STORAGE_VERSION)?; - let xpub_keys = BorshDeserialize::deserialize(buf)?; - let cosigner_index = BorshDeserialize::deserialize(buf)?; - let minimum_signatures = BorshDeserialize::deserialize(buf)?; - let ecdsa = BorshDeserialize::deserialize(buf)?; + let xpub_keys = BorshDeserialize::deserialize_reader(reader)?; + let cosigner_index = BorshDeserialize::deserialize_reader(reader)?; + let minimum_signatures = BorshDeserialize::deserialize_reader(reader)?; + let ecdsa = BorshDeserialize::deserialize_reader(reader)?; Ok(Self { xpub_keys, cosigner_index, minimum_signatures, ecdsa }) } @@ -157,8 +157,8 @@ impl MultiSig { self.minimum_signatures } - pub fn xpub_keys(&self) -> &ExtendedPublicKeys { - &self.xpub_keys + fn watch_only(&self) -> bool { + self.prv_key_data_ids.is_none() } } @@ -172,6 +172,17 @@ impl Account for MultiSig { MULTISIG_ACCOUNT_KIND.into() } + fn feature(&self) -> Option { + match self.watch_only() { + true => Some("multisig-watch".to_string()), + false => None, + } + } + + fn xpub_keys(&self) -> Option<&ExtendedPublicKeys> { + Some(&self.xpub_keys) + } + fn prv_key_data_id(&self) -> Result<&PrvKeyDataId> { Err(Error::AccountKindFeature) } @@ -181,8 +192,7 @@ impl Account for MultiSig { } fn sig_op_count(&self) -> u8 { - // TODO @maxim - 1 + u8::try_from(self.xpub_keys.len()).unwrap() } fn minimum_signatures(&self) -> u16 { @@ -222,6 +232,7 @@ impl Account for MultiSig { MULTISIG_ACCOUNT_KIND.into(), *self.id(), self.name(), + self.balance(), self.prv_key_data_ids.clone().try_into()?, self.receive_address().ok(), self.change_address().ok(), diff --git a/wallet/core/src/account/variants/resident.rs b/wallet/core/src/account/variants/resident.rs index 74f3868965..c7e56d3d6c 100644 --- a/wallet/core/src/account/variants/resident.rs +++ b/wallet/core/src/account/variants/resident.rs @@ -77,6 +77,7 @@ impl Account for Resident { RESIDENT_ACCOUNT_KIND.into(), *self.id(), self.name(), + self.balance(), AssocPrvKeyDataIds::None, self.receive_address().ok(), self.change_address().ok(), diff --git a/wallet/core/src/account/variants/watchonly.rs b/wallet/core/src/account/variants/watchonly.rs new file mode 100644 index 0000000000..7212ffdfce --- /dev/null +++ b/wallet/core/src/account/variants/watchonly.rs @@ -0,0 +1,300 @@ +//! +//! Watch-only account implementation +//! 
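Watch-only variants now advertise themselves through the `feature()` accessor introduced above ("bip32-watch", "multisig-watch"), while fully signable accounts keep the default `None`. A hypothetical consumer; `describe` is illustrative and only `feature()` and `name_or_id()` come from the `Account` trait:

    fn describe(account: &dyn Account) -> String {
        match account.feature() {
            Some(feature) => format!("{} [{feature}]", account.name_or_id()),
            None => account.name_or_id(),
        }
    }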
+
+use crate::account::Inner;
+use crate::derivation::{AddressDerivationManager, AddressDerivationManagerTrait};
+use crate::imports::*;
+
+pub const WATCH_ONLY_ACCOUNT_KIND: &str = "kaspa-watch-only-standard";
+
+pub struct Ctor {}
+
+#[async_trait]
+impl Factory for Ctor {
+    fn name(&self) -> String {
+        "watchonly".to_string()
+    }
+
+    fn description(&self) -> String {
+        "Kaspa Core watch-only Account".to_string()
+    }
+
+    async fn try_load(
+        &self,
+        wallet: &Arc<Wallet>,
+        storage: &AccountStorage,
+        meta: Option<Arc<AccountMetadata>>,
+    ) -> Result<Arc<dyn Account>> {
+        Ok(Arc::new(watchonly::WatchOnly::try_load(wallet, storage, meta).await?))
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "lowercase")]
+pub struct Payload {
+    pub xpub_keys: ExtendedPublicKeys,
+    pub minimum_signatures: u16,
+    pub ecdsa: bool,
+}
+
+impl Payload {
+    pub fn new(xpub_keys: Arc<Vec<ExtendedPublicKeySecp256k1>>, minimum_signatures: u16, ecdsa: bool) -> Self {
+        Self { xpub_keys, minimum_signatures, ecdsa }
+    }
+
+    pub fn try_load(storage: &AccountStorage) -> Result<Self> {
+        Ok(Self::try_from_slice(storage.serialized.as_slice())?)
+    }
+}
+
+impl Storable for Payload {
+    // a unique number used for binary
+    // serialization data alignment check
+    const STORAGE_MAGIC: u32 = 0x92014137;
+    // binary serialization version
+    const STORAGE_VERSION: u32 = 0;
+}
+
+impl AccountStorable for Payload {}
+
+impl BorshSerialize for Payload {
+    fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
+        StorageHeader::new(Self::STORAGE_MAGIC, Self::STORAGE_VERSION).serialize(writer)?;
+        BorshSerialize::serialize(&self.xpub_keys, writer)?;
+        BorshSerialize::serialize(&self.minimum_signatures, writer)?;
+        BorshSerialize::serialize(&self.ecdsa, writer)?;
+
+        Ok(())
+    }
+}
+
+impl BorshDeserialize for Payload {
+    fn deserialize(buf: &mut &[u8]) -> IoResult<Self> {
+        let StorageHeader { version: _, .. } =
+            StorageHeader::deserialize(buf)?.try_magic(Self::STORAGE_MAGIC)?.try_version(Self::STORAGE_VERSION)?;
+
+        let xpub_keys = BorshDeserialize::deserialize(buf)?;
+        let minimum_signatures = BorshDeserialize::deserialize(buf)?;
+        let ecdsa = BorshDeserialize::deserialize(buf)?;
+
+        Ok(Self { xpub_keys, minimum_signatures, ecdsa })
+    }
+}
+
+pub struct WatchOnly {
+    inner: Arc<Inner>,
+    xpub_keys: ExtendedPublicKeys,
+    minimum_signatures: u16,
+    ecdsa: bool,
+    derivation: Arc<AddressDerivationManager>,
+}
+
+impl WatchOnly {
+    pub async fn try_new(
+        wallet: &Arc<Wallet>,
+        name: Option<String>,
+        xpub_keys: ExtendedPublicKeys,
+        minimum_signatures: u16,
+        ecdsa: bool,
+    ) -> Result<Self> {
+        let settings = AccountSettings { name, ..Default::default() };
+
+        let public_key = xpub_keys.first().ok_or_else(|| Error::WatchOnlyXpubRequired)?.public_key();
+
+        let storable = Payload::new(xpub_keys.clone(), minimum_signatures, ecdsa);
+
+        let (id, storage_key) = match xpub_keys.len() {
+            1 => make_account_hashes(from_watch_only(&public_key)),
+            _ => make_account_hashes(from_watch_only_multisig(&None, &storable)),
+        };
+        let inner = Arc::new(Inner::new(wallet, id, storage_key, settings));
+
+        let derivation = match xpub_keys.len() {
+            1 => {
+                AddressDerivationManager::new(
+                    wallet,
+                    WATCH_ONLY_ACCOUNT_KIND.into(),
+                    &xpub_keys,
+                    ecdsa,
+                    0,
+                    None,
+                    1,
+                    Default::default(),
+                )
+                .await?
+            }
+            _ => {
+                AddressDerivationManager::new(
+                    wallet,
+                    MULTISIG_ACCOUNT_KIND.into(),
+                    &xpub_keys,
+                    ecdsa,
+                    0,
+                    Some(u32::MIN),
+                    minimum_signatures,
+                    Default::default(),
+                )
+                .await?
+            }
+        };
+
+        Ok(Self { inner, xpub_keys, minimum_signatures, ecdsa, derivation })
+    }
+
+    pub async fn try_load(wallet: &Arc<Wallet>, storage: &AccountStorage, meta: Option<Arc<AccountMetadata>>) -> Result<Self> {
+        let storable = Payload::try_load(storage)?;
+        let inner = Arc::new(Inner::from_storage(wallet, storage));
+        let Payload { xpub_keys, minimum_signatures, ecdsa, .. } = storable;
+        let address_derivation_indexes = meta.and_then(|meta| meta.address_derivation_indexes()).unwrap_or_default();
+
+        let derivation = match xpub_keys.len() {
+            1 => {
+                AddressDerivationManager::new(
+                    wallet,
+                    WATCH_ONLY_ACCOUNT_KIND.into(),
+                    &xpub_keys,
+                    ecdsa,
+                    0,
+                    None,
+                    1,
+                    address_derivation_indexes,
+                )
+                .await?
+            }
+            _ => {
+                AddressDerivationManager::new(
+                    wallet,
+                    MULTISIG_ACCOUNT_KIND.into(),
+                    &xpub_keys,
+                    ecdsa,
+                    0,
+                    Some(u32::MIN),
+                    minimum_signatures,
+                    address_derivation_indexes,
+                )
+                .await?
+            }
+        };
+
+        Ok(Self { inner, xpub_keys, minimum_signatures, ecdsa, derivation })
+    }
+
+    pub fn get_address_range_for_scan(&self, range: std::ops::Range<u32>) -> Result<Vec<Address>> {
+        let receive_addresses = self.derivation.receive_address_manager().get_range_with_args(range.clone(), false)?;
+        let change_addresses = self.derivation.change_address_manager().get_range_with_args(range, false)?;
+        Ok(receive_addresses.into_iter().chain(change_addresses).collect::<Vec<_>>())
+    }
+
+    pub fn xpub_keys(&self) -> &ExtendedPublicKeys {
+        &self.xpub_keys
+    }
+}
+
+#[async_trait]
+impl Account for WatchOnly {
+    fn inner(&self) -> &Arc<Inner> {
+        &self.inner
+    }
+
+    fn account_kind(&self) -> AccountKind {
+        WATCH_ONLY_ACCOUNT_KIND.into()
+    }
+
+    fn prv_key_data_id(&self) -> Result<&PrvKeyDataId> {
+        Err(Error::WatchOnlyAccount)
+    }
+
+    fn as_dyn_arc(self: Arc<Self>) -> Arc<dyn Account> {
+        self
+    }
+
+    fn sig_op_count(&self) -> u8 {
+        u8::try_from(self.xpub_keys.len()).unwrap()
+    }
+
+    fn minimum_signatures(&self) -> u16 {
+        self.minimum_signatures
+    }
+
+    fn receive_address(&self) -> Result<Address> {
+        self.derivation.receive_address_manager().current_address()
+    }
+
+    fn change_address(&self) -> Result<Address> {
+        self.derivation.change_address_manager().current_address()
+    }
+
+    fn to_storage(&self) -> Result<AccountStorage> {
+        let settings = self.context().settings.clone();
+        let storable = Payload::new(self.xpub_keys.clone(), self.minimum_signatures, self.ecdsa);
+
+        let storage = AccountStorage::try_new(
+            WATCH_ONLY_ACCOUNT_KIND.into(),
+            self.id(),
+            self.storage_key(),
+            AssocPrvKeyDataIds::None,
+            settings,
+            storable,
+        )?;
+
+        Ok(storage)
+    }
+
+    fn metadata(&self) -> Result<Option<AccountMetadata>> {
+        let metadata = AccountMetadata::new(self.inner.id, self.derivation.address_derivation_meta());
+        Ok(Some(metadata))
+    }
+
+    fn descriptor(&self) -> Result<AccountDescriptor> {
+        let descriptor = AccountDescriptor::new(
+            WATCH_ONLY_ACCOUNT_KIND.into(),
+            *self.id(),
+            self.name(),
+            AssocPrvKeyDataIds::None,
+            self.receive_address().ok(),
+            self.change_address().ok(),
+        )
+        .with_property(AccountDescriptorProperty::XpubKeys, self.xpub_keys.clone().into())
+        .with_property(AccountDescriptorProperty::Ecdsa, self.ecdsa.into())
+        .with_property(AccountDescriptorProperty::DerivationMeta, self.derivation.address_derivation_meta().into());
+
+        Ok(descriptor)
+    }
+
+    fn as_derivation_capable(self: Arc<Self>) -> Result<Arc<dyn DerivationCapableAccount>> {
+        Ok(self.clone())
+    }
+}
+
+impl DerivationCapableAccount for WatchOnly {
+    fn derivation(&self) -> Arc<dyn AddressDerivationManagerTrait> {
+        self.derivation.clone()
+    }
+
+    fn account_index(&self) -> u64 {
+        0
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::tests::*;
+
+    #[test]
+    fn test_storage_watchonly() -> Result<()> {
+        let storable_in = Payload::new(vec![make_xpub()].into(), 1, false);
+        let guard = StorageGuard::new(&storable_in);
+        let storable_out = guard.validate()?;
+
+        assert_eq!(storable_in.minimum_signatures, storable_out.minimum_signatures);
+        assert_eq!(storable_in.ecdsa, storable_out.ecdsa);
+        assert_eq!(storable_in.xpub_keys.len(), storable_out.xpub_keys.len());
+        for idx in 0..storable_in.xpub_keys.len() {
+            assert_eq!(storable_in.xpub_keys[idx], storable_out.xpub_keys[idx]);
+        }
+
+        Ok(())
+    }
+}
diff --git a/wallet/core/src/api/message.rs b/wallet/core/src/api/message.rs
index 9cd2f830cd..e27cb2b29c 100644
--- a/wallet/core/src/api/message.rs
+++ b/wallet/core/src/api/message.rs
@@ -44,6 +44,47 @@ pub struct FlushResponse {}
 pub struct ConnectRequest {
     pub url: Option<String>,
     pub network_id: NetworkId,
+    // retry on error, otherwise give up
+    pub retry_on_error: bool,
+    // block async call until connected, otherwise return immediately
+    // and continue attempting to connect in the background
+    pub block_async_connect: bool,
+    // require node to be synced, fail otherwise
+    pub require_sync: bool,
+}
+
+impl Default for ConnectRequest {
+    fn default() -> Self {
+        Self {
+            url: None,
+            network_id: NetworkId::new(NetworkType::Mainnet),
+            retry_on_error: true,
+            block_async_connect: true,
+            require_sync: true,
+        }
+    }
+}
+
+impl ConnectRequest {
+    pub fn with_url(self, url: Option<String>) -> Self {
+        ConnectRequest { url, ..self }
+    }
+
+    pub fn with_network_id(self, network_id: &NetworkId) -> Self {
+        ConnectRequest { network_id: *network_id, ..self }
+    }
+
+    pub fn with_retry_on_error(self, retry_on_error: bool) -> Self {
+        ConnectRequest { retry_on_error, ..self }
+    }
+
+    pub fn with_block_async_connect(self, block_async_connect: bool) -> Self {
+        ConnectRequest { block_async_connect, ..self }
+    }
+
+    pub fn with_require_sync(self, require_sync: bool) -> Self {
+        ConnectRequest { require_sync, ..self }
+    }
 }
 
 #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
@@ -77,15 +118,18 @@ pub struct RetainContextRequest {
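A quick illustration of the builder pattern introduced above, starting from `ConnectRequest::default()`. This is a minimal sketch: the endpoint URL is a placeholder, and the import paths assume this crate's `api::message` module and the `kaspa_consensus_core` types used in the `Default` impl.

```rust
// Sketch: composing a ConnectRequest with the builder-style setters above.
// The URL is a placeholder; unspecified fields keep their Default values.
use kaspa_consensus_core::network::{NetworkId, NetworkType};
use kaspa_wallet_core::api::message::ConnectRequest;

fn non_blocking_connect_request() -> ConnectRequest {
    let network_id = NetworkId::new(NetworkType::Mainnet);
    ConnectRequest::default()
        .with_url(Some("wss://node.example.org".to_string()))
        .with_network_id(&network_id)
        .with_retry_on_error(true)       // keep retrying in the background on failure
        .with_block_async_connect(false) // return immediately instead of awaiting the connection
        .with_require_sync(true)         // surface an error if the node is not synced
}
```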
 #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
 #[serde(rename_all = "camelCase")]
-pub struct RetainContextResponse {
-    // pub name : String,
-    // pub data: Option<Arc<Vec<u8>>>,
-    // pub is_connected: bool,
-    // pub is_synced: bool,
-    // pub is_open: bool,
-    // pub url: Option<String>,
-    // pub is_wrpc_client: bool,
-    // pub network_id: Option<NetworkId>,
+pub struct RetainContextResponse {}
+
+#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct GetContextRequest {
+    pub name: String,
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct GetContextResponse {
+    pub data: Option<Vec<u8>>,
 }
 
 #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)]
diff --git a/wallet/core/src/api/mod.rs b/wallet/core/src/api/mod.rs
index f0963d610b..979ef1c72f 100644
--- a/wallet/core/src/api/mod.rs
+++ b/wallet/core/src/api/mod.rs
@@ -1,4 +1,6 @@
 //!
+//! # Wallet API
+//!
 //! Wallet API module that provides a unified interface for all wallet operations.
 //!
diff --git a/wallet/core/src/api/traits.rs b/wallet/core/src/api/traits.rs
index 2eb12907a1..357665e77b 100644
--- a/wallet/core/src/api/traits.rs
+++ b/wallet/core/src/api/traits.rs
@@ -21,13 +21,29 @@ pub trait WalletApi: Send + Sync + AnySync {
     async fn register_notifications(self: Arc<Self>, channel: Receiver) -> Result;
     async fn unregister_notifications(self: Arc<Self>, channel_id: u64) -> Result<()>;
 
+    /// Wrapper around [`retain_context_call()`](Self::retain_context_call).
     async fn retain_context(self: Arc<Self>, name: &str, data: Option<Vec<u8>>) -> Result<()> {
         self.retain_context_call(RetainContextRequest { name: name.to_string(), data }).await?;
         Ok(())
     }
 
+    /// Obtain earlier retained context data using the context `name` as a key.
+    async fn get_context(self: Arc<Self>, name: &str) -> Result<Option<Vec<u8>>> {
+        Ok(self.get_context_call(GetContextRequest { name: name.to_string() }).await?.data)
+    }
+
+    /// Allows the user to store string-keyed context data in the wallet subsystem runtime.
+    /// The context data persists only during the wallet instance runtime.
+    /// This can be useful if you have a front-end that connects to a
+    /// persistent wallet instance operating in the backend (such as a browser
+    /// extension popup connecting to the background page) and you need to store
+    /// any type of runtime data in the backend (but are limited to using only
+    /// the wallet interface).
    async fn retain_context_call(self: Arc<Self>, request: RetainContextRequest) -> Result<RetainContextResponse>;
 
+    /// Obtain context data stored using [`retain_context()`](Self::retain_context).
+    async fn get_context_call(self: Arc<Self>, request: GetContextRequest) -> Result<GetContextResponse>;
+
     /// Wrapper around [`get_status_call()`](Self::get_status_call).
     async fn get_status(self: Arc<Self>, name: Option<&str>) -> Result<GetStatusResponse> {
         Ok(self.get_status_call(GetStatusRequest { name: name.map(String::from) }).await?)
@@ -42,8 +58,12 @@ pub trait WalletApi: Send + Sync + AnySync {
     /// - `is_wrpc_client` - whether the wallet is connected to a node via wRPC
     async fn get_status_call(self: Arc<Self>, request: GetStatusRequest) -> Result<GetStatusResponse>;
 
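Together, `retain_context` and `get_context` act as a small string-keyed byte store scoped to the wallet instance runtime. A hedged sketch of the round trip, assuming a `wallet_api: Arc<dyn WalletApi>` handle; the key name and payload bytes are illustrative only:

```rust
// Sketch: round-tripping runtime context data through the WalletApi trait.
use kaspa_wallet_core::api::traits::WalletApi;
use kaspa_wallet_core::result::Result;
use std::sync::Arc;

async fn context_roundtrip(wallet_api: Arc<dyn WalletApi>) -> Result<()> {
    // Store opaque bytes under a string key; they live only for this wallet runtime.
    wallet_api.clone().retain_context("session", Some(b"opaque state".to_vec())).await?;
    // Later (e.g. from a reconnecting front-end), fetch them back by key.
    let restored = wallet_api.get_context("session").await?;
    assert_eq!(restored.as_deref(), Some(&b"opaque state"[..]));
    Ok(())
}
```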
-    async fn connect(self: Arc<Self>, url: Option<String>, network_id: NetworkId) -> Result<()> {
-        self.connect_call(ConnectRequest { url, network_id }).await?;
+    /// Synchronous connect call (blocking, single attempt, requires node sync).
+    async fn connect(self: Arc<Self>, url: Option<String>, network_id: &NetworkId) -> Result<()> {
+        let retry_on_error = false;
+        let block_async_connect = true;
+        let require_sync = true;
+        self.connect_call(ConnectRequest { url, network_id: *network_id, retry_on_error, block_async_connect, require_sync }).await?;
         Ok(())
     }
 
@@ -51,6 +71,7 @@ pub trait WalletApi: Send + Sync + AnySync {
     /// comprised of the `url` and a `network_id`.
     async fn connect_call(self: Arc<Self>, request: ConnectRequest) -> Result<ConnectResponse>;
 
+    /// Request the wallet RPC subsystem to disconnect from the node.
     async fn disconnect(self: Arc<Self>) -> Result<()> {
         self.disconnect_call(DisconnectRequest {}).await?;
         Ok(())
@@ -76,6 +97,7 @@ pub trait WalletApi: Send + Sync + AnySync {
     /// Ping the wallet service. Accepts an optional `u64` value that is returned in the response.
     async fn ping_call(self: Arc<Self>, request: PingRequest) -> Result<PingResponse>;
 
+    /// Wrapper around [`batch_call()`](Self::batch_call).
     async fn batch(self: Arc<Self>) -> Result<()> {
         self.batch_call(BatchRequest {}).await?;
         Ok(())
@@ -90,6 +112,7 @@ pub trait WalletApi: Send + Sync + AnySync {
     ///
     async fn batch_call(self: Arc<Self>, request: BatchRequest) -> Result<BatchResponse>;
 
+    /// Wrapper around [`flush_call()`](Self::flush_call).
     async fn flush(self: Arc<Self>, wallet_secret: Secret) -> Result<()> {
         self.flush_call(FlushRequest { wallet_secret }).await?;
         Ok(())
@@ -264,6 +287,7 @@ pub trait WalletApi: Send + Sync + AnySync {
     /// around this call.
     async fn accounts_rename_call(self: Arc<Self>, request: AccountsRenameRequest) -> Result<AccountsRenameResponse>;
 
+    /// Wrapper around [`accounts_select_call()`](Self::accounts_select_call).
     async fn accounts_select(self: Arc<Self>, account_id: Option<AccountId>) -> Result<()> {
         self.accounts_select_call(AccountsSelectRequest { account_id }).await?;
         Ok(())
@@ -400,6 +424,7 @@ pub trait WalletApi: Send + Sync + AnySync {
     async fn accounts_estimate_call(self: Arc<Self>, request: AccountsEstimateRequest) -> Result<AccountsEstimateResponse>;
 
     /// Get a range of transaction records for a specific account id.
+    /// Wrapper around [`transactions_data_get_call()`](Self::transactions_data_get_call).
     async fn transactions_data_get_range(
         self: Arc<Self>,
         account_id: AccountId,
@@ -409,8 +434,8 @@ pub trait WalletApi: Send + Sync + AnySync {
         self.transactions_data_get_call(TransactionsDataGetRequest::with_range(account_id, network_id, range)).await
     }
 
+    /// Get a range of transaction records for a specific account id.
     async fn transactions_data_get_call(self: Arc<Self>, request: TransactionsDataGetRequest) -> Result<TransactionsDataGetResponse>;
-    // async fn transaction_get_call(self: Arc<Self>, request: TransactionGetRequest) -> Result<TransactionGetResponse>;
 
     /// Replaces the note of a transaction with a new note. Note is meant
     /// to explicitly store a user-supplied string.
The note is treated @@ -435,6 +460,7 @@ pub trait WalletApi: Send + Sync + AnySync { request: TransactionsReplaceMetadataRequest, ) -> Result; + // TODO async fn address_book_enumerate_call( self: Arc, request: AddressBookEnumerateRequest, diff --git a/wallet/core/src/api/transport.rs b/wallet/core/src/api/transport.rs index 9f6485ec8a..c9e5f6de63 100644 --- a/wallet/core/src/api/transport.rs +++ b/wallet/core/src/api/transport.rs @@ -18,7 +18,7 @@ use crate::imports::*; use crate::result::Result; use crate::wallet::Wallet; use async_trait::async_trait; -use borsh::{BorshDeserialize, BorshSerialize}; +use borsh::BorshDeserialize; use kaspa_wallet_macros::{build_wallet_client_transport_interface, build_wallet_server_transport_interface}; use workflow_core::task::spawn; @@ -71,6 +71,7 @@ impl WalletApi for WalletClient { Disconnect, ChangeNetworkId, RetainContext, + GetContext, Batch, Flush, WalletEnumerate, @@ -118,7 +119,7 @@ pub trait EventHandler: Send + Sync { /// [`WalletServer`] is a server-side transport interface that declares /// API methods that can be invoked via Borsh or Serde messages containing -/// serializations created using the [`Transport`] interface. The [`WalletServer`] +/// serializations created using the [`Codec`] interface. The [`WalletServer`] /// is a counter-part to [`WalletClient`]. pub struct WalletServer { // pub wallet_api: Arc, @@ -147,6 +148,7 @@ impl WalletServer { Disconnect, ChangeNetworkId, RetainContext, + GetContext, Batch, Flush, WalletEnumerate, diff --git a/wallet/core/src/compat/gen1.rs b/wallet/core/src/compat/gen1.rs index cd66b10166..5bf5554f55 100644 --- a/wallet/core/src/compat/gen1.rs +++ b/wallet/core/src/compat/gen1.rs @@ -14,7 +14,7 @@ pub fn decrypt_mnemonic>( let mut aead = chacha20poly1305::XChaCha20Poly1305::new(Key::from_slice(&key)); let (nonce, ciphertext) = cipher.as_ref().split_at(24); - let decrypted = aead.decrypt(nonce.into(), ciphertext).unwrap(); + let decrypted = aead.decrypt(nonce.into(), ciphertext)?; Ok(unsafe { String::from_utf8_unchecked(decrypted) }) } @@ -36,8 +36,10 @@ mod test { ecdsa: false, }; - let decrypted = decrypt_mnemonic(8, file.encrypted_mnemonic, b"").unwrap(); - assert_eq!("dizzy uncover funny time weapon chat volume squirrel comic motion until diamond response remind hurt spider door strategy entire oyster hawk marriage soon fabric", decrypted); + let decrypted = decrypt_mnemonic(8, file.encrypted_mnemonic, b""); + log_info!("decrypted: {decrypted:?}"); + assert!(decrypted.is_ok(), "decrypt error"); + assert_eq!("dizzy uncover funny time weapon chat volume squirrel comic motion until diamond response remind hurt spider door strategy entire oyster hawk marriage soon fabric", decrypted.unwrap()); } #[tokio::test] diff --git a/wallet/core/src/compat/mod.rs b/wallet/core/src/compat/mod.rs index 79c8e11dd9..093b8845cc 100644 --- a/wallet/core/src/compat/mod.rs +++ b/wallet/core/src/compat/mod.rs @@ -1,3 +1,7 @@ +//! +//! Compatibility layer for legacy wallets. +//! + pub mod gen0; pub use gen0::*; pub mod gen1; diff --git a/wallet/core/src/cryptobox.rs b/wallet/core/src/cryptobox.rs index fa9b188f3b..83845eee99 100644 --- a/wallet/core/src/cryptobox.rs +++ b/wallet/core/src/cryptobox.rs @@ -1,3 +1,7 @@ +//! +//! Re-export of the `crypto_box` crate that can be used to encrypt and decrypt messages. +//! 
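For orientation, this is the two-party flow that the re-exported `crypto_box` crate provides. The sketch below drives the crate's own API directly (the same primitives the `CryptoBox` wrapper in this module builds on), not the wrapper's methods:

```rust
// Sketch: encrypt/decrypt round trip using the re-exported crypto_box crate.
use crypto_box::{
    aead::{Aead, AeadCore, OsRng},
    ChaChaBox, SecretKey,
};

fn crypto_box_roundtrip() -> Result<(), crypto_box::aead::Error> {
    let alice_secret = SecretKey::generate(&mut OsRng);
    let bob_secret = SecretKey::generate(&mut OsRng);

    // Alice encrypts for Bob using his public key and her secret key.
    let alice_box = ChaChaBox::new(&bob_secret.public_key(), &alice_secret);
    let nonce = ChaChaBox::generate_nonce(&mut OsRng);
    let ciphertext = alice_box.encrypt(&nonce, &b"hello kaspa"[..])?;

    // Bob decrypts with Alice's public key and his secret key.
    let bob_box = ChaChaBox::new(&alice_secret.public_key(), &bob_secret);
    let plaintext = bob_box.decrypt(&nonce, ciphertext.as_slice())?;
    assert_eq!(plaintext, b"hello kaspa".to_vec());
    Ok(())
}
```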
+ use crate::imports::*; use crypto_box::{ aead::{Aead, AeadCore, OsRng}, @@ -5,8 +9,13 @@ use crypto_box::{ }; pub use crypto_box::{PublicKey, SecretKey}; -// https://docs.rs/crypto_box/0.9.1/crypto_box/ - +/// +/// Primitives for encrypting and decrypting messages using the `crypto_box` crate. +/// This exists primarily for the purposes of [WASM bindings](crate::wasm::cryptobox::CryptoBox) +/// to allow access to the `crypto_box` encryption functionality from within web wallets. +/// +/// +/// pub struct CryptoBox { public_key: PublicKey, codec: ChaChaBox, diff --git a/wallet/core/src/derivation.rs b/wallet/core/src/derivation.rs index 15ad785035..2e598334e8 100644 --- a/wallet/core/src/derivation.rs +++ b/wallet/core/src/derivation.rs @@ -15,7 +15,7 @@ use crate::error::Error; use crate::imports::*; use crate::result::Result; use kaspa_bip32::{AddressType, DerivationPath, ExtendedPrivateKey, ExtendedPublicKey, Language, Mnemonic, SecretKeyExt}; -use kaspa_consensus_core::network::NetworkType; +use kaspa_consensus_core::network::{NetworkType, NetworkTypeT}; use kaspa_txscript::{ extract_script_pub_key_address, multisig_redeem_script, multisig_redeem_script_ecdsa, pay_to_script_hash_script, }; @@ -204,7 +204,7 @@ impl AddressDerivationManager { let derivator: Arc = match account_kind.as_ref() { LEGACY_ACCOUNT_KIND => Arc::new(WalletDerivationManagerV0::from_extended_public_key(xpub.clone(), cosigner_index)?), MULTISIG_ACCOUNT_KIND => { - let cosigner_index = cosigner_index.ok_or(Error::InvalidAccountKind)?; + let cosigner_index = cosigner_index.unwrap_or(0); Arc::new(WalletDerivationManager::from_extended_public_key(xpub.clone(), Some(cosigner_index))?) } _ => Arc::new(WalletDerivationManager::from_extended_public_key(xpub.clone(), cosigner_index)?), @@ -458,20 +458,26 @@ pub fn create_multisig_address( /// @category Wallet SDK #[wasm_bindgen(js_name=createAddress)] pub fn create_address_js( - key: PublicKeyT, - network_type: NetworkType, + key: &PublicKeyT, + network: &NetworkTypeT, ecdsa: Option, account_kind: Option, ) -> Result
{ let public_key = PublicKey::try_cast_from(key)?; - create_address(1, vec![public_key.as_ref().try_into()?], network_type.into(), ecdsa.unwrap_or(false), account_kind) + create_address( + 1, + vec![public_key.as_ref().try_into()?], + NetworkType::try_from(network)?.into(), + ecdsa.unwrap_or(false), + account_kind, + ) } /// @category Wallet SDK #[wasm_bindgen(js_name=createMultisigAddress)] pub fn create_multisig_address_js( minimum_signatures: usize, - keys: PublicKeyArrayT, + keys: &PublicKeyArrayT, network_type: NetworkType, ecdsa: Option, account_kind: Option, diff --git a/wallet/core/src/deterministic.rs b/wallet/core/src/deterministic.rs index c383da6012..648738a731 100644 --- a/wallet/core/src/deterministic.rs +++ b/wallet/core/src/deterministic.rs @@ -2,7 +2,7 @@ //! Deterministic byte sequence generation (used by Account ids). //! -pub use crate::account::{bip32, keypair, legacy, multisig}; +pub use crate::account::{bip32, bip32watch, keypair, legacy, multisig}; use crate::encryption::sha256_hash; use crate::imports::*; use crate::storage::PrvKeyDataId; @@ -101,7 +101,7 @@ where T: AsSlice + BorshSerialize, { let mut hashes: [Hash; N] = [Hash::default(); N]; - let bytes = hashable.try_to_vec().unwrap(); + let bytes = borsh::to_vec(&hashable).unwrap(); hashes[0] = Hash::from_slice(sha256_hash(&bytes).as_ref()); for i in 1..N { hashes[i] = Hash::from_slice(sha256_hash(&hashes[i - 1].as_bytes()).as_ref()); @@ -143,7 +143,23 @@ pub fn from_multisig(prv_key_data_ids: &Option( + prv_key_data_ids: &Option>>, + data: &bip32watch::Payload, +) -> [Hash; N] { + let hashable = DeterministicHashData { + account_kind: &multisig::MULTISIG_ACCOUNT_KIND.into(), + prv_key_data_ids, + ecdsa: Some(data.ecdsa), + account_index: None, + secp256k1_public_key: None, + data: Some(borsh::to_vec(&data.xpub_keys).unwrap()), }; make_hashes(hashable) } @@ -174,6 +190,19 @@ pub fn from_public_key(account_kind: &AccountKind, public_key: & make_hashes(hashable) } +/// Create deterministic hashes from bip32-watch. +pub fn from_bip32_watch(public_key: &PublicKey) -> [Hash; N] { + let hashable: DeterministicHashData<[PrvKeyDataId; 0]> = DeterministicHashData { + account_kind: &bip32watch::BIP32_WATCH_ACCOUNT_KIND.into(), + prv_key_data_ids: &None, + ecdsa: None, + account_index: Some(0), + secp256k1_public_key: Some(public_key.serialize().to_vec()), + data: None, + }; + make_hashes(hashable) +} + /// Create deterministic hashes from arbitrary data (supplied data slice must be deterministic). 
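The id scheme behind these `from_*` constructors boils down to the hash chain built by `make_hashes` above: the first hash commits to the Borsh-serialized `DeterministicHashData`, and each subsequent hash is the SHA-256 of its predecessor. A standalone illustration, using the `sha2` crate in place of the project's `sha256_hash` helper:

```rust
// Standalone illustration of the hash chain built by `make_hashes`:
// hashes[0] = SHA-256(borsh bytes), hashes[i] = SHA-256(hashes[i - 1]).
use sha2::{Digest, Sha256};

fn hash_chain<const N: usize>(serialized_payload: &[u8]) -> [[u8; 32]; N] {
    let mut hashes = [[0u8; 32]; N];
    if N > 0 {
        hashes[0] = Sha256::digest(serialized_payload).into();
    }
    for i in 1..N {
        hashes[i] = Sha256::digest(hashes[i - 1]).into();
    }
    hashes
}
```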
 pub fn from_data<const N: usize>(account_kind: &AccountKind, data: &[u8]) -> [Hash; N] {
     let hashable: DeterministicHashData<[PrvKeyDataId; 0]> = DeterministicHashData {
diff --git a/wallet/core/src/encryption.rs b/wallet/core/src/encryption.rs
index bee57a1f59..f07b49cec7 100644
--- a/wallet/core/src/encryption.rs
+++ b/wallet/core/src/encryption.rs
@@ -146,7 +146,7 @@ where
 }
 
     pub fn encrypt(&self, secret: &Secret, encryption_kind: EncryptionKind) -> Result<Encrypted> {
-        let bytes = self.0.try_to_vec()?;
+        let bytes = borsh::to_vec(&self.0)?;
         let encrypted = match encryption_kind {
             EncryptionKind::XChaCha20Poly1305 => encrypt_xchacha20poly1305(bytes.as_slice(), secret)?,
         };
diff --git a/wallet/core/src/error.rs b/wallet/core/src/error.rs
index a89b1dcf00..8992a8a924 100644
--- a/wallet/core/src/error.rs
+++ b/wallet/core/src/error.rs
@@ -13,6 +13,7 @@ use std::sync::PoisonError;
 use thiserror::Error;
 use wasm_bindgen::JsValue;
 use workflow_core::abortable::Aborted;
+use workflow_core::channel::{RecvError, SendError, TrySendError};
 use workflow_core::sendable::*;
 use workflow_rpc::client::error::Error as RpcError;
 use workflow_wasm::jserror::*;
@@ -186,7 +187,7 @@ pub enum Error {
     #[error("{0}")]
     TryFromEnum(#[from] workflow_core::enums::TryFromError),
 
-    #[error("Account factory found for type: {0}")]
+    #[error("Account factory not found for type: {0}")]
     AccountFactoryNotFound(AccountKind),
 
     #[error("Account not found: {0}")]
@@ -231,6 +232,12 @@ pub enum Error {
     #[error("Not allowed on a resident account")]
     ResidentAccount,
 
+    #[error("Not allowed on a bip32-watch account")]
+    Bip32WatchAccount,
+
+    #[error("At least one xpub is required for a bip32-watch account")]
+    Bip32WatchXpubRequired,
+
     #[error("This feature is not supported by this account type")]
     AccountKindFeature,
 
@@ -326,6 +333,14 @@ pub enum Error {
 
     #[error(transparent)]
     Metrics(#[from] kaspa_metrics_core::error::Error),
+
+    #[error("Connected node is not synced")]
+    NotSynced,
+    #[error(transparent)]
+    Pskt(#[from] kaspa_wallet_pskt::error::Error),
+
+    #[error("Error generating pending transaction from PSKT: {0}")]
+    PendingTransactionFromPSKTError(String),
 }
 
 impl From for Error {
@@ -409,8 +424,20 @@ impl From> for Error {
     }
 }
 
-impl<T> From<workflow_core::channel::SendError<T>> for Error {
-    fn from(e: workflow_core::channel::SendError<T>) -> Self {
+impl<T> From<SendError<T>> for Error {
+    fn from(e: SendError<T>) -> Self {
         Error::Custom(e.to_string())
     }
 }
+
+impl From<RecvError> for Error {
+    fn from(e: RecvError) -> Self {
+        Error::Custom(e.to_string())
+    }
+}
+
+impl<T> From<TrySendError<T>> for Error {
+    fn from(e: TrySendError<T>) -> Self {
         Error::Custom(e.to_string())
     }
 }
diff --git a/wallet/core/src/events.rs b/wallet/core/src/events.rs
index 63d7d5bcab..37816d8b20 100644
--- a/wallet/core/src/events.rs
+++ b/wallet/core/src/events.rs
@@ -245,6 +245,12 @@ impl Events {
     }
 }
 
+///
+/// Event kind representing [`Events`] variant.
+/// Used primarily by WASM bindings to identify event types
+/// by their string representation. Can be obtained from the
+/// event via [`Events::kind()`].
+///
 #[derive(Clone, Copy, Debug, Serialize, Eq, PartialEq, Hash)]
 #[serde(rename_all = "kebab-case")]
 pub enum EventKind {
diff --git a/wallet/core/src/factory.rs b/wallet/core/src/factory.rs
index c0472059c2..178e331e54 100644
--- a/wallet/core/src/factory.rs
+++ b/wallet/core/src/factory.rs
@@ -6,6 +6,7 @@ use crate::imports::*;
 use crate::result::Result;
 use std::sync::OnceLock;
 
+/// Wallet account loading factory.
 #[async_trait]
 pub trait Factory {
     fn name(&self) -> String;
@@ -22,6 +23,7 @@
 type FactoryMap = AHashMap<AccountKind, Arc<dyn Factory>>;
 static EXTERNAL: OnceLock<Mutex<FactoryMap>> = OnceLock::new();
 static INITIALIZED: AtomicBool = AtomicBool::new(false);
 
+/// Global factory registry accessor.
 pub fn factories() -> &'static FactoryMap {
     static FACTORIES: OnceLock<FactoryMap> = OnceLock::new();
     FACTORIES.get_or_init(|| {
@@ -32,6 +34,7 @@ pub fn factories() -> &'static FactoryMap {
         (LEGACY_ACCOUNT_KIND.into(), Arc::new(legacy::Ctor {})),
         (MULTISIG_ACCOUNT_KIND.into(), Arc::new(multisig::Ctor {})),
         (KEYPAIR_ACCOUNT_KIND.into(), Arc::new(keypair::Ctor {})),
+        (BIP32_WATCH_ACCOUNT_KIND.into(), Arc::new(bip32watch::Ctor {})),
     ];
 
     let external = EXTERNAL.get_or_init(|| Mutex::new(AHashMap::new())).lock().unwrap().clone();
@@ -40,6 +43,7 @@ pub fn factories() -> &'static FactoryMap {
     })
 }
 
+/// Registers a new global account factory.
 pub fn register(kind: AccountKind, factory: Arc<dyn Factory>) {
     if INITIALIZED.load(Ordering::Relaxed) {
         panic!("Factory registrations must occur before the framework initialization");
diff --git a/wallet/core/src/imports.rs b/wallet/core/src/imports.rs
index 2d2ce79fda..9129d8349f 100644
--- a/wallet/core/src/imports.rs
+++ b/wallet/core/src/imports.rs
@@ -17,7 +17,6 @@ pub use crate::rpc::Rpc;
 pub use crate::rpc::{DynRpcApi, RpcCtl};
 pub use crate::serializer::*;
 pub use crate::storage::*;
-pub use crate::tx::MassCombinationStrategy;
 pub use crate::utxo::balance::Balance;
 pub use crate::utxo::scan::{Scan, ScanExtent};
 pub use crate::utxo::{Maturity, NetworkParams, OutgoingTransaction, UtxoContext, UtxoEntryReference, UtxoProcessor};
@@ -25,7 +24,9 @@ pub use crate::wallet::*;
 pub use crate::{storage, utils};
 
 pub use ahash::{AHashMap, AHashSet};
-pub use async_std::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard};
+pub use async_std::sync::{
+    Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard, RwLock as AsyncRwLock, RwLockReadGuard as AsyncRwLockReadGuard,
+};
 pub use async_trait::async_trait;
 pub use borsh::{BorshDeserialize, BorshSerialize};
 pub use cfg_if::cfg_if;
@@ -49,6 +50,7 @@ pub use std::collections::{HashMap, HashSet};
 pub use std::pin::Pin;
 pub use std::str::FromStr;
 pub use std::sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering};
+pub use std::sync::LazyLock;
 pub use std::sync::{Arc, Mutex, MutexGuard, RwLock};
 pub use std::task::{Context, Poll};
 pub use wasm_bindgen::prelude::*;
diff --git a/wallet/core/src/lib.rs b/wallet/core/src/lib.rs
index 6029d6c4c4..09cc3ca7fc 100644
--- a/wallet/core/src/lib.rs
+++ b/wallet/core/src/lib.rs
@@ -1,16 +1,21 @@
 //!
-//! Kaspa Wallet Core - Multi-platform Rust framework for Kaspa Wallet.
+//! # Kaspa Wallet Core
+//!
+//! Multi-platform Rust framework for Kaspa Wallet.
 //!
 //! This framework provides a series of APIs and primitives
 //! to simplify building applications that interface with
 //! the Kaspa p2p network.
 //!
-//! Included are low-level primitives
+//! For key generation and derivation, please see the
+//! [`kaspa_wallet_keys`] crate.
+//!
+//! This crate includes low-level primitives
 //! such as [`UtxoProcessor`](crate::utxo::UtxoProcessor)
 //! and [`UtxoContext`](crate::utxo::UtxoContext) that provide
 //! various levels of automation as well as higher-level
 //! APIs such as [`Wallet`](crate::wallet::Wallet),
-//! [`Account`](crate::account::Account) (managed via
+//! [`Account`](crate::account::Account) (managed via the
 //! [`WalletApi`](crate::api::WalletApi) trait)
 //! that offer a fully-featured wallet implementation
 //!
backed by a multi-platform data storage layer capable of @@ -28,15 +33,28 @@ //! to satisfy the requested amount exceeds the maximum //! allowed transaction mass. //! +//! Key generation and derivation is available in the +//! [`kaspa_wallet_keys`] crate. +//! //! The framework can operate -//! within native Rust applications as well as within the NodeJS -//! and browser environments via WASM32. +//! within native Rust applications as well as within NodeJS, Bun +//! and browser environments via the WASM32 SDK. //! -//! For JavaScript / TypeScript environments, there are two +//! WASM32 SDK documentation is available at: +//! +//! +//! For NodeJS JavaScript and TypeScript environments, there are two //! available NPM modules: //! - //! - //! +//! NOTE: for security reasons (to mitigate potential upstream vendor +//! attacks) it is always recommended to build WASM SDK from source or +//! download pre-built redistributables. +//! +//! Latest development builds of the WASM32 SDK can be found at: +//! +//! //! The `kaspa-wasm` module is a pure WASM32 module that includes //! the entire wallet framework, but does not support RPC due to an absence //! of a native WebSocket in NodeJs environment, while @@ -54,36 +72,6 @@ extern crate alloc; extern crate self as kaspa_wallet_core; -// use cfg_if::cfg_if; - -// cfg_if! { -// if #[cfg(feature = "wasm32-core")] { -// // pub mod wasm; -// // pub use wasm::*; - -// pub mod account; -// pub mod api; -// pub mod compat; -// pub mod derivation; -// pub mod deterministic; -// pub mod encryption; -// pub mod error; -// pub mod events; -// pub mod factory; -// mod imports; -// pub mod message; -// pub mod prelude; -// pub mod result; -// pub mod rpc; -// pub mod serializer; -// pub mod settings; -// pub mod storage; -// pub mod tx; -// pub mod utils; -// pub mod utxo; -// pub mod wallet; - -// } else if #[cfg(any(feature = "wasm32-sdk", not(target_arch = "wasm32")))] { pub mod account; pub mod api; pub mod compat; @@ -107,9 +95,6 @@ pub mod tx; pub mod utils; pub mod utxo; pub mod wallet; -// } - -// } #[cfg(any(feature = "wasm32-sdk", feature = "wasm32-core"))] pub mod wasm; @@ -119,5 +104,10 @@ pub fn version() -> String { env!("CARGO_PKG_VERSION").to_string() } +/// Returns the version of the Wallet framework combined with short git hash. +pub fn version_with_git_hash() -> String { + kaspa_utils::git::with_short_hash(env!("CARGO_PKG_VERSION")).to_string() +} + #[cfg(test)] pub mod tests; diff --git a/wallet/core/src/message.rs b/wallet/core/src/message.rs index 160c8f0407..01dc78676b 100644 --- a/wallet/core/src/message.rs +++ b/wallet/core/src/message.rs @@ -5,6 +5,7 @@ use kaspa_hashes::{Hash, PersonalMessageSigningHash}; use secp256k1::{Error, XOnlyPublicKey}; +/// A personal message (text) that can be signed. #[derive(Clone)] pub struct PersonalMessage<'a>(pub &'a str); diff --git a/wallet/core/src/metrics.rs b/wallet/core/src/metrics.rs index 87a3f99131..b0edb1f885 100644 --- a/wallet/core/src/metrics.rs +++ b/wallet/core/src/metrics.rs @@ -1,6 +1,13 @@ +//! +//! Primitives for network metrics. +//! + use crate::imports::*; -// use kaspa_metrics_core::MetricsSnapshot; +/// Metrics posted by the wallet subsystem. +/// See [`UtxoProcessor::start_metrics`] to enable metrics processing. +/// This struct contains mempool size that can be used to estimate +/// current network congestion. 
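After this change `WalletMetrics` carries only the mempool size, so consuming the event reduces to a one-arm match. A minimal sketch:

```rust
// Sketch: extracting the mempool size from a MetricsUpdate event,
// e.g. to display a network congestion hint in a wallet UI.
use kaspa_wallet_core::metrics::MetricsUpdate;

fn current_mempool_size(update: &MetricsUpdate) -> u64 {
    match update {
        MetricsUpdate::WalletMetrics { mempool_size } => *mempool_size,
    }
}
```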
#[derive(Debug, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] #[serde(tag = "type", content = "data")] #[serde(rename_all = "kebab-case")] @@ -8,37 +15,19 @@ pub enum MetricsUpdate { WalletMetrics { #[serde(rename = "mempoolSize")] mempool_size: u64, - #[serde(rename = "nodePeers")] - node_peers: u32, - #[serde(rename = "networkTPS")] - network_tps: f64, }, - // NodeMetrics { - // snapshot : Box - // } } +/// [`MetricsUpdate`] variant identifier. #[derive(Debug, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] pub enum MetricsUpdateKind { WalletMetrics, - // NodeMetrics } impl MetricsUpdate { pub fn kind(&self) -> MetricsUpdateKind { match self { MetricsUpdate::WalletMetrics { .. } => MetricsUpdateKind::WalletMetrics, - // MetricsUpdate::NodeMetrics { .. } => MetricsUpdateKind::NodeMetrics } } } - -// impl MetricsUpdate { -// pub fn wallet_metrics(mempool_size: u64, peers: usize) -> Self { -// MetricsUpdate::WalletMetrics { mempool_size, peers } -// } - -// pub fn node_metrics(snapshot: MetricsSnapshot) -> Self { -// MetricsUpdate::NodeMetrics(Box::new(snapshot)) -// } -// } diff --git a/wallet/core/src/prelude.rs b/wallet/core/src/prelude.rs index eb9bb2b1e9..741ea0b1e9 100644 --- a/wallet/core/src/prelude.rs +++ b/wallet/core/src/prelude.rs @@ -1,6 +1,5 @@ //! -//! A module which is typically glob imported. -//! Contains most commonly used imports. +//! Re-exports of the most commonly used types and traits in this crate. //! pub use crate::account::descriptor::AccountDescriptor; @@ -14,9 +13,14 @@ pub use crate::rpc::{ConnectOptions, ConnectStrategy, DynRpcApi}; pub use crate::settings::WalletSettings; pub use crate::storage::{IdT, Interface, PrvKeyDataId, PrvKeyDataInfo, TransactionId, TransactionRecord, WalletDescriptor}; pub use crate::tx::{Fees, PaymentDestination, PaymentOutput, PaymentOutputs}; +pub use crate::utils::{ + kaspa_suffix, kaspa_to_sompi, sompi_to_kaspa, sompi_to_kaspa_string, sompi_to_kaspa_string_with_suffix, try_kaspa_str_to_sompi, + try_kaspa_str_to_sompi_i64, +}; pub use crate::utxo::balance::{Balance, BalanceStrings}; pub use crate::wallet::args::*; pub use crate::wallet::Wallet; +pub use async_std::sync::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard}; pub use kaspa_addresses::{Address, Prefix as AddressPrefix}; pub use kaspa_bip32::{Language, Mnemonic, WordCount}; pub use kaspa_wallet_keys::secret::Secret; diff --git a/wallet/core/src/rpc.rs b/wallet/core/src/rpc.rs index 999e09e302..b75f4b5b28 100644 --- a/wallet/core/src/rpc.rs +++ b/wallet/core/src/rpc.rs @@ -6,13 +6,17 @@ use std::sync::Arc; pub use kaspa_rpc_core::api::ctl::RpcCtl; pub use kaspa_rpc_core::api::rpc::RpcApi; -pub type DynRpcApi = dyn RpcApi; -pub type NotificationChannel = kaspa_utils::channel::Channel; pub use kaspa_rpc_core::notify::mode::NotificationMode; pub use kaspa_wrpc_client::client::{ConnectOptions, ConnectStrategy}; pub use kaspa_wrpc_client::Resolver; pub use kaspa_wrpc_client::WrpcEncoding; +/// Type alias for [`dyn RpcApi`](RpcApi). +pub type DynRpcApi = dyn RpcApi; +/// Type alias for a concrete [`Channel`](kaspa_utils::channel::Channel) +/// used for handling RPC [`Notification`](kaspa_rpc_core::Notification) events. +pub type NotificationChannel = kaspa_utils::channel::Channel; + /// RPC adaptor class that holds the [`RpcApi`] /// and [`RpcCtl`] instances. 
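The sompi conversion helpers re-exported by the prelude above follow the fixed 1 KAS = 100,000,000 sompi ratio. A sketch under assumed signatures (KAS amounts as `f64`, sompi as `u64`), so treat the exact types as illustrative:

```rust
// Sketch: KAS <-> sompi conversions via the prelude re-exports above.
// Signatures are assumed; 1 KAS = 100_000_000 sompi.
use kaspa_wallet_core::prelude::{kaspa_to_sompi, sompi_to_kaspa};

fn conversion_demo() {
    let sompi = kaspa_to_sompi(1.5);
    assert_eq!(sompi, 150_000_000);
    let kas = sompi_to_kaspa(sompi);
    assert!((kas - 1.5).abs() < 1e-9);
}
```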
#[derive(Clone)] diff --git a/wallet/core/src/serializer.rs b/wallet/core/src/serializer.rs index 391f813fa1..d7451a3c20 100644 --- a/wallet/core/src/serializer.rs +++ b/wallet/core/src/serializer.rs @@ -59,8 +59,8 @@ impl BorshSerialize for StorageHeader { } impl BorshDeserialize for StorageHeader { - fn deserialize(buf: &mut &[u8]) -> std::io::Result { - let (magic, version): (u32, u32) = BorshDeserialize::deserialize(buf)?; + fn deserialize_reader(reader: &mut R) -> std::io::Result { + let (magic, version): (u32, u32) = BorshDeserialize::deserialize_reader(reader)?; Ok(Self { magic, version }) } } diff --git a/wallet/core/src/settings.rs b/wallet/core/src/settings.rs index 35fde44866..f861c5a134 100644 --- a/wallet/core/src/settings.rs +++ b/wallet/core/src/settings.rs @@ -13,6 +13,7 @@ use std::path::PathBuf; use workflow_core::enums::Describe; use workflow_store::fs; +/// Wallet settings enumeration. #[derive(Describe, Debug, Clone, Serialize, Deserialize, Hash, Eq, PartialEq, Ord, PartialOrd)] #[serde(rename_all = "lowercase")] pub enum WalletSettings { @@ -36,6 +37,8 @@ pub trait DefaultSettings: Sized { async fn defaults() -> Vec<(Self, Value)>; } +/// Platform neutral settings store (stores the settings K:V map +/// in a file or the browser `localStorage`). #[derive(Debug, Clone)] pub struct SettingsStore where @@ -170,10 +173,12 @@ where } } +/// Returns the wallet data storage folder `~/.kaspa`. pub fn application_folder() -> Result { Ok(fs::resolve_path(storage::local::default_storage_folder())?) } +/// If missing, creates the wallet data storage folder `~/.kaspa`. pub async fn ensure_application_folder() -> Result<()> { let path = application_folder()?; log_info!("Creating application folder: `{}`", path.display()); diff --git a/wallet/core/src/storage/account.rs b/wallet/core/src/storage/account.rs index da0c2df32b..a33585a255 100644 --- a/wallet/core/src/storage/account.rs +++ b/wallet/core/src/storage/account.rs @@ -26,10 +26,10 @@ impl BorshSerialize for AccountSettings { } impl BorshDeserialize for AccountSettings { - fn deserialize(buf: &mut &[u8]) -> IoResult { - let _version: u32 = BorshDeserialize::deserialize(buf)?; - let name = BorshDeserialize::deserialize(buf)?; - let meta = BorshDeserialize::deserialize(buf)?; + fn deserialize_reader(reader: &mut R) -> IoResult { + let _version: u32 = BorshDeserialize::deserialize_reader(reader)?; + let name = BorshDeserialize::deserialize_reader(reader)?; + let meta = BorshDeserialize::deserialize_reader(reader)?; Ok(Self { name, meta }) } @@ -63,7 +63,7 @@ impl AccountStorage { where A: AccountStorable, { - Ok(Self { id: *id, storage_key: *storage_key, kind, prv_key_data_ids, settings, serialized: serialized.try_to_vec()? }) + Ok(Self { id: *id, storage_key: *storage_key, kind, prv_key_data_ids, settings, serialized: borsh::to_vec(&serialized)? }) } pub fn id(&self) -> &AccountId { @@ -107,16 +107,16 @@ impl BorshSerialize for AccountStorage { } impl BorshDeserialize for AccountStorage { - fn deserialize(buf: &mut &[u8]) -> IoResult { + fn deserialize_reader(reader: &mut R) -> IoResult { let StorageHeader { version: _, .. 
} = - StorageHeader::deserialize(buf)?.try_magic(Self::STORAGE_MAGIC)?.try_version(Self::STORAGE_VERSION)?; - - let kind = BorshDeserialize::deserialize(buf)?; - let id = BorshDeserialize::deserialize(buf)?; - let storage_key = BorshDeserialize::deserialize(buf)?; - let prv_key_data_ids = BorshDeserialize::deserialize(buf)?; - let settings = BorshDeserialize::deserialize(buf)?; - let serialized = BorshDeserialize::deserialize(buf)?; + StorageHeader::deserialize_reader(reader)?.try_magic(Self::STORAGE_MAGIC)?.try_version(Self::STORAGE_VERSION)?; + + let kind = BorshDeserialize::deserialize_reader(reader)?; + let id = BorshDeserialize::deserialize_reader(reader)?; + let storage_key = BorshDeserialize::deserialize_reader(reader)?; + let prv_key_data_ids = BorshDeserialize::deserialize_reader(reader)?; + let settings = BorshDeserialize::deserialize_reader(reader)?; + let serialized = BorshDeserialize::deserialize_reader(reader)?; Ok(Self { kind, id, storage_key, prv_key_data_ids, settings, serialized }) } diff --git a/wallet/core/src/storage/binding.rs b/wallet/core/src/storage/binding.rs index 45eac4a7e9..18f988e2d2 100644 --- a/wallet/core/src/storage/binding.rs +++ b/wallet/core/src/storage/binding.rs @@ -5,6 +5,45 @@ use crate::imports::*; use crate::utxo::{UtxoContextBinding as UtxoProcessorBinding, UtxoContextId}; +#[wasm_bindgen(typescript_custom_section)] +const ITransactionRecord: &'static str = r#" + +/** + * Type of a binding record. + * @see {@link IBinding}, {@link ITransactionDataVariant}, {@link ITransactionRecord} + * @category Wallet SDK + */ +export enum BindingType { + /** + * The data structure is associated with a user-supplied id. + * @see {@link IBinding} + */ + Custom = "custom", + /** + * The data structure is associated with a wallet account. + * @see {@link IBinding}, {@link Account} + */ + Account = "account", +} + +/** + * Internal transaction data contained within the transaction record. + * @see {@link ITransactionRecord} + * @category Wallet SDK + */ +export interface IBinding { + type : BindingType; + data : HexString; +} +"#; + +#[wasm_bindgen] +extern "C" { + #[wasm_bindgen(extends = Object, typescript_type = "IBinding")] + #[derive(Clone, Debug, PartialEq, Eq)] + pub type BindingT; +} + #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] #[serde(rename_all = "kebab-case")] #[serde(tag = "type", content = "id")] diff --git a/wallet/core/src/storage/keydata/data.rs b/wallet/core/src/storage/keydata/data.rs index 37a734854c..5b480d937c 100644 --- a/wallet/core/src/storage/keydata/data.rs +++ b/wallet/core/src/storage/keydata/data.rs @@ -44,11 +44,12 @@ impl BorshSerialize for PrvKeyDataVariant { } impl BorshDeserialize for PrvKeyDataVariant { - fn deserialize(buf: &mut &[u8]) -> IoResult { - let StorageHeader { version: _, .. } = StorageHeader::deserialize(buf)?.try_magic(Self::MAGIC)?.try_version(Self::VERSION)?; + fn deserialize_reader(reader: &mut R) -> IoResult { + let StorageHeader { version: _, .. 
} = + StorageHeader::deserialize_reader(reader)?.try_magic(Self::MAGIC)?.try_version(Self::VERSION)?; - let kind: PrvKeyDataVariantKind = BorshDeserialize::deserialize(buf)?; - let string: String = BorshDeserialize::deserialize(buf)?; + let kind: PrvKeyDataVariantKind = BorshDeserialize::deserialize_reader(reader)?; + let string: String = BorshDeserialize::deserialize_reader(reader)?; match kind { PrvKeyDataVariantKind::Mnemonic => Ok(Self::Mnemonic(string)), diff --git a/wallet/core/src/storage/local/interface.rs b/wallet/core/src/storage/local/interface.rs index 1e998eb61f..c4ada71e07 100644 --- a/wallet/core/src/storage/local/interface.rs +++ b/wallet/core/src/storage/local/interface.rs @@ -131,7 +131,7 @@ impl LocalStoreInner { async fn try_export(&self, wallet_secret: &Secret, _options: WalletExportOptions) -> Result> { let wallet = self.cache.read().unwrap().to_wallet(None, wallet_secret)?; - Ok(wallet.try_to_vec()?) + Ok(borsh::to_vec(&wallet)?) } fn storage(&self) -> Arc { diff --git a/wallet/core/src/storage/local/payload.rs b/wallet/core/src/storage/local/payload.rs index 2cc5c9091e..8424dd4e19 100644 --- a/wallet/core/src/storage/local/payload.rs +++ b/wallet/core/src/storage/local/payload.rs @@ -67,13 +67,13 @@ impl BorshSerialize for Payload { } impl BorshDeserialize for Payload { - fn deserialize(buf: &mut &[u8]) -> IoResult { + fn deserialize_reader(reader: &mut R) -> IoResult { let StorageHeader { version: _, .. } = - StorageHeader::deserialize(buf)?.try_magic(Self::STORAGE_MAGIC)?.try_version(Self::STORAGE_VERSION)?; - let prv_key_data = BorshDeserialize::deserialize(buf)?; - let accounts = BorshDeserialize::deserialize(buf)?; - let address_book = BorshDeserialize::deserialize(buf)?; - let encrypt_transactions = BorshDeserialize::deserialize(buf)?; + StorageHeader::deserialize_reader(reader)?.try_magic(Self::STORAGE_MAGIC)?.try_version(Self::STORAGE_VERSION)?; + let prv_key_data = BorshDeserialize::deserialize_reader(reader)?; + let accounts = BorshDeserialize::deserialize_reader(reader)?; + let address_book = BorshDeserialize::deserialize_reader(reader)?; + let encrypt_transactions = BorshDeserialize::deserialize_reader(reader)?; Ok(Self { prv_key_data, accounts, address_book, encrypt_transactions }) } diff --git a/wallet/core/src/storage/local/transaction/fsio.rs b/wallet/core/src/storage/local/transaction/fsio.rs index ac44a136d7..a57a440112 100644 --- a/wallet/core/src/storage/local/transaction/fsio.rs +++ b/wallet/core/src/storage/local/transaction/fsio.rs @@ -65,7 +65,10 @@ impl TransactionStore { match fs::readdir(folder, true).await { Ok(mut files) => { // we reverse the order of the files so that the newest files are first - files.sort_by_key(|f| std::cmp::Reverse(f.metadata().unwrap().created())); + files.sort_by_key(|f| { + let meta = f.metadata().expect("fsio: missing file metadata"); + std::cmp::Reverse(meta.created().or_else(|| meta.modified()).unwrap_or_default()) + }); for file in files { if let Ok(id) = TransactionId::from_hex(file.file_name()) { @@ -115,7 +118,7 @@ impl TransactionRecordStore for TransactionStore { let mut transactions = vec![]; for id in ids { - let path = folder.join(&id.to_hex()); + let path = folder.join(id.to_hex()); match read(&path, None).await { Ok(tx) => { transactions.push(Arc::new(tx)); @@ -144,7 +147,7 @@ impl TransactionRecordStore for TransactionStore { let mut located = 0; for id in ids { - let path = folder.join(&id.to_hex()); + let path = folder.join(id.to_hex()); match read(&path, None).await { Ok(tx) => { @@ -167,7 
+170,7 @@ impl TransactionRecordStore for TransactionStore { let iter = ids.iter().skip(range.start).take(range.len()); for id in iter { - let path = folder.join(&id.to_hex()); + let path = folder.join(id.to_hex()); match read(&path, None).await { Ok(tx) => { transactions.push(Arc::new(tx)); @@ -315,6 +318,6 @@ async fn write(path: &Path, record: &TransactionRecord, secret: Option<&Secret>, } else { Encryptable::from(record.clone()) }; - fs::write(path, &data.try_to_vec()?).await?; + fs::write(path, &borsh::to_vec(&data)?).await?; Ok(()) } diff --git a/wallet/core/src/storage/local/transaction/indexdb.rs b/wallet/core/src/storage/local/transaction/indexdb.rs index e4190aa1ff..463508f611 100644 --- a/wallet/core/src/storage/local/transaction/indexdb.rs +++ b/wallet/core/src/storage/local/transaction/indexdb.rs @@ -26,35 +26,104 @@ pub struct Inner { impl Inner { async fn open_db(&self, db_name: String) -> Result { call_async_no_send!(async move { - let mut db_req: OpenDbRequest = IdbDatabase::open_u32(&db_name, 1) + let mut db_req: OpenDbRequest = IdbDatabase::open_u32(&db_name, 2) .map_err(|err| Error::Custom(format!("Failed to open indexdb database {:?}", err)))?; - - fn on_upgrade_needed(evt: &IdbVersionChangeEvent) -> Result<(), JsValue> { - // Check if the object store exists; create it if it doesn't - if !evt.db().object_store_names().any(|n| n == TRANSACTIONS_STORE_NAME) { + let fix_timestamp = Arc::new(Mutex::new(false)); + let fix_timestamp_clone = fix_timestamp.clone(); + let on_upgrade_needed = move |evt: &IdbVersionChangeEvent| -> Result<(), JsValue> { + let old_version = evt.old_version(); + if old_version < 1.0 { let object_store = evt.db().create_object_store(TRANSACTIONS_STORE_NAME)?; + let db_index_params = IdbIndexParameters::new(); + db_index_params.set_unique(true); object_store.create_index_with_params( TRANSACTIONS_STORE_ID_INDEX, &IdbKeyPath::str(TRANSACTIONS_STORE_ID_INDEX), - IdbIndexParameters::new().unique(true), + &db_index_params, )?; object_store.create_index_with_params( TRANSACTIONS_STORE_TIMESTAMP_INDEX, &IdbKeyPath::str(TRANSACTIONS_STORE_TIMESTAMP_INDEX), - IdbIndexParameters::new().unique(false), + &db_index_params, )?; object_store.create_index_with_params( TRANSACTIONS_STORE_DATA_INDEX, &IdbKeyPath::str(TRANSACTIONS_STORE_DATA_INDEX), - IdbIndexParameters::new().unique(false), + &db_index_params, )?; + + // these changes are not required for new db + } else if old_version < 2.0 { + *fix_timestamp_clone.lock().unwrap() = true; } + // // Check if the object store exists; create it if it doesn't + // if !evt.db().object_store_names().any(|n| n == TRANSACTIONS_STORE_NAME) { + + // } Ok(()) - } + }; db_req.set_on_upgrade_needed(Some(on_upgrade_needed)); - db_req.await.map_err(|err| Error::Custom(format!("Open database request failed for indexdb database {:?}", err))) + let db = + db_req.await.map_err(|err| Error::Custom(format!("Open database request failed for indexdb database {:?}", err)))?; + + if *fix_timestamp.lock().unwrap() { + log_info!("DEBUG: fixing timestamp"); + let idb_tx = db + .transaction_on_one_with_mode(TRANSACTIONS_STORE_NAME, IdbTransactionMode::Readwrite) + .map_err(|err| Error::Custom(format!("Failed to open indexdb transaction for reading {:?}", err)))?; + let store = idb_tx + .object_store(TRANSACTIONS_STORE_NAME) + .map_err(|err| Error::Custom(format!("Failed to open indexdb object store for reading {:?}", err)))?; + let binding = store + .index(TRANSACTIONS_STORE_TIMESTAMP_INDEX) + .map_err(|err| Error::Custom(format!("Failed to 
open indexdb indexed store cursor {:?}", err)))?; + let cursor = binding + .open_cursor_with_range_and_direction(&JsValue::NULL, web_sys::IdbCursorDirection::Prev) + .map_err(|err| Error::Custom(format!("Failed to open indexdb store cursor for reading {:?}", err)))?; + let cursor = cursor.await.map_err(|err| Error::Custom(format!("Failed to open indexdb store cursor {:?}", err)))?; + + // let next_year_date = Date::new_0(); + // next_year_date.set_full_year(next_year_date.get_full_year() + 1); + // let next_year_ts = next_year_date.get_time(); + + if let Some(cursor) = cursor { + loop { + let js_value = cursor.value(); + if let Ok(record) = transaction_record_from_js_value(&js_value, None) { + if record.unixtime_msec.is_some() { + let new_js_value = transaction_record_to_js_value(&record, None, ENCRYPTION_KIND)?; + + //log_info!("DEBUG: new_js_value: {:?}", new_js_value); + + cursor + .update(&new_js_value) + .map_err(|err| Error::Custom(format!("Failed to update record timestamp {:?}", err)))? + .await + .map_err(|err| Error::Custom(format!("Failed to update record timestamp {:?}", err)))?; + } + } + if let Ok(b) = cursor.continue_cursor() { + match b.await { + Ok(b) => { + if !b { + break; + } + } + Err(err) => { + log_info!("DEBUG IDB: Loading transaction error, cursor.continue_cursor() {:?}", err); + break; + } + } + } else { + break; + } + } + } + } + + Ok(db) }) } } @@ -218,36 +287,73 @@ impl TransactionRecordStore for TransactionStore { binding: &Binding, network_id: &NetworkId, _filter: Option>, - _range: std::ops::Range, + range: std::ops::Range, ) -> Result { - // log_info!("DEBUG IDB: Loading transaction records for range {:?}", _range); - + log_info!("DEBUG IDB: Loading transaction records for range {:?}", range); let binding_str = binding.to_hex(); let network_id_str = network_id.to_string(); let db_name = self.make_db_name(&binding_str, &network_id_str); - let inner = self.inner().clone(); - call_async_no_send!(async move { let db = inner.open_db(db_name).await?; - let idb_tx = db .transaction_on_one_with_mode(TRANSACTIONS_STORE_NAME, IdbTransactionMode::Readonly) .map_err(|err| Error::Custom(format!("Failed to open indexdb transaction for reading {:?}", err)))?; - let store = idb_tx .object_store(TRANSACTIONS_STORE_NAME) .map_err(|err| Error::Custom(format!("Failed to open indexdb object store for reading {:?}", err)))?; - - let array = store - .get_all() - .map_err(|err| Error::Custom(format!("Failed to get transaction record from indexdb {:?}", err)))? + let total = store + .count() + .map_err(|err| Error::Custom(format!("Failed to count indexdb records {:?}", err)))? 
.await - .map_err(|err| Error::Custom(format!("Failed to get transaction record from indexdb {:?}", err)))?; - - let transactions = array + .map_err(|err| Error::Custom(format!("Failed to count indexdb records from future {:?}", err)))?; + + let binding = store + .index(TRANSACTIONS_STORE_TIMESTAMP_INDEX) + .map_err(|err| Error::Custom(format!("Failed to open indexdb indexed store cursor {:?}", err)))?; + let cursor = binding + .open_cursor_with_range_and_direction(&JsValue::NULL, web_sys::IdbCursorDirection::Prev) + .map_err(|err| Error::Custom(format!("Failed to open indexdb store cursor for reading {:?}", err)))?; + let mut records = vec![]; + let cursor = cursor.await.map_err(|err| Error::Custom(format!("Failed to open indexdb store cursor {:?}", err)))?; + if let Some(cursor) = cursor { + if range.start > 0 { + let res = cursor + .advance(range.start as u32) + .map_err(|err| Error::Custom(format!("Unable to advance indexdb cursor {:?}", err)))? + .await; + let _res = res.map_err(|err| Error::Custom(format!("Unable to advance indexdb cursor future {:?}", err)))?; + // if !res { + // //return Err(Error::Custom(format!("Unable to advance indexdb cursor future {:?}", err))); + // } + } + let count = range.end - range.start; + loop { + if records.len() < count { + records.push(cursor.value()); + if let Ok(b) = cursor.continue_cursor() { + match b.await { + Ok(b) => { + if !b { + break; + } + } + Err(err) => { + log_info!("DEBUG IDB: Loading transaction error, cursor.continue_cursor() {:?}", err); + break; + } + } + } else { + break; + } + } else { + break; + } + } + } + let transactions = records .iter() - .filter_map(|js_value| match transaction_record_from_js_value(&js_value, None) { + .filter_map(|js_value| match transaction_record_from_js_value(js_value, None) { Ok(transaction_record) => Some(Arc::new(transaction_record)), Err(err) => { log_error!("Failed to deserialize transaction record from indexdb {:?}", err); @@ -256,8 +362,7 @@ impl TransactionRecordStore for TransactionStore { }) .collect::>(); - let total = transactions.len() as u64; - Ok(TransactionRangeResult { transactions, total }) + Ok(TransactionRangeResult { transactions, total: total.into() }) }) } @@ -285,7 +390,7 @@ impl TransactionRecordStore for TransactionStore { let inner = inner_guard.lock().unwrap().clone(); call_async_no_send!(async move { - for (db_name, items) in &items.into_iter().group_by(|item| item.db_name.clone()) { + for (db_name, items) in &items.into_iter().chunk_by(|item| item.db_name.clone()) { let db = inner.open_db(db_name).await?; let idb_tx = db @@ -474,16 +579,14 @@ fn transaction_record_to_js_value( ) -> Result { let id = transaction_record.id.to_string(); let unixtime_msec = transaction_record.unixtime_msec; - let mut borsh_data = vec![]; - ::serialize(transaction_record, &mut borsh_data)?; let id_js_value = JsValue::from_str(&id); let timestamp_js_value = match unixtime_msec { Some(unixtime_msec) => { - let unixtime_sec = (unixtime_msec / 1000) as u32; + //let unixtime_sec = (unixtime_msec / 1000) as u32; let date = Date::new_0(); - date.set_utc_seconds(unixtime_sec); + date.set_time(unixtime_msec as f64); date.into() } None => JsValue::NULL, @@ -494,7 +597,7 @@ fn transaction_record_to_js_value( } else { Encryptable::from(transaction_record.clone()) }; - let encryped_data_vec = encryped_data.try_to_vec()?; + let encryped_data_vec = borsh::to_vec(&encryped_data)?; let borsh_data_uint8_arr = Uint8Array::from(encryped_data_vec.as_slice()); let borsh_data_js_value = borsh_data_uint8_arr.into(); 
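The `borsh::to_vec` call above is part of the borsh 0.x to 1.x migration that runs through this whole patch: `try_to_vec()` becomes the free function `borsh::to_vec`, and hand-written deserializers move from `deserialize(&mut &[u8])` to `deserialize_reader` over any `std::io::Read`. A self-contained round-trip sketch of the new surface (the struct and magic value are illustrative, mirroring the `StorageHeader` pattern):

```rust
// Sketch of the borsh 1.x API surface used throughout this patch.
use borsh::{BorshDeserialize, BorshSerialize};

#[derive(BorshSerialize, BorshDeserialize, Debug, PartialEq)]
struct Header {
    magic: u32,
    version: u32,
}

fn borsh_roundtrip() -> std::io::Result<()> {
    let header = Header { magic: 0x92014137, version: 0 };
    let bytes = borsh::to_vec(&header)?; // replaces header.try_to_vec()
    // try_from_slice drives deserialize_reader under the hood.
    let restored = Header::try_from_slice(&bytes)?;
    assert_eq!(header, restored);
    Ok(())
}
```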
@@ -519,6 +622,6 @@ fn transaction_record_from_js_value(js_value: &JsValue, secret: Option<&Secret>)
 
         Ok(transaction_record.0)
     } else {
-        Err(Error::Custom("supplied argument must be an object".to_string()))
+        Err(Error::Custom(format!("supplied argument must be an object, found ({js_value:?})")))
     }
 }
diff --git a/wallet/core/src/storage/local/wallet.rs b/wallet/core/src/storage/local/wallet.rs
index afea36ad37..482ecd70ff 100644
--- a/wallet/core/src/storage/local/wallet.rs
+++ b/wallet/core/src/storage/local/wallet.rs
@@ -61,7 +61,7 @@ impl WalletStorage {
 
         cfg_if! {
             if #[cfg(target_arch = "wasm32")] {
-                let serialized = BorshSerialize::try_to_vec(self)?;
+                let serialized = borsh::to_vec(self)?;
                 fs::write(store.filename(), serialized.as_slice()).await?;
             } else {
                 // make this platform-specific to avoid creating
@@ -101,8 +101,8 @@ impl BorshSerialize for WalletStorage {
 }
 
 impl BorshDeserialize for WalletStorage {
-    fn deserialize(buf: &mut &[u8]) -> IoResult<Self> {
-        let StorageHeader { magic, version, .. } = StorageHeader::deserialize(buf)?;
+    fn deserialize_reader<R: std::io::Read>(reader: &mut R) -> IoResult<Self> {
+        let StorageHeader { magic, version, .. } = StorageHeader::deserialize_reader(reader)?;
 
         if magic != Self::STORAGE_MAGIC {
             return Err(IoError::new(
@@ -118,12 +118,12 @@ impl BorshDeserialize for WalletStorage {
             ));
         }
 
-        let title = BorshDeserialize::deserialize(buf)?;
-        let user_hint = BorshDeserialize::deserialize(buf)?;
-        let encryption_kind = BorshDeserialize::deserialize(buf)?;
-        let payload = BorshDeserialize::deserialize(buf)?;
-        let metadata = BorshDeserialize::deserialize(buf)?;
-        let transactions = BorshDeserialize::deserialize(buf)?;
+        let title = BorshDeserialize::deserialize_reader(reader)?;
+        let user_hint = BorshDeserialize::deserialize_reader(reader)?;
+        let encryption_kind = BorshDeserialize::deserialize_reader(reader)?;
+        let payload = BorshDeserialize::deserialize_reader(reader)?;
+        let metadata = BorshDeserialize::deserialize_reader(reader)?;
+        let transactions = BorshDeserialize::deserialize_reader(reader)?;
 
         Ok(Self { title, user_hint, encryption_kind, payload, metadata, transactions })
     }
diff --git a/wallet/core/src/storage/metadata.rs b/wallet/core/src/storage/metadata.rs
index 0eacb902d0..d421a17edd 100644
--- a/wallet/core/src/storage/metadata.rs
+++ b/wallet/core/src/storage/metadata.rs
@@ -47,12 +47,12 @@ impl BorshSerialize for AccountMetadata {
 }
 
 impl BorshDeserialize for AccountMetadata {
-    fn deserialize(buf: &mut &[u8]) -> IoResult<Self> {
+    fn deserialize_reader<R: std::io::Read>(reader: &mut R) -> IoResult<Self> {
        let StorageHeader { version: _, ..
} = - StorageHeader::deserialize(buf)?.try_magic(Self::STORAGE_MAGIC)?.try_version(Self::STORAGE_VERSION)?; + StorageHeader::deserialize_reader(reader)?.try_magic(Self::STORAGE_MAGIC)?.try_version(Self::STORAGE_VERSION)?; - let id = BorshDeserialize::deserialize(buf)?; - let indexes = BorshDeserialize::deserialize(buf)?; + let id = BorshDeserialize::deserialize_reader(reader)?; + let indexes = BorshDeserialize::deserialize_reader(reader)?; Ok(Self { id, indexes }) } diff --git a/wallet/core/src/storage/mod.rs b/wallet/core/src/storage/mod.rs index 21b30186a6..2516bcd483 100644 --- a/wallet/core/src/storage/mod.rs +++ b/wallet/core/src/storage/mod.rs @@ -18,7 +18,7 @@ pub mod transaction; pub use account::{AccountSettings, AccountStorable, AccountStorage}; pub use address::AddressBookEntry; -pub use binding::Binding; +pub use binding::{Binding, BindingT}; pub use hint::Hint; pub use id::IdT; pub use interface::{ diff --git a/wallet/core/src/storage/transaction/data.rs b/wallet/core/src/storage/transaction/data.rs index 51fff3df80..e976574fd1 100644 --- a/wallet/core/src/storage/transaction/data.rs +++ b/wallet/core/src/storage/transaction/data.rs @@ -282,42 +282,42 @@ impl BorshSerialize for TransactionData { } impl BorshDeserialize for TransactionData { - fn deserialize(buf: &mut &[u8]) -> IoResult { + fn deserialize_reader(reader: &mut R) -> IoResult { let StorageHeader { version: _, .. } = - StorageHeader::deserialize(buf)?.try_magic(Self::STORAGE_MAGIC)?.try_version(Self::STORAGE_VERSION)?; + StorageHeader::deserialize_reader(reader)?.try_magic(Self::STORAGE_MAGIC)?.try_version(Self::STORAGE_VERSION)?; - let kind: TransactionKind = BorshDeserialize::deserialize(buf)?; + let kind: TransactionKind = BorshDeserialize::deserialize_reader(reader)?; match kind { TransactionKind::Reorg => { - let utxo_entries: Vec = BorshDeserialize::deserialize(buf)?; - let aggregate_input_value: u64 = BorshDeserialize::deserialize(buf)?; + let utxo_entries: Vec = BorshDeserialize::deserialize_reader(reader)?; + let aggregate_input_value: u64 = BorshDeserialize::deserialize_reader(reader)?; Ok(TransactionData::Reorg { utxo_entries, aggregate_input_value }) } TransactionKind::Incoming => { - let utxo_entries: Vec = BorshDeserialize::deserialize(buf)?; - let aggregate_input_value: u64 = BorshDeserialize::deserialize(buf)?; + let utxo_entries: Vec = BorshDeserialize::deserialize_reader(reader)?; + let aggregate_input_value: u64 = BorshDeserialize::deserialize_reader(reader)?; Ok(TransactionData::Incoming { utxo_entries, aggregate_input_value }) } TransactionKind::Stasis => { - let utxo_entries: Vec = BorshDeserialize::deserialize(buf)?; - let aggregate_input_value: u64 = BorshDeserialize::deserialize(buf)?; + let utxo_entries: Vec = BorshDeserialize::deserialize_reader(reader)?; + let aggregate_input_value: u64 = BorshDeserialize::deserialize_reader(reader)?; Ok(TransactionData::Stasis { utxo_entries, aggregate_input_value }) } TransactionKind::External => { - let utxo_entries: Vec = BorshDeserialize::deserialize(buf)?; - let aggregate_input_value: u64 = BorshDeserialize::deserialize(buf)?; + let utxo_entries: Vec = BorshDeserialize::deserialize_reader(reader)?; + let aggregate_input_value: u64 = BorshDeserialize::deserialize_reader(reader)?; Ok(TransactionData::External { utxo_entries, aggregate_input_value }) } TransactionKind::Batch => { - let fees: u64 = BorshDeserialize::deserialize(buf)?; - let aggregate_input_value: u64 = BorshDeserialize::deserialize(buf)?; - let aggregate_output_value: u64 = 
BorshDeserialize::deserialize(buf)?; - let transaction: Transaction = BorshDeserialize::deserialize(buf)?; - let payment_value: Option = BorshDeserialize::deserialize(buf)?; - let change_value: u64 = BorshDeserialize::deserialize(buf)?; - let accepted_daa_score: Option = BorshDeserialize::deserialize(buf)?; - let utxo_entries: Vec = BorshDeserialize::deserialize(buf)?; + let fees: u64 = BorshDeserialize::deserialize_reader(reader)?; + let aggregate_input_value: u64 = BorshDeserialize::deserialize_reader(reader)?; + let aggregate_output_value: u64 = BorshDeserialize::deserialize_reader(reader)?; + let transaction: Transaction = BorshDeserialize::deserialize_reader(reader)?; + let payment_value: Option = BorshDeserialize::deserialize_reader(reader)?; + let change_value: u64 = BorshDeserialize::deserialize_reader(reader)?; + let accepted_daa_score: Option = BorshDeserialize::deserialize_reader(reader)?; + let utxo_entries: Vec = BorshDeserialize::deserialize_reader(reader)?; Ok(TransactionData::Batch { fees, aggregate_input_value, @@ -330,14 +330,14 @@ impl BorshDeserialize for TransactionData { }) } TransactionKind::Outgoing => { - let fees: u64 = BorshDeserialize::deserialize(buf)?; - let aggregate_input_value: u64 = BorshDeserialize::deserialize(buf)?; - let aggregate_output_value: u64 = BorshDeserialize::deserialize(buf)?; - let transaction: Transaction = BorshDeserialize::deserialize(buf)?; - let payment_value: Option = BorshDeserialize::deserialize(buf)?; - let change_value: u64 = BorshDeserialize::deserialize(buf)?; - let accepted_daa_score: Option = BorshDeserialize::deserialize(buf)?; - let utxo_entries: Vec = BorshDeserialize::deserialize(buf)?; + let fees: u64 = BorshDeserialize::deserialize_reader(reader)?; + let aggregate_input_value: u64 = BorshDeserialize::deserialize_reader(reader)?; + let aggregate_output_value: u64 = BorshDeserialize::deserialize_reader(reader)?; + let transaction: Transaction = BorshDeserialize::deserialize_reader(reader)?; + let payment_value: Option = BorshDeserialize::deserialize_reader(reader)?; + let change_value: u64 = BorshDeserialize::deserialize_reader(reader)?; + let accepted_daa_score: Option = BorshDeserialize::deserialize_reader(reader)?; + let utxo_entries: Vec = BorshDeserialize::deserialize_reader(reader)?; Ok(TransactionData::Outgoing { fees, aggregate_input_value, @@ -350,14 +350,14 @@ impl BorshDeserialize for TransactionData { }) } TransactionKind::TransferIncoming => { - let fees: u64 = BorshDeserialize::deserialize(buf)?; - let aggregate_input_value: u64 = BorshDeserialize::deserialize(buf)?; - let aggregate_output_value: u64 = BorshDeserialize::deserialize(buf)?; - let transaction: Transaction = BorshDeserialize::deserialize(buf)?; - let payment_value: Option = BorshDeserialize::deserialize(buf)?; - let change_value: u64 = BorshDeserialize::deserialize(buf)?; - let accepted_daa_score: Option = BorshDeserialize::deserialize(buf)?; - let utxo_entries: Vec = BorshDeserialize::deserialize(buf)?; + let fees: u64 = BorshDeserialize::deserialize_reader(reader)?; + let aggregate_input_value: u64 = BorshDeserialize::deserialize_reader(reader)?; + let aggregate_output_value: u64 = BorshDeserialize::deserialize_reader(reader)?; + let transaction: Transaction = BorshDeserialize::deserialize_reader(reader)?; + let payment_value: Option = BorshDeserialize::deserialize_reader(reader)?; + let change_value: u64 = BorshDeserialize::deserialize_reader(reader)?; + let accepted_daa_score: Option = BorshDeserialize::deserialize_reader(reader)?; + let 
utxo_entries: Vec = BorshDeserialize::deserialize_reader(reader)?; Ok(TransactionData::TransferIncoming { fees, aggregate_input_value, @@ -370,14 +370,14 @@ impl BorshDeserialize for TransactionData { }) } TransactionKind::TransferOutgoing => { - let fees: u64 = BorshDeserialize::deserialize(buf)?; - let aggregate_input_value: u64 = BorshDeserialize::deserialize(buf)?; - let aggregate_output_value: u64 = BorshDeserialize::deserialize(buf)?; - let transaction: Transaction = BorshDeserialize::deserialize(buf)?; - let payment_value: Option = BorshDeserialize::deserialize(buf)?; - let change_value: u64 = BorshDeserialize::deserialize(buf)?; - let accepted_daa_score: Option = BorshDeserialize::deserialize(buf)?; - let utxo_entries: Vec = BorshDeserialize::deserialize(buf)?; + let fees: u64 = BorshDeserialize::deserialize_reader(reader)?; + let aggregate_input_value: u64 = BorshDeserialize::deserialize_reader(reader)?; + let aggregate_output_value: u64 = BorshDeserialize::deserialize_reader(reader)?; + let transaction: Transaction = BorshDeserialize::deserialize_reader(reader)?; + let payment_value: Option = BorshDeserialize::deserialize_reader(reader)?; + let change_value: u64 = BorshDeserialize::deserialize_reader(reader)?; + let accepted_daa_score: Option = BorshDeserialize::deserialize_reader(reader)?; + let utxo_entries: Vec = BorshDeserialize::deserialize_reader(reader)?; Ok(TransactionData::TransferOutgoing { fees, aggregate_input_value, @@ -390,13 +390,13 @@ impl BorshDeserialize for TransactionData { }) } TransactionKind::Change => { - let aggregate_input_value: u64 = BorshDeserialize::deserialize(buf)?; - let aggregate_output_value: u64 = BorshDeserialize::deserialize(buf)?; - let transaction: Transaction = BorshDeserialize::deserialize(buf)?; - let payment_value: Option = BorshDeserialize::deserialize(buf)?; - let change_value: u64 = BorshDeserialize::deserialize(buf)?; - let accepted_daa_score: Option = BorshDeserialize::deserialize(buf)?; - let utxo_entries: Vec = BorshDeserialize::deserialize(buf)?; + let aggregate_input_value: u64 = BorshDeserialize::deserialize_reader(reader)?; + let aggregate_output_value: u64 = BorshDeserialize::deserialize_reader(reader)?; + let transaction: Transaction = BorshDeserialize::deserialize_reader(reader)?; + let payment_value: Option = BorshDeserialize::deserialize_reader(reader)?; + let change_value: u64 = BorshDeserialize::deserialize_reader(reader)?; + let accepted_daa_score: Option = BorshDeserialize::deserialize_reader(reader)?; + let utxo_entries: Vec = BorshDeserialize::deserialize_reader(reader)?; Ok(TransactionData::Change { aggregate_input_value, aggregate_output_value, diff --git a/wallet/core/src/storage/transaction/record.rs b/wallet/core/src/storage/transaction/record.rs index 7a04571331..05be3b69f2 100644 --- a/wallet/core/src/storage/transaction/record.rs +++ b/wallet/core/src/storage/transaction/record.rs @@ -4,7 +4,7 @@ use super::*; use crate::imports::*; -use crate::storage::Binding; +use crate::storage::{Binding, BindingT}; use crate::tx::PendingTransactionInner; use workflow_core::time::{unixtime_as_millis_u64, unixtime_to_locale_string}; use workflow_wasm::utils::try_get_js_value_prop; @@ -289,7 +289,9 @@ export interface ITransactionRecord { extern "C" { #[wasm_bindgen(extends = Object, typescript_type = "ITransactionRecord")] #[derive(Clone, Debug, PartialEq, Eq)] - pub type ITransactionRecord; + pub type TransactionRecordT; + #[wasm_bindgen(extends = Object, typescript_type = "ITransactionData")] + pub type 
TransactionDataT; } #[wasm_bindgen(inspectable)] @@ -318,11 +320,12 @@ pub struct TransactionRecord { #[serde(rename = "unixtimeMsec")] #[wasm_bindgen(js_name = unixtimeMsec)] pub unixtime_msec: Option, + #[wasm_bindgen(skip)] pub value: u64, #[wasm_bindgen(skip)] pub binding: Binding, #[serde(rename = "blockDaaScore")] - #[wasm_bindgen(js_name = blockDaaScore)] + #[wasm_bindgen(skip)] pub block_daa_score: u64, #[serde(rename = "network")] #[wasm_bindgen(js_name = network)] @@ -378,9 +381,9 @@ impl TransactionRecord { let params = NetworkParams::from(self.network_id); let maturity = if self.is_coinbase() { - params.coinbase_transaction_maturity_period_daa + params.coinbase_transaction_maturity_period_daa() } else { - params.user_transaction_maturity_period_daa + params.user_transaction_maturity_period_daa() }; if current_daa_score < self.block_daa_score() + maturity { @@ -431,9 +434,9 @@ impl TransactionRecord { pub fn maturity_progress(&self, current_daa_score: u64) -> Option { let params = NetworkParams::from(self.network_id); let maturity = if self.is_coinbase() { - params.coinbase_transaction_maturity_period_daa + params.coinbase_transaction_maturity_period_daa() } else { - params.user_transaction_maturity_period_daa + params.user_transaction_maturity_period_daa() }; if current_daa_score < self.block_daa_score + maturity { @@ -784,14 +787,24 @@ impl TransactionRecord { #[wasm_bindgen] impl TransactionRecord { + #[wasm_bindgen(getter, js_name = "value")] + pub fn value_as_js_bigint(&self) -> BigInt { + self.value.into() + } + + #[wasm_bindgen(getter, js_name = "blockDaaScore")] + pub fn block_daa_score_as_js_bigint(&self) -> BigInt { + self.block_daa_score.into() + } + #[wasm_bindgen(getter, js_name = "binding")] - pub fn binding_as_js_value(&self) -> JsValue { - serde_wasm_bindgen::to_value(&self.binding).unwrap() + pub fn binding_as_js_value(&self) -> BindingT { + serde_wasm_bindgen::to_value(&self.binding).unwrap().unchecked_into() } #[wasm_bindgen(getter, js_name = "data")] - pub fn data_as_js_value(&self) -> JsValue { - try_get_js_value_prop(&serde_wasm_bindgen::to_value(&self.transaction_data).unwrap(), "data").unwrap() + pub fn data_as_js_value(&self) -> TransactionDataT { + try_get_js_value_prop(&serde_wasm_bindgen::to_value(&self.transaction_data).unwrap(), "data").unwrap().unchecked_into() } #[wasm_bindgen(getter, js_name = "type")] @@ -837,19 +850,19 @@ impl BorshSerialize for TransactionRecord { } impl BorshDeserialize for TransactionRecord { - fn deserialize(buf: &mut &[u8]) -> IoResult { + fn deserialize_reader(reader: &mut R) -> IoResult { let StorageHeader { version: _, .. 
} = - StorageHeader::deserialize(buf)?.try_magic(Self::STORAGE_MAGIC)?.try_version(Self::STORAGE_VERSION)?; - - let id = BorshDeserialize::deserialize(buf)?; - let unixtime = BorshDeserialize::deserialize(buf)?; - let value = BorshDeserialize::deserialize(buf)?; - let binding = BorshDeserialize::deserialize(buf)?; - let block_daa_score = BorshDeserialize::deserialize(buf)?; - let network_id = BorshDeserialize::deserialize(buf)?; - let transaction_data = BorshDeserialize::deserialize(buf)?; - let note = BorshDeserialize::deserialize(buf)?; - let metadata = BorshDeserialize::deserialize(buf)?; + StorageHeader::deserialize_reader(reader)?.try_magic(Self::STORAGE_MAGIC)?.try_version(Self::STORAGE_VERSION)?; + + let id = BorshDeserialize::deserialize_reader(reader)?; + let unixtime = BorshDeserialize::deserialize_reader(reader)?; + let value = BorshDeserialize::deserialize_reader(reader)?; + let binding = BorshDeserialize::deserialize_reader(reader)?; + let block_daa_score = BorshDeserialize::deserialize_reader(reader)?; + let network_id = BorshDeserialize::deserialize_reader(reader)?; + let transaction_data = BorshDeserialize::deserialize_reader(reader)?; + let note = BorshDeserialize::deserialize_reader(reader)?; + let metadata = BorshDeserialize::deserialize_reader(reader)?; Ok(Self { id, unixtime_msec: unixtime, value, binding, block_daa_score, network_id, transaction_data, note, metadata }) } @@ -861,7 +874,7 @@ impl BorshDeserialize for TransactionRecord { // } // } -impl From for ITransactionRecord { +impl From for TransactionRecordT { fn from(record: TransactionRecord) -> Self { JsValue::from(record).unchecked_into() } diff --git a/wallet/core/src/tests/rpc_core_mock.rs b/wallet/core/src/tests/rpc_core_mock.rs index 6c335d59a6..4d10cdd9b1 100644 --- a/wallet/core/src/tests/rpc_core_mock.rs +++ b/wallet/core/src/tests/rpc_core_mock.rs @@ -9,7 +9,7 @@ use kaspa_notify::scope::Scope; use kaspa_notify::subscription::context::SubscriptionContext; use kaspa_notify::subscription::{MutationPolicies, UtxosChangedMutationPolicy}; use kaspa_rpc_core::api::ctl::RpcCtl; -use kaspa_rpc_core::{api::rpc::RpcApi, *}; +use kaspa_rpc_core::{api::connection::DynRpcConnection, api::rpc::RpcApi, *}; use kaspa_rpc_core::{notify::connection::ChannelConnection, RpcResult}; use std::sync::Arc; @@ -83,7 +83,7 @@ impl Default for RpcCoreMock { #[async_trait] impl RpcApi for RpcCoreMock { // This fn needs to succeed while the client connects - async fn get_info_call(&self, _request: GetInfoRequest) -> RpcResult { + async fn get_info_call(&self, _connection: Option<&DynRpcConnection>, _request: GetInfoRequest) -> RpcResult { Ok(GetInfoResponse { p2p_id: "wallet-mock".to_string(), mempool_size: 1234, @@ -95,133 +95,237 @@ impl RpcApi for RpcCoreMock { }) } - async fn ping_call(&self, _request: PingRequest) -> RpcResult { + async fn ping_call(&self, _connection: Option<&DynRpcConnection>, _request: PingRequest) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_metrics_call(&self, _request: GetMetricsRequest) -> RpcResult { + async fn get_metrics_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetMetricsRequest, + ) -> RpcResult { + Err(RpcError::NotImplemented) + } + + async fn get_connections_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetConnectionsRequest, + ) -> RpcResult { + Err(RpcError::NotImplemented) + } + + async fn get_server_info_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetServerInfoRequest, + ) -> RpcResult { + 
Err(RpcError::NotImplemented) + } + + async fn get_system_info_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetSystemInfoRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_server_info_call(&self, _request: GetServerInfoRequest) -> RpcResult { + async fn get_sync_status_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetSyncStatusRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_sync_status_call(&self, _request: GetSyncStatusRequest) -> RpcResult { + async fn get_current_network_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetCurrentNetworkRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_current_network_call(&self, _request: GetCurrentNetworkRequest) -> RpcResult { + async fn submit_block_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: SubmitBlockRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn submit_block_call(&self, _request: SubmitBlockRequest) -> RpcResult { + async fn get_block_template_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetBlockTemplateRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_block_template_call(&self, _request: GetBlockTemplateRequest) -> RpcResult { + async fn get_peer_addresses_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetPeerAddressesRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_peer_addresses_call(&self, _request: GetPeerAddressesRequest) -> RpcResult { + async fn get_sink_call(&self, _connection: Option<&DynRpcConnection>, _request: GetSinkRequest) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_sink_call(&self, _request: GetSinkRequest) -> RpcResult { + async fn get_mempool_entry_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetMempoolEntryRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_mempool_entry_call(&self, _request: GetMempoolEntryRequest) -> RpcResult { + async fn get_mempool_entries_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetMempoolEntriesRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_mempool_entries_call(&self, _request: GetMempoolEntriesRequest) -> RpcResult { + async fn get_connected_peer_info_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetConnectedPeerInfoRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_connected_peer_info_call(&self, _request: GetConnectedPeerInfoRequest) -> RpcResult { + async fn submit_transaction_replacement_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: SubmitTransactionReplacementRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn add_peer_call(&self, _request: AddPeerRequest) -> RpcResult { + async fn add_peer_call(&self, _connection: Option<&DynRpcConnection>, _request: AddPeerRequest) -> RpcResult { Err(RpcError::NotImplemented) } - async fn submit_transaction_call(&self, _request: SubmitTransactionRequest) -> RpcResult { + async fn submit_transaction_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: SubmitTransactionRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_block_call(&self, _request: GetBlockRequest) -> RpcResult { + async fn get_block_call(&self, _connection: Option<&DynRpcConnection>, _request: GetBlockRequest) -> RpcResult { Err(RpcError::NotImplemented) } - async fn 
get_subnetwork_call(&self, _request: GetSubnetworkRequest) -> RpcResult { + async fn get_subnetwork_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetSubnetworkRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } async fn get_virtual_chain_from_block_call( &self, + _connection: Option<&DynRpcConnection>, _request: GetVirtualChainFromBlockRequest, ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_blocks_call(&self, _request: GetBlocksRequest) -> RpcResult { + async fn get_blocks_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetBlocksRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_block_count_call(&self, _request: GetBlockCountRequest) -> RpcResult { + async fn get_block_count_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetBlockCountRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_block_dag_info_call(&self, _request: GetBlockDagInfoRequest) -> RpcResult { + async fn get_block_dag_info_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetBlockDagInfoRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } async fn resolve_finality_conflict_call( &self, + _connection: Option<&DynRpcConnection>, _request: ResolveFinalityConflictRequest, ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn shutdown_call(&self, _request: ShutdownRequest) -> RpcResult { + async fn shutdown_call(&self, _connection: Option<&DynRpcConnection>, _request: ShutdownRequest) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_headers_call(&self, _request: GetHeadersRequest) -> RpcResult { + async fn get_headers_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetHeadersRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_balance_by_address_call(&self, _request: GetBalanceByAddressRequest) -> RpcResult { + async fn get_balance_by_address_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetBalanceByAddressRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } async fn get_balances_by_addresses_call( &self, + _connection: Option<&DynRpcConnection>, _request: GetBalancesByAddressesRequest, ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_utxos_by_addresses_call(&self, _request: GetUtxosByAddressesRequest) -> RpcResult { + async fn get_utxos_by_addresses_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetUtxosByAddressesRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_sink_blue_score_call(&self, _request: GetSinkBlueScoreRequest) -> RpcResult { + async fn get_sink_blue_score_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetSinkBlueScoreRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn ban_call(&self, _request: BanRequest) -> RpcResult { + async fn ban_call(&self, _connection: Option<&DynRpcConnection>, _request: BanRequest) -> RpcResult { Err(RpcError::NotImplemented) } - async fn unban_call(&self, _request: UnbanRequest) -> RpcResult { + async fn unban_call(&self, _connection: Option<&DynRpcConnection>, _request: UnbanRequest) -> RpcResult { Err(RpcError::NotImplemented) } async fn estimate_network_hashes_per_second_call( &self, + _connection: Option<&DynRpcConnection>, _request: EstimateNetworkHashesPerSecondRequest, ) -> RpcResult { Err(RpcError::NotImplemented) @@ -229,22 +333,52 @@ impl RpcApi for RpcCoreMock { async fn get_mempool_entries_by_addresses_call( &self, + 
_connection: Option<&DynRpcConnection>, _request: GetMempoolEntriesByAddressesRequest, ) -> RpcResult { Err(RpcError::NotImplemented) } - async fn get_coin_supply_call(&self, _request: GetCoinSupplyRequest) -> RpcResult { + async fn get_coin_supply_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetCoinSupplyRequest, + ) -> RpcResult { Err(RpcError::NotImplemented) } async fn get_daa_score_timestamp_estimate_call( &self, + _connection: Option<&DynRpcConnection>, _request: GetDaaScoreTimestampEstimateRequest, ) -> RpcResult { Err(RpcError::NotImplemented) } + async fn get_fee_estimate_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetFeeEstimateRequest, + ) -> RpcResult { + Err(RpcError::NotImplemented) + } + + async fn get_fee_estimate_experimental_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetFeeEstimateExperimentalRequest, + ) -> RpcResult { + Err(RpcError::NotImplemented) + } + + async fn get_current_block_color_call( + &self, + _connection: Option<&DynRpcConnection>, + _request: GetCurrentBlockColorRequest, + ) -> RpcResult { + Err(RpcError::NotImplemented) + } + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Notification API diff --git a/wallet/core/src/tests/storage.rs b/wallet/core/src/tests/storage.rs index 7257311602..5af0bcbae5 100644 --- a/wallet/core/src/tests/storage.rs +++ b/wallet/core/src/tests/storage.rs @@ -20,11 +20,11 @@ where } pub fn validate(&self) -> Result { - let bytes = self.try_to_vec()?; + let bytes = borsh::to_vec(self)?; let transform = Self::try_from_slice(bytes.as_slice())?; assert_eq!(transform.before, 0xdeadbeef); assert_eq!(transform.after, 0xbaadf00d); - let transform_bytes = transform.try_to_vec()?; + let transform_bytes = borsh::to_vec(&transform)?; assert_eq!(bytes, transform_bytes); Ok(transform.storable) } diff --git a/wallet/core/src/tx/generator/generator.rs b/wallet/core/src/tx/generator/generator.rs index 533f2e0464..398ba1b4dc 100644 --- a/wallet/core/src/tx/generator/generator.rs +++ b/wallet/core/src/tx/generator/generator.rs @@ -39,21 +39,22 @@ //! interface or via an async Stream interface. //! //! Q: Why is this not implemented as a single loop? +//! //! A: There are a number of requirements that need to be handled: //! -//! 1. UTXO entry consumption while creating inputs may results in -//! additional fees, requiring additional UTXO entries to cover -//! the fees. Goto 1. (this is a classic issue, can be solved using padding) +//! 1. UTXO entry consumption while creating inputs may result in +//! additional fees, requiring additional UTXO entries to cover +//! the fees. Goto 1. (this is a classic issue, can be solved using padding) //! -//! 2. The overall design strategy for this processor is to allow -//! concurrent processing of a large number of transactions and UTXOs. -//! This implementation avoids in-memory aggregation of all -//! transactions that may result in OOM conditions. +//! 2. The overall design strategy for this processor is to allow +//! concurrent processing of a large number of transactions and UTXOs. +//! This implementation avoids in-memory aggregation of all +//! transactions that may result in OOM conditions. //! -//! 3. If used with a large UTXO set, the transaction generation process -//! needs to be asynchronous to avoid blocking the main thread. In the -//! context of WASM32 SDK, not doing that while working with large -//! UTXO sets will result in a browser UI freezing. +//! 3. 
If used with a large UTXO set, the transaction generation process +//! needs to be asynchronous to avoid blocking the main thread. In the +//! context of WASM32 SDK, not doing that while working with large +//! UTXO sets will result in a browser UI freezing. //! use crate::imports::*; @@ -65,6 +66,7 @@ use crate::tx::{ use crate::utxo::{NetworkParams, UtxoContext, UtxoEntryReference}; use kaspa_consensus_client::UtxoEntry; use kaspa_consensus_core::constants::UNACCEPTED_DAA_SCORE; +use kaspa_consensus_core::mass::Kip9Version; use kaspa_consensus_core::subnets::SUBNETWORK_ID_NATIVE; use kaspa_consensus_core::tx::{Transaction, TransactionInput, TransactionOutpoint, TransactionOutput}; use kaspa_txscript::pay_to_address_script; @@ -87,6 +89,11 @@ const TRANSACTION_MASS_BOUNDARY_FOR_STAGE_INPUT_ACCUMULATION: u64 = MAXIMUM_STAN struct Context { /// iterator containing UTXO entries available for transaction generation utxo_source_iterator: Box + Send + Sync + 'static>, + /// List of priority UTXO entries, that are consumed before polling the iterator + priority_utxo_entries: Option>, + /// HashSet containing priority UTXO entries, used for filtering + /// for potential duplicates from the iterator + priority_utxo_entry_filter: Option>, /// total number of UTXOs consumed by the single generator instance aggregated_utxos: usize, /// total fees of all transactions issued by @@ -209,7 +216,7 @@ struct Data { impl Data { fn new(calc: &MassCalculator) -> Self { - let aggregate_mass = calc.blank_transaction_mass(); + let aggregate_mass = calc.blank_transaction_compute_mass(); Data { inputs: vec![], @@ -260,7 +267,7 @@ struct Inner { // Current network id network_id: NetworkId, // Current network params - network_params: NetworkParams, + network_params: &'static NetworkParams, // Source Utxo Context (Used for source UtxoEntry aggregation) source_utxo_context: Option, @@ -271,7 +278,6 @@ struct Inner { // typically a number of keys required to sign the transaction sig_op_count: u8, // number of minimum signatures required to sign the transaction - #[allow(dead_code)] minimum_signatures: u16, // change address change_address: Address, @@ -339,6 +345,7 @@ impl Generator { multiplexer, utxo_iterator, source_utxo_context: utxo_context, + priority_utxo_entries, sig_op_count, minimum_signatures, change_address, @@ -350,7 +357,7 @@ impl Generator { let network_type = NetworkType::from(network_id); let network_params = NetworkParams::from(network_id); - let mass_calculator = MassCalculator::new(&network_id.into(), &network_params); + let mass_calculator = MassCalculator::new(&network_id.into(), network_params); let (final_transaction_outputs, final_transaction_amount) = match final_transaction_destination { PaymentDestination::Change => { @@ -394,12 +401,13 @@ impl Generator { return Err(Error::GeneratorChangeAddressNetworkTypeMismatch); } - let standard_change_output_mass = - mass_calculator.calc_mass_for_output(&TransactionOutput::new(0, pay_to_address_script(&change_address))); - let signature_mass_per_input = mass_calculator.calc_signature_mass(minimum_signatures); - let final_transaction_outputs_compute_mass = mass_calculator.calc_mass_for_outputs(&final_transaction_outputs); + let standard_change_output_mass = mass_calculator + .calc_compute_mass_for_client_transaction_output(&TransactionOutput::new(0, pay_to_address_script(&change_address))); + let signature_mass_per_input = mass_calculator.calc_compute_mass_for_signature(minimum_signatures); + let final_transaction_outputs_compute_mass = + 
mass_calculator.calc_compute_mass_for_client_transaction_outputs(&final_transaction_outputs); let final_transaction_payload = final_transaction_payload.unwrap_or_default(); - let final_transaction_payload_mass = mass_calculator.calc_mass_for_payload(final_transaction_payload.len()); + let final_transaction_payload_mass = mass_calculator.calc_compute_mass_for_payload(final_transaction_payload.len()); let final_transaction_outputs_harmonic = mass_calculator.calc_storage_mass_output_harmonic(&final_transaction_outputs).ok_or(Error::MassCalculationError)?; @@ -414,8 +422,14 @@ impl Generator { return Err(Error::GeneratorTransactionOutputsAreTooHeavy { mass: mass_sanity_check, kind: "compute mass" }); } + let priority_utxo_entry_filter = priority_utxo_entries.as_ref().map(|entries| entries.iter().cloned().collect()); + // remap to VecDeque as this list gets drained + let priority_utxo_entries = priority_utxo_entries.map(|entries| entries.into_iter().collect::>()); + let context = Mutex::new(Context { utxo_source_iterator: utxo_iterator, + priority_utxo_entries, + priority_utxo_entry_filter, number_of_transactions: 0, aggregated_utxos: 0, aggregate_fees: 0, @@ -464,7 +478,7 @@ impl Generator { /// Returns current [`NetworkParams`] pub fn network_params(&self) -> &NetworkParams { - &self.inner.network_params + self.inner.network_params } /// The underlying [`UtxoContext`] (if available). @@ -527,15 +541,29 @@ impl Generator { } /// Get next UTXO entry. This function obtains UTXO in the following order: - /// 1. From the UTXO stash (used to store UTxOs that were not used in the previous transaction) + /// 1. From the UTXO stash (used to store UTxOs that were consumed during previous transaction generation but were rejected due to various conditions, such as mass overflow) /// 2. From the current stage - /// 3. From the UTXO source iterator + /// 3. From priority UTXO entries + /// 4. From the UTXO source iterator (while filtering against priority UTXO entries) fn get_utxo_entry(&self, context: &mut Context, stage: &mut Stage) -> Option { context .utxo_stash .pop_front() .or_else(|| stage.utxo_iterator.as_mut().and_then(|utxo_stage_iterator| utxo_stage_iterator.next())) - .or_else(|| context.utxo_source_iterator.next()) + .or_else(|| context.priority_utxo_entries.as_mut().and_then(|entries| entries.pop_front())) + .or_else(|| loop { + let utxo_entry = context.utxo_source_iterator.next()?; + + if let Some(filter) = context.priority_utxo_entry_filter.as_ref() { + if filter.contains(&utxo_entry) { + // skip the entry from the iterator intake + // if it has been supplied as a priority entry + continue; + } + } + + break Some(utxo_entry); + }) } /// Calculate relay transaction mass for the current transaction `data` @@ -553,16 +581,18 @@ impl Generator { /// /// The general processing pattern can be described as follows: /// - /// loop { - /// 1. Obtain UTXO entry from [`Generator::get_utxo_entry()`] - /// 2. Check if UTXO entries have been depleted, if so, handle sweep processing. - /// 3. Create a new Input for the transaction from the UTXO entry. - /// 4. Check if the transaction mass threshold has been reached, if so, yield the transaction. - /// 5. Register input with the [`Data`] structures. - /// 6. Check if the final transaction amount has been reached, if so, yield the transaction. - /// } - /// - /// + /** + loop { + 1. Obtain UTXO entry from [`Generator::get_utxo_entry()`] + 2. Check if UTXO entries have been depleted, if so, handle sweep processing. + 3. 
Create a new Input for the transaction from the UTXO entry. + 4. Check if the transaction mass threshold has been reached, if so, yield the transaction. + 5. Register input with the [`Data`] structures. + 6. Check if the final transaction amount has been reached, if so, yield the transaction. + + } + */ + fn generate_transaction_data(&self, context: &mut Context, stage: &mut Stage) -> Result<(DataKind, Data)> { let calc = &self.inner.mass_calculator; let mut data = Data::new(calc); @@ -633,14 +663,14 @@ impl Generator { let input = TransactionInput::new(utxo.outpoint.clone().into(), vec![], 0, self.inner.sig_op_count); let input_amount = utxo.amount(); - let input_compute_mass = calc.calc_mass_for_input(&input) + self.inner.signature_mass_per_input; + let input_compute_mass = calc.calc_compute_mass_for_client_transaction_input(&input) + self.inner.signature_mass_per_input; // NOTE: relay transactions have no storage mass // mass threshold reached, yield transaction if data.aggregate_mass + input_compute_mass + self.inner.standard_change_output_compute_mass - + self.inner.network_params.additional_compound_transaction_mass + + self.inner.network_params.additional_compound_transaction_mass() > MAXIMUM_STANDARD_TRANSACTION_MASS { // note, we've used input for mass boundary calc and now abandon it @@ -648,7 +678,7 @@ impl Generator { context.utxo_stash.push_back(utxo_entry_reference); data.aggregate_mass += - self.inner.standard_change_output_compute_mass + self.inner.network_params.additional_compound_transaction_mass; + self.inner.standard_change_output_compute_mass + self.inner.network_params.additional_compound_transaction_mass(); data.transaction_fees = self.calc_relay_transaction_compute_fees(data); stage.aggregate_fees += data.transaction_fees; context.aggregate_fees += data.transaction_fees; @@ -675,7 +705,6 @@ impl Generator { Ok((DataKind::NoOp, data)) } else if stage.number_of_transactions > 0 { data.aggregate_mass += self.inner.standard_change_output_compute_mass; - data.change_output_value = Some(data.aggregate_input_value - data.transaction_fees); Ok((DataKind::Edge, data)) } else if data.aggregate_input_value < data.transaction_fees { Err(Error::InsufficientFunds { additional_needed: data.transaction_fees - data.aggregate_input_value, origin: "relay" }) @@ -836,8 +865,11 @@ impl Generator { calc.calc_storage_mass_output_harmonic_single(change_value) + self.inner.final_transaction_outputs_harmonic; let storage_mass_with_change = self.calc_storage_mass(data, output_harmonic_with_change); + // TODO - review and potentially simplify: + // this profiles the storage mass with change and without change + // and decides which one to use based on the fees if storage_mass_with_change == 0 - || (self.inner.network_params.mass_combination_strategy == MassCombinationStrategy::Max + || (self.inner.network_params.kip9_version() == Kip9Version::Beta // max(compute vs storage) && storage_mass_with_change < compute_mass_with_change) { 0 @@ -846,8 +878,8 @@ impl Generator { if storage_mass_with_change < storage_mass_no_change { storage_mass_with_change } else { - let fees_with_change = calc.calc_fee_for_storage_mass(storage_mass_with_change); - let fees_no_change = calc.calc_fee_for_storage_mass(storage_mass_no_change); + let fees_with_change = calc.calc_fee_for_mass(storage_mass_with_change); + let fees_no_change = calc.calc_fee_for_mass(storage_mass_no_change); let difference = fees_with_change.saturating_sub(fees_no_change); if difference > change_value { @@ -878,7 +910,7 @@ impl Generator { 
let compute_mass = data.aggregate_mass + self.inner.standard_change_output_compute_mass - + self.inner.network_params.additional_compound_transaction_mass; + + self.inner.network_params.additional_compound_transaction_mass(); let compute_fees = calc.calc_minimum_transaction_fee_from_mass(compute_mass); // TODO - consider removing this as calculated storage mass should produce `0` value @@ -943,7 +975,7 @@ impl Generator { addresses, aggregate_input_value, change_output_value, - aggregate_mass, + aggregate_mass: _, transaction_fees, .. } = data; @@ -951,7 +983,6 @@ impl Generator { let change_output_value = change_output_value.unwrap_or(0); let mut final_outputs = self.inner.final_transaction_outputs.clone(); - // let mut final_outputs = context.final_transaction_outputs.clone(); if self.inner.final_transaction_priority_fee.receiver_pays() { let output = final_outputs.get_mut(0).expect("include fees requires one output"); @@ -962,10 +993,13 @@ impl Generator { } } - if change_output_value > 0 { - let output = TransactionOutput::new(change_output_value, pay_to_address_script(&self.inner.change_address)); - final_outputs.push(output); - } + let change_output_index = if change_output_value > 0 { + let change_output_index = Some(final_outputs.len()); + final_outputs.push(TransactionOutput::new(change_output_value, pay_to_address_script(&self.inner.change_address))); + change_output_index + } else { + None + }; let aggregate_output_value = final_outputs.iter().map(|output| output.value).sum::(); // TODO - validate that this is still correct @@ -987,6 +1021,17 @@ impl Generator { self.inner.final_transaction_payload.clone(), ); + let transaction_mass = self.inner.mass_calculator.calc_overall_mass_for_unsigned_consensus_transaction( + &tx, + &utxo_entry_references, + self.inner.minimum_signatures, + )?; + if transaction_mass > MAXIMUM_STANDARD_TRANSACTION_MASS { + // this should never occur as we should not produce transactions higher than the mass limit + return Err(Error::MassCalculationError); + } + tx.set_mass(transaction_mass); + context.final_transaction_id = Some(tx.id()); context.number_of_transactions += 1; @@ -996,10 +1041,12 @@ impl Generator { utxo_entry_references, addresses.into_iter().collect(), self.final_transaction_value_no_fees(), + change_output_index, change_output_value, aggregate_input_value, aggregate_output_value, - aggregate_mass, + self.inner.minimum_signatures, + transaction_mass, transaction_fees, kind, )?)) @@ -1010,7 +1057,7 @@ impl Generator { utxo_entry_references, addresses, aggregate_input_value, - aggregate_mass, + aggregate_mass: _, transaction_fees, change_output_value, .. 
@@ -1022,22 +1069,35 @@ impl Generator { let script_public_key = pay_to_address_script(&self.inner.change_address); let output = TransactionOutput::new(output_value, script_public_key.clone()); let tx = Transaction::new(0, inputs, vec![output], 0, SUBNETWORK_ID_NATIVE, 0, vec![]); + + let mut transaction_mass = self.inner.mass_calculator.calc_overall_mass_for_unsigned_consensus_transaction( + &tx, + &utxo_entry_references, + self.inner.minimum_signatures, + )?; + transaction_mass = transaction_mass.saturating_add(self.inner.network_params.additional_compound_transaction_mass()); + if transaction_mass > MAXIMUM_STANDARD_TRANSACTION_MASS { + // this should never occur as we should not produce transactions higher than the mass limit + return Err(Error::MassCalculationError); + } + tx.set_mass(transaction_mass); + context.number_of_transactions += 1; - let utxo_entry_reference = + let previous_batch_utxo_entry_reference = Self::create_batch_utxo_entry_reference(tx.id(), output_value, script_public_key, &self.inner.change_address); match kind { DataKind::Node => { // store resulting UTXO in the current stage let stage = context.stage.as_mut().unwrap(); - stage.utxo_accumulator.push(utxo_entry_reference); + stage.utxo_accumulator.push(previous_batch_utxo_entry_reference); stage.number_of_transactions += 1; } DataKind::Edge => { // store resulting UTXO in the current stage and create a new stage let mut stage = context.stage.take().unwrap(); - stage.utxo_accumulator.push(utxo_entry_reference); + stage.utxo_accumulator.push(previous_batch_utxo_entry_reference); stage.number_of_transactions += 1; context.stage.replace(Box::new(Stage::new(*stage))); } @@ -1050,10 +1110,12 @@ impl Generator { utxo_entry_references, addresses.into_iter().collect(), self.final_transaction_value_no_fees(), + None, output_value, aggregate_input_value, output_value, - aggregate_mass, + self.inner.minimum_signatures, + transaction_mass, transaction_fees, kind, )?)) diff --git a/wallet/core/src/tx/generator/pending.rs b/wallet/core/src/tx/generator/pending.rs index cd757e54b6..8b4beddf2c 100644 --- a/wallet/core/src/tx/generator/pending.rs +++ b/wallet/core/src/tx/generator/pending.rs @@ -8,7 +8,8 @@ use crate::result::Result; use crate::rpc::DynRpcApi; use crate::tx::{DataKind, Generator}; use crate::utxo::{UtxoContext, UtxoEntryId, UtxoEntryReference}; -use kaspa_consensus_core::sign::sign_with_multiple_v2; +use kaspa_consensus_core::hashing::sighash_type::SigHashType; +use kaspa_consensus_core::sign::{sign_input, sign_with_multiple_v2, Signed}; use kaspa_consensus_core::tx::{SignableTransaction, Transaction, TransactionId}; use kaspa_rpc_core::{RpcTransaction, RpcTransactionId}; @@ -27,12 +28,18 @@ pub(crate) struct PendingTransactionInner { pub(crate) is_submitted: AtomicBool, /// Payment value of the transaction (transaction destination amount) pub(crate) payment_value: Option, + /// The index (position) of the change output in the transaction + pub(crate) change_output_index: Option, /// Change value of the transaction (transaction change amount) pub(crate) change_output_value: u64, /// Total aggregate value of all inputs pub(crate) aggregate_input_value: u64, /// Total aggregate value of all outputs pub(crate) aggregate_output_value: u64, + /// Minimum number of signatures required for the transaction + /// (passed in during transaction creation). This value is used + /// to estimate the mass of the transaction. 
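The change_output_index bookkeeping introduced above appends a change output only when change_output_value > 0 and records its position so the pending transaction can expose it later. A minimal, self-contained sketch of that pattern (Output is an illustrative stand-in, not the SDK's TransactionOutput):

    struct Output {
        value: u64,
    }

    // Append a change output only when there is change; return its index
    // (mirrors the change_output_index logic in the hunk above).
    fn push_change(outputs: &mut Vec<Output>, change_value: u64) -> Option<usize> {
        if change_value > 0 {
            let index = outputs.len();
            outputs.push(Output { value: change_value });
            Some(index)
        } else {
            None
        }
    }

    fn main() {
        let mut outputs = vec![Output { value: 10_000 }];
        assert_eq!(push_change(&mut outputs, 500), Some(1));
        assert_eq!(push_change(&mut outputs, 0), None);
        assert_eq!(outputs[1].value, 500);
    }
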
+ pub(crate) minimum_signatures: u16, // Transaction mass pub(crate) mass: u64, /// Fees of the transaction @@ -48,8 +55,10 @@ impl std::fmt::Debug for PendingTransaction { .field("utxo_entries", &self.inner.utxo_entries) .field("addresses", &self.inner.addresses) .field("payment_value", &self.inner.payment_value) + .field("change_output_index", &self.inner.change_output_index) .field("change_output_value", &self.inner.change_output_value) .field("aggregate_input_value", &self.inner.aggregate_input_value) + .field("minimum_signatures", &self.inner.minimum_signatures) .field("mass", &self.inner.mass) .field("fees", &self.inner.fees) .field("kind", &self.inner.kind) @@ -74,9 +83,11 @@ impl PendingTransaction { utxo_entries: Vec, addresses: Vec
, payment_value: Option, + change_output_index: Option, change_output_value: u64, aggregate_input_value: u64, aggregate_output_value: u64, + minimum_signatures: u16, mass: u64, fees: u64, kind: DataKind, @@ -94,9 +105,11 @@ impl PendingTransaction { addresses, is_submitted: AtomicBool::new(false), payment_value, + change_output_index, change_output_value, aggregate_input_value, aggregate_output_value, + minimum_signatures, mass, fees, kind, @@ -134,6 +147,14 @@ impl PendingTransaction { self.inner.fees } + pub fn mass(&self) -> u64 { + self.inner.mass + } + + pub fn minimum_signatures(&self) -> u16 { + self.inner.minimum_signatures + } + pub fn aggregate_input_value(&self) -> u64 { self.inner.aggregate_input_value } @@ -146,6 +167,10 @@ impl PendingTransaction { self.inner.payment_value } + pub fn change_output_index(&self) -> Option { + self.inner.change_output_index + } + pub fn change_value(&self) -> u64 { self.inner.change_output_value } @@ -223,9 +248,50 @@ impl PendingTransaction { Ok(()) } - pub fn try_sign_with_keys(&self, privkeys: &[[u8; 32]]) -> Result<()> { + pub fn create_input_signature(&self, input_index: usize, private_key: &[u8; 32], hash_type: SigHashType) -> Result> { + let mutable_tx = self.inner.signable_tx.lock()?.clone(); + let verifiable_tx = mutable_tx.as_verifiable(); + + Ok(sign_input(&verifiable_tx, input_index, private_key, hash_type)) + } + + pub fn fill_input(&self, input_index: usize, signature_script: Vec) -> Result<()> { + let mut mutable_tx = self.inner.signable_tx.lock()?.clone(); + mutable_tx.tx.inputs[input_index].signature_script = signature_script; + *self.inner.signable_tx.lock().unwrap() = mutable_tx; + + Ok(()) + } + + pub fn sign_input(&self, input_index: usize, private_key: &[u8; 32], hash_type: SigHashType) -> Result<()> { + let mut mutable_tx = self.inner.signable_tx.lock()?.clone(); + + let signature_script = { + let verifiable_tx = &mutable_tx.as_verifiable(); + sign_input(verifiable_tx, input_index, private_key, hash_type) + }; + + mutable_tx.tx.inputs[input_index].signature_script = signature_script; + *self.inner.signable_tx.lock().unwrap() = mutable_tx; + + Ok(()) + } + + pub fn try_sign_with_keys(&self, privkeys: &[[u8; 32]], check_fully_signed: Option) -> Result<()> { let mutable_tx = self.inner.signable_tx.lock()?.clone(); - let signed_tx = sign_with_multiple_v2(mutable_tx, privkeys).fully_signed()?; + let signed = sign_with_multiple_v2(mutable_tx, privkeys); + + let signed_tx = match signed { + Signed::Fully(tx) => tx, + Signed::Partially(_) => { + if check_fully_signed.unwrap_or(true) { + signed.fully_signed()? 
+ } else { + signed.unwrap() + } + } + }; + *self.inner.signable_tx.lock().unwrap() = signed_tx; Ok(()) } diff --git a/wallet/core/src/tx/generator/settings.rs b/wallet/core/src/tx/generator/settings.rs index 0055d8fb4f..34fd1bb6ef 100644 --- a/wallet/core/src/tx/generator/settings.rs +++ b/wallet/core/src/tx/generator/settings.rs @@ -20,6 +20,8 @@ pub struct GeneratorSettings { pub utxo_iterator: Box + Send + Sync + 'static>, // Utxo Context pub source_utxo_context: Option, + // Priority utxo entries that are consumed before others + pub priority_utxo_entries: Option>, // typically a number of keys required to sign the transaction pub sig_op_count: u8, // number of minimum signatures required to sign the transaction @@ -77,6 +79,7 @@ impl GeneratorSettings { change_address, utxo_iterator: Box::new(utxo_iterator), source_utxo_context: Some(account.utxo_context().clone()), + priority_utxo_entries: None, final_transaction_priority_fee: final_priority_fee, final_transaction_destination, @@ -89,6 +92,7 @@ impl GeneratorSettings { pub fn try_new_with_context( utxo_context: UtxoContext, + priority_utxo_entries: Option>, change_address: Address, sig_op_count: u8, minimum_signatures: u16, @@ -108,6 +112,7 @@ impl GeneratorSettings { change_address, utxo_iterator: Box::new(utxo_iterator), source_utxo_context: Some(utxo_context), + priority_utxo_entries, final_transaction_priority_fee: final_priority_fee, final_transaction_destination, @@ -121,6 +126,7 @@ impl GeneratorSettings { pub fn try_new_with_iterator( network_id: NetworkId, utxo_iterator: Box + Send + Sync + 'static>, + priority_utxo_entries: Option>, change_address: Address, sig_op_count: u8, minimum_signatures: u16, @@ -137,6 +143,7 @@ impl GeneratorSettings { change_address, utxo_iterator: Box::new(utxo_iterator), source_utxo_context: None, + priority_utxo_entries, final_transaction_priority_fee: final_priority_fee, final_transaction_destination, diff --git a/wallet/core/src/tx/generator/test.rs b/wallet/core/src/tx/generator/test.rs index 1368c51f2b..990698b722 100644 --- a/wallet/core/src/tx/generator/test.rs +++ b/wallet/core/src/tx/generator/test.rs @@ -16,7 +16,7 @@ use workflow_log::style; use super::*; -const DISPLAY_LOGS: bool = true; +const DISPLAY_LOGS: bool = false; const DISPLAY_EXPECTED: bool = true; #[derive(Clone, Copy, Debug)] @@ -169,12 +169,11 @@ fn validate(pt: &PendingTransaction) { ); let calc = MassCalculator::new(&pt.network_type().into(), network_params); - let additional_mass = if pt.is_final() { 0 } else { network_params.additional_compound_transaction_mass }; - let compute_mass = calc.calc_mass_for_signed_transaction(&tx, 1); + let additional_mass = if pt.is_final() { 0 } else { network_params.additional_compound_transaction_mass() }; + let compute_mass = calc.calc_compute_mass_for_unsigned_consensus_transaction(&tx, pt.minimum_signatures()); let utxo_entries = pt.utxo_entries().values().cloned().collect::>(); - let storage_mass = calc.calc_storage_mass_for_transaction(false, &utxo_entries, &tx.outputs).unwrap_or_default(); - + let storage_mass = calc.calc_storage_mass_for_transaction_parts(&utxo_entries, &tx.outputs).unwrap_or(u64::MAX); let calculated_mass = calc.combine_mass(compute_mass, storage_mass) + additional_mass; assert_eq!(pt.inner.mass, calculated_mass, "pending transaction mass does not match calculated mass"); @@ -199,19 +198,14 @@ where let pt_fees = pt.fees(); let calc = MassCalculator::new(&pt.network_type().into(), network_params); - let additional_mass = if pt.is_final() { 0 } else { 
network_params.additional_compound_transaction_mass }; + let additional_mass = if pt.is_final() { 0 } else { network_params.additional_compound_transaction_mass() }; - let compute_mass = calc.calc_mass_for_signed_transaction(&tx, 1); + let compute_mass = calc.calc_compute_mass_for_unsigned_consensus_transaction(&tx, pt.minimum_signatures()); let utxo_entries = pt.utxo_entries().values().cloned().collect::>(); - let storage_mass = calc.calc_storage_mass_for_transaction(false, &utxo_entries, &tx.outputs).unwrap_or_default(); + let storage_mass = calc.calc_storage_mass_for_transaction_parts(&utxo_entries, &tx.outputs).unwrap_or(u64::MAX); if DISPLAY_LOGS && storage_mass != 0 { - println!( - "calculated storage mass: {} calculated_compute_mass: {} total: {}", - storage_mass, - compute_mass, - storage_mass + compute_mass - ); + println!("calculated storage mass: {} calculated_compute_mass: {}", storage_mass, compute_mass,); } let calculated_mass = calc.combine_mass(compute_mass, storage_mass) + additional_mass; @@ -329,6 +323,21 @@ impl Harness { self.clone() } + pub fn accumulate(self: &Rc, count: usize) -> Rc { + for _n in 0..count { + if DISPLAY_LOGS { + println!( + "{}", + style(format!("accumulate gathering transaction: {} ({})", _n, self.accumulator.borrow().list.len())).magenta() + ); + } + let ptx = self.generator.generate_transaction().unwrap().unwrap(); + ptx.accumulate(&mut self.accumulator.borrow_mut()); + } + // println!("accumulated `{}` transactions", self.accumulator.borrow().list.len()); + self.clone() + } + pub fn validate(self: &Rc) -> Rc { while let Some(pt) = self.generator.generate_transaction().unwrap() { pt.accumulate(&mut self.accumulator.borrow_mut()).validate(); @@ -338,7 +347,16 @@ impl Harness { pub fn finalize(self: Rc) { let pt = self.generator.generate_transaction().unwrap(); - assert!(pt.is_none(), "expected no more transactions"); + if pt.is_some() { + let mut pending = self.generator.generate_transaction().unwrap(); + let mut count = 1; + while pending.is_some() { + count += 1; + pending = self.generator.generate_transaction().unwrap(); + } + + panic!("received extra `{}` unexpected transactions", count); + } let summary = self.generator.summary(); if DISPLAY_LOGS { println!("{:#?}", summary); @@ -392,6 +410,7 @@ where let sig_op_count = 1; let minimum_signatures = 1; let utxo_iterator: Box + Send + Sync + 'static> = Box::new(utxo_entries.into_iter()); + let priority_utxo_entries = None; let source_utxo_context = None; let destination_utxo_context = None; let final_priority_fee = fees; @@ -406,6 +425,7 @@ where change_address, utxo_iterator, source_utxo_context, + priority_utxo_entries, destination_utxo_context, final_transaction_priority_fee: final_priority_fee, final_transaction_destination, @@ -648,7 +668,7 @@ fn test_generator_inputs_100_outputs_1_fees_exclude_insufficient_funds() -> Resu } #[test] -fn test_generator_inputs_903_outputs_2_fees_exclude() -> Result<()> { +fn test_generator_inputs_1k_outputs_2_fees_exclude() -> Result<()> { generator(test_network_id(), &[10.0; 1_000], &[], Fees::sender(Kaspa(5.0)), [(output_address, Kaspa(9_000.0))].as_slice()) .unwrap() .harness() @@ -680,3 +700,28 @@ fn test_generator_inputs_903_outputs_2_fees_exclude() -> Result<()> { Ok(()) } + +#[test] +fn test_generator_inputs_32k_outputs_2_fees_exclude() -> Result<()> { + let f = 130.0; + generator( + test_network_id(), + &[f; 32_747], + &[], + Fees::sender(Kaspa(10_000.0)), + [(output_address, Kaspa(f * 32_747.0 - 10_001.0))].as_slice(), + ) + .unwrap() + .harness() + 
.accumulate(379) + .finalize(); + Ok(()) +} + +#[test] +fn test_generator_inputs_250k_outputs_2_sweep() -> Result<()> { + let f = 130.0; + let generator = make_generator(test_network_id(), &[f; 250_000], &[], Fees::None, change_address, PaymentDestination::Change); + generator.unwrap().harness().accumulate(2875).finalize(); + Ok(()) +} diff --git a/wallet/core/src/tx/mass.rs b/wallet/core/src/tx/mass.rs index b5583ddf34..16a489dc74 100644 --- a/wallet/core/src/tx/mass.rs +++ b/wallet/core/src/tx/mass.rs @@ -2,20 +2,16 @@ //! Transaction mass calculator. //! +use crate::error::Error; +use crate::result::Result; use crate::utxo::NetworkParams; +use kaspa_consensus_client as kcc; use kaspa_consensus_client::UtxoEntryReference; +use kaspa_consensus_core::mass::{calc_storage_mass as consensus_calc_storage_mass, Kip9Version}; use kaspa_consensus_core::tx::{Transaction, TransactionInput, TransactionOutput, SCRIPT_VECTOR_SIZE}; use kaspa_consensus_core::{config::params::Params, constants::*, subnets::SUBNETWORK_ID_SIZE}; use kaspa_hashes::HASH_SIZE; -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum MassCombinationStrategy { - /// `MassCombinator::Add` adds the storage and compute mass. - Add, - /// `MassCombinator::Max` returns the maximum of the storage and compute mass. - Max, -} - // pub const ECDSA_SIGNATURE_SIZE: u64 = 64; // pub const SCHNORR_SIGNATURE_SIZE: u64 = 64; pub const SIGNATURE_SIZE: u64 = 1 + 64 + 1; //1 byte for OP_DATA_65 + 64 (length of signature) + 1 byte for sig hash type @@ -222,7 +218,7 @@ pub struct MassCalculator { mass_per_script_pub_key_byte: u64, mass_per_sig_op: u64, storage_mass_parameter: u64, - mass_combination_strategy: MassCombinationStrategy, + kip9_version: Kip9Version, } impl MassCalculator { @@ -232,7 +228,7 @@ impl MassCalculator { mass_per_script_pub_key_byte: consensus_params.mass_per_script_pub_key_byte, mass_per_sig_op: consensus_params.mass_per_sig_op, storage_mass_parameter: consensus_params.storage_mass_parameter, - mass_combination_strategy: network_params.mass_combination_strategy, + kip9_version: network_params.kip9_version(), } } @@ -243,117 +239,107 @@ impl MassCalculator { } } - pub fn calc_mass_for_transaction(&self, tx: &Transaction) -> u64 { - self.blank_transaction_mass() - + self.calc_mass_for_payload(tx.payload.len()) - + self.calc_mass_for_outputs(&tx.outputs) - + self.calc_mass_for_inputs(&tx.inputs) + pub fn calc_compute_mass_for_signed_consensus_transaction(&self, tx: &Transaction) -> u64 { + let payload_len = tx.payload.len(); + self.blank_transaction_compute_mass() + + self.calc_compute_mass_for_payload(payload_len) + + self.calc_compute_mass_for_client_transaction_outputs(&tx.outputs) + + self.calc_compute_mass_for_client_transaction_inputs(&tx.inputs) } - pub fn blank_transaction_mass(&self) -> u64 { + pub(crate) fn blank_transaction_compute_mass(&self) -> u64 { blank_transaction_serialized_byte_size() * self.mass_per_tx_byte } - pub fn calc_mass_for_payload(&self, payload_byte_size: usize) -> u64 { + pub(crate) fn calc_compute_mass_for_payload(&self, payload_byte_size: usize) -> u64 { payload_byte_size as u64 * self.mass_per_tx_byte } - pub fn calc_mass_for_outputs(&self, outputs: &[TransactionOutput]) -> u64 { - outputs.iter().map(|output| self.calc_mass_for_output(output)).sum() + pub(crate) fn calc_compute_mass_for_client_transaction_outputs(&self, outputs: &[TransactionOutput]) -> u64 { + outputs.iter().map(|output| self.calc_compute_mass_for_client_transaction_output(output)).sum() } - pub fn calc_mass_for_inputs(&self, 
inputs: &[TransactionInput]) -> u64 { - inputs.iter().map(|input| self.calc_mass_for_input(input)).sum::() + pub(crate) fn calc_compute_mass_for_client_transaction_inputs(&self, inputs: &[TransactionInput]) -> u64 { + inputs.iter().map(|input| self.calc_compute_mass_for_client_transaction_input(input)).sum::() } - pub fn calc_mass_for_output(&self, output: &TransactionOutput) -> u64 { + pub(crate) fn calc_compute_mass_for_client_transaction_output(&self, output: &TransactionOutput) -> u64 { + // +2 for u16 version self.mass_per_script_pub_key_byte * (2 + output.script_public_key.script().len() as u64) + transaction_output_serialized_byte_size(output) * self.mass_per_tx_byte } - pub fn calc_mass_for_input(&self, input: &TransactionInput) -> u64 { + pub(crate) fn calc_compute_mass_for_client_transaction_input(&self, input: &TransactionInput) -> u64 { input.sig_op_count as u64 * self.mass_per_sig_op + transaction_input_serialized_byte_size(input) * self.mass_per_tx_byte } - pub fn calc_signature_mass(&self, minimum_signatures: u16) -> u64 { - let minimum_signatures = std::cmp::max(1, minimum_signatures); - SIGNATURE_SIZE * self.mass_per_tx_byte * minimum_signatures as u64 + pub(crate) fn calc_compute_mass_for_signature(&self, minimum_signatures: u16) -> u64 { + SIGNATURE_SIZE * self.mass_per_tx_byte * minimum_signatures.max(1) as u64 } - pub fn calc_signature_mass_for_inputs(&self, number_of_inputs: usize, minimum_signatures: u16) -> u64 { - let minimum_signatures = std::cmp::max(1, minimum_signatures); - SIGNATURE_SIZE * self.mass_per_tx_byte * minimum_signatures as u64 * number_of_inputs as u64 + pub fn calc_signature_compute_mass_for_inputs(&self, number_of_inputs: usize, minimum_signatures: u16) -> u64 { + SIGNATURE_SIZE * self.mass_per_tx_byte * minimum_signatures.max(1) as u64 * number_of_inputs as u64 } pub fn calc_minimum_transaction_fee_from_mass(&self, mass: u64) -> u64 { calc_minimum_required_transaction_relay_fee(mass) } - pub fn calc_mass_for_signed_transaction(&self, tx: &Transaction, minimum_signatures: u16) -> u64 { - self.calc_mass_for_transaction(tx) + self.calc_signature_mass_for_inputs(tx.inputs.len(), minimum_signatures) + pub fn calc_compute_mass_for_unsigned_consensus_transaction(&self, tx: &Transaction, minimum_signatures: u16) -> u64 { + self.calc_compute_mass_for_signed_consensus_transaction(tx) + + self.calc_signature_compute_mass_for_inputs(tx.inputs.len(), minimum_signatures) } - pub fn calc_minium_transaction_relay_fee(&self, tx: &Transaction, minimum_signatures: u16) -> u64 { - let mass = self.calc_mass_for_transaction(tx) + self.calc_signature_mass_for_inputs(tx.inputs.len(), minimum_signatures); - calc_minimum_required_transaction_relay_fee(mass) + // provisional + #[inline(always)] + pub fn calc_fee_for_mass(&self, mass: u64) -> u64 { + mass } - pub fn calc_tx_storage_fee(&self, is_coinbase: bool, inputs: &[UtxoEntryReference], outputs: &[TransactionOutput]) -> u64 { - self.calc_fee_for_storage_mass(self.calc_storage_mass_for_transaction(is_coinbase, inputs, outputs).unwrap_or(u64::MAX)) + pub fn combine_mass(&self, compute_mass: u64, storage_mass: u64) -> u64 { + match self.kip9_version { + Kip9Version::Alpha => compute_mass.saturating_add(storage_mass), + Kip9Version::Beta => compute_mass.max(storage_mass), + } } - pub fn calc_fee_for_storage_mass(&self, mass: u64) -> u64 { - mass + /// Calculates the overall mass of this transaction, combining both compute and storage masses. 
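The kip9_version field replaces the former MassCombinationStrategy: under KIP-9 Alpha the compute and storage masses are summed, while under Beta the larger of the two is used. A self-contained sketch of the combine_mass semantics shown in this hunk (illustrative only, not the crate's actual types):

    #[derive(Clone, Copy)]
    enum Kip9Version {
        Alpha, // overall mass = compute mass + storage mass (saturating)
        Beta,  // overall mass = max(compute mass, storage mass)
    }

    fn combine_mass(version: Kip9Version, compute_mass: u64, storage_mass: u64) -> u64 {
        match version {
            Kip9Version::Alpha => compute_mass.saturating_add(storage_mass),
            Kip9Version::Beta => compute_mass.max(storage_mass),
        }
    }

    fn main() {
        assert_eq!(combine_mass(Kip9Version::Alpha, 2_000, 3_000), 5_000);
        assert_eq!(combine_mass(Kip9Version::Beta, 2_000, 3_000), 3_000);
    }
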
+ pub fn calc_overall_mass_for_unsigned_client_transaction(&self, tx: &kcc::Transaction, minimum_signatures: u16) -> Result { + let cctx = Transaction::from(tx); + let storage_mass = self.calc_storage_mass_for_transaction(tx)?.ok_or(Error::MassCalculationError)?; + let compute_mass = self.calc_compute_mass_for_unsigned_consensus_transaction(&cctx, minimum_signatures); + Ok(self.combine_mass(compute_mass, storage_mass)) } - pub fn combine_mass(&self, compute_mass: u64, storage_mass: u64) -> u64 { - match self.mass_combination_strategy { - MassCombinationStrategy::Add => compute_mass + storage_mass, - MassCombinationStrategy::Max => std::cmp::max(compute_mass, storage_mass), - } + pub fn calc_overall_mass_for_unsigned_consensus_transaction( + &self, + tx: &Transaction, + utxos: &[UtxoEntryReference], + minimum_signatures: u16, + ) -> Result { + let storage_mass = self.calc_storage_mass_for_transaction_parts(utxos, &tx.outputs).ok_or(Error::MassCalculationError)?; + let compute_mass = self.calc_compute_mass_for_unsigned_consensus_transaction(tx, minimum_signatures); + Ok(self.combine_mass(compute_mass, storage_mass)) + } + + pub fn calc_storage_mass_for_transaction(&self, tx: &kcc::Transaction) -> Result> { + let utxos = tx.utxo_entry_references()?; + let outputs = tx.outputs(); + Ok(self.calc_storage_mass_for_transaction_parts(&utxos, &outputs)) } - pub fn calc_storage_mass_for_transaction( + pub fn calc_storage_mass_for_transaction_parts( &self, - is_coinbase: bool, inputs: &[UtxoEntryReference], outputs: &[TransactionOutput], ) -> Option { - if is_coinbase { - return Some(0); - } - /* The code below computes the following formula: - - max( 0 , C·( |O|/H(O) - |I|/A(I) ) ) - - where C is the mass storage parameter, O is the set of output values, I is the set of - input values, H(S) := |S|/sum_{s in S} 1 / s is the harmonic mean over the set S and - A(S) := sum_{s in S} / |S| is the arithmetic mean. - - See the (to date unpublished) KIP-0009 for more details - */ - - // Since we are doing integer division, we perform the multiplication with C over the inner - // fractions, otherwise we'll get a sum of zeros or ones. - // - // If sum of fractions overflowed (nearly impossible, requires 10^7 outputs for C = 10^12), - // we return `None` indicating mass is incomputable - - let harmonic_outs = outputs - .iter() - .map(|out| self.storage_mass_parameter / out.value) - .try_fold(0u64, |total, current| total.checked_add(current))?; // C·|O|/H(O) - - // Total supply is bounded, so a sum of existing UTXO entries cannot overflow (nor can it be zero) - let sum_ins = inputs.iter().map(|entry| entry.amount()).sum::(); // |I|·A(I) - let ins_len = inputs.len() as u64; - let mean_ins = sum_ins / ins_len; - - // Inner fraction must be with C and over the mean value, in order to maximize precision. 
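The formula in the removed comment above is now computed by consensus_calc_storage_mass in kaspa_consensus_core. As a sanity check, here is a minimal standalone sketch of the same integer arithmetic (values are hypothetical; c stands for the storage mass parameter):

    // max(0, C·(|O|/H(O) − |I|/A(I))), mirroring the removed code above
    fn storage_mass(c: u64, inputs: &[u64], outputs: &[u64]) -> Option<u64> {
        // C·|O|/H(O): sum of C/value over outputs; None if the sum overflows
        let harmonic_outs = outputs.iter().map(|&o| c / o).try_fold(0u64, u64::checked_add)?;
        // C·|I|/A(I): C divided by the arithmetic mean of the input values
        let mean_ins = inputs.iter().sum::<u64>() / inputs.len() as u64;
        let arithmetic_ins = (inputs.len() as u64).saturating_mul(c / mean_ins);
        // lower-bound the subtraction at zero
        Some(harmonic_outs.saturating_sub(arithmetic_ins))
    }

    // One 3 KAS input split into two 1 KAS outputs (values in sompi), C = 10^12:
    // harmonic_outs = 2·(10^12/10^8) = 20_000, arithmetic_ins = 10^12/(3·10^8) = 3_333,
    // so the resulting storage mass is 20_000 - 3_333 = 16_667.
    assert_eq!(storage_mass(1_000_000_000_000, &[300_000_000], &[100_000_000, 100_000_000]), Some(16_667));

Note that combine_mass above then folds this value into the overall mass: Kip9Version::Alpha adds it to the compute mass (saturating), while Kip9Version::Beta takes the maximum of the two.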
- // We can saturate the overall expression at u64::MAX since we lower-bound the subtraction below by zero anyway - let arithmetic_ins = ins_len.saturating_mul(self.storage_mass_parameter / mean_ins); // C·|I|/A(I) - - Some(harmonic_outs.saturating_sub(arithmetic_ins)) // max( 0 , C·( |O|/H(O) - |I|/A(I) ) ) + consensus_calc_storage_mass( + false, + inputs.iter().map(|entry| entry.amount()), + outputs.iter().map(|out| out.value), + self.kip9_version, + self.storage_mass_parameter, + ) } pub fn calc_storage_mass_output_harmonic(&self, outputs: &[TransactionOutput]) -> Option { diff --git a/wallet/core/src/tx/payment.rs b/wallet/core/src/tx/payment.rs index 0cce1f30f3..c164e0d789 100644 --- a/wallet/core/src/tx/payment.rs +++ b/wallet/core/src/tx/payment.rs @@ -30,12 +30,19 @@ export interface IPaymentOutput { #[wasm_bindgen] extern "C" { + /// WASM (TypeScript) type representing a single payment output (`IPaymentOutput`). + /// @category Wallet SDK #[wasm_bindgen(typescript_type = "IPaymentOutput")] pub type IPaymentOutput; + /// WASM (TypeScript) type representing multiple payment outputs (`IPaymentOutput[]`). + /// @category Wallet SDK #[wasm_bindgen(typescript_type = "IPaymentOutput[]")] pub type IPaymentOutputArray; } +/// A Rust data structure representing a payment destination. +/// A payment destination is used to signal Generator where to send the funds. +/// The destination can be a change address or a set of [`PaymentOutput`]. #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] pub enum PaymentDestination { Change, @@ -51,6 +58,9 @@ impl PaymentDestination { } } +/// A Rust data structure representing a single payment +/// output containing a destination address and amount. +/// /// @category Wallet SDK #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize, CastFromJs)] #[wasm_bindgen(inspectable)] @@ -62,8 +72,11 @@ pub struct PaymentOutput { impl TryCastFromJs for PaymentOutput { type Error = Error; - fn try_cast_from(value: impl AsRef) -> Result, Self::Error> { - Self::resolve(&value, || { + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + where + R: AsRef + 'a, + { + Self::resolve(value, || { if let Some(array) = value.as_ref().dyn_ref::() { let length = array.length(); if length != 2 { @@ -74,7 +87,7 @@ impl TryCastFromJs for PaymentOutput { Ok(Self { address, amount }) } } else if let Some(object) = Object::try_from(value.as_ref()) { - let address = object.get_cast::
("address")?.into_owned(); + let address = object.cast_into::
("address")?; let amount = object.get_u64("amount")?; Ok(Self { address, amount }) } else { @@ -145,8 +158,11 @@ impl PaymentOutputs { impl TryCastFromJs for PaymentOutputs { type Error = Error; - fn try_cast_from(value: impl AsRef) -> Result, Self::Error> { - Self::resolve(&value, || { + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + where + R: AsRef + 'a, + { + Self::resolve(value, || { let outputs = if let Some(output_array) = value.as_ref().dyn_ref::() { let vec = output_array.to_vec(); vec.into_iter().map(PaymentOutput::try_owned_from).collect::, _>>()? diff --git a/wallet/core/src/utxo/balance.rs b/wallet/core/src/utxo/balance.rs index ce189e124e..f16ce94ff6 100644 --- a/wallet/core/src/utxo/balance.rs +++ b/wallet/core/src/utxo/balance.rs @@ -10,6 +10,7 @@ pub enum DeltaStyle { } #[derive(Default, Debug, Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +#[borsh(use_discriminant = true)] pub enum Delta { #[default] NoChange = 0, diff --git a/wallet/core/src/utxo/context.rs b/wallet/core/src/utxo/context.rs index 51ef0e5ea9..39575a64f6 100644 --- a/wallet/core/src/utxo/context.rs +++ b/wallet/core/src/utxo/context.rs @@ -299,7 +299,7 @@ impl UtxoContext { context.mature.sorted_insert_binary_asc_by_key(utxo_entry.clone(), |entry| entry.amount_as_ref()); } else { let params = NetworkParams::from(self.processor().network_id()?); - match utxo_entry.maturity(¶ms, current_daa_score) { + match utxo_entry.maturity(params, current_daa_score) { Maturity::Stasis => { context.stasis.insert(utxo_entry.id().clone(), utxo_entry.clone()); self.processor() @@ -319,7 +319,7 @@ impl UtxoContext { } Ok(()) } else { - log_warn!("ignoring duplicate utxo entry"); + // log_warn!("Warning: Ignoring duplicate UTXO entry"); Ok(()) } } @@ -346,8 +346,8 @@ impl UtxoContext { } else { remove_mature_ids.push(id); } - } else { - log_error!("Error: UTXO not found in UtxoContext map!"); + } else if context.outgoing.get(&utxo.transaction_id()).is_none() { + // log_warm!("Warning: UTXO not found in UtxoContext map!"); } } @@ -374,10 +374,10 @@ impl UtxoContext { context.mature.sorted_insert_binary_asc_by_key(utxo_entry.clone(), |entry| entry.amount_as_ref()); } else { log_error!("Error: non-pending utxo promotion!"); - unreachable!("Error: non-pending utxo promotion!"); } } + // sanity check if self.context().outgoing.get(&txid).is_some() { unreachable!("Error: promotion of the outgoing transaction!"); } @@ -421,14 +421,14 @@ impl UtxoContext { let mut context = self.context(); let mut pending = vec![]; - let mut mature = vec![]; + let mut mature = Vec::with_capacity(utxo_entries.len()); let params = NetworkParams::from(self.processor().network_id()?); for utxo_entry in utxo_entries.into_iter() { if let std::collections::hash_map::Entry::Vacant(e) = context.map.entry(utxo_entry.id()) { e.insert(utxo_entry.clone()); - match utxo_entry.maturity(¶ms, current_daa_score) { + match utxo_entry.maturity(params, current_daa_score) { Maturity::Stasis => { context.stasis.insert(utxo_entry.id().clone(), utxo_entry.clone()); self.processor() @@ -444,7 +444,6 @@ impl UtxoContext { } Maturity::Confirmed => { mature.push(utxo_entry.clone()); - context.mature.sorted_insert_binary_asc_by_key(utxo_entry.clone(), |entry| entry.amount_as_ref()); } } } else { @@ -452,6 +451,9 @@ impl UtxoContext { } } + context.mature.extend(mature.iter().cloned()); + context.mature.sort_by_key(|entry| entry.amount()); + (pending, mature) }; @@ -482,19 +484,25 @@ impl UtxoContext { // the final payments (not compound 
transactions) // and outgoing transactions that have not yet // been accepted + let mut outgoing_without_batch_tx = 0; let mut outgoing: u64 = 0; let mut consumed: u64 = 0; - for tx in context.outgoing.values() { - if !tx.is_accepted() { - if let Some(payment_value) = tx.payment_value() { + + let transactions = context.outgoing.values().filter(|tx| !tx.is_accepted()); + for tx in transactions { + if let Some(payment_value) = tx.payment_value() { + consumed += tx.aggregate_input_value(); + if tx.is_batch() { + outgoing += tx.fees() + tx.aggregate_output_value(); + } else { // final tx outgoing += tx.fees() + payment_value; - consumed += tx.aggregate_input_value(); - } else { - // compound tx has no payment value - outgoing += tx.fees() + tx.aggregate_output_value(); - consumed += tx.aggregate_input_value() + outgoing_without_batch_tx += payment_value; } + } else { + // compound tx has no payment value + outgoing += tx.fees() + tx.aggregate_output_value(); + consumed += tx.aggregate_input_value(); } } @@ -502,13 +510,12 @@ impl UtxoContext { // this condition does not occur. This is a temporary // log for a fixed bug, but we want to keep the check // just in case. - if mature + consumed < outgoing { - log_error!("Error: outgoing transaction value exceeds available balance"); + if consumed < outgoing { + log_error!("Error: outgoing transaction value exceeds available balance, mature: {mature}, consumed: {consumed}, outgoing: {outgoing}"); } let mature = (mature + consumed).saturating_sub(outgoing); - - Balance::new(mature, pending, outgoing, context.mature.len(), context.pending.len(), context.stasis.len()) + Balance::new(mature, pending, outgoing_without_batch_tx, context.mature.len(), context.pending.len(), context.stasis.len()) } pub(crate) async fn handle_utxo_added(&self, utxos: Vec, current_daa_score: u64) -> Result<()> { @@ -526,13 +533,15 @@ impl UtxoContext { let force_maturity_if_outgoing = outgoing_transaction.is_some(); let is_coinbase_stasis = - utxos.first().map(|utxo| matches!(utxo.maturity(¶ms, current_daa_score), Maturity::Stasis)).unwrap_or_default(); - - for utxo in utxos.iter() { - if let Err(err) = self.insert(utxo.clone(), current_daa_score, force_maturity_if_outgoing).await { - // TODO - remove `Result<>` from insert at a later date once - // we are confident that the insert will never result in an error. - log_error!("{}", err); + utxos.first().map(|utxo| matches!(utxo.maturity(params, current_daa_score), Maturity::Stasis)).unwrap_or_default(); + let is_batch = outgoing_transaction.as_ref().map_or_else(|| false, |tx| tx.is_batch()); + if !is_batch { + for utxo in utxos.iter() { + if let Err(err) = self.insert(utxo.clone(), current_daa_score, force_maturity_if_outgoing).await { + // TODO - remove `Result<>` from insert at a later date once + // we are confident that the insert will never result in an error. 
+ log_error!("{}", err); + } } } @@ -564,21 +573,20 @@ impl UtxoContext { Ok(()) } - pub(crate) async fn handle_utxo_removed(&self, mut utxos: Vec, current_daa_score: u64) -> Result<()> { + pub(crate) async fn handle_utxo_removed(&self, utxos: Vec, current_daa_score: u64) -> Result<()> { // remove UTXOs from account set let outgoing_transactions = self.processor().outgoing(); + #[allow(clippy::mutable_key_type)] let mut accepted_outgoing_transactions = HashSet::::new(); - utxos.retain(|utxo| { + for utxo in &utxos { for outgoing_transaction in outgoing_transactions.iter() { if outgoing_transaction.utxo_entries().contains_key(&utxo.id()) { accepted_outgoing_transactions.insert((*outgoing_transaction).clone()); - return false; } } - true - }); + } for accepted_outgoing_transaction in accepted_outgoing_transactions.into_iter() { if accepted_outgoing_transaction.is_batch() { diff --git a/wallet/core/src/utxo/processor.rs b/wallet/core/src/utxo/processor.rs index e788272f23..f6480f333e 100644 --- a/wallet/core/src/utxo/processor.rs +++ b/wallet/core/src/utxo/processor.rs @@ -14,13 +14,13 @@ use kaspa_notify::{ use kaspa_rpc_core::{ api::{ ctl::{RpcCtl, RpcState}, - ops::RPC_API_VERSION, + ops::{RPC_API_REVISION, RPC_API_VERSION}, }, message::UtxosChangedNotification, GetServerInfoResponse, }; use kaspa_wrpc_client::KaspaRpcClient; -use workflow_core::channel::{Channel, DuplexChannel}; +use workflow_core::channel::{Channel, DuplexChannel, Sender}; use workflow_core::task::spawn; use crate::events::Events; @@ -33,8 +33,6 @@ use kaspa_rpc_core::{ notify::connection::{ChannelConnection, ChannelType}, Notification, }; -// use workflow_core::task; -// use kaspa_metrics_core::{Metrics,Metric}; pub struct Inner { /// Coinbase UTXOs in stasis @@ -58,10 +56,11 @@ pub struct Inner { sync_proc: SyncMonitor, multiplexer: Multiplexer>, wallet_bus: Option>, - notification_guard: AsyncMutex<()>, + notification_guard: AsyncRwLock<()>, connect_disconnect_guard: AsyncMutex<()>, metrics: Arc, metrics_kinds: Mutex>, + connection_signaler: Mutex>>>, } impl Inner { @@ -91,6 +90,7 @@ impl Inner { connect_disconnect_guard: Default::default(), metrics: Arc::new(Metrics::default()), metrics_kinds: Mutex::new(vec![]), + connection_signaler: Mutex::new(None), } } } @@ -159,8 +159,8 @@ impl UtxoProcessor { &self.inner.multiplexer } - pub async fn notification_lock(&self) -> AsyncMutexGuard<()> { - self.inner.notification_guard.lock().await + pub async fn notification_lock(&self) -> AsyncRwLockReadGuard<()> { + self.inner.notification_guard.read().await } pub fn sync_proc(&self) -> &SyncMonitor { @@ -180,8 +180,10 @@ impl UtxoProcessor { } pub fn network_params(&self) -> Result<&'static NetworkParams> { + // pub fn network_params(&self) -> Result { let network_id = (*self.inner.network_id.lock().unwrap()).ok_or(Error::MissingNetworkId)?; - Ok(network_id.into()) + Ok(NetworkParams::from(network_id)) + // Ok(network_id.into()) } pub fn pending(&self) -> &DashMap { @@ -264,6 +266,7 @@ impl UtxoProcessor { Ok(()) } + #[allow(clippy::mutable_key_type)] pub async fn handle_pending(&self, current_daa_score: u64) -> Result<()> { let params = self.network_params()?; @@ -329,7 +332,7 @@ impl UtxoProcessor { } async fn handle_outgoing(&self, current_daa_score: u64) -> Result<()> { - let longevity = self.network_params()?.user_transaction_maturity_period_daa; + let longevity = self.network_params()?.user_transaction_maturity_period_daa(); self.inner.outgoing.retain(|_, outgoing| { if outgoing.acceptance_daa_score() != 0 && 
(outgoing.acceptance_daa_score() + longevity) < current_daa_score { @@ -388,6 +391,7 @@ pub async fn handle_utxo_changed(&self, utxos: UtxosChangedNotification) -> Result<()> { let current_daa_score = self.current_daa_score().expect("DAA score expected when handling UTXO Changed notifications"); + #[allow(clippy::mutable_key_type)] let mut updated_contexts: HashSet<UtxoContext> = HashSet::default(); let removed = (*utxos.removed).clone().into_iter().filter_map(|entry| entry.address.clone().map(|address| (address, entry))); @@ -437,14 +441,21 @@ pub async fn init_state_from_server(&self) -> Result { let GetServerInfoResponse { + rpc_api_version, + rpc_api_revision, server_version, network_id: server_network_id, has_utxo_index, is_synced, virtual_daa_score, - rpc_api_version, } = self.rpc_api().get_server_info().await?; + if rpc_api_version > RPC_API_VERSION { + let current = format!("{RPC_API_VERSION}.{RPC_API_REVISION}"); + let connected = format!("{rpc_api_version}.{rpc_api_revision}"); + return Err(Error::RpcApiVersion(current, connected)); + } + if !has_utxo_index { self.notify(Events::UtxoIndexNotEnabled { url: self.rpc_url() }).await?; return Err(Error::MissingUtxoIndex); @@ -455,12 +466,6 @@ return Err(Error::InvalidNetworkType(network_id.to_string(), server_network_id.to_string())); } - if rpc_api_version[0] > RPC_API_VERSION[0] || rpc_api_version[1] > RPC_API_VERSION[1] { - let current = RPC_API_VERSION.iter().map(|v| v.to_string()).collect::<Vec<_>>().join("."); - let connected = rpc_api_version.iter().map(|v| v.to_string()).collect::<Vec<_>>().join("."); - return Err(Error::RpcApiVersion(current, connected)); - } - self.inner.current_daa_score.store(virtual_daa_score, Ordering::SeqCst); log_trace!("Connected to kaspad: '{server_version}' on '{server_network_id}'; SYNC: {is_synced} DAA: {virtual_daa_score}"); @@ -487,12 +492,30 @@ Ok(()) }
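The connection signaler added below gives callers a one-shot channel for the outcome of a wRPC connection attempt instead of relying on log output. A minimal sketch of the intended call sequence, mirroring connect_call() in wallet/api.rs later in this patch (the wiring shown is hypothetical; error handling elided):

    // assumes a UtxoProcessor and a configured wRPC client, as in this patch
    let (sender, receiver) = oneshot();
    // register the one-shot signaler before initiating the connection
    processor.set_connection_signaler(sender);
    wrpc_client.connect(Some(options)).await.map_err(|e| e.to_string())?;
    // resolves to Ok(()) on success, or Err(message) if the attempt failed
    receiver.recv().await?.map_err(Error::custom)?;

+ /// Allows the caller to supply a channel Sender that will
+ /// receive the result of the wRPC connection attempt.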
+ pub fn set_connection_signaler(&self, signal: Sender>) { + *self.inner.connection_signaler.lock().unwrap() = Some(signal); + } + + fn signal_connection(&self, result: std::result::Result<(), String>) -> bool { + let signal = self.inner.connection_signaler.lock().unwrap().take(); + if let Some(signal) = signal.as_ref() { + let _ = signal.try_send(result); + true + } else { + false + } + } + pub async fn handle_connect(&self) -> Result<()> { let _ = self.inner.connect_disconnect_guard.lock().await; match self.handle_connect_impl().await { Err(err) => { - log_error!("UtxoProcessor: error while connecting to node: {err}"); + if !self.signal_connection(Err(err.to_string())) { + log_error!("UtxoProcessor: error while connecting to node: {err}"); + } self.notify(Events::UtxoProcError { message: err.to_string() }).await?; if let Some(client) = self.rpc_client() { // try force disconnect the client if we have failed @@ -501,7 +524,10 @@ impl UtxoProcessor { } Err(err) } - Ok(_) => Ok(()), + Ok(_) => { + self.signal_connection(Ok(())); + Ok(()) + } } } @@ -549,7 +575,7 @@ impl UtxoProcessor { } async fn handle_notification(&self, notification: Notification) -> Result<()> { - let _lock = self.notification_lock().await; + let _lock = self.inner.notification_guard.write().await; match notification { Notification::VirtualDaaScoreChanged(virtual_daa_score_changed_notification) => { @@ -578,9 +604,7 @@ impl UtxoProcessor { match kind { MetricsUpdateKind::WalletMetrics => { let mempool_size = snapshot.get(&Metric::NetworkMempoolSize) as u64; - let node_peers = snapshot.get(&Metric::NodeActivePeers) as u32; - let network_tps = snapshot.get(&Metric::NetworkTransactionsPerSecond); - let metrics = MetricsUpdate::WalletMetrics { mempool_size, node_peers, network_tps }; + let metrics = MetricsUpdate::WalletMetrics { mempool_size }; self.try_notify(Events::Metrics { network_id: self.network_id()?, metrics })?; } } @@ -631,15 +655,11 @@ impl UtxoProcessor { // handle RPC channel connection and disconnection events match msg { RpcState::Connected => { - if !this.is_connected() { - if let Err(err) = this.handle_connect().await { - log_error!("UtxoProcessor error: {err}"); - } else { - this.inner.multiplexer.try_broadcast(Box::new(Events::Connect { - network_id : this.network_id().expect("network id expected during connection"), - url : this.rpc_url() - })).unwrap_or_else(|err| log_error!("{err}")); - } + if !this.is_connected() && this.handle_connect().await.is_ok() { + this.inner.multiplexer.try_broadcast(Box::new(Events::Connect { + network_id : this.network_id().expect("network id expected during connection"), + url : this.rpc_url() + })).unwrap_or_else(|err| log_error!("{err}")); } }, RpcState::Disconnected => { diff --git a/wallet/core/src/utxo/reference.rs b/wallet/core/src/utxo/reference.rs index 5e35f9e8b7..7bc0ec287c 100644 --- a/wallet/core/src/utxo/reference.rs +++ b/wallet/core/src/utxo/reference.rs @@ -34,14 +34,14 @@ pub trait UtxoEntryReferenceExtension { impl UtxoEntryReferenceExtension for UtxoEntryReference { fn maturity(&self, params: &NetworkParams, current_daa_score: u64) -> Maturity { if self.is_coinbase() { - if self.block_daa_score() + params.coinbase_transaction_stasis_period_daa > current_daa_score { + if self.block_daa_score() + params.coinbase_transaction_stasis_period_daa() > current_daa_score { Maturity::Stasis - } else if self.block_daa_score() + params.coinbase_transaction_maturity_period_daa > current_daa_score { + } else if self.block_daa_score() + 
params.coinbase_transaction_maturity_period_daa() > current_daa_score { Maturity::Pending } else { Maturity::Confirmed } - } else if self.block_daa_score() + params.user_transaction_maturity_period_daa > current_daa_score { + } else if self.block_daa_score() + params.user_transaction_maturity_period_daa() > current_daa_score { Maturity::Pending } else { Maturity::Confirmed diff --git a/wallet/core/src/utxo/scan.rs b/wallet/core/src/utxo/scan.rs index f01257c965..fff6effa94 100644 --- a/wallet/core/src/utxo/scan.rs +++ b/wallet/core/src/utxo/scan.rs @@ -55,6 +55,9 @@ impl Scan { } pub async fn scan(&self, utxo_context: &UtxoContext) -> Result<()> { + // block notifications while scanning... + let _lock = utxo_context.processor().notification_lock().await; + match &self.provider { Provider::AddressManager(address_manager) => self.scan_with_address_manager(address_manager, utxo_context).await, Provider::AddressSet(addresses) => self.scan_with_address_set(addresses, utxo_context).await, @@ -86,9 +89,9 @@ impl Scan { let ts = Instant::now(); let resp = utxo_context.processor().rpc_api().get_utxos_by_addresses(addresses).await?; - let elapsed_msec = ts.elapsed().as_secs_f32(); - if elapsed_msec > 1.0 { - log_warn!("get_utxos_by_address() fetched {} entries in: {} msec", resp.len(), elapsed_msec); + let elapsed_sec = ts.elapsed().as_secs_f32(); + if elapsed_sec > 1.0 { + log_warn!("get_utxos_by_address() fetched {} entries in: {} sec", resp.len(), elapsed_sec); } yield_executor().await; diff --git a/wallet/core/src/utxo/settings.rs b/wallet/core/src/utxo/settings.rs index 3890263be0..6828d73cfe 100644 --- a/wallet/core/src/utxo/settings.rs +++ b/wallet/core/src/utxo/settings.rs @@ -4,58 +4,94 @@ //! use crate::imports::*; +use kaspa_consensus_core::mass::Kip9Version; #[derive(Debug)] pub struct NetworkParams { - pub coinbase_transaction_maturity_period_daa: u64, + pub coinbase_transaction_maturity_period_daa: AtomicU64, pub coinbase_transaction_stasis_period_daa: u64, - pub user_transaction_maturity_period_daa: u64, - pub mass_combination_strategy: MassCombinationStrategy, + pub user_transaction_maturity_period_daa: AtomicU64, + pub kip9_version: Kip9Version, pub additional_compound_transaction_mass: u64, } -pub const MAINNET_NETWORK_PARAMS: NetworkParams = NetworkParams { - coinbase_transaction_maturity_period_daa: 100, +impl NetworkParams { + #[inline] + pub fn coinbase_transaction_maturity_period_daa(&self) -> u64 { + self.coinbase_transaction_maturity_period_daa.load(Ordering::Relaxed) + } + + #[inline] + pub fn coinbase_transaction_stasis_period_daa(&self) -> u64 { + self.coinbase_transaction_stasis_period_daa + } + + #[inline] + pub fn user_transaction_maturity_period_daa(&self) -> u64 { + self.user_transaction_maturity_period_daa.load(Ordering::Relaxed) + } + + #[inline] + pub fn kip9_version(&self) -> Kip9Version { + self.kip9_version + } + + #[inline] + pub fn additional_compound_transaction_mass(&self) -> u64 { + self.additional_compound_transaction_mass + } + + pub fn set_coinbase_transaction_maturity_period_daa(&self, value: u64) { + self.coinbase_transaction_maturity_period_daa.store(value, Ordering::Relaxed); + } + + pub fn set_user_transaction_maturity_period_daa(&self, value: u64) { + self.user_transaction_maturity_period_daa.store(value, Ordering::Relaxed); + } +} + +static MAINNET_NETWORK_PARAMS: LazyLock<NetworkParams> = LazyLock::new(|| NetworkParams { + coinbase_transaction_maturity_period_daa: AtomicU64::new(100), coinbase_transaction_stasis_period_daa: 50, -
user_transaction_maturity_period_daa: 10, - mass_combination_strategy: MassCombinationStrategy::Add, - additional_compound_transaction_mass: 0, -}; + user_transaction_maturity_period_daa: AtomicU64::new(10), + kip9_version: Kip9Version::Beta, + additional_compound_transaction_mass: 100, +}); -pub const TESTNET10_NETWORK_PARAMS: NetworkParams = NetworkParams { - coinbase_transaction_maturity_period_daa: 100, +static TESTNET10_NETWORK_PARAMS: LazyLock = LazyLock::new(|| NetworkParams { + coinbase_transaction_maturity_period_daa: AtomicU64::new(100), coinbase_transaction_stasis_period_daa: 50, - user_transaction_maturity_period_daa: 10, - mass_combination_strategy: MassCombinationStrategy::Add, + user_transaction_maturity_period_daa: AtomicU64::new(10), + kip9_version: Kip9Version::Beta, additional_compound_transaction_mass: 100, -}; +}); -pub const TESTNET11_NETWORK_PARAMS: NetworkParams = NetworkParams { - coinbase_transaction_maturity_period_daa: 1_000, +static TESTNET11_NETWORK_PARAMS: LazyLock = LazyLock::new(|| NetworkParams { + coinbase_transaction_maturity_period_daa: AtomicU64::new(1_000), coinbase_transaction_stasis_period_daa: 500, - user_transaction_maturity_period_daa: 100, - mass_combination_strategy: MassCombinationStrategy::Add, + user_transaction_maturity_period_daa: AtomicU64::new(100), + kip9_version: Kip9Version::Alpha, additional_compound_transaction_mass: 100, -}; +}); -pub const DEVNET_NETWORK_PARAMS: NetworkParams = NetworkParams { - coinbase_transaction_maturity_period_daa: 100, +static SIMNET_NETWORK_PARAMS: LazyLock = LazyLock::new(|| NetworkParams { + coinbase_transaction_maturity_period_daa: AtomicU64::new(100), coinbase_transaction_stasis_period_daa: 50, - user_transaction_maturity_period_daa: 10, - mass_combination_strategy: MassCombinationStrategy::Add, + user_transaction_maturity_period_daa: AtomicU64::new(10), + kip9_version: Kip9Version::Alpha, additional_compound_transaction_mass: 0, -}; +}); -pub const SIMNET_NETWORK_PARAMS: NetworkParams = NetworkParams { - coinbase_transaction_maturity_period_daa: 100, +static DEVNET_NETWORK_PARAMS: LazyLock = LazyLock::new(|| NetworkParams { + coinbase_transaction_maturity_period_daa: AtomicU64::new(100), coinbase_transaction_stasis_period_daa: 50, - user_transaction_maturity_period_daa: 10, - mass_combination_strategy: MassCombinationStrategy::Add, + user_transaction_maturity_period_daa: AtomicU64::new(10), + kip9_version: Kip9Version::Beta, additional_compound_transaction_mass: 0, -}; +}); -impl From for &'static NetworkParams { - fn from(value: NetworkId) -> Self { +impl NetworkParams { + pub fn from(value: NetworkId) -> &'static NetworkParams { match value.network_type { NetworkType::Mainnet => &MAINNET_NETWORK_PARAMS, NetworkType::Testnet => match value.suffix { @@ -70,18 +106,27 @@ impl From for &'static NetworkParams { } } -impl From for NetworkParams { - fn from(value: NetworkId) -> Self { - match value.network_type { - NetworkType::Mainnet => MAINNET_NETWORK_PARAMS, - NetworkType::Testnet => match value.suffix { - Some(10) => TESTNET10_NETWORK_PARAMS, - Some(11) => TESTNET11_NETWORK_PARAMS, - Some(x) => panic!("Testnet suffix {} is not supported", x), - None => panic!("Testnet suffix not provided"), - }, - NetworkType::Devnet => DEVNET_NETWORK_PARAMS, - NetworkType::Simnet => SIMNET_NETWORK_PARAMS, - } +/// Set the coinbase transaction maturity period DAA score for a given network. 
+/// This controls the DAA period after which coinbase transactions are considered mature +/// and the wallet subsystem emits the transaction maturity event. +pub fn set_coinbase_transaction_maturity_period_daa(network_id: &NetworkId, value: u64) { + let network_params = NetworkParams::from(*network_id); + if value <= network_params.coinbase_transaction_stasis_period_daa() { + panic!( + "Coinbase transaction maturity period must be greater than the stasis period of {} DAA", + network_params.coinbase_transaction_stasis_period_daa() + ); + } + network_params.set_coinbase_transaction_maturity_period_daa(value); +} + +/// Set the user transaction maturity period DAA score for a given network. +/// This controls the DAA period after which the user transactions are considered mature +/// and the wallet subsystem emits the transaction maturity event. +pub fn set_user_transaction_maturity_period_daa(network_id: &NetworkId, value: u64) { + let network_params = NetworkParams::from(*network_id); + if value == 0 { + panic!("User transaction maturity period must be greater than 0"); } + network_params.set_user_transaction_maturity_period_daa(value); } diff --git a/wallet/core/src/wallet/api.rs b/wallet/core/src/wallet/api.rs index 313759ba72..93becef420 100644 --- a/wallet/core/src/wallet/api.rs +++ b/wallet/core/src/wallet/api.rs @@ -1,5 +1,5 @@ //! -//! [`WalletApi`] trait implementation for [`Wallet`]. +//! [`WalletApi`] trait implementation for the [`Wallet`] struct. //! use crate::api::{message::*, traits::WalletApi}; @@ -20,6 +20,9 @@ impl WalletApi for super::Wallet { } async fn get_status_call(self: Arc<Self>, request: GetStatusRequest) -> Result<GetStatusResponse> { + let guard = self.guard(); + let guard = guard.lock().await; + let GetStatusRequest { name } = request; let context = name.and_then(|name| self.inner.retained_contexts.lock().unwrap().get(&name).cloned()); @@ -34,7 +37,7 @@ let (wallet_descriptor, account_descriptors) = if self.is_open() { let wallet_descriptor = self.descriptor(); - let account_descriptors = self.account_descriptors().await.ok(); + let account_descriptors = self.account_descriptors(&guard).await.ok(); (wallet_descriptor, account_descriptors) } else { (None, None) }; @@ -59,15 +62,17 @@ if let Some(data) = data { self.inner.retained_contexts.lock().unwrap().insert(name, Arc::new(data)); - Ok(RetainContextResponse {}) } else { self.inner.retained_contexts.lock().unwrap().remove(&name); - // let data = self.inner.retained_contexts.lock().unwrap().get(&name).cloned(); Ok(RetainContextResponse {}) } + } - // self.retain_context(retain); + async fn get_context_call(self: Arc<Self>, request: GetContextRequest) -> Result<GetContextResponse> { + let GetContextRequest { name } = request; + let data = self.inner.retained_contexts.lock().unwrap().get(&name).map(|data| (**data).clone()); + Ok(GetContextResponse { data }) } // ------------------------------------------------------------------------------------- @@ -75,22 +80,37 @@ async fn connect_call(self: Arc<Self>, request: ConnectRequest) -> Result<ConnectResponse> { use workflow_rpc::client::{ConnectOptions, ConnectStrategy}; - let ConnectRequest { url, network_id } = request; + let ConnectRequest { url, network_id, retry_on_error, block_async_connect, require_sync } = request; if let Some(wrpc_client) = self.try_wrpc_client().as_ref() { - // self.set_network_id(network_id)?; + let strategy = if retry_on_error { ConnectStrategy::Retry } else { ConnectStrategy::Fallback }; - // let network_type
= NetworkType::from(network_id); let url = url .map(|url| wrpc_client.parse_url_with_network_type(url, network_id.into()).map_err(|e| e.to_string())) .transpose()?; - let options = ConnectOptions { block_async_connect: false, strategy: ConnectStrategy::Retry, url, ..Default::default() }; + let options = ConnectOptions { block_async_connect, strategy, url, ..Default::default() }; wrpc_client.disconnect().await?; self.set_network_id(&network_id)?; + let processor = self.utxo_processor().clone(); + let (sender, receiver) = oneshot(); + + // set connection signaler that gets triggered + // by utxo processor when connection occurs + processor.set_connection_signaler(sender); + + // connect rpc wrpc_client.connect(Some(options)).await.map_err(|e| e.to_string())?; - Ok(ConnectResponse {}) + + // wait for connection signal, cascade if error + receiver.recv().await?.map_err(Error::custom)?; + + if require_sync && !self.is_synced() { + Err(Error::NotSynced) + } else { + Ok(ConnectResponse {}) + } } else { Err(Error::NotWrpcClient) } @@ -143,9 +163,12 @@ impl WalletApi for super::Wallet { } async fn wallet_open_call(self: Arc, request: WalletOpenRequest) -> Result { + let guard = self.guard(); + let guard = guard.lock().await; + let WalletOpenRequest { wallet_secret, filename, account_descriptors, legacy_accounts } = request; let args = WalletOpenArgs { account_descriptors, legacy_accounts: legacy_accounts.unwrap_or_default() }; - let account_descriptors = self.open(&wallet_secret, filename, args).await?; + let account_descriptors = self.open(&wallet_secret, filename, args, &guard).await?; Ok(WalletOpenResponse { account_descriptors }) } @@ -159,7 +182,11 @@ impl WalletApi for super::Wallet { if !self.is_open() { return Err(Error::WalletNotOpen); } - self.reload(reactivate).await?; + + let guard = self.guard(); + let guard = guard.lock().await; + + self.reload(reactivate, &guard).await?; Ok(WalletReloadResponse {}) } @@ -222,7 +249,10 @@ impl WalletApi for super::Wallet { async fn accounts_rename_call(self: Arc, request: AccountsRenameRequest) -> Result { let AccountsRenameRequest { account_id, name, wallet_secret } = request; - let account = self.get_account_by_id(&account_id).await?.ok_or(Error::AccountNotFound(account_id))?; + let guard = self.guard(); + let guard = guard.lock().await; + + let account = self.get_account_by_id(&account_id, &guard).await?.ok_or(Error::AccountNotFound(account_id))?; account.rename(&wallet_secret, name.as_deref()).await?; Ok(AccountsRenameResponse {}) @@ -231,8 +261,11 @@ impl WalletApi for super::Wallet { async fn accounts_select_call(self: Arc, request: AccountsSelectRequest) -> Result { let AccountsSelectRequest { account_id } = request; + let guard = self.guard(); + let guard = guard.lock().await; + if let Some(account_id) = account_id { - let account = self.get_account_by_id(&account_id).await?.ok_or(Error::AccountNotFound(account_id))?; + let account = self.get_account_by_id(&account_id, &guard).await?.ok_or(Error::AccountNotFound(account_id))?; self.select(Some(&account)).await?; } else { self.select(None).await?; @@ -243,34 +276,20 @@ impl WalletApi for super::Wallet { } async fn accounts_enumerate_call(self: Arc, _request: AccountsEnumerateRequest) -> Result { - // let iter = self.inner.store.as_account_store().unwrap().iter(None).await.unwrap(); - // let wallet = self.clone(); - - // let stream = iter.then(move |stored| { - // let wallet = wallet.clone(); - - // async move { - // let (stored_account, stored_metadata) = stored.unwrap(); - // if let 
Some(account) = wallet.legacy_accounts().get(&stored_account.id) { - // account.descriptor() - // } else if let Some(account) = wallet.active_accounts().get(&stored_account.id) { - // account.descriptor() - // } else { - // try_load_account(&wallet, stored_account, stored_metadata).await?.descriptor() - // } - // } - // }); - - // let account_descriptors = stream.try_collect::>().await?; - - let account_descriptors = self.account_descriptors().await?; + let guard = self.guard(); + let guard = guard.lock().await; + + let account_descriptors = self.account_descriptors(&guard).await?; Ok(AccountsEnumerateResponse { account_descriptors }) } async fn accounts_activate_call(self: Arc, request: AccountsActivateRequest) -> Result { let AccountsActivateRequest { account_ids } = request; - self.activate_accounts(account_ids.as_deref()).await?; + let guard = self.guard(); + let guard = guard.lock().await; + + self.activate_accounts(account_ids.as_deref(), &guard).await?; Ok(AccountsActivateResponse {}) } @@ -278,7 +297,10 @@ impl WalletApi for super::Wallet { async fn accounts_deactivate_call(self: Arc, request: AccountsDeactivateRequest) -> Result { let AccountsDeactivateRequest { account_ids } = request; - self.deactivate_accounts(account_ids.as_deref()).await?; + let guard = self.guard(); + let guard = guard.lock().await; + + self.deactivate_accounts(account_ids.as_deref(), &guard).await?; Ok(AccountsDeactivateResponse {}) } @@ -296,7 +318,10 @@ impl WalletApi for super::Wallet { async fn accounts_create_call(self: Arc, request: AccountsCreateRequest) -> Result { let AccountsCreateRequest { wallet_secret, account_create_args } = request; - let account = self.create_account(&wallet_secret, account_create_args, true).await?; + let guard = self.guard(); + let guard = guard.lock().await; + + let account = self.create_account(&wallet_secret, account_create_args, true, &guard).await?; let account_descriptor = account.descriptor()?; Ok(AccountsCreateResponse { account_descriptor }) @@ -308,8 +333,12 @@ impl WalletApi for super::Wallet { ) -> Result { let AccountsEnsureDefaultRequest { wallet_secret, payment_secret, account_kind, mnemonic_phrase } = request; - let account_descriptor = - self.ensure_default_account_impl(&wallet_secret, payment_secret.as_ref(), account_kind, mnemonic_phrase.as_ref()).await?; + let guard = self.guard(); + let guard = guard.lock().await; + + let account_descriptor = self + .ensure_default_account_impl(&wallet_secret, payment_secret.as_ref(), account_kind, mnemonic_phrase.as_ref(), &guard) + .await?; Ok(AccountsEnsureDefaultResponse { account_descriptor }) } @@ -321,7 +350,11 @@ impl WalletApi for super::Wallet { async fn accounts_get_call(self: Arc, request: AccountsGetRequest) -> Result { let AccountsGetRequest { account_id } = request; - let account = self.get_account_by_id(&account_id).await?.ok_or(Error::AccountNotFound(account_id))?; + + let guard = self.guard(); + let guard = guard.lock().await; + + let account = self.get_account_by_id(&account_id, &guard).await?.ok_or(Error::AccountNotFound(account_id))?; let account_descriptor = account.descriptor().unwrap(); Ok(AccountsGetResponse { account_descriptor }) } @@ -332,7 +365,10 @@ impl WalletApi for super::Wallet { ) -> Result { let AccountsCreateNewAddressRequest { account_id, kind } = request; - let account = self.get_account_by_id(&account_id).await?.ok_or(Error::AccountNotFound(account_id))?; + let guard = self.guard(); + let guard = guard.lock().await; + + let account = self.get_account_by_id(&account_id, 
&guard).await?.ok_or(Error::AccountNotFound(account_id))?; let address = match kind { NewAddressKind::Receive => account.as_derivation_capable()?.new_receive_address().await?, @@ -345,7 +381,9 @@ impl WalletApi for super::Wallet { async fn accounts_send_call(self: Arc, request: AccountsSendRequest) -> Result { let AccountsSendRequest { account_id, wallet_secret, payment_secret, destination, priority_fee_sompi, payload } = request; - let account = self.get_account_by_id(&account_id).await?.ok_or(Error::AccountNotFound(account_id))?; + let guard = self.guard(); + let guard = guard.lock().await; + let account = self.get_account_by_id(&account_id, &guard).await?.ok_or(Error::AccountNotFound(account_id))?; let abortable = Abortable::new(); let (generator_summary, transaction_ids) = @@ -364,7 +402,11 @@ impl WalletApi for super::Wallet { transfer_amount_sompi, } = request; - let source_account = self.get_account_by_id(&source_account_id).await?.ok_or(Error::AccountNotFound(source_account_id))?; + let guard = self.guard(); + let guard = guard.lock().await; + + let source_account = + self.get_account_by_id(&source_account_id, &guard).await?.ok_or(Error::AccountNotFound(source_account_id))?; let abortable = Abortable::new(); let (generator_summary, transaction_ids) = source_account @@ -376,6 +418,7 @@ impl WalletApi for super::Wallet { payment_secret, &abortable, None, + &guard, ) .await?; @@ -385,7 +428,9 @@ impl WalletApi for super::Wallet { async fn accounts_estimate_call(self: Arc, request: AccountsEstimateRequest) -> Result { let AccountsEstimateRequest { account_id, destination, priority_fee_sompi, payload } = request; - let account = self.get_account_by_id(&account_id).await?.ok_or(Error::AccountNotFound(account_id))?; + let guard = self.guard(); + let guard = guard.lock().await; + let account = self.get_account_by_id(&account_id, &guard).await?.ok_or(Error::AccountNotFound(account_id))?; // Abort currently running async estimate for the same account if present. The estimate // call can be invoked continuously by the client/UI. If the estimate call is diff --git a/wallet/core/src/wallet/args.rs b/wallet/core/src/wallet/args.rs index 05d57c445e..f0168f7406 100644 --- a/wallet/core/src/wallet/args.rs +++ b/wallet/core/src/wallet/args.rs @@ -3,7 +3,6 @@ //! use crate::imports::*; -// use crate::secret::Secret; use crate::storage::interface::CreateArgs; use crate::storage::{Hint, PrvKeyDataId}; use borsh::{BorshDeserialize, BorshSerialize}; @@ -113,6 +112,18 @@ impl AccountCreateArgsBip32 { } } +#[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] +pub struct AccountCreateArgsBip32Watch { + pub account_name: Option, + pub xpub_keys: Vec, +} + +impl AccountCreateArgsBip32Watch { + pub fn new(account_name: Option, xpub_keys: Vec) -> Self { + Self { account_name, xpub_keys } + } +} + #[derive(Clone, Debug, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] pub struct PrvKeyDataArgs { pub prv_key_data_id: PrvKeyDataId, @@ -142,6 +153,9 @@ pub enum AccountCreateArgs { name: Option, minimum_signatures: u16, }, + Bip32Watch { + account_args: AccountCreateArgsBip32Watch, + }, } impl AccountCreateArgs { diff --git a/wallet/core/src/wallet/maps.rs b/wallet/core/src/wallet/maps.rs index 430f54e568..232e6fbeb4 100644 --- a/wallet/core/src/wallet/maps.rs +++ b/wallet/core/src/wallet/maps.rs @@ -5,6 +5,7 @@ use crate::imports::*; +/// A thread-safe map of [`AccountId`] to [`Account`] instances. 
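The new Bip32Watch variant threads watch-only accounts through the same creation path as the other account kinds. A minimal sketch of assembling the arguments (the account name and xpub string are placeholders):

    // hypothetical watch-only account creation arguments
    let account_args = AccountCreateArgsBip32Watch::new(
        Some("watch-only".to_string()), // account_name
        vec![xpub_string],              // extended public keys in string form
    );
    let create_args = AccountCreateArgs::Bip32Watch { account_args };
    // then: wallet.create_account(&wallet_secret, create_args, true, &guard).await?

Here xpub_string is assumed to be an extended public key serialized as a string; create_account_bip32_watch() later in this patch parses each entry via ExtendedPublicKeySecp256k1::from_str and rejects the request if an identical account already exists.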
#[derive(Default, Clone)] pub struct ActiveAccountMap(Arc>>>); diff --git a/wallet/core/src/wallet/mod.rs b/wallet/core/src/wallet/mod.rs index d20f041adb..d7c9b6c76e 100644 --- a/wallet/core/src/wallet/mod.rs +++ b/wallet/core/src/wallet/mod.rs @@ -1,12 +1,21 @@ //! -//! Kaspa wallet runtime implementation. +//! # Kaspa wallet runtime implementation. //! +//! This module contains a Rust implementation of the Kaspa wallet that +//! can be used in native Rust as well as WASM32 (Browser, NodeJs, Bun) +//! environments. +//! +//! This wallet is not meant to be used directly, but rather through the +//! use of the [`WalletApi`] trait. +//! + pub mod api; pub mod args; pub mod maps; pub use args::*; use crate::account::ScanNotifier; +use crate::api::traits::WalletApi; use crate::compat::gen1::decrypt_mnemonic; use crate::error::Error::Custom; use crate::factory::try_load_account; @@ -21,9 +30,12 @@ use kaspa_notify::{ listener::ListenerId, scope::{Scope, VirtualDaaScoreChangedScope}, }; +use kaspa_wallet_keys::xpub::NetworkTaggedXpub; use kaspa_wrpc_client::{KaspaRpcClient, Resolver, WrpcEncoding}; use workflow_core::task::spawn; +pub type WalletGuard<'l> = AsyncMutexGuard<'l, ()>; + #[derive(Debug)] pub struct EncryptedMnemonic> { pub cipher: T, // raw @@ -77,7 +89,8 @@ pub enum WalletBusMessage { Discovery { record: TransactionRecord }, } -pub struct Inner { +/// Internal wallet state. +struct Inner { active_accounts: ActiveAccountMap, legacy_accounts: ActiveAccountMap, listener_id: Mutex>, @@ -90,6 +103,9 @@ pub struct Inner { wallet_bus: Channel, estimation_abortables: Mutex>, retained_contexts: Mutex>>>, + // Mutex used to protect concurrent access to accounts at the wallet api level + guard: Arc>, + account_guard: Arc>, } /// @@ -104,6 +120,13 @@ pub struct Wallet { inner: Arc, } +impl Default for Wallet { + fn default() -> Self { + let storage = Wallet::local_store().expect("Unable to initialize local storage"); + Wallet::try_new(storage, None, None).unwrap() + } +} + impl Wallet { pub fn local_store() -> Result> { Ok(Arc::new(LocalStore::try_new(false)?)) @@ -121,14 +144,6 @@ impl Wallet { let rpc_client = Arc::new(KaspaRpcClient::new_with_args(WrpcEncoding::Borsh, Some("wrpc://127.0.0.1:17110"), resolver, network_id, None)?); - // pub fn try_with_wrpc(store: Arc, network_id: Option) -> Result { - // let rpc_client = Arc::new(KaspaRpcClient::new_with_args( - // WrpcEncoding::Borsh, - // NotificationMode::MultiListeners, - // "wrpc://127.0.0.1:17110", - // None, - // )?); - let rpc_ctl = rpc_client.ctl().clone(); let rpc_api: Arc = rpc_client; let rpc = Rpc::new(rpc_api, rpc_ctl); @@ -155,14 +170,46 @@ impl Wallet { wallet_bus, estimation_abortables: Mutex::new(HashMap::new()), retained_contexts: Mutex::new(HashMap::new()), + guard: Arc::new(AsyncMutex::new(())), + account_guard: Arc::new(AsyncMutex::new(())), }), }; Ok(wallet) } - pub fn inner(&self) -> &Arc { - &self.inner + pub fn to_arc(self) -> Arc { + Arc::new(self) + } + + /// Helper fn for creating the wallet using a builder pattern. 
+ pub fn with_network_id(self, network_id: NetworkId) -> Self { + self.set_network_id(&network_id).expect("Unable to set network id"); + self + } + + pub fn with_resolver(self, resolver: Resolver) -> Self { + self.wrpc_client().set_resolver(resolver).expect("Unable to set resolver"); + self + } + + pub fn with_url(self, url: Option<&str>) -> Self { + self.wrpc_client().set_url(url).expect("Unable to set url"); + self + } + + // + // Mutex used to protect concurrent access to accounts + // at the wallet api level. This is a global lock that + // is required by various wallet operations. + // + // Due to the fact that Rust Wallet API is async, it is + // possible for clients to concurrently execute API calls + // that can "trip over each-other", causing incorrect + // account states. + // + pub fn guard(&self) -> Arc> { + self.inner.guard.clone() } pub fn is_resident(&self) -> Result { @@ -204,10 +251,12 @@ impl Wallet { Ok(()) } - pub async fn reload(self: &Arc, reactivate: bool) -> Result<()> { + pub async fn reload(self: &Arc, reactivate: bool, _guard: &WalletGuard<'_>) -> Result<()> { if self.is_open() { // similar to reset(), but effectively reboots the wallet + // let _guard = self.inner.guard.lock().await; + let accounts = self.active_accounts().collect(); let account_descriptors = Some(accounts.iter().map(|account| account.descriptor()).collect::>>()?); let wallet_descriptor = self.store().descriptor(); @@ -293,6 +342,8 @@ impl Wallet { filename: Option, args: WalletOpenArgs, ) -> Result>> { + // let _guard = self.inner.guard.lock().await; + let filename = filename.or_else(|| self.settings().get(WalletSettings::Wallet)); // let name = Some(make_filename(&name, &None)); @@ -329,20 +380,21 @@ impl Wallet { None }; - let account_descriptors = accounts - .as_ref() - .map(|accounts| accounts.iter().map(|account| account.descriptor()).collect::>>()) - .transpose()?; - - if let Some(accounts) = accounts { - for account in accounts.into_iter() { + if let Some(accounts) = &accounts { + for account in accounts.iter() { if let Ok(legacy_account) = account.clone().as_legacy_account() { - self.legacy_accounts().insert(account); legacy_account.create_private_context(wallet_secret, None, None).await?; + log_info!("create_private_context, open_impl: receive_address: {:?}", account.receive_address()); + self.legacy_accounts().insert(account.clone()); } } } + let account_descriptors = accounts + .as_ref() + .map(|accounts| accounts.iter().map(|account| account.descriptor()).collect::>>()) + .transpose()?; + self.notify(Events::WalletOpen { wallet_descriptor: wallet_name, account_descriptors: account_descriptors.clone() }).await?; let hint = self.store().get_user_hint().await?; @@ -357,6 +409,7 @@ impl Wallet { wallet_secret: &Secret, filename: Option, args: WalletOpenArgs, + _guard: &WalletGuard<'_>, ) -> Result>> { // This is a wrapper of open_impl() that catches errors and notifies the UI match self.open_impl(wallet_secret, filename, args).await { @@ -369,6 +422,8 @@ impl Wallet { } async fn activate_accounts_impl(self: &Arc, account_ids: Option<&[AccountId]>) -> Result> { + // let _guard = self.inner.guard.lock().await; + let stored_accounts = if let Some(ids) = account_ids { self.inner.store.as_account_store().unwrap().load_multiple(ids).await? } else { @@ -399,7 +454,7 @@ impl Wallet { } /// Activates accounts (performs account address space counts, initializes balance tracking, etc.) 
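Taken together, the Default impl, to_arc(), the with_* helpers, and the wallet-api guard suggest the following embedder-side flow. This is a sketch under the assumption that a node URL (or resolver) is reachable and that account_id was obtained from an account descriptor; error handling elided:

    // hypothetical setup using the builder helpers introduced above
    let wallet = Wallet::default()
        .with_network_id(NetworkId::new(NetworkType::Mainnet))
        .with_url(Some("wrpc://127.0.0.1:17110"))
        .to_arc();

    // account-level operations now take the wallet-api guard explicitly
    let guard = wallet.guard();
    let guard = guard.lock().await;
    let account = wallet.get_account_by_id(&account_id, &guard).await?;

Holding the guard for the duration of a multi-step operation is what prevents concurrent API calls from "tripping over each other", per the comment above.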
- pub async fn activate_accounts(self: &Arc, account_ids: Option<&[AccountId]>) -> Result<()> { + pub async fn activate_accounts(self: &Arc, account_ids: Option<&[AccountId]>, _guard: &WalletGuard<'_>) -> Result<()> { // This is a wrapper of activate_accounts_impl() that catches errors and notifies the UI if let Err(err) = self.activate_accounts_impl(account_ids).await { self.notify(Events::WalletError { message: err.to_string() }).await?; @@ -409,7 +464,9 @@ impl Wallet { } } - pub async fn deactivate_accounts(self: &Arc, ids: Option<&[AccountId]>) -> Result<()> { + pub async fn deactivate_accounts(self: &Arc, ids: Option<&[AccountId]>, _guard: &WalletGuard<'_>) -> Result<()> { + let _guard = self.inner.guard.lock().await; + let (ids, futures) = if let Some(ids) = ids { let accounts = ids.iter().map(|id| self.active_accounts().get(id).ok_or(Error::AccountNotFound(*id))).collect::>>()?; @@ -424,7 +481,9 @@ impl Wallet { Ok(()) } - pub async fn account_descriptors(self: Arc) -> Result> { + pub async fn account_descriptors(self: Arc, _guard: &WalletGuard<'_>) -> Result> { + // let _guard = self.inner.guard.lock().await; + let iter = self.inner.store.as_account_store().unwrap().iter(None).await.unwrap(); let wallet = self.clone(); @@ -462,6 +521,10 @@ impl Wallet { self.try_rpc_api().and_then(|api| api.clone().downcast_arc::().ok()) } + pub fn wrpc_client(&self) -> Arc { + self.try_rpc_api().and_then(|api| api.clone().downcast_arc::().ok()).unwrap() + } + pub fn rpc_api(&self) -> Arc { self.utxo_processor().rpc_api() } @@ -487,6 +550,14 @@ impl Wallet { Ok(()) } + pub fn as_api(self: &Arc) -> Arc { + self.clone() + } + + pub fn to_api(self) -> Arc { + Arc::new(self) + } + pub fn multiplexer(&self) -> &Multiplexer> { &self.inner.multiplexer } @@ -604,6 +675,7 @@ impl Wallet { wallet_secret: &Secret, account_create_args: AccountCreateArgs, notify: bool, + _guard: &WalletGuard<'_>, ) -> Result> { let account = match account_create_args { AccountCreateArgs::Bip32 { prv_key_data_args, account_args } => { @@ -616,6 +688,7 @@ impl Wallet { AccountCreateArgs::Multisig { prv_key_data_args, additional_xpub_keys, name, minimum_signatures } => { self.create_account_multisig(wallet_secret, prv_key_data_args, additional_xpub_keys, name, minimum_signatures).await? } + AccountCreateArgs::Bip32Watch { account_args } => self.create_account_bip32_watch(wallet_secret, account_args).await?, }; if notify { @@ -724,7 +797,13 @@ impl Wallet { let account_index = if let Some(account_index) = account_index { account_index } else { - account_store.clone().len(Some(prv_key_data_id)).await? 
as u64 + let accounts = account_store.clone().iter(Some(prv_key_data_id)).await?.collect::>().await; + + accounts + .into_iter() + .filter(|a| a.as_ref().ok().and_then(|(a, _)| (a.kind == BIP32_ACCOUNT_KIND).then_some(true)).unwrap_or(false)) + .collect::>() + .len() as u64 }; let xpub_key = prv_key_data.create_xpub(payment_secret, BIP32_ACCOUNT_KIND.into(), account_index).await?; @@ -743,6 +822,36 @@ impl Wallet { Ok(account) } + pub async fn create_account_bip32_watch( + self: &Arc, + wallet_secret: &Secret, + account_args: AccountCreateArgsBip32Watch, + ) -> Result> { + let account_store = self.inner.store.clone().as_account_store()?; + + let AccountCreateArgsBip32Watch { account_name, xpub_keys } = account_args; + + let xpub_keys = Arc::new( + xpub_keys + .into_iter() + .map(|xpub_key| { + ExtendedPublicKeySecp256k1::from_str(&xpub_key).map_err(|err| Error::InvalidExtendedPublicKey(xpub_key, err)) + }) + .collect::>>()?, + ); + + let account: Arc = Arc::new(bip32watch::Bip32Watch::try_new(self, account_name, xpub_keys, false).await?); + + if account_store.load_single(account.id()).await?.is_some() { + return Err(Error::AccountAlreadyExists(*account.id())); + } + + self.inner.store.clone().as_account_store()?.store_single(&account.to_storage()?, None).await?; + self.inner.store.commit(wallet_secret).await?; + + Ok(account) + } + async fn create_account_legacy( self: &Arc, wallet_secret: &Secret, @@ -760,6 +869,12 @@ impl Wallet { .ok_or_else(|| Error::PrivateKeyNotFound(prv_key_data_id))?; let account: Arc = Arc::new(legacy::Legacy::try_new(self, account_name, prv_key_data.id).await?); + if let Ok(legacy_account) = account.clone().as_legacy_account() { + legacy_account.create_private_context(wallet_secret, None, None).await?; + log_info!("create_private_context: create_account_legacy, receive_address: {:?}", account.receive_address()); + self.legacy_accounts().insert(account.clone()); + //legacy_account.clear_private_context().await?; + } if account_store.load_single(account.id()).await?.is_some() { return Err(Error::AccountAlreadyExists(*account.id())); @@ -847,7 +962,13 @@ impl Wallet { Ok((wallet_descriptor, storage_descriptor, mnemonic, account)) } - pub async fn get_account_by_id(self: &Arc, account_id: &AccountId) -> Result>> { + pub async fn get_account_by_id( + self: &Arc, + account_id: &AccountId, + _guard: &WalletGuard<'_>, + ) -> Result>> { + let _guard = self.inner.account_guard.lock().await; + if let Some(account) = self.active_accounts().get(account_id) { Ok(Some(account.clone())) } else { @@ -1036,7 +1157,11 @@ impl Wallet { Ok(matches) } - pub async fn accounts(self: &Arc, filter: Option) -> Result>>> { + pub async fn accounts( + self: &Arc, + filter: Option, + _guard: &WalletGuard<'_>, + ) -> Result>>> { let iter = self.inner.store.as_account_store().unwrap().iter(filter).await.unwrap(); let wallet = self.clone(); @@ -1557,6 +1682,7 @@ impl Wallet { payment_secret: Option<&Secret>, kind: AccountKind, mnemonic_phrase: Option<&Secret>, + guard: &WalletGuard<'_>, ) -> Result { if kind != BIP32_ACCOUNT_KIND { return Err(Error::custom("Account kind is not supported")); @@ -1582,13 +1708,17 @@ impl Wallet { let account_create_args = AccountCreateArgs::new_bip32(prv_key_data_id, payment_secret.cloned(), None, None); - let account = self.clone().create_account(wallet_secret, account_create_args, false).await?; + let account = self.clone().create_account(wallet_secret, account_create_args, false, guard).await?; self.store().flush(wallet_secret).await?; Ok(account.descriptor()?) 
} } + + pub fn network_format_xpub(&self, xpub_key: &ExtendedPublicKeySecp256k1) -> String { + NetworkTaggedXpub::from((xpub_key.clone(), self.network_id().unwrap())).to_string() + } } // fn decrypt_mnemonic>( diff --git a/wallet/core/src/wasm/api/message.rs b/wallet/core/src/wasm/api/message.rs index 33bf5e00dd..8a023267b8 100644 --- a/wallet/core/src/wasm/api/message.rs +++ b/wallet/core/src/wasm/api/message.rs @@ -153,8 +153,16 @@ declare! { * @category Wallet API */ export interface IConnectRequest { - url : string; + // destination wRPC node URL (if omitted, the resolver is used) + url? : string; + // network identifier networkId : NetworkId | string; + // retry on error + retryOnError? : boolean; + // block async connect (method will not return until the connection is established) + block? : boolean; + // require node to be synced (fail otherwise) + requireSync? : boolean; } "#, } @@ -162,7 +170,10 @@ declare! { try_from! ( args: IConnectRequest, ConnectRequest, { let url = args.try_get_string("url")?; let network_id = args.get_network_id("networkId")?; - Ok(ConnectRequest { url, network_id }) + let retry_on_error = args.try_get_bool("retryOnError")?.unwrap_or(true); + let block_async_connect = args.try_get_bool("block")?.unwrap_or(false); + let require_sync = args.try_get_bool("requireSync")?.unwrap_or(true); + Ok(ConnectRequest { url, network_id, retry_on_error, block_async_connect, require_sync }) }); declare! { @@ -971,7 +982,7 @@ try_from! (args: IAccountsDiscoveryRequest, AccountsDiscoveryRequest, { let discovery_kind = if let Some(discovery_kind) = discovery_kind.as_string() { discovery_kind.parse()? } else { - AccountsDiscoveryKind::try_cast_from(&discovery_kind)? + AccountsDiscoveryKind::try_enum_from(&discovery_kind)? }; let account_scan_extent = args.get_u32("accountScanExtent")?; let address_scan_extent = args.get_u32("addressScanExtent")?; @@ -1312,7 +1323,7 @@ try_from!(args: IAccountsCreateNewAddressRequest, AccountsCreateNewAddressReques let value = args.get_value("addressKind")?; let kind: NewAddressKind = if let Some(string) = value.as_string() { string.parse()? - } else if let Ok(kind) = NewAddressKind::try_cast_from(&value) { + } else if let Ok(kind) = NewAddressKind::try_enum_from(&value) { kind } else { NewAddressKind::Receive diff --git a/wallet/core/src/wasm/cryptobox.rs b/wallet/core/src/wasm/cryptobox.rs index 1892b08d22..957d4fc35a 100644 --- a/wallet/core/src/wasm/cryptobox.rs +++ b/wallet/core/src/wasm/cryptobox.rs @@ -35,8 +35,11 @@ impl CryptoBoxPrivateKey { impl TryCastFromJs for CryptoBoxPrivateKey { type Error = Error; - fn try_cast_from(value: impl AsRef) -> Result> { - Self::resolve(&value, || { + fn try_cast_from<'a, R>(value: &'a R) -> Result> + where + R: AsRef + 'a, + { + Self::resolve(value, || { let secret_key = value.as_ref().try_as_vec_u8()?; if secret_key.len() != KEY_SIZE { return Err(Error::InvalidPrivateKeyLength); @@ -63,8 +66,11 @@ pub struct CryptoBoxPublicKey { impl TryCastFromJs for CryptoBoxPublicKey { type Error = Error; - fn try_cast_from(value: impl AsRef) -> Result> { - Self::resolve(&value, || { + fn try_cast_from<'a, R>(value: &'a R) -> Result> + where + R: AsRef + 'a, + { + Self::resolve(value, || { let public_key = value.as_ref().try_as_vec_u8()?; if public_key.len() != KEY_SIZE { Err(Error::InvalidPublicKeyLength) @@ -100,7 +106,7 @@ impl std::ops::Deref for CryptoBoxPublicKey { /// /// CryptoBox allows for encrypting and decrypting messages using the `crypto_box` crate. 
/// -/// https://docs.rs/crypto_box/0.9.1/crypto_box/ +/// <https://docs.rs/crypto_box/0.9.1/crypto_box/> /// /// @category Wallet SDK /// @@ -114,7 +120,7 @@ pub struct CryptoBox { impl CryptoBox { #[wasm_bindgen(constructor)] #[allow(non_snake_case)] - pub fn ctor(secretKey: CryptoBoxPrivateKeyT, peerPublicKey: CryptoBoxPublicKeyT) -> Result<CryptoBox> { + pub fn ctor(secretKey: &CryptoBoxPrivateKeyT, peerPublicKey: &CryptoBoxPublicKeyT) -> Result<CryptoBox> { let secret_key = CryptoBoxPrivateKey::try_cast_from(secretKey)?; let peer_public_key = CryptoBoxPublicKey::try_cast_from(peerPublicKey)?; Ok(Self { inner: Arc::new(NativeCryptoBox::new(&secret_key, &peer_public_key)) }) diff --git a/wallet/core/src/wasm/message.rs index 7df6f16720..25c7f399ad 100644 --- a/wallet/core/src/wasm/message.rs +++ b/wallet/core/src/wasm/message.rs @@ -28,10 +28,10 @@ extern "C" { #[wasm_bindgen(js_name = signMessage)] pub fn js_sign_message(value: ISignMessage) -> Result<HexString> { if let Some(object) = Object::try_from(&value) { - let private_key = object.get_cast::<PrivateKey>("privateKey")?; + let private_key = object.cast_into::<PrivateKey>("privateKey")?; let raw_msg = object.get_string("message")?; let mut privkey_bytes = [0u8; 32]; - privkey_bytes.copy_from_slice(&private_key.as_ref().secret_bytes()); + privkey_bytes.copy_from_slice(&private_key.secret_bytes()); let pm = PersonalMessage(&raw_msg); let sig_vec = sign_message(&pm, &privkey_bytes)?; privkey_bytes.zeroize(); @@ -66,7 +66,7 @@ extern "C" { #[wasm_bindgen(js_name = verifyMessage, skip_jsdoc)] pub fn js_verify_message(value: IVerifyMessage) -> Result<bool> { if let Some(object) = Object::try_from(&value) { - let public_key = object.get_cast::<PublicKey>("publicKey")?; + let public_key = object.cast_into::<PublicKey>("publicKey")?; let raw_msg = object.get_string("message")?; let signature = object.get_string("signature")?; @@ -74,7 +74,7 @@ pub fn js_verify_message(value: IVerifyMessage) -> Result<bool> { let mut signature_bytes = [0u8; 64]; faster_hex::hex_decode(signature.as_bytes(), &mut signature_bytes)?; - Ok(verify_message(&pm, &signature_bytes.to_vec(), &public_key.as_ref().xonly_public_key).is_ok()) + Ok(verify_message(&pm, &signature_bytes.to_vec(), &public_key.xonly_public_key).is_ok()) } else { Err(Error::custom("Failed to parse input")) } diff --git a/wallet/core/src/wasm/notify.rs index 61d1d16876..3383e1353e 100644 --- a/wallet/core/src/wasm/notify.rs +++ b/wallet/core/src/wasm/notify.rs @@ -32,33 +32,12 @@ cfg_if! { } /** - * {@link UtxoProcessor} notification event data. - * @category Wallet SDK - */ - export type UtxoProcessorEventData = IConnectEvent - | IDisconnectEvent - | IUtxoIndexNotEnabledEvent - | ISyncStateEvent - | IServerStatusEvent - | IUtxoProcErrorEvent - | IDaaScoreChangeEvent - | IPendingEvent - | IReorgEvent - | IStasisEvent - | IMaturityEvent - | IDiscoveryEvent - | IBalanceEvent - | IErrorEvent - | undefined - ; - - /** - * UtxoProcessor notification event data map. + * {@link UtxoProcessor} notification event data map. * * @category Wallet API */ export type UtxoProcessorEventMap = { - "connect":IConnectEvent, + "connect": IConnectEvent, "disconnect": IDisconnectEvent, "utxo-index-not-enabled": IUtxoIndexNotEnabledEvent, "sync-state": ISyncStateEvent, @@ -80,10 +59,13 @@ cfg_if!
{ * * @category Wallet API */ - export type IUtxoProcessorEvent = { - [K in keyof UtxoProcessorEventMap]: { event: K, data: UtxoProcessorEventMap[K] } - }[keyof UtxoProcessorEventMap]; + export type UtxoProcessorEvent = { + [K in T]: { + type: K, + data: UtxoProcessorEventMap[K] + } + }[T]; /** * {@link UtxoProcessor} notification callback type. @@ -95,7 +77,8 @@ cfg_if! { * * @category Wallet SDK */ - export type UtxoProcessorNotificationCallback = (event: IUtxoProcessorEvent) => void; + + export type UtxoProcessorNotificationCallback = (event: UtxoProcessorEvent) => void; "#; #[wasm_bindgen] @@ -150,85 +133,53 @@ cfg_if! { Error = "error", } - - /** - * {@link Wallet} notification event data payload. - * @category Wallet API - */ - export type WalletEventData = IConnectEvent - | IDisconnectEvent - | IUtxoIndexNotEnabledEvent - | ISyncStateEvent - | IWalletHintEvent - | IWalletOpenEvent - | IWalletCreateEvent - | IWalletReloadEvent - | IWalletErrorEvent - // | IWalletCloseEvent - | IPrvKeyDataCreateEvent - | IAccountActivationEvent - | IAccountDeactivationEvent - | IAccountSelectionEvent - | IAccountCreateEvent - | IAccountUpdateEvent - | IServerStatusEvent - // | IUtxoProcStartEvent - // | IUtxoProcStopEvent - | IUtxoProcErrorEvent - | IDaaScoreChangeEvent - | IPendingEvent - | IReorgEvent - | IStasisEvent - | IMaturityEvent - | IDiscoveryEvent - | IBalanceEvent - | IErrorEvent - | undefined - ; - /** * Wallet notification event data map. * @see {@link Wallet.addEventListener} * @category Wallet API */ export type WalletEventMap = { - "connect": IConnectEvent, - "disconnect": IDisconnectEvent, - "utxo-index-not-enabled": IUtxoIndexNotEnabledEvent, - "sync-state": ISyncStateEvent, - "wallet-hint": IWalletHintEvent, - "wallet-open": IWalletOpenEvent, - "wallet-create": IWalletCreateEvent, - "wallet-reload": IWalletReloadEvent, - "wallet-error": IWalletErrorEvent, - "wallet-close": undefined, - "prv-key-data-create": IPrvKeyDataCreateEvent, - "account-activation": IAccountActivationEvent, - "account-deactivation": IAccountDeactivationEvent, - "account-selection": IAccountSelectionEvent, - "account-create": IAccountCreateEvent, - "account-update": IAccountUpdateEvent, - "server-status": IServerStatusEvent, - "utxo-proc-start": undefined, - "utxo-proc-stop": undefined, - "utxo-proc-error": IUtxoProcErrorEvent, - "daa-score-change": IDaaScoreChangeEvent, - "pending": IPendingEvent, - "reorg": IReorgEvent, - "stasis": IStasisEvent, - "maturity": IMaturityEvent, - "discovery": IDiscoveryEvent, - "balance": IBalanceEvent, - "error": IErrorEvent, + "connect": IConnectEvent, + "disconnect": IDisconnectEvent, + "utxo-index-not-enabled": IUtxoIndexNotEnabledEvent, + "sync-state": ISyncStateEvent, + "wallet-hint": IWalletHintEvent, + "wallet-open": IWalletOpenEvent, + "wallet-create": IWalletCreateEvent, + "wallet-reload": IWalletReloadEvent, + "wallet-error": IWalletErrorEvent, + "wallet-close": undefined, + "prv-key-data-create": IPrvKeyDataCreateEvent, + "account-activation": IAccountActivationEvent, + "account-deactivation": IAccountDeactivationEvent, + "account-selection": IAccountSelectionEvent, + "account-create": IAccountCreateEvent, + "account-update": IAccountUpdateEvent, + "server-status": IServerStatusEvent, + "utxo-proc-start": undefined, + "utxo-proc-stop": undefined, + "utxo-proc-error": IUtxoProcErrorEvent, + "daa-score-change": IDaaScoreChangeEvent, + "pending": IPendingEvent, + "reorg": IReorgEvent, + "stasis": IStasisEvent, + "maturity": IMaturityEvent, + "discovery": 
IDiscoveryEvent, + "balance": IBalanceEvent, + "error": IErrorEvent, } /** * {@link Wallet} notification event interface. * @category Wallet API */ - export type IWalletEvent = { - [K in keyof WalletEventMap]: { type: K, data: WalletEventMap[K] } - }[keyof WalletEventMap]; + export type IWalletEvent = { + [K in T]: { + type: K, + data: WalletEventMap[K] + } + }[T]; + /** * Wallet notification callback type. @@ -240,7 +191,7 @@ cfg_if! { * * @category Wallet API */ - export type WalletNotificationCallback = (event: IWalletEvent) => void; + export type WalletNotificationCallback = (event: IWalletEvent) => void; "#; #[wasm_bindgen] diff --git a/wallet/core/src/wasm/signer.rs b/wallet/core/src/wasm/signer.rs index e2ff8e6fb2..157f06d909 100644 --- a/wallet/core/src/wasm/signer.rs +++ b/wallet/core/src/wasm/signer.rs @@ -2,10 +2,13 @@ use crate::imports::*; use crate::result::Result; use js_sys::Array; use kaspa_consensus_client::{sign_with_multiple_v3, Transaction}; +use kaspa_consensus_core::hashing::wasm::SighashType; +use kaspa_consensus_core::sign::sign_input; use kaspa_consensus_core::tx::PopulatedTransaction; use kaspa_consensus_core::{hashing::sighash_type::SIG_HASH_ALL, sign::verify}; use kaspa_hashes::Hash; use kaspa_wallet_keys::privatekey::PrivateKey; +use kaspa_wasm_core::types::HexString; use serde_wasm_bindgen::from_value; #[wasm_bindgen] @@ -31,26 +34,26 @@ impl TryFrom for Vec { /// `signTransaction()` is a helper function to sign a transaction using a private key array or a signer array. /// @category Wallet SDK #[wasm_bindgen(js_name = "signTransaction")] -pub fn js_sign_transaction(tx: Transaction, signer: PrivateKeyArrayT, verify_sig: bool) -> Result { +pub fn js_sign_transaction(tx: &Transaction, signer: &PrivateKeyArrayT, verify_sig: bool) -> Result { if signer.is_array() { let mut private_keys: Vec<[u8; 32]> = vec![]; - for key in Array::from(&signer).iter() { - let key = PrivateKey::try_cast_from(key).map_err(|_| Error::Custom("Unable to cast PrivateKey".to_string()))?; + for key in Array::from(signer).iter() { + let key = PrivateKey::try_cast_from(&key).map_err(|_| Error::Custom("Unable to cast PrivateKey".to_string()))?; private_keys.push(key.as_ref().secret_bytes()); } let tx = sign_transaction(tx, &private_keys, verify_sig).map_err(|err| Error::Custom(format!("Unable to sign: {err:?}")))?; private_keys.zeroize(); - Ok(tx) + Ok(tx.clone()) } else { Err(Error::custom("signTransaction() requires an array of signatures")) } } -pub fn sign_transaction(tx: Transaction, private_keys: &[[u8; 32]], verify_sig: bool) -> Result { +fn sign_transaction<'a>(tx: &'a Transaction, private_keys: &[[u8; 32]], verify_sig: bool) -> Result<&'a Transaction> { let tx = sign(tx, private_keys)?; if verify_sig { - let (cctx, utxos) = tx.tx_and_utxos(); + let (cctx, utxos) = tx.tx_and_utxos()?; let populated_transaction = PopulatedTransaction::new(&cctx, utxos); verify(&populated_transaction)?; } @@ -60,10 +63,32 @@ pub fn sign_transaction(tx: Transaction, private_keys: &[[u8; 32]], verify_sig: /// Sign a transaction using schnorr, returns a new transaction with the signatures added. /// The resulting transaction may be partially signed if the supplied keys are not sufficient /// to sign all of its inputs. 
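+///
+/// A hypothetical JavaScript sketch for the `signTransaction()` wrapper exported
+/// above (`tx` and `privateKey` are assumed to exist in the caller's scope):
+///
+/// ```javascript
+/// // Sign with a single private key and verify the resulting signatures.
+/// const signed = signTransaction(tx, [privateKey], true);
+/// ```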
-pub fn sign(tx: Transaction, privkeys: &[[u8; 32]]) -> Result { +pub fn sign<'a>(tx: &'a Transaction, privkeys: &[[u8; 32]]) -> Result<&'a Transaction> { Ok(sign_with_multiple_v3(tx, privkeys)?.unwrap()) } +/// `createInputSignature()` is a helper function to sign a transaction input with a specific SigHash type using a private key. +/// @category Wallet SDK +#[wasm_bindgen(js_name = "createInputSignature")] +pub fn create_input_signature( + tx: &Transaction, + input_index: u8, + private_key: &PrivateKey, + sighash_type: Option, +) -> Result { + let (cctx, utxos) = tx.tx_and_utxos()?; + let populated_transaction = PopulatedTransaction::new(&cctx, utxos); + + let signature = sign_input( + &populated_transaction, + input_index.into(), + &private_key.secret_bytes(), + sighash_type.unwrap_or(SighashType::All).into(), + ); + + Ok(signature.to_hex().into()) +} + /// @category Wallet SDK #[wasm_bindgen(js_name=signScriptHash)] pub fn sign_script_hash(script_hash: JsValue, privkey: &PrivateKey) -> Result { @@ -72,7 +97,7 @@ pub fn sign_script_hash(script_hash: JsValue, privkey: &PrivateKey) -> Result Result> { +fn sign_hash(sig_hash: Hash, privkey: &[u8; 32]) -> Result> { let msg = secp256k1::Message::from_digest_slice(sig_hash.as_bytes().as_slice())?; let schnorr_key = secp256k1::Keypair::from_seckey_slice(secp256k1::SECP256K1, privkey)?; let sig: [u8; 64] = *schnorr_key.sign_schnorr(msg).as_ref(); diff --git a/wallet/core/src/wasm/tx/consensus.rs b/wallet/core/src/wasm/tx/consensus.rs deleted file mode 100644 index f1400d0915..0000000000 --- a/wallet/core/src/wasm/tx/consensus.rs +++ /dev/null @@ -1,36 +0,0 @@ -use crate::tx::consensus as core; -use kaspa_addresses::Address; -use kaspa_consensus_core::{config::params::Params, network::NetworkType}; -use wasm_bindgen::prelude::*; - -/// @category Wallet SDK -#[wasm_bindgen] -pub struct ConsensusParams { - params: Params, -} - -impl From for ConsensusParams { - fn from(params: Params) -> Self { - Self { params } - } -} - -impl From for Params { - fn from(cp: ConsensusParams) -> Self { - cp.params - } -} - -/// find Consensus parameters for given Address -/// @category Wallet SDK -#[wasm_bindgen(js_name = getConsensusParametersByAddress)] -pub fn get_consensus_params_by_address(address: &Address) -> ConsensusParams { - core::get_consensus_params_by_address(address).into() -} - -/// find Consensus parameters for given NetworkType -/// @category Wallet SDK -#[wasm_bindgen(js_name = getConsensusParametersByNetwork)] -pub fn get_consensus_params_by_network(network: NetworkType) -> ConsensusParams { - core::get_consensus_params_by_network(&network).into() -} diff --git a/wallet/core/src/wasm/tx/fees.rs b/wallet/core/src/wasm/tx/fees.rs index ea38c6ebf6..200634cd56 100644 --- a/wallet/core/src/wasm/tx/fees.rs +++ b/wallet/core/src/wasm/tx/fees.rs @@ -39,7 +39,7 @@ impl TryFrom for Fees { } else if let Ok(object) = args.dyn_into::() { let amount = object.get_u64("amount")?; if let Some(source) = object.try_get_value("source")? 
{ - let source = FeeSource::try_cast_from(&source)?; + let source = FeeSource::try_enum_from(&source)?; match source { FeeSource::SenderPays => Ok(Fees::SenderPays(amount)), FeeSource::ReceiverPays => Ok(Fees::ReceiverPays(amount)), diff --git a/wallet/core/src/wasm/tx/generator/generator.rs index 5303b4d3e6..5724b84811 100644 --- a/wallet/core/src/wasm/tx/generator/generator.rs +++ b/wallet/core/src/wasm/tx/generator/generator.rs @@ -64,6 +64,15 @@ interface IGeneratorSettingsObject { * interface, or a {@link UtxoContext} instance. */ entries: IUtxoEntry[] | UtxoEntryReference[] | UtxoContext; + /** + * Optional UTXO entries that will be consumed before those available in `entries`. + * You can use this property to apply custom input selection logic. + * Please note that these inputs are consumed first, then `entries` are consumed + * to generate a desirable transaction output amount. If the transaction mass + * overflows, these inputs will be consumed into a batch/sweep transaction + * where the destination is the `changeAddress`. + */ + priorityEntries?: IUtxoEntry[] | UtxoEntryReference[], /** * Optional number of signature operations in the transaction. */ @@ -147,6 +156,7 @@ impl Generator { let GeneratorSettings { network_id, source, + priority_utxo_entries, multiplexer, final_transaction_destination, change_address, @@ -167,6 +177,7 @@ impl Generator { native::GeneratorSettings::try_new_with_iterator( network_id, Box::new(utxo_entries.into_iter()), + priority_utxo_entries, change_address, sig_op_count, minimum_signatures, @@ -182,6 +193,7 @@ impl Generator { native::GeneratorSettings::try_new_with_context( utxo_context.into(), + priority_utxo_entries, change_address, sig_op_count, minimum_signatures, @@ -244,6 +256,7 @@ enum GeneratorSource { struct GeneratorSettings { pub network_id: Option<NetworkId>, pub source: GeneratorSource, + pub priority_utxo_entries: Option<Vec<UtxoEntryReference>>, pub multiplexer: Option<Multiplexer<Box<Events>>>, pub final_transaction_destination: PaymentDestination, pub change_address: Option<Address>
, @@ -263,18 +276,20 @@ impl TryFrom for GeneratorSettings { let final_transaction_destination: PaymentDestination = if outputs.is_undefined() { PaymentDestination::Change } else { PaymentOutputs::try_owned_from(outputs)?.into() }; - let change_address = args.try_get_cast::
("changeAddress")?.map(Cast::into_owned); + let change_address = args.try_cast_into::
("changeAddress")?; let final_priority_fee = args.get::("priorityFee")?.try_into()?; - let generator_source = if let Ok(Some(context)) = args.try_get_cast::("entries") { - GeneratorSource::UtxoContext(context.into_owned()) + let generator_source = if let Ok(Some(context)) = args.try_cast_into::("entries") { + GeneratorSource::UtxoContext(context) } else if let Some(utxo_entries) = args.try_get_value("entries")? { GeneratorSource::UtxoEntries(utxo_entries.try_into_utxo_entry_references()?) } else { - return Err(Error::custom("'entries', 'context' or 'account' property is required for Generator")); + return Err(Error::custom("'entries' property is required for Generator")); }; + let priority_utxo_entries = args.try_get_value("priorityEntries")?.map(|v| v.try_into_utxo_entry_references()).transpose()?; + let sig_op_count = args.get_value("sigOpCount")?; let sig_op_count = if !sig_op_count.is_undefined() { sig_op_count.as_f64().expect("sigOpCount should be a number") as u8 } else { 1 }; @@ -291,6 +306,7 @@ impl TryFrom for GeneratorSettings { let settings = GeneratorSettings { network_id, source: generator_source, + priority_utxo_entries, multiplexer: None, final_transaction_destination, change_address, diff --git a/wallet/core/src/wasm/tx/generator/pending.rs b/wallet/core/src/wasm/tx/generator/pending.rs index dfa84c9bd7..58c36375d6 100644 --- a/wallet/core/src/wasm/tx/generator/pending.rs +++ b/wallet/core/src/wasm/tx/generator/pending.rs @@ -3,8 +3,10 @@ use crate::result::Result; use crate::tx::generator as native; use crate::wasm::PrivateKeyArrayT; use kaspa_consensus_client::{numeric, string}; -use kaspa_consensus_client::{ITransaction, Transaction}; +use kaspa_consensus_client::{Transaction, TransactionT}; +use kaspa_consensus_core::hashing::wasm::SighashType; use kaspa_wallet_keys::privatekey::PrivateKey; +use kaspa_wasm_core::types::{BinaryT, HexString}; use kaspa_wrpc_wasm::RpcClient; /// @category Wallet SDK @@ -15,11 +17,13 @@ pub struct PendingTransaction { #[wasm_bindgen] impl PendingTransaction { + /// Transaction Id #[wasm_bindgen(getter)] pub fn id(&self) -> String { self.inner.id().to_string() } + /// Total amount transferred to the destination (aggregate output - change). #[wasm_bindgen(getter, js_name = paymentAmount)] pub fn payment_value(&self) -> JsValue { if let Some(payment_value) = self.inner.payment_value() { @@ -29,26 +33,44 @@ impl PendingTransaction { } } + /// Change amount (if any). #[wasm_bindgen(getter, js_name = changeAmount)] pub fn change_value(&self) -> BigInt { BigInt::from(self.inner.change_value()) } + /// Total transaction fees (network fees + priority fees). #[wasm_bindgen(getter, js_name = feeAmount)] pub fn fees(&self) -> BigInt { BigInt::from(self.inner.fees()) } + /// Calculated transaction mass. + #[wasm_bindgen(getter)] + pub fn mass(&self) -> BigInt { + BigInt::from(self.inner.mass()) + } + + /// Minimum number of signatures required by the transaction. + /// (as specified during the transaction creation). + #[wasm_bindgen(getter, js_name = "minimumSignatures")] + pub fn minimum_signatures(&self) -> u16 { + self.inner.minimum_signatures() + } + + /// Total aggregate input amount. #[wasm_bindgen(getter, js_name = aggregateInputAmount)] pub fn aggregate_input_value(&self) -> BigInt { BigInt::from(self.inner.aggregate_input_value()) } + /// Total aggregate output amount. 
#[wasm_bindgen(getter, js_name = aggregateOutputAmount)] pub fn aggregate_output_value(&self) -> BigInt { BigInt::from(self.inner.aggregate_output_value()) } + /// Transaction type ("batch" or "final"). #[wasm_bindgen(getter, js_name = "type")] pub fn kind(&self) -> String { if self.inner.is_batch() { @@ -65,21 +87,54 @@ impl PendingTransaction { self.inner.addresses().iter().map(|address| JsValue::from(address.to_string())).collect() } + /// Provides a list of UTXO entries used by the transaction. #[wasm_bindgen(js_name = getUtxoEntries)] pub fn get_utxo_entries(&self) -> Array { self.inner.utxo_entries().values().map(|utxo_entry| JsValue::from(utxo_entry.clone())).collect() } - /// Sign transaction with supplied [`Array`] or [`PrivateKey`] or an array of + /// Creates and returns a signature for the input at the specified index. + #[wasm_bindgen(js_name = createInputSignature)] + pub fn create_input_signature( + &self, + input_index: u8, + private_key: &PrivateKey, + sighash_type: Option, + ) -> Result { + let signature = self.inner.create_input_signature( + input_index.into(), + &private_key.secret_bytes(), + sighash_type.unwrap_or(SighashType::All).into(), + )?; + + Ok(signature.to_hex().into()) + } + + /// Sets a signature to the input at the specified index. + #[wasm_bindgen(js_name = fillInput)] + pub fn fill_input(&self, input_index: u8, signature_script: BinaryT) -> Result<()> { + self.inner.fill_input(input_index.into(), signature_script.try_as_vec_u8()?) + } + + /// Signs the input at the specified index with the supplied private key + /// and an optional SighashType. + #[wasm_bindgen(js_name = signInput)] + pub fn sign_input(&self, input_index: u8, private_key: &PrivateKey, sighash_type: Option) -> Result<()> { + self.inner.sign_input(input_index.into(), &private_key.secret_bytes(), sighash_type.unwrap_or(SighashType::All).into())?; + + Ok(()) + } + + /// Signs transaction with supplied [`Array`] or [`PrivateKey`] or an array of /// raw private key bytes (encoded as `Uint8Array` or as hex strings) - pub fn sign(&self, js_value: PrivateKeyArrayT) -> Result<()> { + pub fn sign(&self, js_value: PrivateKeyArrayT, check_fully_signed: Option) -> Result<()> { if let Ok(keys) = js_value.dyn_into::() { let keys = keys .iter() - .map(PrivateKey::try_cast_from) + .map(PrivateKey::try_owned_from) .collect::, kaspa_wallet_keys::error::Error>>()?; - let mut keys = keys.iter().map(|key| key.as_ref().secret_bytes()).collect::>(); - self.inner.try_sign_with_keys(&keys)?; + let mut keys = keys.iter().map(|key| key.secret_bytes()).collect::>(); + self.inner.try_sign_with_keys(&keys, check_fully_signed)?; keys.zeroize(); Ok(()) } else { @@ -92,6 +147,14 @@ impl PendingTransaction { /// {@link UtxoContext} if one was used to create the transaction /// and will return UTXOs back to {@link UtxoContext} in case of /// a failed submission. + /// + /// # Important + /// + /// Make sure to consume the returned `txid` value. Always invoke this method + /// as follows `let txid = await pendingTransaction.submit(rpc);`. If you do not + /// consume the returned value and the rpc object is temporary, the GC will + /// collect the `rpc` object passed to submit() potentially causing a panic. 
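+ ///
+ /// A hypothetical end-to-end sketch (identifiers such as `rpc`, `entries`,
+ /// `customEntries`, `changeAddress`, `destination` and `privateKey` are
+ /// assumed to exist in the caller's scope):
+ ///
+ /// ```javascript
+ /// const generator = new Generator({
+ ///     networkId: "mainnet",
+ ///     entries,
+ ///     // priorityEntries: customEntries, // optional; consumed before `entries`
+ ///     changeAddress,
+ ///     outputs: [{ address: destination, amount: kaspaToSompi("1.5") }],
+ ///     priorityFee: 0n,
+ /// });
+ /// let pending;
+ /// while ((pending = await generator.next())) {
+ ///     pending.sign([privateKey]);
+ ///     const txid = await pending.submit(rpc); // always consume the returned txid
+ ///     console.log("submitted", txid);
+ /// }
+ /// ```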
+ /// /// @see {@link RpcClient.submitTransaction} pub async fn submit(&self, wasm_rpc_client: &RpcClient) -> Result { let rpc: Arc = wasm_rpc_client.client().clone(); @@ -110,7 +173,7 @@ impl PendingTransaction { /// @see {@link ISerializableTransaction} /// @see {@link Transaction}, {@link ISerializableTransaction} #[wasm_bindgen(js_name = "serializeToObject")] - pub fn serialize_to_object(&self) -> Result { + pub fn serialize_to_object(&self) -> Result { Ok(numeric::SerializableTransaction::from_cctx_transaction(&self.inner.transaction(), self.inner.utxo_entries())? .serialize_to_object()? .into()) diff --git a/wallet/core/src/wasm/tx/mass.rs b/wallet/core/src/wasm/tx/mass.rs index cc522fd8e0..af04a55b18 100644 --- a/wallet/core/src/wasm/tx/mass.rs +++ b/wallet/core/src/wasm/tx/mass.rs @@ -1,151 +1,98 @@ use crate::imports::NetworkParams; use crate::result::Result; -use crate::tx::mass; -use crate::wasm::tx::*; +use crate::tx::{mass, MAXIMUM_STANDARD_TRANSACTION_MASS}; use kaspa_consensus_client::*; use kaspa_consensus_core::config::params::Params; -use kaspa_consensus_core::tx as cctx; -use std::sync::Arc; +use kaspa_consensus_core::network::{NetworkId, NetworkIdT}; use wasm_bindgen::prelude::*; use workflow_wasm::convert::*; +/// `maximumStandardTransactionMass()` returns the maximum transaction +/// size allowed by the network. +/// /// @category Wallet SDK -#[wasm_bindgen] -pub struct MassCalculator { - mc: Arc, +/// @see {@link calculateTransactionMass} +/// @see {@link updateTransactionMass} +/// @see {@link calculateTransactionFee} +#[wasm_bindgen(js_name = maximumStandardTransactionMass)] +pub fn maximum_standard_transaction_mass() -> u64 { + MAXIMUM_STANDARD_TRANSACTION_MASS } -#[wasm_bindgen] -impl MassCalculator { - #[wasm_bindgen(constructor)] - pub fn new(cp: ConsensusParams) -> Self { - let consensus_params = Params::from(cp); - let network_params = NetworkParams::from(consensus_params.net); - Self { mc: Arc::new(mass::MassCalculator::new(&consensus_params, &network_params)) } - } - - #[wasm_bindgen(js_name=isDust)] - pub fn is_dust(&self, amount: u64) -> bool { - self.mc.is_dust(amount) - } - - /// `isTransactionOutputDust()` returns whether or not the passed transaction output - /// amount is considered dust or not based on the configured minimum transaction - /// relay fee. - /// - /// Dust is defined in terms of the minimum transaction relay fee. In particular, - /// if the cost to the network to spend coins is more than 1/3 of the minimum - /// transaction relay fee, it is considered dust. - /// - /// It is exposed by `MiningManager` for use by transaction generators and wallets. - #[wasm_bindgen(js_name=isTransactionOutputDust)] - pub fn is_transaction_output_dust(transaction_output: &JsValue) -> Result { - let transaction_output = TransactionOutput::try_from(transaction_output)?; - let transaction_output = cctx::TransactionOutput::from(&transaction_output); - Ok(mass::is_transaction_output_dust(&transaction_output)) - } - - /// `minimumRelayTransactionFee()` specifies the minimum transaction fee for a transaction to be accepted to - /// the mempool and relayed. It is specified in sompi per 1kg (or 1000 grams) of transaction mass. 
- /// - /// `pub(crate) const MINIMUM_RELAY_TRANSACTION_FEE: u64 = 1000;` - #[wasm_bindgen(js_name=minimumRelayTransactionFee)] - pub fn minimum_relay_transaction_fee() -> u32 { - mass::MINIMUM_RELAY_TRANSACTION_FEE as u32 - } - - /// `maximumStandardTransactionMass()` is the maximum mass allowed for transactions that - /// are considered standard and will therefore be relayed and considered for mining. - /// - /// `pub const MAXIMUM_STANDARD_TRANSACTION_MASS: u64 = 100_000;` - #[wasm_bindgen(js_name=maximumStandardTransactionMass)] - pub fn maximum_standard_transaction_mass() -> u32 { - mass::MAXIMUM_STANDARD_TRANSACTION_MASS as u32 - } - - /// minimum_required_transaction_relay_fee returns the minimum transaction fee required - /// for a transaction with the passed mass to be accepted into the mempool and relayed. - #[wasm_bindgen(js_name=minimumRequiredTransactionRelayFee)] - pub fn calc_minimum_required_transaction_relay_fee(mass: u32) -> u32 { - mass::calc_minimum_required_transaction_relay_fee(mass as u64) as u32 - } - - #[wasm_bindgen(js_name=calcMassForTransaction)] - pub fn calc_mass_for_transaction(&self, tx: &JsValue) -> Result { - let tx = Transaction::try_cast_from(tx)?; - let tx = cctx::Transaction::from(tx.as_ref()); - Ok(self.mc.calc_mass_for_transaction(&tx) as u32) - } - - #[wasm_bindgen(js_name=blankTransactionSerializedByteSize)] - pub fn blank_transaction_serialized_byte_size() -> u32 { - mass::blank_transaction_serialized_byte_size() as u32 - } - - #[wasm_bindgen(js_name=blankTransactionMass)] - pub fn blank_transaction_mass(&self) -> u32 { - self.mc.blank_transaction_mass() as u32 - } - - #[wasm_bindgen(js_name=calcMassForPayload)] - pub fn calc_mass_for_payload(&self, payload_byte_size: usize) -> u32 { - self.mc.calc_mass_for_payload(payload_byte_size) as u32 - } - - #[wasm_bindgen(js_name=calcMassForOutputs)] - pub fn calc_mass_for_outputs(&self, outputs: JsValue) -> Result { - let outputs = outputs - .dyn_into::()? - .iter() - .map(TransactionOutput::try_from) - .collect::, kaspa_consensus_client::error::Error>>()?; - let outputs = outputs.iter().map(|output| self.calc_mass_for_output(output)).collect::>>()?; - Ok(outputs.iter().sum()) - } - - #[wasm_bindgen(js_name=calcMassForInputs)] - pub fn calc_mass_for_inputs(&self, inputs: JsValue) -> Result { - let inputs = inputs - .dyn_into::()? 
- .iter() - .map(TransactionInput::try_owned_from) - .collect::, kaspa_consensus_client::error::Error>>()?; - let inputs = inputs.iter().map(|input| self.calc_mass_for_input(input)).collect::>>()?; - Ok(inputs.iter().sum()) - } - - #[wasm_bindgen(js_name=calcMassForOutput)] - pub fn calc_mass_for_output(&self, output: &TransactionOutput) -> Result { - // let output = TransactionOutput::try_from(output)?; - let output = cctx::TransactionOutput::from(output); - Ok(self.mc.calc_mass_for_output(&output) as u32) - } - - #[wasm_bindgen(js_name=calcMassForInput)] - pub fn calc_mass_for_input(&self, input: &TransactionInput) -> Result { - // let input = TransactionInput::try_from(input)?; - let input = cctx::TransactionInput::from(input); - Ok(self.mc.calc_mass_for_input(&input) as u32) - } - - #[wasm_bindgen(js_name=calcSignatureMass)] - pub fn calc_signature_mass(&self, minimum_signatures: u16) -> u32 { - self.mc.calc_signature_mass(minimum_signatures) as u32 - } - - #[wasm_bindgen(js_name=calcSignatureMassForInputs)] - pub fn calc_signature_mass_for_inputs(&self, number_of_inputs: usize, minimum_signatures: u16) -> u32 { - self.mc.calc_signature_mass_for_inputs(number_of_inputs, minimum_signatures) as u32 - } +/// `calculateTransactionMass()` returns the mass of the passed transaction. +/// If the transaction is invalid, or the mass can not be calculated +/// the function throws an error. +/// +/// The mass value must not exceed the maximum standard transaction mass +/// that can be obtained using `maximumStandardTransactionMass()`. +/// +/// @category Wallet SDK +/// @see {@link maximumStandardTransactionMass} +/// +#[wasm_bindgen(js_name = calculateTransactionMass)] +pub fn calculate_unsigned_transaction_mass(network_id: NetworkIdT, tx: &TransactionT, minimum_signatures: Option) -> Result { + let tx = Transaction::try_cast_from(tx)?; + let network_id = NetworkId::try_owned_from(network_id)?; + let consensus_params = Params::from(network_id); + let network_params = NetworkParams::from(network_id); + let mc = mass::MassCalculator::new(&consensus_params, network_params); + mc.calc_overall_mass_for_unsigned_client_transaction(tx.as_ref(), minimum_signatures.unwrap_or(1)) +} - #[wasm_bindgen(js_name=calcMinimumTransactionRelayFeeFromMass)] - pub fn calc_minimum_transaction_relay_fee_from_mass(&self, mass: u64) -> u32 { - self.mc.calc_minimum_transaction_fee_from_mass(mass) as u32 +/// `updateTransactionMass()` updates the mass property of the passed transaction. +/// If the transaction is invalid, the function throws an error. +/// +/// The function returns `true` if the mass is within the maximum standard transaction mass and +/// the transaction mass is updated. Otherwise, the function returns `false`. +/// +/// This is similar to `calculateTransactionMass()` but modifies the supplied +/// `Transaction` object. 
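+///
+/// A hypothetical usage sketch of these mass helpers (`tx` is assumed to be a
+/// client-side `Transaction` instance):
+///
+/// ```javascript
+/// const mass = calculateTransactionMass("mainnet", tx);
+/// if (mass <= maximumStandardTransactionMass()) {
+///     updateTransactionMass("mainnet", tx); // stores the computed mass on the transaction
+/// }
+/// ```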
+/// +/// @category Wallet SDK +/// @see {@link maximumStandardTransactionMass} +/// @see {@link calculateTransactionMass} +/// @see {@link calculateTransactionFee} +/// +#[wasm_bindgen(js_name = updateTransactionMass)] +pub fn update_unsigned_transaction_mass(network_id: NetworkIdT, tx: &Transaction, minimum_signatures: Option) -> Result { + let network_id = NetworkId::try_owned_from(network_id)?; + let consensus_params = Params::from(network_id); + let network_params = NetworkParams::from(network_id); + let mc = mass::MassCalculator::new(&consensus_params, network_params); + let mass = mc.calc_overall_mass_for_unsigned_client_transaction(tx, minimum_signatures.unwrap_or(1))?; + if mass > MAXIMUM_STANDARD_TRANSACTION_MASS { + Ok(false) + } else { + tx.set_mass(mass); + Ok(true) } +} - #[wasm_bindgen(js_name=calcMiniumTxRelayFee)] - pub fn calc_minimum_transaction_relay_fee(&self, transaction: &Transaction, minimum_signatures: u16) -> Result { - let tx = cctx::Transaction::from(transaction); - Ok(self.mc.calc_minium_transaction_relay_fee(&tx, minimum_signatures) as u32) +/// `calculateTransactionFee()` returns minimum fees needed for the transaction to be +/// accepted by the network. If the transaction is invalid or the mass can not be calculated, +/// the function throws an error. If the mass exceeds the maximum standard transaction mass, +/// the function returns `undefined`. +/// +/// @category Wallet SDK +/// @see {@link maximumStandardTransactionMass} +/// @see {@link calculateTransactionMass} +/// @see {@link updateTransactionMass} +/// +#[wasm_bindgen(js_name = calculateTransactionFee)] +pub fn calculate_unsigned_transaction_fee( + network_id: NetworkIdT, + tx: &TransactionT, + minimum_signatures: Option, +) -> Result> { + let tx = Transaction::try_cast_from(tx)?; + let network_id = NetworkId::try_owned_from(network_id)?; + let consensus_params = Params::from(network_id); + let network_params = NetworkParams::from(network_id); + let mc = mass::MassCalculator::new(&consensus_params, network_params); + let mass = mc.calc_overall_mass_for_unsigned_client_transaction(tx.as_ref(), minimum_signatures.unwrap_or(1))?; + if mass > MAXIMUM_STANDARD_TRANSACTION_MASS { + Ok(None) + } else { + Ok(Some(mc.calc_fee_for_mass(mass))) } } diff --git a/wallet/core/src/wasm/tx/mod.rs b/wallet/core/src/wasm/tx/mod.rs index df826a9971..742bb89cab 100644 --- a/wallet/core/src/wasm/tx/mod.rs +++ b/wallet/core/src/wasm/tx/mod.rs @@ -1,10 +1,8 @@ -pub mod consensus; pub mod fees; pub mod generator; pub mod mass; pub mod utils; -pub use self::consensus::*; pub use self::fees::*; pub use self::generator::*; pub use self::mass::*; diff --git a/wallet/core/src/wasm/tx/utils.rs b/wallet/core/src/wasm/tx/utils.rs index 0d911af76c..c1229444a1 100644 --- a/wallet/core/src/wasm/tx/utils.rs +++ b/wallet/core/src/wasm/tx/utils.rs @@ -1,14 +1,11 @@ use crate::imports::*; use crate::result::Result; use crate::tx::{IPaymentOutputArray, PaymentOutputs}; -use crate::wasm::tx::consensus::get_consensus_params_by_address; use crate::wasm::tx::generator::*; -use crate::wasm::tx::mass::MassCalculator; -use kaspa_addresses::{Address, AddressT}; use kaspa_consensus_client::*; use kaspa_consensus_core::subnets::SUBNETWORK_ID_NATIVE; -//use kaspa_consensus_wasm::*; use kaspa_wallet_macros::declare_typescript_wasm_interface as declare; +use kaspa_wasm_core::types::BinaryT; use workflow_core::runtime::is_web; /// Create a basic transaction without any mass limit checks. 
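+///
+/// A hypothetical sketch of the simplified signature below, assuming the function
+/// is exported to JavaScript as `createTransaction` (`entries` and `outputs` are
+/// assumed to exist; `payload` and `sigOpCount` are now optional):
+///
+/// ```javascript
+/// const tx = createTransaction(entries, outputs, 0n);
+/// ```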
@@ -17,32 +14,19 @@ use workflow_core::runtime::is_web; pub fn create_transaction_js( utxo_entry_source: IUtxoEntryArray, outputs: IPaymentOutputArray, - change_address: AddressT, priority_fee: BigInt, - payload: JsValue, - sig_op_count: JsValue, - minimum_signatures: JsValue, + payload: Option, + sig_op_count: Option, ) -> crate::result::Result { - let change_address = Address::try_cast_from(change_address)?; - let params = get_consensus_params_by_address(change_address.as_ref()); - let mc = MassCalculator::new(params); - let utxo_entries = if let Some(utxo_entries) = utxo_entry_source.dyn_ref::() { - utxo_entries.to_vec().iter().map(UtxoEntryReference::try_cast_from).collect::, _>>()? + utxo_entries.to_vec().iter().map(UtxoEntryReference::try_owned_from).collect::, _>>()? } else { return Err(Error::custom("utxo_entries must be an array")); }; let priority_fee: u64 = priority_fee.try_into().map_err(|err| Error::custom(format!("invalid fee value: {err}")))?; - let payload = payload.try_as_vec_u8().ok().unwrap_or_default(); + let payload = payload.and_then(|payload| payload.try_as_vec_u8().ok()).unwrap_or_default(); let outputs = PaymentOutputs::try_owned_from(outputs)?; - let sig_op_count = - if !sig_op_count.is_undefined() { sig_op_count.as_f64().expect("sigOpCount should be a number") as u8 } else { 1 }; - - let minimum_signatures = if !minimum_signatures.is_undefined() { - minimum_signatures.as_f64().expect("minimumSignatures should be a number") as u16 - } else { - 1 - }; + let sig_op_count = sig_op_count.unwrap_or(1); // --- @@ -53,10 +37,10 @@ pub fn create_transaction_js( .into_iter() .enumerate() .map(|(sequence, reference)| { - let UtxoEntryReference { utxo } = reference.as_ref(); + let UtxoEntryReference { utxo } = &reference; total_input_amount += utxo.amount(); - entries.push(reference.as_ref().clone()); - TransactionInput::new(utxo.outpoint.clone(), vec![], sequence as u64, sig_op_count, Some(reference.into_owned())) + entries.push(reference.clone()); + TransactionInput::new(utxo.outpoint.clone(), None, sequence as u64, sig_op_count, Some(reference)) }) .collect::>(); @@ -64,12 +48,8 @@ pub fn create_transaction_js( return Err(format!("priority fee({priority_fee}) > amount({total_input_amount})").into()); } - // TODO - Calculate mass and fees - let outputs: Vec = outputs.into(); - let transaction = Transaction::new(None, 0, inputs, outputs, 0, SUBNETWORK_ID_NATIVE, 0, payload)?; - let _fee = mc.calc_minimum_transaction_relay_fee(&transaction, minimum_signatures); - //let mtx = SignableTransaction::new(transaction, entries.into()); + let transaction = Transaction::new(None, 0, inputs, outputs, 0, SUBNETWORK_ID_NATIVE, 0, payload, 0)?; Ok(transaction) } diff --git a/wallet/core/src/wasm/utxo/context.rs b/wallet/core/src/wasm/utxo/context.rs index 2b69eb0c47..3298a4829e 100644 --- a/wallet/core/src/wasm/utxo/context.rs +++ b/wallet/core/src/wasm/utxo/context.rs @@ -147,6 +147,7 @@ impl UtxoContext { self.inner().clear().await } + #[wasm_bindgen(getter, js_name = "isActive")] pub fn active(&self) -> bool { let processor = self.inner().processor(); processor.try_rpc_ctl().map(|ctl| ctl.is_connected()).unwrap_or(false) && processor.is_connected() && processor.is_running() @@ -251,7 +252,10 @@ impl From for native::UtxoContext { impl TryCastFromJs for UtxoContext { type Error = Error; - fn try_cast_from(value: impl AsRef) -> Result, Self::Error> { + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + where + R: AsRef + 'a, + { 
Ok(Self::try_ref_from_js_value_as_cast(value)?) } } @@ -265,15 +269,15 @@ impl TryFrom<IUtxoContextArgs> for UtxoContextCreateArgs { type Error = Error; fn try_from(value: IUtxoContextArgs) -> std::result::Result<Self, Self::Error> { if let Some(object) = Object::try_from(&value) { - let processor = object.get_cast::<UtxoProcessor>("processor")?; + let processor = object.cast_into::<UtxoProcessor>("processor")?; - let binding = if let Some(id) = object.try_get_cast::<Hash>("id")? { - UtxoContextBinding::Id(UtxoContextId::new(id.into_owned())) + let binding = if let Some(id) = object.try_cast_into::<Hash>("id")? { + UtxoContextBinding::Id(UtxoContextId::new(id)) } else { UtxoContextBinding::default() }; - Ok(UtxoContextCreateArgs { binding, processor: processor.into_owned() }) + Ok(UtxoContextCreateArgs { binding, processor }) } else { Err(Error::custom("UtxoProcessor: supplied value must be an object")) } diff --git a/wallet/core/src/wasm/utxo/processor.rs index 0e41d8f773..d68f10f763 100644 --- a/wallet/core/src/wasm/utxo/processor.rs +++ b/wallet/core/src/wasm/utxo/processor.rs @@ -63,14 +63,14 @@ cfg_if! { /** * @param {UtxoProcessorNotificationCallback} callback */ - addEventListener(callback:UtxoProcessorNotificationCallback): void; + addEventListener(callback: UtxoProcessorNotificationCallback): void; /** * @param {UtxoProcessorEventType} event * @param {UtxoProcessorNotificationCallback} [callback] */ - addEventListener<M extends keyof UtxoProcessorEventMap>( - event: M, - callback: (eventData: UtxoProcessorEventMap[M]) => void + addEventListener<E extends keyof UtxoProcessorEventMap>( + event: E, + callback: UtxoProcessorNotificationCallback<E> ) }"#; } @@ -153,11 +153,54 @@ impl UtxoProcessor { self.inner.processor.set_network_id(network_id.as_ref()); Ok(()) } + + #[wasm_bindgen(getter, js_name = "isActive")] + pub fn is_active(&self) -> bool { + let processor = &self.inner.processor; + processor.try_rpc_ctl().map(|ctl| ctl.is_connected()).unwrap_or(false) && processor.is_connected() && processor.is_running() + } + + /// + /// Set the coinbase transaction maturity period DAA score for a given network. + /// This controls the DAA period after which coinbase transactions are considered mature + /// and the wallet subsystem emits the transaction maturity event.
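+ ///
+ /// A hypothetical invocation sketch (the DAA score shown is illustrative only):
+ ///
+ /// ```javascript
+ /// // Treat coinbase outputs as mature after 1000 DAA on mainnet.
+ /// UtxoProcessor.setCoinbaseTransactionMaturityDAA("mainnet", 1000n);
+ /// ```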
+ /// + /// @see {@link TransactionRecord} + /// @see {@link IUtxoProcessorEvent} + /// + /// @category Wallet SDK + /// + #[wasm_bindgen(js_name = "setUserTransactionMaturityDAA")] + pub fn set_user_transaction_maturity_period_daa_js(network_id: &NetworkIdT, value: u64) -> Result<()> { + let network_id = NetworkId::try_cast_from(network_id)?.into_owned(); + crate::utxo::set_user_transaction_maturity_period_daa(&network_id, value); + Ok(()) + } } impl TryCastFromJs for UtxoProcessor { type Error = workflow_wasm::error::Error; - fn try_cast_from(value: impl AsRef) -> Result, Self::Error> { + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + where + R: AsRef + 'a, + { Self::try_ref_from_js_value_as_cast(value) } } diff --git a/wallet/core/src/wasm/wallet/account.rs b/wallet/core/src/wasm/wallet/account.rs deleted file mode 100644 index 976e1df622..0000000000 --- a/wallet/core/src/wasm/wallet/account.rs +++ /dev/null @@ -1,155 +0,0 @@ -use crate::account as native; -use crate::imports::*; -use crate::tx::PaymentOutputs; -use crate::wasm::utxo::UtxoContext; -use kaspa_consensus_core::network::NetworkTypeT; -use kaspa_wallet_keys::keypair::Keypair; -use workflow_core::abortable::Abortable; - -/// -/// The `Account` class is a wallet account that can be used to send and receive payments. -/// -/// -/// @category Wallet API -#[wasm_bindgen(inspectable)] -#[derive(Clone, CastFromJs)] -pub struct Account { - inner: Arc, - #[wasm_bindgen(getter_with_clone)] - pub context: UtxoContext, -} - -impl Account { - pub async fn try_new(inner: Arc) -> Result { - let context = inner.utxo_context().clone(); - Ok(Self { inner, context: context.into() }) - } -} - -#[wasm_bindgen] -impl Account { - pub fn ctor(js_value: JsValue) -> Result { - let AccountCreateArgs {} = js_value.try_into()?; - - todo!(); - - // Ok(account) - } - - #[wasm_bindgen(getter)] - pub fn balance(&self) -> JsValue { - match self.inner.balance() { - Some(balance) => crate::wasm::Balance::from(balance).into(), - None => JsValue::UNDEFINED, - } - } - - #[wasm_bindgen(getter, js_name = "type")] - pub fn account_kind(&self) -> String { - self.inner.account_kind().to_string() - } - - #[wasm_bindgen(js_name = balanceStrings)] - pub fn balance_strings(&self, network_type: &NetworkTypeT) -> Result { - match self.inner.balance() { - Some(balance) => Ok(crate::wasm::Balance::from(balance).to_balance_strings(network_type)?.into()), - None => Ok(JsValue::UNDEFINED), - } - } - - #[wasm_bindgen(getter, js_name = "receiveAddress")] - pub fn receive_address(&self) -> Result { - Ok(self.inner.receive_address()?.to_string()) - } - - #[wasm_bindgen(getter, js_name = "changeAddress")] - pub fn change_address(&self) -> Result { - Ok(self.inner.change_address()?.to_string()) - } - - #[wasm_bindgen(js_name = "deriveReceiveAddress")] - pub async fn derive_receive_address(&self) -> Result
<Address> { - let account = self.inner.clone().as_derivation_capable()?; - let receive_address = account.new_receive_address().await?; - Ok(receive_address) - } - - #[wasm_bindgen(js_name = "deriveChangeAddress")] - pub async fn derive_change_address(&self) -> Result<Address>
{ - let account = self.inner.clone().as_derivation_capable()?; - let change_address = account.new_change_address().await?; - Ok(change_address) - } - - pub async fn scan(&self) -> Result<()> { - self.inner.clone().scan(None, None).await - } - - pub async fn send(&self, js_value: JsValue) -> Result { - let _args = AccountSendArgs::try_from(js_value)?; - - todo!() - } -} - -impl From for Arc { - fn from(account: Account) -> Self { - account.inner - } -} - -impl TryFrom<&JsValue> for Account { - type Error = Error; - fn try_from(js_value: &JsValue) -> std::result::Result { - Ok(Account::try_ref_from_js_value(js_value)?.clone()) - } -} - -pub struct AccountSendArgs { - pub outputs: PaymentOutputs, - pub priority_fee_sompi: Option, - pub include_fees_in_amount: bool, - - pub wallet_secret: Secret, - pub payment_secret: Option, - pub abortable: Abortable, -} - -impl TryFrom for AccountSendArgs { - type Error = Error; - fn try_from(js_value: JsValue) -> std::result::Result { - if let Some(object) = Object::try_from(&js_value) { - let outputs = object.get_cast::("outputs")?.into_owned(); - - let priority_fee_sompi = object.get_u64("priorityFee").ok(); - let include_fees_in_amount = object.get_bool("includeFeesInAmount").unwrap_or(false); - let abortable = object.get("abortable").ok().and_then(|v| Abortable::try_from(&v).ok()).unwrap_or_default(); - - let wallet_secret = object.get_string("walletSecret")?.into(); - let payment_secret = object.get_value("paymentSecret")?.as_string().map(|s| s.into()); - - let send_args = - AccountSendArgs { outputs, priority_fee_sompi, include_fees_in_amount, wallet_secret, payment_secret, abortable }; - - Ok(send_args) - } else { - Err("Argument to Account::send() must be an object".into()) - } - } -} - -pub struct AccountCreateArgs {} - -impl TryFrom for AccountCreateArgs { - type Error = Error; - fn try_from(value: JsValue) -> std::result::Result { - if let Some(object) = Object::try_from(&value) { - let _keypair = object.try_get_cast::("keypair")?; - let _public_key = object.try_get_cast::("keypair")?; - - Ok(AccountCreateArgs {}) - } else { - Err(Error::custom("Account: supplied value must be an object")) - } - } -} diff --git a/wallet/core/src/wasm/wallet/mod.rs b/wallet/core/src/wasm/wallet/mod.rs index cacd73628d..2ae871c1e3 100644 --- a/wallet/core/src/wasm/wallet/mod.rs +++ b/wallet/core/src/wasm/wallet/mod.rs @@ -1,4 +1,3 @@ -pub mod account; pub mod keydata; #[allow(clippy::module_inception)] pub mod wallet; diff --git a/wallet/keys/Cargo.toml b/wallet/keys/Cargo.toml index 7300c1de57..b352f42a92 100644 --- a/wallet/keys/Cargo.toml +++ b/wallet/keys/Cargo.toml @@ -46,5 +46,5 @@ zeroize.workspace = true [target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies] tokio.workspace = true -[lints.clippy] -empty_docs = "allow" +[lints] +workspace = true diff --git a/wallet/keys/src/derivation/gen0/hd.rs b/wallet/keys/src/derivation/gen0/hd.rs index dd85f8fe5b..3b7da0e758 100644 --- a/wallet/keys/src/derivation/gen0/hd.rs +++ b/wallet/keys/src/derivation/gen0/hd.rs @@ -176,7 +176,7 @@ impl PubkeyDerivationManagerV0 { return Ok(*key); } - Err(crate::error::Error::Custom("PubkeyDerivationManagerV0 initialization is pending (Error: 102).".into())) + Err(crate::error::Error::Custom("PubkeyDerivationManagerV0 initialization is pending (Error: 105).".into())) } pub fn create_address(key: &secp256k1::PublicKey, prefix: AddressPrefix, _ecdsa: bool) -> Result
<Address> { diff --git a/wallet/keys/src/derivation/gen0/mod.rs index d250326268..0de5da286e 100644 --- a/wallet/keys/src/derivation/gen0/mod.rs +++ b/wallet/keys/src/derivation/gen0/mod.rs @@ -1,4 +1,4 @@ -//! Derivation management for legacy account derivation scheme `972` +//! Derivation management for the legacy account derivation scheme based on the `'972'` derivation path (deprecated). mod hd; pub use hd::{PubkeyDerivationManagerV0, WalletDerivationManagerV0}; diff --git a/wallet/keys/src/derivation/gen1/mod.rs index 1822c4d7a2..5ec859b59a 100644 --- a/wallet/keys/src/derivation/gen1/mod.rs +++ b/wallet/keys/src/derivation/gen1/mod.rs @@ -1,3 +1,4 @@ -/// Derivation management for the Kaspa standard derivation scheme `111111'` +//! Derivation management for the Kaspa standard derivation scheme `'111111'` + mod hd; pub use hd::{PubkeyDerivationManager, WalletDerivationManager}; diff --git a/wallet/keys/src/derivation/mod.rs index a63201194a..cfa80b4f70 100644 --- a/wallet/keys/src/derivation/mod.rs +++ b/wallet/keys/src/derivation/mod.rs @@ -1,3 +1,7 @@ +//! +//! Derivation utilities used by the integrated Kaspa Wallet API. +//! + pub mod gen0; pub mod gen1; pub mod traits; diff --git a/wallet/keys/src/derivation_path.rs index a4c3efe691..a5389ca37e 100644 --- a/wallet/keys/src/derivation_path.rs +++ b/wallet/keys/src/derivation_path.rs @@ -1,7 +1,13 @@ +//! +//! Implementation of the [`DerivationPath`] manager for arbitrary derivation paths. +//! + use crate::imports.*; use workflow_wasm::prelude::*; +/// /// Key derivation path +/// /// @category Wallet SDK #[derive(Clone, CastFromJs)] #[wasm_bindgen] pub struct DerivationPath @@ -51,8 +57,11 @@ impl DerivationPath { impl TryCastFromJs for DerivationPath { type Error = Error; - fn try_cast_from(value: impl AsRef<JsValue>) -> Result<Cast<Self>, Self::Error> { - Self::resolve(&value, || { + fn try_cast_from<'a, R>(value: &'a R) -> Result<Cast<'a, Self>, Self::Error> + where + R: AsRef<JsValue> + 'a, + { + Self::resolve(value, || { let value = value.as_ref(); if let Some(path) = value.as_string() { Ok(DerivationPath::new(&path)?) diff --git a/wallet/keys/src/error.rs index 7a868b13a6..0059a09420 100644 --- a/wallet/keys/src/error.rs +++ b/wallet/keys/src/error.rs @@ -52,6 +52,9 @@ pub enum Error { #[error("Invalid PublicKey (must be a string or an instance of PrivateKey)")] InvalidPublicKey, + #[error("XOnlyPublicKey can not be used for ECDSA")] + InvalidXOnlyPublicKeyForECDSA, + #[error("Invalid PublicKey Array (must be string[] or PrivateKey[])")] InvalidPublicKeyArray, diff --git a/wallet/keys/src/imports.rs index 6597aad3c3..a1c0a5e0d2 100644 --- a/wallet/keys/src/imports.rs +++ b/wallet/keys/src/imports.rs @@ -15,7 +15,7 @@ pub use borsh::{BorshDeserialize, BorshSerialize}; pub use js_sys::Array; pub use kaspa_addresses::{Address, Version as AddressVersion}; pub use kaspa_bip32::{ChildNumber, ExtendedPrivateKey, ExtendedPublicKey, SecretKey}; -pub use kaspa_consensus_core::network::NetworkTypeT; +pub use kaspa_consensus_core::network::{NetworkId, NetworkTypeT}; pub use kaspa_utils::hex::*; pub use kaspa_wasm_core::types::*; pub use serde::{Deserialize, Serialize}; diff --git a/wallet/keys/src/keypair.rs index 3bed0152b7..2cc3d57607 100644 --- a/wallet/keys/src/keypair.rs +++ b/wallet/keys/src/keypair.rs @@ -2,6 +2,8 @@ //!
[`keypair`](mod@self) module encapsulates [`Keypair`] and [`PrivateKey`]. //! The [`Keypair`] provides access to the secret and public keys. //! +//! # JavaScript Example +//! //! ```javascript //! //! let keypair = Keypair.random(); @@ -56,7 +58,8 @@ impl Keypair { } /// Get the [`Address`] of this Keypair's [`PublicKey`]. - /// Receives a [`NetworkType`] to determine the prefix of the address. + /// Receives a [`NetworkType`](kaspa_consensus_core::network::NetworkType) + /// to determine the prefix of the address. /// JavaScript: `let address = keypair.toAddress(NetworkType.MAINNET);`. #[wasm_bindgen(js_name = toAddress)] // pub fn to_address(&self, network_type: NetworkType) -> Result
<Address> { @@ -67,7 +70,8 @@ impl Keypair { } /// Get `ECDSA` [`Address`] of this Keypair's [`PublicKey`]. - /// Receives a [`NetworkType`] to determine the prefix of the address. + /// Receives a [`NetworkType`](kaspa_consensus_core::network::NetworkType) + /// to determine the prefix of the address. /// JavaScript: `let address = keypair.toAddressECDSA(NetworkType.MAINNET);`. #[wasm_bindgen(js_name = toAddressECDSA)] pub fn to_address_ecdsa(&self, network: &NetworkTypeT) -> Result<Address>
{ @@ -100,7 +104,10 @@ impl Keypair { impl TryCastFromJs for Keypair { type Error = Error; - fn try_cast_from(value: impl AsRef) -> Result, Self::Error> { + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + where + R: AsRef + 'a, + { Ok(Self::try_ref_from_js_value_as_cast(value)?) } } diff --git a/wallet/keys/src/lib.rs b/wallet/keys/src/lib.rs index bec8747d05..86984e36ab 100644 --- a/wallet/keys/src/lib.rs +++ b/wallet/keys/src/lib.rs @@ -1,3 +1,10 @@ +//! +//! # Kaspa Wallet Keys +//! +//! This crate provides tools for creating and managing Kaspa wallet keys. +//! This includes extended key generation and derivation. +//! + pub mod derivation; pub mod derivation_path; pub mod error; diff --git a/wallet/keys/src/prelude.rs b/wallet/keys/src/prelude.rs index 1aed7c5353..5d3af82dda 100644 --- a/wallet/keys/src/prelude.rs +++ b/wallet/keys/src/prelude.rs @@ -1,3 +1,7 @@ +//! +//! Re-exports of the most commonly used types and traits in this crate. +//! + pub use crate::derivation_path::*; pub use crate::keypair::*; pub use crate::privatekey::*; diff --git a/wallet/keys/src/privatekey.rs b/wallet/keys/src/privatekey.rs index 825bf395fa..554bdf36e3 100644 --- a/wallet/keys/src/privatekey.rs +++ b/wallet/keys/src/privatekey.rs @@ -68,7 +68,8 @@ impl PrivateKey { } /// Get the [`Address`] of the PublicKey generated from this PrivateKey. - /// Receives a [`NetworkType`] to determine the prefix of the address. + /// Receives a [`NetworkType`](kaspa_consensus_core::network::NetworkType) + /// to determine the prefix of the address. /// JavaScript: `let address = privateKey.toAddress(NetworkType.MAINNET);`. #[wasm_bindgen(js_name = toAddress)] pub fn to_address(&self, network: &NetworkTypeT) -> Result
<Address> { @@ -80,7 +81,8 @@ impl PrivateKey { } /// Get `ECDSA` [`Address`] of the PublicKey generated from this PrivateKey. - /// Receives a [`NetworkType`] to determine the prefix of the address. + /// Receives a [`NetworkType`](kaspa_consensus_core::network::NetworkType) + /// to determine the prefix of the address. /// JavaScript: `let address = privateKey.toAddressECDSA(NetworkType.MAINNET);`. #[wasm_bindgen(js_name = toAddressECDSA)] pub fn to_address_ecdsa(&self, network: &NetworkTypeT) -> Result<Address>
{ @@ -93,8 +95,11 @@ impl PrivateKey { impl TryCastFromJs for PrivateKey { type Error = Error; - fn try_cast_from(value: impl AsRef) -> Result, Self::Error> { - Self::resolve(&value, || { + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + where + R: AsRef + 'a, + { + Self::resolve(value, || { if let Some(hex_str) = value.as_ref().as_string() { Self::try_new(hex_str.as_str()) } else if Array::is_array(value.as_ref()) { diff --git a/wallet/keys/src/privkeygen.rs b/wallet/keys/src/privkeygen.rs index ff2f3bd8fa..474dec8ec1 100644 --- a/wallet/keys/src/privkeygen.rs +++ b/wallet/keys/src/privkeygen.rs @@ -1,3 +1,7 @@ +//! +//! [`PrivateKeyGenerator`] helper for generating private key derivations from an extended private key (XPrv). +//! + use crate::derivation::gen1::WalletDerivationManager; use crate::imports::*; diff --git a/wallet/keys/src/pubkeygen.rs b/wallet/keys/src/pubkeygen.rs index 272e638455..a61eeb5ada 100644 --- a/wallet/keys/src/pubkeygen.rs +++ b/wallet/keys/src/pubkeygen.rs @@ -1,3 +1,7 @@ +//! +//! [`PublicKeyGenerator`] helper for generating public key derivations from an extended public key (XPub). +//! + use crate::derivation::gen1::WalletDerivationManager; use crate::derivation::traits::WalletDerivationManagerTrait; use crate::imports::*; @@ -21,7 +25,7 @@ pub struct PublicKeyGenerator { #[wasm_bindgen] impl PublicKeyGenerator { #[wasm_bindgen(js_name=fromXPub)] - pub fn from_xpub(kpub: XPubT, cosigner_index: Option) -> Result { + pub fn from_xpub(kpub: &XPubT, cosigner_index: Option) -> Result { let kpub = XPub::try_cast_from(kpub)?; let xpub = kpub.as_ref().inner(); let hd_wallet = WalletDerivationManager::from_extended_public_key(xpub.clone(), cosigner_index)?; @@ -193,7 +197,7 @@ impl PublicKeyGenerator { #[wasm_bindgen(js_name=changeAddressAsString)] #[allow(non_snake_case)] pub fn change_address_as_string(&self, networkType: &NetworkTypeT, index: u32) -> Result { - Ok(PublicKey::from(self.hd_wallet.receive_pubkey_manager().derive_pubkey(index)?) + Ok(PublicKey::from(self.hd_wallet.change_pubkey_manager().derive_pubkey(index)?) .to_address(networkType.try_into()?)? .to_string()) } diff --git a/wallet/keys/src/publickey.rs b/wallet/keys/src/publickey.rs index f262a12f4a..235eb80804 100644 --- a/wallet/keys/src/publickey.rs +++ b/wallet/keys/src/publickey.rs @@ -1,6 +1,6 @@ //! -//! [`keypair`](mod@self) module encapsulates [`Keypair`] and [`PrivateKey`]. -//! The [`Keypair`] provides access to the secret and public keys. +//! [`keypair`](mod@self) module encapsulates [`Keypair`](crate::keypair::Keypair) and [`PrivateKey`]. +//! The [`Keypair`](crate::keypair::Keypair) provides access to the secret and public keys. //! //! ```javascript //! @@ -17,10 +17,12 @@ //! ``` //! -use kaspa_consensus_core::network::NetworkType; - use crate::imports::*; +use kaspa_consensus_core::network::NetworkType; +use ripemd::{Digest, Ripemd160}; +use sha2::Sha256; + /// Data structure that envelopes a PublicKey. /// Only supports Schnorr-based addresses. /// @category Wallet SDK @@ -69,6 +71,17 @@ impl PublicKey { pub fn to_x_only_public_key(&self) -> XOnlyPublicKey { self.xonly_public_key.into() } + + /// Compute a 4-byte key fingerprint for this public key as a hex string. + /// Default implementation uses `RIPEMD160(SHA256(public_key))`. 
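+ ///
+ /// A hypothetical sketch (`publicKeyHex` is an assumed hex-encoded key string):
+ ///
+ /// ```javascript
+ /// const key = new PublicKey(publicKeyHex);
+ /// console.log(key.fingerprint()); // 8 hex characters, or undefined for x-only keys
+ /// ```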
+ pub fn fingerprint(&self) -> Option { + if let Some(public_key) = self.public_key.as_ref() { + let digest = Ripemd160::digest(Sha256::digest(public_key.serialize().as_slice())); + Some(digest[..4].as_ref().to_hex().into()) + } else { + None + } + } } impl PublicKey { @@ -81,9 +94,13 @@ impl PublicKey { #[inline] pub fn to_address_ecdsa(&self, network_type: NetworkType) -> Result
{ - let payload = &self.xonly_public_key.serialize(); - let address = Address::new(network_type.into(), AddressVersion::PubKeyECDSA, payload); - Ok(address) + if let Some(public_key) = self.public_key.as_ref() { + let payload = &public_key.serialize(); + let address = Address::new(network_type.into(), AddressVersion::PubKeyECDSA, payload); + Ok(address) + } else { + Err(Error::InvalidXOnlyPublicKeyForECDSA) + } } } @@ -138,8 +155,11 @@ extern "C" { impl TryCastFromJs for PublicKey { type Error = Error; - fn try_cast_from(value: impl AsRef) -> Result, Self::Error> { - Self::resolve(&value, || { + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + where + R: AsRef + 'a, + { + Self::resolve(value, || { let value = value.as_ref(); if let Some(hex_str) = value.as_string() { Ok(PublicKey::try_new(hex_str.as_str())?) @@ -150,13 +170,13 @@ impl TryCastFromJs for PublicKey { } } -impl TryFrom for Vec { +impl TryFrom<&PublicKeyArrayT> for Vec { type Error = Error; - fn try_from(value: PublicKeyArrayT) -> Result { + fn try_from(value: &PublicKeyArrayT) -> Result { if value.is_array() { - let array = Array::from(&value); - let pubkeys = array.iter().map(PublicKey::try_cast_from).collect::>>()?; - Ok(pubkeys.iter().map(|pk| pk.as_ref().try_into()).collect::>>()?) + let array = Array::from(value); + let pubkeys = array.iter().map(PublicKey::try_owned_from).collect::>>()?; + Ok(pubkeys.iter().map(|pk| pk.try_into()).collect::>>()?) } else { Err(Error::InvalidPublicKeyArray) } diff --git a/wallet/keys/src/secret.rs b/wallet/keys/src/secret.rs index 99d94f5bed..d934ce4ec8 100644 --- a/wallet/keys/src/secret.rs +++ b/wallet/keys/src/secret.rs @@ -1,10 +1,10 @@ //! -//! Secret container for sensitive data. Performs zeroization on drop. +//! Secret container for sensitive data. Performs data erasure (zeroization) on drop. //! use crate::imports::*; -/// Secret container for sensitive data. Performs memory zeroization on drop. +/// Secret container for sensitive data. Performs memory erasure (zeroization) on drop. #[derive(Clone, Serialize, Deserialize, BorshSerialize, BorshDeserialize)] pub struct Secret(Vec); diff --git a/wallet/keys/src/types.rs b/wallet/keys/src/types.rs index c006d28729..11c5678349 100644 --- a/wallet/keys/src/types.rs +++ b/wallet/keys/src/types.rs @@ -1,5 +1,5 @@ //! -//! Type aliases used by the wallet framework. +//! Key-related type aliases used by the wallet framework. //! use std::sync::Arc; diff --git a/wallet/keys/src/xprv.rs b/wallet/keys/src/xprv.rs index bc4b681cf8..c19e0b9cc8 100644 --- a/wallet/keys/src/xprv.rs +++ b/wallet/keys/src/xprv.rs @@ -1,3 +1,9 @@ +//! +//! Extended private key ([`XPrv`]). +//! 
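+//!
+//! A minimal usage sketch (hypothetical; `xprv_str` is assumed to be a valid
+//! `kprv`-prefixed extended key string):
+//! ```ignore
+//! let xprv = XPrv::from_xprv_str(xprv_str)?;
+//! let child = xprv.derive_child(0, Some(true))?; // hardened child 0'
+//! let private_key = child.to_private_key()?;
+//! ```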
+ +use kaspa_bip32::{ChainCode, KeyFingerprint}; + use crate::imports::*; /// @@ -13,7 +19,7 @@ use crate::imports::*; /// #[derive(Clone, CastFromJs)] -#[wasm_bindgen] +#[wasm_bindgen(inspectable)] pub struct XPrv { inner: ExtendedPrivateKey, } @@ -41,9 +47,9 @@ impl XPrv { } #[wasm_bindgen(js_name=deriveChild)] - pub fn derive_child(&self, chile_number: u32, hardened: Option) -> Result { - let chile_number = ChildNumber::new(chile_number, hardened.unwrap_or(false))?; - let inner = self.inner.derive_child(chile_number)?; + pub fn derive_child(&self, child_number: u32, hardened: Option) -> Result { + let child_number = ChildNumber::new(child_number, hardened.unwrap_or(false))?; + let inner = self.inner.derive_child(child_number)?; Ok(Self { inner }) } @@ -70,6 +76,60 @@ impl XPrv { let public_key = self.inner.public_key(); Ok(public_key.into()) } + + #[wasm_bindgen(js_name = toPrivateKey)] + pub fn to_private_key(&self) -> Result { + let private_key = self.inner.private_key(); + Ok(private_key.into()) + } + + // ~~~~ Getters ~~~~ + + #[wasm_bindgen(getter)] + pub fn xprv(&self) -> Result { + let str = self.inner.to_extended_key("kprv".try_into()?).to_string(); + Ok(str) + } + + #[wasm_bindgen(getter, js_name = "privateKey")] + pub fn private_key_as_hex_string(&self) -> String { + use kaspa_bip32::PrivateKey; + self.inner.private_key().to_bytes().to_vec().to_hex() + } + + #[wasm_bindgen(getter)] + pub fn depth(&self) -> u8 { + self.inner.attrs().depth + } + + #[wasm_bindgen(getter, js_name = parentFingerprint)] + pub fn parent_fingerprint_as_hex_string(&self) -> String { + self.inner.attrs().parent_fingerprint.to_vec().to_hex() + } + + #[wasm_bindgen(getter, js_name = childNumber)] + pub fn child_number(&self) -> u32 { + self.inner.attrs().child_number.into() + } + + #[wasm_bindgen(getter, js_name = chainCode)] + pub fn chain_code_as_hex_string(&self) -> String { + self.inner.attrs().chain_code.to_vec().to_hex() + } +} + +impl XPrv { + pub fn private_key(&self) -> &SecretKey { + self.inner.private_key() + } + + pub fn parent_fingerprint(&self) -> KeyFingerprint { + self.inner.attrs().parent_fingerprint + } + + pub fn chain_code(&self) -> ChainCode { + self.inner.attrs().chain_code + } } impl<'a> From<&'a XPrv> for &'a ExtendedPrivateKey { @@ -86,8 +146,11 @@ extern "C" { impl TryCastFromJs for XPrv { type Error = Error; - fn try_cast_from(value: impl AsRef) -> Result, Self::Error> { - Self::resolve(&value, || { + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + where + R: AsRef + 'a, + { + Self::resolve(value, || { if let Some(xprv) = value.as_ref().as_string() { Ok(XPrv::from_xprv_str(xprv)?) } else { diff --git a/wallet/keys/src/xpub.rs b/wallet/keys/src/xpub.rs index ded247edf0..8706f3fc91 100644 --- a/wallet/keys/src/xpub.rs +++ b/wallet/keys/src/xpub.rs @@ -1,3 +1,10 @@ +//! +//! Extended public key ([`XPub`]). +//! 
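+//!
+//! A minimal usage sketch (hypothetical; `xpub_str` is assumed to be a valid
+//! `kpub`-prefixed extended key string):
+//! ```ignore
+//! let xpub = XPub::try_new(xpub_str)?;
+//! let child = xpub.derive_child(0, Some(false))?; // extended public keys derive non-hardened children only
+//! let public_key = child.public_key();
+//! ```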
+ +use kaspa_bip32::{ChainCode, KeyFingerprint, Prefix}; +use std::{fmt, str::FromStr}; + use crate::imports::*; /// @@ -12,7 +19,7 @@ use crate::imports::*; /// @category Wallet SDK /// #[derive(Clone, CastFromJs)] -#[wasm_bindgen] +#[wasm_bindgen(inspectable)] pub struct XPub { inner: ExtendedPublicKey, } @@ -32,9 +39,9 @@ impl XPub { } #[wasm_bindgen(js_name=deriveChild)] - pub fn derive_child(&self, chile_number: u32, hardened: Option) -> Result { - let chile_number = ChildNumber::new(chile_number, hardened.unwrap_or(false))?; - let inner = self.inner.derive_child(chile_number)?; + pub fn derive_child(&self, child_number: u32, hardened: Option) -> Result { + let child_number = ChildNumber::new(child_number, hardened.unwrap_or(false))?; + let inner = self.inner.derive_child(child_number)?; Ok(Self { inner }) } @@ -55,6 +62,44 @@ impl XPub { pub fn public_key(&self) -> PublicKey { self.inner.public_key().into() } + + // ~~~~ Getters ~~~~ + + #[wasm_bindgen(getter)] + pub fn xpub(&self) -> Result { + let str = self.inner.to_extended_key("kpub".try_into()?).to_string(); + Ok(str) + } + + #[wasm_bindgen(getter)] + pub fn depth(&self) -> u8 { + self.inner.attrs().depth + } + + #[wasm_bindgen(getter, js_name = parentFingerprint)] + pub fn parent_fingerprint_as_hex_string(&self) -> String { + self.inner.attrs().parent_fingerprint.to_vec().to_hex() + } + + #[wasm_bindgen(getter, js_name = childNumber)] + pub fn child_number(&self) -> u32 { + self.inner.attrs().child_number.into() + } + + #[wasm_bindgen(getter, js_name = chainCode)] + pub fn chain_code_as_hex_string(&self) -> String { + self.inner.attrs().chain_code.to_vec().to_hex() + } +} + +impl XPub { + pub fn parent_fingerprint(&self) -> KeyFingerprint { + self.inner.attrs().parent_fingerprint + } + + pub fn chain_code(&self) -> ChainCode { + self.inner.attrs().chain_code + } } impl From> for XPub { @@ -71,8 +116,11 @@ extern "C" { impl TryCastFromJs for XPub { type Error = Error; - fn try_cast_from(value: impl AsRef) -> Result, Self::Error> { - Self::resolve(&value, || { + fn try_cast_from<'a, R>(value: &'a R) -> Result, Self::Error> + where + R: AsRef + 'a, + { + Self::resolve(value, || { if let Some(xpub) = value.as_ref().as_string() { Ok(XPub::try_new(xpub.as_str())?) } else { @@ -81,3 +129,26 @@ impl TryCastFromJs for XPub { }) } } + +pub struct NetworkTaggedXpub { + pub xpub: ExtendedPublicKey, + pub network_id: NetworkId, +} +// impl NetworkTaggedXpub { + +// } + +impl fmt::Display for NetworkTaggedXpub { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let obj: XPub = self.xpub.clone().into(); + write!(f, "{}", obj.inner.to_string(Some(Prefix::from(self.network_id)))) + } +} + +type TaggedXpub = (ExtendedPublicKey, NetworkId); + +impl From for NetworkTaggedXpub { + fn from(value: TaggedXpub) -> Self { + Self { xpub: value.0, network_id: value.1 } + } +} diff --git a/wallet/macros/src/wallet/client.rs b/wallet/macros/src/wallet/client.rs index 8bbc51b8a6..c942af013e 100644 --- a/wallet/macros/src/wallet/client.rs +++ b/wallet/macros/src/wallet/client.rs @@ -61,7 +61,7 @@ impl ToTokens for RpcTable { { match __self.codec { Codec::Borsh(ref codec) => { - Ok(#response_type::try_from_slice(&codec.call(op, request.try_to_vec()?).await?)?) + Ok(#response_type::try_from_slice(&codec.call(op, borsh::to_vec(&request)?).await?)?) 
}, Codec::Serde(ref codec) => { let request = serde_json::to_string(&request)?; diff --git a/wallet/macros/src/wallet/server.rs b/wallet/macros/src/wallet/server.rs index a1e7bf13e9..073d33021e 100644 --- a/wallet/macros/src/wallet/server.rs +++ b/wallet/macros/src/wallet/server.rs @@ -38,7 +38,7 @@ impl ToTokens for RpcTable { targets_borsh.push(quote! { #hash_64 => { - Ok(self.wallet_api().#fn_call(#request_type::try_from_slice(&request)?).await?.try_to_vec()?) + Ok(borsh::to_vec(&self.wallet_api().#fn_call(#request_type::try_from_slice(&request)?).await?)?) } }); diff --git a/wallet/pskt/Cargo.toml b/wallet/pskt/Cargo.toml new file mode 100644 index 0000000000..b3fff1bfaf --- /dev/null +++ b/wallet/pskt/Cargo.toml @@ -0,0 +1,46 @@ +[package] +name = "kaspa-wallet-pskt" +keywords = ["kaspa", "wallet", "pskt", "psbt", "bip-370"] +description = "Partially Signed Kaspa Transaction" +categories = ["cryptography::cryptocurrencies"] +rust-version.workspace = true +version.workspace = true +authors.workspace = true +license.workspace = true +repository.workspace = true +edition.workspace = true +include.workspace = true + +[lib] +crate-type = ["cdylib", "lib"] + +[features] +wasm32-sdk = ["kaspa-consensus-client/wasm32-sdk"] +wasm32-types = ["kaspa-consensus-client/wasm32-types"] + +[dependencies] +kaspa-addresses.workspace = true +kaspa-bip32.workspace = true +kaspa-consensus-client.workspace = true +kaspa-consensus-core.workspace = true +kaspa-txscript-errors.workspace = true +kaspa-txscript.workspace = true +kaspa-utils.workspace = true + +bincode.workspace = true +derive_builder.workspace = true +js-sys.workspace = true +futures.workspace = true +hex.workspace = true +secp256k1.workspace = true +serde_repr.workspace = true +serde-value.workspace = true +serde.workspace = true +thiserror.workspace = true +wasm-bindgen.workspace = true +serde_json.workspace = true +serde-wasm-bindgen.workspace = true +workflow-wasm.workspace = true + +[dev-dependencies] +serde_json.workspace = true diff --git a/wallet/pskt/examples/multisig.rs b/wallet/pskt/examples/multisig.rs new file mode 100644 index 0000000000..fb011402fb --- /dev/null +++ b/wallet/pskt/examples/multisig.rs @@ -0,0 +1,121 @@ +use kaspa_consensus_core::{ + hashing::sighash::{calc_schnorr_signature_hash, SigHashReusedValues}, + tx::{TransactionId, TransactionOutpoint, UtxoEntry}, +}; +use kaspa_txscript::{multisig_redeem_script, opcodes::codes::OpData65, pay_to_script_hash_script, script_builder::ScriptBuilder}; +use kaspa_wallet_pskt::prelude::{ + Combiner, Creator, Extractor, Finalizer, Inner, InputBuilder, SignInputOk, Signature, Signer, Updater, PSKT, +}; +use secp256k1::{rand::thread_rng, Keypair}; +use std::{iter, str::FromStr}; + +fn main() { + let kps = [Keypair::new(secp256k1::SECP256K1, &mut thread_rng()), Keypair::new(secp256k1::SECP256K1, &mut thread_rng())]; + let redeem_script = multisig_redeem_script(kps.iter().map(|pk| pk.x_only_public_key().0.serialize()), 2).unwrap(); + // Create the PSKT. + let created = PSKT::::default().inputs_modifiable().outputs_modifiable(); + let ser = serde_json::to_string_pretty(&created).expect("Failed to serialize after creation"); + println!("Serialized after creation: {}", ser); + + // The first constructor entity receives the PSKT and adds an input. 
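+ // Role progression exercised by this example:
+ // Creator -> Constructor -> Combiner -> Updater -> Signer -> Combiner -> Finalizer -> Extractor.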
+ let pskt: PSKT = serde_json::from_str(&ser).expect("Failed to deserialize"); + // let in_0 = dummy_out_point(); + let input_0 = InputBuilder::default() + .utxo_entry(UtxoEntry { + amount: 12793000000000, + script_public_key: pay_to_script_hash_script(&redeem_script), + block_daa_score: 36151168, + is_coinbase: false, + }) + .previous_outpoint(TransactionOutpoint { + transaction_id: TransactionId::from_str("63020db736215f8b1105a9281f7bcbb6473d965ecc45bb2fb5da59bd35e6ff84").unwrap(), + index: 0, + }) + .sig_op_count(2) + .redeem_script(redeem_script) + .build() + .unwrap(); + let pskt_in0 = pskt.constructor().input(input_0); + let ser_in_0 = serde_json::to_string_pretty(&pskt_in0).expect("Failed to serialize after adding first input"); + println!("Serialized after adding first input: {}", ser_in_0); + + let combiner_pskt: PSKT = serde_json::from_str(&ser).expect("Failed to deserialize"); + let combined_pskt = (combiner_pskt + pskt_in0).unwrap(); + let ser_combined = serde_json::to_string_pretty(&combined_pskt).expect("Failed to serialize after combining"); + println!("Serialized after combining: {}", ser_combined); + + // The PSKT is now ready for handling with the updater role. + let updater_pskt: PSKT = serde_json::from_str(&ser_combined).expect("Failed to deserialize"); + let updater_pskt = updater_pskt.set_sequence(u64::MAX, 0).expect("Failed to set sequence"); + let ser_updated = serde_json::to_string_pretty(&updater_pskt).expect("Failed to serialize after setting sequence"); + println!("Serialized after setting sequence: {}", ser_updated); + + let signer_pskt: PSKT = serde_json::from_str(&ser_updated).expect("Failed to deserialize"); + let mut reused_values = SigHashReusedValues::new(); + let mut sign = |signer_pskt: PSKT, kp: &Keypair| { + signer_pskt + .pass_signature_sync(|tx, sighash| -> Result, String> { + let tx = dbg!(tx); + tx.tx + .inputs + .iter() + .enumerate() + .map(|(idx, _input)| { + let hash = calc_schnorr_signature_hash(&tx.as_verifiable(), idx, sighash[idx], &mut reused_values); + let msg = secp256k1::Message::from_digest_slice(hash.as_bytes().as_slice()).unwrap(); + Ok(SignInputOk { + signature: Signature::Schnorr(kp.sign_schnorr(msg)), + pub_key: kp.public_key(), + key_source: None, + }) + }) + .collect() + }) + .unwrap() + }; + let signed_0 = sign(signer_pskt.clone(), &kps[0]); + let signed_1 = sign(signer_pskt, &kps[1]); + let combiner_pskt: PSKT = serde_json::from_str(&ser_updated).expect("Failed to deserialize"); + let combined_signed = (combiner_pskt + signed_0).and_then(|combined| combined + signed_1).unwrap(); + let ser_combined_signed = serde_json::to_string_pretty(&combined_signed).expect("Failed to serialize after combining signed"); + println!("Combined Signed: {}", ser_combined_signed); + let pskt_finalizer: PSKT = serde_json::from_str(&ser_combined_signed).expect("Failed to deserialize"); + let pskt_finalizer = pskt_finalizer + .finalize_sync(|inner: &Inner| -> Result>, String> { + Ok(inner + .inputs + .iter() + .map(|input| -> Vec { + // todo: the required signature count can actually be retrieved from the redeem_script; sigs can be taken from partial sigs according to that count, + // considering the sorted order of the xpubs + + let signatures: Vec<_> = kps + .iter() + .flat_map(|kp| { + let sig = input.partial_sigs.get(&kp.public_key()).unwrap().into_bytes(); + iter::once(OpData65).chain(sig).chain([input.sighash_type.to_u8()]) + }) + .collect(); + signatures + .into_iter() + .chain( + ScriptBuilder::new() + .add_data(input.redeem_script.as_ref().unwrap().as_slice()) +
.unwrap() + .drain() + .iter() + .cloned(), + ) + .collect() + }) + .collect()) + }) + .unwrap(); + let ser_finalized = serde_json::to_string_pretty(&pskt_finalizer).expect("Failed to serialize after finalizing"); + println!("Finalized: {}", ser_finalized); + + let extractor_pskt: PSKT = serde_json::from_str(&ser_finalized).expect("Failed to deserialize"); + let tx = extractor_pskt.extract_tx().unwrap()(10).0; + let ser_tx = serde_json::to_string_pretty(&tx).unwrap(); + println!("Tx: {}", ser_tx); +} diff --git a/wallet/pskt/src/bundle.rs b/wallet/pskt/src/bundle.rs new file mode 100644 index 0000000000..6c926c6665 --- /dev/null +++ b/wallet/pskt/src/bundle.rs @@ -0,0 +1,358 @@ +use crate::error::Error; +use crate::prelude::*; +use crate::pskt::{Inner as PSKTInner, PSKT}; +// use crate::wasm::result; + +use kaspa_addresses::{Address, Prefix}; +// use kaspa_bip32::Prefix; +use kaspa_consensus_core::network::{NetworkId, NetworkType}; +use kaspa_consensus_core::tx::{ScriptPublicKey, TransactionOutpoint, UtxoEntry}; + +use hex; +use kaspa_txscript::{extract_script_pub_key_address, pay_to_address_script, pay_to_script_hash_script}; +use serde::{Deserialize, Serialize}; +use std::ops::Deref; + +/// +/// Bundle is a [`PSKT`] bundle - a sequence of PSKT transactions +/// meant for batch processing and transport as a +/// single serialized payload. +/// +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Bundle(pub Vec); + +impl From> for Bundle { + fn from(pskt: PSKT) -> Self { + Bundle(vec![pskt.deref().clone()]) + } +} + +impl From>> for Bundle { + fn from(pskts: Vec>) -> Self { + let inner_list = pskts.into_iter().map(|pskt| pskt.deref().clone()).collect(); + Bundle(inner_list) + } +} + +impl Bundle { + pub fn new() -> Self { + Self(Vec::new()) + } + + /// Adds an Inner instance to the bundle + pub fn add_inner(&mut self, inner: PSKTInner) { + self.0.push(inner); + } + + /// Adds a PSKT instance to the bundle + pub fn add_pskt(&mut self, pskt: PSKT) { + self.0.push(pskt.deref().clone()); + } + + /// Merges another bundle into the current bundle + pub fn merge(&mut self, other: Bundle) { + for inner in other.0 { + self.0.push(inner); + } + } + + /// Iterator over the inner PSKT instances + pub fn iter(&self) -> std::slice::Iter { + self.0.iter() + } + + pub fn serialize(&self) -> Result { + Ok(format!("PSKB{}", hex::encode(serde_json::to_string(self)?))) + } + + pub fn deserialize(hex_data: &str) -> Result { + if let Some(hex_data) = hex_data.strip_prefix("PSKB") { + Ok(serde_json::from_slice(hex::decode(hex_data)?.as_slice())?) 
+ } else { + Err(Error::PskbPrefixError) + } + } + + pub fn display_format(&self, network_id: NetworkId, sompi_formatter: F) -> String + where + F: Fn(u64, &NetworkType) -> String, + { + let mut result = "".to_string(); + + for (pskt_index, bundle_inner) in self.0.iter().enumerate() { + let pskt: PSKT = PSKT::::from(bundle_inner.to_owned()); + + result.push_str(&format!("\r\nPSKT #{:02}\r\n", pskt_index + 1)); + + for (key_inner, input) in pskt.clone().inputs.iter().enumerate() { + result.push_str(&format!("Input #{:02}\r\n", key_inner + 1)); + + if let Some(utxo_entry) = &input.utxo_entry { + result.push_str(&format!(" amount: {}\r\n", sompi_formatter(utxo_entry.amount, &NetworkType::from(network_id)))); + result.push_str(&format!( + " address: {}\r\n", + extract_script_pub_key_address(&utxo_entry.script_public_key, Prefix::from(network_id)) + .expect("Input address") + )); + } + } + + result.push_str("---\r\n"); + + for (key_inner, output) in pskt.clone().outputs.iter().enumerate() { + result.push_str(&format!("Output #{:02}\r\n", key_inner + 1)); + result.push_str(&format!(" amount: {}\r\n", sompi_formatter(output.amount, &NetworkType::from(network_id)))); + result.push_str(&format!( + " address: {}\r\n", + extract_script_pub_key_address(&output.script_public_key, Prefix::from(network_id)).expect("Output address") + )); + } + } + result + } +} + +impl AsRef<[PSKTInner]> for Bundle { + fn as_ref(&self) -> &[PSKTInner] { + self.0.as_slice() + } +} + +impl TryFrom for Bundle { + type Error = Error; + fn try_from(value: String) -> Result { + Bundle::deserialize(&value) + } +} + +impl TryFrom<&str> for Bundle { + type Error = Error; + fn try_from(value: &str) -> Result { + Bundle::deserialize(value) + } +} +impl TryFrom for String { + type Error = Error; + fn try_from(value: Bundle) -> Result { + match Bundle::serialize(&value) { + Ok(output) => Ok(output.to_owned()), + Err(e) => Err(Error::PskbSerializeError(e.to_string())), + } + } +} + +impl Default for Bundle { + fn default() -> Self { + Self::new() + } +} + +pub fn lock_script_sig_templating(payload: String, pubkey_bytes: Option<&[u8]>) -> Result, Error> { + let mut payload_bytes: Vec = hex::decode(payload)?; + + if let Some(pubkey) = pubkey_bytes { + let placeholder = b"{{pubkey}}"; + + // Search for the placeholder in the payload bytes and replace it with the public key. + if let Some(pos) = payload_bytes.windows(placeholder.len()).position(|window| window == placeholder) { + payload_bytes.splice(pos..pos + placeholder.len(), pubkey.iter().cloned()); + } + } + Ok(payload_bytes) +} + +pub fn script_sig_to_address(script_sig: &[u8], prefix: kaspa_addresses::Prefix) -> Result { + extract_script_pub_key_address(&pay_to_script_hash_script(script_sig), prefix).map_err(Error::P2SHExtractError) +} + +pub fn unlock_utxos_as_pskb( + utxo_references: Vec<(UtxoEntry, TransactionOutpoint)>, + recipient: &Address, + script_sig: Vec, + priority_fee_sompi_per_transaction: u64, +) -> Result { + // Fee per transaction. + // Check that each UTXO's amount can cover the priority fee.
+ utxo_references + .iter() + .map(|(entry, _)| { + if entry.amount <= priority_fee_sompi_per_transaction { + return Err(Error::ExcessUnlockFeeError); + } + Ok(()) + }) + .collect::, _>>()?; + + let recipient_spk = pay_to_address_script(recipient); + let (successes, errors): (Vec<_>, Vec<_>) = utxo_references + .into_iter() + .map(|(utxo_entry, outpoint)| { + unlock_utxo(&utxo_entry, &outpoint, &recipient_spk, &script_sig, priority_fee_sompi_per_transaction) + }) + .partition(Result::is_ok); + + let successful_bundles: Vec<_> = successes.into_iter().filter_map(Result::ok).collect(); + let error_list: Vec<_> = errors.into_iter().filter_map(Result::err).collect(); + + if !error_list.is_empty() { + return Err(Error::MultipleUnlockUtxoError(error_list)); + } + + let merged_bundle = successful_bundles.into_iter().fold(None, |acc: Option, bundle| match acc { + Some(mut merged_bundle) => { + merged_bundle.merge(bundle); + Some(merged_bundle) + } + None => Some(bundle), + }); + + match merged_bundle { + None => Err("Generating an empty PSKB".into()), + Some(bundle) => Ok(bundle), + } +} + +pub fn unlock_utxo( + utxo_entry: &UtxoEntry, + outpoint: &TransactionOutpoint, + script_public_key: &ScriptPublicKey, + script_sig: &[u8], + priority_fee_sompi: u64, +) -> Result { + let input = InputBuilder::default() + .utxo_entry(utxo_entry.to_owned()) + .previous_outpoint(outpoint.to_owned()) + .sig_op_count(1) + .redeem_script(script_sig.to_vec()) + .build()?; + + let output = OutputBuilder::default() + .amount(utxo_entry.amount - priority_fee_sompi) + .script_public_key(script_public_key.clone()) + .build()?; + + let pskt: PSKT = PSKT::::default().constructor().input(input).output(output); + Ok(pskt.into()) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::prelude::*; + use crate::role::Creator; + use crate::role::*; + use kaspa_consensus_core::tx::{TransactionId, TransactionOutpoint, UtxoEntry}; + use kaspa_txscript::{multisig_redeem_script, pay_to_script_hash_script}; + use secp256k1::Secp256k1; + use secp256k1::{rand::thread_rng, Keypair}; + use std::str::FromStr; + use std::sync::Once; + + static INIT: Once = Once::new(); + static mut CONTEXT: Option)>> = None; + + fn mock_context() -> &'static ([Keypair; 2], Vec) { + unsafe { + INIT.call_once(|| { + let kps = [Keypair::new(&Secp256k1::new(), &mut thread_rng()), Keypair::new(&Secp256k1::new(), &mut thread_rng())]; + let redeem_script: Vec = multisig_redeem_script(kps.iter().map(|pk| pk.x_only_public_key().0.serialize()), 2) + .expect("Test multisig redeem script"); + + CONTEXT = Some(Box::new((kps, redeem_script))); + }); + + CONTEXT.as_ref().unwrap() + } + } + + // Mock multisig PSKT from example + fn mock_pskt_constructor() -> PSKT { + let (_, redeem_script) = mock_context(); + let pskt = PSKT::::default().inputs_modifiable().outputs_modifiable(); + let input_0 = InputBuilder::default() + .utxo_entry(UtxoEntry { + amount: 12793000000000, + script_public_key: pay_to_script_hash_script(redeem_script), + block_daa_score: 36151168, + is_coinbase: false, + }) + .previous_outpoint(TransactionOutpoint { + transaction_id: TransactionId::from_str("63020db736215f8b1105a9281f7bcbb6473d965ecc45bb2fb5da59bd35e6ff84").unwrap(), + index: 0, + }) + .sig_op_count(2) + .redeem_script(redeem_script.to_owned()) + .build() + .expect("Mock PSKT constructor"); + + pskt.constructor().input(input_0) + } + + #[test] + fn test_pskb_serialization() { + let constructor = mock_pskt_constructor(); + let bundle = Bundle::from(constructor.clone()); + + println!("Bundle: 
{}", serde_json::to_string(&bundle).unwrap()); + + // Serialize Bundle + let serialized = bundle.serialize().map_err(|err| format!("Unable to serialize bundle: {err}")).unwrap(); + println!("Serialized: {}", serialized); + + assert!(!bundle.0.is_empty()); + + match Bundle::deserialize(&serialized) { + Ok(bundle_constructor_deser) => { + println!("Deserialized: {:?}", bundle_constructor_deser); + let pskt_constructor_deser: Option> = + bundle_constructor_deser.0.first().map(|inner| PSKT::from(inner.clone())); + match pskt_constructor_deser { + Some(_) => println!("PSKT deserialized successfully"), + None => println!("No elements in the inner list to deserialize"), + } + } + Err(e) => { + eprintln!("Failed to deserialize: {}", e); + panic!() + } + } + } + + #[test] + fn test_pskb_bundle_creation() { + let bundle = Bundle::new(); + assert!(bundle.0.is_empty()); + } + + #[test] + fn test_pskb_new_with_pskt() { + let pskt = PSKT::::default(); + let bundle = Bundle::from(pskt); + assert_eq!(bundle.0.len(), 1); + } + + #[test] + fn test_pskb_add_pskt() { + let mut bundle = Bundle::new(); + let pskt = PSKT::::default(); + bundle.add_pskt(pskt); + assert_eq!(bundle.0.len(), 1); + } + + #[test] + fn test_pskb_merge_bundles() { + let mut bundle1 = Bundle::new(); + let mut bundle2 = Bundle::new(); + + let inner1 = PSKTInner::default(); + let inner2 = PSKTInner::default(); + + bundle1.add_inner(inner1.clone()); + bundle2.add_inner(inner2.clone()); + + bundle1.merge(bundle2); + + assert_eq!(bundle1.0.len(), 2); + } +} diff --git a/wallet/pskt/src/convert.rs b/wallet/pskt/src/convert.rs new file mode 100644 index 0000000000..a3956c3bb5 --- /dev/null +++ b/wallet/pskt/src/convert.rs @@ -0,0 +1,115 @@ +//! +//! Conversion functions for converting between +//! the [`kaspa_consensus_client`], [`kaspa_consensus_core`] +//! and [`kaspa_wallet_pskt`](crate) types. +//! 
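+//!
+//! For example, a consensus transaction converts into the PSKT [`Inner`]
+//! structure via `TryFrom` (a sketch; `tx` is an assumed `cctx::Transaction`):
+//! ```ignore
+//! let inner = Inner::try_from(tx)?; // maps inputs and outputs through the PSKT builders
+//! ```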
+ +use crate::error::Error; +use crate::input::{Input, InputBuilder}; +use crate::output::{Output, OutputBuilder}; +use crate::pskt::{Global, Inner}; +use kaspa_consensus_client::{Transaction, TransactionInput, TransactionInputInner, TransactionOutput, TransactionOutputInner}; +use kaspa_consensus_core::tx as cctx; + +impl TryFrom for Inner { + type Error = Error; + fn try_from(_transaction: Transaction) -> Result { + Inner::try_from(cctx::Transaction::from(&_transaction)) + } +} + +impl TryFrom for Input { + type Error = Error; + fn try_from(input: TransactionInput) -> std::result::Result { + let TransactionInputInner { previous_outpoint, signature_script: _, sequence: _, sig_op_count, utxo } = &*input.inner(); + + let input = InputBuilder::default() + .utxo_entry(utxo.as_ref().ok_or(Error::MissingUtxoEntry)?.into()) + .previous_outpoint(previous_outpoint.into()) + // .sequence(*sequence) + // min_time + // partial_sigs + // sighash_type + // redeem_script + .sig_op_count(*sig_op_count) + // bip32_derivations + // final_script_sig + .build()?; + + Ok(input) + } +} + +impl TryFrom for Output { + type Error = Error; + fn try_from(output: TransactionOutput) -> std::result::Result { + // Self::Transaction(transaction) + + let TransactionOutputInner { value, script_public_key } = &*output.inner(); + + let output = OutputBuilder::default() + .amount(*value) + .script_public_key(script_public_key.clone()) + // .redeem_script + // .bip32_derivations + // .proprietaries + // .unknowns + .build()?; + + Ok(output) + } +} + +impl TryFrom<(cctx::Transaction, Vec<(&cctx::TransactionInput, &cctx::UtxoEntry)>)> for Inner { + type Error = Error; // Define your error type + + fn try_from( + (transaction, populated_inputs): (cctx::Transaction, Vec<(&cctx::TransactionInput, &cctx::UtxoEntry)>), + ) -> Result { + let inputs: Result, Self::Error> = populated_inputs + .into_iter() + .map(|(input, utxo)| { + InputBuilder::default() + .utxo_entry(utxo.to_owned().clone()) + .previous_outpoint(input.previous_outpoint) + .sig_op_count(input.sig_op_count) + .build() + .map_err(Error::TxToInnerConversionInputBuildingError) + // Handle the error + }) + .collect::>(); + + let outputs: Result, Self::Error> = transaction + .outputs + .iter() + .map(|output| { + Output::try_from(TransactionOutput::from(output.to_owned())).map_err(|e| Error::TxToInnerConversionError(Box::new(e))) + }) + .collect::>(); + + Ok(Inner { global: Global::default(), inputs: inputs?, outputs: outputs? }) + } +} + +impl TryFrom for Inner { + type Error = Error; + fn try_from(transaction: cctx::Transaction) -> Result { + let inputs = transaction + .inputs + .iter() + .map(|input| { + Input::try_from(TransactionInput::from(input.to_owned())).map_err(|e| Error::TxToInnerConversionError(Box::new(e))) + }) + .collect::>()?; + + let outputs = transaction + .outputs + .iter() + .map(|output| { + Output::try_from(TransactionOutput::from(output.to_owned())).map_err(|e| Error::TxToInnerConversionError(Box::new(e))) + }) + .collect::>()?; + + Ok(Inner { global: Global::default(), inputs, outputs }) + } +} diff --git a/wallet/pskt/src/error.rs b/wallet/pskt/src/error.rs new file mode 100644 index 0000000000..f3fd835701 --- /dev/null +++ b/wallet/pskt/src/error.rs @@ -0,0 +1,70 @@ +//! Error types for the PSKT crate. 
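+//!
+//! Plain strings convert into [`Error::Custom`] (a sketch):
+//! ```ignore
+//! let err: Error = "unexpected state".into();
+//! ```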
+ +use kaspa_txscript_errors::TxScriptError; + +use crate::input::InputBuilderError; + +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error("{0}")] + Custom(String), + #[error(transparent)] + ConstructorError(#[from] ConstructorError), + #[error("Out of bounds")] + OutOfBounds, + #[error("Missing UTXO entry")] + MissingUtxoEntry, + #[error("Missing redeem script")] + MissingRedeemScript, + #[error(transparent)] + InputBuilder(#[from] crate::input::InputBuilderError), + #[error(transparent)] + OutputBuilder(#[from] crate::output::OutputBuilderError), + #[error("Hex decode error: {0}")] + HexDecodeError(#[from] hex::FromHexError), + #[error("Json deserialize error: {0}")] + JsonDeserializeError(#[from] serde_json::Error), + #[error("PSKB serialize error: {0}")] + PskbSerializeError(String), + #[error("Unlock utxo error")] + MultipleUnlockUtxoError(Vec), + #[error("Unlock fees exceed available amount")] + ExcessUnlockFeeError, + #[error("Transaction output to output conversion error")] + TxToInnerConversionError(#[source] Box), + #[error("Transaction input building error in conversion")] + TxToInnerConversionInputBuildingError(#[source] InputBuilderError), + #[error("P2SH extraction error")] + P2SHExtractError(#[source] TxScriptError), + #[error("PSKB hex serialization error: {0}")] + PskbSerializeToHexError(String), + #[error("PSKB serialization requires 'PSKB' prefix")] + PskbPrefixError, + #[error("PSKT serialization requires 'PSKT' prefix")] + PsktPrefixError, +} +#[derive(thiserror::Error, Debug)] +pub enum ConstructorError { + #[error("InputNotModifiable")] + InputNotModifiable, + #[error("OutputNotModifiable")] + OutputNotModifiable, +} + +impl From for Error { + fn from(err: String) -> Self { + Self::Custom(err) + } +} + +impl From<&str> for Error { + fn from(err: &str) -> Self { + Self::Custom(err.to_string()) + } +} + +#[derive(Debug, thiserror::Error)] +pub enum ConversionError { + #[error("Invalid output conversion")] + InvalidOutput, +} diff --git a/wallet/pskt/src/global.rs b/wallet/pskt/src/global.rs new file mode 100644 index 0000000000..ad98f11d30 --- /dev/null +++ b/wallet/pskt/src/global.rs @@ -0,0 +1,170 @@ +//! Global PSKT data. + +use crate::pskt::{KeySource, Version}; +use crate::utils::combine_if_no_conflicts; +use derive_builder::Builder; +use kaspa_consensus_core::tx::TransactionId; +use serde::{Deserialize, Serialize}; +use std::{ + collections::{btree_map, BTreeMap}, + ops::Add, +}; + +type Xpub = kaspa_bip32::ExtendedPublicKey; + +#[derive(Debug, Clone, Builder, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +#[builder(default)] +pub struct Global { + /// The version number of this PSKT. + pub version: Version, + /// The version number of the transaction being built. + pub tx_version: u16, + #[builder(setter(strip_option))] + /// The transaction locktime to use if no inputs specify a required locktime. + pub fallback_lock_time: Option, + + pub inputs_modifiable: bool, + pub outputs_modifiable: bool, + + /// The number of inputs in this PSKT. + pub input_count: usize, + /// The number of outputs in this PSKT. + pub output_count: usize, + /// A map from xpub to the used key fingerprint and derivation path as defined by BIP 32. + pub xpubs: BTreeMap, + pub id: Option, + /// Proprietary key-value pairs for this PSKT. + pub proprietaries: BTreeMap, + /// Unknown key-value pairs for this PSKT.
+ #[serde(flatten)] + pub unknowns: BTreeMap, +} + +impl Add for Global { + type Output = Result; + + fn add(mut self, rhs: Self) -> Self::Output { + if self.version != rhs.version { + return Err(CombineError::VersionMismatch { this: self.version, that: rhs.version }); + } + if self.tx_version != rhs.tx_version { + return Err(CombineError::TxVersionMismatch { this: self.tx_version, that: rhs.tx_version }); + } + self.fallback_lock_time = match (self.fallback_lock_time, rhs.fallback_lock_time) { + (Some(lhs), Some(rhs)) if lhs != rhs => return Err(CombineError::LockTimeMismatch { this: lhs, that: rhs }), + (Some(v), _) | (_, Some(v)) => Some(v), + _ => None, + }; + // todo: discussable - maybe this should throw an error instead + self.inputs_modifiable &= rhs.inputs_modifiable; + self.outputs_modifiable &= rhs.outputs_modifiable; + self.input_count = self.input_count.max(rhs.input_count); + self.output_count = self.output_count.max(rhs.output_count); + // BIP 174: The Combiner must remove any duplicate key-value pairs, in accordance with + // the specification. It can pick arbitrarily when conflicts occur. + + // Merging xpubs + for (xpub, KeySource { key_fingerprint: fingerprint1, derivation_path: derivation1 }) in rhs.xpubs { + match self.xpubs.entry(xpub) { + btree_map::Entry::Vacant(entry) => { + entry.insert(KeySource::new(fingerprint1, derivation1)); + } + btree_map::Entry::Occupied(mut entry) => { + // In case of a conflict we select the version with the following algorithm: + // 1) if everything is equal we do nothing + // 2) report an error if + // - derivation paths are equal and fingerprints are not + // - derivation paths are of the same length, but not equal + // - derivation paths have different lengths, but the shorter one + // is not the strict suffix of the longer one + // 3) choose the longest derivation otherwise + + let KeySource { key_fingerprint: fingerprint2, derivation_path: derivation2 } = entry.get().clone(); + + if (derivation1 == derivation2 && fingerprint1 == fingerprint2) + || (derivation1.len() < derivation2.len() + && derivation1.as_ref() == &derivation2.as_ref()[derivation2.len() - derivation1.len()..]) + { + continue; + } else if derivation2.as_ref() == &derivation1.as_ref()[derivation1.len() - derivation2.len()..] { + entry.insert(KeySource::new(fingerprint1, derivation1)); + continue; + } + return Err(CombineError::InconsistentKeySources(entry.key().clone())); + } + } + } + self.id = match (self.id, rhs.id) { + (Some(lhs), Some(rhs)) if lhs != rhs => return Err(CombineError::TransactionIdMismatch { this: lhs, that: rhs }), + (Some(v), _) | (_, Some(v)) => Some(v), + _ => None, + }; + + self.proprietaries = + combine_if_no_conflicts(self.proprietaries, rhs.proprietaries).map_err(CombineError::NotCompatibleProprietary)?; + self.unknowns = combine_if_no_conflicts(self.unknowns, rhs.unknowns).map_err(CombineError::NotCompatibleUnknownField)?; + Ok(self) + } +} + +impl Default for Global { + fn default() -> Self { + Global { + version: Version::Zero, + tx_version: kaspa_consensus_core::constants::TX_VERSION, + fallback_lock_time: None, + inputs_modifiable: false, + outputs_modifiable: false, + input_count: 0, + output_count: 0, + xpubs: Default::default(), + id: None, + proprietaries: Default::default(), + unknowns: Default::default(), + } + } +} + +/// Error combining two global maps. +#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)] +pub enum CombineError { + #[error("The version numbers are not the same")] + /// The version numbers are not the same.
+ VersionMismatch { + /// Attempted to combine a PSKT with `this` version. + this: Version, + /// Into a PSKT with `that` version. + that: Version, + }, + #[error("The transaction version numbers are not the same")] + TxVersionMismatch { + /// Attempted to combine a PSKT with `this` tx version. + this: u16, + /// Into a PSKT with `that` tx version. + that: u16, + }, + #[error("The transaction lock times are not the same")] + LockTimeMismatch { + /// Attempted to combine a PSKT with `this` lock time. + this: u64, + /// Into a PSKT with `that` lock time. + that: u64, + }, + #[error("The transaction ids are not the same")] + TransactionIdMismatch { + /// Attempted to combine a PSKT with `this` tx id. + this: TransactionId, + /// Into a PSKT with `that` tx id. + that: TransactionId, + }, + + #[error("combining PSKT, key-source conflict for xpub {0}")] + /// Xpubs have inconsistent key sources. + InconsistentKeySources(Xpub), + + #[error("Two different unknown field values")] + NotCompatibleUnknownField(crate::utils::Error), + #[error("Two different proprietary values")] + NotCompatibleProprietary(crate::utils::Error), +} diff --git a/wallet/pskt/src/input.rs b/wallet/pskt/src/input.rs new file mode 100644 index 0000000000..8d01a7d48d --- /dev/null +++ b/wallet/pskt/src/input.rs @@ -0,0 +1,168 @@ +//! PSKT input structure. + +use crate::pskt::{KeySource, PartialSigs}; +use crate::utils::{combine_if_no_conflicts, Error as CombineMapErr}; +use derive_builder::Builder; +use kaspa_consensus_core::{ + hashing::sighash_type::{SigHashType, SIG_HASH_ALL}, + tx::{TransactionId, TransactionOutpoint, UtxoEntry}, +}; +use serde::{Deserialize, Serialize}; +use std::{collections::BTreeMap, marker::PhantomData, ops::Add}; + +// todo: add an unknown field? Combine by deduplicating; if there are different values, return an error? +#[derive(Builder, Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +#[builder(default)] +#[builder(setter(skip))] +pub struct Input { + #[builder(setter(strip_option))] + pub utxo_entry: Option, + #[builder(setter)] + pub previous_outpoint: TransactionOutpoint, + /// The sequence number of this input. + /// + /// If omitted, assumed to be the final sequence number. + pub sequence: Option, + #[builder(setter)] + /// The minimum Unix timestamp that this input requires to be set as the transaction's lock time. + pub min_time: Option, + /// A map from public keys to their corresponding signature as would be + /// pushed to the stack from a scriptSig. + pub partial_sigs: PartialSigs, + #[builder(setter)] + /// The sighash type to be used for this input. Signatures for this input + /// must use this sighash type. + pub sighash_type: SigHashType, + #[serde(with = "kaspa_utils::serde_bytes_optional")] + #[builder(setter(strip_option))] + /// The redeem script for this input. + pub redeem_script: Option>, + #[builder(setter(strip_option))] + pub sig_op_count: Option, + /// A map from public keys needed to sign this input to their corresponding + /// master key fingerprints and derivation paths. + pub bip32_derivations: BTreeMap>, + #[serde(with = "kaspa_utils::serde_bytes_optional")] + /// The finalized, fully-constructed scriptSig with signatures and any other + /// scripts necessary for this input to pass validation. + pub final_script_sig: Option>, + #[serde(skip_serializing, default)] + pub(crate) hidden: PhantomData<()>, // prevents manual filling of fields + #[builder(setter)] + /// Proprietary key-value pairs for this input.
+ pub proprietaries: BTreeMap, + #[serde(flatten)] + #[builder(setter)] + /// Unknown key-value pairs for this input. + pub unknowns: BTreeMap, +} + +impl Default for Input { + fn default() -> Self { + Self { + utxo_entry: Default::default(), + previous_outpoint: Default::default(), + sequence: Default::default(), + min_time: Default::default(), + partial_sigs: Default::default(), + sighash_type: SIG_HASH_ALL, + redeem_script: Default::default(), + sig_op_count: Default::default(), + bip32_derivations: Default::default(), + final_script_sig: Default::default(), + hidden: Default::default(), + proprietaries: Default::default(), + unknowns: Default::default(), + } + } +} + +impl Add for Input { + type Output = Result; + + fn add(mut self, rhs: Self) -> Self::Output { + if self.previous_outpoint.transaction_id != rhs.previous_outpoint.transaction_id { + return Err(CombineError::PreviousTxidMismatch { + this: self.previous_outpoint.transaction_id, + that: rhs.previous_outpoint.transaction_id, + }); + } + + if self.previous_outpoint.index != rhs.previous_outpoint.index { + return Err(CombineError::SpentOutputIndexMismatch { + this: self.previous_outpoint.index, + that: rhs.previous_outpoint.index, + }); + } + self.utxo_entry = match (self.utxo_entry.take(), rhs.utxo_entry) { + (None, None) => None, + (Some(utxo), None) | (None, Some(utxo)) => Some(utxo), + (Some(left), Some(right)) if left == right => Some(left), + (Some(left), Some(right)) => return Err(CombineError::NotCompatibleUtxos { this: left, that: right }), + }; + + // todo: discuss merging; if the sequences are equal, combine - otherwise use the input with the bigger sequence number as-is + self.sequence = self.sequence.max(rhs.sequence); + self.min_time = self.min_time.max(rhs.min_time); + self.partial_sigs.extend(rhs.partial_sigs); + // todo: combine sighash? Or always use sighash-all, since all signatures must be passed after the construction step completes + // self.sighash_type + + self.redeem_script = match (self.redeem_script.take(), rhs.redeem_script) { + (None, None) => None, + (Some(script), None) | (None, Some(script)) => Some(script), + (Some(script_left), Some(script_right)) if script_left == script_right => Some(script_left), + (Some(script_left), Some(script_right)) => { + return Err(CombineError::NotCompatibleRedeemScripts { this: script_left, that: script_right }) + } + }; + + // todo: is the Combiner allowed to change the final script sig? + self.final_script_sig = match (self.final_script_sig.take(), rhs.final_script_sig) { + (None, None) => None, + (Some(script), None) | (None, Some(script)) => Some(script), + (Some(script_left), Some(script_right)) if script_left == script_right => Some(script_left), + (Some(script_left), Some(script_right)) => { + return Err(CombineError::NotCompatibleRedeemScripts { this: script_left, that: script_right }) + } + }; + + self.bip32_derivations = combine_if_no_conflicts(self.bip32_derivations, rhs.bip32_derivations)?; + self.proprietaries = + combine_if_no_conflicts(self.proprietaries, rhs.proprietaries).map_err(CombineError::NotCompatibleProprietary)?; + self.unknowns = combine_if_no_conflicts(self.unknowns, rhs.unknowns).map_err(CombineError::NotCompatibleUnknownField)?; + + Ok(self) + } +} + +/// Error combining two input maps. +#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)] +pub enum CombineError { + #[error("The previous txids are not the same")] + PreviousTxidMismatch { + /// Attempted to combine a PSKT with `this` previous txid.
+ this: TransactionId, + /// Into a PSKT with `that` previous txid. + that: TransactionId, + }, + #[error("The spent output indexes are not the same")] + SpentOutputIndexMismatch { + /// Attempted to combine a PSKT with `this` spent output index. + this: u32, + /// Into a PSKT with `that` spent output index. + that: u32, + }, + #[error("Two different redeem scripts detected")] + NotCompatibleRedeemScripts { this: Vec, that: Vec }, + #[error("Two different utxos detected")] + NotCompatibleUtxos { this: UtxoEntry, that: UtxoEntry }, + + #[error("Two different derivations for the same key")] + NotCompatibleBip32Derivations(#[from] CombineMapErr>), + #[error("Two different unknown field values")] + NotCompatibleUnknownField(CombineMapErr), + #[error("Two different proprietary values")] + NotCompatibleProprietary(CombineMapErr), +} diff --git a/wallet/pskt/src/lib.rs b/wallet/pskt/src/lib.rs new file mode 100644 index 0000000000..ed57e9b1ed --- /dev/null +++ b/wallet/pskt/src/lib.rs @@ -0,0 +1,32 @@ +//! +//! PSKT is a crate for working with Partially Signed Kaspa Transactions (PSKTs). +//! This crate provides the following primitives: `PSKT`, `PSKTBuilder` and `Bundle`. +//! The `Bundle` struct is used for PSKT exchange payload serialization and carries +//! multiple `PSKT` instances, allowing for the exchange of Kaspa sweep transactions. +//! + +pub mod bundle; +pub mod error; +pub mod global; +pub mod input; +pub mod output; +pub mod pskt; +pub mod role; +pub mod wasm; + +mod convert; +mod utils; + +pub mod prelude { + pub use crate::bundle::Bundle; + pub use crate::bundle::*; + pub use crate::global::Global; + pub use crate::input::Input; + pub use crate::output::Output; + pub use crate::pskt::*; + + // not quite sure why this warns of unused imports; + // perhaps because the role enums have no variants? + #[allow(unused_imports)] + pub use crate::role::*; +} diff --git a/wallet/pskt/src/output.rs b/wallet/pskt/src/output.rs new file mode 100644 index 0000000000..36b09edaea --- /dev/null +++ b/wallet/pskt/src/output.rs @@ -0,0 +1,85 @@ +//! PSKT output structure. + +use crate::pskt::KeySource; +use crate::utils::combine_if_no_conflicts; +use derive_builder::Builder; +use kaspa_consensus_core::tx::ScriptPublicKey; +use serde::{Deserialize, Serialize}; +use std::{collections::BTreeMap, ops::Add}; + +#[derive(Builder, Default, Serialize, Deserialize, Clone, Debug)] +#[serde(rename_all = "camelCase")] +#[builder(default)] +pub struct Output { + /// The output's amount (serialized as sompi). + pub amount: u64, + /// The script for this output, also known as the scriptPubKey. + pub script_public_key: ScriptPublicKey, + #[builder(setter(strip_option))] + #[serde(with = "kaspa_utils::serde_bytes_optional")] + /// The redeem script for this output. + pub redeem_script: Option>, + /// A map from public keys needed to spend this output to their + /// corresponding master key fingerprints and derivation paths. + pub bip32_derivations: BTreeMap>, + /// Proprietary key-value pairs for this output. + pub proprietaries: BTreeMap, + #[serde(flatten)] + /// Unknown key-value pairs for this output.
+ pub unknowns: BTreeMap, +} + +impl Add for Output { + type Output = Result; + + fn add(mut self, rhs: Self) -> Self::Output { + if self.amount != rhs.amount { + return Err(CombineError::AmountMismatch { this: self.amount, that: rhs.amount }); + } + if self.script_public_key != rhs.script_public_key { + return Err(CombineError::ScriptPubkeyMismatch { this: self.script_public_key, that: rhs.script_public_key }); + } + self.redeem_script = match (self.redeem_script.take(), rhs.redeem_script) { + (None, None) => None, + (Some(script), None) | (None, Some(script)) => Some(script), + (Some(script_left), Some(script_right)) if script_left == script_right => Some(script_left), + (Some(script_left), Some(script_right)) => { + return Err(CombineError::NotCompatibleRedeemScripts { this: script_left, that: script_right }) + } + }; + self.bip32_derivations = combine_if_no_conflicts(self.bip32_derivations, rhs.bip32_derivations)?; + self.proprietaries = + combine_if_no_conflicts(self.proprietaries, rhs.proprietaries).map_err(CombineError::NotCompatibleProprietary)?; + self.unknowns = combine_if_no_conflicts(self.unknowns, rhs.unknowns).map_err(CombineError::NotCompatibleUnknownField)?; + + Ok(self) + } +} + +/// Error combining two output maps. +#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)] +pub enum CombineError { + #[error("The amounts are not the same")] + AmountMismatch { + /// Attempted to combine a PSKT with `this` amount. + this: u64, + /// Into a PSKT with `that` amount. + that: u64, + }, + #[error("The script_pubkeys are not the same")] + ScriptPubkeyMismatch { + /// Attempted to combine a PSKT with `this` script_pubkey. + this: ScriptPublicKey, + /// Into a PSKT with `that` script_pubkey. + that: ScriptPublicKey, + }, + #[error("Two different redeem scripts detected")] + NotCompatibleRedeemScripts { this: Vec, that: Vec }, + + #[error("Two different derivations for the same key")] + NotCompatibleBip32Derivations(#[from] crate::utils::Error>), + #[error("Two different unknown field values")] + NotCompatibleUnknownField(crate::utils::Error), + #[error("Two different proprietary values")] + NotCompatibleProprietary(crate::utils::Error), +} diff --git a/wallet/pskt/src/pskt.rs b/wallet/pskt/src/pskt.rs new file mode 100644 index 0000000000..73f87a628f --- /dev/null +++ b/wallet/pskt/src/pskt.rs @@ -0,0 +1,493 @@ +//! +//! Partially Signed Kaspa Transaction (PSKT) +//! + +use kaspa_bip32::{secp256k1, DerivationPath, KeyFingerprint}; +use serde::{Deserialize, Serialize}; +use serde_repr::{Deserialize_repr, Serialize_repr}; +use std::{collections::BTreeMap, fmt::Display, fmt::Formatter, future::Future, marker::PhantomData, ops::Deref}; + +pub use crate::error::Error; +pub use crate::global::{Global, GlobalBuilder}; +pub use crate::input::{Input, InputBuilder}; +pub use crate::output::{Output, OutputBuilder}; +pub use crate::role::{Combiner, Constructor, Creator, Extractor, Finalizer, Signer, Updater}; +use kaspa_consensus_core::tx::UtxoEntry; +use kaspa_consensus_core::{ + hashing::{sighash::SigHashReusedValues, sighash_type::SigHashType}, + subnets::SUBNETWORK_ID_NATIVE, + tx::{MutableTransaction, SignableTransaction, Transaction, TransactionId, TransactionInput, TransactionOutput}, +}; +use kaspa_txscript::{caches::Cache, TxScriptEngine}; + +#[derive(Debug, Default, Serialize, Deserialize, Clone)] +#[serde(rename_all = "camelCase")] +pub struct Inner { + /// The global map.
+ pub global: Global, + /// The corresponding key-value map for each input in the unsigned transaction. + pub inputs: Vec, + /// The corresponding key-value map for each output in the unsigned transaction. + pub outputs: Vec, +} + +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash, Serialize_repr, Deserialize_repr)] +#[repr(u8)] +pub enum Version { + #[default] + Zero = 0, +} + +impl Display for Version { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + Version::Zero => write!(f, "{}", Version::Zero as u8), + } + } +} + +/// Full information on the used extended public key: fingerprint of the +/// master extended public key and a derivation path from it. +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub struct KeySource { + #[serde(with = "kaspa_utils::serde_bytes_fixed")] + pub key_fingerprint: KeyFingerprint, + pub derivation_path: DerivationPath, +} + +impl KeySource { + pub fn new(key_fingerprint: KeyFingerprint, derivation_path: DerivationPath) -> Self { + Self { key_fingerprint, derivation_path } + } +} + +pub type PartialSigs = BTreeMap; + +#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Copy, Clone)] +#[serde(rename_all = "camelCase")] +pub enum Signature { + ECDSA(secp256k1::ecdsa::Signature), + Schnorr(secp256k1::schnorr::Signature), +} + +impl Signature { + pub fn into_bytes(self) -> [u8; 64] { + match self { + Signature::ECDSA(s) => s.serialize_compact(), + Signature::Schnorr(s) => s.serialize(), + } + } +} + +/// +/// A Partially Signed Kaspa Transaction (PSKT) is a standardized format +/// that allows multiple participants to collaborate in creating and signing +/// a Kaspa transaction. PSKT enables the exchange of incomplete transaction +/// data between different wallets or entities, allowing each participant +/// to add their signature or inputs in stages. This facilitates more complex +/// transaction workflows, such as multi-signature setups or hardware wallet +/// interactions, by ensuring that sensitive data remains secure while +/// enabling cooperation across different devices or platforms without +/// exposing private keys. +/// +/// Please note that due to transaction mass limits and potential of +/// a wallet aggregating large UTXO sets, the PSKT [`Bundle`](crate::bundle::Bundle) primitive +/// is used to represent a collection of PSKTs and should be used for +/// PSKT serialization and transport. PSKT is an internal implementation +/// primitive that represents each transaction in the bundle. +/// +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct PSKT { + #[serde(flatten)] + inner_pskt: Inner, + #[serde(skip_serializing, default)] + role: PhantomData, +} + +impl From for PSKT { + fn from(inner_pskt: Inner) -> Self { + PSKT { inner_pskt, role: Default::default() } + } +} + +impl Clone for PSKT { + fn clone(&self) -> Self { + PSKT { inner_pskt: self.inner_pskt.clone(), role: Default::default() } + } +} + +impl Deref for PSKT { + type Target = Inner; + + fn deref(&self) -> &Self::Target { + &self.inner_pskt + } +} + +impl PSKT { + fn unsigned_tx(&self) -> SignableTransaction { + let tx = Transaction::new( + self.global.tx_version, + self.inputs + .iter() + .map(|Input { previous_outpoint, sequence, sig_op_count, .. 
}| TransactionInput { + previous_outpoint: *previous_outpoint, + signature_script: vec![], + sequence: sequence.unwrap_or(u64::MAX), + sig_op_count: sig_op_count.unwrap_or(0), + }) + .collect(), + self.outputs + .iter() + .map(|Output { amount, script_public_key, .. }: &Output| TransactionOutput { + value: *amount, + script_public_key: script_public_key.clone(), + }) + .collect(), + self.determine_lock_time(), + SUBNETWORK_ID_NATIVE, + 0, + vec![], + ); + let entries = self.inputs.iter().filter_map(|Input { utxo_entry, .. }| utxo_entry.clone()).collect(); + SignableTransaction::with_entries(tx, entries) + } + + fn calculate_id_internal(&self) -> TransactionId { + self.unsigned_tx().tx.id() + } + + fn determine_lock_time(&self) -> u64 { + self.inputs.iter().map(|input: &Input| input.min_time).max().unwrap_or(self.global.fallback_lock_time).unwrap_or(0) + } + + pub fn to_hex(&self) -> Result { + Ok(format!("PSKT{}", hex::encode(serde_json::to_string(self)?))) + } + + pub fn from_hex(hex_data: &str) -> Result { + if let Some(hex_data) = hex_data.strip_prefix("PSKT") { + Ok(serde_json::from_slice(hex::decode(hex_data)?.as_slice())?) + } else { + Err(Error::PsktPrefixError) + } + } +} + +impl Default for PSKT { + fn default() -> Self { + PSKT { inner_pskt: Default::default(), role: Default::default() } + } +} + +impl PSKT { + /// Sets the fallback lock time. + pub fn fallback_lock_time(mut self, fallback: u64) -> Self { + self.inner_pskt.global.fallback_lock_time = Some(fallback); + self + } + + // todo generic const + /// Sets the inputs modifiable bit in the transaction modifiable flags. + pub fn inputs_modifiable(mut self) -> Self { + self.inner_pskt.global.inputs_modifiable = true; + self + } + // todo generic const + /// Sets the outputs modifiable bit in the transaction modifiable flags. + pub fn outputs_modifiable(mut self) -> Self { + self.inner_pskt.global.outputs_modifiable = true; + self + } + + pub fn constructor(self) -> PSKT { + PSKT { inner_pskt: self.inner_pskt, role: Default::default() } + } +} + +impl PSKT { + // todo generic const + /// Marks that the `PSKT` cannot have any more inputs added to it. + pub fn no_more_inputs(mut self) -> Self { + self.inner_pskt.global.inputs_modifiable = false; + self + } + // todo generic const + /// Marks that the `PSKT` cannot have any more outputs added to it. + pub fn no_more_outputs(mut self) -> Self { + self.inner_pskt.global.outputs_modifiable = false; + self + } + + /// Adds an input to the PSKT. + pub fn input(mut self, input: Input) -> Self { + self.inner_pskt.inputs.push(input); + self.inner_pskt.global.input_count += 1; + self + } + + /// Adds an output to the PSKT. + pub fn output(mut self, output: Output) -> Self { + self.inner_pskt.outputs.push(output); + self.inner_pskt.global.output_count += 1; + self + } + + /// Returns a PSKT [`Updater`] once construction is completed.
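+    /// A hypothetical flow sketch (`input` and `output` values assumed):
+    /// ```ignore
+    /// let pskt = PSKT::<Creator>::default().constructor().input(input).output(output);
+    /// let updater = pskt.updater(); // applies no_more_inputs() and no_more_outputs()
+    /// ```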
+    pub fn updater(self) -> PSKT<Updater> {
+        let pskt = self.no_more_inputs().no_more_outputs();
+        PSKT { inner_pskt: pskt.inner_pskt, role: Default::default() }
+    }
+
+    pub fn signer(self) -> PSKT<Signer> {
+        self.updater().signer()
+    }
+
+    pub fn combiner(self) -> PSKT<Combiner> {
+        PSKT { inner_pskt: self.inner_pskt, role: Default::default() }
+    }
+}
+
+impl PSKT<Updater> {
+    pub fn set_sequence(mut self, n: u64, input_index: usize) -> Result<Self, Error> {
+        self.inner_pskt.inputs.get_mut(input_index).ok_or(Error::OutOfBounds)?.sequence = Some(n);
+        Ok(self)
+    }
+
+    pub fn signer(self) -> PSKT<Signer> {
+        PSKT { inner_pskt: self.inner_pskt, role: Default::default() }
+    }
+
+    pub fn combiner(self) -> PSKT<Combiner> {
+        PSKT { inner_pskt: self.inner_pskt, role: Default::default() }
+    }
+}
+
+impl PSKT<Signer> {
+    // todo use iterator instead of vector
+    pub fn pass_signature_sync<SignFn, E>(mut self, sign_fn: SignFn) -> Result<Self, E>
+    where
+        E: Display,
+        SignFn: FnOnce(SignableTransaction, Vec<SigHashType>) -> Result<Vec<SignInputOk>, E>,
+    {
+        let unsigned_tx = self.unsigned_tx();
+        let sighashes = self.inputs.iter().map(|input| input.sighash_type).collect();
+        self.inner_pskt.inputs.iter_mut().zip(sign_fn(unsigned_tx, sighashes)?).for_each(
+            |(input, SignInputOk { signature, pub_key, key_source })| {
+                input.bip32_derivations.insert(pub_key, key_source);
+                input.partial_sigs.insert(pub_key, signature);
+            },
+        );
+
+        Ok(self)
+    }
+    // todo use iterator instead of vector
+    pub async fn pass_signature<SignFn, Fut, E>(mut self, sign_fn: SignFn) -> Result<Self, E>
+    where
+        E: Display,
+        Fut: Future<Output = Result<Vec<SignInputOk>, E>>,
+        SignFn: FnOnce(SignableTransaction, Vec<SigHashType>) -> Fut,
+    {
+        let unsigned_tx = self.unsigned_tx();
+        let sighashes = self.inputs.iter().map(|input| input.sighash_type).collect();
+        self.inner_pskt.inputs.iter_mut().zip(sign_fn(unsigned_tx, sighashes).await?).for_each(
+            |(input, SignInputOk { signature, pub_key, key_source })| {
+                input.bip32_derivations.insert(pub_key, key_source);
+                input.partial_sigs.insert(pub_key, signature);
+            },
+        );
+        Ok(self)
+    }
+
+    pub fn calculate_id(&self) -> TransactionId {
+        self.calculate_id_internal()
+    }
+
+    pub fn finalizer(self) -> PSKT<Finalizer> {
+        PSKT { inner_pskt: self.inner_pskt, role: Default::default() }
+    }
+
+    pub fn combiner(self) -> PSKT<Combiner> {
+        PSKT { inner_pskt: self.inner_pskt, role: Default::default() }
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct SignInputOk {
+    pub signature: Signature,
+    pub pub_key: secp256k1::PublicKey,
+    pub key_source: Option<KeySource>,
+}
+
+impl std::ops::Add<PSKT<Combiner>> for PSKT<Combiner> {
+    type Output = Result<Self, CombineError>;
+
+    fn add(mut self, mut rhs: PSKT<Combiner>) -> Self::Output {
+        self.inner_pskt.global = (self.inner_pskt.global + rhs.inner_pskt.global)?;
+        macro_rules! combine {
+            ($left:expr, $right:expr, $err: ty) => {
+                if $left.len() > $right.len() {
+                    $left.iter_mut().zip($right.iter_mut()).try_for_each(|(left, right)| -> Result<(), $err> {
+                        *left = (std::mem::take(left) + std::mem::take(right))?;
+                        Ok(())
+                    })?;
+                    $left
+                } else {
+                    $right.iter_mut().zip($left.iter_mut()).try_for_each(|(left, right)| -> Result<(), $err> {
+                        *left = (std::mem::take(left) + std::mem::take(right))?;
+                        Ok(())
+                    })?;
+                    $right
+                }
+            };
+        }
+        // todo add sort to build deterministic combination
+        self.inner_pskt.inputs = combine!(self.inner_pskt.inputs, rhs.inner_pskt.inputs, crate::input::CombineError);
+        self.inner_pskt.outputs = combine!(self.inner_pskt.outputs, rhs.inner_pskt.outputs, crate::output::CombineError);
+        Ok(self)
+    }
+}
+
+impl PSKT<Combiner> {
+    pub fn signer(self) -> PSKT<Signer> {
+        PSKT { inner_pskt: self.inner_pskt, role: Default::default() }
+    }
+    pub fn finalizer(self) -> PSKT<Finalizer> {
+        PSKT { inner_pskt: self.inner_pskt, role: Default::default() }
+    }
+}
+
+impl PSKT<Finalizer> {
+    pub fn finalize_sync<E: Display>(
+        self,
+        final_sig_fn: impl FnOnce(&Inner) -> Result<Vec<Vec<u8>>, E>,
+    ) -> Result<Self, FinalizeError<E>> {
+        let sigs = final_sig_fn(&self);
+        self.finalize_internal(sigs)
+    }
+
+    pub async fn finalize<F, Fut, E>(self, final_sig_fn: F) -> Result<Self, FinalizeError<E>>
+    where
+        E: Display,
+        F: FnOnce(&Inner) -> Fut,
+        Fut: Future<Output = Result<Vec<Vec<u8>>, E>>,
+    {
+        let sigs = final_sig_fn(&self).await;
+        self.finalize_internal(sigs)
+    }
+
+    pub fn id(&self) -> Option<TransactionId> {
+        self.global.id
+    }
+
+    pub fn extractor(self) -> Result<PSKT<Extractor>, TxNotFinalized> {
+        if self.global.id.is_none() {
+            Err(TxNotFinalized {})
+        } else {
+            Ok(PSKT { inner_pskt: self.inner_pskt, role: Default::default() })
+        }
+    }
+
+    fn finalize_internal<E: Display>(mut self, sigs: Result<Vec<Vec<u8>>, E>) -> Result<Self, FinalizeError<E>> {
+        let sigs = sigs?;
+        if sigs.len() != self.inputs.len() {
+            return Err(FinalizeError::WrongFinalizedSigsCount { expected: self.inputs.len(), actual: sigs.len() });
+        }
+        self.inner_pskt.inputs.iter_mut().enumerate().zip(sigs).try_for_each(|((idx, input), sig)| {
+            if sig.is_empty() {
+                return Err(FinalizeError::EmptySignature(idx));
+            }
+            input.sequence = Some(input.sequence.unwrap_or(u64::MAX)); // todo discussable
+            input.final_script_sig = Some(sig);
+            Ok(())
+        })?;
+        self.inner_pskt.global.id = Some(self.calculate_id_internal());
+        Ok(self)
+    }
+}
+
+impl PSKT<Extractor> {
+    pub fn extract_tx_unchecked(self) -> Result<impl FnOnce(u64) -> (Transaction, Vec<Option<UtxoEntry>>), TxNotFinalized> {
+        let tx = self.unsigned_tx();
+        let entries = tx.entries;
+        let mut tx = tx.tx;
+        tx.inputs.iter_mut().zip(self.inner_pskt.inputs).try_for_each(|(dest, src)| {
+            dest.signature_script = src.final_script_sig.ok_or(TxNotFinalized {})?;
+            Ok(())
+        })?;
+        Ok(move |mass| {
+            tx.set_mass(mass);
+            (tx, entries)
+        })
+    }
+
+    pub fn extract_tx(self) -> Result<impl FnOnce(u64) -> (Transaction, Vec<Option<UtxoEntry>>), ExtractError> {
+        let (tx, entries) = self.extract_tx_unchecked()?(0);
+
+        let tx = MutableTransaction::with_entries(tx, entries.into_iter().flatten().collect());
+        use kaspa_consensus_core::tx::VerifiableTransaction;
+        {
+            let tx = tx.as_verifiable();
+            let cache = Cache::new(10_000);
+            let mut reused_values = SigHashReusedValues::new();
+
+            tx.populated_inputs().enumerate().try_for_each(|(idx, (input, entry))| {
+                TxScriptEngine::from_transaction_input(&tx, input, idx, entry, &mut reused_values, &cache)?.execute()?;
+                <Result<(), ExtractError>>::Ok(())
+            })?;
+        }
+        let entries = tx.entries;
+        let tx = tx.tx;
+        let closure = move |mass| {
+            tx.set_mass(mass);
+            (tx, entries)
+        };
+        Ok(closure)
+    }
+}
+
+/// Error combining PSKTs.
+#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)]
+pub enum CombineError {
+    #[error(transparent)]
+    Global(#[from] crate::global::CombineError),
+    #[error(transparent)]
+    Inputs(#[from] crate::input::CombineError),
+    #[error(transparent)]
+    Outputs(#[from] crate::output::CombineError),
+}
+
+#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)]
+pub enum FinalizeError<E> {
+    #[error("Signatures count mismatch")]
+    WrongFinalizedSigsCount { expected: usize, actual: usize },
+    #[error("Signature at index {0} is empty")]
+    EmptySignature(usize),
+    #[error(transparent)]
+    FinalizeCb(#[from] E),
+}
+
+#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)]
+pub enum ExtractError {
+    #[error(transparent)]
+    TxScriptError(#[from] kaspa_txscript_errors::TxScriptError),
+    #[error(transparent)]
+    TxNotFinalized(#[from] TxNotFinalized),
+}
+
+#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)]
+#[error("Transaction is not finalized")]
+pub struct TxNotFinalized {}
+
+#[cfg(test)]
+mod tests {
+
+    // #[test]
+    // fn it_works() {
+    //     let result = add(2, 2);
+    //     assert_eq!(result, 4);
+    // }
+}
diff --git a/wallet/pskt/src/role.rs b/wallet/pskt/src/role.rs
new file mode 100644
index 0000000000..2d6daa47df
--- /dev/null
+++ b/wallet/pskt/src/role.rs
@@ -0,0 +1,29 @@
+//! PSKT roles.
+
+/// Initializes the PSKT with 0 inputs and 0 outputs.
+/// Reference: [BIP-370: Creator](https://github.com/bitcoin/bips/blob/master/bip-0370.mediawiki#creator)
+pub enum Creator {}
+
+/// Adds inputs and outputs to the PSKT.
+/// Reference: [BIP-370: Constructor](https://github.com/bitcoin/bips/blob/master/bip-0370.mediawiki#constructor)
+pub enum Constructor {}
+
+/// Can set the sequence number.
+/// Reference: [BIP-370: Updater](https://github.com/bitcoin/bips/blob/master/bip-0370.mediawiki#updater)
+pub enum Updater {}
+
+/// Creates cryptographic signatures for the inputs using private keys.
+/// Reference: [BIP-370: Signer](https://github.com/bitcoin/bips/blob/master/bip-0370.mediawiki#signer)
+pub enum Signer {}
+
+/// Merges multiple PSKTs into one.
+/// Reference: [BIP-174: Combiner](https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki#combiner)
+pub enum Combiner {}
+
+/// Completes the PSKT, ensuring all inputs have valid signatures, and finalizes the transaction.
+/// Reference: [BIP-174: Input Finalizer](https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki#input-finalizer)
+pub enum Finalizer {}
+
+/// Extracts the final transaction from the PSKT once all parts are in place and the PSKT is fully signed.
+/// Reference: [BIP-370: Transaction Extractor](https://github.com/bitcoin/bips/blob/master/bip-0370.mediawiki#transaction-extractor)
+pub enum Extractor {}
diff --git a/wallet/pskt/src/utils.rs b/wallet/pskt/src/utils.rs
new file mode 100644
index 0000000000..357b61bc70
--- /dev/null
+++ b/wallet/pskt/src/utils.rs
@@ -0,0 +1,31 @@
+//! Utility functions for the PSKT module.
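+//!
+//! A minimal sketch of the merge semantics of [`combine_if_no_conflicts`]
+//! (illustrative only): overlapping keys must carry equal values, otherwise
+//! the combination fails with a conflict [`Error`].
+//!
+//! ```ignore
+//! use std::collections::BTreeMap;
+//!
+//! let lhs = BTreeMap::from([(1, "a"), (2, "b")]);
+//! let rhs = BTreeMap::from([(2, "b"), (3, "c")]);
+//! // The value under key 2 is equal on both sides => Ok with keys {1, 2, 3}.
+//! assert!(combine_if_no_conflicts(lhs, rhs).is_ok());
+//! ```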
+
+use std::collections::BTreeMap;
+
+// todo optimize without cloning
+pub fn combine_if_no_conflicts<K, V>(mut lhs: BTreeMap<K, V>, rhs: BTreeMap<K, V>) -> Result<BTreeMap<K, V>, Error<K, V>>
+where
+    V: Eq + Clone,
+    K: Ord + Clone,
+{
+    if lhs.len() >= rhs.len() {
+        if let Some((field, rhs, lhs)) =
+            rhs.iter().map(|(k, v)| (k, v, lhs.get(k))).find(|(_, v, rhs_v)| rhs_v.is_some_and(|rv| rv != *v))
+        {
+            Err(Error { field: field.clone(), lhs: lhs.unwrap().clone(), rhs: rhs.clone() })
+        } else {
+            lhs.extend(rhs);
+            Ok(lhs)
+        }
+    } else {
+        combine_if_no_conflicts(rhs, lhs)
+    }
+}
+
+#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)]
+#[error("Conflict")]
+pub struct Error<K, V> {
+    pub field: K,
+    pub lhs: V,
+    pub rhs: V,
+}
diff --git a/wallet/pskt/src/wasm/bundle.rs b/wallet/pskt/src/wasm/bundle.rs
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/wallet/pskt/src/wasm/bundle.rs
@@ -0,0 +1 @@
+
diff --git a/wallet/pskt/src/wasm/error.rs b/wallet/pskt/src/wasm/error.rs
new file mode 100644
index 0000000000..77fb0d8b16
--- /dev/null
+++ b/wallet/pskt/src/wasm/error.rs
@@ -0,0 +1,64 @@
+use super::pskt::State;
+use thiserror::Error;
+use wasm_bindgen::prelude::*;
+
+#[derive(Error, Debug)]
+pub enum Error {
+    #[error("{0}")]
+    Custom(String),
+
+    #[error("Unexpected state: {0}")]
+    State(String),
+
+    #[error("Constructor argument must be a valid payload, another PSKT instance, Transaction or undefined")]
+    Ctor(String),
+
+    #[error("Invalid payload")]
+    InvalidPayload,
+
+    #[error("Transaction not finalized")]
+    TxNotFinalized(#[from] crate::pskt::TxNotFinalized),
+
+    #[error(transparent)]
+    Wasm(#[from] workflow_wasm::error::Error),
+
+    #[error("Create state is not allowed for PSKT initialized from transaction or a payload")]
+    CreateNotAllowed,
+
+    #[error("PSKT must be initialized with a payload or CREATE role")]
+    NotInitialized,
+
+    #[error(transparent)]
+    ConsensusClient(#[from] kaspa_consensus_client::error::Error),
+
+    #[error(transparent)]
+    Pskt(#[from] crate::error::Error),
+}
+
+impl Error {
+    pub fn custom<T: std::fmt::Display>(msg: T) -> Self {
+        Error::Custom(msg.to_string())
+    }
+
+    pub fn state(state: impl AsRef<State>) -> Self {
+        Error::State(state.as_ref().display().to_string())
+    }
+}
+
+impl From<&str> for Error {
+    fn from(msg: &str) -> Self {
+        Error::Custom(msg.to_string())
+    }
+}
+
+impl From<String> for Error {
+    fn from(msg: String) -> Self {
+        Error::Custom(msg)
+    }
+}
+
+impl From<Error> for JsValue {
+    fn from(err: Error) -> Self {
+        JsValue::from_str(&err.to_string())
+    }
+}
diff --git a/wallet/pskt/src/wasm/input.rs b/wallet/pskt/src/wasm/input.rs
new file mode 100644
index 0000000000..b6a827daf1
--- /dev/null
+++ b/wallet/pskt/src/wasm/input.rs
@@ -0,0 +1 @@
+// TODO - InputBuilder & Input
diff --git a/wallet/pskt/src/wasm/mod.rs b/wallet/pskt/src/wasm/mod.rs
new file mode 100644
index 0000000000..f5e9bea3fb
--- /dev/null
+++ b/wallet/pskt/src/wasm/mod.rs
@@ -0,0 +1,6 @@
+pub mod bundle;
+pub mod error;
+pub mod input;
+pub mod output;
+pub mod pskt;
+pub mod result;
diff --git a/wallet/pskt/src/wasm/output.rs b/wallet/pskt/src/wasm/output.rs
new file mode 100644
index 0000000000..eb91824d1a
--- /dev/null
+++ b/wallet/pskt/src/wasm/output.rs
@@ -0,0 +1 @@
+// TODO - OutputBuilder & Output
diff --git a/wallet/pskt/src/wasm/pskt.rs b/wallet/pskt/src/wasm/pskt.rs
new file mode 100644
index 0000000000..8ee370a4b9
--- /dev/null
+++ b/wallet/pskt/src/wasm/pskt.rs
@@ -0,0 +1,320 @@
+use crate::pskt::PSKT as Native;
+use crate::role::*;
+use kaspa_consensus_core::tx::TransactionId;
+use wasm_bindgen::prelude::*;
+// use js_sys::Object;
+use crate::pskt::Inner;
+use kaspa_consensus_client::{Transaction, TransactionInput, TransactionInputT, TransactionOutput, TransactionOutputT};
+use serde::{Deserialize, Serialize};
+use std::sync::MutexGuard;
+use std::sync::{Arc, Mutex};
+use workflow_wasm::{
+    convert::{Cast, CastFromJs, TryCastFromJs},
+    // extensions::object::*,
+    // error::Error as CastError,
+};
+
+use super::error::*;
+use super::result::*;
+
+#[derive(Clone, Serialize, Deserialize)]
+#[serde(tag = "state", content = "payload")]
+pub enum State {
+    NoOp(Option<Inner>),
+    Creator(Native<Creator>),
+    Constructor(Native<Constructor>),
+    Updater(Native<Updater>),
+    Signer(Native<Signer>),
+    Combiner(Native<Combiner>),
+    Finalizer(Native<Finalizer>),
+    Extractor(Native<Extractor>),
+}
+
+impl AsRef<State> for State {
+    fn as_ref(&self) -> &State {
+        self
+    }
+}
+
+impl State {
+    // this is intentionally not a Display trait
+    pub fn display(&self) -> &'static str {
+        match self {
+            State::NoOp(_) => "Init",
+            State::Creator(_) => "Creator",
+            State::Constructor(_) => "Constructor",
+            State::Updater(_) => "Updater",
+            State::Signer(_) => "Signer",
+            State::Combiner(_) => "Combiner",
+            State::Finalizer(_) => "Finalizer",
+            State::Extractor(_) => "Extractor",
+        }
+    }
+}
+
+impl From<State> for PSKT {
+    fn from(state: State) -> Self {
+        PSKT { state: Arc::new(Mutex::new(Some(state))) }
+    }
+}
+
+#[wasm_bindgen]
+extern "C" {
+    #[wasm_bindgen(typescript_type = "PSKT | Transaction | string | undefined")]
+    pub type CtorT;
+}
+
+#[derive(Clone, Serialize, Deserialize)]
+pub struct Payload {
+    data: String,
+}
+
+impl<ROLE> TryFrom<Payload> for Native<ROLE> {
+    type Error = Error;
+
+    fn try_from(value: Payload) -> Result<Self> {
+        let Payload { data } = value;
+        if data.starts_with("PSKT") {
+            unimplemented!("PSKT binary serialization")
+        } else {
+            Ok(serde_json::from_str(&data).map_err(|err| format!("Invalid JSON: {err}"))?)
+        }
+    }
+}
+
+#[wasm_bindgen(inspectable)]
+#[derive(Clone, CastFromJs)]
+pub struct PSKT {
+    state: Arc<Mutex<Option<State>>>,
+}
+
+impl TryCastFromJs for PSKT {
+    type Error = Error;
+    fn try_cast_from<'a, R>(value: &'a R) -> std::result::Result<Cast<'a, Self>, Self::Error>
+    where
+        R: AsRef<JsValue> + 'a,
+    {
+        Self::resolve(value, || {
+            if let Some(data) = value.as_ref().as_string() {
+                let pskt_inner: Inner = serde_json::from_str(&data).map_err(|_| Error::InvalidPayload)?;
+                Ok(PSKT::from(State::NoOp(Some(pskt_inner))))
+            } else if let Ok(transaction) = Transaction::try_owned_from(value) {
+                let pskt_inner: Inner = transaction.try_into()?;
+                Ok(PSKT::from(State::NoOp(Some(pskt_inner))))
+            } else {
+                Err(Error::InvalidPayload)
+            }
+        })
+    }
+}
+
+#[wasm_bindgen]
+impl PSKT {
+    #[wasm_bindgen(constructor)]
+    pub fn new(payload: CtorT) -> Result<PSKT> {
+        PSKT::try_owned_from(payload.unchecked_into::<JsValue>().as_ref()).map_err(|err| Error::Ctor(err.to_string()))
+    }
+
+    #[wasm_bindgen(getter, js_name = "role")]
+    pub fn role_getter(&self) -> String {
+        self.state().as_ref().unwrap().display().to_string()
+    }
+
+    #[wasm_bindgen(getter, js_name = "payload")]
+    pub fn payload_getter(&self) -> JsValue {
+        let state = self.state();
+        serde_wasm_bindgen::to_value(state.as_ref().unwrap()).unwrap()
+    }
+
+    fn state(&self) -> MutexGuard<'_, Option<State>> {
+        self.state.lock().unwrap()
+    }
+
+    fn take(&self) -> State {
+        self.state.lock().unwrap().take().unwrap()
+    }
+
+    fn replace(&self, state: State) -> Result<PSKT> {
+        self.state.lock().unwrap().replace(state);
+        Ok(self.clone())
+    }
+
+    /// Change role to `CREATOR`
+    #[wasm_bindgen(js_name = toCreator)]
+    pub fn creator(&self) -> Result<PSKT> {
+        let state = match self.take() {
+            State::NoOp(inner) => match inner {
+                None => State::Creator(Native::default()),
+                Some(_) => Err(Error::CreateNotAllowed)?,
+            },
+            state => Err(Error::state(state))?,
+        };
+
+        self.replace(state)
+    }
+
+    /// Change role to `CONSTRUCTOR`
+    #[wasm_bindgen(js_name = toConstructor)]
+    pub fn constructor(&self) -> Result<PSKT> {
+        let state = match self.take() {
+            State::NoOp(inner) => State::Constructor(inner.ok_or(Error::NotInitialized)?.into()),
+            State::Creator(pskt) => State::Constructor(pskt.constructor()),
+            state => Err(Error::state(state))?,
+        };
+
+        self.replace(state)
+    }
+
+    /// Change role to `UPDATER`
+    #[wasm_bindgen(js_name = toUpdater)]
+    pub fn updater(&self) -> Result<PSKT> {
+        let state = match self.take() {
+            State::NoOp(inner) => State::Updater(inner.ok_or(Error::NotInitialized)?.into()),
+            State::Constructor(constructor) => State::Updater(constructor.updater()),
+            state => Err(Error::state(state))?,
+        };
+
+        self.replace(state)
+    }
+
+    /// Change role to `SIGNER`
+    #[wasm_bindgen(js_name = toSigner)]
+    pub fn signer(&self) -> Result<PSKT> {
+        let state = match self.take() {
+            State::NoOp(inner) => State::Signer(inner.ok_or(Error::NotInitialized)?.into()),
+            State::Constructor(pskt) => State::Signer(pskt.signer()),
+            State::Updater(pskt) => State::Signer(pskt.signer()),
+            State::Combiner(pskt) => State::Signer(pskt.signer()),
+            state => Err(Error::state(state))?,
+        };
+
+        self.replace(state)
+    }
+
+    /// Change role to `COMBINER`
+    #[wasm_bindgen(js_name = toCombiner)]
+    pub fn combiner(&self) -> Result<PSKT> {
+        let state = match self.take() {
+            State::NoOp(inner) => State::Combiner(inner.ok_or(Error::NotInitialized)?.into()),
+            State::Constructor(pskt) => State::Combiner(pskt.combiner()),
+            State::Updater(pskt) => State::Combiner(pskt.combiner()),
+            State::Signer(pskt) => State::Combiner(pskt.combiner()),
+            state => Err(Error::state(state))?,
+        };
+
+        self.replace(state)
+    }
+
+    /// Change role to `FINALIZER`
+    #[wasm_bindgen(js_name = toFinalizer)]
+    pub fn finalizer(&self) -> Result<PSKT> {
+        let state = match self.take() {
+            State::NoOp(inner) => State::Finalizer(inner.ok_or(Error::NotInitialized)?.into()),
+            State::Combiner(pskt) => State::Finalizer(pskt.finalizer()),
+            state => Err(Error::state(state))?,
+        };
+
+        self.replace(state)
+    }
+
+    /// Change role to `EXTRACTOR`
+    #[wasm_bindgen(js_name = toExtractor)]
+    pub fn extractor(&self) -> Result<PSKT> {
+        let state = match self.take() {
+            State::NoOp(inner) => State::Extractor(inner.ok_or(Error::NotInitialized)?.into()),
+            State::Finalizer(pskt) => State::Extractor(pskt.extractor()?),
+            state => Err(Error::state(state))?,
+        };
+
+        self.replace(state)
+    }
+
+    #[wasm_bindgen(js_name = fallbackLockTime)]
+    pub fn fallback_lock_time(&self, lock_time: u64) -> Result<PSKT> {
+        let state = match self.take() {
+            State::Creator(pskt) => State::Creator(pskt.fallback_lock_time(lock_time)),
+            state => Err(Error::state(state))?,
+        };
+
+        self.replace(state)
+    }
+
+    #[wasm_bindgen(js_name = inputsModifiable)]
+    pub fn inputs_modifiable(&self) -> Result<PSKT> {
+        let state = match self.take() {
+            State::Creator(pskt) => State::Creator(pskt.inputs_modifiable()),
+            state => Err(Error::state(state))?,
+        };
+
+        self.replace(state)
+    }
+
+    #[wasm_bindgen(js_name = outputsModifiable)]
+    pub fn outputs_modifiable(&self) -> Result<PSKT> {
+        let state = match self.take() {
+            State::Creator(pskt) => State::Creator(pskt.outputs_modifiable()),
+            state => Err(Error::state(state))?,
+        };
+
+        self.replace(state)
+    }
+
+    #[wasm_bindgen(js_name = noMoreInputs)]
+    pub fn no_more_inputs(&self) -> Result<PSKT> {
+        let state = match self.take() {
+            State::Constructor(pskt) => State::Constructor(pskt.no_more_inputs()),
+            state => Err(Error::state(state))?,
+        };
+
+        self.replace(state)
+    }
+
+    #[wasm_bindgen(js_name = noMoreOutputs)]
+    pub fn no_more_outputs(&self) -> Result<PSKT> {
+        let state = match self.take() {
+            State::Constructor(pskt) => State::Constructor(pskt.no_more_outputs()),
+            state => Err(Error::state(state))?,
+        };
+
+        self.replace(state)
+    }
+
+    pub fn input(&self, input: &TransactionInputT) -> Result<PSKT> {
+        let input = TransactionInput::try_owned_from(input)?;
+        let state = match self.take() {
+            State::Constructor(pskt) => State::Constructor(pskt.input(input.try_into()?)),
+            state => Err(Error::state(state))?,
+        };
+
+        self.replace(state)
+    }
+
+    pub fn output(&self, output: &TransactionOutputT) -> Result<PSKT> {
+        let output = TransactionOutput::try_owned_from(output)?;
+        let state = match self.take() {
+            State::Constructor(pskt) => State::Constructor(pskt.output(output.try_into()?)),
+            state => Err(Error::state(state))?,
+        };
+
+        self.replace(state)
+    }
+
+    #[wasm_bindgen(js_name = setSequence)]
+    pub fn set_sequence(&self, n: u64, input_index: usize) -> Result<PSKT> {
+        let state = match self.take() {
+            State::Updater(pskt) => State::Updater(pskt.set_sequence(n, input_index)?),
+            state => Err(Error::state(state))?,
+        };
+
+        self.replace(state)
+    }
+
+    #[wasm_bindgen(js_name = calculateId)]
+    pub fn calculate_id(&self) -> Result<TransactionId> {
+        let state = self.state();
+        match state.as_ref().unwrap() {
+            State::Signer(pskt) => Ok(pskt.calculate_id()),
+            state => Err(Error::state(state))?,
+        }
+    }
+}
diff --git a/wallet/pskt/src/wasm/result.rs b/wallet/pskt/src/wasm/result.rs
new file mode 100644
index 0000000000..32f663388a
--- /dev/null
+++ b/wallet/pskt/src/wasm/result.rs
@@ -0,0 +1 @@
+pub type Result<T> = std::result::Result<T, super::error::Error>;
diff --git a/wallet/wasm/Cargo.toml b/wallet/wasm/Cargo.toml
index 976665c52a..b4b3d0f2ca 100644
--- a/wallet/wasm/Cargo.toml
+++ b/wallet/wasm/Cargo.toml
@@ -23,4 +23,4 @@ wasm-bindgen-futures.workspace = true
 wasm-bindgen.workspace = true
 workflow-log.workspace = true
 workflow-terminal.workspace = true
-workflow-wasm.workspace = true
+workflow-wasm.workspace = true
\ No newline at end of file
diff --git a/wasm/CHANGELOG.md b/wasm/CHANGELOG.md
index 5882d9cbf5..d572dfd1d1 100644
--- a/wasm/CHANGELOG.md
+++ b/wasm/CHANGELOG.md
@@ -1,4 +1,33 @@
 Latest online documentation available at: https://kaspa.aspectron.org/docs/
+
+### Latest Release
+
+- Replace `MassCalculator` with `calculateTransactionMass` and `calculateTransactionFee` functions.
+- Change `createTransaction` function signature (remove requirement for change address).
+- Make `ITransactionInput.signatureScript` optional (if not supplied, the signatureScript is assigned an empty vector).
+
+### Release 2024-07-17
+
+- Fix issues with deserializing manually-created objects matching `IUtxoEntry` interface.
+- Allow arguments expecting ScriptPublicKey to receive `{ version, script }` object or a hex string.
+- Fix `Transaction::serializeToObject()` return type (now returning `ISerializeTransaction` interface).
+- Adding `setUserTransactionMaturityDAA()` and `setCoinbaseTransactionMaturityDAA()` that allow customizing
+the maturity DAA periods for user and coinbase transactions.
+
+### Release 2024-06-12
+
+- Fix `PublicKeyGenerator::change_address_as_string()` that was returning the receive address.
+- WASM SDK now builds as a GitHub artifact during the CI process.
+- `State` renamed to `PoW`.
+- Docs now have a PoW section that unifies all PoW-related classes and functions.
+- `TransactionRecord.data` (`TransactionData`) now has correct TypeScript bindings.
+
+### Release 2024-05-26
+
+- Adding utility functions: `payToAddressScript()`, `payToScriptHashScript()`, `payToScriptHashSignatureScript()`, `addressFromScriptPublicKey()`, `isScriptPayToPubkey()`, `isScriptPayToPubkeyECDSA()`, `isScriptPayToScriptHash()`.
+- Adding `UtxoProcessor::isActive` property to check if the processor is in the active state (connected and running). This property can be used to validate the processor state before invoking its functions (which can throw if the UtxoProcessor is offline).
+- Rename `UtxoContext::active` to `UtxoContext::isActive` for consistency.
+
 ### Release 2024-04-27
 
 - IAccountsCreateRequest interface simplified by flattering it and now it is union for future expansion for multisig etc.
 - IWalletEvent interface updated for Events with TransactionRecord
diff --git a/wasm/Cargo.toml b/wasm/Cargo.toml
index 78a747e19f..77b14c39dd 100644
--- a/wasm/Cargo.toml
+++ b/wasm/Cargo.toml
@@ -18,11 +18,13 @@ crate-type = ["cdylib"]
 cfg-if.workspace = true
 js-sys.workspace = true
 kaspa-addresses.workspace = true
+kaspa-bip32.workspace = true
 kaspa-consensus-core.workspace = true
 kaspa-consensus-wasm.workspace = true
 kaspa-core.workspace = true
 kaspa-math.workspace = true
 kaspa-pow.workspace = true
+kaspa-txscript.workspace = true
 kaspa-rpc-core.workspace = true
 kaspa-utils.workspace = true
 kaspa-wasm-core.workspace = true
@@ -40,10 +42,12 @@ workflow-wasm.workspace = true
 wasm32-sdk = [
     "kaspa-wallet-core/wasm32-sdk",
     "kaspa-pow/wasm32-sdk",
+    "kaspa-txscript/wasm32-sdk",
 ]
 wasm32-core = [
     "kaspa-wallet-core/wasm32-core",
     "kaspa-pow/wasm32-sdk",
+    "kaspa-txscript/wasm32-sdk",
 ]
 wasm32-rpc = [
     "kaspa-consensus-core/wasm32-sdk",
diff --git a/wasm/build-node-dev b/wasm/build-node-dev
index b8de2b6acf..6dc5446fd1 100755
--- a/wasm/build-node-dev
+++ b/wasm/build-node-dev
@@ -5,4 +5,5 @@ RED='\033[0;31m'
 NC='\033[0m' # No Color
 
 echo -e "${RED}WARNING: do not use resulting WASM binaries in production!${NC}"
-wasm-pack build --weak-refs --dev --target nodejs --out-name kaspa --out-dir nodejs/kaspa --features wasm32-sdk $@
+# wasm-pack build --weak-refs --dev --target nodejs --out-name kaspa --out-dir nodejs/kaspa-dev --features wasm32-sdk $@
+wasm-pack build --weak-refs --dev --target nodejs --out-name kaspa --out-dir nodejs/kaspa-dev --features wasm32-sdk $@
diff --git a/wasm/core/Cargo.toml b/wasm/core/Cargo.toml
index a8a49e0aa3..4c765e9bd6 100644
--- a/wasm/core/Cargo.toml
+++ b/wasm/core/Cargo.toml
@@ -15,6 +15,8 @@ wasm32-sdk = []
 wasm-bindgen.workspace = true
 js-sys.workspace = true
 faster-hex.workspace = true
+hexplay.workspace = true
+workflow-wasm.workspace = true
 
-[lints.clippy]
-empty_docs = "allow"
+[lints]
+workspace = true
diff --git a/wasm/core/src/hex.rs b/wasm/core/src/hex.rs
new file mode 100644
index 0000000000..8c187e03f5
--- /dev/null
+++ b/wasm/core/src/hex.rs
@@ -0,0 +1,152 @@
+//!
+//! Hex module provides a way to display binary data in a human-readable format.
+//!
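+//!
+//! A minimal usage sketch on the Rust side (illustrative only; the byte values
+//! and config fields are arbitrary assumptions, and printing relies on the
+//! `hexplay` crate's `Display` implementation for `HexView`):
+//!
+//! ```ignore
+//! let config = HexViewConfig { width: Some(16), ..Default::default() };
+//! let view = config.build(&[0xde, 0xad, 0xbe, 0xef]);
+//! println!("{view}");
+//! ```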
+
+use hexplay::{
+    color::{Color, Spec},
+    HexView, HexViewBuilder,
+};
+use std::ops::Range;
+use std::str::FromStr;
+use wasm_bindgen::prelude::*;
+use workflow_wasm::prelude::*;
+
+type Result<T> = std::result::Result<T, JsValue>;
+
+#[derive(Default)]
+pub struct HexViewConfig {
+    pub offset: Option<usize>,
+    pub replace_char: Option<char>,
+    pub width: Option<usize>,
+    pub colors: Option<Vec<(Spec, Range<usize>)>>,
+}
+
+impl HexViewConfig {
+    pub fn build(self, slice: &[u8]) -> HexView<'_> {
+        let mut builder = HexViewBuilder::new(slice);
+
+        if let Some(offset) = self.offset {
+            builder = builder.address_offset(offset);
+        }
+
+        if let Some(replace_char) = self.replace_char {
+            builder = builder.replacement_character(replace_char);
+        }
+
+        if let Some(width) = self.width {
+            builder = builder.row_width(width);
+        }
+
+        if let Some(colors) = self.colors {
+            if !colors.is_empty() {
+                builder = builder.add_colors(colors);
+            }
+        }
+
+        builder.finish()
+    }
+}
+
+pub struct ColorRange {
+    pub color: Option<Color>,
+    pub background: Option<Color>,
+    pub range: Range<usize>,
+}
+
+impl ColorRange {
+    fn new(color: Option<Color>, background: Option<Color>, range: Range<usize>) -> Self {
+        Self { color, background, range }
+    }
+
+    fn into_tuple(self) -> (Spec, Range<usize>) {
+        let mut spec = Spec::new();
+        spec.set_fg(self.color);
+        spec.set_bg(self.background);
+
+        (spec, self.range)
+    }
+}
+
+#[wasm_bindgen(typescript_custom_section)]
+const TS_HEX_VIEW: &'static str = r#"
+/**
+ * Color range configuration for Hex View.
+ *
+ * @category General
+ */
+export interface IHexViewColor {
+    start: number;
+    end: number;
+    color?: string;
+    background?: string;
+}
+
+/**
+ * Configuration interface for Hex View.
+ *
+ * @category General
+ */
+export interface IHexViewConfig {
+    offset? : number;
+    replacementCharacter? : string;
+    width? : number;
+    colors? : IHexViewColor[];
+}
+"#;
+
+#[wasm_bindgen]
+extern "C" {
+    #[wasm_bindgen(typescript_type = "IHexViewColor")]
+    pub type HexViewColorT;
+    #[wasm_bindgen(extends = js_sys::Array, typescript_type = "IHexViewColor[]")]
+    pub type HexViewColorArrayT;
+    #[wasm_bindgen(typescript_type = "IHexViewConfig")]
+    pub type HexViewConfigT;
+}
+
+impl TryFrom<JsValue> for ColorRange {
+    type Error = JsValue;
+    fn try_from(js_value: JsValue) -> Result<Self> {
+        if let Some(object) = js_sys::Object::try_from(&js_value) {
+            let start = object.get_u32("start")? as usize;
+            let end = object.get_u32("end")? as usize;
+
+            let color = object.get_string("color").ok();
+            let color =
+                color.map(|color| Color::from_str(color.as_str()).map_err(|e| JsValue::from_str(&e.to_string()))).transpose()?;
+
+            let background = object.get_string("background").ok();
+            let background = background
+                .map(|background| Color::from_str(background.as_str()).map_err(|e| JsValue::from_str(&e.to_string())))
+                .transpose()?;
+
+            Ok(ColorRange::new(color, background, start..end))
+        } else {
+            Err(JsValue::from_str("color range must be an object"))
+        }
+    }
+}
+
+pub fn try_to_color_vec(js_value: JsValue) -> Result<Vec<(Spec, Range<usize>)>> {
+    if js_value.is_array() {
+        let list = js_sys::Array::from(&js_value).iter().map(TryFrom::try_from).collect::<Result<Vec<ColorRange>>>()?;
+        Ok(list.into_iter().map(ColorRange::into_tuple).collect::<Vec<_>>())
+    } else {
+        let tuple = ColorRange::try_from(js_value).map(ColorRange::into_tuple)?;
+        Ok(vec![tuple])
+    }
+}
+
+impl TryFrom<HexViewConfigT> for HexViewConfig {
+    type Error = JsValue;
+    fn try_from(js_value: HexViewConfigT) -> Result<Self> {
+        let object = js_sys::Object::try_from(&js_value).ok_or_else(|| JsValue::from_str("HexView config must be an object"))?;
+
+        let offset = object.get_u32("offset").ok().map(|v| v as usize);
+        let replace_char = object.get_string("replacementCharacter").ok().map(|s| s.chars().next().unwrap_or(' '));
+        let width = object.get_u32("width").ok().map(|v| v as usize);
+        let colors = object.get_value("colors").ok().map(try_to_color_vec).transpose()?;
+
+        Ok(HexViewConfig { offset, replace_char, width, colors })
+    }
+}
diff --git a/wasm/core/src/lib.rs b/wasm/core/src/lib.rs
index 1710c11fea..d99f297310 100644
--- a/wasm/core/src/lib.rs
+++ b/wasm/core/src/lib.rs
@@ -1,4 +1,3 @@
 pub mod events;
+pub mod hex;
 pub mod types;
-
-// pub use types::*;
diff --git a/wasm/core/src/types.rs b/wasm/core/src/types.rs
index fc898ee8b0..7e8e29335e 100644
--- a/wasm/core/src/types.rs
+++ b/wasm/core/src/types.rs
@@ -30,7 +30,7 @@ impl From for HexString {
 impl TryFrom<HexString> for String {
     type Error = &'static str;
 
-    fn try_from(value: HexString) -> Result<String, Self::Error> {
+    fn try_from(value: HexString) -> std::result::Result<String, Self::Error> {
         value.as_string().ok_or("Supplied value is not a string")
     }
 }
diff --git a/wasm/examples/nodejs/javascript/general/derivation.js b/wasm/examples/nodejs/javascript/general/derivation.js
index f92508c889..942f665f98 100644
--- a/wasm/examples/nodejs/javascript/general/derivation.js
+++ b/wasm/examples/nodejs/javascript/general/derivation.js
@@ -21,6 +21,7 @@ kaspa.initConsolePanicHook();
     let xPrv = new XPrv(seed);
     // derive full path upto second address of receive wallet
     let pubkey1 = xPrv.derivePath("m/44'/111111'/0'/0/1").toXPub().toPublicKey();
+    console.log("publickey", pubkey1.toString())
     console.log("address", pubkey1.toAddress(NetworkType.Mainnet));
 
     // create receive wallet
@@ -28,17 +29,25 @@ kaspa.initConsolePanicHook();
     // derive receive wallet for second address
     let pubkey2 = receiveWalletXPub.deriveChild(1, false).toPublicKey();
     console.log("address", pubkey2.toAddress(NetworkType.Mainnet));
+    if (pubkey1.toString() != pubkey2.toString()){
+        throw new Error("pubkey2 does not match")
+    }
 
     // create change wallet
     let changeWalletXPub = xPrv.derivePath("m/44'/111111'/0'/1").toXPub();
     // derive change wallet for first address
     let pubkey3 = changeWalletXPub.deriveChild(0, false).toPublicKey();
-    console.log("address", pubkey2.toAddress(NetworkType.Mainnet));
+    console.log("change address", pubkey3.toAddress(NetworkType.Mainnet));
+
+    // ---
 
-    if (pubkey1.toString() != pubkey2.toString()){
-        throw new Error("pubkeyes dont match")
+    // derive address via private key
+    let privateKey = xPrv.derivePath("m/44'/111111'/0'/0/1").toPrivateKey();
+    console.log("address via private key", privateKey.toAddress(NetworkType.Mainnet))
+    console.log("privatekey", privateKey.toString());
+    let pubkey4 = privateKey.toPublicKey();
+    if (pubkey1.toString() != pubkey4.toString()){
+        throw new Error("pubkey4 does not match")
     }
-    // ---
 
     // xprv with ktrv prefix
     const ktrv = xPrv.intoString("ktrv");
diff --git a/wasm/examples/nodejs/javascript/general/mining-state.js b/wasm/examples/nodejs/javascript/general/mining-pow.js
similarity index 90%
rename from wasm/examples/nodejs/javascript/general/mining-state.js
rename to wasm/examples/nodejs/javascript/general/mining-pow.js
index 1741fd5459..58bf2e70fe 100644
--- a/wasm/examples/nodejs/javascript/general/mining-state.js
+++ b/wasm/examples/nodejs/javascript/general/mining-pow.js
@@ -32,12 +32,12 @@ kaspa.initConsolePanicHook();
     console.log("header.blueWork:", header.blueWork);
     console.log("header.blueWork.toString(16):", header.blueWork.toString(16));
 
-    console.log("creating state");
-    const state = new kaspa.State(header);
+    console.log("creating PoW");
+    const pow = new kaspa.PoW(header);
     const nonce = BigInt("0xffffffffffffffff");
     console.log("nonce:", nonce);
-    const [a, v] = state.checkPow(nonce);
-    console.log("state:", state);
+    const [a, v] = pow.checkWork(nonce);
+    console.log("pow:", pow);
     console.log("[a,v]:", a, v);
     console.log("v.toString(16):", v.toString(16));
 })();
diff --git a/wasm/examples/nodejs/javascript/transactions/serialize.js b/wasm/examples/nodejs/javascript/transactions/serialize.js
new file mode 100644
index 0000000000..977ed1e8ab
--- /dev/null
+++ b/wasm/examples/nodejs/javascript/transactions/serialize.js
@@ -0,0 +1,48 @@
+const {
+    Address,
+    createTransactions,
+    initConsolePanicHook,
+    Mnemonic,
+    XPrv,
+    PrivateKeyGenerator,
+    payToAddressScript,
+} = require('../../../../nodejs/kaspa');
+
+
+(async () => {
+
+    const networkId = 'mainnet';
+
+    const mnemonic = Mnemonic.random();
+    const xprv = new XPrv(mnemonic.toSeed());
+    const privateKey = new PrivateKeyGenerator(xprv, false, 0n).receiveKey(1);
+    const address = privateKey.toAddress(networkId);
+    const scriptPublicKey = payToAddressScript(address);
+    const entries = [{
+        address,
+        outpoint: {
+            transactionId: '1b84324c701b16c1cfbbd713a5ff87edf78bc5c92a92866f86d7e32ab5cd387d',
+            index: 0
+        },
+        scriptPublicKey,
+        amount: 50000000000n,
+        isCoinbase: true,
+        blockDaaScore: 342n
+    }];
+
+    const { transactions, summary } = await createTransactions({
+        entries,
+        outputs: [{
+            address: 'kaspa:qpamkvhgh0kzx50gwvvp5xs8ktmqutcy3dfs9dc3w7lm9rq0zs76vf959mmrp',
+            amount: 400000000n
+        }],
+        changeAddress: address,
+        priorityFee: 0n,
+        networkId
+    });
+
+    for (const pending of transactions) {
+        const tx = pending.serializeToObject();
+        console.log(tx);
+    }
+})();
diff --git a/wasm/examples/nodejs/javascript/transactions/simple-transaction.js b/wasm/examples/nodejs/javascript/transactions/simple-transaction.js
index b215fd597d..fc9aa3b0ab 100644
--- a/wasm/examples/nodejs/javascript/transactions/simple-transaction.js
+++ b/wasm/examples/nodejs/javascript/transactions/simple-transaction.js
@@ -55,7 +55,7 @@ initConsolePanicHook();
 
     let { transactions, summary } = await createTransactions({
         entries,
-        outputs: [{ address : destinationAddress, amount : kaspaToSompi(0.00012)}],
+        outputs: [{ address : destinationAddress, amount : kaspaToSompi("0.00012")}],
         priorityFee: 0n,
         changeAddress: sourceAddress,
     });
diff --git a/wasm/examples/nodejs/javascript/transactions/single-transaction-demo.js b/wasm/examples/nodejs/javascript/transactions/single-transaction-demo.js
index def206c158..0400c7d1c5 100644
--- a/wasm/examples/nodejs/javascript/transactions/single-transaction-demo.js
+++ b/wasm/examples/nodejs/javascript/transactions/single-transaction-demo.js
@@ -73,7 +73,13 @@ const { networkId, encoding } = require("../utils").parseArgs();
 
     const changeAddress = address;
     console.log("changeAddress:", changeAddress)
-    const tx = createTransaction(utxos, outputs, changeAddress, 0n, 0, 1, 1);
+
+    // utxo_entry_source: IUtxoEntry[],
+    // outputs: IPaymentOutput[],
+    // priority_fee: bigint,
+    // payload: HexString | Uint8Array,
+    // sig_op_count?: number
+    const tx = createTransaction(utxos, outputs, 0n, "", 1);
 
     console.info("Transaction before signing:", tx);
diff --git a/wasm/examples/nodejs/typescript/src/scriptBuilder.ts b/wasm/examples/nodejs/typescript/src/scriptBuilder.ts
new file mode 100644
index 0000000000..13f02b12bf
--- /dev/null
+++ b/wasm/examples/nodejs/typescript/src/scriptBuilder.ts
@@ -0,0 +1,13 @@
+import { ScriptBuilder, Opcodes, addressFromScriptPublicKey, NetworkType } from "../../../../nodejs/kaspa"
+
+// An OpTrue is an always-spendable script
+const myScript = new ScriptBuilder()
+    .addOp(Opcodes.OpTrue)
+
+const P2SHScript = myScript.createPayToScriptHashScript()
+const address = addressFromScriptPublicKey(P2SHScript, NetworkType.Mainnet)
+
+// Payable address
+console.log(address!.toString())
+// Unlock signature script
+console.log(myScript.encodePayToScriptHashSignatureScript(""))
\ No newline at end of file
diff --git a/wasm/src/lib.rs b/wasm/src/lib.rs
index 912ed9428b..d8b0f06a95 100644
--- a/wasm/src/lib.rs
+++ b/wasm/src/lib.rs
@@ -1,5 +1,5 @@
 /*!
-# `rusty-kaspa WASM32 bindings`
+# Rusty Kaspa WASM32 bindings
 
 [github](https://github.com/kaspanet/rusty-kaspa/tree/master/wasm)
 [crates.io](https://crates.io/crates/kaspa-wasm)
@@ -13,9 +13,9 @@ codebase within JavaScript environments such as Node.js and Web Browsers.
 
 ## Documentation
 
-- [**integrating with Kaspa** guide](https://kaspa.aspectron.org/)
-- [**Rustdoc** documentation](https://docs.rs/kaspa-wasm/latest/kaspa-wasm)
-- [**JSDoc** documentation](https://kaspa.aspectron.org/jsdoc/)
+- [**Integrating with Kaspa** guide](https://kaspa.aspectron.org/)
+- [Rust SDK documentation (**Rustdoc**)](https://docs.rs/kaspa-wasm/)
+- [TypeScript documentation (**JSDoc**)](https://kaspa.aspectron.org/docs/)
 
 Please note that while WASM directly binds JavaScript and Rust resources, their names on JavaScript
 side are different from their name in Rust as they conform to the 'camelCase' convention in JavaScript and
@@ -25,9 +25,10 @@ to the 'snake_case' convention in Rust.
 
 The APIs are currently separated into the following groups (this will be expanded in the future):
 
-- **Transaction API** — Bindings for primitives related to transactions.
-- **RPC API** — [RPC interface bindings](rpc) for the Kaspa node using WebSocket (wRPC) connections.
-- **Wallet API** — API for async core wallet processing tasks.
+- **Consensus Client API** — Bindings for primitives related to transactions.
+- **RPC API** — [RPC interface bindings](kaspa_wrpc_wasm::client) for the Kaspa node using WebSocket (wRPC) connections.
+- **Wallet SDK** — API for async core wallet processing tasks.
+- **Wallet API** — A Rust implementation of the fully-featured wallet usable in native Rust, Browser, NodeJs, and Bun environments.
 
 ## NPM Modules
 
@@ -43,6 +44,9 @@ of a native WebSocket in NodeJs environment, while the `kaspa` module includes
 `websocket` package dependency simulating the W3C WebSocket and due to this supports RPC.
 
+NOTE: for security reasons it is always recommended to build the WASM SDK from source or
+download pre-built redistributables from releases or development builds.
+
 ## Examples
 
 JavaScript examples for using this framework can be found at:
@@ -54,8 +58,19 @@ For pre-built browser-compatible WASM32 redistributables of this framework
 please see the releases section of the Rusty Kaspa repository at
 <https://github.com/kaspanet/rusty-kaspa/releases>.
 
+## Development Builds
+
+The latest development builds from .
+Development builds typically contain fixes and improvements that are not yet available in
+stable releases. Additional information can be found at
+.
+
 ## Using RPC
 
+No special handling is required to use the RPC client
+in **Browser** or **Bun** environments because
+these environments provide native WebSocket support.
+
 **NODEJS:** If you are building from source, to use WASM RPC client in the NodeJS environment,
 you need to introduce a global W3C WebSocket object before loading the WASM32 library
 (to simulate the browser behavior).
@@ -123,7 +138,7 @@ const rpc = new RpcClient({
 })();
 ```
 
-For more details, please follow the [**integrating with Kaspa**](https://kaspa.aspectron.org/) guide.
+For more details, please follow the [**Integrating with Kaspa**](https://kaspa.aspectron.org/) guide.
 
 */
 
@@ -145,6 +160,7 @@ cfg_if::cfg_if! {
         pub use kaspa_addresses::{Address, Version as AddressVersion};
         pub use kaspa_consensus_core::tx::{ScriptPublicKey, Transaction, TransactionInput, TransactionOutpoint, TransactionOutput};
         pub use kaspa_pow::wasm::*;
+        pub use kaspa_txscript::wasm::*;
 
         pub mod rpc {
             //! Kaspa RPC interface
@@ -171,6 +187,7 @@ cfg_if::cfg_if! {
         pub use kaspa_addresses::{Address, Version as AddressVersion};
         pub use kaspa_consensus_core::tx::{ScriptPublicKey, Transaction, TransactionInput, TransactionOutpoint, TransactionOutput};
         pub use kaspa_pow::wasm::*;
+        pub use kaspa_txscript::wasm::*;
 
         pub mod rpc {
             //! Kaspa RPC interface
@@ -206,6 +223,7 @@ cfg_if::cfg_if! {
 
         pub use kaspa_addresses::{Address, Version as AddressVersion};
         pub use kaspa_wallet_keys::prelude::*;
+        pub use kaspa_bip32::*;
         pub use kaspa_wasm_core::types::*;
     }