From c1a89575b30218ba449500fb6034a413f96e95b7 Mon Sep 17 00:00:00 2001 From: Christian Smith Date: Tue, 9 Jan 2024 20:00:30 +0000 Subject: [PATCH] initial commit --- .gitignore | 1 + Cargo.lock | 2228 ++++++++++++++++++++++++++++++ Cargo.toml | 65 + LICENSE | 21 + README.md | 86 ++ build.rs | 26 + src/balance_cache.rs | 121 ++ src/bin/client/main.rs | 512 +++++++ src/bin/wallet/main.rs | 1059 +++++++++++++++ src/block.rs | 501 +++++++ src/block_header_hasher.rs | 408 ++++++ src/block_queue.rs | 113 ++ src/block_storage.rs | 59 + src/block_storage_disk.rs | 302 +++++ src/checkpoints.rs | 86 ++ src/constants.rs | 89 ++ src/cuda/BUILDING.md | 23 + src/cuda/CMakeLists.txt | 11 + src/cuda/LICENSE | 22 + src/cuda/README.md | 69 + src/cuda/mine.cu | 476 +++++++ src/cuda/sha3.cc | 161 +++ src/cuda/sha3.cu | 159 +++ src/cuda/sha3.h | 33 + src/cuda/sha3_ctx.h | 20 + src/cuda/sha3_cu.h | 32 + src/dns.rs | 245 ++++ src/error.rs | 162 +++ src/genesis.rs | 24 + src/gpu.rs | 60 + src/irc.rs | 186 +++ src/ledger.rs | 167 +++ src/ledger_disk.rs | 1087 +++++++++++++++ src/lib.rs | 32 + src/miner.rs | 451 ++++++ src/opencl/CMakeLists.txt | 10 + src/opencl/COPYING | 340 +++++ src/opencl/LICENSE | 22 + src/opencl/README.md | 10 + src/opencl/cruzbit.cl | 292 ++++ src/opencl/cruzbit.h | 4 + src/opencl/mine.cc | 137 ++ src/opencl/ocl.cc | 263 ++++ src/opencl/ocl.h | 37 + src/opencl/sha3.cc | 143 ++ src/opencl/sha3.h | 38 + src/peer.rs | 2263 +++++++++++++++++++++++++++++++ src/peer_manager.rs | 1118 +++++++++++++++ src/peer_storage.rs | 55 + src/peer_storage_disk.rs | 468 +++++++ src/processor.rs | 1669 +++++++++++++++++++++++ src/protocol.rs | 553 ++++++++ src/shutdown.rs | 97 ++ src/tls.rs | 176 +++ src/transaction.rs | 435 ++++++ src/transaction_queue.rs | 49 + src/transaction_queue_memory.rs | 194 +++ src/utils.rs | 59 + src/wallet.rs | 1053 ++++++++++++++ 59 files changed, 18582 insertions(+) create mode 100644 .gitignore create mode 100644 Cargo.lock create mode 100644 Cargo.toml 
create mode 100644 LICENSE create mode 100644 README.md create mode 100644 build.rs create mode 100644 src/balance_cache.rs create mode 100644 src/bin/client/main.rs create mode 100644 src/bin/wallet/main.rs create mode 100644 src/block.rs create mode 100644 src/block_header_hasher.rs create mode 100644 src/block_queue.rs create mode 100644 src/block_storage.rs create mode 100644 src/block_storage_disk.rs create mode 100644 src/checkpoints.rs create mode 100644 src/constants.rs create mode 100644 src/cuda/BUILDING.md create mode 100644 src/cuda/CMakeLists.txt create mode 100644 src/cuda/LICENSE create mode 100644 src/cuda/README.md create mode 100644 src/cuda/mine.cu create mode 100644 src/cuda/sha3.cc create mode 100644 src/cuda/sha3.cu create mode 100644 src/cuda/sha3.h create mode 100644 src/cuda/sha3_ctx.h create mode 100644 src/cuda/sha3_cu.h create mode 100644 src/dns.rs create mode 100644 src/error.rs create mode 100644 src/genesis.rs create mode 100644 src/gpu.rs create mode 100644 src/irc.rs create mode 100644 src/ledger.rs create mode 100644 src/ledger_disk.rs create mode 100644 src/lib.rs create mode 100644 src/miner.rs create mode 100644 src/opencl/CMakeLists.txt create mode 100644 src/opencl/COPYING create mode 100644 src/opencl/LICENSE create mode 100644 src/opencl/README.md create mode 100644 src/opencl/cruzbit.cl create mode 100644 src/opencl/cruzbit.h create mode 100644 src/opencl/mine.cc create mode 100644 src/opencl/ocl.cc create mode 100644 src/opencl/ocl.h create mode 100644 src/opencl/sha3.cc create mode 100644 src/opencl/sha3.h create mode 100644 src/peer.rs create mode 100644 src/peer_manager.rs create mode 100644 src/peer_storage.rs create mode 100644 src/peer_storage_disk.rs create mode 100644 src/processor.rs create mode 100644 src/protocol.rs create mode 100644 src/shutdown.rs create mode 100644 src/tls.rs create mode 100644 src/transaction.rs create mode 100644 src/transaction_queue.rs create mode 100644 src/transaction_queue_memory.rs 
create mode 100644 src/utils.rs create mode 100644 src/wallet.rs diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..ea8c4bf --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +/target diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 0000000..00ef0f3 --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,2228 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "addr2line" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "aead" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" +dependencies = [ + "crypto-common", + "generic-array", +] + +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "argon2" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17ba4cac0a46bc1d2912652a751c47f2a9f3a7fe89bcae2275d418f5270402f9" +dependencies = [ + "base64ct", + "blake2", + "cpufeatures", + "password-hash", +] + +[[package]] +name = "attohttpc" +version = "0.16.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"fdb8867f378f33f78a811a8eb9bf108ad99430d7aad43315dd9319c827ef6247" +dependencies = [ + "http 0.2.11", + "log", + "url", + "wildmatch", +] + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "backtrace" +version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "base64" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" + +[[package]] +name = "base64" +version = "0.21.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" + +[[package]] +name = "base64ct" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" + +[[package]] +name = "bincode" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" + +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest", +] + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "bumpalo" +version = "3.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" + +[[package]] +name = "cc" +version = "1.0.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +dependencies = [ + "libc", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "chrono" +version = "0.4.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" +dependencies = [ + "android-tzdata", + "iana-time-zone", + "js-sys", + "num-traits", + "serde", + "wasm-bindgen", + "windows-targets 0.48.5", +] + +[[package]] +name = "cipher" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +dependencies = [ + "crypto-common", + "inout", + "zeroize", +] + +[[package]] +name = "cmake" +version = 
"0.1.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" +dependencies = [ + "cc", +] + +[[package]] +name = "console" +version = "0.15.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8" +dependencies = [ + "encode_unicode", + "lazy_static", + "libc", + "unicode-width", + "windows-sys 0.45.0", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" + +[[package]] +name = "cpufeatures" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +dependencies = [ + "libc", +] + +[[package]] +name = "cruzbit" +version = "1.0.0" +dependencies = [ + "argon2", + "base64ct", + "bincode", + "cmake", + "console", + "cruzbit-leveldb", + "crypto_secretbox", + "cuckoofilter", + "dialoguer", + "domain", + "ed25519-compact", + "env_logger", + "faster-hex", + "futures", + "getopts", + "humantime", + "ibig", + "igd", + "irc", + "log", + "lz4", + "network-interface", + "num-bigint", + "rand 0.8.5", + "rcgen", + "rustls-pemfile", + "serde", + "serde_json", + "serde_with", + "sha3", + "tempfile", + "thiserror", + "tokio", + "tokio-rustls 0.25.0", + "tokio-tungstenite", +] + +[[package]] +name = "cruzbit-leveldb" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "908949eac1a677c1690f4d1d3367c99247c72a537f12e1baae03fef9b977f4ed" +dependencies = [ + "cruzbit-leveldb-sys", + "libc", +] + +[[package]] +name = "cruzbit-leveldb-sys" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6aca7f71b713516efeff011de3bb63ba56c46edb86da7c792eb64bae508ac09c" 
+dependencies = [ + "cmake", + "ffi-opaque", + "libc", + "num_cpus", +] + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "rand_core 0.6.4", + "typenum", +] + +[[package]] +name = "crypto_secretbox" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d6cf87adf719ddf43a805e92c6870a531aedda35ff640442cbaf8674e141e1" +dependencies = [ + "aead", + "cipher", + "generic-array", + "poly1305", + "salsa20", + "subtle", + "zeroize", +] + +[[package]] +name = "ct-codecs" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3b7eb4404b8195a9abb6356f4ac07d8ba267045c8d6d220ac4dc992e6cc75df" + +[[package]] +name = "cuckoofilter" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b810a8449931679f64cd7eef1bbd0fa315801b6d5d9cdc1ace2804d6529eee18" +dependencies = [ + "byteorder", + "fnv", + "rand 0.7.3", +] + +[[package]] +name = "darling" +version = "0.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0209d94da627ab5605dcccf08bb18afa5009cfbef48d8a8b7d7bdbc79be25c5e" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "177e3443818124b357d8e76f53be906d60937f0d3a90773a664fa63fa253e621" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn", +] + +[[package]] +name = "darling_macro" +version = "0.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" +dependencies = [ + "darling_core", + "quote", + "syn", +] + +[[package]] +name = "data-encoding" +version = 
"2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" + +[[package]] +name = "deranged" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +dependencies = [ + "powerfmt", + "serde", +] + +[[package]] +name = "dialoguer" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "658bce805d770f407bc62102fca7c2c64ceef2fbcb2b8bd19d2765ce093980de" +dependencies = [ + "console", + "shell-words", + "tempfile", + "thiserror", + "zeroize", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", + "subtle", +] + +[[package]] +name = "domain" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e853e3f6d4c6e52a4d73a94c1810c66ad71958fbe24934a7119b447f425aed76" +dependencies = [ + "bytes", + "octseq", + "rand 0.8.5", + "time", +] + +[[package]] +name = "ed25519-compact" +version = "2.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a667e6426df16c2ac478efa4a439d0e674cba769c5556e8cf221739251640c8c" +dependencies = [ + "ct-codecs", + "getrandom 0.2.11", +] + +[[package]] +name = "encode_unicode" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" + +[[package]] +name = "encoding" +version = "0.2.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b0d943856b990d12d3b55b359144ff341533e516d94098b1d3fc1ac666d36ec" +dependencies = [ + "encoding-index-japanese", + "encoding-index-korean", + "encoding-index-simpchinese", + 
"encoding-index-singlebyte", + "encoding-index-tradchinese", +] + +[[package]] +name = "encoding-index-japanese" +version = "1.20141219.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04e8b2ff42e9a05335dbf8b5c6f7567e5591d0d916ccef4e0b1710d32a0d0c91" +dependencies = [ + "encoding_index_tests", +] + +[[package]] +name = "encoding-index-korean" +version = "1.20141219.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dc33fb8e6bcba213fe2f14275f0963fd16f0a02c878e3095ecfdf5bee529d81" +dependencies = [ + "encoding_index_tests", +] + +[[package]] +name = "encoding-index-simpchinese" +version = "1.20141219.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d87a7194909b9118fc707194baa434a4e3b0fb6a5a757c73c3adb07aa25031f7" +dependencies = [ + "encoding_index_tests", +] + +[[package]] +name = "encoding-index-singlebyte" +version = "1.20141219.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3351d5acffb224af9ca265f435b859c7c01537c0849754d3db3fdf2bfe2ae84a" +dependencies = [ + "encoding_index_tests", +] + +[[package]] +name = "encoding-index-tradchinese" +version = "1.20141219.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd0e20d5688ce3cab59eb3ef3a2083a5c77bf496cb798dc6fcdb75f323890c18" +dependencies = [ + "encoding_index_tests", +] + +[[package]] +name = "encoding_index_tests" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a246d82be1c9d791c5dfde9a2bd045fc3cbba3fa2b11ad558f27d01712f00569" + +[[package]] +name = "env_logger" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95b3f3e67048839cb0d0781f445682a35113da7121f7c949db0e2be96a4fbece" +dependencies = [ + "humantime", + "log", +] + +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "errno" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "faster-hex" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2a2b11eda1d40935b26cf18f6833c526845ae8c41e58d09af6adeb6f0269183" +dependencies = [ + "serde", +] + +[[package]] +name = "fastrand" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" + +[[package]] +name = "ffi-opaque" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec54ac60a7f2ee9a97cad9946f9bf629a3bc6a7ae59e68983dc9318f5a54b81a" + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "futures" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name 
= "futures-core" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" + +[[package]] +name = "futures-io" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" + +[[package]] +name = "futures-sink" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" + +[[package]] +name = "futures-task" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" + +[[package]] +name = "futures-util" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +dependencies = [ + "futures-core", + "futures-sink", + "futures-task", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", + "zeroize", +] + +[[package]] +name = "getopts" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5" +dependencies = [ + "unicode-width", +] + +[[package]] +name = "getrandom" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.9.0+wasi-snapshot-preview1", +] + +[[package]] +name = "getrandom" +version = "0.2.11" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", + "wasm-bindgen", +] + +[[package]] +name = "gimli" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" + +[[package]] +name = "hermit-abi" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "http" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "httparse" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" + +[[package]] +name = "humantime" +version = "2.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + +[[package]] +name = "iana-time-zone" +version = "0.1.59" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6a67363e2aa4443928ce15e57ebae94fd8949958fd1223c4cfc0cd473ad7539" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "ibig" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1fcc7f316b2c079dde77564a1360639c1a956a23fa96122732e416cb10717bb" +dependencies = [ + "cfg-if", + "num-traits", + "rand 0.8.5", + "static_assertions", +] + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "idna" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "igd" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "556b5a75cd4adb7c4ea21c64af1c48cefb2ce7d43dc4352c720a1fe47c21f355" +dependencies = [ + "attohttpc", + "log", + "rand 0.8.5", + "url", + "xmltree", +] + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", 
+ "serde", +] + +[[package]] +name = "indexmap" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" +dependencies = [ + "equivalent", + "hashbrown 0.14.3", + "serde", +] + +[[package]] +name = "inout" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" +dependencies = [ + "generic-array", +] + +[[package]] +name = "instant" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "irc" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5510c4c4631e53c57d6b05c44ab8447d1db6beef28fb9d12c4d6a46fad9dfcc" +dependencies = [ + "chrono", + "encoding", + "futures-util", + "irc-proto", + "log", + "parking_lot", + "pin-project", + "thiserror", + "tokio", + "tokio-rustls 0.22.0", + "tokio-stream", + "tokio-util", + "webpki-roots 0.20.0", +] + +[[package]] +name = "irc-proto" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55fa0a52d825e59ba8aea5b7503890245aea000f77e68d9b1903f3491fa33643" +dependencies = [ + "bytes", + "encoding", + "thiserror", + "tokio", + "tokio-util", +] + +[[package]] +name = "itoa" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" + +[[package]] +name = "js-sys" +version = "0.3.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "keccak" +version = "0.1.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.152" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13e3bf6590cbc649f4d1a3eefc9d5d6eb746f5200ffb04e5e142700b8faa56e7" + +[[package]] +name = "linux-raw-sys" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" + +[[package]] +name = "lock_api" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" + +[[package]] +name = "lz4" +version = "1.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e9e2dd86df36ce760a60f6ff6ad526f7ba1f14ba0356f8254fb6905e6494df1" +dependencies = [ + "libc", + "lz4-sys", +] + +[[package]] +name = "lz4-sys" +version = "1.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57d27b317e207b10f69f5e75494119e391a96f48861ae870d1da6edac98ca900" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "memchr" +version = "2.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" + +[[package]] +name = "miniz_oxide" +version = "0.7.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +dependencies = [ + "adler", +] + +[[package]] +name = "mio" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +dependencies = [ + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.48.0", +] + +[[package]] +name = "network-interface" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d68759ef97fe9c9e46f79ea8736c19f1d28992e24c8dc8ce86752918bfeaae7" +dependencies = [ + "cc", + "libc", + "thiserror", + "winapi", +] + +[[package]] +name = "num-bigint" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-integer" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +dependencies = [ + "autocfg", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "object" +version = "0.32.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +dependencies = [ + "memchr", +] + +[[package]] +name = "octseq" +version 
= "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d92b38a4aabbacf619b8083841713216e7668178422decfe06bbc70643024c5d" +dependencies = [ + "bytes", +] + +[[package]] +name = "once_cell" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" + +[[package]] +name = "opaque-debug" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" + +[[package]] +name = "parking_lot" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +dependencies = [ + "instant", + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" +dependencies = [ + "cfg-if", + "instant", + "libc", + "redox_syscall 0.2.16", + "smallvec", + "winapi", +] + +[[package]] +name = "password-hash" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" +dependencies = [ + "base64ct", + "rand_core 0.6.4", + "subtle", +] + +[[package]] +name = "pem" +version = "3.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b8fcc794035347fb64beda2d3b462595dd2753e3f268d89c5aae77e8cf2c310" +dependencies = [ + "base64 0.21.5", + "serde", +] + +[[package]] +name = "percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + +[[package]] +name = "pin-project" +version = "1.1.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "poly1305" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" +dependencies = [ + "cpufeatures", + "opaque-debug", + "universal-hash", +] + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + +[[package]] +name = "proc-macro2" +version = "1.0.76" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +dependencies = [ + 
"proc-macro2", +] + +[[package]] +name = "rand" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +dependencies = [ + "getrandom 0.1.16", + "libc", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +dependencies = [ + "ppv-lite86", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +dependencies = [ + "getrandom 0.1.16", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.11", +] + +[[package]] +name = "rand_hc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +dependencies = [ + "rand_core 0.5.1", +] + +[[package]] +name = "rcgen" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5d918c80c5a4c7560db726763020bd16db179e4d5b828078842274a443addb5d" +dependencies = [ + "pem", + "ring 0.17.7", + "time", + "yasna", +] + +[[package]] +name = "redox_syscall" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +dependencies = [ + "bitflags 1.3.2", +] + +[[package]] +name = "redox_syscall" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +dependencies = [ + "bitflags 1.3.2", +] + +[[package]] +name = "ring" +version = "0.16.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +dependencies = [ + "cc", + "libc", + "once_cell", + "spin 0.5.2", + "untrusted 0.7.1", + "web-sys", + "winapi", +] + +[[package]] +name = "ring" +version = "0.17.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" +dependencies = [ + "cc", + "getrandom 0.2.11", + "libc", + "spin 0.9.8", + "untrusted 0.9.0", + "windows-sys 0.48.0", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" + +[[package]] +name = "rustix" +version = "0.38.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" +dependencies = [ + "bitflags 2.4.1", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustls" +version = "0.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" +dependencies = [ + "base64 0.13.1", + 
"log", + "ring 0.16.20", + "sct", + "webpki", +] + +[[package]] +name = "rustls" +version = "0.22.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" +dependencies = [ + "log", + "ring 0.17.7", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-pemfile" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35e4980fa29e4c4b212ffb3db068a564cbf560e51d3944b7c88bd8bf5bec64f4" +dependencies = [ + "base64 0.21.5", + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e9d979b3ce68192e42760c7810125eb6cf2ea10efae545a156063e61f314e2a" + +[[package]] +name = "rustls-webpki" +version = "0.102.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef4ca26037c909dedb327b48c3327d0ba91d3dd3c4e05dad328f210ffb68e95b" +dependencies = [ + "ring 0.17.7", + "rustls-pki-types", + "untrusted 0.9.0", +] + +[[package]] +name = "ryu" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" + +[[package]] +name = "salsa20" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97a22f5af31f73a954c10289c93e8a50cc23d971e80ee446f1f6f7137a088213" +dependencies = [ + "cipher", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "sct" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" +dependencies = [ + "ring 0.16.20", + "untrusted 0.7.1", +] + 
+[[package]] +name = "serde" +version = "1.0.195" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.195" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.111" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_with" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64cd236ccc1b7a29e7e2739f27c0b2dd199804abc4290e32f59f3b68d6405c23" +dependencies = [ + "base64 0.21.5", + "chrono", + "hex", + "indexmap 1.9.3", + "indexmap 2.1.0", + "serde", + "serde_json", + "serde_with_macros", + "time", +] + +[[package]] +name = "serde_with_macros" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93634eb5f75a2323b16de4748022ac4297f9e76b6dced2be287a099f41b5e788" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sha1" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest", + "keccak", +] + +[[package]] +name = "shell-words" +version = "1.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" + +[[package]] +name = "signal-hook-registry" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +dependencies = [ + "libc", +] + +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + +[[package]] +name = "smallvec" +version = "1.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" + +[[package]] +name = "socket2" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + +[[package]] +name = "subtle" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" + +[[package]] +name = "syn" +version = "2.0.48" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tempfile" +version = "3.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa" +dependencies = [ + "cfg-if", + "fastrand", + "redox_syscall 0.4.1", + "rustix", + "windows-sys 0.52.0", +] + +[[package]] +name = "thiserror" +version = "1.0.56" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.56" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "time" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e" +dependencies = [ + "deranged", + "itoa", + "powerfmt", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" + +[[package]] +name = "time-macros" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26197e33420244aeb70c3e8c78376ca46571bc4e701e4791c2cd9f57dcb3a43f" +dependencies = [ + "time-core", +] + +[[package]] +name = "tinyvec" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.35.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104" +dependencies = [ + "backtrace", + "bytes", + "libc", + "mio", + "num_cpus", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "windows-sys 0.48.0", +] + +[[package]] +name = "tokio-macros" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tokio-rustls" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" +dependencies = [ + "rustls 0.19.1", + "tokio", + "webpki", +] + +[[package]] +name = "tokio-rustls" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +dependencies = [ + "rustls 0.22.2", + "rustls-pki-types", + "tokio", +] + +[[package]] +name = "tokio-stream" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-tungstenite" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c83b561d025642014097b66e6c1bb422783339e0909e4429cde4749d1990bc38" 
+dependencies = [ + "futures-util", + "log", + "rustls 0.22.2", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.25.0", + "tungstenite", + "webpki-roots 0.26.0", +] + +[[package]] +name = "tokio-util" +version = "0.6.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36943ee01a6d67977dd3f84a5a1d2efeb4ada3a1ae771cadfaa535d9d9fc6507" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "log", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tungstenite" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ef1a641ea34f399a848dea702823bbecfb4c486f911735368f1f137cb8257e1" +dependencies = [ + "byteorder", + "bytes", + "data-encoding", + "http 1.0.0", + "httparse", + "log", + "rand 0.8.5", + "rustls 0.22.2", + "rustls-pki-types", + "sha1", + "thiserror", + "url", + "utf-8", +] + +[[package]] +name = "typenum" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" + +[[package]] +name = "unicode-bidi" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f2528f27a9eb2b21e69c95319b30bd0efd85d09c379741b0f78ea1d86be2416" + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "unicode-normalization" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-width" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" + +[[package]] +name = "universal-hash" +version = 
"0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" +dependencies = [ + "crypto-common", + "subtle", +] + +[[package]] +name = "untrusted" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", +] + +[[package]] +name = "utf-8" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = 
"wasm-bindgen-backend" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f" + +[[package]] +name = "web-sys" +version = "0.3.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50c24a44ec86bb68fbecd1b3efed7e85ea5621b39b35ef2766b66cd984f8010f" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki" +version = "0.21.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" +dependencies = [ + "ring 0.16.20", + "untrusted 0.7.1", +] + +[[package]] +name = "webpki-roots" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f20dea7535251981a9670857150d571846545088359b28e4951d350bdaf179f" +dependencies = [ + "webpki", +] + +[[package]] +name = "webpki-roots" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0de2cfda980f21be5a7ed2eadb3e6fe074d56022bea2cdeb1a62eb220fc04188" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "wildmatch" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f44b95f62d34113cf558c93511ac93027e03e9c29a60dd0fd70e6e025c7270a" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-core" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +dependencies = [ + "windows-targets 0.52.0", +] + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ 
+ "windows-targets 0.52.0", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +dependencies = [ + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" + +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" + +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" + +[[package]] +name = "windows_x86_64_gnu" +version = 
"0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" + +[[package]] +name = "xml-rs" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0fcb9cbac069e033553e8bb871be2fbdffcab578eb25bd0f7c508cedc6dcd75a" + +[[package]] +name = "xmltree" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7d8a75eaf6557bb84a65ace8609883db44a29951042ada9b393151532e41fcb" +dependencies = [ + "xml-rs", +] + +[[package]] +name = "yasna" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" +dependencies = [ + "time", +] + +[[package]] +name = "zeroize" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..8e68f33 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,65 @@ +[package] +name = "cruzbit" +version = "1.0.0" +authors = ["Christian Smith "] +edition = "2021" +rust-version = "1.70.0" +description = "A simple decentralized peer-to-peer ledger implementation" +readme = "README.md" +homepage = "https://cruzb.it" +repository = "https://github.com/christian-smith/cruzbit" +keywords = ["bitcoin", "crypto"] +categories = ["cryptography::cryptocurrencies"] +license = "MIT" +default-run = "client" + +[[bin]] +name = "client" + +[[bin]] +name = "wallet" + +[dependencies] +argon2 = "0.5" +base64ct = { version = "1.6", features = ["std"] } +bincode = "1.3" +console = "0.15" +cruzbit-leveldb = "1.0" +crypto_secretbox = "0.1" +cuckoofilter = "0.5" +dialoguer = { version = "0.11", features = ["completion"] } +domain = "0.9" +ed25519-compact = "2.0" +env_logger = { version = "0.10", default-features = false, features = ["humantime"] } +faster-hex = { version = "0.9", features = ["serde"] } +futures = { version = "0.3", default-features = false } +getopts = "0.2" +humantime = "2.1" +ibig = "0.3" +igd = "0.12" +irc = { version = "0.15", default-features = false, features = ["tls-rust"] } +log = "0.4" +lz4 = 
"1.24" +network-interface = "1.1" +num-bigint = "0.4" +rand = { version = "0.8", default-features = false, features = ["std_rng"] } +rcgen = "0.12" +rustls-pemfile = "2.0" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +serde_with = "3.4" +sha3 = "0.10" +thiserror = "1.0" +tokio = { version = "1.35", features = ["macros", "net", "rt-multi-thread", "signal", "io-util"] } +tokio-rustls = "0.25" +tokio-tungstenite = { version = "0.21", features = ["rustls-tls-webpki-roots"] } + +[dev-dependencies] +tempfile = "3.9" + +[build-dependencies] +cmake = "0.1" + +[features] +cuda = [] +opencl = [] diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..8cb90e8 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 cruzbit developers + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md new file mode 100644 index 0000000..0e6afca --- /dev/null +++ b/README.md @@ -0,0 +1,86 @@ +

+ cruzbit fun image +

+ +
+ + + + + + +
Image1+ Image3
+
+ +## What is cruzbit? +[cruzbit](https://cruzb.it) is a simple decentralized peer-to-peer ledger implementation. cruzbit is very similar to [Bitcoin](https://www.bitcoin.com/bitcoin.pdf) with the following notable differences: + +* **Newer crypto** - The [Ed25519 signature system](https://ed25519.cr.yp.to/) is used for signing transactions. This system has a number of nice properties to protect users from security risks present with naive usage of ECDSA. The 256-bit version of the [SHA-3 hashing algorithm](https://en.wikipedia.org/wiki/SHA-3) is used for all hashing operations in the application, including the proof-of-work function but excluding hashing performed internally by the signature system. It's reported to be +[blazing fast](https://keccak.team/2017/is_sha3_slow.html) when implemented in hardware. [NaCl Secretbox](https://nacl.cr.yp.to/secretbox.html) is used to encrypt wallet private keys (not part of the protocol.) +* **Simplified transaction format** - No inputs and outputs. Just public key sender and receiver with a time, amount, explicit fee, memo field, pseudo-random nonce, series and signature. The series is incremented network-wide roughly once a week based on block height to allow for pruning transaction history. Also included are 2 optional fields for specifying maturity and expiration, both at a given block height. +* **No UTXO set** - This is a consequence of the second point. It considerably simplifies ledger construction and management as well as requires a wallet to know only about its public key balances and the current block height. It also allows the ledger to map more directly to the well-understood concept of a [double-entry bookkeeping system](https://en.wikipedia.org/wiki/Double-entry_bookkeeping_system). In cruzbit, the sum of all public key balances must equal the issuance at the current block height. This isn't the first ledger to get rid of the UTXO set model but I think we do it in a uniquely simple way. 
+* **No scripting** - This is another consequence of the second point. Signatures are simply signatures and not tiny scripts. It's a bit simpler and arguably safer. It does limit functionality, e.g. there is no native notion of a multi-signature transaction, however, depending on your needs, you can come _close_ to accomplishing that using [mechanisms external to cruzbit](https://en.wikipedia.org/wiki/Shamir%27s_Secret_Sharing). +* **No fixed block size limit** - Since transactions in cruzbit are more-or-less fixed size we cap blocks by transaction count instead, with the initial limit being 10,000 transactions. This per-block transaction limit increases with "piecewise-linear-between-doublings growth." This means the limit doubles roughly every 2 years by block height and increases linearly between doublings up until a hard limit of 2,147,483,647. This was directly inspired by [BIP 101](https://github.com/bitcoin/bips/blob/master/bip-0101.mediawiki). We use block height instead of time since another change in cruzbit is that all block headers contain the height (as well as the total cumulative chain work.) +* **Reference implementation is in [Go](https://golang.org/)** - Perhaps more accessible than C++. Hopefully it makes blockchain programming a bit easier to understand and attracts a wider variety of developer interest. +* **Web-friendly peer protocol** - Peer communication is via secure [WebSockets](https://developer.mozilla.org/en-US/docs/Web/API/WebSockets_API). And the peer protocol and all primitives are structured in [JSON](https://www.json.org/). This should make working with the protocol easy for just about every modern development environment. + +## Getting started mining +If you missed out on the opportunity to mine other cryptocurrencies you could give cruzbit a try! + +### 1. 
Rust needs to be installed + +- macOS / Linux (from https://www.rust-lang.org/learn/get-started) + + * ```curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh``` + +- Windows: + - https://www.petergirnus.com/blog/how-to-install-rust-on-windows + - https://forge.rust-lang.org/infra/other-installation-methods.html + +### 2. cmake needs to be installed +- macOS + - ```brew install cmake``` + +- Debian Linux: + - ```apt install cmake``` + +- Windows + - ```winget install Kitware.CMake``` + +### 3. Running the client +Note: --release flag is needed for compiler optimizations + +### Mining +Like Bitcoin, any blocks you mine will need to have an additional 100 blocks mined on top of them prior to the new cruzbits being applied to your balance. This is to mitigate a potentially poor user experience in the case of honest blockchain reorganizations. + +#### Mining with multiple keys +```cargo run --release -- --datadir datadir --numminers 1 --keyfile keys.txt``` + +Instead of mining with a single public key, you can use the wallet to generate many keys and dump the public keys to a text file which the client will accept as a `--keyfile` argument. The wallet commands to do this are `genkeys` and `dumpkeys`. + +#### Mining with a single key +```cargo run --release -- --datadir datadir --numminers 1 --pubkey [pub key from wallet]``` + +#### GPU mining with OpenCL +```cargo run --release --features opencl -- --datadir datadir --numminers 1 --keyfile keys.txt``` + +#### GPU mining with CUDA +```cargo run --release --features cuda -- --datadir datadir --numminers 1 --keyfile keys.txt``` + +#### Not interested in mining but want to play with cruzbit? +No problem! You can run the client with `--numminers 0` so that it can function as your wallet peer. + +```cargo run --release -- --datadir datadir --numminers 0``` + +### 4. 
Running the wallet +```cargo run --release --bin wallet -- --walletdb walletdata``` + +## Database compatibility with golang cruzbit +- client db data is not compatible +- wallet db data is compatible + +## License +cruzbit is released under the terms of the MIT license. See [LICENSE](https://github.com/christian-smith/cruzbit/blob/master/LICENSE) for more information or see https://opensource.org/licenses/MIT. + +## Join us on Discord +**[Cruzbit Discord](https://discord.gg/MRrEHYw)** for general chat as well as updates, including development status. diff --git a/build.rs b/build.rs new file mode 100644 index 0000000..7ed8905 --- /dev/null +++ b/build.rs @@ -0,0 +1,26 @@ +use std::env; +use std::path::{Path, PathBuf}; +use std::process::exit; + +fn main() { + let lib_dir = PathBuf::from(env::var("OUT_DIR").unwrap()).join("lib"); + let is_cuda = env::var("CARGO_FEATURE_CUDA").is_ok(); + let is_opencl = env::var("CARGO_FEATURE_OPENCL").is_ok(); + + if is_cuda && is_opencl { + eprint!("can only build with either cuda or opencl enabled"); + exit(1); + } else if is_cuda { + let _ = cmake::Config::new(Path::new("src/cuda")) + .define("CMAKE_INSTALL_LIBDIR", &lib_dir) + .build(); + println!("cargo:rustc-link-search=native={}", lib_dir.display()); + println!("cargo:rustc-link-lib=cruzbit_cuda"); + } else if is_opencl { + let _ = cmake::Config::new(Path::new("src/opencl")) + .define("CMAKE_INSTALL_LIBDIR", &lib_dir) + .build(); + println!("cargo:rustc-link-search=native={}", lib_dir.display()); + println!("cargo:rustc-link-lib=cruzbit_ocl"); + } +} diff --git a/src/balance_cache.rs b/src/balance_cache.rs new file mode 100644 index 0000000..4c5c5ff --- /dev/null +++ b/src/balance_cache.rs @@ -0,0 +1,121 @@ +use std::collections::HashMap; +use std::sync::Arc; + +use ed25519_compact::PublicKey; +use thiserror::Error; + +use crate::ledger::LedgerError; +use crate::ledger_disk::LedgerDisk; +use crate::transaction::Transaction; + +/// Maintains a partial unconfirmed view of the 
ledger. +/// It's used by Ledger when (dis-)connecting blocks and by TransactionQueueMemory +/// when deciding whether or not to add a transaction to the queue. +pub struct BalanceCache { + ledger: Arc, + min_balance: u64, + cache: HashMap, +} + +impl BalanceCache { + /// Returns a new instance of a BalanceCache. + pub fn new(ledger: Arc, min_balance: u64) -> Self { + BalanceCache { + ledger, + min_balance, + cache: HashMap::new(), + } + } + + /// Resets the balance cache. + pub fn reset(&mut self) { + self.cache = HashMap::new(); + } + + /// Applies the effect of the transaction to the involved parties' cached balances. + /// It returns false if the sender balance would go negative as a result of applying this transaction. + /// It also returns false if a remaining non-zero sender balance would be less than min_balance. + pub fn apply(&mut self, tx: &Transaction) -> Result { + if !tx.is_coinbase() { + // check and debit sender balance + let fpk = tx.from.expect("transaction should have a sender"); + let mut sender_balance = match self.cache.get(&fpk).copied() { + Some(v) => v, + None => self.ledger.get_public_key_balance(&fpk)?, + }; + let total_spent = tx.amount + tx.fee.expect("transaction should have a fee"); + if total_spent > sender_balance { + return Ok(false); + } + sender_balance -= total_spent; + if sender_balance > 0 && sender_balance < self.min_balance { + return Ok(false); + } + if let Some(balance) = self.cache.get_mut(&fpk) { + *balance = sender_balance; + } else { + self.cache.insert(fpk, sender_balance); + } + } + + // credit recipient balance + let tpk = tx.to; + let mut recipient_balance = match self.cache.get(&tpk).copied() { + Some(v) => v, + None => self.ledger.get_public_key_balance(&tx.to)?, + }; + recipient_balance += tx.amount; + self.cache.insert(tpk, recipient_balance); + + Ok(true) + } + + /// Undoes the effects of a transaction on the involved parties' cached balances. 
+ pub fn undo(&mut self, tx: &Transaction) -> Result<(), BalanceCacheError> { + if !tx.is_coinbase() { + // credit balance for sender + let fpk = tx.from.expect("transaction should have a sender"); + let mut sender_balance = match self.cache.get(&fpk).copied() { + Some(v) => v, + None => { + let from = tx.from.expect("transaction should have a sender"); + self.ledger.get_public_key_balance(&from)? + } + }; + let total_spent = tx.amount + tx.fee.expect("transaction should have a fee"); + sender_balance += total_spent; + if let Some(balance) = self.cache.get_mut(&fpk) { + *balance = sender_balance; + } else { + self.cache.insert(fpk, sender_balance); + } + } + + // debit recipient balance + let tpk = tx.to; + let recipient_balance = match self.cache.get(&tpk).copied() { + Some(v) => v, + None => self.ledger.get_public_key_balance(&tpk)?, + }; + if recipient_balance < tx.amount { + panic!("Recipient balance went negative") + } + *self + .cache + .get_mut(&tpk) + .expect("recipient should be cached") = recipient_balance - tx.amount; + + Ok(()) + } + + /// returns the underlying cache of balances. 
+ pub fn balances(&self) -> &HashMap { + &self.cache + } +} + +#[derive(Error, Debug)] +pub enum BalanceCacheError { + #[error("ledger")] + Ledger(#[from] LedgerError), +} diff --git a/src/bin/client/main.rs b/src/bin/client/main.rs new file mode 100644 index 0000000..c329b3a --- /dev/null +++ b/src/bin/client/main.rs @@ -0,0 +1,512 @@ +use std::collections::HashMap; +use std::env::args; +use std::fs::File; +use std::io::{BufRead, BufReader, Write}; +use std::path::PathBuf; +use std::process::{exit, ExitCode}; +use std::sync::{Arc, OnceLock}; + +use base64ct::{Base64, Encoding}; +use cruzbit::block::{Block, BlockID}; +use cruzbit::block_storage::BlockStorageError; +use cruzbit::block_storage_disk::BlockStorageDisk; +use cruzbit::constants::{DEFAULT_CRUZBIT_PORT, MAX_INBOUND_PEER_CONNECTIONS}; +use cruzbit::dns::DnsSeeder; +use cruzbit::error::{ + impl_debug_error_chain, DataError, EncodingError, ErrChain, FileError, JsonError, ParsingError, +}; +use cruzbit::genesis::GENESIS_BLOCK_JSON; +use cruzbit::ledger::LedgerError; +use cruzbit::ledger_disk::LedgerDisk; +use cruzbit::miner::{HashrateMonitor, Miner}; +use cruzbit::peer::PEER_ADDR_SELF; +use cruzbit::peer_manager::{ + determine_external_ip, have_local_ip_match, PeerManager, PeerManagerError, +}; +use cruzbit::peer_storage::PeerStorageError; +use cruzbit::peer_storage_disk::PeerStorageDisk; +use cruzbit::processor::{ProcessBlockError, Processor}; +use cruzbit::shutdown::{shutdown_channel, Shutdown}; +use cruzbit::transaction_queue_memory::TransactionQueueMemory; +use cruzbit::utils::resolve_host; +use ed25519_compact::PublicKey; +use env_logger::{Builder, Env}; +use getopts::Options; +use log::{error, info, Level}; +use thiserror::Error; +use tokio::signal; +use tokio::sync::mpsc::channel; + +static BAN_MAP: OnceLock> = OnceLock::new(); +static GENESIS_ID: OnceLock = OnceLock::new(); +static MEMO: OnceLock> = OnceLock::new(); +static PUB_KEYS: OnceLock> = OnceLock::new(); + +#[tokio::main] +async fn main() -> 
ExitCode { + match run().await { + Ok(_) => ExitCode::SUCCESS, + Err(err) => match err { + ClientError::Args(err) => { + println!("{}", err); + ExitCode::SUCCESS + } + _ => { + error!("{:?}", err); + ExitCode::FAILURE + } + }, + } +} + +async fn run() -> Result<(), ClientError> { + init_logger(); + let args = args().collect::>(); + let program = &args[0]; + + let mut opts = Options::new(); + opts.long_only(true); + opts.optopt( + "", + "banlist", + "Path to a file containing a list of banned host addresses", + "string", + ); + opts.optflag("", "compress", "Compress blocks on disk with lz4"); + opts.optopt( + "", + "datadir", + "Path to a directory to save block chain data", + "string", + ); + opts.optflag( + "", + "dnsseed", + "Run a DNS server to allow others to find peers", + ); + opts.optflag( + "", + "inlimit", + "Limit for the number of inbound peer connections.", + ); + opts.optopt( + "", + "keyfile", + "Path to a file containing public keys to use when mining +", + "string", + ); + opts.optopt( + "", + "memo", + "A memo to include in newly mined blocks", + "string", + ); + opts.optflag("", "noaccept", "Disable inbound peer connections"); + opts.optflag("", "noirc", "Disable use of IRC for peer discovery"); + opts.optopt("", "numminers", "Number of miners to run", "int"); + opts.optopt("", "peer", "Address of a peer to connect to", "string"); + opts.optopt( + "", + "port", + "Port to listen for incoming peer connections", + "int", + ); + opts.optflag( + "", + "prune", + "Prune transaction and public key transaction indices", + ); + opts.optopt( + "", + "pubkey", + "A public key which receives newly mined block rewards", + "string", + ); + opts.optopt( + "", + "tlscert", + "Path to a file containing a PEM-encoded X.509 certificate to use with TLS", + "string", + ); + opts.optopt( + "", + "tlskey", + "Path to a file containing a PEM-encoded private key to use with TLS", + "string", + ); + opts.optflag( + "", + "upnp", + "Attempt to forward the cruzbit port on 
your router with UPnP", + ); + + let Ok(matches) = opts.parse(&args[1..]) else { + print_usage(program, opts); + return Ok(()); + }; + let ban_list = matches.opt_str("banlist").map(PathBuf::from); + let compress = matches.opt_present("compress"); + let data_dir = match matches.opt_str("datadir") { + Some(data_dir) => PathBuf::from(data_dir), + None => { + return Err("-datadir argument required".into()); + } + }; + let dns_seed = matches.opt_present("dnsseed"); + let inbound_limit = matches + .opt_get_default("inlimit", MAX_INBOUND_PEER_CONNECTIONS) + .map_err(|_| "inlimit should be a number")?; + let key_file = matches.opt_str("keyfile").map(PathBuf::from); + let no_accept = !matches.opt_present("noaccept"); + let no_irc = matches.opt_present("noirc"); + let num_miners = matches + .opt_get_default("numminers", 1) + .map_err(|_| "numminers should be a number")?; + let peer = match matches.opt_str("peer") { + Some(mut peer) => { + // add default port if one was not supplied + if !peer.contains(':') { + peer = format!("{}:{}", peer, DEFAULT_CRUZBIT_PORT); + } + // parse and resolve hostname to ip + Some(resolve_host(&peer)?) 
+ } + None => None, + }; + let port = matches + .opt_get_default("port", DEFAULT_CRUZBIT_PORT) + .map_err(|_| "port should be a number")?; + let prune = matches.opt_present("prune"); + let pub_key = matches.opt_str("pubkey"); + let cert_path = matches.opt_str("tlscert").map(PathBuf::from); + let key_path = matches.opt_str("tlskey").map(PathBuf::from); + let upnp = matches.opt_present("upnp"); + + if num_miners > 0 { + if pub_key.is_none() && key_file.is_none() { + return Err( + "-pubkey or -keyfile argument required to receive newly mined block rewards".into(), + ); + } + if pub_key.is_some() && key_file.is_some() { + return Err("specify only one of -pubkey or -keyfile but not both".into()); + } + } + + if cert_path.is_some() && key_path.is_none() { + return Err("-tlskey argument missing".into()); + } + if cert_path.is_none() && key_path.is_some() { + return Err("-tlscert argument missing".into()); + } + + // initialize statics + let ban_map = BAN_MAP.get_or_init(|| { + if let Some(ban_list) = ban_list { + load_ban_list(ban_list).unwrap_or_else(|err| { + error!("{:?}", err); + exit(1); + }) + } else { + HashMap::new() + } + }); + + let genesis_block = + serde_json::from_str::(GENESIS_BLOCK_JSON).map_err(JsonError::Deserialize)?; + + let genesis_id = GENESIS_ID.get_or_init(|| genesis_block.id().expect("genesis block id")); + + let memo = MEMO.get_or_init(|| matches.opt_str("memo")); + + let pub_keys = PUB_KEYS.get_or_init(|| { + if num_miners > 0 { + load_public_keys(pub_key, key_file).unwrap_or_else(|err| { + error!("{:?}", err); + exit(1); + }) + } else { + Vec::new() + } + }); + + // initialize CUDA or OpenCL devices if enabled + #[cfg(any(feature = "cuda", feature = "opencl"))] + { + if num_miners > 0 { + let device_count = cruzbit::gpu::gpu_miner_init(); + let gpu = if cfg!(feature = "cuda") { + "CUDA" + } else { + "OpenCL" + }; + if device_count != num_miners { + return Err(ClientError::Args(format!( + "{} enabled but -numminers is {} and supported devices is 
{}", + gpu, num_miners, device_count, + ))); + } + info!("{} initialized", gpu); + } + } + + info!("Starting up..."); + info!("Genesis block ID: {}", genesis_id); + + // instantiate the block storage + let block_store = BlockStorageDisk::new( + data_dir.join("blocks"), + data_dir.join("headers.db"), + false, // not read only + compress, + )?; + + // instantiate the ledger + let ledger = LedgerDisk::new(data_dir.join("ledger.db"), Arc::clone(&block_store), prune)?; + + // instantiate peer storage + let peer_store = PeerStorageDisk::new(data_dir.join("peers.db"))?; + + // instantiate the transaction queue + let tx_queue = TransactionQueueMemory::new(Arc::clone(&ledger)); + + let mut shutdowns = Vec::new(); + let (shutdown_chan_tx, shutdown_chan_rx) = shutdown_channel(); + // create and run the processor + let processor = Processor::new( + genesis_id, + Arc::clone(&block_store), + Arc::clone(&tx_queue), + Arc::clone(&ledger), + shutdown_chan_rx, + ); + shutdowns.push(Shutdown::new(processor.spawn(), shutdown_chan_tx)); + + // process the genesis block + processor + .process_candidate_block(*genesis_id, genesis_block, PEER_ADDR_SELF) + .await?; + + if num_miners > 0 { + let (hash_update_chan_tx, hash_update_chan_rx) = channel(num_miners); + + // create and run miners + for i in 0..num_miners { + let (shutdown_chan_tx, shutdown_chan_rx) = shutdown_channel(); + let miner = Miner::new( + pub_keys, + memo, + Arc::clone(&block_store), + Arc::clone(&tx_queue), + Arc::clone(&ledger), + Arc::clone(&processor), + hash_update_chan_tx.clone(), + i, + shutdown_chan_rx, + ); + + shutdowns.push(Shutdown::new(miner.spawn(), shutdown_chan_tx)); + } + + // print hashrate updates + let (shutdown_chan_tx, shutdown_chan_rx) = shutdown_channel(); + let hashrate_monitor = + HashrateMonitor::new(num_miners, hash_update_chan_rx, shutdown_chan_rx); + shutdowns.push(Shutdown::new(hashrate_monitor.spawn(), shutdown_chan_tx)); + } else { + info!("Mining is currently disabled") + } + + // 
determine external ip + let my_external_ip = determine_external_ip().await; + let mut my_external_ip_upnp = None; + + if upnp && !no_accept { + info!("Enabling forwarding for port {}...", port); + match igd::search_gateway(Default::default()) { + Err(ref err) => info!("Failed to enable forwarding: {}", err), + Ok(gateway) => match gateway.get_external_ip() { + Err(ref err) => { + info!("Failed to enable port forwarding: {}", err); + } + Ok(ext_addr) => { + my_external_ip_upnp = Some(ext_addr); + info!("Successfully enabled port forwarding"); + } + }, + } + } + + // determine if we're open for connections + let open = if let Some(my_external_ip_upnp) = my_external_ip_upnp { + // if upnp enabled make sure the address returned matches the outside view + my_external_ip + .as_ref() + .map_or(false, |ip| my_external_ip_upnp == *ip) + } else { + // if no upnp see if any local routable IP matches the outside view + my_external_ip.as_ref().map_or(false, |ip| { + have_local_ip_match(ip) + .map_err(ClientError::from) + .unwrap_or_else(|err| { + error!("{:?}", err); + false + }) + }) + }; + + // start a dns server + if dns_seed { + let (shutdown_chan_tx, shutdown_chan_rx) = shutdown_channel(); + let dns_seeder = DnsSeeder::new( + Arc::clone(&peer_store), + port, + my_external_ip, + shutdown_chan_rx, + ) + .await; + shutdowns.push(Shutdown::new(dns_seeder.spawn(), shutdown_chan_tx)); + } + + // manage peer connections + let (shutdown_chan_tx, shutdown_chan_rx) = shutdown_channel(); + let peer_manager = PeerManager::new( + genesis_id, + peer_store, + block_store, + ledger, + processor, + tx_queue, + data_dir, + my_external_ip, + peer, + cert_path, + key_path, + port, + inbound_limit, + no_accept, + ban_map, + no_irc, + dns_seed, + open, + shutdown_chan_rx, + ); + shutdowns.push(Shutdown::new(peer_manager.spawn(), shutdown_chan_tx)); + let _ = signal::ctrl_c().await; + for shutdown in shutdowns.into_iter().rev() { + shutdown.send().await; + } + info!("Exiting"); + + Ok(()) +} + 
+fn load_public_keys( + pub_key_encoded: Option, + key_file: Option, +) -> Result, ClientError> { + let mut pub_keys_encoded = Vec::new(); + let mut pub_keys = Vec::new(); + + if let Some(pub_key_encoded) = pub_key_encoded { + pub_keys_encoded.push(pub_key_encoded); + } else { + let filename = key_file.expect("expected a key file"); + let file = File::open(&filename).map_err(|err| FileError::Open(filename, err))?; + let buf = BufReader::new(file); + + pub_keys_encoded = buf + .lines() + .map(|line| line.expect("failed to parse pubkey")) + .collect(); + } + + for pub_key_encoded in pub_keys_encoded { + let mut buf = [0u8; PublicKey::BYTES]; + let pub_key_bytes = + Base64::decode(pub_key_encoded, &mut buf).map_err(EncodingError::Base64Decode)?; + let pub_key = PublicKey::from_slice(pub_key_bytes).map_err(DataError::Ed25519)?; + pub_keys.push(pub_key); + } + + Ok(pub_keys) +} + +fn load_ban_list(ban_list_file: PathBuf) -> Result, ClientError> { + let file = File::open(&ban_list_file).map_err(|err| FileError::Open(ban_list_file, err))?; + let mut ban_map = HashMap::new(); + let lines = BufReader::new(file).lines(); + + for ip in lines.map_while(Result::ok) { + ban_map.insert(ip.trim().to_owned(), true); + } + + Ok(ban_map) +} + +fn print_usage(program: &str, opts: Options) { + let brief = format!("Usage of {}:", program); + print!("{}", opts.usage(&brief)); +} + +fn init_logger() { + // default to info level unless RUST_LOG is set + Builder::from_env(Env::default().default_filter_or("info")) + .format(|buf, record| { + write!(buf, "{}", buf.timestamp())?; + if record.level() != Level::Info { + write!(buf, " {}", record.level())?; + } + writeln!(buf, " {}", record.args()) + }) + .init(); +} + +#[derive(Error)] +pub enum ClientError { + #[error("{0}")] + Args(String), + + #[error("block storage")] + BlockStorage(#[from] BlockStorageError), + #[error("data")] + Data(#[from] DataError), + #[error("encoding")] + Encoding(#[from] EncodingError), + #[error("file")] + 
File(#[from] FileError), + #[error("json")] + Json(#[from] JsonError), + #[error("ledger")] + Ledger(#[from] LedgerError), + #[error("parsing")] + Parsing(#[from] ParsingError), + #[error("peer manager")] + PeerManager(#[from] PeerManagerError), + #[error("peer storage")] + PeerStorage(#[from] PeerStorageError), + #[error("processing block")] + ProcessBlock(#[from] ProcessBlockError), +} + +impl_debug_error_chain!(ClientError, "client"); + +impl From<&str> for ClientError { + fn from(s: &str) -> Self { + ClientError::Args(s.to_owned()) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_load_public_keys() { + let pub_key_encoded = Some("2df37wYjh3t8OekRXD0qpRsj9dD9XpVyqkvxnsqNj/s=".to_owned()); + let key_file = None; + let pub_keys = load_public_keys(pub_key_encoded, key_file); + assert_eq!(1, pub_keys.unwrap().len()); + } +} diff --git a/src/bin/wallet/main.rs b/src/bin/wallet/main.rs new file mode 100644 index 0000000..850ff57 --- /dev/null +++ b/src/bin/wallet/main.rs @@ -0,0 +1,1059 @@ +use std::env::args; +use std::fs::File; +use std::io::{BufRead, BufReader, Write}; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::path::PathBuf; +use std::process::ExitCode; +use std::sync::{Arc, Mutex}; +use std::time::{Duration, UNIX_EPOCH}; + +use base64ct::{Base64, Encoding}; +use console::style; +use cruzbit::block::{Block, BlockError, BlockID}; +use cruzbit::constants::{ + COINBASE_MATURITY, CRUZBITS_PER_CRUZ, DEFAULT_CRUZBIT_PORT, MAX_MEMO_LENGTH, +}; +use cruzbit::error::{DataError, EncodingError, ErrChain, FileError, JsonError, ParsingError}; +use cruzbit::genesis::GENESIS_BLOCK_JSON; +use cruzbit::impl_debug_error_chain; +use cruzbit::protocol::{FilterBlockMessage, PushTransactionMessage}; +use cruzbit::transaction::{Transaction, TransactionError, TransactionID, TRANSACTION_ID_LENGTH}; +use cruzbit::utils::resolve_host; +use cruzbit::wallet::{FilterBlockCallback, TransactionCallback, Wallet, WalletError}; +use 
dialoguer::theme::SimpleTheme; +use dialoguer::{Completion, Confirm, Input, Password}; +use ed25519_compact::{PublicKey, SecretKey}; +use env_logger::{Builder, Env}; +use faster_hex::hex_decode; +use futures::Future; +use getopts::Options; +use humantime::format_rfc3339; +use log::{error, Level}; +use thiserror::Error; +use tokio::sync::Mutex as AsyncMutex; + +/// This is a lightweight wallet client. +#[tokio::main] +async fn main() -> ExitCode { + match run().await { + Ok(_) => ExitCode::SUCCESS, + Err(err) => match err { + WalletBinError::Args(err) => { + println!("{}", err); + ExitCode::SUCCESS + } + _ => { + error!("{:?}", err); + ExitCode::FAILURE + } + }, + } +} + +async fn run() -> Result<(), WalletBinError> { + init_logger(); + let args = args().collect::>(); + let program = &args[0]; + + let mut opts = Options::new(); + opts.long_only(true); + opts.optopt("", "peer", "Address of a peer to connect to", "string"); + opts.optopt( + "", + "walletdb", + "Path to a wallet database (created if it doesn't exist)", + "walletdb", + ); + opts.optflag( + "", + "tlsverify", + "Verify the TLS certificate of the peer is signed by a recognized CA and the host matches the CN", + ); + opts.optflag("", "recover", "Attempt to recover a corrupt walletdb"); + + let Ok(matches) = opts.parse(&args[1..]) else { + print_usage(program, opts); + return Ok(()); + }; + let db_path = match matches.opt_str("walletdb") { + Some(db_path) => PathBuf::from(db_path), + None => return Err("path to wallet database required".into()), + }; + let peer = match matches.opt_str("peer") { + Some(mut peer) => { + // add default port if one was not supplied + if !peer.contains(':') { + peer = format!("{}:{}", peer, DEFAULT_CRUZBIT_PORT); + } + // parse and resolve hostname to ip + resolve_host(&peer)? 
+ } + None => SocketAddr::new( + IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), + DEFAULT_CRUZBIT_PORT, + ), + }; + let recover = matches.opt_present("recover"); + if recover { + match Wallet::repair_db(db_path).map_err(WalletBinError::Wallet) { + Ok(_) => { + println!("Recovery completed without error"); + return Ok(()); + } + Err(err) => return Err(err), + } + } + let tls_verify = matches.opt_present("tlsverify"); + + // load genesis block + let genesis_block = + serde_json::from_str::(GENESIS_BLOCK_JSON).map_err(JsonError::Deserialize)?; + let genesis_id = genesis_block.id()?; + + println!("Starting up..."); + println!("Genesis block ID: {}", genesis_id); + + if recover { + println!("Attempting to recover wallet..."); + } + + // instantiate wallet + let wallet = Wallet::new(db_path)?; + + loop { + // load wallet passphrase + let passphrase = prompt_for_passphrase(); + if wallet.set_passphrase(passphrase)? { + break; + } + println!( + "{}", + style("Passphrase is not the one used to encrypt your most recent key.") + .bold() + .red() + ); + } + + let new_txs = NewTxs::default(); + let new_confs = NewConfs::default(); + let cmd_lock = Arc::new(AsyncMutex::new(())); + + // handle new incoming transactions + let transaction_cmd_lock = Arc::clone(&cmd_lock); + let transaction_new_txs = Arc::clone(&new_txs); + let transaction_callback: TransactionCallback = + Box::new(move |wallet: &Arc, pt: PushTransactionMessage| { + match transaction_is_relevant(wallet, &pt.transaction) { + Ok(ok) => { + if !ok { + // false positive + return; + } + } + Err(err) => { + eprintln!("Error: {:?}", err); + return; + } + }; + + let mut new_txs = transaction_new_txs.lock().unwrap(); + let show_message = new_txs.len() == 0; + new_txs.push(pt.transaction); + + if show_message { + let cmd_lock = Arc::clone(&transaction_cmd_lock); + tokio::spawn(async move { + // don't interrupt a user during a command + let _cmd_guard = cmd_lock.lock().await; + print!("\n\nNew incoming transaction! 
"); + print!("Type {} to view it.\n\n", style("show").bold().green()); + }); + } + }); + wallet.set_transaction_callback(transaction_callback); + + // handle new incoming filter blocks + let filter_block_cmd = Arc::clone(&cmd_lock); + let filter_block_new_confs = Arc::clone(&new_confs); + let filter_block_callback: FilterBlockCallback = + Box::new(move |wallet: &Arc, fb: FilterBlockMessage| { + for tx in fb.transactions { + match transaction_is_relevant(wallet, &tx) { + Ok(ok) => { + if !ok { + // false positive + continue; + } + } + Err(err) => { + eprintln!("Error: {:?}", err); + continue; + } + }; + + let mut new_confs = filter_block_new_confs.lock().unwrap(); + let show_message = new_confs.len() == 0; + new_confs.push(TransactionWithHeight { + tx, + height: fb.header.height, + }); + + if show_message { + let cmd = Arc::clone(&filter_block_cmd); + tokio::spawn(async move { + // don't interrupt a user during a command + let _cmd_guard = cmd.lock().await; + print!("\n\nNew transaction confirmation! "); + print!("Type {} to view it.\n\n", style("conf").bold().green()); + }); + } + } + }); + wallet.set_filter_block_callback(filter_block_callback); + + // setup prompt + let completion = CmdCompletion::default(); + println!("Please select a command."); + println!( + "To connect to your wallet peer you need to issue a command requiring it, e.g. 
{}", + style("balance").bold().green() + ); + + loop { + // run interactive prompt + let cmd = Input::::with_theme(&SimpleTheme {}) + .with_prompt(">") + .allow_empty(true) + .completion_with(&completion) + .interact_text()?; + { + let _cmd_guard = cmd_lock.lock().await; + let connect = connect_wallet(&wallet, peer, &genesis_id, tls_verify); + match handle_cmd(&completion, &cmd, &wallet, connect, &new_confs, &new_txs).await { + Ok(ok) => { + if !ok { + break Ok(()); + } + } + Err(err) => { + eprintln!("Error: {:?}", err); + } + } + } + } +} + +async fn handle_cmd( + completion: &CmdCompletion, + cmd: &str, + wallet: &Arc, + connect: impl Future>, + new_confs: &NewConfs, + new_txs: &NewTxs, +) -> Result { + match cmd { + "balance" => { + connect.await?; + let pub_keys = wallet.get_keys()?; + + let mut total = 0; + for (i, pub_key) in pub_keys.iter().enumerate() { + let (balance, _height) = wallet.get_balance(pub_key).await?; + let amount = round_float(balance as f64, 8) / CRUZBITS_PER_CRUZ as f64; + let mut buf = [0u8; 44]; + let encoded = Base64::encode(pub_key.as_ref(), &mut buf) + .map_err(EncodingError::Base64Encode)?; + println!("{:4}: {} {:16.8}", i + 1, encoded, amount); + total += balance; + } + let amount = round_float(total as f64, 8) / CRUZBITS_PER_CRUZ as f64; + println!("{}: {:.8}", style("Total").bold(), amount); + } + + "clearconf" => { + let mut new_confs = new_confs.lock().unwrap(); + new_confs.clear(); + } + + "clearnew" => { + let mut new_txs = new_txs.lock().unwrap(); + new_txs.clear(); + } + + "conf" => { + connect.await?; + let (tx, left) = { + let mut new_confs = new_confs.lock().unwrap(); + if new_confs.len() == 0 { + (None, 0) + } else { + let tx = new_confs.remove(0); + (Some(tx), new_confs.len()) + } + }; + if let Some(tx) = tx { + show_transaction(wallet, tx.tx, Some(tx.height)).await?; + if left > 0 { + println!( + "\n{} new confirmations(s) left to display. 
Type {} to continue.", + left, + style("conf").bold().green() + ); + } + } else { + println!("No new confirmations to display") + } + } + + "dumpkeys" => { + let pub_keys = wallet.get_keys()?; + if pub_keys.is_empty() { + println!("No public keys found"); + return Ok(true); + } + let name = "keys.txt"; + let mut file = File::create(name).map_err(|err| FileError::Create(name.into(), err))?; + for pub_key in pub_keys.iter() { + let mut buf = [0u8; 44]; + let _encoded = Base64::encode(pub_key.as_ref(), &mut buf) + .map_err(EncodingError::Base64Encode)?; + file.write_all(&[&buf, &b"\n"[..]].concat()) + .map_err(|err| FileError::Write(name.into(), err))?; + } + println!( + "{} public keys saved to '{}'", + pub_keys.len(), + style(name).bold() + ); + } + + "export" => { + println!("{}: Anyone with access to a wallet's private key(s) has full control of the funds in the wallet.", style("WARNING").bold().red()); + let confirm = prompt_for_confirmation("Are you sure you wish to proceed?"); + if !confirm { + println!("Aborting export"); + return Ok(true); + } + let pub_keys = wallet.get_keys()?; + if pub_keys.is_empty() { + println!("No private keys found"); + return Ok(true); + } + let filename: PathBuf = prompt_for_string("Filename", "export.txt").into(); + let mut file = + File::create(&filename).map_err(|err| FileError::Open(filename.clone(), err))?; + let mut count = 0; + for pub_key in pub_keys { + let priv_key = match wallet.get_private_key(pub_key) { + Ok(Some(v)) => v, + Ok(None) | Err(_) => { + let mut buf = [0u8; 44]; + let encoded = + match Base64::encode(pub_key.as_ref(), &mut buf).map_err(|err| { + WalletBinError::Encoding(EncodingError::Base64Encode(err)) + }) { + Ok(v) => v, + Err(err) => { + eprintln!("Error: {:?}", err); + continue; + } + }; + println!( + "Couldn't get private key for public key: {}; omitting from export", + encoded + ); + continue; + } + }; + + let mut buf = [0u8; 44]; + let encoded_pub_key = Base64::encode(pub_key.as_ref(), &mut buf) + 
.map_err(EncodingError::Base64Encode)?; + let mut buf = [0u8; 88]; + let encoded_priv_key = Base64::encode(priv_key.as_ref(), &mut buf) + .map_err(EncodingError::Base64Encode)?; + let pair = format!("{},{}\n", encoded_pub_key, encoded_priv_key,); + + if let Err(err) = file + .write(pair.as_bytes()) + .map_err(|err| WalletBinError::File(FileError::Write(filename.clone(), err))) + { + eprintln!("Error: {:?}", err); + } + count += 1; + } + + println!( + "{} wallet key pairs saved to '{}'", + count, + style(&filename.display()).bold() + ); + } + + "genkeys" => { + let count = prompt_for_number("Count"); + let pub_keys = wallet.new_keys(count)?; + println!("Generated {} new keys", pub_keys.len()); + if wallet.is_connected().await { + // update our filter if online + wallet.set_filter().await? + } + } + + "import" => { + println!( + "Files should have one address per line, in the format: {}", + style("PUBLIC_KEY,PRIVATE_KEY").bold() + ); + println!( + "Files generated by the {} command are automatically formatted in this way.", + style("export").bold() + ); + + let filename: PathBuf = prompt_for_string("Filename", "export.txt").into(); + let file = File::open(&filename).map_err(|err| FileError::Open(filename, err))?; + + let mut skipped = 0; + let mut pub_keys = Vec::new(); + let reader = BufReader::new(file); + + for line in reader.lines() { + let line = line.unwrap(); + let key = line.split(',').collect::>(); + if key.len() != 2 { + eprintln!("Error found: incorrectly formatted line"); + skipped += 1; + continue; + } + let mut buf = [0u8; PublicKey::BYTES]; + let pub_key_bytes = + match Base64::decode(key[0], &mut buf).map_err(EncodingError::Base64Decode) { + Ok(v) => v, + Err(err) => { + eprintln!("Error with public key: {}", err); + skipped += 1; + continue; + } + }; + let pub_key = match PublicKey::from_slice(pub_key_bytes).map_err(DataError::Ed25519) + { + Ok(v) => v, + Err(err) => { + eprintln!("Error: {}", err); + continue; + } + }; + + let mut buf = [0u8; 
SecretKey::BYTES];
                let priv_key_bytes =
                    match Base64::decode(key[1], &mut buf).map_err(EncodingError::Base64Decode) {
                        Ok(v) => v,
                        Err(err) => {
                            eprintln!("Error with private key: {}", err);
                            skipped += 1;
                            continue;
                        }
                    };

                // parse the private key; skip a malformed entry instead of
                // panicking (matches the error handling of every other branch
                // in this import loop)
                let priv_key =
                    match SecretKey::from_slice(priv_key_bytes).map_err(DataError::Ed25519) {
                        Ok(v) => v,
                        Err(err) => {
                            eprintln!("Error with private key: {}", err);
                            skipped += 1;
                            continue;
                        }
                    };
                // add key to database
                if let Err(err) = wallet.add_key(pub_key, priv_key) {
                    eprintln!("Error adding key pair to database: {:?}", err);
                    skipped += 1;
                    continue;
                }
                pub_keys.push(pub_key);
            }
            for (i, pub_key) in pub_keys.iter().enumerate() {
                let mut buf = [0u8; 44];
                let encoded = Base64::encode(pub_key.as_ref(), &mut buf)
                    .map_err(EncodingError::Base64Encode)?;

                println!("{:4}: {}", i + 1, encoded);
            }
            println!(
                "Successfully added {} key(s); {} line(s) skipped.",
                pub_keys.len(),
                skipped
            );
        }

        "listkeys" => {
            // list every public key in the wallet, 1-indexed, base64-encoded
            let pub_keys = wallet.get_keys()?;
            for (i, pub_key) in pub_keys.iter().enumerate() {
                let mut buf = [0u8; 44];
                let encoded = Base64::encode(pub_key.as_ref(), &mut buf)
                    .map_err(EncodingError::Base64Encode)?;
                println!("{:4}: {}", i + 1, encoded);
            }
        }

        "newkey" => {
            // generate and store a single new key pair
            let pub_keys = wallet.new_keys(1)?;
            let mut buf = [0u8; 44];
            let encoded = Base64::encode(pub_keys[0].as_ref(), &mut buf)
                .map_err(EncodingError::Base64Encode)?;
            println!("New key generated, public key: {}", style(encoded).bold());
            if wallet.is_connected().await {
                // update our filter if online
                wallet.set_filter().await?
+ } + } + + "quit" => { + wallet.shutdown().await?; + return Ok(false); + } + + "rewards" => { + connect.await?; + let pub_keys = wallet.get_keys()?; + let (_tip_id, tip_header) = wallet.get_tip_header().await?; + let mut total = 0; + + 'gpkt: for (i, pub_key) in pub_keys.into_iter().enumerate() { + let mut rewards = 0; + let mut start_height = tip_header.height.saturating_sub(COINBASE_MATURITY); + let mut start_index = 0; + + loop { + let (_start_height, stop_height, stop_index, fbs) = match wallet + .get_public_key_transactions( + pub_key, + start_height, + tip_header.height + 1, + start_index, + 32, + ) + .await + .map_err(WalletBinError::Wallet) + { + Ok(v) => v, + Err(err) => { + eprintln!("Error: {:?}", err); + break 'gpkt; + } + }; + let mut num_tx = 0; + (start_height, start_index) = (stop_height, stop_index + 1); + + if let Some(fbs) = fbs { + for fb in fbs { + for tx in fb.transactions { + num_tx += 1; + if tx.is_coinbase() { + rewards += tx.amount; + } + } + } + } + + if num_tx < 32 { + break; + } + } + + let mut buf = [0u8; 44]; + let encoded = Base64::encode(pub_key.as_ref(), &mut buf) + .map_err(EncodingError::Base64Encode)?; + let amount = round_float(rewards as f64, 8) / CRUZBITS_PER_CRUZ as f64; + println!("{:4}: {} {:16.8}", i + 1, encoded, amount); + total += rewards; + } + + let amount = round_float(total as f64, 8) / CRUZBITS_PER_CRUZ as f64; + println!("{}: {:.8}", style("Total").bold(), amount); + } + + "send" => { + connect.await?; + let id = send_transaction(wallet).await?; + println!("Transaction {} sent", id); + } + + "show" => { + connect.await?; + let (tx, left) = { + let mut new_txs = new_txs.lock().unwrap(); + if new_txs.len() == 0 { + (None, 0) + } else { + let tx = new_txs.remove(0); + (Some(tx), new_txs.len()) + } + }; + if let Some(tx) = tx { + show_transaction(wallet, tx, None).await?; + if left > 0 { + println!( + "\n{} new transaction(s) left to display. 
Type {} to continue.", + left, + style("show").green() + ); + } + } else { + println!("No new transactions to display") + } + } + + "txstatus" => { + connect.await?; + let tx_id = prompt_for_transaction_id("ID")?; + println!(); + let (Some(tx), _block_id, Some(height)) = wallet.get_transaction(tx_id).await? else { + println!( + "Transaction {} not found in the blockchain at this time.", + tx_id + ); + println!("It may be waiting for confirmation."); + return Ok(true); + }; + show_transaction(wallet, tx, Some(height)).await?; + } + + "verify" => { + let pub_keys = wallet.get_keys()?; + let mut verified = 0; + let mut corrupt = 0; + for (i, pub_key) in pub_keys.into_iter().enumerate() { + match wallet.verify_key(pub_key) { + Ok(_) => { + verified += 1; + let mut buf = [0u8; 44]; + let encoded = Base64::encode(pub_key.as_ref(), &mut buf) + .map_err(EncodingError::Base64Encode)?; + println!( + "{:4}: {} {}", + i + 1, + encoded, + style("Verified").bold().green() + ); + } + Err(err) => { + corrupt += 1; + let mut buf = [0u8; 44]; + let encoded = Base64::encode(pub_key.as_ref(), &mut buf) + .map_err(EncodingError::Base64Encode)?; + println!("{:4}: {} {:?}", i + 1, encoded, style(err).bold().red()); + } + }; + } + println!( + "{} key(s) verified and {} key(s) potentially corrupt", + verified, corrupt + ) + } + + _ => { + completion.help(); + } + } + + println!(); + Ok(true) +} + +/// Prompt for transaction details and request the wallet to send it +async fn send_transaction(wallet: &Wallet) -> Result { + let (min_fee, min_amount) = wallet.get_transaction_relay_policy().await?; + + // prompt for from + let from = prompt_for_public_key("From")?; + + // prompt for to + let to = prompt_for_public_key("To")?; + + // prompt for amount + let amount = prompt_for_value("Amount"); + if amount < min_amount { + return Err(ValidationError::MinimumTransactionAmount( + round_float(min_amount as f64, 8) / CRUZBITS_PER_CRUZ as f64, + ) + .into()); + } + + // prompt for fee + let fee = 
prompt_for_value("Fee"); + if fee < min_fee { + return Err(ValidationError::MinimumTransactionFee( + round_float(min_fee as f64, 8) / CRUZBITS_PER_CRUZ as f64, + ) + .into()); + } + + // prompt for memo + let memo = Input::::new() + .with_prompt("Memo") + .allow_empty(true) + .interact()?; + if memo.len() > MAX_MEMO_LENGTH { + return Err(ValidationError::MaximumMemoLengthExceeded(MAX_MEMO_LENGTH, memo.len()).into()); + } + + let memo = if memo.is_empty() { None } else { Some(memo) }; + + // create and send send it. by default the transaction expires if not mined within 3 blocks from now + let id = wallet + .send(from, to, amount, fee, None, Some(3), memo) + .await?; + Ok(id) +} + +fn prompt_for_public_key(prompt: &str) -> Result { + let text = Input::::new() + .with_prompt(prompt) + .allow_empty(true) + .interact() + .unwrap(); + + if text.is_empty() || text.len() != 44 { + return Err(ValidationError::PublicKeyInvalid.into()); + }; + + let mut buf = [0u8; PublicKey::BYTES]; + Base64::decode(text.as_bytes(), &mut buf).map_err(EncodingError::Base64Decode)?; + if buf.len() != PublicKey::BYTES { + return Err(ValidationError::PublicKeyInvalid.into()); + } + + let pub_key = PublicKey::from_slice(&buf).map_err(DataError::Ed25519)?; + Ok(pub_key) +} + +fn prompt_for_value(prompt: &str) -> u64 { + let value = Input::new().with_prompt(prompt).interact().unwrap(); + let value_float = round_float(value, 8) * CRUZBITS_PER_CRUZ as f64; + round_to_6th(value_float) as u64 +} + +fn prompt_for_number(prompt: &'static str) -> usize { + Input::new().with_prompt(prompt).interact().unwrap() +} + +fn prompt_for_confirmation(prompt: &str) -> bool { + Confirm::new() + .with_prompt(prompt) + .default(false) + .interact() + .unwrap() +} + +fn prompt_for_string(prompt: &str, default_response: &str) -> String { + Input::new() + .with_prompt(prompt) + .with_initial_text(default_response) + .interact() + .unwrap() +} + +fn prompt_for_transaction_id(prompt: &str) -> Result { + let text = 
Input::::new().with_prompt(prompt).interact()?; + if text.len() != TRANSACTION_ID_LENGTH * 2 { + return Err(ValidationError::TransactionIDInvalid.into()); + } + let mut tx_id = TransactionID::new(); + hex_decode(text.as_bytes(), &mut tx_id).map_err(EncodingError::HexDecode)?; + + Ok(tx_id) +} + +async fn show_transaction( + wallet: &Arc, + tx: Transaction, + height: Option, +) -> Result<(), WalletBinError> { + let id = tx.id()?; + println!("{:7}: {}", style("ID").bold(), id); + println!("{:7}: {}", style("Series").bold(), tx.series); + println!( + "{:7}: {}", + style("Time").bold(), + format_rfc3339(UNIX_EPOCH + Duration::from_secs(tx.time)) + ); + + if let Some(from) = tx.from { + let mut buf = [0u8; 44]; + let encoded = Base64::encode(&from[..], &mut buf).map_err(EncodingError::Base64Encode)?; + println!("{:7}: {}", style("From").bold(), encoded); + } + + let mut buf = [0u8; 44]; + let encoded = Base64::encode(&tx.to[..], &mut buf).map_err(EncodingError::Base64Encode)?; + + println!("{:7}: {}", style("To").bold(), encoded); + println!( + "{:7}: {:.8}", + style("Amount").bold(), + round_float(tx.amount as f64, 8) / CRUZBITS_PER_CRUZ as f64 + ); + + if let Some(fee) = tx.fee { + println!( + "{:7}: {:.8}", + style("Fee").bold(), + round_float(fee as f64, 8) / CRUZBITS_PER_CRUZ as f64 + ); + } + + if let Some(memo) = tx.memo { + println!("{:7}: {}", style("Memo").bold(), memo); + } + + let (_, header) = wallet.get_tip_header().await?; + + if let Some(height) = height { + println!( + "{:7}: confirmed at height {}, {} confirmation(s)", + style("Status").bold(), + height, + (header.height - height) + 1 + ); + } else { + if let Some(matures) = tx.matures { + println!( + "{:7}: cannot be mined until height: {}, current height: {}", + style("Matures").bold(), + matures, + header.height + ); + } + if let Some(expires) = tx.expires { + println!( + "{:7}: cannot be mined after height: {}, current height: {}", + style("Expires").bold(), + expires, + header.height + ); + } + } 
+ + Ok(()) +} + +/// Catch filter false-positives +fn transaction_is_relevant(wallet: &Arc, tx: &Transaction) -> Result { + let pub_keys = wallet.get_keys()?; + for pub_key in pub_keys { + if tx.contains(pub_key) { + return Ok(true); + } + } + + Ok(false) +} + +// secure passphrase prompt helper +fn prompt_for_passphrase() -> String { + Password::new() + .with_prompt("\nEnter passphrase") + .with_confirmation("Confirm passphrase", "Passphrase mismatch") + .interact() + .unwrap() +} + +/// From: +fn round_float(mut x: f64, prec: u32) -> f64 { + let pow = 10_f64.powi(prec as i32); + let mut intermed = x * pow; + intermed = (intermed * 1e6).round() / 1e6; // round to 6th decimal + let frac = intermed.fract(); + intermed += 0.5; + x = 0.5; + + if frac < 0.0 { + x = -0.5; + intermed -= 1_f64; + } + let rounder = if frac >= x { + intermed.ceil() + } else { + intermed.floor() + }; + + rounder / pow +} + +fn round_to_6th(x: f64) -> f64 { + (x * 1e6).round() / 1e6 +} + +/// Connect the wallet on-demand +async fn connect_wallet( + wallet: &Arc, + peer: SocketAddr, + genesis_id: &BlockID, + tls_verify: bool, +) -> Result<(), WalletBinError> { + if wallet.is_connected().await { + return Ok(()); + } + wallet + .connect(peer, genesis_id, tls_verify) + .await + .map_err(WalletBinError::Wallet)?; + wallet.set_filter().await.map_err(WalletBinError::Wallet) +} + +fn print_usage(program: &str, opts: Options) { + let brief = format!("Usage of {}:", program); + print!("{}", opts.usage(&brief)); +} + +fn init_logger() { + // default to error level unless RUST_LOG is set + Builder::from_env(Env::default().default_filter_or("error")) + .format(|buf, record| { + write!(buf, "{}", buf.timestamp())?; + if record.level() != Level::Info { + write!(buf, " {}", record.level())?; + } + writeln!(buf, " {}", record.args()) + }) + .init(); +} + +type NewTxs = Arc>>; +type NewConfs = Arc>>; + +struct TransactionWithHeight { + tx: Transaction, + height: u64, +} + +struct CmdCompletion { + options: 
Vec<&'static str>, + items: Vec<(&'static str, &'static str)>, +} + +impl CmdCompletion { + pub fn new(items: Vec<(&'static str, &'static str)>) -> Self { + let mut options = items.iter().map(|(cmd, _)| *cmd).collect::>(); + options.push("help"); + Self { options, items } + } + + pub fn help(&self) { + for (text, description) in self.items.iter() { + println!("{} - {}", text, description); + } + } +} + +impl Completion for CmdCompletion { + fn get(&self, input: &str) -> Option { + let matches = self + .options + .iter() + .filter(|option| option.starts_with(input)) + .collect::>(); + + if matches.len() == 1 { + Some(matches[0].to_string()) + } else { + None + } + } +} + +impl Default for CmdCompletion { + fn default() -> Self { + let items = vec![ + ("balance", "Retrieve the current balance of all public keys"), + ("clearconf", "Clear all pending transaction confirmation notifications"), + ("clearnew", "Clear all pending incoming transaction notifications"), + ("conf", "Show new transaction confirmations"), + ("dumpkeys", "Dump all of the wallet's public keys to a text file"), + ("export", "Save all of the wallet's public-private key pairs to a text file"), + ("genkeys", "Generate multiple keys at once"), + ("import", "Import public-private key pairs from a text file"), + ("listkeys", "List all known public keys"), + ("newkey", "Generate and store a new private key"), + ("rewards", "Show immature block rewards for all public keys"), + ("send", "Send cruzbits to someone"), + ("show", "Show new incoming transactions"), + ("txstatus", "Show confirmed transaction information given a transaction ID"), + ("verify", "Verify the private key is decryptable and intact for all public keys displayed with 'listkeys'"), + ("quit", "Quit this wallet session"), + ]; + + Self::new(items) + } +} + +#[derive(Error)] +pub enum WalletBinError { + #[error("{0}")] + Args(String), + + #[error("block")] + Block(#[from] BlockError), + #[error("data")] + Data(#[from] DataError), + 
#[error("encoding")] + Encoding(#[from] EncodingError), + #[error("block")] + File(#[from] FileError), + #[error("json")] + Json(#[from] JsonError), + #[error("parsing")] + Parsing(#[from] ParsingError), + #[error("transaction")] + Transaction(#[from] TransactionError), + #[error("validation")] + Validation(#[from] ValidationError), + #[error(transparent)] + Wallet(#[from] WalletError), + + #[error(transparent)] + Dialoguer(#[from] dialoguer::Error), +} + +impl_debug_error_chain!(WalletBinError, "wallet"); + +impl From<&'static str> for WalletBinError { + fn from(s: &'static str) -> Self { + WalletBinError::Args(s.to_owned()) + } +} + +#[derive(Error, Debug)] +pub enum ValidationError { + #[error("maximum memo length ({0}) exceeded ({0})")] + MaximumMemoLengthExceeded(usize, usize), + #[error("the peer's minimum amount to relay transactions is %.8f")] + MinimumTransactionAmount(f64), + #[error("the peer's minimum required fee to relay transactions is {0:8}")] + MinimumTransactionFee(f64), + #[error("public key is invalid")] + PublicKeyInvalid, + #[error("transaction id is invalid")] + TransactionIDInvalid, +} + +#[cfg(test)] +#[test] +fn test_round_float() { + let amount = round_float(4.89_f64, 8) * CRUZBITS_PER_CRUZ as f64; + assert_eq!(round_to_6th(amount), 489000000_f64); + + let f = round_float(amount, 8) / CRUZBITS_PER_CRUZ as f64; + assert_eq!(f, 4.89); + + let amount = round_float(0.00000001, 8) * CRUZBITS_PER_CRUZ as f64; + assert_eq!(round_to_6th(amount), 1_f64); + + let f = round_float(amount, 8) / CRUZBITS_PER_CRUZ as f64; + assert_eq!(f, 0.00000001); + + let amount = round_float(1.00000001, 8) * CRUZBITS_PER_CRUZ as f64; + assert_eq!(round_to_6th(amount), 100000001_f64); + + let f = round_float(amount, 8) / CRUZBITS_PER_CRUZ as f64; + assert_eq!(f, 1.00000001); + + let amount = round_float(123_f64, 8) * CRUZBITS_PER_CRUZ as f64; + assert_eq!(round_to_6th(amount), 12300000000_f64); + + let f = round_float(amount, 8) / CRUZBITS_PER_CRUZ as f64; + 
assert_eq!(f, 123.0); +} diff --git a/src/block.rs b/src/block.rs new file mode 100644 index 0000000..bc018de --- /dev/null +++ b/src/block.rs @@ -0,0 +1,501 @@ +use std::convert::AsRef; +use std::fmt::{self, Debug, Display}; +use std::ops::{Deref, DerefMut}; + +use faster_hex::hex_encode; +use ibig::UBig; +use rand::Rng; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use sha3::{Digest, Sha3_256}; +use thiserror::Error; + +use crate::block_header_hasher::BlockHeaderHasher; +use crate::constants::{MAX_NUMBER, MAX_TRANSACTIONS_PER_BLOCK}; +use crate::error::JsonError; +use crate::transaction::{Transaction, TransactionError, TransactionID}; +use crate::utils::now_as_secs; + +/// Represents a block in the block chain. It has a header and a list of transactions. +/// As blocks are connected their transactions affect the underlying ledger. +#[derive(Clone, Deserialize, Debug, Serialize)] +pub struct Block { + pub header: BlockHeader, + pub transactions: Vec, + #[serde(skip)] + /// hash state used by miner. not marshaled + hasher: Sha3_256, +} + +impl Block { + /// Creates and returns a new Block to be mined. 
+ pub fn new( + previous: BlockID, + height: u64, + target: BlockID, + chain_work: BlockID, + transactions: Vec, + ) -> Result { + // enforce the hard cap transaction limit + if transactions.len() > MAX_TRANSACTIONS_PER_BLOCK as usize { + return Err(BlockError::TransactionsSizeExceeded( + transactions.len(), + MAX_TRANSACTIONS_PER_BLOCK, + )); + } + + // compute the hash list root + let mut hasher = Sha3_256::new(); + let hash_list_root = compute_hash_list_root(&mut hasher, &transactions)?; + + // create the header and block + let block = Block { + header: BlockHeader { + previous, + hash_list_root, + // just use the system time + time: now_as_secs(), + target, + chain_work: compute_chain_work(&target, &chain_work), + nonce: rand::thread_rng().gen_range(0..=MAX_NUMBER), + height, + transaction_count: transactions.len() as u32, + }, + transactions, + // save this to use while mining + hasher, + }; + + Ok(block) + } + + /// Computes an ID for a given block. + pub fn id(&self) -> Result { + self.header.id() + } + + /// Verifies the block's proof-of-work satisfies the declared target. + pub fn check_pow(&self, id: &BlockID) -> bool { + id.as_big_int() <= self.header.target.as_big_int() + } + + /// Adds a new transaction to the block. Called by miner when mining a new block. 
+ pub fn add_transaction( + &mut self, + id: TransactionID, + tx: Transaction, + ) -> Result<(), BlockError> { + // hash the new transaction hash with the running state + self.hasher.update(&id[..]); + + // update coinbase's fee + self.transactions[0].amount += tx.fee.expect("transaction should have a fee"); + + // update the hash list root to account for coinbase amount change + self.header.hash_list_root = + add_coinbase_to_hash_list_root(&mut self.hasher, &self.transactions[0])?; + + // append the new transaction to the list + self.transactions.push(tx); + self.header.transaction_count += 1; + Ok(()) + } +} + +pub fn compute_hash_list_root( + hasher: &mut Sha3_256, + transactions: &[Transaction], +) -> Result { + // don't include coinbase in the first round + for tx in transactions[1..].iter() { + let id = tx.id()?; + hasher.update(id); + } + + // add the coinbase last + add_coinbase_to_hash_list_root(hasher, &transactions[0]) +} + +/// Add the coinbase to the hash list root +fn add_coinbase_to_hash_list_root( + hasher: &mut Sha3_256, + coinbase: &Transaction, +) -> Result { + // get the root of all of the non-coinbase transaction hashes + let root_hash_without_coinbase = hasher.clone().finalize(); + + // add the coinbase separately + // this makes adding new transactions while mining more efficient since the coinbase + // fee amount will change when adding new transactions to the block + let id = coinbase.id()?; + + // hash the coinbase hash with the transaction list root hash + let mut root_hash = Sha3_256::new(); + root_hash.update(id); + root_hash.update(root_hash_without_coinbase); + let hash = root_hash.finalize(); + + // we end up with a sort of modified hash list root of the form: + // HashListRoot = H(TXID[0] | H(TXID[1] | ... 
| TXID[N-1])) + let hash_list_root = TransactionID::from(&hash[..]); + Ok(hash_list_root) +} + +/// Compute block work given its target +fn compute_block_work(target: &BlockID) -> UBig { + let block_work_int = UBig::from(0u8); + let mut target_int = target.as_big_int(); + + if target_int <= block_work_int { + return block_work_int; + } + + // block work = 2**256 / (target+1) + let max_int = UBig::from(2u8).pow(256); + target_int += UBig::from(1u8); + max_int / target_int +} + +/// Compute cumulative chain work given a block's target and the previous chain work +pub fn compute_chain_work(target: &BlockID, chain_work: &BlockID) -> BlockID { + let block_work_int = compute_block_work(target); + let chain_work_int = chain_work.as_big_int(); + BlockID::from(chain_work_int + block_work_int) +} + +/// Contains data used to determine block validity and its place in the block chain. +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct BlockHeader { + pub previous: BlockID, + pub hash_list_root: TransactionID, + pub time: u64, + pub target: BlockID, + /// total cumulative chain work + pub chain_work: BlockID, + /// not used for crypto + pub nonce: u64, + pub height: u64, + pub transaction_count: u32, +} + +impl BlockHeader { + /// Computes an ID for a given block header. + pub fn id(&self) -> Result { + let json = serde_json::to_string(self).map_err(JsonError::Serialize)?; + let hash = Sha3_256::digest(json.as_bytes()); + let block_id = BlockID::from(&hash[..]); + Ok(block_id) + } + + /// Computes an ID for a given block header when mining. + pub fn id_fast(&mut self, miner_num: usize, hasher: &mut BlockHeaderHasher) { + BlockHeaderHasher::update(miner_num, self, hasher); + } + + /// Returns true if the header indicates it is a better chain than "their_header" up to both points. + /// "this_when" is the timestamp of when we stored this block header. + /// "their_when" is the timestamp of when we stored "their_header". 
+ pub fn compare(&self, their_header: &BlockHeader, this_when: u64, their_when: u64) -> bool { + let this_work_int = self.chain_work.as_big_int(); + let their_work_int = their_header.chain_work.as_big_int(); + + // most work wins + if this_work_int > their_work_int { + return true; + } + if this_work_int < their_work_int { + return false; + } + + // tie goes to the block we stored first + if this_when < their_when { + return true; + } + if this_when > their_when { + return false; + } + + // if we still need to break a tie go by the lesser id + let this_id = self.id().unwrap_or_else(|err| panic!("{}", err)); + let their_id = their_header.id().unwrap_or_else(|err| panic!("{}", err)); + this_id.as_big_int() < their_id.as_big_int() + } +} + +impl PartialEq for BlockHeader { + fn eq(&self, other: &Self) -> bool { + self.previous == other.previous + } +} + +/// SHA3-256 hash +pub const BLOCK_ID_LENGTH: usize = 32; + +/// A block's unique identifier. +#[derive(Clone, Copy, Default, Eq, Hash, PartialEq)] +pub struct BlockID([u8; BLOCK_ID_LENGTH]); + +impl BlockID { + pub fn new() -> BlockID { + Default::default() + } + + /// Returns BlockID as a hex string + pub fn as_hex(&self) -> String { + format!("{}", self) + } + + /// Converts from BlockID to BigInt. 
+ pub fn as_big_int(&self) -> UBig { + UBig::from_be_bytes(self) + } +} + +impl AsRef<[u8]> for BlockID { + fn as_ref(&self) -> &[u8] { + &self.0 + } +} + +impl Deref for BlockID { + type Target = [u8]; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for BlockID { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl Debug for BlockID { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self) + } +} + +impl Display for BlockID { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut buf = [0u8; BLOCK_ID_LENGTH * 2]; + let _ = hex_encode(self, &mut buf); + write!(f, "{}", String::from_utf8_lossy(&buf)) + } +} + +impl From for BlockID { + /// Converts from BlockID to BigInt. + fn from(value: UBig) -> Self { + let mut block_id = BlockID::new(); + let int_bytes = value.to_be_bytes(); + + if int_bytes.len() > 32 { + panic!("Too much work") + } + + block_id[32 - int_bytes.len()..].copy_from_slice(&int_bytes); + block_id + } +} + +impl From> for BlockID { + fn from(value: Vec) -> Self { + BlockID(value.try_into().expect("incorrect bytes for block id")) + } +} + +impl From<&[u8]> for BlockID { + fn from(value: &[u8]) -> Self { + BlockID(value.try_into().expect("incorrect bytes for block id")) + } +} + +impl FromIterator for BlockID { + fn from_iter>(iter: I) -> Self { + iter.into_iter().collect::>().into() + } +} + +impl Serialize for BlockID { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + faster_hex::nopfx_lowercase::serialize(self, serializer) + } +} + +impl<'de> Deserialize<'de> for BlockID { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + faster_hex::nopfx_lowercase::deserialize(deserializer) + } +} + +#[derive(Error, Debug)] +pub enum BlockError { + #[error("transaction list size exceeds limit per block, size: {0}, max {1}")] + TransactionsSizeExceeded(usize, u32), + + #[error("json")] + Json(#[from] JsonError), + 
#[error("transaction")] + Transaction(#[from] TransactionError), +} + +#[cfg(test)] +pub mod test_utils { + use ed25519_compact::{KeyPair, Seed}; + use faster_hex::hex_decode; + + use super::*; + use crate::constants::{INITIAL_TARGET, MAX_MONEY, MIN_FEE_CRUZBITS}; + + // create a deterministic test block + pub fn make_test_block(num_tx: usize) -> Block { + let mut txs = Vec::with_capacity(num_tx); + let seed = 1.to_string().repeat(Seed::BYTES); + let pub_key_coinbase = + KeyPair::from_seed(Seed::new(seed.as_bytes().try_into().unwrap())).pk; + // coinbase + txs.push(Transaction::new( + None, + pub_key_coinbase, + MIN_FEE_CRUZBITS, + None, + None, + None, + MAX_NUMBER, + Some("こんにちは".to_owned()), + )); + + // create txs + for i in 1..num_tx { + // create a sender + let seed = (i % 10).to_string().repeat(Seed::BYTES); + let keypair = KeyPair::from_seed(Seed::new(seed.as_bytes().try_into().unwrap())); + let pub_key = keypair.pk; + let priv_key = keypair.sk; + + // create a recipient + let seed2 = ((i + 1) % 10).to_string().repeat(Seed::BYTES); + let keypair2 = KeyPair::from_seed(Seed::new(seed2.as_bytes().try_into().unwrap())); + let pub_key2 = keypair2.pk; + + let matures = MAX_NUMBER; + let expires = MAX_NUMBER; + let height = MAX_NUMBER; + let amount = MAX_MONEY; + let fee = MAX_MONEY; + + let mut tx = Transaction::new( + Some(pub_key), + pub_key2, + amount, + Some(fee), + Some(matures), + Some(expires), + height, + Some("こんにちは".to_owned()), + ); + + let memo = tx.memo.as_ref().unwrap(); + assert_eq!( + memo.len(), + 15, + "Expected memo length to be 15 but received {}", + memo.len() + ); + + tx.nonce = 123456789 + i as u32; + + // sign the transaction + tx.sign(priv_key).unwrap(); + txs.insert(i, tx); + } + + // create the block + let mut target = BlockID::new(); + hex_decode(INITIAL_TARGET.as_bytes(), &mut target).unwrap(); + Block::new(BlockID::new(), 0, target, BlockID::new(), txs).unwrap() + } +} + +#[cfg(test)] +mod test { + use ed25519_compact::KeyPair; + use 
faster_hex::hex_decode; + + use super::*; + use crate::block::test_utils::make_test_block; + + #[test] + fn test_id() { + let block = make_test_block(1); + assert!(block.id().is_ok(), "failed to hash block id"); + } + + #[test] + fn test_id_fast() { + let mut block = make_test_block(1); + let mut hasher = BlockHeaderHasher::new(); + block.header.id_fast(0, &mut hasher); + assert_eq!(hasher.result, block.id().unwrap().as_big_int()); + } + + #[test] + fn test_compute_block_work() { + let mut target = BlockID::new(); + hex_decode( + "ffff000000000000000000000000000000000000000000000000000000000000".as_bytes(), + &mut target, + ) + .unwrap(); + let block_work = compute_block_work(&target); + assert_eq!(block_work, UBig::from(1u8)); + + hex_decode( + "00000000ffff0000000000000000000000000000000000000000000000000000".as_bytes(), + &mut target, + ) + .unwrap(); + let block_work = compute_block_work(&target); + assert_eq!(block_work, UBig::from(4295032833_u64)) + } + + #[test] + fn test_add_transaction() { + let mut block = make_test_block(0); + let key_pair = KeyPair::generate(); + let tx1 = Transaction::new(None, key_pair.pk, 1, Some(1), None, None, 0, None); + let tx2 = tx1.clone(); + block.add_transaction(tx1.id().unwrap(), tx1).unwrap(); + block.add_transaction(tx2.id().unwrap(), tx2).unwrap(); + let mut hasher = Sha3_256::new(); + let hlr = compute_hash_list_root(&mut hasher, &block.transactions).unwrap(); + assert_eq!(block.header.hash_list_root, hlr); + } + + #[test] + fn test_compute_hash_list_root() { + let block = make_test_block(3); + + let mut hasher = Sha3_256::new(); + for tx in block.transactions[1..].iter() { + hasher.update(tx.id().unwrap()); + } + let without_coinbase_hash = hasher.finalize(); + + let mut hasher = Sha3_256::new(); + hasher.update(block.transactions[0].id().unwrap()); + hasher.update(without_coinbase_hash); + let hlr1 = TransactionID::from(&hasher.finalize()[..]); + + let mut hasher = Sha3_256::new(); + let hlr2 = 
compute_hash_list_root(&mut hasher, &block.transactions).unwrap(); + assert_eq!(hlr1, hlr2); + } +} diff --git a/src/block_header_hasher.rs b/src/block_header_hasher.rs new file mode 100644 index 0000000..1f0977c --- /dev/null +++ b/src/block_header_hasher.rs @@ -0,0 +1,408 @@ +use faster_hex::hex_encode; +use ibig::UBig; +use sha3::digest::generic_array::typenum::U32; +use sha3::digest::generic_array::GenericArray; +use sha3::{Digest, Sha3_256}; + +use crate::block::BlockHeader; +#[cfg(any(feature = "cuda", feature = "opencl"))] +use crate::gpu::{gpu_miner_mine, gpu_miner_update}; +use crate::transaction::TransactionID; + +#[derive(Clone, Default, Debug)] +pub struct BlockHeaderHasher { + // these can change per attempt + pub previous_hash_list_root: TransactionID, + pub previous_time: u64, + pub previous_nonce: u64, + pub previous_transaction_count: u32, + + // used for tracking offsets of mutable fields in the buffer + pub hash_list_root_offset: usize, + pub time_offset: usize, + pub nonce_offset: usize, + pub transaction_count_offset: usize, + + // used for calculating a running offset + pub time_len: usize, + pub nonce_len: usize, + pub tx_count_len: usize, + + // used for hashing + pub initialized: bool, + pub buf_len: usize, + pub buffer: Vec, + pub hasher: Sha3_256, + pub result_buf: GenericArray, + pub result: UBig, + pub hashes_per_attempt: u64, +} + +/// Static fields +pub const HDR_PREVIOUS: &[u8] = br#"{"previous":""#; +pub const HDR_HASH_LIST_ROOT: &[u8] = br#"","hash_list_root":""#; +pub const HDR_TIME: &[u8] = br#"","time":"#; +pub const HDR_TARGET: &[u8] = br#","target":""#; +pub const HDR_CHAIN_WORK: &[u8] = br#"","chain_work":""#; +pub const HDR_NONCE: &[u8] = br#"","nonce":"#; +pub const HDR_HEIGHT: &[u8] = br#","height":"#; +pub const HDR_TRANSACTION_COUNT: &[u8] = br#","transaction_count":"#; +pub const HDR_END: &[u8] = br#"}"#; + +// calculate the maximum buffer length needed +const BUF_LEN: usize = HDR_PREVIOUS.len() + + 
HDR_HASH_LIST_ROOT.len() + + HDR_TIME.len() + + HDR_TARGET.len() + + HDR_CHAIN_WORK.len() + + HDR_NONCE.len() + + HDR_HEIGHT.len() + + HDR_TRANSACTION_COUNT.len() + + HDR_END.len() + // TODO: what are these sizes for + + 4 * 64 + + 3 * 19 + + 10; + +impl BlockHeaderHasher { + /// Returns a newly initialized BlockHeaderHasher + pub fn new() -> Self { + // initialize the hasher + Self { + buffer: vec![0; BUF_LEN], + hashes_per_attempt: 1, + ..Default::default() + } + } + + /// Initialize the buffer to be hashed + pub fn init_buffer(&mut self, header: &mut BlockHeader) { + // lots of slice copying to array offsets. + + // previous + self.buffer[..HDR_PREVIOUS.len()].copy_from_slice(HDR_PREVIOUS); + let mut buf_len = HDR_PREVIOUS.len(); + let _ = hex_encode( + &header.previous, + &mut self.buffer[buf_len..][..header.previous.len() * 2], + ); + buf_len += header.previous.len() * 2; + + // hash_list_root + self.previous_hash_list_root = header.hash_list_root; + self.buffer[buf_len..][..HDR_HASH_LIST_ROOT.len()].copy_from_slice(HDR_HASH_LIST_ROOT); + buf_len += HDR_HASH_LIST_ROOT.len(); + self.hash_list_root_offset = buf_len; + let _ = hex_encode( + &header.hash_list_root, + &mut self.buffer[buf_len..][..header.hash_list_root.len() * 2], + ); + buf_len += header.hash_list_root.len() * 2; + + // time + self.previous_time = header.time; + self.buffer[buf_len..][..HDR_TIME.len()].copy_from_slice(HDR_TIME); + buf_len += HDR_TIME.len(); + self.time_offset = buf_len; + let time_bytes = header.time.to_string().into_bytes(); + self.buffer[buf_len..buf_len + time_bytes.len()].copy_from_slice(&time_bytes); + self.time_len = time_bytes.len(); + buf_len += time_bytes.len(); + + // target + self.buffer[buf_len..][..HDR_TARGET.len()].copy_from_slice(HDR_TARGET); + buf_len += HDR_TARGET.len(); + let _ = hex_encode( + &header.target, + &mut self.buffer[buf_len..][..header.target.len() * 2], + ); + buf_len += header.target.len() * 2; + + // chain_work + 
self.buffer[buf_len..][..HDR_CHAIN_WORK.len()].copy_from_slice(HDR_CHAIN_WORK); + buf_len += HDR_CHAIN_WORK.len(); + + let _ = hex_encode( + &header.chain_work, + &mut self.buffer[buf_len..buf_len + header.chain_work.len() * 2], + ); + buf_len += header.chain_work.len() * 2; + + // nonce + self.previous_nonce = header.nonce; + self.buffer[buf_len..][..HDR_NONCE.len()].copy_from_slice(HDR_NONCE); + buf_len += HDR_NONCE.len(); + self.nonce_offset = buf_len; + let nonce_bytes = header.nonce.to_string().into_bytes(); + self.buffer[buf_len..][..nonce_bytes.len()].copy_from_slice(&nonce_bytes); + self.nonce_len = nonce_bytes.len(); + buf_len += nonce_bytes.len(); + + // height + self.buffer[buf_len..][..HDR_HEIGHT.len()].copy_from_slice(HDR_HEIGHT); + buf_len += HDR_HEIGHT.len(); + let height_bytes = header.height.to_string().into_bytes(); + self.buffer[buf_len..][..height_bytes.len()].copy_from_slice(&height_bytes); + buf_len += height_bytes.len(); + + // transaction_count + self.previous_transaction_count = header.transaction_count; + self.buffer[buf_len..][..HDR_TRANSACTION_COUNT.len()] + .copy_from_slice(HDR_TRANSACTION_COUNT); + buf_len += HDR_TRANSACTION_COUNT.len(); + self.transaction_count_offset = buf_len; + let transaction_count_bytes = header.transaction_count.to_string().into_bytes(); + self.buffer[buf_len..][..transaction_count_bytes.len()] + .copy_from_slice(&transaction_count_bytes); + self.tx_count_len = transaction_count_bytes.len(); + buf_len += transaction_count_bytes.len(); + + // end + self.buffer[buf_len..][..HDR_END.len()].copy_from_slice(HDR_END); + buf_len += HDR_END.len(); + self.buf_len = buf_len; + + self.initialized = true; + } + + /// Is called every time the header is updated and the caller wants its new hash value/ID. 
+ pub fn update(_miner_num: usize, header: &mut BlockHeader, hasher: &mut BlockHeaderHasher) { + let mut _buffer_changed = false; + + if !hasher.initialized { + hasher.init_buffer(header); + _buffer_changed = true; + } else { + // hash_list_root + if hasher.previous_hash_list_root != header.hash_list_root { + _buffer_changed = true; + // write out the new value + hasher.previous_hash_list_root = header.hash_list_root; + let _ = hex_encode( + &header.hash_list_root, + &mut hasher.buffer[hasher.hash_list_root_offset..], + ); + } + + let mut offset = 0; + + // time + if hasher.previous_time != header.time { + _buffer_changed = true; + hasher.previous_time = header.time; + + // write out the new value + let mut buf_len = hasher.time_offset; + let time_bytes = header.time.to_string().into_bytes(); + hasher.buffer[buf_len..][..time_bytes.len()].copy_from_slice(&time_bytes); + hasher.time_len = time_bytes.len(); + buf_len += time_bytes.len(); + + // did time shrink or grow in length? + offset = time_bytes.len() as isize - hasher.time_len as isize; + + if offset != 0 { + // shift everything below up or down + + // target + hasher.buffer[buf_len..][..HDR_TARGET.len()].copy_from_slice(HDR_TARGET); + buf_len += HDR_TARGET.len(); + + let _ = hex_encode( + &header.target, + &mut hasher.buffer[buf_len..][..header.target.len() * 2], + ); + buf_len += header.target.len() * 2; + + // chain_work + hasher.buffer[buf_len..][..HDR_CHAIN_WORK.len()] + .copy_from_slice(HDR_CHAIN_WORK); + buf_len += HDR_CHAIN_WORK.len(); + let _ = hex_encode( + &header.chain_work, + &mut hasher.buffer[buf_len..][..header.chain_work.len() * 2], + ); + buf_len += header.chain_work.len() * 2; // hex bytes written + + // start of nonce + hasher.buffer[buf_len..][..HDR_NONCE.len()].copy_from_slice(HDR_NONCE); + } + } + + // nonce + let device_mining = cfg!(feature = "cuda") || cfg!(feature = "opencl"); + if offset != 0 || (!device_mining && hasher.previous_nonce != header.nonce) { + _buffer_changed = true; + 
hasher.previous_nonce = header.nonce; + + // write out the new value (or old value at a new location) + if offset.is_positive() { + hasher.nonce_offset += offset as usize; + } else { + hasher.nonce_offset -= offset.unsigned_abs() + } + + let buf_len = hasher.nonce_offset; + let nonce_bytes = header.nonce.to_string().into_bytes(); + hasher.buffer[buf_len..buf_len + nonce_bytes.len()].copy_from_slice(&nonce_bytes); + + let nonce_len = nonce_bytes.len(); + hasher.nonce_len = nonce_len; + + // did nonce shrink or grow in length? + offset += nonce_len as isize - hasher.nonce_len as isize; + + if offset != 0 { + // shift everything below up or down + + // height + hasher.buffer.extend_from_slice(HDR_HEIGHT); + hasher + .buffer + .extend_from_slice(&header.height.to_string().into_bytes()); + + // start of transaction_count + hasher.buffer.extend_from_slice(HDR_TRANSACTION_COUNT); + } + } + + // transaction_count + if offset != 0 || hasher.previous_transaction_count != header.transaction_count { + _buffer_changed = true; + hasher.previous_transaction_count = header.transaction_count; + + // write out the new value (or old value at a new location) + if offset.is_positive() { + hasher.transaction_count_offset += offset as usize; + } else { + hasher.transaction_count_offset -= offset.unsigned_abs(); + } + + let buf_len = hasher.transaction_count_offset; + let transaction_count_bytes = header.transaction_count.to_string().into_bytes(); + hasher.buffer[buf_len..][..transaction_count_bytes.len()] + .copy_from_slice(&transaction_count_bytes); + + // did count shrink or grow in length? 
+ offset += transaction_count_bytes.len() as isize - hasher.tx_count_len as isize; + hasher.tx_count_len = transaction_count_bytes.len(); + + if offset != 0 { + // shift the footer up or down + hasher.buffer[buf_len..][..HDR_END.len()].copy_from_slice(HDR_END); + } + } + + // it's possible (likely) we did a bunch of encoding with no net impact to the buffer length + if offset.is_positive() { + hasher.buf_len += offset as usize; + } else { + hasher.buf_len -= offset.unsigned_abs(); + } + } + + #[cfg(any(feature = "cuda", feature = "opencl"))] + { + // devices don't return a hash just a solving nonce (if found) + let nonce = hasher.update_device(_miner_num, header, _buffer_changed); + if nonce == 0x7fffffff_ffffffff { + // not found + hasher.result = UBig::from_be_bytes( + // indirectly let miner.go know we failed + &[ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + ], + ); + } else { + log::info!( + "GPU miner {} found a possible solution: {}, double-checking it...", + _miner_num, + nonce + ); + // rebuild the buffer with the new nonce since we don't update it + // per attempt when using CUDA/OpenCL. 
+ header.nonce = nonce; + hasher.init_buffer(header); + } + } + + // hash it + hasher.hasher.update(&hasher.buffer[..hasher.buf_len]); + hasher.hasher.finalize_into_reset(&mut hasher.result_buf); + hasher.result = UBig::from_be_bytes(&hasher.result_buf); + } + + /// Handle mining with GPU devices + #[cfg(any(feature = "cuda", feature = "opencl"))] + pub fn update_device( + &mut self, + miner_num: usize, + header: &BlockHeader, + buffer_changed: bool, + ) -> u64 { + if buffer_changed { + // update the device's copy of the buffer + let last_offset = self.nonce_offset + self.nonce_len; + self.hashes_per_attempt = gpu_miner_update( + miner_num, + &self.buffer, + self.buf_len, + self.nonce_offset, + last_offset, + &header.target, + ); + } + + // try for a solution + gpu_miner_mine(miner_num, header.nonce) + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::block::test_utils::make_test_block; + use crate::block::{Block, BlockID}; + + #[test] + fn test_block_header_hasher() { + let mut block = make_test_block(10); + assert!(compare_ids(&mut block), "ID mismatch 1"); + + block.header.time = 1234; + assert!(compare_ids(&mut block), "ID mismatch 2"); + + block.header.nonce = 1234; + assert!(compare_ids(&mut block), "ID mismatch 3"); + + block.header.nonce = 1235; + assert!(compare_ids(&mut block), "ID mismatch 4"); + + block.header.nonce = 1236; + block.header.time = 1234; + assert!(compare_ids(&mut block), "ID mismatch 5"); + + block.header.time = 123498; + block.header.nonce = 12370910; + let tx = &block.transactions[1]; + let tx_id = tx.id().unwrap(); + block.add_transaction(tx_id, tx.clone()).unwrap(); + assert!(compare_ids(&mut block), "ID mismatch 6"); + + block.header.time = 987654321; + assert!(compare_ids(&mut block), "ID mismatch 7"); + } + + fn compare_ids(block: &mut Block) -> bool { + // compute header ID + let id = block.id().unwrap(); + + // use delta method + let mut hasher = BlockHeaderHasher::new(); + block.header.id_fast(0, &mut hasher); + let 
id2 = BlockID::from(hasher.result);
        id == id2
    }
}
diff --git a/src/block_queue.rs b/src/block_queue.rs new file mode 100644 index 0000000..adb66d4 --- /dev/null +++ b/src/block_queue.rs @@ -0,0 +1,113 @@
use std::collections::{HashMap, VecDeque};
use std::net::SocketAddr;
use std::sync::RwLock;
use std::time::{Duration, SystemTime};

use crate::block::BlockID;

/// If a block has been in the queue for more than 2 minutes it can be re-added with a new peer responsible for its download.
const MAX_QUEUE_WAIT: Duration = Duration::from_secs(2 * 60);

/// A queue of blocks to download. FIFO order lives in `block_queue`;
/// `block_map` tracks which peer currently owns each download and since when.
pub struct BlockQueue {
    block_map: RwLock<HashMap<BlockID, BlockQueueEntry>>,
    block_queue: RwLock<VecDeque<BlockID>>,
}

struct BlockQueueEntry {
    // peer currently responsible for downloading this block
    who: SocketAddr,
    // when that peer took (or renewed) ownership
    when: SystemTime,
}

impl BlockQueue {
    /// Returns a new instance of a BlockQueue
    pub fn new() -> Self {
        Self {
            block_map: RwLock::new(HashMap::new()),
            block_queue: RwLock::new(VecDeque::new()),
        }
    }

    /// Adds the block ID to the back of the queue and records the address of the peer who pushed it if it didn't exist in the queue.
    /// If it did exist and MAX_QUEUE_WAIT has elapsed, the block is left in its position but the peer responsible for download is updated.
    pub fn add(&self, id: &BlockID, who: &SocketAddr) -> bool {
        let mut block_map = self.block_map.write().unwrap();
        if let Some(block_queue_entry) = block_map.get_mut(id) {
            // if the system clock went backwards treat the entry as fresh
            // rather than panicking
            let elapsed = block_queue_entry
                .when
                .elapsed()
                .unwrap_or(Duration::ZERO);
            if elapsed < MAX_QUEUE_WAIT {
                // it's still pending download
                return false;
            }

            // it's expired. signal that it can be tried again and leave it in place
            block_queue_entry.when = SystemTime::now();
            // new peer owns its place in the queue
            block_queue_entry.who = *who;
            return true;
        }

        let block_queue_entry = BlockQueueEntry {
            who: *who,
            when: SystemTime::now(),
        };
        block_map.insert(*id, block_queue_entry);

        let mut block_queue = self.block_queue.write().unwrap();

        // add block id to the back of the queue
        block_queue.push_back(*id);

        true
    }

    /// Removes the block ID from the queue only if the requester is who is currently responsible for its download.
    pub fn remove(&self, id: &BlockID, who: &SocketAddr) -> bool {
        let mut block_map = self.block_map.write().unwrap();
        if let Some(block_queue_entry) = block_map.get(id) {
            if block_queue_entry.who == *who {
                let mut block_queue = self.block_queue.write().unwrap();
                if let Some(index) = block_queue.iter().position(|queue_id| queue_id == id) {
                    block_queue.remove(index);
                }
                block_map.remove(id);
                return true;
            }
        }

        false
    }

    /// Returns true if the block ID exists in the queue.
    pub fn exists(&self, id: &BlockID) -> bool {
        self.block_map.read().unwrap().contains_key(id)
    }

    /// Returns the ID of the block at the front of the queue.
    pub fn peek(&self) -> Option<BlockID> {
        // front() already yields None on an empty queue
        self.block_queue.read().unwrap().front().cloned()
    }

    /// Returns the length of the queue.
    pub fn len(&self) -> usize {
        self.block_queue.read().unwrap().len()
    }

    /// Returns true if the queue has a length of 0.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}

impl Default for BlockQueue {
    // delegates to `new()`; both locks start empty
    fn default() -> Self {
        Self::new()
    }
}
diff --git a/src/block_storage.rs b/src/block_storage.rs new file mode 100644 index 0000000..2e1318a --- /dev/null +++ b/src/block_storage.rs @@ -0,0 +1,59 @@
use thiserror::Error;

use crate::block::{Block, BlockHeader, BlockID};
use crate::error::{DbError, EncodingError, FileError, JsonError};
use crate::transaction::Transaction;

/// Storage interface for blocks: whole-block persistence plus lookups of
/// the block, its raw bytes, its header, and individual transactions.
pub trait BlockStorage {
    /// Called to store all of the block's information.
    /// `now` is the storage timestamp recorded alongside the header.
    fn store(&self, id: &BlockID, block: &Block, now: u64) -> Result<(), BlockStorageError>;

    /// Returns the referenced block, or `None` if it is not stored.
    fn get_block(&self, id: &BlockID) -> Result<Option<Block>, BlockStorageError>;

    /// Returns the referenced block as a byte slice, or `None` if not stored.
    fn get_block_bytes(&self, id: &BlockID) -> Result<Option<Vec<u8>>, BlockStorageError>;

    /// Returns the referenced block's header and the timestamp of when it was stored.
    fn get_block_header(
        &self,
        id: &BlockID,
    ) -> Result<Option<(BlockHeader, u64)>, BlockStorageError>;

    /// Returns a transaction within a block and the block's header.
    /// The transaction is `None` when `index` is out of range.
    fn get_transaction(
        &self,
        id: &BlockID,
        index: u32,
    ) -> Result<(Option<Transaction>, BlockHeader), BlockStorageError>;
}

/// Errors produced by BlockStorage implementations; wraps the shared
/// db/encoding/file/json error types from `crate::error`.
#[derive(Error, Debug)]
pub enum BlockStorageError {
    #[error("block storage is in read-only mode")]
    ReadOnly,

    #[error("block storage not found")]
    BlockStorageNotFound(#[from] BlockStorageNotFoundError),

    #[error("db")]
    Db(#[from] DbError),
    #[error("encoding")]
    Encoding(#[from] EncodingError),
    #[error("file")]
    File(#[from] FileError),
    #[error("json")]
    Json(#[from] JsonError),
}

/// Not-found variants, one per BlockStorage lookup kind.
#[derive(Error, Debug)]
pub enum BlockStorageNotFoundError {
    #[error("block {0} not found")]
    Block(BlockID),
    #[error("block {0} bytes not found")]
    BlockBytes(BlockID),
    #[error("block {0} header not found")]
    BlockHeader(BlockID),
    #[error("transaction at block {0}, index {1} not found")]
    TransactionAtBlockIndex(BlockID, u32),
}
diff --git a/src/block_storage_disk.rs b/src/block_storage_disk.rs new file mode 100644 index 0000000..bc6cfd9 --- /dev/null +++ b/src/block_storage_disk.rs @@ -0,0 +1,302 @@
use std::fs;
use std::io::{self, Read, Write};
use std::mem;
use std::path::{Path, PathBuf};
use std::sync::Arc;

use leveldb::database::Database;
use leveldb::options::{Options, ReadOptions, WriteOptions};
use lz4::{Decoder, EncoderBuilder};
use serde::Deserialize;

use crate::block::{Block, BlockHeader, BlockID};
use crate::block_storage::{BlockStorage, BlockStorageError, BlockStorageNotFoundError};
use crate::error::{DbError, EncodingError, FileError, JsonError};
use crate::transaction::Transaction;

/// An on-disk BlockStorage implementation using the filesystem for blocks
/// and LevelDB for block headers.
pub struct BlockStorageDisk {
    // header database: block ID -> timestamp + bincode-encoded header
    db: Database,
    // directory holding one .json/.lz4 file per block
    dir_path: PathBuf,
    // when true, store() is rejected
    read_only: bool,
    // when true, new blocks are written lz4-compressed
    compress: bool,
}

impl BlockStorageDisk {
    /// Returns a new instance of on-disk block storage.
+ pub fn new( + dir_path: PathBuf, + db_path: PathBuf, + read_only: bool, + compress: bool, + ) -> Result, BlockStorageError> { + // create the blocks path if it doesn't exist + if !read_only { + if !dir_path.exists() { + fs::create_dir_all(&dir_path) + .map_err(|err| FileError::Create(dir_path.clone(), err))?; + } else { + let md = fs::metadata(&dir_path) + .map_err(|err| FileError::Create(dir_path.clone(), err))?; + if md.is_file() { + return Err(FileError::Create( + dir_path.clone(), + io::Error::new(io::ErrorKind::AlreadyExists, "Path is a file"), + ) + .into()); + } + } + } + + // open the database + // TODO: open database as read only when option is available + let mut options = Options::new(); + options.create_if_missing = true; + let db = Database::open(&db_path, &options).map_err(|err| DbError::Open(db_path, err))?; + Ok(Arc::new(Self { + db, + dir_path, + read_only, + compress, + })) + } +} + +impl BlockStorage for BlockStorageDisk { + /// Is called to store all of the block's information. + fn store(&self, id: &BlockID, block: &Block, now: u64) -> Result<(), BlockStorageError> { + if self.read_only { + return Err(BlockStorageError::ReadOnly); + } + + let ext = if self.compress { "lz4" } else { "json" }; + let block_path = Path::new(&self.dir_path) + .join(id.as_hex()) + .with_extension(ext); + + // save the complete block to the filesystem + + let block_bytes = if self.compress { + // compress with lz4 + let mut zout = Vec::new(); + let mut encoder = EncoderBuilder::new() + .build(&mut zout) + .map_err(|err| FileError::Compress(block_path.clone(), err))?; + let block_bytes = serde_json::to_vec(&block).map_err(JsonError::Serialize)?; + io::copy(&mut &block_bytes[..], &mut encoder) + .map_err(|err| FileError::Compress(block_path.clone(), err))?; + let (_output, _result) = encoder.finish(); + zout + } else { + serde_json::to_vec(&block).map_err(JsonError::Serialize)? 
+ }; + + // write the block and sync + let mut f = fs::OpenOptions::new() + .create(true) + .write(true) + .truncate(true) + .open(&block_path) + .map_err(|err| FileError::Write(block_path.clone(), err))?; + + let n = f + .write(&block_bytes) + .map_err(|err| FileError::Write(block_path.clone(), err))?; + + f.sync_all() + .map_err(|err| FileError::Write(block_path.clone(), err))?; + + if n < block_bytes.len() { + return Err(FileError::Write( + block_path, + io::Error::new( + io::ErrorKind::UnexpectedEof, + "Bytes written are smaller than block bytes", + ), + ) + .into()); + } + + // save the header to leveldb + let encoded_block_header = encode_block_header(&block.header, now)?; + self.db + .put_u8(&WriteOptions { sync: true }, id, &encoded_block_header) + .map_err(DbError::Write)?; + Ok(()) + } + + /// Returns the referenced block. + fn get_block(&self, id: &BlockID) -> Result, BlockStorageError> { + let Some(block_json) = self.get_block_bytes(id)? else { + return Ok(None); + }; + + // unmarshal + let block = + serde_json::from_slice::(&block_json[..]).map_err(JsonError::Deserialize)?; + Ok(Some(block)) + } + + /// Returns the referenced block as a byte slice. 
    fn get_block_bytes(&self, id: &BlockID) -> Result<Option<Vec<u8>>, BlockStorageError> {
        let ext = if self.compress {
            // order to try finding the block by extension
            ["lz4", "json"]
        } else {
            ["json", "lz4"]
        };

        // tracks which extension we ended up reading, so we know whether
        // to decompress below
        let mut compressed = self.compress;

        let mut block_path = Path::new(&self.dir_path)
            .join(id.as_hex())
            .with_extension(ext[0]);
        if !block_path.exists() {
            // fall back to the other extension (e.g. blocks written before
            // the compress setting changed)
            compressed = !compressed;
            block_path = Path::new(&self.dir_path)
                .join(id.as_hex())
                .with_extension(ext[1]);

            if !block_path.exists() {
                // not found
                return Ok(None);
            }
        }

        // read it off disk
        let mut block_bytes =
            fs::read(&block_path).map_err(|err| FileError::Read(block_path.clone(), err))?;

        if compressed {
            // uncompress
            let mut out = Vec::new();
            let mut decoder = Decoder::new(&block_bytes[..])
                .map_err(|err| FileError::Decompress(block_path.clone(), err))?;
            decoder
                .read_to_end(&mut out)
                .map_err(|err| FileError::Decompress(block_path.clone(), err))?;
            block_bytes = out;
        }

        Ok(Some(block_bytes))
    }

    /// Returns the referenced block's header and the timestamp of when it was stored.
    fn get_block_header(
        &self,
        id: &BlockID,
    ) -> Result<Option<(BlockHeader, u64)>, BlockStorageError> {
        // fetch it
        let Some(encoded_header) = self
            .db
            .get_u8(&ReadOptions::new(), id)
            .map_err(DbError::Read)?
        else {
            return Ok(None);
        };

        // decode it
        let (block_header, when) = decode_block_header(&encoded_header)?;

        Ok(Some((block_header, when)))
    }

    /// Returns a transaction within a block and the block's header.
    /// The transaction is `None` when `index` is out of range; a missing
    /// block or header is an error.
    fn get_transaction(
        &self,
        id: &BlockID,
        index: u32,
    ) -> Result<(Option<Transaction>, BlockHeader), BlockStorageError> {
        let Some(block_bytes) = self.get_block_bytes(id)?
        else {
            return Err(BlockStorageNotFoundError::BlockBytes(*id).into());
        };

        // pick out and unmarshal the transaction at the index
        let block_json = serde_json::from_slice::<serde_json::Value>(&block_bytes)
            .map_err(JsonError::Deserialize)?;
        let tx = match block_json["transactions"].get(index as usize) {
            Some(tx_json) => {
                Some(Transaction::deserialize(tx_json).map_err(JsonError::Deserialize)?)
            }
            None => None,
        };

        // pick out and unmarshal the header
        let header = match block_json.get("header") {
            Some(hdr_json) => BlockHeader::deserialize(hdr_json).map_err(JsonError::Deserialize)?,
            None => {
                return Err(BlockStorageNotFoundError::BlockHeader(*id).into());
            }
        };

        Ok((tx, header))
    }
}

// leveldb schema:
// {bid} -> {timestamp}{bincode encoded header}
// note: original implementation is with gob instead of bincode

const U64_LENGTH: usize = mem::size_of::<u64>();

// Prepends the big-endian storage timestamp to the bincode-encoded header.
fn encode_block_header(header: &BlockHeader, when: u64) -> Result<Vec<u8>, BlockStorageError> {
    let mut buf = Vec::new();
    buf.extend_from_slice(&when.to_be_bytes());
    let encoded = bincode::serialize(&header).map_err(EncodingError::BincodeEncode)?;
    buf.extend_from_slice(&encoded);
    Ok(buf)
}

// Inverse of encode_block_header.
// NOTE(review): the slice index panics if `encoded_header` is shorter than
// 8 bytes — assumed unreachable for records we wrote ourselves; confirm.
fn decode_block_header(encoded_header: &[u8]) -> Result<(BlockHeader, u64), BlockStorageError> {
    let mut when_bytes = [0u8; U64_LENGTH];
    when_bytes.copy_from_slice(&encoded_header[0..U64_LENGTH]);
    let when = u64::from_be_bytes(when_bytes);
    let header = bincode::deserialize::<BlockHeader>(&encoded_header[U64_LENGTH..])
        .map_err(EncodingError::BincodeDecode)?;
    Ok((header, when))
}

#[cfg(test)]
mod test {
    use ed25519_compact::KeyPair;
    use faster_hex::hex_decode;

    use super::*;
    use crate::constants::{INITIAL_COINBASE_REWARD, INITIAL_TARGET};

    #[test]
    fn test_encode_block_header() {
        let pub_key = KeyPair::generate().pk;

        // create a coinbase
        let tx = Transaction::new(
            None,
            pub_key,
            INITIAL_COINBASE_REWARD,
            None,
            None,
            None,
            0,
            Some("hello".to_owned()),
        );

        // create a block
        let mut target = BlockID::new();
        hex_decode(INITIAL_TARGET.as_bytes(), &mut target).unwrap();
        let block = Block::new(BlockID::new(), 0, target, BlockID::new(), vec![tx]).unwrap();

        // encode the header
        let encoded_header = encode_block_header(&block.header, 12345).unwrap();

        // decode the header
        let (header, when) = decode_block_header(&encoded_header).unwrap();

        // compare
        assert_eq!(
            header, block.header,
            "Decoded header doesn't match original"
        );
        assert_eq!(when, 12345, "Decoded timestamp doesn't match original");
    }
}
diff --git a/src/checkpoints.rs b/src/checkpoints.rs new file mode 100644 index 0000000..3c5b69c --- /dev/null +++ b/src/checkpoints.rs @@ -0,0 +1,86 @@
use std::collections::HashMap;
use std::sync::OnceLock;

use thiserror::Error;

use crate::block::BlockID;

/// Can be disabled for testing.
pub const CHECKPOINTS_ENABLED: bool = true;

/// Used to determine if the client is synced.
pub const LATEST_CHECKPOINT_HEIGHT: u64 = 205780;

/// Checkpoints are known height and block ID pairs on the main chain.
static CHECKPOINTS: OnceLock<HashMap<u64, &str>> = OnceLock::new();

/// Returns an error if the passed height is a checkpoint and the
/// passed block ID does not match the given checkpoint block ID.
+pub fn checkpoint_check(id: &BlockID, height: u64) -> Result<(), CheckpointError> { + if !CHECKPOINTS_ENABLED { + return Ok(()); + } + + match CHECKPOINTS + .get_or_init(|| { + HashMap::from([ + ( + 18144, + "000000000000b83e78ec29355d098256936389010d7450a288763ed4f191069e", + ), + ( + 36288, + "00000000000052bd43e85cf60f2ecd1c5016083e6a560b3ee57427c7f2dd64e8", + ), + ( + 54432, + "000000000001131d0597533737d7aadac0a5d4e132caa4c47c793c02e6d56063", + ), + ( + 72576, + "0000000000013873c9974f8468c7e03419e02f49aaf9761f4d6c19e233d0bb3d", + ), + ( + 90720, + "0000000000026254d69f914ff774ed8691d30003c8094d03e61aa8ed4c862c5f", + ), + ( + 108864, + "00000000001d7b35c09ac85a4e5b577dc62569f2782220723f1613ea268c66aa", + ), + ( + 127008, + "000000000013df027075d395d6f97e03cd8285db6c37b1575e66ede1c480d3de", + ), + ( + 145142, + "0000000006dcd69479a3f4f40a301d22e78b1f56de44e00c1fa3191967fd1425", + ), + ( + 205780, + "0000000089ad25388e0af7139383288203b46240da2d0651a89af0252e5fc4d3", + ), + ]) + }) + .get(&height) + { + Some(checkpoint_id) => { + if id.as_hex() != *checkpoint_id { + Err(CheckpointError::BlockMismatch( + *id, + height, + checkpoint_id.to_string(), + )) + } else { + Ok(()) + } + } + None => Ok(()), + } +} + +#[derive(Error, Debug)] +pub enum CheckpointError { + #[error("block {0} at height {1} does not match checkpoint ID {2}")] + BlockMismatch(BlockID, u64, String), +} diff --git a/src/constants.rs b/src/constants.rs new file mode 100644 index 0000000..2026805 --- /dev/null +++ b/src/constants.rs @@ -0,0 +1,89 @@ +// The below values affect ledger consensus and come directly from bitcoin. 
+// we could have played with these but we're introducing significant enough changes +// already IMO, so let's keep the scope of this experiment as small as we can + +pub const CRUZBITS_PER_CRUZ: u64 = 100000000; + +pub const INITIAL_COINBASE_REWARD: u64 = 50 * CRUZBITS_PER_CRUZ; + +/// blocks +pub const COINBASE_MATURITY: u64 = 100; + +pub const INITIAL_TARGET: &str = "00000000ffff0000000000000000000000000000000000000000000000000000"; + +/// 2 hours +pub const MAX_FUTURE_SECONDS: u64 = 2 * 60 * 60; + +pub const MAX_MONEY: u64 = 21000000 * CRUZBITS_PER_CRUZ; + +/// 2 weeks in blocks +pub const RETARGET_INTERVAL: u64 = 2016; + +/// 2 weeks in seconds +pub const RETARGET_TIME: u64 = 1209600; + +/// every 10 minutes +pub const TARGET_SPACING: u64 = 600; + +pub const NUM_BLOCKS_FOR_MEDIAN_TIMESTAMP: u64 = 11; + +/// 4 years in blocks +pub const BLOCKS_UNTIL_REWARD_HALVING: u64 = 210000; + +// the below value affects ledger consensus and comes from bitcoin cash + +/// 1 day in blocks +pub const RETARGET_SMA_WINDOW: u64 = 144; + +// the below values affect ledger consensus and are new as of our ledger + +/// 16.666... 
tx/sec, ~4 MBish in JSON +pub const INITIAL_MAX_TRANSACTIONS_PER_BLOCK: u32 = 10000; + +/// 2 years in blocks +pub const BLOCKS_UNTIL_TRANSACTIONS_PER_BLOCK_DOUBLING: u64 = 105000; + +pub const MAX_TRANSACTIONS_PER_BLOCK: u32 = (1 << 31) - 1; + +/// pre-calculated +pub const MAX_TRANSACTIONS_PER_BLOCK_EXCEEDED_AT_HEIGHT: u64 = 1852032; + +/// 1 week in blocks +pub const BLOCKS_UNTIL_NEW_SERIES: u64 = 1008; + +/// bytes (ascii/utf8 only) +pub const MAX_MEMO_LENGTH: usize = 100; + +/// given our JSON protocol we should respect Javascript's Number.MAX_SAFE_INTEGER value +pub const MAX_NUMBER: u64 = (1 << 53) - 1; + +/// height at which we switch from bitcoin's difficulty adjustment algorithm to bitcoin cash's algorithm +pub const BITCOIN_CASH_RETARGET_ALGORITHM_HEIGHT: u64 = 28861; + +// the below values only affect peering behavior and do not affect ledger consensus + +pub const DEFAULT_CRUZBIT_PORT: u16 = 8831; + +pub const MAX_OUTBOUND_PEER_CONNECTIONS: usize = 8; + +pub const MAX_INBOUND_PEER_CONNECTIONS: usize = 128; + +pub const MAX_INBOUND_PEER_CONNECTIONS_FROM_SAME_HOST: usize = 4; + +pub const MAX_TIP_AGE: u64 = 24 * 60 * 60; + +/// doesn't apply to blocks +pub const MAX_PROTOCOL_MESSAGE_LENGTH: usize = 2 * 1024 * 1024; + +// the below values are mining policy and also do not affect ledger consensus +// if you change this it needs to be less than the maximum at the current height + +pub const MAX_TRANSACTIONS_TO_INCLUDE_PER_BLOCK: u32 = INITIAL_MAX_TRANSACTIONS_PER_BLOCK; + +pub const MAX_TRANSACTION_QUEUE_LENGTH: u32 = MAX_TRANSACTIONS_TO_INCLUDE_PER_BLOCK * 10; + +/// 0.01 cruz +pub const MIN_FEE_CRUZBITS: u64 = 1000000; + +/// 0.01 cruz +pub const MIN_AMOUNT_CRUZBITS: u64 = 1000000; diff --git a/src/cuda/BUILDING.md b/src/cuda/BUILDING.md new file mode 100644 index 0000000..b2b9e80 --- /dev/null +++ b/src/cuda/BUILDING.md @@ -0,0 +1,23 @@ +## How to build with CUDA support + +Tested on Ubuntu Linux 18.10. 
This assumes you've followed [these steps](https://gist.github.com/setanimals/f562ed7dd1c69af3fbe960c7b9502615) so far. + +```bash +sudo apt install -y build-essential +sudo apt install -y cmake +wget https://developer.nvidia.com/compute/cuda/10.1/Prod/local_installers/cuda_10.1.168_418.67_linux.run +sudo sh cuda_10.1.168_418.67_linux.run +rm -rf go/src/github.com/cruzbit/ +mkdir -p go/src/github.com/cruzbit/ +cd go/src/github.com/cruzbit/ +git clone https://github.com/cruzbit/cruzbit.git +cd cruzbit/cuda +mkdir build +cd build +cmake .. +make +sudo make install +cd ../../client +go install -tags cuda +LD_LIBRARY_PATH=/usr/local/lib client -numminers +``` diff --git a/src/cuda/CMakeLists.txt b/src/cuda/CMakeLists.txt new file mode 100644 index 0000000..0b49975 --- /dev/null +++ b/src/cuda/CMakeLists.txt @@ -0,0 +1,11 @@ +cmake_minimum_required(VERSION 3.10) +project(cruzbit_cuda) +find_package(CUDA REQUIRED) +set(CUDA_SEPARABLE_COMPILATION ON) +set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS}; -Xcompiler -fPIC) +CUDA_ADD_LIBRARY(cruzbit_cuda SHARED + mine.cu + sha3.cu + sha3.cc +) +install(TARGETS cruzbit_cuda DESTINATION lib) diff --git a/src/cuda/LICENSE b/src/cuda/LICENSE new file mode 100644 index 0000000..d2d484d --- /dev/null +++ b/src/cuda/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Markku-Juhani O. Saarinen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/src/cuda/README.md b/src/cuda/README.md new file mode 100644 index 0000000..c0dc565 --- /dev/null +++ b/src/cuda/README.md @@ -0,0 +1,69 @@ +### Updated 19-Jul-19: + +This is a CUDA port of tiny_sha3, which also now includes mining specific calls for use by the cruzbit client. +Original project's README follows. + +Take it easy, +- asdvxgxasjab + +# tiny_sha3 +Very small, readable implementation of the FIPS 202 and SHA3 hash function. +Public domain. + +### Updated 27-Dec-15: + +Added SHAKE128 and SHAKE256 code and test vectors. The code can actually do +a XOF of arbitrary size (like "SHAKE512"). + + +### Updated 03-Sep-15: + +Made the implementation portable. The API is now pretty much the +same that OpenSSL uses. + + +### Updated 07-Aug-15: + +Now that SHA3 spec is out, I've updated the package to match with the +new padding rules. There is literally one line difference between +Keccak 3.0 and SHA-3 implementations: + +``` + temp[inlen++] = 0x06; // XXX Padding Changed from Keccak 3.0 +``` + +The 0x06 constant there used to be 0x01. But this of course totally +breaks compatibility and test vectors had to be revised. + +SHA-3 Spec: http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf + +Cheers, +- markku + + +### Original README.TXT from 19-Nov-11: + +Hi. + +The SHA-3 competition is nearing its end and I would personally like +to support Keccak as the winner. I have a PhD in hash function cryptanalysis +so don't take my word for it, go ahead and look into the code !
+ +Since I couldn't find a *compact* and/or *readable* implementation of Keccak +anywhere, here's one I cooked up as a service to the curious. + +This implementation is intended for study of the algorithm, not for +production use. + +The code works correctly on 64-bit little-endian platforms with gcc. +Like your Linux box. The main.c module contains self-tests for all +officially supported hash sizes. + +If you're looking for production code, the official multi-megabyte package +covers everything you could possibly need and too much much more: +http://keccak.noekeon.org/ + +Cheers, +- Markku 19-Nov-11 + +Dr. Markku-Juhani O. Saarinen diff --git a/src/cuda/mine.cu b/src/cuda/mine.cu new file mode 100644 index 0000000..6da2d3b --- /dev/null +++ b/src/cuda/mine.cu @@ -0,0 +1,476 @@ +// mine.cu +// 19-Jul-19 Provides cgo hooks to manage mining on Nvidia devices -asdvxgxasjab + +#include "sha3.h" +#include "sha3_cu.h" +#include +#include +#include +#include + +static const char *_cudaErrorToString(cudaError_t error) { + switch (error) { + case cudaSuccess: + return "cudaSuccess"; + + case cudaErrorMissingConfiguration: + return "cudaErrorMissingConfiguration"; + + case cudaErrorMemoryAllocation: + return "cudaErrorMemoryAllocation"; + + case cudaErrorInitializationError: + return "cudaErrorInitializationError"; + + case cudaErrorLaunchFailure: + return "cudaErrorLaunchFailure"; + + case cudaErrorPriorLaunchFailure: + return "cudaErrorPriorLaunchFailure"; + + case cudaErrorLaunchTimeout: + return "cudaErrorLaunchTimeout"; + + case cudaErrorLaunchOutOfResources: + return "cudaErrorLaunchOutOfResources"; + + case cudaErrorInvalidDeviceFunction: + return "cudaErrorInvalidDeviceFunction"; + + case cudaErrorInvalidConfiguration: + return "cudaErrorInvalidConfiguration"; + + case cudaErrorInvalidDevice: + return "cudaErrorInvalidDevice"; + + case cudaErrorInvalidValue: + return "cudaErrorInvalidValue"; + + case cudaErrorInvalidPitchValue: + return "cudaErrorInvalidPitchValue";
+ + case cudaErrorInvalidSymbol: + return "cudaErrorInvalidSymbol"; + + case cudaErrorMapBufferObjectFailed: + return "cudaErrorMapBufferObjectFailed"; + + case cudaErrorUnmapBufferObjectFailed: + return "cudaErrorUnmapBufferObjectFailed"; + + case cudaErrorInvalidHostPointer: + return "cudaErrorInvalidHostPointer"; + + case cudaErrorInvalidDevicePointer: + return "cudaErrorInvalidDevicePointer"; + + case cudaErrorInvalidTexture: + return "cudaErrorInvalidTexture"; + + case cudaErrorInvalidTextureBinding: + return "cudaErrorInvalidTextureBinding"; + + case cudaErrorInvalidChannelDescriptor: + return "cudaErrorInvalidChannelDescriptor"; + + case cudaErrorInvalidMemcpyDirection: + return "cudaErrorInvalidMemcpyDirection"; + + case cudaErrorAddressOfConstant: + return "cudaErrorAddressOfConstant"; + + case cudaErrorTextureFetchFailed: + return "cudaErrorTextureFetchFailed"; + + case cudaErrorTextureNotBound: + return "cudaErrorTextureNotBound"; + + case cudaErrorSynchronizationError: + return "cudaErrorSynchronizationError"; + + case cudaErrorInvalidFilterSetting: + return "cudaErrorInvalidFilterSetting"; + + case cudaErrorInvalidNormSetting: + return "cudaErrorInvalidNormSetting"; + + case cudaErrorMixedDeviceExecution: + return "cudaErrorMixedDeviceExecution"; + + case cudaErrorCudartUnloading: + return "cudaErrorCudartUnloading"; + + case cudaErrorUnknown: + return "cudaErrorUnknown"; + + case cudaErrorNotYetImplemented: + return "cudaErrorNotYetImplemented"; + + case cudaErrorMemoryValueTooLarge: + return "cudaErrorMemoryValueTooLarge"; + + case cudaErrorInvalidResourceHandle: + return "cudaErrorInvalidResourceHandle"; + + case cudaErrorNotReady: + return "cudaErrorNotReady"; + + case cudaErrorInsufficientDriver: + return "cudaErrorInsufficientDriver"; + + case cudaErrorSetOnActiveProcess: + return "cudaErrorSetOnActiveProcess"; + + case cudaErrorInvalidSurface: + return "cudaErrorInvalidSurface"; + + case cudaErrorNoDevice: + return "cudaErrorNoDevice"; + + case 
cudaErrorECCUncorrectable: + return "cudaErrorECCUncorrectable"; + + case cudaErrorSharedObjectSymbolNotFound: + return "cudaErrorSharedObjectSymbolNotFound"; + + case cudaErrorSharedObjectInitFailed: + return "cudaErrorSharedObjectInitFailed"; + + case cudaErrorUnsupportedLimit: + return "cudaErrorUnsupportedLimit"; + + case cudaErrorDuplicateVariableName: + return "cudaErrorDuplicateVariableName"; + + case cudaErrorDuplicateTextureName: + return "cudaErrorDuplicateTextureName"; + + case cudaErrorDuplicateSurfaceName: + return "cudaErrorDuplicateSurfaceName"; + + case cudaErrorDevicesUnavailable: + return "cudaErrorDevicesUnavailable"; + + case cudaErrorInvalidKernelImage: + return "cudaErrorInvalidKernelImage"; + + case cudaErrorNoKernelImageForDevice: + return "cudaErrorNoKernelImageForDevice"; + + case cudaErrorIncompatibleDriverContext: + return "cudaErrorIncompatibleDriverContext"; + + case cudaErrorPeerAccessAlreadyEnabled: + return "cudaErrorPeerAccessAlreadyEnabled"; + + case cudaErrorPeerAccessNotEnabled: + return "cudaErrorPeerAccessNotEnabled"; + + case cudaErrorDeviceAlreadyInUse: + return "cudaErrorDeviceAlreadyInUse"; + + case cudaErrorProfilerDisabled: + return "cudaErrorProfilerDisabled"; + + case cudaErrorProfilerNotInitialized: + return "cudaErrorProfilerNotInitialized"; + + case cudaErrorProfilerAlreadyStarted: + return "cudaErrorProfilerAlreadyStarted"; + + case cudaErrorProfilerAlreadyStopped: + return "cudaErrorProfilerAlreadyStopped"; + + case cudaErrorAssert: + return "cudaErrorAssert"; + + case cudaErrorTooManyPeers: + return "cudaErrorTooManyPeers"; + + case cudaErrorHostMemoryAlreadyRegistered: + return "cudaErrorHostMemoryAlreadyRegistered"; + + case cudaErrorHostMemoryNotRegistered: + return "cudaErrorHostMemoryNotRegistered"; + + case cudaErrorOperatingSystem: + return "cudaErrorOperatingSystem"; + + case cudaErrorPeerAccessUnsupported: + return "cudaErrorPeerAccessUnsupported"; + + case cudaErrorLaunchMaxDepthExceeded: + return 
"cudaErrorLaunchMaxDepthExceeded"; + + case cudaErrorLaunchFileScopedTex: + return "cudaErrorLaunchFileScopedTex"; + + case cudaErrorLaunchFileScopedSurf: + return "cudaErrorLaunchFileScopedSurf"; + + case cudaErrorSyncDepthExceeded: + return "cudaErrorSyncDepthExceeded"; + + case cudaErrorLaunchPendingCountExceeded: + return "cudaErrorLaunchPendingCountExceeded"; + + case cudaErrorNotPermitted: + return "cudaErrorNotPermitted"; + + case cudaErrorNotSupported: + return "cudaErrorNotSupported"; + + case cudaErrorHardwareStackError: + return "cudaErrorHardwareStackError"; + + case cudaErrorIllegalInstruction: + return "cudaErrorIllegalInstruction"; + + case cudaErrorMisalignedAddress: + return "cudaErrorMisalignedAddress"; + + case cudaErrorInvalidAddressSpace: + return "cudaErrorInvalidAddressSpace"; + + case cudaErrorInvalidPc: + return "cudaErrorInvalidPc"; + + case cudaErrorIllegalAddress: + return "cudaErrorIllegalAddress"; + + case cudaErrorInvalidPtx: + return "cudaErrorInvalidPtx"; + + case cudaErrorInvalidGraphicsContext: + return "cudaErrorInvalidGraphicsContext"; + + case cudaErrorStartupFailure: + return "cudaErrorStartupFailure"; + + case cudaErrorApiFailureBase: + return "cudaErrorApiFailureBase"; + + case cudaErrorNvlinkUncorrectable: + return "cudaErrorNvlinkUncorrectable"; + + case cudaErrorJitCompilerNotFound: + return "cudaErrorJitCompilerNotFound"; + + case cudaErrorCooperativeLaunchTooLarge: + return "cudaErrorCooperativeLaunchTooLarge"; + } + + return ""; +} + +__device__ int memcmp_cu(const void *p1, const void *p2, size_t len) { + for (size_t i = 0; i < len; i++) { + uint8_t b1 = ((uint8_t *)p1)[i]; + uint8_t b2 = ((uint8_t *)p2)[i]; + if (b1 < b2) { + return -1; + } + if (b1 > b2) { + return 1; + } + } + return 0; +} + +__device__ int strlen_cu(char *s) { + int i; + for (i = 0; s[i] != '\0';) { + i++; + } + return i; +} + +__device__ char *reverse(char *str) { + char tmp, *src, *dst; + size_t len; + if (str != NULL) { + len = strlen_cu(str); 
+ if (len > 1) { + src = str; + dst = src + len - 1; + while (src < dst) { + tmp = *src; + *src++ = *dst; + *dst-- = tmp; + } + } + } + return str; +} + +__device__ int itoa(int64_t n, char s[]) { + int i; + int64_t sign; + + if ((sign = n) < 0) /* record sign */ + n = -n; /* make n positive */ + i = 0; + + do { /* generate digits in reverse order */ + s[i++] = n % 10 + '0'; /* get next digit */ + } while ((n /= 10) > 0); /* delete it */ + + if (sign < 0) + s[i++] = '-'; + + s[i] = '\0'; + reverse(s); + return i; +} + +__device__ void debug_print_buf(const void *buf, size_t len) { + for (int i = 0; i < len; i++) { + printf("%c", ((char *)buf)[i]); + } + printf("\n"); +} + +__device__ void debug_print_hash(const void *hash) { + for (int i = 0; i < 32; i++) { + printf("%02x", ((char *)hash)[i] & 0xFF); + } + printf("\n"); +} + +// called by each device thread +__global__ void try_solve(int64_t start_nonce, const sha3_ctx_t *prev_sha3, + const void *last, size_t last_len, const void *target, + int64_t *good_nonce) { + uint8_t hash[32]; + uint8_t nonce_s[20]; + + int index = blockDim.x * blockIdx.x + threadIdx.x; + int64_t nonce = start_nonce + (int64_t)index; + size_t n = (size_t)itoa(nonce, (char *)nonce_s); + + sha3_ctx_t sha3; + memcpy(&sha3, prev_sha3, sizeof(sha3_ctx_t)); + sha3_update_cu(&sha3, nonce_s, n); + sha3_update_cu(&sha3, last, last_len); + sha3_final_cu(hash, &sha3); + + if (memcmp_cu(hash, target, 32) <= 0) { + // found a solution. 
not thread-safe but a race is very unlikely + *good_nonce = nonce; + } +} + +// device-local state +struct miner_state { + int num_blocks, block_size, max_threads; + sha3_ctx_t *prev_sha3_cu; + void *last_cu, *target_cu; + size_t last_len; + int64_t *nonce_cu; +}; + +static struct miner_state *states = 0; + +extern "C" { + +// called on startup +int cuda_init() { + int device_count = -1; + cudaError_t error = cudaGetDeviceCount(&device_count); + if (error != cudaSuccess) { + printf("cudaGetDeviceCount: %s\n", _cudaErrorToString(error)); + return -1; + } + if (device_count <= 0) { + return -1; + } + + states = new struct miner_state[device_count]; + + for (int i = 0; i < device_count; i++) { + cudaDeviceProp props; + error = cudaGetDeviceProperties(&props, i); + if (error != cudaSuccess) { + printf("cudaGetDeviceProperties: %s\n", _cudaErrorToString(error)); + return -1; + } + + states[i].max_threads = + props.maxThreadsPerMultiProcessor * props.multiProcessorCount; + states[i].block_size = props.warpSize; + states[i].num_blocks = states[i].max_threads / states[i].block_size; + + error = cudaSetDevice(i); + if (error != cudaSuccess) { + printf("cudaSetDevice: %s\n", _cudaErrorToString(error)); + return -1; + } + + error = cudaDeviceReset(); + if (error != cudaSuccess) { + printf("cudaDeviceReset: %s\n", _cudaErrorToString(error)); + return -1; + } + +#if 0 + // I tried this but it noticeably impacted performance + error = cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync); + if (error != cudaSuccess) { + printf("cudaSetDeviceFlags: %s\n", _cudaErrorToString(error)); + return -1; + } +#endif + + // allocate memory used on device written to by the host + cudaMalloc(&states[i].prev_sha3_cu, sizeof(sha3_ctx_t)); + cudaMalloc(&states[i].last_cu, 512); + cudaMalloc(&states[i].target_cu, 32); + cudaMalloc(&states[i].nonce_cu, sizeof(int64_t)); + } + + return device_count; +} + +// called after updating the block header +int miner_update(int miner_num, const void *first, 
size_t first_len, + const void *last, size_t last_len, const void *target) { + cudaSetDevice(miner_num); + + // hash the first (largest) part of the header once and copy the state + sha3_ctx_t sha3; + sha3_init(&sha3, 32); + sha3_update(&sha3, first, first_len); + cudaMemcpy(states[miner_num].prev_sha3_cu, &sha3, sizeof(sha3_ctx_t), + cudaMemcpyHostToDevice); + + // copy the end part of the header + states[miner_num].last_len = last_len; + cudaMemcpy(states[miner_num].last_cu, last, last_len, cudaMemcpyHostToDevice); + + // copy the target + cudaMemcpy(states[miner_num].target_cu, target, 32, cudaMemcpyHostToDevice); + + // set the nonce to "not found" + cudaMemset(states[miner_num].nonce_cu, 0x7F, sizeof(int64_t)); + cudaMemset(states[miner_num].nonce_cu, 0xFF, sizeof(int64_t) - 1); + + return states[miner_num].num_blocks * states[miner_num].block_size; +} + +// called in a loop until solved +// returns a solving nonce if found; otherwise 0x7FFFFFFFFFFFFFFF +int64_t miner_mine(int miner_num, int64_t start_nonce) { + cudaSetDevice(miner_num); + int64_t nonce; + int num_blocks = states[miner_num].num_blocks; + int block_size = states[miner_num].block_size; + try_solve<<>>( + start_nonce, states[miner_num].prev_sha3_cu, states[miner_num].last_cu, + states[miner_num].last_len, states[miner_num].target_cu, + states[miner_num].nonce_cu); + cudaDeviceSynchronize(); + cudaMemcpy(&nonce, states[miner_num].nonce_cu, sizeof(int64_t), + cudaMemcpyDeviceToHost); + return nonce; +} +} diff --git a/src/cuda/sha3.cc b/src/cuda/sha3.cc new file mode 100644 index 0000000..d102b31 --- /dev/null +++ b/src/cuda/sha3.cc @@ -0,0 +1,161 @@ +// sha3.cc +// 19-Nov-11 Markku-Juhani O. 
Saarinen + +// Revised 07-Aug-15 to match with official release of FIPS PUB 202 "SHA3" +// Revised 03-Sep-15 for portability + OpenSSL - style API +// Revised 21-Jul-19 to strip unneeded code -asdvxgxasjab + +#include "sha3.h" + +// update the state with given number of rounds + +void sha3_keccakf(uint64_t st[25]) { + // constants + const uint64_t keccakf_rndc[24] = { + 0x0000000000000001, 0x0000000000008082, 0x800000000000808a, + 0x8000000080008000, 0x000000000000808b, 0x0000000080000001, + 0x8000000080008081, 0x8000000000008009, 0x000000000000008a, + 0x0000000000000088, 0x0000000080008009, 0x000000008000000a, + 0x000000008000808b, 0x800000000000008b, 0x8000000000008089, + 0x8000000000008003, 0x8000000000008002, 0x8000000000000080, + 0x000000000000800a, 0x800000008000000a, 0x8000000080008081, + 0x8000000000008080, 0x0000000080000001, 0x8000000080008008}; + const int keccakf_rotc[24] = {1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, + 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44}; + const int keccakf_piln[24] = {10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, + 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1}; + + // variables + int i, j, r; + uint64_t t, bc[5]; + +#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__ +#error "CUDA support only available on little-endian hosts." +#if 0 + uint8_t *v; + + // endianess conversion. 
this is redundant on little-endian targets + for (i = 0; i < 25; i++) { + v = (uint8_t *)&st[i]; + st[i] = ((uint64_t)v[0]) | (((uint64_t)v[1]) << 8) | + (((uint64_t)v[2]) << 16) | (((uint64_t)v[3]) << 24) | + (((uint64_t)v[4]) << 32) | (((uint64_t)v[5]) << 40) | + (((uint64_t)v[6]) << 48) | (((uint64_t)v[7]) << 56); + } +#endif +#endif + + // actual iteration + for (r = 0; r < KECCAKF_ROUNDS; r++) { + + // Theta + for (i = 0; i < 5; i++) + bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15] ^ st[i + 20]; + + for (i = 0; i < 5; i++) { + t = bc[(i + 4) % 5] ^ ROTL64(bc[(i + 1) % 5], 1); + for (j = 0; j < 25; j += 5) + st[j + i] ^= t; + } + + // Rho Pi + t = st[1]; + for (i = 0; i < 24; i++) { + j = keccakf_piln[i]; + bc[0] = st[j]; + st[j] = ROTL64(t, keccakf_rotc[i]); + t = bc[0]; + } + + // Chi + for (j = 0; j < 25; j += 5) { + for (i = 0; i < 5; i++) + bc[i] = st[j + i]; + for (i = 0; i < 5; i++) + st[j + i] ^= (~bc[(i + 1) % 5]) & bc[(i + 2) % 5]; + } + + // Iota + st[0] ^= keccakf_rndc[r]; + } + +#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__ +#error "CUDA support only available on little-endian hosts." +#if 0 + // endianess conversion. 
this is redundant on little-endian targets + for (i = 0; i < 25; i++) { + v = (uint8_t *)&st[i]; + t = st[i]; + v[0] = t & 0xFF; + v[1] = (t >> 8) & 0xFF; + v[2] = (t >> 16) & 0xFF; + v[3] = (t >> 24) & 0xFF; + v[4] = (t >> 32) & 0xFF; + v[5] = (t >> 40) & 0xFF; + v[6] = (t >> 48) & 0xFF; + v[7] = (t >> 56) & 0xFF; + } +#endif +#endif +} + +// Initialize the context for SHA3 + +int sha3_init(sha3_ctx_t *c, int mdlen) { + int i; + + for (i = 0; i < 25; i++) + c->st.q[i] = 0; + c->mdlen = mdlen; + c->rsiz = 200 - 2 * mdlen; + c->pt = 0; + + return 1; +} + +// update state with more data + +int sha3_update(sha3_ctx_t *c, const void *data, size_t len) { + size_t i; + int j; + + j = c->pt; + for (i = 0; i < len; i++) { + c->st.b[j++] ^= ((const uint8_t *)data)[i]; + if (j >= c->rsiz) { + sha3_keccakf(c->st.q); + j = 0; + } + } + c->pt = j; + + return 1; +} + +// finalize and output a hash + +int sha3_final(void *md, sha3_ctx_t *c) { + int i; + + c->st.b[c->pt] ^= 0x06; + c->st.b[c->rsiz - 1] ^= 0x80; + sha3_keccakf(c->st.q); + + for (i = 0; i < c->mdlen; i++) { + ((uint8_t *)md)[i] = c->st.b[i]; + } + + return 1; +} + +// compute a SHA-3 hash (md) of given byte length from "in" + +void *sha3(const void *in, size_t inlen, void *md, int mdlen) { + sha3_ctx_t sha3; + + sha3_init(&sha3, mdlen); + sha3_update(&sha3, in, inlen); + sha3_final(md, &sha3); + + return md; +} diff --git a/src/cuda/sha3.cu b/src/cuda/sha3.cu new file mode 100644 index 0000000..0d98280 --- /dev/null +++ b/src/cuda/sha3.cu @@ -0,0 +1,159 @@ +// sha3.cu +// 19-Nov-11 Markku-Juhani O. 
Saarinen + +// Revised 07-Aug-15 to match with official release of FIPS PUB 202 "SHA3" +// Revised 03-Sep-15 for portability + OpenSSL - style API +// Revised 19-Jul-19 to run on Nvidia devices -asdvxgxasjab + +#include "sha3_cu.h" + +// update the state with given number of rounds + +__device__ void sha3_keccakf_cu(uint64_t st[25]) { + // constants + const uint64_t keccakf_rndc[24] = { + 0x0000000000000001, 0x0000000000008082, 0x800000000000808a, + 0x8000000080008000, 0x000000000000808b, 0x0000000080000001, + 0x8000000080008081, 0x8000000000008009, 0x000000000000008a, + 0x0000000000000088, 0x0000000080008009, 0x000000008000000a, + 0x000000008000808b, 0x800000000000008b, 0x8000000000008089, + 0x8000000000008003, 0x8000000000008002, 0x8000000000000080, + 0x000000000000800a, 0x800000008000000a, 0x8000000080008081, + 0x8000000000008080, 0x0000000080000001, 0x8000000080008008}; + const int keccakf_rotc[24] = {1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, + 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44}; + const int keccakf_piln[24] = {10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, + 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1}; + + // variables + int i, j, r; + uint64_t t, bc[5]; + +#if 0 +#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__ + uint8_t *v; + + // endianess conversion. 
this is redundant on little-endian targets + for (i = 0; i < 25; i++) { + v = (uint8_t *)&st[i]; + st[i] = ((uint64_t)v[0]) | (((uint64_t)v[1]) << 8) | + (((uint64_t)v[2]) << 16) | (((uint64_t)v[3]) << 24) | + (((uint64_t)v[4]) << 32) | (((uint64_t)v[5]) << 40) | + (((uint64_t)v[6]) << 48) | (((uint64_t)v[7]) << 56); + } +#endif +#endif + + // actual iteration + for (r = 0; r < KECCAKF_ROUNDS; r++) { + + // Theta + for (i = 0; i < 5; i++) + bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15] ^ st[i + 20]; + + for (i = 0; i < 5; i++) { + t = bc[(i + 4) % 5] ^ ROTL64(bc[(i + 1) % 5], 1); + for (j = 0; j < 25; j += 5) + st[j + i] ^= t; + } + + // Rho Pi + t = st[1]; + for (i = 0; i < 24; i++) { + j = keccakf_piln[i]; + bc[0] = st[j]; + st[j] = ROTL64(t, keccakf_rotc[i]); + t = bc[0]; + } + + // Chi + for (j = 0; j < 25; j += 5) { + for (i = 0; i < 5; i++) + bc[i] = st[j + i]; + for (i = 0; i < 5; i++) + st[j + i] ^= (~bc[(i + 1) % 5]) & bc[(i + 2) % 5]; + } + + // Iota + st[0] ^= keccakf_rndc[r]; + } + +#if 0 +#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__ + // endianess conversion. 
this is redundant on little-endian targets + for (i = 0; i < 25; i++) { + v = (uint8_t *)&st[i]; + t = st[i]; + v[0] = t & 0xFF; + v[1] = (t >> 8) & 0xFF; + v[2] = (t >> 16) & 0xFF; + v[3] = (t >> 24) & 0xFF; + v[4] = (t >> 32) & 0xFF; + v[5] = (t >> 40) & 0xFF; + v[6] = (t >> 48) & 0xFF; + v[7] = (t >> 56) & 0xFF; + } +#endif +#endif +} + +// Initialize the context for SHA3 + +__device__ int sha3_init_cu(sha3_ctx_t *c, int mdlen) { + int i; + + for (i = 0; i < 25; i++) + c->st.q[i] = 0; + c->mdlen = mdlen; + c->rsiz = 200 - 2 * mdlen; + c->pt = 0; + + return 1; +} + +// update state with more data + +__device__ int sha3_update_cu(sha3_ctx_t *c, const void *data, size_t len) { + size_t i; + int j; + + j = c->pt; + for (i = 0; i < len; i++) { + c->st.b[j++] ^= ((const uint8_t *)data)[i]; + if (j >= c->rsiz) { + sha3_keccakf_cu(c->st.q); + j = 0; + } + } + c->pt = j; + + return 1; +} + +// finalize and output a hash + +__device__ int sha3_final_cu(void *md, sha3_ctx_t *c) { + int i; + + c->st.b[c->pt] ^= 0x06; + c->st.b[c->rsiz - 1] ^= 0x80; + sha3_keccakf_cu(c->st.q); + + for (i = 0; i < c->mdlen; i++) { + ((uint8_t *)md)[i] = c->st.b[i]; + } + + return 1; +} + +// compute a SHA-3 hash (md) of given byte length from "in" + +__device__ void *sha3_cu(const void *in, size_t inlen, void *md, int mdlen) { + sha3_ctx_t sha3; + + sha3_init_cu(&sha3, mdlen); + sha3_update_cu(&sha3, in, inlen); + sha3_final_cu(md, &sha3); + + return md; +} diff --git a/src/cuda/sha3.h b/src/cuda/sha3.h new file mode 100644 index 0000000..2dfc74f --- /dev/null +++ b/src/cuda/sha3.h @@ -0,0 +1,33 @@ +// sha3.h +// 19-Nov-11 Markku-Juhani O. Saarinen +// Revised 21-Jul-19 to strip unneeded code and move sha3_ctx_t to its own file. +// we need to build both a host and device version of this code. 
-asdvxgxasjab + +#ifndef SHA3_H +#define SHA3_H + +#include "sha3_ctx.h" +#include <stddef.h> +#include <stdint.h> + +#ifndef KECCAKF_ROUNDS +#define KECCAKF_ROUNDS 24 +#endif + +#ifndef ROTL64 +#define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y)))) +#endif + +// Compression function. +void sha3_keccakf(uint64_t st[25]); + +// OpenSSL - like interface +int sha3_init(sha3_ctx_t *c, + int mdlen); // mdlen = hash output in bytes +int sha3_update(sha3_ctx_t *c, const void *data, size_t len); +int sha3_final(void *md, sha3_ctx_t *c); // digest goes to md + +// compute a sha3 hash (md) of given byte length from "in" +void *sha3(const void *in, size_t inlen, void *md, int mdlen); + +#endif diff --git a/src/cuda/sha3_ctx.h b/src/cuda/sha3_ctx.h new file mode 100644 index 0000000..385f16a --- /dev/null +++ b/src/cuda/sha3_ctx.h @@ -0,0 +1,20 @@ +// sha3_ctx.h +// 19-Nov-11 Markku-Juhani O. Saarinen +// Revised 21-Jul-19 moved sha3_ctx_t to its own file -asdvxgxasjab + +#ifndef SHA3_CTX_H +#define SHA3_CTX_H + +#include <stddef.h> +#include <stdint.h> + +// state context +typedef struct { + union { // state: + uint8_t b[200]; // 8-bit bytes + uint64_t q[25]; // 64-bit words + } st; + int pt, rsiz, mdlen; // these don't overflow +} sha3_ctx_t; + +#endif diff --git a/src/cuda/sha3_cu.h b/src/cuda/sha3_cu.h new file mode 100644 index 0000000..a100906 --- /dev/null +++ b/src/cuda/sha3_cu.h @@ -0,0 +1,32 @@ +// sha3_cu.h +// 19-Nov-11 Markku-Juhani O. Saarinen +// Revised 19-Jul-19 sha3.h to run on Nvidia devices -asdvxgxasjab + +#ifndef SHA3_CU_H +#define SHA3_CU_H + +#include "sha3_ctx.h" +#include <stddef.h> +#include <stdint.h> + +#ifndef KECCAKF_ROUNDS +#define KECCAKF_ROUNDS 24 +#endif + +#ifndef ROTL64 +#define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y)))) +#endif + +// Compression function.
+__device__ void sha3_keccakf_cu(uint64_t st[25]); + +// OpenSSL - like interfece +__device__ int sha3_init_cu(sha3_ctx_t *c, + int mdlen); // mdlen = hash output in bytes +__device__ int sha3_update_cu(sha3_ctx_t *c, const void *data, size_t len); +__device__ int sha3_final_cu(void *md, sha3_ctx_t *c); // digest goes to md + +// compute a sha3 hash (md) of given byte length from "in" +__device__ void *sha3_cu(const void *in, size_t inlen, void *md, int mdlen); + +#endif diff --git a/src/dns.rs b/src/dns.rs new file mode 100644 index 0000000..def7807 --- /dev/null +++ b/src/dns.rs @@ -0,0 +1,245 @@ +use std::net::{IpAddr, SocketAddr}; +use std::str::FromStr; +use std::sync::Arc; +use std::time::Duration; + +use domain::base::iana::Rcode; +use domain::base::{Dname, Message, MessageBuilder, Rtype, StaticCompressor, StreamTarget}; +use domain::rdata::{Aaaa, AllRecordData, A}; +use log::{error, info}; +use rand::seq::SliceRandom; +use thiserror::Error; +use tokio::net::UdpSocket; +use tokio::task::JoinHandle; +use tokio::time::timeout; + +use crate::constants::DEFAULT_CRUZBIT_PORT; +use crate::error::{impl_debug_error_chain, ErrChain, ParsingError, SocketError}; +use crate::peer_storage::{PeerStorage, PeerStorageError}; +use crate::peer_storage_disk::PeerStorageDisk; +use crate::shutdown::{ShutdownChanReceiver, SpawnedError}; +use crate::utils::{now_as_duration, resolve_host}; + +const DNAME: &str = "client.cruzbit"; + +const SEEDERS: &[&str] = &["45.32.6.23:8831", "66.117.62.146:8831", "dns.cruzb.it:8831"]; + +/// Returns known peers in response to DNS queries. +pub struct DnsSeeder { + peer_store: Arc, + sock: UdpSocket, + port: u16, + my_external_ip: Option, + shutdown_chan_rx: ShutdownChanReceiver, +} + +impl DnsSeeder { + /// Creates a new DNS seeder given a PeerStorage interface. 
+ pub async fn new( + peer_store: Arc, + port: u16, + my_external_ip: Option, + shutdown_chan_rx: ShutdownChanReceiver, + ) -> Self { + let sock = UdpSocket::bind(format!("0.0.0.0:{}", port)) + .await + .expect("dns seeder couldn't bind to address"); + + Self { + peer_store, + sock, + port, + my_external_ip, + shutdown_chan_rx, + } + } + + /// Spawns the DNS Seeder's main loop. + pub fn spawn(self) -> JoinHandle> { + tokio::spawn(async move { self.run().await.map_err(Into::into) }) + } + + /// Runs the DNS Seeder's main loop. + pub async fn run(mut self) -> Result<(), DnsSeederError> { + let mut buf = vec![0; 512]; + + loop { + tokio::select! { + recv = self.sock.recv_from(&mut buf) => { + match recv.map_err(|err| DnsSeederError::Socket(SocketError::Receive(err))) { + Ok((len, addr)) => { + let data = &buf[..len]; + let request = match Message::from_octets(data).map_err(DnsSeederError::ShortMessage) { + Ok(v) => v, + Err(err) => { + error!("{:?}", err); + continue; + } + }; + + if let Err(err) = self.handle_query(request, addr).await { + error!("{:?}", err); + continue; + }; + }, + Err(err) => { + error!("{:?}", err); + continue; + } + } + } + + _ = &mut self.shutdown_chan_rx => { + info!("DNS Seeder shutting down"); + break Ok(()) + } + } + } + } + + async fn handle_query( + &self, + request: Message<&[u8]>, + src: SocketAddr, + ) -> Result<(), DnsSeederError> { + let q = match request.question().next() { + Some(Ok(v)) => v, + Some(Err(err)) => return Err(DnsSeederError::ParseQuestion(err)), + None => return Err(DnsSeederError::NoValue), + }; + + if q.qname().to_string() == DNAME && q.qtype() == Rtype::A { + // get up to 128 peers that we've connected to in the last 48 hours + let mut addresses = self + .peer_store + .get_since(128, now_as_duration() - Duration::from_secs(48 * 60 * 60))?; + + // add ourself + if let Some(my_external_ip) = self.my_external_ip { + addresses.push(SocketAddr::from((my_external_ip, self.port))); + } + + // shuffle them + 
addresses.shuffle(&mut rand::thread_rng()); + + let answer = MessageBuilder::new_vec(); + let mut answer = answer.start_answer(&request, Rcode::NoError)?; + + // return at most 4 + let limit = 4; + for (i, addr) in addresses.iter().enumerate() { + if i == limit { + break; + } + if addr.port() != DEFAULT_CRUZBIT_PORT { + continue; + } + if let IpAddr::V4(ip) = addr.ip() { + answer.push((q.qname(), 3600, A::new(ip)))?; + } else if let IpAddr::V6(ip) = addr.ip() { + answer.push((q.qname(), 3600, Aaaa::new(ip)))?; + } + } + + self.sock + .send_to(answer.as_slice(), src) + .await + .map_err(|err| SocketError::SendTo(src, err))?; + } + + Ok(()) + } +} + +/// Query DNS seeders +pub async fn query_for_peers() -> Result, DnsSeederError> { + let addr = SocketAddr::from_str("0.0.0.0:0").unwrap(); + let socket = UdpSocket::bind(addr) + .await + .map_err(|err| SocketError::BindUdp(addr, err))?; + + let msg = MessageBuilder::from_target(StaticCompressor::new(StreamTarget::new_vec())).unwrap(); + let mut msg = msg.question(); + msg.push((Dname::>::from_str(DNAME).unwrap(), Rtype::A))?; + + let message = msg.finish().into_target(); + let mut peers = Vec::new(); + + for seeder in SEEDERS.iter().map(|addr| resolve_host(addr)) { + let seeder = match seeder { + Ok(v) => v, + Err(err) => { + error!("{:?}", err); + continue; + } + }; + socket + .send_to(message.as_dgram_slice(), seeder) + .await + .map_err(|err| SocketError::SendTo(seeder, err))?; + + let mut buffer = vec![0; 1232]; + if timeout(Duration::from_secs(5), socket.recv_from(&mut buffer)) + .await + .is_err() + { + let err = DnsSeederError::QueryTimeout(seeder); + error!("{:?}", err); + continue; + } + + let response = match Message::from_octets(buffer).map_err(DnsSeederError::ShortMessage) { + Ok(response) => response, + Err(err) => { + error!("{:?}", err); + continue; + } + }; + + match response.answer().map_err(ParsingError::DnsData) { + Ok(answers) => { + for record in answers.limit_to::>() { + let a = match 
record.map_err(DnsSeederError::ParseQuestion) { + Ok(v) => v, + Err(err) => { + error!("{:?}", err); + continue; + } + }; + info!("Seeder returned: {}", a.data()); + let peer = format!("{}:{}", a.data(), DEFAULT_CRUZBIT_PORT); + peers.push(peer); + } + } + Err(err) => { + error!("{:?}", DnsSeederError::Parsing(err)); + } + } + } + + Ok(peers) +} + +#[derive(Error)] +pub enum DnsSeederError { + #[error("received no value")] + NoValue, + #[error("failed to parse question")] + ParseQuestion(#[source] domain::base::wire::ParseError), + #[error("connecting timeout querying seeder: {0}")] + QueryTimeout(SocketAddr), + + #[error("parsing")] + Parsing(#[from] ParsingError), + #[error("peer storage")] + PeerStorage(#[from] PeerStorageError), + #[error("socket")] + Socket(#[from] SocketError), + + #[error("dns message builder")] + MessageBuilder(#[from] domain::base::message_builder::PushError), + #[error("dns message")] + ShortMessage(#[from] domain::base::message::ShortMessage), +} + +impl_debug_error_chain!(DnsSeederError, "dns seeder"); diff --git a/src/error.rs b/src/error.rs new file mode 100644 index 0000000..9dac8af --- /dev/null +++ b/src/error.rs @@ -0,0 +1,162 @@ +use std::error::Error as StdError; +use std::fmt::{self, Write}; +use std::net::{AddrParseError, SocketAddr}; +use std::num::ParseIntError; +use std::path::PathBuf; + +use thiserror::Error; + +/// Helper to display an error's chain of sources +pub trait ErrChain { + fn chain(&self) -> Result; +} + +impl ErrChain for E { + fn chain(&self) -> Result { + let mut buf = String::new(); + write!(buf, "{}", self)?; + for err in std::iter::successors(self.source(), |&error| error.source()) { + write!(buf, " -> {}", err)?; + } + Ok(buf) + } +} + +#[macro_export] +macro_rules! 
impl_debug_error_chain { + ($t:ident, $type:expr) => { + impl std::fmt::Debug for $t + where + $t: $crate::error::ErrChain, + { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{} -> {}", $type, self.chain()?) + } + } + }; +} +pub use impl_debug_error_chain; + +#[derive(Error, Debug)] +pub enum ChannelError { + #[error("receive on {0} channel")] + OneshotReceive( + &'static str, + #[source] tokio::sync::oneshot::error::RecvError, + ), + #[error("send on {0} channel")] + OneshotSend(&'static str), + #[error("receive on {0} channel: {1}")] + Receive(&'static str, String), + #[error("send on {0} channel: {1}")] + Send(&'static str, String), +} + +#[derive(Error, Debug)] +pub enum DataError { + #[error("bytes not found")] + NotFound, + #[error("invalid bytes for public key")] + PublicKey(#[source] std::array::TryFromSliceError), + #[error("invalid bytes for string")] + String(#[source] std::str::Utf8Error), + #[error("invalid bytes for u32")] + U32(#[source] std::array::TryFromSliceError), + #[error("invalid bytes for u64")] + U64(#[source] std::array::TryFromSliceError), + + #[error(transparent)] + Ed25519(#[from] ed25519_compact::Error), +} + +#[derive(Error, Debug)] +pub enum DbError { + #[error("failed to open database at: {0}")] + Open(PathBuf, #[source] leveldb::error::Error), + #[error("failed to read")] + Read(#[source] leveldb::error::Error), + #[error("failed to repair database at: {0}")] + Repair(PathBuf, #[source] leveldb::error::Error), + #[error("failed to write")] + Write(#[source] leveldb::error::Error), + #[error("failed to write batch")] + WriteBatch(#[source] leveldb::error::Error), +} + +#[derive(Error, Debug)] +pub enum EncodingError { + #[error("failed to base64 decode bytes: {0}")] + Base64Decode(base64ct::Error), + #[error("failed to base64 encode bytes: {0}")] + Base64Encode(base64ct::InvalidLengthError), + #[error("failed to bincode decode bytes, db may be using gob (golang)")] + BincodeDecode(#[source] Box), 
+ #[error("failed to bincode encode bytes")] + BincodeEncode(#[source] Box), + #[error("failed to hex decode bytes")] + HexDecode(#[source] faster_hex::Error), + #[error("failed to hex encode bytes")] + HexEncode(#[source] faster_hex::Error), + #[error("failed to decode pem")] + Pem, +} + +#[derive(Error, Debug)] +pub enum FileError { + #[error("failed to compress file at: {0}")] + Compress(PathBuf, #[source] std::io::Error), + #[error("failed to create file at: {0}")] + Create(PathBuf, #[source] std::io::Error), + #[error("failed to decompress file at: {0}")] + Decompress(PathBuf, #[source] std::io::Error), + #[error("failed to open file at: {0}")] + Open(PathBuf, #[source] std::io::Error), + #[error("failed to read file at: {0}")] + Read(PathBuf, #[source] std::io::Error), + #[error("failed to write to file at: {0}")] + Write(PathBuf, #[source] std::io::Error), +} + +#[derive(Error, Debug)] +pub enum JsonError { + #[error("failed to deserialize")] + Deserialize(#[source] serde_json::error::Error), + #[error("failed to serialize")] + Serialize(#[source] serde_json::error::Error), +} + +#[derive(Error, Debug)] +pub enum KeyError { + #[error("private key")] + PrivateKeyDecode(#[source] EncodingError), +} + +#[derive(Error, Debug)] +pub enum ParsingError { + #[error("failed to parse dns data")] + DnsData(#[source] domain::base::wire::ParseError), + #[error("failed to parse http header")] + HttpHeader(#[source] tokio_tungstenite::tungstenite::http::header::ToStrError), + #[error("failed to parse integer")] + Integer(#[source] ParseIntError), + #[error("failed to parse ip address")] + IpAddress(#[source] AddrParseError), + #[error("failed to resolve ip address")] + ToSocketAddr(#[source] std::io::Error), +} + +#[derive(Error, Debug)] +pub enum SocketError { + #[error("failed to bind to tcp socket: {0}")] + BindTcp(SocketAddr, #[source] std::io::Error), + #[error("failed to bind to udp socket: {0}")] + BindUdp(SocketAddr, #[source] std::io::Error), + #[error("failed to 
receive on socket")] + Receive(#[source] std::io::Error), + #[error("failed to receive on socket from: {0}")] + ReceiveFrom(SocketAddr, #[source] std::io::Error), + #[error("failed to send on socket to: {0}")] + SendTo(SocketAddr, #[source] std::io::Error), + #[error("failed to tls connect on stream: {0}")] + TlsConnect(SocketAddr, #[source] std::io::Error), +} diff --git a/src/genesis.rs b/src/genesis.rs new file mode 100644 index 0000000..30323d7 --- /dev/null +++ b/src/genesis.rs @@ -0,0 +1,24 @@ +/// The first block in the chain. +/// The memo field is the hash of the tip of the bitcoin blockchain at the time of this block's creation. +pub const GENESIS_BLOCK_JSON: &str = r#"{ + "header": { + "previous": "0000000000000000000000000000000000000000000000000000000000000000", + "hash_list_root": "7afb89705316b3de79a3882ec3732b6b8796dd4bf2a80240549ae8fd49a517d8", + "time": 1561173156, + "target": "00000000ffff0000000000000000000000000000000000000000000000000000", + "chain_work": "0000000000000000000000000000000000000000000000000000000100010001", + "nonce": 1695541686981695, + "height": 0, + "transaction_count": 1 + }, + "transactions": [ + { + "time": 1561173126, + "nonce": 1654479747, + "to": "ntkSbbG+b0vo49IGd9nnH39eHIxIEqXmIL8aaJZV+jQ=", + "amount": 5000000000, + "memo": "0000000000000000000de6d595bddae743ac032b1458a47ccaef7b0f6f1e3210", + "series": 1 + } + ] +}"#; diff --git a/src/gpu.rs b/src/gpu.rs new file mode 100644 index 0000000..3c63c04 --- /dev/null +++ b/src/gpu.rs @@ -0,0 +1,60 @@ +use std::ffi::{c_int, c_void}; + +use crate::block::BlockID; + +type Int64T = i64; +type SizeT = usize; + +extern "C" { + #[cfg(feature = "cuda")] + fn cuda_init() -> c_int; + #[cfg(feature = "opencl")] + fn ocl_init() -> c_int; + fn miner_update( + miner_num: c_int, + first: *const c_void, + first_len: SizeT, + last: *const c_void, + last_len: SizeT, + target: *const c_void, + ) -> c_int; + fn miner_mine(miner_num: c_int, start_nonce: Int64T) -> Int64T; +} + +/// Is called 
on startup. +pub fn gpu_miner_init() -> usize { + unsafe { + #[cfg(feature = "cuda")] + return cuda_init() as usize; + #[cfg(feature = "opencl")] + return ocl_init() as usize; + } +} + +/// Is called when the underlying header changes. +pub fn gpu_miner_update( + miner_num: usize, + header_bytes: &Vec, + header_bytes_len: usize, + start_nonce_offset: usize, + end_nonce_offset: usize, + target: &BlockID, +) -> u64 { + unsafe { + miner_update( + miner_num as c_int, + header_bytes.as_ptr() as *const c_void, + start_nonce_offset, + header_bytes.as_ptr().add(end_nonce_offset) as *const c_void, + header_bytes_len - end_nonce_offset, + target.as_ptr() as *const c_void, + ) as u64 + } +} + +/// Is called on every solution attempt. +/// It will perform N hashing attempts where N is the maximum number of threads your device is capable of executing. +/// Returns a solving nonce; otherwise 0x7FFFFFFFFFFFFFFF. +pub fn gpu_miner_mine(miner_num: usize, start_nonce: u64) -> u64 { + unsafe { miner_mine(miner_num as c_int, start_nonce as Int64T) as u64 } +} diff --git a/src/irc.rs b/src/irc.rs new file mode 100644 index 0000000..4630a6f --- /dev/null +++ b/src/irc.rs @@ -0,0 +1,186 @@ +use faster_hex::hex_string; +use futures::StreamExt; +use irc::client::data::Config; +use irc::client::Client; +use irc::proto::{Command, Prefix, Response}; +use log::{error, info}; +use rand::Rng; +use thiserror::Error; +use tokio::task::JoinHandle; + +use crate::block::BlockID; +use crate::error::{impl_debug_error_chain, ChannelError, ErrChain, ParsingError}; +use crate::peer_manager::AddrChanSender; +use crate::shutdown::{ShutdownChanReceiver, SpawnedError}; + +const SERVER: &str = "irc.libera.chat"; +const PORT: u16 = 6697; + +/// IRC can be used for bootstrapping the network. +/// It primarily exists as a backup to our current limited set of DNS seeders. 
+pub struct IRC { + conn: Client, + nick: String, + genesis_id: &'static BlockID, + addr_chan_tx: AddrChanSender, + shutdown_chan_rx: ShutdownChanReceiver, +} + +impl IRC { + /// Connects the IRC bootstrapper to the IRC network. + /// port is our local cruzbit port. If it's set to 0 we won't be used for inbound connections. + pub async fn connect( + port: u16, + genesis_id: &'static BlockID, + addr_chan_tx: AddrChanSender, + shutdown_chan_rx: ShutdownChanReceiver, + ) -> Result { + let nick = generate_random_nick(); + + let config = Config { + nickname: Some(nick.clone()), + username: Some(port.to_string()), + server: Some(SERVER.to_owned()), + port: Some(PORT), + use_tls: Some(true), + ..Default::default() + }; + + let conn = Client::from_config(config).await?; + conn.identify()?; + + Ok(IRC { + conn, + nick, + genesis_id, + addr_chan_tx, + shutdown_chan_rx, + }) + } + + /// Spawns IRC on it's own main loop. + pub fn spawn(self) -> JoinHandle> { + tokio::spawn(async move { self.run().await.map_err(Into::into) }) + } + + /// Run IRC on it's own main loop. + pub async fn run(mut self) -> Result<(), IrcError> { + let mut stream = self.conn.stream()?; + let sender = self.conn.sender(); + // TODO: why is the channel name random + let n = rand::thread_rng().gen_range(0..10); + let channel = generate_channel_name(self.genesis_id, &n); + + loop { + tokio::select! 
{ + msg = stream.next() => { + let message = match msg { + Some(Ok(v)) => v, + Some(Err(err)) => { + let err = IrcError::from(err); + error!("{:?}", err); + continue; + }, + None => { + break Err(IrcError::Connection); + } + }; + + match message.command { + Command::Response(Response::RPL_WELCOME, _) => { + info!("Joining channel {}", &channel); + sender.send_join(&channel)?; + } + + Command::Response(Response::RPL_ENDOFNAMES, _) => { + info!("Joined channel {}", &channel); + sender.send(Command::WHO(Some(channel.clone()), None))?; + } + + Command::Response(Response::RPL_WHOREPLY, ref args) => { + let (nickname, username, hostname) = (&args[1], &args[2], &args[3]); + if *nickname != self.nick { + self.handle_irc_peer(username, hostname).await; + } + } + + Command::JOIN(_, _, _) => { + if let Some(Prefix::Nickname(nickname, username, hostname)) = &message.prefix { + if *nickname != self.nick { + self.handle_irc_peer(username, hostname).await; + } + } + } + + _ => {} + } + } + + _ = &mut self.shutdown_chan_rx => { + info!("IRC shutting down"); + break Ok(()) + } + } + } + } + + async fn handle_irc_peer(&self, username: &str, hostname: &str) { + if !username.is_empty() { + // pop off the ~ + let username = username[1..].to_owned(); + match username + .parse::() + .map_err(|err| IrcError::Parsing(ParsingError::Integer(err))) + { + Ok(port) => { + if port != 0 { + let addr_str = format!("{}:{}", hostname, port); + if let Err(err) = self + .addr_chan_tx + .send(addr_str) + .await + .map_err(IrcError::from) + { + error!("{:?}", err); + } + } + } + Err(err) => { + error!("{:?}", err); + } + } + } + } +} + +fn generate_random_nick() -> String { + let nick_bytes = rand::thread_rng().gen::<[u8; 6]>(); + format!("cb{}", hex_string(&nick_bytes)) +} + +fn generate_channel_name(genesis_id: &BlockID, n: &usize) -> String { + let g = genesis_id.as_hex(); + format!("#cruzbit-{}-{}", &g[g.len() - 8..], n) +} + +#[derive(Error)] +pub enum IrcError { + #[error("client connection error, 
closing")] + Connection, + + #[error("parsing peer address")] + Parsing(#[from] ParsingError), + #[error("channel")] + Channel(#[from] ChannelError), + + #[error(transparent)] + Irc(#[from] irc::error::Error), +} + +impl_debug_error_chain!(IrcError, "irc"); + +impl From> for IrcError { + fn from(err: tokio::sync::mpsc::error::SendError) -> Self { + Self::Channel(ChannelError::Send("addr", err.to_string())) + } +} diff --git a/src/ledger.rs b/src/ledger.rs new file mode 100644 index 0000000..6081163 --- /dev/null +++ b/src/ledger.rs @@ -0,0 +1,167 @@ +use std::collections::HashMap; +use std::sync::Arc; + +use ed25519_compact::PublicKey; +use thiserror::Error; + +use crate::balance_cache::BalanceCacheError; +use crate::block::{Block, BlockID}; +use crate::block_storage::BlockStorageError; +use crate::error::{DataError, DbError}; +use crate::transaction::{TransactionError, TransactionID}; + +/// Indicates the type of branch a particular block resides on. +/// Only blocks currently on the main branch are considered confirmed and only +/// transactions in those blocks affect public key balances. +/// Values are: Main, Side, Orphan or Unknown. +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub enum BranchType { + Main = 0, + Side = 2, + Orphan = 3, + Unknown = 4, +} + +impl TryFrom for BranchType { + type Error = LedgerError; + + fn try_from(value: u8) -> Result { + match value { + 0 => Ok(BranchType::Main), + 2 => Ok(BranchType::Side), + 3 => Ok(BranchType::Orphan), + 4 => Ok(BranchType::Unknown), + _ => Err(LedgerError::BranchTypeInvalid(value)), + } + } +} + +/// Ledger is an interface to a ledger built from the most-work chain of blocks. +/// It manages and computes public key balances as well as transaction and public key transaction indices. +/// It also maintains an index of the block chain by height as well as branch information. +pub trait Ledger { + /// Returns the ID and the height of the block at the current tip of the main chain. 
+ fn get_chain_tip(&self) -> Result, LedgerError>; + + /// Returns the ID of the block at the given block chain height. + fn get_block_id_for_height(&self, height: u64) -> Result, LedgerError>; + + /// Sets the branch type for the given block. + fn set_branch_type(&self, id: &BlockID, branch_type: BranchType) -> Result<(), LedgerError>; + + /// Returns the branch type for the given block. + fn get_branch_type(&self, id: &BlockID) -> Result; + + /// Connects a block to the tip of the block chain and applies the transactions + /// to the ledger. + fn connect_block( + self: &Arc, + id: &BlockID, + block: &Block, + ) -> Result, LedgerError>; + + /// Disconnects a block from the tip of the block chain and undoes the effects + /// of the transactions on the ledger. + fn disconnect_block( + self: &Arc, + id: &BlockID, + block: &Block, + ) -> Result, LedgerError>; + + /// Returns the current balance of a given public key. + fn get_public_key_balance(&self, pub_key: &PublicKey) -> Result; + + /// Returns the current balance of the given public keys + /// along with block ID and height of the corresponding main chain tip. + fn get_public_key_balances( + &self, + pub_keys: Vec, + ) -> Result<(HashMap, BlockID, u64), LedgerError>; + + /// Returns the index of a processed transaction. + fn get_transaction_index(&self, id: &TransactionID) -> Result<(BlockID, u32), LedgerError>; + + /// Returns transaction indices involving a given public key + /// over a range of heights. If startHeight > endHeight this iterates in reverse. + fn get_public_key_transaction_indices_range( + &self, + pub_key: PublicKey, + start_height: u64, + end_height: u64, + start_index: u32, + limit: usize, + ) -> Result<(Vec, Vec, u64, u32), LedgerError>; + + /// Returns the total current ledger balance by summing the balance of all public keys. + /// It's only used offline for verification purposes. + fn balance(&self) -> Result; + + /// Returns the public key balance at the given height. 
+ /// It's only used offline for historical and verification purposes. + /// This is only accurate when the full block chain is indexed (pruning disabled.) + fn get_public_key_balance_at( + &self, + pub_key: &PublicKey, + height: u64, + ) -> Result; +} + +#[derive(Error, Debug)] +pub enum LedgerError { + #[error("failed to apply transaction {0} to balance cache, sender balance would go negative")] + BalanceCacheApplyFailed(TransactionID), + #[error("branch type is invalid for value {0}")] + BranchTypeInvalid(u8), + #[error("being asked to connect {0} but previous {1} does not match tip {2}")] + ConnectBlockTipAndPreviousMismatch(BlockID, BlockID, BlockID), + #[error("being asked to disconnect {0} but it does not match tip {1}")] + DisconnectTipMismatch(BlockID, BlockID), + #[error("being asked to disconnect {0} but no tip is currently set")] + DisconnectTipNotFound(BlockID), + #[error("sender has insufficent balance in transaction {0}")] + SenderBalanceInsufficient(TransactionID), + #[error("transaction {0} already processed")] + TransactionAlreadyProcessed(TransactionID), + #[error("balance went negative at transaction {0}")] + TransactionBalanceNegative(TransactionID), + #[error("transaction {0} doesn't involve the public key")] + TransactionPublicKeyMismatch(TransactionID), + + #[error("balance cache")] + BalanceCache(Box), + #[error("block storage")] + BlockStorage(#[from] BlockStorageError), + #[error("data")] + Data(#[from] DataError), + #[error("db")] + Db(#[from] DbError), + #[error("ledger not found")] + LedgerNotFound(#[from] LedgerNotFoundError), + #[error("transaction")] + Transaction(#[from] TransactionError), +} + +// needs boxed because it's recursive +impl From for LedgerError { + fn from(value: BalanceCacheError) -> Self { + Self::BalanceCache(Box::new(value)) + } +} + +#[derive(Error, Debug)] +pub enum LedgerNotFoundError { + #[error("block for ID {0} not found")] + BlockForID(BlockID), + #[error("block ID for height {0} not found")] + 
BlockIDForHeight(u64), + #[error("chain tip not found")] + ChainTip, + #[error("chain tip header not found")] + ChainTipHeader, + #[error("coinbase for block {0} not found")] + CoinbaseForBlock(BlockID), + #[error("transaction at index for {0} not found")] + TransactionAtIndex(TransactionID), + #[error("transaction at index {0} in block {1} not found")] + TransactionInBlock(u32, BlockID), +} diff --git a/src/ledger_disk.rs b/src/ledger_disk.rs new file mode 100644 index 0000000..557d2ff --- /dev/null +++ b/src/ledger_disk.rs @@ -0,0 +1,1087 @@ +use std::collections::HashMap; +use std::mem; +use std::path::PathBuf; +use std::sync::Arc; + +use ed25519_compact::PublicKey; +use leveldb::database::batch::{Batch, WriteBatch}; +use leveldb::database::{Database, DatabaseReader}; +use leveldb::iterator::{Iterable, LevelDBIterator}; +use leveldb::options::{Options, ReadOptions, WriteOptions}; +use leveldb::snapshots::Snapshots; + +use crate::balance_cache::BalanceCache; +use crate::block::{Block, BlockID, BLOCK_ID_LENGTH}; +use crate::block_storage::BlockStorage; +use crate::block_storage_disk::BlockStorageDisk; +use crate::constants::{BLOCKS_UNTIL_NEW_SERIES, COINBASE_MATURITY}; +use crate::error::{DataError, DbError}; +use crate::ledger::{BranchType, Ledger, LedgerError, LedgerNotFoundError}; +use crate::transaction::{TransactionID, TRANSACTION_ID_LENGTH}; + +/// An on-disk implementation of the Ledger interface using LevelDB. +pub struct LedgerDisk { + db: Database, + block_store: Arc, + /// prune historic transaction and public key transaction indices + prune: bool, +} + +impl LedgerDisk { + /// Returns a new instance of LedgerDisk. 
+ pub fn new( + db_path: PathBuf, + block_store: Arc, + prune: bool, + ) -> Result, LedgerError> { + let mut options = Options::new(); + options.create_if_missing = true; + let db = Database::open(&db_path, &options).map_err(|err| DbError::Open(db_path, err))?; + + Ok(Arc::new(Self { + db, + block_store, + prune, + })) + } + + /// Sometimes we call this with leveldb Database or Snapshot + fn get_chain_tip(db: &impl DatabaseReader) -> Result, LedgerError> { + // compute db key + let key = compute_chain_tip_key(); + + // fetch the id + let Some(ct_bytes) = db + .get_u8(&ReadOptions::new(), &key) + .map_err(DbError::Read)? + else { + return Ok(None); + }; + + // decode the tip + let (id, height) = decode_chain_tip(&ct_bytes)?; + Ok(Some((id, height))) + } + + /// Sometimes we call this with leveldb Database or Snapshot + fn get_block_id_for_height( + height: u64, + db: &T, + ) -> Result, LedgerError> { + // compute db key + let key = compute_block_height_index_key(height); + + // fetch the id + let Some(id_bytes) = db + .get_u8(&ReadOptions::new(), &key) + .map_err(DbError::Read)? + else { + return Ok(None); + }; + + // return it + Ok(Some(BlockID::from(id_bytes))) + } + /// Prune transaction and public key transaction indices created by the block at the given height + fn prune_indices(&self, height: u64, batch: &WriteBatch) -> Result<(), LedgerError> { + // get the ID + let Some(id) = self.get_block_id_for_height(height)? else { + return Err(LedgerNotFoundError::BlockIDForHeight(height).into()); + }; + + // fetch the block + let Some(block) = self.block_store.get_block(&id)? 
else { + return Err(LedgerNotFoundError::BlockForID(id).into()); + }; + + for (i, tx) in block.transactions.iter().enumerate() { + let tx_id = tx.id()?; + + // prune transaction index + let key = compute_transaction_index_key(&tx_id); + batch.delete_u8(&key); + + // prune public key transaction indices + if !tx.is_coinbase() { + let key = compute_pub_key_transaction_index_key( + tx.from.expect("transaction should have a sender"), + Some(block.header.height), + Some(i as u32), + ); + batch.delete(&key); + } + } + + Ok(()) + } + + /// Restore transaction and public key transaction indices created by the block at the given height + fn restore_indices(&self, height: u64, batch: &WriteBatch) -> Result<(), LedgerError> { + // get the ID + let Some(id) = self.get_block_id_for_height(height)? else { + return Err(LedgerNotFoundError::BlockIDForHeight(height).into()); + }; + + // fetch the block + let Some(block) = self.block_store.get_block(&id)? else { + return Err(LedgerNotFoundError::BlockForID(id).into()); + }; + + for (i, tx) in block.transactions.iter().enumerate() { + let tx_id = tx.id()?; + + // restore transaction index + let key = compute_transaction_index_key(&tx_id); + let index_bytes = encode_transaction_index(block.header.height, i as u32); + batch.put_u8(&key, &index_bytes); + + // restore public key transaction indices + if !tx.is_coinbase() { + let key = compute_pub_key_transaction_index_key( + tx.from.expect("transaction should have a sender"), + Some(block.header.height), + Some(i as u32), + ); + batch.put_u8(&key, &[0x1]); + } + + let key = compute_pub_key_transaction_index_key( + tx.to, + Some(block.header.height), + Some(i as u32), + ); + batch.put_u8(&key, &[0x1]); + } + + Ok(()) + } + + /// Returns the index of a processed transaction. 
+ pub fn get_transaction_index( + &self, + id: &TransactionID, + ) -> Result, LedgerError> { + // compute the db key + let key_result = compute_transaction_index_key(id); + + // we want a consistent view during our two queries as height can change + let snapshot = self.db.snapshot(); + + // fetch and decode the index + let Some(index_bytes) = snapshot + .get_u8(&ReadOptions::new(), &key_result) + .map_err(DbError::Read)? + else { + return Ok(None); + }; + let (height, index) = decode_transaction_index(&index_bytes)?; + + // map height to block id + let Some(block_id) = Self::get_block_id_for_height(height, &snapshot)? else { + return Ok(None); + }; + + Ok(Some((block_id, index))) + } + + /// Iterate through transaction history going forward + fn get_public_key_transaction_indices_range_forward( + &self, + pub_key: PublicKey, + start_height: u64, + mut end_height: u64, + start_index: u32, + limit: usize, + ) -> Result<(Vec, Vec, u64, u32), LedgerError> { + let start_key = + compute_pub_key_transaction_index_key(pub_key, Some(start_height), Some(start_index)); + + // make it inclusive + end_height += 1; + let end_key = compute_pub_key_transaction_index_key(pub_key, Some(end_height), None); + + let mut height_map = HashMap::new(); + let mut ids = Vec::new(); + let mut indices = Vec::new(); + let mut last_height = 0; + let mut last_index = 0; + + // we want a consistent view of this. heights can change out from under us otherwise + let snapshot = self.db.snapshot(); + let iter = snapshot + .keys_iter(&ReadOptions::new()) + .from(&start_key) + .to(&end_key); + + for key in iter { + (_, last_height, last_index) = decode_pub_key_transaction_index_key(key)?; + + // lookup the block id + let id = match height_map.get(&last_height).cloned() { + Some(v) => v, + None => { + let Some(id) = Self::get_block_id_for_height(last_height, &snapshot)? 
else { + return Err(LedgerNotFoundError::BlockIDForHeight(last_height).into()); + }; + height_map.insert(last_height, id); + id + } + }; + + ids.push(id); + indices.push(last_index); + + if limit != 0 && indices.len() == limit { + break; + } + } + + Ok((ids, indices, last_height, last_index)) + } + + /// Iterate through transaction history in reverse + fn get_public_key_transaction_indices_range_reverse( + &self, + pub_key: PublicKey, + start_height: u64, + end_height: u64, + mut start_index: u32, + limit: usize, + ) -> Result<(Vec, Vec, u64, u32), LedgerError> { + let end_key = compute_pub_key_transaction_index_key(pub_key, Some(end_height), None); + + // make it inclusive + start_index += 1; + let start_key = + compute_pub_key_transaction_index_key(pub_key, Some(start_height), Some(start_index)); + + let mut height_map = HashMap::new(); + let mut ids = Vec::new(); + let mut indices = Vec::new(); + let mut last_height = 0; + let mut last_index = 0; + + // we want a consistent view of this. heights can change out from under us otherwise + let snapshot = self.db.snapshot(); + let iter = snapshot + .keys_iter(&ReadOptions::new()) + .from(&start_key) + .to(&end_key) + .reverse(); + + for key in iter { + (_, last_height, last_index) = decode_pub_key_transaction_index_key(key)?; + + // lookup the block id + let id = match height_map.get(&last_height).cloned() { + Some(v) => v, + None => { + let Some(id) = Self::get_block_id_for_height(last_height, &snapshot)? else { + return Err(LedgerNotFoundError::BlockIDForHeight(last_height).into()); + }; + height_map.insert(last_height, id); + id + } + }; + + ids.push(id); + indices.push(last_index); + if limit != 0 && indices.len() == limit { + break; + } + } + + Ok((ids, indices, last_height, last_index)) + } + + /// Returns the current balance of a given public key. 
+ pub fn get_public_key_balance(&self, pub_key: &PublicKey) -> Result { + // compute db key + let key = compute_pub_key_balance_key(pub_key); + + // fetch balance + let Some(balance_bytes) = self + .db + .get_u8(&ReadOptions::new(), &key) + .map_err(DbError::Read)? + else { + return Ok(0); + }; + + // decode and return it + let balance = u64::from_be_bytes(balance_bytes[..].try_into().map_err(DataError::U64)?); + + Ok(balance) + } +} + +impl Ledger for LedgerDisk { + /// Returns the ID and the height of the block at the current tip of the main chain. + fn get_chain_tip(&self) -> Result, LedgerError> { + Self::get_chain_tip(&self.db) + } + + /// Returns the ID of the block at the given block chain height. + fn get_block_id_for_height(&self, height: u64) -> Result, LedgerError> { + Self::get_block_id_for_height(height, &self.db) + } + + /// Sets the branch type for the given block. + fn set_branch_type(&self, id: &BlockID, branch_type: BranchType) -> Result<(), LedgerError> { + // compute db key + let key = compute_branch_type_key(id); + self.db + .put_u8(&WriteOptions { sync: true }, &key, &[branch_type as u8]) + .map_err(DbError::Write)?; + + Ok(()) + } + + /// Returns the branch type for the given block. + fn get_branch_type(&self, id: &BlockID) -> Result { + let key = compute_branch_type_key(id); + let options = ReadOptions::new(); + match self.db.get_u8(&options, &key).map_err(DbError::Read)? { + Some(bt) => Ok(BranchType::try_from(bt[0])?), + None => Ok(BranchType::Unknown), + } + } + + /// Connects a block to the tip of the block chain and applies the transactions to the ledger. + fn connect_block( + self: &Arc, + id: &BlockID, + block: &Block, + ) -> Result, LedgerError> { + // sanity check + if let Some((tip_id, _height)) = self.get_chain_tip()? 
{ + if tip_id != block.header.previous { + return Err(LedgerError::ConnectBlockTipAndPreviousMismatch( + *id, + block.header.previous, + tip_id, + )); + } + } + + // apply all resulting writes atomically + let batch = WriteBatch::new(); + + let mut balance_cache = BalanceCache::new(Arc::clone(self), 0); + let mut tx_ids = Vec::with_capacity(block.transactions.len()); + + for (i, tx) in block.transactions.iter().enumerate() { + let tx_id = tx.id()?; + tx_ids.push(tx_id); + + // verify the transaction hasn't been processed already. + // note that we can safely prune indices for transactions older than the previous series + let key = compute_transaction_index_key(&tx_id); + if self + .db + .get_u8(&ReadOptions::new(), &key) + .map_err(DbError::Read)? + .is_some() + { + return Err(LedgerError::TransactionAlreadyProcessed(tx_id)); + } + + // set the transaction index now + let index_bytes = encode_transaction_index(block.header.height, i as u32); + batch.put_u8(&key, &index_bytes); + + let mut tx_to_apply = Some(tx.clone()); + + if tx.is_coinbase() { + // don't apply a coinbase to a balance until it's 100 blocks deep. + // during honest reorgs normal transactions usually get into the new most-work branch + // but coinbases vanish. this mitigates the impact on UX when reorgs occur and transactions + // depend on coinbases. + tx_to_apply = None; + + if block.header.height >= COINBASE_MATURITY { + // mature the coinbase from 100 blocks ago now + let Some(old_id) = + self.get_block_id_for_height(block.header.height - COINBASE_MATURITY)? + else { + return Err(LedgerNotFoundError::BlockIDForHeight( + block.header.height - COINBASE_MATURITY, + ) + .into()); + }; + + // we could store the last 100 coinbases on our own in memory if we end up needing to + let (Some(old_tx), _block_header) = + self.block_store.get_transaction(&old_id, 0)? 
+ else { + return Err(LedgerNotFoundError::CoinbaseForBlock(old_id).into()); + }; + + // apply it to the recipient's balance + tx_to_apply = Some(old_tx); + } + } + + if let Some(tx_to_apply) = tx_to_apply { + // check sender balance and update sender and receiver balances + if !balance_cache.apply(&tx_to_apply)? { + return Err(LedgerError::BalanceCacheApplyFailed(tx_id)); + } + } + + // associate this transaction with both parties + if !tx.is_coinbase() { + let from = tx.from.expect("transaction should have a sender"); + let key = compute_pub_key_transaction_index_key( + from, + Some(block.header.height), + Some(i as u32), + ); + batch.put_u8(&key, &[0x1]); + } + + let key = compute_pub_key_transaction_index_key( + tx.to, + Some(block.header.height), + Some(i as u32), + ); + batch.put_u8(&key, &[0x1]); + } + + // update recorded balances + let balances = balance_cache.balances(); + for (pub_key, balance) in balances.iter() { + let key = compute_pub_key_balance_key(pub_key); + + if *balance == 0 { + batch.delete_u8(&key); + } else { + batch.put_u8(&key, &balance.to_be_bytes()); + } + } + + // index the block by height + let key = compute_block_height_index_key(block.header.height); + batch.put_u8(&key, id); + + // set this block on the main chain + let key = compute_branch_type_key(id); + batch.put_u8(&key, &[BranchType::Main as u8]); + + // set this block as the new tip + let key = compute_chain_tip_key(); + let ct_bytes = encode_chain_tip(id, block.header.height); + batch.put_u8(&key, &ct_bytes); + + // prune historic transaction and public key transaction indices now + if self.prune && block.header.height >= 2 * BLOCKS_UNTIL_NEW_SERIES { + self.prune_indices(block.header.height - 2 * BLOCKS_UNTIL_NEW_SERIES, &batch)?; + }; + + // perform the writes + let wo = WriteOptions { sync: true }; + self.db.write(&wo, &batch).map_err(DbError::Write)?; + + Ok(tx_ids) + } + + /// Disconnects a block from the tip of the block chain and undoes the effects of the transactions 
on the ledger. + fn disconnect_block( + self: &Arc, + id: &BlockID, + block: &Block, + ) -> Result, LedgerError> { + // sanity check + let Some((tip_id, _height)) = self.get_chain_tip()? else { + return Err(LedgerError::DisconnectTipNotFound(*id)); + }; + + if tip_id != *id { + return Err(LedgerError::DisconnectTipMismatch(*id, tip_id)); + } + + // apply all resulting writes atomically + let batch = WriteBatch::new(); + + let mut balance_cache = BalanceCache::new(Arc::clone(self), 0); + let mut tx_ids = Vec::with_capacity(block.transactions.len()); + + // disconnect transactions in reverse order + for (i, tx) in block.transactions.iter().rev().enumerate() { + let tx_id = tx.id()?; + // save the id + tx_ids.push(tx_id); + + // mark the transaction unprocessed now (delete its index) + let key = compute_transaction_index_key(&tx_id); + batch.delete_u8(&key); + + let mut tx_to_undo = Some(tx.clone()); + if tx.is_coinbase() { + // coinbase doesn't affect recipient balance for 100 more blocks + tx_to_undo = None; + + if block.header.height >= COINBASE_MATURITY { + // undo the effect of the coinbase from 100 blocks ago now + let Some(old_id) = + self.get_block_id_for_height(block.header.height - COINBASE_MATURITY)? + else { + return Err(LedgerNotFoundError::BlockIDForHeight( + block.header.height - COINBASE_MATURITY, + ) + .into()); + }; + + let (Some(old_tx), _block_header) = + self.block_store.get_transaction(&old_id, 0)? 
+ else { + return Err(LedgerNotFoundError::CoinbaseForBlock(old_id).into()); + }; + + // undo its effect on the recipient's balance + tx_to_undo = Some(old_tx); + } + } + + if let Some(tx_to_undo) = tx_to_undo { + // credit sender and debit recipient + balance_cache.undo(&tx_to_undo)?; + } + + // unassociate this transaction with both parties + if !tx.is_coinbase() { + let key = compute_pub_key_transaction_index_key( + tx.from.expect("transaction should have a sender"), + Some(block.header.height), + Some(i as u32), + ); + batch.delete_u8(&key); + } + + let key = compute_pub_key_transaction_index_key( + tx.to, + Some(block.header.height), + Some(i as u32), + ); + batch.delete_u8(&key); + } + + // update recorded balances + let balances = balance_cache.balances(); + for (pub_key, balance) in balances.iter() { + let key = compute_pub_key_balance_key(pub_key); + if *balance == 0 { + batch.delete_u8(&key); + } else { + batch.put_u8(&key, &balance.to_be_bytes()); + } + } + + // remove this block's index by height + let key = compute_block_height_index_key(block.header.height); + batch.delete_u8(&key); + + // set this block on a side chain + let key = compute_branch_type_key(id); + batch.put_u8(&key, &[BranchType::Side as u8]); + + // set the previous block as the chain tip + let key = compute_chain_tip_key(); + let ct_bytes = encode_chain_tip(&block.header.previous, block.header.height - 1); + batch.put_u8(&key, &ct_bytes); + + // restore historic indices now + if self.prune && block.header.height >= 2 * BLOCKS_UNTIL_NEW_SERIES { + self.restore_indices(block.header.height - 2 * BLOCKS_UNTIL_NEW_SERIES, &batch)?; + } + + // perform the writes + let wo = WriteOptions { sync: true }; + self.db.write(&wo, &batch).map_err(DbError::Write)?; + + Ok(tx_ids) + } + + /// Returns the current balance of the given public keys + /// along with block ID and height of the corresponding main chain tip. 
+ fn get_public_key_balances( + &self, + pub_keys: Vec, + ) -> Result<(HashMap, BlockID, u64), LedgerError> { + // get a consistent view across all queries + let snapshot = self.db.snapshot(); + + // get the chain tip + let Some((tip_id, tip_height)) = Self::get_chain_tip(&snapshot)? else { + return Err(LedgerNotFoundError::ChainTip.into()); + }; + + let mut balances = HashMap::new(); + + for pub_key in pub_keys.iter() { + // compute balance db key + let key = compute_pub_key_balance_key(pub_key); + + // fetch balance + let Some(balance_bytes) = snapshot + .get_u8(&ReadOptions::new(), &key) + .map_err(DbError::Read)? + else { + balances.insert(*pub_key, 0); + continue; + }; + + // decode it + let balance = u64::from_be_bytes(balance_bytes[..].try_into().map_err(DataError::U64)?); + + // save it + balances.insert(*pub_key, balance); + } + + Ok((balances, tip_id, tip_height)) + } + + /// Returns the current balance of a given public key. + fn get_public_key_balance(&self, pub_key: &PublicKey) -> Result { + // compute db key + let key = compute_pub_key_balance_key(pub_key); + + // fetch balance + let Some(balance_bytes) = self + .db + .get_u8(&ReadOptions::new(), &key) + .map_err(DbError::Read)? + else { + return Err(DataError::NotFound.into()); + }; + + // decode and return it + let balance = u64::from_be_bytes(balance_bytes[..].try_into().map_err(DataError::U64)?); + + Ok(balance) + } + + /// Returns the index of a processed transaction. + fn get_transaction_index(&self, id: &TransactionID) -> Result<(BlockID, u32), LedgerError> { + let key = compute_transaction_index_key(id); + + // we want a consistent view during our two queries as height can change + let snapshot = self.db.snapshot(); + + // fetch and decode the index + let Some(index_bytes) = snapshot + .get_u8(&ReadOptions::new(), &key) + .map_err(DbError::Read)? 
+ else { + return Err(LedgerNotFoundError::TransactionAtIndex(*id).into()); + }; + + let (height, index) = decode_transaction_index(&index_bytes)?; + // map height to block id + let Some(block_id) = Self::get_block_id_for_height(height, &snapshot)? else { + return Err(LedgerNotFoundError::BlockIDForHeight(height).into()); + }; + + Ok((block_id, index)) + } + + /// Returns transaction indices involving a given public key over + /// a range of heights. If startHeight > endHeight this iterates + /// in reverse. + fn get_public_key_transaction_indices_range( + &self, + pub_key: PublicKey, + start_height: u64, + end_height: u64, + start_index: u32, + limit: usize, + ) -> Result<(Vec, Vec, u64, u32), LedgerError> { + if end_height >= start_height { + // forward + self.get_public_key_transaction_indices_range_forward( + pub_key, + start_height, + end_height, + start_index, + limit, + ) + } else { + // reverse + self.get_public_key_transaction_indices_range_reverse( + pub_key, + start_height, + end_height, + start_index, + limit, + ) + } + } + + /// Returns the total current ledger balance by summing the balance of all public keys. + /// It's only used offline for verification purposes. + fn balance(&self) -> Result { + let mut total = 0; + + // compute the sum of all public key balances + let key = compute_pub_key_balance_key_all(); + let iter = self.db.value_iter(&ReadOptions::new()).prefix(&key); + + for balance_bytes in iter { + let balance = u64::from_be_bytes(balance_bytes[..].try_into().map_err(DataError::U64)?); + total += balance + } + + Ok(total) + } + + /// Returns the public key balance at the given height. + /// It's only used offline for historical and verification purposes. + /// This is only accurate when the full block chain is indexed (pruning disabled.) + fn get_public_key_balance_at( + &self, + pub_key: &PublicKey, + mut height: u64, + ) -> Result { + let current_height = match self.get_chain_tip()? 
{ + Some((_tip_id, height)) => height, + None => 0, + }; + + let start_key = compute_pub_key_transaction_index_key(*pub_key, None, None); + + // make it inclusive + height += 1; + let end_key = compute_pub_key_transaction_index_key(*pub_key, Some(height), None); + + let mut balance = 0; + + let snapshot = self.db.snapshot(); + let iter = snapshot + .keys_iter(&ReadOptions::new()) + .from(&start_key) + .to(&end_key); + + for key in iter { + let (_, height, index) = decode_pub_key_transaction_index_key(key)?; + + if index == 0 && height > current_height.saturating_sub(COINBASE_MATURITY) { + // coinbase isn't mature + continue; + } + + let Some(block_id) = self.get_block_id_for_height(height)? else { + return Err(LedgerNotFoundError::BlockIDForHeight(height).into()); + }; + + let (Some(tx), _block_header) = self.block_store.get_transaction(&block_id, index)? + else { + return Err(LedgerNotFoundError::TransactionInBlock(index, block_id).into()); + }; + + if *pub_key == tx.to { + balance += tx.amount; + } else if pub_key == tx.from.as_ref().expect("transaction should have a sender") { + let fee = tx.fee.expect("transaction should have a fee"); + balance = balance + .checked_sub(tx.amount) + .and_then(|new_balance| new_balance.checked_sub(fee)) + .ok_or(LedgerError::TransactionBalanceNegative(tx.id()?))?; + } else { + return Err(LedgerError::TransactionPublicKeyMismatch(tx.id()?)); + } + } + + Ok(balance) + } +} + +/// leveldb schema +/// h{height} -> {bid} +/// B{bid} -> main|side|orphan (1 byte) +/// T -> {bid}{height} (main chain tip) +/// b{pk} -> {balance} (we always need all of this table) +/// k{pk}{height}{index} -> 1 (not strictly necessary. 
probably should make it optional by flag)
/// t{txid} -> {height}{index} (prunable up to the previous series)
const BLOCK_HEIGHT_INDEX_PREFIX: u8 = b'h';
const BRANCH_TYPE_PREFIX: u8 = b'B';
const CHAIN_TIP_PREFIX: u8 = b'T';
const PUB_KEY_BALANCE_PREFIX: u8 = b'b';
const PUB_KEY_TRANSACTION_INDEX_PREFIX: u8 = b'k';
const TRANSACTION_INDEX_PREFIX: u8 = b't';

const U32_LENGTH: usize = mem::size_of::<u32>();
const U64_LENGTH: usize = mem::size_of::<u64>();
const PREFIX_LENGTH: usize = 1;

type BlockHeightIndexKey = [u8; PREFIX_LENGTH + U64_LENGTH];
type BranchTypeKey = [u8; PREFIX_LENGTH + BLOCK_ID_LENGTH];
type ChainTipKey = [u8; PREFIX_LENGTH];
type PubKeyBalanceKey = [u8; PREFIX_LENGTH + PublicKey::BYTES];
type PubKeyBalanceKeyAll = [u8; PREFIX_LENGTH];
type PubKeyTransactionIndexKey = Vec<u8>;
type TransactionIndexKey = [u8; PREFIX_LENGTH + TRANSACTION_ID_LENGTH];

type ChainTip = [u8; BLOCK_ID_LENGTH + U64_LENGTH];
type TransactionIndex = [u8; U64_LENGTH + U32_LENGTH];

/// Builds the branch type key: "B" + block ID.
fn compute_branch_type_key(id: &BlockID) -> BranchTypeKey {
    let mut key: BranchTypeKey = [0u8; mem::size_of::<BranchTypeKey>()];
    key[0] = BRANCH_TYPE_PREFIX;
    key[1..].copy_from_slice(id);
    key
}

/// Builds the height index key: "h" + big-endian height.
fn compute_block_height_index_key(height: u64) -> BlockHeightIndexKey {
    let mut key: BlockHeightIndexKey = [0u8; mem::size_of::<BlockHeightIndexKey>()];
    key[0] = BLOCK_HEIGHT_INDEX_PREFIX;
    key[1..].copy_from_slice(&height.to_be_bytes());
    key
}

/// Builds the chain tip key: a single "T".
fn compute_chain_tip_key() -> ChainTipKey {
    [CHAIN_TIP_PREFIX]
}

/// Builds the transaction index key: "t" + transaction ID.
fn compute_transaction_index_key(id: &TransactionID) -> TransactionIndexKey {
    let mut key: TransactionIndexKey = [0u8; mem::size_of::<TransactionIndexKey>()];
    key[0] = TRANSACTION_INDEX_PREFIX;
    key[1..].copy_from_slice(id);
    key
}

/// Builds the public key transaction index key:
/// "k" + public key [+ big-endian height [+ big-endian index]].
fn compute_pub_key_transaction_index_key(
    pub_key: PublicKey,
    height: Option<u64>,
    index: Option<u32>,
) -> PubKeyTransactionIndexKey {
    let mut key: PubKeyTransactionIndexKey = Vec::new();
    key.push(PUB_KEY_TRANSACTION_INDEX_PREFIX);
    key.extend_from_slice(&pub_key[..]);

    // omitting lets us search with just the public key
    if let Some(height) = height {
        key.extend_from_slice(&height.to_be_bytes());
    } else {
        return key;
    }

    // omitting lets us search with just the public key and height
    if let Some(index) = index {
        key.extend_from_slice(&index.to_be_bytes());
    };
    key
}

/// Decodes a full public key transaction index key into its
/// (public key, height, index) parts.
fn decode_pub_key_transaction_index_key(
    key: Vec<u8>,
) -> Result<(PublicKey, u64, u32), LedgerError> {
    let mut offset = PREFIX_LENGTH;
    let pub_key = PublicKey::new(
        key[offset..][..PublicKey::BYTES]
            .try_into()
            .map_err(DataError::PublicKey)?,
    );
    offset += PublicKey::BYTES;

    let height = u64::from_be_bytes(
        key[offset..][..U64_LENGTH]
            .try_into()
            .map_err(DataError::U64)?,
    );
    offset += U64_LENGTH;

    let index = u32::from_be_bytes(
        key[offset..][..U32_LENGTH]
            .try_into()
            // was DataError::U64: this decodes a u32, so label the error
            // correctly, consistent with decode_transaction_index below
            .map_err(DataError::U32)?,
    );
    Ok((pub_key, height, index))
}

/// Builds the balance key: "b" + public key.
fn compute_pub_key_balance_key(pub_key: &PublicKey) -> PubKeyBalanceKey {
    let mut key: PubKeyBalanceKey = [0u8; mem::size_of::<PubKeyBalanceKey>()];
    key[0] = PUB_KEY_BALANCE_PREFIX;
    key[1..].copy_from_slice(&pub_key[..]);
    key
}

/// Builds the prefix used to iterate over all balance entries.
fn compute_pub_key_balance_key_all() -> PubKeyBalanceKeyAll {
    [PUB_KEY_BALANCE_PREFIX]
}

/// Encodes a chain tip value: block ID + big-endian height.
fn encode_chain_tip(id: &BlockID, height: u64) -> ChainTip {
    let mut buf = [0u8; std::mem::size_of::<ChainTip>()];
    buf[..BLOCK_ID_LENGTH].copy_from_slice(id);
    buf[BLOCK_ID_LENGTH..][..U64_LENGTH].copy_from_slice(&u64::to_be_bytes(height));
    buf
}

/// Decodes a chain tip value into its (block ID, height) parts.
fn decode_chain_tip(ct_bytes: &[u8]) -> Result<(BlockID, u64), LedgerError> {
    let id = BlockID::from(&ct_bytes[..BLOCK_ID_LENGTH]);
    let height = u64::from_be_bytes(
        ct_bytes[BLOCK_ID_LENGTH..]
            .try_into()
            .map_err(DataError::U64)?,
    );
    Ok((id, height))
}

/// Encodes a transaction index value: big-endian height + big-endian index.
fn encode_transaction_index(height: u64, index: u32) -> TransactionIndex {
    let mut buf = [0u8; std::mem::size_of::<TransactionIndex>()];
    buf[..U64_LENGTH].copy_from_slice(&height.to_be_bytes());
    buf[U64_LENGTH..].copy_from_slice(&index.to_be_bytes());
    buf
}

/// Decodes a transaction index value into its (height, index) parts.
fn decode_transaction_index(index_bytes: &[u8]) -> Result<(u64, u32), LedgerError> {
    let height = u64::from_be_bytes(
        index_bytes[..U64_LENGTH]
            .try_into()
            .map_err(DataError::U64)?,
    );
    let index = u32::from_be_bytes(
        index_bytes[U64_LENGTH..][..U32_LENGTH]
            .try_into()
            .map_err(DataError::U32)?,
    );
    Ok((height, index))
}

#[cfg(test)]
mod test {
    use ed25519_compact::KeyPair;
    use tempfile::tempdir;

    use crate::block::test_utils::make_test_block;
    use crate::utils::now_as_secs;

    use super::*;

    #[test]
    fn test_compute_block_height_index_key() {
        let height = 1;
        let block_height_key = compute_block_height_index_key(height);
        assert_eq!(block_height_key[0], BLOCK_HEIGHT_INDEX_PREFIX);
        assert_eq!(block_height_key[1..], height.to_be_bytes());
    }

    #[test]
    fn test_compute_branch_type_key() {
        let block_id = BlockID::new();
        let branch_type_key = compute_branch_type_key(&block_id);
        assert_eq!(branch_type_key[0], BRANCH_TYPE_PREFIX);
        assert_eq!(branch_type_key[1..], block_id[..]);
    }

    #[test]
    fn test_compute_pub_key_balance_key() {
        let public_key = KeyPair::generate().pk;
        let compute_key = compute_pub_key_balance_key(&public_key);
        assert_eq!(compute_key[0], PUB_KEY_BALANCE_PREFIX);
        assert_eq!(&compute_key[1..], &public_key[..]);
    }

    #[test]
    fn test_decode_transaction_index() {
        let height = 1;
        let index = 1;
        let transaction_index = encode_transaction_index(height, index);
        let (height_decode, index_decode) = decode_transaction_index(&transaction_index).unwrap();
        assert_eq!(height_decode, height);
        assert_eq!(index_decode, index);
    }

    #[test]
    fn
test_decode_chain_tip() { + let block_id = BlockID::new(); + let height = 1; + let chain_tip = encode_chain_tip(&block_id, height); + let (block_id_decode, height_decode) = decode_chain_tip(&chain_tip).unwrap(); + assert_eq!(block_id_decode, block_id); + assert_eq!(height_decode, height); + } + + #[test] + fn test_compute_chain_tip_key() { + let chain_tip_key = compute_chain_tip_key(); + assert_eq!(chain_tip_key[0], CHAIN_TIP_PREFIX); + } + + #[test] + fn test_compute_pub_key_transaction_index_key() { + let public_key = KeyPair::generate().pk; + let height = 1; + let index = 1; + + let pub_key_transaction_index_key = + compute_pub_key_transaction_index_key(public_key, Some(height), Some(index)); + assert_eq!( + pub_key_transaction_index_key[0], + PUB_KEY_TRANSACTION_INDEX_PREFIX + ); + let mut offset = 1; + assert_eq!( + &pub_key_transaction_index_key[offset..][..PublicKey::BYTES], + &public_key[..] + ); + + offset += PublicKey::BYTES; + let height_encoded = &pub_key_transaction_index_key[offset..][..U64_LENGTH]; + assert_eq!(height_encoded, height.to_be_bytes()); + + offset += U64_LENGTH; + let index_encoded = &pub_key_transaction_index_key[offset..]; + assert_eq!(index_encoded, index.to_be_bytes()); + } + + #[test] + fn test_get_public_key_transaction_indices_range_forward() { + let temp_dir = tempdir().unwrap(); + let data_dir = temp_dir.path(); + + let block_store = BlockStorageDisk::new( + data_dir.join("blocks"), + data_dir.join("headers.db"), + false, // not read only + false, + ) + .unwrap(); + + let mut genesis_block = make_test_block(0); + genesis_block.header.height += 1; + let genesis_block_id = genesis_block.id().unwrap(); + block_store + .store(&genesis_block_id, &genesis_block, now_as_secs()) + .unwrap(); + + let mut block = genesis_block.clone(); + block.header.height += 1; + block.header.previous = genesis_block_id; + block.transactions[0].time += 1; + let block_id = block.id().unwrap(); + block_store.store(&block_id, &block, 
now_as_secs()).unwrap(); + + let ledger = LedgerDisk::new(data_dir.join("ledger.db"), block_store, false).unwrap(); + ledger + .connect_block(&genesis_block_id, &genesis_block) + .unwrap(); + ledger.connect_block(&block_id, &block).unwrap(); + + let tx = &genesis_block.transactions[0]; + let (ids, indices, last_height, last_index) = ledger + .get_public_key_transaction_indices_range(tx.to, 1, block.header.height - 1, 0, 100) + .unwrap(); + assert_eq!(ids, vec![genesis_block_id]); + assert_eq!(indices, vec![0]); + assert_eq!(last_height, 1); + assert_eq!(last_index, 0); + + let (ids, indices, last_height, last_index) = ledger + .get_public_key_transaction_indices_range(tx.to, 1, block.header.height, 0, 100) + .unwrap(); + assert_eq!(ids, vec![genesis_block_id, block_id]); + assert_eq!(indices, vec![0, 0]); + assert_eq!(last_height, block.header.height); + assert_eq!(last_index, 0); + } +} diff --git a/src/lib.rs b/src/lib.rs new file mode 100644 index 0000000..1c50dc2 --- /dev/null +++ b/src/lib.rs @@ -0,0 +1,32 @@ +#![allow(clippy::too_many_arguments)] + +pub mod balance_cache; +pub mod block; +pub mod block_header_hasher; +pub mod block_queue; +pub mod block_storage; +pub mod block_storage_disk; +pub mod checkpoints; +pub mod constants; +pub mod dns; +pub mod error; +pub mod genesis; +#[cfg(any(feature = "cuda", feature = "opencl"))] +pub mod gpu; +pub mod irc; +pub mod ledger; +pub mod ledger_disk; +pub mod miner; +pub mod peer; +pub mod peer_manager; +pub mod peer_storage; +pub mod peer_storage_disk; +pub mod processor; +pub mod protocol; +pub mod shutdown; +pub mod tls; +pub mod transaction; +pub mod transaction_queue; +pub mod transaction_queue_memory; +pub mod utils; +pub mod wallet; diff --git a/src/miner.rs b/src/miner.rs new file mode 100644 index 0000000..fc0b318 --- /dev/null +++ b/src/miner.rs @@ -0,0 +1,451 @@ +use std::sync::Arc; +use std::thread; +use std::time::{Duration, Instant}; + +use ed25519_compact::PublicKey; +use ibig::UBig; +use 
log::{error, info}; +use rand::Rng; +use thiserror::Error; +use tokio::runtime::Handle; +use tokio::sync::mpsc::{channel, unbounded_channel, Receiver, Sender}; +use tokio::task::JoinHandle; + +use crate::block::{Block, BlockError, BlockHeader, BlockID}; +use crate::block_header_hasher::BlockHeaderHasher; +use crate::block_storage_disk::BlockStorageDisk; +use crate::constants::{MAX_NUMBER, MAX_TRANSACTIONS_TO_INCLUDE_PER_BLOCK}; +use crate::error::{impl_debug_error_chain, ChannelError, ErrChain}; +use crate::ledger::LedgerNotFoundError; +use crate::ledger_disk::LedgerDisk; +use crate::peer::PEER_ADDR_SELF; +use crate::peer_manager::{PeerManager, PeerManagerError}; +use crate::processor::{ProcessBlockError, Processor, ProcessorError}; +use crate::shutdown::{ShutdownChanReceiver, SpawnedError}; +use crate::transaction::Transaction; +use crate::transaction_queue::TransactionQueue; +use crate::transaction_queue_memory::TransactionQueueMemory; +use crate::utils::now_as_secs; + +pub type HashUpdateChanTx = Sender; +pub type HashUpdateChanRx = Receiver; +pub type HashUpdateChan = (HashUpdateChanTx, HashUpdateChanRx); + +/// Tries to mine a new tip block. +pub struct Miner { + /// receipients of any block rewards we mine + pub_keys: &'static Vec, + /// memo for coinbase of any blocks we mine + memo: &'static Option, + block_store: Arc, + tx_queue: Arc, + ledger: Arc, + processor: Arc, + num: usize, + key_index: usize, + hash_update_chan: HashUpdateChanTx, + shutdown_chan_rx: ShutdownChanReceiver, + shutdown_fns: Vec>, +} + +/// Collects hash counts from all miners in order to monitor and display the aggregate hashrate. 
pub struct HashrateMonitor {
    // number of miners reporting into this monitor
    num_miners: usize,
    // receives per-miner hash counts
    hash_update_chan: HashUpdateChanRx,
    shutdown_chan_rx: ShutdownChanReceiver,
}

impl HashrateMonitor {
    /// Creates a monitor that aggregates hash counts reported by `num_miners` miners.
    pub fn new(
        num_miners: usize,
        hash_update_chan: HashUpdateChanRx,
        shutdown_chan_rx: ShutdownChanReceiver,
    ) -> Self {
        Self {
            num_miners,
            hash_update_chan,
            shutdown_chan_rx,
        }
    }

    /// Spawns the Hashrate Monitor's main loop.
    pub fn spawn(self) -> JoinHandle<Result<(), SpawnedError>> {
        tokio::spawn(async { self.run().await.map_err(Into::into) })
    }

    /// Runs the Hashrate Monitor's main loop: folds per-miner hash counts
    /// into a window total and logs the aggregate rate once per window.
    pub async fn run(mut self) -> Result<(), HashrateMonitorError> {
        // hashes accumulated in the current reporting window
        let mut accumulated = 0;
        // per-miner updates folded into `accumulated` so far
        let mut updates_seen = 0;
        // start of the current reporting window
        let mut window_start = Instant::now();

        loop {
            tokio::select! {
                Some(hashes) = self.hash_update_chan.recv() => {
                    accumulated += hashes;
                    updates_seen += 1;

                    // miners update every 30 seconds, report every minute (num_miners * 2)
                    if updates_seen == self.num_miners * 2 {
                        let elapsed = window_start.elapsed().as_secs_f64();
                        let hps = accumulated as f64 / elapsed;
                        info!("Hashrate: {:.2} MH/s", hps/1000_f64/1000_f64);
                        accumulated = 0;
                        updates_seen = 0;
                        window_start = Instant::now();
                    }
                }

                _ = &mut self.shutdown_chan_rx => {
                    info!("Hashrate monitor shutting down");
                    return Ok(())
                }
            }
        }
    }
}

#[derive(Error, Debug)]
pub enum HashrateMonitorError {}

impl Miner {
    /// Returns a new Miner instance.
+ pub fn new( + pub_keys: &'static Vec, + memo: &'static Option, + block_store: Arc, + tx_queue: Arc, + ledger: Arc, + processor: Arc, + hash_update_chan: HashUpdateChanTx, + num: usize, + shutdown_chan_rx: ShutdownChanReceiver, + ) -> Self { + let key_index = rand::thread_rng().gen_range(0..pub_keys.len()); + + Self { + pub_keys, + memo, + block_store, + tx_queue, + ledger, + processor, + num, + key_index, + hash_update_chan, + shutdown_chan_rx, + shutdown_fns: Vec::new(), + } + } + + /// Spawns the miner's main loop + pub fn spawn(self) -> JoinHandle> { + tokio::task::spawn_blocking(|| self.run().map_err(Into::into)) + } + + /// Run the miner's main loop + pub fn run(mut self) -> Result<(), MinerError> { + let interval = Duration::from_secs(30); + let mut ticker = Instant::now() + interval; + + // don't start mining until we think we're synced. + // we're just wasting time and slowing down the sync otherwise + let (ibd, _height) = + PeerManager::is_initial_block_download(&self.ledger, &self.block_store)?; + if ibd { + info!("Miner {} waiting for blockchain sync", self.num); + } + + loop { + if self.shutdown_chan_rx.try_recv().is_ok() { + info!("Miner {} shutting down", self.num); + return Ok(()); + } + + if ticker <= Instant::now() { + ticker += interval; + let (ibd, _height) = + PeerManager::is_initial_block_download(&self.ledger, &self.block_store)?; + if !ibd { + // time to start mining + break; + } + } + + thread::sleep(Duration::from_millis(100)); + } + + // Register for tip changes to the processor + let (tip_change_chan_tx, mut tip_change_chan_rx) = unbounded_channel(); + self.processor + .register_for_tip_change(tip_change_chan_tx.clone()); + + // Register for new transactions + let (new_tx_chan_tx, mut new_tx_chan_rx) = channel(1); + self.processor + .register_for_new_transactions(new_tx_chan_tx.clone()); + + // unregister from the processor on shutdown + { + let processor = Arc::clone(&self.processor); + self.shutdown_fns.push(Box::new(move || { + 
processor.unregister_for_tip_change(tip_change_chan_tx.clone()); + processor.unregister_for_new_transactions(new_tx_chan_tx.clone()); + })); + } + + // main mining loop + let mut hashes = 0; + let mut median_timestamp = 0; + let mut block = None; + let mut target_int = UBig::default(); + + loop { + if let Ok(tip) = tip_change_chan_rx.try_recv() { + if !tip.connect || tip.more { + // only build off newly connected tip blocks + continue; + } + + // give up whatever block we were working on + info!( + "Miner {} received notice of new tip block {}", + self.num, tip.block_id + ); + + // start working on a new block + let mut next_block = + self.create_next_block_from_tip(&tip.block_id, &tip.block.header)?; + + // make sure we're at least +1 the median timestamp + median_timestamp = + Processor::compute_median_timestamp(&tip.block.header, &self.block_store)?; + + if next_block.header.time <= median_timestamp { + next_block.header.time = median_timestamp + 1; + } + + // convert our target to a BigInt + target_int = next_block.header.target.as_big_int(); + block = Some((next_block, BlockHeaderHasher::new())); + } + + if let Ok(new_tx) = new_tx_chan_rx.try_recv() { + info!( + "Miner {} received notice of new transaction {}", + self.num, new_tx.transaction_id + ); + + let Some((block_new_tx, _hasher)) = block.as_mut() else { + // we're not working on a block yet + continue; + }; + + if MAX_TRANSACTIONS_TO_INCLUDE_PER_BLOCK != 0 + && block_new_tx.transactions.len() + >= MAX_TRANSACTIONS_TO_INCLUDE_PER_BLOCK as usize + { + info!( + "Per-block transaction limit hit ({})", + block_new_tx.transactions.len() + ); + continue; + } + + // add the transaction to the block (it updates the coinbase fee) + if let Err(err) = + block_new_tx.add_transaction(new_tx.transaction_id, new_tx.transaction) + { + info!( + "Error adding new transaction {} to block: {}", + new_tx.transaction_id, err + ); + // abandon the block + block = None; + } + } + + if self.shutdown_chan_rx.try_recv().is_ok() { 
+ info!("Miner {} shutting down...", self.num); + break Ok(()); + } + + if ticker <= Instant::now() { + ticker += interval; + + // update hash count for hash rate monitor + self.hash_update_chan.blocking_send(hashes).unwrap(); + hashes = 0; + + if let Some((block, _hasher)) = block.as_mut() { + // update block time every so often + let now = now_as_secs(); + if now > median_timestamp { + block.header.time = now; + } + } + } + + if block.is_none() { + // find the tip to start working off of + let Some((tip_id, tip_header, _tip_when)) = + Processor::get_chain_tip_header(&self.ledger, &self.block_store)? + else { + break Err(LedgerNotFoundError::ChainTipHeader.into()); + }; + + // create a new block + let mut next_block = self.create_next_block_from_tip(&tip_id, &tip_header)?; + + // make sure we're at least +1 the median timestamp + median_timestamp = + match Processor::compute_median_timestamp(&tip_header, &self.block_store) { + Ok(v) => v, + Err(err) => break Err(err.into()), + }; + + if next_block.header.time <= median_timestamp { + next_block.header.time = median_timestamp + 1; + } + + // convert our target to a BigInt + target_int = next_block.header.target.as_big_int(); + block = Some((next_block, BlockHeaderHasher::new())); + } + + let (candidate_block, hasher) = block.as_mut().unwrap(); + candidate_block.header.id_fast(self.num, hasher); + hashes += hasher.hashes_per_attempt; + + if hasher.result <= target_int { + // found a solution + let (candidate_block, hasher) = block.take().unwrap(); + let id = BlockID::from(hasher.result); + info!("Miner {} mined new block {}", self.num, &id); + + let handle = Handle::current(); + handle.block_on(async { + // process the block + if let Err(err) = self + .processor + .process_candidate_block(id, candidate_block, PEER_ADDR_SELF) + .await + .map_err(MinerError::ProcessBlock) + { + error!("{:?}", err); + } + }); + + self.key_index = rand::thread_rng().gen_range(0..self.pub_keys.len()); + } else { + // no solution yet + 
candidate_block.header.nonce += hasher.hashes_per_attempt; + if candidate_block.header.nonce > MAX_NUMBER { + candidate_block.header.nonce = 0; + } + } + } + } + + /// Create a new block off of the given tip block. + pub fn create_next_block_from_tip( + &self, + tip_id: &BlockID, + tip_header: &BlockHeader, + ) -> Result { + info!( + "Miner {} mining new block from current tip {}", + self.num, &tip_id + ); + + let Some(pub_key) = self.pub_keys.get(self.key_index).cloned() else { + return Err(MinerError::PublicKeyAtIndexMissing(self.key_index)); + }; + + Miner::create_next_block( + tip_id, + tip_header, + &self.tx_queue, + &self.block_store, + &self.ledger, + pub_key, + self.memo.clone(), + ) + } + + /// Called by the miner as well as the peer to support get_work. + pub fn create_next_block( + tip_id: &BlockID, + tip_header: &BlockHeader, + tx_queue: &Arc, + block_store: &Arc, + ledger: &Arc, + pub_key: PublicKey, + memo: Option, + ) -> Result { + // fetch transactions to confirm from the queue + let mut txs = tx_queue.get(MAX_TRANSACTIONS_TO_INCLUDE_PER_BLOCK as usize - 1); + + // calculate total fees + let mut fees = 0; + for tx in &txs { + fees += tx.fee.expect("transaction should have a fee"); + } + + // calculate total block reward + let new_height = tip_header.height + 1; + let reward = Processor::block_creation_reward(new_height) + fees; + + // build coinbase + let tx = Transaction::new(None, pub_key, reward, None, None, None, new_height, memo); + + // prepend coinbase + txs.insert(0, tx); + + // compute the next target + let new_target = Processor::compute_target(tip_header, block_store, ledger) + .map_err(|err| MinerError::ComputeTarget(*tip_id, err))?; + + // create the block + let block = Block::new(*tip_id, new_height, new_target, tip_header.chain_work, txs)?; + + Ok(block) + } +} + +impl Drop for Miner { + fn drop(&mut self) { + for shutdown_fn in &self.shutdown_fns { + shutdown_fn(); + } + } +} + +#[derive(Error)] +pub enum MinerError { + 
#[error("public key at index {0} is missing")] + PublicKeyAtIndexMissing(usize), + #[error("failed to compute target for block: {0}")] + ComputeTarget(BlockID, #[source] ProcessBlockError), + + #[error("channel")] + Channel(#[from] ChannelError), + #[error("block")] + Block(#[from] BlockError), + #[error("ledger not found")] + LedgerNotFound(#[from] LedgerNotFoundError), + #[error("peer manager")] + PeerManager(#[from] PeerManagerError), + #[error("processing block")] + ProcessBlock(#[from] ProcessBlockError), + #[error("processor")] + ProcessorError(#[from] ProcessorError), +} + +impl_debug_error_chain!(MinerError, "miner"); + +impl From> for MinerError { + fn from(err: tokio::sync::mpsc::error::SendError) -> Self { + Self::Channel(ChannelError::Send("hash update", err.to_string())) + } +} diff --git a/src/opencl/CMakeLists.txt b/src/opencl/CMakeLists.txt new file mode 100644 index 0000000..d9bf158 --- /dev/null +++ b/src/opencl/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required (VERSION 3.10) +project(cruzbit_ocl) +find_package(OpenCL REQUIRED) +add_library(cruzbit_ocl SHARED + ocl.cc + sha3.cc + mine.cc +) +target_link_libraries(cruzbit_ocl OpenCL::OpenCL) +install(TARGETS cruzbit_ocl DESTINATION lib) diff --git a/src/opencl/COPYING b/src/opencl/COPYING new file mode 100644 index 0000000..d60c31a --- /dev/null +++ b/src/opencl/COPYING @@ -0,0 +1,340 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. 
This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. 
We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. 
+ +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. 
But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) 
+ +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. 
Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. 
Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. 
Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. 
It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. 
Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. diff --git a/src/opencl/LICENSE b/src/opencl/LICENSE new file mode 100644 index 0000000..d2d484d --- /dev/null +++ b/src/opencl/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Markku-Juhani O. Saarinen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ diff --git a/src/opencl/README.md b/src/opencl/README.md new file mode 100644 index 0000000..49bc542 --- /dev/null +++ b/src/opencl/README.md @@ -0,0 +1,10 @@ +## License + +This library either borrows code directly from or was inspired by the following: + +* [tiny_sha3](https://github.com/mjosaarinen/tiny_sha3/) +* [oclminer](https://github.com/tcatm/oclminer/) + +The [LICENSE](https://github.com/cruzbit/cruzbit/blob/master/opencl/LICENSE) file in this directory is the license associated with tiny_sha3. + +The [COPYING](https://github.com/cruzbit/cruzbit/blob/master/opencl/COPYING) file in this directory is the license associated with oclminer. diff --git a/src/opencl/cruzbit.cl b/src/opencl/cruzbit.cl new file mode 100644 index 0000000..82cfabd --- /dev/null +++ b/src/opencl/cruzbit.cl @@ -0,0 +1,292 @@ +// this runs on the device + +typedef uchar uint8_t; +typedef ulong uint64_t; +typedef long int64_t; + +// state context +typedef struct { + union { // state: + uint8_t b[200]; // 8-bit bytes + uint64_t q[25]; // 64-bit words + } st; + int pt, rsiz, mdlen; // these don't overflow +} sha3_ctx_t; + +#define WGS __attribute__((reqd_work_group_size(256, 1, 1))) +#define KECCAKF_ROUNDS 24 +#define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y)))) + +void sha3_keccakf(uint64_t st[25]); +int sha3_init(sha3_ctx_t *c, int mdlen); +int sha3_update(sha3_ctx_t *c, __constant void *data, size_t len); +int sha3_update_private(sha3_ctx_t *c, __private void *data, size_t len); +int sha3_final(void *md, sha3_ctx_t *c); +char *reverse(char *str); +int strlen(char *s); +int itoa(int64_t n, char *s); +int memcmp(const void *p1, __constant void *p2, size_t len); +void memcpy(void *dst, __constant void *src, size_t len); +void debug_print_buf(const void *buf, size_t len); +void debug_print_hash(const void *hash); + +// update the state with given number of rounds + +void sha3_keccakf(uint64_t st[25]) { + // constants + const uint64_t keccakf_rndc[24] = { + 0x0000000000000001, 
0x0000000000008082, 0x800000000000808a, + 0x8000000080008000, 0x000000000000808b, 0x0000000080000001, + 0x8000000080008081, 0x8000000000008009, 0x000000000000008a, + 0x0000000000000088, 0x0000000080008009, 0x000000008000000a, + 0x000000008000808b, 0x800000000000008b, 0x8000000000008089, + 0x8000000000008003, 0x8000000000008002, 0x8000000000000080, + 0x000000000000800a, 0x800000008000000a, 0x8000000080008081, + 0x8000000000008080, 0x0000000080000001, 0x8000000080008008}; + const int keccakf_rotc[24] = {1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, + 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44}; + const int keccakf_piln[24] = {10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, + 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1}; + + // variables + int i, j, r; + uint64_t t, bc[5]; + +#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__ + uint8_t *v; + + // endianess conversion. this is redundant on little-endian targets + for (i = 0; i < 25; i++) { + v = (uint8_t *)&st[i]; + st[i] = ((uint64_t)v[0]) | (((uint64_t)v[1]) << 8) | + (((uint64_t)v[2]) << 16) | (((uint64_t)v[3]) << 24) | + (((uint64_t)v[4]) << 32) | (((uint64_t)v[5]) << 40) | + (((uint64_t)v[6]) << 48) | (((uint64_t)v[7]) << 56); + } +#endif + + // actual iteration + for (r = 0; r < KECCAKF_ROUNDS; r++) { + + // Theta + for (i = 0; i < 5; i++) + bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15] ^ st[i + 20]; + + for (i = 0; i < 5; i++) { + t = bc[(i + 4) % 5] ^ ROTL64(bc[(i + 1) % 5], 1); + for (j = 0; j < 25; j += 5) + st[j + i] ^= t; + } + + // Rho Pi + t = st[1]; + for (i = 0; i < 24; i++) { + j = keccakf_piln[i]; + bc[0] = st[j]; + st[j] = ROTL64(t, keccakf_rotc[i]); + t = bc[0]; + } + + // Chi + for (j = 0; j < 25; j += 5) { + for (i = 0; i < 5; i++) + bc[i] = st[j + i]; + for (i = 0; i < 5; i++) + st[j + i] ^= (~bc[(i + 1) % 5]) & bc[(i + 2) % 5]; + } + + // Iota + st[0] ^= keccakf_rndc[r]; + } + +#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__ + // endianess conversion. 
this is redundant on little-endian targets + for (i = 0; i < 25; i++) { + v = (uint8_t *)&st[i]; + t = st[i]; + v[0] = t & 0xFF; + v[1] = (t >> 8) & 0xFF; + v[2] = (t >> 16) & 0xFF; + v[3] = (t >> 24) & 0xFF; + v[4] = (t >> 32) & 0xFF; + v[5] = (t >> 40) & 0xFF; + v[6] = (t >> 48) & 0xFF; + v[7] = (t >> 56) & 0xFF; + } +#endif +} + +// Initialize the context for SHA3 + +int sha3_init(sha3_ctx_t *c, int mdlen) { + int i; + + for (i = 0; i < 25; i++) + c->st.q[i] = 0; + c->mdlen = mdlen; + c->rsiz = 200 - 2 * mdlen; + c->pt = 0; + + return 1; +} + +// update state with more data + +int sha3_update(sha3_ctx_t *c, __constant void *data, size_t len) { + size_t i; + int j; + + j = c->pt; + for (i = 0; i < len; i++) { + c->st.b[j++] ^= ((__constant uint8_t *)data)[i]; + if (j >= c->rsiz) { + sha3_keccakf(c->st.q); + j = 0; + } + } + c->pt = j; + + return 1; +} + +int sha3_update_private(sha3_ctx_t *c, __private void *data, size_t len) { + size_t i; + int j; + + j = c->pt; + for (i = 0; i < len; i++) { + c->st.b[j++] ^= ((__private uint8_t *)data)[i]; + if (j >= c->rsiz) { + sha3_keccakf(c->st.q); + j = 0; + } + } + c->pt = j; + + return 1; +} + +// finalize and output a hash + +int sha3_final(void *md, sha3_ctx_t *c) { + int i; + + c->st.b[c->pt] ^= 0x06; + c->st.b[c->rsiz - 1] ^= 0x80; + sha3_keccakf(c->st.q); + + for (i = 0; i < c->mdlen; i++) { + ((uint8_t *)md)[i] = c->st.b[i]; + } + + return 1; +} + +int memcmp(const void *p1, __constant void *p2, size_t len) { + for (size_t i = 0; i < len; i++) { + uint8_t b1 = ((uint8_t *)p1)[i]; + uint8_t b2 = ((__constant uint8_t *)p2)[i]; + if (b1 < b2) { + return -1; + } + if (b1 > b2) { + return 1; + } + } + return 0; +} + +void memcpy(void *dst, __constant void *src, size_t len) { + for (size_t i = 0; i < len; i++) { + ((uint8_t *)dst)[i] = ((__constant uint8_t *)src)[i]; + } +} + +char *reverse(char *str) { + char tmp, *src, *dst; + size_t len; + if (str != 0) { + len = strlen(str); + if (len > 1) { + src = str; + dst = src 
+ len - 1; + while (src < dst) { + tmp = *src; + *src++ = *dst; + *dst-- = tmp; + } + } + } + return str; +} + +int strlen(char *s) { + int i; + for (i = 0; s[i] != '\0';) { + i++; + } + return i; +} + +int itoa(int64_t n, char *s) { + int i; + int64_t sign; + + if ((sign = n) < 0) /* record sign */ + n = -n; /* make n positive */ + i = 0; + + do { /* generate digits in reverse order */ + s[i++] = n % 10 + '0'; /* get next digit */ + } while ((n /= 10) > 0); /* delete it */ + + if (sign < 0) + s[i++] = '-'; + + s[i] = '\0'; + reverse(s); + return i; +} + +void debug_print_buf(const void *buf, size_t len) { + for (int i = 0; i < len; i++) { + printf("%c", ((char *)buf)[i]); + } + printf("\n"); +} + +void debug_print_hash(const void *hash) { + for (int i = 0; i < 32; i++) { + printf("%02x", ((char *)hash)[i] & 0xFF); + } + printf("\n"); +} + +typedef struct { + sha3_ctx_t prev_sha3; + uint8_t last[512], target[32]; + size_t last_len; +} miner_state; + +__kernel __attribute__((vec_type_hint(uint))) WGS void +cruzbit(__constant miner_state *state, __global int64_t *good_nonce, + __global uint8_t *found) { + uint8_t hash[32]; + char nonce_s[20]; + + int index = get_global_id(0); + int64_t nonce = *good_nonce + (int64_t)index; + size_t n = (size_t)itoa(nonce, nonce_s); + + sha3_ctx_t sha3; + memcpy(&sha3, (__constant void *)&state->prev_sha3, sizeof(sha3_ctx_t)); + sha3_update_private(&sha3, nonce_s, n); + sha3_update(&sha3, (__constant char *)state->last, state->last_len); + sha3_final(hash, &sha3); + + if (memcmp(hash, state->target, 32) <= 0) { + // found a solution. 
not thread-safe but a race is very unlikely + *good_nonce = nonce; + *found = 1; + } +} diff --git a/src/opencl/cruzbit.h b/src/opencl/cruzbit.h new file mode 100644 index 0000000..3b3f796 --- /dev/null +++ b/src/opencl/cruzbit.h @@ -0,0 +1,4 @@ +const char opencl_cruzbit[] = { +0x2f, 0x2f, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x72, 0x75, 0x6e, 0x73, 0x20, 0x6f, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x0a, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x64, 0x65, 0x66, 0x20, 0x75, 0x63, 0x68, 0x61, 0x72, 0x20, 0x75, 0x69, 0x6e, 0x74, 0x38, 0x5f, 0x74, 0x3b, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x64, 0x65, 0x66, 0x20, 0x75, 0x6c, 0x6f, 0x6e, 0x67, 0x20, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x74, 0x3b, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x64, 0x65, 0x66, 0x20, 0x6c, 0x6f, 0x6e, 0x67, 0x20, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x74, 0x3b, 0x0a, 0x0a, 0x2f, 0x2f, 0x20, 0x73, 0x74, 0x61, 0x74, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x64, 0x65, 0x66, 0x20, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x75, 0x6e, 0x69, 0x6f, 0x6e, 0x20, 0x7b, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x73, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x75, 0x69, 0x6e, 0x74, 0x38, 0x5f, 0x74, 0x20, 0x62, 0x5b, 0x32, 0x30, 0x30, 0x5d, 0x3b, 0x20, 0x2f, 0x2f, 0x20, 0x38, 0x2d, 0x62, 0x69, 0x74, 0x20, 0x62, 0x79, 0x74, 0x65, 0x73, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x74, 0x20, 0x71, 0x5b, 0x32, 0x35, 0x5d, 0x3b, 0x20, 0x2f, 0x2f, 0x20, 0x36, 0x34, 0x2d, 0x62, 0x69, 0x74, 0x20, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x0a, 0x20, 0x20, 0x7d, 0x20, 0x73, 0x74, 0x3b, 0x0a, 0x20, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x70, 0x74, 0x2c, 0x20, 0x72, 0x73, 0x69, 0x7a, 0x2c, 0x20, 0x6d, 0x64, 0x6c, 0x65, 0x6e, 0x3b, 0x20, 0x2f, 0x2f, 0x20, 0x74, 0x68, 0x65, 0x73, 0x65, 0x20, 0x64, 0x6f, 0x6e, 0x27, 0x74, 0x20, 0x6f, 0x76, 0x65, 0x72, 0x66, 0x6c, 0x6f, 
0x77, 0x0a, 0x7d, 0x20, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x63, 0x74, 0x78, 0x5f, 0x74, 0x3b, 0x0a, 0x0a, 0x23, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x20, 0x57, 0x47, 0x53, 0x20, 0x5f, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x5f, 0x5f, 0x28, 0x28, 0x72, 0x65, 0x71, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x28, 0x32, 0x35, 0x36, 0x2c, 0x20, 0x31, 0x2c, 0x20, 0x31, 0x29, 0x29, 0x29, 0x0a, 0x23, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x20, 0x4b, 0x45, 0x43, 0x43, 0x41, 0x4b, 0x46, 0x5f, 0x52, 0x4f, 0x55, 0x4e, 0x44, 0x53, 0x20, 0x32, 0x34, 0x0a, 0x23, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x20, 0x52, 0x4f, 0x54, 0x4c, 0x36, 0x34, 0x28, 0x78, 0x2c, 0x20, 0x79, 0x29, 0x20, 0x28, 0x28, 0x28, 0x78, 0x29, 0x20, 0x3c, 0x3c, 0x20, 0x28, 0x79, 0x29, 0x29, 0x20, 0x7c, 0x20, 0x28, 0x28, 0x78, 0x29, 0x20, 0x3e, 0x3e, 0x20, 0x28, 0x36, 0x34, 0x20, 0x2d, 0x20, 0x28, 0x79, 0x29, 0x29, 0x29, 0x29, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x6b, 0x65, 0x63, 0x63, 0x61, 0x6b, 0x66, 0x28, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x74, 0x20, 0x73, 0x74, 0x5b, 0x32, 0x35, 0x5d, 0x29, 0x3b, 0x0a, 0x69, 0x6e, 0x74, 0x20, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x28, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x63, 0x74, 0x78, 0x5f, 0x74, 0x20, 0x2a, 0x63, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x6d, 0x64, 0x6c, 0x65, 0x6e, 0x29, 0x3b, 0x0a, 0x69, 0x6e, 0x74, 0x20, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x28, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x63, 0x74, 0x78, 0x5f, 0x74, 0x20, 0x2a, 0x63, 0x2c, 0x20, 0x5f, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x20, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x2a, 0x64, 0x61, 0x74, 0x61, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x74, 0x20, 0x6c, 0x65, 0x6e, 0x29, 0x3b, 0x0a, 0x69, 0x6e, 0x74, 0x20, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x28, 0x73, 0x68, 
0x61, 0x33, 0x5f, 0x63, 0x74, 0x78, 0x5f, 0x74, 0x20, 0x2a, 0x63, 0x2c, 0x20, 0x5f, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x20, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x2a, 0x64, 0x61, 0x74, 0x61, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x74, 0x20, 0x6c, 0x65, 0x6e, 0x29, 0x3b, 0x0a, 0x69, 0x6e, 0x74, 0x20, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x28, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x2a, 0x6d, 0x64, 0x2c, 0x20, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x63, 0x74, 0x78, 0x5f, 0x74, 0x20, 0x2a, 0x63, 0x29, 0x3b, 0x0a, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x72, 0x65, 0x76, 0x65, 0x72, 0x73, 0x65, 0x28, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x73, 0x74, 0x72, 0x29, 0x3b, 0x0a, 0x69, 0x6e, 0x74, 0x20, 0x73, 0x74, 0x72, 0x6c, 0x65, 0x6e, 0x28, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x73, 0x29, 0x3b, 0x0a, 0x69, 0x6e, 0x74, 0x20, 0x69, 0x74, 0x6f, 0x61, 0x28, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x74, 0x20, 0x6e, 0x2c, 0x20, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x73, 0x29, 0x3b, 0x0a, 0x69, 0x6e, 0x74, 0x20, 0x6d, 0x65, 0x6d, 0x63, 0x6d, 0x70, 0x28, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x2a, 0x70, 0x31, 0x2c, 0x20, 0x5f, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x20, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x2a, 0x70, 0x32, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x74, 0x20, 0x6c, 0x65, 0x6e, 0x29, 0x3b, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x6d, 0x65, 0x6d, 0x63, 0x70, 0x79, 0x28, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x2a, 0x64, 0x73, 0x74, 0x2c, 0x20, 0x5f, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x20, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x2a, 0x73, 0x72, 0x63, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x74, 0x20, 0x6c, 0x65, 0x6e, 0x29, 0x3b, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x5f, 0x62, 0x75, 0x66, 0x28, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x2a, 0x62, 0x75, 0x66, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x74, 0x20, 0x6c, 0x65, 0x6e, 
0x29, 0x3b, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x28, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x2a, 0x68, 0x61, 0x73, 0x68, 0x29, 0x3b, 0x0a, 0x0a, 0x2f, 0x2f, 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x74, 0x61, 0x74, 0x65, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x67, 0x69, 0x76, 0x65, 0x6e, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x73, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x6b, 0x65, 0x63, 0x63, 0x61, 0x6b, 0x66, 0x28, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x74, 0x20, 0x73, 0x74, 0x5b, 0x32, 0x35, 0x5d, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x73, 0x0a, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x74, 0x20, 0x6b, 0x65, 0x63, 0x63, 0x61, 0x6b, 0x66, 0x5f, 0x72, 0x6e, 0x64, 0x63, 0x5b, 0x32, 0x34, 0x5d, 0x20, 0x3d, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x30, 0x78, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x31, 0x2c, 0x20, 0x30, 0x78, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x38, 0x30, 0x38, 0x32, 0x2c, 0x20, 0x30, 0x78, 0x38, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x38, 0x30, 0x38, 0x61, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x30, 0x78, 0x38, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x38, 0x30, 0x30, 0x30, 0x38, 0x30, 0x30, 0x30, 0x2c, 0x20, 0x30, 0x78, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x38, 0x30, 0x38, 0x62, 0x2c, 0x20, 0x30, 0x78, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x38, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x31, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x30, 0x78, 0x38, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x38, 
0x30, 0x30, 0x30, 0x38, 0x30, 0x38, 0x31, 0x2c, 0x20, 0x30, 0x78, 0x38, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x38, 0x30, 0x30, 0x39, 0x2c, 0x20, 0x30, 0x78, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x38, 0x61, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x30, 0x78, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x38, 0x38, 0x2c, 0x20, 0x30, 0x78, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x38, 0x30, 0x30, 0x30, 0x38, 0x30, 0x30, 0x39, 0x2c, 0x20, 0x30, 0x78, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x38, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x61, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x30, 0x78, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x38, 0x30, 0x30, 0x30, 0x38, 0x30, 0x38, 0x62, 0x2c, 0x20, 0x30, 0x78, 0x38, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x38, 0x62, 0x2c, 0x20, 0x30, 0x78, 0x38, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x38, 0x30, 0x38, 0x39, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x30, 0x78, 0x38, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x38, 0x30, 0x30, 0x33, 0x2c, 0x20, 0x30, 0x78, 0x38, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x38, 0x30, 0x30, 0x32, 0x2c, 0x20, 0x30, 0x78, 0x38, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x38, 0x30, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x30, 0x78, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x38, 0x30, 0x30, 0x61, 0x2c, 0x20, 0x30, 0x78, 0x38, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x38, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x61, 0x2c, 0x20, 0x30, 0x78, 0x38, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x38, 0x30, 0x30, 0x30, 0x38, 0x30, 0x38, 0x31, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x30, 0x78, 0x38, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 
0x38, 0x30, 0x38, 0x30, 0x2c, 0x20, 0x30, 0x78, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x38, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x31, 0x2c, 0x20, 0x30, 0x78, 0x38, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x38, 0x30, 0x30, 0x30, 0x38, 0x30, 0x30, 0x38, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x6b, 0x65, 0x63, 0x63, 0x61, 0x6b, 0x66, 0x5f, 0x72, 0x6f, 0x74, 0x63, 0x5b, 0x32, 0x34, 0x5d, 0x20, 0x3d, 0x20, 0x7b, 0x31, 0x2c, 0x20, 0x20, 0x33, 0x2c, 0x20, 0x20, 0x36, 0x2c, 0x20, 0x20, 0x31, 0x30, 0x2c, 0x20, 0x31, 0x35, 0x2c, 0x20, 0x32, 0x31, 0x2c, 0x20, 0x32, 0x38, 0x2c, 0x20, 0x33, 0x36, 0x2c, 0x20, 0x34, 0x35, 0x2c, 0x20, 0x35, 0x35, 0x2c, 0x20, 0x32, 0x2c, 0x20, 0x20, 0x31, 0x34, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x32, 0x37, 0x2c, 0x20, 0x34, 0x31, 0x2c, 0x20, 0x35, 0x36, 0x2c, 0x20, 0x38, 0x2c, 0x20, 0x20, 0x32, 0x35, 0x2c, 0x20, 0x34, 0x33, 0x2c, 0x20, 0x36, 0x32, 0x2c, 0x20, 0x31, 0x38, 0x2c, 0x20, 0x33, 0x39, 0x2c, 0x20, 0x36, 0x31, 0x2c, 0x20, 0x32, 0x30, 0x2c, 0x20, 0x34, 0x34, 0x7d, 0x3b, 0x0a, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x6b, 0x65, 0x63, 0x63, 0x61, 0x6b, 0x66, 0x5f, 0x70, 0x69, 0x6c, 0x6e, 0x5b, 0x32, 0x34, 0x5d, 0x20, 0x3d, 0x20, 0x7b, 0x31, 0x30, 0x2c, 0x20, 0x37, 0x2c, 0x20, 0x20, 0x31, 0x31, 0x2c, 0x20, 0x31, 0x37, 0x2c, 0x20, 0x31, 0x38, 0x2c, 0x20, 0x33, 0x2c, 0x20, 0x35, 0x2c, 0x20, 0x20, 0x31, 0x36, 0x2c, 0x20, 0x38, 0x2c, 0x20, 0x20, 0x32, 0x31, 0x2c, 0x20, 0x32, 0x34, 0x2c, 0x20, 0x34, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x31, 0x35, 0x2c, 0x20, 0x32, 0x33, 0x2c, 0x20, 0x31, 0x39, 0x2c, 0x20, 0x31, 0x33, 0x2c, 
0x20, 0x31, 0x32, 0x2c, 0x20, 0x32, 0x2c, 0x20, 0x32, 0x30, 0x2c, 0x20, 0x31, 0x34, 0x2c, 0x20, 0x32, 0x32, 0x2c, 0x20, 0x39, 0x2c, 0x20, 0x20, 0x36, 0x2c, 0x20, 0x20, 0x31, 0x7d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x0a, 0x20, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x69, 0x2c, 0x20, 0x6a, 0x2c, 0x20, 0x72, 0x3b, 0x0a, 0x20, 0x20, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x74, 0x20, 0x74, 0x2c, 0x20, 0x62, 0x63, 0x5b, 0x35, 0x5d, 0x3b, 0x0a, 0x0a, 0x23, 0x69, 0x66, 0x20, 0x5f, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x5f, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x5f, 0x5f, 0x20, 0x21, 0x3d, 0x20, 0x5f, 0x5f, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x5f, 0x4c, 0x49, 0x54, 0x54, 0x4c, 0x45, 0x5f, 0x45, 0x4e, 0x44, 0x49, 0x41, 0x4e, 0x5f, 0x5f, 0x0a, 0x20, 0x20, 0x75, 0x69, 0x6e, 0x74, 0x38, 0x5f, 0x74, 0x20, 0x2a, 0x76, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x65, 0x6e, 0x64, 0x69, 0x61, 0x6e, 0x65, 0x73, 0x73, 0x20, 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x72, 0x65, 0x64, 0x75, 0x6e, 0x64, 0x61, 0x6e, 0x74, 0x20, 0x6f, 0x6e, 0x20, 0x6c, 0x69, 0x74, 0x74, 0x6c, 0x65, 0x2d, 0x65, 0x6e, 0x64, 0x69, 0x61, 0x6e, 0x20, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x0a, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x69, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x20, 0x69, 0x20, 0x3c, 0x20, 0x32, 0x35, 0x3b, 0x20, 0x69, 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x20, 0x3d, 0x20, 0x28, 0x75, 0x69, 0x6e, 0x74, 0x38, 0x5f, 0x74, 0x20, 0x2a, 0x29, 0x26, 0x73, 0x74, 0x5b, 0x69, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x73, 0x74, 0x5b, 0x69, 0x5d, 0x20, 0x3d, 0x20, 0x28, 0x28, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x74, 0x29, 0x76, 0x5b, 0x30, 0x5d, 0x29, 0x20, 0x7c, 0x20, 0x28, 0x28, 0x28, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x74, 0x29, 0x76, 0x5b, 0x31, 0x5d, 0x29, 0x20, 0x3c, 0x3c, 0x20, 0x38, 0x29, 0x20, 0x7c, 0x0a, 0x20, 0x20, 0x20, 0x20, 
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x74, 0x29, 0x76, 0x5b, 0x32, 0x5d, 0x29, 0x20, 0x3c, 0x3c, 0x20, 0x31, 0x36, 0x29, 0x20, 0x7c, 0x20, 0x28, 0x28, 0x28, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x74, 0x29, 0x76, 0x5b, 0x33, 0x5d, 0x29, 0x20, 0x3c, 0x3c, 0x20, 0x32, 0x34, 0x29, 0x20, 0x7c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x74, 0x29, 0x76, 0x5b, 0x34, 0x5d, 0x29, 0x20, 0x3c, 0x3c, 0x20, 0x33, 0x32, 0x29, 0x20, 0x7c, 0x20, 0x28, 0x28, 0x28, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x74, 0x29, 0x76, 0x5b, 0x35, 0x5d, 0x29, 0x20, 0x3c, 0x3c, 0x20, 0x34, 0x30, 0x29, 0x20, 0x7c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x28, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x74, 0x29, 0x76, 0x5b, 0x36, 0x5d, 0x29, 0x20, 0x3c, 0x3c, 0x20, 0x34, 0x38, 0x29, 0x20, 0x7c, 0x20, 0x28, 0x28, 0x28, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x74, 0x29, 0x76, 0x5b, 0x37, 0x5d, 0x29, 0x20, 0x3c, 0x3c, 0x20, 0x35, 0x36, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x23, 0x65, 0x6e, 0x64, 0x69, 0x66, 0x0a, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x61, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x20, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x72, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x20, 0x72, 0x20, 0x3c, 0x20, 0x4b, 0x45, 0x43, 0x43, 0x41, 0x4b, 0x46, 0x5f, 0x52, 0x4f, 0x55, 0x4e, 0x44, 0x53, 0x3b, 0x20, 0x72, 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x54, 0x68, 0x65, 0x74, 0x61, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x69, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x20, 0x69, 0x20, 0x3c, 0x20, 0x35, 0x3b, 0x20, 0x69, 0x2b, 0x2b, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x62, 0x63, 0x5b, 0x69, 0x5d, 0x20, 0x3d, 0x20, 0x73, 0x74, 0x5b, 0x69, 0x5d, 0x20, 0x5e, 0x20, 0x73, 
0x74, 0x5b, 0x69, 0x20, 0x2b, 0x20, 0x35, 0x5d, 0x20, 0x5e, 0x20, 0x73, 0x74, 0x5b, 0x69, 0x20, 0x2b, 0x20, 0x31, 0x30, 0x5d, 0x20, 0x5e, 0x20, 0x73, 0x74, 0x5b, 0x69, 0x20, 0x2b, 0x20, 0x31, 0x35, 0x5d, 0x20, 0x5e, 0x20, 0x73, 0x74, 0x5b, 0x69, 0x20, 0x2b, 0x20, 0x32, 0x30, 0x5d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x69, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x20, 0x69, 0x20, 0x3c, 0x20, 0x35, 0x3b, 0x20, 0x69, 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x20, 0x3d, 0x20, 0x62, 0x63, 0x5b, 0x28, 0x69, 0x20, 0x2b, 0x20, 0x34, 0x29, 0x20, 0x25, 0x20, 0x35, 0x5d, 0x20, 0x5e, 0x20, 0x52, 0x4f, 0x54, 0x4c, 0x36, 0x34, 0x28, 0x62, 0x63, 0x5b, 0x28, 0x69, 0x20, 0x2b, 0x20, 0x31, 0x29, 0x20, 0x25, 0x20, 0x35, 0x5d, 0x2c, 0x20, 0x31, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x6a, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x20, 0x6a, 0x20, 0x3c, 0x20, 0x32, 0x35, 0x3b, 0x20, 0x6a, 0x20, 0x2b, 0x3d, 0x20, 0x35, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x74, 0x5b, 0x6a, 0x20, 0x2b, 0x20, 0x69, 0x5d, 0x20, 0x5e, 0x3d, 0x20, 0x74, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x52, 0x68, 0x6f, 0x20, 0x50, 0x69, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x74, 0x20, 0x3d, 0x20, 0x73, 0x74, 0x5b, 0x31, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x69, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x20, 0x69, 0x20, 0x3c, 0x20, 0x32, 0x34, 0x3b, 0x20, 0x69, 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6a, 0x20, 0x3d, 0x20, 0x6b, 0x65, 0x63, 0x63, 0x61, 0x6b, 0x66, 0x5f, 0x70, 0x69, 0x6c, 0x6e, 0x5b, 0x69, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x62, 0x63, 0x5b, 0x30, 0x5d, 0x20, 0x3d, 0x20, 0x73, 0x74, 0x5b, 0x6a, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x74, 0x5b, 0x6a, 0x5d, 0x20, 0x3d, 0x20, 0x52, 0x4f, 0x54, 0x4c, 0x36, 0x34, 0x28, 0x74, 0x2c, 0x20, 
0x6b, 0x65, 0x63, 0x63, 0x61, 0x6b, 0x66, 0x5f, 0x72, 0x6f, 0x74, 0x63, 0x5b, 0x69, 0x5d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x20, 0x3d, 0x20, 0x62, 0x63, 0x5b, 0x30, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x20, 0x43, 0x68, 0x69, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x6a, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x20, 0x6a, 0x20, 0x3c, 0x20, 0x32, 0x35, 0x3b, 0x20, 0x6a, 0x20, 0x2b, 0x3d, 0x20, 0x35, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x69, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x20, 0x69, 0x20, 0x3c, 0x20, 0x35, 0x3b, 0x20, 0x69, 0x2b, 0x2b, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x62, 0x63, 0x5b, 0x69, 0x5d, 0x20, 0x3d, 0x20, 0x73, 0x74, 0x5b, 0x6a, 0x20, 0x2b, 0x20, 0x69, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x69, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x20, 0x69, 0x20, 0x3c, 0x20, 0x35, 0x3b, 0x20, 0x69, 0x2b, 0x2b, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x74, 0x5b, 0x6a, 0x20, 0x2b, 0x20, 0x69, 0x5d, 0x20, 0x5e, 0x3d, 0x20, 0x28, 0x7e, 0x62, 0x63, 0x5b, 0x28, 0x69, 0x20, 0x2b, 0x20, 0x31, 0x29, 0x20, 0x25, 0x20, 0x35, 0x5d, 0x29, 0x20, 0x26, 0x20, 0x62, 0x63, 0x5b, 0x28, 0x69, 0x20, 0x2b, 0x20, 0x32, 0x29, 0x20, 0x25, 0x20, 0x35, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x20, 0x49, 0x6f, 0x74, 0x61, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x73, 0x74, 0x5b, 0x30, 0x5d, 0x20, 0x5e, 0x3d, 0x20, 0x6b, 0x65, 0x63, 0x63, 0x61, 0x6b, 0x66, 0x5f, 0x72, 0x6e, 0x64, 0x63, 0x5b, 0x72, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x23, 0x69, 0x66, 0x20, 0x5f, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x5f, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x5f, 0x5f, 0x20, 0x21, 0x3d, 0x20, 0x5f, 0x5f, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x5f, 0x4c, 0x49, 0x54, 0x54, 0x4c, 0x45, 0x5f, 0x45, 0x4e, 0x44, 0x49, 0x41, 0x4e, 0x5f, 
0x5f, 0x0a, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x65, 0x6e, 0x64, 0x69, 0x61, 0x6e, 0x65, 0x73, 0x73, 0x20, 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x72, 0x65, 0x64, 0x75, 0x6e, 0x64, 0x61, 0x6e, 0x74, 0x20, 0x6f, 0x6e, 0x20, 0x6c, 0x69, 0x74, 0x74, 0x6c, 0x65, 0x2d, 0x65, 0x6e, 0x64, 0x69, 0x61, 0x6e, 0x20, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x0a, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x69, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x20, 0x69, 0x20, 0x3c, 0x20, 0x32, 0x35, 0x3b, 0x20, 0x69, 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x20, 0x3d, 0x20, 0x28, 0x75, 0x69, 0x6e, 0x74, 0x38, 0x5f, 0x74, 0x20, 0x2a, 0x29, 0x26, 0x73, 0x74, 0x5b, 0x69, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x74, 0x20, 0x3d, 0x20, 0x73, 0x74, 0x5b, 0x69, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x5b, 0x30, 0x5d, 0x20, 0x3d, 0x20, 0x74, 0x20, 0x26, 0x20, 0x30, 0x78, 0x46, 0x46, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x5b, 0x31, 0x5d, 0x20, 0x3d, 0x20, 0x28, 0x74, 0x20, 0x3e, 0x3e, 0x20, 0x38, 0x29, 0x20, 0x26, 0x20, 0x30, 0x78, 0x46, 0x46, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x5b, 0x32, 0x5d, 0x20, 0x3d, 0x20, 0x28, 0x74, 0x20, 0x3e, 0x3e, 0x20, 0x31, 0x36, 0x29, 0x20, 0x26, 0x20, 0x30, 0x78, 0x46, 0x46, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x5b, 0x33, 0x5d, 0x20, 0x3d, 0x20, 0x28, 0x74, 0x20, 0x3e, 0x3e, 0x20, 0x32, 0x34, 0x29, 0x20, 0x26, 0x20, 0x30, 0x78, 0x46, 0x46, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x5b, 0x34, 0x5d, 0x20, 0x3d, 0x20, 0x28, 0x74, 0x20, 0x3e, 0x3e, 0x20, 0x33, 0x32, 0x29, 0x20, 0x26, 0x20, 0x30, 0x78, 0x46, 0x46, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x5b, 0x35, 0x5d, 0x20, 0x3d, 0x20, 0x28, 0x74, 0x20, 0x3e, 0x3e, 0x20, 0x34, 0x30, 0x29, 0x20, 0x26, 0x20, 0x30, 0x78, 0x46, 0x46, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x5b, 0x36, 0x5d, 0x20, 0x3d, 0x20, 0x28, 0x74, 0x20, 0x3e, 0x3e, 0x20, 0x34, 0x38, 0x29, 0x20, 0x26, 0x20, 0x30, 0x78, 
0x46, 0x46, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x76, 0x5b, 0x37, 0x5d, 0x20, 0x3d, 0x20, 0x28, 0x74, 0x20, 0x3e, 0x3e, 0x20, 0x35, 0x36, 0x29, 0x20, 0x26, 0x20, 0x30, 0x78, 0x46, 0x46, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x23, 0x65, 0x6e, 0x64, 0x69, 0x66, 0x0a, 0x7d, 0x0a, 0x0a, 0x2f, 0x2f, 0x20, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x53, 0x48, 0x41, 0x33, 0x0a, 0x0a, 0x69, 0x6e, 0x74, 0x20, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x28, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x63, 0x74, 0x78, 0x5f, 0x74, 0x20, 0x2a, 0x63, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x6d, 0x64, 0x6c, 0x65, 0x6e, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x69, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x69, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x20, 0x69, 0x20, 0x3c, 0x20, 0x32, 0x35, 0x3b, 0x20, 0x69, 0x2b, 0x2b, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x2d, 0x3e, 0x73, 0x74, 0x2e, 0x71, 0x5b, 0x69, 0x5d, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x0a, 0x20, 0x20, 0x63, 0x2d, 0x3e, 0x6d, 0x64, 0x6c, 0x65, 0x6e, 0x20, 0x3d, 0x20, 0x6d, 0x64, 0x6c, 0x65, 0x6e, 0x3b, 0x0a, 0x20, 0x20, 0x63, 0x2d, 0x3e, 0x72, 0x73, 0x69, 0x7a, 0x20, 0x3d, 0x20, 0x32, 0x30, 0x30, 0x20, 0x2d, 0x20, 0x32, 0x20, 0x2a, 0x20, 0x6d, 0x64, 0x6c, 0x65, 0x6e, 0x3b, 0x0a, 0x20, 0x20, 0x63, 0x2d, 0x3e, 0x70, 0x74, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x31, 0x3b, 0x0a, 0x7d, 0x0a, 0x0a, 0x2f, 0x2f, 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x20, 0x73, 0x74, 0x61, 0x74, 0x65, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x6d, 0x6f, 0x72, 0x65, 0x20, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x0a, 0x69, 0x6e, 0x74, 0x20, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x28, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x63, 0x74, 0x78, 0x5f, 0x74, 0x20, 0x2a, 0x63, 0x2c, 0x20, 0x5f, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 
0x20, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x2a, 0x64, 0x61, 0x74, 0x61, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x74, 0x20, 0x6c, 0x65, 0x6e, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x74, 0x20, 0x69, 0x3b, 0x0a, 0x20, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x6a, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x6a, 0x20, 0x3d, 0x20, 0x63, 0x2d, 0x3e, 0x70, 0x74, 0x3b, 0x0a, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x69, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x20, 0x69, 0x20, 0x3c, 0x20, 0x6c, 0x65, 0x6e, 0x3b, 0x20, 0x69, 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x2d, 0x3e, 0x73, 0x74, 0x2e, 0x62, 0x5b, 0x6a, 0x2b, 0x2b, 0x5d, 0x20, 0x5e, 0x3d, 0x20, 0x28, 0x28, 0x5f, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x20, 0x75, 0x69, 0x6e, 0x74, 0x38, 0x5f, 0x74, 0x20, 0x2a, 0x29, 0x64, 0x61, 0x74, 0x61, 0x29, 0x5b, 0x69, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x6a, 0x20, 0x3e, 0x3d, 0x20, 0x63, 0x2d, 0x3e, 0x72, 0x73, 0x69, 0x7a, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x6b, 0x65, 0x63, 0x63, 0x61, 0x6b, 0x66, 0x28, 0x63, 0x2d, 0x3e, 0x73, 0x74, 0x2e, 0x71, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6a, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x63, 0x2d, 0x3e, 0x70, 0x74, 0x20, 0x3d, 0x20, 0x6a, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x31, 0x3b, 0x0a, 0x7d, 0x0a, 0x0a, 0x69, 0x6e, 0x74, 0x20, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x28, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x63, 0x74, 0x78, 0x5f, 0x74, 0x20, 0x2a, 0x63, 0x2c, 0x20, 0x5f, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x20, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x2a, 0x64, 0x61, 0x74, 0x61, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x74, 0x20, 0x6c, 0x65, 0x6e, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x74, 
0x20, 0x69, 0x3b, 0x0a, 0x20, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x6a, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x6a, 0x20, 0x3d, 0x20, 0x63, 0x2d, 0x3e, 0x70, 0x74, 0x3b, 0x0a, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x69, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x20, 0x69, 0x20, 0x3c, 0x20, 0x6c, 0x65, 0x6e, 0x3b, 0x20, 0x69, 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x2d, 0x3e, 0x73, 0x74, 0x2e, 0x62, 0x5b, 0x6a, 0x2b, 0x2b, 0x5d, 0x20, 0x5e, 0x3d, 0x20, 0x28, 0x28, 0x5f, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x20, 0x75, 0x69, 0x6e, 0x74, 0x38, 0x5f, 0x74, 0x20, 0x2a, 0x29, 0x64, 0x61, 0x74, 0x61, 0x29, 0x5b, 0x69, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x6a, 0x20, 0x3e, 0x3d, 0x20, 0x63, 0x2d, 0x3e, 0x72, 0x73, 0x69, 0x7a, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x6b, 0x65, 0x63, 0x63, 0x61, 0x6b, 0x66, 0x28, 0x63, 0x2d, 0x3e, 0x73, 0x74, 0x2e, 0x71, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6a, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x63, 0x2d, 0x3e, 0x70, 0x74, 0x20, 0x3d, 0x20, 0x6a, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x31, 0x3b, 0x0a, 0x7d, 0x0a, 0x0a, 0x2f, 0x2f, 0x20, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x20, 0x61, 0x20, 0x68, 0x61, 0x73, 0x68, 0x0a, 0x0a, 0x69, 0x6e, 0x74, 0x20, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x28, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x2a, 0x6d, 0x64, 0x2c, 0x20, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x63, 0x74, 0x78, 0x5f, 0x74, 0x20, 0x2a, 0x63, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x69, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x63, 0x2d, 0x3e, 0x73, 0x74, 0x2e, 0x62, 0x5b, 0x63, 0x2d, 0x3e, 0x70, 0x74, 0x5d, 0x20, 0x5e, 0x3d, 0x20, 0x30, 0x78, 0x30, 0x36, 0x3b, 0x0a, 0x20, 0x20, 0x63, 0x2d, 0x3e, 0x73, 0x74, 0x2e, 0x62, 0x5b, 
0x63, 0x2d, 0x3e, 0x72, 0x73, 0x69, 0x7a, 0x20, 0x2d, 0x20, 0x31, 0x5d, 0x20, 0x5e, 0x3d, 0x20, 0x30, 0x78, 0x38, 0x30, 0x3b, 0x0a, 0x20, 0x20, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x6b, 0x65, 0x63, 0x63, 0x61, 0x6b, 0x66, 0x28, 0x63, 0x2d, 0x3e, 0x73, 0x74, 0x2e, 0x71, 0x29, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x69, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x20, 0x69, 0x20, 0x3c, 0x20, 0x63, 0x2d, 0x3e, 0x6d, 0x64, 0x6c, 0x65, 0x6e, 0x3b, 0x20, 0x69, 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x75, 0x69, 0x6e, 0x74, 0x38, 0x5f, 0x74, 0x20, 0x2a, 0x29, 0x6d, 0x64, 0x29, 0x5b, 0x69, 0x5d, 0x20, 0x3d, 0x20, 0x63, 0x2d, 0x3e, 0x73, 0x74, 0x2e, 0x62, 0x5b, 0x69, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x31, 0x3b, 0x0a, 0x7d, 0x0a, 0x0a, 0x69, 0x6e, 0x74, 0x20, 0x6d, 0x65, 0x6d, 0x63, 0x6d, 0x70, 0x28, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x2a, 0x70, 0x31, 0x2c, 0x20, 0x5f, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x20, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x2a, 0x70, 0x32, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x74, 0x20, 0x6c, 0x65, 0x6e, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x74, 0x20, 0x69, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x20, 0x69, 0x20, 0x3c, 0x20, 0x6c, 0x65, 0x6e, 0x3b, 0x20, 0x69, 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x75, 0x69, 0x6e, 0x74, 0x38, 0x5f, 0x74, 0x20, 0x62, 0x31, 0x20, 0x3d, 0x20, 0x28, 0x28, 0x75, 0x69, 0x6e, 0x74, 0x38, 0x5f, 0x74, 0x20, 0x2a, 0x29, 0x70, 0x31, 0x29, 0x5b, 0x69, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x75, 0x69, 0x6e, 0x74, 0x38, 0x5f, 0x74, 0x20, 0x62, 0x32, 0x20, 0x3d, 0x20, 0x28, 0x28, 0x5f, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x20, 0x75, 0x69, 0x6e, 0x74, 0x38, 0x5f, 0x74, 0x20, 0x2a, 0x29, 0x70, 0x32, 0x29, 0x5b, 0x69, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x62, 
0x31, 0x20, 0x3c, 0x20, 0x62, 0x32, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x2d, 0x31, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x62, 0x31, 0x20, 0x3e, 0x20, 0x62, 0x32, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x31, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x30, 0x3b, 0x0a, 0x7d, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x6d, 0x65, 0x6d, 0x63, 0x70, 0x79, 0x28, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x2a, 0x64, 0x73, 0x74, 0x2c, 0x20, 0x5f, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x20, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x2a, 0x73, 0x72, 0x63, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x74, 0x20, 0x6c, 0x65, 0x6e, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x74, 0x20, 0x69, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x20, 0x69, 0x20, 0x3c, 0x20, 0x6c, 0x65, 0x6e, 0x3b, 0x20, 0x69, 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x28, 0x28, 0x75, 0x69, 0x6e, 0x74, 0x38, 0x5f, 0x74, 0x20, 0x2a, 0x29, 0x64, 0x73, 0x74, 0x29, 0x5b, 0x69, 0x5d, 0x20, 0x3d, 0x20, 0x28, 0x28, 0x5f, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x20, 0x75, 0x69, 0x6e, 0x74, 0x38, 0x5f, 0x74, 0x20, 0x2a, 0x29, 0x73, 0x72, 0x63, 0x29, 0x5b, 0x69, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x7d, 0x0a, 0x0a, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x72, 0x65, 0x76, 0x65, 0x72, 0x73, 0x65, 0x28, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x73, 0x74, 0x72, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x63, 0x68, 0x61, 0x72, 0x20, 0x74, 0x6d, 0x70, 0x2c, 0x20, 0x2a, 0x73, 0x72, 0x63, 0x2c, 0x20, 0x2a, 0x64, 0x73, 0x74, 0x3b, 0x0a, 0x20, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x74, 0x20, 0x6c, 0x65, 0x6e, 0x3b, 0x0a, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x73, 0x74, 0x72, 0x20, 0x21, 0x3d, 0x20, 0x30, 
0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x65, 0x6e, 0x20, 0x3d, 0x20, 0x73, 0x74, 0x72, 0x6c, 0x65, 0x6e, 0x28, 0x73, 0x74, 0x72, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x6c, 0x65, 0x6e, 0x20, 0x3e, 0x20, 0x31, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x73, 0x72, 0x63, 0x20, 0x3d, 0x20, 0x73, 0x74, 0x72, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x64, 0x73, 0x74, 0x20, 0x3d, 0x20, 0x73, 0x72, 0x63, 0x20, 0x2b, 0x20, 0x6c, 0x65, 0x6e, 0x20, 0x2d, 0x20, 0x31, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x77, 0x68, 0x69, 0x6c, 0x65, 0x20, 0x28, 0x73, 0x72, 0x63, 0x20, 0x3c, 0x20, 0x64, 0x73, 0x74, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x74, 0x6d, 0x70, 0x20, 0x3d, 0x20, 0x2a, 0x73, 0x72, 0x63, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2a, 0x73, 0x72, 0x63, 0x2b, 0x2b, 0x20, 0x3d, 0x20, 0x2a, 0x64, 0x73, 0x74, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2a, 0x64, 0x73, 0x74, 0x2d, 0x2d, 0x20, 0x3d, 0x20, 0x74, 0x6d, 0x70, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x73, 0x74, 0x72, 0x3b, 0x0a, 0x7d, 0x0a, 0x0a, 0x69, 0x6e, 0x74, 0x20, 0x73, 0x74, 0x72, 0x6c, 0x65, 0x6e, 0x28, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x69, 0x3b, 0x0a, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x69, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x20, 0x73, 0x5b, 0x69, 0x5d, 0x20, 0x21, 0x3d, 0x20, 0x27, 0x5c, 0x30, 0x27, 0x3b, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x69, 0x2b, 0x2b, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x69, 0x3b, 0x0a, 0x7d, 0x0a, 0x0a, 0x69, 0x6e, 0x74, 0x20, 0x69, 0x74, 0x6f, 0x61, 0x28, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x74, 0x20, 0x6e, 0x2c, 0x20, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x73, 
0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x69, 0x3b, 0x0a, 0x20, 0x20, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x74, 0x20, 0x73, 0x69, 0x67, 0x6e, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x28, 0x73, 0x69, 0x67, 0x6e, 0x20, 0x3d, 0x20, 0x6e, 0x29, 0x20, 0x3c, 0x20, 0x30, 0x29, 0x20, 0x2f, 0x2a, 0x20, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x20, 0x73, 0x69, 0x67, 0x6e, 0x20, 0x2a, 0x2f, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x6e, 0x20, 0x3d, 0x20, 0x2d, 0x6e, 0x3b, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2a, 0x20, 0x6d, 0x61, 0x6b, 0x65, 0x20, 0x6e, 0x20, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x20, 0x2a, 0x2f, 0x0a, 0x20, 0x20, 0x69, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x64, 0x6f, 0x20, 0x7b, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2a, 0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x20, 0x64, 0x69, 0x67, 0x69, 0x74, 0x73, 0x20, 0x69, 0x6e, 0x20, 0x72, 0x65, 0x76, 0x65, 0x72, 0x73, 0x65, 0x20, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x20, 0x2a, 0x2f, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x73, 0x5b, 0x69, 0x2b, 0x2b, 0x5d, 0x20, 0x3d, 0x20, 0x6e, 0x20, 0x25, 0x20, 0x31, 0x30, 0x20, 0x2b, 0x20, 0x27, 0x30, 0x27, 0x3b, 0x20, 0x2f, 0x2a, 0x20, 0x67, 0x65, 0x74, 0x20, 0x6e, 0x65, 0x78, 0x74, 0x20, 0x64, 0x69, 0x67, 0x69, 0x74, 0x20, 0x2a, 0x2f, 0x0a, 0x20, 0x20, 0x7d, 0x20, 0x77, 0x68, 0x69, 0x6c, 0x65, 0x20, 0x28, 0x28, 0x6e, 0x20, 0x2f, 0x3d, 0x20, 0x31, 0x30, 0x29, 0x20, 0x3e, 0x20, 0x30, 0x29, 0x3b, 0x20, 0x2f, 0x2a, 0x20, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x20, 0x69, 0x74, 0x20, 0x2a, 0x2f, 0x0a, 0x0a, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x73, 0x69, 0x67, 0x6e, 0x20, 0x3c, 0x20, 0x30, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x73, 0x5b, 0x69, 0x2b, 0x2b, 0x5d, 0x20, 0x3d, 0x20, 0x27, 0x2d, 0x27, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x73, 0x5b, 0x69, 0x5d, 0x20, 0x3d, 0x20, 0x27, 0x5c, 0x30, 0x27, 0x3b, 0x0a, 0x20, 
0x20, 0x72, 0x65, 0x76, 0x65, 0x72, 0x73, 0x65, 0x28, 0x73, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x69, 0x3b, 0x0a, 0x7d, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x5f, 0x62, 0x75, 0x66, 0x28, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x2a, 0x62, 0x75, 0x66, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x74, 0x20, 0x6c, 0x65, 0x6e, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x69, 0x6e, 0x74, 0x20, 0x69, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x20, 0x69, 0x20, 0x3c, 0x20, 0x6c, 0x65, 0x6e, 0x3b, 0x20, 0x69, 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x25, 0x63, 0x22, 0x2c, 0x20, 0x28, 0x28, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x29, 0x62, 0x75, 0x66, 0x29, 0x5b, 0x69, 0x5d, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x5c, 0x6e, 0x22, 0x29, 0x3b, 0x0a, 0x7d, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x28, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x2a, 0x68, 0x61, 0x73, 0x68, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x69, 0x6e, 0x74, 0x20, 0x69, 0x20, 0x3d, 0x20, 0x30, 0x3b, 0x20, 0x69, 0x20, 0x3c, 0x20, 0x33, 0x32, 0x3b, 0x20, 0x69, 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x25, 0x30, 0x32, 0x78, 0x22, 0x2c, 0x20, 0x28, 0x28, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x29, 0x68, 0x61, 0x73, 0x68, 0x29, 0x5b, 0x69, 0x5d, 0x20, 0x26, 0x20, 0x30, 0x78, 0x46, 0x46, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x5c, 0x6e, 0x22, 0x29, 0x3b, 0x0a, 0x7d, 0x0a, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x64, 0x65, 0x66, 0x20, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x20, 0x7b, 
0x0a, 0x20, 0x20, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x63, 0x74, 0x78, 0x5f, 0x74, 0x20, 0x70, 0x72, 0x65, 0x76, 0x5f, 0x73, 0x68, 0x61, 0x33, 0x3b, 0x0a, 0x20, 0x20, 0x75, 0x69, 0x6e, 0x74, 0x38, 0x5f, 0x74, 0x20, 0x6c, 0x61, 0x73, 0x74, 0x5b, 0x35, 0x31, 0x32, 0x5d, 0x2c, 0x20, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5b, 0x33, 0x32, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x74, 0x20, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6c, 0x65, 0x6e, 0x3b, 0x0a, 0x7d, 0x20, 0x6d, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x3b, 0x0a, 0x0a, 0x5f, 0x5f, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x20, 0x5f, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x5f, 0x5f, 0x28, 0x28, 0x76, 0x65, 0x63, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x68, 0x69, 0x6e, 0x74, 0x28, 0x75, 0x69, 0x6e, 0x74, 0x29, 0x29, 0x29, 0x20, 0x57, 0x47, 0x53, 0x20, 0x76, 0x6f, 0x69, 0x64, 0x0a, 0x63, 0x72, 0x75, 0x7a, 0x62, 0x69, 0x74, 0x28, 0x5f, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x20, 0x6d, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x20, 0x2a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2c, 0x20, 0x5f, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x20, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x74, 0x20, 0x2a, 0x67, 0x6f, 0x6f, 0x64, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x2c, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x5f, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x20, 0x75, 0x69, 0x6e, 0x74, 0x38, 0x5f, 0x74, 0x20, 0x2a, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x75, 0x69, 0x6e, 0x74, 0x38, 0x5f, 0x74, 0x20, 0x68, 0x61, 0x73, 0x68, 0x5b, 0x33, 0x32, 0x5d, 0x3b, 0x0a, 0x20, 0x20, 0x63, 0x68, 0x61, 0x72, 0x20, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x5f, 0x73, 0x5b, 0x32, 0x30, 0x5d, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x20, 0x3d, 0x20, 0x67, 0x65, 0x74, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x28, 0x30, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 
0x74, 0x20, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x20, 0x3d, 0x20, 0x2a, 0x67, 0x6f, 0x6f, 0x64, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x20, 0x2b, 0x20, 0x28, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x74, 0x29, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x3b, 0x0a, 0x20, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x74, 0x20, 0x6e, 0x20, 0x3d, 0x20, 0x28, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x74, 0x29, 0x69, 0x74, 0x6f, 0x61, 0x28, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x2c, 0x20, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x5f, 0x73, 0x29, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x63, 0x74, 0x78, 0x5f, 0x74, 0x20, 0x73, 0x68, 0x61, 0x33, 0x3b, 0x0a, 0x20, 0x20, 0x6d, 0x65, 0x6d, 0x63, 0x70, 0x79, 0x28, 0x26, 0x73, 0x68, 0x61, 0x33, 0x2c, 0x20, 0x28, 0x5f, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x20, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x2a, 0x29, 0x26, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2d, 0x3e, 0x70, 0x72, 0x65, 0x76, 0x5f, 0x73, 0x68, 0x61, 0x33, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x6f, 0x66, 0x28, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x63, 0x74, 0x78, 0x5f, 0x74, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x28, 0x26, 0x73, 0x68, 0x61, 0x33, 0x2c, 0x20, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x5f, 0x73, 0x2c, 0x20, 0x6e, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x28, 0x26, 0x73, 0x68, 0x61, 0x33, 0x2c, 0x20, 0x28, 0x5f, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x20, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x29, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2d, 0x3e, 0x6c, 0x61, 0x73, 0x74, 0x2c, 0x20, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2d, 0x3e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6c, 0x65, 0x6e, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x73, 0x68, 0x61, 0x33, 0x5f, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x28, 0x68, 0x61, 0x73, 0x68, 0x2c, 0x20, 0x26, 0x73, 0x68, 0x61, 0x33, 0x29, 0x3b, 0x0a, 0x0a, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x6d, 0x65, 0x6d, 0x63, 0x6d, 0x70, 0x28, 0x68, 
0x61, 0x73, 0x68, 0x2c, 0x20, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2d, 0x3e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2c, 0x20, 0x33, 0x32, 0x29, 0x20, 0x3c, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x20, 0x61, 0x20, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x73, 0x61, 0x66, 0x65, 0x20, 0x62, 0x75, 0x74, 0x20, 0x61, 0x20, 0x72, 0x61, 0x63, 0x65, 0x20, 0x69, 0x73, 0x20, 0x76, 0x65, 0x72, 0x79, 0x20, 0x75, 0x6e, 0x6c, 0x69, 0x6b, 0x65, 0x6c, 0x79, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2a, 0x67, 0x6f, 0x6f, 0x64, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x20, 0x3d, 0x20, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2a, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x20, 0x3d, 0x20, 0x31, 0x3b, 0x0a, 0x20, 0x20, 0x7d, 0x0a, 0x7d, 0x0a +}; +const size_t opencl_cruzbit_size = sizeof(opencl_cruzbit); diff --git a/src/opencl/mine.cc b/src/opencl/mine.cc new file mode 100644 index 0000000..e4b8ea0 --- /dev/null +++ b/src/opencl/mine.cc @@ -0,0 +1,137 @@ +#include "ocl.h" +#include "sha3.h" +#include + +cl_state *cl_states[16]; +miner_state *miner_states[16]; + +// TODO: make this configurable +size_t num_threads = 1 << 20; + +extern "C" { + +int ocl_init() { + int device_count = num_devices_cl(); + + char name[32]; + for (int i = 0; i < device_count; i++) { + cl_states[i] = init_cl(i, name, sizeof(name)); + if (cl_states[i] == NULL) { + return -1; + } + miner_states[i] = new miner_state; + } + + return device_count; +} + +int miner_update(int miner_num, const void *first, size_t first_len, + const void *last, size_t last_len, const void *target) { + miner_state *state = miner_states[miner_num]; + + // hash the first (largest) part of the header once + sha3_init(&state->prev_sha3, 32); + sha3_update(&state->prev_sha3, first, first_len); + + // copy the end part of the header + state->last_len = last_len; + memcpy(state->last, last, 
last_len); + + // copy the target + memcpy(state->target, target, 32); + + return num_threads; +} + +int64_t miner_mine(int miner_num, int64_t start_nonce) { + cl_state *device_state = cl_states[miner_num]; + int64_t nonce = 0x7FFFFFFFFFFFFFFF; + size_t global_threads[1]; + size_t local_threads[1]; + + global_threads[0] = num_threads; + local_threads[0] = 256; + + cl_int status = clSetKernelArg(device_state->kernel, 0, sizeof(cl_mem), + (void *)&device_state->arg0); + if (status != CL_SUCCESS) { + printf("Error: Setting kernel argument 1.\n"); + return nonce; + } + + status = clSetKernelArg(device_state->kernel, 1, sizeof(cl_mem), + (void *)&device_state->arg1); + if (status != CL_SUCCESS) { + printf("Error: Setting kernel argument 2.\n"); + return nonce; + } + + status = clSetKernelArg(device_state->kernel, 2, sizeof(cl_mem), + (void *)&device_state->arg2); + if (status != CL_SUCCESS) { + printf("Error: Setting kernel argument 2.\n"); + return nonce; + } + + status = clEnqueueWriteBuffer(device_state->command_queue, device_state->arg0, + CL_TRUE, 0, sizeof(miner_state), + (void *)miner_states[miner_num], 0, NULL, NULL); + if (status != CL_SUCCESS) { + printf("Error: clEnqueueWriteBuffer failed.\n"); + return nonce; + } + + status = clEnqueueWriteBuffer(device_state->command_queue, device_state->arg1, + CL_TRUE, 0, sizeof(int64_t), + (void *)&start_nonce, 0, NULL, NULL); + if (status != CL_SUCCESS) { + printf("Error: clEnqueueWriteBuffer failed.\n"); + return nonce; + } + + uint8_t found = 0; + status = clEnqueueWriteBuffer(device_state->command_queue, device_state->arg2, + CL_TRUE, 0, sizeof(uint8_t), (void *)&found, 0, + NULL, NULL); + if (status != CL_SUCCESS) { + printf("Error: clEnqueueWriteBuffer failed.\n"); + return nonce; + } + + clFinish(device_state->command_queue); + + status = clEnqueueNDRangeKernel(device_state->command_queue, + device_state->kernel, 1, NULL, global_threads, + local_threads, 0, NULL, NULL); + if (status != CL_SUCCESS) { + 
printf("Error: Enqueueing kernel onto command queue. " + "(clEnqueueNDRangeKernel)\n"); + return nonce; + } + + clFlush(device_state->command_queue); + + status = + clEnqueueReadBuffer(device_state->command_queue, device_state->arg1, + CL_TRUE, 0, sizeof(int64_t), &nonce, 0, NULL, NULL); + if (status != CL_SUCCESS) { + printf("Error: clEnqueueReadBuffer failed. (clEnqueueReadBuffer)\n"); + return nonce; + } + + status = + clEnqueueReadBuffer(device_state->command_queue, device_state->arg2, + CL_TRUE, 0, sizeof(uint8_t), &found, 0, NULL, NULL); + if (status != CL_SUCCESS) { + printf("Error: clEnqueueReadBuffer failed. (clEnqueueReadBuffer)\n"); + return nonce; + } + + if (found == 1) { + return nonce; + } + + // not found + return 0x7FFFFFFFFFFFFFFF; +} +} diff --git a/src/opencl/ocl.cc b/src/opencl/ocl.cc new file mode 100644 index 0000000..e598808 --- /dev/null +++ b/src/opencl/ocl.cc @@ -0,0 +1,263 @@ +// this is partially re-worked from https://github.com/tcatm/oclminer/ +// primary changes to this file were renaming and addition of additional +// parameters. -asdvxgxasjab 7/30/19 +#include "ocl.h" +#include "sha3.h" +#include + +// include cruzbit opencl kernel +#include "cruzbit.h" + +int num_devices_cl() { + cl_int status = 0; + + cl_uint num_platforms; + cl_platform_id platform = NULL; + status = clGetPlatformIDs(0, NULL, &num_platforms); + if (status != CL_SUCCESS) { + printf("Error: Getting Platforms. (clGetPlatformsIDs)\n"); + return -1; + } + + if (num_platforms > 0) { + cl_platform_id *platforms = + (cl_platform_id *)malloc(num_platforms * sizeof(cl_platform_id)); + status = clGetPlatformIDs(num_platforms, platforms, NULL); + if (status != CL_SUCCESS) { + printf("Error: Getting Platform Ids. 
(clGetPlatformsIDs)\n"); + return -1; + } + + unsigned int i; + for (i = 0; i < num_platforms; ++i) { + char pbuff[100]; + status = clGetPlatformInfo(platforms[i], CL_PLATFORM_VENDOR, + sizeof(pbuff), pbuff, NULL); + if (status != CL_SUCCESS) { + printf("Error: Getting Platform Info. (clGetPlatformInfo)\n"); + free(platforms); + return -1; + } + platform = platforms[i]; + if (!strcmp(pbuff, "Advanced Micro Devices, Inc.")) { + break; + } + } + free(platforms); + } + + if (platform == NULL) { + perror("NULL platform found!\n"); + return -1; + } + + cl_uint num_devices; + status = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 0, NULL, &num_devices); + if (status != CL_SUCCESS) { + printf("Error: Getting Device IDs (num)\n"); + return -1; + } + + return num_devices; +} + +static char *read_file(const char *filename, int *length) { + FILE *f = fopen(filename, "r"); + void *buffer; + + if (!f) { + fprintf(stderr, "Unable to open %s for reading\n", filename); + return NULL; + } + + fseek(f, 0, SEEK_END); + *length = ftell(f); + fseek(f, 0, SEEK_SET); + + buffer = (char *)malloc(*length + 1); + *length = fread(buffer, 1, *length, f); + fclose(f); + ((char *)buffer)[*length] = '\0'; + + return (char *)buffer; +} + +cl_state *init_cl(int gpu, char *name, size_t name_len) { + cl_int status = 0; + cl_state *device_state = new cl_state; + + cl_uint num_platforms; + cl_platform_id platform = NULL; + status = clGetPlatformIDs(0, NULL, &num_platforms); + if (status != CL_SUCCESS) { + printf("Error: Getting Platforms. (clGetPlatformsIDs)\n"); + return NULL; + } + + if (num_platforms > 0) { + cl_platform_id *platforms = + (cl_platform_id *)malloc(num_platforms * sizeof(cl_platform_id)); + status = clGetPlatformIDs(num_platforms, platforms, NULL); + if (status != CL_SUCCESS) { + printf("Error: Getting Platform Ids. 
(clGetPlatformsIDs)\n"); + return NULL; + } + + unsigned int i; + for (i = 0; i < num_platforms; ++i) { + char pbuff[100]; + status = clGetPlatformInfo(platforms[i], CL_PLATFORM_VENDOR, + sizeof(pbuff), pbuff, NULL); + if (status != CL_SUCCESS) { + printf("Error: Getting Platform Info. (clGetPlatformInfo)\n"); + free(platforms); + return NULL; + } + platform = platforms[i]; + if (!strcmp(pbuff, "Advanced Micro Devices, Inc.")) { + break; + } + } + free(platforms); + } + + if (platform == NULL) { + perror("NULL platform found!\n"); + return NULL; + } + + cl_uint num_devices; + status = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 0, NULL, &num_devices); + if (status != CL_SUCCESS) { + printf("Error: Getting Device IDs (num)\n"); + return NULL; + } + + cl_device_id *devices; + if (num_devices > 0) { + devices = (cl_device_id *)malloc(num_devices * sizeof(cl_device_id)); + + /* Now, get the device list data */ + + status = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, num_devices, devices, + NULL); + if (status != CL_SUCCESS) { + printf("Error: Getting Device IDs (list)\n"); + return NULL; + } + + printf("OpenCL devices:\n"); + + int i; + for (i = 0; i < num_devices; i++) { + char pbuff[100]; + status = clGetDeviceInfo(devices[i], CL_DEVICE_NAME, sizeof(pbuff), pbuff, + NULL); + if (status != CL_SUCCESS) { + printf("Error: Getting Device Info\n"); + return NULL; + } + + printf("%i: %s\n", i, pbuff); + } + + if (gpu >= 0 && gpu < num_devices) { + char pbuff[100]; + status = clGetDeviceInfo(devices[gpu], CL_DEVICE_NAME, sizeof(pbuff), + pbuff, NULL); + if (status != CL_SUCCESS) { + printf("Error: Getting Device Info\n"); + return NULL; + } + + printf("Selected %i: %s\n", gpu, pbuff); + strncpy(name, pbuff, name_len); + } else { + printf("Invalid GPU %i\n", gpu); + return NULL; + } + + } else + return NULL; + + cl_context_properties cps[3] = {CL_CONTEXT_PLATFORM, + (cl_context_properties)platform, 0}; + + device_state->context = + clCreateContextFromType(cps, 
CL_DEVICE_TYPE_GPU, NULL, NULL, &status); + if (status != CL_SUCCESS) { + printf("Error: Creating Context. (clCreateContextFromType)\n"); + return NULL; + } + + ///////////////////////////////////////////////////////////////// + // Load CL file, build CL program object, create CL kernel object + ///////////////////////////////////////////////////////////////// + // + const char *sources[] = { opencl_cruzbit }; + + device_state->program = clCreateProgramWithSource( + device_state->context, 1, sources, &opencl_cruzbit_size, &status); + if (status != CL_SUCCESS) { + printf( + "Error: Loading Binary into cl_program (clCreateProgramWithBinary)\n"); + return NULL; + } + + /* create a cl program executable for all the devices specified */ + status = + clBuildProgram(device_state->program, 1, &devices[gpu], "", NULL, NULL); + if (status != CL_SUCCESS) { + printf("Error: Building Program (clBuildProgram)\n"); + size_t logSize; + status = clGetProgramBuildInfo(device_state->program, devices[gpu], + CL_PROGRAM_BUILD_LOG, 0, NULL, &logSize); + + char *log = (char *)malloc(logSize); + status = clGetProgramBuildInfo(device_state->program, devices[gpu], + CL_PROGRAM_BUILD_LOG, logSize, log, NULL); + printf("%s\n", log); + return NULL; + } + + /* get a kernel object handle for a kernel with the given name */ + device_state->kernel = + clCreateKernel(device_state->program, "cruzbit", &status); + if (status != CL_SUCCESS) { + printf("Error: Creating Kernel from program. (clCreateKernel)\n"); + return NULL; + } + + ///////////////////////////////////////////////////////////////// + // Create an OpenCL command queue + ///////////////////////////////////////////////////////////////// + device_state->command_queue = + clCreateCommandQueue(device_state->context, devices[gpu], 0, &status); + if (status != CL_SUCCESS) { + printf("Creating Command Queue. 
(clCreateCommandQueue)\n"); + return NULL; + } + + device_state->arg0 = clCreateBuffer(device_state->context, CL_MEM_READ_WRITE, + sizeof(miner_state), NULL, &status); + if (status != CL_SUCCESS) { + printf("Error: clCreateBuffer (inputBuffer)\n"); + return NULL; + } + + device_state->arg1 = clCreateBuffer(device_state->context, CL_MEM_READ_WRITE, + sizeof(int64_t), NULL, &status); + if (status != CL_SUCCESS) { + printf("Error: clCreateBuffer (foundNonce)\n"); + return NULL; + } + + device_state->arg2 = clCreateBuffer(device_state->context, CL_MEM_READ_WRITE, + sizeof(uint8_t), NULL, &status); + if (status != CL_SUCCESS) { + printf("Error: clCreateBuffer (foundNonce)\n"); + return NULL; + } + + return device_state; +} diff --git a/src/opencl/ocl.h b/src/opencl/ocl.h new file mode 100644 index 0000000..2bc4d6d --- /dev/null +++ b/src/opencl/ocl.h @@ -0,0 +1,37 @@ +// this is partially re-worked from https://github.com/tcatm/oclminer/ +// primary changes to this file were renaming and addition of additional +// parameters. -asdvxgxasjab 7/30/19 +#ifndef OCL_H_ +#define OCL_H_ + +#define CL_SILENCE_DEPRECATION 1 +#ifdef __APPLE_CC__ +#include +#else +#include +#endif +#include "sha3.h" +#include +#include +#include + +typedef struct { + cl_context context; + cl_kernel kernel; + cl_command_queue command_queue; + cl_program program; + cl_mem arg0; + cl_mem arg1; + cl_mem arg2; +} cl_state; + +typedef struct { + sha3_ctx_t prev_sha3; + uint8_t last[512], target[32]; + size_t last_len; +} miner_state; + +int num_devices_cl(); +cl_state *init_cl(int gpu, char *name, size_t name_len); + +#endif diff --git a/src/opencl/sha3.cc b/src/opencl/sha3.cc new file mode 100644 index 0000000..0ab81ce --- /dev/null +++ b/src/opencl/sha3.cc @@ -0,0 +1,143 @@ +// sha3.cc +// 19-Nov-11 Markku-Juhani O. 
Saarinen + +// Revised 07-Aug-15 to match with official release of FIPS PUB 202 "SHA3" +// Revised 03-Sep-15 for portability + OpenSSL - style API +// Revised 21-Jul-19 to strip unneeded code -asdvxgxasjab + +#include "sha3.h" + +// update the state with given number of rounds + +void sha3_keccakf(uint64_t st[25]) { + // constants + const uint64_t keccakf_rndc[24] = { + 0x0000000000000001, 0x0000000000008082, 0x800000000000808a, + 0x8000000080008000, 0x000000000000808b, 0x0000000080000001, + 0x8000000080008081, 0x8000000000008009, 0x000000000000008a, + 0x0000000000000088, 0x0000000080008009, 0x000000008000000a, + 0x000000008000808b, 0x800000000000008b, 0x8000000000008089, + 0x8000000000008003, 0x8000000000008002, 0x8000000000000080, + 0x000000000000800a, 0x800000008000000a, 0x8000000080008081, + 0x8000000000008080, 0x0000000080000001, 0x8000000080008008}; + const int keccakf_rotc[24] = {1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, + 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44}; + const int keccakf_piln[24] = {10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, + 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1}; + + // variables + int i, j, r; + uint64_t t, bc[5]; + +#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__ + uint8_t *v; + + // endianess conversion. 
this is redundant on little-endian targets + for (i = 0; i < 25; i++) { + v = (uint8_t *)&st[i]; + st[i] = ((uint64_t)v[0]) | (((uint64_t)v[1]) << 8) | + (((uint64_t)v[2]) << 16) | (((uint64_t)v[3]) << 24) | + (((uint64_t)v[4]) << 32) | (((uint64_t)v[5]) << 40) | + (((uint64_t)v[6]) << 48) | (((uint64_t)v[7]) << 56); + } +#endif + + // actual iteration + for (r = 0; r < KECCAKF_ROUNDS; r++) { + + // Theta + for (i = 0; i < 5; i++) + bc[i] = st[i] ^ st[i + 5] ^ st[i + 10] ^ st[i + 15] ^ st[i + 20]; + + for (i = 0; i < 5; i++) { + t = bc[(i + 4) % 5] ^ ROTL64(bc[(i + 1) % 5], 1); + for (j = 0; j < 25; j += 5) + st[j + i] ^= t; + } + + // Rho Pi + t = st[1]; + for (i = 0; i < 24; i++) { + j = keccakf_piln[i]; + bc[0] = st[j]; + st[j] = ROTL64(t, keccakf_rotc[i]); + t = bc[0]; + } + + // Chi + for (j = 0; j < 25; j += 5) { + for (i = 0; i < 5; i++) + bc[i] = st[j + i]; + for (i = 0; i < 5; i++) + st[j + i] ^= (~bc[(i + 1) % 5]) & bc[(i + 2) % 5]; + } + + // Iota + st[0] ^= keccakf_rndc[r]; + } + +#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__ + // endianess conversion. 
this is redundant on little-endian targets + for (i = 0; i < 25; i++) { + v = (uint8_t *)&st[i]; + t = st[i]; + v[0] = t & 0xFF; + v[1] = (t >> 8) & 0xFF; + v[2] = (t >> 16) & 0xFF; + v[3] = (t >> 24) & 0xFF; + v[4] = (t >> 32) & 0xFF; + v[5] = (t >> 40) & 0xFF; + v[6] = (t >> 48) & 0xFF; + v[7] = (t >> 56) & 0xFF; + } +#endif +} + +// Initialize the context for SHA3 + +int sha3_init(sha3_ctx_t *c, int mdlen) { + int i; + + for (i = 0; i < 25; i++) + c->st.q[i] = 0; + c->mdlen = mdlen; + c->rsiz = 200 - 2 * mdlen; + c->pt = 0; + + return 1; +} + +// update state with more data + +int sha3_update(sha3_ctx_t *c, const void *data, size_t len) { + size_t i; + int j; + + j = c->pt; + for (i = 0; i < len; i++) { + c->st.b[j++] ^= ((const uint8_t *)data)[i]; + if (j >= c->rsiz) { + sha3_keccakf(c->st.q); + j = 0; + } + } + c->pt = j; + + return 1; +} + +// finalize and output a hash + +int sha3_final(void *md, sha3_ctx_t *c) { + int i; + + c->st.b[c->pt] ^= 0x06; + c->st.b[c->rsiz - 1] ^= 0x80; + sha3_keccakf(c->st.q); + + for (i = 0; i < c->mdlen; i++) { + ((uint8_t *)md)[i] = c->st.b[i]; + } + + return 1; +} diff --git a/src/opencl/sha3.h b/src/opencl/sha3.h new file mode 100644 index 0000000..87e60aa --- /dev/null +++ b/src/opencl/sha3.h @@ -0,0 +1,38 @@ +// sha3.h +// 19-Nov-11 Markku-Juhani O. Saarinen +// Revised 21-Jul-19 to strip unneeded code and move sha3_ctx_t to its own file. +// we need to build both a host and device version of this code. -asdvxgxasjab + +#ifndef SHA3_H +#define SHA3_H + +#include +#include + +#ifndef KECCAKF_ROUNDS +#define KECCAKF_ROUNDS 24 +#endif + +#ifndef ROTL64 +#define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y)))) +#endif + +// state context +typedef struct { + union { // state: + uint8_t b[200]; // 8-bit bytes + uint64_t q[25]; // 64-bit words + } st; + int pt, rsiz, mdlen; // these don't overflow +} sha3_ctx_t; + +// Compression function. 
+void sha3_keccakf(uint64_t st[25]); + +// OpenSSL - like interfece +int sha3_init(sha3_ctx_t *c, + int mdlen); // mdlen = hash output in bytes +int sha3_update(sha3_ctx_t *c, const void *data, size_t len); +int sha3_final(void *md, sha3_ctx_t *c); // digest goes to md + +#endif diff --git a/src/peer.rs b/src/peer.rs new file mode 100644 index 0000000..f6b1554 --- /dev/null +++ b/src/peer.rs @@ -0,0 +1,2263 @@ +use std::collections::hash_map::DefaultHasher; +use std::collections::HashMap; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::str::from_utf8; +use std::sync::Arc; +use std::time::Duration; + +use cuckoofilter::{CuckooFilter, ExportedCuckooFilter}; +use ed25519_compact::PublicKey; +use futures::future::Either; +use futures::stream::SplitSink; +use futures::{SinkExt, StreamExt}; +use log::{error, info}; +use rand::Rng; +use thiserror::Error; +use tokio::net::TcpStream; +use tokio::sync::mpsc::{channel, unbounded_channel, UnboundedSender}; +use tokio::task::JoinHandle; +use tokio::time::{interval_at, sleep, timeout, Instant}; +use tokio_rustls::server::TlsStream; +use tokio_tungstenite::tungstenite::client::IntoClientRequest; +use tokio_tungstenite::tungstenite::http::StatusCode; +use tokio_tungstenite::tungstenite::{Error as WsError, Message as WsMessage}; +use tokio_tungstenite::{ + connect_async_tls_with_config, Connector, MaybeTlsStream, WebSocketStream, +}; + +use crate::block::{Block, BlockError, BlockHeader, BlockID}; +use crate::block_queue::BlockQueue; +use crate::block_storage::{BlockStorage, BlockStorageError, BlockStorageNotFoundError}; +use crate::block_storage_disk::BlockStorageDisk; +use crate::checkpoints::{CHECKPOINTS_ENABLED, LATEST_CHECKPOINT_HEIGHT}; +use crate::constants::{ + MAX_MEMO_LENGTH, MAX_PROTOCOL_MESSAGE_LENGTH, MAX_TRANSACTIONS_TO_INCLUDE_PER_BLOCK, + MIN_AMOUNT_CRUZBITS, MIN_FEE_CRUZBITS, +}; +use crate::error::{impl_debug_error_chain, ChannelError, DataError, ErrChain, JsonError}; +use crate::ledger::{BranchType, 
Ledger, LedgerError, LedgerNotFoundError}; +use crate::ledger_disk::LedgerDisk; +use crate::miner::{Miner, MinerError}; +use crate::peer_manager::{AddrChanSender, PeerManager, PeerManagerError}; +use crate::peer_storage::{PeerStorage, PeerStorageError}; +use crate::peer_storage_disk::PeerStorageDisk; +use crate::processor::{ProcessBlockError, Processor, ProcessorError}; +use crate::protocol::{ + BalanceMessage, BalancesMessage, BlockHeaderMessage, BlockMessage, FilterBlockMessage, + FilterResultMessage, FilterTransactionQueueMessage, FindCommonAncestorMessage, GetBlockMessage, + GetWorkMessage, InvBlockMessage, Message, PeerAddressesMessage, PublicKeyBalance, + PublicKeyTransactionsMessage, PushTransactionMessage, PushTransactionResultMessage, + SubmitWorkMessage, SubmitWorkResultMessage, TipHeaderMessage, TransactionMessage, + TransactionRelayPolicyMessage, WorkMessage, +}; +use crate::shutdown::{ShutdownChanReceiver, SpawnedError}; +use crate::tls::client_config; +use crate::transaction::{Transaction, TransactionError, TransactionID}; +use crate::transaction_queue::TransactionQueue; +use crate::transaction_queue_memory::TransactionQueueMemory; +use crate::utils::{now_as_duration, rand_int31}; + +pub type EitherWebSocketStream = + Either>, WebSocketStream>>; + +type WsSink = SplitSink; + +/// A peer client in the network. They all speak WebSocket protocol to each other. +/// Peers could be fully validating and mining nodes or simply wallets. 
+pub struct Peer { + /// peer connection, left = outbound, right = inbound + conn: Option, + genesis_id: &'static BlockID, + peer_store: Arc, + block_store: Arc, + ledger: Arc, + processor: Arc, + tx_queue: Arc, + outbound: bool, + /// peer-local download queue + local_download_queue: BlockQueue, + /// peer-local inflight queue + local_inflight_queue: Arc, + /// global inflight queue + global_inflight_queue: Arc, + ignore_blocks: HashMap, + continuation_block_id: Option, + last_peer_addresses_received_time: Option, + filter: Option>, + addr_chan_tx: AddrChanSender, + work: Option, + pub_keys: Vec, + memo: Option, + read_limit: u32, + addr: SocketAddr, + shutdown_chan_rx: ShutdownChanReceiver, + shutdown_fns: Vec>, +} + +pub struct PeerWork { + work_id: u32, + work_block: Block, + median_timestamp: u64, +} + +type OutChanSender = UnboundedSender; + +/// Timing constants +/// Time allowed to wait for WebSocket connection +pub const CONNECT_WAIT: Duration = Duration::from_secs(10); + +/// Time allowed to write a message to the peer +pub const WRITE_WAIT: Duration = Duration::from_secs(30); + +/// Time allowed to read the next pong message from the peer +const PONG_WAIT: Duration = Duration::from_secs(120); + +/// Send pings to peer with this period. 
Must be less than PONG_WAIT +const PING_PERIOD: Duration = Duration::from_secs(PONG_WAIT.as_secs() / 2); + +/// How often should we refresh this peer's connectivity status with storage +const PEER_STORAGE_REFRESH_PERIOD: Duration = Duration::from_secs(5 * 60); + +/// How often should we request peer addresses from a peer +const GET_PEER_ADDRESSES_PERIOD: Duration = Duration::from_secs(60 * 60); + +/// Time allowed between processing new blocks before we consider a blockchain sync stalled +const SYNC_WAIT: Duration = Duration::from_secs(2 * 60); + +/// Maximum blocks per inv_block message +const MAX_BLOCKS_PER_INV: usize = 500; + +/// Maximum local inflight queue size +const INFLIGHT_QUEUE_MAX: usize = 8; + +/// Maximum local download queue size +const DOWNLOAD_QUEUE_MAX: usize = MAX_BLOCKS_PER_INV * 10; + +pub const PEER_ADDR_SELF: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0); + +impl Peer { + /// Returns a new instance of a peer. + pub fn new( + conn: Option, + genesis_id: &'static BlockID, + peer_store: Arc, + block_store: Arc, + ledger: Arc, + processor: Arc, + tx_queue: Arc, + block_queue: Arc, + addr_chan_tx: AddrChanSender, + addr: SocketAddr, + shutdown_chan_rx: ShutdownChanReceiver, + ) -> Self { + let mut peer = Self { + conn, + genesis_id, + peer_store, + block_store, + ledger, + processor, + tx_queue, + local_download_queue: BlockQueue::new(), + local_inflight_queue: Arc::new(BlockQueue::new()), + global_inflight_queue: block_queue, + ignore_blocks: HashMap::new(), + continuation_block_id: None, + last_peer_addresses_received_time: None, + outbound: false, + filter: None, + addr_chan_tx, + work: None, + pub_keys: Vec::new(), + memo: None, + read_limit: 0, + addr, + shutdown_chan_rx, + shutdown_fns: Vec::new(), + }; + peer.update_read_limit(); + peer + } + + /// Connects outbound to a peer. 
+ pub async fn connect( + &mut self, + nonce: u32, + my_addr: Option, + ) -> Result<(), PeerConnectionError> { + let url = format!("wss://{}/{}", self.addr, &self.genesis_id); + info!("Connecting to {}", url); + + let mut request = url.into_client_request()?; + request + .headers_mut() + .append("Cruzbit-Peer-Nonce", nonce.to_string().parse()?); + + if let Some(my_addr) = my_addr { + request + .headers_mut() + .append("Cruzbit-Peer-Address", my_addr.to_string().parse()?); + } + + self.peer_store.on_connect_attempt(self.addr)?; + + let tls_verify = false; + let client_config = client_config(tls_verify); + let (conn, _response) = match timeout( + CONNECT_WAIT, + connect_async_tls_with_config( + request, + None, + true, + Some(Connector::Rustls(Arc::new(client_config))), + ), + ) + .await + { + Err(err) => { + return Err(PeerConnectionError::Timeout(self.addr, err)); + } + Ok(Ok(v)) => v, + Ok(Err(err)) => { + if let WsError::Http(response) = &err { + if response.status() == StatusCode::TOO_MANY_REQUESTS { + // the peer is already connected to us inbound. + // mark it successful so we try it again in the future. + self.peer_store.on_connect_success(self.addr)?; + self.peer_store.on_disconnect(self.addr)?; + } else { + self.peer_store.on_connect_failure(self.addr)?; + } + } + + return Err(PeerConnectionError::Connect(self.addr, err)); + } + }; + + // left is outbound, right is inbound + self.conn = Some(EitherWebSocketStream::Left(conn)); + self.outbound = true; + self.peer_store + .on_connect_success(self.addr) + .map_err(PeerConnectionError::PeerStorage) + } + + /// Spawns the Peer's main loop. + pub fn spawn(self) -> JoinHandle> { + tokio::spawn(async { self.run().await.map_err(Into::into) }) + } + + /// Runs the Peer's main loop. + /// It manages reading and writing to the peer's WebSocket and facilitating the protocol. 
+ pub async fn run(mut self) -> Result<(), PeerError> { + let conn = self.conn.take().expect("peer should be connected"); + let (mut ws_sender, mut ws_receiver) = conn.split(); + + // on shutdown remove any inflight blocks this peer is no longer going to download + { + let local_inflight_queue = Arc::clone(&self.local_inflight_queue); + let global_inflight_queue = Arc::clone(&self.global_inflight_queue); + self.on_shutdown(Box::new(move || { + while let Some(block_in_flight) = local_inflight_queue.peek() { + local_inflight_queue.remove(&block_in_flight, &PEER_ADDR_SELF); + global_inflight_queue.remove(&block_in_flight, &self.addr); + } + })); + } + + // channel to send outgoing messages + let (out_chan_tx, mut out_chan_rx) = unbounded_channel(); + + // send a find common ancestor request and request peer addresses shortly after connecting + let (on_connect_chan_tx, mut on_connect_chan_rx) = channel(1); + tokio::spawn(async move { + sleep(Duration::from_secs(5)).await; + if let Err(_err) = on_connect_chan_tx.send(true).await { + error!("failed to send on-connect, peer must have shut down"); + } + }); + + // written to by the reader to update the current work block for the peer + let mut get_work_chan = channel::(1); + let mut submit_work_chan = channel::(1); + + // register to hear about tip block changes + let (tip_change_chan_tx, mut tip_change_chan_rx) = unbounded_channel(); + self.processor + .register_for_tip_change(tip_change_chan_tx.clone()); + + // register to hear about new transactions + let (new_tx_chan_tx, mut new_tx_chan_rx) = + channel(MAX_TRANSACTIONS_TO_INCLUDE_PER_BLOCK as usize); + self.processor + .register_for_new_transactions(new_tx_chan_tx.clone()); + + // unregister from the processor on shutdown + { + let processor = Arc::clone(&self.processor); + self.on_shutdown(Box::new(move || { + processor.unregister_for_tip_change(tip_change_chan_tx.clone()); + processor.unregister_for_new_transactions(new_tx_chan_tx.clone()); + })); + } + + // send the 
peer pings + let mut ticker_ping = interval_at(Instant::now() + PING_PERIOD, PING_PERIOD); + + // update the peer store with the peer's connectivity + let mut ticker_peer_store_refresh = interval_at( + Instant::now() + PEER_STORAGE_REFRESH_PERIOD, + PEER_STORAGE_REFRESH_PERIOD, + ); + + // request new peer addresses + let mut ticker_get_peer_addresses = interval_at( + Instant::now() + GET_PEER_ADDRESSES_PERIOD, + GET_PEER_ADDRESSES_PERIOD, + ); + + // check to see if we need to update work for miners + let interval = Duration::from_secs(30); + let mut ticker_update_work_check = interval_at(Instant::now() + interval, interval); + + // update the peer store on disconnection + if self.outbound { + let peer_store = Arc::clone(&self.peer_store); + self.on_shutdown(Box::new(move || { + if let Err(err) = peer_store + .on_disconnect(self.addr) + .map_err(PeerError::PeerStorage) + { + error!("{:?}", err); + } + })); + } + + // are we syncing? + let mut last_new_block_time = Instant::now(); + let (ibd, _height) = + PeerManager::is_initial_block_download(&self.ledger, &self.block_store)?; + + loop { + tokio::select! 
{ + msg = timeout(PONG_WAIT, ws_receiver.next()) => { + let message = match msg { + Err(err) => { + break Err(PeerConnectionError::Timeout(self.addr, err).into()) + } + Ok(Some(Ok(v))) => v, + Ok(Some(Err(err))) => { + break Err(PeerConnectionError::Websocket(err).into()) + } + Ok(None) => { + break Err(PeerConnectionError::Dropped(self.addr).into()) + }, + }; + + // new message from peer + match message { + WsMessage::Text(ref json) => { + // sanitize inputs + if let Err(err) = from_utf8(json.as_bytes()).map_err(|err| PeerError::MessageNotUtf8(self.addr, DataError::String(err))) { + break Err(err) + } + + let message = match serde_json::from_str::(json).map_err(|err| PeerError::MessageInvalid(self.addr, JsonError::Deserialize(err))) { + Ok(v) => v, + Err(err) => { + break Err(err) + } + }; + + // hangup if the peer is sending oversized messages (other than blocks) + if !matches!(message, Message::Block(_)) + && json.len() > MAX_PROTOCOL_MESSAGE_LENGTH + { + break Err(PeerError::MessageLengthExceeded( + json.len(), + message.to_string(), + self.addr + )) + } + + match message { + Message::Block(Some(b)) => { + if let Some(block) = b.block { + if let Err(err) = self.on_block(block, ibd, &out_chan_tx).await { + error!("{:?}, from: {}", err, self.addr); + continue + + } else { + last_new_block_time = Instant::now(); + } + } else { + break Err(PeerError::EmptyBlockReceived(self.addr)) + } + } + + Message::FilterAdd(fa) => { + if let Err(err) = self.on_filter_add(fa.public_keys, &out_chan_tx).await { + error!("{:?}, from: {}", err, self.addr); + continue + } + } + + Message::FilterLoad(fl) => { + if let Err(err) = self.on_filter_load(fl.r#type, fl.filter, &out_chan_tx).await { + error!("{:?}, from: {}", err, self.addr); + continue + } + } + + Message::FindCommonAncestor(fca) => { + let length = fca.block_ids.len(); + for (i, id) in + fca.block_ids.into_iter().enumerate() + { + match self + .on_find_common_ancestor(&id, i, length, &out_chan_tx).await { + Ok(ok) => { + if 
ok { + break + } + }, + Err(err) => { + error!("{:?}, from: {}", err, self.addr); + // don't need to process more + break + } + } + } + } + + Message::GetBalance(gb) => { + if let Err(err) = self.on_get_balance(gb.public_key, &out_chan_tx).await { + error!("{:?}, from: {}", err, self.addr); + continue; + + } + } + + Message::GetBalances(gb) => { + if let Err(err) = self.on_get_balances(gb.public_keys, &out_chan_tx).await { + error!("{:?}, from: {}", err, self.addr); + continue + } + } + + Message::GetBlock(gb) => { + if let Err(err) = self.on_get_block(gb.block_id, &out_chan_tx).await { + error!("{:?}, from: {}", err, self.addr); + continue + } + } + + Message::GetBlockByHeight(gbbh) => { + if let Err(err) = self.on_get_block_by_height(gbbh.height, &out_chan_tx).await { + error!("{:?}, from: {}", err, self.addr); + continue + } + } + + Message::GetBlockHeader(gbh) => { + if let Err(err) = self.on_get_block_header(gbh.block_id, &out_chan_tx).await { + error!("{:?}, from: {}", err, self.addr); + continue + } + } + + Message::GetBlockHeaderByHeight(gbhbh) => { + if let Err(err) = self + .on_get_block_header_by_height(gbhbh.height, &out_chan_tx).await { + error!("{:?}, from: {}", err, self.addr); + continue + } + } + + Message::GetFilterTransactionQueue => { + if let Err(err) = self.on_get_filter_transaction_queue(&out_chan_tx).await { + error!("{:?}, from: {}", err, self.addr); + continue + } + } + + Message::GetPeerAddresses => { + if let Err(err) = self.on_get_peer_addresses(&out_chan_tx).await { + error!("{:?}, from: {}", err, self.addr); + continue + } + } + + Message::GetPublicKeyTransactions(gpkt) => { + if let Err(err) = self + .on_get_public_key_transactions( + gpkt.public_key, + gpkt.start_height, + gpkt.end_height, + gpkt.start_index, + gpkt.limit, + &out_chan_tx + ).await { + error!("{:?}, from: {}", err, self.addr); + continue + } + } + + Message::GetTipHeader => { + if let Err(err) = self.on_get_tip_header(&out_chan_tx).await { + error!("{:?}, from: {}", 
err, self.addr); + continue + } + } + + Message::GetTransaction(gt) => { + if let Err(err) = self.on_get_transaction(gt.transaction_id, &out_chan_tx).await { + error!("{:?}, from: {}", err, self.addr); + continue + } + } + + Message::GetTransactionResult(ptr) => { + if let Some(err) = ptr.error { + error!("{:?}, from: {}", err, self.addr);} + } + + Message::GetTransactionRelayPolicy => { + out_chan_tx.send(Message::TransactionRelayPolicy(TransactionRelayPolicyMessage { + min_fee: MIN_FEE_CRUZBITS, + min_amount: MIN_AMOUNT_CRUZBITS, + }))?; + } + + Message::GetWork(gw) => { + info!("Received get_work message, from: {}", self.addr); + get_work_chan.0.send(gw).await?; + } + + Message::InvBlock(inv) => { + let block_ids_len = inv.block_ids.len(); + for (i, id) in + inv.block_ids.into_iter().enumerate() + { + if let Err(err) = self.on_inv_block(id, i, block_ids_len, &out_chan_tx).await { + error!("{:?}, from: {}", err, self.addr); + continue + } + } + } + + Message::PeerAddresses(pa) => { + if let Err(err) = self.on_peer_addresses(pa.addresses).await { + error!("{:?}, from: {}", err, self.addr); + continue + } + } + + Message::PushTransaction(pt) => { + if let Err(err) = self.on_push_transaction(pt.transaction, &out_chan_tx).await { + error!("{:?}, from: {}", err, self.addr); + continue + } + } + + Message::PushTransactionResult(ptr) => { + if let Some(err) = ptr.error { + error!("{}, from: {}", err, self.addr); + } + } + + Message::SubmitWork(sw) => { + info!("Received submit_work message, from: {}", self.addr); + submit_work_chan.0.send(sw).await?; + } + + _ => { + error!("Unknown message: {}, from: {}", message, self.addr);} + } + } + + WsMessage::Close(_) => { + info!( + "Received close message from: {}", + self.addr + ); + break Ok(()) + } + + WsMessage::Pong(_) => { + // handle pongs + if ibd { + // handle stalled blockchain syncs + let (ibd, _height) = PeerManager::is_initial_block_download(&self.ledger, &self.block_store)?; + let elapsed = 
last_new_block_time.elapsed(); + if ibd && elapsed > SYNC_WAIT { + break Err(PeerError::SyncStalled(self.addr)) + } + } else { + // try processing the queue in case we've been blocked by another client + // and their attempt has now expired + self.process_download_queue(&out_chan_tx).await?; + } + } + + // ignore other message types + _ => {}, + }; + } + + msg = out_chan_rx.recv() => { + match msg { + Some(message) => { + let json = serde_json::to_string(&message).map_err(JsonError::Serialize)?; + self.send_with_timeout(&mut ws_sender, WsMessage::Text(json)).await?; + }, + None => { + // close the connection if the tx is dropped + self.send_with_timeout(&mut ws_sender, WsMessage::Close(None)).await?; + } + } + } + + Some(tip) = tip_change_chan_rx.recv() => { + // update read limit if necessary + self.update_read_limit(); + + if tip.connect && !tip.more { + // only build off newly connected tip blocks. + // create and send out new work if necessary + self.create_new_work_block(&tip.block_id, &tip.block.header, &out_chan_tx).await?; + } + + if tip.source == self.addr { + continue + } + + if tip.connect { + // new tip announced, notify the peer + let inv = Message::InvBlock(InvBlockMessage { + block_ids: vec![tip.block_id] + }); + // send it + let json = serde_json::to_string(&inv).map_err(JsonError::Serialize)?; + self.send_with_timeout(&mut ws_sender, WsMessage::Text(json)).await?; + } + + // potentially create a filter_block + let fb = match self.create_filter_block(tip.block_id, tip.block) { + Ok(Some(v)) => v, + Ok(None) => continue, + Err(err) => { + error!("{:?}, to: {}", err, self.addr); + continue + } + }; + + // send it + let transactions_len = fb.transactions.len(); + + let (message, r#type) = if !tip.connect { + (Message::FilterBlockUndo(fb), "filter_block_undo") + } else { + (Message::FilterBlock(fb), "filter_block") + }; + + info!("Sending {} with {} transaction(s), to: {}", r#type, transactions_len, self.addr); + let json = 
serde_json::to_string(&message).map_err(JsonError::Serialize)?; + self.send_with_timeout(&mut ws_sender, WsMessage::Text(json)).await?; + } + + Some(new_tx) = new_tx_chan_rx.recv() => { + if new_tx.source == self.addr { + // this is who sent it to us + continue + } + + if !self.filter_lookup(&new_tx.transaction) { + continue + } + + // newly verified transaction announced, relay to peer + let push_tx = Message::PushTransaction(PushTransactionMessage { + transaction: new_tx.transaction, + }); + let json = serde_json::to_string(&push_tx).map_err(JsonError::Serialize)?; + self.send_with_timeout(&mut ws_sender, WsMessage::Text(json)).await?; + } + + Some(_) = on_connect_chan_rx.recv() => { + // send a new peer a request to find a common ancestor + self.send_find_common_ancestor(None, Some(&mut ws_sender), &out_chan_tx).await?; + + // send a get_peer_addresses to request peers + info!("Sending get_peer_addresses to: {}", self.addr); + let message = Message::GetPeerAddresses; + let json = serde_json::to_string(&message).map_err(JsonError::Serialize)?; + self.send_with_timeout(&mut ws_sender, WsMessage::Text(json)).await?; + } + + Some(gw) = get_work_chan.1.recv() => { + if let Err(err) = self.on_get_work(gw, &out_chan_tx).await { + error!("{:?}, from: {}", err, self.addr); + } + } + + Some(sw) = submit_work_chan.1.recv() => { + if let Err(err) = self.on_submit_work(sw, &out_chan_tx).await { + error!("{:?}, from: {}", err, self.addr); + } + } + + _ = ticker_ping.tick() => { + self.send_with_timeout(&mut ws_sender, WsMessage::Ping(vec![])).await?; + } + + _ = ticker_peer_store_refresh.tick(), if self.outbound => { + // periodically refresh our connection time + if let Err(err) = self.peer_store.on_connect_success(self.addr).map_err(PeerError::PeerStorage) { + error!("{:?}, from: {}", err, self.addr); + } + } + + _ = ticker_get_peer_addresses.tick() => { + // periodically send a get_peer_addresses + info!("Sending get_peer_addresses to: {}", self.addr); + let message = 
Message::GetPeerAddresses; + let json = serde_json::to_string(&message).map_err(JsonError::Serialize)?; + self.send_with_timeout(&mut ws_sender, WsMessage::Text(json)).await?; + } + + _ = ticker_update_work_check.tick(), if self.work.is_some() => { + let work = self.work.as_ref().expect("work should exist"); + let tx_count = work.work_block.transactions.len(); + if tx_count == MAX_TRANSACTIONS_TO_INCLUDE_PER_BLOCK as usize { + // already at capacity + continue + } + if tx_count - 1 != self.tx_queue.len() { + let Some((tip_id, tip_header, _tip_when)) = Processor::get_chain_tip_header(&self.ledger, &self.block_store)? else { + break Err(LedgerNotFoundError::ChainTip.into()) + }; + self.create_new_work_block(&tip_id, &tip_header, &out_chan_tx).await?; + } + } + + _ = &mut self.shutdown_chan_rx => { + ws_sender.close().await.map_err(PeerConnectionError::Websocket)?; + break Ok(()) + } + } + } + } + + /// Handle a message from a peer indicating block inventory available for download + async fn on_inv_block( + &mut self, + id: BlockID, + index: usize, + length: usize, + out_chan_tx: &OutChanSender, + ) -> Result<(), PeerError> { + info!("Received inv_block: {}, from: {}", id, self.addr); + + if length > MAX_BLOCKS_PER_INV { + return Err(PeerError::InvBlockMaxBlocks(length, MAX_BLOCKS_PER_INV)); + } + + // is it on the ignore list? + if self.ignore_blocks.get(&id).is_some() { + info!("Ignoring block {}, from: {}", id, self.addr); + return Ok(()); + } + + // do we have it queued or inflight already? + if self.local_download_queue.exists(&id) || self.local_inflight_queue.exists(&id) { + info!( + "Block {} is already queued or inflight for download, from: {}", + id, self.addr + ); + return Ok(()); + } + + // have we processed it? + let branch_type = self.ledger.get_branch_type(&id)?; + if branch_type != BranchType::Unknown { + info!("Already processed block {}", id); + if length > 1 && index + 1 == length { + // we might be on a deep side chain. 
this will get us the next 500 blocks + return self + .send_find_common_ancestor(Some(id), None, out_chan_tx) + .await; + } + return Ok(()); + } + + if self.local_download_queue.len() >= DOWNLOAD_QUEUE_MAX { + info!( + "Too many blocks in the download queue {}, max: {}, for: {}", + self.local_download_queue.len(), + DOWNLOAD_QUEUE_MAX, + self.addr + ); + // don't return an error just stop adding them to the queue + return Ok(()); + } + + // add block to this peer's download queue + self.local_download_queue.add(&id, &PEER_ADDR_SELF); + + // process the download queue + self.process_download_queue(out_chan_tx).await + } + + /// Handle a request for a block from a peer + async fn on_get_block( + &mut self, + id: BlockID, + out_chan_tx: &OutChanSender, + ) -> Result<(), PeerError> { + info!("Received get_block: {}, from: {}", id, self.addr); + self.get_block(id, out_chan_tx).await + } + + /// Handle a request for a block by height from a peer + async fn on_get_block_by_height( + &mut self, + height: u64, + out_chan_tx: &OutChanSender, + ) -> Result<(), PeerError> { + info!( + "Received get_block_by_height: {}, from: {}", + height, self.addr + ); + let id = match self.ledger.get_block_id_for_height(height) { + Ok(Some(v)) => v, + Ok(None) => { + // not found + out_chan_tx.send(Message::Block(None))?; + return Err(LedgerNotFoundError::BlockIDForHeight(height).into()); + } + Err(err) => { + // not found + out_chan_tx.send(Message::Block(None))?; + return Err(err.into()); + } + }; + + self.get_block(id, out_chan_tx).await + } + + async fn get_block( + &mut self, + id: BlockID, + out_chan_tx: &OutChanSender, + ) -> Result<(), PeerError> { + // fetch the block + let block_json = match self.block_store.get_block_bytes(&id) { + Ok(Some(v)) => v, + Ok(None) => { + // not found + out_chan_tx.send(Message::Block(Some(Box::new(BlockMessage { + block_id: id, + block: None, + }))))?; + + return Err(BlockStorageNotFoundError::BlockBytes(id).into()); + } + Err(err) => { + // not found 
+ out_chan_tx.send(Message::Block(Some(Box::new(BlockMessage { + block_id: id, + block: None, + }))))?; + + return Err(err.into()); + } + }; + + // send out the raw bytes + let mut body = Vec::new(); + body.extend_from_slice(br#"{"block_id":""#); + body.extend_from_slice(id.as_hex().as_bytes()); + body.extend_from_slice(br#"","block":"#); + body.extend_from_slice(&block_json); + body.extend_from_slice(br#"}"#); + + let block_message = + serde_json::from_slice::(&body).map_err(JsonError::Deserialize)?; + out_chan_tx.send(Message::Block(Some(Box::new(block_message))))?; + + // was this the last block in the inv we sent in response to a find common ancestor request? + if let Some(ref continuation_block_id) = self.continuation_block_id { + if id == *continuation_block_id { + info!( + "Received get_block for continuation block {}, from: {}", + id, self.addr + ); + + self.continuation_block_id = None; + + // send an inv for our tip block to prompt the peer to + // send another find common ancestor request to complete its download of the chain. + let chain_tip = match self.ledger.get_chain_tip().map_err(PeerError::Ledger) { + Ok(v) => v, + Err(err) => { + error!("{:?}", err); + return Ok(()); + } + }; + + if let Some((tip_id, _height)) = chain_tip { + out_chan_tx.send(Message::InvBlock(InvBlockMessage { + block_ids: vec![tip_id], + }))?; + } + } + } + + Ok(()) + } + + /// Handle receiving a block from a peer. Returns true if the block was newly processed and accepted. + async fn on_block( + &mut self, + block: Block, + ibd: bool, + out_chan_tx: &OutChanSender, + ) -> Result { + // the message has the ID in it but we can't trust that. 
+ // it's provided as convenience for trusted peering relationships only + let id = block.id()?; + + info!("Received block: {}, from: {}", id, self.addr); + + match self.local_inflight_queue.peek() { + Some(peek) => { + if peek != id { + // disconnect misbehaving peer + return Err(PeerError::ReceivedUnrequestedBlock); + } + } + None => { + // disconnect misbehaving peer + return Err(PeerError::ReceivedUnrequestedBlock); + } + }; + + // don't process low difficulty blocks + if !ibd && CHECKPOINTS_ENABLED && block.header.height < LATEST_CHECKPOINT_HEIGHT { + // don't disconnect them. they may need us to find out about the real chain + self.local_inflight_queue.remove(&id, &PEER_ADDR_SELF); + self.global_inflight_queue.remove(&id, &self.addr); + + // ignore future inv_blocks for this block + self.ignore_blocks.insert(id, true); + if self.ignore_blocks.len() > MAX_BLOCKS_PER_INV { + // they're intentionally sending us bad blocks + return Err(PeerError::MaxIgnoreListSizeExceeded); + } + + return Err(PeerError::BlockAtHeightLessThanCheckpoint( + id, + block.header.height, + LATEST_CHECKPOINT_HEIGHT, + )); + } + + let mut accepted = false; + + // is it an orphan? 
+ match self.block_store.get_block_header(&block.header.previous) { + Ok(Some((_header, _height))) => { + // process the block + if let Err(err) = self + .processor + .process_candidate_block(id, block, self.addr) + .await + { + // TODO: disconnect from peer here + // disconnect a peer that sends us a bad block + return Err(err.into()); + } + // newly accepted block + accepted = true; + + // remove it from the inflight queues only after we process it + self.local_inflight_queue.remove(&id, &PEER_ADDR_SELF); + self.global_inflight_queue.remove(&id, &self.addr); + } + Ok(None) => { + self.local_inflight_queue.remove(&id, &PEER_ADDR_SELF); + self.global_inflight_queue.remove(&id, &self.addr); + + info!( + "Block {} is an orphan, sending find_common_ancestor to: {}", + id, self.addr + ); + + // send a find common ancestor request + self.send_find_common_ancestor(None, None, out_chan_tx) + .await?; + } + Err(err) => { + self.local_inflight_queue.remove(&id, &PEER_ADDR_SELF); + self.global_inflight_queue.remove(&id, &self.addr); + return Err(err.into()); + } + }; + + // see if there are any more blocks to download right now + self.process_download_queue(out_chan_tx).await?; + + Ok(accepted) + } + + /// Try requesting blocks that are in the download queue + async fn process_download_queue( + &mut self, + out_chan_tx: &OutChanSender, + ) -> Result<(), PeerError> { + // fill up as much of the inflight queue as possible + let mut queued = 0; + + while self.local_inflight_queue.len() < INFLIGHT_QUEUE_MAX { + // next block to download + let Some(block_to_download) = self.local_download_queue.peek() else { + // no more blocks in the queue + break; + }; + + // double-check if it's been processed since we last checked + let branch_type = self.ledger.get_branch_type(&block_to_download)?; + if branch_type != BranchType::Unknown { + // it's been processed. 
remove it and check the next one + info!( + "Block {} has been processed, removing from download queue for: {}", + block_to_download, self.addr + ); + self.local_download_queue + .remove(&block_to_download, &PEER_ADDR_SELF); + continue; + } + + // add block to the global inflight queue with this peer as the owner + { + if !self + .global_inflight_queue + .add(&block_to_download, &self.addr) + { + // another peer is downloading it right now. + // wait to see if they succeed before trying to download any others + info!( + "Block {} is being downloaded already from another peer", + block_to_download + ); + break; + } + } + + // pop it off the local download queue + self.local_download_queue + .remove(&block_to_download, &PEER_ADDR_SELF); + + // mark it inflight locally + self.local_inflight_queue + .add(&block_to_download, &PEER_ADDR_SELF); + queued += 1; + + // request it + info!( + "Sending get_block for {}, to: {}", + block_to_download, self.addr + ); + out_chan_tx.send(Message::GetBlock(GetBlockMessage { + block_id: block_to_download, + }))?; + } + + if queued > 0 { + info!( + "Requested {} block(s) for download, from: {}", + queued, self.addr + ); + info!( + "Queue size: {}, peer inflight: {}, global inflight: {}, for: {}", + self.local_download_queue.len(), + self.local_inflight_queue.len(), + self.global_inflight_queue.len(), + self.addr + ); + } + + Ok(()) + } + + /// Send a message to look for a common ancestor with a peer + /// Might be called from reader or writer context. sender means we're in the writer context + async fn send_find_common_ancestor( + &self, + mut start_id: Option, + ws_sender: Option<&mut WsSink>, + out_chan_tx: &OutChanSender, + ) -> Result<(), PeerError> { + info!("Sending find_common_ancestor to: {}", self.addr); + + let mut height = match start_id { + Some(id) => { + let Some((header, _when)) = self.block_store.get_block_header(&id)? 
else { + info!("No header for block {}", id); + return Ok(()); + }; + header.height + } + None => { + let Some((id_tip, height_tip)) = self.ledger.get_chain_tip()? else { + return Ok(()); + }; + start_id = Some(id_tip); + height_tip + } + }; + + let mut block_id = start_id; + let mut ids = Vec::new(); + let mut step = 1; + while let Some(id) = block_id { + if id == *self.genesis_id { + break; + } + ids.push(id); + let depth = height - step; + if depth == 0 { + break; + } + block_id = match self + .ledger + .get_block_id_for_height(depth) + .map_err(PeerError::Ledger) + { + Ok(v) => v, + Err(err) => { + error!("{:?}", err); + return Ok(()); + } + }; + if ids.len() > 10 { + step *= 2; + } + height = depth; + } + ids.push(*self.genesis_id); + let message = Message::FindCommonAncestor(FindCommonAncestorMessage { block_ids: ids }); + + // send immediately if the sender is passed in + if let Some(ws_sender) = ws_sender { + let json = serde_json::to_string(&message).map_err(JsonError::Serialize)?; + self.send_with_timeout(ws_sender, WsMessage::Text(json)) + .await + .map_err(PeerError::PeerConnection)?; + return Ok(()); + } + out_chan_tx.send(message)?; + + Ok(()) + } + + /// Handle a find common ancestor message from a peer + async fn on_find_common_ancestor( + &mut self, + id: &BlockID, + index: usize, + length: usize, + out_chan_tx: &OutChanSender, + ) -> Result { + info!( + "Received find_common_ancestor: {}, index: {}, length: {}, from: {}", + id, index, length, self.addr + ); + + let Some((header, _when)) = self.block_store.get_block_header(id)? else { + return Ok(false); + }; + + // have we processed it? 
+ let branch_type = self.ledger.get_branch_type(id)?; + if branch_type != BranchType::Main { + // not on the main branch + return Ok(false); + } + + info!( + "Common ancestor found: {}, height: {}, with: {}", + id, header.height, self.addr + ); + + let mut ids = Vec::new(); + let mut height = header.height + 1; + + while ids.len() < MAX_BLOCKS_PER_INV { + let Some(next_id) = self.ledger.get_block_id_for_height(height)? else { + break; + }; + info!( + "Queueing inv for block {}, height: {}, to: {}", + next_id, height, self.addr + ); + ids.push(next_id); + height += 1; + } + + if !ids.is_empty() { + // save the last ID so after the peer requests it we can trigger it to + // send another find common ancestor request to finish downloading the rest of the chain + let continuation_block_id = ids[ids.len() - 1]; + info!( + "Sending inv_block with {} IDs, continuation block: {}, to: {}", + ids.len(), + continuation_block_id, + self.addr + ); + self.continuation_block_id = Some(continuation_block_id); + + out_chan_tx.send(Message::InvBlock(InvBlockMessage { block_ids: ids }))?; + } + + Ok(true) + } + + /// Handle a request for a block header from a peer + async fn on_get_block_header( + &self, + id: BlockID, + out_chan_tx: &OutChanSender, + ) -> Result<(), PeerError> { + info!("Received get_block_header: {}, from: {}", id, self.addr); + + self.get_block_header(id, out_chan_tx).await + } + + /// Handle a request for a block header by ID from a peer + async fn on_get_block_header_by_height( + &self, + height: u64, + out_chan_tx: &OutChanSender, + ) -> Result<(), PeerError> { + info!( + "Received get_block_header_by_height: {}, from: {}", + height, self.addr + ); + let id = match self.ledger.get_block_id_for_height(height) { + Ok(Some(v)) => v, + Ok(None) => { + // not found + out_chan_tx.send(Message::BlockHeader(None))?; + return Err(LedgerNotFoundError::BlockIDForHeight(height).into()); + } + Err(err) => { + // not found + out_chan_tx.send(Message::BlockHeader(None))?; + 
return Err(err.into()); + } + }; + + self.get_block_header(id, out_chan_tx).await + } + + async fn get_block_header( + &self, + block_id: BlockID, + out_chan_tx: &OutChanSender, + ) -> Result<(), PeerError> { + let (header, _when) = match self.block_store.get_block_header(&block_id) { + Ok(Some(v)) => v, + Ok(None) => { + // not found + out_chan_tx.send(Message::BlockHeader(Some(BlockHeaderMessage { + block_id, + block_header: None, + })))?; + + return Err(BlockStorageNotFoundError::BlockHeader(block_id).into()); + } + Err(err) => { + // not found + out_chan_tx.send(Message::BlockHeader(Some(BlockHeaderMessage { + block_id, + block_header: None, + })))?; + + return Err(err.into()); + } + }; + out_chan_tx.send(Message::BlockHeader(Some(BlockHeaderMessage { + block_id, + block_header: Some(header), + })))?; + + Ok(()) + } + + /// Handle a request for a public key's balancep + async fn on_get_balance( + &self, + pub_key: PublicKey, + out_chan_tx: &OutChanSender, + ) -> Result<(), PeerError> { + info!("Received get_balance from {}", self.addr); + + let (balances, tip_id, tip_height) = + match self.ledger.get_public_key_balances(vec![pub_key]) { + Ok(v) => v, + Err(err) => { + out_chan_tx.send(Message::Balance(BalanceMessage { + block_id: None, + height: None, + public_key: Some(pub_key), + balance: None, + error: Some(err.to_string()), + }))?; + + return Err(err.into()); + } + }; + + // TODO: balance = balances[0]? + let mut balance = 0; + for (_pub_key, bal) in balances { + balance = bal; + } + + out_chan_tx.send(Message::Balance(BalanceMessage { + block_id: Some(tip_id), + height: Some(tip_height), + public_key: Some(pub_key), + balance: Some(balance), + error: None, + }))?; + + Ok(()) + } + + /// Handle a request for a set of public key balances. 
+ async fn on_get_balances( + &self, + pub_keys: Vec, + out_chan_tx: &OutChanSender, + ) -> Result<(), PeerError> { + info!( + "Received get_balances (count: {}) from: {}", + pub_keys.len(), + self.addr + ); + + let max_public_keys = 64; + if pub_keys.len() > max_public_keys { + let err = PeerBalancesError::PublicKeysExceeded(max_public_keys); + out_chan_tx.send(Message::Balances(BalancesMessage { + error: Some(err.to_string()), + block_id: None, + height: None, + balances: None, + }))?; + + return Err(err.into()); + } + + let (balances, tip_id, tip_height) = match self.ledger.get_public_key_balances(pub_keys) { + Ok(v) => v, + Err(err) => { + out_chan_tx.send(Message::Balances(BalancesMessage { + block_id: None, + height: None, + balances: None, + error: Some(err.to_string()), + }))?; + return Err(err.into()); + } + }; + + let mut pub_key_balances = Vec::with_capacity(balances.len()); + for (public_key, balance) in balances { + pub_key_balances.push(PublicKeyBalance { + public_key, + balance, + }); + } + out_chan_tx.send(Message::Balances(BalancesMessage { + block_id: Some(tip_id), + height: Some(tip_height), + balances: Some(pub_key_balances), + error: None, + }))?; + + Ok(()) + } + + /// Handle a request for a public key's transactions over a given height range + async fn on_get_public_key_transactions( + &self, + pub_key: PublicKey, + start_height: u64, + end_height: u64, + start_index: u32, + mut limit: usize, + out_chan_tx: &OutChanSender, + ) -> Result<(), PeerError> { + info!("Received get_public_key_transactions from {}", self.addr); + // enforce our limit + if limit > 32 || limit == 0 { + limit = 32; + } + + // get the indices for all transactions for the given public key + // over the given range of block heights + let (block_ids, indices, stop_height, stop_index) = + match self.ledger.get_public_key_transaction_indices_range( + pub_key, + start_height, + end_height, + start_index, + limit, + ) { + Ok(v) => v, + Err(err) => { + 
out_chan_tx.send(Message::PublicKeyTransactions( + PublicKeyTransactionsMessage { + public_key: None, + start_height: None, + stop_height: None, + stop_index: None, + filter_blocks: None, + error: Some(err.to_string()), + }, + ))?; + + return Err(err.into()); + } + }; + + // build filter blocks from the indices + let mut fbs = Vec::new(); + + for (i, block_id) in block_ids.into_iter().enumerate() { + // fetch transaction and header + let (tx, block_header) = match self.block_store.get_transaction(&block_id, indices[i]) { + Ok((Some(tx), header)) => (tx, header), + Ok((None, _header)) => { + let err = PeerError::PublicKeyTransactionNotFound(block_id, indices[i]); + error!("{:?}", err); + continue; + } + Err(err) => { + // odd case. just log it and continue + let err = + PeerError::PublicKeyTransactionBlockStorage(block_id, indices[i], err); + error!("{:?}", err); + continue; + } + }; + + // figure out where to put it + if fbs.is_empty() { + // new block + let fb = FilterBlockMessage { + block_id, + header: block_header, + transactions: vec![tx], + }; + fbs.push(fb) + } else if fbs[fbs.len() - 1].block_id != block_id { + // new block + let fb = FilterBlockMessage { + block_id, + header: block_header, + transactions: vec![tx], + }; + fbs.push(fb); + } else { + // transaction is from the same block + let last_index = fbs.len() - 1; + let fb = &mut fbs[last_index]; + fb.transactions.push(tx); + }; + } + + out_chan_tx.send(Message::PublicKeyTransactions( + PublicKeyTransactionsMessage { + public_key: Some(pub_key), + start_height: Some(start_height), + stop_height: Some(stop_height), + stop_index: Some(stop_index), + filter_blocks: Some(fbs), + error: None, + }, + ))?; + + Ok(()) + } + + /// Handle a request for a transaction + async fn on_get_transaction( + &self, + tx_id: TransactionID, + out_chan_tx: &OutChanSender, + ) -> Result<(), PeerError> { + info!( + "Received get_transaction for {}, from: {}", + tx_id, self.addr + ); + + let (block_id, index) = match 
self.ledger.get_transaction_index(&tx_id) { + Ok(Some(v)) => v, + Ok(None) => { + // not found + out_chan_tx.send(Message::Transaction(TransactionMessage { + block_id: None, + height: None, + transaction_id: tx_id, + transaction: None, + }))?; + + return Err(LedgerNotFoundError::TransactionAtIndex(tx_id).into()); + } + Err(err) => { + // not found + out_chan_tx.send(Message::Transaction(TransactionMessage { + block_id: None, + height: None, + transaction_id: tx_id, + transaction: None, + }))?; + + return Err(err.into()); + } + }; + + let (tx, block_header) = match self.block_store.get_transaction(&block_id, index) { + Ok((Some(tx), header)) => (tx, header), + Ok((None, header)) => { + // odd case but send back what we know at least + out_chan_tx.send(Message::Transaction(TransactionMessage { + block_id: Some(block_id), + height: Some(header.height), + transaction_id: tx_id, + transaction: None, + }))?; + + return Err( + BlockStorageNotFoundError::TransactionAtBlockIndex(block_id, index).into(), + ); + } + Err(err) => { + // odd case but send back what we know at least + out_chan_tx.send(Message::Transaction(TransactionMessage { + block_id: Some(block_id), + height: None, + transaction_id: tx_id, + transaction: None, + }))?; + + return Err(err.into()); + } + }; + + out_chan_tx.send(Message::Transaction(TransactionMessage { + block_id: Some(block_id), + height: Some(block_header.height), + transaction_id: tx_id, + transaction: Some(tx), + }))?; + + Ok(()) + } + + /// Handle a request for a block header of the tip of the main chain from a peer + async fn on_get_tip_header(&self, out_chan_tx: &OutChanSender) -> Result<(), PeerError> { + info!("Received get_tip_header, from: {}", self.addr); + let (tip_id, tip_header, tip_when) = + match Processor::get_chain_tip_header(&self.ledger, &self.block_store) { + Ok(Some(v)) => v, + Ok(None) => { + // shouldn't be possible + out_chan_tx.send(Message::TipHeader(None))?; + return Err(LedgerNotFoundError::ChainTipHeader.into()); + 
} + Err(err) => { + // shouldn't be possible + out_chan_tx.send(Message::TipHeader(None))?; + return Err(err.into()); + } + }; + out_chan_tx.send(Message::TipHeader(Some(TipHeaderMessage { + block_id: tip_id, + block_header: tip_header, + time_seen: tip_when, + })))?; + + Ok(()) + } + + /// Handle receiving a transaction from a peer + async fn on_push_transaction( + &self, + tx: Transaction, + out_chan_tx: &OutChanSender, + ) -> Result<(), PeerError> { + let id = match tx.id() { + Ok(v) => v, + Err(err) => { + out_chan_tx.send(Message::PushTransactionResult( + PushTransactionResultMessage { + transaction_id: None, + error: Some(err.to_string()), + }, + ))?; + return Err(err.into()); + } + }; + + info!("Received push_transaction: {}, from: {}", id, self.addr); + + // process the transaction if this is the first time we've seen it + let mut err_str = None; + if !self.tx_queue.exists(&id) { + if let Err(err) = self + .processor + .process_candidate_transaction(&id, &tx, &self.addr) + .await + { + err_str = Some(format!("{:?}", err)); + } + }; + + out_chan_tx.send(Message::PushTransactionResult( + PushTransactionResultMessage { + transaction_id: Some(id), + error: err_str, + }, + ))?; + + Ok(()) + } + + /// Handle a request to set a transaction filter for the connection + async fn on_filter_load( + &mut self, + filter_type: String, + exported_filter: ExportedCuckooFilter, + out_chan_tx: &OutChanSender, + ) -> Result<(), PeerError> { + info!( + "Received filter_load (size: {}), from: {}", + exported_filter.length, self.addr + ); + + // check filter type + if filter_type != "cuckoo" { + let err = PeerFilterError::TypeUnsupported(filter_type); + out_chan_tx.send(Message::FilterResult(Some(FilterResultMessage { + error: err.to_string(), + })))?; + + return Err(err.into()); + } + + // check limit + let max_size = 1 << 16; + if exported_filter.length > max_size { + let err = PeerFilterError::SizeExceeded(max_size); + 
out_chan_tx.send(Message::FilterResult(Some(FilterResultMessage { + error: err.to_string(), + })))?; + + return Err(err.into()); + } + + // decode it + let filter = CuckooFilter::::from(exported_filter); + if filter.is_empty() { + let err = PeerFilterError::CreateFailed; + out_chan_tx.send(Message::FilterResult(Some(FilterResultMessage { + error: err.to_string(), + })))?; + + return Err(err.into()); + } + + // set the filter + self.filter = Some(filter); + + // send the empty result + out_chan_tx.send(Message::FilterResult(None))?; + + Ok(()) + } + + /// Handle a request to add a set of public keys to the filter + async fn on_filter_add( + &mut self, + pub_keys: Vec, + out_chan_tx: &OutChanSender, + ) -> Result<(), PeerError> { + info!( + "Received filter_add (public keys: {}), from: {}", + pub_keys.len(), + self.addr + ); + + // check limit + let max_public_keys = 256; + if pub_keys.len() > max_public_keys { + let err = PeerFilterError::PublicKeysExceeded(max_public_keys); + out_chan_tx.send(Message::FilterResult(Some(FilterResultMessage { + error: err.to_string(), + })))?; + return Err(err.into()); + } + + // set the filter if it's not set + let ok: Result<(), PeerFilterError> = (|| { + if self.filter.is_none() { + self.filter = Some(CuckooFilter::with_capacity(1 << 16)); + } + let filter = self.filter.as_mut().expect("filter should exist"); + + // perform the inserts + for pub_key in pub_keys { + if filter.add(&pub_key).is_err() { + return Err(PeerFilterError::InsertFailed); + } + } + + Ok(()) + })(); + + // send the result + let message = if let Err(err) = ok { + Message::FilterResult(Some(FilterResultMessage { + error: err.to_string(), + })) + } else { + Message::FilterResult(None) + }; + out_chan_tx.send(message)?; + + Ok(()) + } + + /// Send back a filtered view of the transaction queue + async fn on_get_filter_transaction_queue( + &self, + out_chan_tx: &OutChanSender, + ) -> Result<(), PeerError> { + info!("Received get_filter_transaction_queue, from: {}", 
self.addr); + + let message = if self.filter.is_none() { + Message::FilterTransactionQueue(FilterTransactionQueueMessage { + transactions: None, + error: Some(FilterTransactionQueueError::FilterMissing.to_string()), + }) + } else { + let transactions = self.tx_queue.get(0); + let mut ftq_transactions = Vec::new(); + for tx in transactions { + if self.filter_lookup(&tx) { + ftq_transactions.push(tx); + } + } + Message::FilterTransactionQueue(FilterTransactionQueueMessage { + transactions: Some(ftq_transactions), + error: None, + }) + }; + out_chan_tx.send(message)?; + + Ok(()) + } + + /// Returns true if the transaction is of interest to the peer + fn filter_lookup(&self, tx: &Transaction) -> bool { + match self.filter { + Some(ref filter) => { + if !tx.is_coinbase() + && filter.contains(&tx.from.expect("transaction should have a sender")) + { + return true; + } + + filter.contains(&tx.to) + } + None => true, + } + } + + /// Called from the writer context + fn create_filter_block( + &self, + id: BlockID, + block: Block, + ) -> Result, PeerError> { + if self.filter.is_none() { + // nothing to do + return Ok(None); + } + + // create a filter block + let mut fb = FilterBlockMessage { + block_id: id, + header: block.header, + transactions: Vec::new(), + }; + + // filter out transactions the peer isn't interested in + for tx in &block.transactions { + if self.filter_lookup(tx) { + fb.transactions.push(tx.clone()); + } + } + + Ok(Some(fb)) + } + + /// Received a request for peer addresses + async fn on_get_peer_addresses(&self, out_chan_tx: &OutChanSender) -> Result<(), PeerError> { + info!("Received get_peer_addresses message, from: {}", self.addr); + + // get up to 32 peers that have been connected to within the last 3 hours + let time_ago = Duration::from_secs(3 * 60 * 60); + let since = now_as_duration() - time_ago; + let addresses = self + .peer_store + .get_since(32, since)? 
+ .into_iter() + .map(|addr| addr.to_string()) + .collect::>(); + + if !addresses.is_empty() { + out_chan_tx.send(Message::PeerAddresses(PeerAddressesMessage { addresses }))?; + } + + Ok(()) + } + + /// Received a list of addresses + async fn on_peer_addresses(&mut self, addresses: Vec) -> Result<(), PeerError> { + info!( + "Received peer_addresses message with {} address(es), from: {}", + addresses.len(), + self.addr + ); + + let elapsed = match self.last_peer_addresses_received_time { + Some(time) => time.elapsed(), + None => Duration::MAX, + }; + + if elapsed < GET_PEER_ADDRESSES_PERIOD - Duration::from_secs(2 * 60) { + // don't let a peer flood us with peer addresses + info!( + "Ignoring peer addresses, time since last addresses: {}", + elapsed.as_secs() + ); + return Ok(()); + } + self.last_peer_addresses_received_time = Some(Instant::now()); + + let limit = 32; + for (i, addr) in addresses.into_iter().enumerate() { + if i == limit { + break; + } + // notify the peer manager + self.addr_chan_tx.send(addr).await?; + } + + Ok(()) + } + + /// Called when work has been received + async fn on_get_work( + &mut self, + gw: GetWorkMessage, + out_chan_tx: &OutChanSender, + ) -> Result<(), PeerError> { + let ok = if self.work.is_some() { + Err(PeerGetWorkError::WorkBlockExists) + } else if gw.public_keys.is_empty() { + Err(PeerGetWorkError::WorkBlockNoPublicKeys) + } else if gw.memo.len() > MAX_MEMO_LENGTH { + Err(PeerGetWorkError::WorkBlockMaxMemoLengthExceeded( + MAX_MEMO_LENGTH, + gw.memo.len(), + )) + } else { + match Processor::get_chain_tip_header(&self.ledger, &self.block_store) { + Ok(Some((tip_id, tip_header, _tip_when))) => { + // create and send out new work + self.pub_keys = gw.public_keys; + self.memo = Some(gw.memo); + self.create_new_work_block(&tip_id, &tip_header, out_chan_tx) + .await?; + Ok(()) + } + Ok(None) => Err(LedgerNotFoundError::ChainTipHeader.into()), + Err(err) => Err(PeerGetWorkError::Processor(self.addr, err)), + } + }; + + if let Err(ref 
err) = ok { + out_chan_tx.send(Message::Work(WorkMessage { + work_id: None, + header: None, + min_time: None, + error: Some(err.to_string()), + }))?; + } + + ok.map_err(Into::into) + } + + /// Create a new work block for a mining peer. + async fn create_new_work_block( + &mut self, + tip_id: &BlockID, + tip_header: &BlockHeader, + out_chan_tx: &OutChanSender, + ) -> Result<(), PeerError> { + if self.pub_keys.is_empty() { + // peer doesn't want work + return Ok(()); + } + + let work_id = rand_int31(); + let peer_work = match Processor::compute_median_timestamp(tip_header, &self.block_store) + .map_err(PeerError::ProcessorComputingMedianTimestamp) + { + Ok(median_timestamp) => { + let key_index = rand::thread_rng().gen_range(0..self.pub_keys.len()); + match Miner::create_next_block( + tip_id, + tip_header, + &self.tx_queue, + &self.block_store, + &self.ledger, + self.pub_keys[key_index], + self.memo.clone(), + ) + .map_err(PeerError::MinerCreateNextWorkBlock) + { + Ok(work_block) => Ok(PeerWork { + work_id, + work_block, + median_timestamp, + }), + Err(err) => Err(err), + } + } + Err(err) => Err(err), + }; + + // create a new block + let message = match peer_work { + Ok(ref peer_work) => Message::Work(WorkMessage { + work_id: Some(peer_work.work_id), + header: Some(peer_work.work_block.header.clone()), + min_time: Some(peer_work.median_timestamp + 1), + error: None, + }), + Err(ref err) => Message::Work(WorkMessage { + work_id: Some(work_id), + header: None, + min_time: None, + error: Some(err.to_string()), + }), + }; + out_chan_tx.send(message)?; + self.work = peer_work.ok(); + + Ok(()) + } + + /// Handle a submission of mining work. 
    async fn on_submit_work(
        &mut self,
        sw: SubmitWorkMessage,
        out_chan_tx: &OutChanSender,
    ) -> Result<(), PeerError> {
        // Validate the submission and, if it matches the work we handed out,
        // run the solved block through the processor. The outcome is collected
        // into `block_id` rather than returned early so that a result message
        // can be sent back to the peer in every case below.
        let block_id = if sw.work_id == 0 {
            // the peer must echo back the work id we assigned it
            Err(PeerSubmitWorkError::WorkIdMissing)
        } else {
            match sw.header.id() {
                Ok(id) => {
                    if let Some(ref mut work) = self.work {
                        if sw.work_id != work.work_id {
                            Err(PeerSubmitWorkError::WorkIdInvalid(work.work_id, sw.work_id))
                        } else {
                            // substitute the solved header into the block we built for this peer
                            work.work_block.header = sw.header;
                            match self
                                .processor
                                .process_candidate_block(id, work.work_block.clone(), self.addr)
                                .await
                            {
                                Ok(_) => Ok(id),
                                Err(err) => Err(err.into()),
                            }
                        }
                    } else {
                        // we never issued work to this peer
                        Err(PeerSubmitWorkError::WorkIdPeerMissing)
                    }
                }
                Err(err) => Err(err.into()),
            }
        };

        // always report the outcome back to the peer, success or failure
        let message = if let Err(ref err) = block_id {
            Message::SubmitWorkResult(SubmitWorkResultMessage {
                work_id: sw.work_id,
                error: Some(err.to_string()),
            })
        } else {
            Message::SubmitWorkResult(SubmitWorkResultMessage {
                work_id: sw.work_id,
                error: None,
            })
        };
        out_chan_tx.send(message)?;

        // propagate the failure only after the peer has been notified
        if let Err(err) = block_id {
            Err(err.into())
        } else {
            Ok(())
        }
    }

    /// Update the read limit if necessary
    fn update_read_limit(&mut self) {
        // NOTE(review): a ledger/storage error here aborts the peer task via
        // panic -- confirm this is preferred over propagating the error
        let (ok, height) = PeerManager::is_initial_block_download(&self.ledger, &self.block_store)
            .unwrap_or_else(|err| panic!("{:?}", err));

        if ok {
            // still syncing: a read limit of 0 disables the limit check
            // TODO: do something smarter about this
            self.read_limit = 0;
            return;
        }

        // transactions are <500 bytes so this gives us significant wiggle room
        let max_transactions = Processor::compute_max_transactions_per_block(height + 1);
        self.read_limit = max_transactions * 1024;
    }

    /// Send outgoing messages with a write timeout period
    async fn send_with_timeout(
        &self,
        ws_sender: &mut WsSink,
        message: WsMessage,
    ) -> Result<(), PeerConnectionError> {
        // WRITE_WAIT bounds how long we block on the websocket sink
        match timeout(WRITE_WAIT, ws_sender.send(message)).await {
            Err(err) => Err(PeerConnectionError::Timeout(self.addr, err)),
            Ok(Err(err)) => Err(err.into()),
            // send completed within the timeout
            _ => Ok(()),
        }
    }

/// Specifies a handler to call when the peer connection is closed. + pub fn on_shutdown(&mut self, shutdown_fn: impl Fn() + 'static + Send + Sync) { + self.shutdown_fns.push(Box::new(shutdown_fn)); + } +} + +impl Drop for Peer { + fn drop(&mut self) { + // peer is shutting down + if self.conn.is_none() { + info!("Closed connection with: {}", self.addr); + } + + for shutdown_fn in &self.shutdown_fns { + shutdown_fn(); + } + } +} + +#[derive(Error)] +pub enum PeerError { + #[error("block {0} at height {1} less than latest checkpoint height {2}")] + BlockAtHeightLessThanCheckpoint(BlockID, u64, u64), + #[error("received empty block, from {0}")] + EmptyBlockReceived(SocketAddr), + #[error("received empty transaction")] + EmptyTransactionReceive, + #[error("{0} blocks IDs is more than {1} maximum per inv_block")] + InvBlockMaxBlocks(usize, usize), + #[error("max block ignore list size exceeded")] + MaxIgnoreListSizeExceeded, + #[error("received invalid message, from: {0}")] + MessageInvalid(SocketAddr, #[source] JsonError), + #[error("received too large ({0} bytes) of a '{1}' message, from: {2}")] + MessageLengthExceeded(usize, String, SocketAddr), + #[error("received non-utf8 clean message, from: {0}")] + MessageNotUtf8(SocketAddr, #[source] DataError), + #[error("retrieving transaction history, block: {0}, index: {1} -> block storage")] + PublicKeyTransactionBlockStorage(BlockID, u32, #[source] BlockStorageError), + #[error("retrieving transaction history, block: {0}, index: {1}: no transaction found")] + PublicKeyTransactionNotFound(BlockID, u32), + #[error("received unrequested block")] + ReceivedUnrequestedBlock, + #[error("sync has stalled, disconnecting from from: {0}")] + SyncStalled(SocketAddr), + + #[error("miner -> creating next block")] + MinerCreateNextWorkBlock(#[source] MinerError), + #[error("processor -> computing median timestamp")] + ProcessorComputingMedianTimestamp(#[source] ProcessorError), + + #[error("peer balances")] + PeerBalances(#[from] 
PeerBalancesError), + #[error("peer connection")] + PeerConnection(#[from] PeerConnectionError), + #[error("peer filter")] + PeerFilter(#[from] PeerFilterError), + #[error("peer get work")] + PeerGetWork(#[from] PeerGetWorkError), + #[error("peer submit work")] + PeerSubmitWork(#[from] PeerSubmitWorkError), + + #[error("block")] + Block(#[from] BlockError), + #[error("block storage")] + BlockStorage(#[from] BlockStorageError), + #[error("block storage not found")] + BlockStorageNotFound(#[from] BlockStorageNotFoundError), + #[error("channel")] + Channel(#[from] ChannelError), + #[error("json")] + Json(#[from] JsonError), + #[error("ledger")] + Ledger(#[from] LedgerError), + #[error("ledger not found")] + LedgerNotFound(#[from] LedgerNotFoundError), + #[error("peer manager")] + PeerManager(#[from] PeerManagerError), + #[error("peer storage")] + PeerStorage(#[from] PeerStorageError), + #[error("processing block")] + ProcessBlock(#[from] ProcessBlockError), + #[error("processor")] + Processor(#[from] ProcessorError), + #[error("transaction")] + Transaction(#[from] TransactionError), +} + +impl_debug_error_chain!(PeerError, "peer"); + +impl From> for PeerError { + fn from(err: tokio::sync::mpsc::error::SendError) -> Self { + Self::Channel(ChannelError::Send("get work", err.to_string())) + } +} + +impl From> for PeerError { + fn from(err: tokio::sync::mpsc::error::SendError) -> Self { + Self::Channel(ChannelError::Send("out", err.to_string())) + } +} + +impl From> for PeerError { + fn from(err: tokio::sync::mpsc::error::SendError) -> Self { + Self::Channel(ChannelError::Send("addr", err.to_string())) + } +} + +impl From> for PeerError { + fn from(err: tokio::sync::mpsc::error::SendError) -> Self { + Self::Channel(ChannelError::Send("submit work", err.to_string())) + } +} + +#[derive(Error, Debug)] +pub enum PeerConnectionError { + #[error("failed accepting incoming from: {0}")] + Accept(SocketAddr, #[source] tokio_tungstenite::tungstenite::Error), + #[error("failed 
connecting to peer: {0}")] + Connect(SocketAddr, #[source] tokio_tungstenite::tungstenite::Error), + #[error("websocket connection lost, closing...")] + Dropped(SocketAddr), + #[error("connection timeout for peer: {0}")] + Timeout(SocketAddr, #[source] tokio::time::error::Elapsed), + + #[error("peer storage")] + PeerStorage(#[from] PeerStorageError), + + #[error("websocket header")] + HttpHeaderValue(#[from] tokio_tungstenite::tungstenite::http::header::InvalidHeaderValue), + #[error("websocket")] + Websocket(#[from] tokio_tungstenite::tungstenite::Error), +} + +/// Error type associated with cruzbit protocol messages +#[derive(Error, Debug)] +pub enum FilterTransactionQueueError { + #[error("No filter set")] + FilterMissing, +} + +/// Error type associated with cruzbit protocol messages +#[derive(Error, Debug)] +pub enum PeerBalanceError { + #[error("ledger")] + Ledger(#[from] LedgerError), +} + +/// Error type associated with cruzbit protocol messages +#[derive(Error, Debug)] +pub enum PeerBalancesError { + #[error("Too many public keys, limit: {0}")] + PublicKeysExceeded(usize), +} + +/// Error type associated with cruzbit protocol messages +#[derive(Error, Debug)] +pub enum PeerFilterError { + #[error("Failed to create filter")] + CreateFailed, + #[error("Unable to insert into filter")] + InsertFailed, + #[error("Too many public keys, limit: {0}")] + PublicKeysExceeded(usize), + #[error("Filter too large, max {0}")] + SizeExceeded(usize), + #[error("Unsupported filter type {0}")] + TypeUnsupported(String), +} + +/// Error type associated with cruzbit protocol messages +#[derive(Error, Debug)] +pub enum PeerGetWorkError { + #[error("Work block already exists")] + WorkBlockExists, + #[error("Work block max memo length ({0}) exceeded: {1}")] + WorkBlockMaxMemoLengthExceeded(usize, usize), + #[error("Work block has no public keys")] + WorkBlockNoPublicKeys, + + #[error("ledger not found")] + LedgerNotFound(#[from] LedgerNotFoundError), + + #[error("getting chain 
tip header, for: {0}")] + Processor(SocketAddr, #[source] ProcessorError), +} + +/// Error type associated with cruzbit protocol messages +#[derive(Error, Debug)] +pub enum PeerSubmitWorkError { + #[error("Unexpected work id {0}, found {1}")] + WorkIdInvalid(u32, u32), + #[error("No work id set")] + WorkIdMissing, + #[error("No work id set on peer")] + WorkIdPeerMissing, + + #[error("block id")] + Block(#[from] BlockError), + #[error("processing work block")] + ProcessBlock(#[from] ProcessBlockError), +} diff --git a/src/peer_manager.rs b/src/peer_manager.rs new file mode 100644 index 0000000..3f3f11a --- /dev/null +++ b/src/peer_manager.rs @@ -0,0 +1,1118 @@ +use std::collections::hash_map::Entry; +use std::collections::HashMap; +use std::net::{IpAddr, SocketAddr}; +use std::path::PathBuf; +use std::str::{from_utf8, FromStr}; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::{Arc, Mutex, RwLock}; +use std::time::Duration; + +use log::{error, info}; +use network_interface::{NetworkInterface, NetworkInterfaceConfig}; +use rand::seq::{IteratorRandom, SliceRandom}; +use rand::Rng; +use thiserror::Error; +use tokio::io::{AsyncReadExt, AsyncWriteExt}; +use tokio::net::{TcpListener, TcpStream}; +use tokio::sync::mpsc::{channel, Receiver, Sender}; +use tokio::task::JoinHandle; +use tokio::time::{interval_at, sleep_until, timeout, Instant}; +use tokio_rustls::rustls::pki_types::ServerName; +use tokio_rustls::rustls::ServerConfig; +use tokio_rustls::server::TlsStream; +use tokio_rustls::{TlsAcceptor, TlsConnector}; +use tokio_tungstenite::accept_hdr_async; +use tokio_tungstenite::tungstenite::handshake::server::{Request, Response}; +use tokio_tungstenite::tungstenite::http::{header, StatusCode}; +use tokio_tungstenite::tungstenite::Error as WsError; + +use crate::block::BlockID; +use crate::block_queue::BlockQueue; +use crate::block_storage_disk::BlockStorageDisk; +use crate::checkpoints::{CHECKPOINTS_ENABLED, LATEST_CHECKPOINT_HEIGHT}; +use 
crate::constants::{ + MAX_INBOUND_PEER_CONNECTIONS_FROM_SAME_HOST, MAX_OUTBOUND_PEER_CONNECTIONS, MAX_TIP_AGE, +}; +use crate::dns::query_for_peers; +use crate::error::{ + impl_debug_error_chain, ChannelError, DataError, ErrChain, ParsingError, SocketError, +}; +use crate::irc::IRC; +use crate::ledger_disk::LedgerDisk; +use crate::peer::{EitherWebSocketStream, Peer, PeerConnectionError}; +use crate::peer_storage::{PeerStorage, PeerStorageError}; +use crate::peer_storage_disk::PeerStorageDisk; +use crate::processor::{Processor, ProcessorError}; +use crate::protocol::PROTOCOL; +use crate::shutdown::{shutdown_channel, Shutdown, ShutdownChanReceiver, SpawnedError}; +use crate::tls::{self, generate_self_signed_cert_and_key, server_config, TlsError}; +use crate::transaction_queue_memory::TransactionQueueMemory; +use crate::utils::{addr_is_reserved, now_as_secs, rand_int31, resolve_host}; + +pub type AddrChanSender = Sender; +pub type AddrChan = (AddrChanSender, Mutex>>); + +/// Manages incoming and outgoing peer connections on behalf of the client. +/// It also manages finding peers to connect to. +pub struct PeerManager { + genesis_id: &'static BlockID, + peer_store: Arc, + block_store: Arc, + ledger: Arc, + processor: Arc, + tx_queue: Arc, + block_queue: Arc, + data_dir: PathBuf, + my_external_ip: Option, + peer: Option, + cert_path: Option, + key_path: Option, + port: u16, + inbound_limit: usize, + accept: bool, + accepting: AtomicBool, + irc: bool, + dns_seed: bool, + ban_map: &'static HashMap, + in_peers: RwLock>, + in_peer_count_by_host: RwLock>, + out_peers: RwLock>, + addr_chan: AddrChan, + peer_nonce: u32, + open: AtomicBool, + server_shutdown: Mutex>, + irc_shutdown: Mutex>, + shutdown_chan_rx: Mutex>, +} + +impl PeerManager { + /// Returns a new PeerManager instance. 
+ pub fn new( + genesis_id: &'static BlockID, + peer_store: Arc, + block_store: Arc, + ledger: Arc, + processor: Arc, + tx_queue: Arc, + data_dir: PathBuf, + my_external_ip: Option, + peer: Option, + cert_path: Option, + key_path: Option, + port: u16, + inbound_limit: usize, + accept: bool, + ban_map: &'static HashMap, + irc: bool, + dns_seed: bool, + open: bool, + shutdown_chan_rx: ShutdownChanReceiver, + ) -> Arc { + let addr_chan = channel(10000); + Arc::new(Self { + genesis_id, + peer_store, + block_store, + ledger, + processor, + tx_queue, + block_queue: Arc::new(BlockQueue::new()), + data_dir, + my_external_ip, + peer, + cert_path, + key_path, + port, + inbound_limit, + accept, + // not accepting connections initially + accepting: AtomicBool::new(false), + irc, + dns_seed, + ban_map, + in_peers: RwLock::new(HashMap::new()), + in_peer_count_by_host: RwLock::new(HashMap::new()), + out_peers: RwLock::new(HashMap::new()), + addr_chan: (addr_chan.0, Mutex::new(Some(addr_chan.1))), + peer_nonce: rand_int31(), + open: AtomicBool::new(open), + server_shutdown: Mutex::new(None), + irc_shutdown: Mutex::new(None), + shutdown_chan_rx: Mutex::new(Some(shutdown_chan_rx)), + }) + } + + /// Spawns the PeerManager's main loop. + pub fn spawn(self: Arc) -> JoinHandle> { + tokio::spawn(async move { self.run().await.map_err(Into::into) }) + } + + /// Runs the PeerManager's main loop. + /// It determines our connectivity and manages sourcing peer addresses from seed sources + /// as well as maintaining full outbound connections and accepting inbound connections. 
+ pub async fn run(self: &Arc) -> Result<(), PeerManagerError> { + if let Some(peer) = self.peer { + // store the explicitly specified outbound peer + if let Err(err) = self + .peer_store + .store(peer) + .map_err(|err| PeerManagerError::SavePeer(peer, err)) + { + error!("{:?}", err); + }; + } else { + // query dns seeds for peers + match query_for_peers().await { + Ok(addresses) => { + for addr in addresses { + info!("Got peer address from DNS: {}", addr); + self.addr_chan.0.send(addr).await?; + } + } + Err(err) => { + error!("{:?}", err); + } + }; + + // handle IRC seeding + if self.irc { + let mut port = self.port; + if !self.open.load(Ordering::Relaxed) || !self.accept { + // don't advertise ourself as available for inbound connections + port = 0; + } + + let (shutdown_chan_tx, shutdown_chan_rx) = shutdown_channel(); + match IRC::connect( + port, + self.genesis_id, + self.addr_chan.0.clone(), + shutdown_chan_rx, + ) + .await + { + Ok(irc) => { + let handle = irc.spawn(); + let mut irc_shutdown = self.irc_shutdown.lock().unwrap(); + *irc_shutdown = Some(Shutdown::new(handle, shutdown_chan_tx)); + } + Err(err) => { + error!("{:?}", err); + } + }; + } + } + + // handle listening for inbound peers + if let Err(err) = self.listen_for_peers().await { + error!("{:?}", err); + } + + // try connecting to some saved peers + if let Err(err) = self.connect_to_peers().await { + error!("{:?}", err); + } + + // try connecting out to peers every 5 minutes + let ticker_interval = Duration::from_secs(5 * 60); + let mut ticker = interval_at(Instant::now() + ticker_interval, ticker_interval); + + let mut addr_chan_rx = self.addr_chan.1.lock().unwrap().take().unwrap(); + let mut shutdown_chan_rx = self.shutdown_chan_rx.lock().unwrap().take().unwrap(); + + // main loop + loop { + tokio::select! 
{ + Some(addr_str) = addr_chan_rx.recv() => { + // validate the address + let addr = match self.validate_peer_address(addr_str).map_err(PeerManagerError::PeerValidation) { + Ok(v) => v, + Err(err) => { + error!("{:?}", err); + continue + } + }; + + // is it banned? + if self.ban_map.get(&addr.ip().to_string()).is_some() { + info!("Ignoring banned host: {}", addr.ip()); + continue + } + + // store the peer + match self.peer_store.store(addr).map_err(|err| PeerManagerError::SavePeer(addr, err)) { + Ok(ok) => { + if !ok { + // we already knew about this peer address + continue + } + info!("Discovered new peer: {}", addr); + + // try connecting to some saved peers + if let Err(err) = self.connect_to_peers().await { + error!("{:?}", err); + continue + } + } + Err(err) => { + error!("{:?}", err); + continue + } + } + } + + _ = ticker.tick() => { + let out_count = self.outbound_peer_count(); + let in_count = self.inbound_peer_count(); + info!("Have {} outbound connections and {} inbound connections", out_count, in_count); + + // handle listening for inbound peers + if let Err(err) = self.listen_for_peers().await { + error!("{:?}", err); + } + + if self.dns_seed && rand::thread_rng().gen_range(0..2) == 1 { + // drop a peer so we can try another + self.drop_random_peer().await; + } + + // periodically try connecting to some saved peers + if let Err(err) = self.connect_to_peers().await { + error!("{:?}", err); + } + } + + _ = &mut shutdown_chan_rx => { + info!("Peer manager shutting down"); + self.shutdown().await; + break Ok(()) + } + } + } + } + + /// Shutdown peers, http server and irc + pub async fn shutdown(&self) { + let mut shutdowns = Vec::new(); + + // collect all outbound connected peers + let out_peers = { + let mut out_peers = self.out_peers.write().unwrap(); + out_peers + .drain() + .map(|(_addr, shutdown)| shutdown) + .collect::>() + }; + shutdowns.extend(out_peers); + + // collect all inbound connected peers + let in_peers = { + let mut in_peers = 
self.in_peers.write().unwrap(); + in_peers + .drain() + .map(|(_addr, shutdown)| shutdown) + .collect::>() + }; + shutdowns.extend(in_peers); + + // collect http server shutdown if it's running + if let Some(server_shutdown) = self.server_shutdown.lock().unwrap().take() { + shutdowns.push(server_shutdown); + } + + // collect irc shutdown if it's running + if let Some(irc_shutdown) = self.irc_shutdown.lock().unwrap().take() { + shutdowns.push(irc_shutdown) + } + + // shut everything down + for shutdown in shutdowns { + shutdown.send().await; + } + } + + fn inbound_peer_count(&self) -> usize { + self.in_peers.read().unwrap().len() + } + + fn outbound_peer_count(&self) -> usize { + self.out_peers.read().unwrap().len() + } + + /// Try connecting to some recent peers + async fn connect_to_peers(self: &Arc) -> Result<(), PeerManagerError> { + if let Some(peer) = self.peer { + if self.outbound_peer_count() != 0 { + // only connect to the explicitly requested peer once + return Ok(()); + } + + // try reconnecting to the explicit peer + info!("Attempting to connect to: {}", peer); + self.connect(&peer).await?; + info!("Connected to peer: {}", peer); + return Ok(()); + } + + // are we syncing? + let (ibd, _height) = + PeerManager::is_initial_block_download(&self.ledger, &self.block_store)?; + + let want = if ibd { + // only connect to 1 peer until we're synced. + // If this client is a bad actor we'll find out about the real + // chain as soon as we think we're done with them and find more peers + 1 + } else { + // otherwise try to keep us maximally connected + MAX_OUTBOUND_PEER_CONNECTIONS + }; + + let mut count = self.outbound_peer_count(); + let mut need = want.saturating_sub(count); + if need == 0 { + return Ok(()); + } + + let mut tried = HashMap::new(); + + info!( + "Have {} outbound connections, want {}. 
Trying some peer addresses now", + count, want + ); + + // try to satisfy desired outbound peer count + while need > 0 { + let addrs = self.peer_store.get(need)?; + if addrs.is_empty() { + // no more attempts possible at the moment + info!("No more peer addresses to try right now"); + return Ok(()); + } + for addr in addrs { + if tried.get(&addr).is_some() { + // we already tried this peer address. + // this shouldn't really be necessary if peer storage is respecting + // proper retry intervals but it doesn't hurt to be safe + info!( + "Already tried to connect to {} this time, will try again later", + addr + ); + return Ok(()); + } + tried.insert(addr, true); + + // is it banned? + if self.ban_map.get(&addr.ip().to_string()).is_some() { + info!("Skipping and removing banned host: {}", addr.ip()); + if let Err(err) = self + .peer_store + .delete(addr) + .map_err(|err| PeerManagerError::RemovePeer(addr, err)) + { + error!("{:?}", err); + } + continue; + } + + info!("Attempting to connect to: {}", addr); + match self.connect(&addr).await { + Ok(_) => { + info!("Connected to peer: {}", &addr); + } + Err(err) => { + error!("{:?}", err); + } + } + } + count = self.outbound_peer_count(); + need = want - count; + } + + info!( + "Have {} outbound connections. 
Done trying new peer addresses", + count + ); + + Ok(()) + } + + /// Connect to a peer + async fn connect(self: &Arc, addr: &SocketAddr) -> Result<(), PeerManagerError> { + let my_addr = if self.accepting.load(Ordering::Relaxed) + && self.open.load(Ordering::Relaxed) + && self.my_external_ip.is_some() + { + // advertise ourself as open + self.my_external_ip + .map(|my_external_ip| SocketAddr::from((my_external_ip, self.port))) + } else { + None + }; + + let (shutdown_chan_tx, shutdown_chan_rx) = shutdown_channel(); + let mut peer = Peer::new( + None, + self.genesis_id, + Arc::clone(&self.peer_store), + Arc::clone(&self.block_store), + Arc::clone(&self.ledger), + Arc::clone(&self.processor), + Arc::clone(&self.tx_queue), + Arc::clone(&self.block_queue), + self.addr_chan.0.clone(), + *addr, + shutdown_chan_rx, + ); + + if !self.check_outbound_set(addr) { + return Err(PeerValidationError::TooManyConnections.into()); + } + + // connect to the peer + peer.connect(self.peer_nonce, my_addr).await?; + + let arc_self = Arc::clone(self); + let addr = *addr; + peer.on_shutdown(move || { + arc_self.remove_from_outbound_set(&addr); + }); + + let handle = peer.spawn(); + let shutdown = Shutdown::new(handle, shutdown_chan_tx); + self.add_to_outbound_set(&addr, shutdown); + + Ok(()) + } + + /// Check to see if it's time to start accepting connections and do so if necessary + async fn listen_for_peers(self: &Arc) -> Result<(), PeerManagerError> { + // client is launched with no_accept + if !self.accept { + return Ok(()); + } + // this is true when we start listening for peers + if self.accepting.load(Ordering::Relaxed) { + return Ok(()); + } + + // don't accept new connections while we're syncing + let (ibd, _height) = Self::is_initial_block_download(&self.ledger, &self.block_store)?; + if ibd { + info!("We're still syncing. 
Not accepting new connections yet"); + return Ok(()); + } + + self.accepting.store(true, Ordering::Relaxed); + if let Err(err) = self.accept_connections() { + error!("{:?}", err); + } + + // give us some time to generate a certificate and start listening + // so we can correctly report connectivity to outbound peers + sleep_until(Instant::now() + Duration::from_secs(1)).await; + + if !self.open.load(Ordering::Relaxed) { + // if we don't yet think we're open try connecting to ourself to see if maybe we are. + // if the user manually forwarded a port on their router this is when we'd find out. + info!("Checking to see if we're open for public inbound connections"); + if let Some(my_external_ip) = self.my_external_ip { + let my_addr = SocketAddr::from((my_external_ip, self.port)); + if self.peer_store.store(my_addr).is_ok() { + if let Err(PeerManagerError::PeerConnection(PeerConnectionError::Connect( + _addr, + WsError::Http(response), + ))) = self.connect(&my_addr).await + { + if response.status() == StatusCode::LOOP_DETECTED { + self.open.store(true, Ordering::Relaxed); + } + } + + let out_peer = self.out_peers.write().unwrap().remove(&my_addr); + if let Some(shutdown) = out_peer { + shutdown.send().await; + } + } + if self.open.load(Ordering::Relaxed) { + info!("Open for public inbound connections"); + } else { + info!("Not open for public inbound connections"); + } + } + } + + Ok(()) + } + + /// Accept incoming peer connections + fn accept_connections(self: &Arc) -> Result<(), PeerManagerError> { + let (cert_path, key_path) = match (self.cert_path.as_ref(), self.key_path.as_ref()) { + (Some(cert_path), Some(key_path)) => (cert_path.clone(), key_path.clone()), + _ => { + // generate new certificate and key for tls on each run + info!("Generating TLS certificate and key"); + match generate_self_signed_cert_and_key(&self.data_dir) { + Ok((cert_path, key_path)) => (cert_path, key_path), + Err(err) => return Err(err.into()), + } + } + }; + + let bind_v4v6 = 
format!("[::]:{}", self.port); + let addr = SocketAddr::from_str(&bind_v4v6).map_err(ParsingError::IpAddress)?; + let (shutdown_chan_tx, shutdown_chan_rx) = shutdown_channel(); + let server_config = server_config(&cert_path, &key_path)?; + let server = HttpServer::new(addr, server_config, Arc::clone(self), shutdown_chan_rx); + let mut server_shutdown = self.server_shutdown.lock().unwrap(); + *server_shutdown = Some(Shutdown::new(server.spawn(), shutdown_chan_tx)); + + Ok(()) + } + + /// Helper to check if outbound peers will fit + fn check_outbound_set(&self, addr: &SocketAddr) -> bool { + let out_peers = self.out_peers.read().unwrap(); + + if out_peers.len() == MAX_OUTBOUND_PEER_CONNECTIONS { + // too many connections + return false; + } + + if out_peers.contains_key(addr) { + // already connected + return false; + } + + true + } + + /// Helper to add peers to the outbound set + fn add_to_outbound_set(&self, addr: &SocketAddr, shutdown: Shutdown) { + let mut out_peers = self.out_peers.write().unwrap(); + out_peers.insert(*addr, shutdown); + info!("Outbound peer count: {}", out_peers.len()); + } + + /// Helper to check if in peers will fit + fn check_inbound_set(&self, addr: SocketAddr) -> bool { + let in_peers = self.in_peers.read().unwrap(); + if in_peers.len() == self.inbound_limit { + // too many connections + return false; + } + if in_peers.get(&addr).is_some() { + // already connected + return false; + } + + true + } + + /// Helper to add peers to the inbound set if they'll fit + fn add_to_inbound_set(&self, addr: SocketAddr, shutdown: Shutdown) -> bool { + // update the count for this IP + let mut in_peer_count_by_host = self.in_peer_count_by_host.write().unwrap(); + match in_peer_count_by_host.get_mut(&addr) { + Some(count) => { + *count += 1; + } + None => { + in_peer_count_by_host.insert(addr, 1); + } + } + let mut in_peers = self.in_peers.write().unwrap(); + in_peers.insert(addr, shutdown); + info!("Inbound peer count: {}", in_peers.len()); + + true + } 
+ + /// Helper to check if a peer address exists in the outbound set + fn exists_in_outbound_set(&self, addr: &SocketAddr) -> bool { + self.out_peers.read().unwrap().get(addr).is_some() + } + + /// Helper to remove peers from the outbound set + fn remove_from_outbound_set(&self, addr: &SocketAddr) { + let mut out_peers = self.out_peers.write().unwrap(); + if let Some(_shutdown) = out_peers.remove(addr) { + info!("Outbound peer count: {}", out_peers.len()); + } + } + + /// Helper to remove peers from the inbound set + fn remove_from_inbound_set(&self, addr: &SocketAddr) { + // we parsed this address on the way in so an error isn't possible + let mut in_peers = self.in_peers.write().unwrap(); + if let Some(_shutdown) = in_peers.remove(addr) { + info!("Inbound peer count: {}", in_peers.len()); + } + + let mut in_peer_count_by_host = self.in_peer_count_by_host.write().unwrap(); + if let Entry::Occupied(mut count) = in_peer_count_by_host.entry(*addr) { + *count.get_mut() -= 1; + if *count.get() == 0 { + count.remove_entry(); + } + }; + } + + /// Drop a random peer. Used by seeders. 
+ async fn drop_random_peer(&self) { + let out_peer = { + let mut out_peers = self.out_peers.write().unwrap(); + out_peers + .keys() + .choose(&mut rand::thread_rng()) + .cloned() + .and_then(|addr| out_peers.remove_entry(&addr)) + }; + if let Some((addr, shutdown)) = out_peer { + info!("Dropping random peer: {}", addr); + shutdown.send().await; + } + } + + /// Validate a peer addresses + fn validate_peer_address(&self, addr_str: String) -> Result { + // resolve address + let addr = resolve_host(&addr_str) + .map_err(|err| PeerValidationError::ResolveFailed(addr_str, err))?; + + // don't accept ourself + if self.my_external_ip == Some(addr.ip()) && self.port == addr.port() { + return Err(PeerValidationError::IsOurs(addr)); + } + + // filter out local networks + if addr_is_reserved(&addr) { + return Err(PeerValidationError::IsLocal(addr)); + } + + Ok(addr) + } + + /// Returns true if it appears we're still syncing the block chain. + pub fn is_initial_block_download( + ledger: &Arc, + block_store: &Arc, + ) -> Result<(bool, u64), PeerManagerError> { + let Some((_tip_id, tip_header, _when)) = + Processor::get_chain_tip_header(ledger, block_store)? 
+ else { + return Ok((true, 0)); + }; + + if CHECKPOINTS_ENABLED && tip_header.height < LATEST_CHECKPOINT_HEIGHT { + return Ok((true, tip_header.height)); + } + + let now = now_as_secs(); + Ok((tip_header.time < (now - MAX_TIP_AGE), tip_header.height)) + } +} + +#[derive(Error)] +pub enum PeerManagerError { + #[error("failed to remove peer, address: {0}")] + RemovePeer(SocketAddr, #[source] PeerStorageError), + #[error("failed to save peer, address: {0}")] + SavePeer(SocketAddr, #[source] PeerStorageError), + + #[error("peer connection")] + PeerConnection(#[from] PeerConnectionError), + #[error("peer storage")] + PeerStorage(#[from] PeerStorageError), + #[error("peer validation")] + PeerValidation(#[from] PeerValidationError), + + #[error("channel")] + Channel(#[from] ChannelError), + #[error("parsing")] + Parsing(#[from] ParsingError), + #[error("processor")] + Processor(#[from] ProcessorError), + #[error("tls")] + Tls(#[from] TlsError), + + #[error("network interface")] + NetworkInterface(#[from] network_interface::Error), +} + +impl_debug_error_chain!(PeerManagerError, "peer manager"); + +impl From> for PeerManagerError { + fn from(err: tokio::sync::mpsc::error::SendError) -> Self { + Self::Channel(ChannelError::Send("addr", err.to_string())) + } +} + +#[derive(Error, Debug)] +pub enum PeerValidationError { + #[error("ip {0} is in local address space")] + IsLocal(SocketAddr), + #[error("peer address is ours {0}")] + IsOurs(SocketAddr), + #[error("failed to resolve peer address {0}")] + ResolveFailed(String, #[source] ParsingError), + #[error("too many connections")] + TooManyConnections, +} + +/// Server to listen for and handle incoming secure WebSocket connections +pub struct HttpServer { + socket_addr: SocketAddr, + server_config: Arc, + peer_manager: Arc, + shutdown_chan_rx: ShutdownChanReceiver, +} + +impl HttpServer { + pub fn new( + socket_addr: SocketAddr, + server_config: Arc, + peer_manager: Arc, + shutdown_chan_rx: ShutdownChanReceiver, + ) -> Self { 
+ Self { + socket_addr, + server_config, + peer_manager, + shutdown_chan_rx, + } + } + + /// Spawns the HttpServer's main loop. + pub fn spawn(mut self) -> JoinHandle> { + tokio::spawn(async move { self.run().await.map_err(Into::into) }) + } + + /// Runs the HttpServer's main loop. + pub async fn run(&mut self) -> Result<(), HttpServerError> { + let listener = match TcpListener::bind(self.socket_addr) + .await + .map_err(|err| SocketError::BindTcp(self.socket_addr, err)) + { + Ok(v) => v, + Err(err) => { + error!("{:?}", err); + return Ok(()); + } + }; + loop { + tokio::select! { + Ok((stream, remote_addr)) = listener.accept() => { + let server_config = Arc::clone(&self.server_config); + + if let Ok(tls_stream) = TlsAcceptor::from(server_config).accept(stream).await { + if let Err(err) = self.handle_connection(tls_stream, remote_addr).await { + error!("{:?}", err); + continue; + } + } + } + + _ = &mut self.shutdown_chan_rx => { + break Ok(()) + } + } + } + } + + async fn handle_connection( + &self, + tls_stream: TlsStream, + remote_addr: SocketAddr, + ) -> Result<(), HttpServerError> { + // handle incoming connection upgrade requests + let callback = |request: &Request, mut response: Response| { + // append our protocol header + response.headers_mut().append( + header::SEC_WEBSOCKET_PROTOCOL, + PROTOCOL + .parse() + .expect("websocket protocol failed to parse"), + ); + + // is it banned? 
+ if self + .peer_manager + .ban_map + .get(&remote_addr.ip().to_string()) + .is_some() + { + info!("Rejecting connection from banned host: {}", remote_addr); + *response.status_mut() = StatusCode::FORBIDDEN; + return Ok(response); + } + + // check the connection limit for this peer address + if !self.check_host_connection_limit(&remote_addr) { + info!( + "Too many connections from this peer's host: {}", + &remote_addr + ); + *response.status_mut() = StatusCode::SERVICE_UNAVAILABLE; + + return Ok(response); + } + + // check the peer nonce + if let Some(their_nonce_header) = request.headers().get("Cruzbit-Peer-Nonce") { + match their_nonce_header + .to_str() + .map_err(ParsingError::HttpHeader) + { + Ok(nonce_str) => { + match nonce_str.parse::().map_err(ParsingError::Integer) { + Ok(nonce) if nonce == self.peer_manager.peer_nonce => { + info!("Received connection with our own nonce"); + *response.status_mut() = StatusCode::LOOP_DETECTED; + return Ok(response); + } + Ok(_) => { + // nonce is different + } + Err(err) => { + let err = HttpServerError::HeaderNonceInvalid(err); + error!("{:?}", err); + } + } + } + Err(err) => { + let err = HttpServerError::HeaderNonceInvalid(err); + error!("{:?}", err); + } + } + }; + + // if they set their address it means they think they are open + let header_addr = match request.headers().get("Cruzbit-Peer-Address") { + Some(header) => match header.to_str().map_err(|err| { + HttpServerError::HeaderPeerAddressInvalid(ParsingError::HttpHeader(err)) + }) { + Ok(header_addr_str) => { + // validate the address + match self + .peer_manager + .validate_peer_address(header_addr_str.to_owned()) + .map_err(HttpServerError::PeerValidation) + { + Ok(header_addr) => Some(header_addr), + Err(err) => { + error!("{:?}", err); + // don't proceed to save it + None + } + } + } + Err(err) => { + error!("{:?}", err); + None + } + }, + None => None, + }; + + if let Some(addr) = header_addr { + // see if we're already connected outbound to them + if 
self.peer_manager.exists_in_outbound_set(&addr) {
                    info!("Already connected to {}, dropping inbound connection", addr);
                    // write back error reply
                    *response.status_mut() = StatusCode::TOO_MANY_REQUESTS;
                    return Ok(response);
                } else {
                    // save their address for later use
                    if let Err(err) = self.peer_manager.peer_store.store(addr) {
                        // storage failures are logged, not fatal to the upgrade
                        info!("Error saving peer: {}, address: {}", err, &addr);
                    }
                }
            };

            Ok(response)
        };

        // accept the new websocket
        let conn = match accept_hdr_async(tls_stream, callback).await {
            Ok(v) => v,
            Err(err) => {
                return Err(PeerConnectionError::Accept(remote_addr, err).into());
            }
        };

        let (shutdown_chan_tx, shutdown_chan_rx) = shutdown_channel();
        // wire the new inbound peer into the shared stores and queues
        let mut peer = Peer::new(
            Some(EitherWebSocketStream::Right(conn)),
            self.peer_manager.genesis_id,
            Arc::clone(&self.peer_manager.peer_store),
            Arc::clone(&self.peer_manager.block_store),
            Arc::clone(&self.peer_manager.ledger),
            Arc::clone(&self.peer_manager.processor),
            Arc::clone(&self.peer_manager.tx_queue),
            Arc::clone(&self.peer_manager.block_queue),
            self.peer_manager.addr_chan.0.clone(),
            remote_addr,
            shutdown_chan_rx,
        );

        // abort if the peer manager rejects another inbound connection
        // (NOTE(review): exact check_inbound_set semantics live elsewhere -- confirm)
        if !self.peer_manager.check_inbound_set(remote_addr) {
            return Ok(());
        }

        // drop the peer from the inbound set once it shuts down
        let peer_manager = Arc::clone(&self.peer_manager);
        peer.on_shutdown(move || {
            peer_manager.remove_from_inbound_set(&remote_addr);
        });

        info!("New peer connection from: {}", &remote_addr);
        let handle = peer.spawn();
        let shutdown = Shutdown::new(handle, shutdown_chan_tx);
        self.peer_manager.add_to_inbound_set(remote_addr, shutdown);

        Ok(())
    }

    /// Returns false if this host has too many inbound connections already.
+ fn check_host_connection_limit(&self, addr: &SocketAddr) -> bool { + // filter out local networks + if addr_is_reserved(addr) { + // no limit for loopback peers + return true; + } + + match self + .peer_manager + .in_peer_count_by_host + .read() + .unwrap() + .get(addr) + { + Some(count) => *count < MAX_INBOUND_PEER_CONNECTIONS_FROM_SAME_HOST, + None => true, + } + } +} + +#[derive(Error)] +pub enum HttpServerError { + #[error("nonce in header is invalid")] + HeaderNonceInvalid(#[source] ParsingError), + #[error("peer address in header is invalid")] + HeaderPeerAddressInvalid(#[source] ParsingError), + + #[error("peer connection")] + PeerConnection(#[from] PeerConnectionError), + #[error("peer validation")] + PeerValidation(#[from] PeerValidationError), +} + +impl_debug_error_chain!(HttpServerError, "http server"); + +/// Do any of our local IPs match our external IP? +pub fn have_local_ip_match(external_ip: &IpAddr) -> Result { + let ifaces = NetworkInterface::show()?; + for i in ifaces { + for address in &i.addr { + if address.ip() == *external_ip { + return Ok(true); + } + } + } + + Ok(false) +} + +/// Determine external IP address +pub async fn determine_external_ip() -> Option { + // attempt to obtain ip with fallbacks + let mut ip_services = [ + "api.ipify.org", + "checkip.amazonaws.com", + "icanhazip.com", + "ident.me", + "myip.dnsomatic.com", + "whatismyip.akamai.com", + ]; + let mut rng = rand::thread_rng(); + ip_services.shuffle(&mut rng); + + async fn try_connect(host: &str) -> Result, ExternalIpError> { + let addr = resolve_host(&format!("{}:443", host))?; + let stream = timeout(Duration::from_secs(5), TcpStream::connect(&addr)) + .await + .map_err(ExternalIpError::Timeout)? 
            .map_err(|err| SocketError::SendTo(addr, err))?;
        let dnsname = ServerName::try_from(host)?.to_owned();
        let client_config = tls::client_config(false);
        let connector = TlsConnector::from(Arc::new(client_config));
        let mut tls_stream = connector
            .connect(dnsname, stream)
            .await
            .map_err(|err| ExternalIpError::Socket(SocketError::TlsConnect(addr, err)))?;
        // plain HTTP/1.1 request; Connection: close lets read_to_end terminate
        let content = format!(
            "GET / HTTP/1.1\r\nHost: {}\r\nConnection: close\r\n\r\n",
            host
        );
        tls_stream
            .write_all(&content.into_bytes())
            .await
            .map_err(|err| SocketError::SendTo(addr, err))?;
        let mut buffer = Vec::new();
        tls_stream
            .read_to_end(&mut buffer)
            .await
            .map_err(|err| SocketError::ReceiveFrom(addr, err))?;
        let response = from_utf8(&buffer).map_err(DataError::String)?;
        let body = response.trim_matches(char::from(0));
        // NOTE(review): this takes the last non-empty line of the whole raw
        // response (status line and headers included); it assumes the IP is
        // the final line and the server doesn't use chunked transfer
        // encoding -- confirm this holds for all listed services.
        let line = body
            .split('\n')
            .filter(|line| !line.trim().is_empty())
            .last()
            .map(str::to_string);
        match line {
            Some(ip_string) => {
                let addr = IpAddr::from_str(ip_string.as_str()).map_err(ParsingError::IpAddress)?;
                info!("Found external IP: {}", addr);
                Ok(Some(addr))
            }
            None => Ok(None),
        }
    }

    // walk the shuffled services until one yields an address
    for (i, host) in ip_services.into_iter().enumerate() {
        if i > 0 {
            info!("Retrying...");
        }
        match try_connect(host).await {
            Ok(ip) => {
                if ip.is_some() {
                    return ip;
                }
            }
            Err(err) => {
                // per-service failures are logged; we fall through to the next
                error!("{:?}", err);
            }
        }
    }

    None
}

/// Errors from external IP discovery.
#[derive(Error)]
pub enum ExternalIpError {
    #[error("connection timeout")]
    Timeout(#[source] tokio::time::error::Elapsed),

    #[error("data")]
    Data(#[from] DataError),
    #[error("socket")]
    Socket(#[from] SocketError),
    #[error("parsing")]
    Parsing(#[from] ParsingError),

    #[error("dns")]
    DnsName(#[from] tokio_rustls::rustls::pki_types::InvalidDnsNameError),
}

impl_debug_error_chain!(ExternalIpError, "external ip");
diff --git a/src/peer_storage.rs b/src/peer_storage.rs
new file mode 100644
index 0000000..7a10660
--- /dev/null
+++ b/src/peer_storage.rs
@@ -0,0 +1,55 @@
+use std::net::SocketAddr; +use std::time::Duration; + +use thiserror::Error; + +use crate::error::{DataError, DbError, EncodingError, ParsingError}; + +/// An interface for storing peer addresses and information about their connectivity. +pub trait PeerStorage { + /// Stores a peer address. Returns true if the peer was newly added to storage. + fn store(&self, addr: SocketAddr) -> Result; + + /// Returns some peers for us to attempt to connect to. + fn get(&self, count: usize) -> Result, PeerStorageError>; + + /// Returns some peers to tell others about last active less than "when" ago. + fn get_since(&self, count: usize, when: Duration) -> Result, PeerStorageError>; + + /// Is called to explicitly remove a peer address from storage. + fn delete(&self, addr: SocketAddr) -> Result<(), PeerStorageError>; + + /// Is called prior to attempting to connect to the peer. + fn on_connect_attempt(&self, addr: SocketAddr) -> Result<(), PeerStorageError>; + + /// Is called upon successful handshake with the peer. + fn on_connect_success(&self, addr: SocketAddr) -> Result<(), PeerStorageError>; + + /// Is called upon connection failure. + fn on_connect_failure(&self, addr: SocketAddr) -> Result<(), PeerStorageError>; + + /// Is called upon disconnection. 
+ fn on_disconnect(&self, addr: SocketAddr) -> Result<(), PeerStorageError>; +} + +#[derive(Error, Debug)] +pub enum PeerStorageError { + #[error("data")] + Data(#[from] DataError), + #[error("db")] + Db(#[from] DbError), + #[error("encoding")] + Encoding(#[from] EncodingError), + #[error("parsing")] + Parsing(#[from] ParsingError), + #[error("peer storage not found")] + PeerStorageNotFound(#[from] PeerStorageNotFoundError), +} + +#[derive(Error, Debug)] +pub enum PeerStorageNotFoundError { + #[error("peer info for {0} not found")] + PeerInfo(SocketAddr), + #[error("peer {0} not found")] + Peer(SocketAddr), +} diff --git a/src/peer_storage_disk.rs b/src/peer_storage_disk.rs new file mode 100644 index 0000000..287679b --- /dev/null +++ b/src/peer_storage_disk.rs @@ -0,0 +1,468 @@ +use std::collections::HashMap; +use std::net::SocketAddr; +use std::path::PathBuf; +use std::str::{from_utf8, FromStr}; +use std::sync::{Arc, Mutex}; +use std::time::Duration; + +use leveldb::database::batch::{Batch, WriteBatch}; +use leveldb::database::{Database, DatabaseReader}; +use leveldb::iterator::{Iterable, LevelDBIterator}; +use leveldb::options::{Options, ReadOptions, WriteOptions}; +use leveldb::snapshots::Snapshots; +use serde::{Deserialize, Serialize}; + +use crate::error::{DataError, DbError, EncodingError, ParsingError}; +use crate::peer_storage::{PeerStorage, PeerStorageError, PeerStorageNotFoundError}; +use crate::utils::now_as_duration; + +const U64_LENGTH: usize = std::mem::size_of::(); + +/// PeerStorageDisk is an on-disk implementation of the PeerStorage interface using LevelDB. +pub struct PeerStorageDisk { + db: Database, + connected_peers: Mutex>, +} + +impl PeerStorageDisk { + /// Returns a new PeerStorageDisk instance. 
+ pub fn new(db_path: PathBuf) -> Result, PeerStorageError> { + let mut options = Options::new(); + options.create_if_missing = true; + let db = Database::open(&db_path, &options).map_err(|err| DbError::Open(db_path, err))?; + + Ok(Arc::new(PeerStorageDisk { + db, + connected_peers: Mutex::new(HashMap::new()), + })) + } + + /// Helper to lookup peer info + fn get_peer_info( + addr: SocketAddr, + db: &impl DatabaseReader, + ) -> Result, PeerStorageError> { + let key = compute_peer_key(addr); + let Some(encoded) = db + .get_u8(&ReadOptions::new(), &key) + .map_err(DbError::Read)? + else { + return Ok(None); + }; + let info = decode_peer_info(encoded)?; + + Ok(Some(info)) + } + + /// Helper to delete a peer + fn delete_peer( + &self, + addr: SocketAddr, + last_attempt: Duration, + last_success: Duration, + ) -> Result<(), PeerStorageError> { + let peer_key = compute_peer_key(addr); + let attempt_key = compute_last_attempt_time_key(last_attempt, Some(addr)); + let success_key = compute_last_success_time_key(last_success, Some(addr)); + let batch = WriteBatch::new(); + batch.delete_u8(&peer_key); + batch.delete_u8(&attempt_key); + batch.delete_u8(&success_key); + + self.db + .write(&WriteOptions::new(), &batch) + .map_err(|err| PeerStorageError::Db(DbError::Write(err))) + } + + /// Helper to return a copy of the connected set + fn get_connected_peers(&self) -> HashMap { + // copy the set of connected peers + let mut connected_peers = HashMap::new(); + for (addr, _) in self.connected_peers.lock().unwrap().iter() { + connected_peers.insert(*addr, true); + } + + connected_peers + } +} + +impl PeerStorage for PeerStorageDisk { + /// Stores a peer address. Returns true if the peer was newly added to storage. + fn store(&self, addr: SocketAddr) -> Result { + // do we know about it already? 
+ if Self::get_peer_info(addr, &self.db)?.is_some() { + // we've seen it + return Ok(false); + }; + + let info = PeerInfo { + first_seen: now_as_duration(), + last_success: Duration::ZERO, + last_attempt: Duration::ZERO, + }; + + let batch = WriteBatch::new(); + info.write_to_batch(addr, &batch)?; + + // compute last attempt by time db key + let attempt_key = compute_last_attempt_time_key(info.last_attempt, Some(addr)); + batch.put_u8(&attempt_key, &[0x1]); + + // write the batch + self.db + .write(&WriteOptions::new(), &batch) + .map_err(DbError::Write)?; + + Ok(true) + } + + /// Returns some peers for us to attempt to connect to. + fn get(&self, count: usize) -> Result, PeerStorageError> { + let start_key = compute_last_attempt_time_key(Duration::ZERO, None); + let end_key = compute_last_attempt_time_key(now_as_duration(), None); + let mut addrs = Vec::new(); + + let connected_peers = self.get_connected_peers(); + + // try finding peers + let snapshot = self.db.snapshot(); + let iter = snapshot + .keys_iter(&ReadOptions::new()) + .from(&start_key) + .to(&end_key); + + for key in iter { + let (_when, addr) = decode_last_attempt_time_key(&key)?; + if connected_peers.contains_key(&addr) { + // already connected + continue; + } + + // is it time to retry this address? + if let Some(info) = Self::get_peer_info(addr, &snapshot)? { + if !info.should_retry() { + continue; + } + } + + // add it to the list + addrs.push(addr); + if addrs.len() == count { + break; + } + } + + Ok(addrs) + } + + /// Returns some peers to tell others about last active less than "when" ago. 
+ fn get_since(&self, count: usize, when: Duration) -> Result, PeerStorageError> { + let start_key = compute_last_success_time_key(when, None); + let end_key = compute_last_success_time_key(now_as_duration(), None); + + let mut addrs = Vec::new(); + + // try finding peers + let snapshot = self.db.snapshot(); + let iter = snapshot + .keys_iter(&ReadOptions::new()) + .from(&start_key) + .to(&end_key) + .reverse(); + + for key in iter { + let (_when, addr) = decode_last_success_time_key(&key)?; + // add it to the list + addrs.push(addr); + if addrs.len() == count { + break; + } + } + + Ok(addrs) + } + + /// Explicitly remove a peer address from storage. + fn delete(&self, addr: SocketAddr) -> Result<(), PeerStorageError> { + let Some(info) = Self::get_peer_info(addr, &self.db)? else { + return Err(PeerStorageNotFoundError::PeerInfo(addr).into()); + }; + + self.delete_peer(addr, info.last_attempt, info.last_success) + } + + /// Called prior to attempting to connect to the peer. + fn on_connect_attempt(&self, addr: SocketAddr) -> Result<(), PeerStorageError> { + let Some(mut info) = Self::get_peer_info(addr, &self.db)? else { + return Err(PeerStorageNotFoundError::PeerInfo(addr).into()); + }; + + let batch = WriteBatch::new(); + + // delete last attempt by time entry + let attempt_key_old = compute_last_attempt_time_key(info.last_attempt, Some(addr)); + batch.delete_u8(&attempt_key_old); + + // update last attempt + info.last_attempt = now_as_duration(); + info.write_to_batch(addr, &batch)?; + + // compute new last attempt by time db key + let attempt_key_new = compute_last_attempt_time_key(info.last_attempt, Some(addr)); + batch.put(&attempt_key_new, &[0x1]); + + // write the batch + self.db + .write(&WriteOptions::new(), &batch) + .map_err(|err| PeerStorageError::Db(DbError::WriteBatch(err))) + } + + /// Called upon successful handshake with the peer. 
    fn on_connect_success(&self, addr: SocketAddr) -> Result<(), PeerStorageError> {
        let Some(mut info) = Self::get_peer_info(addr, &self.db)? else {
            return Err(PeerStorageNotFoundError::PeerInfo(addr).into());
        };

        let batch = WriteBatch::new();

        // delete last success by time entry
        let success_key_old = compute_last_success_time_key(info.last_success, Some(addr));
        batch.delete_u8(&success_key_old);

        // update last success
        info.last_success = now_as_duration();
        info.write_to_batch(addr, &batch)?;

        // compute new last success by time db key
        let success_key_new = compute_last_success_time_key(info.last_success, Some(addr));
        batch.put_u8(&success_key_new, &[0x1]);

        // write the batch
        self.db
            .write(&WriteOptions::new(), &batch)
            .map_err(DbError::Write)?;

        // save the connected status in memory
        let mut connected_peers = self.connected_peers.lock().unwrap();
        connected_peers.insert(addr, true);
        Ok(())
    }

    /// Called upon connection failure.
    fn on_connect_failure(&self, addr: SocketAddr) -> Result<(), PeerStorageError> {
        let Some(info) = Self::get_peer_info(addr, &self.db)? else {
            return Err(PeerStorageNotFoundError::PeerInfo(addr).into());
        };

        // drop peers that aren't worth retrying; keep the rest for later
        if info.should_delete() {
            return self.delete_peer(addr, info.last_attempt, info.last_success);
        }
        Ok(())
    }

    /// Is called upon disconnection.
    fn on_disconnect(&self, addr: SocketAddr) -> Result<(), PeerStorageError> {
        // only the in-memory connected set is updated; on-disk info is kept
        let mut connected_peers = self.connected_peers.lock().unwrap();
        match connected_peers.remove(&addr) {
            Some(_) => Ok(()),
            None => Err(PeerStorageNotFoundError::Peer(addr).into()),
        }
    }
}

/// Connectivity bookkeeping for a single peer address.
/// Fields are absolute timestamps stored as durations (set from
/// now_as_duration; presumably since the Unix epoch -- confirm);
/// Duration::ZERO means "never".
#[derive(Deserialize, Serialize)]
struct PeerInfo {
    // when we first learned of this address
    first_seen: Duration,
    // when we last attempted to connect
    last_attempt: Duration,
    // when we last completed a handshake
    last_success: Duration,
}

impl PeerInfo {
    /// Should we retry this connection?
    pub fn should_retry(&self) -> bool {
        // TODO: why not self.last_attempt == 0?
+ if self.last_attempt == Duration::ZERO { + // never been tried + return true; + } + + let last_seen = if self.last_success == Duration::ZERO { + // never successfully connected, go by first seen + self.first_seen + } else { + self.last_success + }; + + let now = now_as_duration(); + let hours_since_last_seen = (now - last_seen).as_secs() / (60 * 60); + let minutes_since_last_attempt = (now - self.last_attempt).as_secs() / 60; + let hours_since_last_attempt = minutes_since_last_attempt / 60; + + if hours_since_last_seen == 0 { + return minutes_since_last_attempt > 10; + } + + let retry_interval = (hours_since_last_seen as f64).sqrt().ceil() as u64; + + hours_since_last_attempt > retry_interval + } + + /// Should we delete this peer? + fn should_delete(&self) -> bool { + if self.last_success == Duration::ZERO { + // if we fail connecting on the first try delete it + return true; + } + // has it been over a week since we connected to it? + let week = Duration::from_secs(7 * 24 * 60 * 60); + + self.last_success > week + } + + /// Helper to write the peer info to a batch + fn write_to_batch(&self, addr: SocketAddr, batch: &WriteBatch) -> Result<(), PeerStorageError> { + let key = compute_peer_key(addr); + let encoded = encode_peer_info(self)?; + batch.put_u8(&key, &encoded); + Ok(()) + } +} + +// leveldb schema + +// p{addr} -> serialized peerInfo +// a{time}{addr} -> 1 (time is of last attempt) +// s{time}{addr} -> 1 (time is of last success) + +const PEER_PREFIX: u8 = b'p'; +const PEER_LAST_ATTEMPT_TIME_PREFIX: u8 = b'a'; +const PEER_LAST_SUCCESS_TIME_PREFIX: u8 = b's'; + +const PREFIX_LENGTH: usize = 1; + +fn compute_peer_key(addr: SocketAddr) -> Vec { + let mut key = Vec::new(); + key.push(PEER_PREFIX); + key.extend_from_slice(addr.to_string().as_bytes()); + key +} + +fn compute_last_attempt_time_key(when: Duration, addr: Option) -> Vec { + let mut key = Vec::new(); + key.push(PEER_LAST_ATTEMPT_TIME_PREFIX); + 
key.extend_from_slice(&(when.as_secs()).to_be_bytes()); + if let Some(addr) = addr { + key.extend_from_slice(addr.to_string().as_bytes()); + } + key +} + +fn decode_last_attempt_time_key(key: &[u8]) -> Result<(u64, SocketAddr), PeerStorageError> { + let when = u64::from_be_bytes( + key[PREFIX_LENGTH..][..U64_LENGTH] + .try_into() + .map_err(DataError::U64)?, + ); + let addr_str = from_utf8(&key[PREFIX_LENGTH + U64_LENGTH..]).map_err(DataError::String)?; + let addr = SocketAddr::from_str(addr_str).map_err(ParsingError::IpAddress)?; + Ok((when, addr)) +} + +fn compute_last_success_time_key(when: Duration, addr: Option) -> Vec { + let mut key = Vec::new(); + key.push(PEER_LAST_SUCCESS_TIME_PREFIX); + key.extend_from_slice(&(when.as_secs()).to_be_bytes()); + if let Some(addr) = addr { + key.extend_from_slice(addr.to_string().as_bytes()); + } + key +} + +fn decode_last_success_time_key(key: &[u8]) -> Result<(Duration, SocketAddr), PeerStorageError> { + let when = u64::from_be_bytes( + key[PREFIX_LENGTH..][..U64_LENGTH] + .try_into() + .map_err(DataError::U64)?, + ); + let who_str = from_utf8(&key[PREFIX_LENGTH + U64_LENGTH..]).map_err(DataError::String)?; + let who = SocketAddr::from_str(who_str).map_err(ParsingError::IpAddress)?; + Ok((Duration::from_secs(when), who)) +} + +fn encode_peer_info(info: &PeerInfo) -> Result, PeerStorageError> { + let encode = bincode::serialize(&info).map_err(EncodingError::BincodeEncode)?; + Ok(encode) +} + +fn decode_peer_info(encoded: Vec) -> Result { + let decode = + bincode::deserialize::(&encoded).map_err(EncodingError::BincodeDecode)?; + Ok(decode) +} + +#[cfg(test)] +mod test { + use faster_hex::hex_string; + + use super::*; + use crate::peer::PEER_ADDR_SELF; + + #[test] + fn test_compute_last_attempt_key() { + let when = Duration::from_secs(123456789); + let addr = PEER_ADDR_SELF; + let key = compute_last_attempt_time_key(when, Some(addr)); + assert_eq!(key[0], PEER_LAST_ATTEMPT_TIME_PREFIX); + assert_eq!(key[1..][..U64_LENGTH], 
when.as_secs().to_be_bytes());
        assert_eq!(key[1 + U64_LENGTH..], addr.to_string().as_bytes()[..]);
    }

    #[test]
    fn test_decode_last_attempt_time_key() {
        // round-trip: encode then decode must yield the same time and address
        let when = Duration::from_secs(123456789);
        let addr = PEER_ADDR_SELF;
        let key = compute_last_attempt_time_key(when, Some(addr));
        let result = decode_last_attempt_time_key(&key).unwrap();
        assert_eq!(result.0, when.as_secs());
        assert_eq!(result.1, addr);
    }

    #[test]
    fn test_compute_last_attempt_time_key_start() {
        // 0x61 == b'a' prefix followed by a zero timestamp
        let key = compute_last_attempt_time_key(Duration::ZERO, None);
        assert_eq!(hex_string(&key), "610000000000000000");
    }

    #[test]
    fn test_compute_last_attempt_time_key_end() {
        // Duration::MAX yields a saturated u64 seconds field
        let key = compute_last_attempt_time_key(Duration::MAX, None);
        assert_eq!(hex_string(&key), "61ffffffffffffffff");
    }

    #[test]
    fn test_compute_last_success_time_key() {
        let when = Duration::from_secs(123456789);
        let addr = PEER_ADDR_SELF;
        let key = compute_last_success_time_key(when, Some(addr));
        assert_eq!(key[0], PEER_LAST_SUCCESS_TIME_PREFIX);
        assert_eq!(key[1..][..U64_LENGTH], when.as_secs().to_be_bytes());
        assert_eq!(key[1 + U64_LENGTH..], addr.to_string().as_bytes()[..]);
    }

    #[test]
    fn test_decode_last_success_time_key() {
        // round-trip for the success-time index key
        let when = Duration::from_secs(123456789);
        let key = compute_last_success_time_key(when, Some(PEER_ADDR_SELF));
        let result = decode_last_success_time_key(&key).unwrap();
        assert_eq!(result.0, when);
        assert_eq!(result.1, PEER_ADDR_SELF);
    }

    #[test]
    fn test_compute_peer_key() {
        let addr = PEER_ADDR_SELF;
        let key = compute_peer_key(addr);
        assert_eq!(key[0], PEER_PREFIX);
        assert_eq!(key[1..], addr.to_string().as_bytes()[..]);
    }
}
diff --git a/src/processor.rs b/src/processor.rs
new file mode 100644
index 0000000..234888d
--- /dev/null
+++ b/src/processor.rs
@@ -0,0 +1,1669 @@
use std::collections::HashMap;
use std::net::SocketAddr;
use std::str::from_utf8;
use std::sync::{Arc, Mutex};
use std::time::Instant;

use
ed25519_compact::{PublicKey, Signature};
use faster_hex::hex_decode;
use ibig::UBig;
use log::{error, info};
use sha3::{Digest, Sha3_256};
use thiserror::Error;
use tokio::sync::mpsc::{
    channel, unbounded_channel, Receiver, Sender, UnboundedReceiver, UnboundedSender,
};
use tokio::sync::oneshot;
use tokio::sync::Mutex as AsyncMutex;
use tokio::task::JoinHandle;

use crate::block::{
    compute_chain_work, compute_hash_list_root, Block, BlockError, BlockHeader, BlockID,
};
use crate::block_storage::{BlockStorage, BlockStorageError, BlockStorageNotFoundError};
use crate::block_storage_disk::BlockStorageDisk;
use crate::checkpoints::{checkpoint_check, CheckpointError};
use crate::constants::{
    BITCOIN_CASH_RETARGET_ALGORITHM_HEIGHT, BLOCKS_UNTIL_NEW_SERIES, BLOCKS_UNTIL_REWARD_HALVING,
    BLOCKS_UNTIL_TRANSACTIONS_PER_BLOCK_DOUBLING, CRUZBITS_PER_CRUZ, INITIAL_COINBASE_REWARD,
    INITIAL_MAX_TRANSACTIONS_PER_BLOCK, INITIAL_TARGET, MAX_FUTURE_SECONDS, MAX_MEMO_LENGTH,
    MAX_MONEY, MAX_NUMBER, MAX_TRANSACTIONS_PER_BLOCK,
    MAX_TRANSACTIONS_PER_BLOCK_EXCEEDED_AT_HEIGHT, MAX_TRANSACTION_QUEUE_LENGTH,
    MIN_AMOUNT_CRUZBITS, MIN_FEE_CRUZBITS, NUM_BLOCKS_FOR_MEDIAN_TIMESTAMP, RETARGET_INTERVAL,
    RETARGET_SMA_WINDOW, RETARGET_TIME, TARGET_SPACING,
};
use crate::error::{impl_debug_error_chain, ChannelError, EncodingError, ErrChain};
use crate::ledger::{BranchType, Ledger, LedgerError, LedgerNotFoundError};
use crate::ledger_disk::LedgerDisk;
use crate::shutdown::{ShutdownChanReceiver, SpawnedError};
use crate::transaction::{Transaction, TransactionError, TransactionID};
use crate::transaction_queue::{TransactionQueue, TransactionQueueError};
use crate::transaction_queue_memory::TransactionQueueMemory;
use crate::utils::now_as_secs;

// Channel type aliases.
// NOTE(review): all generic parameters below were stripped in extraction and
// have been reconstructed from their usage in run()/new() in this file.

/// Senders registered for main-chain tip change notifications.
pub type TipChangeSenderChan = UnboundedSender<TipChange>;
pub type TipChangeReceiverChan = UnboundedReceiver<TipChange>;
pub type TipChangeChan = (TipChangeSenderChan, TipChangeReceiverChan);
// (un)registration channel: carries tip-change senders to the run loop;
// the receiver half is parked in a Mutex<Option<..>> until run() takes it
type TipChangeChanChan = (
    UnboundedSender<TipChangeSenderChan>,
    Mutex<Option<UnboundedReceiver<TipChangeSenderChan>>>,
);

type TxSenderChan = Sender<TxToProcess>;
type TxReceiverChan = Receiver<TxToProcess>;
type TxChan = (TxSenderChan, Mutex<Option<TxReceiverChan>>);

type BlockSenderChan = Sender<BlockToProcess>;
type BlockReceiverChan = Receiver<BlockToProcess>;
type BlockChan = (BlockSenderChan, Mutex<Option<BlockReceiverChan>>);

pub type NewTxSenderChan = Sender<NewTx>;
pub type NewTxReceiverChan = Receiver<NewTx>;
pub type NewTxChan = (NewTxSenderChan, NewTxReceiverChan);

type NewTxChanChan = (
    UnboundedSender<NewTxSenderChan>,
    Mutex<Option<UnboundedReceiver<NewTxSenderChan>>>,
);
/// One-shot channels used to hand a processing verdict back to the submitter.
pub type BlockResultChan = oneshot::Sender<Result<(), ProcessBlockError>>;
pub type TransactionResultChan = oneshot::Sender<Result<(), ProcessTransactionError>>;

/// Processes blocks and transactions in order to construct the ledger.
/// It also manages the storage of all block chain data as well as inclusion of new transactions into the transaction queue.
pub struct Processor {
    genesis_id: &'static BlockID,
    /// storage of raw block data
    block_store: Arc<BlockStorageDisk>,
    /// queue of transactions to confirm
    tx_queue: Arc<TransactionQueueMemory>,
    /// ledger built from processing blocks
    ledger: Arc<LedgerDisk>,
    /// receive new transactions to process on this channel
    tx_chan: TxChan,
    /// receive new blocks to process on this channel
    block_chan: BlockChan,
    /// receive registration requests for new transaction notifications
    register_new_tx_chan: NewTxChanChan,
    /// receive unregistration requests for new transactions
    unregister_new_tx_chan: NewTxChanChan,
    /// receive registration requests for tip change notifications
    register_tip_change_chan: TipChangeChanChan,
    /// receive unregistration requests for tip change notifications
    unregister_tip_change_chan: TipChangeChanChan,
    /// channels needing notification of newly processed transactions
    new_tx_channels: AsyncMutex<Vec<NewTxSenderChan>>,
    /// channels needing notification of changes to main chain tip blocks
    tip_change_channels: AsyncMutex<Vec<TipChangeSenderChan>>,
    /// parked until run() takes it; fires on process shutdown
    shutdown_chan_rx: Mutex<Option<ShutdownChanReceiver>>,
}

/// Is a message sent to registered new transaction channels when a transaction is queued.
pub struct NewTx {
    /// transaction id
    pub transaction_id: TransactionID,
    /// new transaction
    pub transaction: Transaction,
    /// who sent it
    pub source: SocketAddr,
}

/// Message sent to registered new tip channels on main chain tip (dis-)connection.
pub struct TipChange {
    /// block ID of the main chain tip block
    pub block_id: BlockID,
    /// full block
    pub block: Block,
    /// who sent the block that caused this change
    pub source: SocketAddr,
    /// true if the tip has been connected. false for disconnected
    pub connect: bool,
    /// true if the tip has been connected and more connections are expected
    pub more: bool,
}

/// Work item carried on the processor's transaction channel.
struct TxToProcess {
    /// transaction ID
    id: TransactionID,
    /// transaction to process
    tx: Transaction,
    /// who sent it
    source: SocketAddr,
    /// channel to receive the result
    result_chan: TransactionResultChan,
}

/// Work item carried on the processor's block channel.
struct BlockToProcess {
    /// block ID
    id: BlockID,
    /// block to process
    block: Block,
    /// who sent it
    source: SocketAddr,
    /// channel to receive the result
    result_chan: BlockResultChan,
}

impl Processor {
    /// Returns a new Processor instance.
+ pub fn new( + genesis_id: &'static BlockID, + block_store: Arc, + tx_queue: Arc, + ledger: Arc, + shutdown_chan_rx: ShutdownChanReceiver, + ) -> Arc { + let tx_chan = channel(100); + let block_chan = channel(10); + let register_new_tx_chan = unbounded_channel(); + let unregister_new_tx_chan = unbounded_channel(); + let register_tip_change_chan = unbounded_channel(); + let unregister_tip_change_chan = unbounded_channel(); + + Arc::new(Self { + genesis_id, + block_store, + tx_queue, + ledger, + tx_chan: (tx_chan.0, Mutex::new(Some(tx_chan.1))), + block_chan: (block_chan.0, Mutex::new(Some(block_chan.1))), + register_new_tx_chan: ( + register_new_tx_chan.0, + Mutex::new(Some(register_new_tx_chan.1)), + ), + unregister_new_tx_chan: ( + unregister_new_tx_chan.0, + Mutex::new(Some(unregister_new_tx_chan.1)), + ), + register_tip_change_chan: ( + register_tip_change_chan.0, + Mutex::new(Some(register_tip_change_chan.1)), + ), + unregister_tip_change_chan: ( + unregister_tip_change_chan.0, + Mutex::new(Some(unregister_tip_change_chan.1)), + ), + new_tx_channels: AsyncMutex::new(Vec::new()), + tip_change_channels: AsyncMutex::new(Vec::new()), + shutdown_chan_rx: Mutex::new(Some(shutdown_chan_rx)), + }) + } + + /// Spawns the Processor's main loop. + pub fn spawn(self: &Arc) -> JoinHandle> { + let arc_self = Arc::clone(self); + tokio::spawn(async move { arc_self.run().await.map_err(Into::into) }) + } + + /// Runs the Processor's main loop. + /// It verifies and processes blocks and transactions. 
    pub async fn run(&self) -> Result<(), ProcessorError> {
        // take sole ownership of every parked receiver; this is why run()
        // must only be invoked once per Processor
        let mut tx_chan_rx = self.tx_chan.1.lock().unwrap().take().unwrap();
        let mut block_chan_rx = self.block_chan.1.lock().unwrap().take().unwrap();
        let mut register_new_tx_chan_rx =
            self.register_new_tx_chan.1.lock().unwrap().take().unwrap();
        let mut unregister_new_tx_chan_rx = self
            .unregister_new_tx_chan
            .1
            .lock()
            .unwrap()
            .take()
            .unwrap();
        let mut register_tip_change_chan_rx = self
            .register_tip_change_chan
            .1
            .lock()
            .unwrap()
            .take()
            .unwrap();
        let mut unregister_tip_change_chan_rx = self
            .unregister_tip_change_chan
            .1
            .lock()
            .unwrap()
            .take()
            .unwrap();
        let mut shutdown_chan_rx = self.shutdown_chan_rx.lock().unwrap().take().unwrap();

        loop {
            tokio::select! {
                Some(tx_to_process) = tx_chan_rx.recv() => {
                    // process a transaction
                    let result = self.process_transaction(
                        tx_to_process.id,
                        tx_to_process.tx,
                        tx_to_process.source,
                    ).await;

                    // failing to deliver the verdict is logged, not fatal
                    if let Err(err) = tx_to_process.result_chan.send(result).map_err(ProcessTransactionError::from) {
                        error!("{:?}", err);
                    }
                }

                Some(block_to_process) = block_chan_rx.recv() => {
                    // process a block
                    let txs_len = block_to_process.block.transactions.len();
                    let start = Instant::now();

                    let result = self.process_block(
                        block_to_process.id,
                        block_to_process.block,
                        block_to_process.source,
                    ).await;

                    let duration = start.elapsed();

                    info!(
                        "Processing took {:?}, {} transaction(s), transaction queue length: {}",
                        duration,
                        txs_len,
                        self.tx_queue.len()
                    );

                    // send back the result
                    if let Err(err) = block_to_process.result_chan.send(result).map_err(ProcessBlockError::from) {
                        error!("{:?}", err);
                    }
                }

                Some(ch) = register_new_tx_chan_rx.recv() => {
                    let mut new_tx_channels = self.new_tx_channels.lock().await;
                    new_tx_channels.push(ch);
                }

                Some(ch) = unregister_new_tx_chan_rx.recv() => {
                    // unregister by channel identity
                    let mut new_tx_channels = self.new_tx_channels.lock().await;
                    if let Some(index) = new_tx_channels
                        .iter()
                        .position(|c| c.same_channel(&ch)) {
                        new_tx_channels.remove(index);
                    }
                }

                Some(ch) = register_tip_change_chan_rx.recv() => {
                    let mut tip_change_channels =
                        self.tip_change_channels.lock().await;
                    tip_change_channels.push(ch);
                }

                Some(ch) = unregister_tip_change_chan_rx.recv() => {
                    // unregister by channel identity
                    let mut tip_change_channels =
                        self.tip_change_channels.lock().await;
                    if let Some(index) = tip_change_channels
                        .iter()
                        .position(|c| c.same_channel(&ch)) {
                        tip_change_channels.remove(index);
                    }
                }

                _ = &mut shutdown_chan_rx => {
                    info!("Processor shutting down");
                    break Ok(())
                }
            }
        }
    }

    /// Is called to process a new candidate transaction from the transaction queue
    pub async fn process_candidate_transaction(
        &self,
        id: &TransactionID,
        tx: &Transaction,
        from: &SocketAddr,
    ) -> Result<(), ProcessTransactionError> {
        // hand the work to the run() loop and await its verdict
        let (result_chan_tx, result_chan_rx) = oneshot::channel();
        self.tx_chan
            .0
            .send(TxToProcess {
                id: *id,
                tx: tx.clone(),
                source: *from,
                result_chan: result_chan_tx,
            })
            .await?;
        result_chan_rx.await?
    }

    /// Called to process a new candidate block chain tip.
    pub async fn process_candidate_block(
        &self,
        id: BlockID,
        block: Block,
        source: SocketAddr,
    ) -> Result<(), ProcessBlockError> {
        // same hand-off pattern as process_candidate_transaction
        let (result_chan_tx, result_chan_rx) = oneshot::channel();
        let block_to_process = BlockToProcess {
            id,
            block,
            source,
            result_chan: result_chan_tx,
        };

        self.block_chan.0.send(block_to_process).await?;
        result_chan_rx.await?
    }

    /// Called to register to receive notifications of newly queued transactions.
+    pub fn register_for_new_transactions(&self, new_tx_chan_tx: NewTxSenderChan) {
+        // unbounded/sync send; routed to the `run` loop which does the push
+        self.register_new_tx_chan
+            .0
+            .send(new_tx_chan_tx)
+            .expect("send on register new tx channel");
+    }
+
+    /// Called to unregister to receive notifications of newly queued transactions
+    pub fn unregister_for_new_transactions(&self, new_tx_chan_tx: NewTxSenderChan) {
+        self.unregister_new_tx_chan
+            .0
+            .send(new_tx_chan_tx)
+            .expect("send on unregister new tx channel");
+    }
+
+    /// Called to register to receive notifications of tip block changes.
+    pub fn register_for_tip_change(&self, tip_change_chan_tx: TipChangeSenderChan) {
+        self.register_tip_change_chan
+            .0
+            .send(tip_change_chan_tx)
+            .expect("send on register tip change channel")
+    }
+
+    /// Called to unregister to receive notifications of tip block changes.
+    pub fn unregister_for_tip_change(&self, tip_change_chan_tx: TipChangeSenderChan) {
+        self.unregister_tip_change_chan
+            .0
+            .send(tip_change_chan_tx)
+            .expect("send on unregister tip change channel");
+    }
+
+    /// Process a transaction
+    ///
+    /// Validates a loose (unconfirmed) transaction against the current tip,
+    /// adds it to the transaction queue, and notifies registered listeners.
+    /// Returns Ok(()) silently when the transaction was already queued.
+    pub async fn process_transaction(
+        &self,
+        id: TransactionID,
+        tx: Transaction,
+        source: SocketAddr,
+    ) -> Result<(), ProcessTransactionError> {
+        info!("Processing transaction {}", id);
+
+        // min fee? if not waste no more time
+        // NOTE(review): this `expect` panics on a fee-less transaction, and it
+        // runs BEFORE the coinbase/fee sanity checks below. If a peer can
+        // deliver a transaction with `fee: None` here, that is a remotely
+        // triggerable panic — confirm callers pre-validate, or return
+        // FeeMissing instead.
+        if tx.fee.expect("transaction should have a fee") < MIN_FEE_CRUZBITS {
+            return Err(ProcessTransactionError::MinimumFee(
+                id,
+                (MIN_FEE_CRUZBITS / CRUZBITS_PER_CRUZ) as f64,
+            ));
+        }
+
+        // min amount? if not waste no more time
+        if tx.amount < MIN_AMOUNT_CRUZBITS {
+            return Err(ProcessTransactionError::AmountTooSmall(
+                id,
+                (MIN_AMOUNT_CRUZBITS / CRUZBITS_PER_CRUZ) as f64,
+            ));
+        }
+
+        // context-free checks
+        Self::check_transaction(&id, &tx)?;
+
+        // no loose coinbases
+        if tx.is_coinbase() {
+            return Err(ProcessTransactionError::CoinbaseInBlockOnly(id));
+        }
+
+        // is the queue full?
+        if self.tx_queue.len() >= MAX_TRANSACTION_QUEUE_LENGTH as usize {
+            return Err(ProcessTransactionError::QueueIsFull(id));
+        }
+
+        // is it confirmed already?
+        if self.ledger.get_transaction_index(&id)?.is_some() {
+            return Err(ProcessTransactionError::ConfirmedAlready(id));
+        }
+
+        // check series, maturity and expiration
+        let Some((_block_id, tip_height)) = self.ledger.get_chain_tip()? else {
+            return Err(LedgerNotFoundError::ChainTip.into());
+        };
+
+        // is the series current for inclusion in the next block?
+        if !Self::check_transaction_series(&tx, tip_height + 1) {
+            return Err(ProcessTransactionError::SeriesInvalid(id));
+        }
+
+        // would it be mature if included in the next block?
+        if !tx.is_mature(tip_height + 1) {
+            return Err(ProcessTransactionError::NotMature(id));
+        }
+
+        // is it expired if included in the next block?
+        if tx.is_expired(tip_height + 1) {
+            return Err(ProcessTransactionError::Expired(
+                id,
+                tip_height,
+                // safe: is_expired returning true implies `expires` is set
+                tx.expires.expect("transaction should expire"),
+            ));
+        }
+
+        // verify signature
+        if !tx.verify()? {
+            return Err(ProcessTransactionError::SignatureVerificationFailed(id));
+        }
+
+        // rejects a transaction if sender would have insufficient balance
+        if !self.tx_queue.add(&id, &tx)? {
+            // don't notify others if the transaction already exists in the queue
+            return Ok(());
+        }
+
+        // notify channels; a failed send is logged and does not abort the loop
+        let new_tx_channels = self.new_tx_channels.lock().await;
+        for new_tx in new_tx_channels.iter() {
+            if let Err(err) = new_tx
+                .send(NewTx {
+                    transaction_id: id,
+                    transaction: tx.clone(),
+                    source,
+                })
+                .await
+                .map_err(ProcessTransactionError::from)
+            {
+                error!("{:?}", err)
+            }
+        }
+        Ok(())
+    }
+
+    /// Context-free transaction sanity checker
+    ///
+    /// Validates structural invariants only (field presence, sizes, ranges);
+    /// requires no ledger or chain state. Used both for loose transactions
+    /// and for every transaction inside a candidate block.
+    fn check_transaction(
+        id: &TransactionID,
+        tx: &Transaction,
+    ) -> Result<(), ProcessTransactionError> {
+        // sane-ish time.
+        // transaction timestamps are strictly for user and application usage.
+        // we make no claims to their validity and rely on them for nothing.
+        if tx.time > MAX_NUMBER {
+            return Err(ProcessTransactionError::TimeTooLarge(*id));
+        }
+
+        // no nonces larger than i32
+        if tx.nonce > i32::MAX as u32 {
+            return Err(ProcessTransactionError::NonceTooLarge(*id));
+        }
+
+        if tx.is_coinbase() {
+            // coinbases may not carry sender, fee, maturity, expiration or
+            // signature — each is rejected with a specific error
+            // no sender in coinbase
+            if tx.from.is_some() {
+                return Err(ProcessTransactionError::CoinbaseSenderNotAllowed(*id));
+            }
+
+            // no fee in coinbase
+            if tx.fee.is_some() {
+                return Err(ProcessTransactionError::CoinbaseFeeNotAllowed(*id));
+            }
+
+            // no maturity for coinbase
+            if tx.matures.is_some() {
+                return Err(ProcessTransactionError::CoinbaseMaturityNotAllowed(*id));
+            }
+
+            // no expiration for coinbase
+            if tx.expires.is_some() {
+                return Err(ProcessTransactionError::CoinbaseExpired(*id));
+            }
+
+            // no signature on coinbase
+            if tx.signature.is_some() {
+                return Err(ProcessTransactionError::CoinbaseSignatureNotAllowed(*id));
+            }
+        } else {
+            // non-coinbase: sender, fee and signature are mandatory
+            // sanity check sender
+            if let Some(from) = tx.from {
+                if from.len() != PublicKey::BYTES {
+                    return Err(ProcessTransactionError::SenderInvalid(*id));
+                }
+            } else {
+                return Err(ProcessTransactionError::SenderMissing(*id));
+            }
+
+            // sanity check fee
+            if let Some(fee) = tx.fee {
+                if fee > MAX_MONEY {
+                    return Err(ProcessTransactionError::FeeTooLarge(*id));
+                }
+            } else {
+                return Err(ProcessTransactionError::FeeMissing(*id));
+            }
+
+            // sanity check maturity (optional field)
+            if let Some(matures) = tx.matures {
+                if matures > MAX_NUMBER {
+                    return Err(ProcessTransactionError::MaturityTooLarge(*id));
+                }
+            }
+
+            // sanity check expiration (optional field)
+            if let Some(expires) = tx.expires {
+                if expires > MAX_NUMBER {
+                    return Err(ProcessTransactionError::ExpirationTooLarge(*id));
+                }
+            }
+
+            // sanity check signature
+            if let Some(signature) = tx.signature {
+                if signature.len() != Signature::BYTES {
+                    return Err(ProcessTransactionError::SignatureInvalid(*id));
+                }
+            } else {
+                return Err(ProcessTransactionError::SignatureMissing(*id));
+            }
+        }
+
+        // sanity check recipient
+        if tx.to.len() != PublicKey::BYTES {
+            return Err(ProcessTransactionError::RecipientInvalid(*id));
+        }
+
+        // sanity check amount
+        if tx.amount > MAX_MONEY {
+            return Err(ProcessTransactionError::AmountTooLarge(*id));
+        }
+
+        if let Some(memo) = &tx.memo {
+            // make sure memo is valid ascii/utf8
+            // NOTE(review): if `memo` is a Rust String this from_utf8 check can
+            // never fail (Strings are always valid UTF-8) — confirm the field's
+            // actual type; the check only matters if it is raw bytes.
+            if from_utf8(memo.as_bytes()).is_err() {
+                return Err(ProcessTransactionError::MemoCharactersInvalid(*id));
+            }
+            // check memo length
+            if memo.len() > MAX_MEMO_LENGTH {
+                return Err(ProcessTransactionError::MemoLengthExceeded(*id));
+            }
+        }
+
+        // sanity check series
+        if tx.series > MAX_NUMBER {
+            return Err(ProcessTransactionError::SeriesTooLarge(*id));
+        }
+        Ok(())
+    }
+
+    /// The series must be within the acceptable range given the current height
+    ///
+    /// Returns true when `tx.series` is acceptable for a block at `height`.
+    pub fn check_transaction_series(tx: &Transaction, height: u64) -> bool {
+        if tx.from.is_none() {
+            // coinbases must start a new series right on time
+            return tx.series == height / BLOCKS_UNTIL_NEW_SERIES + 1;
+        }
+
+        // user transactions have a grace period (1 full series) to mitigate effects
+        // of any potential queueing delay and/or reorgs near series switchover time
+        let high = height / BLOCKS_UNTIL_NEW_SERIES + 1;
+        let mut low = high - 1;
+
+        // series numbers start at 1; clamp the lower bound for the first series
+        if low == 0 {
+            low = 1;
+        }
+
+        tx.series >= low && tx.series <= high
+    }
+
+    /// Process a block
+    ///
+    /// Entry point for a candidate block: skips already-known blocks, runs
+    /// context-free checks, handles the genesis special case, rejects
+    /// orphans, and otherwise hands off to `accept_block`.
+    pub async fn process_block(
+        &self,
+        id: BlockID,
+        block: Block,
+        source: SocketAddr,
+    ) -> Result<(), ProcessBlockError> {
+        info!("Processing block {}", id);
+
+        // did we process this block already?
+        let branch_type = self.ledger.get_branch_type(&id)?;
+
+        if branch_type != BranchType::Unknown {
+            info!("Already processed block {}", id);
+            return Ok(());
+        }
+
+        // sanity check the block
+        let now = now_as_secs();
+        Self::check_block(&id, &block, now)?;
+
+        // have we processed its parent?
+        let branch_type = self.ledger.get_branch_type(&block.header.previous)?;
+
+        if branch_type != BranchType::Main && branch_type != BranchType::Side {
+            // parent unknown: only the genesis block is allowed to have no
+            // processed parent
+            if id == *self.genesis_id {
+                // store it
+                self.block_store.store(&id, &block, now)?;
+                // begin the ledger
+                self.connect_block(&id, &block, &source, false).await?;
+                info!("Connected block {}", id);
+                return Ok(());
+            }
+
+            // current block is an orphan
+            return Err(ProcessBlockError::Orphan(id));
+        }
+
+        // attempt to extend the chain
+        self.accept_block(&id, &block, now, source).await
+    }
+
+    /// Context-free block sanity checker
+    ///
+    /// Structural checks only: header ranges, PoW against the declared
+    /// target, checkpoint match, transaction-count and coinbase layout,
+    /// per-transaction sanity, duplicate detection and hash list root.
+    /// Chain-context checks (target/work/median time) live in accept_block.
+    fn check_block(id: &BlockID, block: &Block, now: u64) -> Result<(), ProcessBlockError> {
+        // sanity check time
+        if block.header.time > MAX_NUMBER {
+            return Err(ProcessBlockError::TimeTooLarge(*id));
+        }
+
+        // check timestamp isn't too far in the future
+        if block.header.time > now + MAX_FUTURE_SECONDS {
+            return Err(ProcessBlockError::TimestampInvalid(
+                block.header.time,
+                now,
+                *id,
+            ));
+        }
+
+        // proof-of-work should satisfy declared target
+        if !block.check_pow(id) {
+            return Err(ProcessBlockError::ProofOfWorkInsufficient(*id));
+        }
+
+        // sanity check nonce
+        if block.header.nonce > MAX_NUMBER {
+            return Err(ProcessBlockError::NonceTooLarge(*id));
+        }
+
+        // sanity check height
+        if block.header.height > MAX_NUMBER {
+            return Err(ProcessBlockError::HeightTooLarge(*id));
+        }
+
+        // check against known checkpoints
+        checkpoint_check(id, block.header.height)?;
+
+        // sanity check transaction count
+        if block.header.transaction_count > MAX_TRANSACTIONS_PER_BLOCK {
+            return Err(ProcessBlockError::TransactionCountTooLarge(*id));
+        }
+
+        // header count must agree with the actual body
+        if block.header.transaction_count as usize != block.transactions.len() {
+            return Err(ProcessBlockError::TransactionCountMismatch(*id));
+        }
+
+        // must have at least one transaction
+        if block.transactions.is_empty() {
+            return Err(ProcessBlockTransactionsError::Missing(*id).into());
+        }
+
+        // first tx must be a coinbase
+        if !block.transactions[0].is_coinbase() {
+            return Err(ProcessBlockTransactionsError::CoinbaseMissing(*id).into());
+        }
+
+        // check max number of transactions
+        let max = Self::compute_max_transactions_per_block(block.header.height);
+
+        if block.transactions.len() > max as usize {
+            return Err(ProcessBlockTransactionsError::Exceeded(
+                *id,
+                block.transactions.len(),
+                max,
+            )
+            .into());
+        }
+
+        // the rest must not be coinbases
+        if block.transactions.len() > 1 {
+            for transaction in block.transactions[1..].iter() {
+                if transaction.is_coinbase() {
+                    return Err(ProcessBlockTransactionsError::CoinbaseMultiple(*id).into());
+                }
+            }
+        }
+
+        // basic transaction checks that don't depend on context
+        let mut tx_ids = HashMap::new();
+        for tx in block.transactions.iter() {
+            let id = tx.id()?;
+            Self::check_transaction(&id, tx)?;
+            tx_ids.insert(id, true);
+        }
+
+        // duplicate detection: the map deduplicates by transaction id, so a
+        // smaller map than the transaction list means at least two
+        // transactions shared an id. (A HashSet would express this better.)
+        if tx_ids.len() != block.transactions.len() {
+            return Err(ProcessBlockTransactionsError::Duplicate(*id).into());
+        }
+
+        // verify hash list root
+        let mut hasher = Sha3_256::new();
+        let hash_list_root = compute_hash_list_root(&mut hasher, &block.transactions)?;
+
+        if hash_list_root != block.header.hash_list_root {
+            return Err(ProcessBlockError::HashListRootMismatch(*id));
+        }
+        Ok(())
+    }
+
+    /// Computes the maximum number of transactions allowed in a block at the given height.
Inspired by BIP 101
+    ///
+    /// Growth is piecewise-linear between capacity doublings until a terminal
+    /// height, after which the hard cap applies.
+    pub fn compute_max_transactions_per_block(height: u64) -> u32 {
+        if height >= MAX_TRANSACTIONS_PER_BLOCK_EXCEEDED_AT_HEIGHT {
+            // I guess we can revisit this sometime in the next 35 years if necessary
+            return MAX_TRANSACTIONS_PER_BLOCK;
+        }
+
+        // piecewise-linear-between-doublings growth
+        let doublings = height / BLOCKS_UNTIL_TRANSACTIONS_PER_BLOCK_DOUBLING;
+        // `1 << doublings` on a 64-bit value would overflow at 64
+        if doublings >= 64 {
+            panic!("Overflow uint64")
+        }
+        let remainder = height % BLOCKS_UNTIL_TRANSACTIONS_PER_BLOCK_DOUBLING;
+        let factor = 1 << doublings;
+
+        // linear interpolation between factor and 2*factor over the interval
+        let interpolate = INITIAL_MAX_TRANSACTIONS_PER_BLOCK as u64 * factor as u64 * remainder
+            / BLOCKS_UNTIL_TRANSACTIONS_PER_BLOCK_DOUBLING;
+        INITIAL_MAX_TRANSACTIONS_PER_BLOCK * factor + interpolate as u32
+    }
+
+    /// Attempt to extend the chain with the new block
+    ///
+    /// Context-dependent validation: height continuity, declared target and
+    /// cumulative chain work, median-time-past, per-transaction series /
+    /// maturity / expiration / signatures, and the exact coinbase amount.
+    /// On success the block is stored and handed to accept_block_continue;
+    /// if that fails, the previous tip is reconnected before the error is
+    /// propagated.
+    async fn accept_block(
+        &self,
+        id: &BlockID,
+        block: &Block,
+        now: u64,
+        source: SocketAddr,
+    ) -> Result<(), ProcessBlockError> {
+        let Some((prev_header, _height)) =
+            self.block_store.get_block_header(&block.header.previous)?
+        else {
+            return Err(BlockStorageNotFoundError::BlockHeader(block.header.previous).into());
+        };
+
+        // check height
+        let new_height = prev_header.height + 1;
+        if block.header.height != new_height {
+            return Err(ProcessBlockError::HeightMismatch(
+                new_height,
+                block.header.height,
+                *id,
+            ));
+        }
+
+        // did we process it already?
+        let branch_type = self.ledger.get_branch_type(id)?;
+
+        if branch_type != BranchType::Unknown {
+            info!("Already processed block {}", id);
+            return Ok(());
+        }
+
+        // check declared proof of work is correct
+        let target = Self::compute_target(&prev_header, &self.block_store, &self.ledger)?;
+
+        if block.header.target != target {
+            return Err(ProcessBlockError::TargetInvalid(
+                block.header.target,
+                target,
+                *id,
+            ));
+        }
+
+        // check that cumulative work is correct
+        let chain_work = compute_chain_work(&block.header.target, &prev_header.chain_work);
+        if block.header.chain_work != chain_work {
+            return Err(ProcessBlockError::ChainWorkInvalid(
+                block.header.chain_work,
+                chain_work,
+                *id,
+            ));
+        }
+
+        // check that the timestamp isn't too far in the past
+        let median_timestamp = Self::compute_median_timestamp(&prev_header, &self.block_store)?;
+
+        if block.header.time <= median_timestamp {
+            return Err(ProcessBlockError::TimestampTooEarly(*id));
+        }
+
+        // check series, maturity, expiration then verify signatures and calculate total fees
+        let mut fees = 0;
+
+        for tx in &block.transactions {
+            let tx_id = tx.id()?;
+
+            if !Self::check_transaction_series(tx, block.header.height) {
+                return Err(ProcessBlockTransactionsError::SeriesInvalid(tx_id).into());
+            }
+
+            if !tx.is_coinbase() {
+                if !tx.is_mature(block.header.height) {
+                    return Err(ProcessBlockTransactionsError::Immature(tx_id).into());
+                }
+
+                if tx.is_expired(block.header.height) {
+                    return Err(ProcessBlockTransactionsError::Expired(
+                        tx_id,
+                        block.header.height,
+                        // safe: is_expired true implies `expires` is set
+                        tx.expires.expect("should not be none"),
+                    )
+                    .into());
+                }
+
+                // if it's in the queue with the same signature we've verified it already
+                // (signature presence was established by check_block's
+                // check_transaction pass)
+                if !self
+                    .tx_queue
+                    .exists_signed(&tx_id, tx.signature.expect("signature"))
+                {
+                    match tx.verify() {
+                        Ok(ok) => {
+                            if !ok {
+                                return Err(
+                                    ProcessBlockTransactionsError::SignatureVerificationFailed(
+                                        tx_id,
+                                    )
+                                    .into(),
+                                );
+                            }
+                        }
+                        Err(err) => {
+                            return Err(err.into());
+                        }
+                    };
+                }
+
+                if let Some(fee) = tx.fee {
+                    fees += fee;
+                } else {
+                    return Err(ProcessBlockTransactionsError::FeeMissing(tx_id).into());
+                }
+            }
+        }
+
+        // verify coinbase reward: must equal subsidy + total fees exactly
+        let reward = Self::block_creation_reward(block.header.height) + fees;
+        if block.transactions[0].amount != reward {
+            // in cruzbit every last issued bit must be accounted for in public key balances
+            return Err(ProcessBlockTransactionsError::CoinbaseInvalidAmount(*id).into());
+        }
+
+        // store the block if we think we're going to accept it
+        self.block_store.store(id, block, now)?;
+
+        // get the current tip before we try adjusting the chain
+        let Some((tip_id, _height)) = self.ledger.get_chain_tip()? else {
+            return Err(LedgerNotFoundError::ChainTip.into());
+        };
+
+        // finish accepting the block if possible
+        if let Err(err) = self
+            .accept_block_continue(id, block, now, prev_header, &source)
+            .await
+        {
+            // we may have disconnected the old best chain and partially
+            // connected the new one before encountering a problem. re-activate it now
+            if let Err(err2) = self.reconnect_tip(&tip_id, &source).await {
+                info!("Error reconnecting tip: {}, block: {}", err2, tip_id);
+            }
+
+            // return the original error
+            return Err(err);
+        }
+        Ok(())
+    }
+
+    /// Computes the expected block reward for the given height.
+    ///
+    /// The subsidy halves every BLOCKS_UNTIL_REWARD_HALVING blocks; after 64
+    /// halvings a right shift would be undefined, and the reward is zero
+    /// anyway.
+    pub fn block_creation_reward(height: u64) -> u64 {
+        let halvings = height / BLOCKS_UNTIL_REWARD_HALVING;
+        if halvings >= 64 {
+            return 0;
+        }
+        INITIAL_COINBASE_REWARD >> halvings
+    }
+
+    // NOTE(review): the two doc lines below look like leftover copy/paste —
+    // they (mis)attach to the following `compute_target` item, which carries
+    // its own doc line; the functions they describe appear later with docs.
+    /// Compute the median timestamp of the last NUM_BLOCKS_FOR_MEDIAN_TIMESTAMP blocks
+    /// Convenience method to get the current main chain's tip ID, header, and storage time.
+    // NOTE(review): generic type parameters appear stripped throughout this
+    // patch text (e.g. `&Arc,` and bare `Result`) — presumably an extraction
+    // artifact; the originals were likely `&Arc<BlockStorage>` /
+    // `&Arc<Ledger>` / `Result<BlockID, ProcessBlockError>`. Confirm against
+    // the repository before relying on these signatures.
+    /// Compute expected target of the current block
+    pub fn compute_target(
+        prev_header: &BlockHeader,
+        block_store: &Arc,
+        ledger: &Arc,
+    ) -> Result {
+        // algorithm switch-over: Bitcoin Cash style SMA retarget after a
+        // fixed height, classic Bitcoin 2016-block retarget before it
+        if prev_header.height >= BITCOIN_CASH_RETARGET_ALGORITHM_HEIGHT {
+            return Self::compute_target_bitcoin_cash(prev_header, block_store, ledger);
+        }
+
+        Self::compute_target_bitcoin(prev_header, block_store)
+    }
+
+    /// Original target computation
+    ///
+    /// Bitcoin-style retargeting: every RETARGET_INTERVAL blocks, scale the
+    /// previous target by the actual elapsed time of the interval, clamped
+    /// to [RETARGET_TIME/4, RETARGET_TIME*4], and capped at INITIAL_TARGET.
+    pub fn compute_target_bitcoin(
+        prev_header: &BlockHeader,
+        block_store: &Arc,
+    ) -> Result {
+        if (prev_header.height + 1) % RETARGET_INTERVAL != 0 {
+            // not 2016th block, use previous block's value
+            return Ok(prev_header.target);
+        }
+
+        // defend against time warp attack
+        let mut blocks_to_go_back = RETARGET_INTERVAL - 1;
+        if (prev_header.height + 1) != RETARGET_INTERVAL {
+            blocks_to_go_back = RETARGET_INTERVAL;
+        }
+
+        // walk back to the first block of the interval
+        let mut first_header = prev_header.clone();
+        for _ in 0..blocks_to_go_back {
+            let Some((block_header, _when)) =
+                block_store.get_block_header(&first_header.previous)?
+            else {
+                return Err(BlockStorageNotFoundError::BlockHeader(first_header.previous).into());
+            };
+            first_header = block_header;
+        }
+
+        let mut actual_timespan = prev_header.time - first_header.time;
+
+        // clamp adjustment to a factor of 4 in either direction
+        let min_timespan = RETARGET_TIME / 4;
+        let max_timespan = RETARGET_TIME * 4;
+
+        if actual_timespan < min_timespan {
+            actual_timespan = min_timespan;
+        }
+        if actual_timespan > max_timespan {
+            actual_timespan = max_timespan;
+        }
+
+        let actual_timespan_int = UBig::from(actual_timespan);
+        let retarget_time_int = UBig::from(RETARGET_TIME);
+
+        // decode the hex-encoded INITIAL_TARGET constant into raw bytes
+        let mut initial_target_bytes = BlockID::new();
+        hex_decode(INITIAL_TARGET.as_bytes(), &mut initial_target_bytes)
+            .map_err(EncodingError::HexDecode)?;
+
+        let max_target_int = UBig::from_be_bytes(&initial_target_bytes[..]);
+        let prev_target_int = UBig::from_be_bytes(&prev_header.target[..]);
+        // new_target = prev_target * actual_timespan / RETARGET_TIME
+        let new_target_int = prev_target_int * actual_timespan_int;
+        let new_target_int = new_target_int / retarget_time_int;
+
+        // never exceed the easiest (initial) target
+        let target = if new_target_int > max_target_int {
+            BlockID::from(max_target_int)
+        } else {
+            BlockID::from(new_target_int)
+        };
+        Ok(target)
+    }
+
+    /// Revised target computation
+    ///
+    /// Bitcoin-Cash-style simple-moving-average retarget over the last
+    /// RETARGET_SMA_WINDOW blocks, derived from accumulated chain work.
+    fn compute_target_bitcoin_cash(
+        prev_header: &BlockHeader,
+        block_store: &Arc,
+        ledger: &Arc,
+    ) -> Result {
+        // NOTE(review): on failure this reports prev_header.height, not the
+        // height actually queried (height - RETARGET_SMA_WINDOW).
+        let Some(first_id) =
+            ledger.get_block_id_for_height(prev_header.height - RETARGET_SMA_WINDOW)?
+        else {
+            return Err(LedgerNotFoundError::BlockIDForHeight(prev_header.height).into());
+        };
+
+        let Some((first_header, _when)) = block_store.get_block_header(&first_id)? else {
+            return Err(BlockStorageNotFoundError::BlockHeader(first_id).into());
+        };
+
+        // work accumulated across the window, scaled to per-target-spacing
+        let work_int = prev_header.chain_work.as_big_int() - first_header.chain_work.as_big_int();
+        let work_int = work_int * UBig::from(TARGET_SPACING);
+
+        // "In order to avoid difficulty cliffs, we bound the amplitude of the
+        // adjustment we are going to do to a factor in [0.5, 2]." - Bitcoin-ABC
+        let mut actual_timespan = prev_header.time - first_header.time;
+        if actual_timespan > 2 * RETARGET_SMA_WINDOW * TARGET_SPACING {
+            actual_timespan = 2 * RETARGET_SMA_WINDOW * TARGET_SPACING;
+        } else if actual_timespan < (RETARGET_SMA_WINDOW / 2) * TARGET_SPACING {
+            actual_timespan = (RETARGET_SMA_WINDOW / 2) * TARGET_SPACING;
+        }
+
+        let work_int = work_int / actual_timespan;
+
+        // T = (2^256 / W) - 1
+        let max_int = UBig::from(2u8).pow(256);
+        let new_target_int = max_int / work_int;
+        let new_target_int = new_target_int - UBig::from(1u8);
+
+        // don't go above the initial target
+        let mut initial_target_bytes = BlockID::new();
+        // NOTE(review): this call *decodes* hex yet wraps its error as
+        // EncodingError::HexEncode; compute_target_bitcoin maps the identical
+        // call to HexDecode — this one is probably a copy/paste slip.
+        hex_decode(INITIAL_TARGET.as_bytes(), &mut initial_target_bytes)
+            .map_err(EncodingError::HexEncode)?;
+
+        let max_target_int = UBig::from_be_bytes(&initial_target_bytes);
+
+        let target_id = if new_target_int > max_target_int {
+            BlockID::from(max_target_int)
+        } else {
+            BlockID::from(new_target_int)
+        };
+        Ok(target_id)
+    }
+
+    /// Compute the median timestamp of the last NUM_BLOCKS_FOR_MEDIAN_TIMESTAMP blocks
+    ///
+    /// Walks back from `prev_header` collecting up to
+    /// NUM_BLOCKS_FOR_MEDIAN_TIMESTAMP timestamps (fewer near genesis),
+    /// sorts them and returns the middle element.
+    pub fn compute_median_timestamp(
+        prev_header: &BlockHeader,
+        block_store: &Arc,
+    ) -> Result {
+        let mut prev_header = prev_header.clone();
+        let mut timestamps = Vec::new();
+
+        for _ in 0..NUM_BLOCKS_FOR_MEDIAN_TIMESTAMP {
+            timestamps.push(prev_header.time);
+            // stop early when we run out of ancestors (near genesis)
+            prev_header = match block_store.get_block_header(&prev_header.previous)? {
+                Some((block_header, _when)) => block_header,
+                None => break,
+            };
+        }
+        timestamps.sort();
+        Ok(timestamps.remove(timestamps.len() / 2))
+    }
+
+    /// Continue accepting the block
+    ///
+    /// Decides whether the block beats the current tip; if not it is flagged
+    /// as a side branch. Otherwise performs the reorganization: walks both
+    /// chains back to their common ancestor, disconnects the old main-chain
+    /// blocks, connects the new branch, and finally connects `block` itself.
+    async fn accept_block_continue(
+        &self,
+        id: &BlockID,
+        block: &Block,
+        block_when: u64,
+        prev_header: BlockHeader,
+        source: &SocketAddr,
+    ) -> Result<(), ProcessBlockError> {
+        // get the current tip
+        let Some((tip_id, tip_header, tip_when)) =
+            Self::get_chain_tip_header(&self.ledger, &self.block_store)?
+        else {
+            return Err(LedgerNotFoundError::ChainTipHeader.into());
+        };
+
+        if *id == tip_id {
+            // can happen if we failed connecting a new block
+            return Ok(());
+        }
+
+        // is this block better than the current tip?
+        if !block.header.compare(&tip_header, block_when, tip_when) {
+            // flag this as a side branch block
+            info!("Block {} does not represent the tip of the best chain", id);
+            return self
+                .ledger
+                .set_branch_type(id, BranchType::Side)
+                .map_err(ProcessBlockError::Ledger);
+        }
+
+        // the new block is the better chain
+        let mut tip_ancestor = tip_header;
+        let mut new_ancestor = prev_header;
+
+        let mut min_height = tip_ancestor.height;
+        if new_ancestor.height < min_height {
+            min_height = new_ancestor.height;
+        }
+
+        // disconnect list is oldest-last (pushed tip-first); connect list is
+        // kept oldest-first by inserting at the front
+        let mut blocks_to_disconnect = Vec::new();
+        let mut blocks_to_connect = Vec::new();
+
+        // walk back each chain to the common min_height
+        let mut tip_ancestor_id = tip_id;
+        while tip_ancestor.height > min_height {
+            blocks_to_disconnect.push(tip_ancestor_id);
+            tip_ancestor_id = tip_ancestor.previous;
+            let Some((block_header, _when)) =
+                self.block_store.get_block_header(&tip_ancestor_id)?
+            else {
+                return Err(BlockStorageNotFoundError::BlockHeader(tip_ancestor_id).into());
+            };
+            tip_ancestor = block_header;
+        }
+
+        let mut new_ancestor_id = block.header.previous;
+        while new_ancestor.height > min_height {
+            blocks_to_connect.insert(0, new_ancestor_id);
+            new_ancestor_id = new_ancestor.previous;
+
+            let Some((block_header, _when)) =
+                self.block_store.get_block_header(&new_ancestor_id)?
+            else {
+                return Err(BlockStorageNotFoundError::BlockHeader(new_ancestor_id).into());
+            };
+            new_ancestor = block_header;
+        }
+
+        // scan both chains until we get to the common ancestor
+        while new_ancestor != tip_ancestor {
+            blocks_to_disconnect.push(tip_ancestor_id);
+            blocks_to_connect.insert(0, new_ancestor_id);
+            tip_ancestor_id = tip_ancestor.previous;
+
+            let Some((block_header, _when)) =
+                self.block_store.get_block_header(&tip_ancestor_id)?
+            else {
+                return Err(BlockStorageNotFoundError::BlockHeader(tip_ancestor_id).into());
+            };
+            tip_ancestor = block_header;
+            new_ancestor_id = new_ancestor.previous;
+
+            let Some((block_header, _when)) =
+                self.block_store.get_block_header(&new_ancestor_id)?
+            else {
+                return Err(BlockStorageNotFoundError::BlockHeader(new_ancestor_id).into());
+            };
+            new_ancestor = block_header;
+        }
+
+        // we're at common ancestor. disconnect any main chain blocks we need to
+        for id in blocks_to_disconnect {
+            let Some(block_to_disconnect) = self.block_store.get_block(&id)? else {
+                return Err(BlockStorageNotFoundError::Block(id).into());
+            };
+
+            self.disconnect_block(&id, &block_to_disconnect, source)
+                .await?;
+        }
+
+        // connect any new chain blocks we need to
+        for id in blocks_to_connect {
+            // NOTE(review): this binding shadows the `blocks_to_connect` Vec
+            // with a single Block — confusing name, likely a copy/paste slip;
+            // behavior is correct but `block_to_connect` would read better.
+            let Some(blocks_to_connect) = self.block_store.get_block(&id)?
+            else {
+                return Err(BlockStorageNotFoundError::Block(id).into());
+            };
+
+            // more=true: additional blocks follow in this reorg
+            self.connect_block(&id, &blocks_to_connect, source, true)
+                .await?;
+        }
+
+        // and finally connect the new block
+        self.connect_block(id, block, source, false).await
+    }
+
+    /// Update the ledger and transaction queue and notify undo tip channels
+    ///
+    /// Index 0 is the coinbase; only the non-coinbase transactions are
+    /// returned to the queue.
+    async fn disconnect_block(
+        &self,
+        id: &BlockID,
+        block: &Block,
+        source: &SocketAddr,
+    ) -> Result<(), ProcessorError> {
+        // Update the ledger
+        let tx_ids = self.ledger.disconnect_block(id, block)?;
+
+        info!(
+            "Block {} has been disconnected, height: {}",
+            &id, block.header.height
+        );
+
+        // Add newly disconnected non-coinbase transactions back to the queue
+        self.tx_queue
+            .add_batch(&tx_ids[1..], &block.transactions[1..]);
+
+        // Notify tip change channels; failed sends are logged, not fatal
+        let tip_change_channels = self.tip_change_channels.lock().await;
+        for tip_tx in tip_change_channels.iter() {
+            if let Err(err) = tip_tx
+                .send(TipChange {
+                    block_id: *id,
+                    block: block.clone(),
+                    source: *source,
+                    connect: false,
+                    more: false,
+                })
+                .map_err(ProcessorError::from)
+            {
+                error!("{:?}", err);
+            }
+        }
+        Ok(())
+    }
+
+    /// Update the ledger and transaction queue and notify new tip channels
+    ///
+    /// `more` indicates further blocks follow in the same reorg, so
+    /// listeners can defer work until the final tip arrives.
+    async fn connect_block(
+        &self,
+        id: &BlockID,
+        block: &Block,
+        source: &SocketAddr,
+        more: bool,
+    ) -> Result<(), ProcessBlockError> {
+        // Update the ledger
+        let tx_ids = self.ledger.connect_block(id, block)?;
+
+        info!(
+            "Block {} is the new tip, height: {}",
+            id, block.header.height
+        );
+
+        // Remove newly confirmed non-coinbase transactions from the queue
+        self.tx_queue
+            .remove_batch(&tx_ids[1..], block.header.height, more)?;
+
+        // Notify tip change channels; failed sends are logged, not fatal
+        let tip_change_channels = self.tip_change_channels.lock().await;
+
+        for tip_tx in tip_change_channels.iter() {
+            if let Err(err) = tip_tx
+                .send(TipChange {
+                    block_id: *id,
+                    block: block.clone(),
+                    source: *source,
+                    connect: true,
+                    more,
+                })
+                .map_err(ProcessBlockError::from)
+            {
+                error!("{:?}", err);
+            }
+        }
+        Ok(())
+    }
+
+    /// Try to reconnect the previous tip block when accept_block_continue fails for the new block
+    ///
+    /// Re-runs accept_block_continue for the old tip, re-activating the
+    /// previously best chain after a partially applied reorg.
+    async fn reconnect_tip(
+        &self,
+        id: &BlockID,
+        source: &SocketAddr,
+    ) -> Result<(), ProcessBlockError> {
+        let Some(block) = self.block_store.get_block(id)? else {
+            return Err(BlockStorageNotFoundError::Block(*id).into());
+        };
+
+        // fetched only for the stored-at time (`when`)
+        let Some((_block_header, when)) = self.block_store.get_block_header(id)? else {
+            return Err(BlockStorageNotFoundError::BlockHeader(*id).into());
+        };
+
+        let Some((prev_header, _when)) =
+            self.block_store.get_block_header(&block.header.previous)?
+        else {
+            return Err(BlockStorageNotFoundError::BlockHeader(block.header.previous).into());
+        };
+
+        self.accept_block_continue(id, &block, when, prev_header, source)
+            .await
+    }
+
+    /// Convenience method to get the current main chain's tip ID, header, and storage time.
+    ///
+    /// Returns Ok(None) when no chain tip exists yet (fresh ledger).
+    pub fn get_chain_tip_header(
+        ledger: &Arc,
+        block_store: &Arc,
+    ) -> Result, ProcessorError> {
+        // TODO: can the tip exist and not the header?
+        let Some((tip_id, _height)) = ledger.get_chain_tip()? else {
+            return Ok(None);
+        };
+
+        // get the header
+        let Some((tip_header, tip_when)) = block_store.get_block_header(&tip_id)?
+        else {
+            // a tip recorded in the ledger should always have a stored header
+            return Err(BlockStorageNotFoundError::BlockHeader(tip_id).into());
+        };
+        Ok(Some((tip_id, tip_header, tip_when)))
+    }
+}
+
+/// Errors from the processor's event loop and block dis/connection paths.
+#[derive(Error, Debug)]
+pub enum ProcessorError {
+    #[error("block storage")]
+    BlockStorage(#[from] BlockStorageError),
+    #[error("block storage not found")]
+    BlockStorageNotFound(#[from] BlockStorageNotFoundError),
+    #[error("channel")]
+    Channel(#[from] ChannelError),
+    #[error("ledger")]
+    Ledger(#[from] LedgerError),
+}
+
+// NOTE(review): the generic arguments of `From` / `SendError` have been
+// stripped from this patch text (extraction artifact) — verify the intended
+// parameters against the repository.
+impl From> for ProcessorError {
+    fn from(err: tokio::sync::mpsc::error::SendError) -> Self {
+        Self::Channel(ChannelError::Send("tip change", err.to_string()))
+    }
+}
+
+impl From for ProcessorError {
+    fn from(err: tokio::sync::oneshot::error::RecvError) -> Self {
+        Self::Channel(ChannelError::OneshotReceive("block result", err))
+    }
+}
+
+/// Errors produced while validating and accepting a candidate block.
+/// Debug is provided by impl_debug_error_chain! below, hence no derive here.
+#[derive(Error)]
+pub enum ProcessBlockError {
+    #[error("chain work invalid {0}, expected {1} for block {2}")]
+    ChainWorkInvalid(BlockID, BlockID, BlockID),
+    #[error("hash list root mismatch for block {0}")]
+    HashListRootMismatch(BlockID),
+    #[error("expected height {0} found {1} for block {2}")]
+    HeightMismatch(u64, u64, BlockID),
+    #[error("height value is too large, block {0}")]
+    HeightTooLarge(BlockID),
+    #[error("nonce value is too large, block {0}")]
+    NonceTooLarge(BlockID),
+    #[error("block {0} is an orphan")]
+    Orphan(BlockID),
+    #[error("proof-of-work insufficient for block {0}")]
+    ProofOfWorkInsufficient(BlockID),
+    #[error("target is invalid {0}, expected {1} for block {2}")]
+    TargetInvalid(BlockID, BlockID, BlockID),
+    #[error("time value is too large, block {0}")]
+    TimeTooLarge(BlockID),
+    #[error("timestamp {0} too far in the future, now {1}, block {2}")]
+    TimestampInvalid(u64, u64, BlockID),
+    #[error("timestamp is too early for block {0}")]
+    TimestampTooEarly(BlockID),
+    #[error("transaction count in header doesn't match block {0}")]
+    TransactionCountMismatch(BlockID),
+    #[error("transaction count too large in header of block {0}")]
+    TransactionCountTooLarge(BlockID),
+
+    // wrapped lower-level error sources
+    #[error("block")]
+    Block(#[from] BlockError),
+    #[error("block storage")]
+    BlockStorage(#[from] BlockStorageError),
+    #[error("block storage not found")]
+    BlockStorageNotFound(#[from] BlockStorageNotFoundError),
+    #[error("channel")]
+    Channel(#[from] ChannelError),
+    #[error("checkpoint")]
+    Checkpoint(#[from] CheckpointError),
+    #[error("encoding")]
+    Encoding(#[from] EncodingError),
+    #[error("ledger")]
+    Ledger(#[from] LedgerError),
+    #[error("ledger not found")]
+    LedgerNotFound(#[from] LedgerNotFoundError),
+    #[error("processing block transactions")]
+    ProcessBlockTransactions(#[from] ProcessBlockTransactionsError),
+    #[error("processing transaction")]
+    ProcessTransaction(#[from] ProcessTransactionError),
+    #[error("processor")]
+    Processor(#[source] Box),
+    #[error("transaction")]
+    Transaction(#[from] TransactionError),
+    #[error("transaction queue")]
+    TransactionQueue(#[from] TransactionQueueError),
+}
+
+// needs boxed because it's recursive
+impl From for ProcessBlockError {
+    fn from(value: ProcessorError) -> Self {
+        Self::Processor(Box::new(value))
+    }
+}
+
+impl From> for ProcessBlockError {
+    fn from(err: tokio::sync::mpsc::error::SendError) -> Self {
+        Self::Channel(ChannelError::Send("block", err.to_string()))
+    }
+}
+
+impl From> for ProcessBlockError {
+    fn from(err: tokio::sync::mpsc::error::SendError) -> Self {
+        Self::Channel(ChannelError::Send("tip change", err.to_string()))
+    }
+}
+
+impl From for ProcessBlockError {
+    fn from(err: tokio::sync::oneshot::error::RecvError) -> Self {
+        Self::Channel(ChannelError::OneshotReceive("process block result", err))
+    }
+}
+
+// maps a failed oneshot send (payload returned) to a channel error
+impl From> for ProcessBlockError {
+    fn from(_err: Result<(), ProcessBlockError>) -> Self {
+        Self::Channel(ChannelError::OneshotSend("process block result"))
+    }
+}
+
+// NOTE(review): "processing blocking" reads like a typo for "processing
+// block" — worth fixing at the macro call site.
+impl_debug_error_chain!(ProcessBlockError, "processing blocking");
+
+/// Errors specific to the transactions contained in a candidate block.
+#[derive(Error, Debug)]
+pub enum ProcessBlockTransactionsError {
+    #[error("coinbase pays incorrect amount, block {0}")]
+    CoinbaseInvalidAmount(BlockID),
+    #[error("first transaction is not a coinbase in block {0}")]
+    CoinbaseMissing(BlockID),
+    #[error("multiple coinbase transactions in block {0}")]
+    CoinbaseMultiple(BlockID),
+    #[error("duplicate transaction in block {0}")]
+    Duplicate(BlockID),
+    #[error("block {0} contains too many transactions {1}, max: {2}")]
+    Exceeded(BlockID, usize, u32),
+    #[error("transaction {0} is expired, height: {1}, expires: {2}")]
+    Expired(TransactionID, u64, u64),
+    #[error("missing transaction fee, transaction: {0}")]
+    FeeMissing(TransactionID),
+    #[error("transaction {0} is immature")]
+    Immature(TransactionID),
+    #[error("no transactions in block {0}")]
+    Missing(BlockID),
+    #[error("transaction {0} would have an invalid series")]
+    SeriesInvalid(TransactionID),
+    #[error("signature verification failed, transaction: {0}")]
+    SignatureVerificationFailed(TransactionID),
+}
+
+/// Errors produced while validating a loose (unconfirmed) transaction.
+/// (This enum continues beyond this chunk of the patch.)
+#[derive(Error)]
+pub enum ProcessTransactionError {
+    #[error("transaction {0} contains too large of an amount")]
+    AmountTooLarge(TransactionID),
+    #[error("transaction {0} amount too small, minimum is {1:.6}")]
+    AmountTooSmall(TransactionID, f64),
+    #[error("coinbase can't expire, transaction: {0}")]
+    CoinbaseExpired(TransactionID),
+    #[error("coinbase can't have a fee, transaction: {0}")]
+    CoinbaseFeeNotAllowed(TransactionID),
+    #[error("coinbase transaction {0} only allowed in block")]
+    CoinbaseInBlockOnly(TransactionID),
+    #[error("coinbase can't have a maturity, transaction: {0}")]
+    CoinbaseMaturityNotAllowed(TransactionID),
+    #[error("coinbase can't have a sender, transaction: {0}")]
+    CoinbaseSenderNotAllowed(TransactionID),
+    #[error("coinbase can't have a signature, transaction: {0}")]
+    CoinbaseSignatureNotAllowed(TransactionID),
+    #[error("transaction {0} is already confirmed")]
+    ConfirmedAlready(TransactionID),
+    #[error("expiration missing, transaction: {0}")]
+    ExpirationMissing(TransactionID),
+    #[error("expiration too large, transaction: {0}")]
+    ExpirationTooLarge(TransactionID),
+    #[error("transaction {0} is expired, height: {1}, expires: {2}")]
+    Expired(TransactionID, u64, u64),
+    #[error("transaction fee missing, transaction: {0}")]
+    FeeMissing(TransactionID),
+    #[error("transaction {0} contains too large of a fee")]
+    FeeTooLarge(TransactionID),
+    #[error("transaction {0} is immature")]
+    Immature(TransactionID),
+    #[error("maturity too large, transaction: {0}")]
+    MaturityTooLarge(TransactionID),
+    #[error("transaction {0} memo contains invalid utf8 characters")]
+    MemoCharactersInvalid(TransactionID),
+    #[error("transaction {0} memo length exceeded")]
+    MemoLengthExceeded(TransactionID),
+    #[error("transaction {0} doesn't pay minimum fee {1:.6}")]
+    MinimumFee(TransactionID, f64),
+    #[error("nonce value is too large, transaction: {0}")]
+    NonceTooLarge(TransactionID),
+    #[error("transaction {0} would not be mature")]
+    NotMature(TransactionID),
+    #[error("no room for transaction {0}, queue is full")]
+    QueueIsFull(TransactionID),
+    #[error("transaction {0} missing recipient")]
+    RecipientInvalid(TransactionID),
+    #[error("transaction sender invalid, transaction: {0}")]
+    SenderInvalid(TransactionID),
+    #[error("transaction sender missing, transaction: {0}")]
+    SenderMissing(TransactionID),
+    #[error("series invalid, transaction: {0}")]
+    SeriesInvalid(TransactionID),
+    #[error("series missing, transaction: {0}")]
+    SeriesMissing(TransactionID),
+    #[error("series too large, transaction: {0}")]
+    SeriesTooLarge(TransactionID),
+    #[error("transaction signature invalid, transaction: {0}")]
+    SignatureInvalid(TransactionID),
+    #[error("transaction signature missing, transaction: {0}")]
+    SignatureMissing(TransactionID),
+    #[error("signature verification failed, transaction: {0}")]
+    SignatureVerificationFailed(TransactionID),
+    #[error("transaction time too large, transaction: {0}")]
+    TimeTooLarge(TransactionID),
+
+    #[error("failed to get transaction index for transaction {0}")]
+ LedgerGetTransactionIndex(TransactionID, #[source] LedgerError), + + #[error("channel")] + Channel(#[from] ChannelError), + #[error("ledger")] + Ledger(#[from] LedgerError), + #[error("ledger not found")] + LedgerNotFound(#[from] LedgerNotFoundError), + #[error("transaction")] + Transaction(#[from] TransactionError), + #[error("transaction queue")] + TransactionQueue(#[from] TransactionQueueError), +} + +impl From> for ProcessTransactionError { + fn from(err: tokio::sync::mpsc::error::SendError) -> Self { + Self::Channel(ChannelError::Send("new tx", err.to_string())) + } +} + +impl From> for ProcessTransactionError { + fn from(err: tokio::sync::mpsc::error::SendError) -> Self { + Self::Channel(ChannelError::Send("tx to process", err.to_string())) + } +} + +impl From for ProcessTransactionError { + fn from(err: tokio::sync::oneshot::error::RecvError) -> Self { + Self::Channel(ChannelError::OneshotReceive("tx result", err)) + } +} + +impl From> for ProcessTransactionError { + fn from(_err: Result<(), ProcessTransactionError>) -> Self { + Self::Channel(ChannelError::OneshotSend("process transaction result")) + } +} + +impl_debug_error_chain!(ProcessTransactionError, "processing transaction"); + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_block_creation_reward() { + let max_halvings = 64; + let mut previous = INITIAL_COINBASE_REWARD * 2; + for halvings in 0..max_halvings { + let height = halvings * BLOCKS_UNTIL_REWARD_HALVING; + let reward = Processor::block_creation_reward(height); + assert!( + reward <= INITIAL_COINBASE_REWARD, + "Reward {} at height {} greater than initial reward {}", + reward, + height, + INITIAL_COINBASE_REWARD, + ); + assert_eq!( + reward, + previous / 2, + "Reward {} at height {} not equal to half previous period reward", + reward, + height, + ); + previous = reward + } + assert_eq!( + Processor::block_creation_reward(max_halvings * BLOCKS_UNTIL_REWARD_HALVING), + 0, + "Expected 0 reward by {} halving", + max_halvings + ); 
+ } + + #[test] + fn test_compute_max_transactions_per_block() { + let max_doublings = 64; + let mut previous = INITIAL_MAX_TRANSACTIONS_PER_BLOCK / 2; + + // verify the max is always doubling as expected + for doublings in 0..max_doublings { + let mut height = doublings * BLOCKS_UNTIL_TRANSACTIONS_PER_BLOCK_DOUBLING; + let max = Processor::compute_max_transactions_per_block(height); + assert!( + max >= INITIAL_MAX_TRANSACTIONS_PER_BLOCK, + "Max {} at height {} less than initial", + max, + height + ); + + let mut expect = previous * 2; + if expect > MAX_TRANSACTIONS_PER_BLOCK { + expect = MAX_TRANSACTIONS_PER_BLOCK; + } + assert_eq!( + max, expect, + "Max {} at height {} not equal to expected max {}", + max, height, expect + ); + + if doublings > 0 { + let mut previous2 = max; + // walk back over the previous period and make sure: + // 1) the max is never greater than this period's first max + // 2) the max is always <= the previous as we walk back + height -= 1; + while height >= (doublings - 1) * BLOCKS_UNTIL_TRANSACTIONS_PER_BLOCK_DOUBLING { + let max2 = Processor::compute_max_transactions_per_block(height); + assert!( + max2 <= max, + "Max {} at height {} is greater than next period's first max {}", + max2, + height, + max, + ); + assert!( + max2 <= previous2, + "Max {} at height {} is greater than previous max {} at height {}", + max2, + height, + previous2, + height + 1, + ); + previous2 = max2; + if let Some(new_height) = height.checked_sub(1) { + height = new_height; + } else { + break; + } + } + } + previous = max; + } + let max = Processor::compute_max_transactions_per_block( + MAX_TRANSACTIONS_PER_BLOCK_EXCEEDED_AT_HEIGHT, + ); + assert_eq!( + max, MAX_TRANSACTIONS_PER_BLOCK, + "Expected {} at height {}, found {}", + MAX_TRANSACTIONS_PER_BLOCK, MAX_TRANSACTIONS_PER_BLOCK_EXCEEDED_AT_HEIGHT, max, + ); + + let max = Processor::compute_max_transactions_per_block( + MAX_TRANSACTIONS_PER_BLOCK_EXCEEDED_AT_HEIGHT + 1, + ); + assert_eq!( + max, + 
MAX_TRANSACTIONS_PER_BLOCK, + "Expected {} at height {}, found", + MAX_TRANSACTIONS_PER_BLOCK, + MAX_TRANSACTIONS_PER_BLOCK_EXCEEDED_AT_HEIGHT + 1, + ); + let max = Processor::compute_max_transactions_per_block( + MAX_TRANSACTIONS_PER_BLOCK_EXCEEDED_AT_HEIGHT - 1, + ); + assert!( + max < MAX_TRANSACTIONS_PER_BLOCK, + "Expected less than max at height {}, found {}", + MAX_TRANSACTIONS_PER_BLOCK_EXCEEDED_AT_HEIGHT - 1, + max + ); + } +} diff --git a/src/protocol.rs b/src/protocol.rs new file mode 100644 index 0000000..5a2b1be --- /dev/null +++ b/src/protocol.rs @@ -0,0 +1,553 @@ +use base64ct::{Base64, Encoding}; +use cuckoofilter::ExportedCuckooFilter; +use ed25519_compact::PublicKey; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use serde_with::{serde_as, skip_serializing_none, DefaultOnNull, DeserializeAs, SerializeAs}; + +use crate::block::{Block, BlockHeader, BlockID}; +use crate::error::{DataError, EncodingError}; +use crate::transaction::{AsBase64, Transaction, TransactionID}; + +/// The name of this version of the cruzbit peer protocol. +pub const PROTOCOL: &str = "cruzbit.1"; + +#[skip_serializing_none] +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +#[serde(tag = "type", content = "body")] +/// A message frame for all messages in the cruzbit.1 protocol. +/// { "type": tag, "body": { content } } +pub enum Message { + /// Send a public key's balance to a peer. + Balance(BalanceMessage), + + /// Send a public key's balances to a peer. + Balances(BalancesMessage), + + /// Send a peer a block's header. + BlockHeader(Option), + + /// Send a peer a complete block. + Block(Option>), + + /// Request the addition of the given public keys to the current filter. + /// The filter is created if it's not set. + FilterAdd(FilterAddMessage), + + FilterBlockUndo(FilterBlockMessage), + + FilterBlock(FilterBlockMessage), + + /// Request that we load a filter which is used to filter transactions returned to the peer based on interest. 
+ FilterLoad(FilterLoadMessage), + + /// Indicates whether or not the filter request was successful. + FilterResult(Option), + + /// Returns a pared down view of the unconfirmed transaction queue containing only transactions relevant to the peer given their filter. + FilterTransactionQueue(FilterTransactionQueueMessage), + + /// Find a common ancestor with a peer. + FindCommonAncestor(FindCommonAncestorMessage), + + /// Requests a public key's balance. + GetBalance(GetBalanceMessage), + + /// Requests a set of public key balances. + GetBalances(GetBalancesMessage), + + /// Request a block header by height. + GetBlockHeaderByHeight(GetBlockHeaderByHeightMessage), + + /// Request a block header. + GetBlockHeader(GetBlockHeaderMessage), + + /// Request a block for download by height. + GetBlockByHeight(GetBlockByHeightMessage), + + /// Request a block for download. + GetBlock(GetBlockMessage), + + GetFilterTransactionQueue, + + /// Request peer addresses. + GetPeerAddresses, + + /// Requests transactions associated with a given public key over a given height range of the block chain. + GetPublicKeyTransactions(GetPublicKeyTransactionsMessage), + + /// Used by a mining peer to request mining work. + GetWork(GetWorkMessage), + + /// Request the tip header + GetTipHeader, + + /// Request a confirmed transaction. + GetTransaction(GetTransactionMessage), + + /// Sent in response to a PushTransactionMessage. + GetTransactionResult(PushTransactionResultMessage), + + GetTransactionRelayPolicy, + + /// Communicates blocks available for download. + InvBlock(InvBlockMessage), + + /// Communicate a list of potential peer addresses known by a peer. + /// Sent in response to the empty GetPeerAddresses message type. + PeerAddresses(PeerAddressesMessage), + + /// Requests transactions associated with a given public key over a given height range of the block chain. + PublicKeyTransactions(PublicKeyTransactionsMessage), + + /// Push a newly processed unconfirmed transaction to peers. 
+ PushTransaction(PushTransactionMessage), + + /// Sent in response to a PushTransactionMessage. + PushTransactionResult(PushTransactionResultMessage), + + SubmitWork(SubmitWorkMessage), + + /// Inform a mining peer of the result of its work. + SubmitWorkResult(SubmitWorkResultMessage), + + /// Send a peer the header for the tip block in the block chain. + /// It is sent in response to the empty GetTipHeader message type. + TipHeader(Option), + + /// Send a peer a confirmed transaction. + Transaction(TransactionMessage), + + /// Communicate this node's current settings for min fee and min amount. + /// Sent in response to the empty GetTransactionRelayPolicy message type. + TransactionRelayPolicy(TransactionRelayPolicyMessage), + + /// Used by a client to send work to perform to a mining peer. + /// The timestamp and nonce in the header can be manipulated by the mining peer. + /// It is the mining peer's responsibility to ensure the timestamp is not set below + /// the minimum timestamp and that the nonce does not exceed MAX_NUMBER (2^53-1). + Work(WorkMessage), +} + +impl std::fmt::Display for Message { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", std::mem::discriminant(self)) + } +} + +/// Communicate blocks available for download. +/// MessageType: InvBlock +#[derive(Deserialize, Serialize)] +pub struct InvBlockMessage { + pub block_ids: Vec, +} + +/// Request a block for download. +/// MessageType: GetBlock +#[derive(Deserialize, Serialize)] +pub struct GetBlockMessage { + pub block_id: BlockID, +} + +/// Request a block for download by height. +/// MessageType: GetBlockByHeight +#[derive(Serialize, Deserialize)] +pub struct GetBlockByHeightMessage { + pub height: u64, +} + +/// Send a peer a complete block. +/// MessageType: Block +#[skip_serializing_none] +#[derive(Deserialize, Serialize)] +pub struct BlockMessage { + pub block_id: BlockID, + pub block: Option, +} + +/// Request a block header. 
+/// Type: "get_block_header". +#[derive(Deserialize, Serialize)] +pub struct GetBlockHeaderMessage { + pub block_id: BlockID, +} + +/// Request a block header by height. +/// MessageType: GetBlockHeaderByHeight +#[derive(Deserialize, Serialize)] +pub struct GetBlockHeaderByHeightMessage { + pub height: u64, +} + +/// Send a peer a block's header. +/// MessageType: BlockHeader +#[skip_serializing_none] +#[derive(Deserialize, Serialize)] +pub struct BlockHeaderMessage { + pub block_id: BlockID, + #[serde(rename = "header")] + pub block_header: Option, +} + +/// Find a common ancestor with a peer. +/// MessageType::FindCommonAncestor +#[derive(Deserialize, Serialize)] +pub struct FindCommonAncestorMessage { + pub block_ids: Vec, +} + +/// Requests a public key's balance. +/// MessageType::GetBalance +#[serde_as] +#[derive(Deserialize, Serialize)] +pub struct GetBalanceMessage { + #[serde_as(as = "PublicKeySerde")] + pub public_key: PublicKey, +} + +/// Send a public key's balance to a peer. +/// MessageType::Balance +#[serde_as] +#[skip_serializing_none] +#[derive(Deserialize, Serialize)] +pub struct BalanceMessage { + pub block_id: Option, + pub height: Option, + #[serde_as(as = "Option")] + pub public_key: Option, + pub balance: Option, + pub error: Option, +} + +/// Requests a set of public key balances. +/// MessageType::GetBalances +#[serde_as] +#[derive(Deserialize, Serialize)] +pub struct GetBalancesMessage { + #[serde_as(as = "Vec")] + pub public_keys: Vec, +} + +/// Send a public key balances to a peer. +/// MessageType::Balances +#[skip_serializing_none] +#[derive(Deserialize, Serialize)] +pub struct BalancesMessage { + pub block_id: Option, + pub height: Option, + pub balances: Option>, + pub error: Option, +} + +/// An entry in the BalancesMessage's Balances field. 
+#[serde_as] +#[derive(Deserialize, Serialize)] +pub struct PublicKeyBalance { + #[serde_as(as = "PublicKeySerde")] + pub public_key: PublicKey, + pub balance: u64, +} + +/// Request a confirmed transaction. +/// MessageType::GetTransaction +#[derive(Deserialize, Serialize)] +pub struct GetTransactionMessage { + pub transaction_id: TransactionID, +} + +/// Send a peer a confirmed transaction. +/// MessageType::Transaction +#[skip_serializing_none] +#[derive(Deserialize, Serialize)] +pub struct TransactionMessage { + pub block_id: Option, + pub height: Option, + pub transaction_id: TransactionID, + pub transaction: Option, +} + +/// Used to send a peer the header for the tip block in the block chain. +/// MessageType::TipHeader It is sent in response to the empty GetTipHeader message type. +#[derive(Deserialize, Serialize)] +pub struct TipHeaderMessage { + pub block_id: BlockID, + #[serde(rename = "header")] + pub block_header: BlockHeader, + pub time_seen: u64, +} + +/// PushTransactionMessage is used to push a newly processed unconfirmed transaction to peers. +/// MessageType::PushTransaction +#[derive(Deserialize, Serialize)] +pub struct PushTransactionMessage { + pub transaction: Transaction, +} + +/// Sent in response to a PushTransactionMessage. +/// Type: PushTransactionResult +#[skip_serializing_none] +#[derive(Deserialize, Serialize)] +pub struct PushTransactionResultMessage { + pub transaction_id: Option, + pub error: Option, +} + +/// Used to request that we load a filter which is used to filter transactions returned to the peer based on interest. +/// MessageType: FilterLoad +#[serde_as] +#[derive(Deserialize, Serialize)] +pub struct FilterLoadMessage { + pub r#type: String, + #[serde_as(as = "ExportedCuckooFilterSerde")] + pub filter: ExportedCuckooFilter, +} + +/// Used to request the addition of the given public keys to the current filter. The filter is created if it's not set. 
+/// MessageType: FilterAdd +#[serde_as] +#[derive(Deserialize, Serialize)] +pub struct FilterAddMessage { + #[serde_as(as = "Vec")] + pub public_keys: Vec, +} + +/// Indicates whether or not the filter request was successful. +/// MessageType: FilterResult +#[derive(Deserialize, Serialize)] +pub struct FilterResultMessage { + pub error: String, +} + +/// Returns a pared down view of the unconfirmed transaction queue containing only transactions relevant to the peer given their filter. +/// MessageType::FilterTransactionQueue +#[skip_serializing_none] +#[derive(Deserialize, Serialize)] +pub struct FilterTransactionQueueMessage { + pub transactions: Option>, + pub error: Option, +} + +/// Requests transactions associated with a given public key over a given height range of the block chain. +/// MessageType::GetPublicKeyTransactions +#[serde_as] +#[derive(Deserialize, Serialize)] +pub struct GetPublicKeyTransactionsMessage { + #[serde_as(as = "PublicKeySerde")] + pub public_key: PublicKey, + pub start_height: u64, + pub start_index: u32, + pub end_height: u64, + pub limit: usize, +} + +/// Used to return a list of block headers and the transactions relevant to the public key over a given height range of the block chain. +/// MessageType:PublicKeyTransactions +#[serde_as] +#[skip_serializing_none] +#[derive(Deserialize, Serialize)] +pub struct PublicKeyTransactionsMessage { + #[serde_as(as = "Option")] + pub public_key: Option, + pub start_height: Option, + pub stop_height: Option, + pub stop_index: Option, + #[serde_as(as = "DefaultOnNull")] + pub filter_blocks: Option>, + pub error: Option, +} + +/// Communicate a list of potential peer addresses known by a peer. +/// Type: "peer_addresses". Sent in response to the empty GetPeerAddresses message type. +#[derive(Deserialize, Serialize)] +pub struct PeerAddressesMessage { + pub addresses: Vec, +} + +/// Communicate this node's current settings for min fee and min amount. +/// MessageType: TransactionRelayPolicy. 
Sent in response to the empty GetTransactionRelayPolicy message type. +#[derive(Deserialize, Serialize)] +pub struct TransactionRelayPolicyMessage { + pub min_fee: u64, + pub min_amount: u64, +} + +/// Used by a mining peer to request mining work. +/// MessageType: GetWork +#[serde_as] +#[derive(Deserialize, Serialize)] +pub struct GetWorkMessage { + #[serde_as(as = "Vec")] + pub public_keys: Vec, + pub memo: String, +} + +/// Used by a client to send work to perform to a mining peer. +/// The timestamp and nonce in the header can be manipulated by the mining peer. +/// It is the mining peer's responsibility to ensure the timestamp is not set below +/// the minimum timestamp and that the nonce does not exceed MAX_NUMBER (2^53-1). +/// MessageType::Work +#[skip_serializing_none] +#[derive(Deserialize, Serialize)] +pub struct WorkMessage { + pub work_id: Option, + pub header: Option, + pub min_time: Option, + pub error: Option, +} + +/// Used by a mining peer to submit a potential solution to the client. +/// MessageType: SubmitWork +#[derive(Deserialize, Serialize)] +pub struct SubmitWorkMessage { + pub work_id: u32, + pub header: BlockHeader, +} + +/// Inform a mining peer of the result of its work. +/// MessageType: SubmitWorkResult +#[skip_serializing_none] +#[derive(Deserialize, Serialize)] +pub struct SubmitWorkResultMessage { + pub work_id: u32, + pub error: Option, +} + +/// Represents a pared down block containing only transactions relevant to the peer given their filter. 
+#[serde_as] +#[derive(Debug, Serialize, Deserialize, PartialEq)] +pub struct FilterBlockMessage { + pub block_id: BlockID, + pub header: BlockHeader, + #[serde_as(as = "DefaultOnNull")] + pub transactions: Vec, +} + +/// Serializer / Deserialize for Public Key's +pub struct PublicKeySerde; + +impl SerializeAs for PublicKeySerde { + fn serialize_as(value: &PublicKey, serializer: S) -> Result + where + S: Serializer, + { + self::serialize_public_key(*value, serializer) + } +} + +impl<'de> DeserializeAs<'de, PublicKey> for PublicKeySerde { + fn deserialize_as(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + self::deserialize_public_key(deserializer) + } +} + +pub fn serialize_public_key(pub_key: PublicKey, serializer: S) -> Result +where + S: Serializer, +{ + serializer.serialize_str(&pub_key.as_base64()) +} + +pub fn deserialize_public_key<'de, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, +{ + let encoded: String = Deserialize::deserialize(deserializer)?; + let mut buf = [0u8; PublicKey::BYTES]; + let decoded = Base64::decode(&encoded, &mut buf) + .map_err(EncodingError::Base64Decode) + .map_err(serde::de::Error::custom)?; + let pub_key = PublicKey::from_slice(decoded) + .map_err(DataError::Ed25519) + .map_err(serde::de::Error::custom)?; + Ok(pub_key) +} + +/// Serializer / Deserialize for the Cuckoo Filter +pub struct ExportedCuckooFilterSerde; + +impl SerializeAs for ExportedCuckooFilterSerde { + fn serialize_as(value: &ExportedCuckooFilter, serializer: S) -> Result + where + S: Serializer, + { + self::serialize_cuckoo_filter(value, serializer) + } +} + +impl<'de> DeserializeAs<'de, ExportedCuckooFilter> for ExportedCuckooFilterSerde { + fn deserialize_as(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + self::deserialize_cuckoo_filter(deserializer) + } +} + +pub fn serialize_cuckoo_filter( + cuckoo_filter: &ExportedCuckooFilter, + serializer: S, +) -> Result +where + S: Serializer, +{ + 
serializer.serialize_str(&Base64::encode_string(&cuckoo_filter.values)) +} + +pub fn deserialize_cuckoo_filter<'de, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, +{ + let encoded: String = Deserialize::deserialize(deserializer)?; + let values = Base64::decode_vec(encoded.as_str()) + .map_err(EncodingError::Base64Decode) + .map_err(serde::de::Error::custom)?; + let length = values.len(); + Ok(ExportedCuckooFilter { values, length }) +} + +#[cfg(test)] +mod test { + use super::*; + use crate::block::test_utils::make_test_block; + + #[test] + fn test_serialize_find_common_ancestor() { + let block = make_test_block(1); + let block_id = block.id().unwrap(); + let block_ids = vec![block_id]; + let message = FindCommonAncestorMessage { block_ids }; + let serialized = serde_json::to_string(&message).unwrap(); + let json = format!(r#"{{"block_ids":["{}"]}}"#, block_id); + assert_eq!(serialized, json); + } + + #[test] + fn test_deserialize_inv_block_message() { + let block = make_test_block(1); + let block_id = block.id().unwrap(); + let block_ids = format!(r#"{{"block_ids":["{}"]}}"#, block_id); + let inv = serde_json::from_str::(&block_ids).unwrap(); + assert_eq!(inv.block_ids.len(), 1); + assert_eq!(block_id, inv.block_ids[0]); + } + + #[test] + fn test_deserialize_block_message() { + let block = make_test_block(1); + let block_id = block.id().unwrap(); + let block_json = serde_json::to_string(&block).unwrap(); + let block_message_json = format!( + r#"{{"block_id": "{}", "block": {} }}"#, + block_id, block_json + ); + let block_message = serde_json::from_str::(&block_message_json).unwrap(); + assert_eq!(block_id, block_message.block_id); + assert_eq!( + block.transactions[0].to, + block_message.block.unwrap().transactions[0].to + ); + } +} diff --git a/src/shutdown.rs b/src/shutdown.rs new file mode 100644 index 0000000..fbce628 --- /dev/null +++ b/src/shutdown.rs @@ -0,0 +1,97 @@ +use std::error::Error as StdError; + +use log::error; +use thiserror::Error; 
+use tokio::sync::oneshot; +use tokio::task::JoinHandle; + +use crate::error::{ChannelError, ErrChain}; +use crate::impl_debug_error_chain; + +pub type ShutdownChanSender = oneshot::Sender<()>; +pub type ShutdownChanReceiver = oneshot::Receiver<()>; +pub type ShutdownChan = (ShutdownChanSender, ShutdownChanReceiver); +pub type SpawnedError = Box; +pub type SpawnedHandle = JoinHandle>; + +pub struct Shutdown { + monitor_handle: JoinHandle<()>, + shutdown_chan_tx: ShutdownChanSender, +} + +impl Shutdown { + pub fn new(spawned_handle: SpawnedHandle, shutdown_chan_tx: ShutdownChanSender) -> Self { + let monitor = ShutdownMonitor::new(spawned_handle); + let monitor_handle = monitor.spawn(); + + Self { + monitor_handle, + shutdown_chan_tx, + } + } + + pub async fn send(self) { + if let Err(err) = self.shutdown_chan_tx.send(()) { + error!("{:?}", ShutdownError::from(err)); + } + + if let Err(err) = self.monitor_handle.await { + error!("{:?}", ShutdownError::Join(err)); + } + } + + pub fn is_finished(&self) -> bool { + self.monitor_handle.is_finished() + } +} + +pub struct ShutdownMonitor { + spawned_handle: SpawnedHandle, +} + +impl ShutdownMonitor { + fn new(spawned_handle: SpawnedHandle) -> Self { + Self { spawned_handle } + } + + fn spawn(self) -> JoinHandle<()> { + tokio::spawn(self.run()) + } + + /// Await and report on the JoinHandle result + async fn run(self) { + match self.spawned_handle.await { + Ok(Ok(_)) => {} + Ok(Err(err)) => { + error!("{:?}", ShutdownError::Spawned(err)); + } + Err(err) => { + error!("{:?}", ShutdownError::Join(err)); + } + } + } +} + +/// Helper function to return a shutdown channel +pub fn shutdown_channel() -> ShutdownChan { + oneshot::channel::<()>() +} + +#[derive(Error)] +pub enum ShutdownError { + #[error("channel")] + Channel(#[from] ChannelError), + #[error(transparent)] + Spawned(#[from] SpawnedError), + + #[error("join")] + Join(#[from] tokio::task::JoinError), +} + +impl_debug_error_chain!(ShutdownError, "shutdown"); + +impl 
From<()> for ShutdownError { + fn from(_err: ()) -> Self { + Self::Channel(ChannelError::OneshotSend("shutdown")) + } +} diff --git a/src/tls.rs b/src/tls.rs new file mode 100644 index 0000000..6829718 --- /dev/null +++ b/src/tls.rs @@ -0,0 +1,176 @@ +use std::fs::{self, File}; +use std::io::BufReader; +use std::path::{Path, PathBuf}; +use std::result; +use std::sync::Arc; + +use rand::Rng; +use rcgen::{ + Certificate, CertificateParams, DnType, ExtendedKeyUsagePurpose, IsCa, KeyUsagePurpose, + SerialNumber, +}; +use rustls::crypto::{verify_tls12_signature, verify_tls13_signature}; +use rustls_pemfile::{certs, private_key}; +use thiserror::Error; +use tokio_rustls::rustls::client::danger::{ + HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier, +}; +use tokio_rustls::rustls::pki_types::{CertificateDer, ServerName, UnixTime}; +use tokio_rustls::rustls::{ + self, ClientConfig, DigitallySignedStruct, RootCertStore, ServerConfig, +}; + +use crate::error::{EncodingError, FileError, KeyError}; + +const CERT_NAME: &str = "cert.pem"; +const KEY_NAME: &str = "key.pem"; + +/// Client config +pub fn client_config(tls_verify: bool) -> ClientConfig { + let mut config = ClientConfig::builder() + .with_root_certificates(RootCertStore::empty()) + .with_no_client_auth(); + + if !tls_verify { + config + .dangerous() + .set_certificate_verifier(NoCertificateVerification::new()); + } + + config +} + +/// Server config +pub fn server_config( + cert_path: &PathBuf, + key_path: &PathBuf, +) -> Result, TlsError> { + let cert_file = File::open(cert_path).map_err(|err| FileError::Open(cert_path.clone(), err))?; + let mut cert_reader = BufReader::new(cert_file); + let certs = certs(&mut cert_reader) + .collect::, _>>() + .map_err(|err| FileError::Open(cert_path.clone(), err))?; + + let key_file = File::open(key_path).map_err(|err| FileError::Open(key_path.clone(), err))?; + let mut key_reader = BufReader::new(key_file); + let Some(private_key) = + private_key(&mut 
key_reader).map_err(|err| FileError::Read(key_path.clone(), err))? + else { + return Err(KeyError::PrivateKeyDecode(EncodingError::Pem).into()); + }; + + let config = ServerConfig::builder() + .with_no_client_auth() + .with_single_cert(certs, private_key)?; + + Ok(Arc::new(config)) +} + +/// Generate ephemeral x.509 certificate and private key pair. They're written to the -datadir +pub fn generate_self_signed_cert_and_key( + tls_data_dir: &PathBuf, +) -> Result<(PathBuf, PathBuf), TlsError> { + // build the certificate + let mut params = CertificateParams::default(); + params.alg = &rcgen::PKCS_ECDSA_P256_SHA256; + params.is_ca = IsCa::ExplicitNoCa; + let mut rng = rand::thread_rng(); + let serial_number = rng.gen_range(0..u64::MAX); + params.serial_number = Some(SerialNumber::from(serial_number)); + // remove the default CN + params.distinguished_name.remove(DnType::CommonName); + params + .distinguished_name + .push(DnType::OrganizationName, "cruzbit client"); + params.key_usages.push(KeyUsagePurpose::KeyEncipherment); + params.key_usages.push(KeyUsagePurpose::DigitalSignature); + params + .extended_key_usages + .push(ExtendedKeyUsagePurpose::ServerAuth); + let cert = Certificate::from_params(params)?; + + // create the cert + let certificate_pem = cert.serialize_pem()?; + let cert_path = Path::new(".").join(tls_data_dir).join(CERT_NAME); + fs::write(&cert_path, certificate_pem) + .map_err(|err| FileError::Write(cert_path.clone(), err))?; + + // create the key + let private_key_pem = cert.serialize_private_key_pem(); + let key_path = Path::new(".").join(tls_data_dir).join(KEY_NAME); + fs::write(&key_path, private_key_pem) + .map_err(|err| FileError::Write(cert_path.clone(), err))?; + + Ok((cert_path, key_path)) +} + +#[derive(Debug)] +pub struct NoCertificateVerification; + +impl NoCertificateVerification { + pub fn new() -> Arc { + Arc::new(Self {}) + } +} + +impl ServerCertVerifier for NoCertificateVerification { + fn verify_server_cert( + &self, + _end_entity: 
&CertificateDer, + _intermediates: &[CertificateDer], + _server_name: &ServerName, + _ocsp_response: &[u8], + _now: UnixTime, + ) -> result::Result { + Ok(ServerCertVerified::assertion()) + } + + fn verify_tls12_signature( + &self, + message: &[u8], + cert: &CertificateDer<'_>, + dss: &DigitallySignedStruct, + ) -> Result { + verify_tls12_signature( + message, + cert, + dss, + &rustls::crypto::ring::default_provider().signature_verification_algorithms, + ) + } + + fn verify_tls13_signature( + &self, + message: &[u8], + cert: &CertificateDer<'_>, + dss: &DigitallySignedStruct, + ) -> Result { + verify_tls13_signature( + message, + cert, + dss, + &rustls::crypto::ring::default_provider().signature_verification_algorithms, + ) + } + + fn supported_verify_schemes(&self) -> Vec { + rustls::crypto::ring::default_provider() + .signature_verification_algorithms + .supported_schemes() + } +} + +#[derive(Error, Debug)] +pub enum TlsError { + #[error("file")] + File(#[from] FileError), + #[error("key")] + Key(#[from] KeyError), + + #[error("rcgen")] + Rcgen(#[from] rcgen::Error), + #[error("rustls")] + Rustls(#[from] tokio_rustls::rustls::Error), + #[error("rustls pemfile")] + RustlsPemfinle(#[from] std::io::Error), +} diff --git a/src/transaction.rs b/src/transaction.rs new file mode 100644 index 0000000..c5d116b --- /dev/null +++ b/src/transaction.rs @@ -0,0 +1,435 @@ +use std::fmt::{self, Display}; +use std::hash::Hash; +use std::ops::{Deref, DerefMut}; + +use base64ct::{Base64, Encoding}; +use ed25519_compact::{PublicKey, SecretKey, Signature}; +use faster_hex::hex_encode; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use serde_with::{serde_as, skip_serializing_none, DeserializeAs, SerializeAs}; +use sha3::{Digest, Sha3_256}; +use thiserror::Error; + +use crate::constants::BLOCKS_UNTIL_NEW_SERIES; +use crate::error::{DataError, EncodingError, JsonError}; +use crate::protocol::PublicKeySerde; +use crate::utils::{now_as_secs, rand_int31}; + +/// 
Represents a ledger transaction. It transfers value from one public key to another. +#[serde_as] +#[skip_serializing_none] +#[derive(Deserialize, Serialize, Debug, Clone, Eq, PartialEq)] +pub struct Transaction { + pub time: u64, + /// collision prevention. pseudorandom. not used for crypto + pub nonce: u32, + #[serde_as(as = "Option")] + pub from: Option, + #[serde_as(as = "PublicKeySerde")] + pub to: PublicKey, + pub amount: u64, + pub fee: Option, + /// max 100 characters + pub memo: Option, + /// block height. if set transaction can't be mined before + pub matures: Option, + /// block height. if set transaction can't be mined after + pub expires: Option, + /// +1 roughly once a week to allow for pruning history + pub series: u64, + #[serde_as(as = "Option")] + pub signature: Option, +} + +impl Transaction { + /// Returns a new unsigned transaction. + pub fn new( + from: Option, + to: PublicKey, + amount: u64, + fee: Option, + matures: Option, + expires: Option, + height: u64, + memo: Option, + ) -> Self { + Self { + time: now_as_secs(), + nonce: rand_int31(), + from, + to, + amount, + fee, + memo, + matures, + expires, + series: Self::compute_transaction_series(from.is_some(), height), + signature: None, + } + } + + /// Computes an ID for a given transaction. + pub fn id(&self) -> Result { + // never include the signature in the ID + // this way we never have to think about signature malleability + let json = if self.signature.is_some() { + let mut tx = self.clone(); + tx.signature = None; + serde_json::to_string(&tx).map_err(JsonError::Serialize)? + } else { + serde_json::to_string(self).map_err(JsonError::Serialize)? + }; + + let hash = TransactionID::from(&Sha3_256::digest(json.as_bytes())[..]); + Ok(hash) + } + + /// Sign this transaction. + pub fn sign(&mut self, priv_key: SecretKey) -> Result<(), TransactionError> { + let id = self.id()?; + self.signature = Some(priv_key.sign(id, None)); + Ok(()) + } + + /// Verify that the transaction is properly signed. 
+ pub fn verify(&self) -> Result { + let id = self.id()?; + let from = self.from.expect("transaction should have a sender"); + let signature = self.signature.expect("transaction should have a signature"); + match from.verify(id, &signature) { + Ok(_) => Ok(true), + Err(_) => Ok(false), + } + } + /// Returns true if the transaction is a coinbase. A coinbase is the first transaction in every block + /// used to reward the miner for mining the block. + pub fn is_coinbase(&self) -> bool { + self.from.is_none() + } + + /// Returns true if the transaction is relevant to the given public key. + pub fn contains(&self, pub_key: PublicKey) -> bool { + if !self.is_coinbase() && pub_key == self.from.expect("transaction should have a sender") { + return true; + } + + pub_key == self.to + } + + /// Returns true if the transaction can be mined at the given height. + pub fn is_mature(&self, height: u64) -> bool { + if self.matures.is_none() { + return true; + } + + self.matures >= Some(height) + } + + /// Returns true if the transaction cannot be mined at the given height. + pub fn is_expired(&self, height: u64) -> bool { + if self.expires.is_none() { + return false; + } + + self.expires < Some(height) + } + + /// Compute the series to use for a new transaction. + fn compute_transaction_series(is_coinbase: bool, height: u64) -> u64 { + if is_coinbase { + // coinbases start using the new series right on time + height / BLOCKS_UNTIL_NEW_SERIES + 1 + } else { + // otherwise don't start using a new series until 100 blocks in to mitigate + // potential reorg issues right around the switchover + height.saturating_sub(100) / BLOCKS_UNTIL_NEW_SERIES + 1 + } + } +} + +#[derive(Error, Debug)] +pub enum TransactionError { + #[error("json")] + Json(#[from] JsonError), +} + +/// SHA3-256 hash +pub const TRANSACTION_ID_LENGTH: usize = 32; + +/// A transaction's unique identifier. 
#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)]
pub struct TransactionID([u8; TRANSACTION_ID_LENGTH]);

impl TransactionID {
    /// Creates an all-zero transaction ID.
    pub fn new() -> Self {
        Self::default()
    }

    /// Returns TransactionID as a hex string
    pub fn as_hex(&self) -> String {
        self.to_string()
    }
}

impl AsRef<[u8]> for TransactionID {
    fn as_ref(&self) -> &[u8] {
        &self.0
    }
}

impl Deref for TransactionID {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for TransactionID {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

impl Display for TransactionID {
    // renders the ID as lowercase hex
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut hex = [0u8; TRANSACTION_ID_LENGTH * 2];
        let _ = hex_encode(self, &mut hex);
        write!(f, "{}", String::from_utf8_lossy(&hex))
    }
}

impl From<Vec<u8>> for TransactionID {
    // panics unless `value` is exactly TRANSACTION_ID_LENGTH bytes
    fn from(value: Vec<u8>) -> Self {
        Self::from(&value[..])
    }
}

impl From<&[u8]> for TransactionID {
    // panics unless `value` is exactly TRANSACTION_ID_LENGTH bytes
    fn from(value: &[u8]) -> Self {
        TransactionID(
            value
                .try_into()
                .expect("incorrect bytes for transaction id"),
        )
    }
}

impl FromIterator<u8> for TransactionID {
    fn from_iter<I: IntoIterator<Item = u8>>(iter: I) -> Self {
        let bytes: Vec<u8> = iter.into_iter().collect();
        bytes.into()
    }
}

impl Serialize for TransactionID {
    // serialized as a lowercase hex string without a 0x prefix
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        faster_hex::nopfx_lowercase::serialize(self, serializer)
    }
}

impl<'de> Deserialize<'de> for TransactionID {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        faster_hex::nopfx_lowercase::deserialize(deserializer)
    }
}

/// Base64 rendering for key material.
pub trait AsBase64 {
    fn as_base64(&self) -> String;
}

impl AsBase64 for PublicKey {
    fn as_base64(&self) -> String {
        // 4 * PublicKey::BYTES.div_ceil(3) = 44
        let mut buf = [0u8; 44];
        let encoded = Base64::encode(self.as_ref(), &mut buf)
            .map_err(EncodingError::Base64Encode)
            .unwrap();
encoded.to_string() + } +} + +pub struct SignatureSerde; + +impl SerializeAs for SignatureSerde { + fn serialize_as(value: &Signature, serializer: S) -> Result + where + S: Serializer, + { + self::serialize_signature(*value, serializer) + } +} + +impl<'de> DeserializeAs<'de, Signature> for SignatureSerde { + fn deserialize_as(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + self::deserialize_signature(deserializer) + } +} + +pub fn serialize_signature(signature: Signature, serializer: S) -> Result +where + S: Serializer, +{ + // 4 * Signature::BYTES.div_ceil(3) = 88 + let mut buf = [0u8; 88]; + serializer.serialize_str( + Base64::encode(&signature[..], &mut buf) + .map_err(EncodingError::Base64Encode) + .map_err(serde::ser::Error::custom)?, + ) +} + +pub fn deserialize_signature<'de, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, +{ + let encoded: String = Deserialize::deserialize(deserializer)?; + let mut buf = [0u8; Signature::BYTES]; + let decoded = Base64::decode(&encoded, &mut buf) + .map_err(EncodingError::Base64Decode) + .map_err(serde::de::Error::custom)?; + let signature = Signature::from_slice(decoded) + .map_err(DataError::Ed25519) + .map_err(serde::de::Error::custom)?; + Ok(signature) +} + +#[cfg(test)] +mod test { + use ed25519_compact::KeyPair; + + use super::*; + use crate::block::test_utils::make_test_block; + use crate::constants::CRUZBITS_PER_CRUZ; + + #[test] + fn test_id() { + let block = make_test_block(1); + let transaction_id = block.transactions[0].id(); + assert!(transaction_id.is_ok(), "failed to hash block id") + } + + #[test] + fn test_transaction() { + // create a sender + let key_pair = KeyPair::generate(); + let (pub_key, priv_key) = (key_pair.pk, key_pair.sk); + // create a recipient + let key_pair = KeyPair::generate(); + let (pub_key2, priv_key2) = (key_pair.pk, key_pair.sk); + + // create the unsigned transaction + let mut tx = Transaction::new( + Some(pub_key), + pub_key2, + 50 * CRUZBITS_PER_CRUZ, + 
None, + None, + None, + 0, + Some("for lunch".to_owned()), + ); + + // sign the transaction + tx.sign(priv_key).unwrap(); + + // verify the transaction + let ok = tx.verify().unwrap(); + assert!(ok, "Verification failed"); + // re-sign the transaction with the wrong private key + tx.sign(priv_key2).unwrap(); + + // verify the transaction (should fail) + let ok = tx.verify().unwrap(); + assert!(!ok, "Expected verification failure"); + } + + #[test] + fn test_transaction_test_vector1() { + // create transaction for Test Vector 1 + let mut pub_key_bytes = [0u8; PublicKey::BYTES]; + Base64::decode( + "80tvqyCax0UdXB+TPvAQwre7NxUHhISm/bsEOtbF+yI=", + &mut pub_key_bytes, + ) + .unwrap(); + let pub_key = PublicKey::from_slice(&pub_key_bytes).unwrap(); + + let mut pub_key_bytes = [0u8; PublicKey::BYTES]; + Base64::decode( + "YkJHRtoQDa1TIKhN7gKCx54bavXouJy4orHwcRntcZY=", + &mut pub_key_bytes, + ) + .unwrap(); + let pub_key2 = PublicKey::from_slice(&pub_key_bytes).unwrap(); + + let mut tx = Transaction::new( + Some(pub_key), + pub_key2, + 50 * CRUZBITS_PER_CRUZ, + Some(2 * CRUZBITS_PER_CRUZ), + None, + None, + 0, + Some("for lunch".to_owned()), + ); + tx.time = 1558565474; + tx.nonce = 2019727887; + + // check JSON matches test vector + let tx_json = serde_json::to_string(&tx).unwrap(); + assert_eq!( + r#"{"time":1558565474,"nonce":2019727887,"from":"80tvqyCax0UdXB+TPvAQwre7NxUHhISm/bsEOtbF+yI=","to":"YkJHRtoQDa1TIKhN7gKCx54bavXouJy4orHwcRntcZY=","amount":5000000000,"fee":200000000,"memo":"for lunch","series":1}"#, + tx_json, + "JSON differs from test vector" + ); + + // check ID matches test vector + let id = tx.id().unwrap(); + assert_eq!( + id.as_hex().as_str(), + "fc04870db147eb31823ce7c68ef366a7e94c2a719398322d746ddfd0f5c98776", + "ID {} differs from test vector", + id + ); + + // add signature from test vector + let mut sig_bytes = [0u8; Signature::BYTES]; + Base64::decode("Fgb3q77evL5jZIXHMrpZ+wBOs2HZx07WYehi6EpHSlvnRv4wPvrP2sTTzAAmdvJZlkLrHXw1ensjXBiDosucCw==", &mut 
sig_bytes).unwrap(); + tx.signature = Some(Signature::from_slice(&sig_bytes).unwrap()); + + // verify the transaction + let ok = tx.verify().unwrap(); + assert!(ok, "Verification failed"); + + // re-sign the transaction with private key from test vector + let mut priv_key_bytes = [0u8; SecretKey::BYTES]; + Base64::decode("EBQtXb3/Ht6KFh8/+Lxk9aDv2Zrag5G8r+dhElbCe07zS2+rIJrHRR1cH5M+8BDCt7s3FQeEhKb9uwQ61sX7Ig==", &mut priv_key_bytes).unwrap(); + let priv_key = SecretKey::from_slice(&priv_key_bytes).unwrap(); + tx.sign(priv_key).unwrap(); + + // verify the transaction + let ok = tx.verify().unwrap(); + assert!(ok, "Verification failed"); + + // re-sign the transaction with the wrong private key + let priv_key2 = KeyPair::generate().sk; + tx.sign(priv_key2).unwrap(); + + // verify the transaction (should fail) + let ok = tx.verify().unwrap(); + assert!(!ok, "Expected verification failure"); + } +} diff --git a/src/transaction_queue.rs b/src/transaction_queue.rs new file mode 100644 index 0000000..3079459 --- /dev/null +++ b/src/transaction_queue.rs @@ -0,0 +1,49 @@ +use ed25519_compact::{PublicKey, Signature}; +use thiserror::Error; + +use crate::balance_cache::BalanceCacheError; +use crate::transaction::{AsBase64, Transaction, TransactionID}; + +/// An interface to a queue of transactions to be confirmed. +pub trait TransactionQueue { + /// Adds the transaction to the queue. Returns true if the transaction was added to the queue on this call. + fn add(&self, id: &TransactionID, tx: &Transaction) -> Result; + + /// Adds a batch of transactions to the queue (a block has been disconnected.) + /// "height" is the block chain height after this disconnection. + fn add_batch(&self, ids: &[TransactionID], txs: &[Transaction]); + + /// Removes a batch of transactions from the queue (a block has been connected.) + /// "height" is the block chain height after this connection. + /// "more" indicates if more connections are coming. 
+ fn remove_batch( + &self, + ids: &[TransactionID], + height: u64, + more: bool, + ) -> Result<(), TransactionQueueError>; + + /// Returns transactions in the queue for the miner. + fn get(&self, limit: usize) -> Vec; + + /// Returns true if the given transaction is in the queue. + fn exists(&self, id: &TransactionID) -> bool; + + /// Returns true if the given transaction is in the queue and contains the given signature. + fn exists_signed(&self, id: &TransactionID, signature: Signature) -> bool; + + /// Returns the queue length. + fn len(&self) -> usize; + + /// Returns true if the queue has a length of 0. + fn is_empty(&self) -> bool; +} + +#[derive(Error, Debug)] +pub enum TransactionQueueError { + #[error("transaction {0} sender {} has insufficient balance", .1.as_base64())] + SenderBalanceInsufficient(TransactionID, PublicKey), + + #[error("balance cache")] + BalanceCache(#[from] BalanceCacheError), +} diff --git a/src/transaction_queue_memory.rs b/src/transaction_queue_memory.rs new file mode 100644 index 0000000..b56fe97 --- /dev/null +++ b/src/transaction_queue_memory.rs @@ -0,0 +1,194 @@ +use std::collections::{HashMap, VecDeque}; +use std::sync::{Arc, RwLock}; + +use ed25519_compact::Signature; + +use crate::balance_cache::BalanceCache; +use crate::constants::{MIN_AMOUNT_CRUZBITS, MIN_FEE_CRUZBITS}; +use crate::ledger_disk::LedgerDisk; +use crate::processor::Processor; +use crate::transaction::{Transaction, TransactionID}; +use crate::transaction_queue::{TransactionQueue, TransactionQueueError}; + +/// An in-memory FIFO implementation of the TransactionQueue interface. +pub struct TransactionQueueMemory { + tx_map: RwLock>, + tx_queue: RwLock>, + balance_cache: RwLock, +} + +impl TransactionQueueMemory { + /// Returns a new TransactionQueueMemory instance. 
+ pub fn new(ledger: Arc) -> Arc { + // don't accept transactions that would leave an unspendable balance with this node + let min_balance = MIN_AMOUNT_CRUZBITS + MIN_FEE_CRUZBITS; + + Arc::new(Self { + tx_map: RwLock::new(HashMap::new()), + tx_queue: RwLock::new(VecDeque::new()), + balance_cache: RwLock::new(BalanceCache::new(ledger, min_balance)), + }) + } + + /// Rebuild the balance cache and remove transactions now in violation + fn reprocess_queue(&self, height: u64) -> Result<(), TransactionQueueError> { + // invalidate the cache + let mut balance_cache = self.balance_cache.write().unwrap(); + balance_cache.reset(); + + // remove invalidated transactions from the queue + let mut tx_ids_to_remove = Vec::new(); + let mut tx_queue = self.tx_queue.write().unwrap(); + let mut tx_map = self.tx_map.write().unwrap(); + + for tx_id in tx_queue.iter() { + let tx = tx_map.get(tx_id).expect("tx queue to contain id"); + + // check that the series would still be valid + if !Processor::check_transaction_series(tx, height + 1) || + // check maturity and expiration if included in the next block + !tx.is_mature(height + 1) || tx.is_expired(height + 1) || + // don't re-mine any now unconfirmed spam + tx.fee < Some(MIN_FEE_CRUZBITS) || tx.amount < MIN_AMOUNT_CRUZBITS + { + // transaction has been invalidated. add it for removal and continue + tx_ids_to_remove.push(*tx_id); + continue; + } + + // check balance + let ok = balance_cache.apply(tx)?; + if !ok { + // transaction has been invalidated. add it for removal and continue + tx_ids_to_remove.push(*tx_id); + continue; + } + } + + // only retain elements that haven't been selected for removal + tx_map.retain(|tx_id, _tx| !tx_ids_to_remove.contains(tx_id)); + tx_queue.retain(|tx_id| !tx_ids_to_remove.contains(tx_id)); + + Ok(()) + } +} + +impl TransactionQueue for TransactionQueueMemory { + /// Adds the transaction to the queue. Returns true if the transaction was added to the queue on this call. 
+ fn add(&self, id: &TransactionID, tx: &Transaction) -> Result { + let mut tx_map = self.tx_map.write().unwrap(); + + if tx_map.contains_key(id) { + return Ok(false); + } + + // check sender balance and update sender and receiver balances + let mut balance_cache = self.balance_cache.write().unwrap(); + if !balance_cache.apply(tx)? { + // insufficient sender balance + return Err(TransactionQueueError::SenderBalanceInsufficient( + *id, + tx.from.expect("transaction should have a sender"), + )); + } + + // add to the back of the queue + let mut tx_queue = self.tx_queue.write().unwrap(); + tx_queue.push_back(*id); + tx_map.insert(*id, tx.clone()); + + Ok(true) + } + + /// Returns the queue length. + fn len(&self) -> usize { + self.tx_queue.read().unwrap().len() + } + + /// Returns true if the queue has a length of 0. + fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Returns transactions in the queue for the miner. + fn get(&self, limit: usize) -> Vec { + let tx_queue = self.tx_queue.read().unwrap(); + let tx_map = self.tx_map.read().unwrap(); + tx_queue + .iter() + .take(limit) + .filter_map(|tx_id| tx_map.get(tx_id).cloned()) + .collect() + } + + /// Adds a batch of transactions to the queue (a block has been disconnected.) + fn add_batch(&self, ids: &[TransactionID], txs: &[Transaction]) { + let mut tx_queue = self.tx_queue.write().unwrap(); + let mut tx_map = self.tx_map.write().unwrap(); + + // add to front in reverse order. + // we want formerly confirmed transactions to have the highest + // priority for getting into the next block. 
+ for (i, tx) in txs.iter().rev().enumerate() { + let tx_id = ids[i]; + + if tx_map.contains_key(&tx_id) { + if let Some(index) = tx_queue + .iter() + .position(|queue_tx_id| *queue_tx_id == tx_id) + { + // remove it from its current position + tx_queue.remove(index); + } + } + + tx_queue.push_front(tx_id); + tx_map.insert(tx_id, tx.clone()); + } + + // we don't want to invalidate anything based on maturity/expiration/balance yet. + // if we're disconnecting a block we're going to be connecting some shortly. + } + + /// Removes a batch of transactions from the queue (a block has been connected.) + /// "height" is the block chain height after this connection. + /// "more" indicates if more connections are coming. + fn remove_batch( + &self, + ids: &[TransactionID], + height: u64, + more: bool, + ) -> Result<(), TransactionQueueError> { + // create a scope for the guards + { + let mut tx_map = self.tx_map.write().unwrap(); + + // remove the transactions from the queue + let mut tx_queue = self.tx_queue.write().unwrap(); + tx_queue.retain(|tx_id| !ids.contains(tx_id)); + tx_map.retain(|tx_id, _tx| !ids.contains(tx_id)); + } + + if more { + // we don't want to invalidate anything based on series/maturity/expiration/balance + // until we're done connecting all of the blocks we intend to + Ok(()) + } else { + self.reprocess_queue(height) + } + } + + /// Returns true if the given transaction is in the queue. + fn exists(&self, id: &TransactionID) -> bool { + self.tx_map.read().unwrap().contains_key(id) + } + + /// Return true if the given transaction is in the queue and contains the given signature. 
+ fn exists_signed(&self, id: &TransactionID, signature: Signature) -> bool { + if let Some(tx) = self.tx_map.read().unwrap().get(id) { + tx.signature.expect("transaction should have a signature") == signature + } else { + false + } + } +} diff --git a/src/utils.rs b/src/utils.rs new file mode 100644 index 0000000..ad94079 --- /dev/null +++ b/src/utils.rs @@ -0,0 +1,59 @@ +use std::net::{IpAddr, SocketAddr, ToSocketAddrs}; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; + +use rand::Rng; + +use crate::error::ParsingError; + +/// Returns a non-negative pseudo-random 31-bit integer as a u32 +pub fn rand_int31() -> u32 { + rand::thread_rng().gen_range(0..=i32::MAX) as u32 +} + +/// Returns duration since Unix epoch +pub fn now_as_duration() -> Duration { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("time went backwards") +} + +/// Returns number of seconds since Unix epoch +pub fn now_as_secs() -> u64 { + now_as_duration().as_secs() +} + +/// Parse and resolve host to a SocketAddr +pub fn resolve_host(host: &str) -> Result { + let mut addrs = host.to_socket_addrs().map_err(ParsingError::ToSocketAddr)?; + match addrs.next() { + Some(addr) => Ok(addr), + None => Err(ParsingError::ToSocketAddr(std::io::Error::new( + std::io::ErrorKind::AddrNotAvailable, + format!("failed to parse address: {}", host), + ))), + } +} + +/// Determine if an ip address is in the reserved space +pub fn addr_is_reserved(socket_addr: &SocketAddr) -> bool { + match socket_addr.ip() { + IpAddr::V4(v4) => { + // 127.0.0.0/8 + v4.is_loopback() + // 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16 + || v4.is_private() + // 169.254.0.0/16 + || v4.is_link_local() + // 0.0.0.0 + || v4.is_unspecified() + } + IpAddr::V6(v6) => { + v6.is_loopback() // ::1/128 + + // fc00::/7 (nightly only) + // || v6.is_unique_local() + // fe80::/10 (nightly only) + // || v6.is_unicast_link_local() + } + } +} diff --git a/src/wallet.rs b/src/wallet.rs new file mode 100644 index 0000000..e3b939f --- /dev/null 
+++ b/src/wallet.rs @@ -0,0 +1,1053 @@ +use std::collections::hash_map::DefaultHasher; +use std::mem; +use std::net::SocketAddr; +use std::path::PathBuf; +use std::sync::{Arc, Mutex, MutexGuard, Weak}; + +use argon2::{Algorithm, Argon2, Params, Version}; +use crypto_secretbox::aead::{Aead, KeyInit}; +use crypto_secretbox::XSalsa20Poly1305; +use cuckoofilter::{CuckooError, CuckooFilter}; +use ed25519_compact::{KeyPair, PublicKey, SecretKey}; +use futures::stream::SplitSink; +use futures::{SinkExt, StreamExt}; +use leveldb::batch::{Batch, WriteBatch}; +use leveldb::database::{self, Database}; +use leveldb::iterator::{Iterable, LevelDBIterator}; +use leveldb::options::{Options, ReadOptions, WriteOptions}; +use log::{error, info}; +use rand::RngCore; +use thiserror::Error; +use tokio::net::TcpStream; +use tokio::sync::mpsc::{channel, Receiver, Sender}; +use tokio::sync::{oneshot, Mutex as AsyncMutex}; +use tokio::task::JoinHandle; +use tokio::time::timeout; +use tokio_tungstenite::tungstenite::client::IntoClientRequest; +use tokio_tungstenite::tungstenite::Message as WsMessage; +use tokio_tungstenite::{ + connect_async_tls_with_config, Connector, MaybeTlsStream, WebSocketStream, +}; + +use crate::block::{BlockHeader, BlockID}; +use crate::error::{impl_debug_error_chain, ChannelError, DataError, DbError, ErrChain, JsonError}; +use crate::peer::{PeerConnectionError, CONNECT_WAIT, WRITE_WAIT}; +use crate::protocol::{ + FilterBlockMessage, FilterLoadMessage, GetBalanceMessage, GetPublicKeyTransactionsMessage, + GetTransactionMessage, Message, PublicKeyTransactionsMessage, PushTransactionMessage, +}; +use crate::shutdown::{shutdown_channel, Shutdown, ShutdownChanReceiver, SpawnedError}; +use crate::tls::client_config; +use crate::transaction::{AsBase64, Transaction, TransactionError, TransactionID}; + +pub type TransactionCallback = Box, PushTransactionMessage) + Send + Sync>; +pub type FilterBlockCallback = Box, FilterBlockMessage) + Send + Sync>; + +type OutChanSender = 
Sender<(Message, ResultChanSender)>; +type OutChanReceiver = Receiver<(Message, ResultChanSender)>; +type ResultChanSender = oneshot::Sender; + +/// Used to hold the result of synchronous requests +struct WalletResult { + err: Option, + message: Option, +} + +struct WalletConnection { + out_chan_tx: OutChanSender, + shutdown: Shutdown, +} + +struct WalletInner { + passphrase: String, + filter: CuckooFilter, + transaction_callback: Option, + filter_block_callback: Option, +} + +/// Wallet manages keys and transactions on behalf of a user. +pub struct Wallet { + inner: Mutex, + connection: AsyncMutex>, + db: Database, +} + +impl Wallet { + /// Returns a new Wallet instance. + pub fn new(wallet_db_path: PathBuf) -> Result, WalletError> { + let mut options = Options::new(); + options.create_if_missing = true; + let db = Database::open(&wallet_db_path, &options) + .map_err(|err| DbError::Open(wallet_db_path, err))?; + let wallet = Arc::new(Self { + inner: Mutex::new(WalletInner { + passphrase: "".to_owned(), + filter: CuckooFilter::new(), + transaction_callback: None, + filter_block_callback: None, + }), + connection: AsyncMutex::new(None), + db, + }); + wallet.initialize_filter()?; + + Ok(wallet) + } + + pub fn set_passphrase(&self, passphrase: String) -> Result { + // test that the passphrase was the most recent used + let mut inner = self.inner(); + let pub_key = match self + .db + .get_u8(&ReadOptions::new(), &[NEWEST_PUBLIC_KEY_PREFIX]) + .map_err(DbError::Read)? + { + Some(v) => PublicKey::from_slice(&v[..]).map_err(DataError::Ed25519)?, + None => { + inner.passphrase = passphrase; + return Ok(true); + } + }; + + // fetch the private key + let priv_key_db_key = encode_private_key_db_key(pub_key); + let Some(encrypted_priv_key) = self + .db + .get_u8(&ReadOptions::new(), &priv_key_db_key) + .map_err(DbError::Read)? 
+ else { + return Ok(false); + }; + + // decrypt it + decrypt_private_key(&encrypted_priv_key, &passphrase)?; + + // set it + inner.passphrase = passphrase; + + Ok(true) + } + + /// Generates, encrypts and stores new private keys and returns the public keys. + pub fn new_keys(&self, count: usize) -> Result, WalletError> { + let mut inner = self.inner(); + let mut pub_keys = Vec::with_capacity(count); + let batch = WriteBatch::new(); + + for i in 0..count { + // generate a new key + let keypair = KeyPair::generate(); + let pub_key = keypair.pk; + let priv_key = keypair.sk; + pub_keys.push(pub_key); + + // encrypt the private key + let encrypted_priv_key = encrypt_private_key(&priv_key, &inner.passphrase)?; + let decrypted_priv_key = decrypt_private_key(&encrypted_priv_key, &inner.passphrase)?; + + // safety check + if decrypted_priv_key != priv_key { + return Err(WalletError::EncryptKeyMismatch); + } + + // store the key + let priv_key_db_key = encode_private_key_db_key(pub_key); + batch.put_u8(&priv_key_db_key, &encrypted_priv_key); + if i + 1 == count { + batch.put_u8(&[NEWEST_PUBLIC_KEY_PREFIX], &pub_key[..]) + } + + // update the filter + if let Err(err) = inner.filter.add(&pub_key[..]) { + let err = WalletError::FilterInsertFailed(err); + error!("{:?}", err); + } + } + + let wo = WriteOptions { sync: true }; + self.db.write(&wo, &batch).map_err(DbError::WriteBatch)?; + + Ok(pub_keys) + } + + /// Adds an existing key pair to the database. 
+ pub fn add_key(&self, pub_key: PublicKey, priv_key: SecretKey) -> Result<(), WalletError> { + // encrypt the private key + let inner = self.inner(); + let encrypted_priv_key = encrypt_private_key(&priv_key, &inner.passphrase)?; + let decrypted_priv_key = decrypt_private_key(&encrypted_priv_key, &inner.passphrase)?; + + // safety check + if decrypted_priv_key != priv_key { + return Err(WalletError::EncryptKeyMismatch); + } + + // store the key + let priv_key_db_key = encode_private_key_db_key(pub_key); + let wo = WriteOptions { sync: true }; + self.db + .put_u8(&wo, &priv_key_db_key, &encrypted_priv_key) + .map_err(DbError::Write)?; + + Ok(()) + } + + /// Returns all of the public keys from the database. + pub fn get_keys(&self) -> Result, WalletError> { + let priv_key_db_key = [PRIVATE_KEY_PREFIX]; + let mut pub_keys = Vec::new(); + + let iter = self + .db + .keys_iter(&ReadOptions::new()) + .prefix(&priv_key_db_key); + + for key in iter { + let pub_key = decode_private_key_db_key(&key)?; + pub_keys.push(pub_key) + } + + Ok(pub_keys) + } + + /// Retrieve a private key for a given public key + pub fn get_private_key(&self, pub_key: PublicKey) -> Result, WalletError> { + // fetch the private key + let priv_key_db_key = encode_private_key_db_key(pub_key); + match self + .db + .get_u8(&ReadOptions::new(), &priv_key_db_key) + .map_err(DbError::Read)? 
+ { + Some(encrypted_priv_key) => { + let inner = self.inner(); + let priv_key = decrypt_private_key(&encrypted_priv_key, &inner.passphrase)?; + Ok(Some(priv_key)) + } + None => Ok(None), + } + } + + /// Creates a ConnectionHandler that connects and interfaces with a peer + pub async fn connect( + self: &Arc, + peer: SocketAddr, + genesis_id: &BlockID, + tls_verify: bool, + ) -> Result<(), WalletError> { + if !self.is_connected().await { + let (shutdown_chan_tx, shutdown_chan_rx) = shutdown_channel(); + let (out_chan_tx, out_chan_rx) = channel(1); + let mut connection_handler = + ConnectionHandler::new(Arc::downgrade(self), out_chan_rx, shutdown_chan_rx); + connection_handler + .connect(peer, genesis_id, tls_verify) + .await?; + let shutdown = Shutdown::new(connection_handler.spawn(), shutdown_chan_tx); + *self.connection.lock().await = Some(WalletConnection { + out_chan_tx, + shutdown, + }); + } + + Ok(()) + } + + /// Returns true if the wallet is connected to a peer. + pub async fn is_connected(&self) -> bool { + let connection = self.connection.lock().await; + if let Some(connection) = connection.as_ref() { + return !connection.shutdown.is_finished(); + } + + false + } + + /// Sets a callback to receive new transactions relevant to the wallet. + pub fn set_transaction_callback(&self, callback: TransactionCallback) { + self.inner().transaction_callback = Some(callback) + } + + /// Sets a callback to receive new filter blocks with confirmed transactions relevant to this wallet. + pub fn set_filter_block_callback(&self, callback: FilterBlockCallback) { + self.inner().filter_block_callback = Some(callback); + } + + /// Returns a public key's balance as well as the current block height. 
+ pub async fn get_balance(&self, pub_key: &PublicKey) -> Result<(u64, u64), WalletError> { + let (result_chan_tx, result_chan_rx) = oneshot::channel(); + let mut conn = self.connection.lock().await; + let conn = conn.as_mut().unwrap(); + conn.out_chan_tx + .send(( + Message::GetBalance(GetBalanceMessage { + public_key: *pub_key, + }), + result_chan_tx, + )) + .await?; + + let result = result_chan_rx.await?; + if let Some(err) = result.err { + return Err(WalletError::ConnectionHandler(err)); + } + let Some(Message::Balance(b)) = result.message else { + return Err(WalletError::WalletResultMissing); + }; + + Ok(( + b.balance.expect("result should have a balance"), + b.height.expect("result should have a height"), + )) + } + + /// Returns a set of public key balances as well as the current block height. + /// Returns the current tip of the main chain's header. + pub async fn get_tip_header(&self) -> Result<(BlockID, BlockHeader), WalletError> { + let (result_chan_tx, result_chan_rx) = oneshot::channel(); + let mut conn = self.connection.lock().await; + let conn = conn.as_mut().unwrap(); + conn.out_chan_tx + .send((Message::GetTipHeader, result_chan_tx)) + .await?; + + let result = result_chan_rx.await?; + if let Some(err) = result.err { + return Err(WalletError::ConnectionHandler(err)); + } + let Some(Message::TipHeader(Some(th))) = result.message else { + return Err(WalletError::WalletResultMissing); + }; + + Ok((th.block_id, th.block_header)) + } + + /// Returns the peer's transaction relay policy. 
+ pub async fn get_transaction_relay_policy(&self) -> Result<(u64, u64), WalletError> { + let (result_chan_tx, result_chan_rx) = oneshot::channel(); + let mut conn = self.connection.lock().await; + let conn = conn.as_mut().unwrap(); + conn.out_chan_tx + .send((Message::GetTransactionRelayPolicy, result_chan_tx)) + .await?; + + let result = result_chan_rx.await?; + if let Some(err) = result.err { + return Err(WalletError::ConnectionHandler(err)); + } + + let Some(Message::TransactionRelayPolicy(trp)) = result.message else { + return Err(WalletError::WalletResultMissing); + }; + + Ok((trp.min_fee, trp.min_amount)) + } + + /// Sets the filter for the connection. + pub async fn set_filter(&self) -> Result<(), WalletError> { + let (result_chan_tx, result_chan_rx) = oneshot::channel(); + let filter = self.inner().filter.export(); + let mut conn = self.connection.lock().await; + let conn = conn.as_mut().unwrap(); + conn.out_chan_tx + .send(( + Message::FilterLoad(FilterLoadMessage { + r#type: "cuckoo".to_owned(), + filter, + }), + result_chan_tx, + )) + .await?; + + let result = result_chan_rx.await?; + if let Some(err) = result.err { + return Err(WalletError::ConnectionHandler(err)); + } + + match result.message { + Some(Message::FilterResult(None)) => Ok(()), + Some(Message::FilterResult(Some(fr))) => Err(WalletError::FilterResult(fr.error)), + _ => Err(WalletError::WalletResultMissing), + } + } + + /// Send creates, signs and pushes a transaction out to the network. + pub async fn send( + &self, + from: PublicKey, + to: PublicKey, + amount: u64, + fee: u64, + mut matures: Option, + mut expires: Option, + memo: Option, + ) -> Result { + // fetch the private key + let priv_key_db_key = encode_private_key_db_key(from); + let Some(encrypted_priv_key) = self + .db + .get_u8(&ReadOptions::new(), &priv_key_db_key) + .map_err(DbError::Read)? 
+ else { + return Err(WalletNotFoundError::PublicKey(from).into()); + }; + + // decrypt it + let priv_key = decrypt_private_key(&encrypted_priv_key, &self.inner().passphrase)?; + + // get the current tip header + let (_block_id, header) = self.get_tip_header().await?; + + // set these relative to the current height + if let Some(matures) = matures.as_mut() { + *matures += header.height; + } + if let Some(expires) = expires.as_mut() { + *expires += header.height; + } + + // create the transaction + let mut tx = Transaction::new( + Some(from), + to, + amount, + Some(fee), + matures, + expires, + header.height, + memo, + ); + + // sign it + tx.sign(priv_key)?; + + // push it + let (result_chan_tx, result_chan_rx) = oneshot::channel(); + let mut conn = self.connection.lock().await; + let conn = conn.as_mut().unwrap(); + conn.out_chan_tx + .send(( + Message::PushTransaction(PushTransactionMessage { transaction: tx }), + result_chan_tx, + )) + .await?; + + let result = result_chan_rx.await?; + if let Some(err) = result.err { + return Err(WalletError::ConnectionHandler(err)); + } + let Some(Message::PushTransactionResult(ptr)) = result.message else { + return Err(WalletError::WalletResultMissing); + }; + + if let Some(err) = ptr.error { + Err(WalletError::PushTransactionResultMessage(err)) + } else if let Some(transaction_id) = ptr.transaction_id { + Ok(transaction_id) + } else { + Err(WalletError::WalletResultNotFound) + } + } + + /// Retrieves information about a historic transaction. 
+ pub async fn get_transaction( + &self, + id: TransactionID, + ) -> Result<(Option, Option, Option), WalletError> { + let (result_chan_tx, result_chan_rx) = oneshot::channel(); + let mut conn = self.connection.lock().await; + let conn = conn.as_mut().unwrap(); + conn.out_chan_tx + .send(( + Message::GetTransaction(GetTransactionMessage { transaction_id: id }), + result_chan_tx, + )) + .await?; + + let result = result_chan_rx.await?; + if let Some(err) = result.err { + return Err(WalletError::ConnectionHandler(err)); + } + + let Some(Message::Transaction(t)) = result.message else { + return Err(WalletError::WalletResultMissing); + }; + + Ok((t.transaction, t.block_id, t.height)) + } + + /// Retrieves information about historic transactions involving the given public key. + pub async fn get_public_key_transactions( + &self, + public_key: PublicKey, + start_height: u64, + end_height: u64, + start_index: u32, + limit: usize, + ) -> Result<(u64, u64, u32, Option>), WalletError> { + let (result_chan_tx, result_chan_rx) = oneshot::channel(); + let mut conn = self.connection.lock().await; + let conn = conn.as_mut().unwrap(); + conn.out_chan_tx + .send(( + Message::GetPublicKeyTransactions(GetPublicKeyTransactionsMessage { + public_key, + start_height, + start_index, + end_height, + limit, + }), + result_chan_tx, + )) + .await?; + + let result = result_chan_rx.await?; + if let Some(err) = result.err { + return Err(WalletError::ConnectionHandler(err)); + } + let Some(Message::PublicKeyTransactions(pkt)) = result.message else { + return Err(WalletError::WalletResultMissing); + }; + + if let Some(error) = pkt.error { + Err(WalletError::PublicKeyTransactionResultMessage(error)) + } else if let PublicKeyTransactionsMessage { + public_key: _public_key, + start_height: Some(start_height), + stop_height: Some(stop_height), + stop_index: Some(stop_index), + filter_blocks, + error: _error, + } = pkt + { + Ok((start_height, stop_height, stop_index, filter_blocks)) + } else { + 
Err(WalletError::WalletResultNotFound) + } + } + + /// Verifies that the private key associated with the given public key is intact in the database. + pub fn verify_key(&self, pub_key: PublicKey) -> Result<(), WalletError> { + // fetch the private key + let priv_key_db_key = encode_private_key_db_key(pub_key); + let Some(encrypted_priv_key) = self + .db + .get_u8(&ReadOptions::new(), &priv_key_db_key) + .map_err(DbError::Read)? + else { + return Err(WalletNotFoundError::PrivateKey(pub_key).into()); + }; + + // decrypt it + let priv_key = decrypt_private_key(&encrypted_priv_key, &self.inner().passphrase)?; + + // check to make sure it can be used to derive the same public key + let pub_key_derived = priv_key.public_key(); + if pub_key_derived != pub_key { + return Err(WalletError::PrivateKeyDerive); + } + + Ok(()) + } + + /// Called by WalletConnection for a FilterBlockMessage + fn on_filter_block(self: &Arc, fb: FilterBlockMessage) { + let Some(ref filter_block_callback) = self.inner().filter_block_callback else { + return; + }; + filter_block_callback(self, fb); + } + + /// Called by WalletConnection for a PushTransactionMessage + fn on_push_transaction(self: &Arc, pt: PushTransactionMessage) { + if let Some(transaction_callback) = self.inner().transaction_callback.as_ref() { + transaction_callback(self, pt); + } + } + + /// Is called to shutdown the wallet + pub async fn shutdown(&self) -> Result<(), WalletError> { + let mut conn = self.connection.lock().await; + let Some(conn) = conn.take() else { + return Ok(()); + }; + conn.shutdown.send().await; + + Ok(()) + } + + /// Initialize the filter + fn initialize_filter(&self) -> Result<(), WalletError> { + let mut capacity = 4096; + + let pub_keys = self.get_keys()?; + if pub_keys.len() > capacity / 2 { + capacity = pub_keys.len() * 2; + } + + let mut inner = self.inner(); + inner.filter = CuckooFilter::with_capacity(capacity); + + for pub_key in pub_keys { + if let Err(err) = inner.filter.add(&pub_key[..]) { + let 
err = WalletError::FilterInsertFailed(err); + error!("{:?}", err); + } + } + + Ok(()) + } + + /// Attempt to repair a corrupt walletdb + pub fn repair_db(wallet_db_path: PathBuf) -> Result<(), WalletError> { + database::management::repair(&wallet_db_path, &Options::new()) + .map_err(|err| DbError::Repair(wallet_db_path, err))?; + Ok(()) + } + + /// Helper to retrieve inner values + fn inner(&self) -> MutexGuard { + self.inner.lock().unwrap() + } +} + +/// leveldb schema +/// n -> newest public key +/// k{pubkey} -> encrypted private key +const NEWEST_PUBLIC_KEY_PREFIX: u8 = b'n'; +const PRIVATE_KEY_PREFIX: u8 = b'k'; +const PREFIX_LENGTH: usize = 1; + +type PrivateKeyDbKey = [u8; PREFIX_LENGTH + PublicKey::BYTES]; + +fn encode_private_key_db_key(pub_key: PublicKey) -> PrivateKeyDbKey { + let mut key: PrivateKeyDbKey = [0u8; mem::size_of::()]; + key[0] = PRIVATE_KEY_PREFIX; + key[1..].copy_from_slice(&pub_key[..]); + key +} + +fn decode_private_key_db_key(key: &[u8]) -> Result { + let pub_key = PublicKey::from_slice(&key[PREFIX_LENGTH..][..PublicKey::BYTES]) + .map_err(DataError::Ed25519)?; + Ok(pub_key) +} + +// encryption utility functions + +/// NaCl secretbox encrypt a private key with an Argon2id key derived from passphrase +fn encrypt_private_key(priv_key: &SecretKey, passphrase: &str) -> Result, WalletError> { + let salt = generate_salt(); + let secret_key = stretch_passphrase(passphrase, &salt)?; + let mut nonce = [0u8; XSalsa20Poly1305::NONCE_SIZE]; + let mut rng = rand::thread_rng(); + rng.fill_bytes(&mut nonce); + let secretbox = XSalsa20Poly1305::new(&secret_key.into()); + let encrypted = secretbox.encrypt(&nonce.into(), &priv_key[..])?; + + // prepend the salt + let mut encrypted_priv_key = Vec::with_capacity(encrypted.len() + ARGON_SALT_LENGTH); + encrypted_priv_key.extend_from_slice(&salt); + encrypted_priv_key.extend_from_slice(&nonce); + encrypted_priv_key.extend_from_slice(&encrypted); + + Ok(encrypted_priv_key) +} + +/// NaCl secretbox decrypt a 
private key with an Argon2id key derived from passphrase +fn decrypt_private_key( + encrypted_priv_key: &[u8], + passphrase: &str, +) -> Result { + let salt = &encrypted_priv_key[..ARGON_SALT_LENGTH]; + let secret_key = stretch_passphrase(passphrase, salt)?; + let mut nonce = [0u8; XSalsa20Poly1305::NONCE_SIZE]; + nonce.copy_from_slice(&encrypted_priv_key[ARGON_SALT_LENGTH..][..XSalsa20Poly1305::NONCE_SIZE]); + let secretbox = XSalsa20Poly1305::new(&secret_key.into()); + let decrypted_priv_key = secretbox.decrypt( + &nonce.into(), + &encrypted_priv_key[ARGON_SALT_LENGTH + XSalsa20Poly1305::NONCE_SIZE..], + )?; + + Ok(SecretKey::from_slice(&decrypted_priv_key[..]).map_err(DataError::Ed25519)?) +} + +const ARGON_SALT_LENGTH: usize = 16; +const ARGON_KEY_LENGTH: usize = 32; +const ARGON_TIME: u32 = 1; +const ARGON_MEMORY: u32 = 64 * 1024; +const ARGON_THREADS: u32 = 4; + +/// Generate a suitable salt for use with Argon2id +fn generate_salt() -> [u8; ARGON_SALT_LENGTH] { + let mut salt = [0u8; ARGON_SALT_LENGTH]; + let mut rng = rand::thread_rng(); + rng.fill_bytes(&mut salt); + salt +} + +/// Stretch passphrase into a 32 byte key with Argon2id +fn stretch_passphrase( + passphrase: &str, + salt: &[u8], +) -> Result<[u8; ARGON_KEY_LENGTH], WalletError> { + let mut output_key_material = [0u8; ARGON_KEY_LENGTH]; + let params = Params::new( + ARGON_MEMORY, + ARGON_TIME, + ARGON_THREADS, + Some(ARGON_KEY_LENGTH), + )?; + Argon2::new(Algorithm::Argon2id, Version::V0x13, params).hash_password_into( + passphrase.as_bytes(), + salt, + &mut output_key_material, + )?; + + Ok(output_key_material) +} + +#[derive(Error)] +pub enum WalletError { + #[error("unable to encrypt/decrypt private keys")] + EncryptKeyMismatch, + #[error("unable to insert into filter")] + FilterInsertFailed(#[source] CuckooError), + #[error("filter result: {0}")] + FilterResult(String), + #[error("private key cannot be used to derive the same public key. 
possibly corrupt.")] + PrivateKeyDerive, + #[error("public key transaction result message: {0}")] + PublicKeyTransactionResultMessage(String), + #[error("transaction result message: {0}")] + PushTransactionResultMessage(String), + #[error("wallet result is missing")] + WalletResultMissing, + #[error("empty result returned")] + WalletResultNotFound, + + #[error("connection handler")] + ConnectionHandler(#[from] ConnectionHandlerError), + #[error("wallet not found")] + WalletNotFound(#[from] WalletNotFoundError), + + #[error("channel")] + Channel(#[from] ChannelError), + #[error("data")] + Data(#[from] DataError), + #[error("db")] + Db(#[from] DbError), + #[error("peer connection")] + PeerConnnection(#[from] PeerConnectionError), + #[error("transaction")] + Transaction(#[from] TransactionError), + + #[error("argon2: {0}")] + Argon2(argon2::Error), + #[error("secretbox: {0}")] + Secretbox(crypto_secretbox::Error), +} + +impl_debug_error_chain!(WalletError, "wallet"); + +impl From> for WalletError { + fn from(err: tokio::sync::mpsc::error::SendError<(Message, ResultChanSender)>) -> Self { + Self::Channel(ChannelError::Send("out", err.to_string())) + } +} + +impl From for WalletError { + fn from(err: tokio::sync::oneshot::error::RecvError) -> Self { + Self::Channel(ChannelError::OneshotReceive("wallet result", err)) + } +} + +impl From for WalletError { + fn from(err: argon2::Error) -> Self { + Self::Argon2(err) + } +} + +impl From for WalletError { + fn from(err: crypto_secretbox::Error) -> Self { + Self::Secretbox(err) + } +} + +#[derive(Error, Debug)] +pub enum WalletNotFoundError { + #[error("public key not found: {}", .0.as_base64())] + PublicKey(PublicKey), + #[error("private key not found for public key: {}", .0.as_base64())] + PrivateKey(PublicKey), +} + +type WsSink = SplitSink>, WsMessage>; + +struct WebSocket { + websocket: Option>>, + addr: SocketAddr, +} + +struct ConnectionHandler { + wallet: Weak, + conn: Option, + /// outgoing messages and results for 
synchronous requests + out_chan_rx: OutChanReceiver, + shutdown_chan_rx: ShutdownChanReceiver, +} + +impl ConnectionHandler { + pub fn new( + wallet: Weak, + out_chan_rx: OutChanReceiver, + shutdown_chan_rx: ShutdownChanReceiver, + ) -> Self { + Self { + wallet, + conn: None, + out_chan_rx, + shutdown_chan_rx, + } + } + + /// Connects to a peer for transaction history, balance information, and sending new transactions. + /// The threat model assumes the peer the wallet is speaking to is not an adversary. + pub async fn connect( + &mut self, + addr: SocketAddr, + genesis_id: &BlockID, + tls_verify: bool, + ) -> Result<(), PeerConnectionError> { + let url = format!("wss://{}/{}", addr, &genesis_id); + let request = url.into_client_request()?; + + // by default clients skip verification as most peers are using ephemeral certificates and keys. + let client_config = client_config(tls_verify); + + let (conn, _response) = match timeout( + CONNECT_WAIT, + connect_async_tls_with_config( + request, + None, + true, + Some(Connector::Rustls(Arc::new(client_config))), + ), + ) + .await + { + Err(err) => { + return Err(PeerConnectionError::Timeout(addr, err)); + } + Ok(Ok(v)) => v, + Ok(Err(err)) => { + return Err(PeerConnectionError::Connect(addr, err)); + } + }; + + self.conn = Some(WebSocket { + websocket: Some(conn), + addr, + }); + + Ok(()) + } + + pub fn spawn(self) -> JoinHandle> { + tokio::spawn(async { self.run().await.map_err(Into::into) }) + } + + /// Executes the Wallet's main loop. + /// It manages reading and writing to the peer WebSocket. + pub async fn run(mut self) -> Result<(), ConnectionHandlerError> { + let conn = self.conn.as_mut().unwrap().websocket.take().unwrap(); + let (mut ws_sender, mut ws_receiver) = conn.split(); + let mut result_chan = None; + + // reader / writer loop + loop { + tokio::select! 
{ + Some((message, result_chan_tx)) = self.out_chan_rx.recv() => { + let json = match serde_json::to_string(&message).map_err(JsonError::Deserialize) { + Ok(v) => v, + Err(err) => { + result_chan_tx.send(WalletResult{ + err: Some(err.into()), + message: None + })?; + break Ok(()) + } + }; + + // send outgoing message to peer + if let Err(err) = self.send_with_timeout(&mut ws_sender, WsMessage::Text(json)).await { + result_chan_tx.send(WalletResult { + err: Some(err.into()), + message: None + })?; + break Ok(()) + } else { + result_chan = Some(result_chan_tx); + } + } + + // new message from peer + msg = ws_receiver.next() => { + let message = match msg { + Some(Ok(v)) => v, + Some(Err(err)) => { + if let Some(result_chan) = result_chan.take() { + result_chan.send(WalletResult { + err: Some(err.into()), + message: None + })?; + } else { + break Err(PeerConnectionError::Websocket(err).into()) + } + break Ok(()) + } + None => { + break Err(PeerConnectionError::Dropped(self.addr()).into()) + } + }; + + match message { + WsMessage::Text(ref json) => { + let message = match serde_json::from_str::(json).map_err(JsonError::Deserialize) { + Ok(v) => v, + Err(err) => { + if let Some(result_chan) = result_chan.take() { + result_chan.send(WalletResult{ + err: Some(err.into()), + message: None + })?; + } else { + break Err(err.into()) + } + break Ok(()) + } + }; + + match message { + Message::FilterBlock(fb) => { + self.wallet.upgrade().unwrap().on_filter_block(fb); + } + + Message::PushTransaction(pt) => { + self.wallet.upgrade().unwrap().on_push_transaction(pt); + } + + Message::Balance(_) | + Message::FilterResult(_) | + Message::PublicKeyTransactions(_) | + Message::PushTransactionResult(_) | + Message::TipHeader(_) | + Message::Transaction(_) | + Message::TransactionRelayPolicy(_) => { + if let Some(result_chan) = result_chan.take() { + result_chan.send(WalletResult { + message: Some(message), + err: None + })?; + } + }, + + // other message types + _ => {} + } + } + + 
WsMessage::Close(_) => { + info!( + "Received close message from: {}", + self.addr() + ); + break Ok(()) + } + + // other websocket messages + _ => {} + } + } + + _ = &mut self.shutdown_chan_rx => { + ws_sender.close().await.map_err(PeerConnectionError::Websocket)?; + break Ok(()) + } + } + } + } + + fn addr(&self) -> SocketAddr { + self.conn.as_ref().expect("expected connection").addr + } + + /// Send outgoing messages with a write timeout period + async fn send_with_timeout( + &self, + ws_sender: &mut WsSink, + message: WsMessage, + ) -> Result<(), PeerConnectionError> { + match timeout(WRITE_WAIT, ws_sender.send(message)).await { + Err(err) => Err(PeerConnectionError::Timeout(self.addr(), err)), + Ok(Err(err)) => Err(err.into()), + _ => Ok(()), + } + } +} + +impl Drop for ConnectionHandler { + fn drop(&mut self) { + if let Some(ref conn) = self.conn { + info!("Closed connection with: {}", conn.addr); + } + } +} + +#[derive(Error)] +pub enum ConnectionHandlerError { + #[error("channel")] + Channel(#[from] ChannelError), + #[error("json")] + Json(#[from] JsonError), + #[error("peer connection")] + PeerConnection(#[from] PeerConnectionError), + + #[error("websocket message")] + WsMessage(#[from] tokio_tungstenite::tungstenite::Error), +} + +impl_debug_error_chain!(ConnectionHandlerError, "connection handler"); + +impl From for ConnectionHandlerError { + fn from(_err: WalletResult) -> Self { + Self::Channel(ChannelError::OneshotSend("wallet result")) + } +} + +#[cfg(test)] +mod test { + use ed25519_compact::KeyPair; + + use super::*; + + #[test] + fn test_private_key_encryption() { + let priv_key = KeyPair::generate().sk; + let passphrase = "the quick brown fox whatever whatever"; + let encrypted_priv_key = encrypt_private_key(&priv_key, passphrase).unwrap(); + let decrypted_priv_key = decrypt_private_key(&encrypted_priv_key, "nope"); + assert!(matches!( + decrypted_priv_key, + Err(WalletError::Secretbox(crypto_secretbox::aead::Error)) + )); + + let 
decrypted_priv_key = decrypt_private_key(&encrypted_priv_key, passphrase); + assert!(decrypted_priv_key.is_ok(), "decryption failed"); + assert_eq!( + decrypted_priv_key.unwrap(), + priv_key, + "private key mismatch after decryption" + ); + } +}