From a3b3c6faaff774fff4555c2f44a6a575f77fb9e8 Mon Sep 17 00:00:00 2001 From: Marek Date: Mon, 16 Oct 2023 01:09:58 +0200 Subject: [PATCH 01/18] Run `cargo update` (#7742) --- Cargo.lock | 286 ++++++++++++++++++++++++----------------------------- 1 file changed, 128 insertions(+), 158 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9b8d40c3b34..17c4283671a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -91,9 +91,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea5d730647d4fadd988536d06fecce94b7b4f2a7efdae548f1cf4b63205518ab" +checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" dependencies = [ "memchr", ] @@ -150,15 +150,15 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b84bf0a05bbb2a83e5eb6fa36bb6e87baa08193c35ff52bbf6b38d8af2890e46" +checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" [[package]] name = "anstyle-parse" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" +checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140" dependencies = [ "utf8parse", ] @@ -208,9 +208,9 @@ checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "async-compression" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb42b2197bf15ccb092b62c74515dbd8b86d0effd934795f6687c93b6e679a2c" +checksum = "f658e2baef915ba0f26f1f7c42bfb8e12f532a01f449a090ded75ae7a07e9ba2" dependencies = [ "flate2", "futures-core", @@ -238,7 +238,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -249,7 +249,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -407,7 +407,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -429,7 +429,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.37", + "syn 2.0.38", "which", ] @@ -561,9 +561,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.6.2" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c2f7349907b712260e64b0afe2f84692af14a454be26187d9df565c7f69266a" +checksum = "c79ad7fb2dd38f3dabd76b09c6a5a20c038fc0213ef1e9afd30eb777f120f019" dependencies = [ "memchr", "serde", @@ -794,7 +794,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1058,7 +1058,7 @@ checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1075,14 +1075,14 @@ dependencies = [ [[package]] name = "cxx-gen" -version = "0.7.107" +version = "0.7.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb5062549c7a2c56d9e807eb0244e18c5750616d62e77fd78d9886a2877662d3" +checksum = "1318697052dbc5a12f8e5e603441413d6096350c29b8361eb45a9c531be61dda" dependencies = [ "codespan-reporting", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1099,7 +1099,7 @@ checksum = "4a076022ece33e7686fb76513518e219cca4fce5750a8ae6d1ce6c0f48fd1af9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1147,7 +1147,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1169,7 +1169,7 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" 
dependencies = [ "darling_core 0.20.3", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1231,7 +1231,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1258,7 +1258,7 @@ checksum = "7d9ce6874da5d4415896cd45ffbc4d1cfc0c4f9c079427bd870742c30f2f65a9" dependencies = [ "curve25519-dalek", "ed25519", - "hashbrown 0.14.0", + "hashbrown 0.14.1", "hex", "rand_core 0.6.4", "serde", @@ -1335,25 +1335,14 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "136526188508e25c6fef639d7927dfb3e0e3084488bf202267829cf7fc23dbdd" +checksum = "ac3e13f66a2f95e32a39eaa81f6b95d42878ca0e1db0c7543723dfe12557e860" dependencies = [ - "errno-dragonfly", "libc", "windows-sys 0.48.0", ] -[[package]] -name = "errno-dragonfly" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" -dependencies = [ - "cc", - "libc", -] - [[package]] name = "eyre" version = "0.6.8" @@ -1375,9 +1364,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" [[package]] name = "ff" @@ -1416,9 +1405,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.27" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" +checksum = 
"46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" dependencies = [ "crc32fast", "miniz_oxide", @@ -1534,7 +1523,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -1733,9 +1722,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" +checksum = "7dfda62a12f55daeae5015f81b0baea145391cb4520f86c248fc615d72640d12" dependencies = [ "ahash", "allocator-api2", @@ -2039,12 +2028,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.0.1" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad227c3af19d4914570ad36d30409928b75967c298feb9ea1969db3a610bb14e" +checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897" dependencies = [ "equivalent", - "hashbrown 0.14.0", + "hashbrown 0.14.1", "serde", ] @@ -2089,9 +2078,9 @@ dependencies = [ [[package]] name = "insta" -version = "1.33.0" +version = "1.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aa511b2e298cd49b1856746f6bb73e17036bcd66b25f5e92cdcdbec9bd75686" +checksum = "5d64600be34b2fcfc267740a243fa7744441bb4947a619ac4e5bb6507f35fbfc" dependencies = [ "console", "lazy_static", @@ -2156,9 +2145,9 @@ checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "jobserver" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" +checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" dependencies = [ "libc", ] @@ -2273,9 +2262,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] 
name = "libc" -version = "0.2.148" +version = "0.2.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" +checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" [[package]] name = "libgit2-sys" @@ -2301,9 +2290,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "librocksdb-sys" @@ -2349,9 +2338,9 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.4.7" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a9bad9f94746442c783ca431b22403b519cd7fbeed0533fdd6328b2f2212128" +checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" [[package]] name = "lock_api" @@ -2421,9 +2410,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.6.3" +version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] name = "memoffset" @@ -2479,7 +2468,7 @@ checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -2633,9 +2622,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" dependencies = [ 
"autocfg", "libm", @@ -2893,9 +2882,9 @@ checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" -version = "2.7.3" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7a4d085fd991ac8d5b05a147b437791b4260b76326baf0fc60cf7c9c27ecd33" +checksum = "c022f1e7b65d6a24c0dbbd5fb344c66881bc01f3e5ae74a1c8100f2f985d98a4" dependencies = [ "memchr", "thiserror", @@ -2904,9 +2893,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.3" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bee7be22ce7918f641a33f08e3f43388c7656772244e2bbb2477f44cc9021a" +checksum = "35513f630d46400a977c4cb58f78e1bfbe01434316e60c37d27b9ad6139c66d8" dependencies = [ "pest", "pest_generator", @@ -2914,22 +2903,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.3" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1511785c5e98d79a05e8a6bc34b4ac2168a0e3e92161862030ad84daa223141" +checksum = "bc9fc1b9e7057baba189b5c626e2d6f40681ae5b6eb064dc7c7834101ec8123a" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] name = "pest_meta" -version = "2.7.3" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42f0394d3123e33353ca5e1e89092e533d2cc490389f2bd6131c43c634ebc5f" +checksum = "1df74e9e7ec4053ceb980e7c0c8bd3594e977fde1af91daba9c928e8e8c6708d" dependencies = [ "once_cell", "pest", @@ -2943,7 +2932,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 2.0.1", + "indexmap 2.0.2", ] [[package]] @@ -2963,7 +2952,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + 
"syn 2.0.38", ] [[package]] @@ -3048,7 +3037,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" dependencies = [ "proc-macro2", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -3107,9 +3096,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.67" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" +checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" dependencies = [ "unicode-ident", ] @@ -3172,7 +3161,7 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.37", + "syn 2.0.38", "tempfile", "which", ] @@ -3187,7 +3176,7 @@ dependencies = [ "itertools 0.11.0", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -3445,8 +3434,8 @@ checksum = "d119d7c7ca818f8a53c300863d4f87566aac09943aef5b355bb83969dae75d87" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.0", - "regex-syntax 0.8.0", + "regex-automata 0.4.1", + "regex-syntax 0.8.1", ] [[package]] @@ -3460,13 +3449,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d58da636bd923eae52b7e9120271cbefb16f399069ee566ca5ebf9c30e32238" +checksum = "465c6fc0621e4abc4187a2bda0937bfd4f722c2730b29562e19689ea796c9a4b" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.0", + "regex-syntax 0.8.1", ] [[package]] @@ -3483,9 +3472,9 @@ checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "regex-syntax" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3cbb081b9784b07cceb8824c8583f86db4814d172ab043f3c23f7dc600bf83d" +checksum = "56d84fdd47036b038fc80dd333d10b6aab10d5d31f4a366e20014def75328d33" 
[[package]] name = "reqwest" @@ -3555,9 +3544,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.2" +version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "911b295d2d302948838c8ac142da1ee09fa7863163b44e6715bc9357905878b8" +checksum = "9babe80d5c16becf6594aa32ad2be8fe08498e7ae60b77de8df700e67f191d7e" dependencies = [ "cc", "getrandom 0.2.10", @@ -3644,9 +3633,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.14" +version = "0.38.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "747c788e9ce8e92b12cd485c49ddf90723550b654b32508f979b71a7b1ecda4f" +checksum = "745ecfa778e66b2b63c88a61cb36e0eea109e803b0b86bf9879fbc77c70e86ed" dependencies = [ "bitflags 2.4.0", "errno", @@ -3675,7 +3664,7 @@ checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" dependencies = [ "log", "ring 0.16.20", - "rustls-webpki 0.101.6", + "rustls-webpki", "sct", ] @@ -3688,16 +3677,6 @@ dependencies = [ "base64 0.21.4", ] -[[package]] -name = "rustls-webpki" -version = "0.100.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6a5fc258f1c1276dfe3016516945546e2d5383911efc0fc4f1cdc5df3a4ae3" -dependencies = [ - "ring 0.16.20", - "untrusted 0.7.1", -] - [[package]] name = "rustls-webpki" version = "0.101.6" @@ -3898,9 +3877,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.188" +version = "1.0.189" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" +checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" dependencies = [ "serde_derive", ] @@ -3916,13 +3895,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.188" +version = "1.0.189" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" +checksum = 
"1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -3931,7 +3910,7 @@ version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" dependencies = [ - "indexmap 2.0.1", + "indexmap 2.0.2", "itoa", "ryu", "serde", @@ -3978,7 +3957,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.0.1", + "indexmap 2.0.2", "serde", "serde_json", "serde_with_macros 3.3.0", @@ -4006,14 +3985,14 @@ dependencies = [ "darling 0.20.3", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] name = "sha2" -version = "0.10.7" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if 1.0.0", "cpufeatures", @@ -4022,9 +4001,9 @@ dependencies = [ [[package]] name = "sharded-slab" -version = "0.1.4" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" dependencies = [ "lazy_static", ] @@ -4052,9 +4031,9 @@ checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" [[package]] name = "similar" -version = "2.2.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "420acb44afdae038210c99e69aae24109f32f15500aa708e81d46c9f29d55fcf" +checksum = "2aeaf503862c419d66959f5d7ca015337d864e9c49485d771b732e2a20453597" [[package]] name = "sketches-ddsketch" @@ -4201,9 +4180,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.37" +version = "2.0.38" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7303ef2c05cd654186cb250d29049a24840ca25d2747c25c0381c8d9e2f582e8" +checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" dependencies = [ "proc-macro2", "quote", @@ -4288,22 +4267,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.48" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" +checksum = "1177e8c6d7ede7afde3585fd2513e611227efd6481bd78d2e82ba1ce16557ed4" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.48" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" +checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -4318,9 +4297,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f6bb557fd245c28e6411aa56b6403c689ad95061f50e4be16c274e70a17e48" +checksum = "426f806f4089c493dcac0d24c29c01e2c38baf8e30f1b716ee37e83d200b18fe" dependencies = [ "deranged", "itoa", @@ -4333,15 +4312,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a942f44339478ef67935ab2bbaec2fb0322496cf3cbe84b261e06ac3814c572" +checksum = 
"4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20" dependencies = [ "time-core", ] @@ -4409,7 +4388,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -4511,7 +4490,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.0.1", + "indexmap 2.0.2", "toml_datetime", "winnow", ] @@ -4522,7 +4501,7 @@ version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" dependencies = [ - "indexmap 2.0.1", + "indexmap 2.0.2", "serde", "serde_spanned", "toml_datetime", @@ -4566,7 +4545,7 @@ dependencies = [ "proc-macro2", "prost-build", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -4678,13 +4657,13 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] [[package]] @@ -4908,17 +4887,17 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "ureq" -version = "2.7.1" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b11c96ac7ee530603dcdf68ed1557050f374ce55a5a07193ebf8cbc9f8927e9" +checksum = "f5ccd538d4a604753ebc2f17cd9946e89b77bf87f6a8e2309667c6f2e87855e3" dependencies = [ "base64 0.21.4", "log", "once_cell", "rustls 0.21.7", - "rustls-webpki 0.100.3", + "rustls-webpki", "url", - "webpki-roots 0.23.1", + "webpki-roots 0.25.2", ] [[package]] @@ -5052,7 +5031,7 @@ 
dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", "wasm-bindgen-shared", ] @@ -5086,7 +5065,7 @@ checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -5113,7 +5092,7 @@ version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed63aea5ce73d0ff405984102c42de94fc55a6b75765d621c65262469b3c9b53" dependencies = [ - "ring 0.17.2", + "ring 0.17.3", "untrusted 0.9.0", ] @@ -5126,15 +5105,6 @@ dependencies = [ "webpki", ] -[[package]] -name = "webpki-roots" -version = "0.23.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b03058f88386e5ff5310d9111d53f48b17d732b401aeb83a8d5190f2ac459338" -dependencies = [ - "rustls-webpki 0.100.3", -] - [[package]] name = "webpki-roots" version = "0.25.2" @@ -5327,9 +5297,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "winnow" -version = "0.5.15" +version = "0.5.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c2e3184b9c4e92ad5167ca73039d0c42476302ab603e2fec4487511f38ccefc" +checksum = "037711d82167854aff2018dfd193aa0fef5370f456732f0d5a0c59b0f1b4b907" dependencies = [ "memchr", ] @@ -5640,7 +5610,7 @@ dependencies = [ "hex", "howudoin", "humantime-serde", - "indexmap 2.0.1", + "indexmap 2.0.2", "itertools 0.11.0", "lazy_static", "metrics", @@ -5688,7 +5658,7 @@ dependencies = [ "futures", "hex", "hyper", - "indexmap 2.0.1", + "indexmap 2.0.2", "insta", "jsonrpc-core", "jsonrpc-derive", @@ -5740,7 +5710,7 @@ dependencies = [ "hex-literal", "howudoin", "humantime-serde", - "indexmap 2.0.1", + "indexmap 2.0.2", "insta", "itertools 0.11.0", "jubjub", @@ -5777,7 +5747,7 @@ dependencies = [ "futures", "hex", "humantime", - "indexmap 2.0.1", + "indexmap 2.0.2", "insta", "itertools 0.11.0", "lazy_static", @@ 
-5835,7 +5805,7 @@ dependencies = [ "howudoin", "humantime-serde", "hyper", - "indexmap 2.0.1", + "indexmap 2.0.2", "indicatif", "inferno", "insta", @@ -5902,5 +5872,5 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.37", + "syn 2.0.38", ] From c43142332ae503323e46b082cba20f4c7f72ca78 Mon Sep 17 00:00:00 2001 From: Marek Date: Mon, 16 Oct 2023 01:10:08 +0200 Subject: [PATCH 02/18] chore: Update checkpoints (#7741) * Update Testnet checkpoints * Update Mainnet checkpoints --- .../src/checkpoint/main-checkpoints.txt | 50 ++++++++++++++++ .../src/checkpoint/test-checkpoints.txt | 59 +++++++++++++++++++ 2 files changed, 109 insertions(+) diff --git a/zebra-consensus/src/checkpoint/main-checkpoints.txt b/zebra-consensus/src/checkpoint/main-checkpoints.txt index 7259517375d..540ea6ce644 100644 --- a/zebra-consensus/src/checkpoint/main-checkpoints.txt +++ b/zebra-consensus/src/checkpoint/main-checkpoints.txt @@ -11210,3 +11210,53 @@ 2237898 0000000000d828b783a0b565084948017dd54667b470e18bf7fd808e98afc438 2238298 00000000001f208cadd174f7726b1bc958bf478ec3cffa5f093c757e17281a36 2238698 000000000113210e4cfe24701b23df168d63381008d30eb87d2c833554e58055 +2239098 00000000005662febe1341946f498f5d9d5dc6c39630fa750d5a6bb231b1b3be +2239498 0000000000fb27120130bb4c2721b2adeaa25bb9038606e3d93ba928fd8b3584 +2239898 0000000000645790ce398fd7cc2d51244dec038eeeef9222163d332fb7f0acdc +2240298 00000000000aa7b30793674e33dc2dce26e5cdcd1fd788c11c9ae69a49fbb111 +2240698 0000000000f18c1523d59120c350ae85afc1f0effb9d7ca68f36e60412262881 +2241098 00000000009899cefe84a57de16725a2394ddd3d9765322bbc863afdb834fa84 +2241498 00000000004bd9303ee01ccb844ecbcbc35b115bad101e9cb84cc53760e3dd25 +2241898 00000000000fcd10c7cb3a60f8fc461654e9b30d8d5b0b547e7d6a886985431a +2242298 000000000036012b82075cb3994fdbaa6c4e6c137a354d0e2f7beed9022532d6 +2242698 00000000018fcab0b410cf2f8578e7f28173c963707753f73dc3918d8ac33597 +2243098 
00000000016a59104d11603f53aa968d7efcb98c2a8f7099137790aa586d007d +2243498 00000000002a4ca2ef0df65b639ecf1d695efa06d374e3c9e17400b3a899d4a8 +2243898 00000000018f5d41aadab6a95df0658bc43de8ab1f2321bdb0828d6ed90d175b +2244298 0000000001071cf163c54a45439692d66e413cdc56f267b2fbff50682552baf6 +2244698 0000000000fe501883b6751ba7a1e51ca5b1f2af5675b651a35c43ca5f1dc0ae +2245098 00000000013d99facdcad44f8ab2c184ec8ea4deed4126049ddbf6b8436ba966 +2245498 00000000007a81bb85f63d73d6245e1ad44c69018f0b7a5916229e5ff29396ec +2245898 00000000000daf5f830d8b912c49dd5c86e50fdb95cd678fdbc32812b910c72f +2246298 000000000081a003991eb2ef03669e00fdd020f9b1c257773d49565e89cfb929 +2246698 000000000157bc4eedef772ff51727ebdbe1a416cf5f122899fad97cdb80b5f2 +2247098 000000000065afd59544b99fd69aaf47b48ee0c4c46542d53bcf1d10eabd961b +2247498 000000000149a6d21161b70e2201026ab97c7b864bd6af2649b56b8a87aae66d +2247898 000000000143b94184bbff80663b71b155a412d1e4c6f705ec684064aef5532a +2248298 0000000000a440d2dfd1f3e02e85982f8885c66374cf7a408ae50b446b7a95f9 +2248698 0000000000b737791ab7a870038f2a6857faf9ac3880c3f8bd7a099c4efe86c0 +2249098 000000000003ba095b8fa156964270acce4695319400934d0427aa70574fc18c +2249498 00000000009af5342fda5276c7e8136010430fc7b88f12d0529053d0c1dc34a8 +2249898 0000000001521367d10ac5dfabc821b36d311cdd7f3e455535693cefa44baa40 +2250298 0000000001d0cc4d9cada586cbc6dbcee099381bf141cfbd9017d4d5ae494cce +2250698 0000000000b2255b80bb3e66a18e457e574aac3dbaf2d5217642985ed3d8928d +2251098 0000000001728f8f71b980897ab0f1bc6414e2a0bd1a01a9c85805b8a4246d67 +2251498 00000000009c64581a5dcb6f5daeb9fc939f3f82332c227b0270a29b4131ec8c +2251898 00000000009145fb53ba0c0a0aef0203348d84c4271c824919fe30351de36513 +2252298 0000000000e928e6a013d4d03cdffeab9d9e7ea5062234ec03b6ed03d35f51ed +2252698 000000000024ae8486c74f3e7eb2bfa02b50eeac23ac96cd0e36ffe9d8b0ce96 +2253098 000000000042c7ff48fcc7291cbc36a97930897216959a4baf77a5c3edd2b3da +2253498 00000000003f7c72b280fdf5a14d27f924d8b46d5cbd84b73966920897540311 +2253898 
000000000086f805f8cd33c03bb66d2656216af0ed005b6a2fdd2ead709557a7 +2254298 0000000000e63424d7f007f6dccc724fa8aad1c328b4f28fd0c2e0f8774720a8 +2254698 000000000046527138e00a07325372fc078dd7306528029050c800bb30eee7b8 +2255098 0000000000d59a69903fe48f8cd10801d8a3dfcad643824856925cf97715f32a +2255498 00000000010e3dee658abf231630be0fc93cc333350560ac2ab7ce3d942f2c0d +2255898 0000000000aa37a5e6feb3549b6f866b5d99567cec4f5eb8be9fc3a60d195e99 +2256298 000000000161018956e2f22c64ebd1f40d5d5724dc43c0edda9de6200fd6021b +2256698 000000000017ff6d6cc047533708b7382aaac4f79373508fa25855f33ca3e138 +2257098 0000000000e025ca0a42f0649940216d86ae3634b91405557b2a17dc9bbf7e8c +2257498 000000000061e5e737670a3612041e6326a74bc6bb865b221ede2fe148ace070 +2257898 00000000008820c978833e535149174f965a6b3e4a6ac4b1ddc48345478ba64c +2258298 000000000100d9383f9c9ff554d473e8a276855beb6804b6b9a62f809b567a40 +2258698 0000000000a91531d88040a47c0f1b8f5b16ea1833aa97affaa04d5e7fc18aed diff --git a/zebra-consensus/src/checkpoint/test-checkpoints.txt b/zebra-consensus/src/checkpoint/test-checkpoints.txt index e161e22bb59..b46e6ca51a4 100644 --- a/zebra-consensus/src/checkpoint/test-checkpoints.txt +++ b/zebra-consensus/src/checkpoint/test-checkpoints.txt @@ -6306,3 +6306,62 @@ 2522000 0005f889589b8c25d4abe97edc6fca1f867a2df04903ca2d38d38a1d404a3c0d 2522400 001ff1664752da479f0b4657f8bc3e132da097fe5532e06183f68cf59959c967 2522800 000036ebe2e67a1c5db21fc028d2f5ea6651c61d9e03d259b51163a9bbb0cf1b +2523200 0028c4b53d0333e8c856eb723feb1d43a550ce4cdb49c01715798c44451b49a5 +2523600 000e5ea0858c53d9e579650272035d2b062c94b80ad8d42a4a54fec7db1c92cc +2524000 000ba1e0a3745d05714cc9d84c8c72ce40819b82a378899394b2a21a117cb928 +2524400 001177f2afbedaf47206bd6be1557a8b4b943095323953e5b567d89a7049cb47 +2524800 0087c080962b0aed55a34dcdaa88f17513b54f4e609fff399810a203659405d8 +2525200 00ae5fcf53d50e069e311fcce52728a360bf034e5a3a98117c4cfe7f1a3225f9 +2525600 00221359c0b24c843d636b08ffe26242580bad5624adedfb58616a4716ec1fc5 +2526000 
001fbf2ccdde41f716066a2d6fe547036bba0b0e616bac244f54395b04369490 +2526400 00665e692722e54a0adb4e38c6b2d9d032ffe97e43f4be364199be06a2c7f598 +2526800 000d26a95d4021506be2b08b1b3cf580507bc30503bdd6f1217a0a537202418c +2527200 003d1aa9aa844c569a4889d2fb8fc294c8c6850686abfa07f12ed9d8940b6c86 +2527600 0016f1dbafe5a61726cd1376238752f684e756fed07916db675023f5bcfc6591 +2528000 001bed30a08703a5b1a98264bf48ce953e0733349fac1b223a83d30761c7a0e8 +2528400 00152b454e4c97de3cb76df03811bd2bb627b69240e472afc750e0527235355a +2528800 001f8d71cc39da83900ba7db716099fb9963a21ec1ce74eeddfbb4920858052a +2529200 001574302f3ef2968b97391a025c0c178b62f5381e8cedcc3e728a78657790bc +2529600 002dc58bb40d57724d4497c73b651189e6679fbfa57560ba75a84b178a8fa99c +2530000 000d90ebe86cd1b15080ad419d5a59a13035dfffaa9e3e0f6016e44faff164db +2530400 001220b459188e9ff99a230b1ff87a32ed39e4baabc5dd3f99ab0967a1982a0f +2530800 001b79e826bc9c24c19c25abdc963da520859ed67c1e4787cfea1e915ffab835 +2531200 0015b035801bb9846cc2554c97ac726d3738f3be59dc4ce7d49e020f98103bb8 +2531600 000815786b2a3aab5ce13a67be5d038e060de43804016b9a1f2bacc5c098694e +2532000 0001e47dde90fd6f2868652068170b727c1e422b311ae60cc770655728af6b12 +2532400 0018c05b120af179f4b9476f866b908049eb74f3fcaa0fc6b636d2dbc7049f01 +2532800 00397dc0c12ec82cf7074815e187b0840f40b0a028782f28dc40da689a103a20 +2533200 00892c1c2bfa29d94c15b666fc4f0db6d1e3f97e9cd63df656996ef5947ceab9 +2533600 000c0703db639039fade3a641832a587be391a0c98ed57f61f222f9750501e44 +2534000 000fafb4b3abb9db1414291b7ceb42fde471cb1d595042cdb770280a195f8667 +2534400 0037f3f98539e0c6a259cc66f90779c105b217fc7d9aa5251c458353751797d6 +2534800 000920c9b1170ee274a7cada6d58e25b076e9b3b4e615fee462729b04184e65a +2535200 0029dac81f58f49a814e68ab87abde862eb34bc06a2dbb6910e9523476ab775b +2535600 0027ea1f5a7b07cfd83d554b202d9796b0bb37cfc19ae118ed1f2362e24028d4 +2536000 00088288e1323f66e3f5b31cca66908d2ebbbb1660def0fafda2d31080cd05f6 +2536400 0003895374a5e5e65b30e281267c22cdd5663cdeed83199e5641e0be76ca1cfc +2536800 
000169dd5e51b4abbe196e584765e97a2f72b1afbb8cd6117214d66fd04193ac +2537200 004331e57869225dd41d7d13efb16048c7098638262ff9e139c349fdf4219e1e +2537600 00045b97eced9319e929824f9bea56c571a77d3b104e6b3c44ab41812426e540 +2538000 0037bb8156d3ee43ff0dc54d9a169c47c7fbed2f12048e470fd7efbe40db6754 +2538400 002ad8f8d3845963bdcc10d40c63e31cb3b341c9f4538146889475e39ffd1ea9 +2538800 000983cfdebb96b6338d57603afd55d233fe16078743b21c9960ec66ffece45c +2539200 0015604f16dde84d9466c1ddb3ea7390c37bb7d070320f50ceff3927e84ab850 +2539600 0019335125c2ade7ad3ce3e7394bbb13951df7b9f873821efccc49a7e08e0bec +2540000 0010c1345d4412a8367aec519bee8d566053cab59109c333201ce35eefe9dd21 +2540400 0002a4795e1e927f7b5ab6f355737557613d5269c52e25e523691167930fb35f +2540800 000bd4153715bbe7ba1118d4314461850adc16ad2cb12a0e99fcce22b462f3bc +2541200 00028d6ab82bd6bfe3b4d351e33fc500342548f05e9d00a26f46e146103bb23a +2541600 000a0786992577c16735b8f4bd3aca629582e3a02b7d5686457c1196a73e9b03 +2542000 0013052546b09cba64ea3a4e119047d96c4cd7a6e043eb0f43f1cf1704585586 +2542400 001fbc9e2686653ae5e6b673cd438fd3fb88acbc0b01e6fc7d7738f3e86cb314 +2542800 0017f5fe146be4bb9eb00b91a1e3ad3cc80bd14abf262540b279cb10c291fc90 +2543200 001de3b190b8359667564d37ec81193c081ed66fb3380710bfd404edc901307d +2543600 000b26694e9241f10b71b95a118531cefa1d78a010e1077bb484aa1687d29e1b +2544000 0014ebfa07905683441b84289cee9ddd2f23f2f1fb23a02ffb4f4c937829e7a3 +2544400 000b8ad313960c5276e49ca99d3d799fd20486aa23c1e7c3fd29707b1d59db61 +2544800 003566e38a998f15fd07fbb8bdaec4e74947a1f932ad54dd76208925baae6e4a +2545200 0008c02621c639ccd6ec96ed8910a01b57cfecbdd853b58d774d21b07d98b21f +2545600 000922846ef68b656beee14815e999df2dd44625fdaa9fca703354db588d1b5a +2546000 0008416c201ea4c6694b57a7571c4a38b4bccbe07a445d36d301d2d4d286a361 +2546400 000d9091cc81528757991f79e116b4de846d76d4a04028358ba19a65e5acc480 From 050767025926e432f6fffe667fbddd2f0be63ef2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 
16 Oct 2023 20:13:38 +0000 Subject: [PATCH 03/18] build(deps): bump the log-time group with 1 update (#7748) Bumps the log-time group with 1 update: [tracing](https://github.com/tokio-rs/tracing). - [Release notes](https://github.com/tokio-rs/tracing/releases) - [Commits](https://github.com/tokio-rs/tracing/compare/tracing-0.1.37...tracing-0.1.39) --- updated-dependencies: - dependency-name: tracing dependency-type: direct:production update-type: version-update:semver-patch dependency-group: log-time ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 9 ++++----- tower-batch-control/Cargo.toml | 2 +- tower-fallback/Cargo.toml | 2 +- zebra-chain/Cargo.toml | 4 ++-- zebra-consensus/Cargo.toml | 2 +- zebra-network/Cargo.toml | 2 +- zebra-rpc/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebra-test/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 10 files changed, 14 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 17c4283671a..4c8629e1c82 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4633,11 +4633,10 @@ dependencies = [ [[package]] name = "tracing" -version = "0.1.37" +version = "0.1.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +checksum = "ee2ef2af84856a50c1d430afce2fdded0a4ec7eda868db86409b4543df0797f9" dependencies = [ - "cfg-if 1.0.0", "log", "pin-project-lite", "tracing-attributes", @@ -4668,9 +4667,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.31" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", "valuable", diff --git a/tower-batch-control/Cargo.toml b/tower-batch-control/Cargo.toml index 1856241d8c5..8cf59359144 
100644 --- a/tower-batch-control/Cargo.toml +++ b/tower-batch-control/Cargo.toml @@ -29,7 +29,7 @@ rayon = "1.7.0" tokio = { version = "1.33.0", features = ["time", "sync", "tracing", "macros"] } tokio-util = "0.7.8" tower = { version = "0.4.13", features = ["util", "buffer"] } -tracing = "0.1.37" +tracing = "0.1.39" tracing-futures = "0.2.5" [dev-dependencies] diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index e4c8b2a001c..e9fa523c6b9 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -19,7 +19,7 @@ categories = ["algorithms", "asynchronous"] pin-project = "1.1.3" tower = "0.4.13" futures-core = "0.3.28" -tracing = "0.1.37" +tracing = "0.1.39" [dev-dependencies] tokio = { version = "1.33.0", features = ["full", "tracing", "test-util"] } diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 5d8de1c8675..10831fd3858 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -92,7 +92,7 @@ humantime = "2.1.0" displaydoc = "0.2.4" static_assertions = "1.1.0" thiserror = "1.0.48" -tracing = "0.1.37" +tracing = "0.1.39" # Serialization hex = { version = "0.4.3", features = ["serde"] } @@ -138,7 +138,7 @@ color-eyre = "0.6.2" # Enable a feature that makes tinyvec compile much faster. 
tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } spandoc = "0.2.2" -tracing = "0.1.37" +tracing = "0.1.39" # Make the optional testing dependencies required proptest = "1.3.1" diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index 4c2c167d817..dd96d7da561 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -55,7 +55,7 @@ metrics = "0.21.1" thiserror = "1.0.48" tokio = { version = "1.33.0", features = ["time", "sync", "tracing", "rt-multi-thread"] } tower = { version = "0.4.13", features = ["timeout", "util", "buffer"] } -tracing = "0.1.37" +tracing = "0.1.39" tracing-futures = "0.2.5" orchard = "0.5.0" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index d1903789da1..faed08811c6 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -69,7 +69,7 @@ tower = { version = "0.4.13", features = ["retry", "discover", "load", "load-she metrics = "0.21.1" tracing-futures = "0.2.5" tracing-error = { version = "0.2.0", features = ["traced-error"] } -tracing = "0.1.37" +tracing = "0.1.39" # prod feature progress-bar howudoin = { version = "0.1.2", optional = true } diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 95e48569d6f..617cb001eb2 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -57,7 +57,7 @@ indexmap = { version = "2.0.1", features = ["serde"] } tokio = { version = "1.33.0", features = ["time", "rt-multi-thread", "macros", "tracing"] } tower = "0.4.13" -tracing = "0.1.37" +tracing = "0.1.39" hex = { version = "0.4.3", features = ["serde"] } serde = { version = "1.0.188", features = ["serde_derive"] } diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 95712071720..b7d6a6365be 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -66,7 +66,7 @@ thiserror = "1.0.48" rayon = "1.7.0" tokio = { version = "1.33.0", features = ["rt-multi-thread", "sync", "tracing"] } tower = { version = "0.4.13", features = ["buffer", "util"] } 
-tracing = "0.1.37" +tracing = "0.1.39" # elasticsearch specific dependencies. # Security: avoid default dependency on openssl diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 3417e77263a..c3c7381e6ca 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -41,7 +41,7 @@ thiserror = "1.0.48" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } tracing-error = "0.2.0" -tracing = "0.1.37" +tracing = "0.1.39" [dev-dependencies] tempfile = "3.8.0" diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index ff38f3f5b23..9f450d23843 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -179,7 +179,7 @@ tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } tracing-appender = "0.2.2" tracing-error = "0.2.0" tracing-futures = "0.2.5" -tracing = "0.1.37" +tracing = "0.1.39" metrics = "0.21.1" From 13230d09fecd556f649e5de56ec1756b51c2a271 Mon Sep 17 00:00:00 2001 From: Marek Date: Tue, 17 Oct 2023 01:04:33 +0200 Subject: [PATCH 04/18] chore: Release v1.3.0 (#7610) * Update `CHANGELOG.md` * Apply suggestions from code review Co-authored-by: teor * Group together test PRs for `z_getsubtreesbyindex` * chore: Release * Update the end of support height * Add progress bars changelog entry * Update CHANGELOG.md Co-authored-by: teor * Update CHANGELOG.md Co-authored-by: teor * Update `CHANGELOG.md` * Update `CHANGELOG.md` * Update `README.md` * Update `ESTIMATED_RELEASE_HEIGHT` Set the release height to start on ~ Monday, 2023-10-16. 
* Update CHANGELOG.md Co-authored-by: teor * Remove changelog entries invisible to users --------- Co-authored-by: teor --- CHANGELOG.md | 60 ++++++++++++++++++++ Cargo.lock | 24 ++++---- README.md | 2 +- book/src/user/docker.md | 2 +- book/src/user/install.md | 4 +- tower-batch-control/Cargo.toml | 2 +- tower-fallback/Cargo.toml | 2 +- zebra-chain/Cargo.toml | 4 +- zebra-consensus/Cargo.toml | 14 ++--- zebra-network/Cargo.toml | 4 +- zebra-node-services/Cargo.toml | 4 +- zebra-rpc/Cargo.toml | 14 ++--- zebra-script/Cargo.toml | 4 +- zebra-state/Cargo.toml | 6 +- zebra-test/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 8 +-- zebrad/Cargo.toml | 16 +++--- zebrad/src/components/sync/end_of_support.rs | 2 +- 18 files changed, 117 insertions(+), 57 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4a45994e65c..8457fce4916 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,66 @@ All notable changes to Zebra are documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org). +## [Zebra 1.3.0](https://github.com/ZcashFoundation/zebra/releases/tag/v1.3.0) - 2023-10-16 + +This release adds RPC methods for the "Spend before Sync" light wallet feature, +and fixes performance issues and bugs in the mining solution rate RPCs. Progress +bars can now be enabled using a config, please help us test them! + +It contains the following updates: + +### User Testing: Progress Bars + +Zebra has progress bars! When progress bars are enabled, you can see Zebra's blocks, +transactions, and peer connections in your terminal. We're asking Zebra users to test this +feature, and give us [feedback on the forums](https://forum.zcashcommunity.com/t/zebra-progress-bars/44485). 
+ +To show progress bars while running Zebra, add these lines to your `zebrad.toml`: +```toml +[tracing] +progress_bar = "summary" +``` + +For more details, including a known issue with time estimates, +read our [progress bars blog post](https://zfnd.org/experimental-zebra-progress-bars/). + +### Security + +- Fix database concurrency bugs that could have led to panics or incorrect history tree data (#7590, #7663) + +### Added + +- Zebra's progress bars can now be enabled using a `zebrad.toml` config (#7615) +- Add missing elasticsearch flag feature to lib docs (#7568) +- Add missing Docker variables and examples (#7552) +- Check database format is valid on startup and shutdown (#7566, #7606). We expect to catch almost all database validity errors in CI (#7602, #7627), so users are unlikely to see them on startup or shutdown. + +#### Spend before Sync Support + +- Add state requests and support code for the `z_getsubtreesbyindex` RPC (#7408, #7734) +- Implement the `z_getsubtreesbyindex` RPC (#7436) +- Test the `z_getsubtreesbyindex` RPC (#7515, #7521, #7566, #7514, #7628) +- Format subtree roots in little-endian order (#7466) +- Add note subtree indexes for new and existing blocks (#7437) +- Upgrade subtrees from the tip backwards, for compatibility with wallet syncing (#7531) +- Handle a subtree comparison edge case correctly (#7587) + +### Changed + +- Return errors instead of panicking in methods for Heights (#7591) +- Update tests for compatibility with the ECC's `lightwalletd` fork (#7349) + +### Fixed + +- Refactor docs for feature flags (#7567) +- Match zcashd's getblockchaininfo capitalisation for NU5 (#7454) +- Fix bugs and performance of `getnetworksolps` & `getnetworkhashps` RPCs (#7647) + +### Contributors + +Thank you to everyone who contributed to this release, we couldn't make Zebra without you: +@arya2, @gustavovalverde, @oxarbitrage, @rex4539, @teor2345 and @upbqdn. 
+ ## [Zebra 1.2.0](https://github.com/ZcashFoundation/zebra/releases/tag/v1.2.0) - 2023-09-01 ### Highlights diff --git a/Cargo.lock b/Cargo.lock index 4c8629e1c82..23b663a7ce8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4571,7 +4571,7 @@ dependencies = [ [[package]] name = "tower-batch-control" -version = "0.2.41-beta.5" +version = "0.2.41-beta.6" dependencies = [ "color-eyre", "ed25519-zebra", @@ -4595,7 +4595,7 @@ dependencies = [ [[package]] name = "tower-fallback" -version = "0.2.41-beta.5" +version = "0.2.41-beta.6" dependencies = [ "futures-core", "pin-project", @@ -5492,7 +5492,7 @@ dependencies = [ [[package]] name = "zebra-chain" -version = "1.0.0-beta.29" +version = "1.0.0-beta.30" dependencies = [ "bitflags 2.4.0", "bitflags-serde-legacy", @@ -5553,7 +5553,7 @@ dependencies = [ [[package]] name = "zebra-consensus" -version = "1.0.0-beta.29" +version = "1.0.0-beta.30" dependencies = [ "bellman", "blake2b_simd", @@ -5598,7 +5598,7 @@ dependencies = [ [[package]] name = "zebra-network" -version = "1.0.0-beta.29" +version = "1.0.0-beta.30" dependencies = [ "bitflags 2.4.0", "byteorder", @@ -5639,7 +5639,7 @@ dependencies = [ [[package]] name = "zebra-node-services" -version = "1.0.0-beta.29" +version = "1.0.0-beta.30" dependencies = [ "color-eyre", "jsonrpc-core", @@ -5651,7 +5651,7 @@ dependencies = [ [[package]] name = "zebra-rpc" -version = "1.0.0-beta.29" +version = "1.0.0-beta.30" dependencies = [ "chrono", "futures", @@ -5683,7 +5683,7 @@ dependencies = [ [[package]] name = "zebra-script" -version = "1.0.0-beta.29" +version = "1.0.0-beta.30" dependencies = [ "displaydoc", "hex", @@ -5696,7 +5696,7 @@ dependencies = [ [[package]] name = "zebra-state" -version = "1.0.0-beta.29" +version = "1.0.0-beta.30" dependencies = [ "bincode", "chrono", @@ -5740,7 +5740,7 @@ dependencies = [ [[package]] name = "zebra-test" -version = "1.0.0-beta.29" +version = "1.0.0-beta.30" dependencies = [ "color-eyre", "futures", @@ -5768,7 +5768,7 @@ dependencies = [ 
[[package]] name = "zebra-utils" -version = "1.0.0-beta.29" +version = "1.0.0-beta.30" dependencies = [ "color-eyre", "hex", @@ -5789,7 +5789,7 @@ dependencies = [ [[package]] name = "zebrad" -version = "1.2.0" +version = "1.3.0" dependencies = [ "abscissa_core", "atty", diff --git a/README.md b/README.md index 52cec8703ff..853b4c47f10 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ - [Getting Started](#getting-started) - [Docker](#docker) - [Building Zebra](#building-zebra) - - [Optional Configs & Features](#optional-features) + - [Optional Configs & Features](#optional-configs--features) - [Known Issues](#known-issues) - [Future Work](#future-work) - [Documentation](#documentation) diff --git a/book/src/user/docker.md b/book/src/user/docker.md index 5119017de01..439cefd27fe 100644 --- a/book/src/user/docker.md +++ b/book/src/user/docker.md @@ -17,7 +17,7 @@ docker run --detach zfnd/zebra:latest ### Build it locally ```shell -git clone --depth 1 --branch v1.2.0 https://github.com/ZcashFoundation/zebra.git +git clone --depth 1 --branch v1.3.0 https://github.com/ZcashFoundation/zebra.git docker build --file docker/Dockerfile --target runtime --tag zebra:local . docker run --detach zebra:local ``` diff --git a/book/src/user/install.md b/book/src/user/install.md index 28e7fc1bab3..2a03398d6e4 100644 --- a/book/src/user/install.md +++ b/book/src/user/install.md @@ -20,7 +20,7 @@ To compile Zebra directly from GitHub, or from a GitHub release source archive: ```sh git clone https://github.com/ZcashFoundation/zebra.git cd zebra -git checkout v1.2.0 +git checkout v1.3.0 ``` 3. 
Build and Run `zebrad` @@ -33,7 +33,7 @@ target/release/zebrad start ### Compiling from git using cargo install ```sh -cargo install --git https://github.com/ZcashFoundation/zebra --tag v1.2.0 zebrad +cargo install --git https://github.com/ZcashFoundation/zebra --tag v1.3.0 zebrad ``` ### Compiling on ARM diff --git a/tower-batch-control/Cargo.toml b/tower-batch-control/Cargo.toml index 8cf59359144..7b63bc9e0ef 100644 --- a/tower-batch-control/Cargo.toml +++ b/tower-batch-control/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tower-batch-control" -version = "0.2.41-beta.5" +version = "0.2.41-beta.6" authors = ["Zcash Foundation ", "Tower Maintainers "] description = "Tower middleware for batch request processing" # # Legal diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index e9fa523c6b9..4aa8a698d6a 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "tower-fallback" -version = "0.2.41-beta.5" +version = "0.2.41-beta.6" authors = ["Zcash Foundation "] description = "A Tower service combinator that sends requests to a first service, then retries processing on a second fallback service if the first service errors." 
license = "MIT OR Apache-2.0" diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 10831fd3858..2a5e75eb69f 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-chain" -version = "1.0.0-beta.29" +version = "1.0.0-beta.30" authors = ["Zcash Foundation "] description = "Core Zcash data structures" license = "MIT OR Apache-2.0" @@ -126,7 +126,7 @@ proptest-derive = { version = "0.4.0", optional = true } rand = { version = "0.8.5", optional = true } rand_chacha = { version = "0.3.1", optional = true } -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.29", optional = true } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.30", optional = true } [dev-dependencies] # Benchmarks diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index dd96d7da561..3a25c105d92 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-consensus" -version = "1.0.0-beta.29" +version = "1.0.0-beta.30" authors = ["Zcash Foundation "] description = "Implementation of Zcash consensus checks" license = "MIT OR Apache-2.0" @@ -62,13 +62,13 @@ orchard = "0.5.0" zcash_proofs = { version = "0.12.1", features = ["local-prover", "multicore", "download-params"] } -tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.5" } -tower-batch-control = { path = "../tower-batch-control/", version = "0.2.41-beta.5" } +tower-fallback = { path = "../tower-fallback/", version = "0.2.41-beta.6" } +tower-batch-control = { path = "../tower-batch-control/", version = "0.2.41-beta.6" } -zebra-script = { path = "../zebra-script", version = "1.0.0-beta.29" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.29" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.29" } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.29" } +zebra-script = { path = "../zebra-script", version = 
"1.0.0-beta.30" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.30" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.30" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.30" } # prod feature progress-bar howudoin = { version = "0.1.2", optional = true } diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index faed08811c6..3dfdd1d8128 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-network" -version = "1.0.0-beta.29" +version = "1.0.0-beta.30" authors = ["Zcash Foundation ", "Tower Maintainers "] description = "Networking code for Zebra" # # Legal @@ -83,7 +83,7 @@ howudoin = { version = "0.1.2", optional = true } proptest = { version = "1.3.1", optional = true } proptest-derive = { version = "0.4.0", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.29", features = ["async-error"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.30", features = ["async-error"] } [dev-dependencies] proptest = "1.3.1" diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index 52170430f65..dd43c4df7c5 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-node-services" -version = "1.0.0-beta.29" +version = "1.0.0-beta.30" authors = ["Zcash Foundation "] description = "The interfaces of some Zebra node services" license = "MIT OR Apache-2.0" @@ -35,7 +35,7 @@ rpc-client = [ ] [dependencies] -zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.29" } +zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.30" } # Optional dependencies diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 617cb001eb2..6b214c5d6fa 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-rpc" -version = "1.0.0-beta.29" +version = "1.0.0-beta.30" authors = 
["Zcash Foundation "] description = "A Zebra JSON Remote Procedure Call (JSON-RPC) interface" license = "MIT OR Apache-2.0" @@ -70,12 +70,12 @@ zcash_address = { version = "0.3.0", optional = true } # Test-only feature proptest-impl proptest = { version = "1.3.1", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.29", features = ["json-conversion"] } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.29" } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.29" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.29" } -zebra-script = { path = "../zebra-script", version = "1.0.0-beta.29" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.29" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.30", features = ["json-conversion"] } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.30" } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.30" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.30" } +zebra-script = { path = "../zebra-script", version = "1.0.0-beta.30" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.30" } [dev-dependencies] insta = { version = "1.33.0", features = ["redactions", "json", "ron"] } diff --git a/zebra-script/Cargo.toml b/zebra-script/Cargo.toml index 1acb8048465..da39011298a 100644 --- a/zebra-script/Cargo.toml +++ b/zebra-script/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-script" -version = "1.0.0-beta.29" +version = "1.0.0-beta.30" authors = ["Zcash Foundation "] description = "Zebra script verification wrapping zcashd's zcash_script library" license = "MIT OR Apache-2.0" @@ -17,7 +17,7 @@ categories = ["api-bindings", "cryptography::cryptocurrencies"] [dependencies] zcash_script = "0.1.13" -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.29" } +zebra-chain = { path = "../zebra-chain", version = 
"1.0.0-beta.30" } thiserror = "1.0.48" displaydoc = "0.2.4" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index b7d6a6365be..30c17c00524 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-state" -version = "1.0.0-beta.29" +version = "1.0.0-beta.30" authors = ["Zcash Foundation "] description = "State contextual verification and storage code for Zebra" license = "MIT OR Apache-2.0" @@ -73,13 +73,13 @@ tracing = "0.1.39" elasticsearch = { version = "8.5.0-alpha.1", default-features = false, features = ["rustls-tls"], optional = true } serde_json = { version = "1.0.107", package = "serde_json", optional = true } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.29", features = ["async-error"] } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.30", features = ["async-error"] } # prod feature progress-bar howudoin = { version = "0.1.2", optional = true } # test feature proptest-impl -zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.29", optional = true } +zebra-test = { path = "../zebra-test/", version = "1.0.0-beta.30", optional = true } proptest = { version = "1.3.1", optional = true } proptest-derive = { version = "0.4.0", optional = true } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index c3c7381e6ca..6cc4c5bdd35 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-test" -version = "1.0.0-beta.29" +version = "1.0.0-beta.30" authors = ["Zcash Foundation "] description = "Test harnesses and test vectors for Zebra" license = "MIT OR Apache-2.0" diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index 756417a2c9d..10da1450b7a 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "zebra-utils" -version = "1.0.0-beta.29" +version = "1.0.0-beta.30" authors = ["Zcash Foundation "] description = "Developer tools for Zebra maintenance and testing" 
license = "MIT OR Apache-2.0" @@ -74,11 +74,11 @@ tracing-error = "0.2.0" tracing-subscriber = "0.3.17" thiserror = "1.0.48" -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.29" } -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.29" } +zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.30" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.30" } # These crates are needed for the block-template-to-proposal binary -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.29", optional = true } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.30", optional = true } # These crates are needed for the zebra-checkpoints binary itertools = { version = "0.11.0", optional = true } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 9f450d23843..715eccc38a2 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -1,7 +1,7 @@ [package] # Crate metadata name = "zebrad" -version = "1.2.0" +version = "1.3.0" authors = ["Zcash Foundation "] description = "The Zcash Foundation's independent, consensus-compatible implementation of a Zcash node" license = "MIT OR Apache-2.0" @@ -142,15 +142,15 @@ test_sync_past_mandatory_checkpoint_mainnet = [] test_sync_past_mandatory_checkpoint_testnet = [] [dependencies] -zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.29" } -zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.29" } -zebra-network = { path = "../zebra-network", version = "1.0.0-beta.29" } -zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.29" } -zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.29" } -zebra-state = { path = "../zebra-state", version = "1.0.0-beta.29" } +zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.30" } +zebra-consensus = { path = "../zebra-consensus", version = "1.0.0-beta.30" } +zebra-network = { path = "../zebra-network", version = "1.0.0-beta.30" } 
+zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.30" } +zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.30" } +zebra-state = { path = "../zebra-state", version = "1.0.0-beta.30" } # Required for crates.io publishing, but it's only used in tests -zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.29", optional = true } +zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.30", optional = true } abscissa_core = "0.7.0" clap = { version = "4.4.6", features = ["cargo"] } diff --git a/zebrad/src/components/sync/end_of_support.rs b/zebrad/src/components/sync/end_of_support.rs index a4582ff38bf..f7ca757d2b3 100644 --- a/zebrad/src/components/sync/end_of_support.rs +++ b/zebrad/src/components/sync/end_of_support.rs @@ -13,7 +13,7 @@ use zebra_chain::{ use crate::application::release_version; /// The estimated height that this release will be published. -pub const ESTIMATED_RELEASE_HEIGHT: u32 = 2_212_380; +pub const ESTIMATED_RELEASE_HEIGHT: u32 = 2_264_000; /// The maximum number of days after `ESTIMATED_RELEASE_HEIGHT` where a Zebra server will run /// without halting. From 5b9877a52042cb7f29a943999706b4ba5c643771 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Oct 2023 00:25:55 +0000 Subject: [PATCH 05/18] build(deps): bump the formats group with 1 update (#7746) Bumps the formats group with 1 update: [regex](https://github.com/rust-lang/regex). - [Release notes](https://github.com/rust-lang/regex/releases) - [Changelog](https://github.com/rust-lang/regex/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/regex/compare/1.10.0...1.10.2) --- updated-dependencies: - dependency-name: regex dependency-type: direct:production update-type: version-update:semver-patch dependency-group: formats ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 18 +++++++++--------- zebra-network/Cargo.toml | 2 +- zebra-state/Cargo.toml | 2 +- zebra-test/Cargo.toml | 2 +- zebra-utils/Cargo.toml | 2 +- zebrad/Cargo.toml | 2 +- 6 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 23b663a7ce8..4986314dd21 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3428,14 +3428,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.0" +version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d119d7c7ca818f8a53c300863d4f87566aac09943aef5b355bb83969dae75d87" +checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.1", - "regex-syntax 0.8.1", + "regex-automata 0.4.3", + "regex-syntax 0.8.2", ] [[package]] @@ -3449,13 +3449,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.1" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "465c6fc0621e4abc4187a2bda0937bfd4f722c2730b29562e19689ea796c9a4b" +checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.1", + "regex-syntax 0.8.2", ] [[package]] @@ -3472,9 +3472,9 @@ checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "regex-syntax" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56d84fdd47036b038fc80dd333d10b6aab10d5d31f4a366e20014def75328d33" +checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "reqwest" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 3dfdd1d8128..a1a1e85f585 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -55,7 +55,7 @@ 
ordered-map = "0.4.2" pin-project = "1.1.3" rand = "0.8.5" rayon = "1.7.0" -regex = "1.10.0" +regex = "1.10.2" serde = { version = "1.0.188", features = ["serde_derive"] } tempfile = "3.8.0" thiserror = "1.0.48" diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 30c17c00524..ab5d9a9d331 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -55,7 +55,7 @@ itertools = "0.11.0" lazy_static = "1.4.0" metrics = "0.21.1" mset = "0.1.1" -regex = "1.10.0" +regex = "1.10.2" rlimit = "0.10.1" rocksdb = { version = "0.21.0", default-features = false, features = ["lz4"] } semver = "1.0.20" diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index 6cc4c5bdd35..a6fdb52fb78 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -23,7 +23,7 @@ itertools = "0.11.0" proptest = "1.3.1" once_cell = "1.18.0" rand = "0.8.5" -regex = "1.10.0" +regex = "1.10.2" tokio = { version = "1.33.0", features = ["full", "tracing", "test-util"] } tower = { version = "0.4.13", features = ["util"] } diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index 10da1450b7a..f4c7d47bff4 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -84,7 +84,7 @@ zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.30", optional = true itertools = { version = "0.11.0", optional = true } # These crates are needed for the search-issue-refs binary -regex = { version = "1.10.0", optional = true } +regex = { version = "1.10.2", optional = true } # Avoid default openssl dependency to reduce the dependency tree and security alerts. 
reqwest = { version = "0.11.22", default-features = false, features = ["rustls-tls"], optional = true } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index 715eccc38a2..c8350c9d61b 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -234,7 +234,7 @@ hex = "0.4.3" hex-literal = "0.4.1" jsonrpc-core = "18.0.0" once_cell = "1.18.0" -regex = "1.10.0" +regex = "1.10.2" insta = { version = "1.33.0", features = ["json"] } # zebra-rpc needs the preserve_order feature, it also makes test results more stable From 2a077f21c77953937adb04dcc5fd2e258502076d Mon Sep 17 00:00:00 2001 From: teor Date: Tue, 17 Oct 2023 12:56:00 +1000 Subject: [PATCH 06/18] fix(ci): Skip mempool checks if it wasn't queried by lightwalletd (#7744) * Correctly skip checks in lightwalletd send transactions test * Wait longer for transaction query * Wait longer before re-checking the mempool * Skip mempool checks if it wasn't queried by lightwalletd * rustfmt * clippy and rustfmt * Fix attribute issue --- .../lightwalletd/send_transaction_test.rs | 37 +++++++++++++++---- 1 file changed, 29 insertions(+), 8 deletions(-) diff --git a/zebrad/tests/common/lightwalletd/send_transaction_test.rs b/zebrad/tests/common/lightwalletd/send_transaction_test.rs index 5cfb1c68a30..870d3992b56 100644 --- a/zebrad/tests/common/lightwalletd/send_transaction_test.rs +++ b/zebrad/tests/common/lightwalletd/send_transaction_test.rs @@ -16,7 +16,7 @@ //! were obtained. This is to ensure that zebra does not reject the transactions because they have //! already been seen in a block. -use std::{cmp::min, sync::Arc}; +use std::{cmp::min, sync::Arc, time::Duration}; use color_eyre::eyre::Result; @@ -36,6 +36,7 @@ use crate::common::{ sync::wait_for_zebrad_and_lightwalletd_sync, wallet_grpc::{self, connect_to_lightwalletd, Empty, Exclude}, }, + sync::LARGE_CHECKPOINT_TIMEOUT, test_type::TestType::{self, *}, }; @@ -159,10 +160,12 @@ pub async fn run() -> Result<()> { .await? 
.into_inner(); - // Lightwalletd won't call `get_raw_mempool` again until 2 seconds after the last call + // Lightwalletd won't call `get_raw_mempool` again until 2 seconds after the last call: // + // + // So we need to wait much longer than that here. let sleep_until_lwd_last_mempool_refresh = - tokio::time::sleep(std::time::Duration::from_secs(2)); + tokio::time::sleep(std::time::Duration::from_secs(4)); let transaction_hashes: Vec = transactions.iter().map(|tx| tx.hash()).collect(); @@ -201,7 +204,7 @@ pub async fn run() -> Result<()> { zebrad.expect_stdout_line_matches("sending mempool transaction broadcast")?; // Wait for more transactions to verify, `GetMempoolTx` only returns txs where tx.HasShieldedElements() // - tokio::time::sleep(std::time::Duration::from_secs(1)).await; + tokio::time::sleep(std::time::Duration::from_secs(2)).await; sleep_until_lwd_last_mempool_refresh.await; tracing::info!("calling GetMempoolTx gRPC to fetch transactions..."); @@ -210,10 +213,26 @@ pub async fn run() -> Result<()> { .await? .into_inner(); - // check that lightwalletd queries the mempool. - zebrad.expect_stdout_line_matches("answered mempool request .*req.*=.*TransactionIds")?; + // Sometimes lightwalletd doesn't check the mempool, and waits for the next block instead. + // If that happens, we skip the rest of the test. + tracing::info!("checking if lightwalletd has queried the mempool..."); - // GetMempoolTx: make sure at least one of the transactions were inserted into the mempool. + // We need a short timeout here, because sometimes this message is not logged. + zebrad = zebrad.with_timeout(Duration::from_secs(60)); + let tx_log = + zebrad.expect_stdout_line_matches("answered mempool request .*req.*=.*TransactionIds"); + // Reset the failed timeout and give the rest of the test enough time to finish. 
+ #[allow(unused_assignments)] + { + zebrad = zebrad.with_timeout(LARGE_CHECKPOINT_TIMEOUT); + } + + if tx_log.is_err() { + tracing::info!("lightwalletd didn't query the mempool, skipping mempool contents checks"); + return Ok(()); + } + + tracing::info!("checking the mempool contains some of the sent transactions..."); let mut counter = 0; while let Some(tx) = transactions_stream.message().await? { let hash: [u8; 32] = tx.hash.clone().try_into().expect("hash is correct length"); @@ -228,6 +247,8 @@ pub async fn run() -> Result<()> { counter += 1; } + // GetMempoolTx: make sure at least one of the transactions were inserted into the mempool. + // // TODO: Update `load_transactions_from_future_blocks()` to return block height offsets and, // only check if a transaction from the first block has shielded elements assert!( @@ -235,7 +256,7 @@ pub async fn run() -> Result<()> { "failed to read v4+ transactions with shielded elements from future blocks in mempool via lightwalletd" ); - // GetMempoolTx: make sure at least one of the transactions were inserted into the mempool. + // TODO: GetMempoolStream: make sure at least one of the transactions were inserted into the mempool. tracing::info!("calling GetMempoolStream gRPC to fetch transactions..."); let mut transaction_stream = rpc_client.get_mempool_stream(Empty {}).await?.into_inner(); From 51bb15c33996c245c4eb8343a1e3577d4ce2f5a4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Oct 2023 04:01:19 +0000 Subject: [PATCH 07/18] build(deps): bump the data-structures group with 1 update (#7747) Bumps the data-structures group with 1 update: [bitflags](https://github.com/bitflags/bitflags). 
- [Release notes](https://github.com/bitflags/bitflags/releases) - [Changelog](https://github.com/bitflags/bitflags/blob/main/CHANGELOG.md) - [Commits](https://github.com/bitflags/bitflags/compare/2.4.0...2.4.1) --- updated-dependencies: - dependency-name: bitflags dependency-type: direct:production update-type: version-update:semver-patch dependency-group: data-structures ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Alfredo Garcia --- Cargo.lock | 18 +++++++++--------- zebra-chain/Cargo.toml | 2 +- zebra-network/Cargo.toml | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4986314dd21..0fc52d9156c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -416,7 +416,7 @@ version = "0.68.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "726e4313eb6ec35d2730258ad4e15b547ee75d6afaa1361a922e78e59b7d8078" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "cexpr", "clang-sys", "lazy_static", @@ -470,9 +470,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" [[package]] name = "bitflags-serde-legacy" @@ -480,7 +480,7 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b64e60c28b6d25ad92e8b367801ff9aa12b41d05fc8798055d296bace4a60cc" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "serde", ] @@ -1602,7 +1602,7 @@ version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fbf97ba92db08df386e10c8ede66a2a0369bd277090afd8710e19e38de9ec0cd" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "libc", 
"libgit2-sys", "log", @@ -3111,7 +3111,7 @@ checksum = "7c003ac8c77cb07bb74f5f198bce836a689bcd5a42574612bf14d17bfd08c20e" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.4.0", + "bitflags 2.4.1", "lazy_static", "num-traits", "rand 0.8.5", @@ -3637,7 +3637,7 @@ version = "0.38.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "745ecfa778e66b2b63c88a61cb36e0eea109e803b0b86bf9879fbc77c70e86ed" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "errno", "libc", "linux-raw-sys", @@ -5494,7 +5494,7 @@ dependencies = [ name = "zebra-chain" version = "1.0.0-beta.30" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "bitflags-serde-legacy", "bitvec", "blake2b_simd", @@ -5600,7 +5600,7 @@ dependencies = [ name = "zebra-network" version = "1.0.0-beta.30" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "byteorder", "bytes", "chrono", diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 2a5e75eb69f..630ce281134 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -54,7 +54,7 @@ bench = ["zebra-test"] # Cryptography bitvec = "1.0.1" -bitflags = "2.4.0" +bitflags = "2.4.1" bitflags-serde-legacy = "0.1.1" blake2b_simd = "1.0.2" blake2s_simd = "1.0.2" diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index a1a1e85f585..246c9fc07f7 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -40,7 +40,7 @@ progress-bar = [ proptest-impl = ["proptest", "proptest-derive", "zebra-chain/proptest-impl"] [dependencies] -bitflags = "2.4.0" +bitflags = "2.4.1" byteorder = "1.5.0" bytes = "1.5.0" chrono = { version = "0.4.31", default-features = false, features = ["clock", "std"] } From bea90dfd628b5c6c10f85349bf0b02df226aa7df Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 18 Oct 2023 07:54:01 +1000 Subject: [PATCH 08/18] change(doc): Add instructions for new placeholder crates (#7751) * Add instructions for new placeholder crates * minor changes --------- Co-authored-by: Alfredo 
Garcia --- book/src/dev/crate-owners.md | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/book/src/dev/crate-owners.md b/book/src/dev/crate-owners.md index 88a3460aa88..0c0b91af111 100644 --- a/book/src/dev/crate-owners.md +++ b/book/src/dev/crate-owners.md @@ -35,7 +35,23 @@ please paste the token found on https://crates.io/me below Login token for `crates.io` saved ``` -## Crate Ownership +## Publishing New Crates + +We publish a new placeholder crate as soon as we have a good idea for a crate name. + +Before starting with the publishing, please clone zebra and use the `main` branch to create the placeholder crate, you need `cargo release` installed in the system and be logged to crates.io with `cargo login`. + +Next, execute the following commands to publish a new placeholder and set the owners: +```sh +cargo new new-crate-name +cd new-crate-name +cargo release publish --verbose --package new-crate-name --execute +cargo owner --add oxarbitrage +cargo owner --add teor2345 +cargo owner --add github:zcashfoundation:owners +``` + +## Changing Crate Ownership crates.io has two kinds of owners: group owners and individual owners. All owners can publish and yank crates. But [only individual owners can change crate owners](https://doc.rust-lang.org/cargo/reference/publishing.html#cargo-owner). 
From 08ce2ad500a4d3c989b29d47c5a32a0c52bbc21d Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 18 Oct 2023 14:15:17 +1000 Subject: [PATCH 09/18] feature(mining): Enable mining RPCs by default in production builds (#7740) * Enable getblocktemplate-rpcs in production builds * Update mining instructions * Consistently use ZF funding stream addresses in examples * Simplify skip_getblocktemplate() * Set the test miner address based on the network * Correctly skip checks in lightwalletd send transactions test * Wait longer for transaction query * Wait longer before re-checking the mempool * Skip mempool checks if it wasn't queried by lightwalletd * rustfmt * clippy and rustfmt * Fix attribute issue * Fix typo Co-authored-by: Arya --------- Co-authored-by: Arya --- README.md | 6 +++- book/src/user/mining-docker.md | 17 ++++----- book/src/user/mining-testnet-s-nomp.md | 4 +-- book/src/user/mining.md | 21 +++++------ zebra-rpc/src/config/mining.rs | 5 +-- zebrad/Cargo.toml | 9 +++-- zebrad/src/lib.rs | 2 +- zebrad/tests/acceptance.rs | 49 +++++++++++++------------- zebrad/tests/common/config.rs | 31 +++++++++++++--- zebrad/tests/common/launch.rs | 25 +++++++------ zebrad/tests/common/sync.rs | 10 +++--- zebrad/tests/common/test_type.rs | 12 +++---- 12 files changed, 104 insertions(+), 87 deletions(-) diff --git a/README.md b/README.md index 853b4c47f10..2730399617f 100644 --- a/README.md +++ b/README.md @@ -128,11 +128,15 @@ In future releases, the `progress_bar = "summary"` config will show a few key me and the "detailed" config will show all available metrics. Please let us know which metrics are important to you! +##### Configuring Mining + +Zebra can be configured for mining by passing a `MINER_ADDRESS` and port mapping to Docker. +See the [mining support docs](https://zebra.zfnd.org/user/mining-docker.html) for more details. 
+ ##### Custom Build Features You can also build Zebra with additional [Cargo features](https://doc.rust-lang.org/cargo/reference/features.html#command-line-feature-options): -- `getblocktemplate-rpcs` for [mining support](https://zebra.zfnd.org/user/mining.html) - `prometheus` for [Prometheus metrics](https://zebra.zfnd.org/user/metrics.html) - `sentry` for [Sentry monitoring](https://zebra.zfnd.org/user/tracing.html#sentry-production-monitoring) - `elasticsearch` for [experimental Elasticsearch support](https://zebra.zfnd.org/user/elasticsearch.html) diff --git a/book/src/user/mining-docker.md b/book/src/user/mining-docker.md index e5d974317b8..002848c0ca3 100644 --- a/book/src/user/mining-docker.md +++ b/book/src/user/mining-docker.md @@ -1,20 +1,21 @@ # Mining with Zebra in Docker -Some of our published [Docker images](https://hub.docker.com/r/zfnd/zebra/tags) -have the `.experimental` suffix in their name. We compile these images with the -`getblocktemplate-rpcs` feature, and you can use them for your mining -operations. For example, executing +Zebra's [Docker images](https://hub.docker.com/r/zfnd/zebra/tags) can be used for your mining +operations. If you don't have Docker, see the +[manual configuration instructions](https://zebra.zfnd.org/user/mining.html). + +Using docker, you can start mining by running: ```bash -docker run -e MINER_ADDRESS="t1XhG6pT9xRqRQn3BHP7heUou1RuYrbcrCc" -p 8232:8232 zfnd/zebra:v1.1.0.experimental +docker run -e MINER_ADDRESS="t3dvVE3SQEi7kqNzwrfNePxZ1d4hUyztBA1" -p 8232:8232 zfnd/zebra:latest ``` -will start a container on Mainnet and bind port 8232 on your Docker host. If you +This command starts a container on Mainnet and binds port 8232 on your Docker host. If you want to start generating blocks, you need to let Zebra sync first. Note that you must pass the address for your mining rewards via the `MINER_ADDRESS` environment variable when you are starting the container, as we -did in the example above. 
The address we used starts with the prefix `t1`, +did with the ZF funding stream address above. The address we used starts with the prefix `t1`, meaning it is a Mainnet P2PKH address. Please remember to set your own address for the rewards. @@ -35,7 +36,7 @@ variable to `Testnet` and use a Testnet address for the rewards. For example, running ```bash -docker run -e NETWORK="Testnet" -e MINER_ADDRESS="t27eWDgjFYJGVXmzrXeVjnb5J3uXDM9xH9v" -p 18232:18232 zfnd/zebra:v1.1.0.experimental +docker run -e NETWORK="Testnet" -e MINER_ADDRESS="t27eWDgjFYJGVXmzrXeVjnb5J3uXDM9xH9v" -p 18232:18232 zfnd/zebra:latest ``` will start a container on Testnet and bind port 18232 on your Docker host, which diff --git a/book/src/user/mining-testnet-s-nomp.md b/book/src/user/mining-testnet-s-nomp.md index 711afb24b6e..70fbc7a7647 100644 --- a/book/src/user/mining-testnet-s-nomp.md +++ b/book/src/user/mining-testnet-s-nomp.md @@ -82,7 +82,7 @@ These fixes disable mining pool operator payments and miner payments: they just ## Install `s-nomp`
General instructions with Debian/Ubuntu examples - + #### Install dependencies 1. Install `redis` and run it on the default port: @@ -136,7 +136,7 @@ These fixes disable mining pool operator payments and miner payments: they just
Arch-specific instructions - + #### Install dependencies 1. Install [`redis`](https://redis.io/docs/getting-started/) and run it on the default port: diff --git a/book/src/user/mining.md b/book/src/user/mining.md index cb7a2be2ccf..fd593096012 100644 --- a/book/src/user/mining.md +++ b/book/src/user/mining.md @@ -1,10 +1,10 @@ # Mining Zcash with zebra -Zebra's RPC methods support miners and mining pools. +Zebra's RPC methods support miners and mining pools. Contents: -- [Download and build Zebra](#download-and-build-zebra) +- [Download Zebra](#download-and-build-zebra) - [Configure zebra for mining](#configure-zebra-for-mining) - [Miner address](#miner-address) - [RPC section](#rpc-section) @@ -12,18 +12,13 @@ Contents: - [Testing the setup](#testing-the-setup) - [Run a mining pool](#run-a-mining-pool) -## Download and build Zebra for mining +## Download Zebra [#download-and-build-zebra]: #download-and-build-zebra -Before installing please make sure you have the [Zebra dependencies](https://github.com/ZcashFoundation/zebra#build-instructions) in your OS. +The easiest way to run Zebra for mining is with [our docker images](https://zebra.zfnd.org/user/mining-docker.html). -```console -cargo install --locked --features getblocktemplate-rpcs --git https://github.com/ZcashFoundation/zebra zebrad -``` - -Zebra binary will be at ` ~/.cargo/bin/zebrad`. - -It is important to install with the `getblocktemplate-rpcs` feature so the final binary will contain mining support. +If you have [installed Zebra another way](https://zebra.zfnd.org/user/install.html), follow the +instructions below to start mining: ## Configure zebra for mining [#configure-zebra-for-mining]: #configure-zebra-for-mining @@ -46,10 +41,10 @@ Node miner address is required. 
At the moment zebra only allows `p2pkh` or `p2sh ``` [mining] -miner_address = 't3Vz22vK5z2LcKEdg16Yv4FFneEL1zg9ojd' +miner_address = 't3dvVE3SQEi7kqNzwrfNePxZ1d4hUyztBA1' ``` -The above address is just the first address of the [Founders' Reward](https://zips.z.cash/protocol/protocol.pdf#foundersreward) section of the Zcash protocol. It's a Mainnet address and it is used here purely as an example. +The above address is the ZF Mainnet funding stream address. It is used here purely as an example. ### RPC section [#rpc-section]: #rpc-section diff --git a/zebra-rpc/src/config/mining.rs b/zebra-rpc/src/config/mining.rs index f8224859c3f..1a27baa3646 100644 --- a/zebra-rpc/src/config/mining.rs +++ b/zebra-rpc/src/config/mining.rs @@ -46,9 +46,6 @@ impl Config { /// This is used to ignore the mining section of the configuration if the feature is not /// enabled, allowing us to log a warning when the config found is different from the default. pub fn skip_getblocktemplate(&self) -> bool { - #[cfg(feature = "getblocktemplate-rpcs")] - return false; - #[cfg(not(feature = "getblocktemplate-rpcs"))] - return true; + !cfg!(feature = "getblocktemplate-rpcs") } } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index c8350c9d61b..2e2439890db 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -52,14 +52,14 @@ features = [ [features] # In release builds, don't compile debug logging code, to improve performance. 
-default = ["release_max_level_info", "progress-bar"] +default = ["release_max_level_info", "progress-bar", "getblocktemplate-rpcs"] # Default features for official ZF binary release builds default-release-binaries = ["default", "sentry"] # Production features that activate extra dependencies, or extra features in dependencies -# Experimental mining RPC support +# Mining RPC support getblocktemplate-rpcs = [ "zebra-rpc/getblocktemplate-rpcs", "zebra-consensus/getblocktemplate-rpcs", @@ -68,12 +68,13 @@ getblocktemplate-rpcs = [ "zebra-chain/getblocktemplate-rpcs", ] +# Experimental elasticsearch indexing elasticsearch = [ "zebra-state/elasticsearch", ] +# Tracing and monitoring sentry = ["dep:sentry"] -flamegraph = ["tracing-flame", "inferno"] journald = ["tracing-journald"] filter-reload = ["hyper"] @@ -105,6 +106,8 @@ max_level_info = ["tracing/max_level_info", "log/max_level_info"] max_level_debug = ["tracing/max_level_debug", "log/max_level_debug"] # Testing features that activate extra dependencies +flamegraph = ["tracing-flame", "inferno"] + proptest-impl = [ "proptest", "proptest-derive", diff --git a/zebrad/src/lib.rs b/zebrad/src/lib.rs index 9ebe9b3ffff..b36ca2e182a 100644 --- a/zebrad/src/lib.rs +++ b/zebrad/src/lib.rs @@ -57,7 +57,7 @@ //! //! ### JSON-RPC //! -//! * `getblocktemplate-rpcs`: Experimental mining pool RPC support (currently incomplete) +//! * `getblocktemplate-rpcs`: Mining pool RPC support, enabled by default in production builds. //! //! ### Metrics //! diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index c4fbdc707e2..0c40df31a50 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -207,7 +207,7 @@ fn generate_no_args() -> Result<()> { let _init_guard = zebra_test::init(); let child = testdir()? - .with_config(&mut default_test_config()?)? + .with_config(&mut default_test_config(Mainnet)?)? 
.spawn_child(args!["generate"])?; let output = child.wait_with_output()?; @@ -269,7 +269,7 @@ fn generate_args() -> Result<()> { fn help_no_args() -> Result<()> { let _init_guard = zebra_test::init(); - let testdir = testdir()?.with_config(&mut default_test_config()?)?; + let testdir = testdir()?.with_config(&mut default_test_config(Mainnet)?)?; let child = testdir.spawn_child(args!["help"])?; let output = child.wait_with_output()?; @@ -314,7 +314,7 @@ fn start_no_args() -> Result<()> { let _init_guard = zebra_test::init(); // start caches state, so run one of the start tests with persistent state - let testdir = testdir()?.with_config(&mut persistent_test_config()?)?; + let testdir = testdir()?.with_config(&mut persistent_test_config(Mainnet)?)?; let mut child = testdir.spawn_child(args!["-v", "start"])?; @@ -341,7 +341,7 @@ fn start_no_args() -> Result<()> { fn start_args() -> Result<()> { let _init_guard = zebra_test::init(); - let testdir = testdir()?.with_config(&mut default_test_config()?)?; + let testdir = testdir()?.with_config(&mut default_test_config(Mainnet)?)?; let testdir = &testdir; let mut child = testdir.spawn_child(args!["start"])?; @@ -366,7 +366,7 @@ fn start_args() -> Result<()> { #[tokio::test] async fn db_init_outside_future_executor() -> Result<()> { let _init_guard = zebra_test::init(); - let config = default_test_config()?; + let config = default_test_config(Mainnet)?; let start = Instant::now(); @@ -392,7 +392,7 @@ async fn db_init_outside_future_executor() -> Result<()> { fn persistent_mode() -> Result<()> { let _init_guard = zebra_test::init(); - let testdir = testdir()?.with_config(&mut persistent_test_config()?)?; + let testdir = testdir()?.with_config(&mut persistent_test_config(Mainnet)?)?; let testdir = &testdir; let mut child = testdir.spawn_child(args!["-v", "start"])?; @@ -457,7 +457,7 @@ fn ephemeral(cache_dir_config: EphemeralConfig, cache_dir_check: EphemeralCheck) let _init_guard = zebra_test::init(); - let mut config = 
default_test_config()?; + let mut config = default_test_config(Mainnet)?; let run_dir = testdir()?; let ignored_cache_dir = run_dir.path().join("state"); @@ -547,7 +547,7 @@ fn ephemeral(cache_dir_config: EphemeralConfig, cache_dir_check: EphemeralCheck) fn version_no_args() -> Result<()> { let _init_guard = zebra_test::init(); - let testdir = testdir()?.with_config(&mut default_test_config()?)?; + let testdir = testdir()?.with_config(&mut default_test_config(Mainnet)?)?; let child = testdir.spawn_child(args!["--version"])?; let output = child.wait_with_output()?; @@ -568,7 +568,7 @@ fn version_no_args() -> Result<()> { fn version_args() -> Result<()> { let _init_guard = zebra_test::init(); - let testdir = testdir()?.with_config(&mut default_test_config()?)?; + let testdir = testdir()?.with_config(&mut default_test_config(Mainnet)?)?; let testdir = &testdir; // unrecognized option `-f` @@ -622,7 +622,7 @@ fn app_no_args() -> Result<()> { let _init_guard = zebra_test::init(); // start caches state, so run one of the start tests with persistent state - let testdir = testdir()?.with_config(&mut persistent_test_config()?)?; + let testdir = testdir()?.with_config(&mut persistent_test_config(Mainnet)?)?; tracing::info!(?testdir, "running zebrad with no config (default settings)"); @@ -1319,7 +1319,7 @@ async fn metrics_endpoint() -> Result<()> { let url = format!("http://{endpoint}"); // Write a configuration that has metrics endpoint_addr set - let mut config = default_test_config()?; + let mut config = default_test_config(Mainnet)?; config.metrics.endpoint_addr = Some(endpoint.parse().unwrap()); let dir = testdir()?.with_config(&mut config)?; @@ -1376,7 +1376,7 @@ async fn tracing_endpoint() -> Result<()> { let url_filter = format!("{url_default}/filter"); // Write a configuration that has tracing endpoint_addr option set - let mut config = default_test_config()?; + let mut config = default_test_config(Mainnet)?; config.tracing.endpoint_addr = 
Some(endpoint.parse().unwrap()); let dir = testdir()?.with_config(&mut config)?; @@ -1483,7 +1483,7 @@ async fn rpc_endpoint(parallel_cpu_threads: bool) -> Result<()> { // Write a configuration that has RPC listen_addr set // [Note on port conflict](#Note on port conflict) - let mut config = random_known_rpc_port_config(parallel_cpu_threads)?; + let mut config = random_known_rpc_port_config(parallel_cpu_threads, Mainnet)?; let dir = testdir()?.with_config(&mut config)?; let mut child = dir.spawn_child(args!["start"])?; @@ -1542,7 +1542,7 @@ async fn rpc_endpoint_client_content_type() -> Result<()> { // Write a configuration that has RPC listen_addr set // [Note on port conflict](#Note on port conflict) - let mut config = random_known_rpc_port_config(true)?; + let mut config = random_known_rpc_port_config(true, Mainnet)?; let dir = testdir()?.with_config(&mut config)?; let mut child = dir.spawn_child(args!["start"])?; @@ -1628,7 +1628,7 @@ fn non_blocking_logger() -> Result<()> { // Write a configuration that has RPC listen_addr set // [Note on port conflict](#Note on port conflict) - let mut config = random_known_rpc_port_config(false)?; + let mut config = random_known_rpc_port_config(false, Mainnet)?; config.tracing.filter = Some("trace".to_string()); config.tracing.buffer_limit = 100; let zebra_rpc_address = config.rpc.listen_addr.unwrap(); @@ -2049,7 +2049,7 @@ fn zebra_zcash_listener_conflict() -> Result<()> { let listen_addr = format!("127.0.0.1:{port}"); // Write a configuration that has our created network listen_addr - let mut config = default_test_config()?; + let mut config = default_test_config(Mainnet)?; config.network.listen_addr = listen_addr.parse().unwrap(); let dir1 = testdir()?.with_config(&mut config)?; let regex1 = regex::escape(&format!("Opened Zcash protocol endpoint at {listen_addr}")); @@ -2078,7 +2078,7 @@ fn zebra_metrics_conflict() -> Result<()> { let listen_addr = format!("127.0.0.1:{port}"); // Write a configuration that has our created 
metrics endpoint_addr - let mut config = default_test_config()?; + let mut config = default_test_config(Mainnet)?; config.metrics.endpoint_addr = Some(listen_addr.parse().unwrap()); let dir1 = testdir()?.with_config(&mut config)?; let regex1 = regex::escape(&format!(r"Opened metrics endpoint at {listen_addr}")); @@ -2107,7 +2107,7 @@ fn zebra_tracing_conflict() -> Result<()> { let listen_addr = format!("127.0.0.1:{port}"); // Write a configuration that has our created tracing endpoint_addr - let mut config = default_test_config()?; + let mut config = default_test_config(Mainnet)?; config.tracing.endpoint_addr = Some(listen_addr.parse().unwrap()); let dir1 = testdir()?.with_config(&mut config)?; let regex1 = regex::escape(&format!(r"Opened tracing endpoint at {listen_addr}")); @@ -2141,7 +2141,7 @@ fn zebra_rpc_conflict() -> Result<()> { // [Note on port conflict](#Note on port conflict) // // This is the required setting to detect port conflicts. - let mut config = random_known_rpc_port_config(false)?; + let mut config = random_known_rpc_port_config(false, Mainnet)?; let dir1 = testdir()?.with_config(&mut config)?; let regex1 = regex::escape(&format!( @@ -2168,7 +2168,7 @@ fn zebra_state_conflict() -> Result<()> { // A persistent config has a fixed temp state directory, but asks the OS to // automatically choose an unused port - let mut config = persistent_test_config()?; + let mut config = persistent_test_config(Mainnet)?; let dir_conflict = testdir()?.with_config(&mut config)?; // Windows problems with this match will be worked on at #1654 @@ -2333,7 +2333,7 @@ async fn delete_old_databases() -> Result<()> { return Ok(()); } - let mut config = default_test_config()?; + let mut config = default_test_config(Mainnet)?; let run_dir = testdir()?; let cache_dir = run_dir.path().join("state"); @@ -2449,7 +2449,7 @@ async fn submit_block() -> Result<()> { #[test] fn end_of_support_is_checked_at_start() -> Result<()> { let _init_guard = zebra_test::init(); - let testdir = 
testdir()?.with_config(&mut default_test_config()?)?; + let testdir = testdir()?.with_config(&mut default_test_config(Mainnet)?)?; let mut child = testdir.spawn_child(args!["start"])?; // Give enough time to start up the eos task. @@ -2608,10 +2608,9 @@ async fn state_format_test( let test_name = &format!("{base_test_name}/apply_fake_version/{fake_version}"); tracing::info!(?network, "running {test_name} using zebra-state"); - let mut config = UseAnyState - .zebrad_config(test_name, false, Some(dir.path())) + let config = UseAnyState + .zebrad_config(test_name, false, Some(dir.path()), network) .expect("already checked config")?; - config.network.network = network; zebra_state::write_database_format_version_to_disk(fake_version, &config.state, network) .expect("can't write fake database version to disk"); diff --git a/zebrad/tests/common/config.rs b/zebrad/tests/common/config.rs index b6eead94f3e..3abf83d7eca 100644 --- a/zebrad/tests/common/config.rs +++ b/zebrad/tests/common/config.rs @@ -15,6 +15,7 @@ use std::{ use color_eyre::eyre::Result; use tempfile::TempDir; +use zebra_chain::parameters::Network::{self, *}; use zebra_test::net::random_known_port; use zebrad::{ components::{mempool, sync, tracing}, @@ -28,10 +29,11 @@ use crate::common::cached_state::DATABASE_FORMAT_CHECK_INTERVAL; /// - an ephemeral state, /// - the minimum syncer lookahead limit, and /// - shorter task intervals, to improve test coverage. -pub fn default_test_config() -> Result { +pub fn default_test_config(net: Network) -> Result { const TEST_DURATION: Duration = Duration::from_secs(30); let network = zebra_network::Config { + network: net, // The OS automatically chooses an unused port. 
listen_addr: "127.0.0.1:0".parse()?, crawl_new_peer_interval: TEST_DURATION, @@ -65,6 +67,21 @@ pub fn default_test_config() -> Result { let mut state = zebra_state::Config::ephemeral(); state.debug_validity_check_interval = Some(DATABASE_FORMAT_CHECK_INTERVAL); + // These are the ZF funding stream addresses for mainnet and testnet. + #[allow(unused_mut)] + let mut mining = zebra_rpc::config::mining::Config::default(); + + #[cfg(feature = "getblocktemplate-rpcs")] + { + let miner_address = if network.network == Mainnet { + "t3dvVE3SQEi7kqNzwrfNePxZ1d4hUyztBA1" + } else { + "t27eWDgjFYJGVXmzrXeVjnb5J3uXDM9xH9v" + }; + + mining.miner_address = Some(miner_address.parse().expect("hard-coded address is valid")); + } + let config = ZebradConfig { network, state, @@ -72,14 +89,15 @@ pub fn default_test_config() -> Result { mempool, consensus, tracing, + mining, ..ZebradConfig::default() }; Ok(config) } -pub fn persistent_test_config() -> Result { - let mut config = default_test_config()?; +pub fn persistent_test_config(network: Network) -> Result { + let mut config = default_test_config(network)?; config.state.ephemeral = false; Ok(config) } @@ -105,7 +123,10 @@ pub fn config_file_full_path(config_file: PathBuf) -> PathBuf { /// Returns a `zebrad` config with a random known RPC port. /// /// Set `parallel_cpu_threads` to true to auto-configure based on the number of CPU cores. 
-pub fn random_known_rpc_port_config(parallel_cpu_threads: bool) -> Result { +pub fn random_known_rpc_port_config( + parallel_cpu_threads: bool, + network: Network, +) -> Result { // [Note on port conflict](#Note on port conflict) let listen_port = random_known_port(); let listen_ip = "127.0.0.1".parse().expect("hard-coded IP is valid"); @@ -113,7 +134,7 @@ pub fn random_known_rpc_port_config(parallel_cpu_threads: bool) -> Result + Debug>( } // Get the zebrad config - let mut config = test_type - .zebrad_config(test_name, use_internet_connection, None) + let config = test_type + .zebrad_config(test_name, use_internet_connection, None, network) .expect("already checked config")?; - config.network.network = network; - let (zebrad_failure_messages, zebrad_ignore_messages) = test_type.zebrad_failure_messages(); // Writes a configuration that has RPC listen_addr set (if needed). @@ -311,12 +309,15 @@ where } // Get the zebrad config - let mut config = test_type - .zebrad_config(test_name, use_internet_connection, replace_cache_dir) + let config = test_type + .zebrad_config( + test_name, + use_internet_connection, + replace_cache_dir, + network, + ) .expect("already checked config")?; - config.network.network = network; - let (zebrad_failure_messages, zebrad_ignore_messages) = test_type.zebrad_failure_messages(); // Writes a configuration that does not have RPC listen_addr set. @@ -350,8 +351,10 @@ pub fn can_spawn_zebrad_for_test_type + Debug>( } // Check if we have any necessary cached states for the zebrad config. - // The cache_dir value doesn't matter here. - test_type.zebrad_config(test_name, true, None).is_some() + // The cache_dir and network values don't matter here. 
+ test_type + .zebrad_config(test_name, true, None, Mainnet) + .is_some() } /// Panics if `$pred` is false, with an error report containing: diff --git a/zebrad/tests/common/sync.rs b/zebrad/tests/common/sync.rs index dd0a1390294..599a10c4b5a 100644 --- a/zebrad/tests/common/sync.rs +++ b/zebrad/tests/common/sync.rs @@ -206,8 +206,7 @@ pub fn sync_until( let reuse_tempdir = reuse_tempdir.into(); // Use a persistent state, so we can handle large syncs - let mut config = persistent_test_config()?; - config.network.network = network; + let mut config = persistent_test_config(network)?; config.state.debug_stop_at_height = Some(height.0); config.mempool.debug_enable_at_height = mempool_behavior.enable_at_height(); config.consensus.checkpoint_sync = checkpoint_sync; @@ -338,8 +337,8 @@ pub fn check_sync_logs_until( } /// Returns a test config for caching Zebra's state up to the mandatory checkpoint. -pub fn cached_mandatory_checkpoint_test_config() -> Result { - let mut config = persistent_test_config()?; +pub fn cached_mandatory_checkpoint_test_config(network: Network) -> Result { + let mut config = persistent_test_config(network)?; config.state.cache_dir = "/zebrad-cache".into(); // To get to the mandatory checkpoint, we need to sync lots of blocks. @@ -387,9 +386,8 @@ pub fn create_cached_database_height( eprintln!("creating cached database"); // Use a persistent state, so we can handle large syncs - let mut config = cached_mandatory_checkpoint_test_config()?; + let mut config = cached_mandatory_checkpoint_test_config(network)?; // TODO: add convenience methods? 
- config.network.network = network; config.state.debug_stop_at_height = Some(height.0); config.consensus.debug_skip_parameter_preload = debug_skip_parameter_preload; config.consensus.checkpoint_sync = checkpoint_sync; diff --git a/zebrad/tests/common/test_type.rs b/zebrad/tests/common/test_type.rs index adb5fd4b897..ddd7386c137 100644 --- a/zebrad/tests/common/test_type.rs +++ b/zebrad/tests/common/test_type.rs @@ -8,6 +8,7 @@ use std::{ use indexmap::IndexSet; +use zebra_chain::parameters::Network; use zebra_network::CacheDir; use zebra_test::{command::NO_MATCHES_REGEX_ITER, prelude::*}; use zebrad::config::ZebradConfig; @@ -176,12 +177,13 @@ impl TestType { test_name: Str, use_internet_connection: bool, replace_cache_dir: Option<&Path>, + network: Network, ) -> Option> { let config = if self.needs_zebra_rpc_server() { // This is what we recommend our users configure. - random_known_rpc_port_config(true) + random_known_rpc_port_config(true, network) } else { - default_test_config() + default_test_config(network) }; let mut config = match config { @@ -209,12 +211,6 @@ impl TestType { config.mempool.debug_enable_at_height = Some(0); } - // Add a fake miner address for mining RPCs - #[cfg(feature = "getblocktemplate-rpcs")] - let _ = config.mining.miner_address.insert( - zebra_chain::transparent::Address::from_script_hash(config.network.network, [0x7e; 20]), - ); - // If we have a cached state, or we don't want to be ephemeral, update the config to use it if replace_cache_dir.is_some() || self.needs_zebra_cached_state() { let zebra_state_path = replace_cache_dir From fc0133e886d3dbf7364ddd24915844dda0650e4f Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Wed, 18 Oct 2023 07:16:02 +0100 Subject: [PATCH 10/18] ref(workflows): consolidate workflows based on their purpose (#7616) * ref(workflows): consolidate workflows based on their purpose This also renames the workflows to make their naming more consistent and adding a naming convention Fixes: #6166 Fixes: #6167 
* fix(workflows): use correct name for patch * fix(workflow): docker unit tests * fix(release): validation error Error: ``` Validation Failed: {"resource":"Release","code":"invalid","field":"target_commitish"} ``` Fixes: https://github.com/release-drafter/release-drafter/issues/1125 * fix(workflows): reference correct name * fix: remove extra workflow * fix(workflows): use larger runners * fix(workflow): remove code already in docker unit-test * fix(unit-tests): start zebra the right way * fix: typo in patch name * chore: move job to logical order * imp(workflows): use better name for gcp tests * add: missing merge changes * chore: use better name for find-disks * fix(ci): use the `entrypoint.sh` to change the Network * fix(ci): add missing `ZEBRA_CONF_PATH` variable * fix(ci): allow to build the entrypoint file with testnet * fix(entrypoint): allow to create a dir and file with a single variable * refactor: test config file in CI and CD with a reusable workflow * fix(ci): wrong name used * fix(ci): use checkout * fix(ci): improve docker config tests * fix(ci): use better name for protection rules * Fix changed workflow file name in docs patch file * Apply suggestions from code review Co-authored-by: teor Co-authored-by: Arya * fix(cd): depend on file tests * fix(docs): adapt to new workflow name * fix: revert test coverage on CD * chore: reduce diff * fix(ci): allow using variable images for reusable workflows * fix(dockerfile): use variables or default for config path and file * fix(entrypoint): if `$ZEBRA_CONF_PATH` is set, do not override it * Fix patch job names and remove failure job testnet dependencies --------- Co-authored-by: teor Co-authored-by: Arya --- .../workflows/cd-deploy-nodes-gcp.patch.yml | 39 +++ ...s-delivery.yml => cd-deploy-nodes-gcp.yml} | 138 +++------ ...ces.yml => chore-delete-gcp-resources.yml} | 0 ...ement.yml => chore-project-management.yml} | 0 ...ly.patch.yml => ci-build-crates.patch.yml} | 2 +- ...s-individually.yml => 
ci-build-crates.yml} | 4 +- ...verage.patch.yml => ci-coverage.patch.yml} | 2 +- .../{coverage.yml => ci-coverage.yml} | 4 +- ...yml => ci-integration-tests-gcp.patch.yml} | 52 +--- ...ocker.yml => ci-integration-tests-gcp.yml} | 253 ++-------------- .../{lint.patch.yml => ci-lint.patch.yml} | 0 .github/workflows/{lint.yml => ci-lint.yml} | 2 +- .../workflows/ci-unit-tests-docker.patch.yml | 82 ++++++ .github/workflows/ci-unit-tests-docker.yml | 269 ++++++++++++++++++ ...s.patch.yml => ci-unit-tests-os.patch.yml} | 4 +- ...ntegration-os.yml => ci-unit-tests-os.yml} | 31 +- .../workflows/continous-delivery.patch.yml | 35 --- ...tinous-integration-docker.patch-always.yml | 26 -- ...tch.yml => docs-deploy-firebase.patch.yml} | 2 +- .../{docs.yml => docs-deploy-firebase.yml} | 2 +- ...ion.yml => docs-dockerhub-description.yml} | 0 ...al-deploy.yml => manual-zcashd-deploy.yml} | 0 .github/workflows/release-binaries.yml | 6 +- .github/workflows/release-drafter.yml | 1 + ...r-image.yml => sub-build-docker-image.yml} | 0 ...h.yml => sub-build-lightwalletd.patch.yml} | 4 +- ...walletd.yml => sub-build-lightwalletd.yml} | 6 +- ...-params.yml => sub-build-zcash-params.yml} | 8 +- ...l => sub-deploy-integration-tests-gcp.yml} | 8 +- ...ed-disks.yml => sub-find-cached-disks.yml} | 0 .github/workflows/sub-test-zebra-config.yml | 79 +++++ book/src/dev/continuous-integration.md | 2 +- docker/Dockerfile | 6 +- docker/entrypoint.sh | 5 +- 34 files changed, 595 insertions(+), 477 deletions(-) create mode 100644 .github/workflows/cd-deploy-nodes-gcp.patch.yml rename .github/workflows/{continous-delivery.yml => cd-deploy-nodes-gcp.yml} (77%) rename .github/workflows/{delete-gcp-resources.yml => chore-delete-gcp-resources.yml} (100%) rename .github/workflows/{project-management.yml => chore-project-management.yml} (100%) rename .github/workflows/{build-crates-individually.patch.yml => ci-build-crates.patch.yml} (97%) rename .github/workflows/{build-crates-individually.yml => 
ci-build-crates.yml} (98%) rename .github/workflows/{coverage.patch.yml => ci-coverage.patch.yml} (93%) rename .github/workflows/{coverage.yml => ci-coverage.yml} (97%) rename .github/workflows/{continous-integration-docker.patch.yml => ci-integration-tests-gcp.patch.yml} (67%) rename .github/workflows/{continous-integration-docker.yml => ci-integration-tests-gcp.yml} (71%) rename .github/workflows/{lint.patch.yml => ci-lint.patch.yml} (100%) rename .github/workflows/{lint.yml => ci-lint.yml} (99%) create mode 100644 .github/workflows/ci-unit-tests-docker.patch.yml create mode 100644 .github/workflows/ci-unit-tests-docker.yml rename .github/workflows/{continous-integration-os.patch.yml => ci-unit-tests-os.patch.yml} (96%) rename .github/workflows/{continous-integration-os.yml => ci-unit-tests-os.yml} (97%) delete mode 100644 .github/workflows/continous-delivery.patch.yml delete mode 100644 .github/workflows/continous-integration-docker.patch-always.yml rename .github/workflows/{docs.patch.yml => docs-deploy-firebase.patch.yml} (94%) rename .github/workflows/{docs.yml => docs-deploy-firebase.yml} (99%) rename .github/workflows/{dockerhub-description.yml => docs-dockerhub-description.yml} (100%) rename .github/workflows/{zcashd-manual-deploy.yml => manual-zcashd-deploy.yml} (100%) rename .github/workflows/{build-docker-image.yml => sub-build-docker-image.yml} (100%) rename .github/workflows/{zcash-lightwalletd.patch.yml => sub-build-lightwalletd.patch.yml} (85%) rename .github/workflows/{zcash-lightwalletd.yml => sub-build-lightwalletd.yml} (97%) rename .github/workflows/{zcash-params.yml => sub-build-zcash-params.yml} (86%) rename .github/workflows/{deploy-gcp-tests.yml => sub-deploy-integration-tests-gcp.yml} (99%) rename .github/workflows/{find-cached-disks.yml => sub-find-cached-disks.yml} (100%) create mode 100644 .github/workflows/sub-test-zebra-config.yml diff --git a/.github/workflows/cd-deploy-nodes-gcp.patch.yml 
b/.github/workflows/cd-deploy-nodes-gcp.patch.yml new file mode 100644 index 00000000000..457bedc2a3b --- /dev/null +++ b/.github/workflows/cd-deploy-nodes-gcp.patch.yml @@ -0,0 +1,39 @@ +name: Deploy Nodes to GCP + +on: + pull_request: + paths-ignore: + # code and tests + - '**/*.rs' + # hard-coded checkpoints and proptest regressions + - '**/*.txt' + # dependencies + - '**/Cargo.toml' + - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' + # workflow definitions + - 'docker/**' + - '.dockerignore' + - '.github/workflows/cd-deploy-nodes-gcp.yml' + - '.github/workflows/sub-build-docker-image.yml' + +jobs: + build: + name: Build CD Docker / Build images + runs-on: ubuntu-latest + steps: + - run: 'echo "No build required"' + + test-configuration-file: + name: Test CD default Docker config file / Test default-conf in Docker + runs-on: ubuntu-latest + steps: + - run: 'echo "No build required"' + + test-zebra-conf-path: + name: Test CD custom Docker config file / Test custom-conf in Docker + runs-on: ubuntu-latest + steps: + - run: 'echo "No build required"' \ No newline at end of file diff --git a/.github/workflows/continous-delivery.yml b/.github/workflows/cd-deploy-nodes-gcp.yml similarity index 77% rename from .github/workflows/continous-delivery.yml rename to .github/workflows/cd-deploy-nodes-gcp.yml index 0b9a888c6ed..0b167ceb63c 100644 --- a/.github/workflows/continous-delivery.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -1,4 +1,4 @@ -name: CD +name: Deploy Nodes to GCP # Ensures that only one workflow task will run at a time. Previous deployments, if # already in process, won't get cancelled. 
Instead, we let the first to complete @@ -47,8 +47,8 @@ on: # # workflow definitions # - 'docker/**' # - '.dockerignore' - # - '.github/workflows/continous-delivery.yml' - # - '.github/workflows/build-docker-image.yml' + # - '.github/workflows/cd-deploy-nodes-gcp.yml' + # - '.github/workflows/sub-build-docker-image.yml' # Only runs the Docker image tests, doesn't deploy any instances pull_request: @@ -66,14 +66,13 @@ on: # workflow definitions - 'docker/**' - '.dockerignore' - - '.github/workflows/continous-delivery.yml' - - '.github/workflows/find-cached-disks.yml' + - '.github/workflows/cd-deploy-nodes-gcp.yml' + - '.github/workflows/sub-build-docker-image.yml' release: types: - published - jobs: # If a release was made we want to extract the first part of the semver from the # tag_name @@ -108,7 +107,7 @@ jobs: # The image will be commonly named `zebrad:` build: name: Build CD Docker - uses: ./.github/workflows/build-docker-image.yml + uses: ./.github/workflows/sub-build-docker-image.yml with: dockerfile_path: ./docker/Dockerfile dockerfile_target: runtime @@ -118,99 +117,40 @@ jobs: # Test that Zebra works using the default config with the latest Zebra version. 
test-configuration-file: - name: Test Zebra CD Docker config file - timeout-minutes: 15 - runs-on: ubuntu-latest + name: Test CD default Docker config file needs: build - steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - # Make sure Zebra can sync at least one full checkpoint on mainnet - - name: Run tests using the default config - shell: /usr/bin/bash -exo pipefail {0} - run: | - docker pull ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} - docker run --detach --name default-conf-tests -t ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} - - # Use a subshell to handle the broken pipe error gracefully - ( - trap "" PIPE; - docker logs \ - --tail all \ - --follow \ - default-conf-tests | \ - tee --output-error=exit /dev/stderr | \ - grep --max-count=1 --extended-regexp --color=always \ - -e "net.*=.*Main.*estimated progress to chain tip.*BeforeOverwinter" - ) || true - LOGS_EXIT_STATUS=$? - - docker stop default-conf-tests - - EXIT_STATUS=$(docker wait default-conf-tests || echo "Error retrieving exit status"); - echo "docker exit status: $EXIT_STATUS"; - - # If grep found the pattern, exit with the Docker container exit status - if [ $LOGS_EXIT_STATUS -eq 0 ]; then - exit $EXIT_STATUS; - fi - - # Handle other potential errors here - echo "An error occurred while processing the logs."; - exit 1; + uses: ./.github/workflows/sub-test-zebra-config.yml + with: + test_id: 'default-conf' + docker_image: ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} + grep_patterns: '-e "net.*=.*Main.*estimated progress to chain tip.*BeforeOverwinter"' + test_variables: '-e NETWORK' + network: 'Mainnet' # Test reconfiguring the docker image for testnet. 
test-configuration-file-testnet: - name: Test testnet Zebra CD Docker config file - timeout-minutes: 15 - runs-on: ubuntu-latest + name: Test CD testnet Docker config file needs: build - steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - # Make sure Zebra can sync the genesis block on testnet - - name: Run tests using a testnet config - shell: /usr/bin/bash -exo pipefail {0} - run: | - docker pull ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} - docker run --env "NETWORK=Testnet" --detach --name testnet-conf-tests -t ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} - # Use a subshell to handle the broken pipe error gracefully - ( - trap "" PIPE; - docker logs \ - --tail all \ - --follow \ - testnet-conf-tests | \ - tee --output-error=exit /dev/stderr | \ - grep --max-count=1 --extended-regexp --color=always \ - -e "net.*=.*Test.*estimated progress to chain tip.*Genesis" \ - -e "net.*=.*Test.*estimated progress to chain tip.*BeforeOverwinter"; - ) || true - LOGS_EXIT_STATUS=$? 
- - docker stop testnet-conf-tests - - EXIT_STATUS=$(docker wait testnet-conf-tests || echo "Error retrieving exit status"); - echo "docker exit status: $EXIT_STATUS"; - - # If grep found the pattern, exit with the Docker container exit status - if [ $LOGS_EXIT_STATUS -eq 0 ]; then - exit $EXIT_STATUS; - fi - - # Handle other potential errors here - echo "An error occurred while processing the logs."; - exit 1; + # Make sure Zebra can sync the genesis block on testnet + uses: ./.github/workflows/sub-test-zebra-config.yml + with: + test_id: 'testnet-conf' + docker_image: ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} + grep_patterns: '-e "net.*=.*Test.*estimated progress to chain tip.*Genesis" -e "net.*=.*Test.*estimated progress to chain tip.*BeforeOverwinter"' + test_variables: '-e NETWORK' + network: 'Testnet' + + # Test that Zebra works using $ZEBRA_CONF_PATH config + test-zebra-conf-path: + name: Test CD custom Docker config file + needs: build + uses: ./.github/workflows/sub-test-zebra-config.yml + with: + test_id: 'custom-conf' + docker_image: ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} + grep_patterns: '-e "v1.0.0-rc.2.toml"' + test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="zebrad/tests/common/configs/v1.0.0-rc.2.toml"' + network: ${{ inputs.network || vars.ZCASH_NETWORK }} # Deploy Managed Instance Groups (MiGs) for Mainnet and Testnet, # with one node in the configured GCP region. @@ -225,14 +165,14 @@ jobs: # otherwise a new major version is deployed in a new MiG. 
# # Runs: - # - on every push/merge to the `main` branch + # - on every push to the `main` branch # - on every release, when it's published deploy-nodes: strategy: matrix: network: [Mainnet, Testnet] name: Deploy ${{ matrix.network }} nodes - needs: [ build, test-configuration-file, versioning ] + needs: [ build, versioning, test-configuration-file, test-zebra-conf-path ] runs-on: ubuntu-latest timeout-minutes: 60 permissions: @@ -334,7 +274,7 @@ jobs: # Note: this instances are not automatically replaced or deleted deploy-instance: name: Deploy single ${{ inputs.network }} instance - needs: [ build, test-configuration-file ] + needs: [ build, test-configuration-file, test-zebra-conf-path ] runs-on: ubuntu-latest timeout-minutes: 30 permissions: @@ -401,7 +341,7 @@ jobs: failure-issue: name: Open or update issues for release failures # When a new job is added to this workflow, add it to this list. - needs: [ versioning, build, test-configuration-file, deploy-nodes, deploy-instance ] + needs: [ versioning, build, deploy-nodes, deploy-instance ] # Only open tickets for failed or cancelled jobs that are not coming from PRs. # (PR statuses are already reported in the PR jobs list, and checked by Mergify.) 
if: (failure() && github.event.pull_request == null) || (cancelled() && github.event.pull_request == null) diff --git a/.github/workflows/delete-gcp-resources.yml b/.github/workflows/chore-delete-gcp-resources.yml similarity index 100% rename from .github/workflows/delete-gcp-resources.yml rename to .github/workflows/chore-delete-gcp-resources.yml diff --git a/.github/workflows/project-management.yml b/.github/workflows/chore-project-management.yml similarity index 100% rename from .github/workflows/project-management.yml rename to .github/workflows/chore-project-management.yml diff --git a/.github/workflows/build-crates-individually.patch.yml b/.github/workflows/ci-build-crates.patch.yml similarity index 97% rename from .github/workflows/build-crates-individually.patch.yml rename to .github/workflows/ci-build-crates.patch.yml index a4e69340fb5..b99adfc4cfe 100644 --- a/.github/workflows/build-crates-individually.patch.yml +++ b/.github/workflows/ci-build-crates.patch.yml @@ -14,7 +14,7 @@ on: - '.cargo/config.toml' - '**/clippy.toml' # workflow definitions - - '.github/workflows/build-crates-individually.yml' + - '.github/workflows/ci-build-crates.yml' jobs: matrix: diff --git a/.github/workflows/build-crates-individually.yml b/.github/workflows/ci-build-crates.yml similarity index 98% rename from .github/workflows/build-crates-individually.yml rename to .github/workflows/ci-build-crates.yml index 33f8b1ef12c..e12f6031cb8 100644 --- a/.github/workflows/build-crates-individually.yml +++ b/.github/workflows/ci-build-crates.yml @@ -22,7 +22,7 @@ on: - '.cargo/config.toml' - '**/clippy.toml' # workflow definitions - - '.github/workflows/build-crates-individually.yml' + - '.github/workflows/ci-build-crates.yml' pull_request: paths: # production code and test code @@ -34,7 +34,7 @@ on: - '.cargo/config.toml' - '**/clippy.toml' # workflow definitions - - '.github/workflows/build-crates-individually.yml' + - '.github/workflows/ci-build-crates.yml' env: CARGO_INCREMENTAL: 
${{ vars.CARGO_INCREMENTAL }} diff --git a/.github/workflows/coverage.patch.yml b/.github/workflows/ci-coverage.patch.yml similarity index 93% rename from .github/workflows/coverage.patch.yml rename to .github/workflows/ci-coverage.patch.yml index 241f92e73e5..e2defe8ee7b 100644 --- a/.github/workflows/coverage.patch.yml +++ b/.github/workflows/ci-coverage.patch.yml @@ -12,7 +12,7 @@ on: - '.cargo/config.toml' - '**/clippy.toml' - 'codecov.yml' - - '.github/workflows/coverage.yml' + - '.github/workflows/ci-coverage.yml' jobs: coverage: diff --git a/.github/workflows/coverage.yml b/.github/workflows/ci-coverage.yml similarity index 97% rename from .github/workflows/coverage.yml rename to .github/workflows/ci-coverage.yml index b66944b38c5..0c913be1ad0 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/ci-coverage.yml @@ -27,7 +27,7 @@ on: - '**/clippy.toml' # workflow definitions - 'codecov.yml' - - '.github/workflows/coverage.yml' + - '.github/workflows/ci-coverage.yml' pull_request: paths: - '**/*.rs' @@ -38,7 +38,7 @@ on: - '.cargo/config.toml' - '**/clippy.toml' - 'codecov.yml' - - '.github/workflows/coverage.yml' + - '.github/workflows/ci-coverage.yml' env: CARGO_INCREMENTAL: ${{ vars.CARGO_INCREMENTAL }} diff --git a/.github/workflows/continous-integration-docker.patch.yml b/.github/workflows/ci-integration-tests-gcp.patch.yml similarity index 67% rename from .github/workflows/continous-integration-docker.patch.yml rename to .github/workflows/ci-integration-tests-gcp.patch.yml index 4b9755519a7..f671429b5bf 100644 --- a/.github/workflows/continous-integration-docker.patch.yml +++ b/.github/workflows/ci-integration-tests-gcp.patch.yml @@ -1,4 +1,4 @@ -name: CI Docker +name: Integration Tests on GCP # These jobs *don't* depend on cached Google Cloud state disks, # so they can be skipped when the modified files make the actual workflow run. 
@@ -20,10 +20,10 @@ on: # workflow definitions - 'docker/**' - '.dockerignore' - - '.github/workflows/continous-integration-docker.yml' - - '.github/workflows/deploy-gcp-tests.yml' - - '.github/workflows/find-cached-disks.yml' - - '.github/workflows/build-docker-image.yml' + - '.github/workflows/ci-unit-tests-docker.yml' + - '.github/workflows/sub-deploy-integration-tests-gcp.yml' + - '.github/workflows/sub-find-cached-disks.yml' + - '.github/workflows/sub-build-docker-image.yml' jobs: # We don't patch the testnet job, because testnet isn't required to merge (it's too unstable) @@ -39,48 +39,6 @@ jobs: steps: - run: 'echo "No build required"' - test-all: - name: Test all - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' - - test-all-getblocktemplate-rpcs: - name: Test all with getblocktemplate-rpcs feature - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' - - test-fake-activation-heights: - name: Test with fake activation heights - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' - - test-empty-sync: - name: Test checkpoint sync from empty state - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' - - test-lightwalletd-integration: - name: Test integration with lightwalletd - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' - - test-configuration-file: - name: Test Zebra default Docker config file - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' - - test-zebra-conf-path: - name: Test Zebra custom Docker config file - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' - test-stateful-sync: name: Zebra checkpoint update / Run sync-past-checkpoint test runs-on: ubuntu-latest diff --git a/.github/workflows/continous-integration-docker.yml b/.github/workflows/ci-integration-tests-gcp.yml similarity index 71% rename from .github/workflows/continous-integration-docker.yml rename to .github/workflows/ci-integration-tests-gcp.yml index 
ee87f999ce0..1132762c210 100644 --- a/.github/workflows/continous-integration-docker.yml +++ b/.github/workflows/ci-integration-tests-gcp.yml @@ -1,4 +1,4 @@ -name: CI Docker +name: Integration Tests on GCP # Ensures that only one workflow task will run at a time. Previous builds, if # already in process, will get cancelled. Only the latest commit will be allowed @@ -62,10 +62,10 @@ on: - '**/clippy.toml' # workflow definitions - 'docker/**' - - '.github/workflows/continous-integration-docker.yml' - - '.github/workflows/deploy-gcp-tests.yml' - - '.github/workflows/build-docker-image.yml' - - '.github/workflows/find-cached-disks.yml' + - '.github/workflows/ci-integration-tests-gcp.yml' + - '.github/workflows/sub-deploy-integration-tests-gcp.yml' + - '.github/workflows/sub-build-docker-image.yml' + - '.github/workflows/sub-find-cached-disks.yml' push: branches: @@ -86,10 +86,10 @@ on: # workflow definitions - 'docker/**' - '.dockerignore' - - '.github/workflows/continous-integration-docker.yml' - - '.github/workflows/deploy-gcp-tests.yml' - - '.github/workflows/find-cached-disks.yml' - - '.github/workflows/build-docker-image.yml' + - '.github/workflows/ci-integration-tests-gcp.yml' + - '.github/workflows/sub-deploy-integration-tests-gcp.yml' + - '.github/workflows/sub-find-cached-disks.yml' + - '.github/workflows/sub-build-docker-image.yml' jobs: # to also run a job on Mergify head branches, @@ -101,20 +101,20 @@ jobs: # The default network is mainnet unless a manually triggered workflow or repository variable # is configured differently. 
# - # The outputs for this job have the same names as the workflow outputs in find-cached-disks.yml + # The outputs for this job have the same names as the workflow outputs in sub-find-cached-disks.yml get-available-disks: name: Check if cached state disks exist for ${{ inputs.network || vars.ZCASH_NETWORK }} - uses: ./.github/workflows/find-cached-disks.yml + uses: ./.github/workflows/sub-find-cached-disks.yml with: network: ${{ inputs.network || vars.ZCASH_NETWORK }} # Check if the cached state disks used by the tests are available for testnet. # - # The outputs for this job have the same names as the workflow outputs in find-cached-disks.yml + # The outputs for this job have the same names as the workflow outputs in sub-find-cached-disks.yml # Some outputs are ignored, because we don't run those jobs on testnet. get-available-disks-testnet: name: Check if cached state disks exist for testnet - uses: ./.github/workflows/find-cached-disks.yml + uses: ./.github/workflows/sub-find-cached-disks.yml with: network: 'Testnet' @@ -125,7 +125,7 @@ jobs: # testnet when running the image. build: name: Build CI Docker - uses: ./.github/workflows/build-docker-image.yml + uses: ./.github/workflows/sub-build-docker-image.yml with: dockerfile_path: ./docker/Dockerfile dockerfile_target: tests @@ -135,199 +135,6 @@ jobs: rust_lib_backtrace: full rust_log: info - # zebrad tests without cached state - - # TODO: make the non-cached-state tests use: - # network: ${{ inputs.network || vars.ZCASH_NETWORK }} - - # Run all the zebra tests, including tests that are ignored by default. - # Skips tests that need a cached state disk or a lightwalletd binary. - # - # - We run all the tests behind the `getblocktemplate-rpcs` feature as a separated step. - # - We activate the gRPC feature to avoid recompiling `zebrad`, but we don't actually run any gRPC tests. 
- # - # TODO: turn this test and the getblocktemplate test into a matrix, so the jobs use exactly the same diagnostics settings - test-all: - name: Test all - runs-on: ubuntu-latest-xl - needs: build - if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} - steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - # Run unit, basic acceptance tests, and ignored tests, only showing command output if the test fails. - # - # If some tests hang, add "-- --nocapture" for just that test, or for all the tests. - - name: Run zebrad tests - run: | - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK --name zebrad-tests --tty ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features "lightwalletd-grpc-tests" --workspace -- --include-ignored - env: - NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} - - # zebrad tests without cached state with `getblocktemplate-rpcs` feature - # - # Same as above but we run all the tests behind the `getblocktemplate-rpcs` feature. 
- test-all-getblocktemplate-rpcs: - name: Test all with getblocktemplate-rpcs feature - runs-on: ubuntu-latest-xl - needs: build - if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} - steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - - name: Run zebrad tests - run: | - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK --name zebrad-tests --tty -e ${{ inputs.network || vars.ZCASH_NETWORK }} ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features "lightwalletd-grpc-tests getblocktemplate-rpcs" --workspace -- --include-ignored - env: - NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} - - # Run state tests with fake activation heights. - # - # This test changes zebra-chain's activation heights, - # which can recompile all the Zebra crates, - # so we want its build products to be cached separately. - # - # Also, we don't want to accidentally use the fake heights in other tests. - # - # (The gRPC feature is a zebrad feature, so it isn't needed here.) 
- test-fake-activation-heights: - name: Test with fake activation heights - runs-on: ubuntu-latest - needs: build - if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} - steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - - name: Run tests with fake activation heights - run: | - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK -e TEST_FAKE_ACTIVATION_HEIGHTS --name zebrad-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --package zebra-state --lib -- --nocapture --include-ignored with_fake_activation_heights - env: - TEST_FAKE_ACTIVATION_HEIGHTS: '1' - NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} - - # Test that Zebra syncs and checkpoints a few thousand blocks from an empty state. - # - # (We activate the gRPC feature to avoid recompiling `zebrad`, but we don't actually run any gRPC tests.) 
- test-empty-sync: - name: Test checkpoint sync from empty state - runs-on: ubuntu-latest - needs: build - if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} - steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - - name: Run zebrad large sync tests - run: | - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK --name zebrad-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features lightwalletd-grpc-tests --package zebrad --test acceptance -- --nocapture --include-ignored sync_large_checkpoints_ - env: - NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} - - # Test launching lightwalletd with an empty lightwalletd and Zebra state. - # - # (We activate the gRPC feature to avoid recompiling `zebrad`, but we don't actually run any gRPC tests.) 
- test-lightwalletd-integration: - name: Test integration with lightwalletd - runs-on: ubuntu-latest - needs: build - if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} - steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - - name: Run tests with empty lightwalletd launch - run: | - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK -e ZEBRA_TEST_LIGHTWALLETD --name lightwalletd-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features lightwalletd-grpc-tests --package zebrad --test acceptance -- --nocapture --include-ignored lightwalletd_integration - env: - ZEBRA_TEST_LIGHTWALLETD: '1' - NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} - - # Test that Zebra works using the default config with the latest Zebra version - test-configuration-file: - name: Test Zebra default Docker config file - timeout-minutes: 15 - runs-on: ubuntu-latest - needs: build - if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} - steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - - name: Run tests using the default config - run: | - set -ex - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK --detach --name default-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} zebrad start - EXIT_STATUS=$(docker logs --tail all --follow default-conf-tests 2>&1 | grep -q --extended-regexp --max-count=1 -e 'estimated progress to chain 
tip.*BeforeOverwinter'; echo $?; ) - docker stop default-conf-tests - docker logs default-conf-tests - exit "$EXIT_STATUS" - env: - NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} - - # Test that Zebra works using the $ZEBRA_CONF_PATH config - test-zebra-conf-path: - name: Test Zebra custom Docker config file - timeout-minutes: 15 - runs-on: ubuntu-latest - needs: build - if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} - steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - - name: Run tests using the $ZEBRA_CONF_PATH - run: | - set -ex - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK --detach -e ZEBRA_CONF_PATH --name variable-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} zebrad start - EXIT_STATUS=$(docker logs --tail all --follow variable-conf-tests 2>&1 | grep -q --extended-regexp --max-count=1 -e 'v1.0.0-rc.2.toml'; echo $?; ) - docker stop variable-conf-tests - docker logs variable-conf-tests - exit "$EXIT_STATUS" - env: - ZEBRA_CONF_PATH: 'zebrad/tests/common/configs/v1.0.0-rc.2.toml' - NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} - - # END TODO: make the non-cached-state tests use: - # network: ${{ inputs.network || vars.ZCASH_NETWORK }} - # zebrad cached checkpoint state tests # Regenerate mandatory checkpoint Zebra cached state disks. 
@@ -340,7 +147,7 @@ jobs: regenerate-stateful-disks: name: Zebra checkpoint needs: [ build, get-available-disks ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !fromJSON(needs.get-available-disks.outputs.zebra_checkpoint_disk) || github.event.inputs.regenerate-disks == 'true' }} with: app_name: zebrad @@ -369,7 +176,7 @@ jobs: test-stateful-sync: name: Zebra checkpoint update needs: [ regenerate-stateful-disks, get-available-disks ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_checkpoint_disk) || needs.regenerate-stateful-disks.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad @@ -397,7 +204,7 @@ jobs: test-full-sync: name: Zebra tip needs: [ build, get-available-disks ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ github.event_name == 'schedule' || !fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet') }} with: app_name: zebrad @@ -440,7 +247,7 @@ jobs: test-update-sync: name: Zebra tip update needs: [ test-full-sync, get-available-disks ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad @@ -473,7 +280,7 @@ jobs: checkpoints-mainnet: name: 
Generate checkpoints mainnet needs: [ test-full-sync, get-available-disks ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad @@ -508,7 +315,7 @@ jobs: test-full-sync-testnet: name: Zebra tip on testnet needs: [ build, get-available-disks-testnet ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ (github.event_name == 'schedule' && vars.SCHEDULE_TESTNET_FULL_SYNC == 'true') || !fromJSON(needs.get-available-disks-testnet.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && (inputs.network || vars.ZCASH_NETWORK) == 'Testnet') }} with: app_name: zebrad @@ -554,7 +361,7 @@ jobs: checkpoints-testnet: name: Generate checkpoints testnet needs: [ test-full-sync-testnet, get-available-disks-testnet ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks-testnet.outputs.zebra_tip_disk) || needs.test-full-sync-testnet.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad @@ -587,7 +394,7 @@ jobs: lightwalletd-full-sync: name: lightwalletd tip needs: [ test-full-sync, get-available-disks ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml # Currently the lightwalletd tests only work on Mainnet if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && 
(fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && (github.event_name == 'schedule' || !fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || github.event.inputs.run-lwd-sync == 'true' ) }} with: @@ -627,7 +434,7 @@ jobs: lightwalletd-update-sync: name: lightwalletd tip update needs: [ lightwalletd-full-sync, get-available-disks ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd @@ -659,7 +466,7 @@ jobs: lightwalletd-rpc-test: name: Zebra tip JSON-RPC needs: [ test-full-sync, get-available-disks ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd @@ -684,7 +491,7 @@ jobs: lightwalletd-transactions-test: name: lightwalletd tip send needs: [ lightwalletd-full-sync, get-available-disks ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 
'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd @@ -711,7 +518,7 @@ jobs: lightwalletd-grpc-test: name: lightwalletd GRPC tests needs: [ lightwalletd-full-sync, get-available-disks ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd @@ -742,7 +549,7 @@ jobs: get-block-template-test: name: get block template needs: [ test-full-sync, get-available-disks ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad @@ -768,7 +575,7 @@ jobs: submit-block-test: name: submit block needs: [ test-full-sync, get-available-disks ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad @@ -789,8 +596,8 @@ jobs: # # This list is for reliable tests that are run on the `main` branch. 
# Testnet jobs are not in this list, because we expect testnet to fail occasionally. - needs: [ regenerate-stateful-disks, test-full-sync, lightwalletd-full-sync, test-all, test-all-getblocktemplate-rpcs, test-fake-activation-heights, test-empty-sync, test-lightwalletd-integration, test-configuration-file, test-zebra-conf-path, test-stateful-sync, test-update-sync, checkpoints-mainnet, lightwalletd-update-sync, lightwalletd-rpc-test, lightwalletd-transactions-test, lightwalletd-grpc-test, get-block-template-test, submit-block-test ] - # Only open tickets for failed or cancelled jobs that are not coming from PRs. + needs: [ regenerate-stateful-disks, test-full-sync, lightwalletd-full-sync, test-stateful-sync, test-update-sync, checkpoints-mainnet, lightwalletd-update-sync, lightwalletd-rpc-test, lightwalletd-transactions-test, lightwalletd-grpc-test, get-block-template-test, submit-block-test ] + # Only open tickets for failed scheduled jobs, manual workflow runs, or `main` branch merges. # (PR statuses are already reported in the PR jobs list, and checked by Mergify.) 
if: (failure() && github.event.pull_request == null) || (cancelled() && github.event.pull_request == null) runs-on: ubuntu-latest diff --git a/.github/workflows/lint.patch.yml b/.github/workflows/ci-lint.patch.yml similarity index 100% rename from .github/workflows/lint.patch.yml rename to .github/workflows/ci-lint.patch.yml diff --git a/.github/workflows/lint.yml b/.github/workflows/ci-lint.yml similarity index 99% rename from .github/workflows/lint.yml rename to .github/workflows/ci-lint.yml index ee4ba696d31..1e22a99a0f2 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/ci-lint.yml @@ -45,7 +45,7 @@ jobs: **/Cargo.lock clippy.toml .cargo/config.toml - .github/workflows/lint.yml + .github/workflows/ci-lint.yml - name: Workflow files id: changed-files-workflows diff --git a/.github/workflows/ci-unit-tests-docker.patch.yml b/.github/workflows/ci-unit-tests-docker.patch.yml new file mode 100644 index 00000000000..e3e629fb490 --- /dev/null +++ b/.github/workflows/ci-unit-tests-docker.patch.yml @@ -0,0 +1,82 @@ +name: Docker Unit Tests + +# These jobs *don't* depend on cached Google Cloud state disks, +# so they can be skipped when the modified files make the actual workflow run. 
+on: + pull_request: + paths-ignore: + # code and tests + - '**/*.rs' + # hard-coded checkpoints and proptest regressions + - '**/*.txt' + # test data snapshots + - '**/*.snap' + # dependencies + - '**/Cargo.toml' + - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' + # workflow definitions + - 'docker/**' + - '.dockerignore' + - '.github/workflows/ci-unit-tests-docker.yml' + - '.github/workflows/sub-deploy-integration-tests-gcp.yml' + - '.github/workflows/sub-find-cached-disks.yml' + - '.github/workflows/sub-build-docker-image.yml' + +jobs: + # We don't patch the testnet job, because testnet isn't required to merge (it's too unstable) + get-available-disks: + name: Check if cached state disks exist for Mainnet / Check if cached state disks exist + runs-on: ubuntu-latest + steps: + - run: 'echo "No build required"' + + build: + name: Build CI Docker / Build images + runs-on: ubuntu-latest + steps: + - run: 'echo "No build required"' + + test-all: + name: Test all + runs-on: ubuntu-latest + steps: + - run: 'echo "No build required"' + + test-all-getblocktemplate-rpcs: + name: Test all with getblocktemplate-rpcs feature + runs-on: ubuntu-latest + steps: + - run: 'echo "No build required"' + + test-fake-activation-heights: + name: Test with fake activation heights + runs-on: ubuntu-latest + steps: + - run: 'echo "No build required"' + + test-empty-sync: + name: Test checkpoint sync from empty state + runs-on: ubuntu-latest + steps: + - run: 'echo "No build required"' + + test-lightwalletd-integration: + name: Test integration with lightwalletd + runs-on: ubuntu-latest + steps: + - run: 'echo "No build required"' + + test-configuration-file: + name: Test CI default Docker config file / Test default-conf in Docker + runs-on: ubuntu-latest + steps: + - run: 'echo "No build required"' + + test-zebra-conf-path: + name: Test CI custom Docker config file / Test custom-conf in Docker + runs-on: ubuntu-latest + steps: + - run: 'echo "No 
build required"' diff --git a/.github/workflows/ci-unit-tests-docker.yml b/.github/workflows/ci-unit-tests-docker.yml new file mode 100644 index 00000000000..fcd8e74d41f --- /dev/null +++ b/.github/workflows/ci-unit-tests-docker.yml @@ -0,0 +1,269 @@ +name: Docker Unit Tests + +# Ensures that only one workflow task will run at a time. Previous builds, if +# already in process, will get cancelled. Only the latest commit will be allowed +# to run, cancelling any workflows in between +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +on: + workflow_dispatch: + inputs: + network: + default: 'Mainnet' + description: 'Network to deploy: Mainnet or Testnet' + required: true + no_cache: + description: 'Disable the Docker cache for this build' + required: false + type: boolean + default: false + + pull_request: + paths: + # code and tests + - '**/*.rs' + # hard-coded checkpoints and proptest regressions + - '**/*.txt' + # test data snapshots + - '**/*.snap' + # dependencies + - '**/Cargo.toml' + - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' + # workflow definitions + - 'docker/**' + - '.github/workflows/ci-unit-tests-docker.yml' + - '.github/workflows/sub-deploy-integration-tests-gcp.yml' + - '.github/workflows/sub-build-docker-image.yml' + - '.github/workflows/sub-find-cached-disks.yml' + + push: + branches: + - main + paths: + # code and tests + - '**/*.rs' + # hard-coded checkpoints and proptest regressions + - '**/*.txt' + # test data snapshots + - '**/*.snap' + # dependencies + - '**/Cargo.toml' + - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' + # workflow definitions + - 'docker/**' + - '.dockerignore' + - '.github/workflows/ci-unit-tests-docker.yml' + - '.github/workflows/sub-deploy-integration-tests-gcp.yml' + - '.github/workflows/sub-find-cached-disks.yml' + - '.github/workflows/sub-build-docker-image.yml' + +jobs: + # 
Build the docker image used by the tests. + # + # The default network in the Zebra config in the image is mainnet, unless a manually triggered + # workflow or repository variable is configured differently. Testnet jobs change that config to + # testnet when running the image. + build: + name: Build CI Docker + uses: ./.github/workflows/sub-build-docker-image.yml + with: + dockerfile_path: ./docker/Dockerfile + dockerfile_target: tests + image_name: ${{ vars.CI_IMAGE_NAME }} + no_cache: ${{ inputs.no_cache || false }} + rust_backtrace: full + rust_lib_backtrace: full + rust_log: info + + # Run all the zebra tests, including tests that are ignored by default. + # + # - We run all the tests behind the `getblocktemplate-rpcs` feature as a separate step. + # - We activate the gRPC feature to avoid recompiling `zebrad`, but we don't actually run any gRPC tests. + # + # TODO: turn this test and the getblocktemplate test into a matrix, so the jobs use exactly the same diagnostics settings + test-all: + name: Test all + runs-on: ubuntu-latest-xl + needs: build + steps: + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + # Run unit, basic acceptance tests, and ignored tests, only showing command output if the test fails. + # + # If some tests hang, add "-- --nocapture" for just that test, or for all the tests. 
+ - name: Run zebrad tests + run: | + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} + docker run -e NETWORK --name zebrad-tests --tty ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} cargo test --locked --release --features "lightwalletd-grpc-tests" --workspace -- --include-ignored + env: + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + + # zebrad tests without cached state with `getblocktemplate-rpcs` feature + # + # Same as above but we run all the tests behind the `getblocktemplate-rpcs` feature. + test-all-getblocktemplate-rpcs: + name: Test all with getblocktemplate-rpcs feature + runs-on: ubuntu-latest-xl + needs: build + steps: + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - name: Run zebrad tests + run: | + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} + docker run -e NETWORK --name zebrad-tests --tty -e ${{ inputs.network || vars.ZCASH_NETWORK }} ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} cargo test --locked --release --features "lightwalletd-grpc-tests getblocktemplate-rpcs" --workspace -- --include-ignored + env: + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + + # Run state tests with fake activation heights. + # + # This test changes zebra-chain's activation heights, + # which can recompile all the Zebra crates, + # so we want its build products to be cached separately. + # + # Also, we don't want to accidentally use the fake heights in other tests. + # + # (The gRPC feature is a zebrad feature, so it isn't needed here.) 
+ test-fake-activation-heights: + name: Test with fake activation heights + runs-on: ubuntu-latest + needs: build + steps: + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - name: Run tests with fake activation heights + run: | + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} + docker run -e NETWORK -e TEST_FAKE_ACTIVATION_HEIGHTS --name zebrad-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} cargo test --locked --release --package zebra-state --lib -- --nocapture --include-ignored with_fake_activation_heights + env: + TEST_FAKE_ACTIVATION_HEIGHTS: '1' + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + + # Test that Zebra syncs and checkpoints a few thousand blocks from an empty state. + # + # (We activate the gRPC feature to avoid recompiling `zebrad`, but we don't actually run any gRPC tests.) + test-empty-sync: + name: Test checkpoint sync from empty state + runs-on: ubuntu-latest + needs: build + steps: + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - name: Run zebrad large sync tests + run: | + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} + docker run -e NETWORK --name zebrad-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} cargo test --locked --release --features lightwalletd-grpc-tests --package zebrad --test acceptance -- --nocapture --include-ignored sync_large_checkpoints_ + env: + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + + # Test launching lightwalletd with an empty lightwalletd and Zebra state. + # + # (We activate the gRPC feature to avoid recompiling `zebrad`, but we don't actually run any gRPC tests.) 
+ test-lightwalletd-integration: + name: Test integration with lightwalletd + runs-on: ubuntu-latest + needs: build + steps: + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - name: Run tests with empty lightwalletd launch + run: | + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} + docker run -e NETWORK -e ZEBRA_TEST_LIGHTWALLETD --name lightwalletd-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} cargo test --locked --release --features lightwalletd-grpc-tests --package zebrad --test acceptance -- --nocapture --include-ignored lightwalletd_integration + env: + ZEBRA_TEST_LIGHTWALLETD: '1' + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + + # Test that Zebra works using the default config with the latest Zebra version. + test-configuration-file: + name: Test CI default Docker config file + needs: build + uses: ./.github/workflows/sub-test-zebra-config.yml + with: + test_id: 'default-conf' + docker_image: ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} + grep_patterns: '-e "net.*=.*Main.*estimated progress to chain tip.*BeforeOverwinter"' + test_variables: '-e NETWORK' + network: 'Mainnet' + + # Test reconfiguring the docker image for testnet. 
+ test-configuration-file-testnet: + name: Test CI testnet Docker config file + needs: build + # Make sure Zebra can sync the genesis block on testnet + uses: ./.github/workflows/sub-test-zebra-config.yml + with: + test_id: 'testnet-conf' + docker_image: ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} + grep_patterns: '-e "net.*=.*Test.*estimated progress to chain tip.*Genesis" -e "net.*=.*Test.*estimated progress to chain tip.*BeforeOverwinter"' + # TODO: improve the entrypoint to avoid using `ENTRYPOINT_FEATURES=""` + test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="/etc/zebrad/zebrad.toml" -e ENTRYPOINT_FEATURES=""' + network: 'Testnet' + + # Test that Zebra works using $ZEBRA_CONF_PATH config + test-zebra-conf-path: + name: Test CI custom Docker config file + needs: build + uses: ./.github/workflows/sub-test-zebra-config.yml + with: + test_id: 'custom-conf' + docker_image: ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} + grep_patterns: '-e "v1.0.0-rc.2.toml"' + test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="zebrad/tests/common/configs/v1.0.0-rc.2.toml"' + network: ${{ inputs.network || vars.ZCASH_NETWORK }} + + failure-issue: + name: Open or update issues for main branch failures + # When a new test is added to this workflow, add it to this list. + # + # This list is for reliable tests that are run on the `main` branch. + # Testnet jobs are not in this list, because we expect testnet to fail occasionally. + needs: [ test-all, test-all-getblocktemplate-rpcs, test-fake-activation-heights, test-empty-sync, test-lightwalletd-integration, test-configuration-file, test-zebra-conf-path ] + # Only open tickets for failed scheduled jobs, manual workflow runs, or `main` branch merges. + # (PR statuses are already reported in the PR jobs list, and checked by Mergify.) + # TODO: if a job times out, we want to create a ticket. Does failure() do that? Or do we need cancelled()? 
+ if: failure() && github.event.pull_request == null + runs-on: ubuntu-latest + steps: + - uses: jayqi/failed-build-issue-action@v1 + with: + title-template: "{{refname}} branch CI failed: {{eventName}} in {{workflow}}" + # New failures open an issue with this label. + # TODO: do we want a different label for each workflow, or each kind of workflow? + label-name: S-ci-fail-auto-issue + # If there is already an open issue with this label, any failures become comments on that issue. + always-create-new-issue: false + github-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/continous-integration-os.patch.yml b/.github/workflows/ci-unit-tests-os.patch.yml similarity index 96% rename from .github/workflows/continous-integration-os.patch.yml rename to .github/workflows/ci-unit-tests-os.patch.yml index fe81951024a..2d126cba054 100644 --- a/.github/workflows/continous-integration-os.patch.yml +++ b/.github/workflows/ci-unit-tests-os.patch.yml @@ -1,4 +1,4 @@ -name: CI OSes +name: Multi-OS Unit Tests on: pull_request: @@ -11,7 +11,7 @@ on: - '**/deny.toml' - '.cargo/config.toml' - '**/clippy.toml' - - '.github/workflows/continous-integration-os.yml' + - '.github/workflows/ci-unit-tests-os.yml' jobs: test: diff --git a/.github/workflows/continous-integration-os.yml b/.github/workflows/ci-unit-tests-os.yml similarity index 97% rename from .github/workflows/continous-integration-os.yml rename to .github/workflows/ci-unit-tests-os.yml index 63e7006414b..00594afacd5 100644 --- a/.github/workflows/continous-integration-os.yml +++ b/.github/workflows/ci-unit-tests-os.yml @@ -1,4 +1,4 @@ -name: CI OSes +name: Multi-OS Unit Tests # Ensures that only one workflow task will run at a time. Previous builds, if # already in process, will get cancelled. 
Only the latest commit will be allowed @@ -12,14 +12,12 @@ on: # we build Rust and Zcash parameter caches on main, # so they can be shared by all branches: # https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache - push: - branches: - - main + + pull_request: paths: - # production code and test code + # code and tests - '**/*.rs' - # hard-coded checkpoints - # TODO: skip proptest regressions? + # hard-coded checkpoints and proptest regressions - '**/*.txt' # test data snapshots - '**/*.snap' @@ -31,12 +29,16 @@ on: - '.cargo/config.toml' - '**/clippy.toml' # workflow definitions - - '.github/workflows/continous-integration-os.yml' - pull_request: + - '.github/workflows/ci-unit-tests-os.yml' + + push: + branches: + - main paths: - # code and tests + # production code and test code - '**/*.rs' - # hard-coded checkpoints and proptest regressions + # hard-coded checkpoints + # TODO: skip proptest regressions? - '**/*.txt' # test data snapshots - '**/*.snap' @@ -48,7 +50,8 @@ on: - '.cargo/config.toml' - '**/clippy.toml' # workflow definitions - - '.github/workflows/continous-integration-os.yml' + - '.github/workflows/ci-unit-tests-os.yml' + - '.github/workflows/sub-build-docker-image.yml' env: CARGO_INCREMENTAL: ${{ vars.CARGO_INCREMENTAL }} @@ -58,6 +61,9 @@ env: COLORBT_SHOW_HIDDEN: ${{ vars.COLORBT_SHOW_HIDDEN }} jobs: + ######################################## + ### Build and test Zebra on all OSes ### + ######################################## test: name: Test ${{ matrix.rust }} on ${{ matrix.os }}${{ matrix.features }} # The large timeout is to accommodate: @@ -233,7 +239,6 @@ jobs: - name: Install last version of Protoc uses: arduino/setup-protoc@v2.1.0 with: - # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed version: '23.x' repo-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/continous-delivery.patch.yml 
b/.github/workflows/continous-delivery.patch.yml deleted file mode 100644 index f51ef601468..00000000000 --- a/.github/workflows/continous-delivery.patch.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: CD - -on: - # Only patch the Docker image test jobs - pull_request: - paths-ignore: - # code and tests - - '**/*.rs' - # hard-coded checkpoints and proptest regressions - - '**/*.txt' - # dependencies - - '**/Cargo.toml' - - '**/Cargo.lock' - # configuration files - - '.cargo/config.toml' - - '**/clippy.toml' - # workflow definitions - - 'docker/**' - - '.dockerignore' - - '.github/workflows/continous-delivery.yml' - - '.github/workflows/find-cached-disks.yml' - - -jobs: - build: - name: Build CD Docker / Build images - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' - - test-configuration-file: - name: Test Zebra CD Docker config file - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' diff --git a/.github/workflows/continous-integration-docker.patch-always.yml b/.github/workflows/continous-integration-docker.patch-always.yml deleted file mode 100644 index e05c97f9d24..00000000000 --- a/.github/workflows/continous-integration-docker.patch-always.yml +++ /dev/null @@ -1,26 +0,0 @@ -# These jobs can be skipped based on cached Google Cloud state disks, -# and some of them just run on the `main` branch, -# so the patch jobs always need to run on every PR. 
-name: CI Docker - -on: - pull_request: - -jobs: - regenerate-stateful-disks: - name: Zebra checkpoint / Run sync-to-checkpoint test - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' - - test-full-sync: - name: Zebra tip / Run full-sync test - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' - - lightwalletd-full-sync: - name: lightwalletd tip / Run lwd-full-sync test - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' diff --git a/.github/workflows/docs.patch.yml b/.github/workflows/docs-deploy-firebase.patch.yml similarity index 94% rename from .github/workflows/docs.patch.yml rename to .github/workflows/docs-deploy-firebase.patch.yml index 0c3c94aca03..04b4bdc46d4 100644 --- a/.github/workflows/docs.patch.yml +++ b/.github/workflows/docs-deploy-firebase.patch.yml @@ -18,7 +18,7 @@ on: - '.cargo/config.toml' - '**/clippy.toml' # workflow definitions - - '.github/workflows/docs.yml' + - '.github/workflows/docs-deploy-firebase.yml' jobs: build-docs-book: diff --git a/.github/workflows/docs.yml b/.github/workflows/docs-deploy-firebase.yml similarity index 99% rename from .github/workflows/docs.yml rename to .github/workflows/docs-deploy-firebase.yml index 9d22e44cf13..078c2e6a1fe 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs-deploy-firebase.yml @@ -26,7 +26,7 @@ on: - '.cargo/config.toml' - '**/clippy.toml' # workflow definitions - - '.github/workflows/docs.yml' + - '.github/workflows/docs-deploy-firebase.yml' pull_request: branches: diff --git a/.github/workflows/dockerhub-description.yml b/.github/workflows/docs-dockerhub-description.yml similarity index 100% rename from .github/workflows/dockerhub-description.yml rename to .github/workflows/docs-dockerhub-description.yml diff --git a/.github/workflows/zcashd-manual-deploy.yml b/.github/workflows/manual-zcashd-deploy.yml similarity index 100% rename from .github/workflows/zcashd-manual-deploy.yml rename to 
.github/workflows/manual-zcashd-deploy.yml diff --git a/.github/workflows/release-binaries.yml b/.github/workflows/release-binaries.yml index 2fe2243e0b5..50c28acb7f8 100644 --- a/.github/workflows/release-binaries.yml +++ b/.github/workflows/release-binaries.yml @@ -1,5 +1,5 @@ # This workflow is meant to trigger a build of Docker binaries when a release -# is published, it uses the existing `build-docker-image.yml` workflow +# is published, it uses the existing `sub-build-docker-image.yml` workflow # # We use a separate action as we might want to trigger this under # different circumstances than a Continuous Deployment, for example. @@ -21,7 +21,7 @@ jobs: # The image will be named `zebra:` build: name: Build Release Docker - uses: ./.github/workflows/build-docker-image.yml + uses: ./.github/workflows/sub-build-docker-image.yml with: dockerfile_path: ./docker/Dockerfile dockerfile_target: runtime @@ -33,7 +33,7 @@ jobs: # The image will be named `zebra:.experimental` build-mining-testnet: name: Build Release Testnet Mining Docker - uses: ./.github/workflows/build-docker-image.yml + uses: ./.github/workflows/sub-build-docker-image.yml with: dockerfile_path: ./docker/Dockerfile dockerfile_target: runtime diff --git a/.github/workflows/release-drafter.yml b/.github/workflows/release-drafter.yml index d3987e0a048..39e1ba7bde2 100644 --- a/.github/workflows/release-drafter.yml +++ b/.github/workflows/release-drafter.yml @@ -37,6 +37,7 @@ jobs: - uses: release-drafter/release-drafter@v5 with: config-name: release-drafter.yml + commitish: main #disable-autolabeler: true env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/build-docker-image.yml b/.github/workflows/sub-build-docker-image.yml similarity index 100% rename from .github/workflows/build-docker-image.yml rename to .github/workflows/sub-build-docker-image.yml diff --git a/.github/workflows/zcash-lightwalletd.patch.yml b/.github/workflows/sub-build-lightwalletd.patch.yml similarity index 
85% rename from .github/workflows/zcash-lightwalletd.patch.yml rename to .github/workflows/sub-build-lightwalletd.patch.yml index 1c4413c2975..c126aec0ffb 100644 --- a/.github/workflows/zcash-lightwalletd.patch.yml +++ b/.github/workflows/sub-build-lightwalletd.patch.yml @@ -1,4 +1,4 @@ -name: zcash-lightwalletd +name: Build lightwalletd # When the real job doesn't run because the files aren't changed, # run a fake CI job to satisfy the branch protection rules. @@ -10,7 +10,7 @@ on: - 'zebrad/src/config.rs' - 'zebrad/src/commands/start.rs' - 'docker/zcash-lightwalletd/Dockerfile' - - '.github/workflows/zcash-lightwalletd.yml' + - '.github/workflows/sub-build-lightwalletd.yml' jobs: build: diff --git a/.github/workflows/zcash-lightwalletd.yml b/.github/workflows/sub-build-lightwalletd.yml similarity index 97% rename from .github/workflows/zcash-lightwalletd.yml rename to .github/workflows/sub-build-lightwalletd.yml index 7ef1786e1f6..adce75ed7b7 100644 --- a/.github/workflows/zcash-lightwalletd.yml +++ b/.github/workflows/sub-build-lightwalletd.yml @@ -1,6 +1,6 @@ # TODO: we should stop using this build approach with lightwalletd and move to using our # reusable workflow to building all the docker images of our repo -name: zcash-lightwalletd +name: Build lightwalletd # Ensures that only one workflow task will run at a time. Previous builds, if # already in process, will get cancelled. 
Only the latest commit will be allowed @@ -29,7 +29,7 @@ on: - 'zebrad/src/commands/start.rs' # these workflow definitions actually change the docker image - 'docker/zcash-lightwalletd/Dockerfile' - - '.github/workflows/zcash-lightwalletd.yml' + - '.github/workflows/sub-build-lightwalletd.yml' # Update the lightwalletd image when each related PR changes pull_request: @@ -42,7 +42,7 @@ on: - 'zebrad/src/commands/start.rs' # these workflow definitions actually change the docker image - 'docker/zcash-lightwalletd/Dockerfile' - - '.github/workflows/zcash-lightwalletd.yml' + - '.github/workflows/sub-build-lightwalletd.yml' env: IMAGE_NAME: lightwalletd diff --git a/.github/workflows/zcash-params.yml b/.github/workflows/sub-build-zcash-params.yml similarity index 86% rename from .github/workflows/zcash-params.yml rename to .github/workflows/sub-build-zcash-params.yml index 28bcea9a424..ee08b6fd3c2 100644 --- a/.github/workflows/zcash-params.yml +++ b/.github/workflows/sub-build-zcash-params.yml @@ -1,4 +1,4 @@ -name: zcash-params +name: Build zcash-params # Ensures that only one workflow task will run at a time. Previous deployments, if # already in process, won't get cancelled. 
Instead, we let the first to complete @@ -28,13 +28,13 @@ on: # workflow definitions - 'docker/zcash-params/Dockerfile' - '.dockerignore' - - '.github/workflows/zcash-params.yml' - - '.github/workflows/build-docker-image.yml' + - '.github/workflows/sub-build-zcash-params.yml' + - '.github/workflows/sub-build-docker-image.yml' jobs: build: name: Build Zcash Params Docker - uses: ./.github/workflows/build-docker-image.yml + uses: ./.github/workflows/sub-build-docker-image.yml with: dockerfile_path: ./docker/zcash-params/Dockerfile dockerfile_target: release diff --git a/.github/workflows/deploy-gcp-tests.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml similarity index 99% rename from .github/workflows/deploy-gcp-tests.yml rename to .github/workflows/sub-deploy-integration-tests-gcp.yml index cd0076c2e19..e718eaa43af 100644 --- a/.github/workflows/deploy-gcp-tests.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -1,4 +1,4 @@ -name: Deploy GCP tests +name: Deploy Tests to GCP on: workflow_call: @@ -311,7 +311,7 @@ jobs: # Passes the disk name to subsequent steps using $CACHED_DISK_NAME env variable # Passes the state version to subsequent steps using $STATE_VERSION env variable # - # TODO: move this script into a file, and call it from find-cached-disks.yml as well. + # TODO: move this script into a file, and call it from sub-find-cached-disks.yml as well. - name: Find ${{ inputs.test_id }} cached state disk id: get-disk-name run: | @@ -414,7 +414,7 @@ jobs: # - /var/cache/zebrad-cache -> ${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} -> $ZEBRA_CACHED_STATE_DIR # # This path must match the variable used by the tests in Rust, which are also set in - # `continous-integration-docker.yml` to be able to run this tests. + # `ci-unit-tests-docker.yml` to be able to run this tests. # # Although we're mounting the disk root, Zebra will only respect the values from # $ZEBRA_CACHED_STATE_DIR. 
The inputs like ${{ inputs.zebra_state_dir }} are only used @@ -485,7 +485,7 @@ jobs: # delete the whole cache directory.) # # This paths must match the variables used by the tests in Rust, which are also set in - # `continous-integration-docker.yml` to be able to run this tests. + # `ci-unit-tests-docker.yml` to be able to run this tests. # # Although we're mounting the disk root to both directories, Zebra and Lightwalletd # will only respect the values from $ZEBRA_CACHED_STATE_DIR and $LIGHTWALLETD_DATA_DIR, diff --git a/.github/workflows/find-cached-disks.yml b/.github/workflows/sub-find-cached-disks.yml similarity index 100% rename from .github/workflows/find-cached-disks.yml rename to .github/workflows/sub-find-cached-disks.yml diff --git a/.github/workflows/sub-test-zebra-config.yml b/.github/workflows/sub-test-zebra-config.yml new file mode 100644 index 00000000000..d8e856f0748 --- /dev/null +++ b/.github/workflows/sub-test-zebra-config.yml @@ -0,0 +1,79 @@ +name: Test Zebra Config Files + +on: + workflow_call: + inputs: + # Status and logging + test_id: + required: true + type: string + description: 'Unique identifier for the test' + grep_patterns: + required: true + type: string + description: 'Patterns to grep for in the logs' + + # Test selection and parameters + docker_image: + required: true + type: string + description: 'Docker image to test' + test_variables: + required: true + type: string + description: 'Environmental variables used to select and configure the test' + network: + required: false + type: string + default: Mainnet + description: 'Zcash network to test against' + +jobs: + test-docker-config: + name: Test ${{ inputs.test_id }} in Docker + timeout-minutes: 15 + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4.1.0 + with: + persist-credentials: false + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Run ${{ 
inputs.test_id }} test + run: | + docker pull ${{ inputs.docker_image }} + docker run ${{ inputs.test_variables }} --detach --name ${{ inputs.test_id }} -t ${{ inputs.docker_image }} zebrad start + # Use a subshell to handle the broken pipe error gracefully + ( + trap "" PIPE; + docker logs \ + --tail all \ + --follow \ + ${{ inputs.test_id }} | \ + tee --output-error=exit /dev/stderr | \ + grep --max-count=1 --extended-regexp --color=always \ + ${{ inputs.grep_patterns }} + ) || true + LOGS_EXIT_STATUS=$? + + docker stop ${{ inputs.test_id }} + + EXIT_STATUS=$(docker wait ${{ inputs.test_id }} || echo "Error retrieving exit status"); + echo "docker exit status: $EXIT_STATUS"; + + # If grep found the pattern, exit with the Docker container exit status + if [ $LOGS_EXIT_STATUS -eq 0 ]; then + exit $EXIT_STATUS; + fi + + # Handle other potential errors here + echo "An error occurred while processing the logs."; + exit 1; + env: + NETWORK: '${{ inputs.network }}' diff --git a/book/src/dev/continuous-integration.md b/book/src/dev/continuous-integration.md index 3a7726fb3e0..895085f6395 100644 --- a/book/src/dev/continuous-integration.md +++ b/book/src/dev/continuous-integration.md @@ -90,7 +90,7 @@ This means that the entire workflow must be re-run when a single test fails. 1. Look for the earliest job that failed, and find the earliest failure. For example, this failure doesn't tell us what actually went wrong: -> Error: The template is not valid. ZcashFoundation/zebra/.github/workflows/build-docker-image.yml@8bbc5b21c97fafc83b70fbe7f3b5e9d0ffa19593 (Line: 52, Col: 19): Error reading JToken from JsonReader. Path '', line 0, position 0. +> Error: The template is not valid. ZcashFoundation/zebra/.github/workflows/sub-build-docker-image.yml@8bbc5b21c97fafc83b70fbe7f3b5e9d0ffa19593 (Line: 52, Col: 19): Error reading JToken from JsonReader. Path '', line 0, position 0. 
https://github.com/ZcashFoundation/zebra/runs/8181760421?check_suite_focus=true#step:41:4 diff --git a/docker/Dockerfile b/docker/Dockerfile index 2b4c770e328..f27ff99fa71 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -13,7 +13,7 @@ # # Build zebrad with these features # Keep these in sync with: -# https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/build-docker-image.yml#L37 +# https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/sub-build-docker-image.yml#L37 ARG FEATURES="default-release-binaries" ARG TEST_FEATURES="lightwalletd-grpc-tests zebra-checkpoints" @@ -190,8 +190,8 @@ ARG FEATURES ENV FEATURES=${FEATURES} # Path and name of the config file -ENV ZEBRA_CONF_DIR=/etc/zebrad -ENV ZEBRA_CONF_FILE=zebrad.toml +ENV ZEBRA_CONF_DIR=${ZEBRA_CONF_DIR:-/etc/zebrad} +ENV ZEBRA_CONF_FILE=${ZEBRA_CONF_FILE:-zebrad.toml} # Expose configured ports EXPOSE 8233 18233 diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 66b8f586586..79dd0c3bbfa 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -79,7 +79,7 @@ fi : "${ENTRYPOINT_FEATURES:=}" # Configuration file path -if [[ -n "${ZEBRA_CONF_DIR}" ]] && [[ -n "${ZEBRA_CONF_FILE}" ]]; then +if [[ -n "${ZEBRA_CONF_DIR}" ]] && [[ -n "${ZEBRA_CONF_FILE}" ]] && [[ -z "${ZEBRA_CONF_PATH}" ]]; then ZEBRA_CONF_PATH="${ZEBRA_CONF_DIR}/${ZEBRA_CONF_FILE}" fi @@ -90,8 +90,7 @@ fi # Users have to opt-in to additional functionality by setting environmental variables. if [[ -n "${ZEBRA_CONF_PATH}" ]] && [[ ! 
-f "${ZEBRA_CONF_PATH}" ]] && [[ -z "${ENTRYPOINT_FEATURES}" ]]; then # Create the conf path and file - mkdir -p "${ZEBRA_CONF_DIR}" || { echo "Error creating directory ${ZEBRA_CONF_DIR}"; exit 1; } - touch "${ZEBRA_CONF_PATH}" || { echo "Error creating file ${ZEBRA_CONF_PATH}"; exit 1; } + (mkdir -p "$(dirname "${ZEBRA_CONF_PATH}")" && touch "${ZEBRA_CONF_PATH}") || { echo "Error creating file ${ZEBRA_CONF_PATH}"; exit 1; } # Populate the conf file cat < "${ZEBRA_CONF_PATH}" [network] From 01168c8571a517318bca37052ff16d7cc1689c68 Mon Sep 17 00:00:00 2001 From: Arya Date: Wed, 18 Oct 2023 02:16:29 -0400 Subject: [PATCH 11/18] change(state): Set upper bound when reading from deleting column family tx_loc_by_transparent_addr_loc (#7732) * Uses range_iter in address_transaction_locations * Uses range_iter in address_transaction_locations * uses u16::MAX instead of usize::MAX * Moves limit code into method * adds allow(dead_code) * Simplifies address_iterator_range * Moves test state init out of loop * Updates docs --- zebra-rpc/src/methods/tests/vectors.rs | 32 +++++++++++---- .../finalized_state/disk_format/block.rs | 1 - .../disk_format/transparent.rs | 40 +++++++++--------- .../finalized_state/zebra_db/transparent.rs | 41 +++---------------- 4 files changed, 51 insertions(+), 63 deletions(-) diff --git a/zebra-rpc/src/methods/tests/vectors.rs b/zebra-rpc/src/methods/tests/vectors.rs index 769d561897b..68f08c184b1 100644 --- a/zebra-rpc/src/methods/tests/vectors.rs +++ b/zebra-rpc/src/methods/tests/vectors.rs @@ -16,6 +16,7 @@ use zebra_chain::{ }; use zebra_node_services::BoxError; +use zebra_state::{LatestChainTip, ReadStateService}; use zebra_test::mock_service::MockService; use super::super::*; @@ -674,19 +675,36 @@ async fn rpc_getaddresstxids_response() { .address(network) .unwrap(); + // Create a populated state service + let (_state, read_state, latest_chain_tip, _chain_tip_change) = + zebra_state::populated_state(blocks.to_owned(), network).await; + if 
network == Mainnet { // Exhaustively test possible block ranges for mainnet. // // TODO: if it takes too long on slower machines, turn this into a proptest with 10-20 cases for start in 1..=10 { for end in start..=10 { - rpc_getaddresstxids_response_with(network, start..=end, &blocks, &address) - .await; + rpc_getaddresstxids_response_with( + network, + start..=end, + &address, + &read_state, + &latest_chain_tip, + ) + .await; } } } else { // Just test the full range for testnet. - rpc_getaddresstxids_response_with(network, 1..=10, &blocks, &address).await; + rpc_getaddresstxids_response_with( + network, + 1..=10, + &address, + &read_state, + &latest_chain_tip, + ) + .await; } } } @@ -694,13 +712,11 @@ async fn rpc_getaddresstxids_response() { async fn rpc_getaddresstxids_response_with( network: Network, range: RangeInclusive, - blocks: &[Arc], address: &transparent::Address, + read_state: &ReadStateService, + latest_chain_tip: &LatestChainTip, ) { let mut mempool: MockService<_, _, _, BoxError> = MockService::build().for_unit_tests(); - // Create a populated state service - let (_state, read_state, latest_chain_tip, _chain_tip_change) = - zebra_state::populated_state(blocks.to_owned(), network).await; let (rpc, rpc_tx_queue_task_handle) = RpcImpl::new( "RPC test", @@ -710,7 +726,7 @@ async fn rpc_getaddresstxids_response_with( true, Buffer::new(mempool.clone(), 1), Buffer::new(read_state.clone(), 1), - latest_chain_tip, + latest_chain_tip.clone(), ); // call the method with valid arguments diff --git a/zebra-state/src/service/finalized_state/disk_format/block.rs b/zebra-state/src/service/finalized_state/disk_format/block.rs index 84efd60b016..03a7f648053 100644 --- a/zebra-state/src/service/finalized_state/disk_format/block.rs +++ b/zebra-state/src/service/finalized_state/disk_format/block.rs @@ -132,7 +132,6 @@ pub struct TransactionLocation { impl TransactionLocation { /// Creates a transaction location from a block height and transaction index. 
- #[allow(dead_code)] pub fn from_index(height: Height, transaction_index: u16) -> TransactionLocation { TransactionLocation { height, diff --git a/zebra-state/src/service/finalized_state/disk_format/transparent.rs b/zebra-state/src/service/finalized_state/disk_format/transparent.rs index ace95b04b0f..d3fb01c390f 100644 --- a/zebra-state/src/service/finalized_state/disk_format/transparent.rs +++ b/zebra-state/src/service/finalized_state/disk_format/transparent.rs @@ -416,36 +416,37 @@ impl AddressTransaction { } } - /// Create an [`AddressTransaction`] which starts iteration for the supplied - /// address. Starts at the first UTXO, or at the `query_start` height, - /// whichever is greater. + /// Create a range of [`AddressTransaction`]s which starts iteration for the supplied + /// address. Starts at the first UTXO, or at the `query` start height, whichever is greater. + /// Ends at the maximum possible transaction index for the end height. /// - /// Used to look up the first transaction with - /// [`ReadDisk::zs_next_key_value_from`][1]. + /// Used to look up transactions with [`DiskDb::zs_range_iter`][1]. /// - /// The transaction location might be invalid, if it is based on the - /// `query_start` height. But this is not an issue, since - /// [`ReadDisk::zs_next_key_value_from`][1] will fetch the next existing - /// (valid) value. + /// The transaction locations in the: + /// - start bound might be invalid, if it is based on the `query` start height. + /// - end bound will always be invalid. /// - /// [1]: super::super::disk_db::ReadDisk::zs_next_key_value_from - pub fn address_iterator_start( + /// But this is not an issue, since [`DiskDb::zs_range_iter`][1] will fetch all existing + /// (valid) values in the range. 
+ /// + /// [1]: super::super::disk_db::DiskDb + pub fn address_iterator_range( address_location: AddressLocation, - query_start: Height, - ) -> AddressTransaction { + query: std::ops::RangeInclusive, + ) -> std::ops::RangeInclusive { // Iterating from the lowest possible transaction location gets us the first transaction. // // The address location is the output location of the first UTXO sent to the address, // and addresses can not spend funds until they receive their first UTXO. let first_utxo_location = address_location.transaction_location(); - // Iterating from the start height filters out transactions that aren't needed. - let query_start_location = TransactionLocation::from_usize(query_start, 0); + // Iterating from the start height to the end height filters out transactions that aren't needed. + let query_start_location = TransactionLocation::from_index(*query.start(), 0); + let query_end_location = TransactionLocation::from_index(*query.end(), u16::MAX); - AddressTransaction { - address_location, - transaction_location: max(first_utxo_location, query_start_location), - } + let addr_tx = |tx_loc| AddressTransaction::new(address_location, tx_loc); + + addr_tx(max(first_utxo_location, query_start_location))..=addr_tx(query_end_location) } /// Update the transaction location to the next possible transaction for the @@ -457,6 +458,7 @@ impl AddressTransaction { /// existing (valid) value. /// /// [1]: super::super::disk_db::ReadDisk::zs_next_key_value_from + #[allow(dead_code)] pub fn address_iterator_next(&mut self) { // Iterating from the next possible output location gets us the next output, // even if it is in a later block or transaction. 
diff --git a/zebra-state/src/service/finalized_state/zebra_db/transparent.rs b/zebra-state/src/service/finalized_state/zebra_db/transparent.rs index 2f80751687f..2e8d6c3980a 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/transparent.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/transparent.rs @@ -235,44 +235,15 @@ impl ZebraDb { let tx_loc_by_transparent_addr_loc = self.db.cf_handle("tx_loc_by_transparent_addr_loc").unwrap(); - // Manually fetch the entire addresses' transaction locations - let mut addr_transactions = BTreeSet::new(); - // A potentially invalid key representing the first UTXO send to the address, // or the query start height. - let mut transaction_location = AddressTransaction::address_iterator_start( - address_location, - *query_height_range.start(), - ); - - loop { - // Seek to a valid entry for this address, or the first entry for the next address - transaction_location = match self - .db - .zs_next_key_value_from(&tx_loc_by_transparent_addr_loc, &transaction_location) - { - Some((transaction_location, ())) => transaction_location, - // We're finished with the final address in the column family - None => break, - }; - - // We found the next address, so we're finished with this address - if transaction_location.address_location() != address_location { - break; - } - - // We're past the end height, so we're finished with this query - if transaction_location.transaction_location().height > *query_height_range.end() { - break; - } + let transaction_location_range = + AddressTransaction::address_iterator_range(address_location, query_height_range); - addr_transactions.insert(transaction_location); - - // A potentially invalid key representing the next possible output - transaction_location.address_iterator_next(); - } - - addr_transactions + self.db + .zs_range_iter(&tx_loc_by_transparent_addr_loc, transaction_location_range) + .map(|(tx_loc, ())| tx_loc) + .collect() } // Address index queries From 
290ccf2d94e68b80008517614b4f3873e06a93fd Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 19 Oct 2023 01:20:54 +1000 Subject: [PATCH 12/18] Update README.md for known progress bar issue (#7761) --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index 2730399617f..61a63ef5942 100644 --- a/README.md +++ b/README.md @@ -124,6 +124,8 @@ Configure `tracing.progress_bar` in your `zebrad.toml` to [show key metrics in the terminal using progress bars](https://zfnd.org/experimental-zebra-progress-bars/). When progress bars are active, Zebra automatically sends logs to a file. +There is a known issue where [progress bar estimates become extremely large](https://github.com/console-rs/indicatif/issues/556). + In future releases, the `progress_bar = "summary"` config will show a few key metrics, and the "detailed" config will show all available metrics. Please let us know which metrics are important to you! @@ -157,6 +159,8 @@ performance. There are a few bugs in Zebra that we're still working on fixing: +- [Progress bar estimates can become extremely large](https://github.com/console-rs/indicatif/issues/556). We're waiting on a fix in the progress bar library. + - Zebra currently gossips and connects to [private IP addresses](https://en.wikipedia.org/wiki/IP_address#Private_addresses), we want to [disable private IPs but provide a config (#3117)](https://github.com/ZcashFoundation/zebra/issues/3117) in an upcoming release - If Zebra fails downloading the Zcash parameters, use [the Zcash parameters download script](https://github.com/zcash/zcash/blob/master/zcutil/fetch-params.sh) instead. 
From d6f4d3170679566ccac6db20d2b405edbbb7c828 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Thu, 19 Oct 2023 12:52:14 +0100 Subject: [PATCH 13/18] ref(workflows): use a single job to run GCP tests (#7682) * ref(workflows): consolidate workflows based on their purpose This also renames the workflows to make their naming more consistent and adding a naming convention Fixes: #6166 Fixes: #6167 * fix(workflows): use correct name for patch * fix(workflow): docker unit tests * fix(release): validation error Error: ``` Validation Failed: {"resource":"Release","code":"invalid","field":"target_commitish"} ``` Fixes: https://github.com/release-drafter/release-drafter/issues/1125 * fix(workflows): reference correct name * fix: remove extra workflow * fix(workflows): use larger runners * fix(workflow): remove code already in docker unit-test * fix(unit-tests): start zebra the right way * fix: typo in patch name * chore: move job to logical order * imp(workflows): use better name for gcp tests * ref(workflow): use a single job to run GCP tests * fix(ci): do not format existing cached states if required * test: wait for the instance to be fully ready * fix(ci): use correct logic before formatting * fix: use correct condition * fix: get more information * fix(ci): use better shell handling and upgrade OS * fix(ci): use better approach * fix: `$DISK_OPTION` is not being correctly passed * fix typo * fix: more typos * fix: use busybox * fix: mount Docker volume at run and not before * fix: use correct condition and simpler while * add: missing merge changes * chore: use better name for find-disks * fix(ci): use the `entrypoint.sh` to change the Network * fix(ci): add missing `ZEBRA_CONF_PATH` variable * fix(ci): consider new updates to jobs * fix(ci): allow to build the entrypoint file with testnet * fix(entrypoint): allow to create a dir and file with a single variable * fix(ci): add missing jobs to `failure-issue` * Apply suggestions from code review Co-authored-by: 
teor * fix(ci): use better comment * refactor: test config file in CI and CD with a reusable workflow * fix(ci): wrong name used * fix(ci): use checkout * fix(ci): improve docker config tests * fix(ci): use better name for protection rules * chore: missing merge conflicts --------- Co-authored-by: teor --- .../sub-deploy-integration-tests-gcp.yml | 322 +++--------------- 1 file changed, 48 insertions(+), 274 deletions(-) diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index e718eaa43af..336bbe6b360 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -104,146 +104,14 @@ env: CACHED_STATE_UPDATE_LIMIT: 576 jobs: - # set up and launch the test, if it doesn't use any cached state - # each test runs one of the *-with/without-cached-state job series, and skips the other - launch-without-cached-state: - name: Launch ${{ inputs.test_id }} test - if: ${{ !inputs.needs_zebra_state }} - runs-on: zfnd-runners - permissions: - contents: 'read' - id-token: 'write' - steps: - - uses: actions/checkout@v4.1.0 - with: - persist-credentials: false - fetch-depth: '2' - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - # Makes the Zcash network name lowercase. - # - # Labels in GCP are required to be in lowercase, but the blockchain network - # uses sentence case, so we need to downcase ${{ inputs.network }}. - # - # Passes ${{ inputs.network }} to subsequent steps using $NETWORK env variable. 
- - name: Downcase network name for labels - run: | - NETWORK_CAPS="${{ inputs.network }}" - echo "NETWORK=${NETWORK_CAPS,,}" >> "$GITHUB_ENV" - - # Install our SSH secret - - name: Install private SSH key - uses: shimataro/ssh-key-action@v2.5.1 - with: - key: ${{ secrets.GCP_SSH_PRIVATE_KEY }} - name: google_compute_engine - known_hosts: unnecessary - - - name: Generate public SSH key - run: | - sudo apt-get update && sudo apt-get -qq install -y --no-install-recommends openssh-client - ssh-keygen -y -f ~/.ssh/google_compute_engine > ~/.ssh/google_compute_engine.pub - - # Setup gcloud CLI - - name: Authenticate to Google Cloud - id: auth - uses: google-github-actions/auth@v1.1.1 - with: - retries: '3' - workload_identity_provider: '${{ vars.GCP_WIF }}' - service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - - - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v1.1.1 - - # Create a Compute Engine virtual machine - - name: Create ${{ inputs.test_id }} GCP compute instance - id: create-instance - run: | - gcloud compute instances create-with-container "${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ - --boot-disk-size 50GB \ - --boot-disk-type pd-ssd \ - --image-project=cos-cloud \ - --image-family=cos-stable \ - --create-disk=name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=400GB,type=pd-ssd \ - --container-image=gcr.io/google-containers/busybox \ - --machine-type ${{ vars.GCP_LARGE_MACHINE }} \ - --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ - --scopes cloud-platform \ - --metadata=google-monitoring-enabled=TRUE,google-logging-enabled=TRUE \ - --metadata-from-file=startup-script=.github/workflows/scripts/gcp-vm-startup-script.sh \ - --labels=app=${{ inputs.app_name }},environment=test,network=${NETWORK},github_ref=${{ env.GITHUB_REF_SLUG_URL }},test=${{ inputs.test_id }} \ - --tags ${{ inputs.app_name }} \ - --zone ${{ 
vars.GCP_ZONE }} - - # Format the mounted disk if the test doesn't use a cached state. - - name: Format ${{ inputs.test_id }} volume - shell: /usr/bin/bash -exo pipefail {0} - run: | - gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ - --zone ${{ vars.GCP_ZONE }} \ - --ssh-flag="-o ServerAliveInterval=5" \ - --ssh-flag="-o ConnectionAttempts=20" \ - --ssh-flag="-o ConnectTimeout=5" \ - --command=' \ - set -ex; - # Extract the correct disk name based on the device-name - DISK_NAME=$(ls -l /dev/disk/by-id | grep -oE "google-${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} -> ../../[^ ]+" | grep -oE "/[^/]+$" | cut -c 2-); - sudo mkfs.ext4 -v /dev/$DISK_NAME \ - ' - - # Launch the test without any cached state - - name: Launch ${{ inputs.test_id }} test - id: launch-test - shell: /usr/bin/bash -exo pipefail {0} - run: | - gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ - --zone ${{ vars.GCP_ZONE }} \ - --ssh-flag="-o ServerAliveInterval=5" \ - --ssh-flag="-o ConnectionAttempts=20" \ - --ssh-flag="-o ConnectTimeout=5" \ - --command=' \ - set -ex; - # Extract the correct disk name based on the device-name - export DISK_NAME=$(ls -l /dev/disk/by-id | grep -oE "google-${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} -> ../../[^ ]+" | grep -oE "/[^/]+$" | cut -c 2-); \ - - sudo docker run \ - --name ${{ inputs.test_id }} \ - --tty \ - --detach \ - ${{ inputs.test_variables }} \ - --mount type=volume,volume-driver=local,volume-opt=device=/dev/$DISK_NAME,volume-opt=type=ext4,dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} \ - ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} \ - ' - - # Show debug logs if previous job failed - - name: Show debug logs if previous job failed - if: ${{ failure() }} - shell: /usr/bin/bash -exo pipefail {0} - run: | - gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ 
env.GITHUB_SHA_SHORT }} \ - --zone ${{ vars.GCP_ZONE }} \ - --ssh-flag="-o ServerAliveInterval=5" \ - --ssh-flag="-o ConnectionAttempts=20" \ - --ssh-flag="-o ConnectTimeout=5" \ - --command=' \ - lsblk; - sudo lsof /dev/$DISK_NAME; - sudo dmesg; - sudo journalctl -b \ - ' - - # set up and launch the test, if it uses cached state - # each test runs one of the *-with/without-cached-state job series, and skips the other - launch-with-cached-state: - name: Launch ${{ inputs.test_id }} test - if: ${{ inputs.needs_zebra_state }} + # Show all the test logs, then follow the logs of the test we just launched, until it finishes. + # Then check the result of the test. + # + # If `inputs.is_long_test` is `true`, the timeout is 5 days, otherwise it's 3 hours. + test-result: + name: Run ${{ inputs.test_id }} test runs-on: zfnd-runners + timeout-minutes: ${{ inputs.is_long_test && 7200 || 180 }} outputs: cached_disk_name: ${{ steps.get-disk-name.outputs.cached_disk_name }} permissions: @@ -314,6 +182,7 @@ jobs: # TODO: move this script into a file, and call it from sub-find-cached-disks.yml as well. - name: Find ${{ inputs.test_id }} cached state disk id: get-disk-name + if: ${{ inputs.needs_zebra_state || inputs.needs_lwd_state }} run: | set -x LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" "$GITHUB_WORKSPACE/zebra-state/src/constants.rs" | grep -oE "[0-9]+" | tail -n1) @@ -381,18 +250,21 @@ jobs: echo "STATE_VERSION=$LOCAL_STATE_VERSION" >> "$GITHUB_ENV" echo "CACHED_DISK_NAME=$CACHED_DISK_NAME" >> "$GITHUB_ENV" + echo "DISK_OPTION=image=$CACHED_DISK_NAME," >> "$GITHUB_ENV" # Create a Compute Engine virtual machine and attach a cached state disk using the # $CACHED_DISK_NAME variable as the source image to populate the disk cached state + # if the test needs it. 
- name: Create ${{ inputs.test_id }} GCP compute instance id: create-instance + shell: /usr/bin/bash -x {0} run: | gcloud compute instances create-with-container "${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ --boot-disk-size 50GB \ --boot-disk-type pd-ssd \ --image-project=cos-cloud \ --image-family=cos-stable \ - --create-disk=image=${{ env.CACHED_DISK_NAME }},name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=400GB,type=pd-ssd \ + --create-disk=${DISK_OPTION}name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=400GB,type=pd-ssd \ --container-image=gcr.io/google-containers/busybox \ --machine-type ${{ vars.GCP_LARGE_MACHINE }} \ --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ @@ -403,29 +275,10 @@ jobs: --tags ${{ inputs.app_name }} \ --zone ${{ vars.GCP_ZONE }} - # Launch the test with the previously created Zebra-only cached state. - # Each test runs one of the "Launch test" steps, and skips the other. - # - # SSH into the just created VM, and create a Docker container to run the incoming test - # from ${{ inputs.test_id }}, then mount the sudo docker volume created in the previous job. - # - # The disk mounted in the VM is located at /dev/$DISK_NAME, we mount the root `/` of this disk to the docker - # container in one path: - # - /var/cache/zebrad-cache -> ${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} -> $ZEBRA_CACHED_STATE_DIR - # - # This path must match the variable used by the tests in Rust, which are also set in - # `ci-unit-tests-docker.yml` to be able to run this tests. - # - # Although we're mounting the disk root, Zebra will only respect the values from - # $ZEBRA_CACHED_STATE_DIR. The inputs like ${{ inputs.zebra_state_dir }} are only used - # to match that variable paths. 
- - name: Launch ${{ inputs.test_id }} test - # This step only runs for tests that just read or write a Zebra state. - # - # lightwalletd-full-sync reads Zebra and writes lwd, so it is handled specially. - # TODO: we should find a better logic for this use cases - if: ${{ (inputs.needs_zebra_state && !inputs.needs_lwd_state) && inputs.test_id != 'lwd-full-sync' }} - shell: /usr/bin/bash -exo pipefail {0} + # Format the mounted disk if the test doesn't use a cached state. + - name: Format ${{ inputs.test_id }} volume + if: ${{ !inputs.needs_zebra_state && !inputs.needs_lwd_state }} + shell: /usr/bin/bash -ex {0} run: | gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ --zone ${{ vars.GCP_ZONE }} \ @@ -435,36 +288,14 @@ jobs: --command=' \ set -ex; # Extract the correct disk name based on the device-name - export DISK_NAME=$(ls -l /dev/disk/by-id | grep -oE "google-${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} -> ../../[^ ]+" | grep -oE "/[^/]+$" | cut -c 2-); \ - - sudo docker run \ - --name ${{ inputs.test_id }} \ - --tty \ - --detach \ - ${{ inputs.test_variables }} \ - --mount type=volume,volume-driver=local,volume-opt=device=/dev/$DISK_NAME,volume-opt=type=ext4,dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} \ - ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} \ - ' - - # Show debug logs if previous job failed - - name: Show debug logs if previous job failed - if: ${{ failure() && (inputs.needs_zebra_state && !inputs.needs_lwd_state) && inputs.test_id != 'lwd-full-sync' }} - shell: /usr/bin/bash -exo pipefail {0} - run: | - gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ - --zone ${{ vars.GCP_ZONE }} \ - --ssh-flag="-o ServerAliveInterval=5" \ - --ssh-flag="-o ConnectionAttempts=20" \ - --ssh-flag="-o ConnectTimeout=5" \ - --command=' \ - lsblk; - sudo lsof /dev/$DISK_NAME; - sudo dmesg; - sudo journalctl -b 
\ + DISK_NAME=$(ls -l /dev/disk/by-id | grep -oE "google-${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} -> ../../[^ ]+" | grep -oE "/[^/]+$" | cut -c 2-); + sudo mkfs.ext4 -v /dev/$DISK_NAME \ ' - # Launch the test with the previously created Lightwalletd and Zebra cached state. - # Each test runs one of the "Launch test" steps, and skips the other. + # Launch the test with the previously created disk or cached state. + # + # This step uses a $MOUNT_FLAGS variable to mount the disk to the docker container. + # If the test needs Lightwalletd state, we add the Lightwalletd state mount to the $MOUNT_FLAGS variable. # # SSH into the just created VM, and create a Docker container to run the incoming test # from ${{ inputs.test_id }}, then mount the sudo docker volume created in the previous job. @@ -473,8 +304,8 @@ jobs: # VM and to the container might require more steps in this workflow, and additional # considerations. # - # The disk mounted in the VM is located at /dev/$DISK_NAME, we want the root `/` of this disk to be - # available in the docker container at two different paths: + # The disk mounted in the VM is located at /dev/$DISK_NAME, we mount the root `/` of this disk to the docker + # container, and might have two different paths (if lightwalletd state is needed): # - /var/cache/zebrad-cache -> ${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} -> $ZEBRA_CACHED_STATE_DIR # - /var/cache/lwd-cache -> ${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }} -> $LIGHTWALLETD_DATA_DIR # @@ -484,19 +315,16 @@ jobs: # subdirectories for their data. (But Zebra, lightwalletd, and the test harness must not # delete the whole cache directory.) # - # This paths must match the variables used by the tests in Rust, which are also set in + # These paths must match the variables used by the tests in Rust, which are also set in # `ci-unit-tests-docker.yml` to be able to run this tests. 
# # Although we're mounting the disk root to both directories, Zebra and Lightwalletd # will only respect the values from $ZEBRA_CACHED_STATE_DIR and $LIGHTWALLETD_DATA_DIR, - # the inputs like ${{ inputs.lwd_state_dir }} are only used to match those variables paths. + # the inputs like ${{ inputs.zebra_state_dir }} and ${{ inputs.lwd_state_dir }} + # are only used to match those variables paths. - name: Launch ${{ inputs.test_id }} test - # This step only runs for tests that read or write Lightwalletd and Zebra states. - # - # lightwalletd-full-sync reads Zebra and writes lwd, so it is handled specially. - # TODO: we should find a better logic for this use cases - if: ${{ (inputs.needs_zebra_state && inputs.needs_lwd_state) || inputs.test_id == 'lwd-full-sync' }} - shell: /usr/bin/bash -exo pipefail {0} + id: launch-test + shell: /usr/bin/bash -x {0} run: | gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ --zone ${{ vars.GCP_ZONE }} \ @@ -504,24 +332,31 @@ jobs: --ssh-flag="-o ConnectionAttempts=20" \ --ssh-flag="-o ConnectTimeout=5" \ --command=' \ - set -ex; + # Extract the correct disk name based on the device-name - export DISK_NAME=$(ls -l /dev/disk/by-id | grep -oE "google-${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} -> ../../[^ ]+" | grep -oE "/[^/]+$" | cut -c 2-); \ + DISK_NAME=$(ls -l /dev/disk/by-id | grep -oE "google-${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} -> ../../[^ ]+" | grep -oE "/[^/]+$" | cut -c 2-) + + MOUNT_FLAGS="--mount type=volume,volume-driver=local,volume-opt=device=/dev/$DISK_NAME,volume-opt=type=ext4,dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }}" + + # Check if we need to mount for Lightwalletd state + # lightwalletd-full-sync reads Zebra and writes lwd, so it is handled specially. 
+ if [[ "${{ inputs.needs_lwd_state }}" == "true" || "${{ inputs.test_id }}" == "lwd-full-sync" ]]; then + MOUNT_FLAGS="$MOUNT_FLAGS --mount type=volume,volume-driver=local,volume-opt=device=/dev/$DISK_NAME,volume-opt=type=ext4,dst=${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }}" + fi sudo docker run \ --name ${{ inputs.test_id }} \ --tty \ --detach \ ${{ inputs.test_variables }} \ - --mount type=volume,volume-driver=local,volume-opt=device=/dev/$DISK_NAME,volume-opt=type=ext4,dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} \ - --mount type=volume,volume-driver=local,volume-opt=device=/dev/$DISK_NAME,volume-opt=type=ext4,dst=${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }} \ + ${MOUNT_FLAGS} \ ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} \ ' # Show debug logs if previous job failed - name: Show debug logs if previous job failed - if: ${{ failure() && (inputs.needs_zebra_state && inputs.needs_lwd_state) || inputs.test_id == 'lwd-full-sync' }} - shell: /usr/bin/bash -exo pipefail {0} + if: ${{ failure() }} + shell: /usr/bin/bash -x {0} run: | gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ --zone ${{ vars.GCP_ZONE }} \ @@ -535,58 +370,6 @@ jobs: sudo journalctl -b \ ' - # Show all the test logs, then follow the logs of the test we just launched, until it finishes. - # Then check the result of the test. - # - # If `inputs.is_long_test` is `true`, the timeout is 5 days, otherwise it's 3 hours. - test-result: - name: Run ${{ inputs.test_id }} test - # We run exactly one of without-cached-state or with-cached-state, and we always skip the other one. - needs: [ launch-with-cached-state, launch-without-cached-state ] - # If the previous job fails, we also want to run and fail this job, - # so that the branch protection rule fails in Mergify and GitHub. 
- if: ${{ !cancelled() }} - timeout-minutes: ${{ inputs.is_long_test && 7200 || 180 }} - runs-on: zfnd-runners - permissions: - contents: 'read' - id-token: 'write' - steps: - - uses: actions/checkout@v4.1.0 - with: - persist-credentials: false - fetch-depth: '2' - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - # Install our SSH secret - - name: Install private SSH key - uses: shimataro/ssh-key-action@v2.5.1 - with: - key: ${{ secrets.GCP_SSH_PRIVATE_KEY }} - name: google_compute_engine - known_hosts: unnecessary - - - name: Generate public SSH key - run: | - sudo apt-get update && sudo apt-get -qq install -y --no-install-recommends openssh-client - ssh-keygen -y -f ~/.ssh/google_compute_engine > ~/.ssh/google_compute_engine.pub - - # Setup gcloud CLI - - name: Authenticate to Google Cloud - id: auth - uses: google-github-actions/auth@v1.1.1 - with: - retries: '3' - workload_identity_provider: '${{ vars.GCP_WIF }}' - service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - - - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v1.1.1 - # Show all the logs since the container launched, # following until we see zebrad startup messages. # @@ -600,7 +383,7 @@ jobs: # # Errors in the tests are caught by the final test status job. - name: Check startup logs for ${{ inputs.test_id }} - shell: /usr/bin/bash -exo pipefail {0} + shell: /usr/bin/bash -x {0} run: | gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ --zone ${{ vars.GCP_ZONE }} \ @@ -608,10 +391,6 @@ jobs: --ssh-flag="-o ConnectionAttempts=20" \ --ssh-flag="-o ConnectTimeout=5" \ --command=' \ - trap "" PIPE; - - # Temporarily disable "set -e" to handle the broken pipe error gracefully - set +e; sudo docker logs \ --tail all \ --follow \ @@ -633,7 +412,7 @@ jobs: # with that status. # (`docker wait` can also wait for multiple containers, but we only ever wait for a single container.) 
- name: Result of ${{ inputs.test_id }} test - shell: /usr/bin/bash -exo pipefail {0} + shell: /usr/bin/bash -x {0} run: | gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ --zone ${{ vars.GCP_ZONE }} \ @@ -641,10 +420,6 @@ jobs: --ssh-flag="-o ConnectionAttempts=20" \ --ssh-flag="-o ConnectTimeout=5" \ --command=' \ - trap "" PIPE; - - # Temporarily disable "set -e" to handle the broken pipe error gracefully - set +e; sudo docker logs \ --tail all \ --follow \ @@ -653,7 +428,6 @@ jobs: grep --max-count=1 --extended-regexp --color=always \ "test result: .*ok.* [1-9][0-9]* passed.*finished in"; LOGS_EXIT_STATUS=$?; - set -e; EXIT_STATUS=$(sudo docker wait ${{ inputs.test_id }} || echo "Error retrieving exit status"); echo "sudo docker exit status: $EXIT_STATUS"; @@ -672,7 +446,7 @@ jobs: create-state-image: name: Create ${{ inputs.test_id }} cached state image runs-on: ubuntu-latest - needs: [ test-result, launch-with-cached-state ] + needs: [ test-result ] # We run exactly one of without-cached-state or with-cached-state, and we always skip the other one. # Normally, if a job is skipped, all the jobs that depend on it are also skipped. # So we need to override the default success() check to make this job run. @@ -779,7 +553,7 @@ jobs: # Passes the versions to subsequent steps using the $INITIAL_DISK_DB_VERSION, # $RUNNING_DB_VERSION, and $DB_VERSION_SUMMARY env variables. - name: Get database versions from logs - shell: /usr/bin/bash -exo pipefail {0} + shell: /usr/bin/bash -x {0} run: | INITIAL_DISK_DB_VERSION="" RUNNING_DB_VERSION="" @@ -869,7 +643,7 @@ jobs: # # Passes the sync height to subsequent steps using the $SYNC_HEIGHT env variable. 
- name: Get sync height from logs - shell: /usr/bin/bash -exo pipefail {0} + shell: /usr/bin/bash -x {0} run: | SYNC_HEIGHT="" @@ -917,7 +691,7 @@ jobs: - name: Get original cached state height from google cloud run: | ORIGINAL_HEIGHT="0" - ORIGINAL_DISK_NAME="${{ format('{0}', needs.launch-with-cached-state.outputs.cached_disk_name) }}" + ORIGINAL_DISK_NAME="${{ format('{0}', needs.test-result.outputs.cached_disk_name) }}" if [[ -n "$ORIGINAL_DISK_NAME" ]]; then ORIGINAL_HEIGHT=$(gcloud compute images list --filter="status=READY AND name=$ORIGINAL_DISK_NAME" --format="value(labels.height)") From 2f14efac245f8fd6e6d4ba71836156fc778a420f Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 19 Oct 2023 22:30:53 +1000 Subject: [PATCH 14/18] fix(ci): Add missing timeouts to CI and CD tests (#7730) * Add missing timeouts to CI and CD tests * Remove timeout keys that are not allowed by the syntax --- .github/workflows/ci-build-crates.yml | 1 + .github/workflows/ci-unit-tests-docker.yml | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/.github/workflows/ci-build-crates.yml b/.github/workflows/ci-build-crates.yml index e12f6031cb8..8adf9e08047 100644 --- a/.github/workflows/ci-build-crates.yml +++ b/.github/workflows/ci-build-crates.yml @@ -99,6 +99,7 @@ jobs: build: name: Build ${{ matrix.crate }} crate + timeout-minutes: 90 needs: [ matrix, check-matrix ] runs-on: ubuntu-latest strategy: diff --git a/.github/workflows/ci-unit-tests-docker.yml b/.github/workflows/ci-unit-tests-docker.yml index fcd8e74d41f..bf15ce55930 100644 --- a/.github/workflows/ci-unit-tests-docker.yml +++ b/.github/workflows/ci-unit-tests-docker.yml @@ -91,6 +91,7 @@ jobs: # TODO: turn this test and the getblocktemplate test into a matrix, so the jobs use exactly the same diagnostics settings test-all: name: Test all + timeout-minutes: 180 runs-on: ubuntu-latest-xl needs: build steps: @@ -144,6 +145,7 @@ jobs: # (The gRPC feature is a zebrad feature, so it isn't needed here.) 
test-fake-activation-heights: name: Test with fake activation heights + timeout-minutes: 60 runs-on: ubuntu-latest needs: build steps: @@ -167,6 +169,7 @@ jobs: # (We activate the gRPC feature to avoid recompiling `zebrad`, but we don't actually run any gRPC tests.) test-empty-sync: name: Test checkpoint sync from empty state + timeout-minutes: 60 runs-on: ubuntu-latest needs: build steps: @@ -189,6 +192,7 @@ jobs: # (We activate the gRPC feature to avoid recompiling `zebrad`, but we don't actually run any gRPC tests.) test-lightwalletd-integration: name: Test integration with lightwalletd + timeout-minutes: 60 runs-on: ubuntu-latest needs: build steps: From 92a074abdfed0e939ce79b791d0bcb7066f17b44 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Oct 2023 13:41:10 +0000 Subject: [PATCH 15/18] build(deps): bump tj-actions/changed-files from 39.2.1 to 39.2.3 (#7768) Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 39.2.1 to 39.2.3. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v39.2.1...v39.2.3) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci-lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci-lint.yml b/.github/workflows/ci-lint.yml index 1e22a99a0f2..84f7d9f3abc 100644 --- a/.github/workflows/ci-lint.yml +++ b/.github/workflows/ci-lint.yml @@ -37,7 +37,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v39.2.1 + uses: tj-actions/changed-files@v39.2.3 with: files: | **/*.rs @@ -49,7 +49,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v39.2.1 + uses: tj-actions/changed-files@v39.2.3 with: files: | .github/workflows/*.yml From 8c01fecc919bb4918eef67d6c756117e96c7b8a8 Mon Sep 17 00:00:00 2001 From: teor Date: Thu, 19 Oct 2023 23:41:23 +1000 Subject: [PATCH 16/18] Limit jobs launched by the crates workflow (#7754) --- .github/workflows/ci-build-crates.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/ci-build-crates.yml b/.github/workflows/ci-build-crates.yml index 8adf9e08047..08244bd804f 100644 --- a/.github/workflows/ci-build-crates.yml +++ b/.github/workflows/ci-build-crates.yml @@ -103,6 +103,8 @@ jobs: needs: [ matrix, check-matrix ] runs-on: ubuntu-latest strategy: + # avoid rate-limit errors by only launching a few of these jobs at a time + max-parallel: 2 fail-fast: true matrix: ${{ fromJson(needs.matrix.outputs.matrix) }} From 64f777274c6d6fb57ff0e9fa2d44107d73012476 Mon Sep 17 00:00:00 2001 From: teor Date: Fri, 20 Oct 2023 00:50:46 +1000 Subject: [PATCH 17/18] fix(security): fix concurrency issues in tree key formats, and CPU usage in genesis tree roots (#7392) * Add tree key format and cached root upgrades * Document the changes in the upgrades * Remove unnecessary clippy::unwrap_in_result * Fix database type * Bump state version * Skip some checks if the database is empty * Fix tests for a short state 
upgrade * Disable format checks in some tests * Document state performance issues * Clarify upgrade behaviour * Clarify panic messages * Delete incorrect genesis trees write code * Fix metrics handling for genesis * Remove an unused import * Explain why genesis anchors are ok * Update snapshots * Debug a failing test * Fix some tests * Fix missing imports * Move the state check in a test * Fix comment and doc typos Co-authored-by: Marek Co-authored-by: Arya * Clarify what a long upgrade is * Rename unused function arguments Co-authored-by: Marek * Add all_unordered log regex matching methods * Fix timing issues with version upgrades and other logs * Fix argument name in docs Co-authored-by: Marek * Explain match until first for all regexes behaviour better --------- Co-authored-by: Marek Co-authored-by: Arya --- book/src/dev/state-db-upgrades.md | 27 ++- zebra-state/src/constants.rs | 4 +- zebra-state/src/lib.rs | 2 +- .../src/service/check/tests/anchors.rs | 48 +++- zebra-state/src/service/finalized_state.rs | 28 ++- .../src/service/finalized_state/disk_db.rs | 2 +- .../empty_column_families@mainnet_0.snap | 3 - .../empty_column_families@testnet_0.snap | 3 - .../orchard_anchors_raw_data@mainnet_0.snap | 10 + .../orchard_anchors_raw_data@testnet_0.snap | 10 + .../sapling_anchors_raw_data@mainnet_0.snap | 10 + .../sapling_anchors_raw_data@testnet_0.snap | 10 + .../sprout_anchors_raw_data@mainnet_0.snap | 10 + .../sprout_anchors_raw_data@testnet_0.snap | 10 + ...te_commitment_tree_raw_data@mainnet_0.snap | 2 +- ...te_commitment_tree_raw_data@mainnet_1.snap | 2 +- ...te_commitment_tree_raw_data@mainnet_2.snap | 2 +- ...te_commitment_tree_raw_data@testnet_0.snap | 2 +- ...te_commitment_tree_raw_data@testnet_1.snap | 2 +- ...te_commitment_tree_raw_data@testnet_2.snap | 2 +- .../finalized_state/disk_format/upgrade.rs | 33 +++ .../upgrade/cache_genesis_roots.rs | 181 ++++++++++++++ .../disk_format/upgrade/fix_tree_key_type.rs | 138 +++++++++++ 
.../service/finalized_state/zebra_db/block.rs | 129 +++------- .../snapshots/sprout_trees@mainnet_0.snap | 9 +- .../snapshots/sprout_trees@testnet_0.snap | 9 +- .../service/finalized_state/zebra_db/chain.rs | 98 ++++---- .../finalized_state/zebra_db/metrics.rs | 46 ++-- .../finalized_state/zebra_db/shielded.rs | 225 +++++++++++------- zebra-state/src/tests/setup.rs | 4 +- zebra-test/src/command.rs | 97 +++++++- zebra-test/src/command/to_regex.rs | 19 +- zebrad/tests/acceptance.rs | 98 ++++---- zebrad/tests/common/cached_state.rs | 27 ++- zebrad/tests/common/checkpoints.rs | 8 + .../common/lightwalletd/wallet_grpc_test.rs | 47 ++-- 36 files changed, 997 insertions(+), 360 deletions(-) create mode 100644 zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_anchors_raw_data@mainnet_0.snap create mode 100644 zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_anchors_raw_data@testnet_0.snap create mode 100644 zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_anchors_raw_data@mainnet_0.snap create mode 100644 zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_anchors_raw_data@testnet_0.snap create mode 100644 zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_anchors_raw_data@mainnet_0.snap create mode 100644 zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_anchors_raw_data@testnet_0.snap create mode 100644 zebra-state/src/service/finalized_state/disk_format/upgrade/cache_genesis_roots.rs create mode 100644 zebra-state/src/service/finalized_state/disk_format/upgrade/fix_tree_key_type.rs diff --git a/book/src/dev/state-db-upgrades.md b/book/src/dev/state-db-upgrades.md index 6bd6aeaddbd..db5e4bce868 100644 --- a/book/src/dev/state-db-upgrades.md +++ b/book/src/dev/state-db-upgrades.md @@ -40,6 +40,28 @@ This means that: If there is an upgrade failure, it can panic and tell the user to delete their cached state and re-launch 
Zebra. +### Performance Constraints + +Some column family access patterns can lead to very poor performance. + +Known performance issues include: +- using an iterator on a column family which also deletes keys +- creating large numbers of iterators +- holding an iterator for a long time + +See the performance notes and bug reports in: +- https://github.com/facebook/rocksdb/wiki/Iterator#iterating-upper-bound-and-lower-bound +- https://tracker.ceph.com/issues/55324 +- https://jira.mariadb.org/browse/MDEV-19670 + +But we need to use iterators for some operations, so our alternatives are (in preferred order): +1. Minimise the number of keys we delete, and how often we delete them +2. Avoid using iterators on column families where we delete keys +3. If we must use iterators on those column families, set read bounds to minimise the amount of deleted data that is read + +Currently only UTXOs require key deletion, and only `utxo_loc_by_transparent_addr_loc` requires +deletion and iterators. + ### Implementation Steps - [ ] update the [database format](https://github.com/ZcashFoundation/zebra/blob/main/book/src/dev/state-db-upgrades.md#current) in the Zebra docs @@ -87,7 +109,7 @@ We use the following rocksdb column families: | *Sprout* | | | | | `sprout_nullifiers` | `sprout::Nullifier` | `()` | Create | | `sprout_anchors` | `sprout::tree::Root` | `sprout::NoteCommitmentTree` | Create | -| `sprout_note_commitment_tree` | `block::Height` | `sprout::NoteCommitmentTree` | Delete | +| `sprout_note_commitment_tree` | `()` | `sprout::NoteCommitmentTree` | Update | | *Sapling* | | | | | `sapling_nullifiers` | `sapling::Nullifier` | `()` | Create | | `sapling_anchors` | `sapling::tree::Root` | `()` | Create | @@ -99,7 +121,7 @@ We use the following rocksdb column families: | `orchard_note_commitment_tree` | `block::Height` | `orchard::NoteCommitmentTree` | Create | | `orchard_note_commitment_subtree` | `block::Height` | `NoteCommitmentSubtreeData` | Create | | *Chain* | | | | -| 
`history_tree` | `block::Height` | `NonEmptyHistoryTree` | Delete | +| `history_tree` | `()` | `NonEmptyHistoryTree` | Update | | `tip_chain_value_pool` | `()` | `ValueBalance` | Update | Zcash structures are encoded using `ZcashSerialize`/`ZcashDeserialize`. @@ -131,6 +153,7 @@ Amounts: Derived Formats: - `*::NoteCommitmentTree`: `bincode` using `serde` + - stored note commitment trees always have cached roots - `NonEmptyHistoryTree`: `bincode` using `serde`, using `zcash_history`'s `serde` implementation diff --git a/zebra-state/src/constants.rs b/zebra-state/src/constants.rs index b5a06851638..af232e0dbfe 100644 --- a/zebra-state/src/constants.rs +++ b/zebra-state/src/constants.rs @@ -49,11 +49,11 @@ pub(crate) const DATABASE_FORMAT_VERSION: u64 = 25; /// - adding new column families, /// - changing the format of a column family in a compatible way, or /// - breaking changes with compatibility code in all supported Zebra versions. -pub(crate) const DATABASE_FORMAT_MINOR_VERSION: u64 = 2; +pub(crate) const DATABASE_FORMAT_MINOR_VERSION: u64 = 3; /// The database format patch version, incremented each time the on-disk database format has a /// significant format compatibility fix. -pub(crate) const DATABASE_FORMAT_PATCH_VERSION: u64 = 2; +pub(crate) const DATABASE_FORMAT_PATCH_VERSION: u64 = 0; /// Returns the highest database version that modifies the subtree index format. 
/// diff --git a/zebra-state/src/lib.rs b/zebra-state/src/lib.rs index d1076c68b95..ad2cec55207 100644 --- a/zebra-state/src/lib.rs +++ b/zebra-state/src/lib.rs @@ -66,7 +66,7 @@ pub use response::GetBlockTemplateChainInfo; pub use service::{ arbitrary::{populated_state, CHAIN_TIP_UPDATE_WAIT_LIMIT}, chain_tip::{ChainTipBlock, ChainTipSender}, - finalized_state::MAX_ON_DISK_HEIGHT, + finalized_state::{DiskWriteBatch, MAX_ON_DISK_HEIGHT}, init_test, init_test_services, ReadStateService, }; diff --git a/zebra-state/src/service/check/tests/anchors.rs b/zebra-state/src/service/check/tests/anchors.rs index d96c8b0410b..09d33b29190 100644 --- a/zebra-state/src/service/check/tests/anchors.rs +++ b/zebra-state/src/service/check/tests/anchors.rs @@ -6,8 +6,9 @@ use zebra_chain::{ amount::Amount, block::{Block, Height}, primitives::Groth16Proof, + sapling, serialization::ZcashDeserializeInto, - sprout::JoinSplit, + sprout::{self, JoinSplit}, transaction::{JoinSplitData, LockTime, Transaction, UnminedTx}, }; @@ -18,7 +19,7 @@ use crate::{ write::validate_and_commit_non_finalized, }, tests::setup::{new_state_with_mainnet_genesis, transaction_v4_from_coinbase}, - SemanticallyVerifiedBlock, ValidateContextError, + DiskWriteBatch, SemanticallyVerifiedBlock, ValidateContextError, }; // Sprout @@ -31,12 +32,22 @@ fn check_sprout_anchors() { let (finalized_state, mut non_finalized_state, _genesis) = new_state_with_mainnet_genesis(); - // Bootstrap a block at height == 1. + // Delete the empty anchor from the database + let mut batch = DiskWriteBatch::new(); + batch.delete_sprout_anchor( + &finalized_state, + &sprout::tree::NoteCommitmentTree::default().root(), + ); + finalized_state + .write_batch(batch) + .expect("unexpected I/O error"); + + // Create a block at height 1. let block_1 = zebra_test::vectors::BLOCK_MAINNET_1_BYTES .zcash_deserialize_into::() .expect("block should deserialize"); - // Bootstrap a block just before the first Sprout anchors. 
+ // Create a block just before the first Sprout anchors. let block_395 = zebra_test::vectors::BLOCK_MAINNET_395_BYTES .zcash_deserialize_into::() .expect("block should deserialize"); @@ -44,7 +55,7 @@ fn check_sprout_anchors() { // Add initial transactions to [`block_1`]. let block_1 = prepare_sprout_block(block_1, block_395); - // Bootstrap a block at height == 2 that references the Sprout note commitment tree state + // Create a block at height == 2 that references the Sprout note commitment tree state // from [`block_1`]. let block_2 = zebra_test::vectors::BLOCK_MAINNET_2_BYTES .zcash_deserialize_into::() @@ -74,10 +85,13 @@ fn check_sprout_anchors() { ) }); - assert!(matches!( - check_unmined_tx_anchors_result, - Err(ValidateContextError::UnknownSproutAnchor { .. }) - )); + assert!( + matches!( + check_unmined_tx_anchors_result, + Err(ValidateContextError::UnknownSproutAnchor { .. }), + ), + "unexpected result: {check_unmined_tx_anchors_result:?}", + ); // Validate and commit [`block_1`]. This will add an anchor referencing the // empty note commitment tree to the state. 
@@ -182,7 +196,17 @@ fn check_sapling_anchors() { let (finalized_state, mut non_finalized_state, _genesis) = new_state_with_mainnet_genesis(); - // Bootstrap a block at height == 1 that has the first Sapling note commitments + // Delete the empty anchor from the database + let mut batch = DiskWriteBatch::new(); + batch.delete_sapling_anchor( + &finalized_state, + &sapling::tree::NoteCommitmentTree::default().root(), + ); + finalized_state + .write_batch(batch) + .expect("unexpected I/O error"); + + // Create a block at height 1 that has the first Sapling note commitments let mut block1 = zebra_test::vectors::BLOCK_MAINNET_1_BYTES .zcash_deserialize_into::() .expect("block should deserialize"); @@ -227,7 +251,7 @@ fn check_sapling_anchors() { let block1 = Arc::new(block1).prepare(); - // Bootstrap a block at height == 2 that references the Sapling note commitment tree state + // Create a block at height == 2 that references the Sapling note commitment tree state // from earlier block let mut block2 = zebra_test::vectors::BLOCK_MAINNET_2_BYTES .zcash_deserialize_into::() @@ -315,3 +339,5 @@ fn check_sapling_anchors() { Ok(()) ); } + +// TODO: create a test for orchard anchors diff --git a/zebra-state/src/service/finalized_state.rs b/zebra-state/src/service/finalized_state.rs index e092f0610b3..fde5c414c28 100644 --- a/zebra-state/src/service/finalized_state.rs +++ b/zebra-state/src/service/finalized_state.rs @@ -42,7 +42,10 @@ pub use disk_format::{OutputIndex, OutputLocation, TransactionLocation, MAX_ON_D pub(super) use zebra_db::ZebraDb; -pub(super) use disk_db::DiskWriteBatch; +#[cfg(any(test, feature = "proptest-impl"))] +pub use disk_db::DiskWriteBatch; +#[cfg(not(any(test, feature = "proptest-impl")))] +use disk_db::DiskWriteBatch; /// The finalized part of the chain state, stored in the db. /// @@ -88,14 +91,33 @@ pub struct FinalizedState { } impl FinalizedState { - /// Returns an on-disk database instance for `config` and `network`. 
+ /// Returns an on-disk database instance for `config`, `network`, and `elastic_db`. /// If there is no existing database, creates a new database on disk. pub fn new( config: &Config, network: Network, #[cfg(feature = "elasticsearch")] elastic_db: Option, ) -> Self { - let db = ZebraDb::new(config, network, false); + Self::new_with_debug( + config, + network, + false, + #[cfg(feature = "elasticsearch")] + elastic_db, + ) + } + + /// Returns an on-disk database instance with the supplied production and debug settings. + /// If there is no existing database, creates a new database on disk. + /// + /// This method is intended for use in tests. + pub(crate) fn new_with_debug( + config: &Config, + network: Network, + debug_skip_format_upgrades: bool, + #[cfg(feature = "elasticsearch")] elastic_db: Option, + ) -> Self { + let db = ZebraDb::new(config, network, debug_skip_format_upgrades); #[cfg(feature = "elasticsearch")] let new_state = Self { diff --git a/zebra-state/src/service/finalized_state/disk_db.rs b/zebra-state/src/service/finalized_state/disk_db.rs index 3772fb7a789..9ae009f6dcd 100644 --- a/zebra-state/src/service/finalized_state/disk_db.rs +++ b/zebra-state/src/service/finalized_state/disk_db.rs @@ -98,7 +98,7 @@ pub struct DiskDb { // and make them accessible via read-only methods #[must_use = "batches must be written to the database"] #[derive(Default)] -pub(crate) struct DiskWriteBatch { +pub struct DiskWriteBatch { /// The inner RocksDB write batch. 
batch: rocksdb::WriteBatch, } diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@mainnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@mainnet_0.snap index 4b37e3baef3..3c333a9fc43 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@mainnet_0.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@mainnet_0.snap @@ -5,13 +5,10 @@ expression: empty_column_families [ "balance_by_transparent_addr: no entries", "history_tree: no entries", - "orchard_anchors: no entries", "orchard_note_commitment_subtree: no entries", "orchard_nullifiers: no entries", - "sapling_anchors: no entries", "sapling_note_commitment_subtree: no entries", "sapling_nullifiers: no entries", - "sprout_anchors: no entries", "sprout_nullifiers: no entries", "tip_chain_value_pool: no entries", "tx_loc_by_transparent_addr_loc: no entries", diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@testnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@testnet_0.snap index 4b37e3baef3..3c333a9fc43 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@testnet_0.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/empty_column_families@testnet_0.snap @@ -5,13 +5,10 @@ expression: empty_column_families [ "balance_by_transparent_addr: no entries", "history_tree: no entries", - "orchard_anchors: no entries", "orchard_note_commitment_subtree: no entries", "orchard_nullifiers: no entries", - "sapling_anchors: no entries", "sapling_note_commitment_subtree: no entries", "sapling_nullifiers: no entries", - "sprout_anchors: no entries", "sprout_nullifiers: no entries", "tip_chain_value_pool: no entries", "tx_loc_by_transparent_addr_loc: no entries", 
diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_anchors_raw_data@mainnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_anchors_raw_data@mainnet_0.snap new file mode 100644 index 00000000000..6ff419bc322 --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_anchors_raw_data@mainnet_0.snap @@ -0,0 +1,10 @@ +--- +source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +expression: cf_data +--- +[ + KV( + k: "ae2935f1dfd8a24aed7c70df7de3a668eb7a49b1319880dde2bbd9031ae5d82f", + v: "", + ), +] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_anchors_raw_data@testnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_anchors_raw_data@testnet_0.snap new file mode 100644 index 00000000000..6ff419bc322 --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/orchard_anchors_raw_data@testnet_0.snap @@ -0,0 +1,10 @@ +--- +source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +expression: cf_data +--- +[ + KV( + k: "ae2935f1dfd8a24aed7c70df7de3a668eb7a49b1319880dde2bbd9031ae5d82f", + v: "", + ), +] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_anchors_raw_data@mainnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_anchors_raw_data@mainnet_0.snap new file mode 100644 index 00000000000..fec72b61b35 --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_anchors_raw_data@mainnet_0.snap @@ -0,0 +1,10 @@ +--- +source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +expression: cf_data +--- +[ + KV( + k: "fbc2f4300c01f0b7820d00e3347c8da4ee614674376cbc45359daa54f9b5493e", + v: "", + ), +] diff --git 
a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_anchors_raw_data@testnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_anchors_raw_data@testnet_0.snap new file mode 100644 index 00000000000..fec72b61b35 --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sapling_anchors_raw_data@testnet_0.snap @@ -0,0 +1,10 @@ +--- +source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +expression: cf_data +--- +[ + KV( + k: "fbc2f4300c01f0b7820d00e3347c8da4ee614674376cbc45359daa54f9b5493e", + v: "", + ), +] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_anchors_raw_data@mainnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_anchors_raw_data@mainnet_0.snap new file mode 100644 index 00000000000..57a94746e00 --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_anchors_raw_data@mainnet_0.snap @@ -0,0 +1,10 @@ +--- +source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +expression: cf_data +--- +[ + KV( + k: "d7c612c817793191a1e68652121876d6b3bde40f4fa52bc314145ce6e5cdd259", + v: "0001d7c612c817793191a1e68652121876d6b3bde40f4fa52bc314145ce6e5cdd259", + ), +] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_anchors_raw_data@testnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_anchors_raw_data@testnet_0.snap new file mode 100644 index 00000000000..57a94746e00 --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_anchors_raw_data@testnet_0.snap @@ -0,0 +1,10 @@ +--- +source: zebra-state/src/service/finalized_state/disk_format/tests/snapshot.rs +expression: cf_data +--- +[ + KV( + k: "d7c612c817793191a1e68652121876d6b3bde40f4fa52bc314145ce6e5cdd259", + v: 
"0001d7c612c817793191a1e68652121876d6b3bde40f4fa52bc314145ce6e5cdd259", + ), +] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@mainnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@mainnet_0.snap index 6d9892d5d65..f48489ff12e 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@mainnet_0.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@mainnet_0.snap @@ -4,7 +4,7 @@ expression: cf_data --- [ KV( - k: "000000", + k: "", v: "0001d7c612c817793191a1e68652121876d6b3bde40f4fa52bc314145ce6e5cdd259", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@mainnet_1.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@mainnet_1.snap index c8264029db2..f48489ff12e 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@mainnet_1.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@mainnet_1.snap @@ -4,7 +4,7 @@ expression: cf_data --- [ KV( - k: "000001", + k: "", v: "0001d7c612c817793191a1e68652121876d6b3bde40f4fa52bc314145ce6e5cdd259", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@mainnet_2.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@mainnet_2.snap index 2fa029f60bb..f48489ff12e 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@mainnet_2.snap +++ 
b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@mainnet_2.snap @@ -4,7 +4,7 @@ expression: cf_data --- [ KV( - k: "000002", + k: "", v: "0001d7c612c817793191a1e68652121876d6b3bde40f4fa52bc314145ce6e5cdd259", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@testnet_0.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@testnet_0.snap index 6d9892d5d65..f48489ff12e 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@testnet_0.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@testnet_0.snap @@ -4,7 +4,7 @@ expression: cf_data --- [ KV( - k: "000000", + k: "", v: "0001d7c612c817793191a1e68652121876d6b3bde40f4fa52bc314145ce6e5cdd259", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@testnet_1.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@testnet_1.snap index c8264029db2..f48489ff12e 100644 --- a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@testnet_1.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@testnet_1.snap @@ -4,7 +4,7 @@ expression: cf_data --- [ KV( - k: "000001", + k: "", v: "0001d7c612c817793191a1e68652121876d6b3bde40f4fa52bc314145ce6e5cdd259", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@testnet_2.snap b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@testnet_2.snap index 2fa029f60bb..f48489ff12e 100644 --- 
a/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@testnet_2.snap +++ b/zebra-state/src/service/finalized_state/disk_format/tests/snapshots/sprout_note_commitment_tree_raw_data@testnet_2.snap @@ -4,7 +4,7 @@ expression: cf_data --- [ KV( - k: "000002", + k: "", v: "0001d7c612c817793191a1e68652121876d6b3bde40f4fa52bc314145ce6e5cdd259", ), ] diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade.rs index ee8c050f391..86a04553c50 100644 --- a/zebra-state/src/service/finalized_state/disk_format/upgrade.rs +++ b/zebra-state/src/service/finalized_state/disk_format/upgrade.rs @@ -29,6 +29,8 @@ use crate::{ }; pub(crate) mod add_subtrees; +pub(crate) mod cache_genesis_roots; +pub(crate) mod fix_tree_key_type; /// The kind of database format change or validity check we're performing. #[derive(Clone, Debug, Eq, PartialEq)] @@ -541,6 +543,32 @@ impl DbFormatChange { timer.finish(module_path!(), line!(), "add subtrees upgrade"); } + // Sprout & history tree key formats, and cached genesis tree roots database upgrades. + + let version_for_tree_keys_and_caches = + Version::parse("25.3.0").expect("Hardcoded version string should be valid."); + + // Check if we need to do the upgrade. + if older_disk_version < &version_for_tree_keys_and_caches { + let timer = CodeTimer::start(); + + // It shouldn't matter what order these are run in. + cache_genesis_roots::run(initial_tip_height, db, cancel_receiver)?; + fix_tree_key_type::run(initial_tip_height, db, cancel_receiver)?; + + // Before marking the state as upgraded, check that the upgrade completed successfully. + cache_genesis_roots::detailed_check(db, cancel_receiver)? + .expect("database format is valid after upgrade"); + fix_tree_key_type::detailed_check(db, cancel_receiver)? + .expect("database format is valid after upgrade"); + + // Mark the database as upgraded. 
Zebra won't repeat the upgrade anymore once the + // database is marked, so the upgrade MUST be complete at this point. + Self::mark_as_upgraded_to(&version_for_tree_keys_and_caches, config, network); + + timer.finish(module_path!(), line!(), "tree keys and caches upgrade"); + } + // # New Upgrades Usually Go Here // // New code goes above this comment! @@ -571,6 +599,9 @@ impl DbFormatChange { // upgrade, they would accidentally break compatibility with older Zebra cached states.) results.push(add_subtrees::subtree_format_calculation_pre_checks(db)); + results.push(cache_genesis_roots::quick_check(db)); + results.push(fix_tree_key_type::quick_check(db)); + // The work is done in the functions we just called. timer.finish(module_path!(), line!(), "format_validity_checks_quick()"); @@ -602,6 +633,8 @@ impl DbFormatChange { db, cancel_receiver, )?); + results.push(cache_genesis_roots::detailed_check(db, cancel_receiver)?); + results.push(fix_tree_key_type::detailed_check(db, cancel_receiver)?); // The work is done in the functions we just called. timer.finish(module_path!(), line!(), "format_validity_checks_detailed()"); diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade/cache_genesis_roots.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade/cache_genesis_roots.rs new file mode 100644 index 00000000000..57fcacb9d5b --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/upgrade/cache_genesis_roots.rs @@ -0,0 +1,181 @@ +//! Updating the genesis note commitment trees to cache their roots. +//! +//! This reduces CPU usage when the genesis tree roots are used for transaction validation. +//! Since mempool transactions are cheap to create, this is a potential remote denial of service. 
+ +use std::sync::mpsc; + +use zebra_chain::{block::Height, sprout}; + +use crate::service::finalized_state::{disk_db::DiskWriteBatch, ZebraDb}; + +use super::CancelFormatChange; + +/// Runs disk format upgrade for changing the sprout and history tree key types. +/// +/// Returns `Ok` if the upgrade completed, and `Err` if it was cancelled. +/// +/// # Panics +/// +/// If the state is empty. +#[allow(clippy::unwrap_in_result)] +#[instrument(skip(upgrade_db, cancel_receiver))] +pub fn run( + _initial_tip_height: Height, + upgrade_db: &ZebraDb, + cancel_receiver: &mpsc::Receiver, +) -> Result<(), CancelFormatChange> { + let sprout_genesis_tree = sprout::tree::NoteCommitmentTree::default(); + let sprout_tip_tree = upgrade_db.sprout_tree_for_tip(); + + let sapling_genesis_tree = upgrade_db + .sapling_tree_by_height(&Height(0)) + .expect("caller has checked for genesis block"); + let orchard_genesis_tree = upgrade_db + .orchard_tree_by_height(&Height(0)) + .expect("caller has checked for genesis block"); + + // Writing the trees back to the database automatically caches their roots. + let mut batch = DiskWriteBatch::new(); + + // Fix the cached root of the Sprout genesis tree in its anchors column family. + + // It's ok to write the genesis tree to the tip tree index, because it's overwritten by + // the actual tip before the batch is written to the database. + batch.update_sprout_tree(upgrade_db, &sprout_genesis_tree); + // This method makes sure the sprout tip tree has a cached root, even if it's the genesis tree. + batch.update_sprout_tree(upgrade_db, &sprout_tip_tree); + + batch.create_sapling_tree(upgrade_db, &Height(0), &sapling_genesis_tree); + batch.create_orchard_tree(upgrade_db, &Height(0), &orchard_genesis_tree); + + // Return before we write if the upgrade is cancelled. 
+ if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) { + return Err(CancelFormatChange); + } + + upgrade_db + .write_batch(batch) + .expect("updating tree cached roots should always succeed"); + + Ok(()) +} + +/// Quickly check that the genesis trees and sprout tip tree have cached roots. +/// +/// This allows us to fail the upgrade quickly in tests and during development, +/// rather than waiting to see if it failed. +/// +/// # Panics +/// +/// If the state is empty. +pub fn quick_check(db: &ZebraDb) -> Result<(), String> { + // An empty database doesn't have any trees, so its format is trivially correct. + if db.is_empty() { + return Ok(()); + } + + let sprout_genesis_tree = sprout::tree::NoteCommitmentTree::default(); + let sprout_genesis_tree = db + .sprout_tree_by_anchor(&sprout_genesis_tree.root()) + .expect("just checked for genesis block"); + let sprout_tip_tree = db.sprout_tree_for_tip(); + + let sapling_genesis_tree = db + .sapling_tree_by_height(&Height(0)) + .expect("just checked for genesis block"); + let orchard_genesis_tree = db + .orchard_tree_by_height(&Height(0)) + .expect("just checked for genesis block"); + + // Check the entire format before returning any errors. 
+ let sprout_result = sprout_genesis_tree + .cached_root() + .ok_or("no cached root in sprout genesis tree"); + let sprout_tip_result = sprout_tip_tree + .cached_root() + .ok_or("no cached root in sprout tip tree"); + + let sapling_result = sapling_genesis_tree + .cached_root() + .ok_or("no cached root in sapling genesis tree"); + let orchard_result = orchard_genesis_tree + .cached_root() + .ok_or("no cached root in orchard genesis tree"); + + if sprout_result.is_err() + || sprout_tip_result.is_err() + || sapling_result.is_err() + || orchard_result.is_err() + { + let err = Err(format!( + "missing cached genesis root: sprout: {sprout_result:?}, {sprout_tip_result:?} \ + sapling: {sapling_result:?}, orchard: {orchard_result:?}" + )); + warn!(?err); + return err; + } + + Ok(()) +} + +/// Detailed check that all trees have cached roots. +/// +/// # Panics +/// +/// If the state is empty. +pub fn detailed_check( + db: &ZebraDb, + cancel_receiver: &mpsc::Receiver, +) -> Result, CancelFormatChange> { + // This is redundant in some code paths, but not in others. But it's quick anyway. + // Check the entire format before returning any errors. + let mut result = quick_check(db); + + for (root, tree) in db.sprout_trees_full_map() { + // Return early if the format check is cancelled. + if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) { + return Err(CancelFormatChange); + } + + if tree.cached_root().is_none() { + result = Err(format!( + "found un-cached sprout tree root after running genesis tree root fix \ + {root:?}" + )); + error!(?result); + } + } + + for (height, tree) in db.sapling_tree_by_height_range(..) { + // Return early if the format check is cancelled. 
+ if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) { + return Err(CancelFormatChange); + } + + if tree.cached_root().is_none() { + result = Err(format!( + "found un-cached sapling tree root after running genesis tree root fix \ + {height:?}" + )); + error!(?result); + } + } + + for (height, tree) in db.orchard_tree_by_height_range(..) { + // Return early if the format check is cancelled. + if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) { + return Err(CancelFormatChange); + } + + if tree.cached_root().is_none() { + result = Err(format!( + "found un-cached orchard tree root after running genesis tree root fix \ + {height:?}" + )); + error!(?result); + } + } + + Ok(result) +} diff --git a/zebra-state/src/service/finalized_state/disk_format/upgrade/fix_tree_key_type.rs b/zebra-state/src/service/finalized_state/disk_format/upgrade/fix_tree_key_type.rs new file mode 100644 index 00000000000..069d9cb4c2b --- /dev/null +++ b/zebra-state/src/service/finalized_state/disk_format/upgrade/fix_tree_key_type.rs @@ -0,0 +1,138 @@ +//! Updating the sprout and history tree key type from `Height` to the empty key `()`. +//! +//! This avoids a potential concurrency bug, and a known database performance issue. + +use std::sync::{mpsc, Arc}; + +use zebra_chain::{block::Height, history_tree::HistoryTree, sprout}; + +use crate::service::finalized_state::{ + disk_db::DiskWriteBatch, disk_format::MAX_ON_DISK_HEIGHT, ZebraDb, +}; + +use super::CancelFormatChange; + +/// Runs disk format upgrade for changing the sprout and history tree key types. +/// +/// Returns `Ok` if the upgrade completed, and `Err` if it was cancelled. 
+#[allow(clippy::unwrap_in_result)] +#[instrument(skip(upgrade_db, cancel_receiver))] +pub fn run( + _initial_tip_height: Height, + upgrade_db: &ZebraDb, + cancel_receiver: &mpsc::Receiver, +) -> Result<(), CancelFormatChange> { + let sprout_tip_tree = upgrade_db.sprout_tree_for_tip(); + let history_tip_tree = upgrade_db.history_tree(); + + // Writing the trees back to the database automatically updates their format. + let mut batch = DiskWriteBatch::new(); + + // Delete the previous `Height` tip key format, which is now a duplicate. + // It's ok to do a full delete, because the trees are restored before the batch is written. + batch.delete_range_sprout_tree(upgrade_db, &Height(0), &MAX_ON_DISK_HEIGHT); + batch.delete_range_history_tree(upgrade_db, &Height(0), &MAX_ON_DISK_HEIGHT); + + // Update the sprout tip key format in the database. + batch.update_sprout_tree(upgrade_db, &sprout_tip_tree); + batch.update_history_tree(upgrade_db, &history_tip_tree); + + // Return before we write if the upgrade is cancelled. + if !matches!(cancel_receiver.try_recv(), Err(mpsc::TryRecvError::Empty)) { + return Err(CancelFormatChange); + } + + upgrade_db + .write_batch(batch) + .expect("updating tree key formats should always succeed"); + + Ok(()) +} + +/// Quickly check that the sprout and history tip trees have updated key formats. +/// +/// # Panics +/// +/// If the state is empty. +pub fn quick_check(db: &ZebraDb) -> Result<(), String> { + // Check the entire format before returning any errors. + let mut result = Ok(()); + + let mut prev_key = None; + let mut prev_tree: Option> = None; + + for (key, tree) in db.sprout_trees_full_tip() { + // The tip tree should be indexed by `()` (which serializes to an empty array). 
+ if !key.raw_bytes().is_empty() { + result = Err(format!( + "found incorrect sprout tree key format after running key format upgrade \ + key: {key:?}, tree: {:?}", + tree.root() + )); + error!(?result); + } + + // There should only be one tip tree in this column family. + if prev_tree.is_some() { + result = Err(format!( + "found duplicate sprout trees after running key format upgrade\n\ + key: {key:?}, tree: {:?}\n\ + prev key: {prev_key:?}, prev_tree: {:?}\n\ + ", + tree.root(), + prev_tree.unwrap().root(), + )); + error!(?result); + } + + prev_key = Some(key); + prev_tree = Some(tree); + } + + let mut prev_key = None; + let mut prev_tree: Option> = None; + + for (key, tree) in db.history_trees_full_tip() { + // The tip tree should be indexed by `()` (which serializes to an empty array). + if !key.raw_bytes().is_empty() { + result = Err(format!( + "found incorrect history tree key format after running key format upgrade \ + key: {key:?}, tree: {:?}", + tree.hash() + )); + error!(?result); + } + + // There should only be one tip tree in this column family. + if prev_tree.is_some() { + result = Err(format!( + "found duplicate history trees after running key format upgrade\n\ + key: {key:?}, tree: {:?}\n\ + prev key: {prev_key:?}, prev_tree: {:?}\n\ + ", + tree.hash(), + prev_tree.unwrap().hash(), + )); + error!(?result); + } + + prev_key = Some(key); + prev_tree = Some(tree); + } + + result +} + +/// Detailed check that the sprout and history tip trees have updated key formats. +/// This is currently the same as the quick check. +/// +/// # Panics +/// +/// If the state is empty. +pub fn detailed_check( + db: &ZebraDb, + _cancel_receiver: &mpsc::Receiver, +) -> Result, CancelFormatChange> { + // This upgrade only changes two key-value pairs, so checking it is always quick. 
+ Ok(quick_check(db)) +} diff --git a/zebra-state/src/service/finalized_state/zebra_db/block.rs b/zebra-state/src/service/finalized_state/zebra_db/block.rs index 85959849985..0a1a49e72cb 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/block.rs @@ -24,7 +24,6 @@ use zebra_chain::{ parameters::{Network, GENESIS_PREVIOUS_BLOCK_HASH}, sapling, serialization::TrustedPreallocate, - sprout, transaction::{self, Transaction}, transparent, value_balance::ValueBalance, @@ -456,10 +455,18 @@ impl DiskWriteBatch { prev_note_commitment_trees: Option, ) -> Result<(), BoxError> { let db = &zebra_db.db; - // Commit block and transaction data. - // (Transaction indexes, note commitments, and UTXOs are committed later.) + // Commit block, transaction, and note commitment tree data. self.prepare_block_header_and_transaction_data_batch(db, &finalized.verified)?; + // The consensus rules are silent on shielded transactions in the genesis block, + // because there aren't any in the mainnet or testnet genesis blocks. + // So this means the genesis anchor is the same as the empty anchor, + // which is already present from height 1 to the first shielded transaction. + // + // In Zebra we include the nullifiers and note commitments in the genesis block because it simplifies our code. + self.prepare_shielded_transaction_batch(db, &finalized.verified)?; + self.prepare_trees_batch(zebra_db, finalized, prev_note_commitment_trees)?; + // # Consensus // // > A transaction MUST NOT spend an output of the genesis block coinbase transaction. @@ -467,34 +474,30 @@ impl DiskWriteBatch { // // https://zips.z.cash/protocol/protocol.pdf#txnconsensus // - // By returning early, Zebra commits the genesis block and transaction data, - // but it ignores the genesis UTXO and value pool updates. 
- if self.prepare_genesis_batch(db, finalized) { - return Ok(()); + // So we ignore the genesis UTXO, transparent address index, and value pool updates + // for the genesis block. This also ignores genesis shielded value pool updates, but there + // aren't any of those on mainnet or testnet. + if !finalized.verified.height.is_min() { + // Commit transaction indexes + self.prepare_transparent_transaction_batch( + db, + network, + &finalized.verified, + &new_outputs_by_out_loc, + &spent_utxos_by_outpoint, + &spent_utxos_by_out_loc, + address_balances, + )?; + + // Commit UTXOs and value pools + self.prepare_chain_value_pools_batch( + db, + &finalized.verified, + spent_utxos_by_outpoint, + value_pool, + )?; } - // Commit transaction indexes - self.prepare_transparent_transaction_batch( - db, - network, - &finalized.verified, - &new_outputs_by_out_loc, - &spent_utxos_by_outpoint, - &spent_utxos_by_out_loc, - address_balances, - )?; - self.prepare_shielded_transaction_batch(db, &finalized.verified)?; - - self.prepare_trees_batch(zebra_db, finalized, prev_note_commitment_trees)?; - - // Commit UTXOs and value pools - self.prepare_chain_value_pools_batch( - db, - &finalized.verified, - spent_utxos_by_outpoint, - value_pool, - )?; - // The block has passed contextual validation, so update the metrics block_precommit_metrics( &finalized.verified.block, @@ -560,72 +563,4 @@ impl DiskWriteBatch { Ok(()) } - - /// If `finalized.block` is a genesis block, prepares a database batch that finishes - /// initializing the database, and returns `true` without actually writing anything. - /// - /// Since the genesis block's transactions are skipped, the returned genesis batch should be - /// written to the database immediately. - /// - /// If `finalized.block` is not a genesis block, does nothing. - /// - /// # Panics - /// - /// If `finalized.block` is a genesis block, and a note commitment tree in `finalized` doesn't - /// match its corresponding empty tree. 
- pub fn prepare_genesis_batch( - &mut self, - db: &DiskDb, - finalized: &SemanticallyVerifiedBlockWithTrees, - ) -> bool { - if finalized.verified.block.header.previous_block_hash == GENESIS_PREVIOUS_BLOCK_HASH { - assert_eq!( - *finalized.treestate.note_commitment_trees.sprout, - sprout::tree::NoteCommitmentTree::default(), - "The Sprout tree in the finalized block must match the empty Sprout tree." - ); - assert_eq!( - *finalized.treestate.note_commitment_trees.sapling, - sapling::tree::NoteCommitmentTree::default(), - "The Sapling tree in the finalized block must match the empty Sapling tree." - ); - assert_eq!( - *finalized.treestate.note_commitment_trees.orchard, - orchard::tree::NoteCommitmentTree::default(), - "The Orchard tree in the finalized block must match the empty Orchard tree." - ); - - // We want to store the trees of the genesis block together with their roots, and since - // the trees cache the roots after their computation, we trigger the computation. - // - // At the time of writing this comment, the roots are precomputed before this function - // is called, so the roots should already be cached. - finalized.treestate.note_commitment_trees.sprout.root(); - finalized.treestate.note_commitment_trees.sapling.root(); - finalized.treestate.note_commitment_trees.orchard.root(); - - // Insert the empty note commitment trees. Note that these can't be used too early - // (e.g. the Orchard tree before Nu5 activates) since the block validation will make - // sure only appropriate transactions are allowed in a block. 
- self.zs_insert( - &db.cf_handle("sprout_note_commitment_tree").unwrap(), - finalized.verified.height, - finalized.treestate.note_commitment_trees.sprout.clone(), - ); - self.zs_insert( - &db.cf_handle("sapling_note_commitment_tree").unwrap(), - finalized.verified.height, - finalized.treestate.note_commitment_trees.sapling.clone(), - ); - self.zs_insert( - &db.cf_handle("orchard_note_commitment_tree").unwrap(), - finalized.verified.height, - finalized.treestate.note_commitment_trees.orchard.clone(), - ); - - true - } else { - false - } - } } diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@mainnet_0.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@mainnet_0.snap index fc004eddd5a..438e0809a21 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@mainnet_0.snap +++ b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@mainnet_0.snap @@ -2,4 +2,11 @@ source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs expression: stored_sprout_trees --- -{} +{ + Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89)): NoteCommitmentTree( + inner: Frontier( + frontier: None, + ), + cached_root: Some(Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89))), + ), +} diff --git a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@testnet_0.snap b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@testnet_0.snap index fc004eddd5a..438e0809a21 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@testnet_0.snap +++ 
b/zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshots/sprout_trees@testnet_0.snap @@ -2,4 +2,11 @@ source: zebra-state/src/service/finalized_state/zebra_db/block/tests/snapshot.rs expression: stored_sprout_trees --- -{} +{ + Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89)): NoteCommitmentTree( + inner: Frontier( + frontier: None, + ), + cached_root: Some(Root((215, 198, 18, 200, 23, 121, 49, 145, 161, 230, 134, 82, 18, 24, 118, 214, 179, 189, 228, 15, 79, 165, 43, 195, 20, 20, 92, 230, 229, 205, 210, 89))), + ), +} diff --git a/zebra-state/src/service/finalized_state/zebra_db/chain.rs b/zebra-state/src/service/finalized_state/zebra_db/chain.rs index 1e31741b0a0..49649c6c6b9 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/chain.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/chain.rs @@ -14,19 +14,22 @@ use std::{borrow::Borrow, collections::HashMap, sync::Arc}; use zebra_chain::{ - amount::NonNegative, history_tree::HistoryTree, transparent, value_balance::ValueBalance, + amount::NonNegative, block::Height, history_tree::HistoryTree, transparent, + value_balance::ValueBalance, }; use crate::{ - request::SemanticallyVerifiedBlockWithTrees, service::finalized_state::{ disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk}, + disk_format::RawBytes, zebra_db::ZebraDb, }, BoxError, SemanticallyVerifiedBlock, }; impl ZebraDb { + // History tree methods + /// Returns the ZIP-221 history tree of the finalized tip. /// /// If history trees have not been activated yet (pre-Heartwood), or the state is empty, @@ -36,22 +39,9 @@ impl ZebraDb { return Arc::::default(); } - // # Performance - // - // Using `zs_last_key_value()` on this column family significantly reduces sync performance - // (#7618). But it seems to work for other column families. 
This is probably because - // `zs_delete()` is also used on the same column family: - // - // - // - // See also the performance notes in: - // - // - // This bug will be fixed by PR #7392, because it changes this column family to update the - // existing key, rather than deleting old keys. let history_tree_cf = self.db.cf_handle("history_tree").unwrap(); - // # Forwards Compatibility + // # Backwards Compatibility // // This code can read the column family format in 1.2.0 and earlier (tip height key), // and after PR #7392 is merged (empty key). The height-based code can be removed when @@ -59,18 +49,12 @@ impl ZebraDb { // // # Concurrency // - // There is only one tree in this column family, which is atomically updated by a block - // write batch (database transaction). If this update runs between the height read and - // the tree read, the height will be wrong, and the tree will be missing. - // That could cause consensus bugs. + // There is only one entry in this column family, which is atomically updated by a block + // write batch (database transaction). If we used a height as the key in this column family, + // any updates between reading the tip height and reading the tree could cause panics. // - // Instead, try reading the new empty-key format (from PR #7392) first, - // then read the old format if needed. - // - // See ticket #7581 for more details. - // - // TODO: this concurrency bug will be permanently fixed in PR #7392, - // by changing the block update to overwrite the tree, rather than deleting it. + // So we use the empty key `()`. Since the key has a constant value, we will always read + // the latest tree. let mut history_tree: Option> = self.db.zs_get(&history_tree_cf, &()); if history_tree.is_none() { @@ -84,6 +68,18 @@ impl ZebraDb { history_tree.unwrap_or_default() } + /// Returns all the history tip trees. + /// We only store the history tree for the tip, so this method is mainly used in tests. 
+ pub fn history_trees_full_tip( + &self, + ) -> impl Iterator)> + '_ { + let history_tree_cf = self.db.cf_handle("history_tree").unwrap(); + + self.db.zs_range_iter(&history_tree_cf, ..) + } + + // Value pool methods + /// Returns the stored `ValueBalance` for the best chain at the finalized tip height. pub fn finalized_value_pool(&self) -> ValueBalance { let value_pool_cf = self.db.cf_handle("tip_chain_value_pool").unwrap(); @@ -94,43 +90,31 @@ impl ZebraDb { } impl DiskWriteBatch { - /// Prepare a database batch containing the history tree updates - /// from `finalized.block`, and return it (without actually writing anything). - /// - /// If this method returns an error, it will be propagated, - /// and the batch should not be written to the database. - /// - /// # Errors - /// - /// - Returns any errors from updating the history tree - #[allow(clippy::unwrap_in_result)] - pub fn prepare_history_batch( - &mut self, - db: &DiskDb, - finalized: &SemanticallyVerifiedBlockWithTrees, - ) -> Result<(), BoxError> { - let history_tree_cf = db.cf_handle("history_tree").unwrap(); + // History tree methods - let height = finalized.verified.height; + /// Updates the history tree for the tip, if it is not empty. + pub fn update_history_tree(&mut self, zebra_db: &ZebraDb, tree: &HistoryTree) { + let history_tree_cf = zebra_db.db.cf_handle("history_tree").unwrap(); - // Update the tree in state - let current_tip_height = height - 1; - if let Some(h) = current_tip_height { - self.zs_delete(&history_tree_cf, h); + if let Some(tree) = tree.as_ref().as_ref() { + self.zs_insert(&history_tree_cf, (), tree); } + } - // TODO: if we ever need concurrent read-only access to the history tree, - // store it by `()`, not height. - // Otherwise, the ReadStateService could access a height - // that was just deleted by a concurrent StateService write. - // This requires a database version update. 
- if let Some(history_tree) = finalized.treestate.history_tree.as_ref().as_ref() { - self.zs_insert(&history_tree_cf, height, history_tree); - } + /// Legacy method: Deletes the range of history trees at the given [`Height`]s. + /// Doesn't delete the upper bound. + /// + /// From state format 25.3.0 onwards, the history trees are indexed by an empty key, + /// so this method does nothing. + pub fn delete_range_history_tree(&mut self, zebra_db: &ZebraDb, from: &Height, to: &Height) { + let history_tree_cf = zebra_db.db.cf_handle("history_tree").unwrap(); - Ok(()) + // TODO: convert zs_delete_range() to take std::ops::RangeBounds + self.zs_delete_range(&history_tree_cf, from, to); } + // Value pool methods + /// Prepare a database batch containing the chain value pool update from `finalized.block`, /// and return it (without actually writing anything). /// diff --git a/zebra-state/src/service/finalized_state/zebra_db/metrics.rs b/zebra-state/src/service/finalized_state/zebra_db/metrics.rs index 9f102ec80ed..75b342e2cdc 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/metrics.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/metrics.rs @@ -21,25 +21,9 @@ pub(crate) fn block_precommit_metrics(block: &Block, hash: block::Hash, height: .flat_map(|t| t.outputs().iter()) .count(); - let sprout_nullifier_count = block - .transactions - .iter() - .flat_map(|t| t.sprout_nullifiers()) - .count(); - - let sapling_nullifier_count = block - .transactions - .iter() - .flat_map(|t| t.sapling_nullifiers()) - .count(); - - // Work around a compiler panic (ICE) with flat_map(): - // https://github.com/rust-lang/rust/issues/105044 - let orchard_nullifier_count: usize = block - .transactions - .iter() - .map(|t| t.orchard_nullifiers().count()) - .sum(); + let sprout_nullifier_count = block.sprout_nullifiers().count(); + let sapling_nullifier_count = block.sapling_nullifiers().count(); + let orchard_nullifier_count = block.orchard_nullifiers().count(); 
tracing::debug!( ?hash, @@ -50,7 +34,8 @@ pub(crate) fn block_precommit_metrics(block: &Block, hash: block::Hash, height: sprout_nullifier_count, sapling_nullifier_count, orchard_nullifier_count, - "preparing to commit finalized block" + "preparing to commit finalized {:?}block", + if height.is_min() { "genesis " } else { "" } ); metrics::counter!("state.finalized.block.count", 1); @@ -60,14 +45,7 @@ pub(crate) fn block_precommit_metrics(block: &Block, hash: block::Hash, height: "state.finalized.cumulative.transactions", transaction_count as u64 ); - metrics::counter!( - "state.finalized.cumulative.transparent_prevouts", - transparent_prevout_count as u64 - ); - metrics::counter!( - "state.finalized.cumulative.transparent_newouts", - transparent_newout_count as u64 - ); + metrics::counter!( "state.finalized.cumulative.sprout_nullifiers", sprout_nullifier_count as u64 @@ -80,4 +58,16 @@ pub(crate) fn block_precommit_metrics(block: &Block, hash: block::Hash, height: "state.finalized.cumulative.orchard_nullifiers", orchard_nullifier_count as u64 ); + + // The outputs from the genesis block can't be spent, so we skip them here. 
+ if !height.is_min() { + metrics::counter!( + "state.finalized.cumulative.transparent_prevouts", + transparent_prevout_count as u64 + ); + metrics::counter!( + "state.finalized.cumulative.transparent_newouts", + transparent_newout_count as u64 + ); + } } diff --git a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs index 58d9e43a6c1..a8e76931b76 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs @@ -30,6 +30,7 @@ use crate::{ request::SemanticallyVerifiedBlockWithTrees, service::finalized_state::{ disk_db::{DiskDb, DiskWriteBatch, ReadDisk, WriteDisk}, + disk_format::RawBytes, zebra_db::ZebraDb, }, BoxError, SemanticallyVerifiedBlock, @@ -61,7 +62,7 @@ impl ZebraDb { } /// Returns `true` if the finalized state contains `sprout_anchor`. - #[allow(unused)] + #[allow(dead_code)] pub fn contains_sprout_anchor(&self, sprout_anchor: &sprout::tree::Root) -> bool { let sprout_anchors = self.db.cf_handle("sprout_anchors").unwrap(); self.db.zs_contains(&sprout_anchors, &sprout_anchor) @@ -88,17 +89,9 @@ impl ZebraDb { return Arc::::default(); } - // # Performance - // - // Using `zs_last_key_value()` on this column family significantly reduces sync performance - // (#7618). This is probably because `zs_delete()` is also used on the same column family. - // See the comment in `ZebraDb::history_tree()` for details. - // - // This bug will be fixed by PR #7392, because it changes this column family to update the - // existing key, rather than deleting old keys. let sprout_tree_cf = self.db.cf_handle("sprout_note_commitment_tree").unwrap(); - // # Forwards Compatibility + // # Backwards Compatibility // // This code can read the column family format in 1.2.0 and earlier (tip height key), // and after PR #7392 is merged (empty key). 
The height-based code can be removed when @@ -106,15 +99,12 @@ impl ZebraDb { // // # Concurrency // - // There is only one tree in this column family, which is atomically updated by a block - // write batch (database transaction). If this update runs between the height read and - // the tree read, the height will be wrong, and the tree will be missing. - // That could cause consensus bugs. + // There is only one entry in this column family, which is atomically updated by a block + // write batch (database transaction). If we used a height as the column family tree, + // any updates between reading the tip height and reading the tree could cause panics. // - // See the comment in `ZebraDb::history_tree()` for details. - // - // TODO: this concurrency bug will be permanently fixed in PR #7392, - // by changing the block update to overwrite the tree, rather than deleting it. + // So we use the empty key `()`. Since the key has a constant value, we will always read + // the latest tree. let mut sprout_tree: Option> = self.db.zs_get(&sprout_tree_cf, &()); @@ -147,7 +137,7 @@ impl ZebraDb { /// Returns all the Sprout note commitment trees in the database. /// /// Calling this method can load a lot of data into RAM, and delay block commit transactions. - #[allow(dead_code, clippy::unwrap_in_result)] + #[allow(dead_code)] pub fn sprout_trees_full_map( &self, ) -> HashMap> { @@ -157,6 +147,15 @@ impl ZebraDb { .zs_items_in_range_unordered(&sprout_anchors_handle, ..) } + /// Returns all the Sprout note commitment tip trees. + /// We only store the sprout tree for the tip, so this method is mainly used in tests. + pub fn sprout_trees_full_tip( + &self, + ) -> impl Iterator)> + '_ { + let sprout_trees = self.db.cf_handle("sprout_note_commitment_tree").unwrap(); + self.db.zs_range_iter(&sprout_trees, ..) 
+ } + // # Sapling trees /// Returns the Sapling note commitment tree of the finalized tip or the empty tree if the state @@ -200,7 +199,6 @@ impl ZebraDb { } /// Returns the Sapling note commitment trees in the supplied range, in increasing height order. - #[allow(clippy::unwrap_in_result)] pub fn sapling_tree_by_height_range( &self, range: R, @@ -213,7 +211,6 @@ impl ZebraDb { } /// Returns the Sapling note commitment trees in the reversed range, in decreasing height order. - #[allow(clippy::unwrap_in_result)] pub fn sapling_tree_by_reversed_height_range( &self, range: R, @@ -258,7 +255,6 @@ impl ZebraDb { /// /// This method is specifically designed for the `z_getsubtreesbyindex` state request. /// It might not work for other RPCs or state checks. - #[allow(clippy::unwrap_in_result)] pub fn sapling_subtree_list_by_index_for_rpc( &self, start_index: NoteCommitmentSubtreeIndex, @@ -373,7 +369,6 @@ impl ZebraDb { } /// Returns the Orchard note commitment trees in the supplied range, in increasing height order. - #[allow(clippy::unwrap_in_result)] pub fn orchard_tree_by_height_range( &self, range: R, @@ -386,7 +381,6 @@ impl ZebraDb { } /// Returns the Orchard note commitment trees in the reversed range, in decreasing height order. - #[allow(clippy::unwrap_in_result)] pub fn orchard_tree_by_reversed_height_range( &self, range: R, @@ -431,7 +425,6 @@ impl ZebraDb { /// /// This method is specifically designed for the `z_getsubtreesbyindex` state request. /// It might not work for other RPCs or state checks. 
- #[allow(clippy::unwrap_in_result)] pub fn orchard_subtree_list_by_index_for_rpc( &self, start_index: NoteCommitmentSubtreeIndex, @@ -589,74 +582,112 @@ impl DiskWriteBatch { finalized: &SemanticallyVerifiedBlockWithTrees, prev_note_commitment_trees: Option, ) -> Result<(), BoxError> { - let db = &zebra_db.db; - - let sprout_anchors = db.cf_handle("sprout_anchors").unwrap(); - let sapling_anchors = db.cf_handle("sapling_anchors").unwrap(); - let orchard_anchors = db.cf_handle("orchard_anchors").unwrap(); - - let sprout_tree_cf = db.cf_handle("sprout_note_commitment_tree").unwrap(); - let sapling_tree_cf = db.cf_handle("sapling_note_commitment_tree").unwrap(); - let orchard_tree_cf = db.cf_handle("orchard_note_commitment_tree").unwrap(); - let height = finalized.verified.height; let trees = finalized.treestate.note_commitment_trees.clone(); - // Use the cached values that were previously calculated in parallel. - let sprout_root = trees.sprout.root(); - let sapling_root = trees.sapling.root(); - let orchard_root = trees.orchard.root(); - - // Index the new anchors. - // Note: if the root hasn't changed, we write the same value again. - self.zs_insert(&sprout_anchors, sprout_root, &trees.sprout); - self.zs_insert(&sapling_anchors, sapling_root, ()); - self.zs_insert(&orchard_anchors, orchard_root, ()); - - // Delete the previously stored Sprout note commitment tree. 
- let current_tip_height = height - 1; - if let Some(h) = current_tip_height { - self.zs_delete(&sprout_tree_cf, h); + let prev_sprout_tree = prev_note_commitment_trees.as_ref().map_or_else( + || zebra_db.sprout_tree_for_tip(), + |prev_trees| prev_trees.sprout.clone(), + ); + let prev_sapling_tree = prev_note_commitment_trees.as_ref().map_or_else( + || zebra_db.sapling_tree_for_tip(), + |prev_trees| prev_trees.sapling.clone(), + ); + let prev_orchard_tree = prev_note_commitment_trees.as_ref().map_or_else( + || zebra_db.orchard_tree_for_tip(), + |prev_trees| prev_trees.orchard.clone(), + ); + + // Update the Sprout tree and store its anchor only if it has changed + if height.is_min() || prev_sprout_tree != trees.sprout { + self.update_sprout_tree(zebra_db, &trees.sprout) } - // TODO: if we ever need concurrent read-only access to the sprout tree, - // store it by `()`, not height. Otherwise, the ReadStateService could - // access a height that was just deleted by a concurrent StateService - // write. This requires a database version update. - self.zs_insert(&sprout_tree_cf, height, trees.sprout); - - // Store the Sapling tree only if it is not already present at the previous height. - if height.is_min() - || prev_note_commitment_trees.as_ref().map_or_else( - || zebra_db.sapling_tree_for_tip(), - |trees| trees.sapling.clone(), - ) != trees.sapling - { - self.zs_insert(&sapling_tree_cf, height, trees.sapling); - } + // Store the Sapling tree, anchor, and any new subtrees only if they have changed + if height.is_min() || prev_sapling_tree != trees.sapling { + self.create_sapling_tree(zebra_db, &height, &trees.sapling); - // Store the Orchard tree only if it is not already present at the previous height. 
- if height.is_min() - || prev_note_commitment_trees - .map_or_else(|| zebra_db.orchard_tree_for_tip(), |trees| trees.orchard) - != trees.orchard - { - self.zs_insert(&orchard_tree_cf, height, trees.orchard); + if let Some(subtree) = trees.sapling_subtree { + self.insert_sapling_subtree(zebra_db, &subtree); + } } - if let Some(subtree) = trees.sapling_subtree { - self.insert_sapling_subtree(zebra_db, &subtree); - } + // Store the Orchard tree, anchor, and any new subtrees only if they have changed + if height.is_min() || prev_orchard_tree != trees.orchard { + self.create_orchard_tree(zebra_db, &height, &trees.orchard); - if let Some(subtree) = trees.orchard_subtree { - self.insert_orchard_subtree(zebra_db, &subtree); + if let Some(subtree) = trees.orchard_subtree { + self.insert_orchard_subtree(zebra_db, &subtree); + } } - self.prepare_history_batch(db, finalized) + self.update_history_tree(zebra_db, &finalized.treestate.history_tree); + + Ok(()) + } + + // Sprout tree methods + + /// Updates the Sprout note commitment tree for the tip, and the Sprout anchors. + pub fn update_sprout_tree( + &mut self, + zebra_db: &ZebraDb, + tree: &sprout::tree::NoteCommitmentTree, + ) { + let sprout_anchors = zebra_db.db.cf_handle("sprout_anchors").unwrap(); + let sprout_tree_cf = zebra_db + .db + .cf_handle("sprout_note_commitment_tree") + .unwrap(); + + // Sprout lookups need all previous trees by their anchors. + // The root must be calculated first, so it is cached in the database. + self.zs_insert(&sprout_anchors, tree.root(), tree); + self.zs_insert(&sprout_tree_cf, (), tree); + } + + /// Legacy method: Deletes the range of Sprout note commitment trees at the given [`Height`]s. + /// Doesn't delete anchors from the anchor index. Doesn't delete the upper bound. + /// + /// From state format 25.3.0 onwards, the Sprout trees are indexed by an empty key, + /// so this method does nothing. 
+ pub fn delete_range_sprout_tree(&mut self, zebra_db: &ZebraDb, from: &Height, to: &Height) { + let sprout_tree_cf = zebra_db + .db + .cf_handle("sprout_note_commitment_tree") + .unwrap(); + + // TODO: convert zs_delete_range() to take std::ops::RangeBounds + self.zs_delete_range(&sprout_tree_cf, from, to); + } + + /// Deletes the given Sprout note commitment tree `anchor`. + #[allow(dead_code)] + pub fn delete_sprout_anchor(&mut self, zebra_db: &ZebraDb, anchor: &sprout::tree::Root) { + let sprout_anchors = zebra_db.db.cf_handle("sprout_anchors").unwrap(); + self.zs_delete(&sprout_anchors, anchor); } // Sapling tree methods + /// Inserts or overwrites the Sapling note commitment tree at the given [`Height`], + /// and the Sapling anchors. + pub fn create_sapling_tree( + &mut self, + zebra_db: &ZebraDb, + height: &Height, + tree: &sapling::tree::NoteCommitmentTree, + ) { + let sapling_anchors = zebra_db.db.cf_handle("sapling_anchors").unwrap(); + let sapling_tree_cf = zebra_db + .db + .cf_handle("sapling_note_commitment_tree") + .unwrap(); + + self.zs_insert(&sapling_anchors, tree.root(), ()); + self.zs_insert(&sapling_tree_cf, height, tree); + } + /// Inserts the Sapling note commitment subtree into the batch. pub fn insert_sapling_subtree( &mut self, @@ -679,7 +710,8 @@ impl DiskWriteBatch { self.zs_delete(&sapling_tree_cf, height); } - /// Deletes the range of Sapling note commitment trees at the given [`Height`]s. Doesn't delete the upper bound. + /// Deletes the range of Sapling note commitment trees at the given [`Height`]s. + /// Doesn't delete anchors from the anchor index. Doesn't delete the upper bound. #[allow(dead_code)] pub fn delete_range_sapling_tree(&mut self, zebra_db: &ZebraDb, from: &Height, to: &Height) { let sapling_tree_cf = zebra_db @@ -691,6 +723,13 @@ impl DiskWriteBatch { self.zs_delete_range(&sapling_tree_cf, from, to); } + /// Deletes the given Sapling note commitment tree `anchor`. 
+ #[allow(dead_code)] + pub fn delete_sapling_anchor(&mut self, zebra_db: &ZebraDb, anchor: &sapling::tree::Root) { + let sapling_anchors = zebra_db.db.cf_handle("sapling_anchors").unwrap(); + self.zs_delete(&sapling_anchors, anchor); + } + /// Deletes the range of Sapling subtrees at the given [`NoteCommitmentSubtreeIndex`]es. /// Doesn't delete the upper bound. pub fn delete_range_sapling_subtree( @@ -710,6 +749,24 @@ impl DiskWriteBatch { // Orchard tree methods + /// Inserts or overwrites the Orchard note commitment tree at the given [`Height`], + /// and the Orchard anchors. + pub fn create_orchard_tree( + &mut self, + zebra_db: &ZebraDb, + height: &Height, + tree: &orchard::tree::NoteCommitmentTree, + ) { + let orchard_anchors = zebra_db.db.cf_handle("orchard_anchors").unwrap(); + let orchard_tree_cf = zebra_db + .db + .cf_handle("orchard_note_commitment_tree") + .unwrap(); + + self.zs_insert(&orchard_anchors, tree.root(), ()); + self.zs_insert(&orchard_tree_cf, height, tree); + } + /// Inserts the Orchard note commitment subtree into the batch. pub fn insert_orchard_subtree( &mut self, @@ -732,7 +789,8 @@ impl DiskWriteBatch { self.zs_delete(&orchard_tree_cf, height); } - /// Deletes the range of Orchard note commitment trees at the given [`Height`]s. Doesn't delete the upper bound. + /// Deletes the range of Orchard note commitment trees at the given [`Height`]s. + /// Doesn't delete anchors from the anchor index. Doesn't delete the upper bound. #[allow(dead_code)] pub fn delete_range_orchard_tree(&mut self, zebra_db: &ZebraDb, from: &Height, to: &Height) { let orchard_tree_cf = zebra_db @@ -744,6 +802,13 @@ impl DiskWriteBatch { self.zs_delete_range(&orchard_tree_cf, from, to); } + /// Deletes the given Orchard note commitment tree `anchor`. 
+ #[allow(dead_code)] + pub fn delete_orchard_anchor(&mut self, zebra_db: &ZebraDb, anchor: &orchard::tree::Root) { + let orchard_anchors = zebra_db.db.cf_handle("orchard_anchors").unwrap(); + self.zs_delete(&orchard_anchors, anchor); + } + /// Deletes the range of Orchard subtrees at the given [`NoteCommitmentSubtreeIndex`]es. /// Doesn't delete the upper bound. pub fn delete_range_orchard_subtree( diff --git a/zebra-state/src/tests/setup.rs b/zebra-state/src/tests/setup.rs index 296ee10a0e1..1432e72f368 100644 --- a/zebra-state/src/tests/setup.rs +++ b/zebra-state/src/tests/setup.rs @@ -92,9 +92,11 @@ pub(crate) fn new_state_with_mainnet_genesis( let config = Config::ephemeral(); let network = Mainnet; - let mut finalized_state = FinalizedState::new( + let mut finalized_state = FinalizedState::new_with_debug( &config, network, + // The tests that use this setup function also commit invalid blocks to the state. + true, #[cfg(feature = "elasticsearch")] None, ); diff --git a/zebra-test/src/command.rs b/zebra-test/src/command.rs index d65d438307f..18a529fe32d 100644 --- a/zebra-test/src/command.rs +++ b/zebra-test/src/command.rs @@ -1,6 +1,7 @@ //! Launching test commands for Zebra integration and acceptance tests. use std::{ + collections::HashSet, convert::Infallible as NoDir, fmt::{self, Debug, Write as _}, io::{BufRead, BufReader, ErrorKind, Read, Write as _}, @@ -25,7 +26,7 @@ mod arguments; pub mod to_regex; pub use self::arguments::Arguments; -use self::to_regex::{CollectRegexSet, ToRegexSet}; +use self::to_regex::{CollectRegexSet, RegexSetExt, ToRegexSet}; /// A super-trait for [`Iterator`] + [`Debug`]. pub trait IteratorDebug: Iterator + Debug {} @@ -781,7 +782,7 @@ impl TestChild { self } - /// Checks each line of the child's stdout against `success_regex`, + /// Checks each line of the child's stdout against any regex in `success_regex`, /// and returns the first matching line. Prints all stdout lines. 
/// /// Kills the child on error, or after the configured timeout has elapsed. @@ -815,7 +816,7 @@ impl TestChild { } } - /// Checks each line of the child's stderr against `success_regex`, + /// Checks each line of the child's stderr against any regex in `success_regex`, /// and returns the first matching line. Prints all stderr lines to stdout. /// /// Kills the child on error, or after the configured timeout has elapsed. @@ -847,6 +848,96 @@ impl TestChild { } } + /// Checks each line of the child's stdout, until it finds every regex in `unordered_regexes`, + /// and returns all lines matched by any regex, until each regex has been matched at least once. + /// If the output finishes or the command times out before all regexes are matched, returns an error with + /// a list of unmatched regexes. Prints all stdout lines. + /// + /// Kills the child on error, or after the configured timeout has elapsed. + /// See [`Self::expect_line_matching_regex_set`] for details. + // + // TODO: these methods could block if stderr is full and stdout is waiting for stderr to be read + #[instrument(skip(self))] + #[allow(clippy::unwrap_in_result)] + pub fn expect_stdout_line_matches_all_unordered( + &mut self, + unordered_regexes: RegexList, + ) -> Result> + where + RegexList: IntoIterator + Debug, + RegexList::Item: ToRegexSet, + { + let regex_list = unordered_regexes.collect_regex_set()?; + + let mut unmatched_indexes: HashSet = (0..regex_list.len()).collect(); + let mut matched_lines = Vec::new(); + + while !unmatched_indexes.is_empty() { + let line = self + .expect_stdout_line_matches(regex_list.clone()) + .map_err(|err| { + let unmatched_regexes = regex_list.patterns_for_indexes(&unmatched_indexes); + + err.with_section(|| { + format!("{unmatched_regexes:#?}").header("Unmatched regexes:") + }) + .with_section(|| format!("{matched_lines:#?}").header("Matched lines:")) + })?; + + let matched_indices: HashSet = regex_list.matches(&line).iter().collect(); + unmatched_indexes = 
&unmatched_indexes - &matched_indices; + + matched_lines.push(line); + } + + Ok(matched_lines) + } + + /// Checks each line of the child's stderr, until it finds every regex in `unordered_regexes`, + /// and returns all lines matched by any regex, until each regex has been matched at least once. + /// If the output finishes or the command times out before all regexes are matched, returns an error with + /// a list of unmatched regexes. Prints all stderr lines. + /// + /// Kills the child on error, or after the configured timeout has elapsed. + /// See [`Self::expect_line_matching_regex_set`] for details. + // + // TODO: these methods could block if stdout is full and stderr is waiting for stdout to be read + #[instrument(skip(self))] + #[allow(clippy::unwrap_in_result)] + pub fn expect_stderr_line_matches_all_unordered( + &mut self, + unordered_regexes: RegexList, + ) -> Result> + where + RegexList: IntoIterator + Debug, + RegexList::Item: ToRegexSet, + { + let regex_list = unordered_regexes.collect_regex_set()?; + + let mut unmatched_indexes: HashSet = (0..regex_list.len()).collect(); + let mut matched_lines = Vec::new(); + + while !unmatched_indexes.is_empty() { + let line = self + .expect_stderr_line_matches(regex_list.clone()) + .map_err(|err| { + let unmatched_regexes = regex_list.patterns_for_indexes(&unmatched_indexes); + + err.with_section(|| { + format!("{unmatched_regexes:#?}").header("Unmatched regexes:") + }) + .with_section(|| format!("{matched_lines:#?}").header("Matched lines:")) + })?; + + let matched_indices: HashSet = regex_list.matches(&line).iter().collect(); + unmatched_indexes = &unmatched_indexes - &matched_indices; + + matched_lines.push(line); + } + + Ok(matched_lines) + } + /// Checks each line of the child's stdout against `success_regex`, /// and returns the first matching line. Does not print any output. 
/// diff --git a/zebra-test/src/command/to_regex.rs b/zebra-test/src/command/to_regex.rs index 66e00c874e0..0d362394f1f 100644 --- a/zebra-test/src/command/to_regex.rs +++ b/zebra-test/src/command/to_regex.rs @@ -1,6 +1,6 @@ //! Convenience traits for converting to [`Regex`] and [`RegexSet`]. -use std::iter; +use std::{collections::HashSet, iter}; use itertools::Itertools; use regex::{Error, Regex, RegexBuilder, RegexSet, RegexSetBuilder}; @@ -151,3 +151,20 @@ where RegexSet::new(regexes) } } + +/// A trait for getting additional information from a [`RegexSet`]. +pub trait RegexSetExt { + /// Returns the regex patterns for the supplied `indexes`. + fn patterns_for_indexes(&self, indexes: &HashSet) -> Vec; +} + +impl RegexSetExt for RegexSet { + fn patterns_for_indexes(&self, indexes: &HashSet) -> Vec { + self.patterns() + .iter() + .enumerate() + .filter(|(index, _regex)| indexes.contains(index)) + .map(|(_index, regex)| regex.to_string()) + .collect() + } +} diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 0c40df31a50..c51bdc98949 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -190,7 +190,9 @@ use common::{ test_type::TestType::{self, *}, }; -use crate::common::cached_state::{wait_for_state_version_message, wait_for_state_version_upgrade}; +use crate::common::cached_state::{ + wait_for_state_version_message, wait_for_state_version_upgrade, DATABASE_FORMAT_UPGRADE_IS_LONG, +}; /// The maximum amount of time that we allow the creation of a future to block the `tokio` executor. /// @@ -1847,18 +1849,36 @@ fn lightwalletd_integration_test(test_type: TestType) -> Result<()> { zebrad.expect_stdout_line_matches("loaded Zebra state cache .*tip.*=.*None")?; } - // Launch lightwalletd, if needed - let lightwalletd_and_port = if test_type.launches_lightwalletd() { + // Wait for the state to upgrade and the RPC port, if the upgrade is short. 
+ // + // If incompletely upgraded states get written to the CI cache, + // change DATABASE_FORMAT_UPGRADE_IS_LONG to true. + if test_type.launches_lightwalletd() && !DATABASE_FORMAT_UPGRADE_IS_LONG { tracing::info!( ?test_type, ?zebra_rpc_address, "waiting for zebrad to open its RPC port..." ); - zebrad.expect_stdout_line_matches(format!( - "Opened RPC endpoint at {}", - zebra_rpc_address.expect("lightwalletd test must have RPC port") - ))?; + wait_for_state_version_upgrade( + &mut zebrad, + &state_version_message, + database_format_version_in_code(), + [format!( + "Opened RPC endpoint at {}", + zebra_rpc_address.expect("lightwalletd test must have RPC port") + )], + )?; + } else { + wait_for_state_version_upgrade( + &mut zebrad, + &state_version_message, + database_format_version_in_code(), + None, + )?; + } + // Launch lightwalletd, if needed + let lightwalletd_and_port = if test_type.launches_lightwalletd() { tracing::info!( ?zebra_rpc_address, "launching lightwalletd connected to zebrad", @@ -1957,17 +1977,17 @@ fn lightwalletd_integration_test(test_type: TestType) -> Result<()> { use_internet_connection, )?; - // Before we write a cached state image, wait for a database upgrade. - // - // TODO: this line will hang if the state upgrade finishes before zebra is synced. - // But that is unlikely with the 25.2 upgrade, because it takes 20+ minutes. - // If it happens for a later upgrade, this code can be moved earlier in the test, - // as long as all the cached states are version 25.2.2 or later. - wait_for_state_version_upgrade( - &mut zebrad, - &state_version_message, - database_format_version_in_code(), - )?; + // Wait for the state to upgrade, if the upgrade is long. + // If this line hangs, change DATABASE_FORMAT_UPGRADE_IS_LONG to false, + // or combine "wait for sync" with "wait for state version upgrade". 
+ if DATABASE_FORMAT_UPGRADE_IS_LONG { + wait_for_state_version_upgrade( + &mut zebrad, + &state_version_message, + database_format_version_in_code(), + None, + )?; + } (zebrad, Some(lightwalletd)) } @@ -1984,17 +2004,16 @@ fn lightwalletd_integration_test(test_type: TestType) -> Result<()> { tracing::info!(?test_type, "waiting for zebrad to sync to the tip"); zebrad.expect_stdout_line_matches(SYNC_FINISHED_REGEX)?; - // Before we write a cached state image, wait for a database upgrade. - // - // TODO: this line will hang if the state upgrade finishes before zebra is synced. - // But that is unlikely with the 25.2 upgrade, because it takes 20+ minutes. - // If it happens for a later upgrade, this code can be moved earlier in the test, - // as long as all the cached states are version 25.2.2 or later. - wait_for_state_version_upgrade( - &mut zebrad, - &state_version_message, - database_format_version_in_code(), - )?; + // Wait for the state to upgrade, if the upgrade is long. + // If this line hangs, change DATABASE_FORMAT_UPGRADE_IS_LONG to false. + if DATABASE_FORMAT_UPGRADE_IS_LONG { + wait_for_state_version_upgrade( + &mut zebrad, + &state_version_message, + database_format_version_in_code(), + None, + )?; + } (zebrad, None) } @@ -2719,6 +2738,15 @@ async fn fully_synced_rpc_z_getsubtreesbyindex_snapshot_test() -> Result<()> { // Store the state version message so we can wait for the upgrade later if needed. let state_version_message = wait_for_state_version_message(&mut zebrad)?; + // It doesn't matter how long the state version upgrade takes, + // because the sync finished regex is repeated every minute. + wait_for_state_version_upgrade( + &mut zebrad, + &state_version_message, + database_format_version_in_code(), + None, + )?; + // Wait for zebrad to load the full cached blockchain. 
zebrad.expect_stdout_line_matches(SYNC_FINISHED_REGEX)?; @@ -2758,18 +2786,6 @@ async fn fully_synced_rpc_z_getsubtreesbyindex_snapshot_test() -> Result<()> { ), ]; - // Before we write a cached state image, wait for a database upgrade. - // - // TODO: this line will hang if the state upgrade finishes before zebra is synced. - // But that is unlikely with the 25.2 upgrade, because it takes 20+ minutes. - // If it happens for a later upgrade, this code can be moved earlier in the test, - // as long as all the cached states are version 25.2.2 or later. - wait_for_state_version_upgrade( - &mut zebrad, - &state_version_message, - database_format_version_in_code(), - )?; - for i in zcashd_test_vectors { let res = client.call("z_getsubtreesbyindex", i.1).await?; let body = res.bytes().await; diff --git a/zebrad/tests/common/cached_state.rs b/zebrad/tests/common/cached_state.rs index 588d889b562..b893024c9ad 100644 --- a/zebrad/tests/common/cached_state.rs +++ b/zebrad/tests/common/cached_state.rs @@ -6,6 +6,7 @@ #![allow(dead_code)] use std::{ + iter, path::{Path, PathBuf}, time::Duration, }; @@ -39,6 +40,16 @@ pub const ZEBRA_CACHED_STATE_DIR: &str = "ZEBRA_CACHED_STATE_DIR"; /// but long enough that it doesn't impact performance. pub const DATABASE_FORMAT_CHECK_INTERVAL: Duration = Duration::from_secs(5 * 60); +/// Is the current state version upgrade longer than the typical CI update sync time? +/// This is the time it takes Zebra to sync from a previously cached state to the current tip. +/// +/// If this is set to `false`, but the state upgrades finish after zebrad is synced, +/// incomplete upgrades will be written to the cached state. +/// +/// If this is set to `true`, but the state upgrades finish before zebrad is synced, +/// some tests will hang. +pub const DATABASE_FORMAT_UPGRADE_IS_LONG: bool = false; + /// Type alias for a boxed state service. 
pub type BoxStateService = BoxService; @@ -64,6 +75,7 @@ pub fn wait_for_state_version_message(zebrad: &mut TestChild) -> Result( zebrad: &mut TestChild, state_version_message: &str, required_version: Version, + extra_required_log_regexes: impl IntoIterator + std::fmt::Debug, ) -> Result<()> { if state_version_message.contains("launching upgrade task") { tracing::info!( zebrad = ?zebrad.cmd, %state_version_message, %required_version, + ?extra_required_log_regexes, "waiting for zebrad state upgrade..." ); - let upgrade_message = zebrad.expect_stdout_line_matches(&format!( + let upgrade_pattern = format!( "marked database format as upgraded.*format_upgrade_version.*=.*{required_version}" - ))?; + ); + let extra_required_log_regexes = extra_required_log_regexes.into_iter(); + let required_logs: Vec = iter::once(upgrade_pattern) + .chain(extra_required_log_regexes) + .collect(); + + let upgrade_messages = zebrad.expect_stdout_line_matches_all_unordered(&required_logs)?; tracing::info!( zebrad = ?zebrad.cmd, %state_version_message, %required_version, - %upgrade_message, + ?required_logs, + ?upgrade_messages, "zebrad state has been upgraded" ); } diff --git a/zebrad/tests/common/checkpoints.rs b/zebrad/tests/common/checkpoints.rs index c43b5ca7e92..a1aa1dbb43c 100644 --- a/zebrad/tests/common/checkpoints.rs +++ b/zebrad/tests/common/checkpoints.rs @@ -87,12 +87,19 @@ pub async fn run(network: Network) -> Result<()> { // Before we write a cached state image, wait for a database upgrade. // + // It is ok if the logs are in the wrong order and the test sometimes fails, + // because testnet is unreliable anyway. + // // TODO: this line will hang if the state upgrade is slower than the RPC server spawn. // But that is unlikely, because both 25.1 and 25.2 are quick on testnet. + // + // TODO: combine this check with the CHECKPOINT_VERIFIER_REGEX and RPC endpoint checks. + // This is tricky because we need to get the last checkpoint log. 
wait_for_state_version_upgrade( &mut zebrad, &state_version_message, database_format_version_in_code(), + None, )?; } @@ -105,6 +112,7 @@ pub async fn run(network: Network) -> Result<()> { ); let last_checkpoint = zebrad.expect_stdout_line_matches(CHECKPOINT_VERIFIER_REGEX)?; + // TODO: do this with a regex? let (_prefix, last_checkpoint) = last_checkpoint .split_once("max_checkpoint_height") diff --git a/zebrad/tests/common/lightwalletd/wallet_grpc_test.rs b/zebrad/tests/common/lightwalletd/wallet_grpc_test.rs index 2d7d2d06b87..3030f1c63ba 100644 --- a/zebrad/tests/common/lightwalletd/wallet_grpc_test.rs +++ b/zebrad/tests/common/lightwalletd/wallet_grpc_test.rs @@ -43,10 +43,13 @@ use zebra_chain::{ parameters::NetworkUpgrade::{Nu5, Sapling}, serialization::ZcashDeserializeInto, }; -use zebra_state::latest_version_for_adding_subtrees; +use zebra_state::database_format_version_in_code; use crate::common::{ - cached_state::{wait_for_state_version_message, wait_for_state_version_upgrade}, + cached_state::{ + wait_for_state_version_message, wait_for_state_version_upgrade, + DATABASE_FORMAT_UPGRADE_IS_LONG, + }, launch::spawn_zebrad_for_rpc, lightwalletd::{ can_spawn_lightwalletd_for_rpc, spawn_lightwalletd_for_rpc, @@ -107,7 +110,22 @@ pub async fn run() -> Result<()> { ?zebra_rpc_address, "launched zebrad, waiting for zebrad to open its RPC port..." ); - zebrad.expect_stdout_line_matches(&format!("Opened RPC endpoint at {zebra_rpc_address}"))?; + + // Wait for the state to upgrade, if the upgrade is short. + // + // If incompletely upgraded states get written to the CI cache, + // change DATABASE_FORMAT_UPGRADE_IS_LONG to true. + // + // If this line hangs, move it before the RPC port check. + // (The RPC port is usually much faster than even a quick state upgrade.) 
+ if !DATABASE_FORMAT_UPGRADE_IS_LONG { + wait_for_state_version_upgrade( + &mut zebrad, + &state_version_message, + database_format_version_in_code(), + [format!("Opened RPC endpoint at {zebra_rpc_address}")], + )?; + } tracing::info!( ?zebra_rpc_address, @@ -135,6 +153,17 @@ pub async fn run() -> Result<()> { use_internet_connection, )?; + // Wait for the state to upgrade, if the upgrade is long. + // If this line hangs, change DATABASE_FORMAT_UPGRADE_IS_LONG to false. + if DATABASE_FORMAT_UPGRADE_IS_LONG { + wait_for_state_version_upgrade( + &mut zebrad, + &state_version_message, + database_format_version_in_code(), + None, + )?; + } + tracing::info!( ?lightwalletd_rpc_port, "connecting gRPC client to lightwalletd...", @@ -384,18 +413,6 @@ pub async fn run() -> Result<()> { zebrad::application::user_agent() ); - // Before we call `z_getsubtreesbyindex`, we might need to wait for a database upgrade. - // - // TODO: this line will hang if the state upgrade finishes before the subtree tests start. - // But that is unlikely with the 25.2 upgrade, because it takes 20+ minutes. - // If it happens for a later upgrade, this code can be moved earlier in the test, - // as long as all the cached states are version 25.2.2 or later. - wait_for_state_version_upgrade( - &mut zebrad, - &state_version_message, - latest_version_for_adding_subtrees(), - )?; - // Call `z_getsubtreesbyindex` separately for... // ... Sapling. From 56e48eea2f5eaeac3dbe4e7aee19023301b3c1b0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 19 Oct 2023 17:05:38 +0000 Subject: [PATCH 18/18] build(deps): bump actions/checkout from 4.1.0 to 4.1.1 (#7766) Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.0 to 4.1.1. 
- [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v4.1.0...v4.1.1) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/cd-deploy-nodes-gcp.yml | 4 ++-- .github/workflows/chore-delete-gcp-resources.yml | 4 ++-- .github/workflows/ci-build-crates.patch.yml | 2 +- .github/workflows/ci-build-crates.yml | 4 ++-- .github/workflows/ci-coverage.yml | 2 +- .github/workflows/ci-lint.yml | 10 +++++----- .github/workflows/ci-unit-tests-os.yml | 10 +++++----- .github/workflows/docs-deploy-firebase.yml | 6 +++--- .github/workflows/docs-dockerhub-description.yml | 2 +- .github/workflows/manual-zcashd-deploy.yml | 2 +- .github/workflows/release-crates-io.yml | 2 +- .github/workflows/sub-build-docker-image.yml | 2 +- .github/workflows/sub-build-lightwalletd.yml | 4 ++-- .github/workflows/sub-deploy-integration-tests-gcp.yml | 6 +++--- .github/workflows/sub-find-cached-disks.yml | 2 +- .github/workflows/sub-test-zebra-config.yml | 2 +- 16 files changed, 32 insertions(+), 32 deletions(-) diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index 0b167ceb63c..bf6b9cb5842 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -181,7 +181,7 @@ jobs: if: ${{ !cancelled() && !failure() && ((github.event_name == 'push' && github.ref_name == 'main') || github.event_name == 'release') }} steps: - - uses: actions/checkout@v4.1.0 + - uses: actions/checkout@v4.1.1 with: persist-credentials: false @@ -283,7 +283,7 @@ jobs: if: github.event_name == 'workflow_dispatch' steps: - - uses: actions/checkout@v4.1.0 + - uses: 
actions/checkout@v4.1.1 with: persist-credentials: false diff --git a/.github/workflows/chore-delete-gcp-resources.yml b/.github/workflows/chore-delete-gcp-resources.yml index 439ce8a2ec0..19ed00db56d 100644 --- a/.github/workflows/chore-delete-gcp-resources.yml +++ b/.github/workflows/chore-delete-gcp-resources.yml @@ -30,7 +30,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v4.1.0 + - uses: actions/checkout@v4.1.1 with: persist-credentials: false @@ -239,7 +239,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v4.1.0 + - uses: actions/checkout@v4.1.1 with: persist-credentials: false diff --git a/.github/workflows/ci-build-crates.patch.yml b/.github/workflows/ci-build-crates.patch.yml index b99adfc4cfe..e173752dd06 100644 --- a/.github/workflows/ci-build-crates.patch.yml +++ b/.github/workflows/ci-build-crates.patch.yml @@ -23,7 +23,7 @@ jobs: outputs: matrix: ${{ steps.set-matrix.outputs.matrix }} steps: - - uses: actions/checkout@v4.1.0 + - uses: actions/checkout@v4.1.1 # Setup Rust with stable toolchain and minimal profile - name: Setup Rust diff --git a/.github/workflows/ci-build-crates.yml b/.github/workflows/ci-build-crates.yml index 08244bd804f..77ea4b44e2c 100644 --- a/.github/workflows/ci-build-crates.yml +++ b/.github/workflows/ci-build-crates.yml @@ -50,7 +50,7 @@ jobs: outputs: matrix: ${{ steps.set-matrix.outputs.matrix }} steps: - - uses: actions/checkout@v4.1.0 + - uses: actions/checkout@v4.1.1 - uses: r7kamura/rust-problem-matchers@v1.4.0 # Setup Rust with stable toolchain and minimal profile @@ -109,7 +109,7 @@ jobs: matrix: ${{ fromJson(needs.matrix.outputs.matrix) }} steps: - - uses: actions/checkout@v4.1.0 + - uses: actions/checkout@v4.1.1 with: persist-credentials: false - uses: r7kamura/rust-problem-matchers@v1.4.0 diff --git a/.github/workflows/ci-coverage.yml b/.github/workflows/ci-coverage.yml index 0c913be1ad0..aea08fbba4f 100644 --- a/.github/workflows/ci-coverage.yml +++ 
b/.github/workflows/ci-coverage.yml @@ -57,7 +57,7 @@ jobs: runs-on: ubuntu-latest-xl steps: - - uses: actions/checkout@v4.1.0 + - uses: actions/checkout@v4.1.1 with: persist-credentials: false diff --git a/.github/workflows/ci-lint.yml b/.github/workflows/ci-lint.yml index 84f7d9f3abc..02881a0280e 100644 --- a/.github/workflows/ci-lint.yml +++ b/.github/workflows/ci-lint.yml @@ -30,7 +30,7 @@ jobs: rust: ${{ steps.changed-files-rust.outputs.any_changed == 'true' }} workflows: ${{ steps.changed-files-workflows.outputs.any_changed == 'true' }} steps: - - uses: actions/checkout@v4.1.0 + - uses: actions/checkout@v4.1.1 with: persist-credentials: false fetch-depth: 0 @@ -62,7 +62,7 @@ jobs: if: ${{ needs.changed-files.outputs.rust == 'true' }} steps: - - uses: actions/checkout@v4.1.0 + - uses: actions/checkout@v4.1.1 with: persist-credentials: false @@ -112,7 +112,7 @@ jobs: if: ${{ needs.changed-files.outputs.rust == 'true' }} steps: - - uses: actions/checkout@v4.1.0 + - uses: actions/checkout@v4.1.1 with: persist-credentials: false - uses: r7kamura/rust-problem-matchers@v1.4.0 @@ -142,7 +142,7 @@ jobs: needs: changed-files if: ${{ needs.changed-files.outputs.workflows == 'true' }} steps: - - uses: actions/checkout@v4.1.0 + - uses: actions/checkout@v4.1.1 - name: actionlint uses: reviewdog/action-actionlint@v1.39.1 with: @@ -155,7 +155,7 @@ jobs: runs-on: ubuntu-latest needs: changed-files steps: - - uses: actions/checkout@v4.1.0 + - uses: actions/checkout@v4.1.1 - uses: plettich/action-codespell@master with: github_token: ${{ secrets.github_token }} diff --git a/.github/workflows/ci-unit-tests-os.yml b/.github/workflows/ci-unit-tests-os.yml index 00594afacd5..642c9b8119f 100644 --- a/.github/workflows/ci-unit-tests-os.yml +++ b/.github/workflows/ci-unit-tests-os.yml @@ -95,7 +95,7 @@ jobs: features: " --features getblocktemplate-rpcs" steps: - - uses: actions/checkout@v4.1.0 + - uses: actions/checkout@v4.1.1 with: persist-credentials: false - uses: 
r7kamura/rust-problem-matchers@v1.4.0 @@ -209,7 +209,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4.1.0 + - uses: actions/checkout@v4.1.1 with: persist-credentials: false - uses: r7kamura/rust-problem-matchers@v1.4.0 @@ -231,7 +231,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4.1.0 + - uses: actions/checkout@v4.1.1 with: persist-credentials: false - uses: r7kamura/rust-problem-matchers@v1.4.0 @@ -274,7 +274,7 @@ jobs: continue-on-error: ${{ matrix.checks == 'advisories' }} steps: - - uses: actions/checkout@v4.1.0 + - uses: actions/checkout@v4.1.1 with: persist-credentials: false - uses: r7kamura/rust-problem-matchers@v1.4.0 @@ -295,7 +295,7 @@ jobs: steps: - name: Checkout git repository - uses: actions/checkout@v4.1.0 + uses: actions/checkout@v4.1.1 with: persist-credentials: false - uses: r7kamura/rust-problem-matchers@v1.4.0 diff --git a/.github/workflows/docs-deploy-firebase.yml b/.github/workflows/docs-deploy-firebase.yml index 078c2e6a1fe..806211131c1 100644 --- a/.github/workflows/docs-deploy-firebase.yml +++ b/.github/workflows/docs-deploy-firebase.yml @@ -72,7 +72,7 @@ jobs: pull-requests: write steps: - name: Checkout the source code - uses: actions/checkout@v4.1.0 + uses: actions/checkout@v4.1.1 with: persist-credentials: false @@ -125,7 +125,7 @@ jobs: pull-requests: write steps: - name: Checkout the source code - uses: actions/checkout@v4.1.0 + uses: actions/checkout@v4.1.1 with: persist-credentials: false @@ -181,7 +181,7 @@ jobs: pull-requests: write steps: - name: Checkout the source code - uses: actions/checkout@v4.1.0 + uses: actions/checkout@v4.1.1 with: persist-credentials: false diff --git a/.github/workflows/docs-dockerhub-description.yml b/.github/workflows/docs-dockerhub-description.yml index 275a0e7b5fd..e20b126646f 100644 --- a/.github/workflows/docs-dockerhub-description.yml +++ b/.github/workflows/docs-dockerhub-description.yml @@ -17,7 +17,7 @@ jobs: dockerHubDescription: runs-on: 
ubuntu-latest steps: - - uses: actions/checkout@v4.1.0 + - uses: actions/checkout@v4.1.1 with: persist-credentials: false diff --git a/.github/workflows/manual-zcashd-deploy.yml b/.github/workflows/manual-zcashd-deploy.yml index 5ea57ef7652..11ace8f39e1 100644 --- a/.github/workflows/manual-zcashd-deploy.yml +++ b/.github/workflows/manual-zcashd-deploy.yml @@ -22,7 +22,7 @@ jobs: id-token: 'write' steps: - - uses: actions/checkout@v4.1.0 + - uses: actions/checkout@v4.1.1 with: persist-credentials: false diff --git a/.github/workflows/release-crates-io.yml b/.github/workflows/release-crates-io.yml index 860f3ba3cb9..8546ee774db 100644 --- a/.github/workflows/release-crates-io.yml +++ b/.github/workflows/release-crates-io.yml @@ -70,7 +70,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.4.0 - name: Checkout git repository - uses: actions/checkout@v4.1.0 + uses: actions/checkout@v4.1.1 with: persist-credentials: false diff --git a/.github/workflows/sub-build-docker-image.yml b/.github/workflows/sub-build-docker-image.yml index c2988823cfd..f57e74e9415 100644 --- a/.github/workflows/sub-build-docker-image.yml +++ b/.github/workflows/sub-build-docker-image.yml @@ -61,7 +61,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v4.1.0 + - uses: actions/checkout@v4.1.1 with: persist-credentials: false - uses: r7kamura/rust-problem-matchers@v1.4.0 diff --git a/.github/workflows/sub-build-lightwalletd.yml b/.github/workflows/sub-build-lightwalletd.yml index adce75ed7b7..513b4bd1cd9 100644 --- a/.github/workflows/sub-build-lightwalletd.yml +++ b/.github/workflows/sub-build-lightwalletd.yml @@ -56,14 +56,14 @@ jobs: id-token: 'write' steps: - - uses: actions/checkout@v4.1.0 + - uses: actions/checkout@v4.1.1 with: # Note: check service.proto when modifying lightwalletd repo repository: zcash/lightwalletd ref: 'master' persist-credentials: false - - uses: actions/checkout@v4.1.0 + - uses: actions/checkout@v4.1.1 with: path: zebra 
persist-credentials: false diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index 336bbe6b360..4b84696df25 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -118,7 +118,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v4.1.0 + - uses: actions/checkout@v4.1.1 with: persist-credentials: false fetch-depth: '2' @@ -455,7 +455,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v4.1.0 + - uses: actions/checkout@v4.1.1 with: persist-credentials: false fetch-depth: '2' @@ -762,7 +762,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v4.1.0 + - uses: actions/checkout@v4.1.1 with: persist-credentials: false fetch-depth: '2' diff --git a/.github/workflows/sub-find-cached-disks.yml b/.github/workflows/sub-find-cached-disks.yml index 03181c18711..3d0182c45a8 100644 --- a/.github/workflows/sub-find-cached-disks.yml +++ b/.github/workflows/sub-find-cached-disks.yml @@ -30,7 +30,7 @@ jobs: contents: 'read' id-token: 'write' steps: - - uses: actions/checkout@v4.1.0 + - uses: actions/checkout@v4.1.1 with: persist-credentials: false fetch-depth: 0 diff --git a/.github/workflows/sub-test-zebra-config.yml b/.github/workflows/sub-test-zebra-config.yml index d8e856f0748..a7288d5f733 100644 --- a/.github/workflows/sub-test-zebra-config.yml +++ b/.github/workflows/sub-test-zebra-config.yml @@ -34,7 +34,7 @@ jobs: timeout-minutes: 15 runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4.1.0 + - uses: actions/checkout@v4.1.1 with: persist-credentials: false