From c1415ed9cee2f8b68433da025e205d0da7912c47 Mon Sep 17 00:00:00 2001 From: Lazy Nina <81658138+lazynina@users.noreply.github.com> Date: Mon, 3 Apr 2023 14:26:19 -0600 Subject: [PATCH] bump go version to 1.20 (#490) * bump go version to 1.20 * run gofmt --- .github/workflows/ci.yml | 4 +- .gitignore | 1 + desohash/sha3m/doc.go | 6 +-- go.mod | 2 +- integration_testing/blocksync_test.go | 32 +++++++------- integration_testing/connection_bridge.go | 7 ++- integration_testing/hypersync_test.go | 46 +++++++++---------- integration_testing/txindex_test.go | 10 ++--- lib/block_view_new_message.go | 10 ++--- lib/block_view_test.go | 10 +++-- lib/blockchain.go | 39 +++++++++-------- lib/constants.go | 56 ++++++++++++------------ lib/mempool.go | 38 ++++++++-------- lib/miner.go | 24 +++++----- lib/snapshot.go | 3 +- lib/txindex.go | 4 +- lib/varint.go | 14 +++--- 17 files changed, 158 insertions(+), 148 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5b8b78dec..76f49729b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -15,7 +15,7 @@ jobs: - name: Install Go uses: actions/setup-go@v2 with: - go-version: "1.18" + go-version: "1.20" - name: Checkout branch uses: actions/checkout@v3 @@ -52,7 +52,7 @@ jobs: - name: Install Go uses: actions/setup-go@v2 with: - go-version: "1.18" + go-version: "1.20" - name: Checkout branch uses: actions/checkout@v3 diff --git a/.gitignore b/.gitignore index d0069daaf..c7b1bb1aa 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,4 @@ backend .idea local_scripts/* +lib/mem.log diff --git a/desohash/sha3m/doc.go b/desohash/sha3m/doc.go index afa097ec2..94bdca5c7 100644 --- a/desohash/sha3m/doc.go +++ b/desohash/sha3m/doc.go @@ -4,15 +4,15 @@ // Package sha3m implements the SHA-3 fixed-output-length hash functions with modifications for DeSo proof-of-work // -// Guidance +// # Guidance // -// Security strengths +// # Security strengths // // The SHA3-x (x equals 224, 256, 384, or 512) functions have a security // strength against preimage attacks of x bits. Since they only produce "x" // bits of output, their collision-resistance is only "x/2" bits. // -// The sponge construction +// # The sponge construction // // A sponge builds a pseudo-random function from a public pseudo-random // permutation, by applying the permutation to a state of "rate + capacity" diff --git a/go.mod b/go.mod index 2582db52e..1cbd55b30 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/deso-protocol/core -go 1.18 +go 1.20 require ( github.com/DataDog/datadog-go v4.5.0+incompatible diff --git a/integration_testing/blocksync_test.go b/integration_testing/blocksync_test.go index af1bc3637..8be96d735 100644 --- a/integration_testing/blocksync_test.go +++ b/integration_testing/blocksync_test.go @@ -10,11 +10,11 @@ import ( ) // TestSimpleBlockSync test if a node can successfully sync from another node: -// 1. Spawn two nodes node1, node2 with max block height of MaxSyncBlockHeight blocks. -// 2. node1 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator. -// 3. bridge node1 and node2 -// 4. node2 syncs MaxSyncBlockHeight blocks from node1. -// 5. compare node1 db matches node2 db. +// 1. Spawn two nodes node1, node2 with max block height of MaxSyncBlockHeight blocks. +// 2. node1 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator. +// 3. bridge node1 and node2 +// 4. node2 syncs MaxSyncBlockHeight blocks from node1. +// 5. compare node1 db matches node2 db. 
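The five steps above map almost one-to-one onto harness code. A minimal sketch of the pattern, assuming hypothetical helpers spawnNodeWithMaxHeight, waitForNodeToFullySync, and compareNodesByDB (only ConnectionBridge comes from this package; the rest are placeholders for the test utilities these files use):

	func testBlockSyncSketch(t *testing.T) {
		// 1. Spawn two nodes capped at MaxSyncBlockHeight.
		node1 := spawnNodeWithMaxHeight(t, MaxSyncBlockHeight) // hypothetical helper
		node2 := spawnNodeWithMaxHeight(t, MaxSyncBlockHeight) // hypothetical helper

		// 2. node1 fills up from the deso-seed-2.io generator.
		node1.Start()
		waitForNodeToFullySync(node1) // hypothetical helper

		// 3-4. Bridge the nodes so node2 pulls every block from node1.
		node2.Start()
		bridge := NewConnectionBridge(node1, node2) // signature assumed
		require.NoError(t, bridge.Start())
		waitForNodeToFullySync(node2) // hypothetical helper

		// 5. Both databases should now be identical.
		compareNodesByDB(t, node1, node2) // hypothetical comparison helper
	}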
func TestSimpleBlockSync(t *testing.T) { require := require.New(t) _ = require @@ -54,13 +54,13 @@ func TestSimpleBlockSync(t *testing.T) { } // TestSimpleSyncRestart tests if a node can successfully restart while syncing blocks. -// 1. Spawn two nodes node1, node2 with max block height of MaxSyncBlockHeight blocks. -// 2. node1 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator. -// 3. bridge node1 and node2 -// 4. node2 syncs between 10 and MaxSyncBlockHeight blocks from node1. +// 1. Spawn two nodes node1, node2 with max block height of MaxSyncBlockHeight blocks. +// 2. node1 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator. +// 3. bridge node1 and node2 +// 4. node2 syncs between 10 and MaxSyncBlockHeight blocks from node1. // 5. node2 disconnects from node1 and reboots. // 6. node2 reconnects with node1 and syncs remaining blocks. -// 7. compare node1 db matches node2 db. +// 7. compare node1 db matches node2 db. func TestSimpleSyncRestart(t *testing.T) { require := require.New(t) _ = require @@ -105,14 +105,14 @@ func TestSimpleSyncRestart(t *testing.T) { // TestSimpleSyncDisconnectWithSwitchingToNewPeer tests if a node can successfully restart while syncing blocks, and // then connect to a different node and sync the remaining blocks. -// 1. Spawn three nodes node1, node2, node3 with max block height of MaxSyncBlockHeight blocks. -// 2. node1 and node3 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator. -// 3. bridge node1 and node2 -// 4. node2 syncs between 10 and MaxSyncBlockHeight blocks from node1. +// 1. Spawn three nodes node1, node2, node3 with max block height of MaxSyncBlockHeight blocks. +// 2. node1 and node3 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator. +// 3. bridge node1 and node2 +// 4. node2 syncs between 10 and MaxSyncBlockHeight blocks from node1. // 5. node2 disconnects from node1 and reboots. // 6. node2 reconnects with node3 and syncs remaining blocks. -// 7. compare node1 state matches node2 state. -// 8. compare node3 state matches node2 state. +// 7. compare node1 state matches node2 state. +// 8. compare node3 state matches node2 state. func TestSimpleSyncDisconnectWithSwitchingToNewPeer(t *testing.T) { require := require.New(t) _ = require diff --git a/integration_testing/connection_bridge.go b/integration_testing/connection_bridge.go index 0cf247af3..1d0228467 100644 --- a/integration_testing/connection_bridge.go +++ b/integration_testing/connection_bridge.go @@ -31,11 +31,14 @@ import ( // // Let's say we have two nodes, nodeA and nodeB, that we want to bridge together. The connection bridge will then // simulate the creation of two outbound and two inbound node connections: +// // nodeA : connectionOutboundA -> connectionInboundB : nodeB // nodeB : connectionOutboundB -> connectionInboundA : nodeA +// // For example, let's say nodeA wants to send a GET_HEADERS message to nodeB, the traffic will look like this: -// GET_HEADERS: nodeA -> connectionOutboundA -> connectionInboundB -> nodeB -// HEADER_BUNDLE: nodeB -> connectionInboundB -> connectionOutboundA -> nodeA +// +// GET_HEADERS: nodeA -> connectionOutboundA -> connectionInboundB -> nodeB +// HEADER_BUNDLE: nodeB -> connectionInboundB -> connectionOutboundA -> nodeA // // This middleware design of the ConnectionBridge allows us to have much higher control over the communication // between the two nodes. 
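To make that routing concrete, here is a hedged sketch of one direction of the relay loop, assuming the Peer type's ReadDeSoMessage/WriteDeSoMessage pair from lib (the bridge's real plumbing differs in detail):

	// relayOneDirection plays the connectionOutboundA -> connectionInboundB hop:
	// every message read from src is forwarded to dst unchanged.
	func relayOneDirection(src *Peer, dst *Peer, stop <-chan struct{}) {
		for {
			select {
			case <-stop:
				return
			default:
			}
			msg, err := src.ReadDeSoMessage() // assumed reader on the source connection
			if err != nil {
				return
			}
			if err := dst.WriteDeSoMessage(msg); err != nil { // assumed writer on the sink
				return
			}
		}
	}

Running one such loop per direction is what gives the bridge its tap on every message exchanged by the two nodes.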
In particular, we have full control over the `connectionOutboundA -> connectionInboundB` diff --git a/integration_testing/hypersync_test.go b/integration_testing/hypersync_test.go index fc0b9bd87..aad90ee0e 100644 --- a/integration_testing/hypersync_test.go +++ b/integration_testing/hypersync_test.go @@ -10,11 +10,11 @@ import ( ) // TestSimpleHyperSync test if a node can successfully hyper sync from another node: -// 1. Spawn two nodes node1, node2 with max block height of MaxSyncBlockHeight blocks, and snapshot period of HyperSyncSnapshotPeriod. -// 2. node1 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator and builds ancestral records. -// 3. bridge node1 and node2. -// 4. node2 hypersyncs from node1 -// 5. once done, compare node1 state, db, and checksum matches node2. +// 1. Spawn two nodes node1, node2 with max block height of MaxSyncBlockHeight blocks, and snapshot period of HyperSyncSnapshotPeriod. +// 2. node1 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator and builds ancestral records. +// 3. bridge node1 and node2. +// 4. node2 hypersyncs from node1 +// 5. once done, compare node1 state, db, and checksum matches node2. func TestSimpleHyperSync(t *testing.T) { require := require.New(t) _ = require @@ -58,12 +58,12 @@ func TestSimpleHyperSync(t *testing.T) { } // TestHyperSyncFromHyperSyncedNode test if a node can successfully hypersync from another hypersynced node: -// 1. Spawn three nodes node1, node2, node3 with max block height of MaxSyncBlockHeight blocks, and snapshot period of HyperSyncSnapshotPeriod -// 2. node1 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator and builds ancestral records. -// 3. bridge node1 and node2. -// 4. node2 hypersyncs state. -// 5. once done, bridge node3 and node2 so that node3 hypersyncs from node2. -// 6. compare node1 state, db, and checksum matches node2, and node3. +// 1. Spawn three nodes node1, node2, node3 with max block height of MaxSyncBlockHeight blocks, and snapshot period of HyperSyncSnapshotPeriod +// 2. node1 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator and builds ancestral records. +// 3. bridge node1 and node2. +// 4. node2 hypersyncs state. +// 5. once done, bridge node3 and node2 so that node3 hypersyncs from node2. +// 6. compare node1 state, db, and checksum matches node2, and node3. func TestHyperSyncFromHyperSyncedNode(t *testing.T) { require := require.New(t) _ = require @@ -128,12 +128,12 @@ func TestHyperSyncFromHyperSyncedNode(t *testing.T) { } // TestSimpleHyperSyncRestart test if a node can successfully hyper sync from another node: -// 1. Spawn two nodes node1, node2 with max block height of MaxSyncBlockHeight blocks, and snapshot period of HyperSyncSnapshotPeriod. -// 2. node1 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator and builds ancestral records. -// 3. bridge node1 and node2. -// 4. node2 hyper syncs a portion of the state from node1 and then restarts. -// 5. node2 reconnects to node1 and hypersyncs again. -// 6. Once node2 finishes sync, compare node1 state, db, and checksum matches node2. +// 1. Spawn two nodes node1, node2 with max block height of MaxSyncBlockHeight blocks, and snapshot period of HyperSyncSnapshotPeriod. +// 2. node1 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator and builds ancestral records. +// 3. bridge node1 and node2. +// 4. node2 hyper syncs a portion of the state from node1 and then restarts. +// 5. node2 reconnects to node1 and hypersyncs again. +// 6. 
Once node2 finishes sync, compare node1 state, db, and checksum matches node2. func TestSimpleHyperSyncRestart(t *testing.T) { require := require.New(t) _ = require @@ -183,12 +183,12 @@ func TestSimpleHyperSyncRestart(t *testing.T) { } // TestSimpleHyperSyncDisconnectWithSwitchingToNewPeer tests if a node can successfully restart while hypersyncing. -// 1. Spawn three nodes node1, node2, and node3 with max block height of MaxSyncBlockHeight blocks. -// 2. node1, node3 sync MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator. -// 3. bridge node1 and node2 -// 4. node2 hypersyncs from node1 but we restart node2 midway. -// 5. after restart, bridge node2 with node3 and resume hypersync. -// 6. once node2 finishes, compare node1, node2, node3 state, db, and checksums are identical. +// 1. Spawn three nodes node1, node2, and node3 with max block height of MaxSyncBlockHeight blocks. +// 2. node1, node3 sync MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator. +// 3. bridge node1 and node2 +// 4. node2 hypersyncs from node1 but we restart node2 midway. +// 5. after restart, bridge node2 with node3 and resume hypersync. +// 6. once node2 finishes, compare node1, node2, node3 state, db, and checksums are identical. func TestSimpleHyperSyncDisconnectWithSwitchingToNewPeer(t *testing.T) { require := require.New(t) _ = require diff --git a/integration_testing/txindex_test.go b/integration_testing/txindex_test.go index b01f7d3b2..aa13fd265 100644 --- a/integration_testing/txindex_test.go +++ b/integration_testing/txindex_test.go @@ -10,11 +10,11 @@ import ( ) // TestSimpleTxIndex tests if a node can successfully build txindex after block syncing from another node: -// 1. Spawn two nodes node1, node2 with max block height of MaxSyncBlockHeight blocks. -// 2. node1 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator, and builds txindex afterwards. -// 3. bridge node1 and node2 -// 4. node2 syncs MaxSyncBlockHeight blocks from node1, and builds txindex afterwards. -// 5. compare node1 db and txindex matches node2. +// 1. Spawn two nodes node1, node2 with max block height of MaxSyncBlockHeight blocks. +// 2. node1 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator, and builds txindex afterwards. +// 3. bridge node1 and node2 +// 4. node2 syncs MaxSyncBlockHeight blocks from node1, and builds txindex afterwards. +// 5. compare node1 db and txindex matches node2. func TestSimpleTxIndex(t *testing.T) { require := require.New(t) _ = require diff --git a/lib/block_view_new_message.go b/lib/block_view_new_message.go index 23ec505d4..72d9a9d64 100644 --- a/lib/block_view_new_message.go +++ b/lib/block_view_new_message.go @@ -591,11 +591,11 @@ func (bav *UtxoView) deleteDmThreadIndex(dmThreadKey DmThreadKey) { // with a uint64 unix timestamp in nanoseconds TimestampNanos. The first AccessGroupId identifies the sender of the message, // and the second AccessGroupId identifies the recipient of the message, i.e. we have the following basic index structure: // -// Note that an AccessGroupId consists of an owner public key and a group key name (string) -// ... -// NewMessageEntry indexing: -// <AccessGroupId, AccessGroupId, TimestampNanos> -> NewMessageEntry +// Note that an AccessGroupId consists of an owner public key and a group key name (string) +// ... +// NewMessageEntry indexing: +// <AccessGroupId, AccessGroupId, TimestampNanos> -> NewMessageEntry // // Depending on the type of the thread, this message will be indexed in different ways.
For example, if the thread is a // dm thread, then we will store a single NewMessageEntry for each message sent, and two DmThreadEntry entries for diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 4032065ec..6a1e1ae5b 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -1400,14 +1400,16 @@ func TestBasicTransfer(t *testing.T) { // TestBasicTransferSignatures thoroughly tests all possible ways to sign a DeSo transaction. // There are three available signature schemas that are accepted by the DeSo blockchain: +// // (1) Transaction signed by user's main public key // (2) Transaction signed by user's derived key with "DerivedPublicKey" passed in ExtraData -// (3) Transaction signed by user's derived key using DESO-DER signature standard. +// (3) Transaction signed by user's derived key using DESO-DER signature standard. // // We will try all these schemas while running three main tests scenarios: -// - try signing and processing a basicTransfer -// - try signing and processing a authorizeDerivedKey -// - try signing and processing a authorizeDerivedKey followed by a basicTransfer +// - try signing and processing a basicTransfer +// - try signing and processing a authorizeDerivedKey +// - try signing and processing a authorizeDerivedKey followed by a basicTransfer +// // We use basicTransfer as a placeholder for a normal DeSo transaction (alternatively, we could have used a post, // follow, nft, etc transaction). For each scenario we try signing the transaction with either user's main public // key, a derived key, or a random key. Basically, we try every possible context in which a transaction can be signed. diff --git a/lib/blockchain.go b/lib/blockchain.go index a48ead5fa..620ee4206 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -695,11 +695,11 @@ func fastLog2Floor(n uint32) uint8 { // // In addition, there are two special cases: // -// - When no locators are provided, the stop hash is treated as a request for -// that block, so it will either return the node associated with the stop hash -// if it is known, or nil if it is unknown -// - When locators are provided, but none of them are known, nodes starting -// after the genesis block will be returned +// - When no locators are provided, the stop hash is treated as a request for +// that block, so it will either return the node associated with the stop hash +// if it is known, or nil if it is unknown +// - When locators are provided, but none of them are known, nodes starting +// after the genesis block will be returned // // This is primarily a helper function for the locateBlocks and locateHeaders // functions. @@ -804,11 +804,11 @@ func locateHeaders(locator []*BlockHash, stopHash *BlockHash, maxHeaders uint32, // // In addition, there are two special cases: // -// - When no locators are provided, the stop hash is treated as a request for -// that header, so it will either return the header for the stop hash itself -// if it is known, or nil if it is unknown -// - When locators are provided, but none of them are known, headers starting -// after the genesis block will be returned +// - When no locators are provided, the stop hash is treated as a request for +// that header, so it will either return the header for the stop hash itself +// if it is known, or nil if it is unknown +// - When locators are provided, but none of them are known, headers starting +// after the genesis block will be returned // // This function is safe for concurrent access. 
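The rules above reduce to finding the first locator hash both sides know, with the two special cases handled up front. A sketch of that resolution step only, using hypothetical index lookups headerForHash, nextHeaderAfter, and headerAtHeight (the real implementation works over the best header chain directly):

	// startOfLocatedHeaders picks the header at which a response should begin,
	// or returns nil when the request cannot be satisfied.
	func startOfLocatedHeaders(locator []*BlockHash, stopHash *BlockHash) *MsgDeSoHeader {
		if len(locator) == 0 {
			// Special case 1: no locators means the stop hash itself is being
			// requested; an unknown stop hash resolves to nil.
			return headerForHash(stopHash)
		}
		for _, hash := range locator {
			if hdr := headerForHash(hash); hdr != nil {
				// Start after the most recent locator hash we recognize.
				return nextHeaderAfter(hdr)
			}
		}
		// Special case 2: nothing in the locator is known, so start with the
		// first block after genesis.
		return headerAtHeight(1)
	}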
func (bc *Blockchain) LocateBestBlockChainHeaders(locator []*BlockHash, stopHash *BlockHash) []*MsgDeSoHeader { @@ -834,8 +834,9 @@ func (bc *Blockchain) LocateBestBlockChainHeaders(locator []*BlockHash, stopHash // from the block being located. // // For example, assume a block chain with a side chain as depicted below: -// genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18 -// \-> 16a -> 17a +// +// genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18 +// \-> 16a -> 17a // // The block locator for block 17a would be the hashes of blocks: // [17a 16a 15 14 13 12 11 10 9 8 7 6 4 genesis] @@ -1118,8 +1119,8 @@ func (ss SyncState) String() string { } } -// - Latest block height is after the latest checkpoint (if enabled) -// - Latest block has a timestamp newer than 24 hours ago +// - Latest block height is after the latest checkpoint (if enabled) +// - Latest block has a timestamp newer than 24 hours ago // // This function MUST be called with the ChainLock held (for reads). func (bc *Blockchain) chainState() SyncState { @@ -2752,12 +2753,14 @@ var ( // The number of hashing attempts in expectation it would take to produce the // hash passed in. This is computed as: -// E(min(X_i, ..., X_n)) where: -// - n = (number of attempted hashes) and -// - the X_i are all U(0, MAX_HASH) +// +// E(min(X_i, ..., X_n)) where: +// - n = (number of attempted hashes) and +// - the X_i are all U(0, MAX_HASH) +// // -> E(min(X_i, ..., X_n)) = MAX_HASH / (n + 1) // -> E(n) ~= MAX_HASH / min_hash - 1 -// - where min_hash is the block hash +// - where min_hash is the block hash // // We approximate this as MAX_HASH / (min_hash + 1), adding 1 to min_hash in // order to mitigate the possibility of a divide-by-zero error. diff --git a/lib/constants.go b/lib/constants.go index 5a36b2869..a36987ab8 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -274,46 +274,46 @@ type ForkHeights struct { // step is to define a new value in ForkHeights, and set the value accordingly for // mainnet, testnet, and regtest param structs. Add a name for your migration so that // it can be accessed robustly. -// 1. Define a new block height in the EncoderMigrationHeights struct. This should map +// 1. Define a new block height in the EncoderMigrationHeights struct. This should map // 1:1 with the fork height defined prior. -// 2. Add conditional statements to the RawEncode / RawDecodeWithoutMetadata methods that +// 2. Add conditional statements to the RawEncode / RawDecodeWithoutMetadata methods that // trigger at the defined height. -// 3. Add a condition to GetVersionByte to return version associated with the migration height. +// 3. Add a condition to GetVersionByte to return version associated with the migration height. // // So for example, let's say you want to add a migration for UtxoEntry at height 1200. // -// 0. Add a field to ForkHeight that marks the point at which this entry will come -// into play: +// 0. 
Add a field to ForkHeight that marks the point at which this entry will come +// into play: // - Add the following to the ForkHeight struct: -// UtxoEntryTestHeight uint64 +// UtxoEntryTestHeight uint64 // - Add the following to the individual param structs (MainnetForkHeights, TestnetForkHeights, -// and RegtestForkHeights): -// UtxoEntryTestHeight: 1200 (may differ for mainnet vs testnet & regtest) +// and RegtestForkHeights): +// UtxoEntryTestHeight: 1200 (may differ for mainnet vs testnet & regtest) // - Add the migration name below DefaultMigration -// UtxoEntryTestHeight MigrationName = "UtxoEntryTestHeight" +// UtxoEntryTestHeight MigrationName = "UtxoEntryTestHeight" // -// 1. Add a field to the EncoderMigrationHeights that looks like this: -// UtxoEntryTestHeight MigrationHeight +// 1. Add a field to the EncoderMigrationHeights that looks like this: +// UtxoEntryTestHeight MigrationHeight // -// 2. Modify func (utxoEntry *UtxoEntry) RawEncode/RawDecodeWithoutMetadata. E.g. add the following condition at the -// end of RawEncodeWithoutMetadata (note the usage of the MigrationName UtxoEntryTestHeight): -// if MigrationTriggered(blockHeight, UtxoEntryTestHeight) { -// data = append(data, byte(127)) -// } -// And this at the end of RawDecodeWithoutMetadata: -// if MigrationTriggered(blockHeight, UtxoEntryTestHeight) { -// _, err = rr.ReadByte() -// if err != nil { -// return errors.Wrapf(err, "UtxoEntry.Decode: Problem reading random byte.") -// } -// } -// MAKE SURE TO WRITE CORRECT CONDITIONS FOR THE HEIGHTS IN BOTH ENCODE AND DECODE! +// 2. Modify func (utxoEntry *UtxoEntry) RawEncode/RawDecodeWithoutMetadata. E.g. add the following condition at the +// end of RawEncodeWithoutMetadata (note the usage of the MigrationName UtxoEntryTestHeight): +// if MigrationTriggered(blockHeight, UtxoEntryTestHeight) { +// data = append(data, byte(127)) +// } +// And this at the end of RawDecodeWithoutMetadata: +// if MigrationTriggered(blockHeight, UtxoEntryTestHeight) { +// _, err = rr.ReadByte() +// if err != nil { +// return errors.Wrapf(err, "UtxoEntry.Decode: Problem reading random byte.") +// } +// } +// MAKE SURE TO WRITE CORRECT CONDITIONS FOR THE HEIGHTS IN BOTH ENCODE AND DECODE! // -// 3. Modify func (utxo *UtxoEntry) GetVersionByte to return the correct encoding version depending on the height. Use the -// function GetMigrationVersion to chain encoder migrations (Note the variadic parameter of GetMigrationVersion and -// the usage of the MigrationName UtxoEntryTestHeight) +// 3. Modify func (utxo *UtxoEntry) GetVersionByte to return the correct encoding version depending on the height. Use the +// function GetMigrationVersion to chain encoder migrations (Note the variadic parameter of GetMigrationVersion and +// the usage of the MigrationName UtxoEntryTestHeight) // -// return GetMigrationVersion(blockHeight, UtxoEntryTestHeight) +// return GetMigrationVersion(blockHeight, UtxoEntryTestHeight) // // That's it! type MigrationName string diff --git a/lib/mempool.go b/lib/mempool.go index 062ab72af..620446533 100644 --- a/lib/mempool.go +++ b/lib/mempool.go @@ -347,16 +347,16 @@ func (mp *DeSoMempool) resetPool(newPool *DeSoMempool) { // UpdateAfterConnectBlock updates the mempool after a block has been added to the // blockchain. It does this by basically removing all known transactions in the block // from the mempool as follows: -// - Build a map of all of the transactions in the block indexed by their hash. -// - Create a new mempool object. 
-// - Iterate through all the transactions in the mempool and add the transactions -// to the new pool object *only if* they don't appear in the block. Do this for -// transactions in the pool and in the unconnectedTx pool. -// - Compute which transactions were newly-accepted into the pool by effectively diffing -// the new pool's transactions with the old pool's transactions. -// - Once the new pool object is up-to-date, the fields of the new pool object -// replace the fields of the original pool object. -// - Return the newly added transactions computed earlier. +// - Build a map of all of the transactions in the block indexed by their hash. +// - Create a new mempool object. +// - Iterate through all the transactions in the mempool and add the transactions +// to the new pool object *only if* they don't appear in the block. Do this for +// transactions in the pool and in the unconnectedTx pool. +// - Compute which transactions were newly-accepted into the pool by effectively diffing +// the new pool's transactions with the old pool's transactions. +// - Once the new pool object is up-to-date, the fields of the new pool object +// replace the fields of the original pool object. +// - Return the newly added transactions computed earlier. // // TODO: This is fairly inefficient but the story is the same as for // UpdateAfterDisconnectBlock. @@ -454,15 +454,15 @@ func (mp *DeSoMempool) UpdateAfterConnectBlock(blk *MsgDeSoBlock) (_txnsAddedToM // UpdateAfterDisconnectBlock updates the mempool to reflect that a block has been // disconnected from the blockchain. It does this by basically adding all the // transactions in the block back to the mempool as follows: -// - A new pool object is created containing no transactions. -// - The block's transactions are added to this new pool object. This is done in order -// to minimize dependency-related conflicts with transactions already in the mempool. -// - Then the transactions in the original pool are layered on top of the block's -// transactions in the new pool object. Again this is done to avoid dependency -// issues since the ordering of <block txns> followed by -// <mempool txns> is much less likely to have issues. -// - Then, once the new pool object is up-to-date, the fields of the new pool object -// replace the fields of the original pool object. +// - A new pool object is created containing no transactions. +// - The block's transactions are added to this new pool object. This is done in order +// to minimize dependency-related conflicts with transactions already in the mempool. +// - Then the transactions in the original pool are layered on top of the block's +// transactions in the new pool object. Again this is done to avoid dependency +// issues since the ordering of <block txns> followed by +// <mempool txns> is much less likely to have issues. +// - Then, once the new pool object is up-to-date, the fields of the new pool object +// replace the fields of the original pool object. // // This function is safe for concurrent access. It is assumed the ChainLock is // held before this function is accessed. diff --git a/lib/miner.go b/lib/miner.go index b7fa22a85..9d4fb714e 100644 --- a/lib/miner.go +++ b/lib/miner.go @@ -319,18 +319,18 @@ func CopyBytesIntoBlockHash(data []byte) *BlockHash { // ProofOfWorkHash is a hash function designed for computing DeSo block hashes. // It seems the optimal hash function is one that satisfies two properties: -// 1) It is not computable by any existing ASICs.
If this property isn't satisfied -// then miners with pre-existing investments in ASICs for other coins can very -// cheaply mine on our chain for a short period of time to pull off a 51% attack. -// This has actually happened with "merge-mined" coins like Namecoin. -// 2) If implemented on an ASIC, there is an "orders of magnitude" speed-up over -// using a CPU or GPU. This is because ASICs require some amount of capital -// expenditure up-front in order to mine, which then aligns the owner of the -// ASIC to care about the health of the network over a longer period of time. In -// contrast, a hash function that is CPU or GPU-mineable can be attacked with -// an AWS fleet early on. This also may result in a more eco-friendly chain, since -// the hash power will be more bottlenecked by up-front CapEx rather than ongoing -// electricity cost, as is the case with GPU-mined coins. +// 1. It is not computable by any existing ASICs. If this property isn't satisfied +// then miners with pre-existing investments in ASICs for other coins can very +// cheaply mine on our chain for a short period of time to pull off a 51% attack. +// This has actually happened with "merge-mined" coins like Namecoin. +// 2. If implemented on an ASIC, there is an "orders of magnitude" speed-up over +// using a CPU or GPU. This is because ASICs require some amount of capital +// expenditure up-front in order to mine, which then aligns the owner of the +// ASIC to care about the health of the network over a longer period of time. In +// contrast, a hash function that is CPU or GPU-mineable can be attacked with +// an AWS fleet early on. This also may result in a more eco-friendly chain, since +// the hash power will be more bottlenecked by up-front CapEx rather than ongoing +// electricity cost, as is the case with GPU-mined coins. // // Note that our pursuit of (2) above runs counter to existing dogma which seeks to // prioritize "ASIC-resistance" in hash functions. diff --git a/lib/snapshot.go b/lib/snapshot.go index 9f890512e..b4fbd3fdf 100644 --- a/lib/snapshot.go +++ b/lib/snapshot.go @@ -189,7 +189,8 @@ var ( // allowing forward progress. Doing it this way would also have avoided situations in which a // snapshot can break, thus eliminating any reliance on block disconnecting in our codebase. // But we live and learn. -// ---@petern: Fear not the complexity. Embrace it, and the parity semaphores will love you back. +// +// ---@petern: Fear not the complexity. Embrace it, and the parity semaphores will love you back. // // In addition to all of the above, there is one more cool thing that a snapshot does, which is that // it keeps track of an "online" checksum of all keys and values in the snapshot. This is done using diff --git a/lib/txindex.go b/lib/txindex.go index 1d1090588..3db429a83 100644 --- a/lib/txindex.go +++ b/lib/txindex.go @@ -188,7 +188,9 @@ func (txi *TXIndex) Start() { } // Stop TXIndex node. This method doesn't close the txindex db, make sure to call in the parent context: -// txi.TXIndexChain.DB().Close() +// +// txi.TXIndexChain.DB().Close() +// // It's important!!! Do it after the txi.updateWaitGroup.Wait(). func (txi *TXIndex) Stop() { glog.Info("TXIndex: Stopping updates and closing database") diff --git a/lib/varint.go b/lib/varint.go index b70cd0bec..063045b7f 100644 --- a/lib/varint.go +++ b/lib/varint.go @@ -55,10 +55,9 @@ func PutUvarint(buf []byte, x uint64) int { // number of bytes read (> 0). 
If an error occurred, the value is 0 // and the number of bytes n is <= 0 meaning: // -// n == 0: buf too small -// n < 0: value larger than 64 bits (overflow) -// and -n is the number of bytes read -// +// n == 0: buf too small +// n < 0: value larger than 64 bits (overflow) +// and -n is the number of bytes read func Uvarint(buf []byte) (uint64, int) { var x uint64 var s uint @@ -95,10 +94,9 @@ func PutVarint(buf []byte, x int64) int { // number of bytes read (> 0). If an error occurred, the value is 0 // and the number of bytes n is <= 0 with the following meaning: // -// n == 0: buf too small -// n < 0: value larger than 64 bits (overflow) -// and -n is the number of bytes read -// +// n == 0: buf too small +// n < 0: value larger than 64 bits (overflow) +// and -n is the number of bytes read func Varint(buf []byte) (int64, int) { ux, n := Uvarint(buf) // ok to continue in presence of error x := int64(ux >> 1)
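Because the n <= 0 convention above mirrors encoding/binary's varints, callers branch on the returned byte count rather than on an error value. A small usage sketch under that assumption (fmt import elided):

	func uvarintExample() {
		buf := make([]byte, 10) // ten bytes always fit a 64-bit uvarint
		n := PutUvarint(buf, 300)

		val, m := Uvarint(buf[:n])
		switch {
		case m > 0:
			fmt.Printf("decoded %d from %d bytes\n", val, m) // decoded 300 from 2 bytes
		case m == 0:
			fmt.Println("buf too small")
		default:
			fmt.Printf("overflow after reading %d bytes\n", -m)
		}
	}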