bump go version to 1.20 (deso-protocol#490)
* bump go version to 1.20

* run gofmt
lazynina authored Apr 3, 2023
1 parent eafdfb0 commit c1415ed
Showing 17 changed files with 158 additions and 148 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/ci.yml
@@ -15,7 +15,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
- go-version: "1.18"
+ go-version: "1.20"

- name: Checkout branch
uses: actions/checkout@v3
@@ -52,7 +52,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v2
with:
- go-version: "1.18"
+ go-version: "1.20"

- name: Checkout branch
uses: actions/checkout@v3
1 change: 1 addition & 0 deletions .gitignore
@@ -8,3 +8,4 @@ backend
.idea

local_scripts/*
+ lib/mem.log
6 changes: 3 additions & 3 deletions desohash/sha3m/doc.go
@@ -4,15 +4,15 @@

// Package sha3m implements the SHA-3 fixed-output-length hash functions with modifications for DeSo proof-of-work
//
- // Guidance
+ // # Guidance
//
- // Security strengths
+ // # Security strengths
//
// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security
// strength against preimage attacks of x bits. Since they only produce "x"
// bits of output, their collision-resistance is only "x/2" bits.
//
- // The sponge construction
+ // # The sponge construction
//
// A sponge builds a pseudo-random function from a public pseudo-random
// permutation, by applying the permutation to a state of "rate + capacity"
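The three changes above are gofmt applying the Go 1.19+ doc comment syntax: a line starting with `#` becomes a rendered heading on pkg.go.dev, indented items become lists, and more deeply indented lines become preformatted blocks. A minimal sketch of that syntax, using a hypothetical package rather than sha3m's real documentation:

```go
// Package example shows the doc comment syntax that gofmt canonicalizes
// since Go 1.19: "#" marks a heading, indented items form a list, and a
// blank "//" line separates the surrounding prose from those blocks.
//
// # Usage
//
// A typical call sequence looks like this:
//  1. Create a hash instance.
//  2. Write the input bytes to it.
//  3. Read the digest out.
package example
```

This is also why many of the hunks below only re-indent list items or insert blank `//` separator lines: the wording is unchanged, gofmt is just normalizing the comment layout.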
2 changes: 1 addition & 1 deletion go.mod
@@ -1,6 +1,6 @@
module github.com/deso-protocol/core

- go 1.18
+ go 1.20

require (
github.com/DataDog/datadog-go v4.5.0+incompatible
32 changes: 16 additions & 16 deletions integration_testing/blocksync_test.go
@@ -10,11 +10,11 @@ import (
)

// TestSimpleBlockSync test if a node can successfully sync from another node:
- // 1. Spawn two nodes node1, node2 with max block height of MaxSyncBlockHeight blocks.
- // 2. node1 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator.
- // 3. bridge node1 and node2
- // 4. node2 syncs MaxSyncBlockHeight blocks from node1.
- // 5. compare node1 db matches node2 db.
+ // 1. Spawn two nodes node1, node2 with max block height of MaxSyncBlockHeight blocks.
+ // 2. node1 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator.
+ // 3. bridge node1 and node2
+ // 4. node2 syncs MaxSyncBlockHeight blocks from node1.
+ // 5. compare node1 db matches node2 db.
func TestSimpleBlockSync(t *testing.T) {
require := require.New(t)
_ = require
@@ -54,13 +54,13 @@ func TestSimpleBlockSync(t *testing.T) {
}

// TestSimpleSyncRestart tests if a node can successfully restart while syncing blocks.
- // 1. Spawn two nodes node1, node2 with max block height of MaxSyncBlockHeight blocks.
- // 2. node1 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator.
- // 3. bridge node1 and node2
- // 4. node2 syncs between 10 and MaxSyncBlockHeight blocks from node1.
+ // 1. Spawn two nodes node1, node2 with max block height of MaxSyncBlockHeight blocks.
+ // 2. node1 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator.
+ // 3. bridge node1 and node2
+ // 4. node2 syncs between 10 and MaxSyncBlockHeight blocks from node1.
// 5. node2 disconnects from node1 and reboots.
// 6. node2 reconnects with node1 and syncs remaining blocks.
- // 7. compare node1 db matches node2 db.
+ // 7. compare node1 db matches node2 db.
func TestSimpleSyncRestart(t *testing.T) {
require := require.New(t)
_ = require
@@ -105,14 +105,14 @@ func TestSimpleSyncRestart(t *testing.T) {

// TestSimpleSyncDisconnectWithSwitchingToNewPeer tests if a node can successfully restart while syncing blocks, and
// then connect to a different node and sync the remaining blocks.
- // 1. Spawn three nodes node1, node2, node3 with max block height of MaxSyncBlockHeight blocks.
- // 2. node1 and node3 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator.
- // 3. bridge node1 and node2
- // 4. node2 syncs between 10 and MaxSyncBlockHeight blocks from node1.
+ // 1. Spawn three nodes node1, node2, node3 with max block height of MaxSyncBlockHeight blocks.
+ // 2. node1 and node3 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator.
+ // 3. bridge node1 and node2
+ // 4. node2 syncs between 10 and MaxSyncBlockHeight blocks from node1.
// 5. node2 disconnects from node1 and reboots.
// 6. node2 reconnects with node3 and syncs remaining blocks.
- // 7. compare node1 state matches node2 state.
- // 8. compare node3 state matches node2 state.
+ // 7. compare node1 state matches node2 state.
+ // 8. compare node3 state matches node2 state.
func TestSimpleSyncDisconnectWithSwitchingToNewPeer(t *testing.T) {
require := require.New(t)
_ = require
7 changes: 5 additions & 2 deletions integration_testing/connection_bridge.go
@@ -31,11 +31,14 @@ import (
//
// Let's say we have two nodes, nodeA and nodeB, that we want to bridge together. The connection bridge will then
// simulate the creation of two outbound and two inbound node connections:
+ //
// nodeA : connectionOutboundA -> connectionInboundB : nodeB
// nodeB : connectionOutboundB -> connectionInboundA : nodeA
+ //
// For example, let's say nodeA wants to send a GET_HEADERS message to nodeB, the traffic will look like this:
- // GET_HEADERS: nodeA -> connectionOutboundA -> connectionInboundB -> nodeB
- // HEADER_BUNDLE: nodeB -> connectionInboundB -> connectionOutboundA -> nodeA
+ //
+ // GET_HEADERS: nodeA -> connectionOutboundA -> connectionInboundB -> nodeB
+ // HEADER_BUNDLE: nodeB -> connectionInboundB -> connectionOutboundA -> nodeA
//
// This middleware design of the ConnectionBridge allows us to have much higher control over the communication
// between the two nodes. In particular, we have full control over the `connectionOutboundA -> connectionInboundB`
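The comment above describes ConnectionBridge as a middleware sitting between two nodes and relaying their messages. The channel relay below is only a conceptual sketch of that idea, not the package's real API (which works on peer connections rather than channels):

```go
package main

import "fmt"

// relay forwards every message it receives on in to out, giving the test
// harness a single choke point where traffic could be inspected, delayed,
// or dropped. This mirrors the middleware role the bridge plays between
// nodeA and nodeB in the comment above.
func relay(in <-chan string, out chan<- string) {
	for msg := range in {
		out <- msg
	}
	close(out)
}

func main() {
	aToBridge := make(chan string) // nodeA -> connectionOutboundA
	bridgeToB := make(chan string) // connectionInboundB -> nodeB

	go relay(aToBridge, bridgeToB)

	go func() {
		aToBridge <- "GET_HEADERS" // nodeA sends a request
		close(aToBridge)
	}()

	for msg := range bridgeToB {
		fmt.Println("nodeB received:", msg) // nodeB sees it via the bridge
	}
}
```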
46 changes: 23 additions & 23 deletions integration_testing/hypersync_test.go
@@ -10,11 +10,11 @@ import (
)

// TestSimpleHyperSync test if a node can successfully hyper sync from another node:
- // 1. Spawn two nodes node1, node2 with max block height of MaxSyncBlockHeight blocks, and snapshot period of HyperSyncSnapshotPeriod.
- // 2. node1 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator and builds ancestral records.
- // 3. bridge node1 and node2.
- // 4. node2 hypersyncs from node1
- // 5. once done, compare node1 state, db, and checksum matches node2.
+ // 1. Spawn two nodes node1, node2 with max block height of MaxSyncBlockHeight blocks, and snapshot period of HyperSyncSnapshotPeriod.
+ // 2. node1 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator and builds ancestral records.
+ // 3. bridge node1 and node2.
+ // 4. node2 hypersyncs from node1
+ // 5. once done, compare node1 state, db, and checksum matches node2.
func TestSimpleHyperSync(t *testing.T) {
require := require.New(t)
_ = require
@@ -58,12 +58,12 @@ func TestSimpleHyperSync(t *testing.T) {
}

// TestHyperSyncFromHyperSyncedNode test if a node can successfully hypersync from another hypersynced node:
- // 1. Spawn three nodes node1, node2, node3 with max block height of MaxSyncBlockHeight blocks, and snapshot period of HyperSyncSnapshotPeriod
- // 2. node1 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator and builds ancestral records.
- // 3. bridge node1 and node2.
- // 4. node2 hypersyncs state.
- // 5. once done, bridge node3 and node2 so that node3 hypersyncs from node2.
- // 6. compare node1 state, db, and checksum matches node2, and node3.
+ // 1. Spawn three nodes node1, node2, node3 with max block height of MaxSyncBlockHeight blocks, and snapshot period of HyperSyncSnapshotPeriod
+ // 2. node1 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator and builds ancestral records.
+ // 3. bridge node1 and node2.
+ // 4. node2 hypersyncs state.
+ // 5. once done, bridge node3 and node2 so that node3 hypersyncs from node2.
+ // 6. compare node1 state, db, and checksum matches node2, and node3.
func TestHyperSyncFromHyperSyncedNode(t *testing.T) {
require := require.New(t)
_ = require
@@ -128,12 +128,12 @@ func TestHyperSyncFromHyperSyncedNode(t *testing.T) {
}

// TestSimpleHyperSyncRestart test if a node can successfully hyper sync from another node:
- // 1. Spawn two nodes node1, node2 with max block height of MaxSyncBlockHeight blocks, and snapshot period of HyperSyncSnapshotPeriod.
- // 2. node1 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator and builds ancestral records.
- // 3. bridge node1 and node2.
- // 4. node2 hyper syncs a portion of the state from node1 and then restarts.
- // 5. node2 reconnects to node1 and hypersyncs again.
- // 6. Once node2 finishes sync, compare node1 state, db, and checksum matches node2.
+ // 1. Spawn two nodes node1, node2 with max block height of MaxSyncBlockHeight blocks, and snapshot period of HyperSyncSnapshotPeriod.
+ // 2. node1 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator and builds ancestral records.
+ // 3. bridge node1 and node2.
+ // 4. node2 hyper syncs a portion of the state from node1 and then restarts.
+ // 5. node2 reconnects to node1 and hypersyncs again.
+ // 6. Once node2 finishes sync, compare node1 state, db, and checksum matches node2.
func TestSimpleHyperSyncRestart(t *testing.T) {
require := require.New(t)
_ = require
@@ -183,12 +183,12 @@ func TestSimpleHyperSyncRestart(t *testing.T) {
}

// TestSimpleHyperSyncDisconnectWithSwitchingToNewPeer tests if a node can successfully restart while hypersyncing.
- // 1. Spawn three nodes node1, node2, and node3 with max block height of MaxSyncBlockHeight blocks.
- // 2. node1, node3 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator.
- // 3. bridge node1 and node2
- // 4. node2 hypersyncs from node1 but we restart node2 midway.
- // 5. after restart, bridge node2 with node3 and resume hypersync.
- // 6. once node2 finishes, compare node1, node2, node3 state, db, and checksums are identical.
+ // 1. Spawn three nodes node1, node2, and node3 with max block height of MaxSyncBlockHeight blocks.
+ // 2. node1, node3 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator.
+ // 3. bridge node1 and node2
+ // 4. node2 hypersyncs from node1 but we restart node2 midway.
+ // 5. after restart, bridge node2 with node3 and resume hypersync.
+ // 6. once node2 finishes, compare node1, node2, node3 state, db, and checksums are identical.
func TestSimpleHyperSyncDisconnectWithSwitchingToNewPeer(t *testing.T) {
require := require.New(t)
_ = require
10 changes: 5 additions & 5 deletions integration_testing/txindex_test.go
@@ -10,11 +10,11 @@ import (
)

// TestSimpleTxIndex test if a node can successfully build txindex after block syncing from another node:
- // 1. Spawn two nodes node1, node2 with max block height of MaxSyncBlockHeight blocks.
- // 2. node1 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator, and builds txindex afterwards.
- // 3. bridge node1 and node2
- // 4. node2 syncs MaxSyncBlockHeight blocks from node1, and builds txindex afterwards.
- // 5. compare node1 db and txindex matches node2.
+ // 1. Spawn two nodes node1, node2 with max block height of MaxSyncBlockHeight blocks.
+ // 2. node1 syncs MaxSyncBlockHeight blocks from the "deso-seed-2.io" generator, and builds txindex afterwards.
+ // 3. bridge node1 and node2
+ // 4. node2 syncs MaxSyncBlockHeight blocks from node1, and builds txindex afterwards.
+ // 5. compare node1 db and txindex matches node2.
func TestSimpleTxIndex(t *testing.T) {
require := require.New(t)
_ = require
10 changes: 5 additions & 5 deletions lib/block_view_new_message.go
@@ -591,11 +591,11 @@ func (bav *UtxoView) deleteDmThreadIndex(dmThreadKey DmThreadKey) {
// with a uint64 unix timestamp in nanoseconds TimestampNanos. The first AccessGroupId identifies the sender of the message,
// and the second AccessGroupId identifies the recipient of the message, i.e. we have the following basic index structure:
//
- // Nothing that an AccessGroupId consists of an owner public key and a group key name (string)
- // ...
- // NewMessageEntry indexing:
- // <SenderAccessGroupOwnerPublicKey, SenderAccessGroupKeyName,
- // RecipientAccessGroupOwnerPublicKey, RecipientAccessGroupKeyName, TimestampNanos> -> NewMessageEntry
+ // Nothing that an AccessGroupId consists of an owner public key and a group key name (string)
+ // ...
+ // NewMessageEntry indexing:
+ // <SenderAccessGroupOwnerPublicKey, SenderAccessGroupKeyName,
+ // RecipientAccessGroupOwnerPublicKey, RecipientAccessGroupKeyName, TimestampNanos> -> NewMessageEntry
//
// Depending on the type of the thread, this message will be indexed in different ways. For example, if the thread is a
// dm thread, then we will store a single NewMessageEntry for each message sent, and two DmThreadEntry entries for
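The index described in that comment is essentially a composite key that sorts messages by sender group, recipient group, and timestamp. A minimal sketch of such a key with assumed field sizes follows; the real core package has its own key prefixes, key-name padding, and encoders, so treat this purely as an illustration of the ordering:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// dmMessageKey is an illustrative layout only: 33-byte public keys and
// 32-byte key names are assumptions, not the core package's actual types.
type dmMessageKey struct {
	SenderGroupOwnerPublicKey    [33]byte
	SenderGroupKeyName           [32]byte
	RecipientGroupOwnerPublicKey [33]byte
	RecipientGroupKeyName        [32]byte
	TimestampNanos               uint64
}

// encode concatenates the fields in index order with a big-endian timestamp
// last, so messages in one thread sort chronologically under a shared prefix.
func (k dmMessageKey) encode() []byte {
	out := make([]byte, 0, 33+32+33+32+8)
	out = append(out, k.SenderGroupOwnerPublicKey[:]...)
	out = append(out, k.SenderGroupKeyName[:]...)
	out = append(out, k.RecipientGroupOwnerPublicKey[:]...)
	out = append(out, k.RecipientGroupKeyName[:]...)
	out = binary.BigEndian.AppendUint64(out, k.TimestampNanos)
	return out
}

func main() {
	key := dmMessageKey{TimestampNanos: 1680000000000000000}
	fmt.Println(len(key.encode())) // 138 bytes under these assumed sizes
}
```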
10 changes: 6 additions & 4 deletions lib/block_view_test.go
@@ -1400,14 +1400,16 @@ func TestBasicTransfer(t *testing.T) {

// TestBasicTransferSignatures thoroughly tests all possible ways to sign a DeSo transaction.
// There are three available signature schemas that are accepted by the DeSo blockchain:
+ //
// (1) Transaction signed by user's main public key
// (2) Transaction signed by user's derived key with "DerivedPublicKey" passed in ExtraData
- // (3) Transaction signed by user's derived key using DESO-DER signature standard.
+ // (3) Transaction signed by user's derived key using DESO-DER signature standard.
//
// We will try all these schemas while running three main tests scenarios:
- // - try signing and processing a basicTransfer
- // - try signing and processing a authorizeDerivedKey
- // - try signing and processing a authorizeDerivedKey followed by a basicTransfer
+ // - try signing and processing a basicTransfer
+ // - try signing and processing a authorizeDerivedKey
+ // - try signing and processing a authorizeDerivedKey followed by a basicTransfer
+ //
// We use basicTransfer as a placeholder for a normal DeSo transaction (alternatively, we could have used a post,
// follow, nft, etc transaction). For each scenario we try signing the transaction with either user's main public
// key, a derived key, or a random key. Basically, we try every possible context in which a transaction can be signed.
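All three schemas in that comment boil down to an ECDSA signature over a transaction hash, with scheme (3) carrying a DER-encoded signature. As a generic illustration only — DeSo signs with secp256k1 via btcec and its own DESO-DER rules, not the standard library's P-256 — here is a minimal sketch of hashing a payload and producing and verifying an ASN.1 DER ECDSA signature:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

func main() {
	// Generate a throwaway key and sign a stand-in "transaction hash".
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	txnHash := sha256.Sum256([]byte("serialized transaction bytes"))

	// SignASN1 returns the signature in ASN.1 DER form, the same encoding
	// family the DESO-DER scheme is built around.
	sig, err := ecdsa.SignASN1(rand.Reader, priv, txnHash[:])
	if err != nil {
		panic(err)
	}
	fmt.Println("valid:", ecdsa.VerifyASN1(&priv.PublicKey, txnHash[:], sig))
}
```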
39 changes: 21 additions & 18 deletions lib/blockchain.go
@@ -695,11 +695,11 @@ func fastLog2Floor(n uint32) uint8 {
//
// In addition, there are two special cases:
//
- // - When no locators are provided, the stop hash is treated as a request for
- // that block, so it will either return the node associated with the stop hash
- // if it is known, or nil if it is unknown
- // - When locators are provided, but none of them are known, nodes starting
- // after the genesis block will be returned
+ // - When no locators are provided, the stop hash is treated as a request for
+ // that block, so it will either return the node associated with the stop hash
+ // if it is known, or nil if it is unknown
+ // - When locators are provided, but none of them are known, nodes starting
+ // after the genesis block will be returned
//
// This is primarily a helper function for the locateBlocks and locateHeaders
// functions.
@@ -804,11 +804,11 @@ func locateHeaders(locator []*BlockHash, stopHash *BlockHash, maxHeaders uint32,
//
// In addition, there are two special cases:
//
- // - When no locators are provided, the stop hash is treated as a request for
- // that header, so it will either return the header for the stop hash itself
- // if it is known, or nil if it is unknown
- // - When locators are provided, but none of them are known, headers starting
- // after the genesis block will be returned
+ // - When no locators are provided, the stop hash is treated as a request for
+ // that header, so it will either return the header for the stop hash itself
+ // if it is known, or nil if it is unknown
+ // - When locators are provided, but none of them are known, headers starting
+ // after the genesis block will be returned
//
// This function is safe for concurrent access.
func (bc *Blockchain) LocateBestBlockChainHeaders(locator []*BlockHash, stopHash *BlockHash) []*MsgDeSoHeader {
@@ -834,8 +834,9 @@ func (bc *Blockchain) LocateBestBlockChainHeaders(locator []*BlockHash, stopHash
// from the block being located.
//
// For example, assume a block chain with a side chain as depicted below:
- // genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18
- // \-> 16a -> 17a
+ //
+ // genesis -> 1 -> 2 -> ... -> 15 -> 16 -> 17 -> 18
+ // \-> 16a -> 17a
//
// The block locator for block 17a would be the hashes of blocks:
// [17a 16a 15 14 13 12 11 10 9 8 7 6 4 genesis]
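The spacing in that example — dense near the tip, exponentially sparser toward genesis — can be reproduced with a few lines. The sketch below only computes heights for a main-chain tip; the cut-over after 12 recent heights is chosen to match the example, and the real code collects BlockNode hashes and handles the side-chain case shown above:

```go
package main

import "fmt"

// locatorHeights returns block heights spaced like a block locator: the most
// recent heights one by one, then a gap that doubles each step, then genesis.
func locatorHeights(tip int64) []int64 {
	heights := []int64{}
	step := int64(1)
	for h := tip; h > 0; h -= step {
		heights = append(heights, h)
		if len(heights) >= 12 {
			step *= 2 // widen the gap exponentially after the recent window
		}
	}
	return append(heights, 0) // always include the genesis block
}

func main() {
	// For a tip at height 17 this prints [17 16 15 14 13 12 11 10 9 8 7 6 4 0],
	// matching the [17a 16a ... 4 genesis] example in the comment above.
	fmt.Println(locatorHeights(17))
}
```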
@@ -1118,8 +1119,8 @@ func (ss SyncState) String() string {
}
}

- // - Latest block height is after the latest checkpoint (if enabled)
- // - Latest block has a timestamp newer than 24 hours ago
+ // - Latest block height is after the latest checkpoint (if enabled)
+ // - Latest block has a timestamp newer than 24 hours ago
//
// This function MUST be called with the ChainLock held (for reads).
func (bc *Blockchain) chainState() SyncState {
@@ -2752,12 +2753,14 @@ var (

// The number of hashing attempts in expectation it would take to produce the
// hash passed in. This is computed as:
- // E(min(X_i, ..., X_n)) where:
- // - n = (number of attempted hashes) and
- // - the X_i are all U(0, MAX_HASH)
+ //
+ // E(min(X_i, ..., X_n)) where:
+ // - n = (number of attempted hashes) and
+ // - the X_i are all U(0, MAX_HASH)
+ //
// -> E(min(X_i, ..., X_n)) = MAX_HASH / (n + 1)
// -> E(n) ~= MAX_HASH / min_hash - 1
- // - where min_hash is the block hash
+ // - where min_hash is the block hash
//
// We approximate this as MAX_HASH / (min_hash + 1), adding 1 to min_hash in
// order to mitigate the possibility of a divide-by-zero error.
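The approximation in that comment, E(n) ≈ MAX_HASH / (min_hash + 1), is easy to check numerically. A minimal math/big sketch (the real code operates on BlockHash values and its own constants):

```go
package main

import (
	"fmt"
	"math/big"
)

// expectedWork approximates the expected number of hash attempts needed to
// find a hash no larger than minHash: MAX_HASH / (minHash + 1), where the +1
// guards against a divide-by-zero, as the comment above describes.
func expectedWork(minHash *big.Int) *big.Int {
	maxHash := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(1)) // 2^256 - 1
	return new(big.Int).Div(maxHash, new(big.Int).Add(minHash, big.NewInt(1)))
}

func main() {
	// A hash with 16 leading zero bits: (2^256 - 1) >> 16.
	maxHash := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(1))
	minHash := new(big.Int).Rsh(maxHash, 16)
	fmt.Println(expectedWork(minHash)) // prints 65535, roughly 2^16 attempts
}
```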
