Skip to content
This repository has been archived by the owner on Dec 23, 2024. It is now read-only.

Commit

Permalink
feature/bootstrap: bootstrap test
Browse files Browse the repository at this point in the history
  • Loading branch information
Maksim Konovalov committed Nov 2, 2024
1 parent 9d1da02 commit bfbfc0f
Show file tree
Hide file tree
Showing 3 changed files with 267 additions and 92 deletions.
125 changes: 125 additions & 0 deletions bootstrap_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,125 @@
package vshard_router_test //nolint:revive

import (
"context"
"log"
"os"
"testing"
"time"

vshard_router "github.com/KaymeKaydex/go-vshard-router"
"github.com/KaymeKaydex/go-vshard-router/providers/static"
chelper "github.com/KaymeKaydex/go-vshard-router/test_helper"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
"github.com/tarantool/go-tarantool/v2"
"github.com/tarantool/go-tarantool/v2/test_helpers"
)

// TestRouter_ClusterBootstrap creates a router over the static test topology,
// bootstraps the cluster, and verifies the 100 buckets are split evenly
// (50/50) across the two replicasets.
func TestRouter_ClusterBootstrap(t *testing.T) {
	ctx := context.Background()

	router, err := vshard_router.NewRouter(ctx, vshard_router.Config{
		TotalBucketCount: 100,
		TopologyProvider: static.NewProvider(topology),
		User:             "guest",
	})
	// Check the error first: router is only meaningful when err is nil.
	require.NoError(t, err)
	require.NotNil(t, router)

	err = router.ClusterBootstrap(ctx, true)
	require.NoError(t, err)
	for _, rs := range router.RouterRouteAll() {
		count, err := rs.BucketsCount(ctx)
		require.NoError(t, err)
		// require.Equal takes (t, expected, actual): 100 buckets over
		// 2 equally-weighted replicasets -> 50 per replicaset.
		require.Equal(t, uint64(50), count)
	}
}

// instancesCount is the total number of Tarantool instances started for the
// test cluster (2 shards x 2 replicas); it must equal len(serverNames).
const instancesCount = 4

// serverNames maps each test instance name to the address it listens on.
// The names double as per-instance working directories in runTestMain.
var serverNames = map[string]string{
	// shard 1
	"storage_1_a": "127.0.0.1:3301",
	"storage_1_b": "127.0.0.1:3302",
	// shard 2
	"storage_2_a": "127.0.0.1:3303",
	"storage_2_b": "127.0.0.1:3304",
}

// topology is the static two-shard cluster layout used by the bootstrap test:
// two equally-weighted replicasets, each with two instances. The addresses
// must match serverNames so the router connects to the instances started in
// runTestMain. UUIDs are freshly generated per test run.
var topology = map[vshard_router.ReplicasetInfo][]vshard_router.InstanceInfo{
	{
		Name:   "storage_1",
		UUID:   uuid.New(),
		Weight: 1,
	}: {
		{
			Name: "storage_1_a",
			UUID: uuid.New(),
			Addr: "127.0.0.1:3301",
		},
		{
			Name: "storage_1_b",
			UUID: uuid.New(),
			Addr: "127.0.0.1:3302",
		},
	},
	{
		Name:   "storage_2",
		UUID:   uuid.New(),
		Weight: 1,
	}: {
		{
			Name: "storage_2_a",
			UUID: uuid.New(),
			Addr: "127.0.0.1:3303",
		},
		{
			Name: "storage_2_b",
			UUID: uuid.New(),
			Addr: "127.0.0.1:3304",
		},
	},
}

func runTestMain(m *testing.M) int {

dialers := make([]tarantool.NetDialer, instancesCount)
opts := make([]test_helpers.StartOpts, instancesCount)
instances := make([]test_helpers.TarantoolInstance, instancesCount)

Check failure on line 90 in bootstrap_test.go

View workflow job for this annotation

GitHub Actions / golangci-lint

ineffectual assignment to instances (ineffassign)

i := 0
for name, addr := range serverNames {
dialers[i] = tarantool.NetDialer{
Address: addr,
User: "guest",
}

opts[i] = test_helpers.StartOpts{
Dialer: dialers[i],
InitScript: "config.lua",
Listen: addr,
WaitStart: 200 * time.Millisecond,
ConnectRetry: 10,
RetryTimeout: 500 * time.Millisecond,
WorkDir: name, // this is not wrong
}

i++
}

instances, err := chelper.StartTarantoolInstances(opts)
defer test_helpers.StopTarantoolInstances(instances)
if err != nil {
log.Printf("Failed to prepare test Tarantool: %s", err)
return 1
}

return m.Run()
}

// TestMain delegates cluster setup/teardown to runTestMain and exits with the
// code from the test run.
func TestMain(m *testing.M) {
	os.Exit(runTestMain(m))
}
231 changes: 140 additions & 91 deletions replicaset_test.go
Original file line number Diff line number Diff line change
@@ -1,120 +1,169 @@
package vshard_router_test //nolint:revive
package vshard_router //nolint:revive

import (
"bytes"
"context"
"log"
"os"
"fmt"
"testing"
"time"

vshard_router "github.com/KaymeKaydex/go-vshard-router"
"github.com/KaymeKaydex/go-vshard-router/providers/static"
chelper "github.com/KaymeKaydex/go-vshard-router/test_helper"
"github.com/google/uuid"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/tarantool/go-tarantool/v2"
"github.com/tarantool/go-tarantool/v2/test_helpers"
"github.com/vmihailenco/msgpack/v5"

mockpool "github.com/KaymeKaydex/go-vshard-router/mocks/pool"
)

func TestRouter_ClusterBootstrap(t *testing.T) {
ctx := context.Background()
func TestReplicasetInfo_String(t *testing.T) {
rsUUID := uuid.New()
rsInfo := ReplicasetInfo{
Name: "test",
UUID: rsUUID,
}

router, err := vshard_router.NewRouter(ctx, vshard_router.Config{
TotalBucketCount: 100,
TopologyProvider: static.NewProvider(topology),
User: "guest",
})
require.NotNil(t, router)
require.NoError(t, err)
rs := Replicaset{
info: rsInfo,
}

err = router.ClusterBootstrap(ctx, true)
require.NoError(t, err)
require.Equal(t, rsInfo.String(), rs.String())
require.Contains(t, rsInfo.String(), "test")
require.Contains(t, rsInfo.String(), rsUUID.String())
}

const instancesCount = 4
func TestReplicaset_BucketStat(t *testing.T) {
ctx := context.TODO()
rsUUID := uuid.New()
rsInfo := ReplicasetInfo{
Name: "test",
UUID: rsUUID,
}

rs := Replicaset{
info: rsInfo,
}

t.Run("pool do error", func(t *testing.T) {
futureError := fmt.Errorf("testErr")

errFuture := tarantool.NewFuture(tarantool.NewCallRequest("test"))
errFuture.SetError(futureError)

mPool := mockpool.NewPool(t)
mPool.On("Do", mock.Anything, mock.Anything).Return(errFuture)
rs.conn = mPool

_, err := rs.BucketStat(ctx, 123)
require.Equal(t, futureError, err)
})

t.Run("unsupported or broken proto resp", func(t *testing.T) {
f := tarantool.NewFuture(tarantool.NewCallRequest("vshard.storage.bucket_stat"))

// init servers from our cluster
var serverNames = map[string]string{
// shard 1
"storage_1_a": "127.0.0.1:3301",
"storage_1_b": "127.0.0.1:3302",
// shard 2
"storage_2_a": "127.0.0.1:3303",
"storage_2_b": "127.0.0.1:3304",
bts, _ := msgpack.Marshal([]interface{}{1})

err := f.SetResponse(tarantool.Header{}, bytes.NewReader(bts))
require.NoError(t, err)

mPool := mockpool.NewPool(t)
mPool.On("Do", mock.Anything, mock.Anything).Return(f)
rs.conn = mPool

// todo: add real tests

statInfo, err := rs.BucketStat(ctx, 123)
require.Error(t, err)
require.Equal(t, statInfo, BucketStatInfo{BucketID: 0, Status: ""})
})

/*
TODO: add test for wrong bucket response
unix/:./data/storage_1_a.control> vshard.storage.bucket_stat(1000)
---
- null
- bucket_id: 1000
reason: Not found
code: 1
type: ShardingError
message: 'Cannot perform action with bucket 1000, reason: Not found'
name: WRONG_BUCKET
...
*/
}

var topology = map[vshard_router.ReplicasetInfo][]vshard_router.InstanceInfo{
{
Name: "storage_1",
UUID: uuid.New(),
Weight: 1,
}: {
func TestCalculateEtalonBalance(t *testing.T) {
tests := []struct {
name string
replicasets []Replicaset
bucketCount uint64
expectedCounts []uint64
expectError bool
}{
{
Name: "storage_1_a",
UUID: uuid.New(),
Addr: "127.0.0.1:3301",
name: "FullBalance",
replicasets: []Replicaset{
{info: ReplicasetInfo{Weight: 1, PinnedCount: 0, IgnoreDisbalance: false}},
{info: ReplicasetInfo{Weight: 1, PinnedCount: 0, IgnoreDisbalance: false}},
{info: ReplicasetInfo{Weight: 1, PinnedCount: 0, IgnoreDisbalance: false}},
},
bucketCount: 9,
expectedCounts: []uint64{3, 3, 3},
expectError: false,
},
{
Name: "storage_1_b",
UUID: uuid.New(),
Addr: "127.0.0.1:3302",
name: "PinnedMoreThanWeight",
replicasets: []Replicaset{
{info: ReplicasetInfo{Weight: 1, PinnedCount: 60, IgnoreDisbalance: false}},
{info: ReplicasetInfo{Weight: 1, PinnedCount: 0, IgnoreDisbalance: false}},
},
bucketCount: 100,
expectedCounts: []uint64{60, 40},
expectError: false,
},
},
{
Name: "storage_2",
UUID: uuid.New(),
Weight: 1,
}: {
{
Name: "storage_2_a",
UUID: uuid.New(),
Addr: "127.0.0.1:3303",
name: "ZeroWeight",
replicasets: []Replicaset{
{info: ReplicasetInfo{Weight: 0, PinnedCount: 0, IgnoreDisbalance: false}},
{info: ReplicasetInfo{Weight: 1, PinnedCount: 0, IgnoreDisbalance: false}},
},
bucketCount: 10,
expectError: false,
expectedCounts: []uint64{0, 10},
},
{
Name: "storage_2_b",
UUID: uuid.New(),
Addr: "127.0.0.1:3304",
name: "ZeroAllWeights",
replicasets: []Replicaset{
{info: ReplicasetInfo{Weight: 0, PinnedCount: 0, IgnoreDisbalance: false}},
{info: ReplicasetInfo{Weight: 0, PinnedCount: 0, IgnoreDisbalance: false}},
},
bucketCount: 10,
expectError: true,
},
{
name: "UnevenDistribution",
replicasets: []Replicaset{
{info: ReplicasetInfo{Weight: 1, PinnedCount: 0, IgnoreDisbalance: false}},
{info: ReplicasetInfo{Weight: 2, PinnedCount: 0, IgnoreDisbalance: false}},
},
bucketCount: 7,
expectError: false,
expectedCounts: []uint64{2, 5},
},
},
}

func runTestMain(m *testing.M) int {

dialers := make([]tarantool.NetDialer, instancesCount)
opts := make([]test_helpers.StartOpts, instancesCount)
instances := make([]test_helpers.TarantoolInstance, instancesCount)

i := 0
for name, addr := range serverNames {
dialers[i] = tarantool.NetDialer{
Address: addr,
User: "guest",
}

opts[i] = test_helpers.StartOpts{
Dialer: dialers[i],
InitScript: "config.lua",
Listen: addr,
WaitStart: 200 * time.Millisecond,
ConnectRetry: 10,
RetryTimeout: 500 * time.Millisecond,
WorkDir: name, // this is not wrong
}

i++
}

instances, err := chelper.StartTarantoolInstances(opts)
defer test_helpers.StopTarantoolInstances(instances)
if err != nil {
log.Printf("Failed to prepare test Tarantool: %s", err)
return 1
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := CalculateEtalonBalance(tt.replicasets, tt.bucketCount)

if tt.expectError {
require.Error(t, err)
} else {
require.NoError(t, err)
for i, expectedCount := range tt.expectedCounts {
require.Equal(t, expectedCount, tt.replicasets[i].EtalonBucketCount)
}
}
})
}

return m.Run()
}

func TestMain(m *testing.M) {
code := runTestMain(m)
os.Exit(code)
}
3 changes: 2 additions & 1 deletion test_helper/helper.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ func StartTarantoolInstances(instsOpts []ttnt.StartOpts) ([]ttnt.TarantoolInstan
})

}
err := errGr.Wait()

return instances, errGr.Wait()
return instances, err
}

0 comments on commit bfbfc0f

Please sign in to comment.