From 6470d811d2d94b7c3a79f06bbfaf139217533b85 Mon Sep 17 00:00:00 2001
From: Simon Esposito
Date: Mon, 17 Jul 2023 20:07:25 +0100
Subject: [PATCH] Add storage engine index (#1056)

Introduce storage index runtime functions:
* RegisterStorageIndex
* RegisterStorageIndexFilter
* StorageIndexList
---
 CHANGELOG.md                            |   1 +
 go.mod                                  |   2 +-
 go.sum                                  |   4 +-
 main.go                                 |  29 +-
 server/api.go                           |   4 +-
 server/api_leaderboard_test.go          |   2 +-
 server/api_storage.go                   |   4 +-
 server/api_test.go                      |   7 +-
 server/console.go                       |   4 +-
 server/console_storage.go               |   4 +-
 server/console_storage_import.go        |  12 +-
 server/core_storage.go                  |   9 +-
 server/core_storage_test.go             | 112 ++--
 server/core_wallet_test.go              |  14 +-
 server/leaderboard_rank_cache_test.go   |   4 +-
 server/match_common_test.go             |  10 +-
 server/match_presence_test.go           |   1 +
 server/match_registry_test.go           |   6 +-
 server/matchmaker_test.go               |   5 +-
 server/runtime.go                       |  43 +-
 server/runtime_go.go                    |  39 +-
 server/runtime_go_nakama.go             |  26 +-
 server/runtime_javascript.go            | 132 +++-
 server/runtime_javascript_init.go       |  88 ++-
 server/runtime_javascript_match_core.go |   4 +-
 server/runtime_javascript_nakama.go     | 353 ++++++-----
 server/runtime_lua.go                   | 102 +++-
 server/runtime_lua_match_core.go        |   4 +-
 server/runtime_lua_nakama.go            | 189 +++++-
 server/runtime_test.go                  |   4 +-
 server/storage_index.go                 | 574 ++++++++++++++++++
 .../nakama-common/runtime/runtime.go    |   8 +-
 vendor/modules.txt                      |   2 +-
 33 files changed, 1469 insertions(+), 333 deletions(-)
 create mode 100644 server/storage_index.go

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 73bbeeec9e..4b6548aeb8 100755
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,7 @@ The format is based on [keep a changelog](http://keepachangelog.com) and this pr
 ### Added
 - Introduce pagination for console API leaderboard and tournament listing endpoint.
 - Introduce pagination for devconsole leaderboard view.
+- Add storage object indexing support and related runtime functions.
 
 ### Changed
 - Better formatting for graphed values in devconsole status view.
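As a usage illustration (not part of the patch itself), below is a minimal sketch of a Go server module wiring up the new hooks. It assumes the runtime.Initializer methods mirror the RuntimeGoInitializer methods added in server/runtime_go.go further down in this patch, namely RegisterStorageIndex(name, collection, key string, fields []string, maxEntries int) and RegisterStorageIndexFilter(indexName string, fn); the index name, collection, key and field names here are placeholders, not values taken from this change.

package main

import (
	"context"
	"database/sql"

	"github.com/heroiclabs/nakama-common/runtime"
)

// InitModule registers a storage index over a hypothetical "pets" collection and a
// filter that decides, per write, whether an object is admitted into that index.
func InitModule(ctx context.Context, logger runtime.Logger, db *sql.DB, nk runtime.NakamaModule, initializer runtime.Initializer) error {
	// Index the "name" and "level" fields of objects written to collection "pets",
	// key "pet", keeping at most 1000 entries in the index.
	if err := initializer.RegisterStorageIndex("pets_idx", "pets", "pet", []string{"name", "level"}, 1000); err != nil {
		return err
	}
	// Only publicly readable objects make it into the index; the filter runs under the
	// new RuntimeExecutionModeStorageIndexFilter mode added in server/runtime_go.go.
	return initializer.RegisterStorageIndexFilter("pets_idx", func(ctx context.Context, logger runtime.Logger, db *sql.DB, nk runtime.NakamaModule, write *runtime.StorageWrite) bool {
		return write.PermissionRead == 2
	})
}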
diff --git a/go.mod b/go.mod index 4851faa9f1..f43023c942 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.5.0 github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 - github.com/heroiclabs/nakama-common v0.0.0-20230713130524-38774b285b66 + github.com/heroiclabs/nakama-common v1.27.1-0.20230717184507-dff09d7c8047 github.com/jackc/pgconn v1.14.0 github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa github.com/jackc/pgtype v1.14.0 diff --git a/go.sum b/go.sum index 61300ca274..5dc97057f7 100644 --- a/go.sum +++ b/go.sum @@ -291,8 +291,8 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/heroiclabs/nakama-common v0.0.0-20230713130524-38774b285b66 h1:hcZM9fsheM4xprMyszVL5n+YLl2MBGlhRIrYUV0/tg0= -github.com/heroiclabs/nakama-common v0.0.0-20230713130524-38774b285b66/go.mod h1:Os8XeXGvHAap/p6M/8fQ3gle4eEXDGRQmoRNcPQTjXs= +github.com/heroiclabs/nakama-common v1.27.1-0.20230717184507-dff09d7c8047 h1:BnNhnDQBeQyPigo2F162XwYYP7RxrEgxMJx992SLK+g= +github.com/heroiclabs/nakama-common v1.27.1-0.20230717184507-dff09d7c8047/go.mod h1:Os8XeXGvHAap/p6M/8fQ3gle4eEXDGRQmoRNcPQTjXs= github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= diff --git a/main.go b/main.go index b077fb788b..e889cf2f7b 100644 --- a/main.go +++ b/main.go @@ -16,11 +16,8 @@ package main import ( "context" - cryptoRand "crypto/rand" - "encoding/binary" "flag" "fmt" - "math/rand" "net/http" "net/url" "os" @@ -104,14 +101,6 @@ func main() { startupLogger.Info("Node", zap.String("name", config.GetName()), zap.String("version", semver), zap.String("runtime", runtime.Version()), zap.Int("cpu", runtime.NumCPU()), zap.Int("proc", runtime.GOMAXPROCS(0))) startupLogger.Info("Data directory", zap.String("path", config.GetDataDir())) - // Initialize the global random with strongly seed. 
- var seed int64 - if err := binary.Read(cryptoRand.Reader, binary.BigEndian, &seed); err != nil { - startupLogger.Warn("Failed to get strongly random seed, fallback to a less random one.", zap.Error(err)) - seed = time.Now().UnixNano() - } - rand.Seed(seed) - redactedAddresses := make([]string, 0, 1) for _, address := range config.GetDatabase().Addresses { rawURL := fmt.Sprintf("postgres://%s", address) @@ -153,7 +142,12 @@ func main() { tracker.SetMatchJoinListener(matchRegistry.Join) tracker.SetMatchLeaveListener(matchRegistry.Leave) streamManager := server.NewLocalStreamManager(config, sessionRegistry, tracker) - runtime, runtimeInfo, err := server.NewRuntime(ctx, logger, startupLogger, db, jsonpbMarshaler, jsonpbUnmarshaler, config, version, socialClient, leaderboardCache, leaderboardRankCache, leaderboardScheduler, sessionRegistry, sessionCache, statusRegistry, matchRegistry, tracker, metrics, streamManager, router) + + storageIndex, err := server.NewLocalStorageIndex(logger, db) + if err != nil { + logger.Fatal("Failed to initialize storage index", zap.Error(err)) + } + runtime, runtimeInfo, err := server.NewRuntime(ctx, logger, startupLogger, db, jsonpbMarshaler, jsonpbUnmarshaler, config, version, socialClient, leaderboardCache, leaderboardRankCache, leaderboardScheduler, sessionRegistry, sessionCache, statusRegistry, matchRegistry, tracker, metrics, streamManager, router, storageIndex) if err != nil { startupLogger.Fatal("Failed initializing runtime modules", zap.Error(err)) } @@ -162,14 +156,21 @@ func main() { tracker.SetPartyJoinListener(partyRegistry.Join) tracker.SetPartyLeaveListener(partyRegistry.Leave) + storageIndex.RegisterFilters(runtime) + go func() { + if err = storageIndex.Load(ctx); err != nil { + logger.Error("Failed to load storage index entries from database", zap.Error(err)) + } + }() + leaderboardScheduler.Start(runtime) googleRefundScheduler.Start(runtime) pipeline := server.NewPipeline(logger, config, db, jsonpbMarshaler, jsonpbUnmarshaler, sessionRegistry, statusRegistry, matchRegistry, partyRegistry, matchmaker, tracker, router, runtime) statusHandler := server.NewLocalStatusHandler(logger, sessionRegistry, matchRegistry, tracker, metrics, config.GetName()) - apiServer := server.StartApiServer(logger, startupLogger, db, jsonpbMarshaler, jsonpbUnmarshaler, config, version, socialClient, leaderboardCache, leaderboardRankCache, sessionRegistry, sessionCache, statusRegistry, matchRegistry, matchmaker, tracker, router, streamManager, metrics, pipeline, runtime) - consoleServer := server.StartConsoleServer(logger, startupLogger, db, config, tracker, router, streamManager, metrics, sessionRegistry, sessionCache, consoleSessionCache, loginAttemptCache, statusRegistry, statusHandler, runtimeInfo, matchRegistry, configWarnings, semver, leaderboardCache, leaderboardRankCache, leaderboardScheduler, apiServer, runtime, cookie) + apiServer := server.StartApiServer(logger, startupLogger, db, jsonpbMarshaler, jsonpbUnmarshaler, config, version, socialClient, storageIndex, leaderboardCache, leaderboardRankCache, sessionRegistry, sessionCache, statusRegistry, matchRegistry, matchmaker, tracker, router, streamManager, metrics, pipeline, runtime) + consoleServer := server.StartConsoleServer(logger, startupLogger, db, config, tracker, router, streamManager, metrics, sessionRegistry, sessionCache, consoleSessionCache, loginAttemptCache, statusRegistry, statusHandler, runtimeInfo, matchRegistry, configWarnings, semver, leaderboardCache, leaderboardRankCache, 
leaderboardScheduler, storageIndex, apiServer, runtime, cookie) gaenabled := len(os.Getenv("NAKAMA_TELEMETRY")) < 1 console.UIFS.Nt = !gaenabled diff --git a/server/api.go b/server/api.go index 57c44c3af4..ba25232ba6 100644 --- a/server/api.go +++ b/server/api.go @@ -69,6 +69,7 @@ type ApiServer struct { config Config version string socialClient *social.Client + storageIndex StorageIndex leaderboardCache LeaderboardCache leaderboardRankCache LeaderboardRankCache sessionCache SessionCache @@ -84,7 +85,7 @@ type ApiServer struct { grpcGatewayServer *http.Server } -func StartApiServer(logger *zap.Logger, startupLogger *zap.Logger, db *sql.DB, protojsonMarshaler *protojson.MarshalOptions, protojsonUnmarshaler *protojson.UnmarshalOptions, config Config, version string, socialClient *social.Client, leaderboardCache LeaderboardCache, leaderboardRankCache LeaderboardRankCache, sessionRegistry SessionRegistry, sessionCache SessionCache, statusRegistry *StatusRegistry, matchRegistry MatchRegistry, matchmaker Matchmaker, tracker Tracker, router MessageRouter, streamManager StreamManager, metrics Metrics, pipeline *Pipeline, runtime *Runtime) *ApiServer { +func StartApiServer(logger *zap.Logger, startupLogger *zap.Logger, db *sql.DB, protojsonMarshaler *protojson.MarshalOptions, protojsonUnmarshaler *protojson.UnmarshalOptions, config Config, version string, socialClient *social.Client, storageIndex StorageIndex, leaderboardCache LeaderboardCache, leaderboardRankCache LeaderboardRankCache, sessionRegistry SessionRegistry, sessionCache SessionCache, statusRegistry *StatusRegistry, matchRegistry MatchRegistry, matchmaker Matchmaker, tracker Tracker, router MessageRouter, streamManager StreamManager, metrics Metrics, pipeline *Pipeline, runtime *Runtime) *ApiServer { var gatewayContextTimeoutMs string if config.GetSocket().IdleTimeoutMs > 500 { // Ensure the GRPC Gateway timeout is just under the idle timeout (if possible) to ensure it has priority. 
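The ApiServer now carries a StorageIndex, but server/storage_index.go (574 new lines) is not included in this excerpt. Judging only from the call sites threaded through the rest of the patch — CreateIndex in runtime_go.go, Write/Delete in core_storage.go, Load and RegisterFilters in main.go — the contract looks roughly like the sketch below, written as if inside package server so unexported types resolve. Method names come from those call sites; return types that are not observable at a call site, and the List signature, are assumptions.

// Assumed shape of the StorageIndex contract, reconstructed from its call sites in
// this patch; the authoritative definition is in the new server/storage_index.go.
type StorageIndex interface {
	// CreateIndex backs RegisterStorageIndex in the runtime initializers.
	CreateIndex(ctx context.Context, name, collection, key string, fields []string, maxEntries int) error
	// Write and Delete are called by StorageWriteObjects/StorageDeleteObjects once the
	// database transaction has committed.
	Write(ctx context.Context, writes StorageOpWrites)
	Delete(ctx context.Context, deletes StorageOpDeletes)
	// List backs the StorageIndexList runtime function; signature assumed.
	List(ctx context.Context, indexName, query string, limit int) (*api.StorageObjects, error)
	// Load seeds the index from the database; main.go calls it in a goroutine at startup.
	Load(ctx context.Context) error
	// RegisterFilters hooks any registered storage index filter functions into the index.
	RegisterFilters(runtime *Runtime)
}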
@@ -119,6 +120,7 @@ func StartApiServer(logger *zap.Logger, startupLogger *zap.Logger, db *sql.DB, p socialClient: socialClient, leaderboardCache: leaderboardCache, leaderboardRankCache: leaderboardRankCache, + storageIndex: storageIndex, sessionCache: sessionCache, sessionRegistry: sessionRegistry, statusRegistry: statusRegistry, diff --git a/server/api_leaderboard_test.go b/server/api_leaderboard_test.go index b602b9d952..919f57d563 100644 --- a/server/api_leaderboard_test.go +++ b/server/api_leaderboard_test.go @@ -116,7 +116,7 @@ nk.leaderboard_create(%q, %v, %q, %q, reset, metadata) pipeline := NewPipeline(logger, cfg, db, protojsonMarshaler, protojsonUnmarshaler, nil, nil, nil, nil, nil, tracker, router, runtime) apiServer := StartApiServer(logger, logger, db, protojsonMarshaler, - protojsonUnmarshaler, cfg, "3.0.0", nil, rtData.leaderboardCache, + protojsonUnmarshaler, cfg, "3.0.0", nil, nil, rtData.leaderboardCache, rtData.leaderboardRankCache, nil, sessionCache, nil, nil, nil, tracker, router, nil, metrics, pipeline, runtime) diff --git a/server/api_storage.go b/server/api_storage.go index 493cb003f7..5d49b6378b 100644 --- a/server/api_storage.go +++ b/server/api_storage.go @@ -214,7 +214,7 @@ func (s *ApiServer) WriteStorageObjects(ctx context.Context, in *api.WriteStorag }) } - acks, code, err := StorageWriteObjects(ctx, s.logger, s.db, s.metrics, false, ops) + acks, code, err := StorageWriteObjects(ctx, s.logger, s.db, s.metrics, s.storageIndex, false, ops) if err != nil { if code == codes.Internal { return nil, status.Error(codes.Internal, "Error writing storage objects.") @@ -279,7 +279,7 @@ func (s *ApiServer) DeleteStorageObjects(ctx context.Context, in *api.DeleteStor }) } - if code, err := StorageDeleteObjects(ctx, s.logger, s.db, false, ops); err != nil { + if code, err := StorageDeleteObjects(ctx, s.logger, s.db, s.storageIndex, false, ops); err != nil { if code == codes.Internal { return nil, status.Error(codes.Internal, "Error deleting storage objects.") } diff --git a/server/api_test.go b/server/api_test.go index 21cd500c4e..aca15cf9e1 100644 --- a/server/api_test.go +++ b/server/api_test.go @@ -52,8 +52,9 @@ var ( protojsonUnmarshaler = &protojson.UnmarshalOptions{ DiscardUnknown: false, } - metrics = NewLocalMetrics(logger, logger, nil, cfg) - _ = CheckConfig(logger, cfg) + metrics = NewLocalMetrics(logger, logger, nil, cfg) + storageIdx, _ = NewLocalStorageIndex(logger, nil, []StorageIndexConfig{}) + _ = CheckConfig(logger, cfg) ) type DummyMessageRouter struct{} @@ -199,7 +200,7 @@ func NewAPIServer(t *testing.T, runtime *Runtime) (*ApiServer, *Pipeline) { tracker := &LocalTracker{} sessionCache := NewLocalSessionCache(3600) pipeline := NewPipeline(logger, cfg, db, protojsonMarshaler, protojsonUnmarshaler, nil, nil, nil, nil, nil, tracker, router, runtime) - apiServer := StartApiServer(logger, logger, db, protojsonMarshaler, protojsonUnmarshaler, cfg, "3.0.0", nil, nil, nil, nil, sessionCache, nil, nil, nil, tracker, router, nil, metrics, pipeline, runtime) + apiServer := StartApiServer(logger, logger, db, protojsonMarshaler, protojsonUnmarshaler, cfg, "3.0.0", nil, storageIdx, nil, nil, nil, sessionCache, nil, nil, nil, tracker, router, nil, metrics, pipeline, runtime) WaitForSocket(nil, cfg) return apiServer, pipeline diff --git a/server/console.go b/server/console.go index 5c05dcc3f7..0019730426 100644 --- a/server/console.go +++ b/server/console.go @@ -149,6 +149,7 @@ type ConsoleServer struct { statusRegistry *StatusRegistry matchRegistry MatchRegistry 
statusHandler StatusHandler + storageIndex StorageIndex runtimeInfo *RuntimeInfo configWarnings map[string]string serverVersion string @@ -164,7 +165,7 @@ type ConsoleServer struct { httpClient *http.Client } -func StartConsoleServer(logger *zap.Logger, startupLogger *zap.Logger, db *sql.DB, config Config, tracker Tracker, router MessageRouter, streamManager StreamManager, metrics Metrics, sessionRegistry SessionRegistry, sessionCache SessionCache, consoleSessionCache SessionCache, loginAttemptCache LoginAttemptCache, statusRegistry *StatusRegistry, statusHandler StatusHandler, runtimeInfo *RuntimeInfo, matchRegistry MatchRegistry, configWarnings map[string]string, serverVersion string, leaderboardCache LeaderboardCache, leaderboardRankCache LeaderboardRankCache, leaderboardScheduler LeaderboardScheduler, api *ApiServer, runtime *Runtime, cookie string) *ConsoleServer { +func StartConsoleServer(logger *zap.Logger, startupLogger *zap.Logger, db *sql.DB, config Config, tracker Tracker, router MessageRouter, streamManager StreamManager, metrics Metrics, sessionRegistry SessionRegistry, sessionCache SessionCache, consoleSessionCache SessionCache, loginAttemptCache LoginAttemptCache, statusRegistry *StatusRegistry, statusHandler StatusHandler, runtimeInfo *RuntimeInfo, matchRegistry MatchRegistry, configWarnings map[string]string, serverVersion string, leaderboardCache LeaderboardCache, leaderboardRankCache LeaderboardRankCache, leaderboardScheduler LeaderboardScheduler, storageIndex StorageIndex, api *ApiServer, runtime *Runtime, cookie string) *ConsoleServer { var gatewayContextTimeoutMs string if config.GetConsole().IdleTimeoutMs > 500 { // Ensure the GRPC Gateway timeout is just under the idle timeout (if possible) to ensure it has priority. @@ -205,6 +206,7 @@ func StartConsoleServer(logger *zap.Logger, startupLogger *zap.Logger, db *sql.D leaderboardCache: leaderboardCache, leaderboardRankCache: leaderboardRankCache, leaderboardScheduler: leaderboardScheduler, + storageIndex: storageIndex, api: api, cookie: cookie, httpClient: &http.Client{Timeout: 5 * time.Second}, diff --git a/server/console_storage.go b/server/console_storage.go index 9407d081fd..190f1b3b93 100644 --- a/server/console_storage.go +++ b/server/console_storage.go @@ -65,7 +65,7 @@ func (s *ConsoleServer) DeleteStorageObject(ctx context.Context, in *console.Del return nil, status.Error(codes.InvalidArgument, "Requires a valid user ID.") } - code, err := StorageDeleteObjects(ctx, s.logger, s.db, true, StorageOpDeletes{ + code, err := StorageDeleteObjects(ctx, s.logger, s.db, s.storageIndex, true, StorageOpDeletes{ &StorageOpDelete{ OwnerID: in.UserId, ObjectID: &api.DeleteStorageObjectId{ @@ -334,7 +334,7 @@ func (s *ConsoleServer) WriteStorageObject(ctx context.Context, in *console.Writ return nil, status.Error(codes.InvalidArgument, "Requires a valid JSON object value.") } - acks, code, err := StorageWriteObjects(ctx, s.logger, s.db, s.metrics, true, StorageOpWrites{ + acks, code, err := StorageWriteObjects(ctx, s.logger, s.db, s.metrics, s.storageIndex, true, StorageOpWrites{ &StorageOpWrite{ OwnerID: in.UserId, Object: &api.WriteStorageObject{ diff --git a/server/console_storage_import.go b/server/console_storage_import.go index ed227c5820..2c7473a37b 100644 --- a/server/console_storage_import.go +++ b/server/console_storage_import.go @@ -129,10 +129,10 @@ func (s *ConsoleServer) importStorage(w http.ResponseWriter, r *http.Request) { // Examine file name to determine if it's a JSON or CSV import. 
if strings.HasSuffix(strings.ToLower(filename), ".json") { // File has .json suffix, try to import as JSON. - err = importStorageJSON(r.Context(), s.logger, s.db, s.metrics, fileBytes) + err = importStorageJSON(r.Context(), s.logger, s.db, s.metrics, s.storageIndex, fileBytes) } else { // Assume all other files are CSV. - err = importStorageCSV(r.Context(), s.logger, s.db, s.metrics, fileBytes) + err = importStorageCSV(r.Context(), s.logger, s.db, s.metrics, s.storageIndex, fileBytes) } if err != nil { @@ -145,7 +145,7 @@ func (s *ConsoleServer) importStorage(w http.ResponseWriter, r *http.Request) { } } -func importStorageJSON(ctx context.Context, logger *zap.Logger, db *sql.DB, metrics Metrics, fileBytes []byte) error { +func importStorageJSON(ctx context.Context, logger *zap.Logger, db *sql.DB, metrics Metrics, storageIndex StorageIndex, fileBytes []byte) error { importedData := make([]*importStorageObject, 0) ops := StorageOpWrites{} @@ -200,7 +200,7 @@ func importStorageJSON(ctx context.Context, logger *zap.Logger, db *sql.DB, metr return nil } - acks, _, err := StorageWriteObjects(ctx, logger, db, metrics, true, ops) + acks, _, err := StorageWriteObjects(ctx, logger, db, metrics, storageIndex, true, ops) if err != nil { logger.Warn("Failed to write imported records.", zap.Error(err)) return errors.New("could not import records due to an internal error - please consult server logs") @@ -210,7 +210,7 @@ func importStorageJSON(ctx context.Context, logger *zap.Logger, db *sql.DB, metr return nil } -func importStorageCSV(ctx context.Context, logger *zap.Logger, db *sql.DB, metrics Metrics, fileBytes []byte) error { +func importStorageCSV(ctx context.Context, logger *zap.Logger, db *sql.DB, metrics Metrics, storageIndex StorageIndex, fileBytes []byte) error { r := csv.NewReader(bytes.NewReader(fileBytes)) columnIndexes := make(map[string]int) @@ -300,7 +300,7 @@ func importStorageCSV(ctx context.Context, logger *zap.Logger, db *sql.DB, metri return nil } - acks, _, err := StorageWriteObjects(ctx, logger, db, metrics, true, ops) + acks, _, err := StorageWriteObjects(ctx, logger, db, metrics, storageIndex, true, ops) if err != nil { logger.Warn("Failed to write imported records.", zap.Error(err)) return errors.New("could not import records due to an internal error - please consult server logs") diff --git a/server/core_storage.go b/server/core_storage.go index 48e13fa804..0b8fd78674 100644 --- a/server/core_storage.go +++ b/server/core_storage.go @@ -117,7 +117,6 @@ func StorageListObjects(ctx context.Context, logger *zap.Logger, db *sql.DB, cal // Call from the runtime. if ownerID == nil { // List storage regardless of user. - // TODO result, resultErr = StorageListObjectsAll(ctx, logger, db, true, collection, limit, cursor, sc) } else { // List for a particular user ID. 
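Alongside the owner-scoped listing in StorageListObjects above, the commit message also names StorageIndexList, which is exposed through the NakamaModule and defined on the nakama-common side (referenced here only via the go.mod/go.sum bump). The sketch below assumes a signature of StorageIndexList(ctx, indexName, query string, limit int) (*api.StorageObjects, error) and a Bluge-style query string; both are assumptions made to illustrate intent, not the confirmed API.

package main

import (
	"context"

	"github.com/heroiclabs/nakama-common/runtime"
)

// listPets is a hypothetical helper that queries the index registered in the earlier
// sketch. The StorageIndexList signature and the query syntax are assumed.
func listPets(ctx context.Context, logger runtime.Logger, nk runtime.NakamaModule) error {
	objects, err := nk.StorageIndexList(ctx, "pets_idx", "+value.name:whiskers", 10)
	if err != nil {
		return err
	}
	for _, obj := range objects.GetObjects() {
		logger.Info("indexed object %s/%s owned by %s", obj.Collection, obj.Key, obj.UserId)
	}
	return nil
}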
@@ -464,7 +463,7 @@ WHERE return objects, err } -func StorageWriteObjects(ctx context.Context, logger *zap.Logger, db *sql.DB, metrics Metrics, authoritativeWrite bool, ops StorageOpWrites) (*api.StorageObjectAcks, codes.Code, error) { +func StorageWriteObjects(ctx context.Context, logger *zap.Logger, db *sql.DB, metrics Metrics, storageIndex StorageIndex, authoritativeWrite bool, ops StorageOpWrites) (*api.StorageObjectAcks, codes.Code, error) { var acks []*api.StorageObjectAck if err := ExecuteInTx(ctx, db, func(tx *sql.Tx) error { @@ -483,6 +482,8 @@ func StorageWriteObjects(ctx context.Context, logger *zap.Logger, db *sql.DB, me return nil, codes.Internal, err } + storageIndex.Write(ctx, ops) + return &api.StorageObjectAcks{Acks: acks}, codes.OK, nil } @@ -635,7 +636,7 @@ func storageWriteObject(ctx context.Context, logger *zap.Logger, metrics Metrics return ack, nil } -func StorageDeleteObjects(ctx context.Context, logger *zap.Logger, db *sql.DB, authoritativeDelete bool, ops StorageOpDeletes) (codes.Code, error) { +func StorageDeleteObjects(ctx context.Context, logger *zap.Logger, db *sql.DB, storageIndex StorageIndex, authoritativeDelete bool, ops StorageOpDeletes) (codes.Code, error) { // Ensure deletes are processed in a consistent order. sort.Sort(ops) @@ -681,5 +682,7 @@ func StorageDeleteObjects(ctx context.Context, logger *zap.Logger, db *sql.DB, a return codes.Internal, err } + storageIndex.Delete(ctx, ops) + return codes.OK, nil } diff --git a/server/core_storage_test.go b/server/core_storage_test.go index 26dceabf50..ada4a6dda6 100644 --- a/server/core_storage_test.go +++ b/server/core_storage_test.go @@ -42,7 +42,7 @@ func TestStorageWriteRuntimeGlobalSingle(t *testing.T) { PermissionWrite: &wrapperspb.Int32Value{Value: 1}, }, }} - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, true, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, true, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not OK") @@ -113,7 +113,7 @@ func TestStorageWriteRuntimeUserMultiple(t *testing.T) { }, }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not OK") @@ -146,7 +146,7 @@ func TestStorageWriteRuntimeGlobalSingleIfMatchNotExists(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, true, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, true, ops) assert.Nil(t, acks, "acks was not nil") assert.Equal(t, codes.InvalidArgument, code, "code did not match") @@ -171,7 +171,7 @@ func TestStorageWriteRuntimeGlobalSingleIfMatchExists(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, true, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, true, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not OK") @@ -196,7 +196,7 @@ func TestStorageWriteRuntimeGlobalSingleIfMatchExists(t *testing.T) { }, } - acks, code, err = StorageWriteObjects(context.Background(), logger, db, metrics, true, ops) + acks, code, err = StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, true, ops) assert.Nil(t, err, 
"err was not nil") assert.Equal(t, codes.OK, code, "code was not 0") @@ -225,7 +225,7 @@ func TestStorageWriteRuntimeGlobalSingleIfMatchExistsFail(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, true, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, true, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not OK") @@ -250,7 +250,7 @@ func TestStorageWriteRuntimeGlobalSingleIfMatchExistsFail(t *testing.T) { }, } - acks, code, err = StorageWriteObjects(context.Background(), logger, db, metrics, true, ops) + acks, code, err = StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, true, ops) assert.Nil(t, acks, "acks was not nil") assert.Equal(t, codes.InvalidArgument, code, "code did not match") @@ -276,7 +276,7 @@ func TestStorageWriteRuntimeGlobalSingleIfNoneMatchNotExists(t *testing.T) { }, } - acks, _, err := StorageWriteObjects(context.Background(), logger, db, metrics, true, ops) + acks, _, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, true, ops) assert.Nil(t, err, "err was not nil") assert.NotNil(t, acks, "acks was nil") @@ -304,7 +304,7 @@ func TestStorageWriteRuntimeGlobalSingleIfNoneMatchExists(t *testing.T) { }, } - acks, _, err := StorageWriteObjects(context.Background(), logger, db, metrics, true, ops) + acks, _, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, true, ops) assert.Nil(t, err, "err was not nil") assert.NotNil(t, acks, "acks was nil") @@ -328,7 +328,7 @@ func TestStorageWriteRuntimeGlobalSingleIfNoneMatchExists(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, true, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, true, ops) assert.Nil(t, acks, "acks was not nil") assert.Equal(t, codes.InvalidArgument, code, "code did not match") @@ -364,7 +364,7 @@ func TestStorageWriteRuntimeGlobalMultipleIfMatchNotExists(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, true, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, true, ops) assert.Nil(t, acks, "acks was not nil") assert.Equal(t, codes.InvalidArgument, code, "code did not match") @@ -392,7 +392,7 @@ func TestStorageWritePipelineUserSingle(t *testing.T) { }, } - acks, _, err := StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + acks, _, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) assert.Nil(t, err, "err was not nil") assert.NotNil(t, acks, "acks was nil") @@ -443,7 +443,7 @@ func TestStorageWritePipelineUserMultiple(t *testing.T) { }, } - allAcks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + allAcks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) if err != nil { t.Fatal(err.Error()) } @@ -506,7 +506,7 @@ func TestStorageWriteRuntimeGlobalMultipleSameKey(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, true, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, true, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not OK") @@ -575,7 +575,7 @@ func 
TestStorageWritePipelineUserMultipleSameKey(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not 0") @@ -629,7 +629,7 @@ func TestStorageWritePipelineIfMatchNotExists(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) assert.Nil(t, acks, "acks was not nil") assert.Equal(t, codes.InvalidArgument, code, "code did not match") @@ -657,7 +657,7 @@ func TestStorageWritePipelineIfMatchExistsFail(t *testing.T) { }, } - acks, _, err := StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + acks, _, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) assert.Nil(t, err, "err was not nil") assert.NotNil(t, acks, "acks was nil") @@ -681,7 +681,7 @@ func TestStorageWritePipelineIfMatchExistsFail(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) assert.Nil(t, acks, "acks was not nil") assert.Equal(t, codes.InvalidArgument, code, "code did not match") @@ -710,7 +710,7 @@ func TestStorageWritePipelineIfMatchExists(t *testing.T) { }, } - acks, _, err := StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + acks, _, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) assert.Nil(t, err, "err was not nil") assert.NotNil(t, acks, "acks was nil") @@ -734,7 +734,7 @@ func TestStorageWritePipelineIfMatchExists(t *testing.T) { }, } - acks, _, err = StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + acks, _, err = StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) assert.Nil(t, err, "err was not nil") assert.NotNil(t, acks, "acks was nil") @@ -766,7 +766,7 @@ func TestStorageWritePipelineIfNoneMatchNotExists(t *testing.T) { }, } - acks, _, err := StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + acks, _, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) assert.Nil(t, err, "err was not nil") assert.NotNil(t, acks, "acks was nil") @@ -798,7 +798,7 @@ func TestStorageWritePipelineIfNoneMatchExists(t *testing.T) { }, } - acks, _, err := StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + acks, _, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) assert.Nil(t, err, "err was not nil") assert.NotNil(t, acks, "acks was nil") @@ -822,7 +822,7 @@ func TestStorageWritePipelineIfNoneMatchExists(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) assert.Nil(t, acks, "acks was not nil") assert.Equal(t, codes.InvalidArgument, code, "code did not match") @@ -851,7 +851,7 @@ func TestStorageWritePipelinePermissionFail(t *testing.T) { }, } - acks, _, err := StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + acks, _, err 
:= StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) assert.Nil(t, err, "err was not nil") assert.NotNil(t, acks, "acks was nil") @@ -874,7 +874,7 @@ func TestStorageWritePipelinePermissionFail(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) assert.Nil(t, acks, "acks was not nil") assert.Equal(t, codes.InvalidArgument, code, "code did not match") @@ -901,7 +901,7 @@ func TestStorageFetchRuntimeGlobalPrivate(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, true, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, true, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not OK") @@ -949,7 +949,7 @@ func TestStorageFetchRuntimeMixed(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, true, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, true, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not OK") @@ -1004,7 +1004,7 @@ func TestStorageFetchRuntimeUserPrivate(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, true, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, true, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not OK") @@ -1055,7 +1055,7 @@ func TestStorageFetchPipelineGlobalPrivate(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, true, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, true, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not OK") @@ -1100,7 +1100,7 @@ func TestStorageFetchPipelineUserPrivate(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not OK") @@ -1146,7 +1146,7 @@ func TestStorageFetchPipelineUserRead(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not OK") @@ -1199,7 +1199,7 @@ func TestStorageFetchPipelineUserPublic(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not OK") @@ -1251,7 +1251,7 @@ func TestStorageFetchPipelineUserOtherRead(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not 
OK") @@ -1296,7 +1296,7 @@ func TestStorageFetchPipelineUserOtherPublic(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not OK") @@ -1360,7 +1360,7 @@ func TestStorageFetchPipelineUserOtherPublicMixed(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not OK") @@ -1426,7 +1426,7 @@ func TestStorageRemoveRuntimeGlobalPublic(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, true, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, true, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not OK") @@ -1448,7 +1448,7 @@ func TestStorageRemoveRuntimeGlobalPublic(t *testing.T) { }, } - _, err = StorageDeleteObjects(context.Background(), logger, db, true, deleteOps) + _, err = StorageDeleteObjects(context.Background(), logger, db, storageIdx, true, deleteOps) assert.Nil(t, err, "err was not nil") } @@ -1471,7 +1471,7 @@ func TestStorageRemoveRuntimeGlobalPrivate(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, true, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, true, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not OK") @@ -1493,7 +1493,7 @@ func TestStorageRemoveRuntimeGlobalPrivate(t *testing.T) { }, } - _, err = StorageDeleteObjects(context.Background(), logger, db, true, deleteOps) + _, err = StorageDeleteObjects(context.Background(), logger, db, storageIdx, true, deleteOps) assert.Nil(t, err, "err was not nil") } @@ -1518,7 +1518,7 @@ func TestStorageRemoveRuntimeUserPublic(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, true, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, true, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not OK") @@ -1540,7 +1540,7 @@ func TestStorageRemoveRuntimeUserPublic(t *testing.T) { }, } - _, err = StorageDeleteObjects(context.Background(), logger, db, true, deleteOps) + _, err = StorageDeleteObjects(context.Background(), logger, db, storageIdx, true, deleteOps) assert.Nil(t, err, "err was not nil") } @@ -1565,7 +1565,7 @@ func TestStorageRemoveRuntimeUserPrivate(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not OK") @@ -1587,7 +1587,7 @@ func TestStorageRemoveRuntimeUserPrivate(t *testing.T) { }, } - _, err = StorageDeleteObjects(context.Background(), logger, db, true, deleteOps) + _, err = StorageDeleteObjects(context.Background(), logger, db, storageIdx, true, deleteOps) assert.Nil(t, err, "err was not nil") ids := []*api.ReadStorageObjectId{{ @@ -1622,7 +1622,7 @@ func 
TestStorageRemovePipelineUserWrite(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not OK") @@ -1644,7 +1644,7 @@ func TestStorageRemovePipelineUserWrite(t *testing.T) { }, } - _, err = StorageDeleteObjects(context.Background(), logger, db, true, deleteOps) + _, err = StorageDeleteObjects(context.Background(), logger, db, storageIdx, true, deleteOps) assert.Nil(t, err, "err was not nil") } @@ -1669,7 +1669,7 @@ func TestStorageRemovePipelineUserDenied(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not OK") @@ -1691,7 +1691,7 @@ func TestStorageRemovePipelineUserDenied(t *testing.T) { }, } - code, err = StorageDeleteObjects(context.Background(), logger, db, false, deleteOps) + code, err = StorageDeleteObjects(context.Background(), logger, db, storageIdx, false, deleteOps) assert.NotNil(t, err, "err was nil") assert.Equal(t, code, codes.InvalidArgument, "code did not match InvalidArgument.") } @@ -1711,7 +1711,7 @@ func TestStorageRemoveRuntimeGlobalIfMatchNotExists(t *testing.T) { }, } - code, err := StorageDeleteObjects(context.Background(), logger, db, true, deleteOps) + code, err := StorageDeleteObjects(context.Background(), logger, db, storageIdx, true, deleteOps) assert.NotNil(t, err, "err was nil") assert.Equal(t, code, codes.InvalidArgument, "code did not match InvalidArgument.") } @@ -1735,7 +1735,7 @@ func TestStorageRemoveRuntimeGlobalIfMatchRejected(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, true, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, true, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not OK") @@ -1758,7 +1758,7 @@ func TestStorageRemoveRuntimeGlobalIfMatchRejected(t *testing.T) { }, } - code, err = StorageDeleteObjects(context.Background(), logger, db, true, deleteOps) + code, err = StorageDeleteObjects(context.Background(), logger, db, storageIdx, true, deleteOps) assert.NotNil(t, err, "err was not nil") assert.Equal(t, code, codes.InvalidArgument, "code did not match InvalidArgument.") } @@ -1782,7 +1782,7 @@ func TestStorageRemoveRuntimeGlobalIfMatch(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, true, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, true, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not OK") @@ -1805,7 +1805,7 @@ func TestStorageRemoveRuntimeGlobalIfMatch(t *testing.T) { }, } - code, err = StorageDeleteObjects(context.Background(), logger, db, true, deleteOps) + code, err = StorageDeleteObjects(context.Background(), logger, db, storageIdx, true, deleteOps) assert.Nil(t, err, "err was not nil") assert.Equal(t, code, codes.OK, "code did not match OK.") } @@ -1850,7 +1850,7 @@ func TestStorageListRuntimeUser(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + acks, code, err := 
StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not OK") @@ -1907,7 +1907,7 @@ func TestStorageListPipelineUserSelf(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not OK") @@ -1966,7 +1966,7 @@ func TestStorageListPipelineUserOther(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not OK") @@ -2063,7 +2063,7 @@ func TestStorageListNoRepeats(t *testing.T) { }, } - acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, false, ops) + acks, code, err := StorageWriteObjects(context.Background(), logger, db, metrics, storageIdx, false, ops) assert.Nil(t, err, "err was not nil") assert.Equal(t, codes.OK, code, "code was not OK") diff --git a/server/core_wallet_test.go b/server/core_wallet_test.go index ead06bbf65..53f9097f79 100644 --- a/server/core_wallet_test.go +++ b/server/core_wallet_test.go @@ -70,7 +70,7 @@ func TestUpdateWalletSingleUser(t *testing.T) { } db := NewDB(t) - nk := NewRuntimeGoNakamaModule(logger, db, nil, cfg, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) + nk := NewRuntimeGoNakamaModule(logger, db, nil, cfg, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) userID, _, _, err := AuthenticateCustom(context.Background(), logger, db, uuid.Must(uuid.NewV4()).String(), uuid.Must(uuid.NewV4()).String(), true) if err != nil { @@ -148,7 +148,7 @@ func TestUpdateWalletMultiUser(t *testing.T) { } db := NewDB(t) - nk := NewRuntimeGoNakamaModule(logger, db, nil, cfg, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) + nk := NewRuntimeGoNakamaModule(logger, db, nil, cfg, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) count := 5 userIDs := make([]string, 0, count) @@ -235,7 +235,7 @@ func TestUpdateWalletsMultiUser(t *testing.T) { } db := NewDB(t) - nk := NewRuntimeGoNakamaModule(logger, db, nil, cfg, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) + nk := NewRuntimeGoNakamaModule(logger, db, nil, cfg, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) count := 5 userIDs := make([]string, 0, count) @@ -327,7 +327,7 @@ func TestUpdateWalletsMultiUserSharedChangeset(t *testing.T) { } db := NewDB(t) - nk := NewRuntimeGoNakamaModule(logger, db, nil, cfg, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) + nk := NewRuntimeGoNakamaModule(logger, db, nil, cfg, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) count := 5 userIDs := make([]string, 0, count) @@ -423,7 +423,7 @@ func TestUpdateWalletsMultiUserSharedChangesetDeductions(t *testing.T) { } db := NewDB(t) - nk := NewRuntimeGoNakamaModule(logger, db, nil, cfg, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) + nk := NewRuntimeGoNakamaModule(logger, db, nil, cfg, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) count := 5 userIDs := make([]string, 0, count) @@ -478,7 +478,7 @@ func TestUpdateWalletsMultiUserSharedChangesetDeductions(t *testing.T) { func 
TestUpdateWalletsSingleUser(t *testing.T) { db := NewDB(t) - nk := NewRuntimeGoNakamaModule(logger, db, nil, cfg, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) + nk := NewRuntimeGoNakamaModule(logger, db, nil, cfg, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) userID, _, _, err := AuthenticateCustom(context.Background(), logger, db, uuid.Must(uuid.NewV4()).String(), uuid.Must(uuid.NewV4()).String(), true) if err != nil { @@ -525,7 +525,7 @@ func TestUpdateWalletsSingleUser(t *testing.T) { func TestUpdateWalletRepeatedSingleUser(t *testing.T) { db := NewDB(t) - nk := NewRuntimeGoNakamaModule(logger, db, nil, cfg, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) + nk := NewRuntimeGoNakamaModule(logger, db, nil, cfg, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) userID, _, _, err := AuthenticateCustom(context.Background(), logger, db, uuid.Must(uuid.NewV4()).String(), uuid.Must(uuid.NewV4()).String(), true) if err != nil { diff --git a/server/leaderboard_rank_cache_test.go b/server/leaderboard_rank_cache_test.go index 196d74e9cb..c8dc1c2e46 100644 --- a/server/leaderboard_rank_cache_test.go +++ b/server/leaderboard_rank_cache_test.go @@ -15,10 +15,10 @@ package server import ( - "testing" - + "github.com/gofrs/uuid/v5" "github.com/heroiclabs/nakama-common/api" "github.com/stretchr/testify/assert" + "testing" ) func TestLocalLeaderboardRankCache_Insert_Ascending(t *testing.T) { diff --git a/server/match_common_test.go b/server/match_common_test.go index 168d30b01c..af1eb7ff14 100644 --- a/server/match_common_test.go +++ b/server/match_common_test.go @@ -17,16 +17,16 @@ package server import ( "context" "database/sql" - "os" - "strconv" - "testing" - "time" - + "github.com/gofrs/uuid/v5" "github.com/heroiclabs/nakama-common/rtapi" "github.com/heroiclabs/nakama-common/runtime" "go.uber.org/atomic" "go.uber.org/zap" "go.uber.org/zap/zapcore" + "os" + "strconv" + "testing" + "time" ) // loggerForTest allows for easily adjusting log output produced by tests in one place diff --git a/server/match_presence_test.go b/server/match_presence_test.go index 989a018d60..0b62a7638a 100644 --- a/server/match_presence_test.go +++ b/server/match_presence_test.go @@ -15,6 +15,7 @@ package server import ( + "github.com/gofrs/uuid/v5" "testing" ) diff --git a/server/match_registry_test.go b/server/match_registry_test.go index e5a2ce1140..c960002d77 100644 --- a/server/match_registry_test.go +++ b/server/match_registry_test.go @@ -19,14 +19,14 @@ import ( "context" "encoding/gob" "fmt" - "strings" - "testing" - "github.com/blugelabs/bluge" + "github.com/gofrs/uuid/v5" "github.com/heroiclabs/nakama-common/runtime" "go.uber.org/atomic" "go.uber.org/zap" "google.golang.org/protobuf/types/known/wrapperspb" + "strings" + "testing" ) func TestEncode(t *testing.T) { diff --git a/server/matchmaker_test.go b/server/matchmaker_test.go index 0f8d8101e7..84d7b298cb 100644 --- a/server/matchmaker_test.go +++ b/server/matchmaker_test.go @@ -23,6 +23,7 @@ import ( "time" "github.com/blugelabs/bluge" + "github.com/gofrs/uuid/v5" "github.com/heroiclabs/nakama-common/rtapi" "github.com/heroiclabs/nakama-common/runtime" "github.com/stretchr/testify/assert" @@ -1654,9 +1655,7 @@ func createTestMatchmaker(t fatalable, logger *zap.Logger, tickerActive bool, me t.Fatalf("error creating test match registry: %v", err) } - runtime, _, err := NewRuntime(context.Background(), logger, logger, nil, jsonpbMarshaler, jsonpbUnmarshaler, cfg, "", - nil, nil, nil, nil, sessionRegistry, 
nil, nil, - nil, tracker, metrics, nil, messageRouter) + runtime, _, err := NewRuntime(context.Background(), logger, logger, nil, jsonpbMarshaler, jsonpbUnmarshaler, cfg, "", nil, nil, nil, nil, sessionRegistry, nil, nil, nil, tracker, metrics, nil, messageRouter, storageIdx) if err != nil { t.Fatal(err) } diff --git a/server/runtime.go b/server/runtime.go index 21eab1828f..5eba95e439 100644 --- a/server/runtime.go +++ b/server/runtime.go @@ -224,6 +224,8 @@ type ( RuntimePurchaseNotificationGoogleFunction func(ctx context.Context, purchase *api.ValidatedPurchase, providerPayload string) error RuntimeSubscriptionNotificationGoogleFunction func(ctx context.Context, subscription *api.ValidatedSubscription, providerPayload string) error + RuntimeStorageIndexFilterFunction func(ctx context.Context, write *StorageOpWrite) (bool, error) + RuntimeEventFunction func(ctx context.Context, logger runtime.Logger, evt *api.Event) RuntimeEventCustomFunction func(ctx context.Context, evt *api.Event) @@ -250,6 +252,7 @@ const ( RuntimeExecutionModeSubscriptionNotificationApple RuntimeExecutionModePurchaseNotificationGoogle RuntimeExecutionModeSubscriptionNotificationGoogle + RuntimeExecutionModeStorageIndexFilter ) func (e RuntimeExecutionMode) String() string { @@ -286,6 +289,8 @@ func (e RuntimeExecutionMode) String() string { return "purchase_notification_google" case RuntimeExecutionModeSubscriptionNotificationGoogle: return "subscription_notification_google" + case RuntimeExecutionModeStorageIndexFilter: + return "storage_index_filter" } return "" @@ -511,6 +516,8 @@ type Runtime struct { purchaseNotificationGoogleFunction RuntimePurchaseNotificationGoogleFunction subscriptionNotificationGoogleFunction RuntimeSubscriptionNotificationGoogleFunction + storageIndexFilterFunctions map[string]RuntimeStorageIndexFilterFunction + leaderboardResetFunction RuntimeLeaderboardResetFunction eventFunctions *RuntimeEventFunctions @@ -616,7 +623,7 @@ func CheckRuntime(logger *zap.Logger, config Config, version string) error { return nil } -func NewRuntime(ctx context.Context, logger, startupLogger *zap.Logger, db *sql.DB, protojsonMarshaler *protojson.MarshalOptions, protojsonUnmarshaler *protojson.UnmarshalOptions, config Config, version string, socialClient *social.Client, leaderboardCache LeaderboardCache, leaderboardRankCache LeaderboardRankCache, leaderboardScheduler LeaderboardScheduler, sessionRegistry SessionRegistry, sessionCache SessionCache, statusRegistry *StatusRegistry, matchRegistry MatchRegistry, tracker Tracker, metrics Metrics, streamManager StreamManager, router MessageRouter) (*Runtime, *RuntimeInfo, error) { +func NewRuntime(ctx context.Context, logger, startupLogger *zap.Logger, db *sql.DB, protojsonMarshaler *protojson.MarshalOptions, protojsonUnmarshaler *protojson.UnmarshalOptions, config Config, version string, socialClient *social.Client, leaderboardCache LeaderboardCache, leaderboardRankCache LeaderboardRankCache, leaderboardScheduler LeaderboardScheduler, sessionRegistry SessionRegistry, sessionCache SessionCache, statusRegistry *StatusRegistry, matchRegistry MatchRegistry, tracker Tracker, metrics Metrics, streamManager StreamManager, router MessageRouter, storageIndex StorageIndex) (*Runtime, *RuntimeInfo, error) { runtimeConfig := config.GetRuntime() startupLogger.Info("Initialising runtime", zap.String("path", runtimeConfig.Path)) @@ -631,19 +638,19 @@ func NewRuntime(ctx context.Context, logger, startupLogger *zap.Logger, db *sql. 
matchProvider := NewMatchProvider() - goModules, goRPCFns, goBeforeRtFns, goAfterRtFns, goBeforeReqFns, goAfterReqFns, goMatchmakerMatchedFn, goMatchmakerCustomMatchingFn, goTournamentEndFn, goTournamentResetFn, goLeaderboardResetFn, goPurchaseNotificationAppleFn, goSubscriptionNotificationAppleFn, goPurchaseNotificationGoogleFn, goSubscriptionNotificationGoogleFn, allEventFns, goMatchNamesListFn, err := NewRuntimeProviderGo(ctx, logger, startupLogger, db, protojsonMarshaler, config, version, socialClient, leaderboardCache, leaderboardRankCache, leaderboardScheduler, sessionRegistry, sessionCache, statusRegistry, matchRegistry, tracker, metrics, streamManager, router, runtimeConfig.Path, paths, eventQueue, matchProvider) + goModules, goRPCFns, goBeforeRtFns, goAfterRtFns, goBeforeReqFns, goAfterReqFns, goMatchmakerMatchedFn, goMatchmakerCustomMatchingFn, goTournamentEndFn, goTournamentResetFn, goLeaderboardResetFn, goPurchaseNotificationAppleFn, goSubscriptionNotificationAppleFn, goPurchaseNotificationGoogleFn, goSubscriptionNotificationGoogleFn, goIndexFilterFns, allEventFns, goMatchNamesListFn, err := NewRuntimeProviderGo(ctx, logger, startupLogger, db, protojsonMarshaler, config, version, socialClient, leaderboardCache, leaderboardRankCache, leaderboardScheduler, sessionRegistry, sessionCache, statusRegistry, matchRegistry, tracker, metrics, streamManager, router, storageIndex, runtimeConfig.Path, paths, eventQueue, matchProvider) if err != nil { startupLogger.Error("Error initialising Go runtime provider", zap.Error(err)) return nil, nil, err } - luaModules, luaRPCFns, luaBeforeRtFns, luaAfterRtFns, luaBeforeReqFns, luaAfterReqFns, luaMatchmakerMatchedFn, luaTournamentEndFn, luaTournamentResetFn, luaLeaderboardResetFn, luaPurchaseNotificationAppleFn, luaSubscriptionNotificationAppleFn, luaPurchaseNotificationGoogleFn, luaSubscriptionNotificationGoogleFn, err := NewRuntimeProviderLua(logger, startupLogger, db, protojsonMarshaler, protojsonUnmarshaler, config, version, socialClient, leaderboardCache, leaderboardRankCache, leaderboardScheduler, sessionRegistry, sessionCache, statusRegistry, matchRegistry, tracker, metrics, streamManager, router, allEventFns.eventFunction, runtimeConfig.Path, paths, matchProvider) + luaModules, luaRPCFns, luaBeforeRtFns, luaAfterRtFns, luaBeforeReqFns, luaAfterReqFns, luaMatchmakerMatchedFn, luaTournamentEndFn, luaTournamentResetFn, luaLeaderboardResetFn, luaPurchaseNotificationAppleFn, luaSubscriptionNotificationAppleFn, luaPurchaseNotificationGoogleFn, luaSubscriptionNotificationGoogleFn, luaIndexFilterFns, err := NewRuntimeProviderLua(logger, startupLogger, db, protojsonMarshaler, protojsonUnmarshaler, config, version, socialClient, leaderboardCache, leaderboardRankCache, leaderboardScheduler, sessionRegistry, sessionCache, statusRegistry, matchRegistry, tracker, metrics, streamManager, router, allEventFns.eventFunction, runtimeConfig.Path, paths, matchProvider, storageIndex) if err != nil { startupLogger.Error("Error initialising Lua runtime provider", zap.Error(err)) return nil, nil, err } - jsModules, jsRPCFns, jsBeforeRtFns, jsAfterRtFns, jsBeforeReqFns, jsAfterReqFns, jsMatchmakerMatchedFn, jsTournamentEndFn, jsTournamentResetFn, jsLeaderboardResetFn, jsPurchaseNotificationAppleFn, jsSubscriptionNotificationAppleFn, jsPurchaseNotificationGoogleFn, jsSubscriptionNotificationGoogleFn, err := NewRuntimeProviderJS(logger, startupLogger, db, protojsonMarshaler, protojsonUnmarshaler, config, version, socialClient, leaderboardCache, leaderboardRankCache, 
leaderboardScheduler, sessionRegistry, sessionCache, statusRegistry, matchRegistry, tracker, metrics, streamManager, router, allEventFns.eventFunction, runtimeConfig.Path, runtimeConfig.JsEntrypoint, matchProvider) + jsModules, jsRPCFns, jsBeforeRtFns, jsAfterRtFns, jsBeforeReqFns, jsAfterReqFns, jsMatchmakerMatchedFn, jsTournamentEndFn, jsTournamentResetFn, jsLeaderboardResetFn, jsPurchaseNotificationAppleFn, jsSubscriptionNotificationAppleFn, jsPurchaseNotificationGoogleFn, jsSubscriptionNotificationGoogleFn, jsIndexFilterFns, err := NewRuntimeProviderJS(logger, startupLogger, db, protojsonMarshaler, protojsonUnmarshaler, config, version, socialClient, leaderboardCache, leaderboardRankCache, leaderboardScheduler, sessionRegistry, sessionCache, statusRegistry, matchRegistry, tracker, metrics, streamManager, router, allEventFns.eventFunction, runtimeConfig.Path, runtimeConfig.JsEntrypoint, matchProvider, storageIndex) if err != nil { startupLogger.Error("Error initialising JavaScript runtime provider", zap.Error(err)) return nil, nil, err @@ -2462,7 +2469,6 @@ func NewRuntime(ctx context.Context, logger, startupLogger *zap.Logger, db *sql. case goMatchmakerMatchedFn != nil: allMatchmakerOverrideFunction = goMatchmakerCustomMatchingFn startupLogger.Info("Registered Go runtime Matchmaker Override function invocation") - // TODO: Handle other runtimes. } var allTournamentEndFunction RuntimeTournamentEndFunction @@ -2556,6 +2562,28 @@ func NewRuntime(ctx context.Context, logger, startupLogger *zap.Logger, db *sql. startupLogger.Info("Registered JavaScript runtime Subscription Notification Google function invocation") } + allStorageIndexFilterFunctions := make(map[string]RuntimeStorageIndexFilterFunction, len(goIndexFilterFns)+len(luaIndexFilterFns)+len(jsIndexFilterFns)) + jsIndexNames := make(map[string]bool, len(jsIndexFilterFns)) + for id, fn := range jsIndexFilterFns { + allStorageIndexFilterFunctions[id] = fn + jsIndexNames[id] = true + startupLogger.Info("Registered JavaScript runtime storage index filter function invocation", zap.String("index_name", id)) + } + luaIndexNames := make(map[string]bool, len(luaIndexFilterFns)) + for id, fn := range luaIndexFilterFns { + allStorageIndexFilterFunctions[id] = fn + delete(jsIndexNames, id) + luaIndexNames[id] = true + startupLogger.Info("Registered Lua runtime storage index filter function invocation", zap.String("index_name", id)) + } + goIndexNames := make(map[string]bool, len(goIndexFilterFns)) + for id, fn := range goIndexFilterFns { + allStorageIndexFilterFunctions[id] = fn + delete(luaIndexNames, id) + goIndexNames[id] = true + startupLogger.Info("Registered Go runtime storage index filter function invocation", zap.String("index_name", id)) + } + // Lua matches are not registered the same, list only Go ones. goMatchNames := goMatchNamesListFn() for _, name := range goMatchNames { @@ -2584,6 +2612,7 @@ func NewRuntime(ctx context.Context, logger, startupLogger *zap.Logger, db *sql. 
subscriptionNotificationAppleFunction: allSubscriptionNotificationAppleFunction, purchaseNotificationGoogleFunction: allPurchaseNotificationGoogleFunction, subscriptionNotificationGoogleFunction: allSubscriptionNotificationGoogleFunction, + storageIndexFilterFunctions: allStorageIndexFilterFunctions, eventFunctions: allEventFns, }, rInfo, nil @@ -3319,6 +3348,10 @@ func (r *Runtime) PurchaseNotificationGoogle() RuntimePurchaseNotificationGoogle return r.purchaseNotificationGoogleFunction } +func (r *Runtime) StorageIndexFilterFunction(indexName string) RuntimeStorageIndexFilterFunction { + return r.storageIndexFilterFunctions[indexName] +} + func (r *Runtime) SubscriptionNotificationGoogle() RuntimeSubscriptionNotificationGoogleFunction { return r.subscriptionNotificationGoogleFunction } diff --git a/server/runtime_go.go b/server/runtime_go.go index 3d7b02abfe..73a06c0501 100644 --- a/server/runtime_go.go +++ b/server/runtime_go.go @@ -59,11 +59,14 @@ type RuntimeGoInitializer struct { purchaseNotificationGoogle RuntimePurchaseNotificationGoogleFunction subscriptionNotificationGoogle RuntimeSubscriptionNotificationGoogleFunction matchmakerOverride RuntimeMatchmakerOverrideFunction + storageIndexFunctions map[string]RuntimeStorageIndexFilterFunction eventFunctions []RuntimeEventFunction sessionStartFunctions []RuntimeEventFunction sessionEndFunctions []RuntimeEventFunction + storageIndex StorageIndex + match map[string]func(ctx context.Context, logger runtime.Logger, db *sql.DB, nk runtime.NakamaModule) (runtime.Match, error) matchLock *sync.RWMutex } @@ -2566,6 +2569,29 @@ func (ri *RuntimeGoInitializer) RegisterSubscriptionNotificationGoogle(fn func(c return nil } +func (ri *RuntimeGoInitializer) RegisterStorageIndex(name, collection, key string, fields []string, maxEntries int) error { + return ri.storageIndex.CreateIndex(context.Background(), name, collection, key, fields, maxEntries) +} + +func (ri *RuntimeGoInitializer) RegisterStorageIndexFilter(indexName string, fn func(ctx context.Context, logger runtime.Logger, db *sql.DB, nk runtime.NakamaModule, write *runtime.StorageWrite) bool) error { + ri.storageIndexFunctions[indexName] = func(ctx context.Context, write *StorageOpWrite) (bool, error) { + ctx = NewRuntimeGoContext(ctx, ri.node, ri.version, ri.env, RuntimeExecutionModeStorageIndexFilter, nil, nil, 0, "", "", nil, "", "", "", "") + + storageWrite := &runtime.StorageWrite{ + Collection: write.Object.Collection, + Key: write.Object.Key, + UserID: write.OwnerID, + Value: write.Object.Value, + Version: write.Object.Version, + PermissionRead: int(write.Object.PermissionRead.GetValue()), + PermissionWrite: int(write.Object.PermissionWrite.GetValue()), + } + + return fn(ctx, ri.logger.WithField("mode", RuntimeExecutionModeStorageIndexFilter.String()).WithField("index_name", indexName), ri.db, ri.nk, storageWrite), nil + } + return nil +} + func (ri *RuntimeGoInitializer) RegisterMatch(name string, fn func(ctx context.Context, logger runtime.Logger, db *sql.DB, nk runtime.NakamaModule) (runtime.Match, error)) error { ri.matchLock.Lock() ri.match[name] = fn @@ -2573,11 +2599,11 @@ func (ri *RuntimeGoInitializer) RegisterMatch(name string, fn func(ctx context.C return nil } -func NewRuntimeProviderGo(ctx context.Context, logger, startupLogger *zap.Logger, db *sql.DB, protojsonMarshaler *protojson.MarshalOptions, config Config, version string, socialClient *social.Client, leaderboardCache LeaderboardCache, leaderboardRankCache LeaderboardRankCache, leaderboardScheduler 
LeaderboardScheduler, sessionRegistry SessionRegistry, sessionCache SessionCache, statusRegistry *StatusRegistry, matchRegistry MatchRegistry, tracker Tracker, metrics Metrics, streamManager StreamManager, router MessageRouter, rootPath string, paths []string, eventQueue *RuntimeEventQueue, matchProvider *MatchProvider) ([]string, map[string]RuntimeRpcFunction, map[string]RuntimeBeforeRtFunction, map[string]RuntimeAfterRtFunction, *RuntimeBeforeReqFunctions, *RuntimeAfterReqFunctions, RuntimeMatchmakerMatchedFunction, RuntimeMatchmakerOverrideFunction, RuntimeTournamentEndFunction, RuntimeTournamentResetFunction, RuntimeLeaderboardResetFunction, RuntimePurchaseNotificationAppleFunction, RuntimeSubscriptionNotificationAppleFunction, RuntimePurchaseNotificationGoogleFunction, RuntimeSubscriptionNotificationGoogleFunction, *RuntimeEventFunctions, func() []string, error) { +func NewRuntimeProviderGo(ctx context.Context, logger, startupLogger *zap.Logger, db *sql.DB, protojsonMarshaler *protojson.MarshalOptions, config Config, version string, socialClient *social.Client, leaderboardCache LeaderboardCache, leaderboardRankCache LeaderboardRankCache, leaderboardScheduler LeaderboardScheduler, sessionRegistry SessionRegistry, sessionCache SessionCache, statusRegistry *StatusRegistry, matchRegistry MatchRegistry, tracker Tracker, metrics Metrics, streamManager StreamManager, router MessageRouter, storageIndex StorageIndex, rootPath string, paths []string, eventQueue *RuntimeEventQueue, matchProvider *MatchProvider) ([]string, map[string]RuntimeRpcFunction, map[string]RuntimeBeforeRtFunction, map[string]RuntimeAfterRtFunction, *RuntimeBeforeReqFunctions, *RuntimeAfterReqFunctions, RuntimeMatchmakerMatchedFunction, RuntimeMatchmakerOverrideFunction, RuntimeTournamentEndFunction, RuntimeTournamentResetFunction, RuntimeLeaderboardResetFunction, RuntimePurchaseNotificationAppleFunction, RuntimeSubscriptionNotificationAppleFunction, RuntimePurchaseNotificationGoogleFunction, RuntimeSubscriptionNotificationGoogleFunction, map[string]RuntimeStorageIndexFilterFunction, *RuntimeEventFunctions, func() []string, error) { runtimeLogger := NewRuntimeGoLogger(logger) node := config.GetName() env := config.GetRuntime().Environment - nk := NewRuntimeGoNakamaModule(logger, db, protojsonMarshaler, config, socialClient, leaderboardCache, leaderboardRankCache, leaderboardScheduler, sessionRegistry, sessionCache, statusRegistry, matchRegistry, tracker, metrics, streamManager, router) + nk := NewRuntimeGoNakamaModule(logger, db, protojsonMarshaler, config, socialClient, leaderboardCache, leaderboardRankCache, leaderboardScheduler, sessionRegistry, sessionCache, statusRegistry, matchRegistry, tracker, metrics, streamManager, router, storageIndex) match := make(map[string]func(ctx context.Context, logger runtime.Logger, db *sql.DB, nk runtime.NakamaModule) (runtime.Match, error), 0) @@ -2629,6 +2655,9 @@ func NewRuntimeProviderGo(ctx context.Context, logger, startupLogger *zap.Logger beforeReq: &RuntimeBeforeReqFunctions{}, afterReq: &RuntimeAfterReqFunctions{}, + storageIndexFunctions: make(map[string]RuntimeStorageIndexFilterFunction, 0), + storageIndex: storageIndex, + eventFunctions: make([]RuntimeEventFunction, 0), sessionStartFunctions: make([]RuntimeEventFunction, 0), sessionEndFunctions: make([]RuntimeEventFunction, 0), @@ -2653,13 +2682,13 @@ func NewRuntimeProviderGo(ctx context.Context, logger, startupLogger *zap.Logger relPath, name, fn, err := openGoModule(startupLogger, rootPath, path) if err != nil { // 
Errors are already logged in the function above. - return nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, err + return nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, err } // Run the initialisation. if err = fn(ctx, runtimeLogger, db, nk, initializer); err != nil { startupLogger.Fatal("Error returned by InitModule function in Go module", zap.String("name", name), zap.Error(err)) - return nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, errors.New("error returned by InitModule function in Go module") + return nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, errors.New("error returned by InitModule function in Go module") } modulePaths = append(modulePaths, relPath) } @@ -2707,7 +2736,7 @@ func NewRuntimeProviderGo(ctx context.Context, logger, startupLogger *zap.Logger } } - return modulePaths, initializer.rpc, initializer.beforeRt, initializer.afterRt, initializer.beforeReq, initializer.afterReq, initializer.matchmakerMatched, initializer.matchmakerOverride, initializer.tournamentEnd, initializer.tournamentReset, initializer.leaderboardReset, initializer.purchaseNotificationApple, initializer.subscriptionNotificationApple, initializer.purchaseNotificationGoogle, initializer.subscriptionNotificationGoogle, events, matchNamesListFn, nil + return modulePaths, initializer.rpc, initializer.beforeRt, initializer.afterRt, initializer.beforeReq, initializer.afterReq, initializer.matchmakerMatched, initializer.matchmakerOverride, initializer.tournamentEnd, initializer.tournamentReset, initializer.leaderboardReset, initializer.purchaseNotificationApple, initializer.subscriptionNotificationApple, initializer.purchaseNotificationGoogle, initializer.subscriptionNotificationGoogle, initializer.storageIndexFunctions, events, matchNamesListFn, nil } func CheckRuntimeProviderGo(logger *zap.Logger, rootPath string, paths []string) error { diff --git a/server/runtime_go_nakama.go b/server/runtime_go_nakama.go index 8b69415cc4..2659d88e25 100644 --- a/server/runtime_go_nakama.go +++ b/server/runtime_go_nakama.go @@ -64,9 +64,10 @@ type RuntimeGoNakamaModule struct { node string matchCreateFn RuntimeMatchCreateFunction satori runtime.Satori + storageIndex StorageIndex } -func NewRuntimeGoNakamaModule(logger *zap.Logger, db *sql.DB, protojsonMarshaler *protojson.MarshalOptions, config Config, socialClient *social.Client, leaderboardCache LeaderboardCache, leaderboardRankCache LeaderboardRankCache, leaderboardScheduler LeaderboardScheduler, sessionRegistry SessionRegistry, sessionCache SessionCache, statusRegistry *StatusRegistry, matchRegistry MatchRegistry, tracker Tracker, metrics Metrics, streamManager StreamManager, router MessageRouter) *RuntimeGoNakamaModule { +func NewRuntimeGoNakamaModule(logger *zap.Logger, db *sql.DB, protojsonMarshaler *protojson.MarshalOptions, config Config, socialClient *social.Client, leaderboardCache LeaderboardCache, leaderboardRankCache LeaderboardRankCache, leaderboardScheduler LeaderboardScheduler, sessionRegistry SessionRegistry, sessionCache SessionCache, statusRegistry *StatusRegistry, matchRegistry MatchRegistry, tracker Tracker, metrics Metrics, streamManager StreamManager, router MessageRouter, storageIndex StorageIndex) *RuntimeGoNakamaModule { return &RuntimeGoNakamaModule{ logger: logger, db: db, @@ -84,6 +85,7 @@ func NewRuntimeGoNakamaModule(logger *zap.Logger, db *sql.DB, protojsonMarshaler metrics: metrics, 
streamManager: streamManager, router: router, + storageIndex: storageIndex, node: config.GetName(), @@ -1988,7 +1990,7 @@ func (n *RuntimeGoNakamaModule) StorageWrite(ctx context.Context, writes []*runt ops = append(ops, op) } - acks, _, err := StorageWriteObjects(ctx, n.logger, n.db, n.metrics, true, ops) + acks, _, err := StorageWriteObjects(ctx, n.logger, n.db, n.metrics, n.storageIndex, true, ops) if err != nil { return nil, err } @@ -2038,11 +2040,29 @@ func (n *RuntimeGoNakamaModule) StorageDelete(ctx context.Context, deletes []*ru ops = append(ops, op) } - _, err := StorageDeleteObjects(ctx, n.logger, n.db, true, ops) + _, err := StorageDeleteObjects(ctx, n.logger, n.db, n.storageIndex, true, ops) return err } +// @group storage +// @summary List storage index entries +// @param indexName(type=string) Name of the index to list entries from. +// @param queryString(type=string) Query to filter index entries. +// @param limit(type=int) Maximum number of results to be returned. +// @return objects(*api.StorageObjects) A list of storage objects. +// @return error(error) An optional error value if an error occurred. +func (n *RuntimeGoNakamaModule) StorageIndexList(ctx context.Context, indexName, query string, limit int) (*api.StorageObjects, error) { + if indexName == "" { + return nil, errors.New("expects a non-empty indexName") + } + if limit < 1 || limit > 100 { + return nil, errors.New("limit must be 1-100") + } + + return n.storageIndex.List(ctx, indexName, query, limit) +} + // @group users // @summary Update account, storage, and wallet information simultaneously. // @param ctx(type=context.Context) The context object represents information about the server and requester. diff --git a/server/runtime_javascript.go b/server/runtime_javascript.go index 807a158e01..ab0bf67e05 100644 --- a/server/runtime_javascript.go +++ b/server/runtime_javascript.go @@ -59,11 +59,23 @@ func (r *RuntimeJS) SetContext(ctx context.Context) { func (r *RuntimeJS) GetCallback(e RuntimeExecutionMode, key string) string { switch e { case RuntimeExecutionModeRPC: - return r.callbacks.Rpc[key] + fnId, ok := r.callbacks.Rpc[key] + if !ok { + return "" + } + return fnId case RuntimeExecutionModeBefore: - return r.callbacks.Before[key] + fnId, ok := r.callbacks.Before[key] + if !ok { + return "" + } + return fnId case RuntimeExecutionModeAfter: - return r.callbacks.After[key] + fnId, ok := r.callbacks.After[key] + if !ok { + return "" + } + return fnId case RuntimeExecutionModeMatchmaker: return r.callbacks.Matchmaker case RuntimeExecutionModeTournamentEnd: @@ -80,6 +92,12 @@ func (r *RuntimeJS) GetCallback(e RuntimeExecutionMode, key string) string { return r.callbacks.PurchaseNotificationGoogle case RuntimeExecutionModeSubscriptionNotificationGoogle: return r.callbacks.SubscriptionNotificationGoogle + case RuntimeExecutionModeStorageIndexFilter: + fnId, ok := r.callbacks.StorageIndexFilter[key] + if !ok { + return "" + } + return fnId } return "" @@ -147,6 +165,7 @@ type RuntimeProviderJS struct { currentCount *atomic.Uint32 newFn func() *RuntimeJS metrics Metrics + storageIndex StorageIndex } func (rp *RuntimeProviderJS) Rpc(ctx context.Context, id string, headers, queryParams map[string][]string, userID, username string, vars map[string]string, expiry int64, sessionID, clientIP, clientPort, lang, payload string) (string, error, codes.Code) { @@ -608,7 +627,7 @@ func (rp *RuntimeProviderJS) Put(r *RuntimeJS) { } } -func NewRuntimeProviderJS(logger, startupLogger *zap.Logger, db *sql.DB, protojsonMarshaler
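The new Go runtime surface introduced above (RegisterStorageIndex, RegisterStorageIndexFilter and StorageIndexList) is intended to be called from a module's InitModule. The following is a minimal sketch only: the index name, collection, fields, RPC id and query string are invented for illustration, and it assumes the corresponding methods are exposed on runtime.Initializer and runtime.NakamaModule by the nakama-common version this patch vendors.

package main

import (
	"context"
	"database/sql"

	"github.com/heroiclabs/nakama-common/runtime"
)

func InitModule(ctx context.Context, logger runtime.Logger, db *sql.DB, nk runtime.NakamaModule, initializer runtime.Initializer) error {
	// Index up to 10000 objects from the hypothetical "cards" collection on the listed value fields.
	if err := initializer.RegisterStorageIndex("cards_idx", "cards", "", []string{"rarity", "owner_level"}, 10000); err != nil {
		return err
	}

	// Only admit publicly readable writes (read permission 2) into the index.
	if err := initializer.RegisterStorageIndexFilter("cards_idx", func(ctx context.Context, logger runtime.Logger, db *sql.DB, nk runtime.NakamaModule, write *runtime.StorageWrite) bool {
		return write.PermissionRead == 2
	}); err != nil {
		return err
	}

	// Query the index from an RPC; the query string is illustrative.
	return initializer.RegisterRpc("list_rare_cards", func(ctx context.Context, logger runtime.Logger, db *sql.DB, nk runtime.NakamaModule, payload string) (string, error) {
		objects, err := nk.StorageIndexList(ctx, "cards_idx", "+value.rarity:legendary", 20)
		if err != nil {
			return "", err
		}
		logger.Info("storage index query matched %d objects", len(objects.Objects))
		return "", nil
	})
}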
*protojson.MarshalOptions, protojsonUnmarshaler *protojson.UnmarshalOptions, config Config, version string, socialClient *social.Client, leaderboardCache LeaderboardCache, leaderboardRankCache LeaderboardRankCache, leaderboardScheduler LeaderboardScheduler, sessionRegistry SessionRegistry, sessionCache SessionCache, statusRegistry *StatusRegistry, matchRegistry MatchRegistry, tracker Tracker, metrics Metrics, streamManager StreamManager, router MessageRouter, eventFn RuntimeEventCustomFunction, path, entrypoint string, matchProvider *MatchProvider) ([]string, map[string]RuntimeRpcFunction, map[string]RuntimeBeforeRtFunction, map[string]RuntimeAfterRtFunction, *RuntimeBeforeReqFunctions, *RuntimeAfterReqFunctions, RuntimeMatchmakerMatchedFunction, RuntimeTournamentEndFunction, RuntimeTournamentResetFunction, RuntimeLeaderboardResetFunction, RuntimePurchaseNotificationAppleFunction, RuntimeSubscriptionNotificationAppleFunction, RuntimePurchaseNotificationGoogleFunction, RuntimeSubscriptionNotificationGoogleFunction, error) { +func NewRuntimeProviderJS(logger, startupLogger *zap.Logger, db *sql.DB, protojsonMarshaler *protojson.MarshalOptions, protojsonUnmarshaler *protojson.UnmarshalOptions, config Config, version string, socialClient *social.Client, leaderboardCache LeaderboardCache, leaderboardRankCache LeaderboardRankCache, leaderboardScheduler LeaderboardScheduler, sessionRegistry SessionRegistry, sessionCache SessionCache, statusRegistry *StatusRegistry, matchRegistry MatchRegistry, tracker Tracker, metrics Metrics, streamManager StreamManager, router MessageRouter, eventFn RuntimeEventCustomFunction, path, entrypoint string, matchProvider *MatchProvider, storageIndex StorageIndex) ([]string, map[string]RuntimeRpcFunction, map[string]RuntimeBeforeRtFunction, map[string]RuntimeAfterRtFunction, *RuntimeBeforeReqFunctions, *RuntimeAfterReqFunctions, RuntimeMatchmakerMatchedFunction, RuntimeTournamentEndFunction, RuntimeTournamentResetFunction, RuntimeLeaderboardResetFunction, RuntimePurchaseNotificationAppleFunction, RuntimeSubscriptionNotificationAppleFunction, RuntimePurchaseNotificationGoogleFunction, RuntimeSubscriptionNotificationGoogleFunction, map[string]RuntimeStorageIndexFilterFunction, error) { startupLogger.Info("Initialising JavaScript runtime provider", zap.String("path", path), zap.String("entrypoint", entrypoint)) modCache, err := cacheJavascriptModules(startupLogger, path, entrypoint) @@ -648,6 +667,7 @@ func NewRuntimeProviderJS(logger, startupLogger *zap.Logger, db *sql.DB, protojs poolCh: make(chan *RuntimeJS, config.GetRuntime().JsMaxCount), maxCount: uint32(config.GetRuntime().JsMaxCount), currentCount: atomic.NewUint32(uint32(config.GetRuntime().JsMinCount)), + storageIndex: storageIndex, } rpcFunctions := make(map[string]RuntimeRpcFunction, 0) @@ -663,6 +683,7 @@ func NewRuntimeProviderJS(logger, startupLogger *zap.Logger, db *sql.DB, protojs var subscriptionNotificationAppleFunction RuntimeSubscriptionNotificationAppleFunction var purchaseNotificationGoogleFunction RuntimePurchaseNotificationGoogleFunction var subscriptionNotificationGoogleFunction RuntimeSubscriptionNotificationGoogleFunction + storageIndexFilterFunctions := make(map[string]RuntimeStorageIndexFilterFunction, 0) matchHandlers := &RuntimeJavascriptMatchHandlers{ mapping: make(map[string]*jsMatchHandlers, 0), @@ -675,10 +696,10 @@ func NewRuntimeProviderJS(logger, startupLogger *zap.Logger, db *sql.DB, protojs return nil, nil } - return NewRuntimeJavascriptMatchCore(logger, name, db, 
protojsonMarshaler, protojsonUnmarshaler, config, socialClient, leaderboardCache, leaderboardRankCache, localCache, leaderboardScheduler, sessionRegistry, sessionCache, statusRegistry, matchRegistry, tracker, metrics, streamManager, router, matchProvider.CreateMatch, eventFn, id, node, version, stopped, mc, modCache) + return NewRuntimeJavascriptMatchCore(logger, name, db, protojsonMarshaler, protojsonUnmarshaler, config, socialClient, leaderboardCache, leaderboardRankCache, localCache, leaderboardScheduler, sessionRegistry, sessionCache, statusRegistry, matchRegistry, tracker, metrics, streamManager, router, matchProvider.CreateMatch, eventFn, id, node, version, stopped, mc, modCache, storageIndex) }) - callbacks, err := evalRuntimeModules(runtimeProviderJS, modCache, matchHandlers, matchProvider, leaderboardScheduler, localCache, func(mode RuntimeExecutionMode, id string) { + callbacks, err := evalRuntimeModules(runtimeProviderJS, modCache, matchHandlers, matchProvider, leaderboardScheduler, storageIndex, localCache, func(mode RuntimeExecutionMode, id string) { switch mode { case RuntimeExecutionModeRPC: rpcFunctions[id] = func(ctx context.Context, headers, queryParams map[string][]string, userID, username string, vars map[string]string, expiry int64, sessionID, clientIP, clientPort, lang, payload string) (string, error, codes.Code) { @@ -1648,11 +1669,15 @@ func NewRuntimeProviderJS(logger, startupLogger *zap.Logger, db *sql.DB, protojs subscriptionNotificationGoogleFunction = func(ctx context.Context, subscription *api.ValidatedSubscription, providerPayload string) error { return runtimeProviderJS.SubscriptionNotificationGoogle(ctx, subscription, providerPayload) } + case RuntimeExecutionModeStorageIndexFilter: + storageIndexFilterFunctions[id] = func(ctx context.Context, write *StorageOpWrite) (bool, error) { + return runtimeProviderJS.StorageIndexFilter(ctx, id, write) + } } }, false) if err != nil { logger.Error("Failed to eval JavaScript modules.", zap.Error(err)) - return nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, err + return nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, err } runtimeProviderJS.newFn = func() *RuntimeJS { @@ -1666,7 +1691,7 @@ func NewRuntimeProviderJS(logger, startupLogger *zap.Logger, db *sql.DB, protojs logger.Fatal("Failed to initialize JavaScript runtime", zap.Error(err)) } - nakamaModule := NewRuntimeJavascriptNakamaModule(logger, db, protojsonMarshaler, protojsonUnmarshaler, config, socialClient, leaderboardCache, leaderboardRankCache, localCache, leaderboardScheduler, sessionRegistry, sessionCache, statusRegistry, matchRegistry, tracker, metrics, streamManager, router, eventFn, matchProvider.CreateMatch) + nakamaModule := NewRuntimeJavascriptNakamaModule(logger, db, protojsonMarshaler, protojsonUnmarshaler, config, socialClient, leaderboardCache, leaderboardRankCache, storageIndex, localCache, leaderboardScheduler, sessionRegistry, sessionCache, statusRegistry, matchRegistry, tracker, metrics, streamManager, router, eventFn, matchProvider.CreateMatch) nk, err := nakamaModule.Constructor(runtime) if err != nil { logger.Fatal("Failed to initialize JavaScript runtime", zap.Error(err)) @@ -1698,7 +1723,7 @@ func NewRuntimeProviderJS(logger, startupLogger *zap.Logger, db *sql.DB, protojs } startupLogger.Info("Allocated minimum JavaScript runtime pool") - return modCache.Names, rpcFunctions, beforeRtFunctions, afterRtFunctions, beforeReqFunctions, afterReqFunctions, matchmakerMatchedFunction, 
tournamentEndFunction, tournamentResetFunction, leaderboardResetFunction, purchaseNotificationAppleFunction, subscriptionNotificationAppleFunction, purchaseNotificationGoogleFunction, subscriptionNotificationGoogleFunction, nil + return modCache.Names, rpcFunctions, beforeRtFunctions, afterRtFunctions, beforeReqFunctions, afterReqFunctions, matchmakerMatchedFunction, tournamentEndFunction, tournamentResetFunction, leaderboardResetFunction, purchaseNotificationAppleFunction, subscriptionNotificationAppleFunction, purchaseNotificationGoogleFunction, subscriptionNotificationGoogleFunction, storageIndexFilterFunctions, nil } func CheckRuntimeProviderJavascript(logger *zap.Logger, config Config, version string) error { @@ -1716,7 +1741,7 @@ func CheckRuntimeProviderJavascript(logger *zap.Logger, config Config, version s mapping: make(map[string]*jsMatchHandlers, 0), } - _, err = evalRuntimeModules(rp, modCache, matchHandlers, nil, nil, nil, func(RuntimeExecutionMode, string) {}, true) + _, err = evalRuntimeModules(rp, modCache, matchHandlers, nil, nil, nil, nil, func(RuntimeExecutionMode, string) {}, true) if err != nil { logger.Error("Failed to load JavaScript module.", zap.Error(err)) } @@ -2076,7 +2101,7 @@ func (rp *RuntimeProviderJS) PurchaseNotificationApple(ctx context.Context, purc return errors.New("Runtime Purchase Notification Apple function not found.") } - purchaseMap := getJsValidatedPurchaseData(purchase) + purchaseMap := validatedPurchaseToJsObject(purchase) fn, ok := goja.AssertFunction(r.vm.Get(jsFn)) if !ok { @@ -2120,7 +2145,7 @@ func (rp *RuntimeProviderJS) SubscriptionNotificationApple(ctx context.Context, return errors.New("Runtime Subscription Notification Apple function not found.") } - subscriptionMap := getJsSubscriptionData(subscription) + subscriptionMap := subscriptionToJsObject(subscription) fn, ok := goja.AssertFunction(r.vm.Get(jsFn)) if !ok { @@ -2164,7 +2189,7 @@ func (rp *RuntimeProviderJS) PurchaseNotificationGoogle(ctx context.Context, pur return errors.New("Runtime Purchase Notification Google function not found.") } - purchaseMap := getJsValidatedPurchaseData(purchase) + purchaseMap := validatedPurchaseToJsObject(purchase) fn, ok := goja.AssertFunction(r.vm.Get(jsFn)) if !ok { @@ -2208,7 +2233,7 @@ func (rp *RuntimeProviderJS) SubscriptionNotificationGoogle(ctx context.Context, return errors.New("Runtime Subscription Notification Google function not found.") } - subscriptionMap := getJsSubscriptionData(subscription) + subscriptionMap := subscriptionToJsObject(subscription) fn, ok := goja.AssertFunction(r.vm.Get(jsFn)) if !ok { @@ -2241,15 +2266,82 @@ func (rp *RuntimeProviderJS) SubscriptionNotificationGoogle(ctx context.Context, return nil } -func evalRuntimeModules(rp *RuntimeProviderJS, modCache *RuntimeJSModuleCache, matchHandlers *RuntimeJavascriptMatchHandlers, matchProvider *MatchProvider, leaderboardScheduler LeaderboardScheduler, localCache *RuntimeJavascriptLocalCache, announceCallbackFn func(RuntimeExecutionMode, string), dryRun bool) (*RuntimeJavascriptCallbacks, error) { +func (rp *RuntimeProviderJS) StorageIndexFilter(ctx context.Context, indexName string, storageWrite *StorageOpWrite) (bool, error) { + r, err := rp.Get(ctx) + if err != nil { + return false, err + } + jsFn := r.GetCallback(RuntimeExecutionModeStorageIndexFilter, indexName) + if jsFn == "" { + rp.Put(r) + rp.logger.Error("JavaScript runtime function invalid.", zap.String("key", jsFn), zap.Error(err)) + return false, errors.New("Could not run Storage Index Filter
hook.") + } + + fn, ok := goja.AssertFunction(r.vm.Get(jsFn)) + if !ok { + rp.Put(r) + rp.logger.Error("JavaScript runtime function invalid.", zap.String("key", jsFn), zap.Error(err)) + return false, errors.New("Could not run Storage Index Filter hook.") + } + + jsLogger, err := NewJsLogger(r.vm, r.logger, zap.String("mode", RuntimeExecutionModeStorageIndexFilter.String())) + if err != nil { + rp.Put(r) + rp.logger.Error("Could not instantiate js logger.", zap.Error(err)) + return false, errors.New("Could not run Storage Index Filter hook.") + } + + objectMap := make(map[string]interface{}, 7) + objectMap["key"] = storageWrite.Object.Key + objectMap["collection"] = storageWrite.Object.Collection + if storageWrite.OwnerID != "" { + objectMap["userId"] = storageWrite.OwnerID + } else { + objectMap["userId"] = nil + } + objectMap["version"] = storageWrite.Object.Version + objectMap["permissionRead"] = storageWrite.Object.PermissionRead + objectMap["permissionWrite"] = storageWrite.Object.PermissionWrite + + valueMap := make(map[string]interface{}) + err = json.Unmarshal([]byte(storageWrite.Object.Value), &valueMap) + if err != nil { + return false, fmt.Errorf("Error running runtime Storage Index Filter hook for %q index: %v", indexName, err.Error()) + } + pointerizeSlices(valueMap) + objectMap["value"] = valueMap + + r.SetContext(ctx) + retValue, err, _ := r.InvokeFunction(RuntimeExecutionModeStorageIndexFilter, "storageIndexFilter", fn, jsLogger, nil, nil, "", "", nil, 0, "", "", "", "", r.vm.ToValue(objectMap)) + r.SetContext(context.Background()) + rp.Put(r) + if err != nil { + return false, fmt.Errorf("Error running runtime Storage Index Filter hook for %q index: %v", indexName, err.Error()) + } + + if retValue == nil { + return false, errors.New("Invalid return type for Storage Index Filter function: bool expected") + } + + filterResult, ok := retValue.(bool) + if !ok { + return false, fmt.Errorf("Error running runtime Storage Index Filter hook for %q index: failed to assert js fn expected return type", indexName) + } + + return filterResult, nil +} + +func evalRuntimeModules(rp *RuntimeProviderJS, modCache *RuntimeJSModuleCache, matchHandlers *RuntimeJavascriptMatchHandlers, matchProvider *MatchProvider, leaderboardScheduler LeaderboardScheduler, storageIndex StorageIndex, localCache *RuntimeJavascriptLocalCache, announceCallbackFn func(RuntimeExecutionMode, string), dryRun bool) (*RuntimeJavascriptCallbacks, error) { logger := rp.logger r := goja.New() callbacks := &RuntimeJavascriptCallbacks{ - Rpc: make(map[string]string), - Before: make(map[string]string), - After: make(map[string]string), + Rpc: make(map[string]string), + Before: make(map[string]string), + After: make(map[string]string), + StorageIndexFilter: make(map[string]string), } if len(modCache.Names) == 0 { @@ -2258,7 +2350,7 @@ func evalRuntimeModules(rp *RuntimeProviderJS, modCache *RuntimeJSModuleCache, m } modName := modCache.Names[0] - initializer := NewRuntimeJavascriptInitModule(logger, callbacks, matchHandlers, announceCallbackFn) + initializer := NewRuntimeJavascriptInitModule(logger, storageIndex, callbacks, matchHandlers, announceCallbackFn) init, err := initializer.Constructor(r) if err != nil { return nil, err } @@ -2269,7 +2361,7 @@ func evalRuntimeModules(rp *RuntimeProviderJS, modCache *RuntimeJSModuleCache, m return nil, err } - nakamaModule := NewRuntimeJavascriptNakamaModule(rp.logger, rp.db, rp.protojsonMarshaler, rp.protojsonUnmarshaler, rp.config, rp.socialClient, rp.leaderboardCache,
rp.leaderboardRankCache, localCache, leaderboardScheduler, rp.sessionRegistry, rp.sessionCache, rp.statusRegistry, rp.matchRegistry, rp.tracker, rp.metrics, rp.streamManager, rp.router, rp.eventFn, matchProvider.CreateMatch) + nakamaModule := NewRuntimeJavascriptNakamaModule(rp.logger, rp.db, rp.protojsonMarshaler, rp.protojsonUnmarshaler, rp.config, rp.socialClient, rp.leaderboardCache, rp.leaderboardRankCache, storageIndex, localCache, leaderboardScheduler, rp.sessionRegistry, rp.sessionCache, rp.statusRegistry, rp.matchRegistry, rp.tracker, rp.metrics, rp.streamManager, rp.router, rp.eventFn, matchProvider.CreateMatch) nk, err := nakamaModule.Constructor(r) if err != nil { return nil, err diff --git a/server/runtime_javascript_init.go b/server/runtime_javascript_init.go index 62389b374a..052b2d93d6 100644 --- a/server/runtime_javascript_init.go +++ b/server/runtime_javascript_init.go @@ -15,7 +15,9 @@ package server import ( + "context" "errors" + "fmt" "strings" "sync" @@ -61,6 +63,7 @@ type RuntimeJavascriptCallbacks struct { Rpc map[string]string Before map[string]string After map[string]string + StorageIndexFilter map[string]string Matchmaker string TournamentEnd string TournamentReset string @@ -75,12 +78,14 @@ type RuntimeJavascriptInitModule struct { Logger *zap.Logger Callbacks *RuntimeJavascriptCallbacks MatchCallbacks *RuntimeJavascriptMatchHandlers + storageIndex StorageIndex announceCallbackFn func(RuntimeExecutionMode, string) } -func NewRuntimeJavascriptInitModule(logger *zap.Logger, callbacks *RuntimeJavascriptCallbacks, matchCallbacks *RuntimeJavascriptMatchHandlers, announceCallbackFn func(RuntimeExecutionMode, string)) *RuntimeJavascriptInitModule { +func NewRuntimeJavascriptInitModule(logger *zap.Logger, storageIndex StorageIndex, callbacks *RuntimeJavascriptCallbacks, matchCallbacks *RuntimeJavascriptMatchHandlers, announceCallbackFn func(RuntimeExecutionMode, string)) *RuntimeJavascriptInitModule { return &RuntimeJavascriptInitModule{ Logger: logger, + storageIndex: storageIndex, announceCallbackFn: announceCallbackFn, Callbacks: callbacks, MatchCallbacks: matchCallbacks, @@ -253,6 +258,8 @@ func (im *RuntimeJavascriptInitModule) mappings(r *goja.Runtime) map[string]func "registerAfterGetSubscription": im.registerAfterGetSubscription(r), "registerBeforeEvent": im.registerBeforeEvent(r), "registerAfterEvent": im.registerAfterEvent(r), + "registerStorageIndex": im.registerStorageIndex(r), + "registerStorageIndexFilter": im.registerStorageIndexFilter(r), } } @@ -1111,6 +1118,83 @@ func (im *RuntimeJavascriptInitModule) registerSubscriptionNotificationGoogle(r } } +func (im *RuntimeJavascriptInitModule) registerStorageIndex(r *goja.Runtime) func(call goja.FunctionCall) goja.Value { + return func(f goja.FunctionCall) goja.Value { + idxName := getJsString(r, f.Argument(0)) + idxCollection := getJsString(r, f.Argument(1)) + + var idxKey string + if !goja.IsUndefined(f.Argument(2)) && !goja.IsNull(f.Argument(2)) { + idxKey = getJsString(r, f.Argument(2)) + } + + var fields []string + ownersArray := f.Argument(3) + if goja.IsUndefined(ownersArray) || goja.IsNull(ownersArray) { + panic(r.NewTypeError("expects an array of fields")) + } + fieldsSlice, ok := ownersArray.Export().([]interface{}) + if !ok { + panic(r.NewTypeError("expects an array of fields")) + } + if len(fieldsSlice) < 1 { + panic(r.NewTypeError("expects at least one field to be set")) + } + fields = make([]string, 0, len(fieldsSlice)) + for _, field := range fieldsSlice { + fieldStr, ok := field.(string) + if 
!ok { + panic(r.NewTypeError("expects a string field")) + } + fields = append(fields, fieldStr) + } + + idxMaxEntries := int(getJsInt(r, f.Argument(4))) + + if err := im.storageIndex.CreateIndex(context.Background(), idxName, idxCollection, idxKey, fields, idxMaxEntries); err != nil { + panic(r.NewGoError(fmt.Errorf("Failed to register storage index: %s", err.Error()))) + } + + return goja.Undefined() + } +} + +func (im *RuntimeJavascriptInitModule) registerStorageIndexFilter(r *goja.Runtime) func(goja.FunctionCall) goja.Value { + return func(f goja.FunctionCall) goja.Value { + fName := f.Argument(0) + if goja.IsNull(fName) || goja.IsUndefined(fName) { + panic(r.NewTypeError("expects a non empty string")) + } + key := fName.String() + if key == "" { + panic(r.NewTypeError("expects a non empty string")) + } + + fn := f.Argument(1) + _, ok := goja.AssertFunction(fn) + if !ok { + panic(r.NewTypeError("expects a function")) + } + + fnObj, ok := fn.(*goja.Object) + if !ok { + panic(r.NewTypeError("expects an object")) + } + + v := fnObj.Get("name") + if v == nil { + panic(r.NewTypeError("function key could not be extracted")) + } + + fnKey := strings.Clone(v.String()) + + im.registerCallbackFn(RuntimeExecutionModeStorageIndexFilter, key, fnKey) + im.announceCallbackFn(RuntimeExecutionModeStorageIndexFilter, key) + + return goja.Undefined() + } +} + func (im *RuntimeJavascriptInitModule) getFnKey(r *goja.Runtime, fn goja.Value) (string, error) { if fn == nil { return "", errors.New("not found") @@ -1228,5 +1312,7 @@ func (im *RuntimeJavascriptInitModule) registerCallbackFn(mode RuntimeExecutionM im.Callbacks.PurchaseNotificationGoogle = fn case RuntimeExecutionModeSubscriptionNotificationGoogle: im.Callbacks.SubscriptionNotificationGoogle = fn + case RuntimeExecutionModeStorageIndexFilter: + im.Callbacks.StorageIndexFilter[key] = fn } } diff --git a/server/runtime_javascript_match_core.go b/server/runtime_javascript_match_core.go index 3e493e43c6..bad2574df5 100644 --- a/server/runtime_javascript_match_core.go +++ b/server/runtime_javascript_match_core.go @@ -68,7 +68,7 @@ type RuntimeJavaScriptMatchCore struct { ctxCancelFn context.CancelFunc } -func NewRuntimeJavascriptMatchCore(logger *zap.Logger, module string, db *sql.DB, protojsonMarshaler *protojson.MarshalOptions, protojsonUnmarshaler *protojson.UnmarshalOptions, config Config, socialClient *social.Client, leaderboardCache LeaderboardCache, rankCache LeaderboardRankCache, localCache *RuntimeJavascriptLocalCache, leaderboardScheduler LeaderboardScheduler, sessionRegistry SessionRegistry, sessionCache SessionCache, statusRegistry *StatusRegistry, matchRegistry MatchRegistry, tracker Tracker, metrics Metrics, streamManager StreamManager, router MessageRouter, matchCreateFn RuntimeMatchCreateFunction, eventFn RuntimeEventCustomFunction, id uuid.UUID, node, version string, stopped *atomic.Bool, matchHandlers *jsMatchHandlers, modCache *RuntimeJSModuleCache) (RuntimeMatchCore, error) { +func NewRuntimeJavascriptMatchCore(logger *zap.Logger, module string, db *sql.DB, protojsonMarshaler *protojson.MarshalOptions, protojsonUnmarshaler *protojson.UnmarshalOptions, config Config, socialClient *social.Client, leaderboardCache LeaderboardCache, rankCache LeaderboardRankCache, localCache *RuntimeJavascriptLocalCache, leaderboardScheduler LeaderboardScheduler, sessionRegistry SessionRegistry, sessionCache SessionCache, statusRegistry *StatusRegistry, matchRegistry MatchRegistry, tracker Tracker, metrics Metrics, streamManager StreamManager, router 
MessageRouter, matchCreateFn RuntimeMatchCreateFunction, eventFn RuntimeEventCustomFunction, id uuid.UUID, node, version string, stopped *atomic.Bool, matchHandlers *jsMatchHandlers, modCache *RuntimeJSModuleCache, storageIndex StorageIndex) (RuntimeMatchCore, error) { runtime := goja.New() jsLoggerInst, err := NewJsLogger(runtime, logger) @@ -76,7 +76,7 @@ func NewRuntimeJavascriptMatchCore(logger *zap.Logger, module string, db *sql.DB logger.Fatal("Failed to initialize JavaScript runtime", zap.Error(err)) } - nakamaModule := NewRuntimeJavascriptNakamaModule(logger, db, protojsonMarshaler, protojsonUnmarshaler, config, socialClient, leaderboardCache, rankCache, localCache, leaderboardScheduler, sessionRegistry, sessionCache, statusRegistry, matchRegistry, tracker, metrics, streamManager, router, eventFn, matchCreateFn) + nakamaModule := NewRuntimeJavascriptNakamaModule(logger, db, protojsonMarshaler, protojsonUnmarshaler, config, socialClient, leaderboardCache, rankCache, storageIndex, localCache, leaderboardScheduler, sessionRegistry, sessionCache, statusRegistry, matchRegistry, tracker, metrics, streamManager, router, eventFn, matchCreateFn) nk, err := nakamaModule.Constructor(runtime) if err != nil { logger.Fatal("Failed to initialize JavaScript runtime", zap.Error(err)) diff --git a/server/runtime_javascript_nakama.go b/server/runtime_javascript_nakama.go index 0d4bc95e28..b5b026bae2 100644 --- a/server/runtime_javascript_nakama.go +++ b/server/runtime_javascript_nakama.go @@ -79,6 +79,7 @@ type runtimeJavascriptNakamaModule struct { matchRegistry MatchRegistry streamManager StreamManager router MessageRouter + storageIndex StorageIndex node string matchCreateFn RuntimeMatchCreateFunction @@ -87,7 +88,7 @@ type runtimeJavascriptNakamaModule struct { satori runtime.Satori } -func NewRuntimeJavascriptNakamaModule(logger *zap.Logger, db *sql.DB, protojsonMarshaler *protojson.MarshalOptions, protojsonUnmarshaler *protojson.UnmarshalOptions, config Config, socialClient *social.Client, leaderboardCache LeaderboardCache, rankCache LeaderboardRankCache, localCache *RuntimeJavascriptLocalCache, leaderboardScheduler LeaderboardScheduler, sessionRegistry SessionRegistry, sessionCache SessionCache, statusRegistry *StatusRegistry, matchRegistry MatchRegistry, tracker Tracker, metrics Metrics, streamManager StreamManager, router MessageRouter, eventFn RuntimeEventCustomFunction, matchCreateFn RuntimeMatchCreateFunction) *runtimeJavascriptNakamaModule { +func NewRuntimeJavascriptNakamaModule(logger *zap.Logger, db *sql.DB, protojsonMarshaler *protojson.MarshalOptions, protojsonUnmarshaler *protojson.UnmarshalOptions, config Config, socialClient *social.Client, leaderboardCache LeaderboardCache, rankCache LeaderboardRankCache, storageIndex StorageIndex, localCache *RuntimeJavascriptLocalCache, leaderboardScheduler LeaderboardScheduler, sessionRegistry SessionRegistry, sessionCache SessionCache, statusRegistry *StatusRegistry, matchRegistry MatchRegistry, tracker Tracker, metrics Metrics, streamManager StreamManager, router MessageRouter, eventFn RuntimeEventCustomFunction, matchCreateFn RuntimeMatchCreateFunction) *runtimeJavascriptNakamaModule { return &runtimeJavascriptNakamaModule{ ctx: context.Background(), logger: logger, @@ -110,6 +111,7 @@ func NewRuntimeJavascriptNakamaModule(logger *zap.Logger, db *sql.DB, protojsonM leaderboardScheduler: leaderboardScheduler, httpClient: &http.Client{}, httpClientInsecure: &http.Client{Transport: &http.Transport{TLSClientConfig: 
&tls.Config{InsecureSkipVerify: true}}}, + storageIndex: storageIndex, node: config.GetName(), eventFn: eventFn, @@ -290,6 +292,7 @@ func (n *runtimeJavascriptNakamaModule) mappings(r *goja.Runtime) map[string]fun "channelIdBuild": n.channelIdBuild(r), "binaryToString": n.binaryToString(r), "stringToBinary": n.stringToBinary(r), + "storageIndexList": n.storageIndexList(r), } } @@ -337,6 +340,61 @@ func (n *runtimeJavascriptNakamaModule) stringToBinary(r *goja.Runtime) func(goj } } +// @group storage +// @summary List storage index entries +// @param indexName(type=string) Name of the index to list entries from. +// @param queryString(type=string) Query to filter index entries. +// @param limit(type=int) Maximum number of results to be returned. +// @return objects(nkruntime.StorageObjectList) A list of storage objects. +// @return error(error) An optional error value if an error occurred. +func (n *runtimeJavascriptNakamaModule) storageIndexList(r *goja.Runtime) func(goja.FunctionCall) goja.Value { + return func(f goja.FunctionCall) goja.Value { + idxName := getJsString(r, f.Argument(0)) + queryString := getJsString(r, f.Argument(1)) + limit := 100 + if !goja.IsUndefined(f.Argument(2)) && !goja.IsNull(f.Argument(2)) { + limit = int(getJsInt(r, f.Argument(2))) + if limit < 1 || limit > 100 { + panic(r.NewTypeError("limit must be 1-100")) + } + } + + objectList, err := n.storageIndex.List(n.ctx, idxName, queryString, int(limit)) + if err != nil { + panic(r.NewGoError(fmt.Errorf("failed to lookup storage index: %s", err.Error()))) + } + + objects := make([]interface{}, 0, len(objectList.Objects)) + for _, o := range objectList.Objects { + objectMap := make(map[string]interface{}, 9) + objectMap["key"] = o.Key + objectMap["collection"] = o.Collection + if o.UserId != "" { + objectMap["userId"] = o.UserId + } else { + objectMap["userId"] = nil + } + objectMap["version"] = o.Version + objectMap["permissionRead"] = o.PermissionRead + objectMap["permissionWrite"] = o.PermissionWrite + objectMap["createTime"] = o.CreateTime.Seconds + objectMap["updateTime"] = o.UpdateTime.Seconds + + valueMap := make(map[string]interface{}) + err = json.Unmarshal([]byte(o.Value), &valueMap) + if err != nil { + panic(r.NewGoError(fmt.Errorf("failed to convert value to json: %s", err.Error()))) + } + pointerizeSlices(valueMap) + objectMap["value"] = valueMap + + objects = append(objects, objectMap) + } + + return r.ToValue(objects) + } +} + // @group events // @summary Generate an event. // @param event_name(type=string) The name of the event to be created. @@ -1771,7 +1829,7 @@ func (n *runtimeJavascriptNakamaModule) accountGetId(r *goja.Runtime) func(goja. 
panic(r.NewGoError(fmt.Errorf("error getting account: %v", err.Error()))) } - accountData, err := getJsAccountData(account) + accountData, err := accountToJsObject(account) if err != nil { panic(r.NewGoError(err)) } @@ -1817,7 +1875,7 @@ func (n *runtimeJavascriptNakamaModule) accountsGetId(r *goja.Runtime) func(goja accountsData := make([]map[string]interface{}, 0, len(accounts)) for _, account := range accounts { - accountData, err := getJsAccountData(account) + accountData, err := accountToJsObject(account) if err != nil { panic(r.NewGoError(err)) } @@ -2012,7 +2070,7 @@ func (n *runtimeJavascriptNakamaModule) usersGetId(r *goja.Runtime) func(goja.Fu usersData := make([]map[string]interface{}, 0, len(users.Users)) for _, user := range users.Users { - userData, err := getJsUserData(user) + userData, err := userToJsObject(user) if err != nil { panic(r.NewGoError(err)) } @@ -2057,7 +2115,7 @@ func (n *runtimeJavascriptNakamaModule) usersGetUsername(r *goja.Runtime) func(g usersData := make([]map[string]interface{}, 0, len(users.Users)) for _, user := range users.Users { - userData, err := getJsUserData(user) + userData, err := userToJsObject(user) if err != nil { panic(r.NewGoError(err)) } @@ -2088,7 +2146,7 @@ func (n *runtimeJavascriptNakamaModule) usersGetRandom(r *goja.Runtime) func(goj usersData := make([]map[string]interface{}, 0, len(users)) for _, user := range users { - userData, err := getJsUserData(user) + userData, err := userToJsObject(user) if err != nil { panic(r.NewGoError(err)) } @@ -2746,7 +2804,7 @@ func (n *runtimeJavascriptNakamaModule) streamUserList(r *goja.Runtime) func(goj includeNotHidden = getJsBool(r, f.Argument(2)) } - stream := getStreamData(r, streamObj) + stream := jsObjectToPresenceStream(r, streamObj) presences := n.tracker.ListByStream(stream, includeHidden, includeNotHidden) presencesList := make([]map[string]interface{}, 0, len(presences)) @@ -2803,7 +2861,7 @@ func (n *runtimeJavascriptNakamaModule) streamUserGet(r *goja.Runtime) func(goja panic(r.NewTypeError("expects a stream object")) } - stream := getStreamData(r, streamObj) + stream := jsObjectToPresenceStream(r, streamObj) meta := n.tracker.GetLocalBySessionIDStreamUserID(sessionID, stream, userID) if meta == nil { return goja.Null() @@ -2874,7 +2932,7 @@ func (n *runtimeJavascriptNakamaModule) streamUserJoin(r *goja.Runtime) func(goj status = getJsString(r, f.Argument(5)) } - stream := getStreamData(r, streamObj) + stream := jsObjectToPresenceStream(r, streamObj) success, newlyTracked, err := n.streamManager.UserJoin(stream, userID, sessionID, hidden, persistence, status) if err != nil { @@ -2945,7 +3003,7 @@ func (n *runtimeJavascriptNakamaModule) streamUserUpdate(r *goja.Runtime) func(g status = getJsString(r, f.Argument(5)) } - stream := getStreamData(r, streamObj) + stream := jsObjectToPresenceStream(r, streamObj) success, err := n.streamManager.UserUpdate(stream, userID, sessionID, hidden, persistence, status) if err != nil { @@ -2997,7 +3055,7 @@ func (n *runtimeJavascriptNakamaModule) streamUserLeave(r *goja.Runtime) func(go panic(r.NewTypeError("expects a stream object")) } - stream := getStreamData(r, streamObj) + stream := jsObjectToPresenceStream(r, streamObj) if err := n.streamManager.UserLeave(stream, userID, sessionID); err != nil { panic(r.NewGoError(fmt.Errorf("stream user leave failed: %v", err.Error()))) @@ -3075,7 +3133,7 @@ func (n *runtimeJavascriptNakamaModule) streamUserKick(r *goja.Runtime) func(goj panic(r.NewTypeError("expects a stream object")) } - stream := 
getStreamData(r, streamObj) + stream := jsObjectToPresenceStream(r, streamObj) if err := n.streamManager.UserLeave(stream, userID, sessionID); err != nil { panic(r.NewGoError(fmt.Errorf("stream user kick failed: %v", err.Error()))) @@ -3101,7 +3159,7 @@ func (n *runtimeJavascriptNakamaModule) streamCount(r *goja.Runtime) func(goja.F panic(r.NewTypeError("expects a stream object")) } - stream := getStreamData(r, streamObj) + stream := jsObjectToPresenceStream(r, streamObj) count := n.tracker.CountByStream(stream) @@ -3124,7 +3182,7 @@ func (n *runtimeJavascriptNakamaModule) streamClose(r *goja.Runtime) func(goja.F panic(r.NewTypeError("expects a stream object")) } - stream := getStreamData(r, streamObj) + stream := jsObjectToPresenceStream(r, streamObj) n.tracker.UntrackByStream(stream) @@ -3150,7 +3208,7 @@ func (n *runtimeJavascriptNakamaModule) streamSend(r *goja.Runtime) func(goja.Fu panic(r.NewTypeError("expects a stream object")) } - stream := getStreamData(r, streamObj) + stream := jsObjectToPresenceStream(r, streamObj) data := getJsString(r, f.Argument(1)) @@ -3250,7 +3308,7 @@ func (n *runtimeJavascriptNakamaModule) streamSendRaw(r *goja.Runtime) func(goja panic(r.NewTypeError("expects a stream object")) } - stream := getStreamData(r, streamObj) + stream := jsObjectToPresenceStream(r, streamObj) envelopeMap, ok := f.Argument(1).Export().(map[string]interface{}) if !ok { @@ -4198,7 +4256,7 @@ func (n *runtimeJavascriptNakamaModule) storageList(r *goja.Runtime) func(goja.F objects := make([]interface{}, 0, len(objectList.Objects)) for _, o := range objectList.Objects { - objectMap := make(map[string]interface{}) + objectMap := make(map[string]interface{}, 9) objectMap["key"] = o.Key objectMap["collection"] = o.Collection if o.UserId != "" { @@ -4355,129 +4413,138 @@ func (n *runtimeJavascriptNakamaModule) storageWrite(r *goja.Runtime) func(goja. 
} dataSlice, ok := data.Export().([]interface{}) if !ok { - panic(r.ToValue(r.NewTypeError("expects a valid array of data"))) + panic(r.NewTypeError("expects a valid array of data")) } - ops := make(StorageOpWrites, 0, len(dataSlice)) - for _, data := range dataSlice { - dataMap, ok := data.(map[string]interface{}) - if !ok { - panic(r.NewTypeError("expects a data entry to be an object")) - } + ops, err := jsArrayToStorageOpWrites(r, dataSlice) + if err != nil { + panic(r.NewTypeError(err.Error())) + } - var userID uuid.UUID - writeOp := &api.WriteStorageObject{} + acks, _, err := StorageWriteObjects(n.ctx, n.logger, n.db, n.metrics, n.storageIndex, true, ops) + if err != nil { + panic(r.NewGoError(fmt.Errorf("failed to write storage objects: %s", err.Error()))) + } - if collectionIn, ok := dataMap["collection"]; ok { - collection, ok := collectionIn.(string) - if !ok { - panic(r.NewTypeError("expects 'collection' value to be a string")) - } - if collection == "" { - panic(r.NewTypeError("expects 'collection' value to be non-empty")) - } - writeOp.Collection = collection - } + results := make([]interface{}, 0, len(acks.Acks)) + for _, ack := range acks.Acks { + result := make(map[string]interface{}, 4) + result["key"] = ack.Key + result["collection"] = ack.Collection + result["userId"] = ack.UserId + result["version"] = ack.Version + + results = append(results, result) + } + + return r.ToValue(results) + } +} + +func jsArrayToStorageOpWrites(r *goja.Runtime, dataSlice []any) (StorageOpWrites, error) { + ops := make(StorageOpWrites, 0, len(dataSlice)) + for _, data := range dataSlice { + dataMap, ok := data.(map[string]interface{}) + if !ok { + return nil, errors.New("expects a data entry to be an object") + } + + var userID uuid.UUID + writeOp := &api.WriteStorageObject{} - keyIn, ok := dataMap["key"] - key, ok := keyIn.(string) + if collectionIn, ok := dataMap["collection"]; ok { + collection, ok := collectionIn.(string) if !ok { - panic(r.NewTypeError("expects 'key' value to be a string")) + return nil, errors.New("expects 'collection' value to be a string") } - if key == "" { - panic(r.NewTypeError("expects 'key' value to be non-empty")) + if collection == "" { + return nil, errors.New("expects 'collection' value to be non-empty") } - writeOp.Key = key + writeOp.Collection = collection + } - userIDIn, ok := dataMap["userId"] - if userIDIn == nil { - userID = uuid.Nil - } else { - userIDStr, ok := userIDIn.(string) - if !ok { - panic(r.NewTypeError("expects 'userId' value to be a string")) - } - var err error - userID, err = uuid.FromString(userIDStr) - if err != nil { - panic(r.NewTypeError("expects 'userId' value to be a valid id")) - } - } + keyIn, ok := dataMap["key"] + key, ok := keyIn.(string) + if !ok { + return nil, errors.New("expects 'key' value to be a string") + } + if key == "" { + return nil, errors.New("expects 'key' value to be non-empty") + } + writeOp.Key = key - valueIn, ok := dataMap["value"] - valueMap, ok := valueIn.(map[string]interface{}) + userIDIn, ok := dataMap["userId"] + if userIDIn == nil { + userID = uuid.Nil + } else { + userIDStr, ok := userIDIn.(string) if !ok { - panic(r.NewTypeError("expects 'value' value to be an object")) + return nil, errors.New("expects 'userId' value to be a string") } - valueBytes, err := json.Marshal(valueMap) + var err error + userID, err = uuid.FromString(userIDStr) if err != nil { - panic(r.NewGoError(fmt.Errorf("failed to convert value: %s", err.Error()))) + return nil, errors.New("expects 'userId' value to be a valid id") } 
- writeOp.Value = string(valueBytes) + } - if versionIn, ok := dataMap["version"]; ok { - version, ok := versionIn.(string) - if !ok { - panic(r.NewTypeError("expects 'version' value to be a string")) - } - if version == "" { - panic(r.NewTypeError("expects 'version' value to be a non-empty string")) - } - writeOp.Version = version - } + valueIn, ok := dataMap["value"] + valueMap, ok := valueIn.(map[string]interface{}) + if !ok { + return nil, errors.New("expects 'value' value to be an object") + } + valueBytes, err := json.Marshal(valueMap) + if err != nil { + return nil, fmt.Errorf("failed to convert value: %s", err.Error()) + } + writeOp.Value = string(valueBytes) - if permissionReadIn, ok := dataMap["permissionRead"]; ok { - permissionRead, ok := permissionReadIn.(int64) - if !ok { - panic(r.NewTypeError("expects 'permissionRead' value to be a number")) - } - writeOp.PermissionRead = &wrapperspb.Int32Value{Value: int32(permissionRead)} - } else { - writeOp.PermissionRead = &wrapperspb.Int32Value{Value: 1} + if versionIn, ok := dataMap["version"]; ok { + version, ok := versionIn.(string) + if !ok { + return nil, errors.New("expects 'version' value to be a string") } - - if permissionWriteIn, ok := dataMap["permissionWrite"]; ok { - permissionWrite, ok := permissionWriteIn.(int64) - if !ok { - panic(r.NewTypeError("expects 'permissionWrite' value to be a number")) - } - writeOp.PermissionWrite = &wrapperspb.Int32Value{Value: int32(permissionWrite)} - } else { - writeOp.PermissionWrite = &wrapperspb.Int32Value{Value: 1} + if version == "" { + return nil, errors.New("expects 'version' value to be a non-empty string") } + writeOp.Version = version + } - if writeOp.Collection == "" { - panic(r.NewTypeError("expects collection to be supplied")) - } else if writeOp.Key == "" { - panic(r.NewTypeError("expects key to be supplied")) - } else if writeOp.Value == "" { - panic(r.NewTypeError("expects value to be supplied")) + if permissionReadIn, ok := dataMap["permissionRead"]; ok { + permissionRead, ok := permissionReadIn.(int64) + if !ok { + return nil, errors.New("expects 'permissionRead' value to be a number") } - - ops = append(ops, &StorageOpWrite{ - OwnerID: userID.String(), - Object: writeOp, - }) + writeOp.PermissionRead = &wrapperspb.Int32Value{Value: int32(permissionRead)} + } else { + writeOp.PermissionRead = &wrapperspb.Int32Value{Value: 1} } - acks, _, err := StorageWriteObjects(n.ctx, n.logger, n.db, n.metrics, true, ops) - if err != nil { - panic(r.NewGoError(fmt.Errorf("failed to write storage objects: %s", err.Error()))) + if permissionWriteIn, ok := dataMap["permissionWrite"]; ok { + permissionWrite, ok := permissionWriteIn.(int64) + if !ok { + return nil, errors.New("expects 'permissionWrite' value to be a number") + } + writeOp.PermissionWrite = &wrapperspb.Int32Value{Value: int32(permissionWrite)} + } else { + writeOp.PermissionWrite = &wrapperspb.Int32Value{Value: 1} } - results := make([]interface{}, 0, len(acks.Acks)) - for _, ack := range acks.Acks { - result := make(map[string]interface{}, 4) - result["key"] = ack.Key - result["collection"] = ack.Collection - result["userId"] = ack.UserId - result["version"] = ack.Version - - results = append(results, result) + if writeOp.Collection == "" { + return nil, errors.New("expects collection to be supplied") + } else if writeOp.Key == "" { + return nil, errors.New("expects key to be supplied") + } else if writeOp.Value == "" { + return nil, errors.New("expects value to be supplied") } - return r.ToValue(results) + ops = 
append(ops, &StorageOpWrite{ + OwnerID: userID.String(), + Object: writeOp, + }) } + + return ops, nil } // @group storage @@ -4562,7 +4629,7 @@ func (n *runtimeJavascriptNakamaModule) storageDelete(r *goja.Runtime) func(goja }) } - if _, err := StorageDeleteObjects(n.ctx, n.logger, n.db, true, ops); err != nil { + if _, err := StorageDeleteObjects(n.ctx, n.logger, n.db, n.storageIndex, true, ops); err != nil { panic(r.NewGoError(fmt.Errorf("failed to remove storage: %s", err.Error()))) } @@ -5038,7 +5105,7 @@ func (n *runtimeJavascriptNakamaModule) leaderboardList(r *goja.Runtime) func(go results := make([]interface{}, 0, len(list.Leaderboards)) for _, leaderboard := range list.Leaderboards { - t, err := getJsLeaderboardData(leaderboard) + t, err := leaderboardToJsObject(leaderboard) if err != nil { panic(r.NewGoError(err)) } @@ -5255,7 +5322,7 @@ func (n *runtimeJavascriptNakamaModule) leaderboardsGetId(r *goja.Runtime) func( leaderboardsSlice := make([]interface{}, 0, len(leaderboards)) for _, l := range leaderboards { - leaderboardMap, err := getJsLeaderboardData(l) + leaderboardMap, err := leaderboardToJsObject(l) if err != nil { panic(r.NewGoError(err)) } @@ -5359,7 +5426,7 @@ func (n *runtimeJavascriptNakamaModule) purchaseValidateApple(r *goja.Runtime) f panic(r.NewGoError(fmt.Errorf("error validating Apple receipt: %s", err.Error()))) } - validationResult := getJsValidatedPurchasesData(validation) + validationResult := purchaseResponseToJsObject(validation) return r.ToValue(validationResult) } @@ -5411,7 +5478,7 @@ func (n *runtimeJavascriptNakamaModule) purchaseValidateGoogle(r *goja.Runtime) panic(r.NewGoError(fmt.Errorf("error validating Google receipt: %s", err.Error()))) } - validationResult := getJsValidatedPurchasesData(validation) + validationResult := purchaseResponseToJsObject(validation) return r.ToValue(validationResult) } @@ -5462,7 +5529,7 @@ func (n *runtimeJavascriptNakamaModule) purchaseValidateHuawei(r *goja.Runtime) panic(r.NewGoError(fmt.Errorf("error validating Huawei receipt: %s", err.Error()))) } - validationResult := getJsValidatedPurchasesData(validation) + validationResult := purchaseResponseToJsObject(validation) return r.ToValue(validationResult) } @@ -5485,7 +5552,7 @@ func (n *runtimeJavascriptNakamaModule) purchaseGetByTransactionId(r *goja.Runti panic(r.NewGoError(fmt.Errorf("error retrieving purchase: %s", err.Error()))) } - return r.ToValue(getJsValidatedPurchaseData(purchase)) + return r.ToValue(validatedPurchaseToJsObject(purchase)) } } @@ -5526,7 +5593,7 @@ func (n *runtimeJavascriptNakamaModule) purchasesList(r *goja.Runtime) func(goja validatedPurchases := make([]interface{}, 0, len(purchases.ValidatedPurchases)) for _, p := range purchases.ValidatedPurchases { - validatedPurchase := getJsValidatedPurchaseData(p) + validatedPurchase := validatedPurchaseToJsObject(p) validatedPurchases = append(validatedPurchases, validatedPurchase) } @@ -5590,7 +5657,7 @@ func (n *runtimeJavascriptNakamaModule) subscriptionValidateApple(r *goja.Runtim panic(r.NewGoError(fmt.Errorf("error validating Apple receipt: %s", err.Error()))) } - validationResult := getJsValidatedSubscriptionData(validation) + validationResult := subscriptionResponseToJsObject(validation) return r.ToValue(validationResult) } @@ -5648,7 +5715,7 @@ func (n *runtimeJavascriptNakamaModule) subscriptionValidateGoogle(r *goja.Runti panic(r.NewGoError(fmt.Errorf("error validating Google receipt: %s", err.Error()))) } - validationResult := getJsValidatedSubscriptionData(validation) + 
validationResult := subscriptionResponseToJsObject(validation) return r.ToValue(validationResult) } @@ -5681,7 +5748,7 @@ func (n *runtimeJavascriptNakamaModule) subscriptionGetByProductId(r *goja.Runti panic(r.NewGoError(fmt.Errorf("error retrieving purchase: %s", err.Error()))) } - return r.ToValue(getJsSubscriptionData(subscription)) + return r.ToValue(subscriptionToJsObject(subscription)) } } @@ -5722,7 +5789,7 @@ func (n *runtimeJavascriptNakamaModule) subscriptionsList(r *goja.Runtime) func( validatedSubscriptions := make([]interface{}, 0, len(subscriptions.ValidatedSubscriptions)) for _, s := range subscriptions.ValidatedSubscriptions { - validatedSubscription := getJsSubscriptionData(s) + validatedSubscription := subscriptionToJsObject(s) validatedSubscriptions = append(validatedSubscriptions, validatedSubscription) } @@ -6020,7 +6087,7 @@ func (n *runtimeJavascriptNakamaModule) tournamentsGetId(r *goja.Runtime) func(g results := make([]interface{}, 0, len(list)) for _, tournament := range list { - tournament, err := getJsTournamentData(tournament) + tournament, err := tournamentToJsObject(tournament) if err != nil { panic(r.NewGoError(err)) } @@ -6242,7 +6309,7 @@ func (n *runtimeJavascriptNakamaModule) tournamentList(r *goja.Runtime) func(goj results := make([]interface{}, 0, len(list.Tournaments)) for _, tournament := range list.Tournaments { - t, err := getJsTournamentData(tournament) + t, err := tournamentToJsObject(tournament) if err != nil { panic(r.NewGoError(err)) } @@ -6997,7 +7064,7 @@ func (n *runtimeJavascriptNakamaModule) friendsList(r *goja.Runtime) func(goja.F userFriends := make([]interface{}, 0, len(friends.Friends)) for _, f := range friends.Friends { - fum, err := getJsUserData(f.User) + fum, err := userToJsObject(f.User) if err != nil { panic(r.NewGoError(err)) } @@ -7671,7 +7738,7 @@ func (n *runtimeJavascriptNakamaModule) groupsList(r *goja.Runtime) func(goja.Fu groupsSlice := make([]interface{}, 0, len(groups.Groups)) for _, g := range groups.Groups { - groupData, err := getJsGroupData(g) + groupData, err := groupToJsObject(g) if err != nil { panic(r.NewGoError(err)) } @@ -7712,7 +7779,7 @@ func (n *runtimeJavascriptNakamaModule) groupsGetRandom(r *goja.Runtime) func(go groupsData := make([]map[string]interface{}, 0, len(groups)) for _, group := range groups { - userData, err := getJsGroupData(group) + userData, err := groupToJsObject(group) if err != nil { panic(r.NewGoError(err)) } @@ -8507,9 +8574,9 @@ func getJsBool(r *goja.Runtime, v goja.Value) bool { return b } -func getJsAccountData(account *api.Account) (map[string]interface{}, error) { +func accountToJsObject(account *api.Account) (map[string]interface{}, error) { accountData := make(map[string]interface{}) - userData, err := getJsUserData(account.User) + userData, err := userToJsObject(account.User) if err != nil { return nil, err } @@ -8548,7 +8615,7 @@ func getJsAccountData(account *api.Account) (map[string]interface{}, error) { return accountData, nil } -func getJsUserData(user *api.User) (map[string]interface{}, error) { +func userToJsObject(user *api.User) (map[string]interface{}, error) { userData := make(map[string]interface{}, 18) userData["userId"] = user.Id userData["username"] = user.Username @@ -8591,7 +8658,7 @@ func getJsUserData(user *api.User) (map[string]interface{}, error) { return userData, nil } -func getJsGroupData(group *api.Group) (map[string]interface{}, error) { +func groupToJsObject(group *api.Group) (map[string]interface{}, error) { groupMap := 
make(map[string]interface{}, 12) groupMap["id"] = group.Id @@ -8617,7 +8684,7 @@ func getJsGroupData(group *api.Group) (map[string]interface{}, error) { return groupMap, nil } -func getJsLeaderboardData(leaderboard *api.Leaderboard) (map[string]interface{}, error) { +func leaderboardToJsObject(leaderboard *api.Leaderboard) (map[string]interface{}, error) { leaderboardMap := make(map[string]interface{}, 11) leaderboardMap["id"] = leaderboard.Id leaderboardMap["operator"] = strings.ToLower(leaderboard.Operator.String()) @@ -8641,7 +8708,7 @@ func getJsLeaderboardData(leaderboard *api.Leaderboard) (map[string]interface{}, return leaderboardMap, nil } -func getJsTournamentData(tournament *api.Tournament) (map[string]interface{}, error) { +func tournamentToJsObject(tournament *api.Tournament) (map[string]interface{}, error) { tournamentMap := make(map[string]interface{}, 18) tournamentMap["id"] = tournament.Id @@ -8681,10 +8748,10 @@ func getJsTournamentData(tournament *api.Tournament) (map[string]interface{}, er return tournamentMap, nil } -func getJsValidatedPurchasesData(validation *api.ValidatePurchaseResponse) map[string]interface{} { +func purchaseResponseToJsObject(validation *api.ValidatePurchaseResponse) map[string]interface{} { validatedPurchases := make([]interface{}, 0, len(validation.ValidatedPurchases)) for _, v := range validation.ValidatedPurchases { - validatedPurchases = append(validatedPurchases, getJsValidatedPurchaseData(v)) + validatedPurchases = append(validatedPurchases, validatedPurchaseToJsObject(v)) } validationMap := make(map[string]interface{}, 1) @@ -8693,7 +8760,7 @@ func getJsValidatedPurchasesData(validation *api.ValidatePurchaseResponse) map[s return validationMap } -func getJsValidatedPurchaseData(purchase *api.ValidatedPurchase) map[string]interface{} { +func validatedPurchaseToJsObject(purchase *api.ValidatedPurchase) map[string]interface{} { validatedPurchaseMap := make(map[string]interface{}, 11) validatedPurchaseMap["userId"] = purchase.UserId validatedPurchaseMap["productId"] = purchase.ProductId @@ -8718,11 +8785,11 @@ func getJsValidatedPurchaseData(purchase *api.ValidatedPurchase) map[string]inte return validatedPurchaseMap } -func getJsValidatedSubscriptionData(validation *api.ValidateSubscriptionResponse) map[string]interface{} { - return map[string]interface{}{"validatedSubscription": getJsSubscriptionData(validation.ValidatedSubscription)} +func subscriptionResponseToJsObject(validation *api.ValidateSubscriptionResponse) map[string]interface{} { + return map[string]interface{}{"validatedSubscription": subscriptionToJsObject(validation.ValidatedSubscription)} } -func getJsSubscriptionData(subscription *api.ValidatedSubscription) map[string]interface{} { +func subscriptionToJsObject(subscription *api.ValidatedSubscription) map[string]interface{} { validatedSubMap := make(map[string]interface{}, 13) validatedSubMap["userId"] = subscription.UserId validatedSubMap["productId"] = subscription.ProductId @@ -8749,7 +8816,7 @@ func getJsSubscriptionData(subscription *api.ValidatedSubscription) map[string]i return validatedSubMap } -func getStreamData(r *goja.Runtime, streamObj map[string]interface{}) PresenceStream { +func jsObjectToPresenceStream(r *goja.Runtime, streamObj map[string]interface{}) PresenceStream { stream := PresenceStream{} modeRaw, ok := streamObj["mode"] diff --git a/server/runtime_lua.go b/server/runtime_lua.go index ea6eeca74c..b262df000a 100644 --- a/server/runtime_lua.go +++ b/server/runtime_lua.go @@ -63,6 +63,7 @@ type 
RuntimeLuaCallbacks struct { SubscriptionNotificationApple *lua.LFunction PurchaseNotificationGoogle *lua.LFunction SubscriptionNotificationGoogle *lua.LFunction + StorageIndexFilter *MapOf[string, *lua.LFunction] } type RuntimeLuaModule struct { @@ -93,6 +94,7 @@ type RuntimeProviderLua struct { socialClient *social.Client leaderboardCache LeaderboardCache leaderboardRankCache LeaderboardRankCache + storageIndex StorageIndex sessionRegistry SessionRegistry matchRegistry MatchRegistry tracker Tracker @@ -109,14 +111,14 @@ type RuntimeProviderLua struct { statsCtx context.Context } -func NewRuntimeProviderLua(logger, startupLogger *zap.Logger, db *sql.DB, protojsonMarshaler *protojson.MarshalOptions, protojsonUnmarshaler *protojson.UnmarshalOptions, config Config, version string, socialClient *social.Client, leaderboardCache LeaderboardCache, leaderboardRankCache LeaderboardRankCache, leaderboardScheduler LeaderboardScheduler, sessionRegistry SessionRegistry, sessionCache SessionCache, statusRegistry *StatusRegistry, matchRegistry MatchRegistry, tracker Tracker, metrics Metrics, streamManager StreamManager, router MessageRouter, eventFn RuntimeEventCustomFunction, rootPath string, paths []string, matchProvider *MatchProvider) ([]string, map[string]RuntimeRpcFunction, map[string]RuntimeBeforeRtFunction, map[string]RuntimeAfterRtFunction, *RuntimeBeforeReqFunctions, *RuntimeAfterReqFunctions, RuntimeMatchmakerMatchedFunction, RuntimeTournamentEndFunction, RuntimeTournamentResetFunction, RuntimeLeaderboardResetFunction, RuntimePurchaseNotificationAppleFunction, RuntimeSubscriptionNotificationAppleFunction, RuntimePurchaseNotificationGoogleFunction, RuntimeSubscriptionNotificationGoogleFunction, error) { +func NewRuntimeProviderLua(logger, startupLogger *zap.Logger, db *sql.DB, protojsonMarshaler *protojson.MarshalOptions, protojsonUnmarshaler *protojson.UnmarshalOptions, config Config, version string, socialClient *social.Client, leaderboardCache LeaderboardCache, leaderboardRankCache LeaderboardRankCache, leaderboardScheduler LeaderboardScheduler, sessionRegistry SessionRegistry, sessionCache SessionCache, statusRegistry *StatusRegistry, matchRegistry MatchRegistry, tracker Tracker, metrics Metrics, streamManager StreamManager, router MessageRouter, eventFn RuntimeEventCustomFunction, rootPath string, paths []string, matchProvider *MatchProvider, storageIndex StorageIndex) ([]string, map[string]RuntimeRpcFunction, map[string]RuntimeBeforeRtFunction, map[string]RuntimeAfterRtFunction, *RuntimeBeforeReqFunctions, *RuntimeAfterReqFunctions, RuntimeMatchmakerMatchedFunction, RuntimeTournamentEndFunction, RuntimeTournamentResetFunction, RuntimeLeaderboardResetFunction, RuntimePurchaseNotificationAppleFunction, RuntimeSubscriptionNotificationAppleFunction, RuntimePurchaseNotificationGoogleFunction, RuntimeSubscriptionNotificationGoogleFunction, map[string]RuntimeStorageIndexFilterFunction, error) { startupLogger.Info("Initialising Lua runtime provider", zap.String("path", rootPath)) // Load Lua modules into memory by reading the file contents. No evaluation/execution at this stage. moduleCache, modulePaths, stdLibs, err := openLuaModules(startupLogger, rootPath, paths) if err != nil { // Errors already logged in the function call above. 
- return nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, err + return nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, err } once := &sync.Once{} @@ -134,6 +136,7 @@ func NewRuntimeProviderLua(logger, startupLogger *zap.Logger, db *sql.DB, protoj var subscriptionNotificationAppleFunction RuntimeSubscriptionNotificationAppleFunction var purchaseNotificationGoogleFunction RuntimePurchaseNotificationGoogleFunction var subscriptionNotificationGoogleFunction RuntimeSubscriptionNotificationGoogleFunction + storageIndexFilterFunctions := make(map[string]RuntimeStorageIndexFilterFunction, 0) var sharedReg *lua.LTable var sharedGlobals *lua.LTable @@ -147,6 +150,7 @@ func NewRuntimeProviderLua(logger, startupLogger *zap.Logger, db *sql.DB, protoj socialClient: socialClient, leaderboardCache: leaderboardCache, leaderboardRankCache: leaderboardRankCache, + storageIndex: storageIndex, sessionRegistry: sessionRegistry, matchRegistry: matchRegistry, tracker: tracker, @@ -165,11 +169,11 @@ func NewRuntimeProviderLua(logger, startupLogger *zap.Logger, db *sql.DB, protoj matchProvider.RegisterCreateFn("lua", func(ctx context.Context, logger *zap.Logger, id uuid.UUID, node string, stopped *atomic.Bool, name string) (RuntimeMatchCore, error) { - return NewRuntimeLuaMatchCore(logger, name, db, protojsonMarshaler, protojsonUnmarshaler, config, version, socialClient, leaderboardCache, leaderboardRankCache, leaderboardScheduler, sessionRegistry, sessionCache, statusRegistry, matchRegistry, tracker, metrics, streamManager, router, stdLibs, once, localCache, eventFn, nil, nil, id, node, stopped, name, matchProvider) + return NewRuntimeLuaMatchCore(logger, name, db, protojsonMarshaler, protojsonUnmarshaler, config, version, socialClient, leaderboardCache, leaderboardRankCache, leaderboardScheduler, sessionRegistry, sessionCache, statusRegistry, matchRegistry, tracker, metrics, streamManager, router, stdLibs, once, localCache, eventFn, nil, nil, id, node, stopped, name, matchProvider, storageIndex) }, ) - r, err := newRuntimeLuaVM(logger, db, protojsonMarshaler, protojsonUnmarshaler, config, version, socialClient, leaderboardCache, leaderboardRankCache, leaderboardScheduler, sessionRegistry, sessionCache, statusRegistry, matchRegistry, tracker, metrics, streamManager, router, stdLibs, moduleCache, once, localCache, matchProvider.CreateMatch, eventFn, func(execMode RuntimeExecutionMode, id string) { + r, err := newRuntimeLuaVM(logger, db, protojsonMarshaler, protojsonUnmarshaler, config, version, socialClient, leaderboardCache, leaderboardRankCache, leaderboardScheduler, sessionRegistry, sessionCache, statusRegistry, matchRegistry, tracker, metrics, streamManager, router, stdLibs, moduleCache, once, localCache, storageIndex, matchProvider.CreateMatch, eventFn, func(execMode RuntimeExecutionMode, id string) { switch execMode { case RuntimeExecutionModeRPC: rpcFunctions[id] = func(ctx context.Context, headers, queryParams map[string][]string, userID, username string, vars map[string]string, expiry int64, sessionID, clientIP, clientPort, lang, payload string) (string, error, codes.Code) { @@ -1139,10 +1143,14 @@ func NewRuntimeProviderLua(logger, startupLogger *zap.Logger, db *sql.DB, protoj subscriptionNotificationGoogleFunction = func(ctx context.Context, subscription *api.ValidatedSubscription, providerPayload string) error { return runtimeProviderLua.SubscriptionNotificationGoogle(ctx, subscription, providerPayload) } + case RuntimeExecutionModeStorageIndexFilter: + 
storageIndexFilterFunctions[id] = func(ctx context.Context, write *StorageOpWrite) (bool, error) { + return runtimeProviderLua.StorageIndexFilter(ctx, id, write) + } } }) if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, err + return nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, err } if config.GetRuntime().GetLuaReadOnlyGlobals() { @@ -1189,7 +1197,7 @@ func NewRuntimeProviderLua(logger, startupLogger *zap.Logger, db *sql.DB, protoj r.Stop() runtimeProviderLua.newFn = func() *RuntimeLua { - r, err := newRuntimeLuaVM(logger, db, protojsonMarshaler, protojsonUnmarshaler, config, version, socialClient, leaderboardCache, leaderboardRankCache, leaderboardScheduler, sessionRegistry, sessionCache, statusRegistry, matchRegistry, tracker, metrics, streamManager, router, stdLibs, moduleCache, once, localCache, matchProvider.CreateMatch, eventFn, nil) + r, err := newRuntimeLuaVM(logger, db, protojsonMarshaler, protojsonUnmarshaler, config, version, socialClient, leaderboardCache, leaderboardRankCache, leaderboardScheduler, sessionRegistry, sessionCache, statusRegistry, matchRegistry, tracker, metrics, streamManager, router, stdLibs, moduleCache, once, localCache, storageIndex, matchProvider.CreateMatch, eventFn, nil) if err != nil { logger.Fatal("Failed to initialize Lua runtime", zap.Error(err)) } @@ -1210,7 +1218,7 @@ func NewRuntimeProviderLua(logger, startupLogger *zap.Logger, db *sql.DB, protoj } startupLogger.Info("Allocated minimum Lua runtime pool") - return modulePaths, rpcFunctions, beforeRtFunctions, afterRtFunctions, beforeReqFunctions, afterReqFunctions, matchmakerMatchedFunction, tournamentEndFunction, tournamentResetFunction, leaderboardResetFunction, purchaseNotificationAppleFunction, subscriptionNotificationAppleFunction, purchaseNotificationGoogleFunction, subscriptionNotificationGoogleFunction, nil + return modulePaths, rpcFunctions, beforeRtFunctions, afterRtFunctions, beforeReqFunctions, afterReqFunctions, matchmakerMatchedFunction, tournamentEndFunction, tournamentResetFunction, leaderboardResetFunction, purchaseNotificationAppleFunction, subscriptionNotificationAppleFunction, purchaseNotificationGoogleFunction, subscriptionNotificationGoogleFunction, storageIndexFilterFunctions, nil } func CheckRuntimeProviderLua(logger *zap.Logger, config Config, version string, paths []string) error { @@ -2000,6 +2008,65 @@ func (rp *RuntimeProviderLua) SubscriptionNotificationGoogle(ctx context.Context return errors.New("Unexpected return type from runtime Subscription Notification Google hook, must be nil.") } +func (rp *RuntimeProviderLua) StorageIndexFilter(ctx context.Context, indexName string, write *StorageOpWrite) (bool, error) { + r, err := rp.Get(ctx) + if err != nil { + return false, err + } + lf := r.GetCallback(RuntimeExecutionModeStorageIndexFilter, indexName) + if lf == nil { + rp.Put(r) + return false, fmt.Errorf("Runtime Storage Index function not found for index: %q.", indexName) + } + + luaCtx := NewRuntimeLuaContext(r.vm, r.node, r.version, r.luaEnv, RuntimeExecutionModeStorageIndexFilter, nil, nil, 0, "", "", nil, "", "", "", "") + + //table, err := storageOpWritesToTable(r.vm, storageWrites) + if err != nil { + return false, fmt.Errorf("Error running runtime Storage Index Filter hook for %q index: %v", indexName, err.Error()) + } + + writeTable := r.vm.CreateTable(0, 7) + writeTable.RawSetString("key", lua.LString(write.Object.Key)) + writeTable.RawSetString("collection", 
lua.LString(write.Object.Collection)) + if write.OwnerID != "" { + writeTable.RawSetString("user_id", lua.LString(write.OwnerID)) + } else { + writeTable.RawSetString("user_id", lua.LNil) + } + writeTable.RawSetString("version", lua.LString(write.Object.Version)) + writeTable.RawSetString("permission_read", lua.LNumber(write.Object.PermissionRead.GetValue())) + writeTable.RawSetString("permission_write", lua.LNumber(write.Object.PermissionWrite.GetValue())) + + valueMap := make(map[string]interface{}) + err = json.Unmarshal([]byte(write.Object.Value), &valueMap) + if err != nil { + return false, fmt.Errorf("failed to convert value to json: %s", err.Error()) + } + valueTable := RuntimeLuaConvertMap(r.vm, valueMap) + writeTable.RawSetString("value", valueTable) + + // Set context value used for logging + vmCtx := context.WithValue(ctx, ctxLoggerFields{}, map[string]string{"mode": RuntimeExecutionModeStorageIndexFilter.String()}) + r.vm.SetContext(vmCtx) + retValue, err, _, _ := r.invokeFunction(r.vm, lf, luaCtx, writeTable) + r.vm.SetContext(context.Background()) + rp.Put(r) + if err != nil { + return false, fmt.Errorf("Error running runtime Storage Index Filter hook for %q index: %v", indexName, err.Error()) + } + + if retValue == nil || retValue == lua.LNil { + return false, errors.New("Invalid return type for Storage Index Filter function: bool expected") + } + + if retValue.Type() != lua.LTBool { + return false, fmt.Errorf("Error running runtime Storage Index Filter hook for %q index: failed to assert lua fn expected return type", indexName) + } + + return lua.LVAsBool(retValue), nil +} + func (rp *RuntimeProviderLua) Get(ctx context.Context) (*RuntimeLua, error) { select { case <-ctx.Done(): @@ -2162,6 +2229,12 @@ func (r *RuntimeLua) GetCallback(e RuntimeExecutionMode, key string) *lua.LFunct return r.callbacks.PurchaseNotificationGoogle case RuntimeExecutionModeSubscriptionNotificationGoogle: return r.callbacks.SubscriptionNotificationGoogle + case RuntimeExecutionModeStorageIndexFilter: + fn, found := r.callbacks.StorageIndexFilter.Load(key) + if !found { + return nil + } + return fn } return nil @@ -2284,7 +2357,7 @@ func checkRuntimeLuaVM(logger *zap.Logger, config Config, version string, stdLib vm.Push(lua.LString(name)) vm.Call(1, 0) } - nakamaModule := NewRuntimeLuaNakamaModule(nil, nil, nil, nil, config, version, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) + nakamaModule := NewRuntimeLuaNakamaModule(nil, nil, nil, nil, config, version, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) vm.PreloadModule("nakama", nakamaModule.Loader) preload := vm.GetField(vm.GetField(vm.Get(lua.EnvironIndex), "package"), "preload") @@ -2305,7 +2378,7 @@ func checkRuntimeLuaVM(logger *zap.Logger, config Config, version string, stdLib return nil } -func newRuntimeLuaVM(logger *zap.Logger, db *sql.DB, protojsonMarshaler *protojson.MarshalOptions, protojsonUnmarshaler *protojson.UnmarshalOptions, config Config, version string, socialClient *social.Client, leaderboardCache LeaderboardCache, rankCache LeaderboardRankCache, leaderboardScheduler LeaderboardScheduler, sessionRegistry SessionRegistry, sessionCache SessionCache, statusRegistry *StatusRegistry, matchRegistry MatchRegistry, tracker Tracker, metrics Metrics, streamManager StreamManager, router MessageRouter, stdLibs map[string]lua.LGFunction, moduleCache *RuntimeLuaModuleCache, once *sync.Once, localCache *RuntimeLuaLocalCache, matchCreateFn 
RuntimeMatchCreateFunction, eventFn RuntimeEventCustomFunction, announceCallbackFn func(RuntimeExecutionMode, string)) (*RuntimeLua, error) { +func newRuntimeLuaVM(logger *zap.Logger, db *sql.DB, protojsonMarshaler *protojson.MarshalOptions, protojsonUnmarshaler *protojson.UnmarshalOptions, config Config, version string, socialClient *social.Client, leaderboardCache LeaderboardCache, rankCache LeaderboardRankCache, leaderboardScheduler LeaderboardScheduler, sessionRegistry SessionRegistry, sessionCache SessionCache, statusRegistry *StatusRegistry, matchRegistry MatchRegistry, tracker Tracker, metrics Metrics, streamManager StreamManager, router MessageRouter, stdLibs map[string]lua.LGFunction, moduleCache *RuntimeLuaModuleCache, once *sync.Once, localCache *RuntimeLuaLocalCache, storageIndex StorageIndex, matchCreateFn RuntimeMatchCreateFunction, eventFn RuntimeEventCustomFunction, announceCallbackFn func(RuntimeExecutionMode, string)) (*RuntimeLua, error) { vm := lua.NewState(lua.Options{ CallStackSize: config.GetRuntime().GetLuaCallStackSize(), RegistrySize: config.GetRuntime().GetLuaRegistrySize(), @@ -2319,9 +2392,10 @@ func newRuntimeLuaVM(logger *zap.Logger, db *sql.DB, protojsonMarshaler *protojs vm.Call(1, 0) } callbacks := &RuntimeLuaCallbacks{ - RPC: &MapOf[string, *lua.LFunction]{}, - Before: &MapOf[string, *lua.LFunction]{}, - After: &MapOf[string, *lua.LFunction]{}, + RPC: &MapOf[string, *lua.LFunction]{}, + Before: &MapOf[string, *lua.LFunction]{}, + After: &MapOf[string, *lua.LFunction]{}, + StorageIndexFilter: &MapOf[string, *lua.LFunction]{}, } registerCallbackFn := func(e RuntimeExecutionMode, key string, fn *lua.LFunction) { switch e { @@ -2347,9 +2421,11 @@ func newRuntimeLuaVM(logger *zap.Logger, db *sql.DB, protojsonMarshaler *protojs callbacks.PurchaseNotificationGoogle = fn case RuntimeExecutionModeSubscriptionNotificationGoogle: callbacks.SubscriptionNotificationGoogle = fn + case RuntimeExecutionModeStorageIndexFilter: + callbacks.StorageIndexFilter.Store(key, fn) } } - nakamaModule := NewRuntimeLuaNakamaModule(logger, db, protojsonMarshaler, protojsonUnmarshaler, config, version, socialClient, leaderboardCache, rankCache, leaderboardScheduler, sessionRegistry, sessionCache, statusRegistry, matchRegistry, tracker, metrics, streamManager, router, once, localCache, matchCreateFn, eventFn, registerCallbackFn, announceCallbackFn) + nakamaModule := NewRuntimeLuaNakamaModule(logger, db, protojsonMarshaler, protojsonUnmarshaler, config, version, socialClient, leaderboardCache, rankCache, leaderboardScheduler, sessionRegistry, sessionCache, statusRegistry, matchRegistry, tracker, metrics, streamManager, router, once, localCache, storageIndex, matchCreateFn, eventFn, registerCallbackFn, announceCallbackFn) vm.PreloadModule("nakama", nakamaModule.Loader) r := &RuntimeLua{ logger: logger, diff --git a/server/runtime_lua_match_core.go b/server/runtime_lua_match_core.go index 859dbf6842..2f76405200 100644 --- a/server/runtime_lua_match_core.go +++ b/server/runtime_lua_match_core.go @@ -65,7 +65,7 @@ type RuntimeLuaMatchCore struct { ctxCancelFn context.CancelFunc } -func NewRuntimeLuaMatchCore(logger *zap.Logger, module string, db *sql.DB, protojsonMarshaler *protojson.MarshalOptions, protojsonUnmarshaler *protojson.UnmarshalOptions, config Config, version string, socialClient *social.Client, leaderboardCache LeaderboardCache, rankCache LeaderboardRankCache, leaderboardScheduler LeaderboardScheduler, sessionRegistry SessionRegistry, sessionCache SessionCache, statusRegistry 
*StatusRegistry, matchRegistry MatchRegistry, tracker Tracker, metrics Metrics, streamManager StreamManager, router MessageRouter, stdLibs map[string]lua.LGFunction, once *sync.Once, localCache *RuntimeLuaLocalCache, eventFn RuntimeEventCustomFunction, sharedReg, sharedGlobals *lua.LTable, id uuid.UUID, node string, stopped *atomic.Bool, name string, matchProvider *MatchProvider) (RuntimeMatchCore, error) { +func NewRuntimeLuaMatchCore(logger *zap.Logger, module string, db *sql.DB, protojsonMarshaler *protojson.MarshalOptions, protojsonUnmarshaler *protojson.UnmarshalOptions, config Config, version string, socialClient *social.Client, leaderboardCache LeaderboardCache, rankCache LeaderboardRankCache, leaderboardScheduler LeaderboardScheduler, sessionRegistry SessionRegistry, sessionCache SessionCache, statusRegistry *StatusRegistry, matchRegistry MatchRegistry, tracker Tracker, metrics Metrics, streamManager StreamManager, router MessageRouter, stdLibs map[string]lua.LGFunction, once *sync.Once, localCache *RuntimeLuaLocalCache, eventFn RuntimeEventCustomFunction, sharedReg, sharedGlobals *lua.LTable, id uuid.UUID, node string, stopped *atomic.Bool, name string, matchProvider *MatchProvider, storageIndex StorageIndex) (RuntimeMatchCore, error) { // Set up the Lua VM that will handle this match. vm := lua.NewState(lua.Options{ CallStackSize: config.GetRuntime().GetLuaCallStackSize(), @@ -95,7 +95,7 @@ func NewRuntimeLuaMatchCore(logger *zap.Logger, module string, db *sql.DB, proto vm.Call(1, 0) } - nakamaModule := NewRuntimeLuaNakamaModule(logger, db, protojsonMarshaler, protojsonUnmarshaler, config, version, socialClient, leaderboardCache, rankCache, leaderboardScheduler, sessionRegistry, sessionCache, statusRegistry, matchRegistry, tracker, metrics, streamManager, router, once, localCache, matchProvider.CreateMatch, eventFn, nil, nil) + nakamaModule := NewRuntimeLuaNakamaModule(logger, db, protojsonMarshaler, protojsonUnmarshaler, config, version, socialClient, leaderboardCache, rankCache, leaderboardScheduler, sessionRegistry, sessionCache, statusRegistry, matchRegistry, tracker, metrics, streamManager, router, once, localCache, storageIndex, matchProvider.CreateMatch, eventFn, nil, nil) vm.PreloadModule("nakama", nakamaModule.Loader) } diff --git a/server/runtime_lua_nakama.go b/server/runtime_lua_nakama.go index be7dc9f768..886b6ba014 100644 --- a/server/runtime_lua_nakama.go +++ b/server/runtime_lua_nakama.go @@ -77,6 +77,7 @@ type RuntimeLuaNakamaModule struct { matchRegistry MatchRegistry tracker Tracker metrics Metrics + storageIndex StorageIndex streamManager StreamManager router MessageRouter once *sync.Once @@ -93,7 +94,7 @@ type RuntimeLuaNakamaModule struct { satori runtime.Satori } -func NewRuntimeLuaNakamaModule(logger *zap.Logger, db *sql.DB, protojsonMarshaler *protojson.MarshalOptions, protojsonUnmarshaler *protojson.UnmarshalOptions, config Config, version string, socialClient *social.Client, leaderboardCache LeaderboardCache, rankCache LeaderboardRankCache, leaderboardScheduler LeaderboardScheduler, sessionRegistry SessionRegistry, sessionCache SessionCache, statusRegistry *StatusRegistry, matchRegistry MatchRegistry, tracker Tracker, metrics Metrics, streamManager StreamManager, router MessageRouter, once *sync.Once, localCache *RuntimeLuaLocalCache, matchCreateFn RuntimeMatchCreateFunction, eventFn RuntimeEventCustomFunction, registerCallbackFn func(RuntimeExecutionMode, string, *lua.LFunction), announceCallbackFn func(RuntimeExecutionMode, string)) 
*RuntimeLuaNakamaModule { +func NewRuntimeLuaNakamaModule(logger *zap.Logger, db *sql.DB, protojsonMarshaler *protojson.MarshalOptions, protojsonUnmarshaler *protojson.UnmarshalOptions, config Config, version string, socialClient *social.Client, leaderboardCache LeaderboardCache, rankCache LeaderboardRankCache, leaderboardScheduler LeaderboardScheduler, sessionRegistry SessionRegistry, sessionCache SessionCache, statusRegistry *StatusRegistry, matchRegistry MatchRegistry, tracker Tracker, metrics Metrics, streamManager StreamManager, router MessageRouter, once *sync.Once, localCache *RuntimeLuaLocalCache, storageIndex StorageIndex, matchCreateFn RuntimeMatchCreateFunction, eventFn RuntimeEventCustomFunction, registerCallbackFn func(RuntimeExecutionMode, string, *lua.LFunction), announceCallbackFn func(RuntimeExecutionMode, string)) *RuntimeLuaNakamaModule { return &RuntimeLuaNakamaModule{ logger: logger, db: db, @@ -115,6 +116,7 @@ func NewRuntimeLuaNakamaModule(logger *zap.Logger, db *sql.DB, protojsonMarshale router: router, once: once, localCache: localCache, + storageIndex: storageIndex, registerCallbackFn: registerCallbackFn, announceCallbackFn: announceCallbackFn, httpClient: &http.Client{}, @@ -139,6 +141,8 @@ func (n *RuntimeLuaNakamaModule) Loader(l *lua.LState) int { "register_tournament_end": n.registerTournamentEnd, "register_tournament_reset": n.registerTournamentReset, "register_leaderboard_reset": n.registerLeaderboardReset, + "register_storage_index": n.registerStorageIndex, + "register_storage_index_filter": n.registerStorageIndexFilter, "run_once": n.runOnce, "get_context": n.getContext, "event": n.event, @@ -298,6 +302,7 @@ func (n *RuntimeLuaNakamaModule) Loader(l *lua.LState) int { "channel_message_remove": n.channelMessageRemove, "channel_messages_list": n.channelMessagesList, "channel_id_build": n.channelIdBuild, + "storage_index_list": n.storageIndexList, "get_satori": n.getSatori, } @@ -496,6 +501,53 @@ func (n *RuntimeLuaNakamaModule) registerLeaderboardReset(l *lua.LState) int { return 0 } +// @group storage +// @summary Create a new storage index. +// @param indexName(type=string) Name of the index to create. +// @param collection(type=string) Collection of storage engine to index objects from. +// @param key(type=string) Key of storage objects to index. Set to empty string to index all objects of collection. +// @param fields(type=table) A table of strings with the keys of the storage object whose values are to be indexed. +// @param maxEntries(type=int) Maximum number of entries kept in the index. +// @return error(error) An optional error value if an error occurred. +func (n *RuntimeLuaNakamaModule) registerStorageIndex(l *lua.LState) int { + idxName := l.CheckString(1) + collection := l.CheckString(2) + key := l.CheckString(3) + fieldsTable := l.CheckTable(4) + fields := make([]string, 0, fieldsTable.Len()) + fieldsTable.ForEach(func(k, v lua.LValue) { + if v.Type() != lua.LTString { + l.ArgError(4, "expects each field to be string") + return + } + fields = append(fields, v.String()) + }) + maxEntries := l.CheckInt(5) + + if err := n.storageIndex.CreateIndex(context.Background(), idxName, collection, key, fields, maxEntries); err != nil { + l.RaiseError("failed to create storage index: %s", err.Error()) + } + + return 0 +} + +// @group storage +// @summary Register a filter function for a storage index. +// @param indexName(type=string) Name of the index the filter function applies to.
+// @param fn(type=function) A function reference which will be executed on each storage object to be written that is a candidate for the index. The function must return true to include the object in the index, or false to exclude it. +// @return error(error) An optional error value if an error occurred. +func (n *RuntimeLuaNakamaModule) registerStorageIndexFilter(l *lua.LState) int { + idxName := l.CheckString(1) + fn := l.CheckFunction(2) + + if n.registerCallbackFn != nil { + n.registerCallbackFn(RuntimeExecutionModeStorageIndexFilter, idxName, fn) + } + if n.announceCallbackFn != nil { + n.announceCallbackFn(RuntimeExecutionModeStorageIndexFilter, idxName) + } + return 0 +} + // @group hooks // @summary Registers a function to be run only once. // @param fn(type=function) A function reference which will be executed only once. @@ -5718,6 +5770,33 @@ func (n *RuntimeLuaNakamaModule) storageWrite(l *lua.LState) int { return 1 } + ops, err := tableToStorageWrites(l, dataTable) + if err != nil { + return 0 + } + + acks, _, err := StorageWriteObjects(l.Context(), n.logger, n.db, n.metrics, n.storageIndex, true, ops) + if err != nil { + l.RaiseError(fmt.Sprintf("failed to write storage objects: %s", err.Error())) + return 0 + } + + lv := l.CreateTable(len(acks.Acks), 0) + for i, k := range acks.Acks { + kt := l.CreateTable(0, 4) + kt.RawSetString("key", lua.LString(k.Key)) + kt.RawSetString("collection", lua.LString(k.Collection)) + kt.RawSetString("user_id", lua.LString(k.UserId)) + kt.RawSetString("version", lua.LString(k.Version)) + + lv.RawSetInt(i+1, kt) + } + l.Push(lv) + return 1 +} + +func tableToStorageWrites(l *lua.LState, dataTable *lua.LTable) (StorageOpWrites, error) { + size := dataTable.Len() ops := make(StorageOpWrites, 0, size) conversionError := false dataTable.ForEach(func(k, v lua.LValue) { @@ -5851,28 +5930,38 @@ func (n *RuntimeLuaNakamaModule) storageWrite(l *lua.LState) int { Object: d, }) }) - if conversionError { - return 0 - } - acks, _, err := StorageWriteObjects(l.Context(), n.logger, n.db, n.metrics, true, ops) - if err != nil { - l.RaiseError(fmt.Sprintf("failed to write storage objects: %s", err.Error())) - return 0 - } + return ops, nil +} - lv := l.CreateTable(len(acks.Acks), 0) - for i, k := range acks.Acks { - kt := l.CreateTable(0, 4) - kt.RawSetString("key", lua.LString(k.Key)) - kt.RawSetString("collection", lua.LString(k.Collection)) - kt.RawSetString("user_id", lua.LString(k.UserId)) - kt.RawSetString("version", lua.LString(k.Version)) +func storageOpWritesToTable(l *lua.LState, ops StorageOpWrites) (*lua.LTable, error) { + lv := l.CreateTable(len(ops), 0) + for i, v := range ops { + vt := l.CreateTable(0, 7) + vt.RawSetString("key", lua.LString(v.Object.Key)) + vt.RawSetString("collection", lua.LString(v.Object.Collection)) + if v.OwnerID != "" { + vt.RawSetString("user_id", lua.LString(v.OwnerID)) + } else { + vt.RawSetString("user_id", lua.LNil) + } + vt.RawSetString("version", lua.LString(v.Object.Version)) + vt.RawSetString("permission_read", lua.LNumber(v.Object.PermissionRead.GetValue())) + vt.RawSetString("permission_write", lua.LNumber(v.Object.PermissionWrite.GetValue())) - lv.RawSetInt(i+1, kt) + valueMap := make(map[string]interface{}) + err := json.Unmarshal([]byte(v.Object.Value), &valueMap) + if err != nil { + l.RaiseError(fmt.Sprintf("failed to convert value to json: %s", err.Error())) + return nil, err + } + valueTable := RuntimeLuaConvertMap(l, valueMap) + vt.RawSetString("value", valueTable) + + lv.RawSetInt(i+1, vt) } - l.Push(lv) - return 1 + + return lv, nil } // @group storage @@ -5987,7 +6076,7 @@ func (n *RuntimeLuaNakamaModule) storageDelete(l
*lua.LState) int { return 0 } - if _, err := StorageDeleteObjects(l.Context(), n.logger, n.db, true, ops); err != nil { + if _, err := StorageDeleteObjects(l.Context(), n.logger, n.db, n.storageIndex, true, ops); err != nil { l.RaiseError(fmt.Sprintf("failed to remove storage: %s", err.Error())) } @@ -9628,7 +9717,8 @@ func (n *RuntimeLuaNakamaModule) channelMessagesList(l *lua.LState) int { limit := l.OptInt(2, 100) if limit < 1 || limit > 100 { - + l.ArgError(2, "limit must be 1-100") + return 0 } forward := l.OptBool(3, true) @@ -9707,9 +9797,8 @@ func (n *RuntimeLuaNakamaModule) channelIdBuild(l *lua.LState) int { target := l.CheckString(2) chanType := l.CheckInt(3) - if chanType < 1 || chanType > 3 { - l.RaiseError("invalid channel type: expects value 1-3") + l.ArgError(3, "invalid channel type: expects value 1-3") return 0 } @@ -9730,6 +9819,60 @@ func (n *RuntimeLuaNakamaModule) channelIdBuild(l *lua.LState) int { return 1 } +// @group storage +// @summary List storage index entries +// @param indexName(type=string) Name of the index to list entries from. +// @param queryString(type=string) Query to filter index entries. +// @param limit(type=int) Maximum number of results to be returned. +// @return objects(table) A list of storage objects. +// @return error(error) An optional error value if an error occurred. +func (n *RuntimeLuaNakamaModule) storageIndexList(l *lua.LState) int { + idxName := l.CheckString(1) + queryString := l.CheckString(2) + limit := l.OptInt(3, 100) + if limit < 1 || limit > 100 { + l.ArgError(3, "invalid limit: expects value 1-100") + return 0 + } + + objectList, err := n.storageIndex.List(l.Context(), idxName, queryString, limit) + if err != nil { + l.RaiseError(err.Error()) + return 0 + } + + lv := l.CreateTable(len(objectList.GetObjects()), 0) + for i, v := range objectList.GetObjects() { + vt := l.CreateTable(0, 9) + vt.RawSetString("key", lua.LString(v.Key)) + vt.RawSetString("collection", lua.LString(v.Collection)) + if v.UserId != "" { + vt.RawSetString("user_id", lua.LString(v.UserId)) + } else { + vt.RawSetString("user_id", lua.LNil) + } + vt.RawSetString("version", lua.LString(v.Version)) + vt.RawSetString("permission_read", lua.LNumber(v.PermissionRead)) + vt.RawSetString("permission_write", lua.LNumber(v.PermissionWrite)) + vt.RawSetString("create_time", lua.LNumber(v.CreateTime.Seconds)) + vt.RawSetString("update_time", lua.LNumber(v.UpdateTime.Seconds)) + + valueMap := make(map[string]interface{}) + err = json.Unmarshal([]byte(v.Value), &valueMap) + if err != nil { + l.RaiseError(fmt.Sprintf("failed to convert value to json: %s", err.Error())) + return 0 + } + valueTable := RuntimeLuaConvertMap(l, valueMap) + vt.RawSetString("value", valueTable) + + lv.RawSetInt(i+1, vt) + } + l.Push(lv) + + return 1 +} + // @group satori // @summary Get the Satori client. // @return satori(table) The satori client. 
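For reference, a Go runtime module could drive the same storage index surface through the Initializer and NakamaModule interfaces extended later in this patch (RegisterStorageIndex, RegisterStorageIndexFilter, StorageIndexList). This is a minimal sketch rather than part of the change itself: the index name, collection, and field names are illustrative, and the filter assumes the StorageWrite shape already used by nk.StorageWrite.

package main

import (
	"context"
	"database/sql"

	"github.com/heroiclabs/nakama-common/runtime"
)

// InitModule registers an example index over a hypothetical "profiles" collection.
func InitModule(ctx context.Context, logger runtime.Logger, db *sql.DB, nk runtime.NakamaModule, initializer runtime.Initializer) error {
	// Index the "level" and "score" fields of every object in the collection, keeping at most 10000 entries.
	if err := initializer.RegisterStorageIndex("profiles_idx", "profiles", "", []string{"level", "score"}, 10000); err != nil {
		return err
	}
	// Only objects the filter returns true for are kept in the index.
	if err := initializer.RegisterStorageIndexFilter("profiles_idx", func(ctx context.Context, logger runtime.Logger, db *sql.DB, nk runtime.NakamaModule, write *runtime.StorageWrite) bool {
		return write.PermissionRead > 0
	}); err != nil {
		return err
	}
	// Indexed entries can later be queried with nk.StorageIndexList(ctx, "profiles_idx", query, limit).
	return nil
}

The Lua counterparts registered above (register_storage_index, register_storage_index_filter, storage_index_list) take the same parameters and follow the same semantics.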
diff --git a/server/runtime_test.go b/server/runtime_test.go index f819df53e5..465bcf32b3 100644 --- a/server/runtime_test.go +++ b/server/runtime_test.go @@ -100,7 +100,7 @@ func runtimeWithModulesWithData(t *testing.T, modules map[string]string) (*Runti leaderboardRankCache: lbRankCache, } - rt, rtInfo, err := NewRuntime(ctx, logger, logger, db, protojsonMarshaler, protojsonUnmarshaler, cfg, "", nil, lbCache, lbRankCache, lbSched, nil, nil, nil, nil, nil, metrics, nil, &DummyMessageRouter{}) + rt, rtInfo, err := NewRuntime(ctx, logger, logger, db, protojsonMarshaler, protojsonUnmarshaler, cfg, "", nil, lbCache, lbRankCache, lbSched, nil, nil, nil, nil, nil, metrics, nil, &DummyMessageRouter{}, storageIdx) return rt, rtInfo, data, err } @@ -380,7 +380,7 @@ nakama.register_rpc(test.printWorld, "helloworld")`, db := NewDB(t) pipeline := NewPipeline(logger, cfg, db, protojsonMarshaler, protojsonUnmarshaler, nil, nil, nil, nil, nil, nil, nil, runtime) - apiServer := StartApiServer(logger, logger, db, protojsonMarshaler, protojsonUnmarshaler, cfg, "", nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, metrics, pipeline, runtime) + apiServer := StartApiServer(logger, logger, db, protojsonMarshaler, protojsonUnmarshaler, cfg, "", nil, storageIdx, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, metrics, pipeline, runtime) defer apiServer.Stop() WaitForSocket(nil, cfg) diff --git a/server/storage_index.go b/server/storage_index.go new file mode 100644 index 0000000000..cb126ed9c4 --- /dev/null +++ b/server/storage_index.go @@ -0,0 +1,574 @@ +// Copyright 2023 The Nakama Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
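The filter functions consulted by this file follow a simple boolean contract: LocalStorageIndex.Write treats a true return as an upsert of the object into the index and a false return as a deletion from it. A minimal sketch of such a filter, assuming the StorageOpWrite type from this package and a purely illustrative "score" field in the object value:

package server

import (
	"context"
	"encoding/json"
)

// exampleScoreFilter is a hypothetical storage index filter: it keeps only
// objects whose JSON value contains a positive "score" field.
func exampleScoreFilter(ctx context.Context, write *StorageOpWrite) (bool, error) {
	var value map[string]interface{}
	if err := json.Unmarshal([]byte(write.Object.Value), &value); err != nil {
		return false, err
	}
	score, ok := value["score"].(float64)
	return ok && score > 0, nil
}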
+ +package server + +import ( + "context" + "database/sql" + "encoding/json" + "errors" + "fmt" + "github.com/blugelabs/bluge" + "github.com/blugelabs/bluge/index" + "github.com/blugelabs/bluge/search" + "github.com/gofrs/uuid/v5" + "github.com/heroiclabs/nakama-common/api" + "github.com/jackc/pgtype" + "go.uber.org/zap" + "google.golang.org/protobuf/types/known/wrapperspb" + "time" +) + +type StorageIndex interface { + Write(ctx context.Context, objects StorageOpWrites) (map[string]StorageOpWrites, map[string]StorageOpDeletes) + Delete(ctx context.Context, objects StorageOpDeletes) map[string]StorageOpDeletes + List(ctx context.Context, indexName, query string, limit int) (*api.StorageObjects, error) + Load(ctx context.Context) error + CreateIndex(ctx context.Context, name, collection, key string, fields []string, maxEntries int) error + RegisterFilters(runtime *Runtime) +} + +type storageIndex struct { + Name string + MaxEntries int + Collection string + Key string + Fields []string + Index *bluge.Writer +} + +type LocalStorageIndex struct { + logger *zap.Logger + db *sql.DB + indexByName map[string]*storageIndex + indicesByCollection map[string][]*storageIndex + customFilterFunctions map[string]RuntimeStorageIndexFilterFunction +} + +func NewLocalStorageIndex(logger *zap.Logger, db *sql.DB) (StorageIndex, error) { + lsc := &LocalStorageIndex{ + logger: logger, + db: db, + indexByName: make(map[string]*storageIndex), + indicesByCollection: make(map[string][]*storageIndex), + customFilterFunctions: make(map[string]RuntimeStorageIndexFilterFunction), + } + + return lsc, nil +} + +func (si *LocalStorageIndex) Write(ctx context.Context, storageWrites StorageOpWrites) (updates map[string]StorageOpWrites, deletes map[string]StorageOpDeletes) { + batches := make(map[*storageIndex]*index.Batch, 0) + updates = make(map[string]StorageOpWrites, 0) + deletes = make(map[string]StorageOpDeletes, 0) + + updateTime := time.Now() + + for _, so := range storageWrites { + indices, found := si.indicesByCollection[so.Object.Collection] + if !found { + continue + } + + for _, idx := range indices { + if idx.Key == "" || idx.Key == so.Object.Key { + batch, ok := batches[idx] + if !ok { + batch = bluge.NewBatch() + batches[idx] = batch + } + + if fn, ok := si.customFilterFunctions[idx.Name]; ok { + insertWrite, err := fn(ctx, so) // true = upsert, false = delete + if err != nil { + si.logger.Error("Error invoking custom Storage Index Filter function", zap.String("index_name", idx.Name), zap.Error(err)) + continue + } + + if !insertWrite { + // Delete existing document from index, if any. + docId := si.storageIndexDocumentId(so.Object.Collection, so.Object.Key, so.OwnerID) + batch.Delete(docId) + + if ds, ok := deletes[idx.Name]; ok { + deletes[idx.Name] = append(ds, &StorageOpDelete{ + OwnerID: so.OwnerID, + ObjectID: &api.DeleteStorageObjectId{ + Collection: so.Object.Collection, + Key: so.Object.Key, + // Blank Version as it is irrelevant for storage index deletes. + }, + }) + } else { + deletes[idx.Name] = StorageOpDeletes{&StorageOpDelete{ + OwnerID: so.OwnerID, + ObjectID: &api.DeleteStorageObjectId{ + Collection: so.Object.Collection, + Key: so.Object.Key, + // Blank Version as it is irrelevant for storage index deletes. 
+ }, + }} + } + + continue + } + } + + doc, err := si.mapIndexStorageFields(so.OwnerID, so.Object.Collection, so.Object.Key, so.Object.Version, so.Object.Value, idx.Fields, updateTime) + if err != nil { + si.logger.Error("Failed to map storage object values to index", zap.Error(err)) + continue + } + + if doc == nil { + continue + } + + batch.Update(doc.ID(), doc) + + if u, ok := updates[idx.Name]; ok { + updates[idx.Name] = append(u, so) + } else { + updates[idx.Name] = StorageOpWrites{so} + } + } + } + } + + for idx, b := range batches { + if err := idx.Index.Batch(b); err != nil { + si.logger.Error("Failed to update index", zap.String("index_name", idx.Name), zap.Error(err)) + continue + } + + reader, err := idx.Index.Reader() + if err != nil { + si.logger.Error("Failed to get index storage reader", zap.Error(err)) + continue + } + count, _ := reader.Count() // cannot return err + + // Apply eviction strategy if size of index is +10% than max size + if count > uint64(float32(idx.MaxEntries)*(1+0.1)) { + deleteCount := int(count - uint64(idx.MaxEntries)) + req := bluge.NewTopNSearch(deleteCount, bluge.NewMatchAllQuery()) + req.SortBy([]string{"update_time"}) + + results, err := reader.Search(ctx, req) + if err != nil { + si.logger.Error("Failed to evict storage index documents", zap.String("index_name", idx.Name)) + continue + } + + ids, err := si.queryMatchesToDocumentIds(results) + if err != nil { + si.logger.Error("Failed to get query results document ids", zap.Error(err)) + continue + } + + evictBatch := bluge.NewBatch() + for _, docID := range ids { + evictBatch.Delete(bluge.Identifier(docID)) + } + if err = idx.Index.Batch(evictBatch); err != nil { + si.logger.Error("Failed to update index", zap.String("index_name", idx.Name), zap.Error(err)) + } + } + } + + return updates, deletes +} + +func (si *LocalStorageIndex) Delete(ctx context.Context, deletes StorageOpDeletes) (ops map[string]StorageOpDeletes) { + batches := make(map[*storageIndex]*index.Batch, 0) + ops = make(map[string]StorageOpDeletes) + + for _, d := range deletes { + indices, found := si.indicesByCollection[d.ObjectID.Collection] + if !found { + return ops + } + + for _, idx := range indices { + batch, ok := batches[idx] + if !ok { + batch = bluge.NewBatch() + batches[idx] = batch + } + + docId := si.storageIndexDocumentId(d.ObjectID.Collection, d.ObjectID.Key, d.OwnerID) + batch.Delete(docId) + + if dels, ok := ops[idx.Name]; ok { + ops[idx.Name] = append(dels, d) + } else { + ops[idx.Name] = StorageOpDeletes{d} + } + } + } + + for idx, b := range batches { + if err := idx.Index.Batch(b); err != nil { + si.logger.Error("Failed to evict entries from index", zap.String("index_name", idx.Name), zap.Error(err)) + continue + } + } + + return ops +} + +func (si *LocalStorageIndex) List(ctx context.Context, indexName, query string, limit int) (*api.StorageObjects, error) { + idx, found := si.indexByName[indexName] + if !found { + return nil, fmt.Errorf("index %q not found", indexName) + } + + if limit > idx.MaxEntries { + si.logger.Warn("Attempted to list more index entries than configured maximum index size", zap.String("index_name", idx.Name), zap.Int("limit", limit), zap.Int("max_entries", idx.MaxEntries)) + } + + parsedQuery, err := ParseQueryString(query) + if err != nil { + return nil, err + } + + searchReq := bluge.NewTopNSearch(limit, parsedQuery) + + indexReader, err := idx.Index.Reader() + if err != nil { + return nil, err + } + + results, err := indexReader.Search(ctx, searchReq) + if err != nil { + return nil, 
err + } + + indexResults, err := si.queryMatchesToStorageIndexResults(results) + if err != nil { + return nil, err + } + + if len(indexResults) == 0 { + return &api.StorageObjects{Objects: []*api.StorageObject{}}, nil + } + + storageReads := make([]*api.ReadStorageObjectId, 0, len(indexResults)) + for _, idxResult := range indexResults { + storageReads = append(storageReads, &api.ReadStorageObjectId{ + Collection: idxResult.Collection, + Key: idxResult.Key, + UserId: idxResult.UserID, + }) + } + + objects, err := StorageReadObjects(ctx, si.logger, si.db, uuid.Nil, storageReads) + if err != nil { + return nil, err + } + + return objects, nil +} + +func (si *LocalStorageIndex) Load(ctx context.Context) error { + var rangeError error + for _, idx := range si.indexByName { + t := time.Now() + if err := si.load(ctx, idx); err != nil { + return err + } + + elapsedTimeMs := time.Since(t).Milliseconds() + si.logger.Info("Storage index loaded.", zap.Any("config", idx), zap.Int64("elapsed_time_ms", elapsedTimeMs)) + } + + return rangeError +} + +func (si *LocalStorageIndex) load(ctx context.Context, idx *storageIndex) error { + query := ` +SELECT user_id, key, version, value, read, write, update_time +FROM storage +WHERE collection = $1 +ORDER BY collection, key, user_id +LIMIT $2` + params := []any{idx.Collection, 10_000} + + if idx.Key != "" { + query = ` +SELECT user_id, key, version, value, read, write, update_time +FROM storage +WHERE collection = $1 AND key = $3 +ORDER BY collection, key, user_id +LIMIT $2` + params = append(params, idx.Key) + } + + filterFn := si.customFilterFunctions[idx.Name] + + var count int + for { + rows, err := si.db.QueryContext(ctx, query, params...) + if err != nil { + return err + } + + var rowsRead bool + batch := bluge.NewBatch() + var dbUserID *uuid.UUID + var dbKey string + for rows.Next() { + rowsRead = true + var dbVersion string + var dbValue string + var dbRead int32 + var dbWrite int32 + var dbUpdateTime pgtype.Timestamptz + if err = rows.Scan(&dbUserID, &dbKey, &dbVersion, &dbValue, &dbRead, &dbWrite, &dbUpdateTime); err != nil { + rows.Close() + return err + } + + if filterFn != nil { + ok, err := filterFn(ctx, &StorageOpWrite{ + OwnerID: dbUserID.String(), + Object: &api.WriteStorageObject{ + Collection: idx.Collection, + Key: dbKey, + Value: dbValue, + Version: dbVersion, + PermissionRead: wrapperspb.Int32(dbRead), + PermissionWrite: wrapperspb.Int32(dbWrite), + }, + }) + if err != nil { + si.logger.Error("Error invoking custom Storage Index Filter function", zap.String("index_name", idx.Name), zap.Error(err)) + } + if !ok { + continue + } + } + + doc, err := si.mapIndexStorageFields(dbUserID.String(), idx.Collection, dbKey, dbVersion, dbValue, idx.Fields, dbUpdateTime.Time) + if err != nil { + si.logger.Error("Failed to map storage object values to index", zap.Error(err)) + return err + } + + if doc == nil { + continue + } + + batch.Update(doc.ID(), doc) + count++ + if count >= idx.MaxEntries { + break + } + } + rows.Close() + + if err = idx.Index.Batch(batch); err != nil { + return err + } + + if count >= idx.MaxEntries || !rowsRead { + break + } + + query = ` +SELECT user_id, key, version, value, read, write, update_time +FROM storage +WHERE collection = $1 +AND (collection, key, user_id) > ($1, $3, $4) +ORDER BY collection, key, user_id +LIMIT $2` + if idx.Key != "" { + query = ` +SELECT user_id, key, version, value, read, write, update_time +FROM storage +WHERE collection = $1 +AND key = $3 +AND (collection, key, user_id) > ($1, $3, $4) +ORDER BY 
collection, key, user_id +LIMIT $2` + } + params = []any{idx.Collection, 10_000, dbKey, dbUserID} + } + + return nil +} + +func (sc *LocalStorageIndex) mapIndexStorageFields(userID, collection, key, version, value string, filters []string, updateTime time.Time) (*bluge.Document, error) { + if collection == "" || key == "" || userID == "" { + return nil, errors.New("insufficient fields to create index document id") + } + + var mapValue map[string]any + if err := json.Unmarshal([]byte(value), &mapValue); err != nil { + return nil, err + } + + if len(filters) > 0 { + // Store only subset fields of storage object value + filteredValues := make(map[string]any, len(filters)) + for _, f := range filters { + if _, found := mapValue[f]; found { + filteredValues[f] = mapValue[f] + } + } + mapValue = filteredValues + } + + if len(mapValue) == 0 { + return nil, nil + } + + rv := bluge.NewDocument(string(sc.storageIndexDocumentId(collection, key, userID))) + rv.AddField(bluge.NewDateTimeField("update_time", updateTime).StoreValue().Sortable()) + rv.AddField(bluge.NewKeywordField("collection", collection).StoreValue()) + rv.AddField(bluge.NewKeywordField("key", key).StoreValue()) + rv.AddField(bluge.NewKeywordField("user_id", userID).StoreValue()) + rv.AddField(bluge.NewKeywordField("version", version).StoreValue()) + + BlugeWalkDocument(mapValue, []string{}, rv) + + return rv, nil +} + +type indexResult struct { + Collection string + Key string + UserID string + Value string + Version string + UpdateTime time.Time +} + +func (si *LocalStorageIndex) queryMatchesToStorageIndexResults(dmi search.DocumentMatchIterator) ([]*indexResult, error) { + idxResults := make([]*indexResult, 0) + next, err := dmi.Next() + for err == nil && next != nil { + idxResult := &indexResult{} + err = next.VisitStoredFields(func(field string, value []byte) bool { + switch field { + case "collection": + idxResult.Collection = string(value) + case "key": + idxResult.Key = string(value) + case "user_id": + idxResult.UserID = string(value) + case "value": + idxResult.Value = string(value) + case "version": + idxResult.Version = string(value) + case "updateTime": + updateTime, vErr := bluge.DecodeDateTime(value) + if err != nil { + err = vErr + return false + } + idxResult.UpdateTime = updateTime + } + return true + }) + if err != nil { + return nil, err + } + idxResults = append(idxResults, idxResult) + next, err = dmi.Next() + } + if err != nil { + return nil, err + } + return idxResults, nil +} + +func (si *LocalStorageIndex) queryMatchesToDocumentIds(dmi search.DocumentMatchIterator) ([]string, error) { + next, err := dmi.Next() + ids := make([]string, 0) + for err == nil && next != nil { + next.VisitStoredFields(func(field string, value []byte) bool { + if field == "_id" { + ids = append(ids, string(value)) + return false + } + return true + }) + next, err = dmi.Next() + } + if err != nil { + return nil, err + } + return ids, nil +} + +func (si *LocalStorageIndex) CreateIndex(ctx context.Context, name, collection, key string, fields []string, maxEntries int) error { + if name == "" { + return errors.New("storage index 'name' must be set") + } + if collection == "" { + return errors.New("storage index 'collection' must be set") + } + if maxEntries < 1 { + return errors.New("storage Index 'max_entries' must be > 0") + } + if len(fields) < 1 { + return errors.New("storage Index 'fields' must contain at least one top level key to index") + } + + if _, ok := si.indexByName[name]; ok { + return fmt.Errorf("cannot create index: index 
with name %q already exists", name) + } + + idx, err := bluge.OpenWriter(BlugeInMemoryConfig()) + if err != nil { + return err + } + + storageIdx := &storageIndex{ + Name: name, + Collection: collection, + Key: key, + Fields: fields, + MaxEntries: maxEntries, + Index: idx, + } + si.indexByName[name] = storageIdx + + if indices, ok := si.indicesByCollection[collection]; ok { + si.indicesByCollection[collection] = append(indices, storageIdx) + } else { + si.indicesByCollection[collection] = []*storageIndex{storageIdx} + } + + si.logger.Info("Initialized storage engine index", zap.Any("config", storageIdx)) + + return nil +} + +func (si *LocalStorageIndex) RegisterFilters(runtime *Runtime) { + for name := range si.indexByName { + fn := runtime.StorageIndexFilterFunction(name) + if fn != nil { + si.customFilterFunctions[name] = fn + } + } +} + +func (si *LocalStorageIndex) storageIndexDocumentId(collection, key, userID string) bluge.Identifier { + id := fmt.Sprintf("%s.%s.%s", collection, key, userID) + + return bluge.Identifier(id) +} diff --git a/vendor/github.com/heroiclabs/nakama-common/runtime/runtime.go b/vendor/github.com/heroiclabs/nakama-common/runtime/runtime.go index bc56cbad29..48b9e30be7 100644 --- a/vendor/github.com/heroiclabs/nakama-common/runtime/runtime.go +++ b/vendor/github.com/heroiclabs/nakama-common/runtime/runtime.go @@ -308,7 +308,6 @@ It is made available to the InitModule function as an input parameter when the f NOTE: You must not cache the reference to this and reuse it as a later point as this could have unintended side effects. */ type Initializer interface { - /* RegisterRpc registers a function with the given ID. This ID can be used within client code to send an RPC message to execute the function and return the result. Results are always returned as a JSON string (or optionally empty string). @@ -837,6 +836,12 @@ type Initializer interface { // RegisterEventSessionStart can be used to define functions triggered when client sessions end. RegisterEventSessionEnd(fn func(ctx context.Context, logger Logger, evt *api.Event)) error + + // Register a new storage index. + RegisterStorageIndex(name, collection, key string, fields []string, maxEntries int) error + + // RegisterStorageIndexFilter can be used to define a filtering function for a given storage index.
+ RegisterStorageIndexFilter(indexName string, fn func(ctx context.Context, logger Logger, db *sql.DB, nk NakamaModule, write *StorageWrite) bool) error } type PresenceReason uint8 @@ -1065,6 +1070,7 @@ type NakamaModule interface { StorageRead(ctx context.Context, reads []*StorageRead) ([]*api.StorageObject, error) StorageWrite(ctx context.Context, writes []*StorageWrite) ([]*api.StorageObjectAck, error) StorageDelete(ctx context.Context, deletes []*StorageDelete) error + StorageIndexList(ctx context.Context, indexName, query string, limit int) (*api.StorageObjects, error) MultiUpdate(ctx context.Context, accountUpdates []*AccountUpdate, storageWrites []*StorageWrite, walletUpdates []*WalletUpdate, updateLedger bool) ([]*api.StorageObjectAck, []*WalletUpdateResult, error) diff --git a/vendor/modules.txt b/vendor/modules.txt index 42456db17b..9b445d8eee 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -151,7 +151,7 @@ github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/internal/genopena github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities -# github.com/heroiclabs/nakama-common v0.0.0-20230713130524-38774b285b66 +# github.com/heroiclabs/nakama-common v1.27.1-0.20230717184507-dff09d7c8047 ## explicit; go 1.19 github.com/heroiclabs/nakama-common/api github.com/heroiclabs/nakama-common/rtapi