diff --git a/.golangci-strict.yml b/.golangci-strict.yml
index fc5ffb0584..684c7afd34 100644
--- a/.golangci-strict.yml
+++ b/.golangci-strict.yml
@@ -34,37 +34,36 @@ linters:
#- unparam # Reports unused function parameters
- unused # (megacheck) Checks Go code for unused constants, variables, functions and types
disable:
- - asciicheck # Simple linter to check that your code does not contain non-ASCII identifiers
- - depguard # Go linter that checks if package imports are in a list of acceptable packages
- - dogsled # Checks assignments with too many blank identifiers # (e.g. x, _, _, _, := f())
- - err113 # Golang linter to check the errors handling expressions
- - funlen # Tool for detection of long functions
- - gochecknoglobals # Checks that no globals are present in Go code
- - gochecknoinits # Checks that no init functions are present in Go code
- - gocognit # Computes and checks the cognitive complexity of functions
- - gocyclo # Computes and checks the cyclomatic complexity of functions
- - godot # Check if comments end in a period
- - godox # Tool for detection of FIXME, TODO and other comment keywords
- - gofmt # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification
- - gomodguard # Allow and block list linter for direct Go module dependencies.
- - lll # Reports long lines
- - nestif # Reports deeply nested if statements
- - nolintlint # Reports ill-formed or insufficient nolint directives
- - rowserrcheck # checks whether Err of rows is checked successfully
- - stylecheck # Stylecheck is a replacement for golint
- - testpackage # linter that makes you use a separate _test package
- - whitespace # Tool for detection of leading and trailing whitespace
- - wsl # Whitespace Linter - Forces you to use empty lines!
+ # - asciicheck # Simple linter to check that your code does not contain non-ASCII identifiers
+ # - depguard # Go linter that checks if package imports are in a list of acceptable packages
+ # - dogsled # Checks assignments with too many blank identifiers # (e.g. x, _, _, _, := f())
+ # - err113 # Golang linter to check the errors handling expressions
+ # - funlen # Tool for detection of long functions
+ # - gochecknoglobals # Checks that no globals are present in Go code
+ # - gochecknoinits # Checks that no init functions are present in Go code
+ # - gocognit # Computes and checks the cognitive complexity of functions
+ # - gocyclo # Computes and checks the cyclomatic complexity of functions
+ # - godot # Check if comments end in a period
+ # - godox # Tool for detection of FIXME, TODO and other comment keywords
+ # - gofmt # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification
+ # - gomodguard # Allow and block list linter for direct Go module dependencies.
+ # - lll # Reports long lines
+ # - nestif # Reports deeply nested if statements
+ # - rowserrcheck # checks whether Err of rows is checked successfully
+ # - stylecheck # Stylecheck is a replacement for golint
+ # - testpackage # linter that makes you use a separate _test package
+ # - whitespace # Tool for detection of leading and trailing whitespace
+ # - wsl # Whitespace Linter - Forces you to use empty lines!
# Once fixed, should enable
- - dupl # Tool for code clone detection
- - goconst # Finds repeated strings that could be replaced by a constant
- - goprintffuncname # Checks that printf-like functions are named with `f` at the end
- - gosec # (gas) Inspects source code for security problems
+ # - dupl # Tool for code clone detection
+ # - goconst # Finds repeated strings that could be replaced by a constant
+ # - goprintffuncname # Checks that printf-like functions are named with `f` at the end
+ # - gosec # (gas) Inspects source code for security problems
- gosimple # (megacheck) Linter for Go source code that specializes in simplifying a code
- - nakedret # Finds naked returns in functions greater than a specified function length
- - prealloc # Finds slice declarations that could potentially be preallocated
- - revive # Golint differs from gofmt. Gofmt reformats Go source code, whereas golint prints out style mistakes
- - unparam # Reports unused function parameters
+ # - nakedret # Finds naked returns in functions greater than a specified function length
+ # - prealloc # Finds slice declarations that could potentially be preallocated
+ # - revive # Golint differs from gofmt. Gofmt reformats Go source code, whereas golint prints out style mistakes
+ # - unparam # Reports unused function parameters
# Don't enable fieldalignment, changing the field alignment requires checking to see if anyone uses constructors
# without names. If there is a memory issue on a specific field, that is best found with a heap profile.
@@ -76,7 +75,9 @@ linters:
# Disable goconst in test files, often we have duplicated strings across tests, but don't make sense as constants.
issues:
exclude-rules:
- - path: (_test\.go|utilities_testing\.go)
+    # cover _test.go files and _testing.go utility files
+    # e.g. base/util_testing.go, rest/utilities_testing*.go
+ - path: (_test.*\.go)
linters:
- goconst
- path: rest/debug.go
@@ -104,4 +105,5 @@ linters-settings:
- wrapperFunc
settings:
ruleguard:
- rules: '${configDir}/ruleguard/*.go'
+ rules: '${configDir}/ruleguard/rules-*.go'
+ failOn: all
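With this change gocritic's ruleguard loads only rule files named rules-*.go instead of every .go file in the directory, and failOn: all turns a rules file that fails to load into a hard lint error rather than being silently skipped. A minimal sketch of a rules file the new glob would pick up; the file name ruleguard/rules-time.go and the rule itself are hypothetical, using go-ruleguard's published dsl package:

//go:build ruleguard

package gorules

import "github.com/quasilyte/go-ruleguard/dsl"

// timeSince flags time.Now().Sub(x) in favour of the equivalent time.Since(x).
func timeSince(m dsl.Matcher) {
	m.Match(`time.Now().Sub($x)`).
		Report(`use time.Since($x) instead`).
		Suggest(`time.Since($x)`)
}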
diff --git a/.golangci.yml b/.golangci.yml
index 352043eb79..acc0ddb06a 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -33,28 +33,27 @@ linters:
- unconvert # Remove unnecessary type conversions
- unparam # Reports unused function parameters
- unused # (megacheck) Checks Go code for unused constants, variables, functions and types
- disable:
- - asciicheck # Simple linter to check that your code does not contain non-ASCII identifiers
- - depguard # Go linter that checks if package imports are in a list of acceptable packages
- - dogsled # Checks assignments with too many blank identifiers # (e.g. x, _, _, _, := f())
- - err113 # Golang linter to check the errors handling expressions
- - funlen # Tool for detection of long functions
- - gochecknoglobals # Checks that no globals are present in Go code
- - gochecknoinits # Checks that no init functions are present in Go code
- - gocognit # Computes and checks the cognitive complexity of functions
- - gocyclo # Computes and checks the cyclomatic complexity of functions
- - godot # Check if comments end in a period
- - godox # Tool for detection of FIXME, TODO and other comment keywords
- - gofmt # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification
- - gomodguard # Allow and block list linter for direct Go module dependencies.
- - lll # Reports long lines
- - nestif # Reports deeply nested if statements
- - nolintlint # Reports ill-formed or insufficient nolint directives
- - rowserrcheck # checks whether Err of rows is checked successfully
- - stylecheck # Stylecheck is a replacement for golint
- - testpackage # linter that makes you use a separate _test package
- - whitespace # Tool for detection of leading and trailing whitespace
- - wsl # Whitespace Linter - Forces you to use empty lines!
+ # disable:
+ # - asciicheck # Simple linter to check that your code does not contain non-ASCII identifiers
+ # - depguard # Go linter that checks if package imports are in a list of acceptable packages
+ # - dogsled # Checks assignments with too many blank identifiers # (e.g. x, _, _, _, := f())
+ # - err113 # Golang linter to check the errors handling expressions
+ # - funlen # Tool for detection of long functions
+ # - gochecknoglobals # Checks that no globals are present in Go code
+ # - gochecknoinits # Checks that no init functions are present in Go code
+ # - gocognit # Computes and checks the cognitive complexity of functions
+ # - gocyclo # Computes and checks the cyclomatic complexity of functions
+ # - godot # Check if comments end in a period
+ # - godox # Tool for detection of FIXME, TODO and other comment keywords
+ # - gofmt # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification
+ # - gomodguard # Allow and block list linter for direct Go module dependencies.
+ # - lll # Reports long lines
+ # - nestif # Reports deeply nested if statements
+ # - rowserrcheck # checks whether Err of rows is checked successfully
+ # - stylecheck # Stylecheck is a replacement for golint
+ # - testpackage # linter that makes you use a separate _test package
+ # - whitespace # Tool for detection of leading and trailing whitespace
+ # - wsl # Whitespace Linter - Forces you to use empty lines!
linters-settings:
govet:
@@ -65,16 +64,19 @@ linters-settings:
- ruleguard
settings:
ruleguard:
- rules: '${configDir}/ruleguard/*.go'
+ rules: '${configDir}/ruleguard/rules-*.go'
+ failOn: all
# Disable goconst in test files, often we have duplicated strings across tests, but don't make sense as constants.
issues:
exclude-rules:
- - path: (_test\.go|utilities_testing\.go)
+    # cover _test.go files and _testing.go utility files
+    # e.g. base/util_testing.go, rest/utilities_testing*.go
+ - path: (_test.*\.go)
linters:
- goconst
- prealloc
- - path: (_test\.go|utilities_testing\.go)
+ - path: (_test.*\.go)
linters:
- govet
text: fieldalignment
diff --git a/auth/auth.go b/auth/auth.go
index a22ccfa02c..573d09dcaa 100644
--- a/auth/auth.go
+++ b/auth/auth.go
@@ -631,6 +631,8 @@ func (auth *Authenticator) UpdateUserEmail(u User, email string) error {
if err != nil {
return nil, err
}
+ currentUser.SetUpdatedAt()
+
return currentUser, nil
}
@@ -662,6 +664,7 @@ func (auth *Authenticator) rehashPassword(user User, password string) error {
if err != nil {
return nil, err
}
+ currentUserImpl.SetUpdatedAt()
return currentUserImpl, nil
} else {
return nil, base.ErrUpdateCancel
@@ -740,6 +743,7 @@ func (auth *Authenticator) DeleteRole(role Role, purge bool, deleteSeq uint64) e
}
p.setDeleted(true)
p.SetSequence(deleteSeq)
+ p.SetUpdatedAt()
// Update channel history for default collection
channelHistory := auth.calculateHistory(p.Name(), deleteSeq, p.Channels(), nil, p.ChannelHistory())
@@ -955,6 +959,8 @@ func (auth *Authenticator) RegisterNewUser(username, email string) (User, error)
base.WarnfCtx(auth.LogCtx, "Skipping SetEmail for user %q - Invalid email address provided: %q", base.UD(username), base.UD(email))
}
}
+ user.SetUpdatedAt()
+ user.SetCreatedAt(time.Now().UTC())
err = auth.Save(user)
if base.IsCasMismatch(err) {
diff --git a/auth/principal.go b/auth/principal.go
index ffe6c6535b..d1b3e46b7c 100644
--- a/auth/principal.go
+++ b/auth/principal.go
@@ -52,6 +52,12 @@ type Principal interface {
setDeleted(bool)
IsDeleted() bool
+	// SetUpdatedAt sets the updated time for the principal document
+ SetUpdatedAt()
+
+	// SetCreatedAt sets the created time for the principal document
+ SetCreatedAt(t time.Time)
+
// Principal includes the PrincipalCollectionAccess interface for operations against
// the _default._default collection (stored directly on the principal for backward
// compatibility)
diff --git a/auth/role.go b/auth/role.go
index 1625b6769a..3cc23d07a8 100644
--- a/auth/role.go
+++ b/auth/role.go
@@ -33,6 +33,8 @@ type roleImpl struct {
ChannelInvalSeq uint64 `json:"channel_inval_seq,omitempty"` // Sequence at which the channels were invalidated. Data remains in Channels_ for history calculation.
Deleted bool `json:"deleted,omitempty"`
CollectionsAccess map[string]map[string]*CollectionAccess `json:"collection_access,omitempty"` // Nested maps of CollectionAccess, indexed by scope and collection name
+ UpdatedAt time.Time `json:"updated_at"`
+ CreatedAt time.Time `json:"created_at"`
cas uint64
docID string // key used to store the roleImpl
}
@@ -277,6 +279,14 @@ func (role *roleImpl) Name() string {
return role.Name_
}
+func (role *roleImpl) SetUpdatedAt() {
+ role.UpdatedAt = time.Now().UTC()
+}
+
+func (role *roleImpl) SetCreatedAt(t time.Time) {
+ role.CreatedAt = t
+}
+
func (role *roleImpl) Sequence() uint64 {
return role.Sequence_
}
diff --git a/base/collection_xattr_test.go b/base/collection_xattr_test.go
index b69444e544..7d193c585e 100644
--- a/base/collection_xattr_test.go
+++ b/base/collection_xattr_test.go
@@ -1324,11 +1324,8 @@ func TestWriteWithXattrsInsertAndDeleteError(t *testing.T) {
func requireXattrsEqual(t testing.TB, expected map[string][]byte, actual map[string][]byte) {
require.Len(t, actual, len(expected), "Expected xattrs to be the same length %v, got %v", expected, actual)
for k, v := range expected {
- actualV, ok := actual[k]
- if !ok {
- require.Fail(t, "Missing expected xattr %s", k)
- }
- require.JSONEq(t, string(v), string(actualV))
+ require.Contains(t, actual, k)
+ require.JSONEq(t, string(v), string(actual[k]))
}
}
diff --git a/base/dcp_test.go b/base/dcp_test.go
index ca5c8c7617..52c9b5eecc 100644
--- a/base/dcp_test.go
+++ b/base/dcp_test.go
@@ -466,7 +466,7 @@ func TestConcurrentCBGTIndexCreation(t *testing.T) {
case <-terminatorChan:
context.Manager.Stop()
case <-time.After(20 * time.Second):
- assert.Fail(t, "manager goroutine not terminated: %v", managerUUID)
+ require.Fail(t, fmt.Sprintf("manager goroutine not terminated: %v", managerUUID))
}
}(i, terminator)
diff --git a/base/leaky_bucket.go b/base/leaky_bucket.go
index d5042b3b83..062ac8b9ec 100644
--- a/base/leaky_bucket.go
+++ b/base/leaky_bucket.go
@@ -125,7 +125,12 @@ type LeakyBucketConfig struct {
// Returns a partial error the first time ViewCustom is called
FirstTimeViewCustomPartialError bool
- PostQueryCallback func(ddoc, viewName string, params map[string]interface{}) // Issues callback after issuing query when bucket.ViewQuery is called
+
+	// QueryCallback allows tests to set a callback that will be invoked before a view query is issued
+ QueryCallback func(ddoc, viewName string, params map[string]any) error
+ PostQueryCallback func(ddoc, viewName string, params map[string]interface{}) // Issues callback after issuing query when bucket.ViewQuery is called
+
+	N1QLQueryCallback func(ctx context.Context, statement string, params map[string]any, consistency ConsistencyMode, adhoc bool) error // allows tests to inject an error before a N1QL query is issued
PostN1QLQueryCallback func()
diff --git a/base/leaky_datastore.go b/base/leaky_datastore.go
index 2e6f2952f1..ab1e7854ef 100644
--- a/base/leaky_datastore.go
+++ b/base/leaky_datastore.go
@@ -241,6 +241,12 @@ func (lds *LeakyDataStore) ViewQuery(ctx context.Context, ddoc, name string, par
if !ok {
return nil, errors.New("bucket does not support views")
}
+ if lds.config.QueryCallback != nil {
+ err := lds.config.QueryCallback(ddoc, name, params)
+ if err != nil {
+ return nil, err
+ }
+ }
iterator, err := vs.ViewQuery(ctx, ddoc, name, params)
if lds.config.FirstTimeViewCustomPartialError {
@@ -324,10 +330,14 @@ func (lds *LeakyDataStore) SetFirstTimeViewCustomPartialError(val bool) {
lds.config.FirstTimeViewCustomPartialError = val
}
-func (lds *LeakyDataStore) SetPostQueryCallback(callback func(ddoc, viewName string, params map[string]interface{})) {
+func (lds *LeakyDataStore) SetPostQueryCallback(callback func(ddoc, viewName string, params map[string]any)) {
lds.config.PostQueryCallback = callback
}
+func (lds *LeakyDataStore) SetQueryCallback(fn func(ddoc, viewName string, params map[string]any) error) {
+ lds.config.QueryCallback = fn
+}
+
func (lds *LeakyDataStore) SetPostN1QLQueryCallback(callback func()) {
lds.config.PostN1QLQueryCallback = callback
}
@@ -447,6 +457,12 @@ func (lds *LeakyDataStore) Query(ctx context.Context, statement string, params m
if err != nil {
return nil, err
}
+ if lds.config.N1QLQueryCallback != nil {
+ err := lds.config.N1QLQueryCallback(ctx, statement, params, consistency, adhoc)
+ if err != nil {
+ return nil, err
+ }
+ }
iterator, err := n1qlStore.Query(ctx, statement, params, consistency, adhoc)
if lds.config.PostN1QLQueryCallback != nil {
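The new QueryCallback and N1QLQueryCallback hooks fire before a view or N1QL query is issued and may return an error to fail the query, complementing the existing Post*QueryCallback hooks that fire afterwards. A sketch of a test using the view hook to simulate a one-time transient failure (assumes a LeakyDataStore from the usual test helpers and the "errors" import):

failed := false // flip after the first injected error so later queries succeed
lds.SetQueryCallback(func(ddoc, viewName string, params map[string]any) error {
	if !failed {
		failed = true
		return errors.New("simulated transient view query failure")
	}
	return nil
})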
diff --git a/base/main_test_bucket_pool.go b/base/main_test_bucket_pool.go
index 31fd3a75fc..6803ded7ee 100644
--- a/base/main_test_bucket_pool.go
+++ b/base/main_test_bucket_pool.go
@@ -121,6 +121,7 @@ func NewTestBucketPoolWithOptions(ctx context.Context, bucketReadierFunc TBPBuck
unclosedBuckets: make(map[string]map[string]struct{}),
useExistingBucket: TestUseExistingBucket(),
useDefaultScope: options.UseDefaultScope,
+ skipMobileXDCR: false,
}
tbp.cluster = newTestCluster(ctx, UnitTestUrl(), &tbp)
diff --git a/base/main_test_cluster.go b/base/main_test_cluster.go
index d5ea84d33e..20b97e46b1 100644
--- a/base/main_test_cluster.go
+++ b/base/main_test_cluster.go
@@ -66,11 +66,11 @@ func newTestCluster(ctx context.Context, server string, tbp *TestBucketPool) *tb
// getGocbClusterForTest makes cluster connection. Callers must close. Returns the cluster and the connection string used to connect.
func getGocbClusterForTest(ctx context.Context, server string) (*gocb.Cluster, string) {
- testClusterTimeout := 10 * time.Second
spec := BucketSpec{
- Server: server,
- TLSSkipVerify: true,
- BucketOpTimeout: &testClusterTimeout,
+ Server: server,
+ TLSSkipVerify: true,
+		// use a longer timeout than DefaultBucketOpTimeout to avoid timeouts in the test harness when using buckets shortly after flush, which takes some time to reinitialize
+ BucketOpTimeout: Ptr(time.Duration(30) * time.Second),
}
connStr, err := spec.GetGoCBConnString()
if err != nil {
diff --git a/base/stats.go b/base/stats.go
index e2e2155d45..79653df629 100644
--- a/base/stats.go
+++ b/base/stats.go
@@ -87,6 +87,8 @@ const (
StatAddedVersion3dot1dot4 = "3.1.4"
StatAddedVersion3dot2dot0 = "3.2.0"
StatAddedVersion3dot2dot1 = "3.2.1"
+ StatAddedVersion3dot2dot2 = "3.2.2"
+ StatAddedVersion3dot2dot3 = "3.2.3"
StatAddedVersion3dot3dot0 = "3.3.0"
StatAddedVersion4dot0dot0 = "4.0.0"
@@ -313,6 +315,10 @@ func (g *GlobalStat) initResourceUtilizationStats() error {
if err != nil {
return err
}
+ resUtil.NumIdleQueryOps, err = NewIntStat(SubsystemDatabaseKey, "num_idle_query_ops", StatUnitNoUnits, NumIdleQueryOpsDesc, StatAddedVersion3dot2dot2, StatDeprecatedVersionNotDeprecated, StatStabilityCommitted, nil, nil, prometheus.CounterValue, 0)
+ if err != nil {
+ return err
+ }
resUtil.Uptime, err = NewDurStat(ResourceUtilizationSubsystem, "uptime", StatUnitNanoseconds, UptimeDesc, StatAddedVersion3dot0dot0, StatDeprecatedVersionNotDeprecated, StatStabilityCommitted, nil, nil, prometheus.CounterValue, time.Now())
if err != nil {
@@ -367,8 +373,9 @@ type ResourceUtilization struct {
// The node CPU usage calculation based values from /proc of user + system since the last time this function was called.
NodeCpuPercentUtil *SgwFloatStat `json:"node_cpu_percent_utilization"`
- // The number of background kv operations.
- NumIdleKvOps *SgwIntStat `json:"idle_kv_ops"`
+ // The number of background kv/query operations.
+ NumIdleKvOps *SgwIntStat `json:"idle_kv_ops"`
+ NumIdleQueryOps *SgwIntStat `json:"idle_query_ops"`
// The memory utilization (Resident Set Size) for the process, in bytes.
ProcessMemoryResident *SgwIntStat `json:"process_memory_resident"`
diff --git a/base/stats_descriptions.go b/base/stats_descriptions.go
index f7f521dcfa..19d5ec38af 100644
--- a/base/stats_descriptions.go
+++ b/base/stats_descriptions.go
@@ -314,7 +314,8 @@ const (
SyncProcessComputeDesc = "The compute unit for syncing with clients measured through cpu time and memory used for sync"
- NumIdleKvOpsDesc = "The total number of idle kv operations."
+ NumIdleKvOpsDesc = "The total number of idle kv operations."
+ NumIdleQueryOpsDesc = "The total number of idle query operations."
)
// Delta Sync stats descriptions
diff --git a/db/active_replicator.go b/db/active_replicator.go
index 778a1e31b3..880cdd5a1b 100644
--- a/db/active_replicator.go
+++ b/db/active_replicator.go
@@ -214,8 +214,10 @@ func connect(arc *activeReplicatorCommon, idSuffix string) (blipSender *blip.Sen
// - make this configurable for testing mixed-version replications
// - if unspecified, default to v2 and v3 until VV is supported with ISGR, then also include v4
protocols := []string{CBMobileReplicationV3.SubprotocolString(), CBMobileReplicationV2.SubprotocolString()}
- blipContext, err := NewSGBlipContextWithProtocols(arc.ctx, arc.config.ID+idSuffix, originPatterns, protocols, nil)
+ cancelCtx, cancelFunc := context.WithCancel(context.Background())
+ blipContext, err := NewSGBlipContextWithProtocols(arc.ctx, arc.config.ID+idSuffix, originPatterns, protocols, cancelCtx)
if err != nil {
+ cancelFunc()
return nil, nil, err
}
blipContext.WebsocketPingInterval = arc.config.WebsocketPingInterval
@@ -226,7 +228,10 @@ func connect(arc *activeReplicatorCommon, idSuffix string) (blipSender *blip.Sen
}
}
- bsc = NewBlipSyncContext(arc.ctx, blipContext, arc.config.ActiveDB, blipContext.ID, arc.replicationStats)
+ bsc, err = NewBlipSyncContext(arc.ctx, blipContext, arc.config.ActiveDB, blipContext.ID, arc.replicationStats, cancelFunc)
+ if err != nil {
+ return nil, nil, err
+ }
bsc.loggingCtx = base.CorrelationIDLogCtx(
arc.config.ActiveDB.AddDatabaseLogContext(base.NewNonCancelCtx().Ctx),
diff --git a/db/active_replicator_common.go b/db/active_replicator_common.go
index 7e26edb7cb..ab21a27fda 100644
--- a/db/active_replicator_common.go
+++ b/db/active_replicator_common.go
@@ -313,20 +313,16 @@ func (a *activeReplicatorCommon) getState() string {
return a.state
}
-// requires a.stateErrorLock
-func (a *activeReplicatorCommon) _getStateWithErrorMessage() (state string, lastErrorMessage string) {
+// getStateWithErrorMessage returns the current state and last error message for the replicator.
+func (a *activeReplicatorCommon) getStateWithErrorMessage() (state string, lastErrorMessage string) {
+ a.stateErrorLock.RLock()
+ defer a.stateErrorLock.RUnlock()
if a.lastError == nil {
return a.state, ""
}
return a.state, a.lastError.Error()
}
-func (a *activeReplicatorCommon) getStateWithErrorMessage() (state string, lastErrorMessage string) {
- a.stateErrorLock.RLock()
- defer a.stateErrorLock.RUnlock()
- return a._getStateWithErrorMessage()
-}
-
func (a *activeReplicatorCommon) GetStats() *BlipSyncStats {
a.lock.RLock()
defer a.lock.RUnlock()
@@ -357,6 +353,14 @@ func (a *activeReplicatorCommon) getCheckpointHighSeq() string {
return highSeqStr
}
+// publishStatus updates the replication status document in the metadata store.
+func (a *activeReplicatorCommon) publishStatus() {
+ a.lock.Lock()
+ defer a.lock.Unlock()
+ a._publishStatus()
+}
+
+// _publishStatus updates the replication status document in the metadata store. Requires holding a.lock before calling.
func (a *activeReplicatorCommon) _publishStatus() {
status := a._getStatusCallback()
err := setLocalStatus(a.ctx, a.config.ActiveDB.MetadataStore, a.statusKey, status, int(a.config.ActiveDB.Options.LocalDocExpirySecs))
diff --git a/db/active_replicator_pull.go b/db/active_replicator_pull.go
index a015a2ff78..87a5a68194 100644
--- a/db/active_replicator_pull.go
+++ b/db/active_replicator_pull.go
@@ -227,7 +227,7 @@ func (apr *ActivePullReplicator) _getStatus() *ReplicationStatus {
ID: apr.CheckpointID,
}
- status.Status, status.ErrorMessage = apr._getStateWithErrorMessage()
+ status.Status, status.ErrorMessage = apr.getStateWithErrorMessage()
pullStats := apr.replicationStats
status.DocsRead = pullStats.HandleRevCount.Value()
diff --git a/db/active_replicator_push.go b/db/active_replicator_push.go
index 79f22ed3f1..cd62a9f81e 100644
--- a/db/active_replicator_push.go
+++ b/db/active_replicator_push.go
@@ -181,7 +181,7 @@ func (apr *ActivePushReplicator) _initCheckpointer(remoteCheckpoints []replicati
// requires apr.lock
func (apr *ActivePushReplicator) _getStatus() *ReplicationStatus {
status := &ReplicationStatus{}
- status.Status, status.ErrorMessage = apr._getStateWithErrorMessage()
+ status.Status, status.ErrorMessage = apr.getStateWithErrorMessage()
pushStats := apr.replicationStats
status.DocsWritten = pushStats.SendRevCount.Value()
@@ -332,7 +332,7 @@ func (apr *ActivePushReplicator) _startSendingChanges(bh *blipHandler, since Seq
apr.activeSendChanges.Add(1)
go func(s *blip.Sender) {
defer apr.activeSendChanges.Add(-1)
- isComplete := bh.sendChanges(s, &sendChangesOptions{
+ isComplete, err := bh.sendChanges(s, &sendChangesOptions{
docIDs: apr.config.DocIDs,
since: since,
continuous: apr.config.Continuous,
@@ -344,8 +344,15 @@ func (apr *ActivePushReplicator) _startSendingChanges(bh *blipHandler, since Seq
ignoreNoConflicts: true, // force the passive side to accept a "changes" message, even in no conflicts mode.
changesCtx: collectionCtx.changesCtx,
})
- // On a normal completion, call complete for the replication
+ if err != nil {
+ base.InfofCtx(apr.ctx, base.KeyReplicate, "Terminating blip connection due to changes feed error: %v", err)
+ bh.ctxCancelFunc()
+ _ = apr.setError(err)
+ apr.publishStatus()
+ return
+ }
if isComplete {
+ // On a normal completion, call complete for the replication
apr.Complete()
}
}(apr.blipSender)
diff --git a/db/background_mgr_resync_dcp.go b/db/background_mgr_resync_dcp.go
index 3336b27644..7936af4da4 100644
--- a/db/background_mgr_resync_dcp.go
+++ b/db/background_mgr_resync_dcp.go
@@ -32,7 +32,14 @@ type ResyncManagerDCP struct {
VBUUIDs []uint64
useXattrs bool
ResyncedCollections map[string][]string
- lock sync.RWMutex
+ resyncCollectionInfo
+ lock sync.RWMutex
+}
+
+// resyncCollectionInfo contains information on the collections included in a resync run, populated in Init() and used in Run()
+type resyncCollectionInfo struct {
+ hasAllCollections bool
+ collectionIDs []uint32
}
// ResyncCollections contains map of scope names with collection names against which resync needs to run
@@ -54,6 +61,9 @@ func NewResyncManagerDCP(metadataStore base.DataStore, useXattrs bool, metaKeys
}
func (r *ResyncManagerDCP) Init(ctx context.Context, options map[string]interface{}, clusterStatus []byte) error {
+ db := options["database"].(*Database)
+ resyncCollections := options["collections"].(ResyncCollections)
+
newRunInit := func() error {
uniqueUUID, err := uuid.NewRandom()
if err != nil {
@@ -65,6 +75,16 @@ func (r *ResyncManagerDCP) Init(ctx context.Context, options map[string]interfac
return nil
}
+ // Get collectionIds and store in manager for use in DCP client later
+ collectionIDs, hasAllCollections, collectionNames, err := getCollectionIdsAndNames(db, resyncCollections)
+ if err != nil {
+ return err
+ }
+ r.collectionIDs = collectionIDs
+ r.hasAllCollections = hasAllCollections
+ // add collection list to manager for use in status call
+ r.SetCollectionStatus(collectionNames)
+
if clusterStatus != nil {
var statusDoc ResyncManagerStatusDocDCP
err := base.JSONUnmarshal(clusterStatus, &statusDoc)
@@ -149,20 +169,13 @@ func (r *ResyncManagerDCP) Run(ctx context.Context, options map[string]interface
return err
}
- // Get collectionIds
- collectionIDs, hasAllCollections, collectionNames, err := getCollectionIdsAndNames(db, resyncCollections)
- if err != nil {
- return err
- }
- // add collection list to manager for use in status call
- r.SetCollectionStatus(collectionNames)
- if hasAllCollections {
+ if r.hasAllCollections {
base.InfofCtx(ctx, base.KeyAll, "[%s] running resync against all collections", resyncLoggingID)
} else {
base.InfofCtx(ctx, base.KeyAll, "[%s] running resync against specified collections", resyncLoggingID)
}
- clientOptions := getResyncDCPClientOptions(collectionIDs, db.Options.GroupID, db.MetadataKeys.DCPCheckpointPrefix(db.Options.GroupID))
+ clientOptions := getResyncDCPClientOptions(r.collectionIDs, db.Options.GroupID, db.MetadataKeys.DCPCheckpointPrefix(db.Options.GroupID))
dcpFeedKey := GenerateResyncDCPStreamName(r.ResyncID)
dcpClient, err := base.NewDCPClient(ctx, dcpFeedKey, callback, *clientOptions, bucket)
@@ -228,8 +241,8 @@ func (r *ResyncManagerDCP) Run(ctx context.Context, options map[string]interface
// If we regenerated sequences, update syncInfo for all collections affected
if regenerateSequences {
- updatedDsNames := make(map[base.ScopeAndCollectionName]struct{}, len(collectionIDs))
- for _, collectionID := range collectionIDs {
+ updatedDsNames := make(map[base.ScopeAndCollectionName]struct{}, len(r.collectionIDs))
+ for _, collectionID := range r.collectionIDs {
dbc, ok := db.CollectionByID[collectionID]
if !ok {
base.WarnfCtx(ctx, "[%s] Completed resync, but unable to update syncInfo for collection %v (not found)", resyncLoggingID, collectionID)
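Init now resolves collection IDs up front and stores them on the manager for Run to reuse, so it requires the database and collections entries in its options map; both are plain type assertions, so a missing key would panic. A sketch of the options a caller now supplies (illustrative; in practice the background manager framework builds this map, and an empty ResyncCollections resolves to all collections via hasAllCollections):

options := map[string]interface{}{
	"database":    db,                  // *Database to resync
	"collections": ResyncCollections{}, // empty: resync every collection
}
if err := resyncManager.Init(ctx, options, nil); err != nil {
	return err
}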
diff --git a/db/background_mgr_resync_dcp_test.go b/db/background_mgr_resync_dcp_test.go
index 69b48a0fd7..03cc044767 100644
--- a/db/background_mgr_resync_dcp_test.go
+++ b/db/background_mgr_resync_dcp_test.go
@@ -110,6 +110,8 @@ func TestResyncDCPInit(t *testing.T) {
}()
options := make(map[string]interface{})
+ options["database"] = db
+ options["collections"] = ResyncCollections{}
if testCase.forceReset {
options["reset"] = true
}
diff --git a/db/background_mgr_tombstone_compaction.go b/db/background_mgr_tombstone_compaction.go
index 15656487dc..875fdc53eb 100644
--- a/db/background_mgr_tombstone_compaction.go
+++ b/db/background_mgr_tombstone_compaction.go
@@ -45,11 +45,11 @@ func (t *TombstoneCompactionManager) Run(ctx context.Context, options map[string
database := options["database"].(*Database)
defer atomic.CompareAndSwapUint32(&database.CompactState, DBCompactRunning, DBCompactNotRunning)
- callback := func(docsPurged *int) {
+ updateStatusCallback := func(docsPurged *int) {
atomic.StoreInt64(&t.PurgedDocCount, int64(*docsPurged))
}
- _, err := database.Compact(ctx, true, callback, terminator)
+ _, err := database.Compact(ctx, true, updateStatusCallback, terminator, false)
if err != nil {
return err
}
diff --git a/db/blip_handler.go b/db/blip_handler.go
index da8503606b..59ef6fc4ee 100644
--- a/db/blip_handler.go
+++ b/db/blip_handler.go
@@ -348,7 +348,7 @@ func (bh *blipHandler) handleSubChanges(rq *blip.Message) error {
}()
// sendChanges runs until blip context closes, or fails due to error
startTime := time.Now()
- _ = bh.sendChanges(rq.Sender, &sendChangesOptions{
+ _, err = bh.sendChanges(rq.Sender, &sendChangesOptions{
docIDs: subChangesParams.docIDs(),
since: subChangesParams.Since(),
continuous: continuous,
@@ -361,6 +361,10 @@ func (bh *blipHandler) handleSubChanges(rq *blip.Message) error {
changesCtx: collectionCtx.changesCtx,
requestPlusSeq: requestPlusSeq,
})
+ if err != nil {
+ base.DebugfCtx(bh.loggingCtx, base.KeySyncMsg, "Closing blip connection due to changes feed error %+v\n", err)
+ bh.ctxCancelFunc()
+ }
base.DebugfCtx(bh.loggingCtx, base.KeySyncMsg, "#%d: Type:%s --> Time:%v", bh.serialNumber, rq.Profile(), time.Since(startTime))
}()
@@ -428,8 +432,8 @@ func (flag changesDeletedFlag) HasFlag(deletedFlag changesDeletedFlag) bool {
return flag&deletedFlag != 0
}
-// Sends all changes since the given sequence
-func (bh *blipHandler) sendChanges(sender *blip.Sender, opts *sendChangesOptions) (isComplete bool) {
+// sendChanges starts a changes feed and sends the resulting changes. The returned bool indicates whether the changes feed finished and all changes were sent. The error is non-nil only for a fatal error, where the blip connection should be terminated. If the client disconnects the blip connection, the error is nil but the bool is false.
+func (bh *blipHandler) sendChanges(sender *blip.Sender, opts *sendChangesOptions) (bool, error) {
defer func() {
if panicked := recover(); panicked != nil {
bh.replicationStats.NumHandlersPanicked.Add(1)
@@ -472,18 +476,17 @@ func (bh *blipHandler) sendChanges(sender *blip.Sender, opts *sendChangesOptions
changesDb, err := bh.copyDatabaseCollectionWithUser(bh.collectionIdx)
if err != nil {
base.WarnfCtx(bh.loggingCtx, "[%s] error sending changes: %v", bh.blipContext.ID, err)
- return false
-
+ return false, err
}
- forceClose := generateBlipSyncChanges(bh.loggingCtx, changesDb, channelSet, options, opts.docIDs, func(changes []*ChangeEntry) error {
+ forceClose, err := generateBlipSyncChanges(bh.loggingCtx, changesDb, channelSet, options, opts.docIDs, func(changes []*ChangeEntry) error {
base.DebugfCtx(bh.loggingCtx, base.KeySync, " Sending %d changes", len(changes))
for _, change := range changes {
if !strings.HasPrefix(change.ID, "_") {
// If change is a removal and we're running with protocol V3 and the change is not a tombstone
// fall into 3.0 removal handling.
// Changes with change.Revoked=true have already evaluated UserHasDocAccess in changes.go, don't check again.
- if change.allRemoved && bh.activeCBMobileSubprotocol >= CBMobileReplicationV3 && !change.Deleted && !change.Revoked {
+ if change.allRemoved && bh.activeCBMobileSubprotocol >= CBMobileReplicationV3 && !change.Deleted && !change.Revoked && !bh.db.Options.UnsupportedOptions.BlipSendDocsWithChannelRemoval {
// If client doesn't want removals / revocations, don't send change
if !opts.revocations {
continue
@@ -494,7 +497,6 @@ func (bh *blipHandler) sendChanges(sender *blip.Sender, opts *sendChangesOptions
if err == nil && userHasAccessToDoc {
continue
}
-
// If we can't determine user access due to an error, log error and fall through to send change anyway.
// In the event of an error we should be cautious and send a revocation anyway, even if the user
// may actually have an alternate access method. This is the safer approach security-wise and
@@ -546,8 +548,7 @@ func (bh *blipHandler) sendChanges(sender *blip.Sender, opts *sendChangesOptions
}
bh.db.DatabaseContext.NotifyTerminatedChanges(bh.loggingCtx, user)
}
-
- return !forceClose
+ return (err == nil && !forceClose), err
}
func (bh *blipHandler) buildChangesRow(change *ChangeEntry, revID string) []interface{} {
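The (bool, error) pair returned by sendChanges now distinguishes three outcomes for its two callers, handleSubChanges above and the push replicator's _startSendingChanges. A condensed sketch of the interpretation at the push-replicator call site:

isComplete, err := bh.sendChanges(sender, opts)
switch {
case err != nil:
	// Fatal changes feed error: tear down the blip connection and record the error.
	bh.ctxCancelFunc()
case isComplete:
	// Feed ran to completion and every change was sent: normal completion.
	apr.Complete()
default:
	// err == nil, isComplete == false: client disconnected or the feed was
	// force-closed underneath us; nothing further to do.
}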
diff --git a/db/blip_sync_context.go b/db/blip_sync_context.go
index 5c9823f592..dabe6ad39f 100644
--- a/db/blip_sync_context.go
+++ b/db/blip_sync_context.go
@@ -34,7 +34,10 @@ const (
var ErrClosedBLIPSender = errors.New("use of closed BLIP sender")
-func NewBlipSyncContext(ctx context.Context, bc *blip.Context, db *Database, contextID string, replicationStats *BlipSyncStats) *BlipSyncContext {
+func NewBlipSyncContext(ctx context.Context, bc *blip.Context, db *Database, contextID string, replicationStats *BlipSyncStats, ctxCancelFunc context.CancelFunc) (*BlipSyncContext, error) {
+ if ctxCancelFunc == nil {
+		return nil, errors.New("ctxCancelFunc is required")
+ }
maxInFlightChangesBatches := DefaultMaxConcurrentChangesBatches
if db.Options.MaxConcurrentChangesBatches != nil {
maxInFlightChangesBatches = *db.Options.MaxConcurrentChangesBatches
@@ -55,6 +58,7 @@ func NewBlipSyncContext(ctx context.Context, bc *blip.Context, db *Database, con
inFlightChangesThrottle: make(chan struct{}, maxInFlightChangesBatches),
inFlightRevsThrottle: make(chan struct{}, maxInFlightRevs),
collections: &blipCollections{},
+ ctxCancelFunc: ctxCancelFunc,
}
if bsc.replicationStats == nil {
bsc.replicationStats = NewBlipSyncStats()
@@ -86,7 +90,7 @@ func NewBlipSyncContext(ctx context.Context, bc *blip.Context, db *Database, con
bsc.register(profile, handlerFn)
}
}
- return bsc
+ return bsc, nil
}
// BlipSyncContext represents one BLIP connection (socket) opened by a client.
@@ -133,6 +137,8 @@ type BlipSyncContext struct {
collections *blipCollections // all collections handled by blipSyncContext, implicit or via GetCollections
stats blipSyncStats // internal structure to store stats
+
+ ctxCancelFunc context.CancelFunc // function to cancel a blip replication
}
// blipSyncStats has support structures to support reporting stats at regular interval
@@ -248,6 +254,7 @@ func (bsc *BlipSyncContext) Close() {
}
bsc.reportStats(true)
close(bsc.terminator)
+ bsc.ctxCancelFunc()
})
}
diff --git a/db/change_cache_test.go b/db/change_cache_test.go
index d2bdbb22cd..0f736d2e19 100644
--- a/db/change_cache_test.go
+++ b/db/change_cache_test.go
@@ -1006,78 +1006,6 @@ func TestChannelQueryCancellation(t *testing.T) {
assert.Equal(t, initialQueryCount+1, finalQueryCount)
}
-func TestLowSequenceHandlingNoDuplicates(t *testing.T) {
- base.SetUpTestLogging(t, base.LevelDebug, base.KeyChanges, base.KeyCache)
-
- db, ctx := setupTestDBWithCacheOptions(t, shortWaitCache())
- defer db.Close(ctx)
-
- // Create a user with access to channel ABC
- authenticator := db.Authenticator(ctx)
- assert.True(t, authenticator != nil, "db.Authenticator(db.Ctx) returned nil")
- user, err := authenticator.NewUser("naomi", "letmein", channels.BaseSetOf(t, "ABC", "PBS", "NBC", "TBS"))
- assert.NoError(t, err, fmt.Sprintf("Error creating new user: %v", err))
- require.NoError(t, authenticator.Save(user))
-
- collection := GetSingleDatabaseCollection(t, db.DatabaseContext)
- // Simulate seq 3 and 4 being delayed - write 1,2,5,6
- WriteDirect(t, collection, []string{"ABC", "NBC"}, 1)
- WriteDirect(t, collection, []string{"ABC"}, 2)
- WriteDirect(t, collection, []string{"ABC", "PBS"}, 5)
- WriteDirect(t, collection, []string{"ABC", "PBS"}, 6)
-
- require.NoError(t, db.changeCache.waitForSequence(ctx, 6, base.DefaultWaitForSequence))
- db.user, err = authenticator.GetUser("naomi")
- require.NoError(t, err)
-
- // Start changes feed
-
- dbCollection, ctx := GetSingleDatabaseCollectionWithUser(ctx, t, db)
- var options ChangesOptions
- options.Since = SequenceID{Seq: 0}
- ctx, changesCtxCancel := context.WithCancel(ctx)
- options.ChangesCtx = ctx
- defer changesCtxCancel()
- options.Continuous = true
- options.Wait = true
- feed, err := dbCollection.MultiChangesFeed(ctx, base.SetOf("*"), options)
- assert.True(t, err == nil)
-
- // Array to read changes from feed to support assertions
- var changes = make([]*ChangeEntry, 0, 50)
-
- err = appendFromFeed(&changes, feed, 4, base.DefaultWaitForSequence)
-
- // Validate the initial sequences arrive as expected
- assert.True(t, err == nil)
- assert.Len(t, changes, 4)
- assert.Equal(t, &ChangeEntry{
- Seq: SequenceID{Seq: 1, TriggeredBy: 0, LowSeq: 2},
- ID: "doc-1",
- collectionID: dbCollection.GetCollectionID(),
- Changes: []ChangeRev{{"rev": "1-a"}}}, changes[0])
-
- // Test backfill clear - sequence numbers go back to standard handling
- WriteDirect(t, collection, []string{"ABC", "NBC", "PBS", "TBS"}, 3)
- WriteDirect(t, collection, []string{"ABC", "PBS"}, 4)
-
- require.NoError(t, db.changeCache.waitForSequenceNotSkipped(ctx, 4, base.DefaultWaitForSequence))
-
- err = appendFromFeed(&changes, feed, 2, base.DefaultWaitForSequence)
- assert.True(t, err == nil)
- assert.Len(t, changes, 6)
- assert.True(t, verifyChangesSequencesIgnoreOrder(changes, []uint64{1, 2, 5, 6, 3, 4}))
-
- WriteDirect(t, collection, []string{"ABC"}, 7)
- WriteDirect(t, collection, []string{"ABC", "NBC"}, 8)
- WriteDirect(t, collection, []string{"ABC", "PBS"}, 9)
- require.NoError(t, db.changeCache.waitForSequence(ctx, 9, base.DefaultWaitForSequence))
- newChanges, err := verifySequencesInFeed(feed, []uint64{7, 8, 9})
- require.NoError(t, err)
-
- assert.True(t, verifyChangesSequencesIgnoreOrder(append(changes, newChanges...), []uint64{1, 2, 5, 6, 3, 4, 7, 8, 9}))
-}
-
// Test race condition causing skipped sequences in changes feed. Channel feeds are processed sequentially
// in the main changes.go iteration loop, without a lock on the underlying channel caches. The following
// sequence is possible while running a changes feed for channels "A", "B":
diff --git a/db/changes.go b/db/changes.go
index e8598d0983..974905dfed 100644
--- a/db/changes.go
+++ b/db/changes.go
@@ -1375,16 +1375,16 @@ func (options ChangesOptions) String() string {
)
}
-// Used by BLIP connections for changes. Supports both one-shot and continuous changes.
-func generateBlipSyncChanges(ctx context.Context, database *DatabaseCollectionWithUser, inChannels base.Set, options ChangesOptions, docIDFilter []string, send func([]*ChangeEntry) error) (forceClose bool) {
+// Used by BLIP connections for changes. Supports both one-shot and continuous changes. Returns an error if the feed fails to start or hits a fatal error; the caller is responsible for closing the connection, and no more changes will be generated. forceClose will be true if the connection was terminated underneath the changes feed.
+func generateBlipSyncChanges(ctx context.Context, database *DatabaseCollectionWithUser, inChannels base.Set, options ChangesOptions, docIDFilter []string, send func([]*ChangeEntry) error) (forceClose bool, err error) {
// Store one-shot here to protect
isOneShot := !options.Continuous
- err, forceClose := GenerateChanges(ctx, database, inChannels, options, docIDFilter, send)
+ err, forceClose = GenerateChanges(ctx, database, inChannels, options, docIDFilter, send)
if _, ok := err.(*ChangesSendErr); ok {
// If there was already an error in a send function, do not send last one shot changes message, since it probably will not work anyway.
- return forceClose // error is probably because the client closed the connection
+ return forceClose, err // error is probably because the client closed the connection
}
// For one-shot changes, invoke the callback w/ nil to trigger the 'caught up' changes message. (For continuous changes, this
@@ -1392,7 +1392,7 @@ func generateBlipSyncChanges(ctx context.Context, database *DatabaseCollectionWi
if isOneShot {
_ = send(nil)
}
- return forceClose
+ return forceClose, err
}
type ChangesSendErr struct{ error }
@@ -1429,6 +1429,7 @@ func GenerateChanges(ctx context.Context, database *DatabaseCollectionWithUser,
var lastSeq SequenceID
var feed <-chan *ChangeEntry
var timeout <-chan time.Time
+ var feedErr error
// feedStarted identifies whether at least one MultiChangesFeed has been started. Used to identify when a one-shot changes is done.
feedStarted := false
@@ -1450,7 +1451,6 @@ loop:
forceClose = true
break loop
}
- var feedErr error
if len(docIDFilter) > 0 {
feed, feedErr = database.DocIDChangesFeed(ctx, inChannels, docIDFilter, options)
} else {
@@ -1469,7 +1469,6 @@ loop:
}
var sendErr error
-
// Wait for either a new change, a heartbeat, or a timeout:
select {
case entry, ok := <-feed:
@@ -1478,6 +1477,7 @@ loop:
} else if entry == nil {
sendErr = send(nil)
} else if entry.Err != nil {
+ feedErr = entry.Err
break loop // error returned by feed - end changes
} else {
entries := []*ChangeEntry{entry}
@@ -1494,6 +1494,7 @@ loop:
waiting = true
break collect
} else if entry.Err != nil {
+ feedErr = entry.Err
break loop // error returned by feed - end changes
}
entries = append(entries, entry)
@@ -1546,5 +1547,5 @@ loop:
forceClose = true
}
- return nil, forceClose
+ return feedErr, forceClose
}
diff --git a/db/crud.go b/db/crud.go
index 56a084ead9..976b0b7eb8 100644
--- a/db/crud.go
+++ b/db/crud.go
@@ -26,6 +26,7 @@ import (
const (
kMaxRecentSequences = 20 // Maximum number of sequences stored in RecentSequences before pruning is triggered
+	kMinRecentSequences    = 5 // Minimum number of sequences to retain in RecentSequences during compaction
unusedSequenceWarningThreshold = 10000 // Warn when releasing more than this many sequences due to existing sequence on the document
)
@@ -2109,6 +2110,9 @@ func (db *DatabaseContext) assignSequence(ctx context.Context, docSequence uint6
// so we're allowing more 'recent sequences' on the doc (20) before attempting pruning
stableSequence := db.changeCache.GetStableSequence(doc.ID).Seq
count := 0
+	// we want to keep at least kMinRecentSequences recent sequences in the recent sequences list, to reduce the likelihood
+	// of races between compaction of recent sequences and a coalesced DCP mutation resulting in skipped/abandoned sequences
+ maxToCompact := len(doc.RecentSequences) - kMinRecentSequences
for _, seq := range doc.RecentSequences {
// Only remove sequences if they are higher than a sequence that's been seen on the
// feed. This is valid across SG nodes (which could each have a different nextSequence),
@@ -2116,6 +2120,9 @@ func (db *DatabaseContext) assignSequence(ctx context.Context, docSequence uint6
// to each node.
if seq < stableSequence {
count++
+ if count == maxToCompact {
+ break
+ }
} else {
break
}
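A worked example of the new cap: a document that has accumulated 20 recent sequences, all below the stable sequence, prunes at most 20 - kMinRecentSequences = 15 of them. The triggering write then appends its own sequence, leaving 5 + 1 = 6 entries, the count asserted by TestMaintainMinimumRecentSequences later in this diff:

// len(doc.RecentSequences) == 20, kMinRecentSequences == 5
maxToCompact := 20 - 5 // 15; the pruning loop stops once count reaches this
// 20 - 15 = 5 sequences retained, plus the newly assigned one -> 6 entries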
diff --git a/db/database.go b/db/database.go
index af2bdb7ec5..2308d10efd 100644
--- a/db/database.go
+++ b/db/database.go
@@ -245,20 +245,21 @@ type APIEndpoints struct {
// UnsupportedOptions are not supported for external use
type UnsupportedOptions struct {
- UserViews *UserViewsOptions `json:"user_views,omitempty"` // Config settings for user views
- OidcTestProvider *OidcTestProviderOptions `json:"oidc_test_provider,omitempty"` // Config settings for OIDC Provider
- APIEndpoints *APIEndpoints `json:"api_endpoints,omitempty"` // Config settings for API endpoints
- WarningThresholds *WarningThresholds `json:"warning_thresholds,omitempty"` // Warning thresholds related to _sync size
- DisableCleanSkippedQuery bool `json:"disable_clean_skipped_query,omitempty"` // Clean skipped sequence processing bypasses final check (deprecated: CBG-2672)
- OidcTlsSkipVerify bool `json:"oidc_tls_skip_verify,omitempty"` // Config option to enable self-signed certs for OIDC testing.
- SgrTlsSkipVerify bool `json:"sgr_tls_skip_verify,omitempty"` // Config option to enable self-signed certs for SG-Replicate testing.
- RemoteConfigTlsSkipVerify bool `json:"remote_config_tls_skip_verify,omitempty"` // Config option to enable self signed certificates for external JavaScript load.
- GuestReadOnly bool `json:"guest_read_only,omitempty"` // Config option to restrict GUEST document access to read-only
- ForceAPIForbiddenErrors bool `json:"force_api_forbidden_errors,omitempty"` // Config option to force the REST API to return forbidden errors
- ConnectedClient bool `json:"connected_client,omitempty"` // Enables BLIP connected-client APIs
- UseQueryBasedResyncManager bool `json:"use_query_resync_manager,omitempty"` // Config option to use Query based resync manager to perform Resync op
- DCPReadBuffer int `json:"dcp_read_buffer,omitempty"` // Enables user to set their own DCP read buffer
- KVBufferSize int `json:"kv_buffer,omitempty"` // Enables user to set their own KV pool buffer
+ UserViews *UserViewsOptions `json:"user_views,omitempty"` // Config settings for user views
+ OidcTestProvider *OidcTestProviderOptions `json:"oidc_test_provider,omitempty"` // Config settings for OIDC Provider
+ APIEndpoints *APIEndpoints `json:"api_endpoints,omitempty"` // Config settings for API endpoints
+ WarningThresholds *WarningThresholds `json:"warning_thresholds,omitempty"` // Warning thresholds related to _sync size
+ DisableCleanSkippedQuery bool `json:"disable_clean_skipped_query,omitempty"` // Clean skipped sequence processing bypasses final check (deprecated: CBG-2672)
+ OidcTlsSkipVerify bool `json:"oidc_tls_skip_verify,omitempty"` // Config option to enable self-signed certs for OIDC testing.
+ SgrTlsSkipVerify bool `json:"sgr_tls_skip_verify,omitempty"` // Config option to enable self-signed certs for SG-Replicate testing.
+ RemoteConfigTlsSkipVerify bool `json:"remote_config_tls_skip_verify,omitempty"` // Config option to enable self signed certificates for external JavaScript load.
+ GuestReadOnly bool `json:"guest_read_only,omitempty"` // Config option to restrict GUEST document access to read-only
+ ForceAPIForbiddenErrors bool `json:"force_api_forbidden_errors,omitempty"` // Config option to force the REST API to return forbidden errors
+ ConnectedClient bool `json:"connected_client,omitempty"` // Enables BLIP connected-client APIs
+ UseQueryBasedResyncManager bool `json:"use_query_resync_manager,omitempty"` // Config option to use Query based resync manager to perform Resync op
+ DCPReadBuffer int `json:"dcp_read_buffer,omitempty"` // Enables user to set their own DCP read buffer
+ KVBufferSize int `json:"kv_buffer,omitempty"` // Enables user to set their own KV pool buffer
+ BlipSendDocsWithChannelRemoval bool `json:"blip_send_docs_with_channel_removal,omitempty"` // Enables sending docs with channel removals using channel filters
}
type WarningThresholds struct {
@@ -1481,15 +1482,19 @@ func (db *DatabaseContext) GetRoleIDs(ctx context.Context, useViews, includeDele
return roles, nil
}
-// Trigger tombstone compaction from view and/or GSI indexes. Several Sync Gateway indexes server tombstones (deleted documents with an xattr).
+type compactProgressCallbackFunc func(purgedDocCount *int)
+
+// Compact runs tombstone compaction from view and/or GSI indexes - ensuring there's nothing left in the indexes for tombstoned documents that have been purged by the server.
+//
// Several Sync Gateway indexes include server tombstones (deleted documents with an xattr).
// There currently isn't a mechanism for server to remove these docs from the index when the tombstone is purged by the server during
// metadata purge, because metadata purge doesn't trigger a DCP event.
// When compact is run, Sync Gateway initiates a normal delete operation for the document and xattr (a Sync Gateway purge). This triggers
// removal of the document from the index. In the event that the document has already been purged by server, we need to recreate and delete
// the document to accomplish the same result.
-type compactCallbackFunc func(purgedDocCount *int)
-
-func (db *Database) Compact(ctx context.Context, skipRunningStateCheck bool, callback compactCallbackFunc, terminator *base.SafeTerminator) (int, error) {
+//
+// The `isScheduledBackgroundTask` parameter indicates whether the compaction is running as part of a scheduled background task, or an ad-hoc user-initiated `/{db}/_compact` request.
+func (db *Database) Compact(ctx context.Context, skipRunningStateCheck bool, optionalProgressCallback compactProgressCallbackFunc, terminator *base.SafeTerminator, isScheduledBackgroundTask bool) (purgedDocCount int, err error) {
if !skipRunningStateCheck {
if !atomic.CompareAndSwapUint32(&db.CompactState, DBCompactNotRunning, DBCompactRunning) {
return 0, base.HTTPErrorf(http.StatusServiceUnavailable, "Compaction already running")
@@ -1510,12 +1515,13 @@ func (db *Database) Compact(ctx context.Context, skipRunningStateCheck bool, cal
startTime := time.Now()
purgeOlderThan := startTime.Add(-purgeInterval)
- purgedDocCount := 0
purgeErrorCount := 0
addErrorCount := 0
deleteErrorCount := 0
- defer callback(&purgedDocCount)
+ if optionalProgressCallback != nil {
+ defer optionalProgressCallback(&purgedDocCount)
+ }
base.InfofCtx(ctx, base.KeyAll, "Starting compaction of purged tombstones for %s ...", base.MD(db.Name))
@@ -1534,6 +1540,9 @@ func (db *Database) Compact(ctx context.Context, skipRunningStateCheck bool, cal
for {
purgedDocs := make([]string, 0)
results, err := collection.QueryTombstones(ctx, purgeOlderThan, QueryTombstoneBatch)
+ if isScheduledBackgroundTask {
+ base.SyncGatewayStats.GlobalStats.ResourceUtilizationStats().NumIdleQueryOps.Add(1)
+ }
if err != nil {
return 0, err
}
@@ -1554,11 +1563,17 @@ func (db *Database) Compact(ctx context.Context, skipRunningStateCheck bool, cal
base.DebugfCtx(ctx, base.KeyCRUD, "\tDeleting %q", tombstonesRow.Id)
// First, attempt to purge.
purgeErr := collection.Purge(ctx, tombstonesRow.Id, false)
+ if isScheduledBackgroundTask {
+ base.SyncGatewayStats.GlobalStats.ResourceUtilizationStats().NumIdleKvOps.Add(1)
+ }
if purgeErr == nil {
purgedDocs = append(purgedDocs, tombstonesRow.Id)
} else if base.IsDocNotFoundError(purgeErr) {
// If key no longer exists, need to add and remove to trigger removal from view
_, addErr := collection.dataStore.Add(tombstonesRow.Id, 0, purgeBody)
+ if isScheduledBackgroundTask {
+ base.SyncGatewayStats.GlobalStats.ResourceUtilizationStats().NumIdleKvOps.Add(1)
+ }
if addErr != nil {
addErrorCount++
base.InfofCtx(ctx, base.KeyAll, "Couldn't compact key %s (add): %v", base.UD(tombstonesRow.Id), addErr)
@@ -1569,7 +1584,11 @@ func (db *Database) Compact(ctx context.Context, skipRunningStateCheck bool, cal
// so mark it to be removed from cache, even if the subsequent delete fails
purgedDocs = append(purgedDocs, tombstonesRow.Id)
- if delErr := collection.dataStore.Delete(tombstonesRow.Id); delErr != nil {
+ delErr := collection.dataStore.Delete(tombstonesRow.Id)
+ if isScheduledBackgroundTask {
+ base.SyncGatewayStats.GlobalStats.ResourceUtilizationStats().NumIdleKvOps.Add(1)
+ }
+ if delErr != nil {
deleteErrorCount++
base.InfofCtx(ctx, base.KeyAll, "Couldn't compact key %s (delete): %v", base.UD(tombstonesRow.Id), delErr)
}
@@ -1593,7 +1612,9 @@ func (db *Database) Compact(ctx context.Context, skipRunningStateCheck bool, cal
}
base.InfofCtx(ctx, base.KeyAll, "Compacted %v tombstones", count)
- callback(&purgedDocCount)
+ if optionalProgressCallback != nil {
+ optionalProgressCallback(&purgedDocCount)
+ }
if resultCount < QueryTombstoneBatch {
break
@@ -2467,7 +2488,7 @@ func (db *DatabaseContext) StartOnlineProcesses(ctx context.Context) (returnedEr
bgtTerminator.Close()
}()
bgt, err := NewBackgroundTask(ctx, "Compact", func(ctx context.Context) error {
- _, err := db.Compact(ctx, false, func(purgedDocCount *int) {}, bgtTerminator)
+ _, err := db.Compact(ctx, false, nil, bgtTerminator, true)
if err != nil {
base.WarnfCtx(ctx, "Error trying to compact tombstoned documents for %q with error: %v", db.Name, err)
}
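The progress callback is now optional, and the new isScheduledBackgroundTask flag controls whether the KV and query operations performed during compaction count toward the new idle-op stats. The two call patterns, condensed from this diff:

// Scheduled background compaction: no progress callback; ops counted as idle.
_, err := db.Compact(ctx, false, nil, bgtTerminator, true)

// User-initiated compaction (tombstone compaction manager): progress callback
// updates the status doc; ops are not counted as idle.
_, err = db.Compact(ctx, true, updateStatusCallback, terminator, false)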
diff --git a/db/database_test.go b/db/database_test.go
index 8bf9e59626..a98ce8a51f 100644
--- a/db/database_test.go
+++ b/db/database_test.go
@@ -166,6 +166,33 @@ func setupTestLeakyDBWithCacheOptions(t *testing.T, options CacheOptions, leakyO
return db, addDatabaseAndTestUserContext(ctx, db)
}
+func setupTestDBWithLeakyBucket(t testing.TB, leakyBucket *base.LeakyBucket) (*Database, context.Context) {
+ ctx := base.TestCtx(t)
+ testBucket, ok := leakyBucket.GetUnderlyingBucket().(*base.TestBucket)
+ require.True(t, ok)
+ dbcOptions := DatabaseContextOptions{
+ Scopes: GetScopesOptions(t, testBucket, 1),
+ }
+ AddOptionsFromEnvironmentVariables(&dbcOptions)
+ dbCtx, err := NewDatabaseContext(ctx, "db", leakyBucket, false, dbcOptions)
+ if err != nil {
+ leakyBucket.Close(ctx)
+ t.Fatalf("Unable to create database context: %v", err)
+ }
+ ctx = dbCtx.AddDatabaseLogContext(ctx)
+ err = dbCtx.StartOnlineProcesses(ctx)
+ if err != nil {
+ dbCtx.Close(ctx)
+ t.Fatalf("Unable to start online processes: %v", err)
+ }
+ db, err := CreateDatabase(dbCtx)
+ if err != nil {
+ dbCtx.Close(ctx)
+ t.Fatalf("Unable to create database: %v", err)
+ }
+ return db, addDatabaseAndTestUserContext(ctx, db)
+}
+
func setupTestDBDefaultCollection(t testing.TB) (*Database, context.Context) {
cacheOptions := DefaultCacheOptions()
dbcOptions := DatabaseContextOptions{
@@ -1536,6 +1563,99 @@ func TestUpdatePrincipal(t *testing.T) {
assert.Equal(t, uint64(3), nextSeq)
}
+func TestUpdatePrincipalCASRetry(t *testing.T) {
+ base.SetUpTestLogging(t, base.LevelDebug, base.KeyAuth, base.KeyCRUD)
+
+ // ensure we don't batch sequences so that the number of released sequences is deterministic
+ defer SuspendSequenceBatching()()
+
+ tb := base.GetTestBucket(t)
+ defer tb.Close(base.TestCtx(t))
+
+ tests := []struct {
+ numCASRetries int32
+ expectError bool
+ }{
+ {numCASRetries: 0},
+ {numCASRetries: 1},
+ {numCASRetries: 2},
+ {numCASRetries: 5},
+ {numCASRetries: 10},
+ {numCASRetries: auth.PrincipalUpdateMaxCasRetries - 1},
+ {numCASRetries: auth.PrincipalUpdateMaxCasRetries, expectError: true},
+ {numCASRetries: auth.PrincipalUpdateMaxCasRetries + 1, expectError: true},
+ }
+
+ var (
+ casRetryCount atomic.Int32
+ totalCASRetries atomic.Int32
+ enableCASRetry base.AtomicBool
+ )
+
+ lb := base.NewLeakyBucket(tb, base.LeakyBucketConfig{
+ UpdateCallback: func(key string) {
+ casRetryCountInt, totalCASRetriesInt := casRetryCount.Load(), totalCASRetries.Load()
+ if enableCASRetry.IsTrue() && casRetryCountInt < totalCASRetriesInt {
+ casRetryCount.Add(1)
+ casRetryCountInt = casRetryCount.Load()
+				t.Logf("forceCASRetry %d/%d: Forcing CAS retry for key: %q", casRetryCountInt, totalCASRetriesInt, key)
+ var body []byte
+ originalCAS, err := tb.GetMetadataStore().Get(key, &body)
+ require.NoError(t, err)
+ err = tb.GetMetadataStore().Set(key, 0, nil, body)
+ require.NoError(t, err)
+ newCAS, err := tb.GetMetadataStore().Get(key, &body)
+ require.NoError(t, err)
+				t.Logf("forceCASRetry %d/%d: Doc %q CAS changed from %d to %d", casRetryCountInt, totalCASRetriesInt, key, originalCAS, newCAS)
+ }
+ },
+ IgnoreClose: true,
+ })
+
+ db, ctx := setupTestDBWithLeakyBucket(t, lb)
+ defer db.Close(ctx)
+
+ // Create a user with access to channel ABC
+ authenticator := db.Authenticator(ctx)
+ user, err := authenticator.NewUser("naomi", "letmein", channels.BaseSetOf(t, "ABC"))
+ require.NoError(t, err)
+ require.NoError(t, authenticator.Save(user))
+
+ for i, test := range tests {
+ t.Run(fmt.Sprintf("numCASRetries=%d", test.numCASRetries), func(t *testing.T) {
+ // Write an update that'll be forced into a CAS retry from the leaky bucket callback
+ userInfo, err := db.GetPrincipalForTest(t, "naomi", true)
+ require.NoError(t, err)
+ userInfo.ExplicitChannels = base.SetOf("ABC", "PBS", fmt.Sprintf("testi:%d", i))
+
+ // reset callback for subtest
+ enableCASRetry.Set(true)
+ casRetryCount.Store(0)
+ totalCASRetries.Store(test.numCASRetries)
+ sequenceReleasedCountBefore := db.sequences.dbStats.SequenceReleasedCount.Value()
+
+ _, _, err = db.UpdatePrincipal(ctx, userInfo, true, true)
+ if test.expectError {
+ require.ErrorContains(t, err, "cas mismatch")
+ } else {
+ require.NoError(t, err, "Unable to update principal")
+ }
+
+ // cap to max retries if we're doing more
+ expectedReleasedSequences := test.numCASRetries
+ if test.numCASRetries > auth.PrincipalUpdateMaxCasRetries {
+ expectedReleasedSequences = auth.PrincipalUpdateMaxCasRetries
+ }
+
+ // Ensure we released the sequences for all the CAS retries we expected to make
+ assert.EventuallyWithT(t, func(c *assert.CollectT) {
+ sequenceReleasedCountAfter := db.sequences.dbStats.SequenceReleasedCount.Value()
+ assert.Equal(c, int64(expectedReleasedSequences), sequenceReleasedCountAfter-sequenceReleasedCountBefore)
+ }, 5*time.Second, 100*time.Millisecond)
+ })
+ }
+}
+
// Re-apply one of the conflicting changes to make sure that PutExistingRevWithBody() treats it as a no-op (SG Issue #3048)
func TestRepeatedConflict(t *testing.T) {
@@ -2315,6 +2435,41 @@ func TestRecentSequenceHistory(t *testing.T) {
}
+func TestMaintainMinimumRecentSequences(t *testing.T) {
+ db, ctx := setupTestDB(t)
+ defer db.Close(ctx)
+ collection, ctx := GetSingleDatabaseCollectionWithUser(ctx, t, db)
+ const docID = "doc1"
+ allocSeq := uint64(0)
+
+ // Add 20 revisions of a single document to fill recent sequences up on the document
+ body := Body{"val": "one"}
+ for i := 0; i < 20; i++ {
+ revid, doc, err := collection.Put(ctx, docID, body)
+ require.NoError(t, err)
+ body[BodyId] = doc.ID
+ body[BodyRev] = revid
+ allocSeq++
+ }
+ // wait for the latest allocated seq to arrive at the cache so the stable seq advances far enough for recent sequence compaction
+ err := db.changeCache.waitForSequence(ctx, allocSeq, base.DefaultWaitForSequence)
+ require.NoError(t, err)
+
+ // assert that we have 20 entries in recent sequences for the above doc updates
+ doc, err := collection.GetDocument(ctx, docID, DocUnmarshalAll)
+ require.NoError(t, err)
+ assert.Equal(t, 20, len(doc.RecentSequences))
+
+ // update the original doc to trigger recent sequence compaction on the doc
+ _, _, err = collection.Put(ctx, docID, body)
+ require.NoError(t, err)
+
+ // Validate that the recent sequences are pruned to the minimum + recently assigned sequence
+ doc, err = collection.GetDocument(ctx, docID, DocUnmarshalAll)
+ require.NoError(t, err)
+ assert.Equal(t, 6, len(doc.RecentSequences))
+}
+
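
For readers following the test above, here is a hedged sketch of the pruning behaviour it asserts. The pruneRecentSequences helper and its parameters are illustrative assumptions inferred from the expected counts (20 revisions plus one compacting update leaving 6 entries), not Sync Gateway internals.

// Illustrative only: prune a recent-sequence list down to a minimum
// retained count, dropping leading entries the cache's stable sequence
// has already passed. minRetain=6 matches the test's expectation.
package main

import "fmt"

func pruneRecentSequences(recent []uint64, stableSeq uint64, minRetain int) []uint64 {
	i := 0
	for i < len(recent)-minRetain && recent[i] <= stableSeq {
		i++
	}
	return recent[i:]
}

func main() {
	recent := make([]uint64, 0, 21)
	for seq := uint64(1); seq <= 21; seq++ { // 20 revisions + 1 compacting update
		recent = append(recent, seq)
	}
	pruned := pruneRecentSequences(recent, 20, 6)
	fmt.Println(len(pruned), pruned) // 6 [16 17 18 19 20 21]
}
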
func TestChannelView(t *testing.T) {
db, ctx := setupTestDBWithViewsEnabled(t)
diff --git a/db/design_doc_util_test.go b/db/design_doc_util_test.go
index 521021d1f3..21a0401f17 100644
--- a/db/design_doc_util_test.go
+++ b/db/design_doc_util_test.go
@@ -33,8 +33,6 @@ func assertDesignDocExists(t testing.TB, viewStore sgbucket.ViewStore, ddocName
// assertDesignDocDoesNotExist ensures that the design doc does not exist in the dataStore.
func assertDesignDocNotExists(t testing.TB, viewStore sgbucket.ViewStore, ddocName string) bool {
ddoc, err := viewStore.GetDDoc(ddocName)
- if err == nil {
- return assert.Failf(t, "Design doc %s should not exist but but it did: %v", ddocName, ddoc)
- }
+ assert.Error(t, err, "Design doc %s should not exist but it did: %v", ddocName, ddoc)
return assert.Truef(t, IsMissingDDocError(err), "Design doc %s should not exist but got a different error fetching it: %v", ddocName, err)
}
diff --git a/db/sg_replicate_cfg.go b/db/sg_replicate_cfg.go
index b79cdc2706..68cd2b7d40 100644
--- a/db/sg_replicate_cfg.go
+++ b/db/sg_replicate_cfg.go
@@ -115,6 +115,8 @@ type ReplicationConfig struct {
Adhoc bool `json:"adhoc,omitempty"`
BatchSize int `json:"batch_size,omitempty"`
RunAs string `json:"run_as,omitempty"`
+ UpdatedAt *time.Time `json:"updated_at,omitempty"`
+ CreatedAt *time.Time `json:"created_at,omitempty"`
}
func DefaultReplicationConfig() ReplicationConfig {
@@ -335,6 +337,9 @@ func (rc *ReplicationConfig) Upsert(ctx context.Context, c *ReplicationUpsertCon
rc.RunAs = *c.RunAs
}
+ timeNow := time.Now().UTC()
+ rc.UpdatedAt = &timeNow
+
if c.QueryParams != nil {
// QueryParams can be either []interface{} or map[string]interface{}, so requires type-specific copying
// avoid later mutating c.QueryParams
@@ -1106,6 +1111,8 @@ func (m *sgReplicateManager) UpsertReplication(ctx context.Context, replication
} else {
// Add a new replication to the cfg. Set targetState based on initialState when specified.
replicationConfig := DefaultReplicationConfig()
+ createdAt := time.Now().UTC()
+ replicationConfig.CreatedAt = &createdAt
replicationConfig.ID = replication.ID
targetState := ReplicationStateRunning
if replication.InitialState != nil && *replication.InitialState == ReplicationStateStopped {
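
The new UpdatedAt/CreatedAt fields are pointer-typed with omitempty, so configs persisted before this change round-trip without the keys. A minimal standalone sketch of that behaviour (field set trimmed; the JSON key for ID is assumed for illustration):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// replicationConfig mirrors the two new fields above: pointer-typed
// timestamps with omitempty are dropped from the JSON when nil, so older
// configs without these fields marshal unchanged.
type replicationConfig struct {
	ID        string     `json:"replication_id"`
	UpdatedAt *time.Time `json:"updated_at,omitempty"`
	CreatedAt *time.Time `json:"created_at,omitempty"`
}

func main() {
	legacy, _ := json.Marshal(replicationConfig{ID: "repl1"})
	fmt.Println(string(legacy)) // {"replication_id":"repl1"}

	now := time.Now().UTC()
	current, _ := json.Marshal(replicationConfig{ID: "repl1", CreatedAt: &now, UpdatedAt: &now})
	fmt.Println(string(current)) // adds "created_at" and "updated_at"
}
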
diff --git a/db/sg_replicate_cfg_test.go b/db/sg_replicate_cfg_test.go
index bec50ed039..7518946754 100644
--- a/db/sg_replicate_cfg_test.go
+++ b/db/sg_replicate_cfg_test.go
@@ -536,6 +536,7 @@ func TestUpsertReplicationConfig(t *testing.T) {
for _, testCase := range testCases {
t.Run(fmt.Sprintf("%s", testCase.name), func(t *testing.T) {
testCase.existingConfig.Upsert(base.TestCtx(t), testCase.updatedConfig)
+ testCase.existingConfig.UpdatedAt = nil // clear UpdatedAt for the comparison below, since Upsert sets it to the current time
equal, err := testCase.existingConfig.Equals(testCase.expectedConfig)
assert.NoError(t, err)
assert.True(t, equal)
diff --git a/db/users.go b/db/users.go
index fae475c77e..49c9501c5f 100644
--- a/db/users.go
+++ b/db/users.go
@@ -86,6 +86,7 @@ func (dbc *DatabaseContext) UpdatePrincipal(ctx context.Context, updates *auth.P
if err != nil {
return replaced, princ, fmt.Errorf("Error creating user/role: %w", err)
}
+ princ.SetCreatedAt(time.Now().UTC())
changed = true
} else if !allowReplace {
err = base.HTTPErrorf(http.StatusConflict, "Already exists")
@@ -115,9 +116,9 @@ func (dbc *DatabaseContext) UpdatePrincipal(ctx context.Context, updates *auth.P
if updates.ExplicitChannels != nil && !updatedExplicitChannels.Equals(updates.ExplicitChannels) {
changed = true
}
- collectionAccessChanged, err := dbc.RequiresCollectionAccessUpdate(ctx, princ, updates.CollectionAccess)
- if err != nil {
- return false, princ, err
+ collectionAccessChanged, collectionAccessErr := dbc.RequiresCollectionAccessUpdate(ctx, princ, updates.CollectionAccess)
+ if collectionAccessErr != nil {
+ return false, princ, collectionAccessErr
} else if collectionAccessChanged {
changed = true
}
@@ -214,10 +215,15 @@ func (dbc *DatabaseContext) UpdatePrincipal(ctx context.Context, updates *auth.P
user.SetJWTLastUpdated(time.Now())
}
}
+ princ.SetUpdatedAt()
err = authenticator.Save(princ)
// On cas error, retry. Otherwise break out of loop
if base.IsCasMismatch(err) {
base.InfofCtx(ctx, base.KeyAuth, "CAS mismatch updating principal %s - will retry", base.UD(princ.Name()))
+ // release the sequence number we allocated in the failed update to avoid an abandoned sequence
+ if err := dbc.sequences.releaseSequence(ctx, nextSeq); err != nil {
+ base.InfofCtx(ctx, base.KeyAuth, "Error releasing unused sequence %d after CAS retry for principal %s: %v", nextSeq, base.UD(princ.Name()), err)
+ }
} else {
return replaced, princ, err
}
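
A minimal sketch of the retry shape introduced above, assuming hypothetical allocate/release/save helpers: on a CAS mismatch the sequence allocated for the failed attempt is released before retrying, so no sequence is abandoned, and a release failure is logged rather than returned.

package main

import (
	"errors"
	"fmt"
)

var errCASMismatch = errors.New("cas mismatch")

// updatePrincipalSketch retries a CAS-guarded save up to maxRetries times.
// allocate/release/save stand in for sequence allocation, sequence release
// and authenticator.Save respectively.
func updatePrincipalSketch(maxRetries int, allocate func() uint64, release func(uint64) error, save func(uint64) error) error {
	for attempt := 0; attempt < maxRetries; attempt++ {
		seq := allocate()
		err := save(seq)
		if !errors.Is(err, errCASMismatch) {
			return err // success, or a non-retryable error
		}
		// release the sequence from the failed attempt to avoid abandoning it
		if releaseErr := release(seq); releaseErr != nil {
			fmt.Printf("warn: could not release sequence %d: %v\n", seq, releaseErr)
		}
	}
	return fmt.Errorf("update failed after %d attempts: %w", maxRetries, errCASMismatch)
}

func main() {
	next := uint64(0)
	failures := 2
	err := updatePrincipalSketch(5,
		func() uint64 { next++; return next },
		func(seq uint64) error { return nil },
		func(seq uint64) error {
			if failures > 0 {
				failures--
				return errCASMismatch
			}
			return nil
		})
	fmt.Println("result:", err) // result: <nil>, after two released sequences
}
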
diff --git a/docs/api/components/schemas.yaml b/docs/api/components/schemas.yaml
index 6dca0db6db..acee5a4b0e 100644
--- a/docs/api/components/schemas.yaml
+++ b/docs/api/components/schemas.yaml
@@ -138,6 +138,9 @@ ExpVars:
num_idle_kv_ops:
type: integer
description: "The total number of idle kv operations."
+ num_idle_query_ops:
+ type: integer
+ description: "The total number of idle query operations."
process_cpu_percent_utilization:
type: number
format: float
@@ -1301,7 +1304,10 @@ Database:
type: object
properties:
size:
- description: The maximum number of revisions that can be stored in the revision cache.
+ description: |-
+ The maximum number of revisions that can be stored in the revision cache.
+ Note: when running with a shard count greater than 1, 10% extra capacity is added overall to avoid early eviction when some shards fill before others, so
+ the capacity stat (revision_cache_num_items) may climb to the configured revision cache size plus 10%.
type: string
default: 5000
max_memory_count_mb:
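
To make the 10% note above concrete, a tiny sketch of the implied arithmetic (the helper name is hypothetical; the 5000 default and 10% allowance come from the description):

package main

import "fmt"

// effectiveRevCacheCapacity sketches the behaviour described above: with
// more than one shard, overall capacity gets a 10% allowance, so
// revision_cache_num_items can exceed the configured size.
func effectiveRevCacheCapacity(configuredSize, shardCount int) int {
	if shardCount <= 1 {
		return configuredSize
	}
	return configuredSize + configuredSize/10
}

func main() {
	fmt.Println(effectiveRevCacheCapacity(5000, 1))  // 5000
	fmt.Println(effectiveRevCacheCapacity(5000, 16)) // 5500
}
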
diff --git a/go.mod b/go.mod
index aee8f8419e..1e3dccedc4 100644
--- a/go.mod
+++ b/go.mod
@@ -4,8 +4,8 @@ go 1.23
require (
dario.cat/mergo v1.0.0
- github.com/KimMachineGun/automemlimit v0.6.1
- github.com/coreos/go-oidc/v3 v3.11.0
+ github.com/KimMachineGun/automemlimit v0.7.0
+ github.com/coreos/go-oidc/v3 v3.12.0
github.com/couchbase/cbgt v1.4.2-0.20241112001929-b9fdd9b009b1
github.com/couchbase/clog v0.1.0
github.com/couchbase/go-blip v0.0.0-20241014144256-13a798c348fd
@@ -34,19 +34,16 @@ require (
github.com/shirou/gopsutil v3.21.11+incompatible
github.com/shirou/gopsutil/v3 v3.24.5
github.com/stretchr/testify v1.10.0
- golang.org/x/crypto v0.30.0
+ golang.org/x/crypto v0.32.0
golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa
- golang.org/x/net v0.32.0
- golang.org/x/oauth2 v0.24.0
+ golang.org/x/net v0.34.0
+ golang.org/x/oauth2 v0.25.0
gopkg.in/natefinch/lumberjack.v2 v2.2.1
)
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
- github.com/cilium/ebpf v0.9.1 // indirect
- github.com/containerd/cgroups/v3 v3.0.1 // indirect
- github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/couchbase/blance v0.1.6 // indirect
github.com/couchbase/cbauth v0.1.12 // indirect
github.com/couchbase/go-couchbase v0.1.1 // indirect
@@ -58,11 +55,9 @@ require (
github.com/couchbase/tools-common/types v1.1.4 // indirect
github.com/couchbaselabs/gocbconnstr/v2 v2.0.0-20240607131231-fb385523de28 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/docker/go-units v0.4.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
- github.com/godbus/dbus/v5 v5.0.4 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect
@@ -72,7 +67,6 @@ require (
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
- github.com/opencontainers/runtime-spec v1.0.2 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
@@ -81,7 +75,6 @@ require (
github.com/rogpeppe/go-internal v1.11.0 // indirect
github.com/sergi/go-diff v1.2.0 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
- github.com/sirupsen/logrus v1.8.1 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
@@ -93,7 +86,7 @@ require (
go.opentelemetry.io/otel/trace v1.24.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
- golang.org/x/sys v0.28.0 // indirect
+ golang.org/x/sys v0.29.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.5.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect
diff --git a/go.sum b/go.sum
index ffcc07d568..22f08c240e 100644
--- a/go.sum
+++ b/go.sum
@@ -8,8 +8,8 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNic
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 h1:AMf7YbZOZIW5b66cXNHMWWT/zkjhz5+a+k/3x40EO7E=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1/go.mod h1:uwfk06ZBcvL/g4VHNjurPfVln9NMbsk2XIZxJ+hu81k=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/KimMachineGun/automemlimit v0.6.1 h1:ILa9j1onAAMadBsyyUJv5cack8Y1WT26yLj/V+ulKp8=
-github.com/KimMachineGun/automemlimit v0.6.1/go.mod h1:T7xYht7B8r6AG/AqFcUdc7fzd2bIdBKmepfP2S1svPY=
+github.com/KimMachineGun/automemlimit v0.7.0 h1:7G06p/dMSf7G8E6oq+f2uOPuVncFyIlDI/pBWK49u88=
+github.com/KimMachineGun/automemlimit v0.7.0/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
@@ -24,16 +24,10 @@ github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwys
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
-github.com/cilium/ebpf v0.9.1 h1:64sn2K3UKw8NbP/blsixRpF3nXuyhz/VjRlRzvlBRu4=
-github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/containerd/cgroups/v3 v3.0.1 h1:4hfGvu8rfGIwVIDd+nLzn/B9ZXx4BcCjzt5ToenJRaE=
-github.com/containerd/cgroups/v3 v3.0.1/go.mod h1:/vtwk1VXrtoa5AaZLkypuOJgA/6DyPMZHJPGQNtlHnw=
-github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI=
-github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0=
-github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
-github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/go-oidc/v3 v3.12.0 h1:sJk+8G2qq94rDI6ehZ71Bol3oUHy63qNYmkiSjrc/Jo=
+github.com/coreos/go-oidc/v3 v3.12.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0=
github.com/couchbase/blance v0.1.6 h1:zyNew/SN2AheIoJxQ2LqqA1u3bMp03eGCer6hSDMUDs=
github.com/couchbase/blance v0.1.6/go.mod h1:2Sa/nsJSieN/r3T9LsrUYWeQ015qDsuHybhz4F4JcHU=
github.com/couchbase/cbauth v0.1.12 h1:JOAWjjp2BdubvrrggvN4yQo3oEc2ndXcRN1ONCklUOM=
@@ -81,8 +75,6 @@ github.com/couchbaselabs/rosmar v0.0.0-20250226134616-3b9ac157a3cd/go.mod h1:suZ
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
-github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo=
github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -91,8 +83,6 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
-github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss=
-github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E=
github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
@@ -108,8 +98,6 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
-github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
@@ -164,8 +152,6 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=
-github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
@@ -207,8 +193,6 @@ github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
-github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@@ -256,8 +240,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY=
-golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
+golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
+golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI=
golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ=
@@ -275,11 +259,11 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI=
-golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs=
+golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
+golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE=
-golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
+golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -291,7 +275,6 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -300,8 +283,8 @@ golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
-golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
+golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
diff --git a/manifest/3.2.xml b/manifest/3.2.xml
index f918f0c776..4413af8422 100644
--- a/manifest/3.2.xml
+++ b/manifest/3.2.xml
@@ -18,13 +18,13 @@ licenses/APL2.txt.
-
+
-
+
diff --git a/manifest/3.2/3.2.1.xml b/manifest/3.2/3.2.1.xml
new file mode 100644
index 0000000000..2d8983347c
--- /dev/null
+++ b/manifest/3.2/3.2.1.xml
@@ -0,0 +1,30 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/manifest/3.2/3.2.2.xml b/manifest/3.2/3.2.2.xml
new file mode 100644
index 0000000000..0fde177dd2
--- /dev/null
+++ b/manifest/3.2/3.2.2.xml
@@ -0,0 +1,30 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/manifest/product-config.json b/manifest/product-config.json
index 6cc3c550df..1d36565700 100644
--- a/manifest/product-config.json
+++ b/manifest/product-config.json
@@ -617,10 +617,29 @@
"trigger_blackduck": true,
"start_build": 515
},
- "manifest/3.2.xml": {
+ "manifest/3.2/3.2.1.xml": {
+ "do-build": false,
"release": "3.2.1",
"release_name": "Couchbase Sync Gateway 3.2.1",
"production": true,
+ "interval": 1440,
+ "go_version": "1.22.5",
+ "trigger_blackduck": true,
+ "start_build": 16
+ },
+ "manifest/3.2/3.2.2.xml": {
+ "release": "3.2.2",
+ "release_name": "Couchbase Sync Gateway 3.2.2",
+ "production": true,
+ "interval": 120,
+ "go_version": "1.22.9",
+ "trigger_blackduck": true,
+ "start_build": 14
+ },
+ "manifest/3.2.xml": {
+ "release": "3.2.3",
+ "release_name": "Couchbase Sync Gateway 3.2.3",
+ "production": true,
"interval": 120,
"go_version": "1.22.9",
"trigger_blackduck": true,
diff --git a/rest/adminapitest/admin_api_test.go b/rest/adminapitest/admin_api_test.go
index fb8ff2d0b0..1c9773f793 100644
--- a/rest/adminapitest/admin_api_test.go
+++ b/rest/adminapitest/admin_api_test.go
@@ -728,6 +728,12 @@ func TestDCPResyncCollectionsStatus(t *testing.T) {
_, ok := (rt.GetDatabase().ResyncManager.Process).(*db.ResyncManagerDCP)
require.True(t, ok)
+ // create documents in DB to cause resync to take a few seconds
+ for i := 0; i < 1000; i++ {
+ resp := rt.SendAdminRequest(http.MethodPut, "/{{.keyspace1}}/"+fmt.Sprint(i), `{"value":1}`)
+ rest.RequireStatus(t, resp, http.StatusCreated)
+ }
+
rt.TakeDbOffline()
if !testCase.specifyCollection {
@@ -738,9 +744,10 @@ func TestDCPResyncCollectionsStatus(t *testing.T) {
resp := rt.SendAdminRequest("POST", "/db/_resync?action=start", payload)
rest.RequireStatus(t, resp, http.StatusOK)
}
+ statusResponse := rt.WaitForResyncDCPStatus(db.BackgroundProcessStateRunning)
+ assert.ElementsMatch(t, statusResponse.CollectionsProcessing[scopeName], testCase.expectedResult[scopeName])
- statusResponse := rt.WaitForResyncDCPStatus(db.BackgroundProcessStateCompleted)
-
+ statusResponse = rt.WaitForResyncDCPStatus(db.BackgroundProcessStateCompleted)
assert.ElementsMatch(t, statusResponse.CollectionsProcessing[scopeName], testCase.expectedResult[scopeName])
})
}
@@ -4166,7 +4173,7 @@ func TestTombstoneCompactionPurgeInterval(t *testing.T) {
// Start compact to modify purge interval
database, _ := db.GetDatabase(dbc, nil)
- _, err = database.Compact(ctx, false, func(purgedDocCount *int) {}, base.NewSafeTerminator())
+ _, err = database.Compact(ctx, false, nil, base.NewSafeTerminator(), false)
require.NoError(t, err)
assert.EqualValues(t, test.expectedPurgeIntervalAfterCompact, dbc.GetMetadataPurgeInterval(ctx))
diff --git a/rest/api_collections_test.go b/rest/api_collections_test.go
index 8c2d301aa0..9b5a0287c5 100644
--- a/rest/api_collections_test.go
+++ b/rest/api_collections_test.go
@@ -1001,6 +1001,8 @@ func TestRuntimeConfigUpdateAfterConfigUpdateConflict(t *testing.T) {
delete(scopesConfig[scope].Collections, collection1)
assert.Equal(t, scopesConfig, dbCfg.Scopes)
originalDBCfg.Server = nil
+ dbCfg.UpdatedAt = nil // originalDBCfg was fetched from memory, so it has no created/updated timestamps
+ dbCfg.CreatedAt = nil
assert.Equal(t, originalDBCfg, dbCfg)
// now assert that _config shows the same
diff --git a/rest/api_test.go b/rest/api_test.go
index cc420c5dfe..fe11e5737e 100644
--- a/rest/api_test.go
+++ b/rest/api_test.go
@@ -3071,3 +3071,32 @@ func TestAllDbs(t *testing.T) {
RequireStatus(t, resp, http.StatusOK)
require.Equal(t, fmt.Sprintf(`[{"db_name":"%s","bucket":"%s","state":"Online"}]`, rt.GetDatabase().Name, rt.GetDatabase().Bucket.GetName()), resp.Body.String())
}
+
+// TestBufferFlush verifies that the http.ResponseWriter implements the http.Flusher interface and that the continuous changes response is flushed
+func TestBufferFlush(t *testing.T) {
+ rt := NewRestTester(t, &RestTesterConfig{
+ SyncFn: channels.DocChannelsSyncFunction,
+ })
+ defer rt.Close()
+ ctx := base.TestCtx(t)
+
+ a := rt.ServerContext().Database(ctx, "db").Authenticator(ctx)
+
+ // Create a test user
+ user, err := a.NewUser("foo", "letmein", channels.BaseSetOf(t, "foo"))
+ require.NoError(t, err)
+ require.NoError(t, a.Save(user))
+
+ var wg sync.WaitGroup
+ var resp *TestResponse
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ resp = rt.SendUserRequest(http.MethodGet, "/{{.keyspace}}/_changes?feed=continuous&since=0&timeout=500&include_docs=true", "", "foo")
+ RequireStatus(t, resp, http.StatusOK)
+ }()
+ wg.Wait()
+
+ // assert that the response is a flushed response
+ assert.True(t, resp.Flushed)
+}
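
TestBufferFlush exercises a standard net/http pattern rather than anything Sync Gateway specific: a streaming handler type-asserts its ResponseWriter to http.Flusher and flushes after each write. A generic sketch, using httptest's recorder (which records whether a flush happened, much like the test's assertion on resp.Flushed):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// streamHandler flushes after each write so a continuous feed reaches the
// client before the handler returns; without the Flusher assertion the
// writes would just sit in the response buffer.
func streamHandler(w http.ResponseWriter, _ *http.Request) {
	for i := 0; i < 3; i++ {
		fmt.Fprintf(w, "tick %d\n", i)
		if flusher, ok := w.(http.Flusher); ok {
			flusher.Flush()
		}
	}
}

func main() {
	rec := httptest.NewRecorder() // ResponseRecorder implements http.Flusher
	streamHandler(rec, httptest.NewRequest(http.MethodGet, "/stream", nil))
	fmt.Println("flushed:", rec.Flushed) // flushed: true
}
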
diff --git a/rest/attachment_test.go b/rest/attachment_test.go
index 47fea13798..0fd2258790 100644
--- a/rest/attachment_test.go
+++ b/rest/attachment_test.go
@@ -2411,11 +2411,10 @@ func TestPushUnknownAttachmentAsStub(t *testing.T) {
// force attachment into test client's store to validate it's fetched
attachmentAData := base64.StdEncoding.EncodeToString([]byte("attachmentA"))
- contentType := "text/plain"
length, digest := btcRunner.saveAttachment(btc.id, attachmentAData)
// Update doc1, include reference to non-existing attachment with recent revpos
- doc1Version = btcRunner.AddRev(btc.id, doc1ID, &doc1Version, []byte(fmt.Sprintf(`{"key": "val", "_attachments":{"attachment":{"digest":"%s","length":%d,"content_type":"%s","stub":true,"revpos":1}}}`, digest, length, contentType)))
+ doc1Version = btcRunner.AddRev(btc.id, doc1ID, &doc1Version, []byte(fmt.Sprintf(`{"key": "val", "_attachments":{"attachment":{"digest":"%s","length":%d,"stub":true,"revpos":1}}}`, digest, length)))
rt.WaitForVersion(doc1ID, doc1Version)
@@ -2516,12 +2515,10 @@ func TestAttachmentWithErroneousRevPos(t *testing.T) {
btcRunner.AttachmentsLock(btc.id).Unlock()
// Put doc with an erroneous revpos 1 but with a different digest, referring to the above attachment
- t.Skip("Skip until CBG-4400 is fixed")
- _, err := btcRunner.PushRevWithHistory(btc.id, docID, &version, []byte(`{"_attachments": {"hello.txt": {"revpos":1,"stub":true,"length": 19,"digest":"sha1-l+N7VpXGnoxMm8xfvtWPbz2YvDc="}}}`), 1, 0)
+ updatedVersion, err := btcRunner.PushRevWithHistory(btc.id, docID, &version, []byte(`{"_attachments": {"hello.txt": {"revpos":1,"stub":true,"length": 19,"digest":"sha1-l+N7VpXGnoxMm8xfvtWPbz2YvDc="}}}`), 1, 0)
require.NoError(t, err)
- // Ensure message and attachment is pushed up
- btc.pushReplication.WaitForMessage(2)
+ rt.WaitForVersion(docID, *updatedVersion)
// Get the attachment and ensure the data is updated
resp := rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/doc/hello.txt", "")
@@ -2685,24 +2682,24 @@ func TestCBLRevposHandling(t *testing.T) {
defer btc.Close()
startingBody := db.Body{"foo": "bar"}
- doc1Version := rt.PutDocDirectly(doc1ID, startingBody)
- doc2Version := rt.PutDocDirectly(doc2ID, startingBody)
+ doc1Version1 := rt.PutDocDirectly(doc1ID, startingBody)
+ doc2Version1 := rt.PutDocDirectly(doc2ID, startingBody)
rt.WaitForPendingChanges()
btcRunner.StartOneshotPull(btc.id)
- btcRunner.WaitForVersion(btc.id, doc1ID, doc1Version)
- btcRunner.WaitForVersion(btc.id, doc2ID, doc2Version)
+ btcRunner.WaitForVersion(btc.id, doc1ID, doc1Version1)
+ btcRunner.WaitForVersion(btc.id, doc2ID, doc2Version1)
btcRunner.StartPush(btc.id)
attachmentAData := base64.StdEncoding.EncodeToString([]byte("attachmentA"))
attachmentBData := base64.StdEncoding.EncodeToString([]byte("attachmentB"))
- doc1Version = btcRunner.AddRev(btc.id, doc1ID, &doc1Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentAData+`"}}}`))
- doc2Version = btcRunner.AddRev(btc.id, doc2ID, &doc2Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentBData+`"}}}`))
+ doc1Version2 := btcRunner.AddRev(btc.id, doc1ID, &doc1Version1, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentAData+`"}}}`))
+ doc2Version2 := btcRunner.AddRev(btc.id, doc2ID, &doc2Version1, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentBData+`"}}}`))
- rt.WaitForVersion(doc1ID, doc1Version)
- rt.WaitForVersion(doc2ID, doc2Version)
+ rt.WaitForVersion(doc1ID, doc1Version2)
+ rt.WaitForVersion(doc2ID, doc2Version2)
collection, ctx := rt.GetSingleTestDatabaseCollection()
_, err := collection.GetDocument(ctx, "doc1", db.DocUnmarshalAll)
@@ -2711,14 +2708,12 @@ func TestCBLRevposHandling(t *testing.T) {
require.NoError(t, err)
// Update doc1, don't change attachment, use correct revpos
- doc1Version = btcRunner.AddRev(btc.id, doc1ID, &doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-wzp8ZyykdEuZ9GuqmxQ7XDrY7Co=","length":11,"content_type":"","stub":true,"revpos":2}}}`))
-
- rt.WaitForVersion(doc1ID, doc1Version)
+ doc1Version3 := btcRunner.AddRev(btc.id, doc1ID, &doc1Version2, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-wzp8ZyykdEuZ9GuqmxQ7XDrY7Co=","length":11,"content_type":"","stub":true,"revpos":2}}}`))
+ btc.rt.WaitForVersion(doc1ID, doc1Version3)
// Update doc1, don't change attachment, use revpos=generation of revid, as CBL 2.x does. Should not proveAttachment on digest match.
- doc1Version = btcRunner.AddRev(btc.id, doc1ID, &doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-wzp8ZyykdEuZ9GuqmxQ7XDrY7Co=","length":11,"content_type":"","stub":true,"revpos":4}}}`))
-
- rt.WaitForVersion(doc1ID, doc1Version)
+ doc1Version4 := btcRunner.AddRev(btc.id, doc1ID, &doc1Version3, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-wzp8ZyykdEuZ9GuqmxQ7XDrY7Co=","length":11,"content_type":"","stub":true,"revpos":4}}}`))
+ rt.WaitForVersion(doc1ID, doc1Version4)
// Validate attachment exists
attResponse := rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "")
@@ -2727,9 +2722,8 @@ func TestCBLRevposHandling(t *testing.T) {
attachmentPushCount := rt.GetDatabase().DbStats.CBLReplicationPushStats.AttachmentPushCount.Value()
// Update doc1, change attachment digest with CBL revpos=generation. Should getAttachment
- doc1Version = btcRunner.AddRev(btc.id, doc1ID, &doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-SKk0IV40XSHW37d3H0xpv2+z9Ck=","length":11,"content_type":"","stub":true,"revpos":5}}}`))
-
- rt.WaitForVersion(doc1ID, doc1Version)
+ doc1Version5 := btcRunner.AddRev(btc.id, doc1ID, &doc1Version4, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-SKk0IV40XSHW37d3H0xpv2+z9Ck=","length":11,"content_type":"","stub":true,"revpos":5}}}`))
+ rt.WaitForVersion(doc1ID, doc1Version5)
// Validate attachment exists and is updated
attResponse = rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "")
diff --git a/rest/blip_api_attachment_test.go b/rest/blip_api_attachment_test.go
index e1bcbab56b..79d7f28145 100644
--- a/rest/blip_api_attachment_test.go
+++ b/rest/blip_api_attachment_test.go
@@ -63,21 +63,18 @@ func TestBlipPushPullV2AttachmentV2Client(t *testing.T) {
// Create doc revision with attachment on SG.
bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}`
- version := btc.rt.PutDoc(docID, bodyText)
+ version1 := btc.rt.PutDoc(docID, bodyText)
- data := btcRunner.WaitForVersion(btc.id, docID, version)
+ data := btcRunner.WaitForVersion(btc.id, docID, version1)
bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}`
require.JSONEq(t, bodyTextExpected, string(data))
// Update the replicated doc at client along with keeping the same attachment stub.
bodyText = `{"greetings":[{"hi":"bob"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}`
- version = btcRunner.AddRev(btc.id, docID, &version, []byte(bodyText))
+ version2 := btcRunner.AddRev(btc.id, docID, &version1, []byte(bodyText))
- // TODO: Replace with rt.WaitForVersion
- // Wait for the document to be replicated at SG
- btc.pushReplication.WaitForMessage(2)
-
- respBody := btc.rt.GetDocVersion(docID, version)
+ rt.WaitForVersion(docID, version2)
+ respBody := btc.rt.GetDocVersion(docID, version2)
assert.Equal(t, docID, respBody[db.BodyId])
greetings := respBody["greetings"].([]interface{})
@@ -135,20 +132,19 @@ func TestBlipPushPullV2AttachmentV3Client(t *testing.T) {
// Create doc revision with attachment on SG.
bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}`
- version := btc.rt.PutDoc(docID, bodyText)
+ version1 := btc.rt.PutDoc(docID, bodyText)
- data := btcRunner.WaitForVersion(btc.id, docID, version)
+ data := btcRunner.WaitForVersion(btc.id, docID, version1)
bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}`
require.JSONEq(t, bodyTextExpected, string(data))
// Update the replicated doc at client along with keeping the same attachment stub.
bodyText = `{"greetings":[{"hi":"bob"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}`
- version = btcRunner.AddRev(btc.id, docID, &version, []byte(bodyText))
+ version2 := btcRunner.AddRev(btc.id, docID, &version1, []byte(bodyText))
- // Wait for the document to be replicated at SG
- btc.pushReplication.WaitForMessage(2)
+ rt.WaitForVersion(docID, version2)
- respBody := btc.rt.GetDocVersion(docID, version)
+ respBody := btc.rt.GetDocVersion(docID, version2)
assert.Equal(t, docID, respBody[db.BodyId])
greetings := respBody["greetings"].([]interface{})
@@ -269,7 +265,6 @@ func TestBlipProveAttachmentV2Push(t *testing.T) {
// create doc2 now that we know the server has the attachment - SG should still request the attachment data from the client.
doc2Body := fmt.Sprintf(`{"greetings":[{"howdy": "bob"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64)
doc2Version := btcRunner.AddRev(btc.id, doc2ID, nil, []byte(doc2Body))
-
btc.rt.WaitForVersion(doc2ID, doc2Version)
assert.Equal(t, int64(2), btc.rt.GetDatabase().DbStats.CBLReplicationPush().DocPushCount.Value())
@@ -299,7 +294,6 @@ func TestBlipPushPullNewAttachmentCommonAncestor(t *testing.T) {
btcRunner.StartPush(btc.id)
docVersion := btcRunner.AddRev(btc.id, docID, nil, []byte(`{"greetings":[{"hi": "alice"}]}`))
-
docVersion = btcRunner.AddRev(btc.id, docID, &docVersion, []byte(`{"greetings":[{"hi": "bob"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}`))
// Wait for the documents to be replicated at SG
@@ -367,7 +361,6 @@ func TestBlipPushPullNewAttachmentNoCommonAncestor(t *testing.T) {
// CBL replicates, sends to client as 4-abc history:[4-abc, 3-abc, 2-abc], attachment has revpos=2
bodyText := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}`
rev := NewDocVersionFromFakeRev("2-abc")
- // FIXME CBG-4400: docID: doc1 was not found on the client - expecting to update doc based on parentVersion RevID: 2-abc
_ = btcRunner.AddRev(btc.id, docID, &rev, []byte(bodyText))
bodyText = `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":2,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}`
@@ -536,8 +529,7 @@ func TestBlipAttachNameChange(t *testing.T) {
digest := db.Sha1DigestKey(attachmentA)
// Push initial attachment data
- version := btcRunner.AddRev(client1.id, docID, EmptyDocVersion(), []byte(`{"key":"val","_attachments":{"attachment": {"data":"`+attachmentAData+`"}}}`))
-
+ version := btcRunner.AddRev(client1.id, "doc", EmptyDocVersion(), []byte(`{"key":"val","_attachments":{"attachment": {"data":"`+attachmentAData+`"}}}`))
rt.WaitForVersion("doc", version)
// Confirm attachment is in the bucket
@@ -548,8 +540,8 @@ func TestBlipAttachNameChange(t *testing.T) {
// Simulate changing only the attachment name over CBL
// Use revpos 2 to simulate revpos bug in CBL 2.8 - 3.0.0
- version = btcRunner.AddRev(client1.id, docID, &version, []byte(`{"key":"val","_attachments":{"attach":{"revpos":2,"content_type":"","length":11,"stub":true,"digest":"`+digest+`"}}}`))
- client1.rt.WaitForVersion(docID, version)
+ version = btcRunner.AddRev(client1.id, "doc", &version, []byte(`{"key":"val","_attachments":{"attach":{"revpos":2,"content_type":"","length":11,"stub":true,"digest":"`+digest+`"}}}`))
+ client1.rt.WaitForVersion("doc", version)
// Check if attachment is still in bucket
bucketAttachmentA, _, err = client1.rt.GetSingleDataStore().GetRaw(attachmentAKey)
@@ -594,7 +586,6 @@ func TestBlipLegacyAttachNameChange(t *testing.T) {
// Store the document and attachment on the test client
_ = btcRunner.AddRev(client1.id, docID, &docVersion, rawDoc)
- // FIXME CBG-4400: docID: doc was not found on the client - expecting to update doc based on parentVersion RevID: 1-5fc93bd36377008f96fdae2719c174ed
btcRunner.AttachmentsLock(client1.id).Lock()
btcRunner.Attachments(client1.id)[digest] = attBody
@@ -653,7 +644,6 @@ func TestBlipLegacyAttachDocUpdate(t *testing.T) {
version, _ := client1.rt.GetDoc(docID)
// Store the document and attachment on the test client
- // FIXME CBG-4400: docID: doc was not found on the client - expecting to update doc based on parentVersion RevID: 1-5fc93bd36377008f96fdae2719c174ed
_ = btcRunner.AddRev(client1.id, docID, &version, rawDoc)
btcRunner.AttachmentsLock(client1.id).Lock()
btcRunner.Attachments(client1.id)[digest] = attBody
@@ -668,7 +658,6 @@ func TestBlipLegacyAttachDocUpdate(t *testing.T) {
// Update the document, leaving body intact
version = btcRunner.AddRev(client1.id, "doc", &version, []byte(`{"key":"val1","_attachments":{"`+attName+`":{"revpos":2,"content_type":"text/plain","length":2,"stub":true,"digest":"`+digest+`"}}}`))
-
client1.rt.WaitForVersion("doc", version)
resp := client1.rt.SendAdminRequest("GET", fmt.Sprintf("/{{.keyspace}}/doc/%s", attName), "")
diff --git a/rest/blip_api_crud_test.go b/rest/blip_api_crud_test.go
index 9a42b3384f..fbef9c139b 100644
--- a/rest/blip_api_crud_test.go
+++ b/rest/blip_api_crud_test.go
@@ -11,6 +11,7 @@ licenses/APL2.txt.
package rest
import (
+ "context"
"encoding/base64"
"encoding/json"
"fmt"
@@ -1205,7 +1206,7 @@ func TestBlipSendConcurrentRevs(t *testing.T) {
concurrentSendRevNum = 50
)
rt := NewRestTester(t, &RestTesterConfig{
- leakyBucketConfig: &base.LeakyBucketConfig{
+ LeakyBucketConfig: &base.LeakyBucketConfig{
UpdateCallback: func(_ string) {
time.Sleep(time.Millisecond * 5) // slow down rosmar - it's too quick to be throttled
},
@@ -2993,7 +2994,6 @@ func TestUnsubChanges(t *testing.T) {
activeReplStat := rt.GetDatabase().DbStats.CBLReplicationPull().NumPullReplActiveContinuous
require.EqualValues(t, 1, activeReplStat.Value())
- // Unsub changes
btcRunner.UnsubPullChanges(btc.id)
// Wait for unsub changes to stop the sub changes being sent before sending document up
base.RequireWaitForStat(t, activeReplStat.Value, 0)
@@ -3468,3 +3468,44 @@ func TestBlipMergeVersions(t *testing.T) {
}}, *doc.HLV)
})
}
+
+// Starts a continuous pull replication, then forces channel queries to error so the changes feed exits and the BLIP context closes.
+func TestChangesFeedExitDisconnect(t *testing.T) {
+
+ base.SetUpTestLogging(t, base.LevelInfo, base.KeyHTTP, base.KeySync, base.KeySyncMsg, base.KeyChanges, base.KeyCache)
+ btcRunner := NewBlipTesterClientRunner(t)
+ btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) {
+ var shouldChannelQueryError atomic.Bool
+ rt := NewRestTester(t, &RestTesterConfig{
+ LeakyBucketConfig: &base.LeakyBucketConfig{
+ QueryCallback: func(ddoc, viewname string, params map[string]any) error {
+ if viewname == "channels" && shouldChannelQueryError.Load() {
+ return gocb.ErrTimeout
+ }
+ return nil
+ },
+ N1QLQueryCallback: func(_ context.Context, statement string, params map[string]any, consistency base.ConsistencyMode, adhoc bool) error {
+ if strings.Contains(statement, "sg_channels") && shouldChannelQueryError.Load() {
+ return gocb.ErrTimeout
+ }
+ return nil
+ },
+ },
+ })
+ defer rt.Close()
+ const username = "alice"
+ rt.CreateUser(username, []string{"*"})
+ btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{Username: username})
+ var blipContextClosed atomic.Bool
+ btcRunner.clients[btc.id].pullReplication.bt.blipContext.OnExitCallback = func() {
+ blipContextClosed.Store(true)
+ }
+
+ shouldChannelQueryError.Store(true)
+ btcRunner.StartPull(btc.id)
+
+ require.EventuallyWithT(t, func(c *assert.CollectT) {
+ assert.True(c, blipContextClosed.Load())
+ }, time.Second*10, time.Millisecond*100)
+ })
+}
diff --git a/rest/blip_api_delta_sync_test.go b/rest/blip_api_delta_sync_test.go
index 7c5ca79f04..7e6513f33b 100644
--- a/rest/blip_api_delta_sync_test.go
+++ b/rest/blip_api_delta_sync_test.go
@@ -130,20 +130,17 @@ func TestBlipDeltaSyncPushPullNewAttachment(t *testing.T) {
// Create doc1 rev 1-77d9041e49931ceef58a1eef5fd032e8 on SG with an attachment
bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}`
+ // the doc must be written with PutDocDirectly here (server-side, not via the blip client)
- version := rt.PutDocDirectly(docID, JsonToMap(t, bodyText))
- data := btcRunner.WaitForVersion(btc.id, docID, version)
-
+ version1 := rt.PutDocDirectly(docID, JsonToMap(t, bodyText))
+ data := btcRunner.WaitForVersion(btc.id, docID, version1)
bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}`
require.JSONEq(t, bodyTextExpected, string(data))
// Update the replicated doc at client by adding another attachment.
bodyText = `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="},"world.txt":{"data":"bGVsbG8gd29ybGQ="}}}`
- version = btcRunner.AddRev(btc.id, docID, &version, []byte(bodyText))
-
- // Wait for the document to be replicated at SG
- btc.pushReplication.WaitForMessage(2)
+ version2 := btcRunner.AddRev(btc.id, docID, &version1, []byte(bodyText))
- respBody := rt.GetDocVersion(docID, version)
+ rt.WaitForVersion(docID, version2)
+ respBody := rt.GetDocVersion(docID, version2)
assert.Equal(t, docID, respBody[db.BodyId])
greetings := respBody["greetings"].([]interface{})
@@ -890,6 +887,8 @@ func TestBlipDeltaSyncPush(t *testing.T) {
assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody))
assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`, string(msgBody))
}
+ // wait for response body, indicating rev was written to server
+ _ = msg.Response()
respBody := rt.GetDocVersion(docID, newRev)
assert.Equal(t, "doc1", respBody[db.BodyId])
@@ -963,25 +962,33 @@ func TestBlipNonDeltaSyncPush(t *testing.T) {
btcRunner.StartPull(client.id)
btcRunner.StartPush(client.id)
+ rawBody := `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`
+ var body db.Body
+ require.NoError(t, base.JSONUnmarshal([]byte(rawBody), &body))
// create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4
- version := rt.PutDocDirectly(docID, JsonToMap(t, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`))
+ version1 := rt.PutDocDirectly(docID, body)
- data := btcRunner.WaitForVersion(client.id, docID, version)
- assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data))
+ data := btcRunner.WaitForVersion(client.id, docID, version1)
+ assert.Equal(t, rawBody, string(data))
// create doc1 rev 2-abcxyz on client
- newRev := btcRunner.AddRev(client.id, docID, &version, []byte(`{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`))
// Check EE is delta, and CE is full-body replication
+ version2 := btcRunner.AddRev(client.id, docID, &version1, []byte(`{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`))
+ // MSG1: proposeChanges
+ // MSG2: rev
msg := client.waitForReplicationMessage(collection, 2)
+ require.Equal(t, db.MessageRev, msg.Profile())
+
+ // wait for the reply, indicating the message was written
+ _ = msg.Response()
// Check the request was NOT sent with a deltaSrc property
assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc])
// Check the request body was NOT the delta
msgBody, err := msg.Body()
assert.NoError(t, err)
- assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody))
assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`, string(msgBody))
- body := rt.GetDocVersion("doc1", newRev)
- require.Equal(t, "bob", body["greetings"].([]interface{})[2].(map[string]interface{})["howdy"])
+ fullBody := rt.GetDocVersion("doc1", version2)
+ require.Equal(t, "bob", fullBody["greetings"].([]interface{})[2].(map[string]interface{})["howdy"])
})
}
diff --git a/rest/blip_api_no_race_test.go b/rest/blip_api_no_race_test.go
index 72ec35c6d6..38a0e05267 100644
--- a/rest/blip_api_no_race_test.go
+++ b/rest/blip_api_no_race_test.go
@@ -69,7 +69,7 @@ func TestBlipPusherUpdateDatabase(t *testing.T) {
go func() {
for i := 0; shouldCreateDocs.IsTrue(); i++ {
// this will begin to error when the database is reloaded underneath the replication
- _ = btcRunner.AddRev(client.id, fmt.Sprintf("doc%d", i), EmptyDocVersion(), []byte(fmt.Sprintf(`{"i":%d}`, i)))
+ btcRunner.AddRev(client.id, fmt.Sprintf("doc%d", i), EmptyDocVersion(), []byte(fmt.Sprintf(`{"i":%d}`, i)))
}
rt.WaitForPendingChanges()
wg.Done()
diff --git a/rest/blip_api_replication_test.go b/rest/blip_api_replication_test.go
index 74372f9ae0..8779bf7db9 100644
--- a/rest/blip_api_replication_test.go
+++ b/rest/blip_api_replication_test.go
@@ -9,7 +9,6 @@
package rest
import (
- "fmt"
"testing"
"github.com/couchbase/sync_gateway/base"
@@ -45,18 +44,15 @@ func TestBlipClientPushAndPullReplication(t *testing.T) {
docBody := db.Body{"greetings": []map[string]interface{}{{"hello": "world!"}, {"hi": "alice"}}}
version := rt.PutDocDirectly(docID, docBody)
- seq := rt.GetDocumentSequence(docID)
-
// wait for doc on client
data := btcRunner.WaitForVersion(client.id, docID, version)
assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data))
// update doc1 on client
- _ = btcRunner.AddRev(client.id, docID, &version, []byte(`{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`))
+ newRev := btcRunner.AddRev(client.id, docID, &version, []byte(`{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`))
// wait for update to arrive on SG
- _, err := rt.WaitForChanges(1, fmt.Sprintf("/{{.keyspace}}/_changes?since=%d", seq), "", true)
- require.NoError(t, err)
+ rt.WaitForVersion(docID, newRev)
body := rt.GetDocBody(docID)
require.Equal(t, "bob", body["greetings"].([]interface{})[2].(map[string]interface{})["howdy"])
diff --git a/rest/blip_channel_filter_test.go b/rest/blip_channel_filter_test.go
new file mode 100644
index 0000000000..077fba63a6
--- /dev/null
+++ b/rest/blip_channel_filter_test.go
@@ -0,0 +1,116 @@
+// Copyright 2024-Present Couchbase, Inc.
+//
+// Use of this software is governed by the Business Source License included
+// in the file licenses/BSL-Couchbase.txt. As of the Change Date specified
+// in that file, in accordance with the Business Source License, use of this
+// software will be governed by the Apache License, Version 2.0, included in
+// the file licenses/APL2.txt.
+
+package rest
+
+import (
+ "fmt"
+ "net/http"
+ "testing"
+
+ "github.com/couchbase/sync_gateway/channels"
+ "github.com/couchbase/sync_gateway/db"
+ "github.com/stretchr/testify/require"
+)
+
+func TestChannelFilterRemovalFromChannel(t *testing.T) {
+ btcRunner := NewBlipTesterClientRunner(t)
+ btcRunner.Run(func(t *testing.T, _ []string) {
+ for _, sendDocWithChannelRemoval := range []bool{true, false} {
+ t.Run(fmt.Sprintf("sendDocWithChannelRemoval=%v", sendDocWithChannelRemoval), func(t *testing.T) {
+ rt := NewRestTester(t, &RestTesterConfig{
+ SyncFn: channels.DocChannelsSyncFunction,
+ PersistentConfig: true,
+ })
+ defer rt.Close()
+
+ dbConfig := rt.NewDbConfig()
+ dbConfig.Unsupported = &db.UnsupportedOptions{
+ BlipSendDocsWithChannelRemoval: sendDocWithChannelRemoval,
+ }
+ rt.CreateDatabase("db", dbConfig)
+ rt.CreateUser("alice", []string{"*"})
+ rt.CreateUser("bob", []string{"A"})
+
+ btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{
+ Username: "alice",
+ Channels: []string{"A"},
+ SendRevocations: false,
+ })
+ defer btc.Close()
+
+ client := btcRunner.SingleCollection(btc.id)
+ const docID = "doc1"
+ version1 := rt.PutDoc("doc1", `{"channels":["A"]}`)
+ rt.WaitForPendingChanges()
+
+ response := rt.SendUserRequest("GET", "/{{.keyspace}}/_changes?since=0&channels=A&include_docs=true", "", "alice")
+ RequireStatus(t, response, http.StatusOK)
+
+ expectedChanges1 := fmt.Sprintf(`
+{
+ "results": [
+ {"seq":1, "id": "_user/alice", "changes":[]},
+ {"seq":3, "id": "doc1", "doc": {"_id": "doc1", "_rev":"%s", "channels": ["A"]}, "changes": [{"rev":"%s"}]}
+ ],
+ "last_seq": "3"
+}`, version1.RevTreeID, version1.RevTreeID)
+ require.JSONEq(t, expectedChanges1, string(response.BodyBytes()))
+
+ client.StartPullSince(BlipTesterPullOptions{Continuous: false, Since: "0", Channels: "A"})
+
+ btcRunner.WaitForVersion(btc.id, docID, version1)
+
+ // remove channel A from doc1
+ version2 := rt.UpdateDoc(docID, version1, `{"channels":["B"]}`)
+ markerDocID := "marker"
+ markerDocVersion := rt.PutDoc(markerDocID, `{"channels":["A"]}`)
+ rt.WaitForPendingChanges()
+
+ // alice will see doc1 rev2 with body
+ response = rt.SendUserRequest("GET", "/{{.keyspace}}/_changes?since=2&channels=A&include_docs=true", "", "alice")
+ RequireStatus(t, response, http.StatusOK)
+
+ aliceExpectedChanges2 := fmt.Sprintf(`
+{
+ "results": [
+ {"seq":4, "id": "%s", "doc": {"_id": "%s", "_rev":"%s", "channels": ["B"]}, "changes": [{"rev":"%s"}]},
+ {"seq":5, "id": "%s", "doc": {"_id": "%s", "_rev":"%s", "channels": ["A"]}, "changes": [{"rev":"%s"}]}
+ ],
+ "last_seq": "5"
+}`, docID, docID, version2.RevTreeID, version2.RevTreeID, markerDocID, markerDocID, markerDocVersion.RevTreeID, markerDocVersion.RevTreeID)
+ require.JSONEq(t, aliceExpectedChanges2, string(response.BodyBytes()))
+
+ client.StartPullSince(BlipTesterPullOptions{Continuous: false, Since: "0", Channels: "A"})
+
+ if sendDocWithChannelRemoval {
+ data := btcRunner.WaitForVersion(btc.id, docID, version2)
+ require.Equal(t, `{"channels":["B"]}`, string(data))
+ } else {
+ client.WaitForVersion(markerDocID, markerDocVersion)
+ doc, _, _ := client.GetDoc(docID)
+ require.Equal(t, `{"channels":["A"]}`, string(doc))
+ }
+
+ // bob will not see doc1
+ response = rt.SendUserRequest("GET", "/{{.keyspace}}/_changes?since=2&channels=A&include_docs=true", "", "bob")
+ RequireStatus(t, response, http.StatusOK)
+
+ bobExpectedChanges2 := fmt.Sprintf(`
+{
+ "results": [
+ {"seq":4, "id": "doc1", "removed":["A"], "doc": {"_id": "doc1", "_rev":"%s", "_removed": true}, "changes": [{"rev":"%s"}]},
+ {"seq":5, "id": "%s", "doc": {"_id": "%s", "_rev":"%s", "channels": ["A"]}, "changes": [{"rev":"%s"}]}
+ ],
+ "last_seq": "5"
+}`, version2.RevTreeID, version2.RevTreeID, markerDocID, markerDocID, markerDocVersion.RevTreeID, markerDocVersion.RevTreeID)
+ require.JSONEq(t, bobExpectedChanges2, string(response.BodyBytes()))
+ })
+ }
+ })
+}
diff --git a/rest/blip_sync.go b/rest/blip_sync.go
index cf4fb0a2a6..7791f98041 100644
--- a/rest/blip_sync.go
+++ b/rest/blip_sync.go
@@ -51,9 +51,11 @@ func (h *handler) handleBLIPSync() error {
// error is checked at the time of database load, and ignored at this time
originPatterns, _ := hostOnlyCORS(h.db.CORS.Origin)
+ cancelCtx, cancelCtxFunc := context.WithCancel(h.db.DatabaseContext.CancelContext)
// Create a BLIP context:
- blipContext, err := db.NewSGBlipContext(h.ctx(), "", originPatterns, h.db.DatabaseContext.CancelContext)
+ blipContext, err := db.NewSGBlipContext(h.ctx(), "", originPatterns, cancelCtx)
if err != nil {
+ cancelCtxFunc()
return err
}
@@ -61,7 +63,10 @@ func (h *handler) handleBLIPSync() error {
h.rqCtx = base.CorrelationIDLogCtx(h.ctx(), base.FormatBlipContextID(blipContext.ID))
h.response.Header().Set(db.BLIPCorrelationIDResponseHeader, blipContext.ID)
// Create a new BlipSyncContext attached to the given blipContext.
- ctx := db.NewBlipSyncContext(h.rqCtx, blipContext, h.db, h.formatSerialNumber(), db.BlipSyncStatsForCBL(h.db.DbStats))
+ ctx, err := db.NewBlipSyncContext(h.rqCtx, blipContext, h.db, h.formatSerialNumber(), db.BlipSyncStatsForCBL(h.db.DbStats), cancelCtxFunc)
+ if err != nil {
+ return err
+ }
defer ctx.Close()
auditFields := base.AuditFields{base.AuditFieldReplicationID: base.FormatBlipContextID(blipContext.ID)}
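
The hunk above follows the usual cancel-func ownership rule: after context.WithCancel, the cancel function must be invoked on every early-error path until some owner (here the BlipSyncContext, which cancels on Close) takes responsibility for it. A generic sketch of that handoff, with hypothetical names:

package main

import (
	"context"
	"errors"
	"fmt"
)

type syncCtx struct{ cancel context.CancelFunc }

// Close cancels the context the owner was handed at construction time.
func (s *syncCtx) Close() { s.cancel() }

// newSyncCtx takes ownership of cancel only on success; on error the
// caller must still invoke cancel itself.
func newSyncCtx(ctx context.Context, cancel context.CancelFunc, fail bool) (*syncCtx, error) {
	if fail {
		return nil, errors.New("constructor failed")
	}
	return &syncCtx{cancel: cancel}, nil
}

func handle(parent context.Context, fail bool) error {
	ctx, cancel := context.WithCancel(parent)
	sc, err := newSyncCtx(ctx, cancel, fail)
	if err != nil {
		cancel() // nothing owns the cancel func yet; call it to avoid a context leak
		return err
	}
	defer sc.Close() // ownership transferred: Close is responsible for cancelling
	return nil
}

func main() {
	fmt.Println(handle(context.Background(), true))  // constructor failed
	fmt.Println(handle(context.Background(), false)) // <nil>
}
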
diff --git a/rest/changestest/changes_api_test.go b/rest/changestest/changes_api_test.go
index e2a0b19898..c7032368f3 100644
--- a/rest/changestest/changes_api_test.go
+++ b/rest/changestest/changes_api_test.go
@@ -3480,6 +3480,20 @@ func TestTombstoneCompaction(t *testing.T) {
t.Skip("If running with no xattrs compact acts as a no-op")
}
+ tests := []struct {
+ numDocs int
+ runAsScheduledBackgroundTask bool
+ }{
+ // Multiples of Batch Size
+ {numDocs: db.QueryTombstoneBatch},
+ {numDocs: db.QueryTombstoneBatch * 4},
+ // Smaller Than Batch Size
+ {numDocs: 2},
+ {numDocs: db.QueryTombstoneBatch / 4},
+ // Larger than Batch Size
+ {numDocs: db.QueryTombstoneBatch + 20},
+ }
+
var rt *rest.RestTester
numCollections := 1
@@ -3490,64 +3504,84 @@ func TestTombstoneCompaction(t *testing.T) {
rt = rest.NewRestTester(t, nil)
}
defer rt.Close()
- zero := time.Duration(0)
- rt.GetDatabase().Options.PurgeInterval = &zero
-
- compactionTotal := 0
- expectedBatches := 0
+ rt.GetDatabase().Options.PurgeInterval = base.Ptr(time.Duration(0))
- TestCompact := func(numDocs int) {
-
- count := 0
+ for _, test := range tests {
+ for _, runAsScheduledBackgroundTask := range []bool{false, true} {
+ t.Run(fmt.Sprintf("numDocs:%d asBackgroundTask:%v", test.numDocs, runAsScheduledBackgroundTask), func(t *testing.T) {
+
+ // seed with tombstones
+ for count := 0; count < test.numDocs; count++ {
+ for _, keyspace := range rt.GetKeyspaces() {
+ response := rt.SendAdminRequest("POST", fmt.Sprintf("/%s/", keyspace), `{"foo":"bar"}`)
+ assert.Equal(t, http.StatusOK, response.Code)
+ var body db.Body
+ err := base.JSONUnmarshal(response.Body.Bytes(), &body)
+ assert.NoError(t, err)
+ revID := body["rev"].(string)
+ docID := body["id"].(string)
+
+ response = rt.SendAdminRequest("DELETE", fmt.Sprintf("/%s/%s?rev=%s", keyspace, docID, revID), "")
+ assert.Equal(t, http.StatusOK, response.Code)
+ }
+ }
- for count < numDocs {
- count++
- for _, keyspace := range rt.GetKeyspaces() {
- response := rt.SendAdminRequest("POST", fmt.Sprintf("/%s/", keyspace), `{"foo":"bar"}`)
- assert.Equal(t, 200, response.Code)
- var body db.Body
- err := base.JSONUnmarshal(response.Body.Bytes(), &body)
- assert.NoError(t, err)
- revId := body["rev"].(string)
- docId := body["id"].(string)
+ expectedCompactions := test.numDocs * numCollections
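+				// compaction fetches tombstones in batches of QueryTombstoneBatch; the final short (possibly empty) batch accounts for the +1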
+ expectedBatches := (test.numDocs/db.QueryTombstoneBatch + 1) * numCollections
- response = rt.SendAdminRequest("DELETE", fmt.Sprintf("/%s/%s?rev=%s", keyspace, docId, revId), "")
- assert.Equal(t, 200, response.Code)
- }
- }
- resp := rt.SendAdminRequest("POST", "/{{.db}}/_compact", "")
- rest.RequireStatus(t, resp, http.StatusOK)
+ numCompactionsBefore := int(rt.GetDatabase().DbStats.Database().NumTombstonesCompacted.Value())
+ var numBatchesBefore int
+ if base.TestsDisableGSI() {
+ numBatchesBefore = int(rt.GetDatabase().DbStats.Query(fmt.Sprintf(base.StatViewFormat, db.DesignDocSyncHousekeeping(), db.ViewTombstones)).QueryCount.Value())
+ } else {
+ numBatchesBefore = int(rt.GetDatabase().DbStats.Query(db.QueryTypeTombstones).QueryCount.Value())
+ }
- err := rt.WaitForCondition(func() bool {
- time.Sleep(1 * time.Second)
- return rt.GetDatabase().TombstoneCompactionManager.GetRunState() == db.BackgroundProcessStateCompleted
- })
- assert.NoError(t, err)
+ numIdleKvOpsBefore := int(base.SyncGatewayStats.GlobalStats.ResourceUtilizationStats().NumIdleKvOps.Value())
+ numIdleQueryOpsBefore := int(base.SyncGatewayStats.GlobalStats.ResourceUtilizationStats().NumIdleQueryOps.Value())
+
+ if runAsScheduledBackgroundTask {
+ database, err := db.CreateDatabase(rt.GetDatabase())
+ require.NoError(t, err)
+ purgedCount, err := database.Compact(base.TestCtx(t), false, nil, base.NewSafeTerminator(), true)
+ require.NoError(t, err)
+ require.Equal(t, expectedCompactions, purgedCount)
+
+ numIdleKvOpsAfter := int(base.SyncGatewayStats.GlobalStats.ResourceUtilizationStats().NumIdleKvOps.Value())
+ numIdleQueryOpsAfter := int(base.SyncGatewayStats.GlobalStats.ResourceUtilizationStats().NumIdleQueryOps.Value())
+
+ // cannot do equal here because there are other idle kv ops unrelated to compaction
+ assert.GreaterOrEqual(t, numIdleKvOpsAfter-numIdleKvOpsBefore, expectedCompactions)
+ assert.Equal(t, numIdleQueryOpsAfter-numIdleQueryOpsBefore, expectedBatches)
+ } else {
+ resp := rt.SendAdminRequest("POST", "/{{.db}}/_compact", "")
+ rest.RequireStatus(t, resp, http.StatusOK)
+ err := rt.WaitForCondition(func() bool {
+ return rt.GetDatabase().TombstoneCompactionManager.GetRunState() == db.BackgroundProcessStateCompleted
+ })
+ assert.NoError(t, err)
+
+ numIdleKvOpsAfter := int(base.SyncGatewayStats.GlobalStats.ResourceUtilizationStats().NumIdleKvOps.Value())
+ numIdleQueryOpsAfter := int(base.SyncGatewayStats.GlobalStats.ResourceUtilizationStats().NumIdleQueryOps.Value())
+
+ // ad-hoc compactions don't invoke idle ops - but we do have other idle kv ops so can't ensure it stays zero
+ assert.GreaterOrEqual(t, numIdleKvOpsAfter-numIdleKvOpsBefore, 0)
+ assert.Equal(t, numIdleQueryOpsAfter-numIdleQueryOpsBefore, 0)
+ }
- compactionTotal += (numDocs * numCollections)
- require.Equal(t, compactionTotal, int(rt.GetDatabase().DbStats.Database().NumTombstonesCompacted.Value()))
+ actualCompactions := int(rt.GetDatabase().DbStats.Database().NumTombstonesCompacted.Value()) - numCompactionsBefore
+ require.Equal(t, expectedCompactions, actualCompactions)
- var actualBatches int64
- if base.TestsDisableGSI() {
- actualBatches = rt.GetDatabase().DbStats.Query(fmt.Sprintf(base.StatViewFormat, db.DesignDocSyncHousekeeping(), db.ViewTombstones)).QueryCount.Value()
- } else {
- actualBatches = rt.GetDatabase().DbStats.Query(db.QueryTypeTombstones).QueryCount.Value()
+ var actualBatches int
+ if base.TestsDisableGSI() {
+ actualBatches = int(rt.GetDatabase().DbStats.Query(fmt.Sprintf(base.StatViewFormat, db.DesignDocSyncHousekeeping(), db.ViewTombstones)).QueryCount.Value()) - numBatchesBefore
+ } else {
+ actualBatches = int(rt.GetDatabase().DbStats.Query(db.QueryTypeTombstones).QueryCount.Value()) - numBatchesBefore
+ }
+ require.Equal(t, expectedBatches, actualBatches)
+ })
}
-
- expectedBatches += (numDocs/db.QueryTombstoneBatch + 1) * numCollections
- require.Equal(t, expectedBatches, int(actualBatches))
}
-
- // Multiples of Batch Size
- TestCompact(db.QueryTombstoneBatch)
- TestCompact(db.QueryTombstoneBatch * 4)
-
- // Smaller Than Batch Size
- TestCompact(2)
- TestCompact(db.QueryTombstoneBatch / 4)
-
- // Larger than Batch Size
- TestCompact(db.QueryTombstoneBatch + 20)
}
// TestOneShotGrantTiming simulates a one-shot changes feed returning before a previously issued grant has been
diff --git a/rest/config.go b/rest/config.go
index 5493e64c88..b8a6562bcc 100644
--- a/rest/config.go
+++ b/rest/config.go
@@ -193,6 +193,8 @@ type DbConfig struct {
ChangesRequestPlus *bool `json:"changes_request_plus,omitempty"` // If set, is used as the default value of request_plus for non-continuous replications
CORS *auth.CORSConfig `json:"cors,omitempty"` // Per-database CORS config
Logging *DbLoggingConfig `json:"logging,omitempty"` // Per-database Logging config
+ UpdatedAt *time.Time `json:"updated_at,omitempty"` // Time at which the database config was last updated
+ CreatedAt *time.Time `json:"created_at,omitempty"` // Time at which the database config was created
}
type ScopesConfig map[string]ScopeConfig
diff --git a/rest/config_database_test.go b/rest/config_database_test.go
index 93f55595c7..74be5a2827 100644
--- a/rest/config_database_test.go
+++ b/rest/config_database_test.go
@@ -9,10 +9,14 @@
package rest
import (
+ "encoding/json"
+ "net/http"
"testing"
"time"
+ "github.com/couchbase/sync_gateway/base"
"github.com/couchbase/sync_gateway/db"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -22,3 +26,59 @@ func TestDefaultDbConfig(t *testing.T) {
compactIntervalDays := *(DefaultDbConfig(&sc, useXattrs).CompactIntervalDays)
require.Equal(t, db.DefaultCompactInterval, time.Duration(compactIntervalDays)*time.Hour*24)
}
+
+func TestDbConfigUpdatedAtField(t *testing.T) {
+ b := base.GetTestBucket(t)
+ rt := NewRestTester(t, &RestTesterConfig{
+ CustomTestBucket: b,
+ PersistentConfig: true,
+ })
+ defer rt.Close()
+ ctx := base.TestCtx(t)
+
+ dbConfig := rt.NewDbConfig()
+ RequireStatus(t, rt.CreateDatabase("db1", dbConfig), http.StatusCreated)
+
+ sc := rt.ServerContext()
+
+ resp := rt.SendAdminRequest(http.MethodGet, "/db1/_config", "")
+ RequireStatus(t, resp, http.StatusOK)
+ var unmarshaledConfig DbConfig
+ require.NoError(t, json.Unmarshal(resp.BodyBytes(), &unmarshaledConfig))
+
+ registry := &GatewayRegistry{}
+ bName := b.GetName()
+ _, err := sc.BootstrapContext.Connection.GetMetadataDocument(ctx, bName, base.SGRegistryKey, registry)
+ require.NoError(t, err)
+
+	// Check that the config has updated_at and created_at fields
+ require.NotNil(t, unmarshaledConfig.UpdatedAt)
+ require.NotNil(t, unmarshaledConfig.CreatedAt)
+ currUpdatedTime := unmarshaledConfig.UpdatedAt
+ currCreatedTime := unmarshaledConfig.CreatedAt
+ registryUpdated := registry.UpdatedAt
+ registryCreated := registry.CreatedAt
+
+	// avoid flakiness where updated_at appears unchanged (possibly updating too fast)
+ time.Sleep(500 * time.Nanosecond)
+
+ // Update the config
+ dbConfig = rt.NewDbConfig()
+ RequireStatus(t, rt.UpsertDbConfig("db1", dbConfig), http.StatusCreated)
+
+ resp = rt.SendAdminRequest(http.MethodGet, "/db1/_config", "")
+ RequireStatus(t, resp, http.StatusOK)
+ unmarshaledConfig = DbConfig{}
+ require.NoError(t, json.Unmarshal(resp.BodyBytes(), &unmarshaledConfig))
+
+ registry = &GatewayRegistry{}
+ _, err = sc.BootstrapContext.Connection.GetMetadataDocument(ctx, b.GetName(), base.SGRegistryKey, registry)
+ require.NoError(t, err)
+
+	// assert that the db config timestamps are as expected
+ assert.Greater(t, unmarshaledConfig.UpdatedAt.UnixNano(), currUpdatedTime.UnixNano())
+ assert.Equal(t, unmarshaledConfig.CreatedAt.UnixNano(), currCreatedTime.UnixNano())
+ // assert that registry timestamps are as expected
+ assert.Equal(t, registry.CreatedAt.UnixNano(), registryCreated.UnixNano())
+ assert.Greater(t, registry.UpdatedAt.UnixNano(), registryUpdated.UnixNano())
+}
diff --git a/rest/config_manager.go b/rest/config_manager.go
index b50bff05b4..fcf57f446d 100644
--- a/rest/config_manager.go
+++ b/rest/config_manager.go
@@ -11,6 +11,7 @@ package rest
import (
"context"
"fmt"
+ "time"
"github.com/couchbase/sync_gateway/base"
"github.com/couchbase/sync_gateway/db"
@@ -105,6 +106,7 @@ func (b *bootstrapContext) InsertConfig(ctx context.Context, bucketName, groupID
}
// Persist registry
+ registry.UpdatedAt = time.Now().UTC()
writeErr := b.setGatewayRegistry(ctx, bucketName, registry)
if writeErr == nil {
base.DebugfCtx(ctx, base.KeyConfig, "Registry updated successfully")
@@ -131,6 +133,9 @@ func (b *bootstrapContext) InsertConfig(ctx context.Context, bucketName, groupID
return 0, fmt.Errorf("InsertConfig failed to persist registry after %d attempts", configUpdateMaxRetryAttempts)
}
// Step 3. Write the database config
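+	// a freshly inserted config carries identical created_at and updated_at timestamps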
+ timeUpdated := time.Now().UTC()
+ config.UpdatedAt = &timeUpdated
+ config.CreatedAt = &timeUpdated
cas, configErr := b.Connection.InsertMetadataDocument(ctx, bucketName, PersistentConfigKey(ctx, groupID, dbName), config)
if configErr != nil {
base.InfofCtx(ctx, base.KeyConfig, "Insert for database config returned error %v", configErr)
@@ -150,6 +155,7 @@ func (b *bootstrapContext) UpdateConfig(ctx context.Context, bucketName, groupID
var updatedConfig *DatabaseConfig
var registry *GatewayRegistry
var previousVersion string
+ var createdAtTime *time.Time
registryUpdated := false
for attempt := 1; attempt <= configUpdateMaxRetryAttempts; attempt++ {
@@ -167,6 +173,7 @@ func (b *bootstrapContext) UpdateConfig(ctx context.Context, bucketName, groupID
if existingConfig == nil {
return 0, base.ErrNotFound
}
+ createdAtTime = existingConfig.CreatedAt
base.DebugfCtx(ctx, base.KeyConfig, "UpdateConfig fetched registry and database successfully")
@@ -195,6 +202,7 @@ func (b *bootstrapContext) UpdateConfig(ctx context.Context, bucketName, groupID
}
// Persist registry
+ registry.UpdatedAt = time.Now().UTC()
writeErr := b.setGatewayRegistry(ctx, bucketName, registry)
if writeErr == nil {
base.DebugfCtx(ctx, base.KeyConfig, "UpdateConfig persisted updated registry successfully")
@@ -222,6 +230,9 @@ func (b *bootstrapContext) UpdateConfig(ctx context.Context, bucketName, groupID
}
// Step 2. Update the config document
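+	// refresh updated_at on every write, carrying created_at over from the existing config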
+ timeUpdated := time.Now().UTC()
+ updatedConfig.UpdatedAt = &timeUpdated
+ updatedConfig.CreatedAt = createdAtTime
docID := PersistentConfigKey(ctx, groupID, dbName)
casOut, err := b.Connection.WriteMetadataDocument(ctx, bucketName, docID, updatedConfig.cfgCas, updatedConfig)
if err != nil {
diff --git a/rest/config_registry.go b/rest/config_registry.go
index a6534b3907..6dcc699689 100644
--- a/rest/config_registry.go
+++ b/rest/config_registry.go
@@ -12,6 +12,7 @@ import (
"context"
"fmt"
"net/http"
+ "time"
"github.com/couchbase/sync_gateway/base"
)
@@ -47,6 +48,8 @@ type GatewayRegistry struct {
Version string `json:"version"` // Registry version
ConfigGroups map[string]*RegistryConfigGroup `json:"config_groups"` // Map of config groups, keyed by config group ID
SGVersion base.ComparableBuildVersion `json:"sg_version"` // Latest patch version of Sync Gateway that touched the registry
+ UpdatedAt time.Time `json:"updated_at"` // Time the registry was last updated
+ CreatedAt time.Time `json:"created_at"` // Time the registry was created
}
const GatewayRegistryVersion = "1.0"
@@ -111,6 +114,7 @@ func NewGatewayRegistry(syncGatewayVersion base.ComparableBuildVersion) *Gateway
ConfigGroups: make(map[string]*RegistryConfigGroup),
Version: GatewayRegistryVersion,
SGVersion: syncGatewayVersion,
+ CreatedAt: time.Now().UTC(),
}
}
diff --git a/rest/config_test.go b/rest/config_test.go
index 75ff917f63..b27f99493e 100644
--- a/rest/config_test.go
+++ b/rest/config_test.go
@@ -3150,7 +3150,6 @@ func TestTLSWithoutCerts(t *testing.T) {
},
})
defer rt.Close()
-
dbConfig := rt.NewDbConfig()
dbConfig.AutoImport = true
rt.CreateDatabase("db", dbConfig)
@@ -3162,3 +3161,104 @@ func TestTLSWithoutCerts(t *testing.T) {
}, time.Second*10, time.Millisecond*100)
}
+
+func TestUserUpdatedAtField(t *testing.T) {
+ rt := NewRestTester(t, &RestTesterConfig{
+ CustomTestBucket: base.GetTestBucket(t),
+ PersistentConfig: true,
+ })
+ defer rt.Close()
+
+ dbConfig := rt.NewDbConfig()
+ RequireStatus(t, rt.CreateDatabase("db1", dbConfig), http.StatusCreated)
+
+ metaKeys := rt.GetDatabase().MetadataKeys
+
+ resp := rt.SendAdminRequest(http.MethodPost, "/db1/_user/", `{"name":"user1","password":"password"}`)
+ RequireStatus(t, resp, http.StatusCreated)
+
+ ds := rt.MetadataStore()
+ var user map[string]interface{}
+ userKey := metaKeys.UserKey("user1")
+ _, err := ds.Get(userKey, &user)
+ require.NoError(t, err)
+
+	// Check that the user has updated_at and created_at fields
+ require.NotNil(t, user["updated_at"])
+ currTimeStr := user["updated_at"].(string)
+ currTime, err := time.Parse(time.RFC3339, currTimeStr)
+ require.NoError(t, err)
+ require.NotNil(t, user["created_at"])
+ currTimeCreatedStr := user["created_at"].(string)
+ timeCreated, err := time.Parse(time.RFC3339, currTimeCreatedStr)
+ require.NoError(t, err)
+
+	// avoid flakiness where updated_at appears unchanged (possibly updating too fast)
+ time.Sleep(500 * time.Nanosecond)
+
+ resp = rt.SendAdminRequest(http.MethodPut, "/db1/_user/user1", `{"name":"user1","password":"password1"}`)
+ RequireStatus(t, resp, http.StatusOK)
+
+ user = map[string]interface{}{}
+ _, err = ds.Get(userKey, &user)
+ require.NoError(t, err)
+ newTimeStr := user["updated_at"].(string)
+ newTime, err := time.Parse(time.RFC3339, newTimeStr)
+ require.NoError(t, err)
+ newCreatedStr := user["created_at"].(string)
+ newCreated, err := time.Parse(time.RFC3339, newCreatedStr)
+ require.NoError(t, err)
+
+ assert.Greater(t, newTime.UnixNano(), currTime.UnixNano())
+ assert.Equal(t, timeCreated.UnixNano(), newCreated.UnixNano())
+}
+
+func TestRoleUpdatedAtField(t *testing.T) {
+ rt := NewRestTester(t, &RestTesterConfig{
+ CustomTestBucket: base.GetTestBucket(t),
+ PersistentConfig: true,
+ })
+ defer rt.Close()
+
+ dbConfig := rt.NewDbConfig()
+ RequireStatus(t, rt.CreateDatabase("db1", dbConfig), http.StatusCreated)
+
+ resp := rt.SendAdminRequest(http.MethodPost, "/db1/_role/", `{"name":"role1","admin_channels":["test"]}`)
+ RequireStatus(t, resp, http.StatusCreated)
+
+ ds := rt.MetadataStore()
+ metaKeys := rt.GetDatabase().MetadataKeys
+ roleKey := metaKeys.RoleKey("role1")
+ var user map[string]interface{}
+ _, err := ds.Get(roleKey, &user)
+ require.NoError(t, err)
+
+	// Check that the role has updated_at and created_at fields
+ require.NotNil(t, user["updated_at"])
+ currTimeStr := user["updated_at"].(string)
+ currTime, err := time.Parse(time.RFC3339, currTimeStr)
+ require.NoError(t, err)
+ require.NotNil(t, user["created_at"])
+ currTimeCreatedStr := user["created_at"].(string)
+ timeCreated, err := time.Parse(time.RFC3339, currTimeCreatedStr)
+ require.NoError(t, err)
+
+	// avoid flakiness where updated_at appears unchanged (possibly updating too fast)
+ time.Sleep(500 * time.Nanosecond)
+
+ resp = rt.SendAdminRequest(http.MethodPut, "/db1/_role/role1", `{"name":"role1","admin_channels":["ABC"]}`)
+ RequireStatus(t, resp, http.StatusOK)
+
+ user = map[string]interface{}{}
+ _, err = ds.Get(roleKey, &user)
+ require.NoError(t, err)
+ newTimeStr := user["updated_at"].(string)
+ newTime, err := time.Parse(time.RFC3339, newTimeStr)
+ require.NoError(t, err)
+ newCreatedStr := user["created_at"].(string)
+ newCreated, err := time.Parse(time.RFC3339, newCreatedStr)
+ require.NoError(t, err)
+
+ assert.Greater(t, newTime.UnixNano(), currTime.UnixNano())
+ assert.Equal(t, timeCreated.UnixNano(), newCreated.UnixNano())
+}
diff --git a/rest/counted_response_writer.go b/rest/counted_response_writer.go
index 60f5d216da..8c9bb6866f 100644
--- a/rest/counted_response_writer.go
+++ b/rest/counted_response_writer.go
@@ -92,3 +92,7 @@ func (w *CountedResponseWriter) isHijackable() bool {
_, ok := w.writer.(http.Hijacker)
return ok
}
+
+func (w *CountedResponseWriter) Flush() {
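+	// assumes the wrapped writer implements http.Flusher (net/http's standard ResponseWriter does); the assertion panics otherwise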
+ w.writer.(http.Flusher).Flush()
+}
diff --git a/rest/counted_response_writer_test.go b/rest/counted_response_writer_test.go
index 79253c257c..50480d4ba6 100644
--- a/rest/counted_response_writer_test.go
+++ b/rest/counted_response_writer_test.go
@@ -16,6 +16,7 @@ import (
"github.com/couchbase/sync_gateway/base"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -169,3 +170,17 @@ func TestCountableResponseWriterWithDelay(t *testing.T) {
}
}
+
+func TestResponseWriterSupportsFlush(t *testing.T) {
+ for _, test := range testCases {
+ t.Run(test.name, func(t *testing.T) {
+
+ stat, err := base.NewIntStat(base.SubsystemDatabaseKey, "http_bytes_written", base.StatUnitBytes, base.PublicRestBytesWrittenDesc, base.StatAddedVersion3dot1dot0, base.StatDeprecatedVersionNotDeprecated, base.StatStabilityCommitted, nil, nil, prometheus.CounterValue, 0)
+ require.NoError(t, err)
+ responseWriter := getResponseWriter(t, stat, test.name, 0)
+
+ _, ok := responseWriter.(http.Flusher)
+ assert.True(t, ok)
+ })
+ }
+}
diff --git a/rest/handler.go b/rest/handler.go
index b2e930e2fc..e6d3f1d2c5 100644
--- a/rest/handler.go
+++ b/rest/handler.go
@@ -39,6 +39,13 @@ const (
minCompressibleJSONSize = 1000
)
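+// Compile-time checks that the response writer wrappers implement the optional
+// http interfaces the handlers rely on.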
+var _ http.Flusher = &CountedResponseWriter{}
+var _ http.Flusher = &NonCountedResponseWriter{}
+var _ http.Flusher = &EncodedResponseWriter{}
+
+var _ http.Hijacker = &CountedResponseWriter{}
+var _ http.Hijacker = &NonCountedResponseWriter{}
+
var ErrInvalidLogin = base.HTTPErrorf(http.StatusUnauthorized, "Invalid login")
var ErrLoginRequired = base.HTTPErrorf(http.StatusUnauthorized, "Login required")
@@ -671,6 +678,11 @@ func (h *handler) validateAndWriteHeaders(method handlerMethod, accessPermission
}
}
h.updateResponseWriter()
+ // ensure wrapped ResponseWriter implements http.Flusher
+ _, ok := h.response.(http.Flusher)
+ if !ok {
+ return fmt.Errorf("http.ResponseWriter %T does not implement Flusher interface", h.response)
+ }
return nil
}
@@ -1595,10 +1607,7 @@ func (h *handler) writeMultipart(subtype string, callback func(*multipart.Writer
}
func (h *handler) flush() {
- switch r := h.response.(type) {
- case http.Flusher:
- r.Flush()
- }
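+	// safe: validateAndWriteHeaders verifies the wrapped ResponseWriter implements http.Flusher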
+ h.response.(http.Flusher).Flush()
}
// If the error parameter is non-nil, sets the response status code appropriately and
diff --git a/rest/importtest/import_test.go b/rest/importtest/import_test.go
index 9b8645fe60..9a684e9ffc 100644
--- a/rest/importtest/import_test.go
+++ b/rest/importtest/import_test.go
@@ -2322,6 +2322,127 @@ func TestImportRollback(t *testing.T) {
}
}
+// TestImportRollbackMultiplePartitions:
+// - Much like TestImportRollback, but with multiple partitions and multiple vBuckets rolling back
+// - Test case rollbackWithoutFailover rolls back only one partition
+func TestImportRollbackMultiplePartitions(t *testing.T) {
+ if !base.IsEnterpriseEdition() {
+ t.Skip("This test only works against EE")
+ }
+
+ if base.UnitTestUrlIsWalrus() {
+ t.Skip("This test only works against Couchbase Server - needs cbgt and import checkpointing")
+ }
+
+ base.SetUpTestLogging(t, base.LevelDebug, base.KeyImport, base.KeyDCP, base.KeyCluster)
+ ctx := base.TestCtx(t)
+ bucket := base.GetTestBucket(t)
+ defer bucket.Close(ctx)
+
+ rt := rest.NewRestTester(t, &rest.RestTesterConfig{
+ CustomTestBucket: bucket.NoCloseClone(),
+ PersistentConfig: false,
+ DatabaseConfig: &rest.DatabaseConfig{
+ DbConfig: rest.DbConfig{
+ ImportPartitions: base.Uint16Ptr(2),
+ },
+ },
+ })
+
+	// create doc IDs for vb0 and vb800
+ vb0DocIDs := []string{"abbacomes", "abdicate", "accrescent", "aconitum", "acrux", "adduction", "affrication", "algraphy", "allantoinuria", "altiloquent"}
+ vb800DocIDs := []string{"abrook", "accept", "accompaniment", "acoemeti", "adiposeness", "alkyd", "alnage", "ambulance", "anasazi", "anhydroxime"}
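+	// these keys are pre-chosen to hash to vBuckets 0 and 800 respectively under the bucket's 1024-vBucket key hashing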
+
+ for _, v := range vb0DocIDs {
+		added, err := rt.GetSingleDataStore().AddRaw(v, 0, []byte(`{"star": "6"}`))
+ require.True(t, added)
+ require.NoError(t, err)
+ }
+ for _, v := range vb800DocIDs {
+		added, err := rt.GetSingleDataStore().AddRaw(v, 0, []byte(`{"star": "6"}`))
+ require.True(t, added)
+ require.NoError(t, err)
+ }
+
+ // wait for docs to be imported
+ changes, err := rt.WaitForChanges(20, "/{{.keyspace}}/_changes?since=0", "", true)
+ require.NoError(t, err)
+ lastSeq := changes.Last_Seq.String()
+
+ // Close db while we alter checkpoints to force rollback
+ db := rt.GetDatabase()
+ checkpointPrefix := rt.GetDatabase().MetadataKeys.DCPVersionedCheckpointPrefix(db.Options.GroupID, db.Options.ImportVersion)
+ rt.Close()
+
+ metaStore := bucket.GetMetadataStore()
+	// fetch the checkpoints for vBuckets 0 and 800 and modify the checkpoint values to a higher sequence to
+	// trigger rollback upon the stream open request
+ checkpointKey := fmt.Sprintf("%s%d", checkpointPrefix, 0)
+ var checkpointData base.ShardedImportDCPMetadata
+ checkpointBytes, _, err := metaStore.GetRaw(checkpointKey)
+ require.NoError(t, err)
+ require.NoError(t, base.JSONUnmarshal(checkpointBytes, &checkpointData))
+ checkpointData.SnapStart = 3000 + checkpointData.SnapStart
+ checkpointData.SnapEnd = 3000 + checkpointData.SnapEnd
+ checkpointData.SeqStart = 3000 + checkpointData.SeqStart
+ checkpointData.SeqEnd = 3000 + checkpointData.SeqEnd
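+	// replacing the vbUUID guarantees the failover log can't match the server's, forcing a DCP rollback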
+ existingVbUUID := checkpointData.FailOverLog[0][0]
+ checkpointData.FailOverLog = [][]uint64{{existingVbUUID + 1, 0}}
+
+ updatedBytes, err := base.JSONMarshal(checkpointData)
+ require.NoError(t, err)
+ err = metaStore.SetRaw(checkpointKey, 0, nil, updatedBytes)
+ require.NoError(t, err)
+
+ // vBucket 800
+ checkpointKey = fmt.Sprintf("%s%d", checkpointPrefix, 800)
+ checkpointData = base.ShardedImportDCPMetadata{}
+ checkpointBytes, _, err = metaStore.GetRaw(checkpointKey)
+ require.NoError(t, err)
+ require.NoError(t, base.JSONUnmarshal(checkpointBytes, &checkpointData))
+ checkpointData.SnapStart = 3000 + checkpointData.SnapStart
+ checkpointData.SnapEnd = 3000 + checkpointData.SnapEnd
+ checkpointData.SeqStart = 3000 + checkpointData.SeqStart
+ checkpointData.SeqEnd = 3000 + checkpointData.SeqEnd
+ existingVbUUID = checkpointData.FailOverLog[0][0]
+ checkpointData.FailOverLog = [][]uint64{{existingVbUUID + 1, 0}}
+
+ updatedBytes, err = base.JSONMarshal(checkpointData)
+ require.NoError(t, err)
+ err = metaStore.SetRaw(checkpointKey, 0, nil, updatedBytes)
+ require.NoError(t, err)
+
+ // Reopen the db, expect DCP rollback
+ rt2 := rest.NewRestTester(t, &rest.RestTesterConfig{
+ CustomTestBucket: bucket.NoCloseClone(),
+ PersistentConfig: false,
+ DatabaseConfig: &rest.DatabaseConfig{
+ DbConfig: rest.DbConfig{
+ ImportPartitions: base.Uint16Ptr(2),
+ },
+ },
+ })
+ defer rt2.Close()
+
+ for _, v := range vb0DocIDs {
+		err := rt2.GetSingleDataStore().SetRaw(v, 0, nil, []byte(`{"star": "6"}`))
+ require.NoError(t, err)
+ }
+ for _, v := range vb800DocIDs {
+		err := rt2.GetSingleDataStore().SetRaw(v, 0, nil, []byte(`{"star": "6"}`))
+ require.NoError(t, err)
+ }
+
+	// Add a doc to a non-rolled-back vBucket (392) and assert it's imported
+	added, err := rt2.GetSingleDataStore().AddRaw("someKey", 0, []byte(`{"star": "6"}`))
+ require.NoError(t, err)
+ require.True(t, added)
+
+	// wait for the doc updates and the new doc to be imported
+ _, err = rt2.WaitForChanges(21, "/{{.keyspace}}/_changes?since="+lastSeq, "", true)
+ require.NoError(t, err)
+}
+
func TestImportUpdateExpiry(t *testing.T) {
testCases := []struct {
name string
diff --git a/rest/non_counted_response_writer.go b/rest/non_counted_response_writer.go
index fabf267d1d..e25d0663fa 100644
--- a/rest/non_counted_response_writer.go
+++ b/rest/non_counted_response_writer.go
@@ -45,3 +45,7 @@ func (w *NonCountedResponseWriter) isHijackable() bool {
_, ok := w.ResponseWriter.(http.Hijacker)
return ok
}
+
+func (w *NonCountedResponseWriter) Flush() {
+ w.ResponseWriter.(http.Flusher).Flush()
+}
diff --git a/rest/replicatortest/replicator_test.go b/rest/replicatortest/replicator_test.go
index f202abf510..5f5ee7438a 100644
--- a/rest/replicatortest/replicator_test.go
+++ b/rest/replicatortest/replicator_test.go
@@ -9,6 +9,7 @@
package replicatortest
import (
+ "context"
"encoding/json"
"expvar"
"fmt"
@@ -25,14 +26,16 @@ import (
"testing"
"time"
+ "github.com/couchbase/gocb/v2"
+ "github.com/google/uuid"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
"github.com/couchbase/sync_gateway/auth"
"github.com/couchbase/sync_gateway/base"
"github.com/couchbase/sync_gateway/channels"
"github.com/couchbase/sync_gateway/db"
"github.com/couchbase/sync_gateway/rest"
- "github.com/google/uuid"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
)
func TestReplicationAPI(t *testing.T) {
@@ -6533,7 +6536,6 @@ func TestSGR2TombstoneConflictHandling(t *testing.T) {
} else {
localActiveRT.WaitForVersion(doc2ID, expectedVersion)
}
- assert.NoError(t, err)
})
}
}
@@ -8571,6 +8573,68 @@ func TestDbConfigNoOverwriteReplications(t *testing.T) {
require.Equal(t, startReplicationConfig.Direction, config.Direction)
}
+func TestActiveReplicatorChangesFeedExit(t *testing.T) {
+ base.RequireNumTestBuckets(t, 2)
+
+ base.SetUpTestLogging(t, base.LevelDebug, base.KeyHTTP, base.KeySync, base.KeyChanges, base.KeyCRUD, base.KeyBucket)
+
+ var shouldChannelQueryError atomic.Bool
+ activeRT := rest.NewRestTester(t, &rest.RestTesterConfig{
+ LeakyBucketConfig: &base.LeakyBucketConfig{
+ QueryCallback: func(ddoc, viewname string, params map[string]any) error {
+ if viewname == "channels" && shouldChannelQueryError.Load() {
+ shouldChannelQueryError.Store(false)
+ return gocb.ErrTimeout
+ }
+ return nil
+ },
+ N1QLQueryCallback: func(_ context.Context, statement string, params map[string]any, consistency base.ConsistencyMode, adhoc bool) error {
+				// channel queries use the allDocs index
+ if strings.Contains(statement, "sg_allDocs") && shouldChannelQueryError.Load() {
+ shouldChannelQueryError.Store(false)
+ return gocb.ErrTimeout
+ }
+ return nil
+ },
+ },
+ })
+ t.Cleanup(activeRT.Close)
+ _ = activeRT.Bucket()
+
+ passiveRT := rest.NewRestTesterPersistentConfig(t)
+ t.Cleanup(passiveRT.Close)
+
+ username := "alice"
+ passiveRT.CreateUser(username, []string{"*"})
+ passiveDBURL := passiveDBURLForAlice(passiveRT, username)
+ stats := dbReplicatorStats(t)
+ ar, err := db.NewActiveReplicator(activeRT.Context(), &db.ActiveReplicatorConfig{
+ ID: t.Name(),
+ Direction: db.ActiveReplicatorTypePush,
+ RemoteDBURL: passiveDBURL,
+ ActiveDB: &db.Database{
+ DatabaseContext: activeRT.GetDatabase(),
+ },
+ ChangesBatchSize: 200,
+ Continuous: false,
+ ReplicationStatsMap: stats,
+ CollectionsEnabled: !activeRT.GetDatabase().OnlyDefaultCollection(),
+ })
+ require.NoError(t, err)
+ t.Cleanup(func() { assert.NoError(t, ar.Stop()) })
+
+ docID := "doc1"
+ _ = activeRT.CreateTestDoc(docID)
+
+ shouldChannelQueryError.Store(true)
+ require.NoError(t, ar.Start(activeRT.Context()))
+
+ changesResults, err := passiveRT.WaitForChanges(1, "/{{.keyspace}}/_changes?since=0", "", true)
+ require.NoError(t, err)
+ require.Len(t, changesResults.Results, 1)
+ require.Equal(t, docID, changesResults.Results[0].ID)
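+	// the injected query timeout aborts the first changes feed, so the replicator connects a second time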
+ require.Equal(t, int64(2), stats.NumConnectAttemptsPush.Value())
+}
func requireBodyEqual(t *testing.T, expected string, doc *db.Document) {
var expectedBody db.Body
require.NoError(t, base.JSONUnmarshal([]byte(expected), &expectedBody))
@@ -8620,3 +8684,63 @@ func TestReplicatorUpdateHLVOnPut(t *testing.T) {
assert.Equal(t, base.HexCasToUint64(syncData.Cas), syncData.HLV.CurrentVersionCAS)
assert.Equal(t, base.HexCasToUint64(syncData.Cas), syncData.HLV.Version)
}
+
+func dbReplicatorStats(t *testing.T) *base.DbReplicatorStats {
+ stats, err := base.SyncGatewayStats.NewDBStats(t.Name(), false, false, false, nil, nil)
+ require.NoError(t, err)
+ dbstats, err := stats.DBReplicatorStats(t.Name())
+ require.NoError(t, err)
+ return dbstats
+}
+
+// passiveDBURLForAlice creates a public server for the passive RT and returns the URL for the alice user, e.g. http://alice:password@localhost:1234/dbname
+func passiveDBURLForAlice(rt *rest.RestTester, username string) *url.URL {
+ srv := httptest.NewServer(rt.TestPublicHandler())
+ rt.TB().Cleanup(srv.Close)
+
+ passiveDBURL, err := url.Parse(srv.URL + "/" + rt.GetDatabase().Name)
+ require.NoError(rt.TB(), err)
+
+ // Add basic auth creds to target db URL
+ passiveDBURL.User = url.UserPassword(username, rest.RestTesterDefaultUserPassword)
+ return passiveDBURL
+}
+
+func TestReplicationConfigUpdatedAt(t *testing.T) {
+ base.RequireNumTestBuckets(t, 2)
+
+ activeRT, _, remoteURLString, teardown := rest.SetupSGRPeers(t)
+ defer teardown()
+
+ // create a replication and assert the updated at field is present in the config
+ activeRT.CreateReplication("replication1", remoteURLString, db.ActiveReplicatorTypePush, nil, true, db.ConflictResolverDefault)
+
+ activeRT.WaitForReplicationStatus("replication1", db.ReplicationStateRunning)
+
+ resp := activeRT.SendAdminRequest(http.MethodGet, "/{{.db}}/_replication/replication1", "")
+ var configResponse db.ReplicationConfig
+ require.NoError(t, json.Unmarshal(resp.BodyBytes(), &configResponse))
+
+ // Check that the config has an updated_at field
+ require.NotNil(t, configResponse.UpdatedAt)
+ require.NotNil(t, configResponse.CreatedAt)
+ currTime := configResponse.UpdatedAt
+ createdAtTime := configResponse.CreatedAt
+
+ resp = activeRT.SendAdminRequest("PUT", "/{{.db}}/_replicationStatus/replication1?action=stop", "")
+ rest.RequireStatus(t, resp, http.StatusOK)
+
+ activeRT.WaitForReplicationStatus("replication1", db.ReplicationStateStopped)
+
+ // update the config
+ resp = activeRT.SendAdminRequest(http.MethodPut, "/{{.db}}/_replication/replication1", fmt.Sprintf(`{"name":"replication1","source":"%s","type":"push", "continuous":true}`, remoteURLString))
+ rest.RequireStatus(t, resp, http.StatusOK)
+
+ // Check that the updated_at field is updated when the config is updated
+ resp = activeRT.SendAdminRequest(http.MethodGet, "/{{.db}}/_replication/replication1", "")
+ configResponse = db.ReplicationConfig{}
+ require.NoError(t, json.Unmarshal(resp.BodyBytes(), &configResponse))
+
+ assert.Greater(t, configResponse.UpdatedAt.UnixNano(), currTime.UnixNano())
+ assert.Equal(t, configResponse.CreatedAt.UnixNano(), createdAtTime.UnixNano())
+}
diff --git a/rest/revocation_test.go b/rest/revocation_test.go
index 0717f2ae4d..0e30e83c3c 100644
--- a/rest/revocation_test.go
+++ b/rest/revocation_test.go
@@ -2416,7 +2416,7 @@ func TestRevocationGetSyncDataError(t *testing.T) {
// Two callbacks to cover usage with CBS/Xattrs and without
revocationTester, rt := InitScenario(
t, &RestTesterConfig{
- leakyBucketConfig: &base.LeakyBucketConfig{
+ LeakyBucketConfig: &base.LeakyBucketConfig{
GetWithXattrCallback: func(key string) error {
return fmt.Errorf("Leaky Bucket GetWithXattrCallback Error")
}, GetRawCallback: func(key string) error {
diff --git a/rest/session_api.go b/rest/session_api.go
index a4d2e476fe..ababefb4d6 100644
--- a/rest/session_api.go
+++ b/rest/session_api.go
@@ -278,6 +278,7 @@ func (h *handler) deleteUserSessions() error {
return nil
}
user.UpdateSessionUUID()
+ user.SetUpdatedAt()
err = auth.Save(user)
if err == nil {
base.Audit(h.ctx(), base.AuditIDPublicUserSessionDeleteAll, base.AuditFields{base.AuditFieldUserName: userName})
diff --git a/rest/utilities_testing.go b/rest/utilities_testing.go
index d5b2250425..5de62226e5 100644
--- a/rest/utilities_testing.go
+++ b/rest/utilities_testing.go
@@ -59,7 +59,7 @@ type RestTesterConfig struct {
EnableNoConflictsMode bool // Enable no-conflicts mode. By default, conflicts will be allowed, which is the default behavior
EnableUserQueries bool // Enable the feature-flag for user N1QL/etc queries
CustomTestBucket *base.TestBucket // If set, use this bucket instead of requesting a new one.
- leakyBucketConfig *base.LeakyBucketConfig // Set to create and use a leaky bucket on the RT and DB. A test bucket cannot be passed in if using this option.
+ LeakyBucketConfig *base.LeakyBucketConfig // Set to create and use a leaky bucket on the RT and DB. A test bucket cannot be passed in if using this option.
adminInterface string // adminInterface overrides the default admin interface.
SgReplicateEnabled bool // SgReplicateManager disabled by default for RestTester
AutoImport *bool
@@ -177,14 +177,14 @@ func (rt *RestTester) Bucket() base.Bucket {
testBucket := rt.RestTesterConfig.CustomTestBucket
if testBucket == nil {
testBucket = base.GetTestBucket(rt.TB())
- if rt.leakyBucketConfig != nil {
- leakyConfig := *rt.leakyBucketConfig
+ if rt.LeakyBucketConfig != nil {
+ leakyConfig := *rt.LeakyBucketConfig
// Ignore closures to avoid double closing panics
leakyConfig.IgnoreClose = true
testBucket = testBucket.LeakyBucketClone(leakyConfig)
}
- } else if rt.leakyBucketConfig != nil {
- rt.TB().Fatalf("A passed in TestBucket cannot be used on the RestTester when defining a leakyBucketConfig")
+ } else if rt.LeakyBucketConfig != nil {
+ rt.TB().Fatalf("A passed in TestBucket cannot be used on the RestTester when defining a LeakyBucketConfig")
}
rt.TestBucket = testBucket
@@ -360,7 +360,7 @@ func (rt *RestTester) Bucket() base.Bucket {
}
_, isLeaky := base.AsLeakyBucket(rt.TestBucket)
var err error
- if rt.leakyBucketConfig != nil || isLeaky {
+ if rt.LeakyBucketConfig != nil || isLeaky {
_, err = rt.RestTesterServerContext.AddDatabaseFromConfigWithBucket(ctx, rt.TB(), *rt.DatabaseConfig, testBucket.Bucket)
} else {
_, err = rt.RestTesterServerContext.AddDatabaseFromConfig(ctx, *rt.DatabaseConfig)
@@ -448,11 +448,11 @@ func GetDataStoreNamesFromScopesConfig(config ScopesConfig) []sgbucket.DataStore
}
// LeakyBucket gets the bucket from the RestTester as a leaky bucket allowing for callbacks to be set on the fly.
-// The RestTester must have been set up to create and use a leaky bucket by setting leakyBucketConfig in the RT
+// The RestTester must have been set up to create and use a leaky bucket by setting LeakyBucketConfig in the RT
// config when calling NewRestTester.
func (rt *RestTester) LeakyBucket() *base.LeakyDataStore {
- if rt.leakyBucketConfig == nil {
- rt.TB().Fatalf("Cannot get leaky bucket when leakyBucketConfig was not set on RestTester initialisation")
+ if rt.LeakyBucketConfig == nil {
+ rt.TB().Fatalf("Cannot get leaky bucket when LeakyBucketConfig was not set on RestTester initialisation")
}
leakyDataStore, ok := base.AsLeakyDataStore(rt.Bucket().DefaultDataStore())
if !ok {
diff --git a/rest/utilities_testing_async.go b/rest/utilities_testing_async.go
index 17a7e0381f..6f6b76dcbe 100644
--- a/rest/utilities_testing_async.go
+++ b/rest/utilities_testing_async.go
@@ -47,9 +47,7 @@ func waitForError(t *testing.T, ch <-chan error, message string) error {
}
select {
case err := <-ch:
- if err == nil {
- require.Fail(t, "[%s] Received non-error message on channel", message)
- }
+ require.Error(t, err, "[%s] Expected error message on channel", message)
return err
case <-time.After(TestChannelTimeout):
require.Fail(t, fmt.Sprintf("[%s] expected error message did not arrive in %v", message, TestChannelTimeout))
diff --git a/rest/utilities_testing_blip_client.go b/rest/utilities_testing_blip_client.go
index 9c3faf6ac0..343842a783 100644
--- a/rest/utilities_testing_blip_client.go
+++ b/rest/utilities_testing_blip_client.go
@@ -119,11 +119,9 @@ func (c *BlipTesterCollectionClient) OneShotDocsSince(ctx context.Context, since
// filter non-latest entries in cases where we haven't pruned _seqStore
if !ok {
continue
- } else if latestDocSeq := doc.latestSeq(); latestDocSeq != seq {
- // this entry should've been cleaned up from _seqStore
- require.FailNow(c.TB(), "seq %d found in _seqStore but latestSeq for doc %d - this should've been pruned out!", seq, latestDocSeq)
- continue
}
+			// make sure seq is the doc's latest seq - stale entries should've been pruned from _seqStore
+ require.Equal(c.TB(), doc.latestSeq(), seq, "this should've been pruned out!")
if !yield(seq, doc) {
base.DebugfCtx(ctx, base.KeySGTest, "OneShotDocsSince: since=%d, _seqLast=%d - stopping iteration", since, seqLast)
return
@@ -370,10 +368,7 @@ func (btcc *BlipTesterCollectionClient) _getClientDoc(docID string) (*clientDoc,
return nil, false
}
clientDoc, ok := btcc._seqStore[seq]
- if !ok {
- require.FailNow(btcc.TB(), "docID %q found in _seqFromDocID but seq %d not in _seqStore %v", docID, seq, btcc._seqStore)
- return nil, false
- }
+ require.True(btcc.TB(), ok, "docID %q found in _seqFromDocID but seq %d not in _seqStore %v", docID, seq, btcc._seqStore)
return clientDoc, ok
}
@@ -635,12 +630,9 @@ func (btr *BlipTesterReplicator) initHandlers(btc *BlipTesterClient) {
// If deltas are enabled, and we see a deltaSrc property, we'll need to patch it before storing
if btc.ClientDeltas && deltaSrc != "" {
if btc.rejectDeltasForSrcRev == deltaSrc {
- if !msg.NoReply() {
- response := msg.Response()
- response.SetError("HTTP", http.StatusUnprocessableEntity, "test code intentionally rejected delta")
- return
- }
- require.FailNow(btr.TB(), "expected delta rev message to be sent without noreply flag: %+v", msg)
+ require.False(btr.TB(), msg.NoReply(), "expected delta rev message to be sent without noreply flag: %+v", msg)
+ response := msg.Response()
+ response.SetError("HTTP", http.StatusUnprocessableEntity, "test code intentionally rejected delta")
}
// unmarshal body to extract deltaSrc
@@ -650,10 +642,7 @@ func (btr *BlipTesterReplicator) initHandlers(btc *BlipTesterClient) {
var old db.Body
doc, ok := btcr.getClientDoc(docID)
- if !ok {
- require.FailNow(btc.TB(), "docID %q not found in _seqFromDocID", docID)
- return
- }
+ require.True(btc.TB(), ok, "docID %q not found in _seqFromDocID", docID)
var deltaSrcVersion DocVersion
if btc.UseHLV() {
v, err := db.ParseVersion(deltaSrc)
@@ -915,7 +904,7 @@ func (btcc *BlipTesterCollectionClient) UseHLV() bool {
return btcc.parent.UseHLV()
}
-// saveAttachment takes base64 encoded data and stores the attachment on the client. Returns the length of the decoded data and the digest of the attachment.
+// saveAttachment takes base64 encoded data and stores the attachment on the client.
func (btc *BlipTesterCollectionClient) saveAttachment(base64data string) (dataLength int, digest string) {
btc.attachmentsLock.Lock()
defer btc.attachmentsLock.Unlock()
@@ -947,10 +936,7 @@ func (btc *BlipTesterCollectionClient) updateLastReplicatedVersion(docID string,
btc.seqLock.Lock()
defer btc.seqLock.Unlock()
doc, ok := btc._getClientDoc(docID)
- if !ok {
- require.FailNow(btc.TB(), "docID %q not found in _seqFromDocID", docID)
- return
- }
+ require.True(btc.TB(), ok, "docID %q not found in _seqFromDocID", docID)
doc.setLatestServerVersion(version)
}
@@ -958,10 +944,7 @@ func (btc *BlipTesterCollectionClient) getLastReplicatedVersion(docID string) (v
btc.seqLock.Lock()
defer btc.seqLock.Unlock()
doc, ok := btc._getClientDoc(docID)
- if !ok {
- require.FailNow(btc.TB(), "docID %q not found in _seqFromDocID", docID)
- return DocVersion{}, false
- }
+ require.True(btc.TB(), ok, "docID %q not found in _seqFromDocID", docID)
doc.lock.RLock()
latestServerVersion := doc._latestServerVersion
doc.lock.RUnlock()
@@ -1158,7 +1141,7 @@ func (btcRunner *BlipTestClientRunner) Collection(clientID uint32, collectionNam
return collectionClient
}
}
- require.FailNow(btcRunner.clients[clientID].TB(), "Could not find collection %s in BlipTesterClient", collectionName)
+ require.FailNow(btcRunner.clients[clientID].TB(), fmt.Sprintf("Could not find collection %s in BlipTesterClient", collectionName))
return nil
}
@@ -1273,12 +1256,8 @@ func (btcc *BlipTesterCollectionClient) StartPushWithOpts(opts BlipTesterPushOpt
proposeChangesResponse := proposeChangesRequest.Response()
rspBody, err := proposeChangesResponse.Body()
require.NoError(btcc.TB(), err, "Error reading proposeChanges response body")
- errorDomain := proposeChangesResponse.Properties["Error-Domain"]
- errorCode := proposeChangesResponse.Properties["Error-Code"]
- if errorDomain != "" && errorCode != "" {
- btcc.TB().Errorf("error %s %s from proposeChanges with body: %s", errorDomain, errorCode, string(rspBody))
- return
- }
+ require.NotContains(btcc.TB(), proposeChangesResponse.Properties, "Error-Domain", "unexpected error response from proposeChanges: %v, %s", proposeChangesResponse, rspBody)
+ require.NotContains(btcc.TB(), proposeChangesResponse.Properties, "Error-Code", "unexpected error response from proposeChanges: %v, %s", proposeChangesResponse, rspBody)
base.DebugfCtx(ctx, base.KeySGTest, "proposeChanges response: %s", string(rspBody))
@@ -1325,10 +1304,7 @@ func (btcc *BlipTesterCollectionClient) StartPushWithOpts(opts BlipTesterPushOpt
revRequest.Properties[db.RevMessageHistory] = history
doc, ok := btcc.getClientDoc(change.docID)
- if !ok {
- btcc.TB().Errorf("doc %s not found in _seqFromDocID", change.docID)
- return
- }
+ require.True(btcc.TB(), ok, "docID %q not found in _seqFromDocID", change.docID)
doc.lock.RLock()
serverRev := doc._revisionsBySeq[doc._seqsByVersions[change.latestServerVersion]]
docBody := doc._revisionsBySeq[doc._seqsByVersions[change.version]].body
@@ -1365,23 +1341,11 @@ func (btcc *BlipTesterCollectionClient) StartPushWithOpts(opts BlipTesterPushOpt
base.DebugfCtx(ctx, base.KeySGTest, "sent doc %s / %v", change.docID, change.version)
// block until remote has actually processed the rev and sent a response
revResp := revRequest.Response()
- if status := revResp.Properties[db.BlipErrorCode]; status != "" {
- if status == "409" {
- // conflict on write of rev - OK to ignore and let pull replication resolve
- btcc.TB().Logf("conflict on write of rev %s / %v", change.docID, change.version)
- } else {
- body, err := revResp.Body()
- require.NoError(btcc.TB(), err)
- require.FailNow(btcc.TB(), fmt.Sprintf("error response from rev: %s %s : %s", revResp.Properties["Error-Domain"], revResp.Properties["Error-Code"], body))
- }
- }
+			require.NotContains(btcc.TB(), revResp.Properties, "Error-Domain", "unexpected error response from rev: %v", revResp)
base.DebugfCtx(ctx, base.KeySGTest, "peer acked rev %s / %v", change.docID, change.version)
btcc.updateLastReplicatedVersion(change.docID, change.version)
doc, ok = btcc.getClientDoc(change.docID)
- if !ok {
- btcc.TB().Errorf("doc %s not found in _seqFromDocID", change.docID)
- return
- }
+ require.True(btcc.TB(), ok, "docID %q not found in _seqFromDocID", change.docID)
doc.lock.Lock()
rev := doc._revisionsBySeq[doc._seqsByVersions[change.version]]
rev.message = revRequest
@@ -1473,7 +1437,7 @@ func (btc *BlipTesterCollectionClient) StartPullSince(options BlipTesterPullOpti
errorDomain := subChangesResponse.Properties["Error-Domain"]
errorCode := subChangesResponse.Properties["Error-Code"]
if errorDomain != "" && errorCode != "" {
- require.FailNowf(btc.TB(), "error %s %s from subChanges with body: %s", errorDomain, errorCode, string(rspBody))
+ require.FailNow(btc.TB(), fmt.Sprintf("error %s %s from subChanges with body: %s", errorDomain, errorCode, string(rspBody)))
}
}
@@ -1491,6 +1455,7 @@ func (btc *BlipTesterCollectionClient) StopPush() {
}
+// UnsubPullChanges will send an UnsubChanges message to the server to stop the pull replication. Fails test harness if Sync Gateway responds with an error.
func (btc *BlipTesterCollectionClient) UnsubPullChanges() {
unsubChangesRequest := blip.NewRequest()
unsubChangesRequest.SetProfile(db.MessageUnsubChanges)
@@ -1688,15 +1653,9 @@ func (btc *BlipTesterCollectionClient) PushRevWithHistory(docID string, parentVe
proposeChangesResponse := proposeChangesRequest.Response()
rspBody, err := proposeChangesResponse.Body()
require.NoError(btc.TB(), err)
-
- errorDomain := proposeChangesResponse.Properties["Error-Domain"]
- errorCode := proposeChangesResponse.Properties["Error-Code"]
- if errorDomain != "" && errorCode != "" {
- return nil, fmt.Errorf("error %s %s from proposeChanges with body: %s", errorDomain, errorCode, string(rspBody))
- }
- if string(rspBody) != `[]` {
- return nil, fmt.Errorf("unexpected body in proposeChangesResponse: %s", string(rspBody))
- }
+ require.NotContains(btc.TB(), proposeChangesResponse.Properties, "Error-Domain", "unexpected error response from proposeChanges: %v, %s", proposeChangesResponse, rspBody)
+ require.NotContains(btc.TB(), proposeChangesResponse.Properties, "Error-Code", "unexpected error response from proposeChanges: %v, %s", proposeChangesResponse, rspBody)
+ require.Equal(btc.TB(), "[]", string(rspBody))
// send msg rev with new doc
revRequest := blip.NewRequest()
@@ -1726,7 +1685,6 @@ func (btc *BlipTesterCollectionClient) PushRevWithHistory(docID string, parentVe
revResponse := revRequest.Response()
rspBody, err = revResponse.Body()
require.NoError(btc.TB(), err)
-
if revResponse.Type() == blip.ErrorType {
return nil, fmt.Errorf("error %s %s from revResponse: %s", revResponse.Properties["Error-Domain"], revResponse.Properties["Error-Code"], rspBody)
}
@@ -1735,6 +1693,10 @@ func (btc *BlipTesterCollectionClient) PushRevWithHistory(docID string, parentVe
return &newRev.version, nil
}
+func (btc *BlipTesterCollectionClient) StoreRevOnClient(docID string, parentVersion *DocVersion, body []byte) {
+ btc.upsertDoc(docID, parentVersion, body)
+}
+
func (btc *BlipTesterCollectionClient) ProcessInlineAttachments(inputBody []byte, revGen int) (outputBody []byte) {
if !bytes.Contains(inputBody, []byte(db.BodyAttachments)) {
return inputBody
@@ -1751,16 +1713,15 @@ func (btc *BlipTesterCollectionClient) ProcessInlineAttachments(inputBody []byte
inlineAttachmentMap := inlineAttachment.(map[string]interface{})
attachmentData, ok := inlineAttachmentMap["data"]
if !ok {
- if isStub, _ := inlineAttachmentMap["stub"].(bool); isStub {
- // push the stub as-is
- continue
- }
- require.FailNow(btc.TB(), "couldn't find data or stub property for inline attachment %s:%v", attachmentName, inlineAttachment)
+ isStub, _ := inlineAttachmentMap["stub"].(bool)
+			require.True(btc.TB(), isStub, "couldn't find data or stub property for inline attachment %q: %v", attachmentName, inlineAttachmentMap)
+ // push the stub as-is
+ continue
}
// Transform inline attachment data into metadata
data, ok := attachmentData.(string)
- require.True(btc.TB(), ok)
+ require.True(btc.TB(), ok, "inline attachment data was not a string, got %T", attachmentData)
length, digest := btc.saveAttachment(data)
@@ -1797,10 +1758,7 @@ func (btc *BlipTesterCollectionClient) GetVersion(docID string, docVersion DocVe
}
rev, ok := doc._revisionsBySeq[revSeq]
- if !ok {
- require.FailNow(btc.TB(), "seq %q for docID %q found but no rev in _seqStore", revSeq, docID)
- return nil, false
- }
+ require.True(btc.TB(), ok, "seq %q for docID %q found but no rev in _seqStore", revSeq, docID)
return rev.body, true
}
@@ -1828,7 +1786,7 @@ func (btc *BlipTesterClient) AssertOnBlipHistory(t *testing.T, msg *blip.Message
}
}
-// WaitForVersion blocks until the given document version has been stored by the client, and returns the data when found. The test will fail after 10 seocnds if a matching document is not found.
+// WaitForVersion blocks until the given document version has been stored by the client, and returns the data when found. The test will fail after 10 seconds if a matching document is not found.
func (btc *BlipTesterCollectionClient) WaitForVersion(docID string, docVersion DocVersion) (data []byte) {
if data, found := btc.GetVersion(docID, docVersion); found {
return data
@@ -1837,7 +1795,7 @@ func (btc *BlipTesterCollectionClient) WaitForVersion(docID string, docVersion D
var found bool
data, found = btc.GetVersion(docID, docVersion)
assert.True(c, found, "Could not find docID:%+v Version %+v", docID, docVersion)
- }, 10*time.Second, 50*time.Millisecond, "BlipTesterClient timed out waiting for doc %+v Version %+v", docID, docVersion)
+ }, 10*time.Second, 5*time.Millisecond, "BlipTesterClient timed out waiting for doc %+v Version %+v", docID, docVersion)
return data
}
@@ -1852,7 +1810,7 @@ func (btc *BlipTesterCollectionClient) WaitForDoc(docID string) (data []byte) {
var version *DocVersion
data, _, version = btc.GetDoc(docID)
assert.NotNil(c, version, "Could not find docID:%+v", docID)
- }, timeout, 50*time.Millisecond, "BlipTesterClient timed out waiting for doc %+v after %s", docID, timeout)
+ }, timeout, 5*time.Millisecond, "BlipTesterClient timed out waiting for doc %+v after %s", docID, timeout)
return data
}
@@ -1889,7 +1847,7 @@ func (btr *BlipTesterReplicator) WaitForMessage(serialNumber blip.MessageNumber)
var ok bool
msg, ok = btr.GetMessage(serialNumber)
assert.True(c, ok)
- }, 10*time.Second, 50*time.Millisecond, "BlipTesterReplicator timed out waiting for BLIP message: %v", serialNumber)
+ }, 10*time.Second, 5*time.Millisecond, "BlipTesterReplicator timed out waiting for BLIP message: %v", serialNumber)
return msg
}
@@ -1905,7 +1863,8 @@ func (btc *BlipTesterCollectionClient) WaitForBlipRevMessage(docID string, versi
var ok bool
msg, ok = btc.GetBlipRevMessage(docID, version)
assert.True(c, ok, "Could not find docID:%+v, Version: %+v", docID, version)
- }, 10*time.Second, 50*time.Millisecond, "BlipTesterClient timed out waiting for BLIP message docID: %v, Version: %v", docID, version)
+ }, 10*time.Second, 5*time.Millisecond, "BlipTesterClient timed out waiting for BLIP message docID: %v, Version: %v", docID, version)
+ require.NotNil(btc.TB(), msg)
return msg
}
@@ -1925,6 +1884,7 @@ func (btc *BlipTesterCollectionClient) GetBlipRevMessage(docID string, version D
defer doc.lock.RUnlock()
if seq, ok := doc._seqsByVersions[lookupVersion]; ok {
if rev, ok := doc._revisionsBySeq[seq]; ok {
+ require.NotNil(btc.TB(), rev.message, "rev.message is nil for docID:%+v, version: %+v", docID, version)
return rev.message, true
}
}
@@ -1994,11 +1954,15 @@ func (btcRunner *BlipTestClientRunner) GetVersion(clientID uint32, docID string,
return btcRunner.SingleCollection(clientID).GetVersion(docID, version)
}
-// saveAttachment takes base64 encoded data and stores the attachment on the client. Returns the length of the decoded data and the digest of the attachment.
+// saveAttachment takes base64 encoded data and stores the attachment on the client.
func (btcRunner *BlipTestClientRunner) saveAttachment(clientID uint32, attachmentData string) (int, string) {
return btcRunner.SingleCollection(clientID).saveAttachment(attachmentData)
}
+func (btcRunner *BlipTestClientRunner) StoreRevOnClient(clientID uint32, docID string, parentVersion *DocVersion, body []byte) {
+ btcRunner.SingleCollection(clientID).StoreRevOnClient(docID, parentVersion, body)
+}
+
func (btcRunner *BlipTestClientRunner) PushRevWithHistory(clientID uint32, docID string, parentVersion *DocVersion, body []byte, revCount, prunedRevCount int) (*DocVersion, error) {
return btcRunner.SingleCollection(clientID).PushRevWithHistory(docID, parentVersion, body, revCount, prunedRevCount)
}
@@ -2019,6 +1983,7 @@ func (btc *BlipTesterCollectionClient) Attachments() map[string][]byte {
return btc._attachments
}
+// UnsubPullChanges will send an UnsubChanges message to the server to stop the pull replication. Fails test harness if Sync Gateway responds with an error.
func (btcRunner *BlipTestClientRunner) UnsubPullChanges(clientID uint32) {
btcRunner.SingleCollection(clientID).UnsubPullChanges()
}
diff --git a/rest/utilities_testing_resttester.go b/rest/utilities_testing_resttester.go
index 8f5f562169..cde46c1c10 100644
--- a/rest/utilities_testing_resttester.go
+++ b/rest/utilities_testing_resttester.go
@@ -83,7 +83,7 @@ func (rt *RestTester) CreateTestDoc(docid string) DocVersion {
// PutDoc will upsert the document with a given contents.
func (rt *RestTester) PutDoc(docID string, body string) DocVersion {
- rawResponse := rt.SendAdminRequest("PUT", fmt.Sprintf("/%s/%s", rt.GetSingleKeyspace(), docID), body)
+ rawResponse := rt.SendAdminRequest("PUT", fmt.Sprintf("/%s/%s?show_cv=true", rt.GetSingleKeyspace(), docID), body)
RequireStatus(rt.TB(), rawResponse, 201)
return DocVersionFromPutResponse(rt.TB(), rawResponse)
}
@@ -131,11 +131,11 @@ func (rt *RestTester) GetDatabaseRoot(dbname string) DatabaseRoot {
// WaitForVersion retries a GET for a given document version until it returns 200 or 201 for a given document and revision. If version is not found, the test will fail.
func (rt *RestTester) WaitForVersion(docID string, version DocVersion) {
if version.RevTreeID == "" {
- require.NotEqualf(rt.TB(), "", version.CV.String(), "Expeted CV if RevTreeID is empty in WaitForVersion")
+		require.NotEqual(rt.TB(), "", version.CV.String(), "Expected CV if RevTreeID is empty in WaitForVersion")
}
require.EventuallyWithT(rt.TB(), func(c *assert.CollectT) {
rawResponse := rt.SendAdminRequest("GET", "/{{.keyspace}}/"+docID+"?show_cv=true", "")
- if !assert.Contains(c, []int{http.StatusOK, http.StatusCreated}, rawResponse.Code) {
+		if !assert.Contains(c, []int{http.StatusOK, http.StatusCreated}, rawResponse.Code, "Unexpected status code, body: %s", rawResponse.Body.String()) {
return
}
var body db.Body
diff --git a/ruleguard/logwrappederr.go b/ruleguard/rules-logwrappederr.go
similarity index 100%
rename from ruleguard/logwrappederr.go
rename to ruleguard/rules-logwrappederr.go
diff --git a/ruleguard/rules-requirefailnow.go b/ruleguard/rules-requirefailnow.go
new file mode 100644
index 0000000000..a21884b757
--- /dev/null
+++ b/ruleguard/rules-requirefailnow.go
@@ -0,0 +1,38 @@
+// Copyright 2025-Present Couchbase, Inc.
+//
+// Use of this software is governed by the Business Source License included
+// in the file licenses/BSL-Couchbase.txt. As of the Change Date specified
+// in that file, in accordance with the Business Source License, use of this
+// software will be governed by the Apache License, Version 2.0, included in
+// the file licenses/APL2.txt.
+
+//go:build ruleguard
+// +build ruleguard
+
+//nolint:unused // functions in here are invoked by ruleguard, but aren't imported/used by anything Go can detect.
+package ruleguard
+
+import (
+ "github.com/quasilyte/go-ruleguard/dsl"
+)
+
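+// failnow flags testify Fail/FailNow calls whose message argument contains
+// format verbs: testify treats that argument as a plain failure message, so
+// verbs there would never be expanded.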
+func failnow(m dsl.Matcher) {
+ m.Match(
+ `require.FailNow($t, $msg, $*_)`,
+ `require.FailNow($t, $msg)`,
+ `require.FailNowf($t, $msg)`,
+ `require.FailNowf($t, $msg, $*_)`,
+ `require.Fail($t, $msg)`,
+ `require.Fail($t, $msg, $*_)`,
+ `require.Failf($t, $msg)`,
+ `require.Failf($t, $msg, $*_)`,
+ `assert.FailNow($t, $msg, $*_)`,
+ `assert.FailNow($t, $msg)`,
+ `assert.FailNowf($t, $msg)`,
+ `assert.FailNowf($t, $msg, $*_)`,
+ `assert.Fail($t, $msg)`,
+ `assert.Fail($t, $msg, $*_)`,
+ `assert.Failf($t, $msg)`,
+ `assert.Failf($t, $msg, $*_)`,
+	).Where(m["msg"].Pure && m["msg"].Type.Is("string") && m["msg"].Text.Matches(".*%[A-Za-z]")).Report("second argument cannot contain format verbs starting with %; wrap this argument in fmt.Sprintf() if you want to use format verbs")
+}
diff --git a/topologytest/couchbase_lite_mock_peer_test.go b/topologytest/couchbase_lite_mock_peer_test.go
index 62b300a998..ef8cbe48c6 100644
--- a/topologytest/couchbase_lite_mock_peer_test.go
+++ b/topologytest/couchbase_lite_mock_peer_test.go
@@ -77,7 +77,7 @@ func (p *CouchbaseLiteMockPeer) getSingleSGBlipClient() *PeerBlipTesterClient {
for _, c := range p.blipClients {
return c
}
- require.Fail(p.TB(), "no blipClients found for %s", p)
+ require.Fail(p.TB(), fmt.Sprintf("no blipClients found for %s", p))
return nil
}
@@ -262,7 +262,7 @@ func (r *CouchbaseLiteMockReplication) Start() {
case PeerReplicationDirectionPull:
r.btcRunner.StartPull(r.btc.ID())
default:
- require.Fail(r.btc.TB(), "unsupported replication direction %q", r.direction)
+ require.Fail(r.btc.TB(), fmt.Sprintf("unsupported replication direction %q", r.direction))
}
}