diff --git a/pkg/mcs/scheduling/server/config/watcher.go b/pkg/mcs/scheduling/server/config/watcher.go index 04dad1fb3f5..18a568087e7 100644 --- a/pkg/mcs/scheduling/server/config/watcher.go +++ b/pkg/mcs/scheduling/server/config/watcher.go @@ -60,6 +60,7 @@ type Watcher struct { *PersistConfig // Some data, like the scheduler configs, should be loaded into the storage // to make sure the coordinator could access them correctly. + // It is a memory storage. storage storage.Storage // schedulersController is used to trigger the scheduler's config reloading. // Store as `*schedulers.Controller`. diff --git a/pkg/schedule/schedulers/balance_leader.go b/pkg/schedule/schedulers/balance_leader.go index 60dbee79dc4..7617f8e5b1d 100644 --- a/pkg/schedule/schedulers/balance_leader.go +++ b/pkg/schedule/schedulers/balance_leader.go @@ -33,7 +33,6 @@ import ( "github.com/tikv/pd/pkg/schedule/plan" "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/utils/reflectutil" - "github.com/tikv/pd/pkg/utils/syncutil" "github.com/tikv/pd/pkg/utils/typeutil" "github.com/unrolled/render" "go.uber.org/zap" @@ -51,32 +50,36 @@ const ( transferOut = "transfer-out" ) -type balanceLeaderSchedulerConfig struct { - syncutil.RWMutex - schedulerConfig - +type balanceLeaderSchedulerParam struct { Ranges []core.KeyRange `json:"ranges"` // Batch is used to generate multiple operators by one scheduling Batch int `json:"batch"` } +type balanceLeaderSchedulerConfig struct { + baseDefaultSchedulerConfig + balanceLeaderSchedulerParam +} + func (conf *balanceLeaderSchedulerConfig) update(data []byte) (int, any) { conf.Lock() defer conf.Unlock() - oldConfig, _ := json.Marshal(conf) + param := &conf.balanceLeaderSchedulerParam + oldConfig, _ := json.Marshal(param) - if err := json.Unmarshal(data, conf); err != nil { + if err := json.Unmarshal(data, param); err != nil { return http.StatusInternalServerError, err.Error() } - newConfig, _ := json.Marshal(conf) + newConfig, _ := json.Marshal(param) 
if !bytes.Equal(oldConfig, newConfig) { if !conf.validateLocked() { - if err := json.Unmarshal(oldConfig, conf); err != nil { + if err := json.Unmarshal(oldConfig, param); err != nil { return http.StatusInternalServerError, err.Error() } return http.StatusBadRequest, "invalid batch size which should be an integer between 1 and 10" } + conf.balanceLeaderSchedulerParam = *param if err := conf.save(); err != nil { log.Warn("failed to save balance-leader-scheduler config", errs.ZapError(err)) } @@ -87,23 +90,23 @@ func (conf *balanceLeaderSchedulerConfig) update(data []byte) (int, any) { if err := json.Unmarshal(data, &m); err != nil { return http.StatusInternalServerError, err.Error() } - ok := reflectutil.FindSameFieldByJSON(conf, m) + ok := reflectutil.FindSameFieldByJSON(param, m) if ok { return http.StatusOK, "Config is the same with origin, so do nothing." } return http.StatusBadRequest, "Config item is not found." } -func (conf *balanceLeaderSchedulerConfig) validateLocked() bool { +func (conf *balanceLeaderSchedulerParam) validateLocked() bool { return conf.Batch >= 1 && conf.Batch <= 10 } -func (conf *balanceLeaderSchedulerConfig) clone() *balanceLeaderSchedulerConfig { +func (conf *balanceLeaderSchedulerConfig) clone() *balanceLeaderSchedulerParam { conf.RLock() defer conf.RUnlock() ranges := make([]core.KeyRange, len(conf.Ranges)) copy(ranges, conf.Ranges) - return &balanceLeaderSchedulerConfig{ + return &balanceLeaderSchedulerParam{ Ranges: ranges, Batch: conf.Batch, } @@ -164,7 +167,7 @@ type balanceLeaderScheduler struct { // each store balanced. 
func newBalanceLeaderScheduler(opController *operator.Controller, conf *balanceLeaderSchedulerConfig, options ...BalanceLeaderCreateOption) Scheduler { s := &balanceLeaderScheduler{ - BaseScheduler: NewBaseScheduler(opController, types.BalanceLeaderScheduler), + BaseScheduler: NewBaseScheduler(opController, types.BalanceLeaderScheduler, conf), retryQuota: newRetryQuota(), conf: conf, handler: newBalanceLeaderHandler(conf), diff --git a/pkg/schedule/schedulers/balance_leader_test.go b/pkg/schedule/schedulers/balance_leader_test.go index f5af180bf7b..4aa8a7aca26 100644 --- a/pkg/schedule/schedulers/balance_leader_test.go +++ b/pkg/schedule/schedulers/balance_leader_test.go @@ -41,8 +41,10 @@ func TestBalanceLeaderSchedulerConfigClone(t *testing.T) { re := require.New(t) keyRanges1, _ := getKeyRanges([]string{"a", "b", "c", "d"}) conf := &balanceLeaderSchedulerConfig{ - Ranges: keyRanges1, - Batch: 10, + balanceLeaderSchedulerParam: balanceLeaderSchedulerParam{ + Ranges: keyRanges1, + Batch: 10, + }, } conf2 := conf.clone() re.Equal(conf.Batch, conf2.Batch) diff --git a/pkg/schedule/schedulers/balance_region.go b/pkg/schedule/schedulers/balance_region.go index 1ebe65d732c..31d7a0488bf 100644 --- a/pkg/schedule/schedulers/balance_region.go +++ b/pkg/schedule/schedulers/balance_region.go @@ -31,6 +31,8 @@ import ( ) type balanceRegionSchedulerConfig struct { + baseDefaultSchedulerConfig + Ranges []core.KeyRange `json:"ranges"` // TODO: When we prepare to use Ranges, we will need to implement the ReloadConfig function for this scheduler. } @@ -48,7 +50,7 @@ type balanceRegionScheduler struct { // each store balanced. 
func newBalanceRegionScheduler(opController *operator.Controller, conf *balanceRegionSchedulerConfig, opts ...BalanceRegionCreateOption) Scheduler { scheduler := &balanceRegionScheduler{ - BaseScheduler: NewBaseScheduler(opController, types.BalanceRegionScheduler), + BaseScheduler: NewBaseScheduler(opController, types.BalanceRegionScheduler, conf), retryQuota: newRetryQuota(), name: types.BalanceRegionScheduler.String(), conf: conf, diff --git a/pkg/schedule/schedulers/balance_witness.go b/pkg/schedule/schedulers/balance_witness.go index 47f23d470cc..6953c7f7634 100644 --- a/pkg/schedule/schedulers/balance_witness.go +++ b/pkg/schedule/schedulers/balance_witness.go @@ -162,7 +162,7 @@ type balanceWitnessScheduler struct { // each store balanced. func newBalanceWitnessScheduler(opController *operator.Controller, conf *balanceWitnessSchedulerConfig, options ...BalanceWitnessCreateOption) Scheduler { s := &balanceWitnessScheduler{ - BaseScheduler: NewBaseScheduler(opController, types.BalanceWitnessScheduler), + BaseScheduler: NewBaseScheduler(opController, types.BalanceWitnessScheduler, conf), retryQuota: newRetryQuota(), conf: conf, handler: newBalanceWitnessHandler(conf), diff --git a/pkg/schedule/schedulers/base_scheduler.go b/pkg/schedule/schedulers/base_scheduler.go index 6e160effea7..26042eaf023 100644 --- a/pkg/schedule/schedulers/base_scheduler.go +++ b/pkg/schedule/schedulers/base_scheduler.go @@ -65,11 +65,16 @@ type BaseScheduler struct { name string tp types.CheckerSchedulerType + conf schedulerConfig } // NewBaseScheduler returns a basic scheduler -func NewBaseScheduler(opController *operator.Controller, tp types.CheckerSchedulerType) *BaseScheduler { - return &BaseScheduler{OpController: opController, tp: tp} +func NewBaseScheduler( + opController *operator.Controller, + tp types.CheckerSchedulerType, + conf schedulerConfig, +) *BaseScheduler { + return &BaseScheduler{OpController: opController, tp: tp, conf: conf} } func (*BaseScheduler) ServeHTTP(w 
http.ResponseWriter, _ *http.Request) { @@ -114,3 +119,27 @@ func (s *BaseScheduler) GetName() string { func (s *BaseScheduler) GetType() types.CheckerSchedulerType { return s.tp } + +// IsDisable implements the Scheduler interface. +func (s *BaseScheduler) IsDisable() bool { + if conf, ok := s.conf.(defaultSchedulerConfig); ok { + return conf.isDisable() + } + return false +} + +// SetDisable implements the Scheduler interface. +func (s *BaseScheduler) SetDisable(disable bool) error { + if conf, ok := s.conf.(defaultSchedulerConfig); ok { + return conf.setDisable(disable) + } + return nil +} + +// IsDefault returns if the scheduler is a default scheduler. +func (s *BaseScheduler) IsDefault() bool { + if _, ok := s.conf.(defaultSchedulerConfig); ok { + return true + } + return false +} diff --git a/pkg/schedule/schedulers/config.go b/pkg/schedule/schedulers/config.go index 0c7caf686c3..78b123981fd 100644 --- a/pkg/schedule/schedulers/config.go +++ b/pkg/schedule/schedulers/config.go @@ -17,15 +17,21 @@ package schedulers import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" + "github.com/pingcap/log" + "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/storage/endpoint" + "github.com/tikv/pd/pkg/utils/syncutil" + "go.uber.org/zap" ) type schedulerConfig interface { + init(name string, storage endpoint.ConfigStorage, data any) save() error load(any) error - init(name string, storage endpoint.ConfigStorage, data any) } +var _ schedulerConfig = &baseSchedulerConfig{} + type baseSchedulerConfig struct { name string storage endpoint.ConfigStorage @@ -58,3 +64,42 @@ func (b *baseSchedulerConfig) load(v any) error { } return DecodeConfig([]byte(data), v) } + +// defaultSchedulerConfig is the interface to represent the default scheduler +// config. It is used in the BaseScheduler. 
+type defaultSchedulerConfig interface { + schedulerConfig + + isDisable() bool + setDisable(disabled bool) error +} + +type baseDefaultSchedulerConfig struct { + schedulerConfig + syncutil.RWMutex + + Disabled bool `json:"disabled"` +} + +func newBaseDefaultSchedulerConfig() baseDefaultSchedulerConfig { + return baseDefaultSchedulerConfig{ + schedulerConfig: &baseSchedulerConfig{}, + } +} + +func (b *baseDefaultSchedulerConfig) isDisable() bool { + b.Lock() + defer b.Unlock() + if err := b.load(b); err != nil { + log.Warn("failed to load scheduler config, maybe the config never persist", errs.ZapError(err)) + } + return b.Disabled +} + +func (b *baseDefaultSchedulerConfig) setDisable(disabled bool) error { + b.Lock() + defer b.Unlock() + b.Disabled = disabled + log.Info("set scheduler disable", zap.Bool("disabled", disabled)) + return b.save() +} diff --git a/pkg/schedule/schedulers/config_test.go b/pkg/schedule/schedulers/config_test.go index 31858bd7c10..9e20521854f 100644 --- a/pkg/schedule/schedulers/config_test.go +++ b/pkg/schedule/schedulers/config_test.go @@ -48,3 +48,37 @@ func TestSchedulerConfig(t *testing.T) { // report error because the config is empty and cannot be decoded require.Error(t, cfg2.load(newTc)) } + +func TestDefaultSchedulerConfig(t *testing.T) { + s := storage.NewStorageWithMemoryBackend() + + type testConfig struct { + balanceLeaderSchedulerConfig + Value string `json:"value"` + } + + cfg := &testConfig{ + balanceLeaderSchedulerConfig: balanceLeaderSchedulerConfig{ + baseDefaultSchedulerConfig: newBaseDefaultSchedulerConfig(), + }, + Value: "test", + } + cfg.init("test", s, cfg) + require.False(t, cfg.isDisable()) + require.NoError(t, cfg.setDisable(true)) + require.True(t, cfg.isDisable()) + + cfg2 := &testConfig{ + balanceLeaderSchedulerConfig: balanceLeaderSchedulerConfig{ + baseDefaultSchedulerConfig: newBaseDefaultSchedulerConfig(), + }, + } + cfg2.init("test", s, cfg2) + require.True(t, cfg2.isDisable()) + require.Equal(t, "", 
cfg2.Value) + + cfg3 := &testConfig{} + require.NoError(t, cfg2.load(cfg3)) + require.Equal(t, cfg.Value, cfg3.Value) + require.True(t, cfg3.Disabled) +} diff --git a/pkg/schedule/schedulers/evict_leader.go b/pkg/schedule/schedulers/evict_leader.go index a7d656a3e42..28309acddf5 100644 --- a/pkg/schedule/schedulers/evict_leader.go +++ b/pkg/schedule/schedulers/evict_leader.go @@ -231,7 +231,7 @@ type evictLeaderScheduler struct { func newEvictLeaderScheduler(opController *operator.Controller, conf *evictLeaderSchedulerConfig) Scheduler { handler := newEvictLeaderHandler(conf) return &evictLeaderScheduler{ - BaseScheduler: NewBaseScheduler(opController, types.EvictLeaderScheduler), + BaseScheduler: NewBaseScheduler(opController, types.EvictLeaderScheduler, conf), conf: conf, handler: handler, } diff --git a/pkg/schedule/schedulers/evict_slow_store.go b/pkg/schedule/schedulers/evict_slow_store.go index d23fc2f8ff8..8e50efb90dd 100644 --- a/pkg/schedule/schedulers/evict_slow_store.go +++ b/pkg/schedule/schedulers/evict_slow_store.go @@ -28,7 +28,6 @@ import ( "github.com/tikv/pd/pkg/schedule/plan" "github.com/tikv/pd/pkg/schedule/types" "github.com/tikv/pd/pkg/utils/apiutil" - "github.com/tikv/pd/pkg/utils/syncutil" "github.com/unrolled/render" "go.uber.org/zap" ) @@ -39,8 +38,7 @@ const ( ) type evictSlowStoreSchedulerConfig struct { - syncutil.RWMutex - schedulerConfig + baseDefaultSchedulerConfig cluster *core.BasicCluster // Last timestamp of the chosen slow store for eviction. 
@@ -52,10 +50,10 @@ type evictSlowStoreSchedulerConfig struct { func initEvictSlowStoreSchedulerConfig() *evictSlowStoreSchedulerConfig { return &evictSlowStoreSchedulerConfig{ - schedulerConfig: &baseSchedulerConfig{}, - lastSlowStoreCaptureTS: time.Time{}, - RecoveryDurationGap: defaultRecoveryDurationGap, - EvictedStores: make([]uint64, 0), + baseDefaultSchedulerConfig: newBaseDefaultSchedulerConfig(), + lastSlowStoreCaptureTS: time.Time{}, + RecoveryDurationGap: defaultRecoveryDurationGap, + EvictedStores: make([]uint64, 0), } } @@ -314,7 +312,7 @@ func (s *evictSlowStoreScheduler) Schedule(cluster sche.SchedulerCluster, _ bool func newEvictSlowStoreScheduler(opController *operator.Controller, conf *evictSlowStoreSchedulerConfig) Scheduler { handler := newEvictSlowStoreHandler(conf) return &evictSlowStoreScheduler{ - BaseScheduler: NewBaseScheduler(opController, types.EvictSlowStoreScheduler), + BaseScheduler: NewBaseScheduler(opController, types.EvictSlowStoreScheduler, conf), conf: conf, handler: handler, } diff --git a/pkg/schedule/schedulers/evict_slow_trend.go b/pkg/schedule/schedulers/evict_slow_trend.go index 8fd76bdccd4..6682b10dd35 100644 --- a/pkg/schedule/schedulers/evict_slow_trend.go +++ b/pkg/schedule/schedulers/evict_slow_trend.go @@ -46,35 +46,41 @@ type slowCandidate struct { recoverTS time.Time } +type evictSlowTrendSchedulerParam struct { + // Duration gap for recovering the candidate, unit: s. + RecoveryDurationGap uint64 `json:"recovery-duration"` + // Only evict one store for now + EvictedStores []uint64 `json:"evict-by-trend-stores"` +} + type evictSlowTrendSchedulerConfig struct { syncutil.RWMutex schedulerConfig + evictSlowTrendSchedulerParam cluster *core.BasicCluster // Candidate for eviction in current tick. evictCandidate slowCandidate // Last chosen candidate for eviction. lastEvictCandidate slowCandidate - // Duration gap for recovering the candidate, unit: s. 
- RecoveryDurationGap uint64 `json:"recovery-duration"` - // Only evict one store for now - EvictedStores []uint64 `json:"evict-by-trend-stores"` } func initEvictSlowTrendSchedulerConfig() *evictSlowTrendSchedulerConfig { return &evictSlowTrendSchedulerConfig{ - schedulerConfig: &baseSchedulerConfig{}, - evictCandidate: slowCandidate{}, - lastEvictCandidate: slowCandidate{}, - RecoveryDurationGap: defaultRecoveryDurationGap, - EvictedStores: make([]uint64, 0), + schedulerConfig: &baseSchedulerConfig{}, + evictCandidate: slowCandidate{}, + lastEvictCandidate: slowCandidate{}, + evictSlowTrendSchedulerParam: evictSlowTrendSchedulerParam{ + RecoveryDurationGap: defaultRecoveryDurationGap, + EvictedStores: make([]uint64, 0), + }, } } -func (conf *evictSlowTrendSchedulerConfig) clone() *evictSlowTrendSchedulerConfig { +func (conf *evictSlowTrendSchedulerConfig) clone() *evictSlowTrendSchedulerParam { conf.RLock() defer conf.RUnlock() - return &evictSlowTrendSchedulerConfig{ + return &evictSlowTrendSchedulerParam{ RecoveryDurationGap: conf.RecoveryDurationGap, } } @@ -435,7 +441,7 @@ func (s *evictSlowTrendScheduler) Schedule(cluster sche.SchedulerCluster, _ bool func newEvictSlowTrendScheduler(opController *operator.Controller, conf *evictSlowTrendSchedulerConfig) Scheduler { handler := newEvictSlowTrendHandler(conf) sche := &evictSlowTrendScheduler{ - BaseScheduler: NewBaseScheduler(opController, types.EvictSlowTrendScheduler), + BaseScheduler: NewBaseScheduler(opController, types.EvictSlowTrendScheduler, conf), conf: conf, handler: handler, } diff --git a/pkg/schedule/schedulers/grant_hot_region.go b/pkg/schedule/schedulers/grant_hot_region.go index 18402c14437..61bfb82162a 100644 --- a/pkg/schedule/schedulers/grant_hot_region.go +++ b/pkg/schedule/schedulers/grant_hot_region.go @@ -116,8 +116,8 @@ type grantHotRegionScheduler struct { // newGrantHotRegionScheduler creates an admin scheduler that transfers hot region peer to fixed store and hot region leader to one 
store. func newGrantHotRegionScheduler(opController *operator.Controller, conf *grantHotRegionSchedulerConfig) *grantHotRegionScheduler { - base := newBaseHotScheduler(opController, - statistics.DefaultHistorySampleDuration, statistics.DefaultHistorySampleInterval) + base := newBaseHotScheduler(opController, statistics.DefaultHistorySampleDuration, + statistics.DefaultHistorySampleInterval, conf) base.tp = types.GrantHotRegionScheduler handler := newGrantHotRegionHandler(conf) ret := &grantHotRegionScheduler{ diff --git a/pkg/schedule/schedulers/grant_leader.go b/pkg/schedule/schedulers/grant_leader.go index 5dbb6eef5f6..55b51a14cba 100644 --- a/pkg/schedule/schedulers/grant_leader.go +++ b/pkg/schedule/schedulers/grant_leader.go @@ -144,7 +144,7 @@ type grantLeaderScheduler struct { // newGrantLeaderScheduler creates an admin scheduler that transfers all leaders // to a store. func newGrantLeaderScheduler(opController *operator.Controller, conf *grantLeaderSchedulerConfig) Scheduler { - base := NewBaseScheduler(opController, types.GrantLeaderScheduler) + base := NewBaseScheduler(opController, types.GrantLeaderScheduler, conf) handler := newGrantLeaderHandler(conf) return &grantLeaderScheduler{ BaseScheduler: base, diff --git a/pkg/schedule/schedulers/hot_region.go b/pkg/schedule/schedulers/hot_region.go index 6506698b75c..e9e369b68d4 100644 --- a/pkg/schedule/schedulers/hot_region.go +++ b/pkg/schedule/schedulers/hot_region.go @@ -88,8 +88,12 @@ type baseHotScheduler struct { updateWriteTime time.Time } -func newBaseHotScheduler(opController *operator.Controller, sampleDuration time.Duration, sampleInterval time.Duration) *baseHotScheduler { - base := NewBaseScheduler(opController, types.BalanceHotRegionScheduler) +func newBaseHotScheduler( + opController *operator.Controller, + sampleDuration, sampleInterval time.Duration, + schedulerConfig schedulerConfig, +) *baseHotScheduler { + base := NewBaseScheduler(opController, types.BalanceHotRegionScheduler, 
schedulerConfig) ret := &baseHotScheduler{ BaseScheduler: base, regionPendings: make(map[uint64]*pendingInfluence), @@ -197,8 +201,8 @@ type hotScheduler struct { } func newHotScheduler(opController *operator.Controller, conf *hotRegionSchedulerConfig) *hotScheduler { - base := newBaseHotScheduler(opController, - conf.getHistorySampleDuration(), conf.getHistorySampleInterval()) + base := newBaseHotScheduler(opController, conf.getHistorySampleDuration(), + conf.getHistorySampleInterval(), conf) ret := &hotScheduler{ baseHotScheduler: base, conf: conf, diff --git a/pkg/schedule/schedulers/hot_region_config.go b/pkg/schedule/schedulers/hot_region_config.go index 0424a582bf4..df82ccc3afc 100644 --- a/pkg/schedule/schedulers/hot_region_config.go +++ b/pkg/schedule/schedulers/hot_region_config.go @@ -29,7 +29,6 @@ import ( "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/pkg/utils/reflectutil" - "github.com/tikv/pd/pkg/utils/syncutil" "github.com/tikv/pd/pkg/utils/typeutil" "github.com/tikv/pd/pkg/versioninfo" "github.com/unrolled/render" @@ -58,34 +57,36 @@ var compatiblePrioritiesConfig = prioritiesConfig{ // params about hot region. 
func initHotRegionScheduleConfig() *hotRegionSchedulerConfig { cfg := &hotRegionSchedulerConfig{ - schedulerConfig: &baseSchedulerConfig{}, - MinHotByteRate: 100, - MinHotKeyRate: 10, - MinHotQueryRate: 10, - MaxZombieRounds: 3, - MaxPeerNum: 1000, - ByteRateRankStepRatio: 0.05, - KeyRateRankStepRatio: 0.05, - QueryRateRankStepRatio: 0.05, - CountRankStepRatio: 0.01, - GreatDecRatio: 0.95, - MinorDecRatio: 0.99, - SrcToleranceRatio: 1.05, // Tolerate 5% difference - DstToleranceRatio: 1.05, // Tolerate 5% difference - StrictPickingStore: true, - EnableForTiFlash: true, - RankFormulaVersion: "v2", - ForbidRWType: "none", - SplitThresholds: 0.2, - HistorySampleDuration: typeutil.NewDuration(statistics.DefaultHistorySampleDuration), - HistorySampleInterval: typeutil.NewDuration(statistics.DefaultHistorySampleInterval), + baseDefaultSchedulerConfig: newBaseDefaultSchedulerConfig(), + hotRegionSchedulerParam: hotRegionSchedulerParam{ + MinHotByteRate: 100, + MinHotKeyRate: 10, + MinHotQueryRate: 10, + MaxZombieRounds: 3, + MaxPeerNum: 1000, + ByteRateRankStepRatio: 0.05, + KeyRateRankStepRatio: 0.05, + QueryRateRankStepRatio: 0.05, + CountRankStepRatio: 0.01, + GreatDecRatio: 0.95, + MinorDecRatio: 0.99, + SrcToleranceRatio: 1.05, // Tolerate 5% difference + DstToleranceRatio: 1.05, // Tolerate 5% difference + StrictPickingStore: true, + EnableForTiFlash: true, + RankFormulaVersion: "v2", + ForbidRWType: "none", + SplitThresholds: 0.2, + HistorySampleDuration: typeutil.NewDuration(statistics.DefaultHistorySampleDuration), + HistorySampleInterval: typeutil.NewDuration(statistics.DefaultHistorySampleInterval), + }, } cfg.applyPrioritiesConfig(defaultPrioritiesConfig) return cfg } -func (conf *hotRegionSchedulerConfig) getValidConf() *hotRegionSchedulerConfig { - return &hotRegionSchedulerConfig{ +func (conf *hotRegionSchedulerConfig) getValidConf() *hotRegionSchedulerParam { + return &hotRegionSchedulerParam{ MinHotByteRate: conf.MinHotByteRate, MinHotKeyRate: 
conf.MinHotKeyRate, MinHotQueryRate: conf.MinHotQueryRate, @@ -112,12 +113,7 @@ func (conf *hotRegionSchedulerConfig) getValidConf() *hotRegionSchedulerConfig { } } -type hotRegionSchedulerConfig struct { - syncutil.RWMutex - schedulerConfig - - lastQuerySupported bool - +type hotRegionSchedulerParam struct { MinHotByteRate float64 `json:"min-hot-byte-rate"` MinHotKeyRate float64 `json:"min-hot-key-rate"` MinHotQueryRate float64 `json:"min-hot-query-rate"` @@ -158,6 +154,13 @@ type hotRegionSchedulerConfig struct { HistorySampleInterval typeutil.Duration `json:"history-sample-interval"` } +type hotRegionSchedulerConfig struct { + baseDefaultSchedulerConfig + hotRegionSchedulerParam + + lastQuerySupported bool +} + func (conf *hotRegionSchedulerConfig) encodeConfig() ([]byte, error) { conf.RLock() defer conf.RUnlock() @@ -402,7 +405,7 @@ func isPriorityValid(priorities []string) (map[string]bool, error) { return priorityMap, nil } -func (conf *hotRegionSchedulerConfig) validateLocked() error { +func (conf *hotRegionSchedulerParam) validateLocked() error { if _, err := isPriorityValid(conf.ReadPriorities); err != nil { return err } @@ -433,7 +436,9 @@ func (conf *hotRegionSchedulerConfig) handleSetConfig(w http.ResponseWriter, r * conf.Lock() defer conf.Unlock() rd := render.New(render.Options{IndentJSON: true}) - oldc, _ := json.Marshal(conf) + + param := &conf.hotRegionSchedulerParam + oldc, _ := json.Marshal(param) data, err := io.ReadAll(r.Body) r.Body.Close() if err != nil { @@ -441,21 +446,22 @@ func (conf *hotRegionSchedulerConfig) handleSetConfig(w http.ResponseWriter, r * return } - if err := json.Unmarshal(data, conf); err != nil { + if err := json.Unmarshal(data, param); err != nil { rd.JSON(w, http.StatusInternalServerError, err.Error()) return } - if err := conf.validateLocked(); err != nil { + if err := param.validateLocked(); err != nil { // revert to old version - if err2 := json.Unmarshal(oldc, conf); err2 != nil { + if err2 := json.Unmarshal(oldc, 
param); err2 != nil { rd.JSON(w, http.StatusInternalServerError, err2.Error()) } else { rd.JSON(w, http.StatusBadRequest, err.Error()) } return } - newc, _ := json.Marshal(conf) + newc, _ := json.Marshal(param) if !bytes.Equal(oldc, newc) { + conf.hotRegionSchedulerParam = *param if err := conf.save(); err != nil { log.Warn("failed to persist config", zap.Error(err)) } @@ -469,7 +475,7 @@ func (conf *hotRegionSchedulerConfig) handleSetConfig(w http.ResponseWriter, r * rd.JSON(w, http.StatusInternalServerError, err.Error()) return } - ok := reflectutil.FindSameFieldByJSON(conf, m) + ok := reflectutil.FindSameFieldByJSON(param, m) if ok { rd.Text(w, http.StatusOK, "Config is the same with origin, so do nothing.") return diff --git a/pkg/schedule/schedulers/init.go b/pkg/schedule/schedulers/init.go index e3101d6788b..f124182ece3 100644 --- a/pkg/schedule/schedulers/init.go +++ b/pkg/schedule/schedulers/init.go @@ -56,7 +56,7 @@ func schedulersRegister() { RegisterScheduler(types.BalanceLeaderScheduler, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { conf := &balanceLeaderSchedulerConfig{ - schedulerConfig: &baseSchedulerConfig{}, + baseDefaultSchedulerConfig: newBaseDefaultSchedulerConfig(), } if err := decoder(conf); err != nil { return nil, err @@ -86,12 +86,16 @@ func schedulersRegister() { }) RegisterScheduler(types.BalanceRegionScheduler, func(opController *operator.Controller, - _ endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { - conf := &balanceRegionSchedulerConfig{} + storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { + conf := &balanceRegionSchedulerConfig{ + baseDefaultSchedulerConfig: newBaseDefaultSchedulerConfig(), + } if err := decoder(conf); err != nil { return nil, err } - return newBalanceRegionScheduler(opController, conf), nil + sche := 
newBalanceRegionScheduler(opController, conf) + conf.init(sche.GetName(), storage, conf) + return sche, nil }) // balance witness @@ -321,12 +325,16 @@ func schedulersRegister() { }) RegisterScheduler(types.LabelScheduler, func(opController *operator.Controller, - _ endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { - conf := &labelSchedulerConfig{} + storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { + conf := &labelSchedulerConfig{ + schedulerConfig: &baseSchedulerConfig{}, + } if err := decoder(conf); err != nil { return nil, err } - return newLabelScheduler(opController, conf), nil + sche := newLabelScheduler(opController, conf) + conf.init(sche.GetName(), storage, conf) + return sche, nil }) // random merge @@ -346,12 +354,16 @@ func schedulersRegister() { }) RegisterScheduler(types.RandomMergeScheduler, func(opController *operator.Controller, - _ endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { - conf := &randomMergeSchedulerConfig{} + storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { + conf := &randomMergeSchedulerConfig{ + schedulerConfig: &baseSchedulerConfig{}, + } if err := decoder(conf); err != nil { return nil, err } - return newRandomMergeScheduler(opController, conf), nil + sche := newRandomMergeScheduler(opController, conf) + conf.init(sche.GetName(), storage, conf) + return sche, nil }) // scatter range @@ -442,12 +454,16 @@ func schedulersRegister() { }) RegisterScheduler(types.ShuffleLeaderScheduler, func(opController *operator.Controller, - _ endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { - conf := &shuffleLeaderSchedulerConfig{} + storage endpoint.ConfigStorage, decoder ConfigDecoder, _ ...func(string) error) (Scheduler, error) { + conf := &shuffleLeaderSchedulerConfig{ + schedulerConfig: &baseSchedulerConfig{}, + } 
if err := decoder(conf); err != nil { return nil, err } - return newShuffleLeaderScheduler(opController, conf), nil + sche := newShuffleLeaderScheduler(opController, conf) + conf.init(sche.GetName(), storage, conf) + return sche, nil }) // shuffle region @@ -506,8 +522,11 @@ func schedulersRegister() { }) RegisterScheduler(types.TransferWitnessLeaderScheduler, func(opController *operator.Controller, - _ endpoint.ConfigStorage, _ ConfigDecoder, _ ...func(string) error) (Scheduler, error) { - return newTransferWitnessLeaderScheduler(opController), nil + storage endpoint.ConfigStorage, _ ConfigDecoder, _ ...func(string) error) (Scheduler, error) { + conf := &baseSchedulerConfig{} + sche := newTransferWitnessLeaderScheduler(opController, conf) + conf.init(sche.GetName(), storage, conf) + return sche, nil }) // evict slow store by trend diff --git a/pkg/schedule/schedulers/label.go b/pkg/schedule/schedulers/label.go index d1a06a5c4ff..5ba3ad962fc 100644 --- a/pkg/schedule/schedulers/label.go +++ b/pkg/schedule/schedulers/label.go @@ -29,6 +29,8 @@ import ( ) type labelSchedulerConfig struct { + schedulerConfig + Ranges []core.KeyRange `json:"ranges"` // TODO: When we prepare to use Ranges, we will need to implement the ReloadConfig function for this scheduler. } @@ -43,7 +45,7 @@ type labelScheduler struct { // the store with the specific label. 
func newLabelScheduler(opController *operator.Controller, conf *labelSchedulerConfig) Scheduler { return &labelScheduler{ - BaseScheduler: NewBaseScheduler(opController, types.LabelScheduler), + BaseScheduler: NewBaseScheduler(opController, types.LabelScheduler, conf), conf: conf, } } diff --git a/pkg/schedule/schedulers/random_merge.go b/pkg/schedule/schedulers/random_merge.go index f6660472f57..50ff6175ca0 100644 --- a/pkg/schedule/schedulers/random_merge.go +++ b/pkg/schedule/schedulers/random_merge.go @@ -30,6 +30,8 @@ import ( ) type randomMergeSchedulerConfig struct { + schedulerConfig + Ranges []core.KeyRange `json:"ranges"` // TODO: When we prepare to use Ranges, we will need to implement the ReloadConfig function for this scheduler. } @@ -42,7 +44,7 @@ type randomMergeScheduler struct { // newRandomMergeScheduler creates an admin scheduler that randomly picks two adjacent regions // then merges them. func newRandomMergeScheduler(opController *operator.Controller, conf *randomMergeSchedulerConfig) Scheduler { - base := NewBaseScheduler(opController, types.RandomMergeScheduler) + base := NewBaseScheduler(opController, types.RandomMergeScheduler, conf) return &randomMergeScheduler{ BaseScheduler: base, conf: conf, diff --git a/pkg/schedule/schedulers/scatter_range.go b/pkg/schedule/schedulers/scatter_range.go index 37f00d2df6e..253675859b7 100644 --- a/pkg/schedule/schedulers/scatter_range.go +++ b/pkg/schedule/schedulers/scatter_range.go @@ -103,7 +103,7 @@ type scatterRangeScheduler struct { // newScatterRangeScheduler creates a scheduler that balances the distribution of leaders and regions that in the specified key range. 
func newScatterRangeScheduler(opController *operator.Controller, config *scatterRangeSchedulerConfig) Scheduler { - base := NewBaseScheduler(opController, types.ScatterRangeScheduler) + base := NewBaseScheduler(opController, types.ScatterRangeScheduler, config) handler := newScatterRangeHandler(config) scheduler := &scatterRangeScheduler{ @@ -112,7 +112,10 @@ func newScatterRangeScheduler(opController *operator.Controller, config *scatter handler: handler, balanceLeader: newBalanceLeaderScheduler( opController, - &balanceLeaderSchedulerConfig{Ranges: []core.KeyRange{core.NewKeyRange("", "")}}, + &balanceLeaderSchedulerConfig{ + balanceLeaderSchedulerParam: balanceLeaderSchedulerParam{ + Ranges: []core.KeyRange{core.NewKeyRange("", "")}}, + }, // the name will not be persisted WithBalanceLeaderName("scatter-range-leader"), ), diff --git a/pkg/schedule/schedulers/scheduler.go b/pkg/schedule/schedulers/scheduler.go index 27be70680af..ac0a52fc977 100644 --- a/pkg/schedule/schedulers/scheduler.go +++ b/pkg/schedule/schedulers/scheduler.go @@ -47,6 +47,16 @@ type Scheduler interface { CleanConfig(cluster sche.SchedulerCluster) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) IsScheduleAllowed(cluster sche.SchedulerCluster) bool + // IsDisable returns if the scheduler is disabled, it only works for default schedulers. + // - BalanceRegionScheduler + // - BalanceLeaderScheduler + // - BalanceHotRegionScheduler + // - EvictSlowStoreScheduler + IsDisable() bool + // SetDisable sets the scheduler's disable status, it only works for default schedulers. + SetDisable(bool) error + // IsDefault returns if the scheduler is a default scheduler. + IsDefault() bool } // EncodeConfig encode the custom config for each scheduler. 
diff --git a/pkg/schedule/schedulers/scheduler_controller.go b/pkg/schedule/schedulers/scheduler_controller.go index 5e1082acee3..cb4ffd6f9c2 100644 --- a/pkg/schedule/schedulers/scheduler_controller.go +++ b/pkg/schedule/schedulers/scheduler_controller.go @@ -149,13 +149,17 @@ func (c *Controller) AddSchedulerHandler(scheduler Scheduler, args ...string) er } c.schedulerHandlers[name] = scheduler + if err := scheduler.SetDisable(false); err != nil { + log.Error("can not update scheduler status", zap.String("scheduler-name", name), + errs.ZapError(err)) + // No need to return here, we still use the scheduler config to control the `disable` now. + } if err := SaveSchedulerConfig(c.storage, scheduler); err != nil { log.Error("can not save HTTP scheduler config", zap.String("scheduler-name", scheduler.GetName()), errs.ZapError(err)) return err } c.cluster.GetSchedulerConfig().AddSchedulerCfg(scheduler.GetType(), args) - err := scheduler.PrepareConfig(c.cluster) - return err + return scheduler.PrepareConfig(c.cluster) } // RemoveSchedulerHandler removes the HTTP handler for a scheduler. @@ -177,6 +181,11 @@ func (c *Controller) RemoveSchedulerHandler(name string) error { return err } + // nolint:errcheck + // SetDisable will not work now, because the config is removed. We can't + // remove the config in the future, if we want to use `Disable` of independent + // config. 
+ _ = s.(Scheduler).SetDisable(true) if err := c.storage.RemoveSchedulerConfig(name); err != nil { log.Error("can not remove the scheduler config", errs.ZapError(err)) return err @@ -184,7 +193,6 @@ func (c *Controller) RemoveSchedulerHandler(name string) error { s.(Scheduler).CleanConfig(c.cluster) delete(c.schedulerHandlers, name) - return nil } @@ -193,7 +201,8 @@ func (c *Controller) AddScheduler(scheduler Scheduler, args ...string) error { c.Lock() defer c.Unlock() - if _, ok := c.schedulers[scheduler.GetName()]; ok { + name := scheduler.GetName() + if _, ok := c.schedulers[name]; ok { return errs.ErrSchedulerExisted.FastGenByArgs() } @@ -205,6 +214,11 @@ func (c *Controller) AddScheduler(scheduler Scheduler, args ...string) error { c.wg.Add(1) go c.runScheduler(s) c.schedulers[s.Scheduler.GetName()] = s + if err := scheduler.SetDisable(false); err != nil { + log.Error("can not update scheduler status", zap.String("scheduler-name", name), + errs.ZapError(err)) + // No need to return here, we still use the scheduler config to control the `disable` now. + } if err := SaveSchedulerConfig(c.storage, scheduler); err != nil { log.Error("can not save scheduler config", zap.String("scheduler-name", scheduler.GetName()), errs.ZapError(err)) return err @@ -232,6 +246,11 @@ func (c *Controller) RemoveScheduler(name string) error { return err } + // nolint:errcheck + // SetDisable will not work now, because the config is removed. We can't + // remove the config in the future, if we want to use `Disable` of independent + // config. 
+ _ = s.SetDisable(true) if err := c.storage.RemoveSchedulerConfig(name); err != nil { log.Error("can not remove the scheduler config", errs.ZapError(err)) return err @@ -240,7 +259,6 @@ func (c *Controller) RemoveScheduler(name string) error { s.Stop() schedulerStatusGauge.DeleteLabelValues(name, "allow") delete(c.schedulers, name) - return nil } diff --git a/pkg/schedule/schedulers/scheduler_test.go b/pkg/schedule/schedulers/scheduler_test.go index ba734230ea5..464abdcacc9 100644 --- a/pkg/schedule/schedulers/scheduler_test.go +++ b/pkg/schedule/schedulers/scheduler_test.go @@ -16,6 +16,7 @@ package schedulers import ( "context" + "slices" "testing" "github.com/docker/go-units" @@ -505,3 +506,52 @@ func TestBalanceLeaderWithConflictRule(t *testing.T) { } } } + +func testDecoder(v any) error { + conf, ok := v.(*scatterRangeSchedulerConfig) + if ok { + conf.RangeName = "test" + } + return nil +} + +func TestIsDefault(t *testing.T) { + re := require.New(t) + cancel, _, _, oc := prepareSchedulersTest() + defer cancel() + + for schedulerType := range types.SchedulerTypeCompatibleMap { + bs, err := CreateScheduler(schedulerType, oc, + storage.NewStorageWithMemoryBackend(), + testDecoder, + func(string) error { return nil }) + re.NoError(err) + if slices.Contains(types.DefaultSchedulers, schedulerType) { + re.True(bs.IsDefault()) + } else { + re.False(bs.IsDefault()) + } + } +} + +func TestDisabled(t *testing.T) { + re := require.New(t) + cancel, _, _, oc := prepareSchedulersTest() + defer cancel() + + s := storage.NewStorageWithMemoryBackend() + for _, schedulerType := range types.DefaultSchedulers { + bs, err := CreateScheduler(schedulerType, oc, s, testDecoder, + func(string) error { return nil }) + re.NoError(err) + re.False(bs.IsDisable()) + re.NoError(bs.SetDisable(true)) + re.True(bs.IsDisable()) + + // test ms scheduling server, another server + scheduling, err := CreateScheduler(schedulerType, oc, s, testDecoder, + func(string) error { return nil }) + 
re.NoError(err) + re.True(scheduling.IsDisable()) + } +} diff --git a/pkg/schedule/schedulers/shuffle_hot_region.go b/pkg/schedule/schedulers/shuffle_hot_region.go index b8818dc48da..7517abb3c21 100644 --- a/pkg/schedule/schedulers/shuffle_hot_region.go +++ b/pkg/schedule/schedulers/shuffle_hot_region.go @@ -67,8 +67,8 @@ type shuffleHotRegionScheduler struct { // newShuffleHotRegionScheduler creates an admin scheduler that random balance hot regions func newShuffleHotRegionScheduler(opController *operator.Controller, conf *shuffleHotRegionSchedulerConfig) Scheduler { - base := newBaseHotScheduler(opController, - statistics.DefaultHistorySampleDuration, statistics.DefaultHistorySampleInterval) + base := newBaseHotScheduler(opController, statistics.DefaultHistorySampleDuration, + statistics.DefaultHistorySampleInterval, conf) base.tp = types.ShuffleHotRegionScheduler handler := newShuffleHotRegionHandler(conf) ret := &shuffleHotRegionScheduler{ diff --git a/pkg/schedule/schedulers/shuffle_leader.go b/pkg/schedule/schedulers/shuffle_leader.go index 842a26d9b12..71f44e49fbb 100644 --- a/pkg/schedule/schedulers/shuffle_leader.go +++ b/pkg/schedule/schedulers/shuffle_leader.go @@ -27,6 +27,8 @@ import ( ) type shuffleLeaderSchedulerConfig struct { + schedulerConfig + Ranges []core.KeyRange `json:"ranges"` // TODO: When we prepare to use Ranges, we will need to implement the ReloadConfig function for this scheduler. } @@ -40,7 +42,7 @@ type shuffleLeaderScheduler struct { // newShuffleLeaderScheduler creates an admin scheduler that shuffles leaders // between stores. 
func newShuffleLeaderScheduler(opController *operator.Controller, conf *shuffleLeaderSchedulerConfig) Scheduler { - base := NewBaseScheduler(opController, types.ShuffleLeaderScheduler) + base := NewBaseScheduler(opController, types.ShuffleLeaderScheduler, conf) filters := []filter.Filter{ &filter.StoreStateFilter{ActionScope: base.GetName(), TransferLeader: true, OperatorLevel: constant.Low}, filter.NewSpecialUseFilter(base.GetName()), diff --git a/pkg/schedule/schedulers/shuffle_region.go b/pkg/schedule/schedulers/shuffle_region.go index a4c247d3363..33eea1d638c 100644 --- a/pkg/schedule/schedulers/shuffle_region.go +++ b/pkg/schedule/schedulers/shuffle_region.go @@ -36,7 +36,7 @@ type shuffleRegionScheduler struct { // newShuffleRegionScheduler creates an admin scheduler that shuffles regions // between stores. func newShuffleRegionScheduler(opController *operator.Controller, conf *shuffleRegionSchedulerConfig) Scheduler { - base := NewBaseScheduler(opController, types.ShuffleRegionScheduler) + base := NewBaseScheduler(opController, types.ShuffleRegionScheduler, conf) filters := []filter.Filter{ &filter.StoreStateFilter{ActionScope: base.GetName(), MoveRegion: true, OperatorLevel: constant.Low}, filter.NewSpecialUseFilter(base.GetName()), diff --git a/pkg/schedule/schedulers/split_bucket.go b/pkg/schedule/schedulers/split_bucket.go index a0881ae1a34..edbe2ac3545 100644 --- a/pkg/schedule/schedulers/split_bucket.go +++ b/pkg/schedule/schedulers/split_bucket.go @@ -143,7 +143,7 @@ func newSplitBucketHandler(conf *splitBucketSchedulerConfig) http.Handler { } func newSplitBucketScheduler(opController *operator.Controller, conf *splitBucketSchedulerConfig) *splitBucketScheduler { - base := NewBaseScheduler(opController, types.SplitBucketScheduler) + base := NewBaseScheduler(opController, types.SplitBucketScheduler, conf) handler := newSplitBucketHandler(conf) ret := &splitBucketScheduler{ BaseScheduler: base, diff --git 
a/pkg/schedule/schedulers/transfer_witness_leader.go b/pkg/schedule/schedulers/transfer_witness_leader.go index 2ef0fc6a4f2..52cd875719c 100644 --- a/pkg/schedule/schedulers/transfer_witness_leader.go +++ b/pkg/schedule/schedulers/transfer_witness_leader.go @@ -43,9 +43,9 @@ type transferWitnessLeaderScheduler struct { } // newTransferWitnessLeaderScheduler creates an admin scheduler that transfers witness leader of a region. -func newTransferWitnessLeaderScheduler(opController *operator.Controller) Scheduler { +func newTransferWitnessLeaderScheduler(opController *operator.Controller, conf schedulerConfig) Scheduler { return &transferWitnessLeaderScheduler{ - BaseScheduler: NewBaseScheduler(opController, types.TransferWitnessLeaderScheduler), + BaseScheduler: NewBaseScheduler(opController, types.TransferWitnessLeaderScheduler, conf), regions: make(chan *core.RegionInfo, transferWitnessLeaderRecvMaxRegionSize), } } diff --git a/pkg/schedule/types/type.go b/pkg/schedule/types/type.go index b7e0b26482e..09491c5c1c4 100644 --- a/pkg/schedule/types/type.go +++ b/pkg/schedule/types/type.go @@ -144,4 +144,17 @@ var ( "transfer-witness-leader-scheduler": TransferWitnessLeaderScheduler, "label-scheduler": LabelScheduler, } + + // DefaultSchedulers is the default scheduler types. + // If you want to add a new scheduler, please + // 1. add it to the list + // 2. change the `schedulerConfig` interface to the `baseDefaultSchedulerConfig` + // structure in related `xxxxSchedulerConfig` + // 3. 
remove `syncutil.RWMutex` from related `xxxxSchedulerConfig` + DefaultSchedulers = []CheckerSchedulerType{ + BalanceLeaderScheduler, + BalanceRegionScheduler, + BalanceHotRegionScheduler, + EvictSlowStoreScheduler, + } ) diff --git a/pkg/storage/kv/mem_kv.go b/pkg/storage/kv/mem_kv.go index b97a3d6cfa1..dc24ab3e0f6 100644 --- a/pkg/storage/kv/mem_kv.go +++ b/pkg/storage/kv/mem_kv.go @@ -28,7 +28,7 @@ type memoryKV struct { tree *btree.BTreeG[memoryKVItem] } -// NewMemoryKV returns an in-memory kvBase for testing. +// NewMemoryKV returns an in-memory kvBase. func NewMemoryKV() Base { return &memoryKV{ tree: btree.NewG(2, func(i, j memoryKVItem) bool { diff --git a/plugin/scheduler_example/evict_leader.go b/plugin/scheduler_example/evict_leader.go index c20cfd41814..ff3cd861973 100644 --- a/plugin/scheduler_example/evict_leader.go +++ b/plugin/scheduler_example/evict_leader.go @@ -158,7 +158,7 @@ type evictLeaderScheduler struct { // newEvictLeaderScheduler creates an admin scheduler that transfers all leaders // out of a store. 
func newEvictLeaderScheduler(opController *operator.Controller, conf *evictLeaderSchedulerConfig) schedulers.Scheduler { - base := schedulers.NewBaseScheduler(opController, userEvictLeaderScheduler) + base := schedulers.NewBaseScheduler(opController, userEvictLeaderScheduler, nil) handler := newEvictLeaderHandler(conf) return &evictLeaderScheduler{ BaseScheduler: base, diff --git a/tests/server/api/scheduler_test.go b/tests/server/api/scheduler_test.go index 330a69eca63..b989298ee39 100644 --- a/tests/server/api/scheduler_test.go +++ b/tests/server/api/scheduler_test.go @@ -260,6 +260,7 @@ func (suite *scheduleTestSuite) checkAPI(cluster *tests.TestCluster) { re.Equal(len(expectMap), len(resp), "expect %v, got %v", expectMap, resp) for key := range expectMap { if !reflect.DeepEqual(resp[key], expectMap[key]) { + suite.T().Logf("key: %s, expect: %v, got: %v", key, expectMap[key], resp[key]) return false } } @@ -624,6 +625,16 @@ func (suite *scheduleTestSuite) checkAPI(cluster *tests.TestCluster) { deleteScheduler(re, urlPrefix, createdName) assertNoScheduler(re, urlPrefix, createdName) } + + // revert remove + for _, sche := range types.DefaultSchedulers { + input := make(map[string]any) + input["name"] = sche.String() + body, err := json.Marshal(input) + re.NoError(err) + addScheduler(re, urlPrefix, body) + suite.assertSchedulerExists(urlPrefix, sche.String()) + } } func (suite *scheduleTestSuite) TestDisable() { diff --git a/tools/pd-ctl/tests/scheduler/scheduler_test.go b/tools/pd-ctl/tests/scheduler/scheduler_test.go index f680a4bd2e7..0f14c48f091 100644 --- a/tools/pd-ctl/tests/scheduler/scheduler_test.go +++ b/tools/pd-ctl/tests/scheduler/scheduler_test.go @@ -661,6 +661,8 @@ func (suite *schedulerTestSuite) checkHotRegionSchedulerConfig(cluster *pdTests. 
re.Contains(echo, "Success!") expected1["src-tolerance-ratio"] = 1.02 checkHotSchedulerConfig(expected1) + echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "disabled", "true"}, nil) + re.Contains(echo, "Failed!") echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "set", "read-priorities", "byte,key"}, nil) re.Contains(echo, "Success!")